# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layers_with_attention."""
from absl.testing import parameterized
import lingvo.compat as tf
from lingvo.core import gshard_builder
from lingvo.core import layers
from lingvo.core import layers_with_attention
from lingvo.core import py_utils
from lingvo.core import symbolic
from lingvo.core import test_utils
from lingvo.core.test_utils import CompareToGoldenSingleFloat
import numpy as np
class LayersWithAttentionTest(test_utils.TestCase, parameterized.TestCase):
def testTransformerFeedForwardLayerConstruction(self):
p = layers_with_attention.TransformerFeedForwardLayer.Params()
p.name = 'transformer_fflayer_1'
p.input_dim = 3
p.hidden_dim = 7
transformer_fflayer = layers_with_attention.TransformerFeedForwardLayer(p)
self.assertEqual(0, p.output_dim)
# output_dim = p.input_dim when p.output_dim is zero.
self.assertEqual(p.input_dim, transformer_fflayer.output_dim)
    # output_dim equals p.output_dim when p.output_dim is non-zero.
p.output_dim = 10
p.name = 'transformer_fflayer_2'
transformer_fflayer = p.Instantiate()
self.assertEqual(p.output_dim, transformer_fflayer.output_dim)
def testTransformerFeedForwardLayer(self):
with self.session(use_gpu=True):
tf.random.set_seed(3980847392)
inputs = tf.random.normal([5, 2, 3], seed=948387483)
paddings = tf.zeros([5, 2])
p = layers_with_attention.TransformerFeedForwardLayer.Params()
p.name = 'transformer_fflayer'
p.input_dim = 3
p.hidden_dim = 7
transformer_fflayer = layers_with_attention.TransformerFeedForwardLayer(p)
h = transformer_fflayer.FPropDefaultTheta(inputs, paddings)
self.evaluate(tf.global_variables_initializer())
actual_layer_output = self.evaluate(h)
# pylint: disable=bad-whitespace
# pyformat: disable
expected_output = [
[[-0.88366592, -0.05049637, 0.01003706],
[-0.10550675, 1.68050027, 2.29110384]],
[[-1.30083609, -0.40521634, 0.1911681 ],
[ 1.2597878 , 1.45850968, 1.58734488]],
[[ 0.10373873, -0.2716777 , 0.2314173 ],
[ 0.46293864, -0.06359965, 1.20189023]],
[[ 0.3673597 , -0.1691664 , 0.78656065],
[-1.51081395, -0.70281881, -0.9093715 ]],
[[-1.04800868, -0.70610946, -0.35321558],
[-1.92480004, 0.08361804, 0.62713993]]]
# pyformat: enable
# pylint: enable=bad-whitespace
print(np.array_repr(actual_layer_output))
self.assertAllClose(actual_layer_output, expected_output)
@parameterized.named_parameters(('_3D', 3), ('_4D', 4))
def testReshapedTransformerFeedForwardLayer(self, rank):
with self.session(use_gpu=True):
tf.random.set_seed(3980847392)
input_dim = 6
if rank == 3:
dims = [input_dim]
else:
self.assertEqual(rank, 4)
dims = [2, input_dim // 2]
shape = [5, 2] + dims
inputs = tf.random.normal(shape, seed=948387483)
paddings = tf.zeros([5, 2])
p = layers_with_attention.ReshapedTransformerFeedForwardLayer.Params()
p.name = 'reshaped_transformer_fflayer'
p.input_dim = input_dim
p.hidden_dim = 7
p.fflayer_tpl.weight_split_dims_mapping_list = [[-1, -1], [-1, -1]]
p.fflayer_tpl.activation_split_dims_mapping_list = [[-1, -1], [-1, -1]]
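      # Lay the four devices out as a 2x2 mesh for the sharding annotations.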
p.device_mesh = np.reshape(np.arange(4), [2, 2])
l = p.Instantiate()
outputs = l.FPropDefaultTheta(inputs, paddings)
self.evaluate(tf.global_variables_initializer())
outputs = self.evaluate(outputs)
self.assertAllClose(outputs.shape, inputs.shape)
def testHybridFeedforwardLayer(self):
with self.session(use_gpu=True):
tf.random.set_seed(3980847392)
inputs = tf.random.normal([5, 2, 3], seed=948387483)
paddings = tf.zeros([5, 2])
symbol_sub_key = symbolic.Symbol('sub_key')
      # Create a basic fflayer.
fflayer_p = (layers_with_attention.TransformerFeedForwardLayer.Params())
fflayer_p.name = 'fflayer'
fflayer_p.input_dim = 3
fflayer_p.hidden_dim = 7
      # Create a MoE layer.
moe_p = layers_with_attention.MoEFeedforwardLayer.Params()
moe_p.name = 'moe'
moe_p.moe_builder_p = gshard_builder.MoEBuilder.Params().Set(
model_dim=3,
num_devices=2,
num_groups=2,
e_dim=2,
c_dim=4,
moe_hidden_dim=7)
      # Create a hybrid layer.
hybrid_p = layers_with_attention.HybridFeedforwardLayer.Params()
hybrid_p.name = 'hybrid'
hybrid_p.sub = py_utils.NestedMap({'ff': fflayer_p, 'moe': moe_p})
hybrid_p.sub_key = symbol_sub_key
hybrid_fflayer = layers_with_attention.HybridFeedforwardLayer(hybrid_p)
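      # The sub_key symbol selects which sub-layer handles FProp; only the
      # MoE path should record aux losses.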
with py_utils.AuxLossContext() as aux_loss_ctx:
with symbolic.SymbolToValueMap(symbolic.STATIC_VALUES,
{symbol_sub_key: 'ff'}):
outputs_ff = hybrid_fflayer.FPropDefaultTheta(inputs, paddings)
self.assertEmpty(aux_loss_ctx.aux_losses)
with symbolic.SymbolToValueMap(symbolic.STATIC_VALUES,
{symbol_sub_key: 'moe'}):
outputs_moe = hybrid_fflayer.FPropDefaultTheta(inputs, paddings)
self.assertNotEmpty(aux_loss_ctx.aux_losses)
self.evaluate(tf.global_variables_initializer())
actual_layer_output_ff = self.evaluate(outputs_ff)
actual_layer_output_moe = self.evaluate(outputs_moe)
# pylint: disable=bad-whitespace
expected_output_ff = ([[[-0.05825481, -0.07296887, 0.04780552],
[0.40495688, 1.3521885, 1.9623209]],
[[-0.538299, -0.51939666, 0.14743209],
[2.0082633, 0.41585845, 1.2604249]],
[[-0.16540301, -0.588541, -0.68776536],
[0.22190702, 0.32639492, 0.5300334]],
[[0.06300206, -0.01546569, 0.0259212],
[-0.9785279, -0.96456575, -1.2386773]],
[[-0.8001151, -0.08313039, -0.7068999],
[-1.4299163, -0.22745167, 0.2734915]]])
expected_output_moe = ([[[0.4632624, -0.08097249, -0.10976761],
[-1.1534482, 0.20076305, 2.2456918]],
[[0.42073604, 1.262385, -0.47051585],
[1.0274936, 1.9002852, 1.4712151]],
[[-0.03316217, 0.38010496, 0.24893013],
[0.34987167, -0.6271608, 1.3136444]],
[[-0.68526286, 0.08780301, -0.9903437],
[0.39456585, 0.1792891, 0.84773403]],
[[0.08420426, -1.4146113, 0.9402321],
                              [0.22846438, -1.857454, -0.59214497]]])
      # pylint: enable=bad-whitespace
print(np.array_repr(actual_layer_output_ff))
print(np.array_repr(actual_layer_output_moe))
self.assertAllClose(actual_layer_output_ff, expected_output_ff)
self.assertAllClose(actual_layer_output_moe, expected_output_moe)
def testTransformerShardedMoeLayer(self):
with self.session(use_gpu=True):
tf.random.set_seed(3980847392)
inputs = tf.random.normal([5, 2, 3], seed=948387483)
paddings = tf.zeros([5, 2])
p = layers_with_attention.TransformerShardedMoeLayer.Params()
p.name = 'transformer_fflayer'
p.input_dim = 3
p.hidden_dim = 7
p.output_dim = 3
p.num_groups = 2
p.num_experts = 4
p.expert_capacity_factor = 2
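      # expert_capacity_factor scales how many tokens can be routed to each
      # expert per group.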
moe_fflayer = layers_with_attention.TransformerShardedMoeLayer(p)
h = moe_fflayer.FPropDefaultTheta(inputs, paddings)
self.evaluate(tf.global_variables_initializer())
actual_layer_output = self.evaluate(h)
      # pylint: disable=bad-whitespace
      # pyformat: disable
expected_output = [[[-0.34213868, -0.1577737, 0.15908651],
[0.0995039, 2.0593567, 2.422616]],
[[-0.9544622, -0.289206, 0.3745581],
[2.7121983, 0.49732625, 0.98936653]],
[[-0.22911909, -0.52321994, -1.3037556],
[0.29460418, 0.14727175, 0.3075519]],
[[-0.03022301, 0.00274765, -0.4092078],
[-1.0508028, 0.11724383, -0.70965374]],
[[-0.3473336, -0.4793697, -0.26441547],
[-1.6704988, 0.60920537, 0.7469079]]]
# pyformat: enable
# pylint: enable=bad-whitespace
print(np.array_repr(actual_layer_output))
self.assertAllClose(actual_layer_output, expected_output)
def testTransformerShardedMoeLayerShardedWeights(self):
with self.session(use_gpu=True):
tf.random.set_seed(3980847392)
inputs = tf.random.normal([5, 2, 4], seed=948387483)
paddings = tf.zeros([5, 2])
p = layers_with_attention.TransformerShardedMoeLayer.Params()
p.name = 'transformer_fflayer'
p.input_dim = 4
p.hidden_dim = 7
p.output_dim = 4
p.num_groups = 2
p.num_experts = 4
p.expert_capacity_factor = 2
p.expert_weight_shards = 2
moe_fflayer = layers_with_attention.TransformerShardedMoeLayer(p)
h = moe_fflayer.FPropDefaultTheta(inputs, paddings)
self.evaluate(tf.global_variables_initializer())
actual_layer_output = self.evaluate(h)
      # pylint: disable=bad-whitespace
      # pyformat: disable
expected_output = [[[-1.6894771, -0.6188934, 1.3259739, 0.6954013],
[2.0653946, 2.946611, -0.8549718, -0.5904686]],
[[0.15020806, 1.439679, 0.54579806, 2.0817866],
[-0.08175106, -0.7739575, -0.9843587, 0.46894]],
[[0.8291634, -0.58913743, 0.6789296, 0.08628751],
[-0.431438, -1.3788042, -0.8718487, -0.6101668]],
[[0.32909858, 0.5900509, -0.7350087, -1.3075548],
[0.46176028, 1.3289857, -1.640419, -0.9618089]],
[[-1.2423284, -0.26266062, 2.591324, 0.13978946],
[-0.10520535, -0.00721201, -0.44894043, 1.3547784]]]
# pyformat: enable
# pylint: enable=bad-whitespace
print(np.array_repr(actual_layer_output))
self.assertAllClose(actual_layer_output, expected_output)
@parameterized.named_parameters(
('F32FPropF32Input', tf.float32, tf.float32, 7.182965),
('F32FPropBF16Input', tf.float32, tf.bfloat16, 7.183718),
('BF16FPropF32Input', tf.bfloat16, tf.float32, 7.15625),
('BF16FPropBF16Input', tf.bfloat16, tf.bfloat16, 7.15625),
)
def testTransformerFeedForwardLayerFPropDtype(self,
fprop_dtype,
input_dtype,
expected_sum=0.):
with self.session(use_gpu=True):
tf.random.set_seed(3980847392)
inputs = tf.cast(
tf.random.normal([5, 2, 3], seed=948387483), dtype=input_dtype)
paddings = tf.zeros([5, 2], dtype=input_dtype)
p = layers_with_attention.TransformerFeedForwardLayer.Params()
p.name = 'transformer_fflayer'
p.input_dim = 3
p.hidden_dim = 7
p.random_seed = 1234
p.cls.SetFPropDtype(p, fprop_dtype)
      # SetFPropDtype should have set p.fprop_dtype accordingly.
self.assertEqual(fprop_dtype, p.fprop_dtype)
transformer_fflayer = layers_with_attention.TransformerFeedForwardLayer(p)
h = transformer_fflayer.FPropDefaultTheta(inputs, paddings)
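      # Mask out padded positions before comparing the reduced sum.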
h *= tf.cast(1 - paddings[:, :, tf.newaxis], h.dtype)
self.evaluate(tf.global_variables_initializer())
self.assertAllClose(expected_sum, tf.reduce_sum(h).eval())
def testTransformerFeedForwardLayerSpecOutDim(self):
with self.session(use_gpu=True):
tf.random.set_seed(3980847392)
inputs = tf.random.normal([5, 2, 3], seed=948387483)
paddings = tf.zeros([5, 2])
p = layers_with_attention.TransformerFeedForwardLayer.Params()
p.name = 'transformer_fflayer'
p.input_dim = 3
p.output_dim = 5
p.hidden_dim = 7
transformer_fflayer = layers_with_attention.TransformerFeedForwardLayer(p)
h = transformer_fflayer.FPropDefaultTheta(inputs, paddings)
self.evaluate(tf.global_variables_initializer())
actual_layer_output = self.evaluate(h)
# pylint: disable=bad-whitespace
# pyformat: disable
expected_output = [
[[ 1.42697251, 0.79269135, -0.85500956, -0.8122285 , -1.56555367],
[-1.7876718 , 0.26025945, -3.18244219, 1.34756351, 0.25739765]],
[[ 1.27962363, 0.88677615, -1.23556185, -1.06855559, -1.27293301],
[ 0.89336467, 2.46229172, 0.11302143, 1.19385004, -2.37805009]],
[[ 2.80146003, -0.66912627, 1.50160134, -2.30645609, -1.18872762],
[ 1.61967182, -0.51639485, 0.24441491, -1.0871532 , -0.95539457]],
[[ 2.03333473, -0.78205228, 0.71245927, -1.63276744, -0.91654319],
[ 1.54542768, -0.30343491, 0.10666496, -1.67965126, -0.15671858]],
[[ 1.60873222, -1.88402128, 0.79040933, -1.97199082, 0.4778356 ],
[-0.13516766, -0.42583361, -1.86275542, -1.09650302, 0.83263111]]]
# pyformat: enable
# pylint: enable=bad-whitespace
print(np.array_repr(actual_layer_output))
self.assertAllClose(actual_layer_output, expected_output)
def _testTransformerAttentionLayerInputs(self,
depth=3,
context_depth=3,
dtype=tf.float32):
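    # Time-major inputs: source_vecs is [5, 2, depth], aux_source_vecs is
    # [7, 2, depth], and paddings are shaped [time, batch].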
np.random.seed(505837249)
source_vecs = tf.stack(
[tf.constant(np.random.rand(2, depth), dtype=dtype) for _ in range(5)])
source_padding = tf.transpose(
tf.constant([[0, 0, 1, 1, 0], [1, 0, 0, 0, 1]], dtype=dtype))
aux_source_vecs = tf.stack(
[tf.constant(np.random.rand(2, depth), dtype=dtype) for _ in range(7)])
aux_source_paddings = tf.transpose(
tf.constant([[0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 1]],
dtype=dtype))
context_vecs = tf.stack([
tf.constant(np.random.rand(2, context_depth), dtype=dtype)
for _ in range(7)
])
return (source_vecs, source_padding, aux_source_vecs, aux_source_paddings,
context_vecs)
def testTransformerAttentionLayerCase1(self):
with self.session(use_gpu=True):
depth = 4
p = layers_with_attention.TransformerAttentionLayer.Params()
p.name = 'transformer_atten'
p.source_dim = depth
p.is_masked = False
p.num_attention_heads = 2
transformer_atten = layers_with_attention.TransformerAttentionLayer(p)
(source_vecs, source_padding, _, _,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
ctx, probs = transformer_atten.FPropDefaultTheta(source_vecs,
source_padding)
self.evaluate(tf.global_variables_initializer())
actual_ctx, actual_probs = self.evaluate([ctx, probs])
# pylint: disable=bad-whitespace
# pyformat: disable
expected_ctx = [
[[-1.47126436, 1.46579707, 0.39105844, -0.88563323],
[-1.29514003, -1.08241224, 1.49894714, 2.5935874 ]],
[[-0.00313053, 1.17399275, -1.28071034, -1.6311729 ],
[-0.77028418, -0.18855178, -0.75814998, 2.19872856]],
[[ 1.72851753, -0.40323859, -1.19053328, -1.39761829],
[-1.72141743, -0.78715289, 1.28404212, 2.78338313]],
[[-0.8881942 , 0.33776048, 1.28791749, -0.45082122],
[ 1.4362365 , 0.46009994, -1.45436597, -1.90602148]],
[[-0.51681399, -0.70075679, -0.48352116, 1.93754733],
[-1.44486678, 0.81801879, -1.03079689, 1.86697066]]]
expected_probs = [
[[ 0.21387868, 0.22080734, 0. , 0. , 0.56531399],
[ 0. , 0.30584112, 0.24723588, 0.44692296, 0. ]],
[[ 0.25358215, 0.50932312, 0. , 0. , 0.23709476],
[ 0. , 0.56834149, 0.2632803 , 0.16837817, 0. ]],
[[ 0.38519409, 0.55454361, 0. , 0. , 0.06026226],
[ 0. , 0.33708778, 0.21976741, 0.4431448 , 0. ]],
[[ 0.27139962, 0.12790371, 0. , 0. , 0.60069668],
[ 0. , 0.31849149, 0.28174096, 0.39976761, 0. ]],
[[ 0.16272782, 0.15781289, 0. , 0. , 0.67945927],
[ 0. , 0.55003977, 0.26049581, 0.18946445, 0. ]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)
self.assertAllClose(expected_probs, actual_probs, rtol=1e-05, atol=1e-05)
def testTransformerAttentionLayerCase1GatedResidualConnection(self):
with self.session(use_gpu=True):
depth = 4
p = layers_with_attention.TransformerAttentionLayer.Params()
p.name = 'transformer_atten'
p.source_dim = depth
p.is_masked = False
p.num_attention_heads = 2
p.add_unnormalized_input = True
p.residual_function = layers.HighwaySkipLayer.Params().Set(
carry_bias_init=100, couple_carry_transform_gates=True)
transformer_atten = layers_with_attention.TransformerAttentionLayer(p)
(source_vecs, source_padding, _, _,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
ctx, probs = transformer_atten.FPropDefaultTheta(source_vecs,
source_padding)
self.evaluate(tf.global_variables_initializer())
actual_ctx, _, actual_source_vecs = self.evaluate(
[ctx, probs, source_vecs])
# Due to the high bias, the gated residual connection is saturated and
# returns the original (unnormalized) input.
self.assertAllClose(actual_source_vecs, actual_ctx, rtol=1e-4, atol=1e-4)
def testTransformerAttentionLayerCase2(self):
with self.session(use_gpu=True):
depth = 4
p = layers_with_attention.TransformerAttentionLayer.Params()
p.name = 'transformer_atten'
p.source_dim = depth
p.is_masked = True
p.num_attention_heads = 2
transformer_atten = layers_with_attention.TransformerAttentionLayer(p)
(source_vecs, source_padding, _, _,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
ctx, probs = transformer_atten.FPropDefaultTheta(source_vecs,
source_padding)
self.evaluate(tf.global_variables_initializer())
actual_ctx, actual_probs = self.evaluate([ctx, probs])
tf.logging.info(np.array_repr(actual_ctx))
tf.logging.info(np.array_repr(actual_probs))
# pylint: disable=bad-whitespace
# pyformat: disable
expected_ctx = [
[[-0.14429152, 1.15510106, 1.11930299, -1.19245839],
[-0.69580591, -0.47006619, 0.82592297, 0.69593251]],
[[ 0.24164687, 0.53328454, -1.02119482, -1.49412084],
[-0.82601064, 0.024203 , -1.11880171, 1.80784416]],
[[ 1.7644347 , -0.53346401, -1.1461122 , -1.42797422],
[-0.95326459, 0.39580142, 0.39262164, 0.67513674]],
[[-0.28252155, -0.95237327, 2.08757687, -0.21231559],
[ 1.4362365 , 0.46009994, -1.45436597, -1.90602148]],
[[-0.51681399, -0.70075679, -0.48352116, 1.93754733],
[-1.44486678, 0.81801879, -1.03079689, 1.86697066]]]
expected_probs = [
[[ 1. , 0. , 0. , 0. , 0. ],
[ 0.2 , 0.2 , 0.2 , 0.2 , 0.2 ]],
[[ 0.3966811 , 0.60331887, 0. , 0. , 0. ],
[ 0. , 1. , 0. , 0. , 0. ]],
[[ 0.41050252, 0.58949745, 0. , 0. , 0. ],
[ 0. , 0.5245893 , 0.4754107 , 0. , 0. ]],
[[ 0.58882225, 0.41117775, 0. , 0. , 0. ],
[ 0. , 0.31849149, 0.28174096, 0.39976761, 0. ]],
[[ 0.16272782, 0.15781289, 0. , 0. , 0.67945927],
[ 0. , 0.55003977, 0.26049581, 0.18946445, 0. ]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_ctx, actual_ctx)
self.assertAllClose(expected_probs, actual_probs)
def testTransformerAttentionLayerDeterministicDropout(self):
with self.session(use_gpu=True):
depth = 4
p = layers_with_attention.TransformerAttentionLayer.Params()
p.name = 'transformer_atten'
p.source_dim = depth
p.is_masked = False
p.num_attention_heads = 2
p.residual_dropout_tpl = layers.DeterministicDropoutLayer.Params()
p.residual_dropout_prob = 0.1
transformer_atten = layers_with_attention.TransformerAttentionLayer(p)
(source_vecs, source_padding, _, _,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
ctx, probs = transformer_atten.FProp(transformer_atten.theta, source_vecs,
source_padding)
self.evaluate(tf.global_variables_initializer())
actual_ctx, actual_probs = self.evaluate([ctx, probs])
# pylint: disable=bad-whitespace
# pyformat: disable
print(np.array_repr(actual_ctx))
expected_ctx = np.array([
[[-1.45762944, 1.5337404 , 0.34037334, -0.97208667],
[-1.35992002, -1.06530988, 1.53705895, 2.79370689]],
[[ 0.00657134, 1.12030125, -1.32564592, -1.73569465],
[-0.80793667, -0.10877949, -0.80295694, 2.25494242]],
[[ 1.76956046, -0.50777751, -1.19745886, -1.46751583],
[-1.79178905, -0.77374339, 1.31586027, 2.98173356]],
[[-0.85498607, -0.37413225, 1.25707364, -0.50043333],
[ 1.62276983, 0.50820369, -1.52967572, -2.02076197]],
[[-0.66754031, -0.68657839, -0.51643699, 1.96581018],
[-1.4816376 , 0.89419198, -0.57226259, 1.90177512]]
], dtype=np.float32)
print(np.array_repr(actual_probs))
expected_probs = np.array([
[[ 0.21387868, 0.22080734, 0. , 0. , 0.56531399],
[ 0. , 0.30584112, 0.24723588, 0.44692296, 0. ]],
[[ 0.25358215, 0.50932312, 0. , 0. , 0.23709476],
[ 0. , 0.56834149, 0.2632803 , 0.16837817, 0. ]],
[[ 0.38519409, 0.55454361, 0. , 0. , 0.06026226],
[ 0. , 0.33708778, 0.21976741, 0.4431448 , 0. ]],
[[ 0.27139962, 0.12790371, 0. , 0. , 0.60069668],
[ 0. , 0.31849149, 0.28174096, 0.39976761, 0. ]],
[[ 0.16272782, 0.15781289, 0. , 0. , 0.67945927],
[ 0. , 0.55003977, 0.26049581, 0.18946445, 0. ]]
], dtype=np.float32)
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)
self.assertAllClose(expected_probs, actual_probs, rtol=1e-05, atol=1e-05)
def testTransformerAttentionLayerStepByStep(self):
with self.session(use_gpu=True):
depth = 4
p = layers_with_attention.TransformerAttentionLayer.Params()
p.name = 'transformer_atten'
p.source_dim = depth
p.is_masked = True
p.num_attention_heads = 2
x_atten = layers_with_attention.TransformerAttentionLayer(p)
(source_vecs, _, _, _,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
source_padding = tf.zeros([5, 2])
ctx1, probs1 = x_atten.FPropDefaultTheta(source_vecs, source_padding)
ctx2 = []
probs2 = []
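      # Run ExtendStep one position at a time, threading the cached key/value
      # prefix states through, and compare against the full FProp.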
cached_source_vecs = tf.zeros([0, 2, 4])
cached_source_contexts = tf.zeros([0, 2, 4])
prefix_states = py_utils.NestedMap(
key=cached_source_vecs, value=cached_source_contexts)
for i in range(5):
ctx, probs, prefix_states = x_atten.ExtendStep(x_atten.theta,
source_vecs[i, :, :],
prefix_states)
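        # Pad this step's probs to the full target length so the per-step
        # results can be stacked and compared with probs1.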
probs_pad = tf.zeros([2, 5 - i - 1])
padded_probs = tf.concat([probs, probs_pad], 1)
ctx2.append(ctx)
probs2.append(padded_probs)
ctx2 = tf.stack(ctx2)
probs2 = tf.stack(probs2)
self.evaluate(tf.global_variables_initializer())
ctx1_v, probs1_v, ctx2_v, probs2_v = self.evaluate(
[ctx1, probs1, ctx2, probs2])
tf.logging.info(np.array_repr(ctx1_v))
tf.logging.info(np.array_repr(probs1_v))
tf.logging.info(np.array_repr(ctx2_v))
tf.logging.info(np.array_repr(probs2_v))
self.assertAllClose(ctx1_v, ctx2_v)
self.assertAllClose(probs1_v, probs2_v)
def testTransformerAttentionLayerGatedResidualConnectionStepByStep(self):
with self.session(use_gpu=True):
depth = 4
p = layers_with_attention.TransformerAttentionLayer.Params()
p.name = 'transformer_atten'
p.source_dim = depth
p.is_masked = True
p.num_attention_heads = 2
p.residual_function = layers.HighwaySkipLayer.Params().Set(
couple_carry_transform_gates=True)
x_atten = layers_with_attention.TransformerAttentionLayer(p)
(source_vecs, _, _, _,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
source_padding = tf.zeros([5, 2])
ctx1, probs1 = x_atten.FPropDefaultTheta(source_vecs, source_padding)
ctx2 = []
probs2 = []
cached_source_vecs = tf.zeros([0, 2, 4])
cached_source_contexts = tf.zeros([0, 2, 4])
prefix_states = py_utils.NestedMap(
key=cached_source_vecs, value=cached_source_contexts)
for i in range(5):
ctx, probs, prefix_states = x_atten.ExtendStep(x_atten.theta,
source_vecs[i, :, :],
prefix_states)
probs_pad = tf.zeros([2, 5 - i - 1])
padded_probs = tf.concat([probs, probs_pad], 1)
ctx2.append(ctx)
probs2.append(padded_probs)
ctx2 = tf.stack(ctx2)
probs2 = tf.stack(probs2)
self.evaluate(tf.global_variables_initializer())
ctx1_v, probs1_v, ctx2_v, probs2_v = self.evaluate(
[ctx1, probs1, ctx2, probs2])
self.assertAllClose(ctx1_v, ctx2_v)
self.assertAllClose(probs1_v, probs2_v)
def testTransformerAttentionLayerCase3(self):
with self.session(use_gpu=True):
depth = 4
p = layers_with_attention.TransformerAttentionLayer.Params()
p.name = 'transformer_atten'
p.source_dim = depth
p.is_masked = False
p.num_attention_heads = 2
transformer_atten = layers_with_attention.TransformerAttentionLayer(p)
(query_vec, _, aux_vecs, aux_paddings,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
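      # With aux_vecs provided this is cross-attention: queries come from
      # query_vec, keys/values from the aux source.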
ctx, probs = transformer_atten.FPropDefaultTheta(query_vec, aux_paddings,
aux_vecs)
self.evaluate(tf.global_variables_initializer())
actual_ctx, actual_probs = self.evaluate([ctx, probs])
tf.logging.info(np.array_repr(actual_ctx))
tf.logging.info(np.array_repr(actual_probs))
# pylint: disable=bad-whitespace
# pyformat: disable
expected_ctx = [
[[-1.42420077, 1.19024372, 1.35146523, 0.85896158],
[-0.44974625, -1.00108492, 1.63387251, 1.678146 ]],
[[ 0.1134335 , 1.97617495, -0.35918081, 0.26396495],
[-0.19688171, -0.71197301, 0.0659425 , 2.5417304 ]],
[[ 1.58169425, 0.81259179, -0.58948535, 0.20254248],
[-0.84438968, -0.65845209, 1.45584249, 1.87587976]],
[[-1.01532316, -0.05166581, 2.07901478, 0.97540361],
[ 2.08563352, 0.34328598, -0.23240227, -0.19035631]],
[[-0.53881919, -0.60117185, 0.29170275, 2.6474514 ],
[-0.88318163, 0.37149727, -0.16098523, 2.3810885 ]]]
expected_probs = [
[[ 0.32392544, 0., 0.27218491, 0., 0.19574419, 0., 0.20814547],
[ 0., 0.273045 , 0., 0.43572819, 0., 0.2912268 , 0.]],
[[ 0.24094662, 0., 0.23919827, 0., 0.26563686, 0., 0.25421822],
           [ 0., 0.21680018, 0., 0.33962148, 0., 0.44357836 , 0.]],
[[ 0.20083594, 0., 0.20683075, 0., 0.28931937, 0., 0.30301392],
           [ 0., 0.24710922, 0., 0.453915 , 0., 0.29897571 , 0.]],
[[ 0.32845193, 0., 0.26491433, 0., 0.18304622, 0., 0.22358747],
           [ 0., 0.39426237, 0., 0.19774443, 0., 0.4079932 , 0.]],
[[ 0.23542665, 0., 0.27910906, 0., 0.30036426, 0., 0.18510005],
[ 0., 0.20147586, 0., 0.37759233, 0., 0.42093182, 0.]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)
self.assertAllClose(expected_probs, actual_probs, rtol=1e-05, atol=1e-05)
def _testTransformerAttentionLayerInputsMultiAuxSource(
self, aux_source_list, depth=3, context_depth=3, dtype=tf.float32):
(source_vecs, source_padding, _, _, _) = (
self._testTransformerAttentionLayerInputs(depth, context_depth, dtype))
np.random.seed(505837249)
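    # Aux sources are NestedMaps keyed by the names in aux_source_list.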
aux_source_vecs = py_utils.NestedMap()
for aux_src_key in aux_source_list:
aux_source_vecs[aux_src_key] = tf.stack([
tf.constant(np.random.rand(2, depth), dtype=dtype) for _ in range(7)
])
aux_source_paddings = py_utils.NestedMap({
aux_src_key: tf.transpose(
tf.constant([[0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 1]],
dtype=dtype)) for aux_src_key in aux_source_list
})
context_vecs = py_utils.NestedMap()
for aux_src_key in aux_source_list:
context_vecs[aux_src_key] = tf.stack([
tf.constant(np.random.rand(2, context_depth), dtype=dtype)
for _ in range(7)
])
return (source_vecs, source_padding, aux_source_vecs, aux_source_paddings,
context_vecs)
def testTransformerAttentionLayerCase3MultiSource(self):
with self.session(use_gpu=True) as sess:
depth = 4
p = layers_with_attention.TransformerMultiSourceAttentionLayer.Params()
p.name = 'transformer_atten_multisource'
p.source_dim = depth
p.is_masked = False
p.num_attention_heads = 2
p.num_source = 2
transformer_atten = (
layers_with_attention.TransformerMultiSourceAttentionLayer(p))
(query_vec, _, aux_vecs, aux_paddings, _) = (
self._testTransformerAttentionLayerInputsMultiAuxSource(
['source_0', 'source_1'], depth=depth))
ctx, probs = transformer_atten.FPropDefaultTheta(query_vec, aux_paddings,
aux_vecs)
tf.global_variables_initializer().run()
actual_ctx, actual_probs = sess.run([ctx, probs])
tf.logging.info(np.array_repr(actual_ctx))
tf.logging.info(np.array_repr(actual_probs))
# pylint: disable=bad-whitespace
# pyformat: disable
expected_ctx = [
[[-1.9893163 , 0.8076348 , -0.33805895, -0.20369706],
[-1.4164762 , -1.0597495 , -0.3834126 , 0.3456189 ]],
[[-0.32503036, 1.4952568 , -1.9324137 , -0.77024114],
[-0.9230547 , -0.89096445, -1.7928462 , 1.0901089 ]],
[[ 1.2240632 , 0.26689315, -2.0940783 , -0.9101793 ],
[-1.805772 , -0.74725944, -0.5485071 , 0.5403221 ]],
[[-1.5880606 , -0.43595213, 0.3818947 , -0.15712431],
[ 0.968494 , 0.19423638, -2.308594 , -1.4253062 ]],
[[-0.8178122 , -1.1570994 , -1.1993079 , 1.4127911 ],
[-1.7231476 , 0.17116357, -2.0703826 , 0.96320933]]]
expected_probs = [
[[0.16679956, 0., 0.2122806 , 0., 0.23512313, 0., 0.38579667],
[0., 0.28562695, 0., 0.3442661 , 0., 0.370107 , 0.]],
[[0.28629708, 0., 0.18837643, 0., 0.2644571 , 0., 0.26086944],
[0., 0.5590873 , 0., 0.22519027, 0., 0.21572247, 0.]],
[[0.3374045 , 0., 0.21468817, 0., 0.25822428, 0., 0.18968314],
[0., 0.2896077 , 0., 0.34381902, 0., 0.36657327, 0.]],
[[0.14310986, 0., 0.2507791 , 0., 0.22308563, 0., 0.3830254 ],
[0., 0.43070328, 0., 0.2930708 , 0., 0.27622598, 0.]],
[[0.30523974, 0., 0.30610216, 0., 0.2248916 , 0., 0.1637665 ],
[0., 0.49082592, 0., 0.26013914, 0., 0.24903494, 0.]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)
self.assertAllClose(expected_probs, actual_probs, rtol=1e-05, atol=1e-05)
def testTransformerAttentionLayerCase3MultiSourceMatchSingle(self):
with self.session(use_gpu=True) as sess:
# Prepare inputs.
depth = 4
(query_vec, _, aux_vecs, aux_paddings, _) = (
self._testTransformerAttentionLayerInputsMultiAuxSource(
['source_0', 'source_1'], depth=depth))
      # Create two source inputs but use single-source attention.
p = layers_with_attention.TransformerMultiSourceAttentionLayer.Params()
p.random_seed = 123
p.name = 'transformer_atten_multisource_single'
p.source_dim = depth
p.is_masked = False
p.num_attention_heads = 2
p.num_source = 1
msa = layers_with_attention.TransformerMultiSourceAttentionLayer(p)
msa_ctx, msa_probs = (
msa.FPropDefaultTheta(query_vec, aux_paddings, aux_vecs))
# Original single source attention layer.
p = layers_with_attention.TransformerAttentionLayer.Params()
p.random_seed = 123
p.name = 'transformer_atten'
p.source_dim = depth
p.is_masked = False
p.num_attention_heads = 2
ssa = layers_with_attention.TransformerAttentionLayer(p)
ssa_ctx, ssa_probs = ssa.FPropDefaultTheta(query_vec,
aux_paddings['source_0'],
aux_vecs['source_0'])
# Compare two context vectors and probabilities.
tf.global_variables_initializer().run()
actual_msa_ctx, actual_msa_probs, actual_ssa_ctx, actual_ssa_probs = (
sess.run([msa_ctx, msa_probs, ssa_ctx, ssa_probs]))
# pylint: disable=bad-whitespace
# pyformat: disable
self.assertAllClose(actual_msa_ctx, actual_ssa_ctx,
rtol=1e-05, atol=1e-05)
self.assertAllClose(actual_msa_probs, actual_ssa_probs,
rtol=1e-05, atol=1e-05)
def testTransformerAttentionLayerSourceContext(self):
    # Passing source_vecs as context_vecs should be equivalent to passing
    # no context_vecs at all.
with self.session(use_gpu=True):
depth = 4
p = layers_with_attention.TransformerAttentionLayer.Params()
p.name = 'transformer_atten'
p.source_dim = depth
p.is_masked = False
p.num_attention_heads = 2
transformer_atten = layers_with_attention.TransformerAttentionLayer(p)
(query_vec, _, aux_vecs, aux_paddings,
_) = self._testTransformerAttentionLayerInputs(
depth=depth, context_depth=depth)
ctx1, probs1 = transformer_atten.FPropDefaultTheta(
query_vec=query_vec,
source_paddings=aux_paddings,
source_vecs=aux_vecs,
context_vecs=aux_vecs)
ctx2, probs2 = transformer_atten.FPropDefaultTheta(
query_vec=query_vec,
source_paddings=aux_paddings,
source_vecs=aux_vecs)
self.evaluate(tf.global_variables_initializer())
actual_ctx1, actual_probs1, actual_ctx2, actual_probs2 = self.evaluate(
[ctx1, probs1, ctx2, probs2])
self.assertAllEqual(actual_ctx1, actual_ctx2)
self.assertAllEqual(actual_probs1, actual_probs2)
def testTransformerAttentionLayerCase4a(self):
# Distinct key and value vectors of the same size.
with self.session(use_gpu=True):
depth = 4
p = layers_with_attention.TransformerAttentionLayer.Params()
p.name = 'transformer_atten'
p.source_dim = depth
p.is_masked = False
p.num_attention_heads = 2
transformer_atten = layers_with_attention.TransformerAttentionLayer(p)
(query_vec, _, aux_vecs, aux_paddings,
context_vecs) = self._testTransformerAttentionLayerInputs(
depth=depth, context_depth=depth)
ctx, probs = transformer_atten.FPropDefaultTheta(
query_vec=query_vec,
source_paddings=aux_paddings,
source_vecs=aux_vecs,
context_vecs=context_vecs)
self.evaluate(tf.global_variables_initializer())
actual_ctx, actual_probs = self.evaluate([ctx, probs])
tf.logging.info(np.array_repr(actual_ctx))
tf.logging.info(np.array_repr(actual_probs))
# pylint: disable=bad-whitespace
# pyformat: disable
expected_ctx = [
[[-1.20854747, 1.25685954, 1.39818001, 0.558267 ],
[-0.39904317, -0.85738903, 1.45404375, 1.16389585]],
[[ 0.27544549, 1.93070388, -0.24477535, 0.12131107],
[-0.07007086, -0.53334039, -0.01144788, 2.03883505]],
[[ 1.72718525, 0.73558617, -0.45405889, 0.1063388 ],
[-0.76255953, -0.52610761, 1.30195093, 1.3571732 ]],
[[-0.79346895, 0.03049853, 2.11432981, 0.64747918],
[ 1.86823332, 0.3250314 , -0.50979781, -0.40038702]],
[[-0.30053592, -0.53348505, 0.41098642, 2.43903708],
[-0.75298154, 0.50427407, -0.23542863, 1.89634883]]]
expected_probs = [
[[ 0.32392544, 0., 0.27218491, 0., 0.19574417, 0., 0.20814548],
[ 0., 0.273045 , 0., 0.43572825, 0., 0.2912268 , 0.]],
[[ 0.24094665, 0., 0.23919825, 0., 0.26563686, 0., 0.25421822],
[ 0., 0.21680018, 0., 0.33962148, 0., 0.44357836, 0.]],
[[ 0.20083596, 0., 0.20683077, 0., 0.28931937, 0., 0.30301392],
[ 0., 0.24710923, 0., 0.45391506, 0., 0.29897574, 0.]],
[[ 0.32845187, 0., 0.26491439, 0., 0.18304622, 0., 0.22358751],
[ 0., 0.39426237, 0., 0.1977444 , 0., 0.4079932 , 0.]],
[[ 0.23542665, 0., 0.27910906, 0., 0.30036426, 0., 0.18510005],
[ 0., 0.20147583, 0., 0.37759233, 0., 0.42093182, 0.]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)
self.assertAllClose(expected_probs, actual_probs, rtol=1e-05, atol=1e-05)
def testTransformerAttentionLayerCase4aMultiSource(self):
# Distinct key and value vectors of the same size.
with self.session(use_gpu=True) as sess:
depth = 4
p = layers_with_attention.TransformerMultiSourceAttentionLayer.Params()
p.name = 'transformer_atten'
p.source_dim = depth
p.is_masked = False
p.num_attention_heads = 2
p.num_source = 2
transformer_atten = (
layers_with_attention.TransformerMultiSourceAttentionLayer(p))
(query_vec, _, aux_vecs, aux_paddings,
context_vecs) = self._testTransformerAttentionLayerInputsMultiAuxSource(
['source_0', 'source_1'], depth=depth, context_depth=depth)
ctx, probs = transformer_atten.FPropDefaultTheta(
query_vec=query_vec,
source_paddings=aux_paddings,
source_vecs=aux_vecs,
context_vecs=context_vecs)
tf.global_variables_initializer().run()
actual_ctx, actual_probs = sess.run([ctx, probs])
tf.logging.info(np.array_repr(actual_ctx))
tf.logging.info(np.array_repr(actual_probs))
# pylint: disable=bad-whitespace
# pyformat: disable
expected_ctx = [
[[-2.263544 , -0.6288333 , 0.56436384, 0.01389617],
[-1.2714428 , -2.6551175 , 1.2088637 , 0.48963785]],
[[-0.7530552 , 0.2863059 , -1.0583341 , -0.62887365],
[-0.96861804, -2.3108015 , -0.32213187, 1.4070555 ]],
[[ 0.6888912 , -0.83782226, -1.3349627 , -0.69250315],
[-1.646423 , -2.3046758 , 1.0617565 , 0.6768545 ]],
[[-1.8710074 , -1.9080507 , 1.2318314 , 0.14334393],
[ 0.92007947, -1.775676 , -1.1390316 , -0.9541185 ]],
[[-1.375605 , -2.3637016 , -0.5955716 , 1.8448071 ],
[-1.6682272 , -1.2519215 , -0.5330956 , 1.2296966 ]]]
expected_probs = [
[[0.22346233, 0., 0.27624047, 0., 0.18855348, 0., 0.31174374],
[0., 0.17387941, 0., 0.4642802 , 0., 0.36184043, 0.]],
[[0.23724607, 0., 0.24033949, 0., 0.3725937 , 0., 0.14982074],
[0., 0.15892553, 0., 0.4639521 , 0., 0.37712237, 0.]],
[[0.25570837, 0., 0.21216837, 0., 0.40378904, 0., 0.12833425],
[0., 0.16656096, 0., 0.47455215, 0., 0.3588869 , 0.]],
[[0.22077632, 0., 0.27379048, 0., 0.14691363, 0., 0.35851952],
[0., 0.5620029 , 0., 0.21104112, 0., 0.22695602, 0.]],
[[0.20673111, 0., 0.22832122, 0., 0.12665181, 0., 0.43829578],
[0., 0.17881572, 0., 0.45228398, 0., 0.36890027, 0.]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)
self.assertAllClose(expected_probs, actual_probs, rtol=1e-05, atol=1e-05)
def testTransformerAttentionLayerCase4b(self):
# Distinct key and value vectors of different sizes.
with self.session(use_gpu=True):
depth = 4
context_depth = 3
p = layers_with_attention.TransformerAttentionLayer.Params()
p.name = 'transformer_atten'
p.source_dim = depth
p.is_masked = False
p.num_attention_heads = 2
p.atten_tpl.enable_ctx_pre_proj = True # Project values first.
p.context_dim = context_depth
transformer_atten = layers_with_attention.TransformerAttentionLayer(p)
(query_vec, _, aux_vecs, aux_paddings,
context_vecs) = self._testTransformerAttentionLayerInputs(
depth=depth, context_depth=context_depth)
ctx, probs = transformer_atten.FPropDefaultTheta(
query_vec=query_vec,
source_paddings=aux_paddings,
source_vecs=aux_vecs,
context_vecs=context_vecs)
self.evaluate(tf.global_variables_initializer())
actual_ctx, actual_probs = self.evaluate([ctx, probs])
tf.logging.info(np.array_repr(actual_ctx))
tf.logging.info(np.array_repr(actual_probs))
# pylint: disable=bad-whitespace
# pyformat: disable
expected_ctx = [
[[-1.78694427, 0.47923172, 0.89032698, 0.05556235],
[-0.91133636, -2.05677342, 1.30821121, 1.17388368]],
[[-0.24106422, 1.27436733, -0.84274787, -0.58437365],
[-0.58214164, -1.7144506 , -0.21780583, 2.03152227]],
[[ 1.22925639, 0.15926462, -1.10279834, -0.69442266],
[-1.2955091 , -1.72805309, 1.15411568, 1.39945638]],
[[-1.38178754, -0.7436831 , 1.60785818, 0.16023314],
[ 1.5662415 , -0.77094424, -0.63392496, -0.6477108 ]],
[[-0.83664525, -1.20021605, -0.15795891, 1.81301379],
[-1.27991939, -0.67706013, -0.42443359, 1.92405224]]]
# Probabilities are unaffected by change of value vectors.
expected_probs = [
[[ 0.32392544, 0., 0.27218491, 0., 0.19574417, 0., 0.20814548],
[ 0., 0.273045 , 0., 0.43572825, 0., 0.2912268 , 0.]],
[[ 0.24094665, 0., 0.23919825, 0., 0.26563686, 0., 0.25421822],
[ 0., 0.21680018, 0., 0.33962148, 0., 0.44357836, 0.]],
[[ 0.20083596, 0., 0.20683077, 0., 0.28931937, 0., 0.30301392],
[ 0., 0.24710923, 0., 0.45391506, 0., 0.29897574, 0.]],
[[ 0.32845187, 0., 0.26491439, 0., 0.18304622, 0., 0.22358751],
[ 0., 0.39426237, 0., 0.1977444 , 0., 0.4079932 , 0.]],
[[ 0.23542665, 0., 0.27910906, 0., 0.30036426, 0., 0.18510005],
[ 0., 0.20147583, 0., 0.37759233, 0., 0.42093182, 0.]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)
self.assertAllClose(expected_probs, actual_probs, rtol=1e-05, atol=1e-05)
def testTransformerAttentionLayerCase4bMultiSource(self):
# Distinct key and value vectors of different sizes.
with self.session(use_gpu=True) as sess:
depth = 4
context_depth = 3
p = layers_with_attention.TransformerMultiSourceAttentionLayer.Params()
p.name = 'transformer_atten'
p.source_dim = depth
p.is_masked = False
p.num_attention_heads = 2
p.atten_tpl.enable_ctx_pre_proj = True # Project values first.
p.context_dim = context_depth
p.num_source = 2
transformer_atten = (
layers_with_attention.TransformerMultiSourceAttentionLayer(p))
(query_vec, _, aux_vecs, aux_paddings,
context_vecs) = self._testTransformerAttentionLayerInputsMultiAuxSource(
['source_0', 'source_1'], depth=depth, context_depth=context_depth)
ctx, probs = transformer_atten.FPropDefaultTheta(
query_vec=query_vec,
source_paddings=aux_paddings,
source_vecs=aux_vecs,
context_vecs=context_vecs)
tf.global_variables_initializer().run()
actual_ctx, actual_probs = sess.run([ctx, probs])
tf.logging.info(np.array_repr(actual_ctx))
tf.logging.info(np.array_repr(actual_probs))
# pylint: disable=bad-whitespace
# pyformat: disable
expected_ctx = [
[[-0.52144265, 1.7370229 , 0.09479183, 1.3142197 ],
[ 0.48182625, -0.41524518, 0.2950616 , 2.3245158 ]],
[[ 1.0139368 , 2.6589985 , -1.528513 , 0.6880791 ],
[ 0.7810391 , -0.05419022, -1.227257 , 3.2472034 ]],
[[ 2.4781933 , 1.5413835 , -1.7759092 , 0.6057711 ],
[ 0.11952043, -0.07813096, 0.12346762, 2.5386043 ]],
[[-0.12219751, 0.46310303, 0.7768879 , 1.4295386 ],
[ 2.8404353 , 0.901297 , -1.5073049 , 0.60736287]],
[[ 0.37801886, -0.05114734, -1.003877 , 3.0894797 ],
[ 0.10942292, 0.975695 , -1.4856565 , 3.1215234 ]]]
# Probabilities are unaffected by change of value vectors.
expected_probs = [
[[0.22346234, 0., 0.27624047, 0., 0.18855348, 0., 0.31174374],
[0., 0.17387941, 0., 0.4642802 , 0., 0.36184043, 0.]],
[[0.23724607, 0., 0.24033949, 0., 0.3725937 , 0., 0.14982076],
[0., 0.15892553, 0., 0.4639521 , 0., 0.3771224 , 0.]],
[[0.2557084 , 0., 0.21216837, 0., 0.403789 , 0., 0.12833424],
[0., 0.16656098, 0., 0.47455215, 0., 0.3588869 , 0.]],
[[0.22077632, 0., 0.27379048, 0., 0.14691365, 0., 0.35851952],
[0., 0.5620028 , 0., 0.21104114, 0., 0.22695604, 0.]],
[[0.20673111, 0., 0.22832122, 0., 0.12665181, 0., 0.43829578],
[0., 0.17881574, 0., 0.45228398, 0., 0.36890027, 0.]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)
self.assertAllClose(expected_probs, actual_probs, rtol=1e-05, atol=1e-05)
def testTransformerAttentionLayerCase5(self):
with self.session(use_gpu=True):
depth = 4
p = layers_with_attention.TransformerAttentionLayer.Params()
p.name = 'transformer_atten'
p.source_dim = depth
p.is_masked = True
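      # 'eye' masking blocks each position from attending to itself; note the
      # zero diagonal in expected_probs below.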
p.mask_type = 'eye'
p.num_attention_heads = 2
transformer_atten = layers_with_attention.TransformerAttentionLayer(p)
(source_vecs, source_padding, _, _,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
ctx, probs = transformer_atten.FPropDefaultTheta(source_vecs,
source_padding)
self.evaluate(tf.global_variables_initializer())
actual_ctx, actual_probs = self.evaluate([ctx, probs])
tf.logging.info(np.array_repr(actual_ctx))
tf.logging.info(np.array_repr(actual_probs))
# pylint: disable=bad-whitespace
# pyformat: disable
expected_ctx = [
[[-1.89149332, 1.18417633, 0.09695292, -0.83397102],
[-1.29514003, -1.08241224, 1.49894726, 2.59358764]],
[[ 0.79232693, 2.47633171, -0.90657401, -1.5221628 ],
[-0.14457735, 0.09040731, -0.12422991, 2.13300467]],
[[ 1.72851753, -0.40323859, -1.19053328, -1.39761829],
[-2.15129089, -1.16594994, 1.1004864 , 3.07194686]],
[[-0.88819426, 0.3377606 , 1.28791749, -0.45082125],
[1.97874951, 1.50414598, -1.15547466, -1.18697572]],
[[ 0.10235745, -1.51675844, 0.13308235, 1.26194644],
[-1.44486666, 0.81801897, -1.03079677, 1.86697078]]]
expected_probs = [
[[ 0. , 0.33807203, 0. , 0. , 0.661928 ],
[ 0. , 0.30584112, 0.24723586, 0.44692296, 0. ]],
[[ 0.63300228, 0. , 0. , 0. , 0.36699772],
[ 0. , 0. , 0.70683479, 0.29316518, 0. ]],
[[ 0.38519406, 0.55454367, 0. , 0. , 0.06026225],
[ 0. , 0.51602799, 0. , 0.48397198, 0. ]],
[[ 0.27139962, 0.12790368, 0. , 0. , 0.60069668],
[ 0. , 0.46712866, 0.53287131, 0. , 0. ]],
[[ 0.55518425, 0.4448157 , 0. , 0. , 0. ],
[ 0. , 0.55003977, 0.26049584, 0.18946445, 0. ]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_ctx, actual_ctx)
self.assertAllClose(expected_probs, actual_probs)
def testTransformerAttentionLayerCase6(self):
with self.session(use_gpu=True):
depth = 4
p = layers_with_attention.TransformerAttentionLayer.Params()
p.name = 'transformer_atten'
p.source_dim = depth
p.is_masked = True
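      # 'ngram' masking restricts self-attention to the most recent
      # mask_ngram_order positions of the prefix.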
p.mask_type = 'ngram'
p.mask_ngram_order = 3
p.num_attention_heads = 2
transformer_atten = layers_with_attention.TransformerAttentionLayer(p)
(source_vecs, source_padding, _, _,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
ctx, probs = transformer_atten.FPropDefaultTheta(source_vecs,
source_padding)
self.evaluate(tf.global_variables_initializer())
actual_ctx, actual_probs = self.evaluate([ctx, probs])
tf.logging.info(np.array_repr(actual_ctx))
tf.logging.info('actual_probs=%r', np.array_repr(actual_probs))
# pylint: disable=bad-whitespace
# pyformat: disable
expected_ctx = [
[[-0.14429152, 1.155101, 1.119303, -1.1924583],
[-0.6958059, -0.47006613, 0.8259231, 0.6959326]],
[[0.24164662, 0.5332843, -1.0211949, -1.4941208],
[-0.8260106, 0.024203, -1.1188016, 1.807844]],
[[1.7644346, -0.533464, -1.1461123, -1.4279743],
[-0.95326424, 0.39580172, 0.39262217, 0.6751373]],
[[-1.3441969, -2.3305228, 1.7523124, 0.15416345],
[1.4362367, 0.46009994, -1.4543657, -1.9060212]],
[[-0.8291472, 0.21259767, -0.9077787, 1.6243731],
[-1.0709695, 0.74920934, -0.5950014, 1.5919089]]]
expected_probs = [
[[1. , 0. , 0. , 0. , 0. ],
[0.2 , 0.2 , 0.2 , 0.2 , 0.2 ]],
[[0.3966811 , 0.6033189 , 0. , 0. , 0. ],
[0. , 1. , 0. , 0. , 0. ]],
[[0.41050246, 0.5894975 , 0. , 0. , 0. ],
[0. , 0.5245893 , 0.4754107 , 0. , 0. ]],
[[0. , 1. , 0. , 0. , 0. ],
[0. , 0.31849146, 0.28174093, 0.39976764, 0. ]],
[[0. , 0. , 0. , 0. , 1. ],
[0. , 0. , 0.5881755 , 0.41182452, 0. ]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_ctx, actual_ctx)
self.assertAllClose(expected_probs, actual_probs)
def testTransformerLayerConstruction(self):
p = layers_with_attention.TransformerLayer.Params()
p.name = 'transformer_1'
p.source_dim = 4
p.tr_fflayer_tpl.hidden_dim = 7
p.tr_atten_tpl.num_attention_heads = 2
p.has_aux_atten = True
p.mask_self_atten = True
layer = layers_with_attention.TransformerLayer(p)
# output_dim is equal to source_dim when p.output_dim == 0
self.assertEqual(0, p.output_dim)
self.assertEqual(p.source_dim, layer.output_dim)
# output_dim corresponds to p.output_dim when it is non-zero.
p.output_dim = 6
p.name = 'transformer_2'
layer = p.Instantiate()
self.assertEqual(p.output_dim, layer.output_dim)
def testTransformerLayerFProp(self):
with self.session(use_gpu=True):
np.random.seed(6348575)
depth = 4
p = layers_with_attention.TransformerLayer.Params()
p.name = 'transformer'
p.source_dim = depth
p.has_aux_atten = True
p.mask_self_atten = True
p.tr_fflayer_tpl.hidden_dim = 7
p.tr_atten_tpl.num_attention_heads = 2
transformer = layers_with_attention.TransformerLayer(p)
(source_vecs, source_padding, aux_vecs, aux_paddings,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
h, probs = transformer.FPropDefaultTheta(
source_vecs,
source_padding,
aux_vecs=aux_vecs,
aux_paddings=aux_paddings)
self.evaluate(tf.global_variables_initializer())
actual_layer_output, actual_prob_output = self.evaluate([h, probs])
tf.logging.info(np.array_repr(actual_layer_output))
tf.logging.info(np.array_repr(actual_prob_output))
# pylint: disable=bad-whitespace
# pyformat: disable
expected_layer_output = [
[[ 0.68134278, 0.74287307, 0.04602078, 1.99463582],
[ 0.20382279, -1.50973201, 1.33421206, 0.53317755]],
[[ 2.46715426, 2.84406185, -0.60359633, 0.51742059],
[ 1.06444919, -1.45264888, -0.06196141, 0.35242724]],
[[ 2.3442452 , -0.56243378, -1.1149826 , 0.50276589],
[ 1.04868603, -1.68515253, 0.3093726 , -0.19512933]],
[[-0.11517292, -1.21290886, 1.31996512, 1.14821553],
[ 3.14395714, -1.07060659, 0.27842081, -1.81273639]],
[[ 1.39219522, -0.81882864, -0.32732445, 1.36851478],
[-0.79119539, -0.28148842, 0.29963702, 1.37034667]]]
expected_prob_output = [
[[ 0.21795762, 0., 0.26612395, 0., 0.31251648, 0., 0.20340192],
[ 0., 0.2677784 , 0., 0.32895881, 0., 0.40326279, 0.]],
[[ 0.25721505, 0., 0.24116731, 0., 0.25138181, 0., 0.2502358 ],
[ 0., 0.25691482, 0., 0.31076014, 0., 0.43232504, 0.]],
[[ 0.24550268, 0., 0.25128055, 0., 0.25109866, 0., 0.25211811],
[ 0., 0.26769161, 0., 0.32481128, 0., 0.40749705, 0.]],
[[ 0.22675318, 0., 0.26633731, 0., 0.28919035, 0., 0.21771915],
[ 0., 0.35955882, 0., 0.36869824, 0., 0.271743 , 0.]],
[[ 0.21504655, 0., 0.26958644, 0., 0.30847484, 0., 0.20689213],
[ 0., 0.29516917, 0., 0.29359812, 0., 0.41123265, 0.]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_layer_output, actual_layer_output)
self.assertAllClose(expected_prob_output, actual_prob_output)
def testMultiAuxSourceTransformerLayerFProp(self):
with self.session(use_gpu=True):
np.random.seed(6348575)
depth = 4
p = layers_with_attention.TransformerLayer.Params()
p.name = 'transformer'
p.source_dim = depth
p.has_aux_atten = True
p.tr_aux_atten_tpl = (
layers_with_attention.TransformerMultiSourceAttentionLayer.Params()
.Set(
source_dim=p.source_dim,
num_source=2,
primary_source_index=0,
num_attention_heads=4))
p.mask_self_atten = True
p.tr_fflayer_tpl.hidden_dim = 7
p.tr_atten_tpl.num_attention_heads = 2
transformer = layers_with_attention.TransformerLayer(p)
(source_vecs, source_padding, aux_vecs, aux_paddings,
_) = self._testTransformerAttentionLayerInputsMultiAuxSource(
['source_0', 'source_1'], depth=depth)
h, probs = transformer.FPropDefaultTheta(
source_vecs,
source_padding,
aux_vecs=aux_vecs,
aux_paddings=aux_paddings)
self.evaluate(tf.global_variables_initializer())
actual_layer_output, actual_prob_output = self.evaluate([h, probs])
tf.logging.info(np.array_repr(actual_layer_output))
tf.logging.info(np.array_repr(actual_prob_output))
# pylint: disable=bad-whitespace
# pyformat: disable
expected_layer_output = [
[[-0.06297368, 0.75025094, -0.18167767, 2.27935 ],
[-0.22771487, -1.9459789 , 0.758848 , 1.2273839 ]],
[[ 1.6866916 , 2.9894042 , -1.2287276 , 0.8018402 ],
[ 0.656631 , -1.2074132 , -0.41612232, 1.4099871 ]],
[[ 1.6463919 , -0.493517 , -1.3494966 , 0.6977608 ],
[ 0.49527422, -1.5192728 , -0.1677584 , 0.781141 ]],
[[-0.86701846, -1.2044021 , 1.0710557 , 1.4103888 ],
[ 3.0039275 , -0.98788637, -0.48796502, -0.90612394]],
[[ 0.6298464 , -0.33676302, -0.22484902, 1.8341833 ],
[-1.2259507 , -0.716857 , -0.1336647 , 1.9020087 ]]]
expected_prob_output = [
[[0.23055646, 0., 0.270754 , 0., 0.20824522, 0., 0.2904443 ],
[0., 0.34072176, 0., 0.34083408, 0., 0.31844413, 0.]],
[[0.25588194, 0., 0.21465777, 0., 0.26527345, 0., 0.26418683],
[0., 0.31694067, 0., 0.35715103, 0., 0.32590824, 0.]],
[[0.24147315, 0., 0.22742277, 0., 0.2734162 , 0., 0.25768787],
[0., 0.33686832, 0., 0.34380934, 0., 0.31932235, 0.]],
[[0.22445586, 0., 0.29794338, 0., 0.20764738, 0., 0.26995337],
[0., 0.3731808 , 0., 0.29736063, 0., 0.32945853, 0.]],
[[0.2221506 , 0., 0.2830769 , 0., 0.21007922, 0., 0.2846933 ],
[0., 0.3024338 , 0., 0.36399618, 0., 0.33357003, 0.]]]
      # pyformat: enable
      # pylint: enable=bad-whitespace
self.assertAllClose(expected_layer_output, actual_layer_output)
self.assertAllClose(expected_prob_output, actual_prob_output)
def testMultiAuxSourceTransformerLayerFPropMatchSingle(self):
with self.session(use_gpu=True):
np.random.seed(6348575)
depth = 4
# Multi-source transformer layer
p = layers_with_attention.TransformerLayer.Params().Set(
name='multi_source_trans', random_seed=123)
p.tr_atten_tpl.num_attention_heads = 4
p.source_dim = depth
p.has_aux_atten = True
p.tr_aux_atten_tpl = (
layers_with_attention.TransformerMultiSourceAttentionLayer.Params()
.Set(
source_dim=p.source_dim,
num_source=1,
primary_source_index=0,
num_attention_heads=4))
p.mask_self_atten = True
p.tr_fflayer_tpl.hidden_dim = 7
msa_trans = layers_with_attention.TransformerLayer(p)
(source_vecs, source_padding, aux_vecs, aux_paddings,
_) = self._testTransformerAttentionLayerInputsMultiAuxSource(
['source_0', 'source_1'], depth=depth)
msa_h, msa_probs = msa_trans.FPropDefaultTheta(
source_vecs,
source_padding,
aux_vecs=aux_vecs,
aux_paddings=aux_paddings)
# Original single-source transformer decoder.
p = layers_with_attention.TransformerLayer.Params().Set(
name='single_source_trans', random_seed=123)
p.tr_atten_tpl.num_attention_heads = 4
p.tr_atten_tpl.random_seed = 123
p.source_dim = depth
p.has_aux_atten = True
p.mask_self_atten = True
p.tr_fflayer_tpl.hidden_dim = 7
ssa_trans = layers_with_attention.TransformerLayer(p)
ssa_h, ssa_probs = ssa_trans.FPropDefaultTheta(
source_vecs,
source_padding,
aux_vecs=aux_vecs['source_0'],
aux_paddings=aux_paddings['source_0'])
self.evaluate(tf.global_variables_initializer())
msa_layer_output, msa_prob_output, ssa_layer_output, ssa_prob_output = (
self.evaluate([msa_h, msa_probs, ssa_h, ssa_probs]))
self.assertAllClose(
msa_layer_output, ssa_layer_output, rtol=1e-05, atol=1e-05)
self.assertAllClose(
msa_prob_output, ssa_prob_output, rtol=1e-05, atol=1e-05)
def testTransformerLayerOutputLayerNormFProp(self):
"""Test post-layernorm Fprop."""
with self.session(use_gpu=True):
np.random.seed(6348575)
depth = 4
p = layers_with_attention.TransformerLayer.Params()
p.name = 'transformer'
p.source_dim = depth
p.has_aux_atten = True
p.tr_post_ln_tpl = layers.LayerNorm.Params()
p.mask_self_atten = True
p.tr_fflayer_tpl.hidden_dim = 7
p.tr_atten_tpl.num_attention_heads = 2
transformer = layers_with_attention.TransformerLayer(p)
(source_vecs, source_padding, aux_vecs, aux_paddings,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
h, probs = transformer.FPropDefaultTheta(
source_vecs,
source_padding,
aux_vecs=aux_vecs,
aux_paddings=aux_paddings)
self.evaluate(tf.global_variables_initializer())
actual_layer_output, actual_prob_output = self.evaluate([h, probs])
tf.logging.info(np.array_repr(actual_layer_output))
tf.logging.info(np.array_repr(actual_prob_output))
# pylint: disable=bad-whitespace
# pyformat: disable
expected_layer_output = [
[[-0.2617511, -0.17463534, -1.1612566, 1.5976431],
[ 0.06115358, -1.5903126, 1.1505843, 0.37857458]],
[[ 0.821784, 1.0885929, -1.351966, -0.5584109],
[ 1.1864979, -1.5562507, -0.04089222, 0.41064504]],
[[ 1.5548539, -0.6477773, -1.0664893, 0.15941268],
[ 1.1784918, -1.5536082, 0.43964866, -0.06453241]],
[[-0.38961875, -1.4583365, 1.0075824, 0.84037286],
[ 1.5903242, -0.6370207, 0.07592358, -1.0292271]],
[[ 0.99643826, -1.232215, -0.73679215, 0.972569],
[-1.1702524, -0.5360445, 0.18702725, 1.5192697]]]
expected_prob_output = [
[[ 0.21795762, 0., 0.26612395, 0., 0.31251648, 0., 0.20340192],
[ 0., 0.2677784 , 0., 0.32895881, 0., 0.40326279, 0.]],
[[ 0.25721505, 0., 0.24116731, 0., 0.25138181, 0., 0.2502358 ],
[ 0., 0.25691482, 0., 0.31076014, 0., 0.43232504, 0.]],
[[ 0.24550268, 0., 0.25128055, 0., 0.25109866, 0., 0.25211811],
[ 0., 0.26769161, 0., 0.32481128, 0., 0.40749705, 0.]],
[[ 0.22675318, 0., 0.26633731, 0., 0.28919035, 0., 0.21771915],
[ 0., 0.35955882, 0., 0.36869824, 0., 0.271743 , 0.]],
[[ 0.21504655, 0., 0.26958644, 0., 0.30847484, 0., 0.20689213],
[ 0., 0.29516917, 0., 0.29359812, 0., 0.41123265, 0.]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_layer_output, actual_layer_output)
self.assertAllClose(expected_prob_output, actual_prob_output)
def testTransformerLayerFPropMultiPostProj(self):
with self.session(use_gpu=True):
np.random.seed(6348575)
depth = 4
p = layers_with_attention.TransformerLayer.Params()
p.name = 'transformer'
p.source_dim = depth
p.has_aux_atten = True
p.mask_self_atten = True
p.tr_fflayer_tpl.hidden_dim = 7
p.tr_atten_tpl.num_attention_heads = 2
p.num_aux_atten_post_proj = 2
transformer = layers_with_attention.TransformerLayer(p)
(source_vecs, source_padding, aux_vecs, aux_paddings,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
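      # atten_idx picks, per example, which of the num_aux_atten_post_proj
      # post-projections the aux attention output goes through.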
# Duplicate atten_idx n=2 times.
atten_idx = tf.constant([0, 1, 1, 0, 1] * 2, dtype=tf.int32)
h, probs = transformer.FPropDefaultTheta(
source_vecs,
source_padding,
aux_vecs=aux_vecs,
aux_paddings=aux_paddings,
atten_idx=atten_idx)
self.evaluate(tf.global_variables_initializer())
actual_layer_output, actual_prob_output = self.evaluate([h, probs])
tf.logging.info(np.array_repr(actual_layer_output))
tf.logging.info(np.array_repr(actual_prob_output))
# pylint: disable=bad-whitespace
# pyformat: disable
expected_layer_output = [
[[-0.77411413, 0.86493313, 0.08914688, 1.4910977 ],
[-1.0093606 , -1.7337079 , 1.2784883 , 0.49974248]],
[[ 1.0396315 , 2.902943 , -1.1812847 , 0.19860795],
[-0.37676954, -0.79837584, 0.6419263 , 0.45496815]],
[[ 1.0858665 , -0.6838142 , -1.2464247 , 0.14764154],
[-0.45331526, -1.0229169 , 1.0660815 , -0.06151289]],
[[-1.3433903 , -1.3154784 , 1.1818855 , 0.790216 ],
[ 1.8400799 , -1.5192697 , 0.05896807, -1.94113 ]],
[[-0.11429042, -0.24730963, 0.06099784, 1.0156208 ],
[-1.9910344 , -0.5176018 , 0.2490384 , 1.3254449 ]]]
expected_prob_output = [
[[ 0.21795762, 0., 0.26612395, 0., 0.31251648, 0., 0.20340192],
[ 0., 0.2677784 , 0., 0.32895881, 0., 0.40326279, 0.]],
[[ 0.25721505, 0., 0.24116731, 0., 0.25138181, 0., 0.2502358 ],
[ 0., 0.25691482, 0., 0.31076014, 0., 0.43232504, 0.]],
[[ 0.24550268, 0., 0.25128055, 0., 0.25109866, 0., 0.25211811],
[ 0., 0.26769161, 0., 0.32481128, 0., 0.40749705, 0.]],
[[ 0.22675318, 0., 0.26633731, 0., 0.28919035, 0., 0.21771915],
[ 0., 0.35955882, 0., 0.36869824, 0., 0.271743 , 0.]],
[[ 0.21504655, 0., 0.26958644, 0., 0.30847484, 0., 0.20689213],
[ 0., 0.29516917, 0., 0.29359812, 0., 0.41123265, 0.]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_layer_output, actual_layer_output)
self.assertAllClose(expected_prob_output, actual_prob_output)
def testTransformerLayerWithInputPackingFProp(self):
with self.session(use_gpu=True):
with tf.variable_scope('transformer_packed_test', reuse=tf.AUTO_REUSE):
np.random.seed(6348575)
depth = 4
p = layers_with_attention.TransformerLayer.Params()
p.name = 'transformer'
p.source_dim = depth
p.has_aux_atten = True
p.mask_self_atten = True
p.tr_fflayer_tpl.hidden_dim = 7
p.tr_atten_tpl.num_attention_heads = 2
packed_params = p.Copy()
transformer = layers_with_attention.TransformerLayer(p)
packed_params.packed_input = True
transformer_packed = layers_with_attention.TransformerLayer(
packed_params)
dtype = tf.float32
source_vecs = tf.stack([
tf.constant(np.random.rand(2, depth), dtype=dtype) for _ in range(5)
])
source_padding = tf.transpose(
tf.constant([[0, 0, 0, 0, 1], [0, 0, 0, 0, 0]], dtype=dtype))
aux_vecs = tf.stack([
tf.constant(np.random.rand(2, depth), dtype=dtype) for _ in range(7)
])
aux_paddings = tf.transpose(
tf.constant([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1]],
dtype=dtype))
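        # Interleave the two batch elements into a single batch-1 sequence;
        # alternating segment ids keep them separate, so the packed FProp
        # must match the unpacked one.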
source_vecs_packed = tf.reshape(source_vecs, [-1, 1, depth])
aux_vecs_packed = tf.reshape(aux_vecs, [-1, 1, depth])
source_padding_packed = tf.reshape(source_padding, [-1, 1])
aux_padding_packed = tf.reshape(aux_paddings, [-1, 1])
source_segment_id = tf.transpose(
tf.constant([[0, 1, 0, 1, 0, 1, 0, 1, 0, 1]], dtype=tf.float32))
aux_segment_id = tf.transpose(
tf.constant([[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]],
dtype=tf.float32))
h, _ = transformer.FPropDefaultTheta(
source_vecs,
source_padding,
aux_vecs=aux_vecs,
aux_paddings=aux_paddings,
source_segment_id=None,
aux_segment_id=None)
h_packed, _ = transformer_packed.FPropDefaultTheta(
source_vecs_packed,
source_padding_packed,
aux_vecs=aux_vecs_packed,
aux_paddings=aux_padding_packed,
source_segment_id=source_segment_id,
aux_segment_id=aux_segment_id)
h_packed = tf.reshape(h_packed, tf.shape(h))
self.evaluate(tf.global_variables_initializer())
actual_layer, p_layer = self.evaluate([h, h_packed])
self.assertAllClose(actual_layer, p_layer)

  def testTransformerLayerExtendStep(self):
with self.session(use_gpu=True):
np.random.seed(6348575)
depth = 4
p = layers_with_attention.TransformerLayer.Params()
p.name = 'transformer'
p.source_dim = depth
p.has_aux_atten = True
p.mask_self_atten = True
p.tr_atten_tpl.num_attention_heads = 2
transformer = layers_with_attention.TransformerLayer(p)
(source_vecs, _, aux_vecs, aux_paddings,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
source_padding = tf.zeros([5, 2])
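      # Use all-zero (valid) paddings so FProp and ExtendStep see the same
      # inputs at every step.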
h1, probs1 = transformer.FPropDefaultTheta(
source_vecs,
source_padding,
aux_vecs=aux_vecs,
aux_paddings=aux_paddings)
h2 = []
probs2 = []
cached_source_vecs = tf.zeros([0, 2, 4])
cached_source_contexts = tf.zeros([0, 2, 4])
prefix_states = py_utils.NestedMap(
key=cached_source_vecs, value=cached_source_contexts)
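      # Decode one step at a time with ExtendStep; stacking the per-step
      # outputs should reproduce the batch FProp results above.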
for i in range(5):
h, probs, prefix_states = transformer.ExtendStep(
transformer.theta, source_vecs[i, :, :], prefix_states, aux_vecs,
aux_paddings)
h2.append(h)
probs2.append(probs)
h2 = tf.stack(h2)
probs2 = tf.concat(probs2, 0)
self.evaluate(tf.global_variables_initializer())
h1_v, probs1_v, h2_v, probs2_v = self.evaluate([h1, probs1, h2, probs2])
self.assertAllClose(h1_v, h2_v)
self.assertAllClose(probs1_v, probs2_v)

  def testMultiAuxSourceTransformerLayerExtendStep(self):
with self.session(use_gpu=True):
np.random.seed(6348575)
depth = 4
p = layers_with_attention.TransformerLayer.Params()
p.name = 'transformer'
p.source_dim = depth
p.has_aux_atten = True
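      # Cross-attend over two auxiliary sources, with source 0 as primary.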
p.tr_aux_atten_tpl = (
layers_with_attention.TransformerMultiSourceAttentionLayer.Params()
.Set(
source_dim=p.source_dim,
num_source=2,
primary_source_index=0,
num_attention_heads=4))
p.mask_self_atten = True
p.tr_atten_tpl.num_attention_heads = 2
transformer = layers_with_attention.TransformerLayer(p)
(source_vecs, _, aux_vecs, aux_paddings,
_) = self._testTransformerAttentionLayerInputsMultiAuxSource(
['source_0', 'source_1'], depth=depth)
source_padding = tf.zeros([5, 2])
h1, probs1 = transformer.FPropDefaultTheta(
source_vecs,
source_padding,
aux_vecs=aux_vecs,
aux_paddings=aux_paddings)
h2 = []
probs2 = []
cached_source_vecs = tf.zeros([0, 2, 4])
cached_source_contexts = tf.zeros([0, 2, 4])
prefix_states = py_utils.NestedMap(
key=cached_source_vecs, value=cached_source_contexts)
for i in range(5):
h, probs, prefix_states = transformer.ExtendStep(
transformer.theta, source_vecs[i, :, :], prefix_states, aux_vecs,
aux_paddings)
h2.append(h)
probs2.append(probs)
h2 = tf.stack(h2)
probs2 = tf.concat(probs2, 0)
self.evaluate(tf.global_variables_initializer())
h1_v, probs1_v, h2_v, probs2_v = self.evaluate([h1, probs1, h2, probs2])
self.assertAllClose(h1_v, h2_v)
self.assertAllClose(probs1_v, probs2_v)

  def testMultiAuxSourceTransformerLayerExtendStepMatchSingle(self):
with self.session(use_gpu=True):
# Prepare inputs
np.random.seed(6348575)
depth = 4
(source_vecs, _, aux_vecs, aux_paddings,
_) = self._testTransformerAttentionLayerInputsMultiAuxSource(
['source_0', 'source_1'], depth=depth)
# Multi-source transformer layer
p = layers_with_attention.TransformerLayer.Params().Set(
name='multi_source_trans', random_seed=123)
p.tr_atten_tpl.num_attention_heads = 4
p.source_dim = depth
p.has_aux_atten = True
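      # A multi-source attention restricted to a single source should match
      # the plain single-source transformer layer built below.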
p.tr_aux_atten_tpl = (
layers_with_attention.TransformerMultiSourceAttentionLayer.Params()
.Set(
source_dim=p.source_dim,
num_source=1,
primary_source_index=0,
num_attention_heads=4))
p.mask_self_atten = True
p.tr_fflayer_tpl.hidden_dim = 7
msa_trans = layers_with_attention.TransformerLayer(p)
h_msa = []
probs_msa = []
cached_source_vecs = tf.zeros([0, 2, 4])
cached_source_contexts = tf.zeros([0, 2, 4])
prefix_states = py_utils.NestedMap(
key=cached_source_vecs, value=cached_source_contexts)
for i in range(5):
h, probs, prefix_states = msa_trans.ExtendStep(msa_trans.theta,
source_vecs[i, :, :],
prefix_states, aux_vecs,
aux_paddings)
h_msa.append(h)
probs_msa.append(probs)
h_msa = tf.stack(h_msa)
probs_msa = tf.concat(probs_msa, 0)
# Original single-source transformer decoder.
p = layers_with_attention.TransformerLayer.Params().Set(
name='single_source_trans', random_seed=123)
p.tr_atten_tpl.num_attention_heads = 4
p.source_dim = depth
p.has_aux_atten = True
p.mask_self_atten = True
p.tr_fflayer_tpl.hidden_dim = 7
ssa_trans = layers_with_attention.TransformerLayer(p)
h_ssa = []
probs_ssa = []
cached_source_vecs = tf.zeros([0, 2, 4])
cached_source_contexts = tf.zeros([0, 2, 4])
prefix_states = py_utils.NestedMap(
key=cached_source_vecs, value=cached_source_contexts)
for i in range(5):
h, probs, prefix_states = ssa_trans.ExtendStep(ssa_trans.theta,
source_vecs[i, :, :],
prefix_states,
aux_vecs['source_0'],
aux_paddings['source_0'])
h_ssa.append(h)
probs_ssa.append(probs)
h_ssa = tf.stack(h_ssa)
probs_ssa = tf.concat(probs_ssa, 0)
self.evaluate(tf.global_variables_initializer())
h_msa_v, h_ssa_v, probs_msa_v, probs_ssa_v = self.evaluate(
[h_msa, h_ssa, probs_msa, probs_ssa])
tf.logging.info(np.array_repr(h_msa_v))
tf.logging.info(np.array_repr(h_ssa_v))
self.assertAllClose(h_msa_v, h_ssa_v)
self.assertAllClose(probs_msa_v, probs_ssa_v)

  def testTransformerLayerWithNgramMaskExtendStep(self):
with self.session(use_gpu=True):
np.random.seed(6348575)
depth = 4
p = layers_with_attention.TransformerLayer.Params()
p.name = 'transformer'
p.source_dim = depth
p.has_aux_atten = True
p.mask_self_atten = True
p.tr_atten_tpl.num_attention_heads = 2
# Turn on N-gram masking in the TransformerLayer.
# Before doing so though copy the self-attention params to avoid
      # the auxiliary attention being masked as well.
p.tr_aux_atten_tpl = p.tr_atten_tpl.Copy()
p.tr_atten_tpl.is_masked = True
p.tr_atten_tpl.mask_ngram_order = 3
p.tr_atten_tpl.mask_type = 'ngram'
transformer = layers_with_attention.TransformerLayer(p)
(source_vecs, _, aux_vecs, aux_paddings,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
source_padding = tf.zeros([5, 2])
h1, probs1 = transformer.FPropDefaultTheta(
source_vecs,
source_padding,
aux_vecs=aux_vecs,
aux_paddings=aux_paddings)
h2 = []
probs2 = []
cached_source_vecs = tf.zeros([0, 2, 4])
cached_source_contexts = tf.zeros([0, 2, 4])
prefix_states = py_utils.NestedMap(
key=cached_source_vecs, value=cached_source_contexts)
for i in range(5):
h, probs, prefix_states = transformer.ExtendStep(
transformer.theta, source_vecs[i, :, :], prefix_states, aux_vecs,
aux_paddings)
h2.append(h)
probs2.append(probs)
h2 = tf.stack(h2)
probs2 = tf.concat(probs2, 0)
self.evaluate(tf.global_variables_initializer())
h1_v, probs1_v, h2_v, probs2_v = self.evaluate([h1, probs1, h2, probs2])
self.assertAllClose(h1_v, h2_v)
self.assertAllClose(probs1_v, probs2_v)

  def testTransformerLayerWithPostLayernormExtendStep(self):
with self.session(use_gpu=True):
np.random.seed(6348575)
depth = 4
p = layers_with_attention.TransformerLayer.Params()
p.name = 'transformer'
p.source_dim = depth
p.has_aux_atten = True
p.mask_self_atten = True
p.tr_atten_tpl.num_attention_heads = 2
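      # Apply an additional LayerNorm to the layer output (post-layernorm).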
p.tr_post_ln_tpl = layers.LayerNorm.Params()
transformer = layers_with_attention.TransformerLayer(p)
(source_vecs, _, aux_vecs, aux_paddings,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
source_padding = tf.zeros([5, 2])
h1, probs1 = transformer.FPropDefaultTheta(
source_vecs,
source_padding,
aux_vecs=aux_vecs,
aux_paddings=aux_paddings)
h2 = []
probs2 = []
cached_source_vecs = tf.zeros([0, 2, 4])
cached_source_contexts = tf.zeros([0, 2, 4])
prefix_states = py_utils.NestedMap(
key=cached_source_vecs, value=cached_source_contexts)
for i in range(5):
h, probs, prefix_states = transformer.ExtendStep(
transformer.theta, source_vecs[i, :, :], prefix_states, aux_vecs,
aux_paddings)
h2.append(h)
probs2.append(probs)
h2 = tf.stack(h2)
probs2 = tf.concat(probs2, 0)
self.evaluate(tf.global_variables_initializer())
h1_v, probs1_v, h2_v, probs2_v = self.evaluate([h1, probs1, h2, probs2])
self.assertAllClose(h1_v, h2_v)
self.assertAllClose(probs1_v, probs2_v)

  def testEvolvedTransformerEncoderBranchedConvsLayer(self):
layer = layers_with_attention.EvolvedTransformerEncoderBranchedConvsLayer
with self.session(use_gpu=True):
tf.random.set_seed(3980847392)
inputs = tf.random.normal([5, 2, 3], seed=948387483)
paddings = tf.zeros([5, 2])
p = layer.Params()
p.name = 'et_encoder_branched_convs'
p.input_dim = 3
et_branched_convs = layer(p)
h = et_branched_convs.FPropDefaultTheta(inputs, paddings)
self.evaluate(tf.global_variables_initializer())
actual_layer_output = self.evaluate(h)
# pylint: disable=bad-whitespace
# pyformat: disable
expected_output = [
[[-0.13232423, -0.46060669, 0.72598207],
[ 0.6725747 , 1.58664441, 2.64087844]],
[[-0.21702465, -0.68267912, 1.20886588],
[ 1.69793618, 0.53306532, 1.02958691]],
[[-0.46037287, -0.42950529, -1.68443251],
[ 0.21459752, 0.42246291, -0.01271994]],
[[-0.23293658, 0.15300342, -0.83518255],
[-0.48914853, -0.44239512, -0.2328119 ]],
[[-0.57934833, 0.24165238, -1.05392623],
[-0.8292231 , 0.06175411, 1.28672981]]]
# pyformat: enable
# pylint: enable=bad-whitespace
print(np.array_repr(actual_layer_output))
self.assertAllClose(actual_layer_output, expected_output)

  def testEvolvedTransformerDecoderBranchedConvsLayer(self):
layer = layers_with_attention.EvolvedTransformerDecoderBranchedConvsLayer
with self.session(use_gpu=True):
tf.random.set_seed(3980847392)
inputs = tf.random.normal([5, 2, 3], seed=948387483)
paddings = tf.zeros([5, 2])
p = layer.Params()
p.name = 'et_decoder_branched_convs'
p.input_dim = 3
et_branched_convs = layer(p)
h = et_branched_convs.FPropDefaultTheta(inputs, paddings)
self.evaluate(tf.global_variables_initializer())
actual_layer_output = self.evaluate(h)
# pylint: disable=bad-whitespace
# pyformat: disable
expected_output = [
[[-0.31987068, -0.65715098, 0.90350437],
[ 0.00773269, 1.07779562, 4.11094666]],
[[-0.84862059, -0.93186408, 1.16371167],
[ 1.31467259, 0.03560367, 2.36822462]],
[[ 0.02183507, -0.0799394 , -1.68870354],
[ 0.77921551, 1.30145741, -0.86353606]],
[[ 0.31672907, 0.50000876, -0.93973017],
[-0.54707348, 0.19211179, -1.45307386]],
[[-0.46405494, 0.65833056, -1.09345317],
[-1.17221224, -0.08027397, 0.84021652]]]
# pyformat: enable
# pylint: enable=bad-whitespace
print(np.array_repr(actual_layer_output))
self.assertAllClose(actual_layer_output, expected_output)

  def testEvolvedTransformerEncoderLayerConstruction(self):
p = layers_with_attention.EvolvedTransformerEncoderLayer.Params()
p.name = 'evolved_transformer_encoder'
p.source_dim = 4
p.transformer_tpl.tr_fflayer_tpl.hidden_dim = 7
p.transformer_tpl.tr_atten_tpl.num_attention_heads = 2
_ = layers_with_attention.EvolvedTransformerEncoderLayer(p)

  def testEvolvedTransformerEncoderLayerFProp(self):
with self.session(use_gpu=True):
np.random.seed(6348575)
depth = 4
p = layers_with_attention.EvolvedTransformerEncoderLayer.Params()
p.name = 'evolved_transformer_encoder'
p.source_dim = depth
p.transformer_tpl.tr_atten_tpl.num_attention_heads = 2
transformer = layers_with_attention.EvolvedTransformerEncoderLayer(p)
(source_vecs, source_padding, aux_vecs, aux_paddings,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
h, probs = transformer.FPropDefaultTheta(
source_vecs,
source_padding,
aux_vecs=aux_vecs,
aux_paddings=aux_paddings)
self.evaluate(tf.global_variables_initializer())
actual_layer_output, actual_prob_output = self.evaluate([h, probs])
tf.logging.info(np.array_repr(actual_layer_output))
tf.logging.info(np.array_repr(actual_prob_output))
# pylint: disable=bad-whitespace
# pyformat: disable
expected_layer_output = [
[[-1.6823182 , -0.33362526, 2.3092952 , -1.2768047 ],
[-1.2375467 , -1.7528018 , 0.6906311 , 1.4148781 ]],
[[-0.3703399 , -0.8586656 , 2.4906673 , -2.2977662 ],
[-0.60055196, -0.23450398, -1.2372489 , 1.1125396 ]],
[[ 2.0659933 , 0.82173675, -0.17450655, -1.7258614 ],
[-0.9853776 , -0.37829524, -0.77619284, 1.516935 ]],
[[-0.5684509 , -0.15367106, 2.3549438 , -0.7618298 ],
[ 1.9434962 , -1.6360642 , -2.0586298 , 0.6888489 ]],
[[-1.4064629 , 0.5313531 , 1.5535516 , -1.0066429 ],
[-1.5438917 , -0.40709162, -0.8882869 , 2.037459 ]]]
expected_prob_output = [
[[0.3098957 , 0.21260454, 0. , 0. , 0.47749978],
[0. , 0.24464089, 0.24325356, 0.5121056 , 0. ]],
[[0.27023065, 0.43278426, 0. , 0. , 0.29698506],
[0. , 0.35950065, 0.2941079 , 0.3463914 , 0. ]],
[[0.350026 , 0.38011283, 0. , 0. , 0.26986116],
[0. , 0.32311335, 0.25958124, 0.41730544, 0. ]],
[[0.31028467, 0.31974676, 0. , 0. , 0.36996856],
[0. , 0.34648925, 0.38719398, 0.2663167 , 0. ]],
[[0.28063056, 0.15659373, 0. , 0. , 0.5627757 ],
[0. , 0.28404602, 0.23116755, 0.4847864 , 0. ]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_layer_output, actual_layer_output)
self.assertAllClose(expected_prob_output, actual_prob_output)

  def testEvolvedTransformerDecoderLayerConstruction(self):
p = layers_with_attention.EvolvedTransformerDecoderLayer.Params()
p.name = 'evolved_transformer_decoder'
p.source_dim = 16
p.transformer_tpl.tr_atten_tpl.num_attention_heads = 2
p.has_aux_atten = True
p.mask_self_atten = True
_ = layers_with_attention.EvolvedTransformerDecoderLayer(p)

  def testEvolvedTransformerDecoderLayerFProp(self):
with self.session(use_gpu=True):
np.random.seed(6348575)
depth = 4
p = layers_with_attention.EvolvedTransformerDecoderLayer.Params()
p.name = 'evolved_transformer_decoder'
p.source_dim = depth
p.has_aux_atten = True
p.mask_self_atten = True
p.tr_double_heads_atten_tpl.num_attention_heads = 2
p.tr_atten_tpl.num_attention_heads = 2
p.transformer_tpl.tr_atten_tpl.num_attention_heads = 2
transformer = layers_with_attention.EvolvedTransformerDecoderLayer(p)
(source_vecs, source_padding, aux_vecs, aux_paddings,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
h, probs = transformer.FPropDefaultTheta(
source_vecs,
source_padding,
aux_vecs=aux_vecs,
aux_paddings=aux_paddings)
self.evaluate(tf.global_variables_initializer())
actual_layer_output, actual_prob_output = self.evaluate([h, probs])
tf.logging.info(np.array_repr(actual_layer_output))
tf.logging.info(np.array_repr(actual_prob_output))
# pylint: disable=bad-whitespace
# pyformat: disable
      expected_layer_output = [
[[-2.15844011, 0.54941475, 1.01636434, 0.13751738],
[-1.31499887, -0.9501676, 0.874282, 0.58270419]],
[[-0.49268177, 2.71167898, -0.78087997, 0.43936318],
[-1.11428595, -1.38933206, 0.34404463, 0.43363893]],
[[ 0.57303172, 0.42080224, -0.50416583, -1.36097562],
[-1.26460135, -1.21081781, 0.9377467, 0.03642488]],
[[-1.52767372, -0.93615997, 1.33185053, 0.24640131],
[ 0.16062447, 2.39912128, 0.1896024, -0.70986807]],
[[-1.27725732, -1.51283062, 0.26704332, 0.65503371],
[-1.64287043, -0.30310085, -0.36987182, 1.57325172]]]
expected_prob_output = [
[[0.28604817, 0., 0.24327257, 0., 0.26117378, 0., 0.20950545],
[0., 0.26639479, 0., 0.38120365, 0., 0.35240155, 0.]],
[[0.24309734, 0., 0.24040565, 0., 0.22922358, 0., 0.2872735],
[0., 0.27082229, 0., 0.36431897, 0., 0.36485875, 0.]],
[[0.25640261, 0., 0.25117433, 0., 0.25067171, 0., 0.24175137],
[0., 0.27037328, 0., 0.38163245, 0., 0.34799421, 0.]],
[[0.27474535, 0., 0.25523224, 0., 0.27800021, 0., 0.19202216],
[0., 0.34553668, 0., 0.35240823, 0., 0.30205506, 0.]],
[[0.24020916, 0., 0.25431803, 0., 0.26219654, 0., 0.24327625],
[0., 0.30723149, 0., 0.32563132, 0., 0.36713719, 0.]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_layer_output, actual_layer_output)
self.assertAllClose(expected_prob_output, actual_prob_output)

  def testEvolvedTransformerDecoderLayerExtendStep(self):
with self.session(use_gpu=True):
np.random.seed(6348575)
depth = 4
p = layers_with_attention.EvolvedTransformerDecoderLayer.Params()
p.name = 'evolved_transformer_decoder'
p.source_dim = depth
p.has_aux_atten = True
p.mask_self_atten = True
p.tr_double_heads_atten_tpl.num_attention_heads = 2
p.tr_atten_tpl.num_attention_heads = 2
p.transformer_tpl.tr_atten_tpl.num_attention_heads = 2
et_decoder = layers_with_attention.EvolvedTransformerDecoderLayer(p)
(source_vecs, _, aux_vecs, aux_paddings,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
source_padding = tf.zeros([5, 2])
h1, probs1 = et_decoder.FPropDefaultTheta(
source_vecs,
source_padding,
aux_vecs=aux_vecs,
aux_paddings=aux_paddings)
h2 = []
probs2 = []
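      # The ET decoder's incremental state caches its two attention
      # sub-layers separately, plus the running input of the branched convs.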
double_head_attention_states = py_utils.NestedMap(
key=tf.zeros([0, 2, 4]), value=tf.zeros([0, 2, 4]))
transformer_layer_states = py_utils.NestedMap(
key=tf.zeros([0, 2, 4]), value=tf.zeros([0, 2, 4]))
branched_convs_input = tf.zeros([0, 2, 4])
prefix_states = py_utils.NestedMap(
double_head_attention_states=double_head_attention_states,
transformer_layer_states=transformer_layer_states,
branched_convs_input=branched_convs_input)
for i in range(5):
h, probs, prefix_states = et_decoder.ExtendStep(et_decoder.theta,
source_vecs[i, :, :],
prefix_states, aux_vecs,
aux_paddings)
h2.append(h)
probs2.append(probs)
h2 = tf.stack(h2)
probs2 = tf.concat(probs2, 0)
self.evaluate(tf.global_variables_initializer())
h1_v, probs1_v, h2_v, probs2_v = self.evaluate([h1, probs1, h2, probs2])
self.assertAllClose(h1_v, h2_v)
self.assertAllClose(probs1_v, probs2_v)

  def testStyleLayer(self):
with self.session(use_gpu=False):
p = layers_with_attention.StyleLayer.Params().Set(
name='style_layer',
input_dim=10,
output_dim=8,
num_styles=16,
random_seed=28384)
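      # The layer attends over a bank of 16 learned style embeddings, with
      # the input features acting as the query.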
tf.random.set_seed(8372749040)
np.random.seed(12345)
sl = p.Instantiate()
features = tf.random.normal([2, 10], seed=28384)
latent, atten_probs = sl.FPropDefaultTheta(features)
self.evaluate(tf.global_variables_initializer())
latent_v, atten_probs_v = self.evaluate([latent, atten_probs])
CompareToGoldenSingleFloat(self, -1.208686, np.sum(latent_v))
CompareToGoldenSingleFloat(self, 2.0, np.sum(atten_probs_v))

  def testStyleLayerWithFeedinAttenProbs(self):
with self.session(use_gpu=False):
p = layers_with_attention.StyleLayer.Params().Set(
name='style_layer',
input_dim=10,
output_dim=8,
num_styles=16,
num_heads=4,
enable_ctx_post_proj=False,
random_seed=28384)
tf.random.set_seed(8372749040)
np.random.seed(12345)
sl = p.Instantiate()
atten_probs = tf.constant([[1.0] + [0.0] * 15] * 2, dtype=tf.float32)
ids = tf.constant([0, 0], dtype=tf.int32)
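      # A one-hot attention distribution over the styles should reproduce a
      # direct embedding lookup of the corresponding style id.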
latent_from_probs = sl.StyleEmbFromProbs(sl.theta, atten_probs)
latent_from_lookup = sl.EmbLookup(sl.theta, ids)
self.evaluate(tf.global_variables_initializer())
latent_p, latent_l = self.evaluate(
[latent_from_probs, latent_from_lookup])
self.assertAllClose(latent_p, latent_l)

  def testStyleLayer02(self):
with self.session(use_gpu=False):
p = layers_with_attention.StyleLayer.Params().Set(
name='style_layer',
input_dim=10,
output_dim=8,
num_styles=16,
random_seed=72738)
tf.random.set_seed(8372749040)
np.random.seed(12345)
sl = p.Instantiate()
features = tf.random.normal([2, 10])
features = tf.concat([features, features], 0)
latent, _ = sl.FPropDefaultTheta(features)
self.evaluate(tf.global_variables_initializer())
latent_v = self.evaluate(latent)
# Makes sure identical input results in identical style output.
self.assertAllClose(latent_v[:2], latent_v[2:])

  def _testTransformerMultitaskLayerInputs(self, depth=3, dtype=tf.float32):
np.random.seed(505837249)
source_vecs = tf.stack(
[tf.constant(np.random.rand(2, depth), dtype=dtype) for _ in range(5)])
source_padding = tf.transpose(
tf.constant([[0, 0, 1, 1, 0], [1, 0, 0, 0, 1]], dtype=dtype))
aux_source_vecs = tf.stack(
[tf.constant(np.random.rand(2, depth), dtype=dtype) for _ in range(7)])
aux_source_paddings = tf.transpose(
tf.constant([[0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 1]],
dtype=dtype))
source_task_id = tf.constant([[2, 3]], dtype=tf.int32)
return (source_vecs, source_padding, aux_source_vecs, aux_source_paddings,
source_task_id)

  def testTransformerLayerWithMultitaskAdaptersConstruction(self):
p = layers_with_attention.TransformerLayerWithMultitaskAdapters.Params()
p.name = 'transformer_with_adapters'
p.source_dim = 4
p.tr_fflayer_tpl.hidden_dim = 7
p.tr_atten_tpl.num_attention_heads = 2
p.has_aux_atten = True
p.mask_self_atten = True
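    # Per-task residual adapters: 4 tasks, each with a bottleneck of size 2.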
p.adapter_tpl.input_dim = 4
p.adapter_tpl.num_tasks = 4
p.adapter_tpl.bottleneck_dim = 2
_ = layers_with_attention.TransformerLayerWithMultitaskAdapters(p)

  def testTransformerLayerWithMultitaskAdaptersFProp(self):
with self.session(use_gpu=True):
np.random.seed(6348575)
depth = 4
p = layers_with_attention.TransformerLayerWithMultitaskAdapters.Params()
p.name = 'transformer'
p.source_dim = depth
p.has_aux_atten = True
p.mask_self_atten = True
p.tr_fflayer_tpl.hidden_dim = 7
p.tr_atten_tpl.num_attention_heads = 2
p.adapter_tpl.input_dim = 4
p.adapter_tpl.num_tasks = 4
p.adapter_tpl.bottleneck_dim = 2
transformer = layers_with_attention.TransformerLayerWithMultitaskAdapters(
p)
(source_vecs, source_padding, aux_vecs, aux_paddings,
source_task_id) = self._testTransformerMultitaskLayerInputs(depth=depth)
h, probs = transformer.FPropDefaultTheta(
source_vecs,
source_padding,
aux_vecs=aux_vecs,
aux_paddings=aux_paddings,
source_task_id=source_task_id)
self.evaluate(tf.global_variables_initializer())
actual_layer_output, actual_prob_output = self.evaluate([h, probs])
tf.logging.info(np.array_repr(actual_layer_output))
tf.logging.info(np.array_repr(actual_prob_output))
# pylint: disable=bad-whitespace
# pyformat: disable
expected_layer_output = [
[[ 0.02441728, 0.26923186, 0.68582684, 1.1531992 ],
[ 0.69027936, -1.94770098, 2.00558615, 0.17057157]],
[[ 1.81022859, 2.37042093, 0.03620988, -0.32401592],
[ 1.66707945, -1.95131969, 0.64937419, 0.05853128]],
[[ 1.53475547, -0.60239077, -0.05797344, -0.48760295],
[ 1.53514266, -2.1231215 , 0.98074663, -0.5577352 ]],
[[-1.32504404, -1.28702664, 2.597996 , 0.24809647],
[ 3.7842629 , -1.46549737, 0.91363102, -2.37071466]],
[[ 0.52196532, -0.73371518, 0.86030912, 0.33838278],
[ 0.01923725, -0.8887378 , 1.08245265, 1.19935369]]
]
expected_prob_output = [
[[ 0.21795765, 0, 0.26612395, 0, 0.31251645, 0, 0.20340192],
[ 0, 0.2677784 , 0, 0.32895881, 0, 0.40326279, 0]],
[[ 0.25721508, 0, 0.24116732, 0, 0.25138181, 0, 0.2502358 ],
[ 0, 0.25691482, 0, 0.31076014, 0, 0.43232504, 0]],
[[ 0.24550268, 0, 0.25128055, 0, 0.25109866, 0, 0.25211811],
[ 0, 0.26769164, 0, 0.32481131, 0, 0.40749705, 0]],
[[ 0.22675318, 0, 0.26633731, 0, 0.28919035, 0, 0.21771917],
[ 0, 0.35955882, 0, 0.36869821, 0, 0.271743 , 0]],
[[ 0.21504655, 0, 0.26958644, 0, 0.30847484, 0, 0.20689213],
[ 0, 0.29516917, 0, 0.29359812, 0, 0.41123268, 0]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_layer_output, actual_layer_output)
self.assertAllClose(expected_prob_output, actual_prob_output)

  def testTransformerLayerWithMultitaskAdaptersWithInputPackingFProp(self):
with self.session(use_gpu=True):
with tf.variable_scope('transformer_packed_test', reuse=tf.AUTO_REUSE):
np.random.seed(6348575)
depth = 4
p = layers_with_attention.TransformerLayerWithMultitaskAdapters.Params()
p.name = 'transformer_with_adapters'
p.source_dim = depth
p.has_aux_atten = True
p.mask_self_atten = True
p.tr_fflayer_tpl.hidden_dim = 7
p.tr_atten_tpl.num_attention_heads = 2
p.adapter_tpl.input_dim = 4
p.adapter_tpl.num_tasks = 4
p.adapter_tpl.bottleneck_dim = 2
packed_params = p.Copy()
transformer = layers_with_attention.TransformerLayerWithMultitaskAdapters(
p)
packed_params.packed_input = True
transformer_packed = layers_with_attention.TransformerLayerWithMultitaskAdapters(
packed_params)
dtype = tf.float32
source_vecs = tf.stack([
tf.constant(np.random.rand(2, depth), dtype=dtype) for _ in range(5)
])
source_padding = tf.transpose(
tf.constant([[0, 0, 0, 0, 1], [0, 0, 0, 0, 0]], dtype=dtype))
aux_vecs = tf.stack([
tf.constant(np.random.rand(2, depth), dtype=dtype) for _ in range(7)
])
aux_paddings = tf.transpose(
tf.constant([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1]],
dtype=dtype))
source_task_id = tf.constant([[2, 3]], dtype=tf.int32)
source_vecs_packed = tf.reshape(source_vecs, [-1, 1, depth])
aux_vecs_packed = tf.reshape(aux_vecs, [-1, 1, depth])
source_padding_packed = tf.reshape(source_padding, [-1, 1])
aux_padding_packed = tf.reshape(aux_paddings, [-1, 1])
source_task_id_packed = tf.transpose(
tf.constant([[2, 3, 2, 3, 2, 3, 2, 3, 2, 3]], dtype=tf.int32))
source_segment_id = tf.transpose(
tf.constant([[0, 1, 0, 1, 0, 1, 0, 1, 0, 1]], dtype=tf.float32))
aux_segment_id = tf.transpose(
tf.constant([[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]],
dtype=tf.float32))
h, _ = transformer.FPropDefaultTheta(
source_vecs,
source_padding,
aux_vecs=aux_vecs,
aux_paddings=aux_paddings,
source_segment_id=None,
aux_segment_id=None,
source_task_id=source_task_id)
h_packed, _ = transformer_packed.FPropDefaultTheta(
source_vecs_packed,
source_padding_packed,
aux_vecs=aux_vecs_packed,
aux_paddings=aux_padding_packed,
source_segment_id=source_segment_id,
aux_segment_id=aux_segment_id,
source_task_id=source_task_id_packed)
h_packed = tf.reshape(h_packed, tf.shape(h))
self.evaluate(tf.global_variables_initializer())
actual_layer, p_layer = self.evaluate([h, h_packed])
self.assertAllClose(actual_layer, p_layer)

  def testTransformerLayerWithMultitaskAdaptersExtendStep(self):
with self.session(use_gpu=True):
np.random.seed(6348575)
depth = 4
p = layers_with_attention.TransformerLayerWithMultitaskAdapters.Params()
p.name = 'transformer'
p.source_dim = depth
p.has_aux_atten = True
p.mask_self_atten = True
p.tr_atten_tpl.num_attention_heads = 2
p.adapter_tpl.input_dim = 4
p.adapter_tpl.num_tasks = 4
p.adapter_tpl.bottleneck_dim = 2
transformer = layers_with_attention.TransformerLayerWithMultitaskAdapters(
p)
(source_vecs, _, aux_vecs, aux_paddings,
source_task_id) = self._testTransformerMultitaskLayerInputs(depth=depth)
source_padding = tf.zeros([5, 2])
h1, probs1 = transformer.FPropDefaultTheta(
source_vecs,
source_padding,
aux_vecs=aux_vecs,
aux_paddings=aux_paddings,
source_task_id=source_task_id)
h2 = []
probs2 = []
cached_source_vecs = tf.zeros([0, 2, 4])
cached_source_contexts = tf.zeros([0, 2, 4])
prefix_states = py_utils.NestedMap(
key=cached_source_vecs, value=cached_source_contexts)
for i in range(5):
h, probs, prefix_states = transformer.ExtendStep(
transformer.theta,
source_vecs[i, :, :],
prefix_states,
aux_vecs,
aux_paddings,
source_task_id=source_task_id[0, :])
h2.append(h)
probs2.append(probs)
h2 = tf.stack(h2)
probs2 = tf.concat(probs2, 0)
self.evaluate(tf.global_variables_initializer())
h1_v, probs1_v, h2_v, probs2_v = self.evaluate([h1, probs1, h2, probs2])
self.assertAllClose(h1_v, h2_v)
self.assertAllClose(probs1_v, probs2_v)

  def testCCTFeedForwardLayerConstruction(self):
p = layers_with_attention.CCTFeedForwardLayer.Params()
p.name = 'cct_fflayer_1'
p.input_dim = 3
p.hidden_dim = 7
p.num_blocks = 2
p.gating_tpl.hidden_layer_dim = 2
p.gating_tpl.noise_std = 5.0
p.gating_tpl.noise_warmup_steps = 100
_ = layers_with_attention.CCTFeedForwardLayer(p)

  def testCCTFeedForwardLayerTraining(self):
with self.session(use_gpu=True):
tf.random.set_seed(3980847392)
inputs = tf.random.normal([5, 2, 3], seed=948387483)
paddings = tf.zeros([5, 2])
p = layers_with_attention.CCTFeedForwardLayer.Params()
p.name = 'transformer_fflayer'
p.input_dim = 3
p.hidden_dim = 7
p.num_blocks = 2
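      # Noisy gating over the two blocks: during training the gate values
      # p_c stay soft (near 0.5); in eval they harden to 0/1 (see the
      # inference test below).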
p.gating_tpl.hidden_layer_dim = 2
p.gating_tpl.noise_std = 5.0
p.gating_tpl.noise_warmup_steps = 100
cct_fflayer = layers_with_attention.CCTFeedForwardLayer(p)
h, p_c = cct_fflayer.FPropDefaultTheta(inputs, paddings)
self.evaluate(tf.global_variables_initializer())
actual_layer_output, p_c_val = self.evaluate([h, p_c])
# pylint: disable=bad-whitespace
# pyformat: disable
expected_output = [
[[ 0.49714983, -1.1684668 , 0.4889576 ],
[ 1.7869478 , 1.4456576 , 1.4123362 ]],
[[ 0.10564739, -1.5359519 , 0.67742175],
[ 1.6211604 , 0.583192 , 1.056936 ]],
[[-0.01121134, -0.78554434, -0.84111285],
[ 0.45078042, 0.63005054, 0.08024757]],
[[ 0.162924 , 0.14500974, -0.32797086],
[ 0.41885388, -0.5852693 , -1.7245001 ]],
[[-0.6601118 , 0.30835745, -0.48543385],
[-0.04813027, -0.04633661, -0.21723843]]]
expected_p_c = [
[[0.5607947 , 0.49624035],
[0.72082597, 0.50216115]],
[[0.6352798 , 0.49843985],
[0.5 , 0.5 ]],
[[0.5 , 0.5 ],
[0.5 , 0.5 ]],
[[0.5 , 0.5 ],
[0.7562946 , 0.50510687]],
[[0.62267053, 0.50738835],
[0.73273706, 0.5029184 ]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(actual_layer_output, expected_output)
self.assertAllClose(p_c_val, expected_p_c)

  def testCCTFeedForwardLayerInference(self):
with self.session(use_gpu=True), self.SetEval(True):
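      # In eval mode the CCT gating is deterministic, so p_c collapses to
      # hard 0/1 block choices.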
tf.random.set_seed(3980847392)
inputs = tf.random.normal([5, 2, 3], seed=948387483)
paddings = tf.zeros([5, 2])
p = layers_with_attention.CCTFeedForwardLayer.Params()
p.name = 'transformer_fflayer'
p.input_dim = 3
p.hidden_dim = 7
p.num_blocks = 2
p.gating_tpl.hidden_layer_dim = 2
p.gating_tpl.noise_std = 5.0
p.gating_tpl.noise_warmup_steps = 100
cct_fflayer = layers_with_attention.CCTFeedForwardLayer(p)
h, p_c = cct_fflayer.FPropDefaultTheta(inputs, paddings)
self.evaluate(tf.global_variables_initializer())
actual_layer_output, p_c_val = self.evaluate([h, p_c])
# pylint: disable=bad-whitespace
# pyformat: disable
expected_output = [
[[ 1.1921753 , -0.78980637, -0.58472836],
[ 2.5051842 , 1.6491661 , 0.49059153]],
[[ 0.6877271 , -1.1452659 , -0.29534382],
[ 1.5774723 , 0.6462606 , 1.0375552 ]],
[[ 0.12175584, -1.2262938 , -0.5333306 ],
[ 0.4632102 , 0.7119628 , -0.01409443]],
[[ 0.16090955, 0.06721614, -0.24816278],
[ 0.9799552 , -0.2861529 , -2.5847178 ]],
[[-0.48719 , 0.18763718, -0.53763545],
[ 0.5886377 , 0.21293162, -1.1132748 ]]
]
expected_p_c = [
[[1., 0.],
[1., 1.]],
[[1., 0.],
[1., 1.]],
[[1., 1.],
[1., 1.]],
[[1., 1.],
[1., 1.]],
[[1., 1.],
[1., 1.]]
]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(actual_layer_output, expected_output, atol=2e-6)
self.assertAllClose(p_c_val, expected_p_c)

  def testTransformerWithContextLayerConstruction(self):
p = layers_with_attention.TransformerWithContextLayer.Params()
p.name = 'transformer_1'
p.source_dim = 4
p.tr_fflayer_tpl.hidden_dim = 7
p.tr_atten_tpl.num_attention_heads = 2
layer = p.Instantiate()
# output_dim is equal to source_dim when p.output_dim == 0
self.assertEqual(0, p.output_dim)
self.assertEqual(p.source_dim, layer.fflayer.output_dim)

  def testTransformerWithContextLayerFProp(self):
with self.session(use_gpu=True):
np.random.seed(6348575)
depth = 4
p = layers_with_attention.TransformerWithContextLayer.Params()
p.name = 'transformer'
p.source_dim = depth
p.tr_fflayer_tpl.hidden_dim = 7
p.tr_atten_tpl.num_attention_heads = 2
transformer = p.Instantiate()
(source_vecs, source_padding, aux_vecs, aux_paddings,
_) = self._testTransformerAttentionLayerInputs(depth)
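      # The context layer cross-attends to both the aux and a second
      # "tertiary" input; the same tensors are reused for both here.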
h, probs = transformer.FPropDefaultTheta(
source_vecs,
source_padding,
aux_vecs=aux_vecs,
aux_paddings=aux_paddings,
tertiary_vecs=aux_vecs,
tertiary_paddings=aux_paddings)
self.evaluate(tf.global_variables_initializer())
actual_layer_output, actual_prob_output = self.evaluate([h, probs])
tf.logging.info(np.array_repr(actual_layer_output))
tf.logging.info(np.array_repr(actual_prob_output))
# pylint: disable=bad-whitespace
# pyformat: disable
expected_layer_output = [
[[ 0.55129296, -0.7571765 , 0.281192 , 0.8710322 ],
[ 0.5072957 , -1.3714458 , 1.5689826 , -0.0971924 ]],
[[ 2.2560897 , 2.7890472 , 0.016873 , -0.5172725 ],
[ 1.4128124 , -2.0595124 , 0.37241971, -0.6075135 ]],
[[ 2.57011 , -0.8678784 , -0.33203793, -0.18508816],
[ 1.3549538 , -2.0990794 , 0.62103236, -0.9975941 ]],
[[ 0.15144205, -1.1681134 , 1.7113727 , 0.4682465 ],
[ 2.9454587 , -1.4413761 , 0.5215157 , -2.1541023 ]],
[[ 1.5092299 , -1.7608491 , 0.21144068, 0.22785848],
[-0.766488 , -0.487573 , 1.0574573 , 0.81118184]]]
expected_prob_output = [
[[0.223735 , 0. , 0.26685917, 0. , 0.2968173 ,
0. , 0.2125885 ],
[0. , 0.28585374, 0. , 0.35088098, 0. ,
0.36326528, 0. ]],
[[0.2703818 , 0. , 0.23092957, 0. , 0.2249705 ,
0. , 0.27371815],
[0. , 0.26997963, 0. , 0.33745134, 0. ,
0.39256904, 0. ]],
[[0.25208434, 0. , 0.24830116, 0. , 0.23168065,
0. , 0.26793382],
[0. , 0.2847324 , 0. , 0.3477454 , 0. ,
0.36752218, 0. ]],
[[0.23778549, 0. , 0.26169604, 0. , 0.26542395,
0. , 0.23509452],
[0. , 0.3603859 , 0. , 0.37519425, 0. ,
0.26441985, 0. ]],
[[0.22522289, 0. , 0.26782405, 0. , 0.28599125,
0. , 0.22096181],
[0. , 0.29979968, 0. , 0.31155068, 0. ,
0.38864967, 0. ]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_layer_output, actual_layer_output)
self.assertAllClose(expected_prob_output, actual_prob_output)

  def testTransformerWithContextLayerPackedInputFProp(self):
with self.session(use_gpu=True):
with tf.variable_scope('transformer_packed_test', reuse=tf.AUTO_REUSE):
np.random.seed(6348575)
depth = 4
        p = layers_with_attention.TransformerWithContextLayer.Params()
p.name = 'transformer'
p.source_dim = depth
p.tr_fflayer_tpl.hidden_dim = 7
p.tr_atten_tpl.num_attention_heads = 2
transformer = p.Instantiate()
packed_params = p.Copy()
packed_params.packed_input = True
transformer_packed = packed_params.Instantiate()
dtype = tf.float32
source_vecs = tf.stack([
tf.constant(np.random.rand(2, depth), dtype=dtype) for _ in range(5)
])
source_padding = tf.transpose(
tf.constant([[0, 0, 0, 0, 1], [0, 0, 0, 0, 0]], dtype=dtype))
aux_vecs = tf.stack([
tf.constant(np.random.rand(2, depth), dtype=dtype) for _ in range(7)
])
tertiary_vecs = tf.stack([
tf.constant(np.random.rand(2, depth), dtype=dtype) for _ in range(7)
])
aux_paddings = tf.transpose(
tf.constant([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1]],
dtype=dtype))
source_vecs_packed = tf.reshape(source_vecs, [-1, 1, depth])
aux_vecs_packed = tf.reshape(aux_vecs, [-1, 1, depth])
tertiary_vecs_packed = tf.reshape(tertiary_vecs, [-1, 1, depth])
source_padding_packed = tf.reshape(source_padding, [-1, 1])
aux_padding_packed = tf.reshape(aux_paddings, [-1, 1])
source_segment_id = tf.transpose(
tf.constant([[0, 1, 0, 1, 0, 1, 0, 1, 0, 1]], dtype=tf.float32))
aux_segment_id = tf.transpose(
tf.constant([[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]],
dtype=tf.float32))
h, _ = transformer.FPropDefaultTheta(
source_vecs,
source_padding,
aux_vecs=aux_vecs,
aux_paddings=aux_paddings,
tertiary_vecs=tertiary_vecs,
tertiary_paddings=aux_paddings)
h_packed, _ = transformer_packed.FPropDefaultTheta(
source_vecs_packed,
source_padding_packed,
aux_vecs=aux_vecs_packed,
aux_paddings=aux_padding_packed,
source_segment_id=source_segment_id,
aux_segment_id=aux_segment_id,
tertiary_vecs=tertiary_vecs_packed,
tertiary_paddings=aux_padding_packed,
tertiary_segment_id=aux_segment_id)
h_packed = tf.reshape(h_packed, tf.shape(h))
self.evaluate(tf.global_variables_initializer())
actual_layer, p_layer = self.evaluate([h, h_packed])
self.assertAllClose(actual_layer, p_layer)

  def testTransformerWithContextLayerExtendStep(self):
with self.session(use_gpu=True):
np.random.seed(6348575)
depth = 4
p = layers_with_attention.TransformerWithContextLayer.Params()
p.name = 'transformer'
p.source_dim = depth
p.tr_atten_tpl.num_attention_heads = 2
transformer = p.Instantiate()
(source_vecs, source_padding, aux_vecs, aux_paddings,
_) = self._testTransformerAttentionLayerInputs(depth)
source_padding = tf.zeros([5, 2])
h1, probs1 = transformer.FPropDefaultTheta(
source_vecs,
source_padding,
aux_vecs=aux_vecs,
aux_paddings=aux_paddings,
tertiary_vecs=aux_vecs,
tertiary_paddings=aux_paddings)
h2 = []
probs2 = []
cached_source_vecs = tf.zeros([0, 2, 4])
cached_source_contexts = tf.zeros([0, 2, 4])
prefix_states = py_utils.NestedMap(
key=cached_source_vecs, value=cached_source_contexts)
for i in range(5):
h, probs, prefix_states = transformer.ExtendStep(
transformer.theta,
source_vecs[i, :, :],
prefix_states,
aux_vecs,
aux_paddings,
tertiary_vecs=aux_vecs,
tertiary_paddings=aux_paddings)
h2.append(h)
probs2.append(probs)
h2 = tf.stack(h2)
probs2 = tf.concat(probs2, 0)
self.evaluate(tf.global_variables_initializer())
h1_v, probs1_v, h2_v, probs2_v = self.evaluate([h1, probs1, h2, probs2])
self.assertAllClose(h1_v, h2_v)
self.assertAllClose(probs1_v, probs2_v)

  def testCCTAttentionLayerSelfAttentionTraining(self):
with self.session(use_gpu=True) as sess:
depth = 4
p = layers_with_attention.CCTAttentionLayer.Params()
p.name = 'transformer_atten'
p.source_dim = depth
p.is_masked = True
p.num_attention_heads = 2
p.gating_tpl.hidden_layer_dim = 2
p.gating_tpl.noise_std = 5.0
p.gating_tpl.noise_warmup_steps = 100
transformer_atten = layers_with_attention.CCTAttentionLayer(p)
(source_vecs, source_padding, _, _,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
ctx, probs, qpc, spc = transformer_atten.FPropDefaultTheta(
source_vecs, source_padding)
tf.global_variables_initializer().run()
actual_ctx, actual_probs, actual_qpc, actual_spc = sess.run(
[ctx, probs, qpc, spc])
# pylint: disable=bad-whitespace
# pyformat: disable
expected_ctx = [
[[-0.9170906 , 0.89127994, 0.8682031 , -0.8423924 ],
[-1.2874005 , -0.76474655, 0.5771928 , 1.4749541 ]],
[[ 0.34465155, 0.74996084, -0.48622286, -0.6083897 ],
[-0.7486481 , -0.07628638, -0.99187833, 1.8168143 ]],
[[ 1.6986014 , -0.44173932, -0.7130059 , -0.5438557 ],
[-1.3927674 , -0.09861529, 0.3361559 , 1.1552272 ]],
[[-0.5439662 , -1.0707575 , 1.8813989 , -0.26667514],
[ 1.1484473 , 0.9964316 , -1.2344118 , -0.91046673]],
[[-0.06898946, -1.5815425 , -0.45298773, 2.1035194 ],
[-1.7475295 , 0.27231437, -0.8034381 , 2.2786536 ]]]
expected_probs = [
[[1. , 0. , 0. , 0. , 0. ],
[0.2 , 0.2 , 0.2 , 0.2 , 0.2 ]],
[[0.4238176 , 0.57618237, 0. , 0. , 0. ],
[0. , 1. , 0. , 0. , 0. ]],
[[0.34105754, 0.65894246, 0. , 0. , 0. ],
[0. , 0.55719167, 0.44280833, 0. , 0. ]],
[[0.6528083 , 0.34719166, 0. , 0. , 0. ],
[0. , 0.32477915, 0.36445653, 0.31076428, 0. ]],
[[0.28325003, 0.21873125, 0. , 0. , 0.49801874],
[0. , 0.43867606, 0.2793855 , 0.28193837, 0. ]]]
expected_qpc = [
[[0.5 ],
[0.5818492 ]],
[[0.5411409 ],
[0.55023897]],
[[0.56948507],
[0.5499979 ]],
[[0.5166038 ],
[0.58645904]],
[[0.54155153],
[0.5 ]]]
expected_spc = [
[[0.21472901],
[0.06997871]],
[[0.53207266],
[0.39812705]],
[[0.5217048 ],
[0.07829338]],
[[0.06743541],
[0.5 ]],
[[0.32987863],
[0.5442441 ]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)
self.assertAllClose(expected_probs, actual_probs, rtol=1e-05, atol=1e-05)
self.assertAllClose(expected_qpc, actual_qpc, rtol=1e-05, atol=1e-05)
self.assertAllClose(expected_spc, actual_spc, rtol=1e-05, atol=1e-05)

  def testCCTAttentionLayerSelfAttentionEval(self):
with self.session(use_gpu=True) as sess, self.SetEval(True):
depth = 4
p = layers_with_attention.CCTAttentionLayer.Params()
p.name = 'transformer_atten'
p.source_dim = depth
p.is_masked = True
p.num_attention_heads = 2
p.gating_tpl.hidden_layer_dim = 2
p.gating_tpl.noise_std = 5.0
p.gating_tpl.noise_warmup_steps = 100
transformer_atten = layers_with_attention.CCTAttentionLayer(p)
(source_vecs, source_padding, _, _,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
ctx, probs, qpc, spc = transformer_atten.FPropDefaultTheta(
source_vecs, source_padding)
tf.global_variables_initializer().run()
actual_ctx, actual_probs, actual_qpc, actual_spc = sess.run(
[ctx, probs, qpc, spc])
# pylint: disable=bad-whitespace
# pyformat: disable
expected_ctx = [
[[-1.5939784e+00, 8.5430717e-01, 8.4722424e-01, -1.0755297e-01],
[-1.6199683e+00, -1.9144357e+00, 1.0950426e+00, 2.4393613e+00]],
[[ 2.0492536e-01, -5.0217152e-02, -1.5521961e-01, 5.1122904e-04],
[-4.3141130e-01, -9.0650195e-01, -3.5488802e-01, 1.6928028e+00]],
[[ 1.7034934e+00, -1.1774492e+00, -4.2603785e-01, -1.0000569e-01],
[-1.0880733e+00, -9.0783793e-01, 9.9768031e-01, 9.9823117e-01]],
[[-1.1584746e+00, -2.0163212e+00, 2.3776212e+00, 7.9717481e-01],
[ 1.3303024e+00, -1.4763023e+00, 2.6441175e-01, -1.1841190e-01]],
[[-3.0323851e-01, -2.5461116e+00, 5.0698155e-01, 2.3423686e+00],
[-2.0771229e+00, -8.0027932e-01, -7.4258000e-02, 2.9516606e+00]]]
expected_probs = [
[[1. , 0. , 0. , 0. , 0. ],
[0.2 , 0.2 , 0.2 , 0.2 , 0.2 ]],
[[0.35538384, 0.6446162 , 0. , 0. , 0. ],
[0. , 1. , 0. , 0. , 0. ]],
[[0.18125553, 0.8187444 , 0. , 0. , 0. ],
[0. , 0.5 , 0.5 , 0. , 0. ]],
[[0.7752405 , 0.22475953, 0. , 0. , 0. ],
[0. , 0.36166608, 0.36166608, 0.27666792, 0. ]],
[[0.40603536, 0.18792923, 0. , 0. , 0.40603536],
[0. , 0.32476988, 0.32476988, 0.35046023, 0. ]]]
expected_qpc = [
[[1.],
[1.]],
[[1.],
[1.]],
[[1.],
[1.]],
[[1.],
[1.]],
[[1.],
[1.]]]
expected_spc = [
[[0.],
[0.]],
[[1.],
[0.]],
[[1.],
[0.]],
[[0.],
[1.]],
[[0.],
[1.]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)
self.assertAllClose(expected_probs, actual_probs, rtol=1e-05, atol=1e-05)
self.assertAllClose(expected_qpc, actual_qpc, rtol=1e-05, atol=1e-05)
self.assertAllClose(expected_spc, actual_spc, rtol=1e-05, atol=1e-05)

  def testCCTAttentionLayerStepByStep(self):
with self.session(use_gpu=True) as sess, self.SetEval(True):
depth = 4
p = layers_with_attention.CCTAttentionLayer.Params()
p.name = 'transformer_atten'
p.source_dim = depth
p.is_masked = True
p.num_attention_heads = 2
p.gating_tpl.hidden_layer_dim = 2
p.gating_tpl.noise_std = 5.0
p.gating_tpl.noise_warmup_steps = 100
x_atten = layers_with_attention.CCTAttentionLayer(p)
(source_vecs, _, _, _,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
source_padding = tf.zeros([5, 2])
ctx1, probs1, _, _ = x_atten.FPropDefaultTheta(source_vecs,
source_padding)
ctx2 = []
probs2 = []
cached_source_vecs = tf.zeros([0, 2, 4])
cached_source_contexts = tf.zeros([0, 2, 4])
prefix_states = py_utils.NestedMap(
key=cached_source_vecs, value=cached_source_contexts)
for i in range(5):
ctx, probs, prefix_states = x_atten.ExtendStep(x_atten.theta,
source_vecs[i, :, :],
prefix_states)
probs_pad = tf.zeros([2, 5 - i - 1])
padded_probs = tf.concat([probs, probs_pad], 1)
ctx2.append(ctx)
probs2.append(padded_probs)
ctx2 = tf.stack(ctx2)
probs2 = tf.stack(probs2)
tf.global_variables_initializer().run()
ctx1_v, probs1_v, ctx2_v, probs2_v = sess.run(
[ctx1, probs1, ctx2, probs2])
self.assertAllClose(ctx1_v, ctx2_v)
self.assertAllClose(probs1_v, probs2_v)

  def testCCTAttentionLayerCrossAttenTraining(self):
with self.session(use_gpu=True) as sess:
depth = 4
p = layers_with_attention.CCTAttentionLayer.Params()
p.name = 'transformer_atten'
p.source_dim = depth
p.is_masked = False
p.num_attention_heads = 2
p.gating_tpl.hidden_layer_dim = 2
p.gating_tpl.noise_std = 5.0
p.gating_tpl.noise_warmup_steps = 100
transformer_atten = layers_with_attention.CCTAttentionLayer(p)
(query_vec, _, aux_vecs, aux_paddings,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
ctx, probs, qpc, spc = transformer_atten.FPropDefaultTheta(
query_vec, aux_paddings, aux_vecs)
tf.global_variables_initializer().run()
actual_ctx, actual_probs, actual_qpc, actual_spc = sess.run(
[ctx, probs, qpc, spc])
# pylint: disable=bad-whitespace
# pyformat: disable
expected_ctx = [
[[-1.9043474 , 1.6999874 , 0.4292767 , -0.22491673],
[-0.84242177, -0.50577486, 0.29762083, 1.0505756 ]],
[[-0.33607534, 2.5800223 , -1.3375163 , -0.90643084],
[-0.4973639 , -0.17019022, -1.1589761 , 1.8265318 ]],
[[ 1.1859869 , 1.5021455 , -1.6327672 , -1.0553647 ],
[-1.2359238 , -0.22244841, 0.19330817, 1.2650642 ]],
[[-1.5131142 , 0.49699292, 1.129034 , -0.11291274],
[ 2.1162672 , 0.6308829 , -1.0373113 , -1.7098385 ]],
[[-0.9935959 , 0.07386243, -0.6836246 , 1.6033579 ],
[-1.0807116 , 0.85268646, -1.2622242 , 1.4902495 ]]]
expected_probs = [
[[0.24303743, 0. , 0.30685946, 0. , 0.25564623,
0. , 0.1944569 ],
[0. , 0.28801104, 0. , 0.34431183, 0. ,
0.36767715, 0. ]],
[[0.2644446 , 0. , 0.23458862, 0. , 0.23393473,
0. , 0.26703206],
[0. , 0.22837642, 0. , 0.2820819 , 0. ,
0.4895417 , 0. ]],
[[0.2599384 , 0. , 0.19412258, 0. , 0.21307275,
0. , 0.33286628],
[0. , 0.27514488, 0. , 0.35259444, 0. ,
0.3722607 , 0. ]],
[[0.24153353, 0. , 0.3045342 , 0. , 0.2569951 ,
0. , 0.19693717],
[0. , 0.36325702, 0. , 0.26765382, 0. ,
0.36908916, 0. ]],
[[0.21663833, 0. , 0.28198314, 0. , 0.29308724,
0. , 0.20829134],
[0. , 0.2337277 , 0. , 0.319759 , 0. ,
0.44651327, 0. ]]]
expected_qpc = [
[[0.5 ],
[0.5818492 ]],
[[0.541141 ],
[0.55023897]],
[[0.56948507],
[0.5499979 ]],
[[0.5166038 ],
[0.58645904]],
[[0.54155153],
[0.5 ]]]
expected_spc = [
[[0.09838167],
[0.5 ]],
[[0.51203823],
[0.22011107]],
[[0.27349436],
[0.5230051 ]],
[[0.5 ],
[0.0911701 ]],
[[0.2730832 ],
[0.5 ]],
[[0.54982626],
[0.44889307]],
[[0.10193098],
[0.11123485]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)
self.assertAllClose(expected_probs, actual_probs, rtol=1e-05, atol=1e-05)
self.assertAllClose(expected_qpc, actual_qpc, rtol=1e-05, atol=1e-05)
self.assertAllClose(expected_spc, actual_spc, rtol=1e-05, atol=1e-05)

  def testCCTAttentionLayerCrossAttenEval(self):
with self.session(use_gpu=True) as sess, self.SetEval(True):
depth = 4
p = layers_with_attention.CCTAttentionLayer.Params()
p.name = 'transformer_atten'
p.source_dim = depth
p.is_masked = False
p.num_attention_heads = 2
p.gating_tpl.hidden_layer_dim = 2
p.gating_tpl.noise_std = 5.0
p.gating_tpl.noise_warmup_steps = 100
transformer_atten = layers_with_attention.CCTAttentionLayer(p)
(query_vec, _, aux_vecs, aux_paddings,
_) = self._testTransformerAttentionLayerInputs(depth=depth)
ctx, probs, qpc, spc = transformer_atten.FPropDefaultTheta(
query_vec, aux_paddings, aux_vecs)
tf.global_variables_initializer().run()
actual_ctx, actual_probs, actual_qpc, actual_spc = sess.run(
[ctx, probs, qpc, spc])
tf.logging.info(np.array_repr(actual_ctx))
tf.logging.info(np.array_repr(actual_probs))
# pylint: disable=bad-whitespace
# pyformat: disable
expected_ctx = [
[[-1.5939784 , 0.8543072 , 0.84722424, -0.10755297],
[-0.7121205 , -1.2363338 , 1.1559415 , 0.7925127 ]],
[[-0.09044743, 1.6572162 , -0.87628996, -0.69047904],
[-0.4314113 , -0.90650195, -0.35488802, 1.6928028 ]],
[[ 1.3591317 , 0.5376119 , -1.1282029 , -0.7685402 ],
[-1.0880733 , -0.9078379 , 0.9976803 , 0.9982312 ]],
[[-1.1870676 , -0.37413225, 1.5655125 , -0.00431258],
[ 1.62277 , 0.02716666, -0.7765793 , -0.87335706]],
[[-0.6675403 , -0.8283625 , -0.18727894, 1.6831816 ],
[-1.113929 , 0.13246097, -0.57226247, 1.5537308 ]]]
expected_probs = [
[[0.25 , 0. , 0.25 , 0. , 0.25 ,
0. , 0.25 ],
[0. , 0.33333334, 0. , 0.33333334, 0. ,
0.33333334, 0. ]],
[[0.25 , 0. , 0.25 , 0. , 0.25 ,
0. , 0.25 ],
[0. , 0.33333334, 0. , 0.33333334, 0. ,
0.33333334, 0. ]],
[[0.25 , 0. , 0.25 , 0. , 0.25 ,
0. , 0.25 ],
[0. , 0.33333334, 0. , 0.33333334, 0. ,
0.33333334, 0. ]],
[[0.25 , 0. , 0.25 , 0. , 0.25 ,
0. , 0.25 ],
[0. , 0.33333334, 0. , 0.33333334, 0. ,
0.33333334, 0. ]],
[[0.25 , 0. , 0.25 , 0. , 0.25 ,
0. , 0.25 ],
[0. , 0.33333334, 0. , 0.33333334, 0. ,
0.33333334, 0. ]]]
expected_qpc = [
[[1.],
[1.]],
[[1.],
[1.]],
[[1.],
[1.]],
[[1.],
[1.]],
[[1.],
[1.]]]
expected_spc = [
[[0.],
[1.]],
[[1.],
[0.]],
[[0.],
[1.]],
[[1.],
[0.]],
[[0.],
[1.]],
[[1.],
[0.]],
[[0.],
[0.]]]
# pyformat: enable
# pylint: enable=bad-whitespace
self.assertAllClose(expected_ctx, actual_ctx, rtol=1e-05, atol=1e-05)
self.assertAllClose(expected_probs, actual_probs, rtol=1e-05, atol=1e-05)
self.assertAllClose(expected_qpc, actual_qpc, rtol=1e-05, atol=1e-05)
self.assertAllClose(expected_spc, actual_spc, rtol=1e-05, atol=1e-05)


class SelfAttentiveLayerTest(test_utils.TestCase):

  def testFPropForTrain(self):
with self.session(use_gpu=False) as session:
# time = 5, batch = 4, depth = 2
features = tf.constant(np.random.normal(size=(5, 4, 2)), dtype=tf.float32)
paddings = tf.constant(
[[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0], [0.0, 1.0, 1.0, 1.0]],
dtype=tf.float32)
features = tf.transpose(features, [1, 0, 2])
paddings = tf.transpose(paddings, [1, 0])
# init parameters for the pooling layer
params = layers_with_attention.SelfAttentiveLayer.Params()
params.name = 'self_attentive_pooling'
params.num_heads = 3
params.input_dim = 2
params.hidden_dim = 7
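      # The penalty terms regularize the attention heads (cf. the structured
      # self-attentive embedding of Lin et al., 2017); the resulting penalty
      # is collected as an auxiliary loss below.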
params.penalty_coef = 1.0
params.penalty_terms = [1.0, 0.33, 0.01]
params.params_init = py_utils.WeightInit.Gaussian(0.1)
# forward through the layer
with py_utils.AuxLossContext() as aux_loss_ctx:
att_layer = layers_with_attention.SelfAttentiveLayer(params)
outputs = att_layer.FProp(att_layer.theta, features, paddings=paddings)
tf.global_variables_initializer().run()
outputs, aux_loss = session.run([outputs, aux_loss_ctx.aux_losses[0]])
# check the shapes of the resulted tensors
self.assertEqual(
outputs.shape,
(features.shape[0], params.num_heads, params.input_dim))
self.assertEqual(aux_loss.shape, (features.shape[0],))


if __name__ == '__main__':
tf.test.main()
| tensorflow/lingvo | lingvo/core/layers_with_attention_test.py | Python | apache-2.0 | 132,081 | ["Gaussian", "MOE"] | 3f709d7e70c9dfa37c022dd6e53f53595f3101394b51d9c40388b5370275e8b1 |
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.

# Enthought library imports.
from traits.api import Instance
from tvtk.api import tvtk

# Local imports
from mayavi.filters.filter_base import FilterBase
from mayavi.core.pipeline_info import PipelineInfo


######################################################################
# `Stripper` class.
######################################################################
class Stripper(FilterBase):
""" Create triangle strips and/or poly-lines. Useful for regularizing
broken up surfaces, such as those created by the Tube
filter.
"""

    # The version of this class. Used for persistence.
__version__ = 0

    # The actual TVTK filter that this class manages.
filter = Instance(tvtk.Stripper, args=(), allow_none=False, record=True)

    input_info = PipelineInfo(datasets=['poly_data'],
attribute_types=['any'],
attributes=['any'])

    output_info = PipelineInfo(datasets=['poly_data'],
attribute_types=['any'],
attributes=['any'])
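
# A minimal usage sketch (hypothetical; assumes the standard ``mlab.pipeline``
# factory functions such as ``tube`` and ``stripper``):
#
#   import numpy as np
#   from mayavi import mlab
#   t = np.linspace(0, 4 * np.pi, 100)
#   src = mlab.pipeline.line_source(np.cos(t), np.sin(t), 0.1 * t)
#   tube = mlab.pipeline.tube(src, tube_radius=0.05)
#   stripped = mlab.pipeline.stripper(tube)  # regularize the tube surface
#   mlab.pipeline.surface(stripped)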
| dmsurti/mayavi | mayavi/filters/stripper.py | Python | bsd-3-clause | 1,209 | ["Mayavi"] | ee4a355fd1a6f92381318abbf1e9fe016a3335117f0b002f03dc066b159d2baa |
#!/usr/bin/env python
"""
Local Processing Unit (LPU) draft implementation.
"""

import importlib
import numbers
import pycuda.gpuarray as garray
from pycuda.tools import dtype_to_ctype
import pycuda.driver as cuda
from pycuda.compiler import SourceModule
import pycuda.elementwise as elementwise
import numpy as np
import networkx as nx
from collections import Counter
# Work around bug in networkx < 1.9 that causes networkx to choke on GEXF
# files with boolean attributes that contain the strings 'True' or 'False'
# (bug already observed in https://github.com/networkx/networkx/pull/971)
nx.readwrite.gexf.GEXF.convert_bool['false'] = False
nx.readwrite.gexf.GEXF.convert_bool['False'] = False
nx.readwrite.gexf.GEXF.convert_bool['true'] = True
nx.readwrite.gexf.GEXF.convert_bool['True'] = True
from neurokernel.mixins import LoggerMixin
from neurokernel.core_gpu import Module, CTRL_TAG, GPOT_TAG, SPIKE_TAG
import neurokernel.LPU.utils.parray as parray
from neurokernel.LPU.utils.simpleio import *
# all neurons are instantiated by class names
from neurons import *
from synapses import *

PORT_IN_GPOT = 'port_in_gpot'
PORT_IN_SPK = 'port_in_spk'


class LPU(Module):
"""
Retina Local Processing Unit (LPU).

    TODO: this documentation refers to a previous version of this class.

Parameters
----------
dt : double
Time step (s).
n_dict_list : list of dict
List of dictionaries describing the neurons in this LPU;
each dictionary corresponds to a single neuron model.
s_dict_list : list of dict
List of dictionaries describing the synapses in this LPU; each
dictionary corresponds to a single synapse model.
input_file : str
Name of input file
output_file : str
Name of output files
port_data : int
Port to use when communicating with broker.
port_ctrl : int
Port used by broker to control module.
device : int
GPU device number.
id : str
Name of the LPU
debug : boolean
Passed to all the neuron and synapse objects instantiated by this LPU
for debugging purposes. False by default.
cuda_verbose : boolean
If True, compile kernels with option '--ptxas-options=-v'.
"""

    @staticmethod
def graph_to_dicts(graph):
"""
Convert graph of LPU neuron/synapse data to Python data structures.
Parameters
----------
graph : networkx.MultiDiGraph
NetworkX graph containing LPU data.
Returns
-------
n_dict : dict of dict of neuron
Each key of `n_dict` is the name of a neuron model; the values
are dicts that map each attribute name to a list that contains the
attribute values for each neuron.
s_dict : dict of dict of synapse
Each key of `s_dict` is the name of a synapse model; the values are
dicts that map each attribute name to a list that contains the
            attribute values for each synapse.
Example
-------
        >>> n_dict = {'LeakyIAF': {'Vr': [0.5, 0.6], 'Vt': [0.3, 0.2]},
        ...           'MorrisLecar': {'V1': [0.15, 0.16], 'Vt': [0.13, 0.27]}}
Notes
-----
All neurons must have the following attributes; any additional
attributes for a specific neuron model must be provided
for all neurons of that model type:
1. spiking - True if the neuron emits spikes, False if it emits graded
potentials.
2. model - model identifier string, e.g., 'LeakyIAF', 'MorrisLecar'
        3. public - True if the neuron emits output exposed to other LPUs.
4. extern - True if the neuron can receive external input from a file.
All synapses must have the following attributes:
1. class - int indicating connection class of synapse;
it may assume the following values:
0. spike to spike synapse
1. spike to graded potential synapse
2. graded potential to spike synapse
3. graded potential to graded potential synapse
2. model - model identifier string, e.g., 'AlphaSynapse'
3. conductance - True if the synapse emits conductance values, False if
it emits current values.
4. reverse - If the `conductance` attribute is True, this attribute
should be set to the reverse potential.
TODO
----
Input data should be validated.
"""
# parse neuron data
neurons = graph.node.items()
n_dict = {}
        # Sort based on id (id is first converted to an integer) so that the
        # ordering of consecutive neurons of the same type in the constructed
        # LPU matches their ordering in Neurokernel.
neurons.sort(cmp=neuron_cmp)
for nid, neu in neurons:
model = neu['model']
# if an input_port, make sure selector is specified
if model == PORT_IN_GPOT or model == PORT_IN_SPK:
assert('selector' in neu.keys())
if model == PORT_IN_GPOT:
neu['spiking'] = False
neu['public'] = False
else:
neu['spiking'] = True
neu['public'] = False
# if an output_port, make sure selector is specified
if 'public' in neu.keys():
if neu['public']:
assert('selector' in neu.keys())
else:
neu['public'] = False
if 'selector' not in neu.keys():
neu['selector'] = ''
# if the neuron model does not appear before, add it into n_dict
if model not in n_dict:
n_dict[model] = {k: [] for k in neu.keys() + ['id']}
# neurons of the same model should have the same attributes
assert(set(n_dict[model].keys()) == set(neu.keys() + ['id']))
# add neuron data into the subdictionary of n_dict
for key in neu.iterkeys():
n_dict[model][key].append( neu[key] )
n_dict[model]['id'].append(int(nid))
# remove duplicate model information
for val in n_dict.itervalues():
val.pop('model')
if not n_dict:
n_dict = None

        # parse synapse data
synapses = graph.edges(data=True)
s_dict = {}
synapses.sort(cmp=synapse_cmp)
        for i, syn in enumerate(synapses):
# syn[0/1]: pre-/post-neu id; syn[2]: dict of synaptic data
model = syn[2]['model']
# Assign the synapse edge an ID if none exists (e.g., because the
# graph was never stored/read to/from GEXF):
if syn[2].has_key('id'):
syn[2]['id'] = int(syn[2]['id'])
else:
                syn[2]['id'] = i
# If the synapse model does not appear before, add it into s_dict:
if model not in s_dict:
s_dict[model] = {k: [] for k in syn[2].keys() + ['pre', 'post']}
# Synapses of the same model should have the same attributes:
assert(set(s_dict[model].keys()) == set(syn[2].keys() + ['pre', 'post']))
# Add synaptic data into the subdictionary of s_dict:
for key in syn[2].iterkeys():
s_dict[model][key].append(syn[2][key])
s_dict[model]['pre'].append(syn[0])
s_dict[model]['post'].append(syn[1])
for val in s_dict.itervalues():
val.pop('model')
if not s_dict:
s_dict = {}
return n_dict, s_dict

    @staticmethod
def lpu_parser(filename):
"""
GEXF LPU specification parser.
Extract LPU specification data from a GEXF file and store it in
Python data structures. All nodes in the GEXF file are assumed to
correspond to neuron model instances while all edges are assumed to
correspond to synapse model instances.
Parameters
----------
filename : str
GEXF filename.
Returns
-------
        n_dict : dict of dict of list
            Each key of `n_dict` is the name of a neuron model; the values
            are dicts that map each attribute name to a list containing the
            attribute values for each neuron of that model.
        s_dict : dict of dict of list
            Each key of `s_dict` is the name of a synapse model; the values are
            dicts that map each attribute name to a list containing the
            attribute values for each synapse of that model.
"""
graph = nx.read_gexf(filename)
return LPU.graph_to_dicts(graph)
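    # Hedged usage sketch for lpu_parser (the file name below is hypothetical):
    #
    #     n_dict, s_dict = LPU.lpu_parser('generic_lpu.gexf')
    #     for model, attrs in n_dict.items():
    #         print(model, len(attrs['id']))   # neuron count per model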
@classmethod
def extract_in_gpot(cls, n_dict):
"""
Return selectors of non-spiking input ports.
"""
if PORT_IN_GPOT in n_dict:
return ','.join(filter(None, n_dict[PORT_IN_GPOT]['selector']))
else:
return ''
@classmethod
def extract_in_spk(cls, n_dict):
"""
Return selectors of spiking input ports.
"""
if PORT_IN_SPK in n_dict:
return ','.join(filter(None, n_dict[PORT_IN_SPK]['selector']))
else:
return ''
@classmethod
def extract_out_gpot(cls, n_dict):
"""
Return selectors of non-spiking output neurons.
"""
return ','.join(filter(None,
[sel for _, n in n_dict.items() for sel, pub, spk in \
zip(n['selector'], n['public'], n['spiking']) \
if pub and not spk ]))
@classmethod
def extract_out_spk(cls, n_dict):
"""
Return selectors of spiking output neurons.
"""
return ','.join(filter(None,
[sel for _, n in n_dict.items() for sel, pub, spk in \
zip(n['selector'], n['public'], n['spiking']) \
if pub and spk ]))
@classmethod
def extract_in(cls, n_dict):
"""
Return selectors of all input ports.
"""
return ','.join(filter(None,
[cls.extract_in_spk(n_dict), cls.extract_in_gpot(n_dict)]))
@classmethod
def extract_out(cls, n_dict):
"""
Return selectors of all output neurons.
"""
return ','.join(filter(None,
[cls.extract_out_spk(n_dict), cls.extract_out_gpot(n_dict)]))
@classmethod
def extract_all(cls, n_dict):
"""
Return selectors for all input ports and output neurons.
"""
return ','.join(filter(None,
[cls.extract_in(n_dict), cls.extract_out(n_dict)]))
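    # Hedged example of the selector strings produced by the extract_*
    # methods above (selector names are hypothetical):
    #
    #     extract_in(n_dict)  -> '/lpu/in/spk0,/lpu/in/gpot0'
    #     extract_all(n_dict) -> '/lpu/in/spk0,/lpu/in/gpot0,/lpu/out/spk0'
    #
    # Empty selectors are dropped by the filter(None, ...) calls, so models
    # without ports contribute nothing to the joined string.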
def __init__(self, dt, n_dict, s_dict, input_file=None, output_file=None,
device=0, ctrl_tag=CTRL_TAG, gpot_tag=GPOT_TAG,
spike_tag=SPIKE_TAG, rank_to_id=None, routing_table=None,
id=None, debug=False, columns=['io', 'type', 'interface'],
cuda_verbose=False, time_sync=False,
modules=None, input_generator=None):
LoggerMixin.__init__(self, 'mod {}'.format(id))
assert('io' in columns)
assert('type' in columns)
assert('interface' in columns)
self.LPU_id = id
self.dt = dt
self.debug = debug
self.device = device
if cuda_verbose:
self.compile_options = ['--ptxas-options=-v']
else:
self.compile_options = []
# Handle file I/O:
self.output_file = output_file
self.output = True if output_file else False
self.input_file = input_file
self.input_eof = False if input_file else True
self.input_generator = input_generator
# Load neurons and synapse data:
self._import_modules(modules)
self._load_neurons()
self._load_synapses()
        # Default number of input frames to read from file at a time:
        self._one_time_import = 10
# Save neuron data in the form
# [('Model0', {'attrib0': [..], 'attrib1': [..]}), ('Model1', ...)]
self.n_list = n_dict.items()
# List of booleans indicating whether first neuron of each model is a
# spiking model:
n_model_is_spk = [ n['spiking'][0] for _, n in self.n_list ]
# Number of neurons of each model:
n_model_num = [ len(n['id']) for _, n in self.n_list ]
# Concatenate lists of integers corresponding to neuron positions in LPU
# graph for all of the models into a single list:
n_id = np.array(sum( [ n['id'] for _, n in self.n_list ], []),
dtype=np.int32)
# Concatenate lists of common attributes in model dictionaries into
# single lists:
n_is_spk = np.array(sum( [ n['spiking'] for _, n in self.n_list ], []))
n_is_pub = np.array(sum( [ n['public'] for _, n in self.n_list ], []))
n_has_in = np.array(sum( [ n['extern'] for _, n in self.n_list ], []))
# Get selectors and positions of input ports:
try:
sel_in_gpot = self.extract_in_gpot(n_dict)
in_ports_ids_gpot = np.array(n_dict[PORT_IN_GPOT]['id'])
self.ports_in_gpot_mem_ind = zip(*self.n_list)[0].index(PORT_IN_GPOT)
except KeyError:
sel_in_gpot = ''
in_ports_ids_gpot = np.array([], dtype=np.int32)
self.ports_in_gpot_mem_ind = None
try:
sel_in_spk = self.extract_in_spk(n_dict)
in_ports_ids_spk = np.array(n_dict[PORT_IN_SPK]['id'],
dtype=np.int32)
self.ports_in_spk_mem_ind = zip(*self.n_list)[0].index(PORT_IN_SPK)
except KeyError:
sel_in_spk = ''
in_ports_ids_spk = np.array([], dtype=np.int32)
self.ports_in_spk_mem_ind = None
sel_in = ','.join(filter(None, [sel_in_gpot, sel_in_spk]))
# Get selectors and positions of output neurons:
sel_out_gpot = self.extract_out_gpot(n_dict)
sel_out_spk = self.extract_out_spk(n_dict)
self.out_ports_ids_gpot = np.array([nid for _, n in self.n_list for nid, pub, spk in
zip(n['id'], n['public'], n['spiking'])
if pub and not spk], dtype=np.int32)
self.out_ports_ids_spk = np.array([nid for _, n in self.n_list for nid, pub, spk in
zip(n['id'], n['public'], n['spiking'])
if pub and spk], dtype=np.int32)
sel_out = ','.join(filter(None, [sel_out_gpot, sel_out_spk]))
sel_gpot = ','.join(filter(None, [sel_in_gpot, sel_out_gpot]))
sel_spk = ','.join(filter(None, [sel_in_spk, sel_out_spk]))
sel = ','.join(filter(None, [sel_gpot, sel_spk]))
self.sel_in_spk = sel_in_spk
self.sel_out_spk = sel_out_spk
self.sel_in_gpot = sel_in_gpot
self.sel_out_gpot = sel_out_gpot
# Lists of numbers of neurons of gpot and spiking model types:
num_gpot_neurons = np.where(n_model_is_spk, 0, n_model_num)
num_spike_neurons = np.where(n_model_is_spk, n_model_num, 0)
# Total numbers of gpot and spiking neurons:
self.total_num_gpot_neurons = sum(num_gpot_neurons)
self.total_num_spike_neurons = sum(num_spike_neurons)
gpot_idx = n_id[~n_is_spk]
spike_idx = n_id[n_is_spk]
self.order = np.argsort(
np.concatenate((gpot_idx, spike_idx))).astype(np.int32)
self.gpot_order = np.argsort(gpot_idx).astype(np.int32)
self.spike_order = np.argsort(spike_idx).astype(np.int32)
self.spike_shift = self.total_num_gpot_neurons
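        # Worked example of the reordering above (hypothetical ids): with
        # gpot_idx = [1, 3] and spike_idx = [0, 2], the concatenation is
        # [1, 3, 0, 2] and order = argsort(...) = [2, 0, 3, 1], i.e. order[g]
        # is the memory slot of graph id g in the concatenated layout (gpot
        # neurons first, then spiking); per-type code later subtracts
        # spike_shift when indexing into the spiking arrays.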
in_id = n_id[n_has_in]
in_id.sort()
pub_spk_id = n_id[ n_is_pub & n_is_spk ]
pub_spk_id.sort()
pub_gpot_id = n_id[ n_is_pub & ~n_is_spk ]
pub_gpot_id.sort()
self.input_neuron_list = self.order[in_id]
public_spike_list = self.order[pub_spk_id]
public_gpot_list = self.order[pub_gpot_id]
self.num_public_gpot = len( public_gpot_list )
self.num_public_spike = len( public_spike_list )
self.num_input = len( self.input_neuron_list )
in_ports_ids_gpot = self.order[in_ports_ids_gpot]
in_ports_ids_spk = self.order[in_ports_ids_spk]
self.out_ports_ids_gpot = self.order[self.out_ports_ids_gpot]
self.out_ports_ids_spk = self.order[self.out_ports_ids_spk]
        # Remap pre-/post-synaptic neuron ids of each synapse model to the
        # internal memory ordering:
self.s_dict = s_dict
if s_dict:
for s in self.s_dict.itervalues():
                # TODO: the synapse class could be inferred from the synapse
                # model, from the pre/post neuron models, or from their
                # spiking attributes.
shift = self.spike_shift \
if s['class'][0] == 0 or s['class'][0] == 1 else 0
s['pre'] = [self.order[int(neu_id)] - shift
for neu_id in s['pre'] ]
s['post'] = [self.order[int(neu_id)]
for neu_id in s['post'] ]
gpot_delay_steps = 0
spike_delay_steps = 0
spike_shift = self.spike_shift
g_pre = []
g_post = []
I_pre = []
I_post = []
V_rev = []
count = 0
self.s_list = self.s_dict.items()
num_synapses = [ len(s['id']) for _, s in self.s_list ]
for (_, s) in self.s_list:
order = np.argsort(s['post']).astype(np.int32)
for k, v in s.items():
s[k] = np.asarray(v)[order]
if s['conductance'][0]:
g_post.extend(s['post'])
V_rev.extend(s['reverse'])
g_pre.extend(range(count, count+len(s['post'])))
count += len(s['post'])
if 'delay' in s:
max_del = np.max( s['delay'] )
gpot_delay_steps = max_del if max_del > gpot_delay_steps \
else gpot_delay_steps
else:
I_post.extend(s['post'])
I_pre.extend(range(count, count+len(s['post'])))
count += len(s['post'])
if 'delay' in s:
max_del = np.max( s['delay'] )
spike_delay_steps = max_del if max_del > spike_delay_steps \
else spike_delay_steps
self.total_synapses = int(np.sum(num_synapses))
# input is treated as current by default
I_post.extend(self.input_neuron_list)
I_pre.extend(range(self.total_synapses,
self.total_synapses + self.num_input))
g_post = np.asarray(g_post, dtype=np.int32)
g_pre = np.asarray(g_pre, dtype = np.int32)
V_rev = np.asarray(V_rev, dtype=np.double)
order1 = np.argsort(g_post, kind='mergesort')
g_post = g_post[order1]
g_pre = g_pre[order1]
V_rev = V_rev[order1]
I_post = np.asarray(I_post, dtype=np.int32)
I_pre = np.asarray(I_pre, dtype=np.int32)
order1 = np.argsort(I_post, kind='mergesort')
I_post = I_post[order1]
I_pre = I_pre[order1]
self.idx_start_gpot = np.concatenate(
(np.asarray([0,], dtype=np.int32),
np.cumsum(num_gpot_neurons, dtype=np.int32)))
self.idx_start_spike = np.concatenate(
(np.asarray([0,], dtype=np.int32),
np.cumsum(num_spike_neurons, dtype=np.int32)))
self.idx_start_synapse = np.concatenate(
(np.asarray([0,], dtype=np.int32),
np.cumsum(num_synapses, dtype=np.int32)))
for i, (t, n) in enumerate(self.n_list):
if n['spiking'][0]:
                idx = np.where(
                    (g_post >= self.idx_start_spike[i] + spike_shift)&
                    (g_post < self.idx_start_spike[i+1] + spike_shift) )
n['g_post'] = g_post[idx] - self.idx_start_spike[i] - spike_shift
n['cond_pre'] = g_pre[idx]
n['reverse'] = V_rev[idx]
idx = np.where(
(I_post >= self.idx_start_spike[i] + spike_shift)&
(I_post < self.idx_start_spike[i+1] + spike_shift) )
n['I_post'] = I_post[idx] - self.idx_start_spike[i] - spike_shift
n['I_pre'] = I_pre[idx]
else:
idx = np.where( (g_post >= self.idx_start_gpot[i])&
(g_post < self.idx_start_gpot[i+1]) )
n['g_post'] = g_post[idx] - self.idx_start_gpot[i]
n['cond_pre'] = g_pre[idx]
n['reverse'] = V_rev[idx]
idx = np.where( (I_post >= self.idx_start_gpot[i])&
(I_post < self.idx_start_gpot[i+1]) )
n['I_post'] = I_post[idx] - self.idx_start_gpot[i]
n['I_pre'] = I_pre[idx]
n['num_dendrites_cond'] = Counter(n['g_post'])
n['num_dendrites_I'] = Counter(n['I_post'])
self.gpot_delay_steps = int(round(gpot_delay_steps*1e-3/self.dt)) + 1
self.spike_delay_steps = int(round(spike_delay_steps*1e-3/self.dt)) + 1
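        # Example of the conversion above: delays are assumed to be specified
        # in ms while dt is in s, so a 5 ms maximum delay with dt = 1e-4 s
        # yields round(5*1e-3/1e-4) + 1 = 51 buffer steps.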
data_gpot = np.zeros(self.num_public_gpot + len(in_ports_ids_gpot),
np.double)
data_spike = np.zeros(self.num_public_spike + len(in_ports_ids_spk),
np.int32)
super(LPU, self).__init__(sel=sel, sel_in=sel_in, sel_out=sel_out,
sel_gpot=sel_gpot, sel_spike=sel_spk,
data_gpot=data_gpot, data_spike=data_spike,
columns=columns, ctrl_tag=ctrl_tag, gpot_tag=gpot_tag,
spike_tag=spike_tag, id=self.LPU_id,
rank_to_id=rank_to_id, routing_table=routing_table,
device=device, debug=debug, time_sync=time_sync)
self.sel_in_gpot_ids = np.array(self.pm['gpot'].ports_to_inds(self.sel_in_gpot),
dtype=np.int32)
self.sel_out_gpot_ids = np.array(self.pm['gpot'].ports_to_inds(self.sel_out_gpot),
dtype=np.int32)
self.sel_in_spk_ids = np.array(self.pm['spike'].ports_to_inds(self.sel_in_spk),
dtype=np.int32)
self.sel_out_spk_ids = np.array(self.pm['spike'].ports_to_inds(self.sel_out_spk),
dtype=np.int32)
def pre_run(self):
super(LPU, self).pre_run()
self._initialize_gpu_ds()
self._init_objects()
self.first_step = True
def post_run(self):
super(LPU, self).post_run()
if self.output:
if self.total_num_gpot_neurons > 0:
self.output_gpot_file.close()
if self.total_num_spike_neurons > 0:
self.output_spike_file.close()
if self.debug:
self.gpot_buffer_file.close()
if self.has_synapse:
self.synapse_state_file.close()
for neuron in self.neurons:
neuron.post_run()
for synapse in self.synapses:
synapse.post_run()
def run_step(self):
super(LPU, self).run_step()
self._read_LPU_input()
if self.input_file is not None:
self._read_external_input()
elif self.input_generator is not None:
self._get_external_input()
if not self.first_step:
for neuron in self.neurons:
if self.has_synapse:
neuron.update_internal_state(self.synapse_state.gpudata)
neuron.eval()
self._update_buffer()
for synapse in self.synapses:
synapse.update_state(self.buffer)
self.buffer.step()
else:
self.first_step = False
if self.debug:
if self.total_num_gpot_neurons > 0:
dataset_append(self.gpot_buffer_file['/array'],
self.buffer.gpot_buffer.get()
.reshape(1, self.gpot_delay_steps, -1))
if self.has_synapse:
dataset_append(self.synapse_state_file['/array'],
self.synapse_state.get().reshape(1, -1))
self._extract_output()
# Save output data to disk:
if self.output:
self._write_output()
def _init_objects(self):
self.neurons = [ self._instantiate_neuron(i, t, n)
for i, (t, n) in enumerate(self.n_list)
if t!=PORT_IN_GPOT and t!=PORT_IN_SPK]
self.synapses = [ self._instantiate_synapse(i, t, n)
for i, (t, n) in enumerate(self.s_list)
if t!='pass']
self.buffer = CircularArray(self.total_num_gpot_neurons,
self.gpot_delay_steps, self.V,
self.total_num_spike_neurons,
self.spike_delay_steps)
if self.input_file is not None:
self.input_h5file = h5py.File(self.input_file, 'r')
self.file_pointer = 0
self.I_ext = \
parray.to_gpu(self.input_h5file['/array'][self.file_pointer:
self.file_pointer+self._one_time_import])
self.file_pointer += self._one_time_import
self.frame_count = 0
self.frames_in_buffer = self._one_time_import
elif self.input_generator is not None:
self.I_ext = garray.zeros(self.num_input, np.double)
if self.output:
output_file = self.output_file.rsplit('.', 1)
filename = output_file[0]
if len(output_file) > 1:
ext = output_file[1]
else:
ext = 'h5'
if self.total_num_gpot_neurons > 0:
self.output_gpot_file = h5py.File(filename+'_gpot.' + ext, 'w')
self.output_gpot_file.create_dataset(
'/array',
(0, self.total_num_gpot_neurons),
dtype=np.float64,
maxshape=(None, self.total_num_gpot_neurons))
if self.total_num_spike_neurons > 0:
self.output_spike_file = h5py.File(filename+'_spike.'+ext, 'w')
self.output_spike_file.create_dataset(
'/array',
(0, self.total_num_spike_neurons),
dtype=np.float64,
maxshape=(None, self.total_num_spike_neurons))
if self.debug:
if self.total_num_gpot_neurons > 0:
self.gpot_buffer_file = h5py.File(self.id + '_buffer.h5', 'w')
self.gpot_buffer_file.create_dataset(
'/array',
(0, self.gpot_delay_steps, self.total_num_gpot_neurons),
dtype=np.float64,
maxshape=(None, self.gpot_delay_steps, self.total_num_gpot_neurons))
if self.has_synapse:
self.synapse_state_file = h5py.File(self.id + '_synapses.h5', 'w')
self.synapse_state_file.create_dataset(
'/array',
(0, self.total_synapses + len(self.input_neuron_list)),
dtype=np.float64,
maxshape=(None, self.total_synapses + self.num_input))
if self.input_generator is not None:
self.input_generator.generate_receptive_fields()
def _initialize_gpu_ds(self):
"""
Setup GPU arrays.
"""
# XXX how should a zero length vector be handled
if self.has_synapse:
self.synapse_state = garray.zeros(
self.total_synapses + self.num_input, np.double)
if self.total_num_gpot_neurons > 0:
self.V = garray.zeros(int(self.total_num_gpot_neurons), np.float64)
else:
self.V = None
if self.total_num_spike_neurons > 0:
self.spike_state = garray.zeros(int(self.total_num_spike_neurons),
np.int32)
else:
self.spike_state = None
self.block_extract = (256, 1, 1)
if len(self.out_ports_ids_gpot) > 0:
self.out_ports_ids_gpot_g = garray.to_gpu(self.out_ports_ids_gpot)
self.sel_out_gpot_ids_g = garray.to_gpu(self.sel_out_gpot_ids)
self._extract_gpot = self._extract_projection_gpot_func()
if len(self.out_ports_ids_spk) > 0:
self.out_ports_ids_spk_g = garray.to_gpu(
(self.out_ports_ids_spk - self.spike_shift).astype(np.int32))
self.sel_out_spk_ids_g = garray.to_gpu(self.sel_out_spk_ids)
self._extract_spike = self._extract_projection_spike_func()
if self.ports_in_gpot_mem_ind is not None:
inds = self.sel_in_gpot_ids
self.inds_gpot = garray.to_gpu(inds)
if self.ports_in_spk_mem_ind is not None:
inds = self.sel_in_spk_ids
self.inds_spike = garray.to_gpu(inds)
def _read_LPU_input(self):
"""
Put inputs from other LPUs to buffer.
"""
if self.ports_in_gpot_mem_ind is not None:
self.set_inds(self.pm['gpot'].data, self.V, self.inds_gpot,
self.idx_start_gpot[self.ports_in_gpot_mem_ind])
if self.ports_in_spk_mem_ind is not None:
self.set_inds(self.pm['spike'].data, self.spike_state,
self.inds_spike,
self.idx_start_spike[self.ports_in_spk_mem_ind])
def set_inds(self, src, dest, inds, dest_shift=0):
assert isinstance(dest_shift, numbers.Integral)
try:
func = self.set_inds.cache[(inds.dtype, dest_shift)]
except KeyError:
inds_ctype = dtype_to_ctype(inds.dtype)
data_ctype = dtype_to_ctype(src.dtype)
v = "{data_ctype} *dest, {inds_ctype} *inds, {data_ctype} *src"\
.format(data_ctype=data_ctype, inds_ctype=inds_ctype)
func = elementwise.ElementwiseKernel(v,
"dest[i+%i] = src[inds[i]]" % dest_shift)
self.set_inds.cache[(inds.dtype, dest_shift)] = func
func(dest, inds, src, range=slice(0, len(inds), 1))
set_inds.cache = {}
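    # For reference, the elementwise kernel built in set_inds is the GPU
    # analogue of this NumPy one-liner (sketch):
    #
    #     dest[dest_shift:dest_shift + len(inds)] = src[inds]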
def _extract_output(self, st=None):
if len(self.out_ports_ids_gpot) > 0:
self._extract_gpot.prepared_async_call(
self.grid_extract_gpot,
self.block_extract, st, self.V.gpudata,
self.pm['gpot'].data.gpudata,
self.out_ports_ids_gpot_g.gpudata,
self.sel_out_gpot_ids_g.gpudata,
self.num_public_gpot)
if len(self.out_ports_ids_spk) > 0:
self._extract_spike.prepared_async_call(
self.grid_extract_spike,
self.block_extract, st, self.spike_state.gpudata,
self.pm['spike'].data.gpudata,
self.out_ports_ids_spk_g.gpudata,
self.sel_out_spk_ids_g.gpudata,
len(self.out_ports_ids_spk))
def _write_output(self):
"""
Save neuron states or spikes to output file.
The order is the same as the order of the assigned ids in gexf
"""
if self.total_num_gpot_neurons > 0:
dataset_append(self.output_gpot_file['/array'],
self.V.get()[self.gpot_order].reshape((1, -1)))
if self.total_num_spike_neurons > 0:
dataset_append(self.output_spike_file['/array'],
self.spike_state.get()[self.spike_order].reshape((1, -1)))
def _read_external_input(self):
# if eof not reached or there are unread frames in buffer (I_ext),
# copy the input from buffer to synapse state array
if not self.input_eof or self.frame_count < self.frames_in_buffer:
# copy to the end of synapse state array
# after the entries reserved for synapses
cuda.memcpy_dtod(
int(int(self.synapse_state.gpudata) +
self.total_synapses*self.synapse_state.dtype.itemsize),
int(int(self.I_ext.gpudata) +
self.frame_count*self.I_ext.ld*self.I_ext.dtype.itemsize),
self.num_input*self.synapse_state.dtype.itemsize)
self.frame_count += 1
else:
self.log_info('Input end of file reached. '
'Subsequent behaviour is undefined.')
# if all buffer(I_ext) frames were read, read from file
if self.frame_count >= self._one_time_import and not self.input_eof:
input_ld = self.input_h5file['/array'].shape[0]
if input_ld - self.file_pointer < self._one_time_import:
h_ext = self.input_h5file['/array'][self.file_pointer:input_ld]
else:
h_ext = self.input_h5file['/array'][self.file_pointer:
self.file_pointer+self._one_time_import]
if h_ext.shape[0] == self.I_ext.shape[0]:
self.I_ext.set(h_ext)
self.file_pointer += self._one_time_import
self.frame_count = 0
else:
pad_shape = list(h_ext.shape)
self.frames_in_buffer = h_ext.shape[0]
pad_shape[0] = self._one_time_import - h_ext.shape[0]
h_ext = np.concatenate((h_ext, np.zeros(pad_shape)), axis=0)
self.I_ext.set(h_ext)
self.file_pointer = input_ld
if self.file_pointer == self.input_h5file['/array'].shape[0]:
self.input_eof = True
def _get_external_input(self):
# use of intermediate I_ext can possibly be avoided
input_ext = self.input_generator.next_input()
if type(input_ext) == np.ndarray:
self.I_ext.set(input_ext)
cuda.memcpy_dtod(
int(int(self.synapse_state.gpudata) +
self.total_synapses*self.synapse_state.dtype.itemsize),
int(self.I_ext.gpudata),
self.num_input*self.synapse_state.dtype.itemsize)
else:
cuda.memcpy_dtod(
int(int(self.synapse_state.gpudata) +
self.total_synapses*self.synapse_state.dtype.itemsize),
int(input_ext.gpudata),
self.num_input*self.synapse_state.dtype.itemsize)
# TODO
def _update_buffer(self):
if self.total_num_gpot_neurons>0:
cuda.memcpy_dtod(int(self.buffer.gpot_buffer.gpudata) +
self.buffer.gpot_current*self.buffer.gpot_buffer.ld*
self.buffer.gpot_buffer.dtype.itemsize,
self.V.gpudata, self.V.nbytes)
if self.total_num_spike_neurons>0:
cuda.memcpy_dtod(int(self.buffer.spike_buffer.gpudata) +
self.buffer.spike_current*self.buffer.spike_buffer.ld*
self.buffer.spike_buffer.dtype.itemsize,
self.spike_state.gpudata,
int(self.spike_state.dtype.itemsize*self.total_num_spike_neurons))
# TODO
def _extract_projection_gpot_func(self):
self.grid_extract_gpot = (min(6 * cuda.Context.get_device().MULTIPROCESSOR_COUNT,
(self.num_public_gpot-1) / 256 + 1),
1)
return self._extract_projection_func(self.V)
#TODO
def _extract_projection_spike_func(self):
self.grid_extract_spike = (min(6 * cuda.Context.get_device().MULTIPROCESSOR_COUNT,
(self.num_public_spike-1) / 256 + 1),
1)
return self._extract_projection_func(self.spike_state)
def _extract_projection_func(self, state_var):
template = """
__global__ void extract_projection(%(type)s* all_V,
%(type)s* projection_V,
int* all_index,
int* projection_index, int N)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int total_threads = blockDim.x * gridDim.x;
int a_ind, p_ind;
for(int i = tid; i < N; i += total_threads)
{
a_ind = all_index[i];
p_ind = projection_index[i];
projection_V[p_ind] = all_V[a_ind];
}
}
"""
mod = SourceModule(
template % {"type": dtype_to_ctype(state_var.dtype)},
options=self.compile_options)
func = mod.get_function("extract_projection")
        func.prepare('PPPPi')  # four pointers and one 32-bit int
return func
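    # The extract_projection kernel above is a grid-strided gather; a NumPy
    # sketch of the same operation would be:
    #
    #     projection_V[projection_index[:N]] = all_V[all_index[:N]]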
# TODO
def _instantiate_neuron(self, i, t, n):
try:
ind = self._neuron_names.index(t)
except:
try:
ind = int(t)
except:
self.log_info("Problem instantiating neurons of model '{}'. "
"Model is probably not in loaded modules".format(t))
return None
if n['spiking'][0]:
neuron = self._neuron_classes[ind].initneuron(
n, int(int(self.spike_state.gpudata) +
self.spike_state.dtype.itemsize*self.idx_start_spike[i]),
self.dt, debug=self.debug, LPU_id=self.LPU_id)
else:
neuron = self._neuron_classes[ind].initneuron(
n, int(int(self.V.gpudata) +
self.V.dtype.itemsize*self.idx_start_gpot[i]),
self.dt, debug=self.debug, LPU_id=self.LPU_id)
return neuron
# TODO
def _instantiate_synapse(self, i, t, s):
try:
ind = self._synapse_names.index(t)
except:
try:
ind = int(t)
except:
self.log_info("Problem instantiating synapses of model '{}'."
"Model is probably not in loaded modules".format(t))
return None
return self._synapse_classes[ind](
s, int(int(self.synapse_state.gpudata) +
self.synapse_state.dtype.itemsize*self.idx_start_synapse[i]),
self.dt, debug=self.debug)
def _import_modules(self, modules):
# if modules contain subclasses of BaseNeuron or BaseSynapse
# they will be associated automatically
if modules is not None:
for module in modules:
importlib.import_module(module)
#TODO
def _load_neurons(self):
self._neuron_classes = baseneuron.BaseNeuron.__subclasses__()
self._neuron_names = [cls.__name__ for cls in self._neuron_classes]
#TODO
def _load_synapses(self):
self._synapse_classes = basesynapse.BaseSynapse.__subclasses__()
self._synapse_names = [cls.__name__ for cls in self._synapse_classes]
@property
def one_time_import(self):
return self._one_time_import
@one_time_import.setter
def one_time_import(self, value):
self._one_time_import = value
@property
def has_synapse(self):
return self.total_synapses + self.num_input > 0
def neuron_cmp(x, y):
if int(x[0]) < int(y[0]):
return -1
elif int(x[0]) > int(y[0]):
return 1
else:
return 0
def synapse_cmp(x, y):
if int(x[1]) < int(y[1]):
return -1
elif int(x[1]) > int(y[1]):
return 1
else:
return 0
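# Python 3 note (hedged): list.sort(cmp=...) and comparator functions like the
# two above were removed in Python 3; a key-based equivalent would be:
#
#     neurons.sort(key=lambda x: int(x[0]))
#     synapses.sort(key=lambda x: int(x[1]))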
class CircularArray:
"""
This class implements a circular buffer to support synapses with delays.
Please refer the documentation of the template synapse class on information
on how to access data correctly from this buffer
"""
def __init__(self, num_gpot_neurons, gpot_delay_steps,
rest, num_spike_neurons, spike_delay_steps):
self.num_gpot_neurons = num_gpot_neurons
if num_gpot_neurons > 0:
self.dtype = np.double
self.gpot_delay_steps = gpot_delay_steps
self.gpot_buffer = parray.empty(
(gpot_delay_steps, num_gpot_neurons), np.double)
self.gpot_current = 0
for i in range(gpot_delay_steps):
cuda.memcpy_dtod(
int(self.gpot_buffer.gpudata) +
self.gpot_buffer.ld * i * self.gpot_buffer.dtype.itemsize,
rest.gpudata, rest.nbytes)
self.num_spike_neurons = num_spike_neurons
if num_spike_neurons > 0:
self.spike_delay_steps = spike_delay_steps
self.spike_buffer = parray.zeros(
(spike_delay_steps, num_spike_neurons), np.int32)
self.spike_current = 0
def step(self):
if self.num_gpot_neurons > 0:
self.gpot_current += 1
if self.gpot_current >= self.gpot_delay_steps:
self.gpot_current = 0
if self.num_spike_neurons > 0:
self.spike_current += 1
if self.spike_current >= self.spike_delay_steps:
self.spike_current = 0
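# Indexing sketch for CircularArray (hedged; the exact convention depends on
# the synapse implementation): after step(), `gpot_current` points at the slot
# that receives the next state, so a value delayed by d steps lives at
#
#     delayed_row = (buf.gpot_current - d) % buf.gpot_delay_steps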
|
neurokernel/lamina
|
lamina/LPU.py
|
Python
|
bsd-3-clause
| 41,754
|
[
"NEURON"
] |
4e734831cc2243e1a380973d843278dff7d3bda9e72646f27ae23c0941a2538a
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''Density functional calculations can be run with either the default
backend library, libxc, or an alternative library, xcfun. See also
example 32-xcfun_as_default.py for how to set xcfun as the default XC
functional library.
'''
from pyscf import gto, dft
from pyscf.hessian import uks as uks_hess
from pyscf import tdscf
mol = gto.M(atom="H; F 1 1.", basis='631g')
# Calculation using libxc
mf = dft.UKS(mol)
mf.xc = 'CAMB3LYP'
mf.kernel()
mf.nuc_grad_method().kernel()
# We can also evaluate the geometric hessian
hess = uks_hess.Hessian(mf).kernel()
print(hess.reshape(2,3,2,3))
# or TDDFT gradients
tdks = tdscf.TDA(mf)
tdks.nstates = 3
tdks.kernel()
tdks.nuc_grad_method().kernel()
# Switch to the xcfun library on the fly
mf._numint.libxc = dft.xcfun
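# Optional sanity check (hedged): confirm which backend module is now active.
print(mf._numint.libxc.__name__)  # expected: 'pyscf.dft.xcfun'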
# Repeat the geometric hessian
hess = uks_hess.Hessian(mf).kernel()
print(hess.reshape(2,3,2,3))
# and the TDDFT gradient calculation
tdks = tdscf.TDA(mf)
tdks.nstates = 3
tdks.kernel()
tdks.nuc_grad_method().kernel()
|
sunqm/pyscf
|
examples/dft/12-camb3lyp.py
|
Python
|
apache-2.0
| 1,055
|
[
"PySCF"
] |
04fa7f1c4319c1c98481fcd899a546078cdf6cc53cc77ccd19f6137498f0d0f7
|
"""
Trace object types that are inserted into a Python list.
"""
import ast
from clike import CLikeTranspiler
def decltype(node):
"""Create C++ decltype statement"""
if is_list(node):
return "std::vector<decltype({0})>".format(value_type(node))
else:
return "decltype({0})".format(value_type(node))
def is_builtin_import(name):
return name == "sys" or name == "math"
def is_list(node):
"""Check if a node was assigned as a list"""
if isinstance(node, ast.List):
return True
elif isinstance(node, ast.Assign):
return is_list(node.value)
elif isinstance(node, ast.Name):
var = node.scopes.find(node.id)
return (hasattr(var, "assigned_from") and not
isinstance(var.assigned_from, ast.FunctionDef) and
is_list(var.assigned_from.value))
else:
return False
def value_expr(node):
"""
Follow all assignments down the rabbit hole in order to find
the value expression of a name.
The boundary is set to the current scope.
"""
return ValueExpressionVisitor().visit(node)
def value_type(node):
"""
Guess the value type of a node based on the manipulations or assignments
in the current scope.
Special case: If node is a container like a list the value type inside the
list is returned not the list type itself.
"""
return ValueTypeVisitor().visit(node)
class ValueExpressionVisitor(ast.NodeVisitor):
def visit_Num(self, node):
return str(node.n)
def visit_Str(self, node):
return node.s
def visit_Name(self, node):
var = node.scopes.find(node.id)
if isinstance(var.assigned_from, ast.For):
it = var.assigned_from.iter
return "std::declval<typename decltype({0})::value_type>()".format(
self.visit(it))
elif isinstance(var.assigned_from, ast.FunctionDef):
return var.id
else:
return self.visit(var.assigned_from.value)
def visit_Call(self, node):
params = ",".join([self.visit(arg) for arg in node.args])
return "{0}({1})".format(node.func.id, params)
def visit_Assign(self, node):
return self.visit(node.value)
def visit_BinOp(self, node):
return "{0} {1} {2}".format(self.visit(node.left),
CLikeTranspiler().visit(node.op),
self.visit(node.right))
class ValueTypeVisitor(ast.NodeVisitor):
def visit_Num(self, node):
return value_expr(node)
def visit_Str(self, node):
return value_expr(node)
def visit_Name(self, node):
if node.id == 'True' or node.id == 'False':
return CLikeTranspiler().visit(node)
var = node.scopes.find(node.id)
if defined_before(var, node):
return node.id
else:
return self.visit(var.assigned_from.value)
def visit_Call(self, node):
params = ",".join([self.visit(arg) for arg in node.args])
return "{0}({1})".format(node.func.id, params)
def visit_Assign(self, node):
if isinstance(node.value, ast.List):
if len(node.value.elts) > 0:
val = node.value.elts[0]
return self.visit(val)
else:
target = node.targets[0]
var = node.scopes.find(target.id)
first_added_value = var.calls[0].args[0]
return value_expr(first_added_value)
else:
return self.visit(node.value)
def defined_before(node1, node2):
"""Check if node a has been defined before an other node b"""
return node1.lineno < node2.lineno
def is_list_assignment(node):
return (isinstance(node.value, ast.List) and
isinstance(node.targets[0].ctx, ast.Store))
def is_list_addition(node):
"""Check if operation is adding something to a list"""
list_operations = ["append", "extend", "insert"]
return (isinstance(node.func.ctx, ast.Load) and
hasattr(node.func, "value") and
isinstance(node.func.value, ast.Name) and
node.func.attr in list_operations)
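# Example (sketch): a Call node for `x.append(1)` satisfies is_list_addition,
# while one for `x.index(1)` does not.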
def is_recursive(fun):
finder = RecursionFinder()
finder.visit(fun)
return finder.recursive
class RecursionFinder(ast.NodeVisitor):
function_name = None
recursive = False
def visit_FunctionDef(self, node):
self.function_name = node.name
self.generic_visit(node)
def visit_Call(self, node):
self.recursive = (isinstance(node.func, ast.Name) and
node.func.id == self.function_name)
self.generic_visit(node)
|
lukasmartinelli/py14
|
py14/tracer.py
|
Python
|
mit
| 4,694
|
[
"VisIt"
] |
22c63c744977f72bd22d73e5163f89ea62b9b1a5c54c754dec1341e3431015d3
|
#!/opt/local/bin/python2.7
#
# script to take a CSV list of filename/NetID/name triples and import them into Evernote
#
# Usage: if mfst.csv is of the form
#
# filename,NetID,LastName__FirstNames
#
# then
#
# $ ./enimport.py [options] < mfst.csv
#
# will import into Evernote all the specified files
import hashlib
import binascii
import evernote.edam.userstore.constants as UserStoreConstants
import evernote.edam.type.ttypes as Types
from evernote.api.client import EvernoteClient
import argparse
import ConfigParser
import logging
import csv
import sys
import mimetypes
import dateutil.parser # sudo port -v install py27-dateutil
from dateutil.tz import *
from datetime import datetime, timedelta
import time
def toTimestamp(dt, epoch=datetime.fromtimestamp(0,tzutc())):
"""
convert a datetime object to a unix timestamp
See http://stackoverflow.com/a/8778548/297797
"""
logging.debug("epoch: %s",repr(epoch))
td = dt - epoch
# return td.total_seconds()
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 1e6
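# Example: one day after the epoch maps to 86400 seconds:
#     toTimestamp(datetime(1970, 1, 2, tzinfo=tzutc())) == 86400.0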
# Parse any conf_file specification
# We make this parser with add_help=False so that
# it doesn't parse -h and print help.
conf_parser = argparse.ArgumentParser(
description=__doc__, # printed with -h/--help
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
# Turn off help, so we print all options in response to -h
add_help=False
)
conf_parser.add_argument("-c", "--conf",
help="Specify config file (default: enimport.rc)", metavar="FILE",
default="enimport.rc")
args, remaining_argv = conf_parser.parse_known_args()
if args.conf:
config = ConfigParser.SafeConfigParser()
config.read([args.conf])
defaults = dict(config.items("defaults"))
else:
defaults = { }
# Parse rest of arguments
# Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser]
)
# TODO: maybe move this next line down to make sure config file is processed AFTER option defaults?
parser.set_defaults(**defaults)
parser.add_argument('-d','--debug',
help='Print lots of debugging statements',
action="store_const",dest="loglevel",const=logging.DEBUG,
default=logging.WARNING
)
parser.add_argument('-v','--verbose',
help='Be verbose',
action="store_const",dest="loglevel",const=logging.INFO
)
parser.add_argument('--dry-run',
help='do not save any notes',
action='store_true',dest='dry_run')
parser.add_argument('--auth-token',
help='authentication token (visit https://sandbox.evernote.com/api/DeveloperToken.action)',
action='store',dest='auth_token')
parser.add_argument('--sandbox',
help='use the sandbox server',
action='store_true',dest='sandbox',
default=False)
parser.add_argument('-nb','--notebook',
help="Store note in this notebook",
action="store",dest="notebook",
)
parser.add_argument('--docname',
help="Name of the document",
# default="Untitled document",
action="store", dest="doc_name")
parser.add_argument('--docdate',
help="Date of the document (use ISO 8601 format)",
action="store",dest='doc_date')
parser.add_argument('--course',
help="Name of the course",
action="store", dest="course")
parser.add_argument('--term',
help='term name',
action='store',dest='term',
# default="Fall 2014" FIXME: defaults here clobber values from config file :-(
)
# TODO: tag names (multiple optional argument)
parser.add_argument('--tag',metavar='TAG',
help='Add tag to note (as many as you like)',
action='append',dest='tags')
parser.add_argument('csvfile', nargs='?',
help="CSV file from which to read (default: standard input)",
type=argparse.FileType('r'),default=sys.stdin)
args = parser.parse_args(remaining_argv)
logging.basicConfig(level=args.loglevel)
if not(args.auth_token):
logging.error("Please fill in your developer token. To get a developer token, visit https://sandbox.evernote.com/api/DeveloperToken.action")
exit(1)
client = EvernoteClient(token=args.auth_token, sandbox=args.sandbox)
user_store = client.get_user_store()
version_ok = user_store.checkVersion(
"Evernote EDAMTest (Python)",
UserStoreConstants.EDAM_VERSION_MAJOR,
UserStoreConstants.EDAM_VERSION_MINOR
)
if (version_ok):
logging.debug("Evernote API version up to date: %d",version_ok)
else:
logging.error("Evernote API version NOT up to date")
exit(1)
note_store = client.get_note_store()
## get the right notebook
if (args.notebook):
logging.debug("Searching for notebook named '%s'",args.notebook)
notebooks = note_store.listNotebooks()
found=False
for notebook in notebooks:
logging.debug("Notebook: name='%s' guid=%s", notebook.name, notebook.guid)
if (notebook.name == args.notebook):
logging.debug("match")
found=True
break
if (not(found)):
logging.error("Notebook named '%s' not found", args.notebook)
else:
logging.debug("Using default notebook")
notebook = note_store.getDefaultNotebook()
logging.info("Using Notebook '%s' with guid %s", notebook.name, notebook.guid)
for rec in csv.reader(args.csvfile):
logging.debug("rec: %s", repr(rec))
filename,student_netid,student_fname_rev = rec
# check if the first field is a valid file (it might be a header)
try:
file = open(filename, 'rb').read()
except IOError:
logging.info("Skipping %s as it does not seem to be a file",filename)
continue
student_lname,student_gnames=student_fname_rev.split('__')
student_gnames = student_gnames.replace('_',' ')
student_fname = "%s %s" % (student_gnames,student_lname)
logging.debug("student_fname: '%s'",student_fname)
student_tagname="student: %s; %s <%s@nyu.edu>" % (student_lname, student_gnames, student_netid)
logging.debug("student_tagname: '%s'",student_tagname)
# To create a new note, simply create a new Note object and fill in
# attributes such as the note's title.
note = Types.Note()
note.notebookGuid=notebook.guid
note.title = "%s for %s from %s" % (args.doc_name,student_fname,args.course)
note.tagNames=list(args.tags)
note.tagNames.append('student work')
note.tagNames.append(student_tagname)
if (args.term):
note.title += ", " + args.term
note.tagNames.append('term: ' + args.term)
note.tagNames.append('course: ' + args.course)
logging.info("note.title: '%s'", note.title)
logging.info("note.tags: %s",repr(note.tagNames))
## TODO: add some more note attributes
# created - exam date/time (Timestamp) # parse ISO 8601!
currentTime = time.time() * 1000
if (args.doc_date):
createdDate = dateutil.parser.parse(args.doc_date)
logging.debug("createdDate: %s",repr(createdDate))
createdTimestamp = toTimestamp(createdDate)
note.created = createdTimestamp * 1000
logging.info("note.created: %d",note.created)
else:
note.created=currentTime
logging.info("note.created: %d (now)",note.created)
# updated - now, obvs (Timestamp)
note.updated=currentTime
logging.info("note.updated: %d (now)",note.updated)
## TODO: add some more attributes with the NoteAttributes type
# https://dev.evernote.com/doc/reference/Types.html#Struct_NoteAttributes
# latitude
# longitude
# altitude
# author - student <email>
# source - progname
# placeName - "CIMS"? "Work"?
# To include an attachment such as an image in a note, first create a Resource
# for the attachment. At a minimum, the Resource contains the binary attachment
# data, an MD5 hash of the binary data, and the attachment MIME type.
# It can also include attributes such as filename and location.
md5 = hashlib.md5()
md5.update(file)
hash = md5.digest()
logging.debug("hash: %s", hash)
data = Types.Data()
data.size = len(file)
data.bodyHash = hash
data.body = file
resource = Types.Resource()
(resource.mime,encoding) = mimetypes.guess_type(filename)
logging.debug("resource.mime: %s",resource.mime)
resource.data = data
# adding a file name to the resource with a ResourceAttributes type.
resource_attributes=Types.ResourceAttributes()
resource_attributes.fileName=note.title + mimetypes.guess_extension(resource.mime)
resource.attributes=resource_attributes
# Now, add the new Resource to the note's list of resources
note.resources = [resource]
# To display the Resource as part of the note's content, include an <en-media>
# tag in the note's ENML content. The en-media tag identifies the corresponding
# Resource using the MD5 hash.
hash_hex = binascii.hexlify(hash)
# The content of an Evernote note is represented using Evernote Markup Language
# (ENML). The full ENML specification can be found in the Evernote API Overview
# at http://dev.evernote.com/documentation/cloud/chapters/ENML.php
note.content = '<?xml version="1.0" encoding="UTF-8"?>'
note.content += '<!DOCTYPE en-note SYSTEM ' \
'"http://xml.evernote.com/pub/enml2.dtd">'
note.content += '<en-note>'
note.content += '<en-media type="' + resource.mime + '" hash="' + hash_hex + '"/>'
note.content += '</en-note>'
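    # The assembled ENML body looks like this (the hash and MIME type shown
    # are illustrative):
    #
    #   <?xml version="1.0" encoding="UTF-8"?>
    #   <!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd">
    #   <en-note><en-media type="application/pdf" hash="d41d8cd9..."/></en-note>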
# Finally, send the new note to Evernote using the createNote method
# The new Note object that is returned will contain server-generated
# attributes such as the new note's unique GUID.
if (args.dry_run):
logging.info("If this were not a dry run, would save a note here")
else:
logging.info("Adding note to note_store")
created_note = note_store.createNote(note)
logging.info("Successfully created a new note with GUID: %s", created_note.guid)
|
leingang/plg
|
bin/enimport.py
|
Python
|
gpl-3.0
| 9,381
|
[
"VisIt"
] |
0d28178ecc3e274b0df4f0ab6bfbeb86d2804bd546b28c027dd0d04bcd8ed1d5
|
'''
This is the implementation of the proof search algorithm. I'm pretty sure that
I know this well enough to just flat out implement it.
The tricky part is going to be merging of the threading and multiprocessing
libraries. I predict that this will be ugly.
We're going to make all of the interface calls properties of the threads.
I assume that the payouts range from 0 to 1, with proven nodes returning 1.0
'''
# we make these global variables because we want them to
# be accessible after forking.
global_interface = None
global_context = None
global_problem = None
global_using_threads = None
inf = float('inf')
import threading
import multiprocessing
import signal
import numpy as np
from interface import *
import heapq
import time
import naive_tree_search_problem as tsp
import tree_parser
import traceback
import write_proof
import last_step
from IPython.display import clear_output
BEAM_SIZE = 10
VERBOSE = False
def printv(*x):
if VERBOSE:
print(x)
# value for UCT is calculated as:
# c.value/(c.visits + GAMMA * c.visiting_threads)
# + BETA * c.prob/(1.0 + c.visits)
# +ALPHA * np.sqrt(np.log(self.visits)/(1.0+c.visits))
# we want to try everything with p > 0.05 when starting with a value of 0.5
# so we should set BETA = 10
CHECK_TAUTOLOGIES = True
CHECK_LAST_STEP = True
APPLY_EASY_PROPS_FIRST = False # with this, all of the constrained propositions are applied immediately at the first (second) visit
REDUCED_TREE_VALUE = True # whether the prover uses the reduced-tree formalism (only considering the least-promising child)
HYP_BONUS = 3.0
ALPHA = 1.0
BETA = 0.5
GAMMA = 3.0 # penalty to currently considered paths
DELTA = 4.0 # the depth at which the value is halved
def valuation_function(child_value, child_visits, visits, child_prob, visiting_threads, fix_payout=None):
score = fix_payout if fix_payout is not None else child_value/(child_visits + GAMMA * visiting_threads)
    return (score  # * DELTA/(DELTA + np.log(child_visits))
            + BETA * child_prob/(1.0 + child_visits)
            + ALPHA * np.sqrt(np.log(visits)/(1.0 + child_visits)))
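# Numeric sketch of valuation_function with the constants above: for
# child_value=0.5, child_visits=2, visits=10, child_prob=0.3,
# visiting_threads=0:
#     score = 0.5/2 = 0.25
#     UCT   = 0.25 + 0.5*0.3/(1+2) + 1.0*sqrt(ln(10)/(1+2)) ~= 1.18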
def depth_cost(value, depth):
# return value * DELTA / (DELTA + depth)
return value
def desired_children(num_visits):
return 0.01 + num_visits/6.0 # maybe this will be better
#return (1.0+num_visits) ** 0.75
''' some auxiliary functions for the printing of proof trees '''
# def print_tree(tree, instance):
# string = tree_parser.tree_to_string(tree, instance.language_model.database, instance.context)
# return ' '.join(string)
def print_pp(tree, depth):
string = tree_parser.tree_to_string(tree, global_problem.lm.database, global_context)
string = ' '.join(string)
#string = string.replace(" ", "") # so that the display fits on one line
return string
''' copies of the interface functions rewritten to
include the global variables '''
def global_get_payout(tree):
try:
return global_interface.get_payout(tree, global_context)
except:
print('ERROR IN GET PAYOUT')
print(tree)
print(('%s: %s' % ('test', traceback.format_exc())))
def global_apply_prop(tree, prop_name):
try:
return global_interface.apply_prop(tree, global_context, prop_name, n=BEAM_SIZE)
except:
print('ERROR IN APPLY PROP')
print(tree, prop_name)
print(('%s: %s' % ('test', traceback.format_exc())))
def global_props(tree):
try:
return global_interface.props(tree, global_context)
except:
print('ERROR IN PROPS')
print(tree)
print(('%s: %s' % ('test', traceback.format_exc())))
''' some stuff for multithreading.
I would have expected Pool to work with with. Maybe
I'm missing something?'''
def init_func():
signal.signal(signal.SIGINT, signal.SIG_IGN)
class withPool:
def __init__(self, procs):
self.p = multiprocessing.Pool(procs, init_func)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
#print 'closing pool'
self.p.close()
self.p.terminate() # I have no idea why the fuck this needs to be here, but otherwise everything has a 50% chance of breaking
#print 'requested close'
self.p.join()
#print 'finished join'
self.p = None
def apply(self, *args, **kwargs):
return self.p.apply(*args, **kwargs)
''' the threading stuff '''
class myThread (threading.Thread):
def __init__(self, name, problem, multi=False):
threading.Thread.__init__(self)
self.finished = False
self.name = name
self.multi = multi
self.problem = problem
if multi:
#self.p = multiprocessing.Pool(1,init_func)
#self.p = withPool(1)
# pool.apply(time.sleep, (10,))
# self.p.start()
pass
else:
self.p=None
''' these functions are all defined again to make the reference to the
thread's pool cleaner '''
def get_payout(self, tree):
#print ' '*0+str(self.name)+' starting payout'
out = self.p.apply(global_get_payout, (tree,)) if self.multi else global_get_payout(tree)
#print ' '*0+str(self.name)+' stopping payout'
return out
def apply_prop(self, tree, prop_name):
#print ' '*30+str(self.name)+' starting gen'
out = self.p.apply(global_apply_prop, (tree,prop_name)) if self.multi else global_apply_prop(tree, prop_name)
#print ' '*30+str(self.name)+' stopping gen'
return out
def props(self, tree):
#print ' '*60+str(self.name)+' starting prop'
out = self.p.apply(global_props, (tree,)) if self.multi else global_props(tree)
#print ' '*60+str(self.name)+' stopping prop'
return out
def run(self):
if global_problem.done():
#print 'Should not be running. Problem already done', self.name
return
# print "Starting " + self.name, time.time()
if self.multi:
#print "is multi, about to call pool " + self.name
#with multiprocessing.Pool(1,init_func) as self.p:
with withPool(1) as self.p:
#print 'Created process for'+self.name
while not self.problem.done():
#print 'stepping'+self.name
self.problem.visit()
#print 'end stepping'+self.name
#print
#print 'Terminating process for'+self.name
#print 'Terminated process for'+self.name
self.p = None
else:
while not self.problem.done():
self.problem.visit()
# print "Exiting " + self.name
self.finished = True
class TypeA:
''' a type A node is a tree.
It has children that are type B nodes'''
def __init__(self, tree, depth, proven=False,label=None):
self.dead = False
if proven:
# this Type A node has already been proven, probably because
# it was one of the original hypotheses
self.depth = depth
self.tree = tree
self.value = 1.0
self.visits = 1
self.children = []
self.initial_payout = 1.0
self.proven = True
self.label = label
self.is_hypothesis = True
# these should never be used
self.modification_lock = threading.Lock()
self.children_lock = threading.Lock()
return
self.is_hypothesis = False
self.label = None
self.depth = depth
self.tree = tree
if global_using_threads:
self.initial_payout = threading.current_thread().get_payout(self.tree)
else:
self.initial_payout = global_get_payout(self.tree)
self.initial_payout = depth_cost(self.initial_payout, self.depth)
self.value = self.initial_payout
self.visits = 1
self.modified_visits = 1
self.children = []
self.proven = False
self.modification_lock = threading.Lock()
self.children_lock = threading.Lock()
self.childless_visits = 0
# controlled by heap_lock
self.in_queue = 0 # the number of things from the heap that are being processed
''' self.heap stores the potential new propositions to apply. Entries
are of the form (-log probability, prop_label, tree or None) '''
self.heap_lock = threading.Lock()
self.heap = None
# do the tautology checking now.
if CHECK_TAUTOLOGIES and not CHECK_LAST_STEP:
taut = global_interface.is_tautology(self.tree, global_context)
if taut is None:
self.tautology = False
else:
self.tautology = True
# add the blue child immediately.
b = TypeB([], np.exp(0.0), self, taut)
self.children.append(b)
self.update_proven()
printv('added tautology:', taut," ", print_pp(self.tree, None) )
elif CHECK_LAST_STEP:
out = last_step.is_easy(self.tree, global_context, global_problem.lm)
if out is not None:
label, hyps = out
b = TypeB(hyps, np.exp(0.0), self, label)
self.children.append(b)
self.update_proven()
assert self.proven
printv('added last_step:', label," ", print_pp(self.tree, None) )
def update_proven(self):
if any(c.proven for c in self.children):
self.proven = True
# check whether we already knew about it
# global_problem.tsp.add(self)
# self.prune()
def prune(self):
# this prunes the tree down, remove unproven children
with self.children_lock:
for c in self.children:
if c.proven:
self.children = [c]
return
def create_child(self, child_params, parent_trees=None):
nlp, label, tree = child_params
lp = -nlp
# print self.heap
# print 'creating child from', child_params, 'avoiding', parent_trees
if tree is None:
#lptrees = threading.current_thread().apply_prop(self.tree, label)
if global_using_threads:
lptrees = threading.current_thread().apply_prop(self.tree, label)
else:
lptrees = global_apply_prop(self.tree, label)
#lptrees = [(x+lp, y) for x, y in lptrees]
else:
# we've already expanded this one
lptrees = [(lp, tree)]
child = None
while child is None and len(lptrees)>0:
assert len(lptrees)>0
lp_new, trees = lptrees.pop(0)
if not any(t in parent_trees for t in trees):
child = TypeB(trees, np.exp(lp), self, label)
else:
printv('FAILED TO CREATE CHILD: CIRCULARITY WHEN APPLYING', label, 'TO', print_pp(self.tree, None))
if child is None:
# We're still going to count this as a visit with value 0, just to discourage continued exploration
# around this node.
printv('FAILED TO CREATE CHILD: NO TREES WHEN APPLYING OR CIRCULAR', label, 'TO', print_pp(self.tree, None))
self.childless_visits += 1
# print 'child', [c.tree for c in child.children]
# else:
# print 'abandoned child', trees
# with self.children_lock:
# self.children.append(child)
if len(lptrees)>0:
with self.heap_lock:
# add the rest of the items back onto the heap
for lptree in lptrees:
this_lp, this_tree = lptree
if any(t in parent_trees for t in this_tree):
continue
this_lp = this_lp+lp-lp_new #
heapq.heappush(self.heap, (-1.0*this_lp, label, this_tree))
return child
def attempt_to_add_child(self, next_child, parent_trees):
child = self.create_child(next_child, parent_trees=parent_trees+[self.tree])
#print 'child', child
if child is not None:
with self.children_lock:
self.children.append(child)
with self.heap_lock:
self.in_queue -= 1
return (child.value, child.visits)
else:
#print 'Caught child but it was None'
with self.heap_lock:
self.in_queue -= 1
# TODO: it's possible that I should keep trying things until they work
return None
def apply_easy_props(self, parent_trees):
with self.heap_lock:
# this figures out all the propositions that are easy and adds them
# immediately. This will hopefully give us a performance boost. Maybe.
old_heap = self.heap
self.heap = []
children_to_add = []
for child_params in old_heap:
nlp, label, tree = child_params
if label in global_problem.lm.constrained_propositions:
children_to_add.append(child_params)
else:
heapq.heappush(self.heap, child_params)
self.in_queue += len(children_to_add)
self.modified_visits = len(children_to_add)
#print 'children to add: ', children_to_add
for next_child in children_to_add:
self.attempt_to_add_child(next_child, parent_trees)
self.update_proven()
self.update_value()
if self.proven:
break
def visit_next_child(self, parent_trees):
'''
let's try something different: keep track of how many
children we want to have as a function of the number of visits.
'''
#if desired_children(self.visits) > len(self.children) and len(self.heap) > 0:
with self.children_lock:
self.remove_dead_children()
#min_child_visits = min(c.visits for c in self.children) if len(self.children)>0 else 1000
if APPLY_EASY_PROPS_FIRST and self.visits == 1:
self.apply_easy_props(parent_trees)
if len(self.children) > 0:
return True
with self.heap_lock:
#if min_child_visits > 1 and len(self.heap) > 0:
if (desired_children(self.visits) > len(self.children)+self.childless_visits or len(self.children)==0) and len(self.heap) > 0:
# pull a new child from the heap
next_child = heapq.heappop(self.heap)
self.in_queue += 1
has_child = True
else:
has_child = False
# if we managed to catch a child:
if has_child:
return self.attempt_to_add_child(next_child, parent_trees)
if len(self.children)>0:
old_scores = np.array(
[valuation_function(c.value, c.visits, self.visits, c.prob, c.visiting_threads)
for c in self.children])
best_old_score_index = np.argmax(old_scores)
next_child = self.children[best_old_score_index]
best_old_score = old_scores[best_old_score_index]
exists_children = True
return next_child.visit(parent_trees+[self.tree])
else:
self.check_death()
printv('NODE HAS NO CHILDREN, DEAD?', self.dead)
return None
def check_death(self):
# is the thing really really dead?
with self.children_lock:
with self.heap_lock:
if len(self.heap) == 0 and all(c.dead for c in self.children) and self.in_queue == 0 and not self.proven:
self.dead = True
def remove_dead_children(self):
# this should be in the children_lock
for c in self.children:
if c.dead:
self.children.remove(c)
def can_be_visited(self):
# checks a bunch of things to determine whether this can be visited
# mostly this avoids visiting nodes where the only child is being considered
if self.dead: return False
if self.proven: return False
if self.heap is None: return True # hasn't been visited twice
if len(self.heap) == 0 and len(self.children) == 0: return False
return True
def get_props(self):
# lists all the propositions, and sorts them into a heap
if global_using_threads:
labels, log_probs = threading.current_thread().props(self.tree)
else:
labels, log_probs = global_props(self.tree)
log_probs -= np.max(log_probs)
# print log_probs
self.heap = []
for l, p in zip(labels, log_probs):
heapq.heappush(self.heap, (-1.0*p, l, None) )
def visit(self, tree_stack=[]):
#print 'visiting node with', self.tree, self.proven
# if we haven't expanded yet, do so.
with self.heap_lock:
if self.heap is None:
self.get_props()
# figure out what child to visit via UCT. Possibly expand one
# of the children
out = self.visit_next_child(tree_stack)
# update my parameters based off of the the returned value
with self.modification_lock:
self.update_value()
# if out is not None:
# self.value += out[0]
# self.visits += out[1]
#else:
#print 'failed to create child blue node'
self.update_proven()
def update_value(self):
self.value = self.initial_payout + sum(c.value for c in self.children)
self.visits = 1 + sum(c.visits for c in self.children)+self.childless_visits
self.modified_visits = self.visits
def print_proof(self, prefix, depth):
if len(self.children)==0:
if self.proven:
#print '{1:6.2f}% {0:4.2f} {2:4.2f} {3:4} '.format(uct_score, self.prob*100.0, self.value/(self.visits+0.00001), self.visits)
#print ' '*(8+10) + '{1:4.2f} ! {0:9}'.format('HYP', self.initial_payout)+prefix+str(depth)+' '+print_pp(self.tree, depth)
print(' '*(8+5) + '{1:4.2f} 1 ! {0:9}'.format('HYP', self.initial_payout)+prefix+str(depth)+' '+print_pp(self.tree, depth))
else:
#print ' '*(8+10) + '{1:4.2f} {0:9}'.format('????', self.initial_payout)+prefix+str(depth)+' '+print_pp(self.tree, depth)
print(' '*(8+5) + '{1:4.2f} 1 {0:9}'.format('????', self.initial_payout)+prefix+str(depth)+' '+print_pp(self.tree, depth))
return
#sorted_children = sorted(self.children)
#print self.children
unsorted = [(c.visits + c.value/c.visits, c) for c in self.children]
unsorted.sort()
unsorted.reverse()
_, sorted_children = list(zip(*unsorted))
# sorted_children = self.children
sorted_children[0].print_proof(prefix, depth)
for c in sorted_children[1:]:
string = ''
print(' '*(8+10) +' {0:9}'.format('')+prefix+'or')
c.print_proof(prefix, depth)
def generate_mm_format_proof(self):
if self.label is not None:
return [self.label] # this is a hypothesis
xlist = [x for x in self.children if x.proven]
assert len(xlist)>0
x = xlist[0]
assert x.proven
return x.generate_mm_format_proof()
class TypeB:
''' a type B is the application of a proposition to a tree.
It has children that are type A nodes'''
def __init__(self, child_trees, prob, parent, label):
self.parent = parent
self.label = label
self.proven = False
# some locks
self.modification_lock = threading.Lock()
self.visits_lock = threading.Lock()
self.prob = prob
self.children = [self.create_child(t) for t in child_trees]
self.visiting_threads = 0 # this adjusts the value for UCT
self.value = 1.0
self.visits = 1
self.dead = False
with self.modification_lock:
self.update_proven()
updated_value = self.update_value()
def check_death(self):
# really really dead.
#with self.modification_lock:
if any(c.dead for c in self.children):
self.dead = True
def create_child(self, tree):
# check if the child has already been proven.
child = global_problem.tsp.search(tree)
if child is None: child = TypeA(tree, self.parent.depth+1)
return child
    def update_value(self):
        # Return the (delta_value, delta_visits) computed by the selected
        # updater so that visit() can propagate it up, as documented there.
        if REDUCED_TREE_VALUE:
            return self.update_value_reduced_tree()
        else:
            return self.update_value_full_tree()
def update_value_reduced_tree(self):
# lock the values and then update them
self.update_proven()
self.check_death()
if self.proven or self.dead:
return None
        # checks whether the child node with the dominant value has changed
        # and, if so, potentially propagates things up
unproven_children = [c for c in self.children if c.can_be_visited()]
child_values = [c.value/c.visits for c in unproven_children]
if len(child_values) == 0: return None
best_child = unproven_children[np.argmin(child_values)]
#child_values = [c.value/c.visits for c in self.children]
#best_child = self.children[np.argmin(child_values)]
# check whether any children are proven
proven_children = len([c for c in self.children if c.proven])
bonus_value = proven_children * HYP_BONUS
#if proven_children > 0: print 'PROVEN CHILDREN BONUS', bonus_value
# calculate the changes from the current condition
delta_visits = best_child.visits-self.visits
delta_value = best_child.value + bonus_value -self.value
self.value = best_child.value + bonus_value
self.visits = best_child.visits
return (delta_value, delta_visits)
def update_value_full_tree(self):
self.update_proven()
self.check_death()
if self.proven or self.dead:
return None
if len(self.children) == 0:
self.value = 1.0
self.visits = 1
else:
self.visits = sum(c.visits for c in self.children)
self.value = sum(c.value for c in self.children)
def visit(self, parent_trees):
        # always visit the visitable child with the lowest *true* value;
        # note the values and the child list must come from the same filtered
        # list, otherwise the argmin index can point at the wrong child
        visitable_children = [c for c in self.children if c.can_be_visited()]
        child_values = [c.value/c.visits for c in visitable_children]
        if len(child_values) > 0:
            # actually the worst child. *that* was an annoying bug.
            best_child = visitable_children[np.argmin(child_values)]
with self.visits_lock:
self.visiting_threads += 1
# visit the child
best_child.visit(parent_trees)
with self.visits_lock:
self.visiting_threads -= 1
# updates the current node and returns the updated value so that
# we can propagate up.
with self.modification_lock:
self.update_proven()
updated_value = self.update_value()
return updated_value
def update_proven(self):
if all(c.proven for c in self.children):
self.proven = True
def print_proof(self, prefix, depth):
if self.proven:
uct_score = 9.99
else:
            uct_score = valuation_function(self.value, self.visits, self.parent.visits, self.prob, 0)
string = '{1:6.2f}% {0:4.2f} {2:4.2f} {3:4} '.format(self.value/(self.visits+0.00001), self.prob*100.0, self.parent.initial_payout, self.visits)
if self.proven:
string += '!'
else:
string += ' '
if global_problem.lm.database.propositions[self.label].unconstrained_arity() > 0:
string += '*'
else:
            string += ' '
print(string+'{0:9}'.format(self.label[:9])+prefix+str(depth)+' '+print_pp(self.parent.tree, global_context))
for c in self.children:
c.print_proof(prefix + '| ', depth+1)
def generate_mm_format_proof(self):
prop = global_problem.lm.database.propositions[self.label]
child_trees = [c.tree for c in self.children]
fit = global_problem.lm.reconstruct_fit(self.parent.tree, child_trees, self.label)
assert fit is not None # this worked the first time
out = []
next_out = 0
for h in prop.hyps:
if h.type == 'e':
x = self.children[next_out]
                next_out += 1
out += x.generate_mm_format_proof()
else:
var = h.label
assert var in fit # fit should have all mandatory variables
                out += fit[var].right_list()
out.append(self.label)
return out
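# Illustrative note (added; not part of the original module): Metamath
# serializes proofs in reverse Polish order, which is exactly what the two
# generate_mm_format_proof methods above build. For a hypothetical
# proposition 'mp' whose two essential hypotheses are closed by the
# subproofs ['h1'] and ['h2', 'ax1'], the emitted list is
# ['h1', 'h2', 'ax1', 'mp']: child proofs first, proposition label last,
# with floating-hypothesis substitutions spliced in from the fit.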
class ProofSearcher:
def __init__(self, prop, lm, tree=None, directory='searcher', timeout=None):
# timeout is in minutes
self.start_time = time.time()
self.timeout = timeout
self.lm = lm
self.directory = directory
# the number of passes
self.passes = 0
self.max_passes = None
self.pass_lock = threading.Lock()
# set up the threading lock for printing
self.print_lock = threading.Lock()
self.last_print_time = time.time()
self.print_frequency = 10.0
# set up the globals
global global_interface
global global_context
global global_problem
global global_using_threads
global_using_threads = False
        if global_interface is None:
            global_interface = ProofInterface(lm, directory=directory)
global_context = lm.standardize_context(prop)
global_problem = self
self.context = global_context
# define the tree in terms of the context
        if tree is None:
            tree = global_context.tree
global_context.tree = tree
global_interface.initialize_payout(global_context)
# build the search database
self.tsp = tsp.ExactSearchProblem()
        self.root = None
        for hyp in global_context.hyps:
            if hyp.type == 'f':
                continue
            node = TypeA(hyp.tree, None, proven=True, label=hyp.label)
            self.tsp.add(node)
            if hyp.tree == tree:
                # Oh, look. We're already done.
                self.root = node
        # build the root node, unless a hypothesis already matched the goal
        if self.root is None:
            self.root = TypeA(tree, 0)
    def run(self, passes, multi=False, threads=None, print_output=True, clear_output=True):
self.print_output = print_output
self.clear_output = clear_output
global global_using_threads
        global_using_threads = threads is not None
# set the ending condition
self.max_passes = self.passes + passes
# start by printing the current tree
self.print_proof(force=True)
if global_using_threads:
            # threaded: build and start the worker threads
self.threads = []
for i in range(threads):
t = myThread(i, self, multi=multi)
t.start()
self.threads.append(t)
# now wait for the threads to finish
for t in self.threads:
t.join()
else:
# unthreaded
while not self.done():
self.visit()
self.print_proof(force=True)
def print_proof(self, force=False):
# skip this if something is already printing
if time.time()-self.last_print_time < self.print_frequency and not force:
return
if self.print_lock.locked() and not force:
return
with self.print_lock:
            # IPython only
if self.clear_output:
clear_output()
if self.root.proven:
print('PROVEN')
self.last_print_time = time.time()
if self.print_output:
print('Current proof after {0} / {1} passes'.format(self.passes, self.max_passes))
self.root.print_proof('', 0)
def visit(self):
with self.pass_lock:
self.passes += 1
self.root.visit()
self.print_proof()
def done(self):
elapsed_time = time.time()-self.start_time
if self.timeout is not None and elapsed_time > self.timeout * 60:
print('search ended: reached timeout of {0} minutes'.format(self.timeout))
return True
return (self.passes >= self.max_passes) or self.root.proven or self.root.dead
def proven(self):
return self.root.proven
def generate_mm_format_proof(self):
self.root.prune()
        labels = self.root.generate_mm_format_proof()
        # now substitute the variable constructors back in
        dereplace = {v: k for k, v in self.context.replacement_dict.items()
                     if k in self.context.mandatory}
        labels = [dereplace.get(label, label) for label in labels]
        string = ' '.join(labels)
        print(string)
        return string
return string
def write(self):
# writes to the modified set.mm file.
write_proof.write({global_context.label:self.generate_mm_format_proof()})
def proof_object(self):
assert self.proven()
out = write_proof.Proof(self.context.label, self.generate_mm_format_proof(), self.passes)
# write the proof. Why not?
out.save(directory=self.directory)
return out
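# Usage sketch (added for illustration; `prop` and `lm` are assumed to come
# from the surrounding holophrasm pipeline, as elsewhere in this file):
#
#     searcher = ProofSearcher(prop, lm, timeout=10)    # 10-minute budget
#     searcher.run(1000, multi=True, threads=4)         # threaded search
#     if searcher.proven():
#         searcher.generate_mm_format_proof()           # RPN label string
#         searcher.write()                              # record in set.mm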
| dwhalen/holophrasm | proof_search.py | Python | mit | 30,034 | ["VisIt"] | 2f12981de1142826410ee429f3f66ba8160bd81448ac8b4b1deb7a2a7beaaee8 |
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy.misc.doccer import (extend_notes_in_docstring,
replace_notes_in_docstring)
from scipy import optimize
from scipy import integrate
import scipy.special as sc
from scipy._lib._numpy_compat import broadcast_to
from . import _stats
from ._tukeylambda_stats import (tukeylambda_variance as _tlvar,
tukeylambda_kurtosis as _tlkurt)
from ._distn_infrastructure import (get_distribution_names, _kurtosis,
_lazyselect, _lazywhere, _ncx2_cdf,
_ncx2_log_pdf, _ncx2_pdf,
rv_continuous, _skew, valarray)
from ._constants import _XMIN, _EULER, _ZETA3, _XMAX, _LOGXMAX
# In numpy 1.12 and above, np.power refuses to raise integers to negative
# powers, and `np.float_power` is a new replacement.
try:
float_power = np.float_power
except AttributeError:
float_power = np.power
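# Illustrative check (added; not in the original source). The fallback above
# matters because np.power raises for integer bases with negative integer
# exponents on numpy >= 1.12, while float_power always computes in floats:
#
#     >>> float_power(2, -2)   # 0.25 on any supported numpy
#     >>> np.power(2, -2)      # ValueError on numpy >= 1.12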
## Kolmogorov-Smirnov one-sided and two-sided test statistics
class ksone_gen(rv_continuous):
"""General Kolmogorov-Smirnov one-sided test.
%(default)s
"""
def _cdf(self, x, n):
return 1.0 - sc.smirnov(n, x)
def _ppf(self, q, n):
return sc.smirnovi(n, 1.0 - q)
ksone = ksone_gen(a=0.0, name='ksone')
class kstwobign_gen(rv_continuous):
"""Kolmogorov-Smirnov two-sided test for large N.
%(default)s
"""
def _cdf(self, x):
return 1.0 - sc.kolmogorov(x)
def _sf(self, x):
return sc.kolmogorov(x)
def _ppf(self, q):
return sc.kolmogi(1.0 - q)
kstwobign = kstwobign_gen(a=0.0, name='kstwobign')
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
_norm_pdf_C = np.sqrt(2*np.pi)
_norm_pdf_logC = np.log(_norm_pdf_C)
def _norm_pdf(x):
return np.exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
return sc.ndtr(x)
def _norm_logcdf(x):
return sc.log_ndtr(x)
def _norm_ppf(q):
return sc.ndtri(q)
def _norm_sf(x):
return _norm_cdf(-x)
def _norm_logsf(x):
return _norm_logcdf(-x)
def _norm_isf(q):
return -_norm_ppf(q)
class norm_gen(rv_continuous):
r"""A normal continuous random variable.
The location (loc) keyword specifies the mean.
The scale (scale) keyword specifies the standard deviation.
%(before_notes)s
Notes
-----
The probability density function for `norm` is:
.. math::
f(x) = \frac{\exp(-x^2/2)}{\sqrt{2\pi}}
The survival function, ``norm.sf``, is also referred to as the
Q-function in some contexts (see, e.g.,
`Wikipedia's <https://en.wikipedia.org/wiki/Q-function>`_ definition).
%(after_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.standard_normal(self._size)
def _pdf(self, x):
# norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
return _norm_pdf(x)
def _logpdf(self, x):
return _norm_logpdf(x)
def _cdf(self, x):
return _norm_cdf(x)
def _logcdf(self, x):
return _norm_logcdf(x)
def _sf(self, x):
return _norm_sf(x)
def _logsf(self, x):
return _norm_logsf(x)
def _ppf(self, q):
return _norm_ppf(q)
def _isf(self, q):
return _norm_isf(q)
def _stats(self):
return 0.0, 1.0, 0.0, 0.0
def _entropy(self):
return 0.5*(np.log(2*np.pi)+1)
@replace_notes_in_docstring(rv_continuous, notes="""\
This function uses explicit formulas for the maximum likelihood
estimation of the normal distribution parameters, so the
`optimizer` argument is ignored.\n\n""")
def fit(self, data, **kwds):
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if floc is None:
loc = data.mean()
else:
loc = floc
if fscale is None:
scale = np.sqrt(((data - loc)**2).mean())
else:
scale = fscale
return loc, scale
norm = norm_gen(name='norm')
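# Example sketch (added for illustration): because norm.fit above uses the
# closed-form MLE, the fitted parameters are exactly the sample mean and the
# biased (ddof=0) standard deviation:
#
#     >>> data = np.array([1.0, 2.0, 3.0, 4.0])
#     >>> loc, scale = norm.fit(data)
#     >>> loc, scale   # (2.5, 1.118...), i.e. data.mean(), data.std(ddof=0)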
class alpha_gen(rv_continuous):
r"""An alpha continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `alpha` is:
.. math::
        f(x, a) = \frac{1}{x^2 \Phi(a) \sqrt{2\pi}}
                  \exp\left(-\frac{1}{2} (a - 1/x)^2\right)
    where :math:`\Phi(a)` is the normal CDF, :math:`x > 0`, and :math:`a > 0`.
`alpha` takes ``a`` as a shape parameter.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, a):
# alpha.pdf(x, a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2)
return 1.0/(x**2)/_norm_cdf(a)*_norm_pdf(a-1.0/x)
def _logpdf(self, x, a):
return -2*np.log(x) + _norm_logpdf(a-1.0/x) - np.log(_norm_cdf(a))
def _cdf(self, x, a):
return _norm_cdf(a-1.0/x) / _norm_cdf(a)
def _ppf(self, q, a):
return 1.0/np.asarray(a-sc.ndtri(q*_norm_cdf(a)))
def _stats(self, a):
return [np.inf]*2 + [np.nan]*2
alpha = alpha_gen(a=0.0, name='alpha')
class anglit_gen(rv_continuous):
r"""An anglit continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `anglit` is:
.. math::
f(x) = \sin(2x + \pi/2) = \cos(2x)
for :math:`-\pi/4 \le x \le \pi/4`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# anglit.pdf(x) = sin(2*x + \pi/2) = cos(2*x)
return np.cos(2*x)
def _cdf(self, x):
return np.sin(x+np.pi/4)**2.0
def _ppf(self, q):
return np.arcsin(np.sqrt(q))-np.pi/4
def _stats(self):
return 0.0, np.pi*np.pi/16-0.5, 0.0, -2*(np.pi**4 - 96)/(np.pi*np.pi-8)**2
def _entropy(self):
return 1-np.log(2)
anglit = anglit_gen(a=-np.pi/4, b=np.pi/4, name='anglit')
class arcsine_gen(rv_continuous):
r"""An arcsine continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `arcsine` is:
.. math::
f(x) = \frac{1}{\pi \sqrt{x (1-x)}}
for :math:`0 \le x \le 1`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
return 1.0/np.pi/np.sqrt(x*(1-x))
def _cdf(self, x):
return 2.0/np.pi*np.arcsin(np.sqrt(x))
def _ppf(self, q):
return np.sin(np.pi/2.0*q)**2.0
def _stats(self):
mu = 0.5
mu2 = 1.0/8
g1 = 0
g2 = -3.0/2.0
return mu, mu2, g1, g2
def _entropy(self):
return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
class FitDataError(ValueError):
# This exception is raised by, for example, beta_gen.fit when both floc
# and fscale are fixed and there are values in the data not in the open
# interval (floc, floc+fscale).
def __init__(self, distr, lower, upper):
self.args = (
"Invalid values in `data`. Maximum likelihood "
"estimation with {distr!r} requires that {lower!r} < x "
"< {upper!r} for each x in `data`.".format(
distr=distr, lower=lower, upper=upper),
)
class FitSolverError(RuntimeError):
# This exception is raised by, for example, beta_gen.fit when
# optimize.fsolve returns with ier != 1.
def __init__(self, mesg):
emsg = "Solver for the MLE equations failed to converge: "
emsg += mesg.replace('\n', '')
self.args = (emsg,)
def _beta_mle_a(a, b, n, s1):
# The zeros of this function give the MLE for `a`, with
# `b`, `n` and `s1` given. `s1` is the sum of the logs of
# the data. `n` is the number of data points.
psiab = sc.psi(a + b)
func = s1 - n * (-psiab + sc.psi(a))
return func
def _beta_mle_ab(theta, n, s1, s2):
# Zeros of this function are critical points of
# the maximum likelihood function. Solving this system
# for theta (which contains a and b) gives the MLE for a and b
# given `n`, `s1` and `s2`. `s1` is the sum of the logs of the data,
# and `s2` is the sum of the logs of 1 - data. `n` is the number
# of data points.
a, b = theta
psiab = sc.psi(a + b)
func = [s1 - n * (-psiab + sc.psi(a)),
s2 - n * (-psiab + sc.psi(b))]
return func
class beta_gen(rv_continuous):
r"""A beta continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `beta` is:
.. math::
        f(x, a, b) = \frac{\Gamma(a+b) x^{a-1} (1-x)^{b-1}}
                          {\Gamma(a) \Gamma(b)}
    for :math:`0 < x < 1`, :math:`a > 0`, :math:`b > 0`, where
    :math:`\Gamma(z)` is the gamma function (`scipy.special.gamma`).
`beta` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, a, b):
return self._random_state.beta(a, b, self._size)
def _pdf(self, x, a, b):
# gamma(a+b) * x**(a-1) * (1-x)**(b-1)
# beta.pdf(x, a, b) = ------------------------------------
# gamma(a)*gamma(b)
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
lPx = sc.xlog1py(b - 1.0, -x) + sc.xlogy(a - 1.0, x)
lPx -= sc.betaln(a, b)
return lPx
def _cdf(self, x, a, b):
return sc.btdtr(a, b, x)
def _ppf(self, q, a, b):
return sc.btdtri(a, b, q)
def _stats(self, a, b):
mn = a*1.0 / (a + b)
var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0
g1 = 2.0*(b-a)*np.sqrt((1.0+a+b)/(a*b)) / (2+a+b)
g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b))
g2 /= a*b*(a+b+2)*(a+b+3)
return mn, var, g1, g2
def _fitstart(self, data):
g1 = _skew(data)
g2 = _kurtosis(data)
def func(x):
a, b = x
sk = 2*(b-a)*np.sqrt(a + b + 1) / (a + b + 2) / np.sqrt(a*b)
ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
ku /= a*b*(a+b+2)*(a+b+3)
ku *= 6
return [sk-g1, ku-g2]
a, b = optimize.fsolve(func, (1.0, 1.0))
return super(beta_gen, self)._fitstart(data, args=(a, b))
@extend_notes_in_docstring(rv_continuous, notes="""\
In the special case where both `floc` and `fscale` are given, a
`ValueError` is raised if any value `x` in `data` does not satisfy
`floc < x < floc + fscale`.\n\n""")
def fit(self, data, *args, **kwds):
# Override rv_continuous.fit, so we can more efficiently handle the
# case where floc and fscale are given.
f0 = (kwds.get('f0', None) or kwds.get('fa', None) or
kwds.get('fix_a', None))
f1 = (kwds.get('f1', None) or kwds.get('fb', None) or
kwds.get('fix_b', None))
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is None or fscale is None:
# do general fit
return super(beta_gen, self).fit(data, *args, **kwds)
if f0 is not None and f1 is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Special case: loc and scale are constrained, so we are fitting
# just the shape parameters. This can be done much more efficiently
# than the method used in `rv_continuous.fit`. (See the subsection
# "Two unknown parameters" in the section "Maximum likelihood" of
# the Wikipedia article on the Beta distribution for the formulas.)
# Normalize the data to the interval [0, 1].
data = (np.ravel(data) - floc) / fscale
if np.any(data <= 0) or np.any(data >= 1):
raise FitDataError("beta", lower=floc, upper=floc + fscale)
xbar = data.mean()
if f0 is not None or f1 is not None:
# One of the shape parameters is fixed.
if f0 is not None:
# The shape parameter a is fixed, so swap the parameters
# and flip the data. We always solve for `a`. The result
# will be swapped back before returning.
b = f0
data = 1 - data
xbar = 1 - xbar
else:
b = f1
# Initial guess for a. Use the formula for the mean of the beta
# distribution, E[x] = a / (a + b), to generate a reasonable
# starting point based on the mean of the data and the given
# value of b.
a = b * xbar / (1 - xbar)
# Compute the MLE for `a` by solving _beta_mle_a.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_a, a,
args=(b, len(data), np.log(data).sum()),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a = theta[0]
if f0 is not None:
# The shape parameter a was fixed, so swap back the
# parameters.
a, b = b, a
else:
# Neither of the shape parameters is fixed.
# s1 and s2 are used in the extra arguments passed to _beta_mle_ab
# by optimize.fsolve.
s1 = np.log(data).sum()
s2 = sc.log1p(-data).sum()
# Use the "method of moments" to estimate the initial
# guess for a and b.
fac = xbar * (1 - xbar) / data.var(ddof=0) - 1
a = xbar * fac
b = (1 - xbar) * fac
# Compute the MLE for a and b by solving _beta_mle_ab.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_ab, [a, b],
args=(len(data), s1, s2),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a, b = theta
return a, b, floc, fscale
beta = beta_gen(a=0.0, b=1.0, name='beta')
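# Example sketch (added for illustration): fixing both floc and fscale
# triggers the fast shape-only MLE path above rather than the generic
# optimizer in rv_continuous.fit; data outside (floc, floc + fscale) raises
# FitDataError:
#
#     >>> data = beta.rvs(2.0, 5.0, size=1000, random_state=1234)
#     >>> a, b, loc, scale = beta.fit(data, floc=0, fscale=1)
#     >>> (loc, scale)   # the fixed values come back unchanged
#     (0, 1)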
class betaprime_gen(rv_continuous):
r"""A beta prime continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `betaprime` is:
.. math::
        f(x, a, b) = \frac{x^{a-1} (1+x)^{-a-b}}{B(a, b)}
    for :math:`x > 0`, :math:`a > 0`, :math:`b > 0`, where :math:`B(a, b)` is
    the beta function (see `scipy.special.beta`).
`betaprime` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, a, b):
sz, rndm = self._size, self._random_state
u1 = gamma.rvs(a, size=sz, random_state=rndm)
u2 = gamma.rvs(b, size=sz, random_state=rndm)
return u1 / u2
def _pdf(self, x, a, b):
# betaprime.pdf(x, a, b) = x**(a-1) * (1+x)**(-a-b) / beta(a, b)
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
return sc.xlogy(a - 1.0, x) - sc.xlog1py(a + b, x) - sc.betaln(a, b)
def _cdf(self, x, a, b):
return sc.betainc(a, b, x/(1.+x))
def _munp(self, n, a, b):
if n == 1.0:
return np.where(b > 1,
a/(b-1.0),
np.inf)
elif n == 2.0:
return np.where(b > 2,
a*(a+1.0)/((b-2.0)*(b-1.0)),
np.inf)
elif n == 3.0:
return np.where(b > 3,
a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
np.inf)
elif n == 4.0:
return np.where(b > 4,
(a*(a + 1.0)*(a + 2.0)*(a + 3.0) /
((b - 4.0)*(b - 3.0)*(b - 2.0)*(b - 1.0))),
np.inf)
else:
raise NotImplementedError
betaprime = betaprime_gen(a=0.0, name='betaprime')
class bradford_gen(rv_continuous):
r"""A Bradford continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `bradford` is:
.. math::
f(x, c) = \frac{c}{k (1+cx)}
for :math:`0 < x < 1`, :math:`c > 0` and :math:`k = \log(1+c)`.
`bradford` takes :math:`c` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# bradford.pdf(x, c) = c / (k * (1+c*x))
return c / (c*x + 1.0) / sc.log1p(c)
def _cdf(self, x, c):
return sc.log1p(c*x) / sc.log1p(c)
def _ppf(self, q, c):
return sc.expm1(q * sc.log1p(c)) / c
def _stats(self, c, moments='mv'):
k = np.log(1.0+c)
mu = (c-k)/(c*k)
mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
g1 = None
g2 = None
if 's' in moments:
g1 = np.sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
g1 /= np.sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
if 'k' in moments:
g2 = (c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) +
6*c*k*k*(3*k-14) + 12*k**3)
g2 /= 3*c*(c*(k-2)+2*k)**2
return mu, mu2, g1, g2
def _entropy(self, c):
k = np.log(1+c)
return k/2.0 - np.log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford')
class burr_gen(rv_continuous):
r"""A Burr (Type III) continuous random variable.
%(before_notes)s
See Also
--------
fisk : a special case of either `burr` or ``burr12`` with ``d = 1``
burr12 : Burr Type XII distribution
Notes
-----
The probability density function for `burr` is:
.. math::
f(x, c, d) = c d x^{-c-1} (1+x^{-c})^{-d-1}
for :math:`x > 0`.
`burr` takes :math:`c` and :math:`d` as shape parameters.
This is the PDF corresponding to the third CDF given in Burr's list;
specifically, it is equation (11) in Burr's paper [1]_.
%(after_notes)s
References
----------
.. [1] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c, d):
# burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1)
return c * d * (x**(-c - 1.0)) * ((1 + x**(-c))**(-d - 1.0))
def _cdf(self, x, c, d):
return (1 + x**(-c))**(-d)
def _ppf(self, q, c, d):
return (q**(-1.0/d) - 1)**(-1.0/c)
def _munp(self, n, c, d):
nc = 1. * n / c
return d * sc.beta(1.0 - nc, d + nc)
burr = burr_gen(a=0.0, name='burr')
class burr12_gen(rv_continuous):
r"""A Burr (Type XII) continuous random variable.
%(before_notes)s
See Also
--------
fisk : a special case of either `burr` or ``burr12`` with ``d = 1``
burr : Burr Type III distribution
Notes
-----
    The probability density function for `burr12` is:
.. math::
f(x, c, d) = c d x^{c-1} (1+x^c)^{-d-1}
for :math:`x > 0`.
`burr12` takes :math:`c` and :math:`d` as shape parameters.
This is the PDF corresponding to the twelfth CDF given in Burr's list;
specifically, it is equation (20) in Burr's paper [1]_.
%(after_notes)s
    The Burr type 12 distribution is also sometimes referred to as the
    Singh-Maddala distribution; see, e.g., NIST [2]_.
References
----------
.. [1] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
.. [2] http://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/b12pdf.htm
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c, d):
# burr12.pdf(x, c, d) = c * d * x**(c-1) * (1+x**(c))**(-d-1)
return np.exp(self._logpdf(x, c, d))
def _logpdf(self, x, c, d):
return np.log(c) + np.log(d) + sc.xlogy(c - 1, x) + sc.xlog1py(-d-1, x**c)
def _cdf(self, x, c, d):
return -sc.expm1(self._logsf(x, c, d))
def _logcdf(self, x, c, d):
return sc.log1p(-(1 + x**c)**(-d))
def _sf(self, x, c, d):
return np.exp(self._logsf(x, c, d))
def _logsf(self, x, c, d):
return sc.xlog1py(-d, x**c)
def _ppf(self, q, c, d):
# The following is an implementation of
# ((1 - q)**(-1.0/d) - 1)**(1.0/c)
# that does a better job handling small values of q.
return sc.expm1(-1/d * sc.log1p(-q))**(1/c)
def _munp(self, n, c, d):
nc = 1. * n / c
return d * sc.beta(1.0 + nc, d - nc)
burr12 = burr12_gen(a=0.0, name='burr12')
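# Numerical sketch (added for illustration): the expm1/log1p form of
# burr12._ppf above keeps precision for tiny q, where the naive expression
# loses every significant digit because (1 - q) rounds to exactly 1.0:
#
#     >>> q, c, d = 1e-18, 2.0, 3.0
#     >>> sc.expm1(-1/d * sc.log1p(-q))**(1/c)   # ~5.77e-10
#     >>> ((1 - q)**(-1.0/d) - 1)**(1.0/c)       # 0.0, all digits lost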
class fisk_gen(burr_gen):
r"""A Fisk continuous random variable.
The Fisk distribution is also known as the log-logistic distribution, and
equals the Burr distribution with ``d == 1``.
`fisk` takes :math:`c` as a shape parameter.
%(before_notes)s
Notes
-----
The probability density function for `fisk` is:
.. math::
f(x, c) = c x^{-c-1} (1 + x^{-c})^{-2}
for :math:`x > 0`.
    `fisk` takes :math:`c` as a shape parameter.
%(after_notes)s
See Also
--------
burr
%(example)s
"""
def _pdf(self, x, c):
# fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2)
return burr_gen._pdf(self, x, c, 1.0)
def _cdf(self, x, c):
return burr_gen._cdf(self, x, c, 1.0)
def _ppf(self, x, c):
return burr_gen._ppf(self, x, c, 1.0)
def _munp(self, n, c):
return burr_gen._munp(self, n, c, 1.0)
def _entropy(self, c):
return 2 - np.log(c)
fisk = fisk_gen(a=0.0, name='fisk')
# median = loc
class cauchy_gen(rv_continuous):
r"""A Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `cauchy` is:
.. math::
f(x) = \frac{1}{\pi (1 + x^2)}
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# cauchy.pdf(x) = 1 / (pi * (1 + x**2))
return 1.0/np.pi/(1.0+x*x)
def _cdf(self, x):
return 0.5 + 1.0/np.pi*np.arctan(x)
def _ppf(self, q):
return np.tan(np.pi*q-np.pi/2.0)
def _sf(self, x):
return 0.5 - 1.0/np.pi*np.arctan(x)
def _isf(self, q):
return np.tan(np.pi/2.0-np.pi*q)
def _stats(self):
return np.nan, np.nan, np.nan, np.nan
def _entropy(self):
return np.log(4*np.pi)
def _fitstart(self, data, args=None):
# Initialize ML guesses using quartiles instead of moments.
p25, p50, p75 = np.percentile(data, [25, 50, 75])
return p50, (p75 - p25)/2
cauchy = cauchy_gen(name='cauchy')
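# Illustrative note (added): _fitstart above seeds the likelihood
# optimization from quartiles because the Cauchy distribution has no finite
# moments to build a starting guess from. For cauchy(loc, scale) the
# theoretical quartiles sit at loc -/+ scale, so the median estimates loc
# and half the interquartile range estimates scale:
#
#     >>> data = cauchy.rvs(loc=3.0, scale=2.0, size=10000, random_state=0)
#     >>> p25, p50, p75 = np.percentile(data, [25, 50, 75])
#     >>> p50, (p75 - p25) / 2   # roughly (3.0, 2.0)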
class chi_gen(rv_continuous):
r"""A chi continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi` is:
.. math::
        f(x, df) = \frac{x^{df-1} \exp(-x^2/2)}{2^{df/2-1} \Gamma(df/2)}
for :math:`x > 0`.
Special cases of `chi` are:
- ``chi(1, loc, scale)`` is equivalent to `halfnorm`
- ``chi(2, 0, scale)`` is equivalent to `rayleigh`
- ``chi(3, 0, scale)`` is equivalent to `maxwell`
`chi` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df):
sz, rndm = self._size, self._random_state
return np.sqrt(chi2.rvs(df, size=sz, random_state=rndm))
def _pdf(self, x, df):
# x**(df-1) * exp(-x**2/2)
# chi.pdf(x, df) = -------------------------
# 2**(df/2-1) * gamma(df/2)
return np.exp(self._logpdf(x, df))
def _logpdf(self, x, df):
l = np.log(2) - .5*np.log(2)*df - sc.gammaln(.5*df)
return l + sc.xlogy(df - 1., x) - .5*x**2
def _cdf(self, x, df):
return sc.gammainc(.5*df, .5*x**2)
def _ppf(self, q, df):
return np.sqrt(2*sc.gammaincinv(.5*df, q))
def _stats(self, df):
mu = np.sqrt(2)*sc.gamma(df/2.0+0.5)/sc.gamma(df/2.0)
mu2 = df - mu*mu
g1 = (2*mu**3.0 + mu*(1-2*df))/np.asarray(np.power(mu2, 1.5))
g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
g2 /= np.asarray(mu2**2.0)
return mu, mu2, g1, g2
chi = chi_gen(a=0.0, name='chi')
## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2)
class chi2_gen(rv_continuous):
r"""A chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi2` is:
.. math::
        f(x, df) = \frac{1}{2 \Gamma(df/2)} (x/2)^{df/2-1} \exp(-x/2)
`chi2` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df):
return self._random_state.chisquare(df, self._size)
def _pdf(self, x, df):
# chi2.pdf(x, df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
return np.exp(self._logpdf(x, df))
def _logpdf(self, x, df):
return sc.xlogy(df/2.-1, x) - x/2. - sc.gammaln(df/2.) - (np.log(2)*df)/2.
def _cdf(self, x, df):
return sc.chdtr(df, x)
def _sf(self, x, df):
return sc.chdtrc(df, x)
def _isf(self, p, df):
return sc.chdtri(df, p)
def _ppf(self, p, df):
return self._isf(1.0-p, df)
def _stats(self, df):
mu = df
mu2 = 2*df
g1 = 2*np.sqrt(2.0/df)
g2 = 12.0/df
return mu, mu2, g1, g2
chi2 = chi2_gen(a=0.0, name='chi2')
class cosine_gen(rv_continuous):
r"""A cosine continuous random variable.
%(before_notes)s
Notes
-----
The cosine distribution is an approximation to the normal distribution.
The probability density function for `cosine` is:
.. math::
f(x) = \frac{1}{2\pi} (1+\cos(x))
for :math:`-\pi \le x \le \pi`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
return 1.0/2/np.pi*(1+np.cos(x))
def _cdf(self, x):
return 1.0/2/np.pi*(np.pi + x + np.sin(x))
def _stats(self):
return 0.0, np.pi*np.pi/3.0-2.0, 0.0, -6.0*(np.pi**4-90)/(5.0*(np.pi*np.pi-6)**2)
def _entropy(self):
return np.log(4*np.pi)-1.0
cosine = cosine_gen(a=-np.pi, b=np.pi, name='cosine')
class dgamma_gen(rv_continuous):
r"""A double gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dgamma` is:
.. math::
        f(x, a) = \frac{1}{2\Gamma(a)} |x|^{a-1} \exp(-|x|)
for :math:`a > 0`.
`dgamma` takes :math:`a` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
sz, rndm = self._size, self._random_state
u = rndm.random_sample(size=sz)
gm = gamma.rvs(a, size=sz, random_state=rndm)
return gm * np.where(u >= 0.5, 1, -1)
def _pdf(self, x, a):
# dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))
ax = abs(x)
return 1.0/(2*sc.gamma(a))*ax**(a-1.0) * np.exp(-ax)
def _logpdf(self, x, a):
ax = abs(x)
return sc.xlogy(a - 1.0, ax) - ax - np.log(2) - sc.gammaln(a)
def _cdf(self, x, a):
fac = 0.5*sc.gammainc(a, abs(x))
return np.where(x > 0, 0.5 + fac, 0.5 - fac)
def _sf(self, x, a):
fac = 0.5*sc.gammainc(a, abs(x))
return np.where(x > 0, 0.5-fac, 0.5+fac)
def _ppf(self, q, a):
fac = sc.gammainccinv(a, 1-abs(2*q-1))
return np.where(q > 0.5, fac, -fac)
def _stats(self, a):
mu2 = a*(a+1.0)
return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma')
class dweibull_gen(rv_continuous):
r"""A double Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dweibull` is:
.. math::
        f(x, c) = \frac{c}{2} |x|^{c-1} \exp(-|x|^c)
    `dweibull` takes :math:`c` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, c):
sz, rndm = self._size, self._random_state
u = rndm.random_sample(size=sz)
w = weibull_min.rvs(c, size=sz, random_state=rndm)
return w * (np.where(u >= 0.5, 1, -1))
def _pdf(self, x, c):
# dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)
ax = abs(x)
Px = c / 2.0 * ax**(c-1.0) * np.exp(-ax**c)
return Px
def _logpdf(self, x, c):
ax = abs(x)
return np.log(c) - np.log(2.0) + sc.xlogy(c - 1.0, ax) - ax**c
def _cdf(self, x, c):
Cx1 = 0.5 * np.exp(-abs(x)**c)
return np.where(x > 0, 1 - Cx1, Cx1)
def _ppf(self, q, c):
fac = 2. * np.where(q <= 0.5, q, 1. - q)
fac = np.power(-np.log(fac), 1.0 / c)
return np.where(q > 0.5, fac, -fac)
def _munp(self, n, c):
return (1 - (n % 2)) * sc.gamma(1.0 + 1.0 * n / c)
    # Since all odd moments are zero, return them at once; returning None
    # from _stats makes the public stats method fall back to _munp, so
    # overall we save one or two gamma function evaluations here.
def _stats(self, c):
return 0, None, 0, None
dweibull = dweibull_gen(name='dweibull')
## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale)
class expon_gen(rv_continuous):
r"""An exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `expon` is:
.. math::
f(x) = \exp(-x)
for :math:`x \ge 0`.
%(after_notes)s
A common parameterization for `expon` is in terms of the rate parameter
``lambda``, such that ``pdf = lambda * exp(-lambda * x)``. This
parameterization corresponds to using ``scale = 1 / lambda``.
%(example)s
"""
def _rvs(self):
return self._random_state.standard_exponential(self._size)
def _pdf(self, x):
# expon.pdf(x) = exp(-x)
return np.exp(-x)
def _logpdf(self, x):
return -x
def _cdf(self, x):
return -sc.expm1(-x)
def _ppf(self, q):
return -sc.log1p(-q)
def _sf(self, x):
return np.exp(-x)
def _logsf(self, x):
return -x
def _isf(self, q):
return -np.log(q)
def _stats(self):
return 1.0, 1.0, 2.0, 6.0
def _entropy(self):
return 1.0
@replace_notes_in_docstring(rv_continuous, notes="""\
This function uses explicit formulas for the maximum likelihood
estimation of the exponential distribution parameters, so the
`optimizer`, `loc` and `scale` keyword arguments are ignored.\n\n""")
def fit(self, data, *args, **kwds):
if len(args) > 0:
raise TypeError("Too many arguments.")
floc = kwds.pop('floc', None)
fscale = kwds.pop('fscale', None)
# Ignore the optimizer-related keyword arguments, if given.
kwds.pop('loc', None)
kwds.pop('scale', None)
kwds.pop('optimizer', None)
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
data_min = data.min()
if floc is None:
# ML estimate of the location is the minimum of the data.
loc = data_min
else:
loc = floc
if data_min < loc:
# There are values that are less than the specified loc.
raise FitDataError("expon", lower=floc, upper=np.inf)
if fscale is None:
# ML estimate of the scale is the shifted mean.
scale = data.mean() - loc
else:
scale = fscale
# We expect the return values to be floating point, so ensure it
# by explicitly converting to float.
return float(loc), float(scale)
expon = expon_gen(a=0.0, name='expon')
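# Example sketch (added for illustration): the explicit exponential MLE
# above returns the sample minimum as loc and the shifted sample mean as
# scale:
#
#     >>> data = np.array([1.5, 2.0, 4.5])
#     >>> expon.fit(data)           # (1.5, 1.1666...) = (min, mean - min)
#     >>> expon.fit(data, floc=0)   # scale becomes the plain mean, 2.666...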
## Exponentially Modified Normal (exponential distribution
## convolved with a Normal).
## This is called an exponentially modified gaussian on wikipedia
class exponnorm_gen(rv_continuous):
r"""An exponentially modified Normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponnorm` is:
.. math::
f(x, K) = \frac{1}{2K} \exp\left(\frac{1}{2 K^2}\right) \exp(-x / K)
\text{erfc}\left(-\frac{x - 1/K}{\sqrt{2}}\right)
where the shape parameter :math:`K > 0`.
It can be thought of as the sum of a normally distributed random
value with mean ``loc`` and sigma ``scale`` and an exponentially
distributed random number with a pdf proportional to ``exp(-lambda * x)``
where ``lambda = (K * scale)**(-1)``.
%(after_notes)s
An alternative parameterization of this distribution (for example, in
`Wikipedia <http://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution>`_)
involves three parameters, :math:`\mu`, :math:`\lambda` and
:math:`\sigma`.
In the present parameterization this corresponds to having ``loc`` and
``scale`` equal to :math:`\mu` and :math:`\sigma`, respectively, and
shape parameter :math:`K = 1/(\sigma\lambda)`.
.. versionadded:: 0.16.0
%(example)s
"""
def _rvs(self, K):
expval = self._random_state.standard_exponential(self._size) * K
gval = self._random_state.standard_normal(self._size)
return expval + gval
def _pdf(self, x, K):
# exponnorm.pdf(x, K) =
        #     1/(2*K) * exp(1/(2*K**2)) * exp(-x/K) * erfc(-(x - 1/K)/sqrt(2))
invK = 1.0 / K
exparg = 0.5 * invK**2 - invK * x
# Avoid overflows; setting np.exp(exparg) to the max float works
# all right here
expval = _lazywhere(exparg < _LOGXMAX, (exparg,), np.exp, _XMAX)
return 0.5 * invK * expval * sc.erfc(-(x - invK) / np.sqrt(2))
def _logpdf(self, x, K):
invK = 1.0 / K
exparg = 0.5 * invK**2 - invK * x
return exparg + np.log(0.5 * invK * sc.erfc(-(x - invK) / np.sqrt(2)))
def _cdf(self, x, K):
invK = 1.0 / K
expval = invK * (0.5 * invK - x)
return _norm_cdf(x) - np.exp(expval) * _norm_cdf(x - invK)
def _sf(self, x, K):
invK = 1.0 / K
expval = invK * (0.5 * invK - x)
return _norm_cdf(-x) + np.exp(expval) * _norm_cdf(x - invK)
def _stats(self, K):
K2 = K * K
opK2 = 1.0 + K2
skw = 2 * K**3 * opK2**(-1.5)
krt = 6.0 * K2 * K2 * opK2**(-2)
return K, opK2, skw, krt
exponnorm = exponnorm_gen(name='exponnorm')
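# Conversion sketch (added for illustration): mapping the (mu, lambda,
# sigma) parameterization discussed in the docstring onto scipy's
# (K, loc, scale) triple:
#
#     >>> mu, lam, sigma = 0.0, 2.0, 1.0    # hypothetical EMG parameters
#     >>> K = 1.0 / (sigma * lam)
#     >>> rv = exponnorm(K, loc=mu, scale=sigma)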
class exponweib_gen(rv_continuous):
r"""An exponentiated Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponweib` is:
.. math::
f(x, a, c) = a c (1-\exp(-x^c))^{a-1} \exp(-x^c) x^{c-1}
for :math:`x > 0`, :math:`a > 0`, :math:`c > 0`.
`exponweib` takes :math:`a` and :math:`c` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, a, c):
# exponweib.pdf(x, a, c) =
# a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1)
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
negxc = -x**c
exm1c = -sc.expm1(negxc)
logp = (np.log(a) + np.log(c) + sc.xlogy(a - 1.0, exm1c) +
negxc + sc.xlogy(c - 1.0, x))
return logp
def _cdf(self, x, a, c):
exm1c = -sc.expm1(-x**c)
return exm1c**a
def _ppf(self, q, a, c):
return (-sc.log1p(-q**(1.0/a)))**np.asarray(1.0/c)
exponweib = exponweib_gen(a=0.0, name='exponweib')
class exponpow_gen(rv_continuous):
r"""An exponential power continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponpow` is:
.. math::
f(x, b) = b x^{b-1} \exp(1 + x^b - \exp(x^b))
    for :math:`x \ge 0`, :math:`b > 0`. Note that this is a different
distribution from the exponential power distribution that is also known
under the names "generalized normal" or "generalized Gaussian".
`exponpow` takes :math:`b` as a shape parameter.
%(after_notes)s
References
----------
http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Exponentialpower.pdf
%(example)s
"""
def _pdf(self, x, b):
# exponpow.pdf(x, b) = b * x**(b-1) * exp(1 + x**b - exp(x**b))
return np.exp(self._logpdf(x, b))
def _logpdf(self, x, b):
xb = x**b
f = 1 + np.log(b) + sc.xlogy(b - 1.0, x) + xb - np.exp(xb)
return f
def _cdf(self, x, b):
return -sc.expm1(-sc.expm1(x**b))
def _sf(self, x, b):
return np.exp(-sc.expm1(x**b))
def _isf(self, x, b):
return (sc.log1p(-np.log(x)))**(1./b)
def _ppf(self, q, b):
return pow(sc.log1p(-sc.log1p(-q)), 1.0/b)
exponpow = exponpow_gen(a=0.0, name='exponpow')
class fatiguelife_gen(rv_continuous):
r"""A fatigue-life (Birnbaum-Saunders) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `fatiguelife` is:
.. math::
        f(x, c) = \frac{x+1}{2c \sqrt{2\pi x^3}} \exp\left(-\frac{(x-1)^2}{2x c^2}\right)
for :math:`x > 0`.
`fatiguelife` takes :math:`c` as a shape parameter.
%(after_notes)s
References
----------
.. [1] "Birnbaum-Saunders distribution",
http://en.wikipedia.org/wiki/Birnbaum-Saunders_distribution
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, c):
z = self._random_state.standard_normal(self._size)
x = 0.5*c*z
x2 = x*x
t = 1.0 + 2*x2 + 2*x*np.sqrt(1 + x2)
return t
def _pdf(self, x, c):
# fatiguelife.pdf(x, c) =
# (x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return (np.log(x+1) - (x-1)**2 / (2.0*x*c**2) - np.log(2*c) -
0.5*(np.log(2*np.pi) + 3*np.log(x)))
def _cdf(self, x, c):
return _norm_cdf(1.0 / c * (np.sqrt(x) - 1.0/np.sqrt(x)))
def _ppf(self, q, c):
tmp = c*sc.ndtri(q)
return 0.25 * (tmp + np.sqrt(tmp**2 + 4))**2
def _stats(self, c):
        # NB: the formula for kurtosis in wikipedia seems to have an error:
        # it's 40, not 41. At least it disagrees with the one from Wolfram
        # Alpha. The latter, below, passes the tests, while the wiki one
        # doesn't. So far I haven't had the guts to actually check the
        # coefficients from the expressions for the raw moments.
c2 = c*c
mu = c2 / 2.0 + 1.0
den = 5.0 * c2 + 4.0
mu2 = c2*den / 4.0
g1 = 4 * c * (11*c2 + 6.0) / np.power(den, 1.5)
g2 = 6 * c2 * (93*c2 + 40.0) / den**2.0
return mu, mu2, g1, g2
fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife')
class foldcauchy_gen(rv_continuous):
r"""A folded Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldcauchy` is:
.. math::
f(x, c) = \frac{1}{\pi (1+(x-c)^2)} + \frac{1}{\pi (1+(x+c)^2)}
    for :math:`x \ge 0`.
`foldcauchy` takes :math:`c` as a shape parameter.
%(example)s
"""
def _rvs(self, c):
return abs(cauchy.rvs(loc=c, size=self._size,
random_state=self._random_state))
def _pdf(self, x, c):
# foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))
return 1.0/np.pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))
def _cdf(self, x, c):
return 1.0/np.pi*(np.arctan(x-c) + np.arctan(x+c))
def _stats(self, c):
return np.inf, np.inf, np.nan, np.nan
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy')
class f_gen(rv_continuous):
r"""An F continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `f` is:
.. math::
f(x, df_1, df_2) = \frac{df_2^{df_2/2} df_1^{df_1/2} x^{df_1 / 2-1}}
{(df_2+df_1 x)^{(df_1+df_2)/2}
B(df_1/2, df_2/2)}
for :math:`x > 0`.
`f` takes ``dfn`` and ``dfd`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, dfn, dfd):
return self._random_state.f(dfn, dfd, self._size)
def _pdf(self, x, dfn, dfd):
# df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
# F.pdf(x, df1, df2) = --------------------------------------------
# (df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)
return np.exp(self._logpdf(x, dfn, dfd))
def _logpdf(self, x, dfn, dfd):
n = 1.0 * dfn
m = 1.0 * dfd
lPx = m/2 * np.log(m) + n/2 * np.log(n) + (n/2 - 1) * np.log(x)
lPx -= ((n+m)/2) * np.log(m + n*x) + sc.betaln(n/2, m/2)
return lPx
def _cdf(self, x, dfn, dfd):
return sc.fdtr(dfn, dfd, x)
def _sf(self, x, dfn, dfd):
return sc.fdtrc(dfn, dfd, x)
def _ppf(self, q, dfn, dfd):
return sc.fdtri(dfn, dfd, q)
def _stats(self, dfn, dfd):
v1, v2 = 1. * dfn, 1. * dfd
v2_2, v2_4, v2_6, v2_8 = v2 - 2., v2 - 4., v2 - 6., v2 - 8.
mu = _lazywhere(
v2 > 2, (v2, v2_2),
lambda v2, v2_2: v2 / v2_2,
np.inf)
mu2 = _lazywhere(
v2 > 4, (v1, v2, v2_2, v2_4),
lambda v1, v2, v2_2, v2_4:
2 * v2 * v2 * (v1 + v2_2) / (v1 * v2_2**2 * v2_4),
np.inf)
g1 = _lazywhere(
v2 > 6, (v1, v2_2, v2_4, v2_6),
lambda v1, v2_2, v2_4, v2_6:
(2 * v1 + v2_2) / v2_6 * np.sqrt(v2_4 / (v1 * (v1 + v2_2))),
np.nan)
g1 *= np.sqrt(8.)
g2 = _lazywhere(
v2 > 8, (g1, v2_6, v2_8),
lambda g1, v2_6, v2_8: (8 + g1 * g1 * v2_6) / v2_8,
np.nan)
g2 *= 3. / 2.
return mu, mu2, g1, g2
f = f_gen(a=0.0, name='f')
## Folded Normal
## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
## note: the regress docs have the scale parameter correct, but the first
## parameter given there is a shape parameter A = c * scale
## Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous):
r"""A folded normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldnorm` is:
.. math::
        f(x, c) = \sqrt{2/\pi} \cosh(c x) \exp\left(-\frac{x^2+c^2}{2}\right)
for :math:`c \ge 0`.
`foldnorm` takes :math:`c` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return c >= 0
def _rvs(self, c):
return abs(self._random_state.standard_normal(self._size) + c)
def _pdf(self, x, c):
# foldnormal.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
return _norm_pdf(x + c) + _norm_pdf(x-c)
def _cdf(self, x, c):
return _norm_cdf(x-c) + _norm_cdf(x+c) - 1.0
def _stats(self, c):
# Regina C. Elandt, Technometrics 3, 551 (1961)
# http://www.jstor.org/stable/1266561
#
c2 = c*c
expfac = np.exp(-0.5*c2) / np.sqrt(2.*np.pi)
mu = 2.*expfac + c * sc.erf(c/np.sqrt(2))
mu2 = c2 + 1 - mu*mu
g1 = 2. * (mu*mu*mu - c2*mu - expfac)
g1 /= np.power(mu2, 1.5)
g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu
g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2
g2 = g2 / mu2**2.0 - 3.
return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0, name='foldnorm')
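# Sanity-check sketch (added for illustration): per the comments above,
# foldnorm(c) is the distribution of |Z| for Z ~ Normal(c, 1), which _rvs
# mirrors directly:
#
#     >>> z = norm.rvs(loc=1.5, size=100000, random_state=42)
#     >>> np.abs(z).mean(), foldnorm.mean(1.5)   # agree to ~2 decimals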
class weibull_min_gen(rv_continuous):
r"""Weibull minimum continuous random variable.
%(before_notes)s
See Also
--------
weibull_max
Notes
-----
The probability density function for `weibull_min` is:
.. math::
f(x, c) = c x^{c-1} \exp(-x^c)
for :math:`x > 0`, :math:`c > 0`.
`weibull_min` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
        # weibull_min.pdf(x, c) = c * x**(c-1) * exp(-x**c)
return c*pow(x, c-1)*np.exp(-pow(x, c))
def _logpdf(self, x, c):
return np.log(c) + sc.xlogy(c - 1, x) - pow(x, c)
def _cdf(self, x, c):
return -sc.expm1(-pow(x, c))
def _sf(self, x, c):
return np.exp(-pow(x, c))
def _logsf(self, x, c):
return -pow(x, c)
def _ppf(self, q, c):
return pow(-sc.log1p(-q), 1.0/c)
def _munp(self, n, c):
return sc.gamma(1.0+n*1.0/c)
def _entropy(self, c):
return -_EULER / c - np.log(c) + _EULER + 1
weibull_min = weibull_min_gen(a=0.0, name='weibull_min')
class weibull_max_gen(rv_continuous):
r"""Weibull maximum continuous random variable.
%(before_notes)s
See Also
--------
weibull_min
Notes
-----
The probability density function for `weibull_max` is:
.. math::
f(x, c) = c (-x)^{c-1} \exp(-(-x)^c)
for :math:`x < 0`, :math:`c > 0`.
`weibull_max` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
        # weibull_max.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c)
return c*pow(-x, c-1)*np.exp(-pow(-x, c))
def _logpdf(self, x, c):
return np.log(c) + sc.xlogy(c-1, -x) - pow(-x, c)
def _cdf(self, x, c):
return np.exp(-pow(-x, c))
def _logcdf(self, x, c):
return -pow(-x, c)
def _sf(self, x, c):
return -sc.expm1(-pow(-x, c))
def _ppf(self, q, c):
return -pow(-np.log(q), 1.0/c)
def _munp(self, n, c):
val = sc.gamma(1.0+n*1.0/c)
if int(n) % 2:
sgn = -1
else:
sgn = 1
return sgn * val
def _entropy(self, c):
return -_EULER / c - np.log(c) + _EULER + 1
weibull_max = weibull_max_gen(b=0.0, name='weibull_max')
# Public methods to be deprecated in frechet_r and frechet_l:
# ['__call__', 'cdf', 'entropy', 'expect', 'fit', 'fit_loc_scale', 'freeze',
# 'interval', 'isf', 'logcdf', 'logpdf', 'logsf', 'mean', 'median', 'moment',
# 'nnlf', 'pdf', 'ppf', 'rvs', 'sf', 'stats', 'std', 'var']
_frechet_r_deprec_msg = """\
The distribution `frechet_r` is a synonym for `weibull_min`; this historical
usage is deprecated because of possible confusion with the (quite different)
Frechet distribution. To preserve the existing behavior of the program, use
`scipy.stats.weibull_min`. For the Frechet distribution (i.e. the Type II
extreme value distribution), use `scipy.stats.invweibull`."""
class frechet_r_gen(weibull_min_gen):
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def __call__(self, *args, **kwargs):
return weibull_min_gen.__call__(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def cdf(self, *args, **kwargs):
return weibull_min_gen.cdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def entropy(self, *args, **kwargs):
return weibull_min_gen.entropy(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def expect(self, *args, **kwargs):
return weibull_min_gen.expect(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def fit(self, *args, **kwargs):
return weibull_min_gen.fit(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def fit_loc_scale(self, *args, **kwargs):
return weibull_min_gen.fit_loc_scale(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def freeze(self, *args, **kwargs):
return weibull_min_gen.freeze(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def interval(self, *args, **kwargs):
return weibull_min_gen.interval(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def isf(self, *args, **kwargs):
return weibull_min_gen.isf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def logcdf(self, *args, **kwargs):
return weibull_min_gen.logcdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def logpdf(self, *args, **kwargs):
return weibull_min_gen.logpdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def logsf(self, *args, **kwargs):
return weibull_min_gen.logsf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def mean(self, *args, **kwargs):
return weibull_min_gen.mean(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def median(self, *args, **kwargs):
return weibull_min_gen.median(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def moment(self, *args, **kwargs):
return weibull_min_gen.moment(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def nnlf(self, *args, **kwargs):
return weibull_min_gen.nnlf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def pdf(self, *args, **kwargs):
return weibull_min_gen.pdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def ppf(self, *args, **kwargs):
return weibull_min_gen.ppf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def rvs(self, *args, **kwargs):
return weibull_min_gen.rvs(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def sf(self, *args, **kwargs):
return weibull_min_gen.sf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def stats(self, *args, **kwargs):
return weibull_min_gen.stats(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def std(self, *args, **kwargs):
return weibull_min_gen.std(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def var(self, *args, **kwargs):
return weibull_min_gen.var(self, *args, **kwargs)
frechet_r = frechet_r_gen(a=0.0, name='frechet_r')
_frechet_l_deprec_msg = """\
The distribution `frechet_l` is a synonym for `weibull_max`; this historical
usage is deprecated because of possible confusion with the (quite different)
Frechet distribution. To preserve the existing behavior of the program, use
`scipy.stats.weibull_max`. For the Frechet distribution (i.e. the Type II
extreme value distribution), use `scipy.stats.invweibull`."""
class frechet_l_gen(weibull_max_gen):
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def __call__(self, *args, **kwargs):
return weibull_max_gen.__call__(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def cdf(self, *args, **kwargs):
return weibull_max_gen.cdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def entropy(self, *args, **kwargs):
return weibull_max_gen.entropy(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def expect(self, *args, **kwargs):
return weibull_max_gen.expect(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def fit(self, *args, **kwargs):
return weibull_max_gen.fit(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def fit_loc_scale(self, *args, **kwargs):
return weibull_max_gen.fit_loc_scale(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def freeze(self, *args, **kwargs):
return weibull_max_gen.freeze(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def interval(self, *args, **kwargs):
return weibull_max_gen.interval(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def isf(self, *args, **kwargs):
return weibull_max_gen.isf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def logcdf(self, *args, **kwargs):
return weibull_max_gen.logcdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def logpdf(self, *args, **kwargs):
return weibull_max_gen.logpdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def logsf(self, *args, **kwargs):
return weibull_max_gen.logsf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def mean(self, *args, **kwargs):
return weibull_max_gen.mean(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def median(self, *args, **kwargs):
return weibull_max_gen.median(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def moment(self, *args, **kwargs):
return weibull_max_gen.moment(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def nnlf(self, *args, **kwargs):
return weibull_max_gen.nnlf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def pdf(self, *args, **kwargs):
return weibull_max_gen.pdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def ppf(self, *args, **kwargs):
return weibull_max_gen.ppf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def rvs(self, *args, **kwargs):
return weibull_max_gen.rvs(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def sf(self, *args, **kwargs):
return weibull_max_gen.sf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def stats(self, *args, **kwargs):
return weibull_max_gen.stats(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def std(self, *args, **kwargs):
return weibull_max_gen.std(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def var(self, *args, **kwargs):
return weibull_max_gen.var(self, *args, **kwargs)
frechet_l = frechet_l_gen(b=0.0, name='frechet_l')
class genlogistic_gen(rv_continuous):
r"""A generalized logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genlogistic` is:
.. math::
f(x, c) = c \frac{\exp(-x)}
{(1 + \exp(-x))^{c+1}}
    for real :math:`x` and :math:`c > 0`.
`genlogistic` takes :math:`c` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return np.log(c) - x - (c+1.0)*sc.log1p(np.exp(-x))
def _cdf(self, x, c):
Cx = (1+np.exp(-x))**(-c)
return Cx
def _ppf(self, q, c):
vals = -np.log(pow(q, -1.0/c)-1)
return vals
def _stats(self, c):
mu = _EULER + sc.psi(c)
mu2 = np.pi*np.pi/6.0 + sc.zeta(2, c)
g1 = -2*sc.zeta(3, c) + 2*_ZETA3
g1 /= np.power(mu2, 1.5)
g2 = np.pi**4/15.0 + 6*sc.zeta(4, c)
g2 /= mu2**2.0
return mu, mu2, g1, g2
genlogistic = genlogistic_gen(name='genlogistic')
class genpareto_gen(rv_continuous):
r"""A generalized Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genpareto` is:
.. math::
f(x, c) = (1 + c x)^{-1 - 1/c}
defined for :math:`x \ge 0` if :math:`c \ge 0`, and for
:math:`0 \le x \le -1/c` if :math:`c < 0`.
`genpareto` takes :math:`c` as a shape parameter.
For ``c == 0``, `genpareto` reduces to the exponential
distribution, `expon`:
.. math::
f(x, c=0) = \exp(-x)
For ``c == -1``, `genpareto` is uniform on ``[0, 1]``:
.. math::
        f(x, c=-1) = 1
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
c = np.asarray(c)
self.b = _lazywhere(c < 0, (c,),
lambda c: -1. / c,
np.inf)
return True
def _pdf(self, x, c):
# genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.xlog1py(c + 1., c*x) / c,
-x)
def _cdf(self, x, c):
return -sc.inv_boxcox1p(-x, -c)
def _sf(self, x, c):
return sc.inv_boxcox(-x, -c)
def _logsf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.log1p(c*x) / c,
-x)
def _ppf(self, q, c):
return -sc.boxcox1p(-q, -c)
def _isf(self, q, c):
return -sc.boxcox(q, -c)
def _munp(self, n, c):
def __munp(n, c):
val = 0.0
k = np.arange(0, n + 1)
for ki, cnk in zip(k, sc.comb(n, k)):
val = val + cnk * (-1) ** ki / (1.0 - c * ki)
return np.where(c * n < 1, val * (-1.0 / c) ** n, np.inf)
return _lazywhere(c != 0, (c,),
lambda c: __munp(n, c),
sc.gamma(n + 1))
def _entropy(self, c):
return 1. + c
genpareto = genpareto_gen(a=0.0, name='genpareto')
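# Limiting-case sketch (added for illustration): checking the docstring's
# special cases numerically. As c -> 0 the pdf approaches expon, and at
# c == -1 it is the standard uniform density:
#
#     >>> x = np.linspace(0.1, 3.0, 5)
#     >>> np.allclose(genpareto.pdf(x, 1e-12), expon.pdf(x))
#     True
#     >>> genpareto.pdf(0.3, -1.0)
#     1.0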
class genexpon_gen(rv_continuous):
r"""A generalized exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genexpon` is:
.. math::
f(x, a, b, c) = (a + b (1 - \exp(-c x)))
\exp(-a x - b x + \frac{b}{c} (1-\exp(-c x)))
for :math:`x \ge 0`, :math:`a, b, c > 0`.
`genexpon` takes :math:`a`, :math:`b` and :math:`c` as shape parameters.
%(after_notes)s
References
----------
H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential
Distribution", Journal of the American Statistical Association, 1993.
N. Balakrishnan, "The Exponential Distribution: Theory, Methods and
Applications", Asit P. Basu.
%(example)s
"""
def _pdf(self, x, a, b, c):
# genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \
# exp(-a*x - b*x + b/c * (1-exp(-c*x)))
return (a + b*(-sc.expm1(-c*x)))*np.exp((-a-b)*x +
b*(-sc.expm1(-c*x))/c)
def _cdf(self, x, a, b, c):
return -sc.expm1((-a-b)*x + b*(-sc.expm1(-c*x))/c)
def _logpdf(self, x, a, b, c):
return np.log(a+b*(-sc.expm1(-c*x))) + (-a-b)*x+b*(-sc.expm1(-c*x))/c
genexpon = genexpon_gen(a=0.0, name='genexpon')
class genextreme_gen(rv_continuous):
r"""A generalized extreme value continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r
Notes
-----
For :math:`c=0`, `genextreme` is equal to `gumbel_r`.
The probability density function for `genextreme` is:
.. math::
f(x, c) = \begin{cases}
\exp(-\exp(-x)) \exp(-x) &\text{for } c = 0\\
\exp(-(1-c x)^{1/c}) (1-c x)^{1/c-1} &\text{for }
x \le 1/c, c > 0
\end{cases}
Note that several sources and software packages use the opposite
convention for the sign of the shape parameter :math:`c`.
`genextreme` takes :math:`c` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
self.b = np.where(c > 0, 1.0 / np.maximum(c, _XMIN), np.inf)
self.a = np.where(c < 0, 1.0 / np.minimum(c, -_XMIN), -np.inf)
return np.where(abs(c) == np.inf, 0, 1)
def _loglogcdf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: sc.log1p(-c*x)/c, -x)
def _pdf(self, x, c):
# genextreme.pdf(x, c) =
# exp(-exp(-x))*exp(-x), for c==0
# exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1), for x \le 1/c, c > 0
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
cx = _lazywhere((x == x) & (c != 0), (x, c), lambda x, c: c*x, 0.0)
logex2 = sc.log1p(-cx)
logpex2 = self._loglogcdf(x, c)
pex2 = np.exp(logpex2)
# Handle special cases
np.putmask(logpex2, (c == 0) & (x == -np.inf), 0.0)
logpdf = np.where((cx == 1) | (cx == -np.inf),
-np.inf,
-pex2+logpex2-logex2)
np.putmask(logpdf, (c == 1) & (x == 1), 0.0)
return logpdf
def _logcdf(self, x, c):
return -np.exp(self._loglogcdf(x, c))
def _cdf(self, x, c):
return np.exp(self._logcdf(x, c))
def _sf(self, x, c):
return -sc.expm1(self._logcdf(x, c))
def _ppf(self, q, c):
x = -np.log(-np.log(q))
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.expm1(-c * x) / c, x)
def _isf(self, q, c):
x = -np.log(-sc.log1p(-q))
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.expm1(-c * x) / c, x)
def _stats(self, c):
g = lambda n: sc.gamma(n*c + 1)
g1 = g(1)
g2 = g(2)
g3 = g(3)
g4 = g(4)
g2mg12 = np.where(abs(c) < 1e-7, (c*np.pi)**2.0/6.0, g2-g1**2.0)
gam2k = np.where(abs(c) < 1e-7, np.pi**2.0/6.0,
sc.expm1(sc.gammaln(2.0*c+1.0)-2*sc.gammaln(c + 1.0))/c**2.0)
eps = 1e-14
gamk = np.where(abs(c) < eps, -_EULER, sc.expm1(sc.gammaln(c + 1))/c)
m = np.where(c < -1.0, np.nan, -gamk)
v = np.where(c < -0.5, np.nan, g1**2.0*gam2k)
# skewness
sk1 = _lazywhere(c >= -1./3,
(c, g1, g2, g3, g2mg12),
                         lambda c, g1, g2, g3, g2mg12:
np.sign(c)*(-g3 + (g2 + 2*g2mg12)*g1)/g2mg12**1.5,
fillvalue=np.nan)
sk = np.where(abs(c) <= eps**0.29, 12*np.sqrt(6)*_ZETA3/np.pi**3, sk1)
# kurtosis
ku1 = _lazywhere(c >= -1./4,
(g1, g2, g3, g4, g2mg12),
lambda g1, g2, g3, g4, g2mg12:
(g4 + (-4*g3 + 3*(g2 + g2mg12)*g1)*g1)/g2mg12**2,
fillvalue=np.nan)
ku = np.where(abs(c) <= (eps)**0.23, 12.0/5.0, ku1-3.0)
return m, v, sk, ku
def _fitstart(self, data):
# This is better than the default shape of (1,).
g = _skew(data)
if g < 0:
a = 0.5
else:
a = -0.5
return super(genextreme_gen, self)._fitstart(data, args=(a,))
def _munp(self, n, c):
k = np.arange(0, n+1)
vals = 1.0/c**n * np.sum(
sc.comb(n, k) * (-1)**k * sc.gamma(c*k + 1),
axis=0)
return np.where(c*n > -1, vals, np.inf)
def _entropy(self, c):
return _EULER*(1 - c) + 1
genextreme = genextreme_gen(name='genextreme')
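# Illustrative sketch (not part of the library): for ``c == 0``,
# `genextreme` should agree with `gumbel_r`, defined later in this module:
#
#     >>> import numpy as np
#     >>> x = np.linspace(-3, 3, 13)
#     >>> np.allclose(genextreme.pdf(x, c=0), gumbel_r.pdf(x))
#     True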
def _digammainv(y):
# Inverse of the digamma function (real positive arguments only).
# This function is used in the `fit` method of `gamma_gen`.
# The function uses either optimize.fsolve or optimize.newton
# to solve `sc.digamma(x) - y = 0`. There is probably room for
# improvement, but currently it works over a wide range of y:
# >>> y = 64*np.random.randn(1000000)
# >>> y.min(), y.max()
# (-311.43592651416662, 351.77388222276869)
# x = [_digammainv(t) for t in y]
# np.abs(sc.digamma(x) - y).max()
# 1.1368683772161603e-13
#
_em = 0.5772156649015328606065120
func = lambda x: sc.digamma(x) - y
if y > -0.125:
x0 = np.exp(y) + 0.5
if y < 10:
# Some experimentation shows that newton reliably converges
            # much faster than fsolve in this y range.  For larger y,
# newton sometimes fails to converge.
value = optimize.newton(func, x0, tol=1e-10)
return value
elif y > -3:
x0 = np.exp(y/2.332) + 0.08661
else:
x0 = 1.0 / (-y - _em)
value, info, ier, mesg = optimize.fsolve(func, x0, xtol=1e-11,
full_output=True)
if ier != 1:
raise RuntimeError("_digammainv: fsolve failed, y = %r" % y)
return value[0]
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
r"""A gamma continuous random variable.
%(before_notes)s
See Also
--------
erlang, expon
Notes
-----
The probability density function for `gamma` is:
.. math::
f(x, a) = \frac{x^{a-1} \exp(-x)}{\Gamma(a)}
for :math:`x \ge 0`, :math:`a > 0`. Here :math:`\Gamma(a)` refers to the
gamma function.
`gamma` has a shape parameter `a` which needs to be set explicitly.
When :math:`a` is an integer, `gamma` reduces to the Erlang
distribution, and when :math:`a=1` to the exponential distribution.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
return self._random_state.standard_gamma(a, self._size)
def _pdf(self, x, a):
# gamma.pdf(x, a) = x**(a-1) * exp(-x) / gamma(a)
return np.exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return sc.xlogy(a-1.0, x) - x - sc.gammaln(a)
def _cdf(self, x, a):
return sc.gammainc(a, x)
def _sf(self, x, a):
return sc.gammaincc(a, x)
def _ppf(self, q, a):
return sc.gammaincinv(a, q)
def _stats(self, a):
return a, a, 2.0/np.sqrt(a), 6.0/a
def _entropy(self, a):
return sc.psi(a)*(1-a) + a + sc.gammaln(a)
def _fitstart(self, data):
        # The skewness of the gamma distribution is `2 / np.sqrt(a)`.
# We invert that to estimate the shape `a` using the skewness
# of the data. The formula is regularized with 1e-8 in the
# denominator to allow for degenerate data where the skewness
# is close to 0.
a = 4 / (1e-8 + _skew(data)**2)
return super(gamma_gen, self)._fitstart(data, args=(a,))
@extend_notes_in_docstring(rv_continuous, notes="""\
When the location is fixed by using the argument `floc`, this
function uses explicit formulas or solves a simpler numerical
problem than the full ML optimization problem. So in that case,
the `optimizer`, `loc` and `scale` arguments are ignored.\n\n""")
def fit(self, data, *args, **kwds):
f0 = (kwds.get('f0', None) or kwds.get('fa', None) or
kwds.get('fix_a', None))
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is None:
# loc is not fixed. Use the default fit method.
return super(gamma_gen, self).fit(data, *args, **kwds)
# Special case: loc is fixed.
if f0 is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Fixed location is handled by shifting the data.
data = np.asarray(data)
if np.any(data <= floc):
raise FitDataError("gamma", lower=floc, upper=np.inf)
if floc != 0:
# Don't do the subtraction in-place, because `data` might be a
# view of the input array.
data = data - floc
xbar = data.mean()
# Three cases to handle:
# * shape and scale both free
# * shape fixed, scale free
# * shape free, scale fixed
if fscale is None:
# scale is free
if f0 is not None:
# shape is fixed
a = f0
else:
# shape and scale are both free.
# The MLE for the shape parameter `a` is the solution to:
# np.log(a) - sc.digamma(a) - np.log(xbar) +
                #     np.log(data).mean() = 0
s = np.log(xbar) - np.log(data).mean()
func = lambda a: np.log(a) - sc.digamma(a) - s
aest = (3-s + np.sqrt((s-3)**2 + 24*s)) / (12*s)
xa = aest*(1-0.4)
xb = aest*(1+0.4)
a = optimize.brentq(func, xa, xb, disp=0)
# The MLE for the scale parameter is just the data mean
# divided by the shape parameter.
scale = xbar / a
else:
# scale is fixed, shape is free
# The MLE for the shape parameter `a` is the solution to:
# sc.digamma(a) - np.log(data).mean() + np.log(fscale) = 0
c = np.log(data).mean() - np.log(fscale)
a = _digammainv(c)
scale = fscale
return a, floc, scale
gamma = gamma_gen(a=0.0, name='gamma')
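# Usage sketch for the fixed-location fit path above (sample size, seed and
# parameter values are arbitrary):
#
#     >>> data = gamma.rvs(2.5, loc=0, scale=3, size=1000, random_state=1234)
#     >>> a, loc, scale = gamma.fit(data, floc=0)  # simpler 1-d/closed form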
class erlang_gen(gamma_gen):
"""An Erlang continuous random variable.
%(before_notes)s
See Also
--------
gamma
Notes
-----
The Erlang distribution is a special case of the Gamma distribution, with
the shape parameter `a` an integer. Note that this restriction is not
enforced by `erlang`. It will, however, generate a warning the first time
a non-integer value is used for the shape parameter.
Refer to `gamma` for examples.
"""
def _argcheck(self, a):
allint = np.all(np.floor(a) == a)
allpos = np.all(a > 0)
if not allint:
# An Erlang distribution shouldn't really have a non-integer
# shape parameter, so warn the user.
warnings.warn(
'The shape parameter of the erlang distribution '
'has been given a non-integer value %r.' % (a,),
RuntimeWarning)
return allpos
def _fitstart(self, data):
        # Override gamma_gen._fitstart so that an integer initial value is
# used. (Also regularize the division, to avoid issues when
# _skew(data) is 0 or close to 0.)
a = int(4.0 / (1e-8 + _skew(data)**2))
return super(gamma_gen, self)._fitstart(data, args=(a,))
# Trivial override of the fit method, so we can monkey-patch its
# docstring.
def fit(self, data, *args, **kwds):
return super(erlang_gen, self).fit(data, *args, **kwds)
if fit.__doc__ is not None:
fit.__doc__ = (rv_continuous.fit.__doc__ +
"""
Notes
-----
The Erlang distribution is generally defined to have integer values
for the shape parameter. This is not enforced by the `erlang` class.
When fitting the distribution, it will generally return a non-integer
value for the shape parameter. By using the keyword argument
`f0=<integer>`, the fit method can be constrained to fit the data to
a specific integer shape parameter.
""")
erlang = erlang_gen(a=0.0, name='erlang')
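# Usage sketch for the integer-shape constraint described in the `fit`
# docstring above (shape, scale, sample size and seed are arbitrary):
#
#     >>> data = erlang.rvs(3, scale=2.0, size=500, random_state=5678)
#     >>> shape, loc, scale = erlang.fit(data, f0=3, floc=0)  # shape fixed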
class gengamma_gen(rv_continuous):
r"""A generalized gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gengamma` is:
.. math::
        f(x, a, c) = \frac{|c| x^{c a-1} \exp(-x^c)}{\Gamma(a)}
for :math:`x \ge 0`, :math:`a > 0`, and :math:`c \ne 0`.
`gengamma` takes :math:`a` and :math:`c` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, c):
return (a > 0) & (c != 0)
def _pdf(self, x, a, c):
# gengamma.pdf(x, a, c) = abs(c) * x**(c*a-1) * exp(-x**c) / gamma(a)
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
return np.log(abs(c)) + sc.xlogy(c*a - 1, x) - x**c - sc.gammaln(a)
def _cdf(self, x, a, c):
xc = x**c
val1 = sc.gammainc(a, xc)
val2 = sc.gammaincc(a, xc)
return np.where(c > 0, val1, val2)
def _sf(self, x, a, c):
xc = x**c
val1 = sc.gammainc(a, xc)
val2 = sc.gammaincc(a, xc)
return np.where(c > 0, val2, val1)
def _ppf(self, q, a, c):
val1 = sc.gammaincinv(a, q)
val2 = sc.gammainccinv(a, q)
return np.where(c > 0, val1, val2)**(1.0/c)
def _isf(self, q, a, c):
val1 = sc.gammaincinv(a, q)
val2 = sc.gammainccinv(a, q)
return np.where(c > 0, val2, val1)**(1.0/c)
def _munp(self, n, a, c):
        # Pochhammer symbol: sc.poch(a, n) = gamma(a + n) / gamma(a)
return sc.poch(a, n*1.0/c)
def _entropy(self, a, c):
val = sc.psi(a)
return a*(1-val) + 1.0/c*val + sc.gammaln(a) - np.log(abs(c))
gengamma = gengamma_gen(a=0.0, name='gengamma')
class genhalflogistic_gen(rv_continuous):
r"""A generalized half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genhalflogistic` is:
.. math::
f(x, c) = \frac{2 (1 - c x)^{1/(c-1)}}{[1 + (1 - c x)^{1/c}]^2}
for :math:`0 \le x \le 1/c`, and :math:`c > 0`.
`genhalflogistic` takes :math:`c` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
self.b = 1.0 / c
return c > 0
def _pdf(self, x, c):
# genhalflogistic.pdf(x, c) =
# 2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
limit = 1.0/c
tmp = np.asarray(1-c*x)
tmp0 = tmp**(limit-1)
tmp2 = tmp0*tmp
return 2*tmp0 / (1+tmp2)**2
def _cdf(self, x, c):
limit = 1.0/c
tmp = np.asarray(1-c*x)
tmp2 = tmp**(limit)
return (1.0-tmp2) / (1+tmp2)
def _ppf(self, q, c):
return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
def _entropy(self, c):
return 2 - (2*c+1)*np.log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic')
class gompertz_gen(rv_continuous):
r"""A Gompertz (or truncated Gumbel) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gompertz` is:
.. math::
f(x, c) = c \exp(x) \exp(-c (e^x-1))
for :math:`x \ge 0`, :math:`c > 0`.
`gompertz` takes :math:`c` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1))
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return np.log(c) + x - c * sc.expm1(x)
def _cdf(self, x, c):
return -sc.expm1(-c * sc.expm1(x))
def _ppf(self, q, c):
return sc.log1p(-1.0 / c * sc.log1p(-q))
def _entropy(self, c):
return 1.0 - np.log(c) - np.exp(c)*sc.expn(1, c)
gompertz = gompertz_gen(a=0.0, name='gompertz')
class gumbel_r_gen(rv_continuous):
r"""A right-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_l, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_r` is:
.. math::
f(x) = \exp(-(x + e^{-x}))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# gumbel_r.pdf(x) = exp(-(x + exp(-x)))
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return -x - np.exp(-x)
def _cdf(self, x):
return np.exp(-np.exp(-x))
def _logcdf(self, x):
return -np.exp(-x)
def _ppf(self, q):
return -np.log(-np.log(q))
def _stats(self):
return _EULER, np.pi*np.pi/6.0, 12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
def _entropy(self):
# http://en.wikipedia.org/wiki/Gumbel_distribution
return _EULER + 1.
gumbel_r = gumbel_r_gen(name='gumbel_r')
class gumbel_l_gen(rv_continuous):
r"""A left-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_l` is:
.. math::
f(x) = \exp(x - e^x)
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# gumbel_l.pdf(x) = exp(x - exp(x))
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return x - np.exp(x)
def _cdf(self, x):
return -sc.expm1(-np.exp(x))
def _ppf(self, q):
return np.log(-sc.log1p(-q))
def _logsf(self, x):
return -np.exp(x)
def _sf(self, x):
return np.exp(-np.exp(x))
def _isf(self, x):
return np.log(-np.log(x))
def _stats(self):
return -_EULER, np.pi*np.pi/6.0, \
-12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return _EULER + 1.
gumbel_l = gumbel_l_gen(name='gumbel_l')
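# Sanity-check sketch (illustrative): `gumbel_l` is the mirror image of
# `gumbel_r`, so gumbel_l.pdf(x) should equal gumbel_r.pdf(-x):
#
#     >>> import numpy as np
#     >>> x = np.linspace(-3, 3, 13)
#     >>> np.allclose(gumbel_l.pdf(x), gumbel_r.pdf(-x))
#     True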
class halfcauchy_gen(rv_continuous):
r"""A Half-Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfcauchy` is:
.. math::
f(x) = \frac{2}{\pi (1 + x^2)}
for :math:`x \ge 0`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# halfcauchy.pdf(x) = 2 / (pi * (1 + x**2))
return 2.0/np.pi/(1.0+x*x)
def _logpdf(self, x):
return np.log(2.0/np.pi) - sc.log1p(x*x)
def _cdf(self, x):
return 2.0/np.pi*np.arctan(x)
def _ppf(self, q):
return np.tan(np.pi/2*q)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
def _entropy(self):
return np.log(2*np.pi)
halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy')
class halflogistic_gen(rv_continuous):
r"""A half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halflogistic` is:
.. math::
        f(x) = \frac{2 e^{-x}}{(1+e^{-x})^2} = \frac{1}{2} \text{sech}(x/2)^2
for :math:`x \ge 0`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2
# = 1/2 * sech(x/2)**2
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return np.log(2) - x - 2. * sc.log1p(np.exp(-x))
def _cdf(self, x):
return np.tanh(x/2.0)
def _ppf(self, q):
return 2*np.arctanh(q)
def _munp(self, n):
if n == 1:
return 2*np.log(2)
if n == 2:
return np.pi*np.pi/3.0
if n == 3:
return 9*_ZETA3
if n == 4:
return 7*np.pi**4 / 15.0
return 2*(1-pow(2.0, 1-n))*sc.gamma(n+1)*sc.zeta(n, 1)
def _entropy(self):
return 2-np.log(2)
halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
class halfnorm_gen(rv_continuous):
r"""A half-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfnorm` is:
.. math::
f(x) = \sqrt{2/\pi} e^{-\frac{x^2}{2}}
for :math:`x > 0`.
    `halfnorm` is a special case of `chi` with ``df == 1``.
%(after_notes)s
%(example)s
"""
def _rvs(self):
return abs(self._random_state.standard_normal(size=self._size))
def _pdf(self, x):
# halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)
return np.sqrt(2.0/np.pi)*np.exp(-x*x/2.0)
def _logpdf(self, x):
return 0.5 * np.log(2.0/np.pi) - x*x/2.0
def _cdf(self, x):
return _norm_cdf(x)*2-1.0
def _ppf(self, q):
return sc.ndtri((1+q)/2.0)
def _stats(self):
return (np.sqrt(2.0/np.pi),
1-2.0/np.pi,
np.sqrt(2)*(4-np.pi)/(np.pi-2)**1.5,
8*(np.pi-3)/(np.pi-2)**2)
def _entropy(self):
return 0.5*np.log(np.pi/2.0)+0.5
halfnorm = halfnorm_gen(a=0.0, name='halfnorm')
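# Sanity-check sketch for the special-case note above: `halfnorm` should
# match `chi` (defined earlier in this module) at ``df == 1``:
#
#     >>> import numpy as np
#     >>> x = np.linspace(0, 3, 7)
#     >>> np.allclose(halfnorm.pdf(x), chi.pdf(x, 1))
#     True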
class hypsecant_gen(rv_continuous):
r"""A hyperbolic secant continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `hypsecant` is:
.. math::
        f(x) = \frac{1}{\pi} \text{sech}(x)
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# hypsecant.pdf(x) = 1/pi * sech(x)
return 1.0/(np.pi*np.cosh(x))
def _cdf(self, x):
return 2.0/np.pi*np.arctan(np.exp(x))
def _ppf(self, q):
return np.log(np.tan(np.pi*q/2.0))
def _stats(self):
return 0, np.pi*np.pi/4, 0, 2
def _entropy(self):
return np.log(2*np.pi)
hypsecant = hypsecant_gen(name='hypsecant')
class gausshyper_gen(rv_continuous):
r"""A Gauss hypergeometric continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gausshyper` is:
.. math::
f(x, a, b, c, z) = C x^{a-1} (1-x)^{b-1} (1+zx)^{-c}
for :math:`0 \le x \le 1`, :math:`a > 0`, :math:`b > 0`, and
    :math:`C = \frac{1}{B(a, b) \, {}_2F_1(c, a; a+b; -z)}`
`gausshyper` takes :math:`a`, :math:`b`, :math:`c` and :math:`z` as shape
parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b, c, z):
return (a > 0) & (b > 0) & (c == c) & (z == z)
def _pdf(self, x, a, b, c, z):
# gausshyper.pdf(x, a, b, c, z) =
# C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
Cinv = sc.gamma(a)*sc.gamma(b)/sc.gamma(a+b)*sc.hyp2f1(c, a, a+b, -z)
return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
def _munp(self, n, a, b, c, z):
fac = sc.beta(n+a, b) / sc.beta(a, b)
num = sc.hyp2f1(c, a+n, a+b+n, -z)
den = sc.hyp2f1(c, a, a+b, -z)
return fac*num / den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper')
class invgamma_gen(rv_continuous):
r"""An inverted gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgamma` is:
.. math::
        f(x, a) = \frac{x^{-a-1}}{\Gamma(a)} \exp(-\frac{1}{x})
for :math:`x > 0`, :math:`a > 0`.
`invgamma` takes :math:`a` as a shape parameter.
`invgamma` is a special case of `gengamma` with ``c == -1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, a):
# invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)
return np.exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return -(a+1) * np.log(x) - sc.gammaln(a) - 1.0/x
def _cdf(self, x, a):
return sc.gammaincc(a, 1.0 / x)
def _ppf(self, q, a):
return 1.0 / sc.gammainccinv(a, q)
def _sf(self, x, a):
return sc.gammainc(a, 1.0 / x)
def _isf(self, q, a):
return 1.0 / sc.gammaincinv(a, q)
def _stats(self, a, moments='mvsk'):
m1 = _lazywhere(a > 1, (a,), lambda x: 1. / (x - 1.), np.inf)
m2 = _lazywhere(a > 2, (a,), lambda x: 1. / (x - 1.)**2 / (x - 2.),
np.inf)
g1, g2 = None, None
if 's' in moments:
g1 = _lazywhere(
a > 3, (a,),
lambda x: 4. * np.sqrt(x - 2.) / (x - 3.), np.nan)
if 'k' in moments:
g2 = _lazywhere(
a > 4, (a,),
lambda x: 6. * (5. * x - 11.) / (x - 3.) / (x - 4.), np.nan)
return m1, m2, g1, g2
def _entropy(self, a):
return a - (a+1.0) * sc.psi(a) + sc.gammaln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma')
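# Sanity-check sketch for the special-case note above: `invgamma` is
# `gengamma` with ``c == -1`` (shape value below is arbitrary):
#
#     >>> import numpy as np
#     >>> x = np.linspace(0.1, 5, 10)
#     >>> np.allclose(invgamma.pdf(x, 2.0), gengamma.pdf(x, 2.0, -1))
#     True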
# scale is gamma from DATAPLOT and B from Regress
class invgauss_gen(rv_continuous):
r"""An inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgauss` is:
.. math::
f(x, \mu) = \frac{1}{\sqrt{2 \pi x^3}}
\exp(-\frac{(x-\mu)^2}{2 x \mu^2})
for :math:`x > 0`.
`invgauss` takes :math:`\mu` as a shape parameter.
%(after_notes)s
When :math:`\mu` is too small, evaluating the cumulative distribution
function will be inaccurate due to ``cdf(mu -> 0) = inf * 0``.
NaNs are returned for :math:`\mu \le 0.0028`.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, mu):
return self._random_state.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
# invgauss.pdf(x, mu) =
# 1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
return 1.0/np.sqrt(2*np.pi*x**3.0)*np.exp(-1.0/(2*x)*((x-mu)/mu)**2)
def _logpdf(self, x, mu):
return -0.5*np.log(2*np.pi) - 1.5*np.log(x) - ((x-mu)/mu)**2/(2*x)
def _cdf(self, x, mu):
fac = np.sqrt(1.0/x)
# Numerical accuracy for small `mu` is bad. See #869.
C1 = _norm_cdf(fac*(x-mu)/mu)
        # exp(2/mu) is split into exp(1/mu) * exp(1/mu), applied on either
        # side of the (potentially tiny) normal cdf factor, to reduce the
        # chance of intermediate overflow.
        C1 += np.exp(1.0/mu) * _norm_cdf(-fac*(x+mu)/mu) * np.exp(1.0/mu)
return C1
def _stats(self, mu):
return mu, mu**3.0, 3*np.sqrt(mu), 15*mu
invgauss = invgauss_gen(a=0.0, name='invgauss')
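# Illustrative sketch of the small-`mu` caveat documented above; the exact
# cutoff depends on floating-point range, so treat this as a caution rather
# than a guaranteed output:
#
#     >>> invgauss.cdf(1.0, mu=0.001)  # inaccurate/nan for very small mu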
class norminvgauss_gen(rv_continuous):
r"""A Normal Inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `norminvgauss` is:
.. math::
        f(x; a, b) = \frac{a \, K_1(a \sqrt{1 + x^2})}{\pi \sqrt{1 + x^2}}
                     \exp(\sqrt{a^2 - b^2} + b x)
where `x` is a real number, the parameter `a` is the tail heaviness
and `b` is the asymmetry parameter satisfying `a > 0` and `abs(b) <= a`.
`K_1` is the modified Bessel function of second kind (`scipy.special.k1`).
%(after_notes)s
    A normal inverse Gaussian random variable with parameters `a` and `b` can
    be expressed as `Y = b * V + sqrt(V) * X` where `X` is `norm(0,1)`
and `V` is `invgauss(mu=1/sqrt(a**2 - b**2))`. This representation is used
to generate random variates.
References
----------
O. Barndorff-Nielsen, "Hyperbolic Distributions and Distributions on
Hyperbolae", Scandinavian Journal of Statistics, Vol. 5(3),
pp. 151-157, 1978.
O. Barndorff-Nielsen, "Normal Inverse Gaussian Distributions and Stochastic
Volatility Modelling", Scandinavian Journal of Statistics, Vol. 24,
pp. 1–13, 1997.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _argcheck(self, a, b):
return (a > 0) & (np.absolute(b) < a)
def _pdf(self, x, a, b):
gamma = np.sqrt(a**2 - b**2)
fac1 = a / np.pi * np.exp(gamma)
sq = np.hypot(1, x) # reduce overflows
return fac1 * sc.k1e(a * sq) * np.exp(b*x - a*sq) / sq
def _rvs(self, a, b):
        # note: Y = b * V + sqrt(V) * X is norminvgauss(a, b) if X is
        # standard normal and V is invgauss(mu=1/sqrt(a**2 - b**2))
gamma = np.sqrt(a**2 - b**2)
sz, rndm = self._size, self._random_state
ig = invgauss.rvs(mu=1/gamma, size=sz, random_state=rndm)
return b * ig + np.sqrt(ig) * norm.rvs(size=sz, random_state=rndm)
def _stats(self, a, b):
gamma = np.sqrt(a**2 - b**2)
mean = b / gamma
variance = a**2 / gamma**3
skewness = 3.0 * b / (a * np.sqrt(gamma))
kurtosis = 3.0 * (1 + 4 * b**2 / a**2) / gamma
return mean, variance, skewness, kurtosis
norminvgauss = norminvgauss_gen(name="norminvgauss")
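# Sketch of the variance-mixture representation used by `_rvs` above
# (sample size, seeds and tolerance are arbitrary):
#
#     >>> import numpy as np
#     >>> a, b = 2.0, 1.0
#     >>> gam = np.sqrt(a**2 - b**2)
#     >>> v = invgauss.rvs(mu=1/gam, size=100000, random_state=0)
#     >>> z = norm.rvs(size=100000, random_state=1)
#     >>> y = b*v + np.sqrt(v)*z          # distributed as norminvgauss(a, b)
#     >>> abs(y.mean() - b/gam) < 0.05    # mean of NIG(a, b) is b/gamma
#     True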
class invweibull_gen(rv_continuous):
r"""An inverted Weibull continuous random variable.
This distribution is also known as the Fréchet distribution or the
type II extreme value distribution.
%(before_notes)s
Notes
-----
The probability density function for `invweibull` is:
.. math::
f(x, c) = c x^{-c-1} \exp(-x^{-c})
    for :math:`x > 0`, :math:`c > 0`.
    `invweibull` takes :math:`c` as a shape parameter.
%(after_notes)s
References
----------
F.R.S. de Gusmao, E.M.M Ortega and G.M. Cordeiro, "The generalized inverse
Weibull distribution", Stat. Papers, vol. 52, pp. 591-619, 2011.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c):
# invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
xc1 = np.power(x, -c - 1.0)
xc2 = np.power(x, -c)
xc2 = np.exp(-xc2)
return c * xc1 * xc2
def _cdf(self, x, c):
xc1 = np.power(x, -c)
return np.exp(-xc1)
def _ppf(self, q, c):
return np.power(-np.log(q), -1.0/c)
def _munp(self, n, c):
return sc.gamma(1 - n / c)
def _entropy(self, c):
return 1+_EULER + _EULER / c - np.log(c)
invweibull = invweibull_gen(a=0, name='invweibull')
class johnsonsb_gen(rv_continuous):
r"""A Johnson SB continuous random variable.
%(before_notes)s
See Also
--------
johnsonsu
Notes
-----
The probability density function for `johnsonsb` is:
.. math::
f(x, a, b) = \frac{b}{x(1-x)} \phi(a + b \log \frac{x}{1-x} )
for :math:`0 < x < 1` and :math:`a, b > 0`, and :math:`\phi` is the normal
pdf.
`johnsonsb` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _pdf(self, x, a, b):
# johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))
trm = _norm_pdf(a + b*np.log(x/(1.0-x)))
return b*1.0/(x*(1-x))*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b*np.log(x/(1.0-x)))
def _ppf(self, q, a, b):
return 1.0 / (1 + np.exp(-1.0 / b * (_norm_ppf(q) - a)))
johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb')
class johnsonsu_gen(rv_continuous):
r"""A Johnson SU continuous random variable.
%(before_notes)s
See Also
--------
johnsonsb
Notes
-----
The probability density function for `johnsonsu` is:
.. math::
f(x, a, b) = \frac{b}{\sqrt{x^2 + 1}}
\phi(a + b \log(x + \sqrt{x^2 + 1}))
    for all real :math:`x` and :math:`a`, :math:`b > 0`, where :math:`\phi` is
    the normal pdf.
`johnsonsu` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _pdf(self, x, a, b):
# johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) *
# phi(a + b * log(x + sqrt(x**2 + 1)))
x2 = x*x
trm = _norm_pdf(a + b * np.log(x + np.sqrt(x2+1)))
return b*1.0/np.sqrt(x2+1.0)*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b * np.log(x + np.sqrt(x*x + 1)))
def _ppf(self, q, a, b):
return np.sinh((_norm_ppf(q) - a) / b)
johnsonsu = johnsonsu_gen(name='johnsonsu')
class laplace_gen(rv_continuous):
r"""A Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `laplace` is:
.. math::
f(x) = \frac{1}{2} \exp(-|x|)
%(after_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.laplace(0, 1, size=self._size)
def _pdf(self, x):
# laplace.pdf(x) = 1/2 * exp(-abs(x))
return 0.5*np.exp(-abs(x))
def _cdf(self, x):
return np.where(x > 0, 1.0-0.5*np.exp(-x), 0.5*np.exp(x))
def _ppf(self, q):
return np.where(q > 0.5, -np.log(2*(1-q)), np.log(2*q))
def _stats(self):
return 0, 2, 0, 3
def _entropy(self):
return np.log(2)+1
laplace = laplace_gen(name='laplace')
class levy_gen(rv_continuous):
r"""A Levy continuous random variable.
%(before_notes)s
See Also
--------
levy_stable, levy_l
Notes
-----
The probability density function for `levy` is:
.. math::
        f(x) = \frac{1}{x \sqrt{2\pi x}} \exp(-\frac{1}{2x})
for :math:`x > 0`.
This is the same as the Levy-stable distribution with :math:`a=1/2` and
:math:`b=1`.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x):
# levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
return 1 / np.sqrt(2*np.pi*x) / x * np.exp(-1/(2*x))
def _cdf(self, x):
# Equivalent to 2*norm.sf(np.sqrt(1/x))
return sc.erfc(np.sqrt(0.5 / x))
def _ppf(self, q):
# Equivalent to 1.0/(norm.isf(q/2)**2) or 0.5/(erfcinv(q)**2)
val = -sc.ndtri(q/2)
return 1.0 / (val * val)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
levy = levy_gen(a=0.0, name="levy")
class levy_l_gen(rv_continuous):
r"""A left-skewed Levy continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_stable
Notes
-----
The probability density function for `levy_l` is:
.. math::
f(x) = \frac{1}{|x| \sqrt{2\pi |x|}} \exp(-\frac{1}{2 |x|})
for :math:`x < 0`.
This is the same as the Levy-stable distribution with :math:`a=1/2` and
:math:`b=-1`.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x):
# levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
ax = abs(x)
return 1/np.sqrt(2*np.pi*ax)/ax*np.exp(-1/(2*ax))
def _cdf(self, x):
ax = abs(x)
return 2 * _norm_cdf(1 / np.sqrt(ax)) - 1
def _ppf(self, q):
val = _norm_ppf((q + 1.0) / 2)
return -1.0 / (val * val)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
levy_l = levy_l_gen(b=0.0, name="levy_l")
class levy_stable_gen(rv_continuous):
r"""A Levy-stable continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_l
Notes
-----
Levy-stable distribution (only random variates available -- ignore other
docs)
"""
def _rvs(self, alpha, beta):
def alpha1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
return (2/np.pi*(np.pi/2 + bTH)*tanTH -
beta*np.log((np.pi/2*W*cosTH)/(np.pi/2 + bTH)))
def beta0func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
return (W/(cosTH/np.tan(aTH) + np.sin(TH)) *
((np.cos(aTH) + np.sin(aTH)*tanTH)/W)**(1.0/alpha))
def otherwise(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
# alpha is not 1 and beta is not 0
val0 = beta*np.tan(np.pi*alpha/2)
th0 = np.arctan(val0)/alpha
val3 = W/(cosTH/np.tan(alpha*(th0 + TH)) + np.sin(TH))
res3 = val3*((np.cos(aTH) + np.sin(aTH)*tanTH -
val0*(np.sin(aTH) - np.cos(aTH)*tanTH))/W)**(1.0/alpha)
return res3
def alphanot1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
res = _lazywhere(beta == 0,
(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
beta0func, f2=otherwise)
return res
sz = self._size
alpha = broadcast_to(alpha, sz)
beta = broadcast_to(beta, sz)
TH = uniform.rvs(loc=-np.pi/2.0, scale=np.pi, size=sz,
random_state=self._random_state)
W = expon.rvs(size=sz, random_state=self._random_state)
aTH = alpha*TH
bTH = beta*TH
cosTH = np.cos(TH)
tanTH = np.tan(TH)
res = _lazywhere(alpha == 1,
(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
alpha1func, f2=alphanot1func)
return res
def _argcheck(self, alpha, beta):
return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
def _pdf(self, x, alpha, beta):
raise NotImplementedError
levy_stable = levy_stable_gen(name='levy_stable')
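# Usage sketch: per the docstring above, only `rvs` is implemented.  For
# ``alpha == 1, beta == 0`` the variates are standard Cauchy (sample size
# and seed are arbitrary):
#
#     >>> r = levy_stable.rvs(alpha=1.0, beta=0.0, size=5, random_state=123)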
class logistic_gen(rv_continuous):
r"""A logistic (or Sech-squared) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `logistic` is:
.. math::
f(x) = \frac{\exp(-x)}
                  {(1+\exp(-x))^2}
`logistic` is a special case of `genlogistic` with ``c == 1``.
%(after_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.logistic(size=self._size)
def _pdf(self, x):
# logistic.pdf(x) = exp(-x) / (1+exp(-x))**2
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return -x - 2. * sc.log1p(np.exp(-x))
def _cdf(self, x):
return sc.expit(x)
def _ppf(self, q):
return sc.logit(q)
def _sf(self, x):
return sc.expit(-x)
def _isf(self, q):
return -sc.logit(q)
def _stats(self):
return 0, np.pi*np.pi/3.0, 0, 6.0/5.0
def _entropy(self):
# http://en.wikipedia.org/wiki/Logistic_distribution
return 2.0
logistic = logistic_gen(name='logistic')
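# Sanity-check sketch for the special-case note above: `logistic` equals
# `genlogistic` with ``c == 1``:
#
#     >>> import numpy as np
#     >>> x = np.linspace(-3, 3, 13)
#     >>> np.allclose(logistic.pdf(x), genlogistic.pdf(x, 1.0))
#     True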
class loggamma_gen(rv_continuous):
r"""A log gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loggamma` is:
.. math::
f(x, c) = \frac{\exp(c x - \exp(x))}
{\gamma(c)}
    for all real :math:`x` and :math:`c > 0`.
`loggamma` takes :math:`c` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, c):
return np.log(self._random_state.gamma(c, size=self._size))
def _pdf(self, x, c):
# loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c)
return np.exp(c*x-np.exp(x)-sc.gammaln(c))
def _cdf(self, x, c):
return sc.gammainc(c, np.exp(x))
def _ppf(self, q, c):
return np.log(sc.gammaincinv(c, q))
def _stats(self, c):
# See, for example, "A Statistical Study of Log-Gamma Distribution", by
# Ping Shing Chan (thesis, McMaster University, 1993).
mean = sc.digamma(c)
var = sc.polygamma(1, c)
skewness = sc.polygamma(2, c) / np.power(var, 1.5)
excess_kurtosis = sc.polygamma(3, c) / (var*var)
return mean, var, skewness, excess_kurtosis
loggamma = loggamma_gen(name='loggamma')
class loglaplace_gen(rv_continuous):
r"""A log-Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loglaplace` is:
.. math::
f(x, c) = \begin{cases}\frac{c}{2} x^{ c-1} &\text{for } 0 < x < 1\\
\frac{c}{2} x^{-c-1} &\text{for } x \ge 1
\end{cases}
for ``c > 0``.
`loglaplace` takes ``c`` as a shape parameter.
%(after_notes)s
References
----------
T.J. Kozubowski and K. Podgorski, "A log-Laplace growth rate model",
The Mathematical Scientist, vol. 28, pp. 49-60, 2003.
%(example)s
"""
def _pdf(self, x, c):
# loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1
# = c / 2 * x**(-c-1), for x >= 1
cd2 = c/2.0
c = np.where(x < 1, c, -c)
return cd2*x**(c-1)
def _cdf(self, x, c):
return np.where(x < 1, 0.5*x**c, 1-0.5*x**(-c))
def _ppf(self, q, c):
return np.where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))
def _munp(self, n, c):
return c**2 / (c**2 - n**2)
def _entropy(self, c):
return np.log(2.0/c) + 1.0
loglaplace = loglaplace_gen(a=0.0, name='loglaplace')
def _lognorm_logpdf(x, s):
return _lazywhere(x != 0, (x, s),
lambda x, s: -np.log(x)**2 / (2*s**2) - np.log(s*x*np.sqrt(2*np.pi)),
-np.inf)
class lognorm_gen(rv_continuous):
r"""A lognormal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `lognorm` is:
.. math::
f(x, s) = \frac{1}{s x \sqrt{2\pi}}
\exp(-\frac{1}{2} (\frac{\log(x)}{s})^2)
for ``x > 0``, ``s > 0``.
`lognorm` takes ``s`` as a shape parameter.
%(after_notes)s
A common parametrization for a lognormal random variable ``Y`` is in
terms of the mean, ``mu``, and standard deviation, ``sigma``, of the
unique normally distributed random variable ``X`` such that exp(X) = Y.
This parametrization corresponds to setting ``s = sigma`` and ``scale =
exp(mu)``.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, s):
return np.exp(s * self._random_state.standard_normal(self._size))
def _pdf(self, x, s):
# lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
return np.exp(self._logpdf(x, s))
def _logpdf(self, x, s):
return _lognorm_logpdf(x, s)
def _cdf(self, x, s):
return _norm_cdf(np.log(x) / s)
def _logcdf(self, x, s):
return _norm_logcdf(np.log(x) / s)
def _ppf(self, q, s):
return np.exp(s * _norm_ppf(q))
def _sf(self, x, s):
return _norm_sf(np.log(x) / s)
def _logsf(self, x, s):
return _norm_logsf(np.log(x) / s)
def _stats(self, s):
p = np.exp(s*s)
mu = np.sqrt(p)
mu2 = p*(p-1)
g1 = np.sqrt((p-1))*(2+p)
g2 = np.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self, s):
return 0.5 * (1 + np.log(2*np.pi) + 2 * np.log(s))
@extend_notes_in_docstring(rv_continuous, notes="""\
When the location parameter is fixed by using the `floc` argument,
this function uses explicit formulas for the maximum likelihood
estimation of the log-normal shape and scale parameters, so the
`optimizer`, `loc` and `scale` keyword arguments are ignored.\n\n""")
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
if floc is None:
# loc is not fixed. Use the default fit method.
return super(lognorm_gen, self).fit(data, *args, **kwds)
f0 = (kwds.get('f0', None) or kwds.get('fs', None) or
kwds.get('fix_s', None))
fscale = kwds.get('fscale', None)
if len(args) > 1:
raise TypeError("Too many input arguments.")
for name in ['f0', 'fs', 'fix_s', 'floc', 'fscale', 'loc', 'scale',
'optimizer']:
kwds.pop(name, None)
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
# Special case: loc is fixed. Use the maximum likelihood formulas
# instead of the numerical solver.
if f0 is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
floc = float(floc)
if floc != 0:
# Shifting the data by floc. Don't do the subtraction in-place,
# because `data` might be a view of the input array.
data = data - floc
if np.any(data <= 0):
raise FitDataError("lognorm", lower=floc, upper=np.inf)
lndata = np.log(data)
# Three cases to handle:
# * shape and scale both free
# * shape fixed, scale free
# * shape free, scale fixed
if fscale is None:
# scale is free.
scale = np.exp(lndata.mean())
if f0 is None:
# shape is free.
shape = lndata.std()
else:
# shape is fixed.
shape = float(f0)
else:
# scale is fixed, shape is free
scale = float(fscale)
shape = np.sqrt(((lndata - np.log(scale))**2).mean())
return shape, floc, scale
lognorm = lognorm_gen(a=0.0, name='lognorm')
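# Usage sketch for the (mu, sigma) parametrization described in the notes
# above (parameter values are arbitrary): set ``s = sigma`` and
# ``scale = exp(mu)``.
#
#     >>> import numpy as np
#     >>> mu, sigma = 1.5, 0.75
#     >>> frozen = lognorm(s=sigma, scale=np.exp(mu))
#     >>> np.isclose(frozen.mean(), np.exp(mu + sigma**2/2))
#     True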
class gilbrat_gen(rv_continuous):
r"""A Gilbrat continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gilbrat` is:
.. math::
f(x) = \frac{1}{x \sqrt{2\pi}} \exp(-\frac{1}{2} (\log(x))^2)
`gilbrat` is a special case of `lognorm` with ``s = 1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self):
return np.exp(self._random_state.standard_normal(self._size))
def _pdf(self, x):
# gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return _lognorm_logpdf(x, 1.0)
def _cdf(self, x):
return _norm_cdf(np.log(x))
def _ppf(self, q):
return np.exp(_norm_ppf(q))
def _stats(self):
p = np.e
mu = np.sqrt(p)
mu2 = p * (p - 1)
g1 = np.sqrt((p - 1)) * (2 + p)
g2 = np.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self):
return 0.5 * np.log(2 * np.pi) + 0.5
gilbrat = gilbrat_gen(a=0.0, name='gilbrat')
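# Sanity-check sketch for the special-case note above: `gilbrat` matches
# `lognorm` at ``s == 1``:
#
#     >>> import numpy as np
#     >>> x = np.linspace(0.1, 5, 10)
#     >>> np.allclose(gilbrat.pdf(x), lognorm.pdf(x, 1.0))
#     True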
class maxwell_gen(rv_continuous):
r"""A Maxwell continuous random variable.
%(before_notes)s
Notes
-----
A special case of a `chi` distribution, with ``df = 3``, ``loc = 0.0``,
and given ``scale = a``, where ``a`` is the parameter used in the
Mathworld description [1]_.
The probability density function for `maxwell` is:
.. math::
f(x) = \sqrt{2/\pi}x^2 \exp(-x^2/2)
for ``x > 0``.
%(after_notes)s
References
----------
.. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
%(example)s
"""
def _rvs(self):
return chi.rvs(3.0, size=self._size, random_state=self._random_state)
def _pdf(self, x):
# maxwell.pdf(x) = sqrt(2/pi)x**2 * exp(-x**2/2)
return np.sqrt(2.0/np.pi)*x*x*np.exp(-x*x/2.0)
def _cdf(self, x):
return sc.gammainc(1.5, x*x/2.0)
def _ppf(self, q):
return np.sqrt(2*sc.gammaincinv(1.5, q))
def _stats(self):
val = 3*np.pi-8
return (2*np.sqrt(2.0/np.pi),
3-8/np.pi,
np.sqrt(2)*(32-10*np.pi)/val**1.5,
(-12*np.pi*np.pi + 160*np.pi - 384) / val**2.0)
def _entropy(self):
return _EULER + 0.5*np.log(2*np.pi)-0.5
maxwell = maxwell_gen(a=0.0, name='maxwell')
class mielke_gen(rv_continuous):
r"""A Mielke's Beta-Kappa continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `mielke` is:
.. math::
f(x, k, s) = \frac{k x^{k-1}}{(1+x^s)^{1+k/s}}
for ``x > 0``.
`mielke` takes ``k`` and ``s`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, k, s):
# mielke.pdf(x, k, s) = k * x**(k-1) / (1+x**s)**(1+k/s)
return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s)
def _cdf(self, x, k, s):
return x**k / (1.0+x**s)**(k*1.0/s)
def _ppf(self, q, k, s):
qsk = pow(q, s*1.0/k)
return pow(qsk/(1.0-qsk), 1.0/s)
mielke = mielke_gen(a=0.0, name='mielke')
class kappa4_gen(rv_continuous):
r"""Kappa 4 parameter distribution.
%(before_notes)s
Notes
-----
The probability density function for kappa4 is:
.. math::
f(x, h, k) = (1 - k x)^{1/k - 1} (1 - h (1 - k x)^{1/k})^{1/h-1}
if :math:`h` and :math:`k` are not equal to 0.
If :math:`h` or :math:`k` are zero then the pdf can be simplified:
h = 0 and k != 0::
kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
exp(-(1.0 - k*x)**(1.0/k))
h != 0 and k = 0::
kappa4.pdf(x, h, k) = exp(-x)*(1.0 - h*exp(-x))**(1.0/h - 1.0)
h = 0 and k = 0::
kappa4.pdf(x, h, k) = exp(-x)*exp(-exp(-x))
kappa4 takes :math:`h` and :math:`k` as shape parameters.
The kappa4 distribution returns other distributions when certain
:math:`h` and :math:`k` values are used.
+------+-------------+----------------+------------------+
| h | k=0.0 | k=1.0 | -inf<=k<=inf |
+======+=============+================+==================+
| -1.0 | Logistic | | Generalized |
| | | | Logistic(1) |
| | | | |
| | logistic(x) | | |
+------+-------------+----------------+------------------+
| 0.0 | Gumbel | Reverse | Generalized |
| | | Exponential(2) | Extreme Value |
| | | | |
| | gumbel_r(x) | | genextreme(x, k) |
+------+-------------+----------------+------------------+
| 1.0 | Exponential | Uniform | Generalized |
| | | | Pareto |
| | | | |
| | expon(x) | uniform(x) | genpareto(x, -k) |
+------+-------------+----------------+------------------+
(1) There are at least five generalized logistic distributions.
Four are described here:
https://en.wikipedia.org/wiki/Generalized_logistic_distribution
The "fifth" one is the one kappa4 should match which currently
isn't implemented in scipy:
https://en.wikipedia.org/wiki/Talk:Generalized_logistic_distribution
http://www.mathwave.com/help/easyfit/html/analyses/distributions/gen_logistic.html
(2) This distribution is currently not in scipy.
References
----------
J.C. Finney, "Optimization of a Skewed Logistic Distribution With Respect
to the Kolmogorov-Smirnov Test", A Dissertation Submitted to the Graduate
Faculty of the Louisiana State University and Agricultural and Mechanical
College, (August, 2004),
http://digitalcommons.lsu.edu/cgi/viewcontent.cgi?article=4671&context=gradschool_dissertations
J.R.M. Hosking, "The four-parameter kappa distribution". IBM J. Res.
    Develop. 38 (3), 251-258 (1994).
B. Kumphon, A. Kaew-Man, P. Seenoi, "A Rainfall Distribution for the Lampao
Site in the Chi River Basin, Thailand", Journal of Water Resource and
Protection, vol. 4, 866-869, (2012).
http://file.scirp.org/pdf/JWARP20121000009_14676002.pdf
C. Winchester, "On Estimation of the Four-Parameter Kappa Distribution", A
Thesis Submitted to Dalhousie University, Halifax, Nova Scotia, (March
2000).
http://www.nlc-bnc.ca/obj/s4/f2/dsk2/ftp01/MQ57336.pdf
%(after_notes)s
%(example)s
"""
def _argcheck(self, h, k):
condlist = [np.logical_and(h > 0, k > 0),
np.logical_and(h > 0, k == 0),
np.logical_and(h > 0, k < 0),
np.logical_and(h <= 0, k > 0),
np.logical_and(h <= 0, k == 0),
np.logical_and(h <= 0, k < 0)]
def f0(h, k):
return (1.0 - float_power(h, -k))/k
def f1(h, k):
return np.log(h)
def f3(h, k):
a = np.empty(np.shape(h))
a[:] = -np.inf
return a
def f5(h, k):
return 1.0/k
self.a = _lazyselect(condlist,
[f0, f1, f0, f3, f3, f5],
[h, k],
default=np.nan)
def f0(h, k):
return 1.0/k
def f1(h, k):
a = np.empty(np.shape(h))
a[:] = np.inf
return a
self.b = _lazyselect(condlist,
[f0, f1, f1, f0, f1, f1],
[h, k],
default=np.nan)
return h == h
def _pdf(self, x, h, k):
# kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
# (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1)
return np.exp(self._logpdf(x, h, k))
def _logpdf(self, x, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(x, h, k):
'''pdf = (1.0 - k*x)**(1.0/k - 1.0)*(
1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1.0)
logpdf = ...
'''
return (sc.xlog1py(1.0/k - 1.0, -k*x) +
sc.xlog1py(1.0/h - 1.0, -h*(1.0 - k*x)**(1.0/k)))
def f1(x, h, k):
'''pdf = (1.0 - k*x)**(1.0/k - 1.0)*np.exp(-(
1.0 - k*x)**(1.0/k))
logpdf = ...
'''
return sc.xlog1py(1.0/k - 1.0, -k*x) - (1.0 - k*x)**(1.0/k)
def f2(x, h, k):
'''pdf = np.exp(-x)*(1.0 - h*np.exp(-x))**(1.0/h - 1.0)
logpdf = ...
'''
return -x + sc.xlog1py(1.0/h - 1.0, -h*np.exp(-x))
def f3(x, h, k):
'''pdf = np.exp(-x-np.exp(-x))
logpdf = ...
'''
return -x - np.exp(-x)
return _lazyselect(condlist,
[f0, f1, f2, f3],
[x, h, k],
default=np.nan)
def _cdf(self, x, h, k):
return np.exp(self._logcdf(x, h, k))
def _logcdf(self, x, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(x, h, k):
'''cdf = (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h)
logcdf = ...
'''
return (1.0/h)*sc.log1p(-h*(1.0 - k*x)**(1.0/k))
def f1(x, h, k):
'''cdf = np.exp(-(1.0 - k*x)**(1.0/k))
logcdf = ...
'''
return -(1.0 - k*x)**(1.0/k)
def f2(x, h, k):
'''cdf = (1.0 - h*np.exp(-x))**(1.0/h)
logcdf = ...
'''
return (1.0/h)*sc.log1p(-h*np.exp(-x))
def f3(x, h, k):
'''cdf = np.exp(-np.exp(-x))
logcdf = ...
'''
return -np.exp(-x)
return _lazyselect(condlist,
[f0, f1, f2, f3],
[x, h, k],
default=np.nan)
def _ppf(self, q, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(q, h, k):
return 1.0/k*(1.0 - ((1.0 - (q**h))/h)**k)
def f1(q, h, k):
return 1.0/k*(1.0 - (-np.log(q))**k)
def f2(q, h, k):
'''ppf = -np.log((1.0 - (q**h))/h)
'''
return -sc.log1p(-(q**h)) + np.log(h)
def f3(q, h, k):
return -np.log(-np.log(q))
return _lazyselect(condlist,
[f0, f1, f2, f3],
[q, h, k],
default=np.nan)
def _stats(self, h, k):
if h >= 0 and k >= 0:
maxr = 5
elif h < 0 and k >= 0:
maxr = int(-1.0/h*k)
elif k < 0:
maxr = int(-1.0/k)
else:
maxr = 5
outputs = [None if r < maxr else np.nan for r in range(1, 5)]
return outputs[:]
kappa4 = kappa4_gen(name='kappa4')
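# Sanity-check sketch for one row of the special-case table above:
# ``h == 1`` with shape ``k`` reduces to the generalized Pareto,
# ``genpareto(x, -k)`` (the k value below is arbitrary):
#
#     >>> import numpy as np
#     >>> x = np.linspace(0, 2, 9)
#     >>> np.allclose(kappa4.pdf(x, h=1.0, k=-0.3), genpareto.pdf(x, c=0.3))
#     True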
class kappa3_gen(rv_continuous):
r"""Kappa 3 parameter distribution.
%(before_notes)s
Notes
-----
    The probability density function for `kappa3` is:
.. math::
f(x, a) = \begin{cases}
a [a + x^a]^{-(a + 1)/a}, &\text{for } x > 0\\
0.0, &\text{for } x \le 0
\end{cases}
`kappa3` takes :math:`a` as a shape parameter and :math:`a > 0`.
References
----------
P.W. Mielke and E.S. Johnson, "Three-Parameter Kappa Distribution Maximum
Likelihood and Likelihood Ratio Tests", Methods in Weather Research,
701-707, (September, 1973),
http://docs.lib.noaa.gov/rescue/mwr/101/mwr-101-09-0701.pdf
B. Kumphon, "Maximum Entropy and Maximum Likelihood Estimation for the
Three-Parameter Kappa Distribution", Open Journal of Statistics, vol 2,
415-419 (2012)
http://file.scirp.org/pdf/OJS20120400011_95789012.pdf
%(after_notes)s
%(example)s
"""
def _argcheck(self, a):
return a > 0
def _pdf(self, x, a):
# kappa3.pdf(x, a) =
# a*[a + x**a]**(-(a + 1)/a), for x > 0
# 0.0, for x <= 0
return a*(a + x**a)**(-1.0/a-1)
def _cdf(self, x, a):
return x*(a + x**a)**(-1.0/a)
def _ppf(self, q, a):
return (a/(q**-a - 1.0))**(1.0/a)
def _stats(self, a):
outputs = [None if i < a else np.nan for i in range(1, 5)]
return outputs[:]
kappa3 = kappa3_gen(a=0.0, name='kappa3')
class moyal_gen(rv_continuous):
r"""A Moyal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `moyal` is:
.. math::
f(x) = \exp(-(x + \exp(-x))/2) / \sqrt{2\pi}
%(after_notes)s
This distribution has utility in high-energy physics and radiation
detection. It describes the energy loss of a charged relativistic
particle due to ionization of the medium [1]_. It also provides an
approximation for the Landau distribution. For an in depth description
see [2]_. For additional description, see [3]_.
References
----------
.. [1] J.E. Moyal, "XXX. Theory of ionization fluctuations",
The London, Edinburgh, and Dublin Philosophical Magazine
and Journal of Science, vol 46, 263-280, (1955).
https://doi.org/10.1080/14786440308521076 (gated)
.. [2] G. Cordeiro et al., "The beta Moyal: a useful skew distribution",
International Journal of Research and Reviews in Applied Sciences,
vol 10, 171-192, (2012).
http://www.arpapress.com/Volumes/Vol10Issue2/IJRRAS_10_2_02.pdf
.. [3] C. Walck, "Handbook on Statistical Distributions for
Experimentalists; International Report SUF-PFY/96-01", Chapter 26,
University of Stockholm: Stockholm, Sweden, (2007).
www.stat.rice.edu/~dobelman/textfiles/DistributionsHandbook.pdf
.. versionadded:: 1.1.0
%(example)s
"""
def _rvs(self):
sz, rndm = self._size, self._random_state
        u1 = gamma.rvs(a=0.5, scale=2, size=sz, random_state=rndm)
return -np.log(u1)
def _pdf(self, x):
return np.exp(-0.5 * (x + np.exp(-x))) / np.sqrt(2*np.pi)
def _cdf(self, x):
return sc.erfc(np.exp(-0.5 * x) / np.sqrt(2))
def _sf(self, x):
return sc.erf(np.exp(-0.5 * x) / np.sqrt(2))
def _ppf(self, x):
return -np.log(2 * sc.erfcinv(x)**2)
def _stats(self):
mu = np.log(2) + np.euler_gamma
mu2 = np.pi**2 / 2
g1 = 28 * np.sqrt(2) * sc.zeta(3) / np.pi**3
g2 = 4.
return mu, mu2, g1, g2
def _munp(self, n):
if n == 1.0:
return np.log(2) + np.euler_gamma
elif n == 2.0:
return np.pi**2 / 2 + (np.log(2) + np.euler_gamma)**2
elif n == 3.0:
tmp1 = 1.5 * np.pi**2 * (np.log(2)+np.euler_gamma)
tmp2 = (np.log(2)+np.euler_gamma)**3
tmp3 = 14 * sc.zeta(3)
return tmp1 + tmp2 + tmp3
elif n == 4.0:
tmp1 = 4 * 14 * sc.zeta(3) * (np.log(2) + np.euler_gamma)
tmp2 = 3 * np.pi**2 * (np.log(2) + np.euler_gamma)**2
tmp3 = (np.log(2) + np.euler_gamma)**4
tmp4 = 7 * np.pi**4 / 4
return tmp1 + tmp2 + tmp3 + tmp4
else:
# return generic for higher moments
# return rv_continuous._mom1_sc(self, n, b)
return self._mom1_sc(n)
moyal = moyal_gen(name="moyal")
class nakagami_gen(rv_continuous):
r"""A Nakagami continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nakagami` is:
.. math::
f(x, nu) = \frac{2 \nu^\nu}{\Gamma(\nu)} x^{2\nu-1} \exp(-\nu x^2)
for ``x > 0``, ``nu > 0``.
`nakagami` takes ``nu`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, nu):
# nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
# x**(2*nu-1) * exp(-nu*x**2)
return 2*nu**nu/sc.gamma(nu)*(x**(2*nu-1.0))*np.exp(-nu*x*x)
def _cdf(self, x, nu):
return sc.gammainc(nu, nu*x*x)
def _ppf(self, q, nu):
return np.sqrt(1.0/nu*sc.gammaincinv(nu, q))
def _stats(self, nu):
mu = sc.gamma(nu+0.5)/sc.gamma(nu)/np.sqrt(nu)
mu2 = 1.0-mu*mu
g1 = mu * (1 - 4*nu*mu2) / 2.0 / nu / np.power(mu2, 1.5)
g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
g2 /= nu*mu2**2.0
return mu, mu2, g1, g2
nakagami = nakagami_gen(a=0.0, name="nakagami")
class ncx2_gen(rv_continuous):
r"""A non-central chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncx2` is:
.. math::
        f(x, df, nc) = \exp(-\frac{nc+x}{2}) \frac{1}{2} (x/nc)^{(df-2)/4}
                       I_{(df-2)/2}(\sqrt{nc x})
for :math:`x > 0`.
`ncx2` takes ``df`` and ``nc`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, df, nc):
return self._random_state.noncentral_chisquare(df, nc, self._size)
def _logpdf(self, x, df, nc):
return _ncx2_log_pdf(x, df, nc)
def _pdf(self, x, df, nc):
# ncx2.pdf(x, df, nc) = exp(-(nc+x)/2) * 1/2 * (x/nc)**((df-2)/4)
# * I[(df-2)/2](sqrt(nc*x))
return _ncx2_pdf(x, df, nc)
def _cdf(self, x, df, nc):
return _ncx2_cdf(x, df, nc)
def _ppf(self, q, df, nc):
return sc.chndtrix(q, df, nc)
def _stats(self, df, nc):
val = df + 2.0*nc
return (df + nc,
2*val,
np.sqrt(8)*(val+nc)/val**1.5,
12.0*(val+2*nc)/val**2.0)
ncx2 = ncx2_gen(a=0.0, name='ncx2')
class ncf_gen(rv_continuous):
r"""A non-central F distribution continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncf` is:
.. math::
        f(x, n_1, n_2, \lambda) =
            \exp(-\frac{\lambda}{2} +
                 \lambda n_1 \frac{x}{2(n_1 x + n_2)})
            n_1^{n_1/2} n_2^{n_2/2} x^{n_1/2 - 1} \\
            (n_2 + n_1 x)^{-(n_1 + n_2)/2}
            \gamma(n_1/2) \gamma(1 + n_2/2) \\
            \frac{L^{\frac{n_1}{2}-1}_{n_2/2}
                (-\lambda n_1 \frac{x}{2(n_1 x + n_2)})}
            {B(n_1/2, n_2/2) \gamma(\frac{n_1 + n_2}{2})}
for :math:`n_1 > 1`, :math:`n_2, \lambda > 0`. Here :math:`n_1` is the
degrees of freedom in the numerator, :math:`n_2` the degrees of freedom in
the denominator, :math:`\lambda` the non-centrality parameter,
:math:`\gamma` is the logarithm of the Gamma function, :math:`L_n^k` is a
generalized Laguerre polynomial and :math:`B` is the beta function.
`ncf` takes ``df1``, ``df2`` and ``nc`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, dfn, dfd, nc):
return self._random_state.noncentral_f(dfn, dfd, nc, self._size)
def _pdf_skip(self, x, dfn, dfd, nc):
        # ncf.pdf(x, df1, df2, nc) = exp(-nc/2 + nc*df1*x/(2*(df1*x+df2))) *
# df1**(df1/2) * df2**(df2/2) * x**(df1/2-1) *
# (df2+df1*x)**(-(df1+df2)/2) *
# gamma(df1/2)*gamma(1+df2/2) *
# L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2))) /
# (B(v1/2, v2/2) * gamma((v1+v2)/2))
n1, n2 = dfn, dfd
term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + sc.gammaln(n1/2.)+sc.gammaln(1+n2/2.)
term -= sc.gammaln((n1+n2)/2.0)
Px = np.exp(term)
Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
Px *= (n2+n1*x)**(-(n1+n2)/2)
Px *= sc.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)), n2/2, n1/2-1)
Px /= sc.beta(n1/2, n2/2)
# This function does not have a return. Drop it for now, the generic
# function seems to work OK.
def _cdf(self, x, dfn, dfd, nc):
return sc.ncfdtr(dfn, dfd, nc, x)
def _ppf(self, q, dfn, dfd, nc):
return sc.ncfdtri(dfn, dfd, nc, q)
def _munp(self, n, dfn, dfd, nc):
val = (dfn * 1.0/dfd)**n
term = sc.gammaln(n+0.5*dfn) + sc.gammaln(0.5*dfd-n) - sc.gammaln(dfd*0.5)
val *= np.exp(-nc / 2.0+term)
val *= sc.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
return val
def _stats(self, dfn, dfd, nc):
mu = np.where(dfd <= 2, np.inf, dfd / (dfd-2.0)*(1+nc*1.0/dfn))
mu2 = np.where(dfd <= 4, np.inf, 2*(dfd*1.0/dfn)**2.0 *
((dfn+nc/2.0)**2.0 + (dfn+nc)*(dfd-2.0)) /
((dfd-2.0)**2.0 * (dfd-4.0)))
return mu, mu2, None, None
ncf = ncf_gen(a=0.0, name='ncf')
class t_gen(rv_continuous):
r"""A Student's T continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `t` is:
.. math::
        f(x, df) = \frac{\Gamma((df+1)/2)}
                        {\sqrt{\pi df} \Gamma(df/2) (1+x^2/df)^{(df+1)/2}}
for ``df > 0``.
`t` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df):
return self._random_state.standard_t(df, size=self._size)
def _pdf(self, x, df):
# gamma((df+1)/2)
# t.pdf(x, df) = ---------------------------------------------------
# sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2)
r = np.asarray(df*1.0)
Px = np.exp(sc.gammaln((r+1)/2)-sc.gammaln(r/2))
Px /= np.sqrt(r*np.pi)*(1+(x**2)/r)**((r+1)/2)
return Px
def _logpdf(self, x, df):
r = df*1.0
lPx = sc.gammaln((r+1)/2)-sc.gammaln(r/2)
lPx -= 0.5*np.log(r*np.pi) + (r+1)/2*np.log(1+(x**2)/r)
return lPx
def _cdf(self, x, df):
return sc.stdtr(df, x)
def _sf(self, x, df):
return sc.stdtr(df, -x)
def _ppf(self, q, df):
return sc.stdtrit(df, q)
def _isf(self, q, df):
return -sc.stdtrit(df, q)
def _stats(self, df):
mu2 = _lazywhere(df > 2, (df,),
lambda df: df / (df-2.0),
np.inf)
g1 = np.where(df > 3, 0.0, np.nan)
g2 = _lazywhere(df > 4, (df,),
lambda df: 6.0 / (df-4.0),
np.nan)
return 0, mu2, g1, g2
t = t_gen(name='t')
class nct_gen(rv_continuous):
r"""A non-central Student's T continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nct` is:
.. math::
        f(x, df, nc) = \frac{df^{df/2} \Gamma(df+1)}{2^{df}
                       \exp(nc^2 / 2) (df+x^2)^{df/2} \Gamma(df/2)}
for ``df > 0``.
`nct` takes ``df`` and ``nc`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, df, nc):
return (df > 0) & (nc == nc)
def _rvs(self, df, nc):
sz, rndm = self._size, self._random_state
n = norm.rvs(loc=nc, size=sz, random_state=rndm)
c2 = chi2.rvs(df, size=sz, random_state=rndm)
return n * np.sqrt(df) / np.sqrt(c2)
def _pdf(self, x, df, nc):
# nct.pdf(x, df, nc) =
# df**(df/2) * gamma(df+1)
# ----------------------------------------------------
# 2**df*exp(nc**2/2) * (df+x**2)**(df/2) * gamma(df/2)
n = df*1.0
nc = nc*1.0
x2 = x*x
ncx2 = nc*nc*x2
fac1 = n + x2
trm1 = n/2.*np.log(n) + sc.gammaln(n+1)
trm1 -= n*np.log(2)+nc*nc/2.+(n/2.)*np.log(fac1)+sc.gammaln(n/2.)
Px = np.exp(trm1)
valF = ncx2 / (2*fac1)
trm1 = np.sqrt(2)*nc*x*sc.hyp1f1(n/2+1, 1.5, valF)
trm1 /= np.asarray(fac1*sc.gamma((n+1)/2))
trm2 = sc.hyp1f1((n+1)/2, 0.5, valF)
trm2 /= np.asarray(np.sqrt(fac1)*sc.gamma(n/2+1))
Px *= trm1+trm2
return Px
def _cdf(self, x, df, nc):
return sc.nctdtr(df, nc, x)
def _ppf(self, q, df, nc):
return sc.nctdtrit(df, nc, q)
def _stats(self, df, nc, moments='mv'):
#
# See D. Hogben, R.S. Pinkham, and M.B. Wilk,
# 'The moments of the non-central t-distribution'
        # Biometrika 48, p. 465 (1961).
# e.g. http://www.jstor.org/stable/2332772 (gated)
#
mu, mu2, g1, g2 = None, None, None, None
gfac = sc.gamma(df/2.-0.5) / sc.gamma(df/2.)
c11 = np.sqrt(df/2.) * gfac
c20 = df / (df-2.)
c22 = c20 - c11*c11
mu = np.where(df > 1, nc*c11, np.inf)
mu2 = np.where(df > 2, c22*nc*nc + c20, np.inf)
if 's' in moments:
c33t = df * (7.-2.*df) / (df-2.) / (df-3.) + 2.*c11*c11
c31t = 3.*df / (df-2.) / (df-3.)
mu3 = (c33t*nc*nc + c31t) * c11*nc
g1 = np.where(df > 3, mu3 / np.power(mu2, 1.5), np.nan)
# kurtosis
if 'k' in moments:
c44 = df*df / (df-2.) / (df-4.)
c44 -= c11*c11 * 2.*df*(5.-df) / (df-2.) / (df-3.)
c44 -= 3.*c11**4
c42 = df / (df-4.) - c11*c11 * (df-1.) / (df-3.)
c42 *= 6.*df / (df-2.)
c40 = 3.*df*df / (df-2.) / (df-4.)
mu4 = c44 * nc**4 + c42*nc**2 + c40
g2 = np.where(df > 4, mu4/mu2**2 - 3., np.nan)
return mu, mu2, g1, g2
nct = nct_gen(name="nct")
class pareto_gen(rv_continuous):
r"""A Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pareto` is:
.. math::
f(x, b) = \frac{b}{x^{b+1}}
for :math:`x \ge 1`, :math:`b > 0`.
`pareto` takes :math:`b` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, b):
# pareto.pdf(x, b) = b / x**(b+1)
return b * x**(-b-1)
def _cdf(self, x, b):
return 1 - x**(-b)
def _ppf(self, q, b):
return pow(1-q, -1.0/b)
def _sf(self, x, b):
return x**(-b)
def _stats(self, b, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
if 'm' in moments:
mask = b > 1
bt = np.extract(mask, b)
mu = valarray(np.shape(b), value=np.inf)
np.place(mu, mask, bt / (bt-1.0))
if 'v' in moments:
mask = b > 2
bt = np.extract(mask, b)
mu2 = valarray(np.shape(b), value=np.inf)
np.place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
if 's' in moments:
mask = b > 3
bt = np.extract(mask, b)
g1 = valarray(np.shape(b), value=np.nan)
vals = 2 * (bt + 1.0) * np.sqrt(bt - 2.0) / ((bt - 3.0) * np.sqrt(bt))
np.place(g1, mask, vals)
if 'k' in moments:
mask = b > 4
bt = np.extract(mask, b)
g2 = valarray(np.shape(b), value=np.nan)
vals = (6.0*np.polyval([1.0, 1.0, -6, -2], bt) /
np.polyval([1.0, -7.0, 12.0, 0.0], bt))
np.place(g2, mask, vals)
return mu, mu2, g1, g2
def _entropy(self, c):
return 1 + 1.0/c - np.log(c)
pareto = pareto_gen(a=1.0, name="pareto")
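# Sanity-check sketch (editorial illustration, not part of the original
# source): for b > 1 the mean from _stats above is b/(b-1); b = 3 gives 1.5.
assert np.isclose(pareto.mean(3.0), 1.5)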
class lomax_gen(rv_continuous):
r"""A Lomax (Pareto of the second kind) continuous random variable.
%(before_notes)s
Notes
-----
The Lomax distribution is a special case of the Pareto distribution, with
``loc=-1.0``.
The probability density function for `lomax` is:
.. math::
f(x, c) = \frac{c}{(1+x)^{c+1}}
for :math:`x \ge 0`, :math:`c > 0`.
`lomax` takes :math:`c` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# lomax.pdf(x, c) = c / (1+x)**(c+1)
return c*1.0/(1.0+x)**(c+1.0)
def _logpdf(self, x, c):
return np.log(c) - (c+1)*sc.log1p(x)
def _cdf(self, x, c):
return -sc.expm1(-c*sc.log1p(x))
def _sf(self, x, c):
return np.exp(-c*sc.log1p(x))
def _logsf(self, x, c):
return -c*sc.log1p(x)
def _ppf(self, q, c):
return sc.expm1(-sc.log1p(-q)/c)
def _stats(self, c):
mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
return mu, mu2, g1, g2
def _entropy(self, c):
return 1+1.0/c-np.log(c)
lomax = lomax_gen(a=0.0, name="lomax")
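# Sanity-check sketch (editorial illustration, not part of the original
# source): as the docstring notes, lomax is pareto shifted by loc=-1.
_lomax_chk_x = np.linspace(0.5, 4.0, 8)
assert np.allclose(lomax.pdf(_lomax_chk_x, 2.5),
                   pareto.pdf(_lomax_chk_x, 2.5, loc=-1.0))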
class pearson3_gen(rv_continuous):
r"""A pearson type III continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pearson3` is:
.. math::
f(x, skew) = \frac{|\beta|}{\gamma(\alpha)}
(\beta (x - \zeta))^{\alpha - 1} \exp(-\beta (x - \zeta))
where:
.. math::
\beta = \frac{2}{skew \cdot stddev}
\alpha = (stddev \cdot \beta)^2
\zeta = loc - \frac{\alpha}{\beta}
`pearson3` takes ``skew`` as a shape parameter.
%(after_notes)s
%(example)s
References
----------
R.W. Vogel and D.E. McMartin, "Probability Plot Goodness-of-Fit and
Skewness Estimation Procedures for the Pearson Type 3 Distribution", Water
Resources Research, Vol.27, 3149-3158 (1991).
L.R. Salvosa, "Tables of Pearson's Type III Function", Ann. Math. Statist.,
Vol.1, 191-198 (1930).
"Using Modern Computing Tools to Fit the Pearson Type III Distribution to
Aviation Loads Data", Office of Aviation Research (2003).
"""
def _preprocess(self, x, skew):
# The real 'loc' and 'scale' are handled in the calling pdf(...). The
# local variables 'loc' and 'scale' within pearson3._pdf are set to
# the defaults just to keep them as part of the equations for
# documentation.
loc = 0.0
scale = 1.0
# If skew is small, return _norm_pdf. The divide between pearson3
# and norm was found by brute force and is approximately a skew of
# 0.000016. No one, I hope, would actually use a skew value even
# close to this small.
norm2pearson_transition = 0.000016
ans, x, skew = np.broadcast_arrays([1.0], x, skew)
ans = ans.copy()
# mask is True where skew is small enough to use the normal approx.
mask = np.absolute(skew) < norm2pearson_transition
invmask = ~mask
beta = 2.0 / (skew[invmask] * scale)
alpha = (scale * beta)**2
zeta = loc - alpha / beta
transx = beta * (x[invmask] - zeta)
return ans, x, transx, mask, invmask, beta, alpha, zeta
def _argcheck(self, skew):
# The _argcheck function in rv_continuous only allows positive
# arguments. The skew argument for pearson3 can be zero (which I want
# to handle inside pearson3._pdf) or negative. So just return True
# for all skew args.
return np.ones(np.shape(skew), dtype=bool)
def _stats(self, skew):
_, _, _, _, _, beta, alpha, zeta = (
self._preprocess([1], skew))
m = zeta + alpha / beta
v = alpha / (beta**2)
s = 2.0 / (alpha**0.5) * np.sign(beta)
k = 6.0 / alpha
return m, v, s, k
def _pdf(self, x, skew):
# pearson3.pdf(x, skew) = abs(beta) / gamma(alpha) *
# (beta * (x - zeta))**(alpha - 1) * exp(-beta*(x - zeta))
# Do the calculation in _logpdf since it helps to limit
# overflow/underflow problems
ans = np.exp(self._logpdf(x, skew))
if ans.ndim == 0:
if np.isnan(ans):
return 0.0
return ans
ans[np.isnan(ans)] = 0.0
return ans
def _logpdf(self, x, skew):
# PEARSON3 logpdf GAMMA logpdf
# np.log(abs(beta))
# + (alpha - 1)*np.log(beta*(x - zeta)) + (a - 1)*np.log(x)
# - beta*(x - zeta) - x
# - sc.gammaln(alpha) - sc.gammaln(a)
ans, x, transx, mask, invmask, beta, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = np.log(_norm_pdf(x[mask]))
ans[invmask] = np.log(abs(beta)) + gamma._logpdf(transx, alpha)
return ans
def _cdf(self, x, skew):
ans, x, transx, mask, invmask, _, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = _norm_cdf(x[mask])
ans[invmask] = gamma._cdf(transx, alpha)
return ans
def _rvs(self, skew):
skew = broadcast_to(skew, self._size)
ans, _, _, mask, invmask, beta, alpha, zeta = (
self._preprocess([0], skew))
nsmall = mask.sum()
nbig = mask.size - nsmall
ans[mask] = self._random_state.standard_normal(nsmall)
ans[invmask] = (self._random_state.standard_gamma(alpha, nbig)/beta +
zeta)
if self._size == ():
ans = ans[0]
return ans
def _ppf(self, q, skew):
ans, q, _, mask, invmask, beta, alpha, zeta = (
self._preprocess(q, skew))
ans[mask] = _norm_ppf(q[mask])
ans[invmask] = sc.gammaincinv(alpha, q[invmask])/beta + zeta
return ans
pearson3 = pearson3_gen(name="pearson3")
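# Sanity-check sketch (editorial illustration, not part of the original
# source): below the norm2pearson_transition threshold in _preprocess,
# pearson3 falls back to the standard normal pdf.
_p3_chk_x = np.linspace(-2.0, 2.0, 5)
assert np.allclose(pearson3.pdf(_p3_chk_x, 1e-7), _norm_pdf(_p3_chk_x))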
class powerlaw_gen(rv_continuous):
r"""A power-function continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlaw` is:
.. math::
f(x, a) = a x^{a-1}
for :math:`0 \le x \le 1`, :math:`a > 0`.
`powerlaw` takes :math:`a` as a shape parameter.
%(after_notes)s
`powerlaw` is a special case of `beta` with ``b == 1``.
%(example)s
"""
def _pdf(self, x, a):
# powerlaw.pdf(x, a) = a * x**(a-1)
return a*x**(a-1.0)
def _logpdf(self, x, a):
return np.log(a) + sc.xlogy(a - 1, x)
def _cdf(self, x, a):
return x**(a*1.0)
def _logcdf(self, x, a):
return a*np.log(x)
def _ppf(self, q, a):
return pow(q, 1.0/a)
def _stats(self, a):
return (a / (a + 1.0),
a / (a + 2.0) / (a + 1.0) ** 2,
-2.0 * ((a - 1.0) / (a + 3.0)) * np.sqrt((a + 2.0) / a),
6 * np.polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4)))
def _entropy(self, a):
return 1 - 1.0/a - np.log(a)
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw")
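# Sanity-check sketch (editorial illustration, not part of the original
# source): as the docstring notes, powerlaw is beta with its second shape
# parameter fixed to 1 (`beta` is the distribution defined earlier in this
# module).
_pl_chk_x = np.linspace(0.1, 0.9, 5)
assert np.allclose(powerlaw.pdf(_pl_chk_x, 2.5),
                   beta.pdf(_pl_chk_x, 2.5, 1.0))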
class powerlognorm_gen(rv_continuous):
r"""A power log-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlognorm` is:
.. math::
f(x, c, s) = \frac{c}{x s} \phi(\log(x)/s)
(\Phi(-\log(x)/s))^{c-1}
where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf,
and :math:`x > 0`, :math:`s, c > 0`.
`powerlognorm` takes :math:`c` and :math:`s` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c, s):
# powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) *
# (Phi(-log(x)/s))**(c-1),
return (c/(x*s) * _norm_pdf(np.log(x)/s) *
pow(_norm_cdf(-np.log(x)/s), c*1.0-1.0))
def _cdf(self, x, c, s):
return 1.0 - pow(_norm_cdf(-np.log(x)/s), c*1.0)
def _ppf(self, q, c, s):
return np.exp(-s * _norm_ppf(pow(1.0 - q, 1.0 / c)))
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm")
class powernorm_gen(rv_continuous):
r"""A power normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powernorm` is:
.. math::
f(x, c) = c \phi(x) (\Phi(-x))^{c-1}
where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf,
and :math:`x > 0`, :math:`c > 0`.
`powernorm` takes :math:`c` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1)
return c*_norm_pdf(x) * (_norm_cdf(-x)**(c-1.0))
def _logpdf(self, x, c):
return np.log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)
def _cdf(self, x, c):
return 1.0-_norm_cdf(-x)**(c*1.0)
def _ppf(self, q, c):
return -_norm_ppf(pow(1.0 - q, 1.0 / c))
powernorm = powernorm_gen(name='powernorm')
class rdist_gen(rv_continuous):
r"""An R-distributed continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rdist` is:
.. math::
f(x, c) = \frac{(1-x^2)^{c/2-1}}{B(1/2, c/2)}
for :math:`-1 \le x \le 1`, :math:`c > 0`.
`rdist` takes :math:`c` as a shape parameter.
This distribution includes the following distribution kernels as
special cases::
c = 2: uniform
c = 4: Epanechnikov (parabolic)
c = 6: quartic (biweight)
c = 8: triweight
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# rdist.pdf(x, c) = (1-x**2)**(c/2-1) / B(1/2, c/2)
return np.power((1.0 - x**2), c / 2.0 - 1) / sc.beta(0.5, c / 2.0)
def _cdf(self, x, c):
term1 = x / sc.beta(0.5, c / 2.0)
res = 0.5 + term1 * sc.hyp2f1(0.5, 1 - c / 2.0, 1.5, x**2)
# There's an issue with hyp2f1, it returns nans near x = +-1, c > 100.
# Use the generic implementation in that case. See gh-1285 for
# background.
if np.any(np.isnan(res)):
return rv_continuous._cdf(self, x, c)
return res
def _munp(self, n, c):
numerator = (1 - (n % 2)) * sc.beta((n + 1.0) / 2, c / 2.0)
return numerator / sc.beta(1. / 2, c / 2.)
rdist = rdist_gen(a=-1.0, b=1.0, name="rdist")
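# Sanity-check sketch (editorial illustration, not part of the original
# source): per the special cases in the docstring, c = 2 gives the uniform
# kernel on [-1, 1], i.e. a constant density of 1/2.
_rd_chk_x = np.linspace(-0.9, 0.9, 7)
assert np.allclose(rdist.pdf(_rd_chk_x, 2.0), 0.5)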
class rayleigh_gen(rv_continuous):
r"""A Rayleigh continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rayleigh` is:
.. math::
f(r) = r \exp(-r^2/2)
for :math:`r \ge 0`.
`rayleigh` is a special case of `chi` with ``df == 2``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self):
return chi.rvs(2, size=self._size, random_state=self._random_state)
def _pdf(self, r):
# rayleigh.pdf(r) = r * exp(-r**2/2)
return np.exp(self._logpdf(r))
def _logpdf(self, r):
return np.log(r) - 0.5 * r * r
def _cdf(self, r):
return -sc.expm1(-0.5 * r**2)
def _ppf(self, q):
return np.sqrt(-2 * sc.log1p(-q))
def _sf(self, r):
return np.exp(self._logsf(r))
def _logsf(self, r):
return -0.5 * r * r
def _isf(self, q):
return np.sqrt(-2 * np.log(q))
def _stats(self):
val = 4 - np.pi
return (np.sqrt(np.pi/2),
val/2,
2*(np.pi-3)*np.sqrt(np.pi)/val**1.5,
6*np.pi/val-16/val**2)
def _entropy(self):
return _EULER/2.0 + 1 - 0.5*np.log(2)
rayleigh = rayleigh_gen(a=0.0, name="rayleigh")
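# Sanity-check sketch (editorial illustration, not part of the original
# source): as the docstring notes, rayleigh is chi with df == 2 (`chi` is
# the distribution defined earlier in this module).
_ray_chk_x = np.linspace(0.1, 3.0, 6)
assert np.allclose(rayleigh.pdf(_ray_chk_x), chi.pdf(_ray_chk_x, 2.0))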
class reciprocal_gen(rv_continuous):
r"""A reciprocal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `reciprocal` is:
.. math::
f(x, a, b) = \frac{1}{x \log(b/a)}
for :math:`a \le x \le b`, :math:`a, b > 0`.
`reciprocal` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
self.a = a
self.b = b
self.d = np.log(b*1.0 / a)
return (a > 0) & (b > 0) & (b > a)
def _pdf(self, x, a, b):
# reciprocal.pdf(x, a, b) = 1 / (x*log(b/a))
return 1.0 / (x * self.d)
def _logpdf(self, x, a, b):
return -np.log(x) - np.log(self.d)
def _cdf(self, x, a, b):
return (np.log(x)-np.log(a)) / self.d
def _ppf(self, q, a, b):
return a*pow(b*1.0/a, q)
def _munp(self, n, a, b):
return 1.0/self.d / n * (pow(b*1.0, n) - pow(a*1.0, n))
def _entropy(self, a, b):
return 0.5*np.log(a*b)+np.log(np.log(b/a))
reciprocal = reciprocal_gen(name="reciprocal")
class rice_gen(rv_continuous):
r"""A Rice continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rice` is:
.. math::
f(x, b) = x \exp(- \frac{x^2 + b^2}{2}) I[0](x b)
for :math:`x > 0`, :math:`b > 0`.
`rice` takes :math:`b` as a shape parameter.
%(after_notes)s
The Rice distribution describes the length, :math:`r`, of a 2-D vector with
components :math:`(U+u, V+v)`, where :math:`U, V` are constant, :math:`u,
v` are independent Gaussian random variables with standard deviation
:math:`s`. Let :math:`R = \sqrt{U^2 + V^2}`. Then the pdf of :math:`r` is
``rice.pdf(x, R/s, scale=s)``.
%(example)s
"""
def _argcheck(self, b):
return b >= 0
def _rvs(self, b):
# http://en.wikipedia.org/wiki/Rice_distribution
t = b/np.sqrt(2) + self._random_state.standard_normal(size=(2,) +
self._size)
return np.sqrt((t*t).sum(axis=0))
def _cdf(self, x, b):
return sc.chndtr(np.square(x), 2, np.square(b))
def _ppf(self, q, b):
return np.sqrt(sc.chndtrix(q, 2, np.square(b)))
def _pdf(self, x, b):
# rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
#
# We use (x**2 + b**2)/2 = ((x-b)**2)/2 + xb.
# The factor of np.exp(-xb) is then included in the i0e function
# in place of the modified Bessel function, i0, improving
# numerical stability for large values of xb.
return x * np.exp(-(x-b)*(x-b)/2.0) * sc.i0e(x*b)
def _munp(self, n, b):
nd2 = n/2.0
n1 = 1 + nd2
b2 = b*b/2.0
return (2.0**(nd2) * np.exp(-b2) * sc.gamma(n1) *
sc.hyp1f1(n1, 1, b2))
rice = rice_gen(a=0.0, name="rice")
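# Sanity-check sketch (editorial illustration, not part of the original
# source): with b = 0 the Bessel factor i0e(0) equals 1, so the Rice pdf
# collapses to the Rayleigh pdf.
_rice_chk_x = np.linspace(0.1, 3.0, 6)
assert np.allclose(rice.pdf(_rice_chk_x, 0.0), rayleigh.pdf(_rice_chk_x))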
# FIXME: PPF does not work.
class recipinvgauss_gen(rv_continuous):
r"""A reciprocal inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `recipinvgauss` is:
.. math::
f(x, \mu) = \frac{1}{\sqrt{2\pi x}}
\exp\left(-\frac{(1 - \mu x)^2}{2 x \mu^2}\right)
for :math:`x \ge 0`.
`recipinvgauss` takes :math:`\mu` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, mu):
# recipinvgauss.pdf(x, mu) =
# 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
return 1.0/np.sqrt(2*np.pi*x)*np.exp(-(1-mu*x)**2.0 / (2*x*mu**2.0))
def _logpdf(self, x, mu):
return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*np.log(2*np.pi*x)
def _cdf(self, x, mu):
trm1 = 1.0/mu - x
trm2 = 1.0/mu + x
isqx = 1.0/np.sqrt(x)
return 1.0-_norm_cdf(isqx*trm1)-np.exp(2.0/mu)*_norm_cdf(-isqx*trm2)
def _rvs(self, mu):
return 1.0/self._random_state.wald(mu, 1.0, size=self._size)
recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss')
class semicircular_gen(rv_continuous):
r"""A semicircular continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `semicircular` is:
.. math::
f(x) = \frac{2}{\pi} \sqrt{1-x^2}
for :math:`-1 \le x \le 1`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# semicircular.pdf(x) = 2/pi * sqrt(1-x**2)
return 2.0/np.pi*np.sqrt(1-x*x)
def _cdf(self, x):
return 0.5+1.0/np.pi*(x*np.sqrt(1-x*x) + np.arcsin(x))
def _stats(self):
return 0, 0.25, 0, -1.0
def _entropy(self):
return 0.64472988584940017414
semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular")
class skew_norm_gen(rv_continuous):
r"""A skew-normal random variable.
%(before_notes)s
Notes
-----
The pdf is::
skewnorm.pdf(x, a) = 2 * norm.pdf(x) * norm.cdf(a*x)
`skewnorm` takes :math:`a` as a skewness parameter.
When ``a = 0`` the distribution is identical to a normal distribution.
rvs implements the method of [1]_.
%(after_notes)s
%(example)s
References
----------
.. [1] A. Azzalini and A. Capitanio (1999). Statistical applications of the
multivariate skew-normal distribution. J. Roy. Statist. Soc., B 61, 579-602.
http://azzalini.stat.unipd.it/SN/faq-r.html
"""
def _argcheck(self, a):
return np.isfinite(a)
def _pdf(self, x, a):
return 2.*_norm_pdf(x)*_norm_cdf(a*x)
def _cdf_single(self, x, *args):
if x <= 0:
cdf = integrate.quad(self._pdf, self.a, x, args=args)[0]
else:
t1 = integrate.quad(self._pdf, self.a, 0, args=args)[0]
t2 = integrate.quad(self._pdf, 0, x, args=args)[0]
cdf = t1 + t2
if cdf > 1:
# Presumably numerical noise, e.g. 1.0000000000000002
cdf = 1.0
return cdf
def _sf(self, x, a):
return self._cdf(-x, -a)
def _rvs(self, a):
u0 = self._random_state.normal(size=self._size)
v = self._random_state.normal(size=self._size)
d = a/np.sqrt(1 + a**2)
u1 = d*u0 + v*np.sqrt(1 - d**2)
return np.where(u0 >= 0, u1, -u1)
def _stats(self, a, moments='mvsk'):
output = [None, None, None, None]
const = np.sqrt(2/np.pi) * a/np.sqrt(1 + a**2)
if 'm' in moments:
output[0] = const
if 'v' in moments:
output[1] = 1 - const**2
if 's' in moments:
output[2] = ((4 - np.pi)/2) * (const/np.sqrt(1 - const**2))**3
if 'k' in moments:
output[3] = (2*(np.pi - 3)) * (const**4/(1 - const**2)**2)
return output
skewnorm = skew_norm_gen(name='skewnorm')
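# Sanity-check sketch (editorial illustration, not part of the original
# source): with a = 0 the factor _norm_cdf(0) is 1/2, so skewnorm reduces
# to the standard normal, as the docstring states.
_sn_chk_x = np.linspace(-2.0, 2.0, 5)
assert np.allclose(skewnorm.pdf(_sn_chk_x, 0.0), _norm_pdf(_sn_chk_x))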
class trapz_gen(rv_continuous):
r"""A trapezoidal continuous random variable.
%(before_notes)s
Notes
-----
The trapezoidal distribution can be represented with an up-sloping line
from ``loc`` to ``(loc + c*scale)``, then constant to ``(loc + d*scale)``
and then downsloping from ``(loc + d*scale)`` to ``(loc+scale)``.
`trapz` takes :math:`c` and :math:`d` as shape parameters.
%(after_notes)s
The standard form is in the range [0, 1] with c the mode.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
"""
def _argcheck(self, c, d):
return (c >= 0) & (c <= 1) & (d >= 0) & (d <= 1) & (d >= c)
def _pdf(self, x, c, d):
u = 2 / (d-c+1)
return _lazyselect([x < c,
(c <= x) & (x <= d),
x > d],
[lambda x, c, d, u: u * x / c,
lambda x, c, d, u: u,
lambda x, c, d, u: u * (1-x) / (1-d)],
(x, c, d, u))
def _cdf(self, x, c, d):
return _lazyselect([x < c,
(c <= x) & (x <= d),
x > d],
[lambda x, c, d: x**2 / c / (d-c+1),
lambda x, c, d: (c + 2 * (x-c)) / (d-c+1),
lambda x, c, d: 1-((1-x) ** 2
/ (d-c+1) / (1-d))],
(x, c, d))
def _ppf(self, q, c, d):
qc, qd = self._cdf(c, c, d), self._cdf(d, c, d)
condlist = [q < qc, q <= qd, q > qd]
choicelist = [np.sqrt(q * c * (1 + d - c)),
0.5 * q * (1 + d - c) + 0.5 * c,
1 - np.sqrt((1 - q) * (d - c + 1) * (1 - d))]
return np.select(condlist, choicelist)
trapz = trapz_gen(a=0.0, b=1.0, name="trapz")
class triang_gen(rv_continuous):
r"""A triangular continuous random variable.
%(before_notes)s
Notes
-----
The triangular distribution can be represented with an up-sloping line from
``loc`` to ``(loc + c*scale)`` and then downsloping for ``(loc + c*scale)``
to ``(loc+scale)``.
`triang` takes :math:`c` as a shape parameter.
%(after_notes)s
The standard form is in the range [0, 1] with c the mode.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
"""
def _rvs(self, c):
return self._random_state.triangular(0, c, 1, self._size)
def _argcheck(self, c):
return (c >= 0) & (c <= 1)
def _pdf(self, x, c):
# 0: edge case where c=0
# 1: generalised case for x < c, don't use x <= c, as it doesn't cope
# with c = 0.
# 2: generalised case for x >= c, but doesn't cope with c = 1
# 3: edge case where c=1
r = _lazyselect([c == 0,
x < c,
(x >= c) & (c != 1),
c == 1],
[lambda x, c: 2 - 2 * x,
lambda x, c: 2 * x / c,
lambda x, c: 2 * (1 - x) / (1 - c),
lambda x, c: 2 * x],
(x, c))
return r
def _cdf(self, x, c):
r = _lazyselect([c == 0,
x < c,
(x >= c) & (c != 1),
c == 1],
[lambda x, c: 2*x - x*x,
lambda x, c: x * x / c,
lambda x, c: (x*x - 2*x + c) / (c-1),
lambda x, c: x * x],
(x, c))
return r
def _ppf(self, q, c):
return np.where(q < c, np.sqrt(c * q), 1-np.sqrt((1-c) * (1-q)))
def _stats(self, c):
return ((c+1.0)/3.0,
(1.0-c+c*c)/18,
np.sqrt(2)*(2*c-1)*(c+1)*(c-2) / (5*np.power((1.0-c+c*c), 1.5)),
-3.0/5.0)
def _entropy(self, c):
return 0.5-np.log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang")
class truncexpon_gen(rv_continuous):
r"""A truncated exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `truncexpon` is:
.. math::
f(x, b) = \frac{\exp(-x)}{1 - \exp(-b)}
for :math:`0 < x < b`.
`truncexpon` takes :math:`b` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, b):
self.b = b
return b > 0
def _pdf(self, x, b):
# truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b))
return np.exp(-x)/(-sc.expm1(-b))
def _logpdf(self, x, b):
return -x - np.log(-sc.expm1(-b))
def _cdf(self, x, b):
return sc.expm1(-x)/sc.expm1(-b)
def _ppf(self, q, b):
return -sc.log1p(q*sc.expm1(-b))
def _munp(self, n, b):
# wrong answer with formula, same as in continuous.pdf
# return sc.gamma(n+1) - sc.gammainc(1+n, b)
if n == 1:
return (1-(b+1)*np.exp(-b))/(-sc.expm1(-b))
elif n == 2:
return 2*(1-0.5*(b*b+2*b+2)*np.exp(-b))/(-sc.expm1(-b))
else:
# return generic for higher moments
# return rv_continuous._mom1_sc(self, n, b)
return self._mom1_sc(n, b)
def _entropy(self, b):
eB = np.exp(b)
return np.log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
truncexpon = truncexpon_gen(a=0.0, name='truncexpon')
class truncnorm_gen(rv_continuous):
r"""A truncated normal continuous random variable.
%(before_notes)s
Notes
-----
The standard form of this distribution is a standard normal truncated to
the range [a, b] --- notice that a and b are defined over the domain of the
standard normal. To convert clip values for a specific mean and standard
deviation, use::
a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
`truncnorm` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
self.a = a
self.b = b
self._nb = _norm_cdf(b)
self._na = _norm_cdf(a)
self._sb = _norm_sf(b)
self._sa = _norm_sf(a)
self._delta = np.where(self.a > 0,
-(self._sb - self._sa),
self._nb - self._na)
self._logdelta = np.log(self._delta)
return a != b
def _pdf(self, x, a, b):
return _norm_pdf(x) / self._delta
def _logpdf(self, x, a, b):
return _norm_logpdf(x) - self._logdelta
def _cdf(self, x, a, b):
return (_norm_cdf(x) - self._na) / self._delta
def _ppf(self, q, a, b):
# XXX Use _lazywhere...
ppf = np.where(self.a > 0,
_norm_isf(q*self._sb + self._sa*(1.0-q)),
_norm_ppf(q*self._nb + self._na*(1.0-q)))
return ppf
def _stats(self, a, b):
nA, nB = self._na, self._nb
d = nB - nA
pA, pB = _norm_pdf(a), _norm_pdf(b)
mu = (pA - pB) / d # correction sign
mu2 = 1 + (a*pA - b*pB) / d - mu*mu
return mu, mu2, None, None
truncnorm = truncnorm_gen(name='truncnorm')
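# Usage sketch (editorial illustration, not part of the original source) of
# the clip-value conversion described in the docstring: truncating a normal
# with mean 0.5 and standard deviation 2 to the interval [0, 3].
_tn_mean, _tn_std = 0.5, 2.0
_tn_a, _tn_b = (0.0 - _tn_mean) / _tn_std, (3.0 - _tn_mean) / _tn_std
assert np.isclose(
    truncnorm.cdf(3.0, _tn_a, _tn_b, loc=_tn_mean, scale=_tn_std), 1.0)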
# FIXME: RVS does not work.
class tukeylambda_gen(rv_continuous):
r"""A Tukey-Lamdba continuous random variable.
%(before_notes)s
Notes
-----
A flexible distribution, able to represent and interpolate between the
following distributions:
- Cauchy (lam = -1)
- logistic (lam = 0)
- approximately normal (lam = 0.14)
- u-shape (lam = 0.5)
- uniform from -1 to 1 (lam = 1)
`tukeylambda` takes ``lam`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lam):
return np.ones(np.shape(lam), dtype=bool)
def _pdf(self, x, lam):
Fx = np.asarray(sc.tklmbda(x, lam))
Px = Fx**(lam-1.0) + (np.asarray(1-Fx))**(lam-1.0)
Px = 1.0/np.asarray(Px)
return np.where((lam <= 0) | (abs(x) < 1.0/np.asarray(lam)), Px, 0.0)
def _cdf(self, x, lam):
return sc.tklmbda(x, lam)
def _ppf(self, q, lam):
return sc.boxcox(q, lam) - sc.boxcox1p(-q, lam)
def _stats(self, lam):
return 0, _tlvar(lam), 0, _tlkurt(lam)
def _entropy(self, lam):
def integ(p):
return np.log(pow(p, lam-1)+pow(1-p, lam-1))
return integrate.quad(integ, 0, 1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda')
class FitUniformFixedScaleDataError(FitDataError):
def __init__(self, ptp, fscale):
self.args = (
"Invalid values in `data`. Maximum likelihood estimation with "
"the uniform distribution and fixed scale requires that "
"data.ptp() <= fscale, but data.ptp() = %r and fscale = %r." %
(ptp, fscale),
)
class uniform_gen(rv_continuous):
r"""A uniform continuous random variable.
This distribution is constant between `loc` and ``loc + scale``.
%(before_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.uniform(0.0, 1.0, self._size)
def _pdf(self, x):
return 1.0*(x == x)
def _cdf(self, x):
return x
def _ppf(self, q):
return q
def _stats(self):
return 0.5, 1.0/12, 0, -1.2
def _entropy(self):
return 0.0
def fit(self, data, *args, **kwds):
"""
Maximum likelihood estimate for the location and scale parameters.
`uniform.fit` uses only the following parameters. Because exact
formulas are used, the parameters related to optimization that are
available in the `fit` method of other distributions are ignored
here. The only positional argument accepted is `data`.
Parameters
----------
data : array_like
Data to use in calculating the maximum likelihood estimate.
floc : float, optional
Hold the location parameter fixed to the specified value.
fscale : float, optional
Hold the scale parameter fixed to the specified value.
Returns
-------
loc, scale : float
Maximum likelihood estimates for the location and scale.
Notes
-----
An error is raised if `floc` is given and any values in `data` are
less than `floc`, or if `fscale` is given and `fscale` is less
than ``data.max() - data.min()``. An error is also raised if both
`floc` and `fscale` are given.
Examples
--------
>>> from scipy.stats import uniform
We'll fit the uniform distribution to `x`:
>>> x = np.array([2, 2.5, 3.1, 9.5, 13.0])
For a uniform distribution MLE, the location is the minimum of the
data, and the scale is the maximum minus the minimum.
>>> loc, scale = uniform.fit(x)
>>> loc
2.0
>>> scale
11.0
If we know the data comes from a uniform distribution where the support
starts at 0, we can use `floc=0`:
>>> loc, scale = uniform.fit(x, floc=0)
>>> loc
0.0
>>> scale
13.0
Alternatively, if we know the length of the support is 12, we can use
`fscale=12`:
>>> loc, scale = uniform.fit(x, fscale=12)
>>> loc
1.5
>>> scale
12.0
In that last example, the support interval is [1.5, 13.5]. This
solution is not unique. For example, the distribution with ``loc=2``
and ``scale=12`` has the same likelihood as the one above. When
`fscale` is given and it is larger than ``data.max() - data.min()``,
the parameters returned by the `fit` method center the support over
the interval ``[data.min(), data.max()]``.
"""
if len(args) > 0:
raise TypeError("Too many arguments.")
floc = kwds.pop('floc', None)
fscale = kwds.pop('fscale', None)
# Ignore the optimizer-related keyword arguments, if given.
kwds.pop('loc', None)
kwds.pop('scale', None)
kwds.pop('optimizer', None)
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
# MLE for the uniform distribution
# --------------------------------
# The PDF is
#
# f(x, loc, scale) = {1/scale for loc <= x <= loc + scale
# {0 otherwise}
#
# The likelihood function is
# L(x, loc, scale) = (1/scale)**n
# where n is len(x), assuming loc <= x <= loc + scale for all x.
# The log-likelihood is
# l(x, loc, scale) = -n*log(scale)
# The log-likelihood is maximized by making scale as small as possible,
# while keeping loc <= x <= loc + scale. So if neither loc nor scale
# are fixed, the log-likelihood is maximized by choosing
# loc = x.min()
# scale = x.ptp()
# If loc is fixed, it must be less than or equal to x.min(), and then
# the scale is
# scale = x.max() - loc
# If scale is fixed, it must not be less than x.ptp(). If scale is
# greater than x.ptp(), the solution is not unique. Note that the
# likelihood does not depend on loc, except for the requirement that
# loc <= x <= loc + scale. All choices of loc for which
# x.max() - scale <= loc <= x.min()
# have the same log-likelihood. In this case, we choose loc such that
# the support is centered over the interval [data.min(), data.max()]:
# loc = x.min() - 0.5*(scale - x.ptp())
if fscale is None:
# scale is not fixed.
if floc is None:
# loc is not fixed, scale is not fixed.
loc = data.min()
scale = data.ptp()
else:
# loc is fixed, scale is not fixed.
loc = floc
scale = data.max() - loc
if data.min() < loc:
raise FitDataError("uniform", lower=loc, upper=loc + scale)
else:
# loc is not fixed, scale is fixed.
ptp = data.ptp()
if ptp > fscale:
raise FitUniformFixedScaleDataError(ptp=ptp, fscale=fscale)
# If ptp < fscale, the ML estimate is not unique; see the comments
# above. We choose the distribution for which the support is
# centered over the interval [data.min(), data.max()].
loc = data.min() - 0.5*(fscale - ptp)
scale = fscale
# We expect the return values to be floating point, so ensure it
# by explicitly converting to float.
return float(loc), float(scale)
uniform = uniform_gen(a=0.0, b=1.0, name='uniform')
class vonmises_gen(rv_continuous):
r"""A Von Mises continuous random variable.
%(before_notes)s
Notes
-----
If `x` is not in range or `loc` is not in range it assumes they are angles
and converts them to [-\pi, \pi] equivalents.
The probability density function for `vonmises` is:
.. math::
f(x, \kappa) = \frac{ \exp(\kappa \cos(x)) }{ 2 \pi I[0](\kappa) }
for :math:`-\pi \le x \le \pi`, :math:`\kappa > 0`.
`vonmises` takes :math:`\kappa` as a shape parameter.
%(after_notes)s
See Also
--------
vonmises_line : The same distribution, defined on a [-\pi, \pi] segment
of the real line.
%(example)s
"""
def _rvs(self, kappa):
return self._random_state.vonmises(0.0, kappa, size=self._size)
def _pdf(self, x, kappa):
# vonmises.pdf(x, \kappa) = exp(\kappa * cos(x)) / (2*pi*I[0](\kappa))
return np.exp(kappa * np.cos(x)) / (2*np.pi*sc.i0(kappa))
def _cdf(self, x, kappa):
return _stats.von_mises_cdf(kappa, x)
def _stats_skip(self, kappa):
return 0, None, 0, None
def _entropy(self, kappa):
return (-kappa * sc.i1(kappa) / sc.i0(kappa) +
np.log(2 * np.pi * sc.i0(kappa)))
vonmises = vonmises_gen(name='vonmises')
vonmises_line = vonmises_gen(a=-np.pi, b=np.pi, name='vonmises_line')
class wald_gen(invgauss_gen):
r"""A Wald continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wald` is:
.. math::
f(x) = \frac{1}{\sqrt{2\pi x^3}} \exp(- \frac{ (x-1)^2 }{ 2x })
for :math:`x > 0`.
`wald` is a special case of `invgauss` with ``mu == 1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self):
return self._random_state.wald(1.0, 1.0, size=self._size)
def _pdf(self, x):
# wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
return invgauss._pdf(x, 1.0)
def _logpdf(self, x):
return invgauss._logpdf(x, 1.0)
def _cdf(self, x):
return invgauss._cdf(x, 1.0)
def _stats(self):
return 1.0, 1.0, 3.0, 15.0
wald = wald_gen(a=0.0, name="wald")
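# Sanity-check sketch (editorial illustration, not part of the original
# source): the methods above simply delegate to invgauss with mu fixed to 1.
_wald_chk_x = np.linspace(0.2, 3.0, 6)
assert np.allclose(wald.pdf(_wald_chk_x), invgauss.pdf(_wald_chk_x, 1.0))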
class wrapcauchy_gen(rv_continuous):
r"""A wrapped Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wrapcauchy` is:
.. math::
f(x, c) = \frac{1-c^2}{2\pi (1+c^2 - 2c \cos(x))}
for :math:`0 \le x \le 2\pi`, :math:`0 < c < 1`.
`wrapcauchy` takes :math:`c` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return (c > 0) & (c < 1)
def _pdf(self, x, c):
# wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
return (1.0-c*c)/(2*np.pi*(1+c*c-2*c*np.cos(x)))
def _cdf(self, x, c):
output = np.zeros(x.shape, dtype=x.dtype)
val = (1.0+c)/(1.0-c)
c1 = x < np.pi
c2 = 1-c1
xp = np.extract(c1, x)
xn = np.extract(c2, x)
if np.any(xn):
valn = np.extract(c2, np.ones_like(x)*val)
xn = 2*np.pi - xn
yn = np.tan(xn/2.0)
on = 1.0-1.0/np.pi*np.arctan(valn*yn)
np.place(output, c2, on)
if np.any(xp):
valp = np.extract(c1, np.ones_like(x)*val)
yp = np.tan(xp/2.0)
op = 1.0/np.pi*np.arctan(valp*yp)
np.place(output, c1, op)
return output
def _ppf(self, q, c):
val = (1.0-c)/(1.0+c)
rcq = 2*np.arctan(val*np.tan(np.pi*q))
rcmq = 2*np.pi-2*np.arctan(val*np.tan(np.pi*(1-q)))
return np.where(q < 1.0/2, rcq, rcmq)
def _entropy(self, c):
return np.log(2*np.pi*(1-c*c))
wrapcauchy = wrapcauchy_gen(a=0.0, b=2*np.pi, name='wrapcauchy')
class gennorm_gen(rv_continuous):
r"""A generalized normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gennorm` is [1]_:
.. math::
f(x, \beta) = \frac{\beta}{2 \gamma(1/\beta)} \exp(-|x|^\beta)
`gennorm` takes :math:`\beta` as a shape parameter.
For :math:`\beta = 1`, it is identical to a Laplace distribution.
For :math:`\beta = 2`, it is identical to a normal distribution
(with :math:`scale=1/\sqrt{2}`).
See Also
--------
laplace : Laplace distribution
norm : normal distribution
References
----------
.. [1] "Generalized normal distribution, Version 1",
https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
%(example)s
"""
def _pdf(self, x, beta):
return np.exp(self._logpdf(x, beta))
def _logpdf(self, x, beta):
return np.log(0.5*beta) - sc.gammaln(1.0/beta) - abs(x)**beta
def _cdf(self, x, beta):
c = 0.5 * np.sign(x)
# evaluating (.5 + c) first prevents numerical cancellation
return (0.5 + c) - c * sc.gammaincc(1.0/beta, abs(x)**beta)
def _ppf(self, x, beta):
c = np.sign(x - 0.5)
# evaluating (1. + c) first prevents numerical cancellation
return c * sc.gammainccinv(1.0/beta, (1.0 + c) - 2.0*c*x)**(1.0/beta)
def _sf(self, x, beta):
return self._cdf(-x, beta)
def _isf(self, x, beta):
return -self._ppf(x, beta)
def _stats(self, beta):
c1, c3, c5 = sc.gammaln([1.0/beta, 3.0/beta, 5.0/beta])
return 0., np.exp(c3 - c1), 0., np.exp(c5 + c1 - 2.0*c3) - 3.
def _entropy(self, beta):
return 1. / beta - np.log(.5 * beta) + sc.gammaln(1. / beta)
gennorm = gennorm_gen(name='gennorm')
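# Sanity-check sketch (editorial illustration, not part of the original
# source): per the docstring, beta = 1 recovers the Laplace distribution
# and beta = 2 the normal with scale 1/sqrt(2) (`laplace` and `norm` are
# defined earlier in this module).
_gn_chk_x = np.linspace(-2.0, 2.0, 5)
assert np.allclose(gennorm.pdf(_gn_chk_x, 1.0), laplace.pdf(_gn_chk_x))
assert np.allclose(gennorm.pdf(_gn_chk_x, 2.0),
                   norm.pdf(_gn_chk_x, scale=1.0/np.sqrt(2.0)))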
class halfgennorm_gen(rv_continuous):
r"""The upper half of a generalized normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfgennorm` is:
.. math::
f(x, \beta) = \frac{\beta}{\gamma(1/\beta)} \exp(-|x|^\beta)
`gennorm` takes :math:`\beta` as a shape parameter.
For :math:`\beta = 1`, it is identical to an exponential distribution.
For :math:`\beta = 2`, it is identical to a half normal distribution
(with :math:`scale=1/\sqrt{2}`).
See Also
--------
gennorm : generalized normal distribution
expon : exponential distribution
halfnorm : half normal distribution
References
----------
.. [1] "Generalized normal distribution, Version 1",
https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
%(example)s
"""
def _pdf(self, x, beta):
# beta
# halfgennorm.pdf(x, beta) = ------------- exp(-|x|**beta)
# gamma(1/beta)
return np.exp(self._logpdf(x, beta))
def _logpdf(self, x, beta):
return np.log(beta) - sc.gammaln(1.0/beta) - x**beta
def _cdf(self, x, beta):
return sc.gammainc(1.0/beta, x**beta)
def _ppf(self, x, beta):
return sc.gammaincinv(1.0/beta, x)**(1.0/beta)
def _sf(self, x, beta):
return sc.gammaincc(1.0/beta, x**beta)
def _isf(self, x, beta):
return sc.gammainccinv(1.0/beta, x)**(1.0/beta)
def _entropy(self, beta):
return 1.0/beta - np.log(beta) + sc.gammaln(1.0/beta)
halfgennorm = halfgennorm_gen(a=0, name='halfgennorm')
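# Sanity-check sketch (editorial illustration, not part of the original
# source): per the docstring, beta = 1 makes halfgennorm identical to the
# standard exponential (`expon` is defined earlier in this module).
_hgn_chk_x = np.linspace(0.1, 3.0, 6)
assert np.allclose(halfgennorm.pdf(_hgn_chk_x, 1.0), expon.pdf(_hgn_chk_x))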
class crystalball_gen(rv_continuous):
r"""
Crystalball distribution
%(before_notes)s
Notes
-----
The probability density function for `crystalball` is:
.. math::
f(x, \beta, m) = \begin{cases}
N \exp(-x^2 / 2), &\text{for } x > -\beta\\
N A (B - x)^{-m} &\text{for } x \le -\beta
\end{cases}
where :math:`A = (m / |\beta|)^m \exp(-\beta^2 / 2)`,
:math:`B = m/|\beta| - |\beta|` and :math:`N` is a normalisation constant.
`crystalball` takes :math:`\beta` and :math:`m` as shape parameters.
:math:`\beta` defines the point where the pdf changes from a power-law to a
Gaussian distribution; :math:`m` is the power of the power-law tail.
References
----------
.. [1] "Crystal Ball Function",
https://en.wikipedia.org/wiki/Crystal_Ball_function
%(after_notes)s
.. versionadded:: 0.19.0
%(example)s
"""
def _pdf(self, x, beta, m):
"""
Return PDF of the crystalball function.
--
| exp(-x**2 / 2), for x > -beta
crystalball.pdf(x, beta, m) = N * |
| A * (B - x)**(-m), for x <= -beta
--
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) + _norm_pdf_C * _norm_cdf(beta))
rhs = lambda x, beta, m: np.exp(-x**2 / 2)
lhs = lambda x, beta, m: (m/beta)**m * np.exp(-beta**2 / 2.0) * (m/beta - beta - x)**(-m)
return N * _lazywhere(np.atleast_1d(x > -beta), (x, beta, m), f=rhs, f2=lhs)
def _cdf(self, x, beta, m):
"""
Return CDF of the crystalball function
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) + _norm_pdf_C * _norm_cdf(beta))
rhs = lambda x, beta, m: (m/beta) * np.exp(-beta**2 / 2.0) / (m-1) + _norm_pdf_C * (_norm_cdf(x) - _norm_cdf(-beta))
lhs = lambda x, beta, m: (m/beta)**m * np.exp(-beta**2 / 2.0) * (m/beta - beta - x)**(-m+1) / (m-1)
return N * _lazywhere(np.atleast_1d(x > -beta), (x, beta, m), f=rhs, f2=lhs)
def _munp(self, n, beta, m):
"""
Returns the n-th non-central moment of the crystalball function.
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) + _norm_pdf_C * _norm_cdf(beta))
def n_th_moment(n, beta, m):
"""
Returns n-th moment. Defined only if n+1 < m
Function cannot broadcast due to the loop over n
"""
A = (m/beta)**m * np.exp(-beta**2 / 2.0)
B = m/beta - beta
rhs = 2**((n-1)/2.0) * sc.gamma((n+1)/2) * (1.0 + (-1)**n * sc.gammainc((n+1)/2, beta**2 / 2))
lhs = np.zeros(rhs.shape)
for k in range(n + 1):
lhs += sc.binom(n, k) * B**(n-k) * (-1)**k / (m - k - 1) * (m/beta)**(-m + k + 1)
return A * lhs + rhs
return N * _lazywhere(np.atleast_1d(n + 1 < m),
(n, beta, m),
np.vectorize(n_th_moment, otypes=[np.float]),
np.inf)
def _argcheck(self, beta, m):
"""
In HEP crystal-ball is also defined for m = 1 (see plot on wikipedia)
But the function doesn't have a finite integral in this corner case,
and isn't a PDF anymore (but can still be used on a finite range).
Here we restrict the function to m > 1.
In addition we restrict beta to be positive
"""
return (m > 1) & (beta > 0)
crystalball = crystalball_gen(name='crystalball', longname="A Crystalball Function")
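# Sanity-check sketch (editorial illustration, not part of the original
# source): the normalisation constant N used in _pdf should make the
# density integrate to one; checked numerically with the module-level
# `integrate`.
_cb_mass, _ = integrate.quad(lambda x: crystalball.pdf(x, 2.0, 3.0),
                             -np.inf, np.inf)
assert np.isclose(_cb_mass, 1.0)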
def _argus_phi(chi):
"""
Utility function for the argus distribution
used in the CDF and normalisation of the ARGUS function
"""
return _norm_cdf(chi) - chi * _norm_pdf(chi) - 0.5
class argus_gen(rv_continuous):
r"""
Argus distribution
%(before_notes)s
Notes
-----
The probability density function for `argus` is:
.. math::
f(x, \chi) = \frac{\chi^3}{\sqrt{2\pi} \Psi(\chi)} x \sqrt{1-x^2}
\exp(- 0.5 \chi^2 (1 - x^2))
where:
.. math::
\Psi(\chi) = \Phi(\chi) - \chi \phi(\chi) - 1/2
with :math:`\Phi` and :math:`\phi` being the CDF and PDF of a standard
normal distribution, respectively.
`argus` takes :math:`\chi` as a shape parameter.
References
----------
.. [1] "ARGUS distribution",
https://en.wikipedia.org/wiki/ARGUS_distribution
%(after_notes)s
.. versionadded:: 0.19.0
%(example)s
"""
def _pdf(self, x, chi):
"""
Return PDF of the argus function
argus.pdf(x, chi) = chi**3 / (sqrt(2*pi) * Psi(chi)) * x *
sqrt(1-x**2) * exp(- 0.5 * chi**2 * (1 - x**2))
"""
y = 1.0 - x**2
return chi**3 / (_norm_pdf_C * _argus_phi(chi)) * x * np.sqrt(y) * np.exp(-chi**2 * y / 2)
def _cdf(self, x, chi):
"""
Return CDF of the argus function
"""
return 1.0 - self._sf(x, chi)
def _sf(self, x, chi):
"""
Return survival function of the argus function
"""
return _argus_phi(chi * np.sqrt(1 - x**2)) / _argus_phi(chi)
argus = argus_gen(name='argus', longname="An Argus Function", a=0.0, b=1.0)
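# Sanity-check sketch (editorial illustration, not part of the original
# source): _argus_phi(0) is 0, so the survival function above vanishes at
# x = 1 and the cdf reaches one there.
assert np.isclose(argus.cdf(1.0, 1.0), 1.0)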
class rv_histogram(rv_continuous):
"""
Generates a distribution given by a histogram.
This is useful to generate a template distribution from a binned
datasample.
As a subclass of the `rv_continuous` class, `rv_histogram` inherits from it
a collection of generic methods (see `rv_continuous` for the full list),
and implements them based on the properties of the provided binned
datasample.
Parameters
----------
histogram : tuple of array_like
Tuple containing two array_like objects
The first containing the content of n bins
The second containing the (n+1) bin boundaries
In particular, the return value of np.histogram is accepted.
Notes
-----
There are no additional shape parameters except for the loc and scale.
The pdf is defined as a stepwise function from the provided histogram.
The cdf is a linear interpolation of the pdf.
.. versionadded:: 0.19.0
Examples
--------
Create a scipy.stats distribution from a numpy histogram
>>> import scipy.stats
>>> import numpy as np
>>> data = scipy.stats.norm.rvs(size=100000, loc=0, scale=1.5, random_state=123)
>>> hist = np.histogram(data, bins=100)
>>> hist_dist = scipy.stats.rv_histogram(hist)
Behaves like an ordinary scipy rv_continuous distribution
>>> hist_dist.pdf(1.0)
0.20538577847618705
>>> hist_dist.cdf(2.0)
0.90818568543056499
PDF is zero above (below) the highest (lowest) bin of the histogram,
defined by the max (min) of the original dataset
>>> hist_dist.pdf(np.max(data))
0.0
>>> hist_dist.cdf(np.max(data))
1.0
>>> hist_dist.pdf(np.min(data))
7.7591907244498314e-05
>>> hist_dist.cdf(np.min(data))
0.0
PDF and CDF follow the histogram
>>> import matplotlib.pyplot as plt
>>> X = np.linspace(-5.0, 5.0, 100)
>>> plt.title("PDF from Template")
>>> plt.hist(data, density=True, bins=100)
>>> plt.plot(X, hist_dist.pdf(X), label='PDF')
>>> plt.plot(X, hist_dist.cdf(X), label='CDF')
>>> plt.show()
"""
_support_mask = rv_continuous._support_mask
def __init__(self, histogram, *args, **kwargs):
"""
Create a new distribution using the given histogram
Parameters
----------
histogram : tuple of array_like
Tuple containing two array_like objects
The first containing the content of n bins
The second containing the (n+1) bin boundaries
In particular, the return value of np.histogram is accepted.
"""
self._histogram = histogram
if len(histogram) != 2:
raise ValueError("Expected length 2 for parameter histogram")
self._hpdf = np.asarray(histogram[0])
self._hbins = np.asarray(histogram[1])
if len(self._hpdf) + 1 != len(self._hbins):
raise ValueError("Number of elements in histogram content "
"and histogram boundaries do not match, "
"expected n and n+1.")
self._hbin_widths = self._hbins[1:] - self._hbins[:-1]
self._hpdf = self._hpdf / float(np.sum(self._hpdf * self._hbin_widths))
self._hcdf = np.cumsum(self._hpdf * self._hbin_widths)
self._hpdf = np.hstack([0.0, self._hpdf, 0.0])
self._hcdf = np.hstack([0.0, self._hcdf])
# Set support
kwargs['a'] = self._hbins[0]
kwargs['b'] = self._hbins[-1]
super(rv_histogram, self).__init__(*args, **kwargs)
def _pdf(self, x):
"""
PDF of the histogram
"""
return self._hpdf[np.searchsorted(self._hbins, x, side='right')]
def _cdf(self, x):
"""
CDF calculated from the histogram
"""
return np.interp(x, self._hbins, self._hcdf)
def _ppf(self, x):
"""
Percentile function calculated from the histogram
"""
return np.interp(x, self._hcdf, self._hbins)
def _munp(self, n):
"""Compute the n-th non-central moment."""
integrals = (self._hbins[1:]**(n+1) - self._hbins[:-1]**(n+1)) / (n+1)
return np.sum(self._hpdf[1:-1] * integrals)
def _entropy(self):
"""Compute entropy of distribution"""
res = _lazywhere(self._hpdf[1:-1] > 0.0,
(self._hpdf[1:-1],),
np.log,
0.0)
return -np.sum(self._hpdf[1:-1] * res * self._hbin_widths)
def _updated_ctor_param(self):
"""
Set the histogram as additional constructor argument
"""
dct = super(rv_histogram, self)._updated_ctor_param()
dct['histogram'] = self._histogram
return dct
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_continuous)
__all__ = _distn_names + _distn_gen_names + ['rv_histogram']
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/scipy/stats/_continuous_distns.py
|
Python
|
gpl-3.0
| 186,903
|
[
"CRYSTAL",
"Gaussian"
] |
5c00f8381f8e68f91b08e6fe8732a4b3cb6a516b3db3ede7dcaa914da0993d26
|
### AUTHOR: William F. Hooper
### Affiliation: Rensselaer Polytechnic Institute
### Based on kic.py, included with InteractiveROSETTA
### Additional dependencies: LoopHash, available at github.com/willhooper/LoopHash (might be packaged with this install already)
### Database files iRosetta_Lookup.exe, pdbselect.dat, looplist.dat, and grid.dat should be in sandbox
import wx
import wx.grid
import wx.lib.scrolledpanel
import wx.lib.intctrl
import os
import os.path
import time
import platform
import multiprocessing
import webbrowser
import datetime
from threading import Thread
from tools import *
class INDELmodelPanel(wx.lib.scrolledpanel.ScrolledPanel):
def __init__(self, parent, W, H):
#if (platform.system() == "Windows"):
wx.lib.scrolledpanel.ScrolledPanel.__init__(self, parent, id=-1, pos=(10, 60), size=(340, H-330), name="INDEL")
winh = H-330
#else:
#wx.lib.scrolledpanel.ScrolledPanel.__init__(self, parent, id=-1, pos=(10, 60), size=(340, H-330), name="ProtMinimization")
#winh = H-290
self.SetBackgroundColour("#333333")
self.parent = parent
self.areCST = False
if (platform.system() == "Windows"):
self.lblProt = wx.StaticText(self, -1, "INDEL Loop Design", (25, 15), (270, 25), wx.ALIGN_CENTRE)
self.lblProt.SetFont(wx.Font(12, wx.DEFAULT, wx.ITALIC, wx.BOLD))
# elif (platform.system() == "Darwin"):
# self.lblProt = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/indel/label_INDEL.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(25, 15), size=(270, 25))
else:
self.lblProt = wx.StaticText(self, -1, "INDEL Loop Design", (70, 15), style=wx.ALIGN_CENTRE)
self.lblProt.SetFont(wx.Font(12, wx.DEFAULT, wx.ITALIC, wx.BOLD))
resizeTextControlForUNIX(self.lblProt, 0, self.GetSize()[0]-20)
self.lblProt.SetForegroundColour("#FFFFFF")
# if (platform.system() == "Darwin"):
# self.HelpBtn = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/HelpBtn.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(295, 10), size=(25, 25))
# else:
self.HelpBtn = wx.Button(self, id=-1, label="?", pos=(295, 10), size=(25, 25))
self.HelpBtn.SetForegroundColour("#0000FF")
self.HelpBtn.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
self.HelpBtn.Bind(wx.EVT_BUTTON, self.showHelp)
self.HelpBtn.SetToolTipString("Display the help file for this window")
if (platform.system() == "Windows"):
self.lblInst = wx.StaticText(self, -1, "Remodels loops via a \n fragment database search", (0, 45), (320, 25), wx.ALIGN_CENTRE)
self.lblInst.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
# elif (platform.system() == "Darwin"):
# self.lblInst = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/indel/lbl_description_INDEL.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(0, 45), size=(320, 25))
else:
self.lblInst = wx.StaticText(self, -1, "Remodels loops via a \n fragment database search", (5, 45), style=wx.ALIGN_CENTRE)
self.lblInst.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
resizeTextControlForUNIX(self.lblInst, 0, self.GetSize()[0]-20)
self.lblInst.SetForegroundColour("#FFFFFF")
# Model selection
if (platform.system() == "Windows"):
self.lblModel = wx.StaticText(self, -1, "Model", (10, 90), (140, 20), wx.ALIGN_CENTRE)
self.lblModel.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
# elif (platform.system() == "Darwin"):
# self.lblModel = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/lblModelKIC.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(10, 90), size=(140, 20))
else:
self.lblModel = wx.StaticText(self, -1, "Model", (10, 90), style=wx.ALIGN_CENTRE)
self.lblModel.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
resizeTextControlForUNIX(self.lblModel, 10, 140)
self.lblModel.SetForegroundColour("#FFFFFF")
self.modelMenu = wx.ComboBox(self, pos=(10, 110), size=(140, 25), choices=[], style=wx.CB_READONLY)
self.modelMenu.Bind(wx.EVT_COMBOBOX, self.modelMenuSelect)
self.modelMenu.SetToolTipString("Model on which to perform loop modeling")
self.selectedModel = ""
#Constraints button
self.btnCst = wx.Button(self,-1,"Constraints",(170,110),(140,20))
self.btnCst.SetFont(wx.Font(10,wx.DEFAULT,wx.NORMAL,wx.BOLD))
self.btnCst.SetForegroundColour("#000000")
self.btnCst.Bind(wx.EVT_BUTTON,self.open_csts)
self.ConstraintSet = []
# N-term anchor selection
if (platform.system() == "Windows"):
self.lblBegin = wx.StaticText(self, -1, "Loop Begin", (10, 140), (120, 20), wx.ALIGN_CENTRE)
self.lblBegin.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
# elif (platform.system() == "Darwin"):
# self.lblBegin = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/lblBegin.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(10, 140), size=(140, 20))
else:
self.lblBegin = wx.StaticText(self, -1, "Loop Begin", (10, 140), style=wx.ALIGN_CENTRE)
self.lblBegin.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
resizeTextControlForUNIX(self.lblBegin, 10, 140)
self.lblBegin.SetForegroundColour("#FFFFFF")
self.beginMenu = wx.ComboBox(self, pos=(10, 160), size=(140, 25), choices=[], style=wx.CB_READONLY)
self.beginMenu.Bind(wx.EVT_COMBOBOX, self.beginMenuSelect)
self.beginMenu.Bind(wx.EVT_RIGHT_DOWN, self.rightClick)
self.beginMenu.SetToolTipString("Loop N-terminus")
self.loopBegin = -1
# C-term anchor selection
if (platform.system() == "Windows"):
self.lblEnd = wx.StaticText(self, -1, "Loop End", (170, 140), (140, 20), wx.ALIGN_CENTRE)
self.lblEnd.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
# elif (platform.system() == "Darwin"):
# self.lblEnd = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/lblEnd.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(170, 140), size=(140, 20))
else:
self.lblEnd = wx.StaticText(self, -1, "Loop End", (170, 140), style=wx.ALIGN_CENTRE)
self.lblEnd.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
resizeTextControlForUNIX(self.lblEnd, 170, 140)
self.lblEnd.SetForegroundColour("#FFFFFF")
self.endMenu = wx.ComboBox(self, pos=(170, 160), size=(140, 25), choices=[], style=wx.CB_READONLY)
self.endMenu.Bind(wx.EVT_COMBOBOX, self.endMenuSelect)
self.endMenu.Bind(wx.EVT_RIGHT_DOWN, self.rightClick)
self.endMenu.SetToolTipString("Loop C-terminus")
self.loopEnd = -1
# Minimum loop length (in residues)
# can't get wx.ComboBox to accept a list of integers as choices
minmax_length = ['3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19']
if (platform.system() == "Windows"):
self.lblMin = wx.StaticText(self, -1, "Minimum length", (10, 190), (140, 20), wx.ALIGN_CENTRE)
self.lblMin.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
# elif (platform.system() == "Darwin"):
# self.lblMin = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/indel/minLength.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(10, 140), size=(140, 20))
else:
self.lblMin = wx.StaticText(self, -1, "Minimum length", (20, 190), style=wx.ALIGN_CENTRE)
self.lblMin.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
resizeTextControlForUNIX(self.lblMin, 10, 140)
self.lblMin.SetForegroundColour("#FFFFFF")
self.minMenu = wx.ComboBox(self, pos=(10, 210), size=(140, 25), choices=minmax_length, style=wx.CB_READONLY)
self.minMenu.Bind(wx.EVT_COMBOBOX, self.minMenuSelect)
self.minMenu.Bind(wx.EVT_RIGHT_DOWN, self.rightClick)
self.minMenu.SetToolTipString("Minimum length of loop in residues")
self.minMenu.SetSelection(0)
self.minLength = int(self.minMenu.GetStringSelection())
# Max loop length (in residues)
if (platform.system() == "Windows"):
self.lblMax = wx.StaticText(self, -1, "Maximum length", (170, 190), (140, 20), wx.ALIGN_CENTRE)
self.lblMax.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
# elif (platform.system() == "Darwin"):
# self.lblMax = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/indel/maxLength.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(10, 140), size=(140, 20))
else:
self.lblMax = wx.StaticText(self, -1, "Maximum length", (180, 190), style=wx.ALIGN_CENTRE)
self.lblMax.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
resizeTextControlForUNIX(self.lblMax, 170, 140)
self.lblMax.SetForegroundColour("#FFFFFF")
self.maxMenu = wx.ComboBox(self, pos=(170, 210), size=(140, 25), choices=minmax_length, style=wx.CB_READONLY)
self.maxMenu.Bind(wx.EVT_COMBOBOX, self.maxMenuSelect)
self.maxMenu.Bind(wx.EVT_RIGHT_DOWN, self.rightClick)
self.maxMenu.SetToolTipString("Maximum length of loop in residues")
self.maxMenu.SetSelection(len(minmax_length)-1)
self.maxLength = int(self.maxMenu.GetStringSelection())
# Min number of models to evaluate
if (platform.system() == "Windows"):
self.lblResultsMin = wx.StaticText(self, -1, "Minumum results", (10, 240), (140, 20), wx.ALIGN_CENTRE)
self.lblResultsMin.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
# elif (platform.system() == "Darwin"):
# self.lblResultsMin = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/indel/minResults.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(10, 240), size=(140, 20))
else:
self.lblResultsMin = wx.StaticText(self, -1, "Minimum results", (20, 240), style=wx.ALIGN_CENTRE)
self.lblResultsMin.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
resizeTextControlForUNIX(self.lblResultsMin, 10, 140)
self.lblResultsMin.SetForegroundColour("#FFFFFF")
self.ResultsMin = wx.lib.intctrl.IntCtrl(self, pos=(10, 260), size=(140, 25))
self.ResultsMin.Bind(wx.EVT_COMBOBOX, self.maxMenuSelect)
self.ResultsMin.Bind(wx.EVT_RIGHT_DOWN, self.rightClick)
self.ResultsMin.SetToolTipString("Minimum number of loop search results.")
self.ResultsMin.SetValue(10)
self.minResultsval = self.ResultsMin.GetValue()
# Max number of models to evaluate
if (platform.system() == "Windows"):
self.lblResultsMax = wx.StaticText(self, -1, "Maximum results", (170, 240), (140, 20), wx.ALIGN_CENTRE)
self.lblResultsMax.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
# elif (platform.system() == "Darwin"):
# self.lblResultsMax = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/indel/maxResults.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(10, 240), size=(140, 20))
else:
self.lblResultsMax = wx.StaticText(self, -1, "Maximum results", (180, 240), style=wx.ALIGN_CENTRE)
self.lblResultsMax.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
resizeTextControlForUNIX(self.lblResultsMax, 170, 140)
self.lblResultsMax.SetForegroundColour("#FFFFFF")
self.ResultsMax = wx.lib.intctrl.IntCtrl(self, pos=(170, 260), size=(140, 25))
self.ResultsMax.Bind(wx.EVT_COMBOBOX, self.maxMenuSelect)
self.ResultsMax.Bind(wx.EVT_RIGHT_DOWN, self.rightClick)
self.ResultsMax.SetToolTipString("If the loop search returns many results, try to insert this many.")
self.ResultsMax.SetValue(25)
self.maxResultsval = self.ResultsMax.GetValue()
if (platform.system() == "Windows"):
self.lblRendundancy = wx.StaticText(self, -1, "Redundancy Cutoff", (5, 290), (140, 20), wx.ALIGN_CENTRE)
self.lblRendundancy.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
# elif (platform.system() == "Darwin"):
# self.lblResultsMax = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/indel/maxResults.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(10, 240), size=(140, 20))
else:
self.lblRendundancy = wx.StaticText(self, -1, "Redundancy Cutoff", (15, 290), style=wx.ALIGN_CENTRE)
self.lblRendundancy.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
resizeTextControlForUNIX(self.lblRendundancy, 10, 140)
self.lblRendundancy.SetForegroundColour("#FFFFFF")
self.RedundancyCutoff = wx.SpinCtrlDouble(self, min=0.1, max=5,inc=0.1, initial=1.0 , pos=(10, 310), size=(140, 25))
#self.RedundancyCutoff.Bind(wx.EVT_COMBOBOX, self.maxMenuSelect)
self.RedundancyCutoff.Bind(wx.EVT_RIGHT_DOWN, self.rightClick)
self.RedundancyCutoff.SetToolTipString("Cutoff used to filter out redundant loop search results.")
# if (platform.system() == "Darwin"):
# self.btnClear = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnClear.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(220, 305), size=(90, 25))
# else:
self.btnClear = wx.Button(self, id=-1, label="Clear", pos=(220, 305), size=(90, 25))
self.btnClear.SetForegroundColour("#000000")
self.btnClear.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
self.btnClear.Bind(wx.EVT_BUTTON, self.clear)
self.btnClear.SetToolTipString("Clear parameters")
# Checkbox toggles
self.preserve_sequence = wx.CheckBox(self, -1, 'Preserve indel sequence', (10, 335) )
self.preserve_sequence.SetToolTipString("Return native loop sequence instead of Alanines")
self.preserve_sequence.SetForegroundColour("#FFFFFF")
self.preserve_sequence.SetValue(False)
self.symmetric_design = wx.CheckBox(self, -1, 'Symmetric homo-oligomer loop design', (10, 355) )
self.symmetric_design.SetToolTipString("Attempt to design the same loop onto each monomer")
self.symmetric_design.SetForegroundColour("#FFFFFF")
self.symmetric_design.SetValue(False)
self.symmetric_design.Disable()
self.grdLoops = wx.grid.Grid(self)
self.grdLoops.CreateGrid(0, 2)
self.grdLoops.SetSize((320, 200))
self.grdLoops.SetPosition((0, 385))
self.grdLoops.SetLabelFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
self.grdLoops.DisableDragColSize()
self.grdLoops.DisableDragRowSize()
self.grdLoops.SetColLabelValue(0, "Length")
self.grdLoops.SetColLabelValue(1, "Score")
self.grdLoops.SetRowLabelSize(50)
self.grdLoops.SetColSize(0, 70)
self.grdLoops.SetColSize(1, 200)
self.grdLoops.Bind(wx.grid.EVT_GRID_CELL_LEFT_CLICK, self.gridClick)
self.loops = []
self.selectedr = -1
ypos = self.grdLoops.GetPosition()[1] + self.grdLoops.GetSize()[1] + 10
self.indel_model_selected = ""
self.previous_indel_model_selected = ""
self.model_names = []
self.scores = []
self.lengths = []
self.save_model = wx.Button(self, id=-1, label="Save", pos=(40, ypos), size=(100, 25))
self.save_model.SetForegroundColour("#000000")
self.save_model.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
self.save_model.Bind(wx.EVT_BUTTON, self.saveClick)
self.save_model.SetToolTipString("Save selected model pdb")
self.save_model.Disable()
# if (platform.system() == "Darwin"):
# self.save_all = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnServer_Off.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(40, ypos+215), size=(100, 25))
# else:
self.save_all = wx.Button(self, id=-1, label="Save all", pos=(40, ypos + 40), size=(100, 25))
self.save_all.SetForegroundColour("#000000")
self.save_all.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
self.save_all.Bind(wx.EVT_BUTTON, self.saveAll)
self.save_all.SetToolTipString("Save all results at once")
self.save_all.Disable()
# if (platform.system() == "Darwin"):
# self.btnINDEL = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/indel/btnINDEL.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(180, ypos+215), size=(100, 25))
# else:
self.btnINDEL = wx.Button(self, id=-1, label="Model!", pos=(180, ypos), size=(100, 25))
self.btnINDEL.SetForegroundColour("#000000")
self.btnINDEL.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
self.btnINDEL.Bind(wx.EVT_BUTTON, self.INDELClick)
self.btnINDEL.SetToolTipString("Begin INDEL simulation with selected parameters")
self.buttonState = "Model!"
self.scrollh = self.btnINDEL.GetPosition()[1] + self.btnINDEL.GetSize()[1] + 5
self.SetScrollbars(1, 1, 320, self.scrollh)
self.winscrollpos = 0
self.Bind(wx.EVT_SCROLLWIN, self.scrolled)
def open_csts(self,event):
'''Creates the Constraints menu and allows constraints to be added.
Each time a constraint is added, it is put in the local constraints set,
so constraints are maintained as the menu is destroyed and recreated'''
try:
import constraints
# print 'constraints imported'
self.frame = wx.Frame(None,-1,title="Constraints Menu")
# print 'frame generated'
self.ConstraintPanel=constraints.ConstraintPanel(self.frame,self)
self.frame.Fit()
# print 'constraintpanel created'
self.frame.Show()
# print 'showing frame'
self.ConstraintPanel.setSelectWin(self.selectWin)
self.ConstraintPanel.setSeqWin(self.seqWin)
self.ConstraintPanel.setPyMOL(self.pymol)
except Exception as e:
import traceback
# print 'Error importing constraints',e.message
traceback.print_tb(sys.exc_info()[2])
pass
def showHelp(self, event):
# Open the help page
if (platform.system() == "Darwin"):
try:
browser = webbrowser.get("Safari")
except:
print "Could not load Safari! The help files are located at " + self.scriptdir + "/help"
return
browser.open(self.parent.parent.scriptdir + "/help/indel.html")
else:
webbrowser.open(self.parent.parent.scriptdir + "/help/indel.html")
def setSeqWin(self, seqWin):
self.seqWin = seqWin
# So the sequence window knows about what model "designed_view" really is
self.seqWin.setProtocolPanel(self)
def setPyMOL(self, pymol):
self.pymol = pymol
self.cmd = pymol.cmd
self.stored = pymol.stored
def setSelectWin(self, selectWin):
self.selectWin = selectWin
self.selectWin.setProtPanel(self)
def scrolled(self, event):
self.winscrollpos = self.GetScrollPos(wx.VERTICAL)
event.Skip()
def enableAll(self, save_enable):
# Are there results to save?
if (save_enable):
self.save_model.Enable()
self.save_all.Enable()
# Enable all controls
self.modelMenu.Enable()
self.beginMenu.Enable()
self.endMenu.Enable()
self.parent.GoBtn.Enable()
self.minMenu.Enable()
self.maxMenu.Enable()
self.btnClear.Enable()
self.ResultsMin.Enable()
self.ResultsMax.Enable()
self.RedundancyCutoff.Enable()
self.btnINDEL.Enable()
self.preserve_sequence.Enable()
self.btnCst.Enable()
self.seqWin.cannotDelete = False
def disableAll(self, model_menu_disable, seq_win_disable):
if (model_menu_disable):
self.modelMenu.Disable()
if (seq_win_disable):
self.seqWin.cannotDelete = True
self.parent.GoBtn.Disable()
self.beginMenu.Disable()
self.endMenu.Disable()
self.minMenu.Disable()
self.maxMenu.Disable()
self.ResultsMin.Disable()
self.ResultsMax.Disable()
self.btnClear.Disable()
self.RedundancyCutoff.Disable()
self.preserve_sequence.Disable()
self.save_model.Disable()
self.save_all.Disable()
self.btnCst.Disable()
def isAA(self, residue):
return residue.resname in "ALA CYS ASP GLU PHE GLY HIS ILE LYS LEU MET ASN PRO GLN ARG SER THR VAL TRP TYR".split() # exact match against the 20 canonical residue names, not a substring test
def symmetry(self):
# Check to see if we should turn the symmetry button on, and how many symmetric molecules there are
poseindex = self.seqWin.getPoseIndexForModel(self.selectedModel)
chains = [x for x in self.seqWin.poses[poseindex][0]]
self.symmetry_value = 1
print self.symmetry_value
# chains = [x for x in self.seqWin.poses[poseindex][0].get_chains()]
if len(chains) == 1:
# self.symmetry_value = 1
return self.symmetry_value != 1
# Check if each chain is the same size
for i in range(0,1): #len(chains)
print 'chain i:',sorted(chains)[i]
i_chainlength = len([x for x in sorted(chains)[i].get_residues() if self.isAA(x)])
for j in range(i+1, len(chains)):
print 'chain j:',sorted(chains)[j]
j_chainlength = len([x for x in sorted(chains)[j].get_residues() if self.isAA(x)])
if i_chainlength != j_chainlength:
return self.symmetry_value != 1
# self.symmetry_value = 1
# return False
self.symmetry_value += 1
print self.symmetry_value
"""
# Check to make sure that each chain has the same residues
for i in range(len(chains)):
i_residues = [x for x in chains[i].get_residues() if self.isAA(x)]
for j in range(i+1, len(chains)):
j_residues = [x for x in chains[j].get_residues() if self.isAA(x)]
for k in range(len(j_residues)):
if i_residues[k] != j_residues[k]:
self.symmetry_value = 1
return False
"""
# We have symmetric chains, allow symmetric design
# self.symmetry_value = len(chains)
# return True
return self.symmetry_value != 1
def activate(self):
# Get the list of all the PROTEIN models in the sequence viewer
modelList = []
for r in range(0, self.seqWin.SeqViewer.NumberRows):
model = self.seqWin.getModelForChain(r)
if (not(model in modelList)):
modelList.append(model)
# Update the combobox list if the list has changed
if (modelList != self.modelMenu.GetItems()):
self.modelMenu.Clear()
if modelList == []: modelList = ['']
self.modelMenu.AppendItems(modelList)
self.selectedModel = ""
if (platform.system() == "Windows"):
self.modelMenu.SetSelection(-1)
else:
self.modelMenu.SetSelection(0)
self.modelMenuSelect(None)
# Did we lose the model for the data in the loops grid? If so, clear the loops
if (len(self.loops) > 0 and not(self.loops[0][2] in modelList)):
self.loops = []
self.updateLoops()
# If the user was deleting things in the sequence window, the specified begin and end positions might
# not be valid anymore so we should erase them
poseindx = self.seqWin.getPoseIndexForModel(self.selectedModel)
if (poseindx >= 0):
naa = 0
for ch in self.seqWin.poses[poseindx][0]:
for residue in ch:
if (residue.resname in "ALA CYS ASP GLU PHE GLY HIS ILE LYS LEU MET ASN PRO GLN ARG SER THR VAL TRP TYR "):
naa = naa + 1
if (len(self.beginMenu.GetItems()) != naa-1):
self.selectedModel = ""
self.modelMenuSelect(None)
self.Scroll(0, self.winscrollpos)
# Check to see if there's more than one model currently open. If so,
# allow the user to include those in calculations
if len(self.seqWin.poses) > 1:
self.enableAll(save_enable=False)
if not self.symmetry():
self.symmetric_design.Disable()
self.symmetric_design.SetValue(False)
elif len(self.seqWin.poses) == 0:
self.disableAll(model_menu_disable=False, seq_win_disable=False)
self.btnINDEL.Disable()
self.symmetric_design.Disable()
self.symmetric_design.SetValue(False)
else:
self.enableAll(save_enable=False)
self.symmetric_design.Disable()
self.symmetric_design.SetValue(False)
def rightClick(self, event):
# Attempt to fill in loop values from a selection to bypass having to use the ComboBox
try:
topLefts = self.seqWin.SeqViewer.GetSelectionBlockTopLeft()
bottomRights = self.seqWin.SeqViewer.GetSelectionBlockBottomRight()
row = topLefts[0][0]
begin = 9999999
end = 0
for i in range(0, len(topLefts)):
for r in range(topLefts[i][0], bottomRights[i][0]+1):
if (r != row):
continue
for c in range(topLefts[i][1], bottomRights[i][1]+1):
if (c > end and self.seqWin.sequences[row][c] != "-"):
end = c
if (c < begin and self.seqWin.sequences[row][c] != "-"):
begin = c
if (begin == end):
# Have to get at least two residues
return
model = self.seqWin.IDs[row]
chain = model[len(model)-1]
model = model[:len(model)-2]
beginres = chain + ":" + self.seqWin.sequences[row][begin] + str(self.seqWin.indxToSeqPos[row][begin][1])
endres = chain + ":" + self.seqWin.sequences[row][end] + str(self.seqWin.indxToSeqPos[row][end][1])
mindx = self.modelMenu.GetItems().index(model)
bindx = self.beginMenu.GetItems().index(beginres)
eindx = self.endMenu.GetItems().index(endres)
self.modelMenu.SetSelection(mindx)
self.beginMenu.SetSelection(bindx)
self.endMenu.SetSelection(eindx)
chain = self.beginMenu.GetStringSelection()[0]
seqpos = self.beginMenu.GetStringSelection()[3:].strip()
rindx = self.seqWin.getRosettaIndex(self.selectedModel, chain, seqpos)
self.loopBegin = rindx
chain = self.endMenu.GetStringSelection()[0]
seqpos = self.endMenu.GetStringSelection()[3:].strip()
rindx = self.seqWin.getRosettaIndex(self.selectedModel, chain, seqpos)
self.loopEnd = rindx
self.focusView(self.endMenu.GetStringSelection(), self.selectedModel)
self.populatePivots()
except:
pass
def gridClick(self, event):
# Set the selected residue's row to blue so it is easy to see what the selection is
self.selectedr = event.GetRow()
if (self.selectedr >= self.grdLoops.NumberRows):
self.save_model.Disable()
self.selectedr = -1
if (self.selectedr >= len(self.model_names)):
self.save_model.Disable()
event.Skip()
return
for r in range(0, self.grdLoops.NumberRows):
if (r == self.selectedr):
for c in range(0, self.grdLoops.NumberCols):
self.grdLoops.SetCellBackgroundColour(r, c, "light blue")
else:
for c in range(0, self.grdLoops.NumberCols):
self.grdLoops.SetCellBackgroundColour(r, c, "white")
self.grdLoops.Refresh()
# Make sure we're not trying to load an empty row
if (self.selectedr < len(self.model_names)):
self.loopEnd = int(self.begin_seqpos) + int(self.lengths[self.selectedr])
self.indel_model_selected = self.model_names[self.selectedr]
self.save_model.Enable()
else:
self.save_model.Disable()
self.selectedr = -1
event.Skip()
# Remove the previously selected model from the viewer if it's not the same as the one just selected
if (self.previous_indel_model_selected != "" and self.previous_indel_model_selected != self.indel_model_selected):
try:
self.cmd.remove(self.previous_indel_model_selected)
self.cmd.delete(self.previous_indel_model_selected)
except:
pass
# Load the model, zoom in on designed loop
if (self.indel_model_selected != self.previous_indel_model_selected and self.indel_model_selected != ""):
self.cmd.load(self.indel_model_selected, self.indel_model_selected)
self.cmd.show()
self.cmd.show("cartoon")
self.cmd.hide("lines")
self.cmd.hide("sticks")
self.previous_indel_model_selected = self.indel_model_selected
self.cmd.zoom("resi " + str(self.begin_seqpos) + "-" + str(int(self.begin_seqpos) + 3), 2.0)
#self.cmd.color("blue", self.indel_model_selected)
self.cmd.color('white')
self.cmd.color('red', 'ss h')
self.cmd.color('yellow', 'ss s')
self.cmd.select('original', self.selectedModel)
self.cmd.hide('everything', 'original')
self.cmd.deselect()
event.Skip()
def modelMenuSelect(self, event):
# Update the list of positions with the new model
if (self.selectedModel == self.modelMenu.GetStringSelection()):
return
self.selectedModel = self.modelMenu.GetStringSelection()
logInfo("Selected model " + self.selectedModel)
# Get the location of the pose
poseindx = self.seqWin.getPoseIndexForModel(self.selectedModel)
# Read the positions
pose = self.seqWin.poses[poseindx]
positions = []
for ch in pose[0]:
for residue in ch:
if ("ALA CYS ASP GLU PHE GLY HIS ILE LYS LEU MET ASN PRO GLN ARG SER THR VAL TRP TYR ".find(residue.resname) >= 0):
chain = ch.id
if (len(chain.strip()) == 0):
chain = "_"
label = chain + ":" + AA3to1(residue.resname) + str(residue.id[1])
positions.append(label)
# Check to make sure the selected model was a protein by seeing if any amino acids were read in
# if not, disable everything except for the model select control
if (len(positions) == 0):
self.disableAll(model_menu_disable=False, seq_win_disable=False)
return
else:
self.enableAll(save_enable=False)
# Update the beginning and ending positions menus with the available sequence positions
self.beginMenu.Clear()
self.beginMenu.AppendItems(positions[0:len(positions)-1])
if (platform.system() == "Windows"):
self.beginMenu.SetSelection(-1)
self.loopBegin = -1
else:
self.beginMenu.SetSelection(0)
self.loopBegin = 1
self.endMenu.Clear()
self.endMenu.AppendItems(positions[1:])
if (platform.system() == "Windows"):
self.endMenu.SetSelection(-1)
self.loopEnd = -1
else:
self.endMenu.SetSelection(0)
self.loopEnd = 2
#self.txtNStruct.Enable()
#self.populatePivots()
if self.symmetry():
self.symmetric_design.Enable()
def changeLoopType(self, event):
if (self.loopType == "Refine"):
self.loopType = "Reconstruct"
# if (platform.system() == "Darwin"):
# self.btnLoopType.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnLoopType_Reconstruct.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
# else:
self.btnLoopType.SetLabel(self.loopType)
self.btnLoopType.SetToolTipString("Reconstruct the current loop using the wildtype sequence")
self.btnPerturb.Enable()
self.txtNStruct.Enable()
elif (self.loopType == "Reconstruct"):
self.loopType = "De Novo"
# if (platform.system() == "Darwin"):
# self.btnLoopType.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnLoopType_DeNovo.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
# else:
self.btnLoopType.SetLabel(self.loopType)
self.btnLoopType.SetToolTipString("Construct a new loop with a new sequence")
self.txtSequence.Enable()
else:
self.loopType = "Refine"
# if (platform.system() == "Darwin"):
# self.btnLoopType.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnLoopType_Refine.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
# else:
self.btnLoopType.SetLabel(self.loopType)
self.btnLoopType.SetToolTipString("Refine a pre-existing loop using the high resolution KIC remodeler only")
self.txtSequence.Disable()
self.btnPerturb.Disable()
self.txtNStruct.Disable()
logInfo("Changed loop type to " + self.loopType)
def changePerturbType(self, event):
if (self.perturbType == "Perturb+Refine"):
self.perturbType = "Perturb Only, Fullatom"
# if (platform.system() == "Darwin"):
# self.btnPerturb.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnPerturb_Fullatom.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
# else:
self.btnPerturb.SetLabel(self.perturbType)
self.btnPerturb.SetToolTipString("Perform only KIC coarse perturbations but convert outputted models to repacked fullatom PDBs")
#elif (self.perturbType == "Perturb Only, Fullatom"):
# self.perturbType = "Perturb Only, Centroid"
# self.btnPerturb.SetToolTipString("Perform only KIC coarse perturbations and leave outputted PDBs in coarse centroid mode")
else:
self.perturbType = "Perturb+Refine"
# if (platform.system() == "Darwin"):
# self.btnPerturb.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnPerturb_Refine.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
# else:
self.btnPerturb.SetLabel(self.perturbType)
self.btnPerturb.SetToolTipString("Perform KIC coarse perturbation followed by high resolution refinement")
logInfo("Changed perturbation type to " + self.perturbType)
def setOutputDir(self, event):
logInfo("Clicked Output Dir button")
dlg = wx.DirDialog(
self, message="Choose a directory",
defaultPath=self.seqWin.cwd,
style=wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST)
if (dlg.ShowModal() == wx.ID_OK):
path = dlg.GetPath()
self.outputdir = str(path)
# Change cwd to the last opened file
self.seqWin.cwd = self.outputdir
self.seqWin.saveWindowData(None)
self.lblDir.SetLabel(self.outputdir)
self.lblDir.SetForegroundColour("#FFFFFF")
if (platform.system() == "Linux"):
resizeTextControlForUNIX(self.lblDir, 130, 190)
logInfo("Set output directory as " + self.outputdir)
else:
logInfo("Cancelled out of Load PDB")
def populatePivots(self):
self.menuPivot.Enable()
# Get the location of the pose
poseindx = self.seqWin.getPoseIndexForModel(self.selectedModel)
# Read the positions
pose = self.seqWin.poses[poseindx]
positions = []
ires = 1
for ch in pose[0]:
for residue in ch:
if (ires >= self.loopBegin and ires <= self.loopEnd):
if ("ALA CYS ASP GLU PHE GLY HIS ILE LYS LEU MET ASN PRO GLN ARG SER THR VAL TRP TYR ".find(residue.resname) >= 0):
chain = ch.id
if (len(chain.strip()) == 0):
chain = "_"
label = chain + ":" + AA3to1(residue.resname) + str(residue.id[1])
positions.append(label)
ires = ires + 1
self.menuPivot.Clear()
self.menuPivot.AppendItems(positions)
self.menuPivot.SetSelection(0)
def beginMenuSelect(self, event):
try:
chain = self.beginMenu.GetStringSelection()[0]
seqpos = self.beginMenu.GetStringSelection()[3:].strip()
rindx = self.seqWin.getRosettaIndex(self.selectedModel, chain, seqpos)
self.loopBegin = rindx
# If this new loop begin is further down than what is set for loop end, then it needs
# to be reset and the user should be notified
if (self.loopEnd >= 0 and self.loopEnd <= rindx):
if (platform.system() == "Windows"):
self.endMenu.SetSelection(-1)
self.loopEnd = -1
else:
self.endMenu.SetSelection(self.beginMenu.GetSelection()) # This clears the menu, SetStringSelection/SetValue doesn't seem to work
self.endMenuSelect(event)
#wx.MessageBox("Your selected end loop value is no longer valid. Please choose an ending position after the one you've selected here.", "Loop End No Longer Valid", wx.OK|wx.ICON_EXCLAMATION)
self.focusView(self.beginMenu.GetStringSelection(), self.selectedModel)
logInfo("Selected " + self.beginMenu.GetStringSelection() + " as the beginning of the loop")
except:
# Probably the user left the field blank, do nothing
pass
def endMenuSelect(self, event):
try:
chain = self.endMenu.GetStringSelection()[0]
seqpos = self.endMenu.GetStringSelection()[3:].strip()
rindx = self.seqWin.getRosettaIndex(self.selectedModel, chain, seqpos)
self.loopEnd = rindx
# If this new loop begin is further up than what is set for loop begin, then it needs
# to be reset and the user should be notified
if (self.loopBegin >= 0 and self.loopBegin >= rindx):
if (platform.system() == "Windows"):
self.beginMenu.SetSelection(-1)
self.loopBegin = -1
else:
self.beginMenu.SetSelection(self.endMenu.GetSelection()) # This clears the menu, SetStringSelection/SetValue doesn't seem to work
self.beginMenuSelect(event)
wx.MessageBox("Your selected begin loop value is no longer valid. Please choose a beginning position before the one you've selected here.", "Loop Begin No Longer Valid", wx.OK|wx.ICON_EXCLAMATION)
self.focusView(self.endMenu.GetStringSelection(), self.selectedModel)
logInfo("Selected " + self.endMenu.GetStringSelection() + " as the ending of the loop")
except:
# Probably the user left the field blank, do nothing
pass
def minMenuSelect(self, event):
self.minLength = int(self.minMenu.GetStringSelection())
def maxMenuSelect(self,event):
self.maxLength = int(self.maxMenu.GetStringSelection())
def updateLoops(self):
# Redraw the loops grid with current loop information
scrollpos = self.grdLoops.GetScrollPos(wx.VERTICAL)
if (self.grdLoops.NumberRows > 0):
self.grdLoops.DeleteRows(0, self.grdLoops.NumberRows)
if (len(self.loops) > 0):
self.grdLoops.AppendRows(len(self.loops))
row = 0
for [loopType, sequence, model, begin, pivot, end] in self.loops:
self.grdLoops.SetRowLabelValue(row, loopType)
self.grdLoops.SetCellValue(row, 0, sequence)
chainID, resindx = self.seqWin.getResidueInfo(model, begin)
if (len(chainID.strip()) == 0):
chainID = "_"
self.grdLoops.SetCellValue(row, 1, chainID + "|" + self.seqWin.getResidueTypeFromRosettaIndx(model, begin) + str(resindx))
chainID, resindx = self.seqWin.getResidueInfo(model, pivot)
if (len(chainID.strip()) == 0):
chainID = "_"
self.grdLoops.SetCellValue(row, 2, chainID + "|" + self.seqWin.getResidueTypeFromRosettaIndx(model, pivot) + str(resindx))
chainID, resindx = self.seqWin.getResidueInfo(model, end)
if (len(chainID.strip()) == 0):
chainID = "_"
self.grdLoops.SetCellValue(row, 3, chainID + "|" + self.seqWin.getResidueTypeFromRosettaIndx(model, end) + str(resindx))
readOnly = wx.grid.GridCellAttr()
readOnly.SetReadOnly(True)
readOnly.SetAlignment(wx.ALIGN_CENTRE, wx.ALIGN_CENTRE)
readOnly.SetBackgroundColour("#FFFFFF")
self.grdLoops.SetRowAttr(row, readOnly)
row += 1
self.grdLoops.Scroll(0, scrollpos)
def saveClick(self, event):
# Borrowed from sequence.py starting at line 2580
# self.indel_model_selected is the current model selected
while(True):
dlg = wx.FileDialog(
self, message="Save a PDB File",
defaultDir=self.seqWin.cwd,
defaultFile=self.indel_model_selected,
wildcard="PDB Files (*.pdb)|*.pdb",
style=wx.SAVE | wx.CHANGE_DIR)
if (dlg.ShowModal() == wx.ID_OK):
path = dlg.GetPath()
# Change cwd to the last opened file
if (platform.system() == "Windows"):
lastDirIndx = path.rfind("\\")
else:
lastDirIndx = path.rfind("/")
self.cwd = str(path[0:lastDirIndx])
#self.saveWindowData(None)
filename = str(path).split(".pdb")[0] + ".pdb"
# Does it exist already? If so, ask if user wants to overwrite it
if (os.path.isfile(filename)):
dlg2 = wx.MessageDialog(self, "The file " + filename + " already exists. Overwrite it?", "Filename Already Exists", wx.YES_NO | wx.ICON_QUESTION | wx.CENTRE)
if (dlg2.ShowModal() == wx.ID_NO):
dlg2.Destroy()
logInfo("Cancelled Indel save operation due to filename already existing")
continue
dlg2.Destroy()
goToSandbox()
logInfo("Saved a PDB to " + filename.strip())
self.cmd.save(filename.strip(), self.indel_model_selected)
fixPyMOLSave(filename.strip())
else:
logInfo("Cancelled out of Save PDB (Indel)")
break
dlg.Destroy()
def saveAll(self, event):
# Borrowed from sequence.py starting at line 2580
# All the models are in the sandbox
number_of_models = self.grdLoops.NumberRows
while(True):
dlg = wx.FileDialog(
self, message="Save all PDB files",
defaultDir=self.seqWin.cwd,
defaultFile=self.indel_model_selected,
wildcard="PDB Files (*.pdb)|*.pdb",
style=wx.SAVE | wx.CHANGE_DIR)
if (dlg.ShowModal() == wx.ID_OK):
path = dlg.GetPath()
# Change cwd to the last opened file
if (platform.system() == "Windows"):
lastDirIndx = path.rfind("\\")
else:
lastDirIndx = path.rfind("/")
self.cwd = str(path[0:lastDirIndx])
#self.saveWindowData(None)
filename = str(path).split(".pdb")[0] + ".pdb"
# Does it exist already? If so, ask if user wants to overwrite it
if (os.path.isfile(filename)):
dlg2 = wx.MessageDialog(self, "The file " + filename + " already exists. Overwrite it?", "Filename Already Exists", wx.YES_NO | wx.ICON_QUESTION | wx.CENTRE)
if (dlg2.ShowModal() == wx.ID_NO):
dlg2.Destroy()
logInfo("Cancelled Indel save operation due to filename already existing")
continue
dlg2.Destroy()
goToSandbox()
logInfo("Saved a PDB to " + filename.strip())
# Copy all of the indel models in the sandbox to the desired directory
all_model_files = [x for x in os.listdir(os.getcwd()) if x[:5] == "INDEL"]
from shutil import copy2
n = 1
for m in all_model_files:
if ".log" in m: continue
# pdb = self.selectedModel
# q = m.strip(".pdb")
q = filename.strip(".pdb")
# out = q + "_" + self.selectedModel + ".pdb"
out = "%s_%i.pdb"%(q,n)
print m,q, out
print self.cwd+'/'+out
copy2(m,out)
n += 1
else:
logInfo("Cancelled out of Save PDB (Indel)")
break
dlg.Destroy()
def add(self, event):
# Is the loop valid?
if (self.loopBegin < 0 or self.loopEnd < 0 or self.loopBegin >= self.loopEnd):
dlg = wx.MessageDialog(self, "You do not have a valid loop specified!", "Loop Not Valid", wx.OK | wx.ICON_ERROR | wx.CENTRE)
dlg.ShowModal()
dlg.Destroy()
return
# If we're doing a de novo search, is the sequence specified?
if (self.loopType == "De Novo"):
sequence = self.txtSequence.GetValue().strip().upper()
for AA in sequence:
if (not(AA in "ACDEFGHIKLMNPQRSTVWY")):
wx.MessageBox("The sequence you have provided is invalid. Please only use canonical amino acids.", "Sequence Invalid", wx.OK|wx.ICON_EXCLAMATION)
return
if (len(sequence) == 0):
wx.MessageBox("You have indicated that you want to design a loop de novo but have not provided the putative sequence of the loop. Please provide one or switch to use a pre-existing loop.", "No Sequence Indicated", wx.OK|wx.ICON_EXCLAMATION)
return
else:
sequence = ""
# Did the model change? If yes, and loops is not empty, then tell the user that this
# will remove all loops to make room for the new model
if (len(self.loops) > 0 and self.modelMenu.GetValue() != self.loops[0][2]):
dlg = wx.MessageDialog(self, "You are attempting to add a loop for a different model. If you continue, all current loops will be removed. Is this okay?", "Loop Model Changed", wx.YES_NO | wx.ICON_QUESTION | wx.CENTRE)
if (dlg.ShowModal() == wx.ID_NO):
dlg.Destroy()
return
dlg.Destroy()
self.loops = []
# Does this loop overlap with a previously-specified loop? If so, do not add
i = 1
for loopType, s, model, begin, pivot, end in self.loops:
if ((self.loopBegin >= begin and self.loopBegin <= end) or (self.loopEnd >= begin and self.loopEnd <= end)):
dlg = wx.MessageDialog(self, "The loop you have indicated overlaps with loop " + str(i) + ". Either change the current loop or remove loop " + str(i) + ".", "Loop Overlap", wx.OK | wx.ICON_ERROR | wx.CENTRE)
dlg.ShowModal()
dlg.Destroy()
return
i += 1
# Add this loop to the list of loops currently active
self.loops.append([self.loopType, sequence, self.modelMenu.GetValue(), self.loopBegin, self.menuPivot.GetSelection() + self.loopBegin, self.loopEnd])
self.updateLoops()
def remove(self, event):
# For this function, remove the indicated loop
self.activate()
logInfo("Remove button clicked")
if (self.selectedr >= 0 and self.selectedr < len(self.loops)):
self.loops.pop(self.selectedr)
self.selectedr = -1
self.updateLoops()
def clear(self, event):
logInfo("Clear button clicked")
# Remove everything
self.loops = []
self.updateLoops()
def viewMenuSelect(self, event):
try:
self.focusView(self.viewMenu.GetStringSelection(), self.selectedModel, "kic_view")
logInfo("Viewing " + self.viewMenu.GetStringSelection())
except:
# Probably the user left the field blank, do nothing
pass
def focusView(self, posID, origmodel, newmodel=None):
model = origmodel
loopEnd = self.loopEnd
if (posID != "Whole Loop"):
chain = posID[0]
seqpos = posID[3:].strip()
# Loop end needs to be recalculated if this is a view of the de novo loop since the
# de novo loop may be a different size
if (newmodel and len(self.txtSequence.GetValue()) > 0):
loopEnd = self.loopBegin + len(self.txtSequence.GetValue()) + 1 # For the anchor
else:
i = 1
wholeloop_data = []
for ch in self.KICView[0]:
for residue in ch:
if (i >= self.loopBegin and i <= loopEnd):
chain = ch.id
seqpos = str(residue.id[1])
wholeloop_data.append((chain, seqpos))
i = i + 1
# Find the neighborhood view
if (newmodel):
firstmodel = newmodel
else:
firstmodel = origmodel
self.cmd.hide("all")
if (chain == " " or chain == "_"):
self.cmd.select("viewsele", "resi " + seqpos + " and model " + firstmodel)
else:
self.cmd.select("viewsele", "resi " + seqpos + " and model " + firstmodel + " and chain " + chain)
# If the loop is validly defined, let's show the whole loop instead of individual residues
if ((self.loopBegin >= 0 and self.loopEnd >= 0 and not(newmodel)) or posID == "Whole Loop"):
for i in range(self.loopBegin, loopEnd):
if (not(newmodel)):
(chain, seqpos) = self.seqWin.getResidueInfo(self.selectedModel, i)
else:
(chain, seqpos) = wholeloop_data[i-self.loopBegin]
if (chain == "_" or len(chain.strip()) == 0):
self.cmd.select("viewsele", "viewsele or (resi " + str(seqpos) + " and model " + firstmodel + ")")
else:
self.cmd.select("viewsele", "viewsele or (resi " + str(seqpos) + " and chain " + chain + " and model " + firstmodel + ")")
self.cmd.select("exviewsele", "model " + firstmodel + " within 12 of viewsele")
self.cmd.show("cartoon", "exviewsele")
self.cmd.hide("ribbon", "exviewsele")
self.cmd.show("sticks", "exviewsele")
self.cmd.set_bond("stick_radius", 0.1, "exviewsele")
# Display energy labels for new structures
if (newmodel):
relabelEnergies(self.KICView, self.residue_E, newmodel, self.scoretypeMenu.GetStringSelection(), self.cmd, seqpos)
self.cmd.label("not exviewsele", "")
self.cmd.zoom("exviewsele")
#if (chain == " " or chain == "_"):
# self.cmd.select("viewsele", "resi " + seqpos + " and model " + firstmodel)
#else:
# self.cmd.select("viewsele", "resi " + seqpos + " and model " + firstmodel + " and chain " + chain)
self.cmd.show("sticks", "viewsele")
self.cmd.set_bond("stick_radius", 0.25, "viewsele")
# Highlight this residue in PyMOL
self.cmd.select("seqsele", "viewsele")
if (newmodel):
# If this is after a protocol, also show the original structure in green for comparison
self.cmd.select("oldsele", "model " + origmodel + " and symbol c")
self.cmd.color("green", "oldsele")
self.cmd.set("cartoon_color", "green", "oldsele")
#if (chain == " " or chain == "_"):
#self.cmd.select("viewsele", "resi " + seqpos + " and model " + origmodel)
#else:
#self.cmd.select("viewsele", "resi " + seqpos + " and model " + origmodel + " and chain " + chain)
#self.cmd.select("viewsele", "model " + origmodel + " within 12 of viewsele")
self.cmd.select("exviewsele", "model " + origmodel + " within 12 of viewsele")
self.cmd.show("cartoon", "exviewsele")
self.cmd.hide("ribbon", "exviewsele")
self.cmd.show("sticks", "exviewsele")
self.cmd.set_bond("stick_radius", 0.1, "exviewsele")
self.cmd.zoom("exviewsele")
self.cmd.delete("oldsele")
#if (chain == " " or chain == "_"):
#self.cmd.select("exviewsele", "resi " + seqpos + " and model " + origmodel)
#else:
#self.cmd.select("viewsele", "resi " + seqpos + " and model " + origmodel + " and chain " + chain)
#self.cmd.show("sticks", "viewsele")
#self.cmd.set_bond("stick_radius", 0.25, "viewsele")
self.cmd.enable("seqsele")
self.cmd.delete("viewsele")
self.cmd.select("exviewsele", "solvent")
self.cmd.hide("everything", "exviewsele")
self.cmd.delete("exviewsele")
self.seqWin.selectUpdate(False)
def scoretypeMenuSelect(self, event):
# Make sure there is even a PyMOL_Mover pose loaded
if (self.selectedModel == ""):
return
logInfo("Changed scoretype view to " + self.scoretypeMenu.GetStringSelection())
recolorEnergies(self.KICView, self.residue_E, "kic_view", self.scoretypeMenu.GetStringSelection(), self.cmd)
self.viewMenuSelect(event) # To update all the labels
def serverToggle(self, event):
if (self.serverOn):
self.serverOn = False
# if (platform.system() == "Darwin"):
# self.save_all.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnServer_Off.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
# else:
self.save_all.SetLabel("Server Off")
self.save_all.SetToolTipString("Perform KIC simulations locally")
logInfo("Turned off KIC server usage")
else:
self.serverOn = True
# if (platform.system() == "Darwin"):
# self.save_all.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnServer_On.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
# else:
self.save_all.SetLabel("Server On")
self.save_all.SetToolTipString("Perform KIC simulations on a remote server")
logInfo("Turned on KIC server usage")
def cancelINDEL(self):
logInfo("Canceled INDEL operation")
try:
os.remove("INDELinput")
except:
pass
try:
os.remove("coarsekicinputtemp")
except:
pass
try:
os.remove("repacked.pdb")
except:
pass
try:
os.remove("finekicinput")
except:
pass
self.tmrKIC.Stop()
self.seqWin.cannotDelete = False
#self.scoretypeMenu.Disable()
#self.viewMenu.Disable()
self.modelMenu.Enable()
self.beginMenu.Enable()
self.endMenu.Enable()
self.minMenu.Enable()
self.maxMenu.Enable()
self.ResultsMin.Enable()
self.ResultsMax.Enable()
self.btnClear.Enable()
self.btnCst.Enable()
#self.btnLoopType.Enable()
#if (self.loopType == "De Novo"):
# self.txtSequence.Enable()
# if (platform.system() == "Darwin"):
# self.btnINDEL.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnKIC.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
# else:
self.btnINDEL.SetLabel("Model!")
self.buttonState = "Model!"
self.btnINDEL.SetToolTipString("Perform INDEL simulation with selected parameters")
deleteInputFiles()
self.parent.parent.restartDaemon()
self.parent.GoBtn.Enable()
# Get rid of the messages
for i in range(0, len(self.seqWin.msgQueue)):
if (self.seqWin.msgQueue[i].find("Performing INDEL loop modeling, please be patient...") >= 0):
self.seqWin.msgQueue.pop(i)
break
for i in range(0, len(self.seqWin.msgQueue)):
if (self.seqWin.msgQueue[i].find("Performing rotamer repacking") >= 0):
self.seqWin.msgQueue.pop(i)
break
for i in range(0, len(self.seqWin.msgQueue)):
if (self.seqWin.msgQueue[i].find("Performing refined KIC loop modeling") >= 0):
self.seqWin.msgQueue.pop(i)
break
if (len(self.seqWin.msgQueue) > 0):
self.seqWin.labelMsg.SetLabel(self.seqWin.msgQueue[len(self.seqWin.msgQueue)-1])
else:
self.seqWin.labelMsg.SetLabel("")
self.seqWin.labelMsg.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
self.seqWin.labelMsg.SetForegroundColour("#FFFFFF")
def save_constraints(self):
constraints = self.ConstraintSet
if len(constraints) != 0:
self.areCST = True
goToSandbox()
output = open("indel.cst",'w+')
for [pdb,poseindx,constraint] in constraints:
output.write('%s\n'%(constraint))
output.close()
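# For reference, a minimal sketch of what indel.cst could contain, assuming
# the standard Rosetta constraint-file syntax; the atom names, residue
# numbers and function parameters below are purely hypothetical:
#
#   AtomPair CA 10 CA 25 HARMONIC 8.0 1.0
#   AtomPair CA 12 CA 30 HARMONIC 6.5 1.0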
def INDELClick(self, event):
# This is also the "Finalize!" button
if (self.buttonState == "Model!"):
# Some checking to make sure input parameters make sense
# TODO make sure that the two end residues aren't selected. AnchoredGraftMover doesn't like grafting at terminals
if (self.minLength > self.maxLength):
wx.MessageBox("Please choose a maximum length that is greater than or equal to the minimum length.", "Invalid loop lengths" , wx.OK|wx.ICON_EXCLAMATION)
return
self.minResultsval = self.ResultsMin.GetValue()
self.maxResultsval = self.ResultsMax.GetValue()
if (self.maxResultsval < self.minResultsval):
wx.MessageBox("Please enter a maximum results value that is higher than the minimum results value.", "Invalid parameter", wx.OK|wx.ICON_EXCLAMATION)
return
if (self.minResultsval <= 0):
self.minResultsval = 1
if (self.maxResultsval <= 0):
wx.MessageBox("Please enter a maximum results value that is greater than or equal to 1.", "Invalid parameter", wx.OK|wx.ICON_EXCLAMATION)
return
self.seqWin.labelMsg.SetLabel("Performing INDEL loop modeling, please be patient...")
self.seqWin.labelMsg.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
self.seqWin.labelMsg.SetForegroundColour("#FFFFFF")
self.seqWin.msgQueue.append("Performing INDEL loop modeling, please be patient...")
self.disableAll(model_menu_disable=True, seq_win_disable=True)
# if (platform.system() == "Darwin"):
# self.btnINDEL.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnKIC_Cancel.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
# else:
self.btnINDEL.SetLabel("Cancel!")
self.buttonState = "Cancel!"
self.btnINDEL.SetToolTipString("Cancel the INDEL simulation")
self.stage = 1
logInfo("Clicked the INDEL button")
self.save_constraints()
self.tmrKIC = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.threadINDEL, self.tmrKIC)
self.tmrKIC.Start(1000)
elif (self.buttonState == "Cancel!"):
dlg = wx.MessageDialog(self, "Are you sure you want to cancel the INDEL simulation? All progress will be lost.", "Cancel KIC Simulation", wx.YES_NO | wx.ICON_QUESTION | wx.CENTRE)
result = dlg.ShowModal()
if (result == wx.ID_YES):
self.cancelINDEL()
dlg.Destroy()
else:
# Finalize button, ask whether the changes will be accepted or rejected
dlg = wx.MessageDialog(self, "Do you want to accept the results of this loop modeling session?", "Accept/Reject Model", wx.YES_NO | wx.CANCEL | wx.ICON_QUESTION | wx.CENTRE)
result = dlg.ShowModal()
if (result == wx.ID_YES):
logInfo("Accepted KIC model")
accept = True
elif (result == wx.ID_NO):
logInfo("Rejected KIC model")
accept = False
else:
logInfo("Cancelled Finalize operation")
dlg.Destroy()
return
# Try to get rid of working loop files in sandbox
temp_loop_files = glob.glob('loopout_*')
try:
for temp_loop in temp_loop_files:
os.remove(temp_loop)
except:
pass
# Clear grid of loops
self.grdLoops.ClearGrid()
self.grdLoops.DeleteRows(0, self.grdLoops.GetNumberRows())
# Keep track of the temporary name so we can remove it from the pymol window in a second
# Rename the chosen model pdb file to its final name
pymol_indel_model_selected = self.indel_model_selected
try:
os.rename(self.indel_model_selected, self.selectedModel + "_INDEL.pdb")
self.indel_model_selected = self.selectedModel + "_INDEL.pdb"
except:
pass
# Try to get rid of the models not chosen
for i in range(len(self.model_names)):
try:
os.remove(self.model_names[i])
except:
pass
# Clear internal list of model data
del self.model_names[:]
del self.lengths[:]
del self.scores[:]
# Re-enable controls
dlg.Destroy()
self.enableAll(save_enable=False)
#Pop message out of queue
for i in range(0, len(self.seqWin.msgQueue)):
if (self.seqWin.msgQueue[i].find("Performing INDEL loop modeling, please be patient...") >= 0):
self.seqWin.msgQueue.pop(i)
break
self.seqWin.labelMsg.SetLabel("")
# if (platform.system() == "Darwin"):
# self.btnINDEL.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnKIC.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
# else:
self.btnINDEL.SetLabel("Model!")
self.buttonState = "Model!"
self.btnINDEL.SetToolTipString("Perform INDEL simulation with selected parameters")
self.cmd.label("all", "")
self.seqWin.cannotDelete = False
if (not(accept)):
try:
self.save_model.Disable()
self.save_all.Disable()
self.cmd.remove(pymol_indel_model_selected)
self.cmd.delete(pymol_indel_model_selected)
self.cmd.show()
self.cmd.show("cartoon")
self.cmd.hide("lines")
self.cmd.hide("sticks")
except:
pass
return
if (accept and self.selectedr == -1):
return
# Get rid of the original pose, save the designed pose, and reload the structure in PyMOL
poseindx = -1
for r in range(0, len(self.seqWin.IDs)):
if (self.seqWin.IDs[r].find(self.selectedModel) >= 0):
poseindx = r
break
try:
self.cmd.load(self.indel_model_selected, self.indel_model_selected)
# Color final model by ss
defaultPyMOLView(self.cmd, self.indel_model_selected)
self.cmd.color('white')
self.cmd.color('red', 'ss h')
self.cmd.color('yellow', 'ss s')
self.cmd.remove(self.selectedModel)
self.cmd.delete(self.selectedModel)
self.cmd.remove(pymol_indel_model_selected)
self.cmd.delete(pymol_indel_model_selected)
self.seqWin.reloadPose(poseindx, self.indel_model_selected, self.indel_model_selected)
# IMPORTANT: You have to replace the model in the sandbox with the new designed model
os.remove(self.selectedModel + ".pdb")
self.selectedModel = self.indel_model_selected
except Exception as e:
# Some weird error happened, do nothing instead of crashing
print "Bug at accept button click"
print e.message
import traceback; traceback.print_exc()
pass
def recoverFromError(self, msg=""):
# This function tells the user what the error was and tries to revert the protocol
# back to the pre-daemon state so the main GUI can continue to be used
if (len(msg) == 0):
f = open("errreport", "r")
errmsg = "An error was encountered during the protocol:\n\n"
for aline in f:
errmsg = errmsg + str(aline)
f.close()
os.remove("errreport")
else:
errmsg = msg
logInfo("Error Encountered")
logInfo(errmsg)
if (platform.system() == "Windows"):
sessioninfo = os.path.expanduser("~") + "\\InteractiveRosetta\\sessionlog"
else:
sessioninfo = os.path.expanduser("~") + "/.InteractiveRosetta/sessionlog"
errmsg = errmsg + "\n\nIf you don't know what caused this, send the file " + sessioninfo + " to a developer along with an explanation of what you did."
# You have to use a MessageDialog because the MessageBox doesn't always work for some reason
dlg = wx.MessageDialog(self, errmsg, "Error Encountered", wx.OK|wx.ICON_EXCLAMATION)
dlg.ShowModal()
dlg.Destroy()
self.seqWin.cannotDelete = False
self.parent.GoBtn.Enable()
self.modelMenu.Enable()
# self.btnLoopType.Enable()
self.beginMenu.Enable()
self.endMenu.Enable()
self.txtSequence.Enable()
self.btnINDEL.Enable()
# if (platform.system() == "Darwin"):
# self.btnINDEL.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnKIC.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
# else:
self.btnINDEL.SetLabel("Model!")
self.buttonState = "Model!"
# Get rid of the messages
for i in range(0, len(self.seqWin.msgQueue)):
if (self.seqWin.msgQueue[i].find("Performing INDEL loop modeling, please be patient...") >= 0):
self.seqWin.msgQueue.pop(i)
break
if (len(self.seqWin.msgQueue) > 0):
self.seqWin.labelMsg.SetLabel(self.seqWin.msgQueue[len(self.seqWin.msgQueue)-1])
else:
self.seqWin.labelMsg.SetLabel("")
self.seqWin.labelMsg.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
self.seqWin.labelMsg.SetForegroundColour("#FFFFFF")
self.cancelINDEL()
def threadINDEL(self, event):
# Why am I doing this ridiculous timer thing for this KIC protocol?
# Because apparently on Linux there's some kind of weird bug that manifests when you
# attempt to run time.sleep loops looking for files to be generated
# Pango develops a phobia of periods in strings if you do that????
# Using this staged timer setup eliminates the error
# What is the problem? I don't know. Why does this fix it? I don't know
# The people on StackOverflow said to do it and it fixed it -_-
# I think it has something to do with Linux not liking things like "time.sleep"
# and calls to wx in threads
# Dump a file with the loop modeling parameters for the daemon to pick up
goToSandbox()
if (self.stage == 1):
self.tmrKIC.Stop()
self.timeoutCount = 0
#self.nstruct = int(self.txtNStruct.GetValue())
f = open("INDELinputtemp", "w")
pdbfile = self.selectedModel + ".pdb"
# Dump the PDB from PyMOL first in case the coordinates were altered by the user
self.cmd.save(pdbfile.strip(), "model " + self.selectedModel)
fixPyMOLSave(pdbfile.strip())
chain = self.endMenu.GetStringSelection()[0]
begin_seqpos = self.beginMenu.GetStringSelection()[3:]
self.begin_seqpos = begin_seqpos
end_seqpos = self.endMenu.GetStringSelection()[3:]
begin_index = self.seqWin.getRosettaIndex(self.selectedModel, chain, begin_seqpos)
end_index = self.seqWin.getRosettaIndex(self.selectedModel, chain, end_seqpos)
self.maxResultsval = self.ResultsMax.GetValue()
self.minResultsval = self.ResultsMin.GetValue()
self.progress = None
# If the user wants to check collisions against other loaded models, get their filenames
complex_pdbs = []
for i in range(self.seqWin.SeqViewer.NumberRows):
complex_model = self.seqWin.getModelForChain(i)
if self.selectedModel != complex_model:
complex_pdbs.append("COMPLEX\t" + complex_model + ".pdb\t")
if self.symmetric_design.GetValue() == False:
self.symmetry_value = 1
# Write out input file that gets picked up by the daemon
collision_cutoff = 2.0
f.write("SCAFFOLD\t" + pdbfile + "\n")
f.write("ANCHORS\t" + str(begin_index) + "\t" + str(end_index) + "\n")
f.write("RANGE\t" + str(self.minLength) + "\t" + str(self.maxLength) + "\n")
f.write("MIN_RESULTS\t" + str(self.minResultsval) + "\n")
f.write("MAX_RESULTS\t" + str(self.maxResultsval) + "\n")
f.write("PRESERVE_SEQUENCE\t" + str(self.preserve_sequence.GetValue()).upper() + "\n")
f.write("SYMMETRY\t" + str(self.symmetry_value) + "\n")
f.write("DUPLICATE_CUTOFF\t" + str(self.RedundancyCutoff.GetValue()) + "\n")
f.write("COLLISION\t" + str(collision_cutoff) + "\n")
f.write("FILEEXTENSION\t%s_\n"%(pdbfile))
if self.areCST: f.write("CONSTRAINTS\tindel.cst\n")
# f.write("SCOREFXN\t%s\n"%(self.selectWin.SelectScorefxnBtn.GetLabel()))
f.write("SCOREFXN\t%s\n"%(self.selectWin.weightsfile))
if len(complex_pdbs) > 0:
for pdb in complex_pdbs:
f.write(pdb)
f.close()
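# For reference, a minimal sketch of the INDELinput file written above
# (fields are tab-separated; every value below is a hypothetical example):
#
#   SCAFFOLD          mymodel.pdb
#   ANCHORS           45  52
#   RANGE             3   8
#   MIN_RESULTS       1
#   MAX_RESULTS       25
#   PRESERVE_SEQUENCE FALSE
#   SYMMETRY          1
#   DUPLICATE_CUTOFF  1.0
#   COLLISION         2.0
#   FILEEXTENSION     mymodel.pdb_
#   SCOREFXN          ref2015.wts
#   CONSTRAINTS       indel.cst   (written only when constraints were defined)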
os.rename("INDELinputtemp", "INDELinput")
self.usingServer = False
logInfo("INDEL input uploaded locally at INDELinput")
self.stage = 2
#if (self.perturbType == "Perturb Only, Centroid"):# or self.loopType == "Refine"):
# self.stage = 4
self.looptimecount = 0
self.timeout = 18000000
#self.progress = wx.ProgressDialog("KIC Progress", "Modeling loops in centroid mode...", 100, style=wx.PD_CAN_ABORT | wx.PD_APP_MODAL | wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME)
self.loop_indx = 0
self.last_progress_indx = 99
self.tmrKIC.Start(1000)
elif (self.stage == 2):
if (os.path.isfile("INDELoutput")):
self.tmrKIC.Stop()
# Pop this message out of the queue
for i in range(0, len(self.seqWin.msgQueue)):
if (self.seqWin.msgQueue[i].find("Performing INDEL loop modeling, please be patient...") >= 0):
self.seqWin.msgQueue.pop(i)
break
# Parse output file that gives us the filenames, energies, and insertion lengths of all the results
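# Each line is tab-separated as <model_file> <score> <length>; a
# hypothetical example line: "INDEL_mymodel.pdb_1.pdb	-245.6	5"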
f = open("INDELoutput")
self.model_names = []
self.scores = []
self.lengths = []
for line in f:
tmp = line.split("\t")
self.model_names.append(tmp[0])
self.scores.append(tmp[1])
self.lengths.append(tmp[2].strip())
f.close()
# Clear and populate table
self.grdLoops.ClearGrid()
self.grdLoops.AppendRows(len(self.model_names))
row = 0
for score, length in zip(self.scores, self.lengths):
self.grdLoops.SetCellValue(row, 0, length)
self.grdLoops.SetCellValue(row, 1, score)
row += 1
self.btnClear.Disable()
# We can get rid of the top-level output file now
try:
os.remove("INDELoutput")
self.progress.Destroy()
os.remove("progress")
except:
pass
#self.KICView = self.seqWin.pdbreader.get_structure("kic_view", "INDELoutput.pdb")
self.btnINDEL.Enable()
self.save_all.Enable()
#self.save_model.Enable()
#self.enableControls()
#self.selectedModel = ""
# if (platform.system() == "Darwin"):
# self.btnINDEL.SetBitmapLabel(bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/kic/btnKIC_Finalize.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
# else:
self.btnINDEL.SetLabel("Finalize!")
self.buttonState = "Finalize!"
self.btnINDEL.SetToolTipString("Accept or reject protocol results")
#os.remove("INDELoutput.pdb")
elif (os.path.isfile("errreport")):
# Something went wrong, tell the user about it (loop sequence probably too short)
if self.progress is not None: self.progress.Destroy()
self.tmrKIC.Stop()
self.parent.parent.restartDaemon() # Has to happen because coarse KIC is threaded
self.recoverFromError()
elif (os.path.isfile("progress")):
# The local daemon can output its progress to keep the GUI updated about
# how far along it is, along with a message
# This is optional
# See job/__init__.py for more information
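# A hypothetical progress file could therefore look like:
#   37/100
#   Grafting candidate loops...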
if (self.progress is None):
self.progress = wx.ProgressDialog("INDEL Progress", "Performing INDEL design job...", 100, style=wx.PD_CAN_ABORT | wx.PD_APP_MODAL | wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME)
fin = open("progress", "r")
data = fin.readlines()
fin.close()
# First line should be a fraction
try:
num = float(data[0].split("/")[0].strip())
den = float(data[0].split("/")[1].strip())
# Convert to a percentage
percent = int(num / den * 100.0)
if (percent > 99):
# Let the appearance of the output file kill the progress bar
percent = 100
except:
return
try:
# The optional second line is a new message
newmsg = data[1].strip()
(keepGoing, skip) = self.progress.Update(percent, newmsg)
except:
(keepGoing, skip) = self.progress.Update(percent)
if (not(keepGoing)):
# User clicked "Cancel" on the progress bar
self.cancelINDEL()
self.progress.Destroy()
self.looptimecount = self.looptimecount + 1
if (self.looptimecount > self.timeout):
# The loop was probably too short and coarse KIC will run forever
# Kill the daemon and tell the user about it
self.tmrKIC.Stop()
# First delete that input file so the new daemon doesn't pick it up right away
try:
os.remove("INDELinput")
except:
pass
self.parent.parent.restartDaemon() # Has to happen because coarse KIC is threaded
#self.recoverFromError("ERROR: The loop sequence is too short and cannot bridge the endpoint residues!")
|
schenc3/InteractiveROSETTA
|
InteractiveROSETTA/scripts/indel.py
|
Python
|
gpl-2.0
| 78,517
|
[
"PyMOL"
] |
390263cc0ac83f34fd317454fff5a8aef3149db2cdd4751d67e1511e106099d4
|
'''
===============================================
:mod:`gridcells.analysis.bumps` - bump tracking
===============================================
Classes and functions for processing data related to bump attractors.
Classes
-------
.. inheritance-diagram:: gridcells.analysis.bumps
:parts: 2
.. autosummary::
MLFit
MLFitList
MLGaussianFit
MLGaussianFitList
SingleBumpPopulation
SymmetricGaussianParams
Functions
---------
.. autosummary::
fit_gaussian_tt
fit_gaussian_bump_tt
fit_maximum_lh
'''
from __future__ import absolute_import, division, print_function
import collections
import logging
import numpy as np
import scipy.optimize
from . import spikes
from ..core.common import Pair2D, twisted_torus_distance
LOGGER = logging.getLogger(__name__)
class SymmetricGaussianParams(object):
'''Parameters for the symmetric Gaussian function.'''
def __init__(self, amplitude, mu_x, mu_y, sigma, err2):
self.A = amplitude
self.mu_x = mu_x
self.mu_y = mu_y
self.sigma = sigma
self.err2 = err2
class MLFit(object):
'''Maximum likelihood fit data holder.'''
def __init__(self, mu, sigma2, ln_lh, err2):
self.mu = mu
self.sigma2 = sigma2
self.ln_lh = ln_lh
self.err2 = err2
class MLFitList(MLFit, collections.Sequence):
'''A container for holding results of maximum likelihood fitting.
Can be accessed as a Sequence object.
'''
def __init__(self, mu=None, sigma2=None, ln_lh=None, err2=None,
times=None):
if mu is None:
mu = []
if sigma2 is None:
sigma2 = []
if ln_lh is None:
ln_lh = []
if err2 is None:
err2 = []
if times is None:
times = []
super(MLFitList, self).__init__(mu, sigma2, ln_lh, err2)
self.times = times
if not self._consistent():
raise ValueError('All input arguments must have the same length')
def _consistent(self):
'''Check if the data is consistent.'''
return len(self.mu) == len(self.sigma2) and \
len(self.mu) == len(self.ln_lh) and \
len(self.mu) == len(self.err2) and \
len(self.mu) == len(self.times)
def __getitem__(self, key):
return (MLFit(self.mu[key], self.sigma2[key], self.ln_lh[key],
self.err2[key]),
self.times[key])
def __len__(self):
return len(self.mu)
def append_data(self, d, t):
'''`d` must be an instance of :class:`MLFit`'''
if not isinstance(d, MLFit):
raise TypeError('ML data must be an instance of MLFit')
self.mu.append(d.mu)
self.sigma2.append(d.sigma2)
self.ln_lh.append(d.ln_lh)
self.err2.append(d.err2)
self.times.append(t)
class MLGaussianFit(SymmetricGaussianParams):
'''Gaussian fit performed by applying maximum likelihood estimator.'''
def __init__(self, amplitude, mu_x, mu_y, sigma, err2, ln_lh,
lh_precision):
super(MLGaussianFit, self).__init__(amplitude, mu_x, mu_y, sigma, err2)
self.ln_lh = ln_lh
self.lh_precision = lh_precision
class MLGaussianFitList(MLGaussianFit, collections.Sequence):
'''A container for holding maximum likelihood Gaussian fits.
Can be accessed as a Sequence.
'''
def __init__(self, amplitude=None, mu_x=None, mu_y=None, sigma=None,
err2=None, ln_lh=None, lh_precision=None, times=None):
if amplitude is None:
amplitude = []
if mu_x is None:
mu_x = []
if mu_y is None:
mu_y = []
if sigma is None:
sigma = []
if err2 is None:
err2 = []
if ln_lh is None:
ln_lh = []
if lh_precision is None:
lh_precision = []
if times is None:
times = []
super(MLGaussianFitList, self).__init__(amplitude, mu_x, mu_y, sigma,
err2, ln_lh,
lh_precision)
self.times = times
if not self._consistent():
raise ValueError('All input arguments must have the same length')
def _consistent(self):
'''Check if the data is consistent.'''
return \
len(self.A) == len(self.mu_x) and \
len(self.A) == len(self.mu_y) and \
len(self.A) == len(self.sigma) and \
len(self.A) == len(self.err2) and \
len(self.A) == len(self.ln_lh) and \
len(self.A) == len(self.lh_precision) and \
len(self.A) == len(self.times)
def append_data(self, d, t):
'''`d` must be an instance of :class:`MLGaussianFit`'''
if not isinstance(d, MLGaussianFit):
raise TypeError('Data must be an instance of MLGaussianFit')
self.A.append(d.A)
self.mu_x.append(d.mu_x)
self.mu_y.append(d.mu_y)
self.sigma.append(d.sigma)
self.err2.append(d.err2)
self.ln_lh.append(d.ln_lh)
self.lh_precision.append(d.lh_precision)
self.times.append(t)
def __getitem__(self, key):
return MLGaussianFit(self.A[key],
self.mu_x[key],
self.mu_y[key],
self.sigma[key],
self.err2[key],
self.ln_lh[key],
self.lh_precision[key]), \
self.times[key]
def __len__(self):
return len(self.A) # All same length
def fit_gaussian_tt(sig_f, i):
r'''Fit a 2D circular Gaussian function to a 2D signal using a maximum
likelihood estimator.
The Gaussian is not generic: :math:`\sigma_x = \sigma_y = \sigma`, i.e.
it is circular only.
The function fitted looks like this:
.. math::
f(\mathbf{X}) = |A| \exp\left\{\frac{-|\mathbf{X} -
\mathbf{\mu}|^2}{2\sigma^2}\right\}
where :math:`|\cdot|` is a distance metric on the twisted torus.
Parameters
----------
sig_f : np.ndarray
A 2D array that specifies the signal to fit the Gaussian onto. The
dimensions of the torus will be inferred from the shape of `sig_f`:
(dim.y, dim.x) = `sig_f.shape`.
i : SymmetricGaussianParams
Gaussian initialisation parameters. The `err2` field will be ignored.
Returns
-------
:class:`MLGaussianFit`
Estimated values, together with maximum likelihood value and precision
(inverse variance of noise: *NOT* of the fitted Gaussian).
'''
# Fit the Gaussian using least squares
dim = Pair2D(sig_f.shape[1], sig_f.shape[0])
X, Y = np.meshgrid( # pylint: disable=unbalanced-tuple-unpacking
np.arange(dim.x, dtype=np.double),
np.arange(dim.y, dtype=np.double))
others = Pair2D(X.flatten(), Y.flatten())
a = Pair2D(None, None)
def gaussian_diff(x):
'''Compute error.'''
a.x = x[1] # mu_x
a.y = x[2] # mu_y
dist = twisted_torus_distance(a, others, dim)
# A sigma
# | |
return (np.abs(x[0]) * np.exp(-dist ** 2 / 2. / x[3] ** 2) -
sig_f.ravel())
xest, _ = scipy.optimize.leastsq(gaussian_diff,
np.array([i.A, i.mu_x, i.mu_y, i.sigma]))
err2 = gaussian_diff(xest) ** 2
# Remap the values modulo torus size
xest[1] = xest[1] % dim.x
xest[2] = xest[2] % dim.y
# Compute the log-likelihood
n = dim.x * dim.y
aic_correction = 5 # number of estimated parameters: A, mu_x, mu_y, sigma, plus the noise precision beta
beta = 1.0 / (np.mean(err2))
ln_lh = -beta / 2. * np.sum(err2) + \
n / 2. * np.log(beta) - \
n / 2. * np.log(2 * np.pi) - \
aic_correction
return MLGaussianFit(xest[0], xest[1], xest[2], xest[3], err2, ln_lh, beta)
def fit_gaussian_bump_tt(sig):
'''Fit a 2D Gaussian onto a (potential) firing rate bump on the twisted
torus.
Parameters
----------
sig : np.ndarray
2D firing rate map to fit. Axis 0 is the Y position. This will be
passed directly to :func:`~analysis.image.fit_gaussian_tt`.
Returns
-------
:class:`analysis.image.MLGaussianFit`
Estimated values of the fit.
Notes
-----
The function initialises the Gaussian fitting parameters to a position at
the maximum of `sig`.
'''
mu0_y, mu0_x = np.unravel_index(np.argmax(sig), sig.shape)
a0 = sig[mu0_y, mu0_x]
sigma0 = np.max(sig.shape) / 4.
init = SymmetricGaussianParams(a0, mu0_x, mu0_y, sigma0, None)
return fit_gaussian_tt(sig, init)
def fit_maximum_lh(sig):
'''Fit a maximum likelihood solution under Gaussian noise.
Parameters
----------
sig : np.ndarray
A vector containing the samples
Returns
-------
fit : MLFit
Maximum likelihood parameters
'''
sig = sig.flatten()
mu = np.mean(sig)
sigma2 = np.var(sig)
err2 = (sig - mu) ** 2
if sigma2 == 0:
ln_lh = np.inf
else:
n = len(sig)
aic_correction = 2
ln_lh = -.5 / sigma2 * np.sum((sig - mu) ** 2) - \
.5 * n * np.log(sigma2) - \
.5 * n * np.log(2 * np.pi) - \
aic_correction
return MLFit(mu, sigma2, ln_lh, err2)
class SingleBumpPopulation(spikes.TwistedTorusSpikes):
'''
A population of neurons that is supposed to form a bump on a twisted torus.
Parameters
----------
senders : array_like
An array of neurons' IDs.
times : array_like
An array of spike times. Length must be the same as <senders>.
sheet_size : A pair
A pair of X and Y dimensions of the torus.
'''
def __init__(self, senders, times, sheet_size):
super(SingleBumpPopulation, self).__init__(senders, times, sheet_size)
def _perform_fit(self, tstart, tend, dt, win_len, fit_callable, list_cls,
full_err=True):
'''Perform the fit given the requested ``fit_callable``.'''
F, Ft = self.sliding_firing_rate(tstart, tend, dt, win_len)
res = list_cls()
for tIdx in range(len(Ft)):
LOGGER.debug('%s:: fitting: %d/%d, %.3f/%.3f ',
fit_callable.__name__, tIdx + 1, len(Ft), Ft[tIdx],
Ft[-1])
fit_params = fit_callable(F[:, :, tIdx])
if not full_err:
fit_params.err2 = np.sum(fit_params.err2)
res.append_data(fit_params, Ft[tIdx])
return res
def bump_position(self, tstart, tend, dt, win_len, full_err=True):
'''Estimate bump positions during the simulation time:
1. Estimate the population firing rate for each bin.
2. Apply the bump position estimation procedure to each of the
population activity items.
Parameters
----------
tstart, tend, dt, win_len : float
Start and end time, time step, and window length. See also
:meth:`~gridcells.analysis.spikes.PopulationSpikes.sliding_firing_rate`.
full_err : bool
If ``True``, save the full error of fit. Otherwise a sum only.
Returns
-------
:class:`MLGaussianFitList`
A list of fitted Gaussian parameters.
Notes
-----
This method uses the maximum likelihood estimator to fit the Gaussian
function (:func:`~fit_gaussian_bump_tt`).
'''
return self._perform_fit(tstart, tend, dt, win_len,
fit_gaussian_bump_tt, MLGaussianFitList,
full_err=full_err)
def uniform_fit(self, tstart, tend, dt, win_len, full_err=True):
'''Estimate the mean firing rate using maximum likelihood estimator
(:func:`~gridcells.analysis.image.fit_maximum_lh`)
1. Use :meth:`sliding_firing_rate` to compute the population activity.
2. Apply the estimator to each activity item.
Parameters
----------
tstart, tend, dt, win_len
As in :py:meth:`~analysis.spikes.sliding_firing_rate`.
full_err : bool
If ``True``, save the full error of fit. Otherwise a sum only.
Returns
-------
MLFitList
A list of fitted parameters.
'''
return self._perform_fit(tstart, tend, dt, win_len, fit_maximum_lh,
MLFitList, full_err=full_err)
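# Usage sketch (added; hypothetical spike data, times in seconds; sheet_size
# is the (X, Y) pair described in the class docstring):
#   pop = SingleBumpPopulation([0, 1, 1, 2], [0.01, 0.02, 0.05, 0.06], (34, 30))
#   fits = pop.bump_position(tstart=0., tend=1., dt=0.1, win_len=0.25)
#   # `fits` is an MLGaussianFitList with one Gaussian fit per time window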
|
lsolanka/gridcells
|
gridcells/analysis/bumps.py
|
Python
|
gpl-3.0
| 12,722
|
[
"Gaussian"
] |
2ba2ce48e790fc7c83c994dc21c45202a4037c1d3252ad846642a04717956b02
|
#!/usr/bin/env python
"""
SCHISM native reader
==================================
"""
import numpy as np
from datetime import timedelta, datetime
from opendrift.readers import reader_schism_native
from opendrift.readers import reader_global_landmask
from opendrift.models.oceandrift import OceanDrift
###############################
# MODEL
###############################
o = OceanDrift(loglevel=0) # Set loglevel to 0 for debug information
###############################
# READERS
###############################
# Creating and adding reader using a native SCHISM netcdf output file
# SCHISM reader
reader_landmask = reader_global_landmask.Reader(
llcrnrlon=171.5, llcrnrlat=-43.5,
urcrnrlon=177.0, urcrnrlat=-38.0)
# NZTM proj4 string found at https://spatialreference.org/ref/epsg/nzgd2000-new-zealand-transverse-mercator-2000/
proj4str_nztm = '+proj=tmerc +lat_0=0 +lon_0=173 +k=0.9996 +x_0=1600000 +y_0=10000000 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs'
schism_native = reader_schism_native.Reader(
filename = 'https://thredds.met.no/thredds/dodsC/metusers/knutfd/thredds/netcdf_unstructured_samples/schism_marl20080101_00z_3D.nc',
proj4 = proj4str_nztm,
use_3d = True)
# schism_native.plot_mesh(variable = ['sea_floor_depth_below_sea_level']) # check reader was correctly loaded
o.add_reader([reader_landmask,schism_native])
o.set_config('general:use_auto_landmask', False) # prevent opendrift from making a new dynamical landmask with global_landmask
# Seed elements at defined positions, depth and time
o.seed_elements(lon=174.046669, lat=-40.928116, radius=20, number=100,
z=np.linspace(0,-10, 100), time=schism_native.start_time)
o.seed_elements(lon= 173.8839, lat=-40.9160, radius=20, number=100,
z=np.linspace(0,-10, 100), time=schism_native.start_time)
o.seed_elements(lon=174.2940, lat=-41.0888, radius=20, number=100,
z=np.linspace(0,-10, 100), time=schism_native.start_time)
o.disable_vertical_motion() # Deactivate any vertical processes/advection
#%%
# Running model
o.run(time_step=900,
end_time = schism_native.start_time+timedelta(days=0.1))
# outfile='schism_native_output.nc')
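# Note (added): a numeric time_step is interpreted as seconds by OpenDrift
# (900 s = 15 min); restoring the commented outfile argument to the run()
# call writes the trajectories to a netCDF file.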
# Print and plot results
print(o)
o.plot(fast=True)
o.animation()
o.animation_profile()
|
OpenDrift/opendrift
|
examples/example_schism_native.py
|
Python
|
gpl-2.0
| 2,316
|
[
"NetCDF"
] |
b4309868b107bf6fa8b2e8f950fa632e67f1d8d2992fbf2dbfcdf8a7adfaf534
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import six
from six.moves import zip, range, map
import sys
import os.path
import argparse
import numbers
import numpy as np
import scipy.optimize as spo
import scipy.stats as sps
import scipy.signal as ss
import multiworm
import multiworm.analytics.sgolay
import where
WALDO_LOC = os.path.join(os.path.dirname(__file__), '..', 'Waldo')
WALDO_CODE = os.path.join(WALDO_LOC, 'code')
WALDO_DATA = os.path.join(WALDO_LOC, 'data', 'worms')
def IQR(dist):
return np.percentile(dist, 75) - np.percentile(dist, 25)
def head_and_tail(linegen):
try:
head = tail = six.next(linegen)
except StopIteration:
return [] # linegen has zero length
for tail in linegen:
assert not tail.startswith('%')
if head != tail:
return [head, tail]
else:
return [head]
def normpdf(x, *args):
"Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
# https://github.com/matplotlib/matplotlib/blob/master/lib/matplotlib/mlab.py#L1554
mu, sigma = args
return 1./(np.sqrt(2*np.pi)*sigma)*np.exp(-0.5 * (1./sigma*(x - mu))**2)
def fit_gaussian(x, num_bins=200):
# some test data has no variance whatsoever; this is the escape clause
if abs(max(x) - min(x)) < 1e-5:
print('fit_gaussian exit')
return max(x), 1
n, bin_edges = np.histogram(x, num_bins, normed=True)
bincenters = [0.5 * (bin_edges[i + 1] + bin_edges[i]) for i in range(len(n))]
# Target function
fitfunc = lambda p, x: normpdf(x, p[0], p[1])
# Distance to the target function
errfunc = lambda p, x, y: fitfunc(p, x) - y
# Initial guess for the parameters
mu = np.mean(x)
sigma = np.std(x)
p0 = [mu, sigma]
p1, success = spo.leastsq(errfunc, p0[:], args=(bincenters, n))
# scipy.optimize.leastsq signals success with an integer flag; 1-4 mean the fit converged.
if success in [1,2,3,4]:
mu, sigma = p1
return mu, sigma
else:
return None
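# Sketch of expected behaviour (added; synthetic data, not from the script):
#   x = np.random.normal(loc=2.0, scale=0.5, size=10000)
#   mu, sigma = fit_gaussian(x)
#   # mu ~ 2.0 and sigma ~ 0.5 when the histogram least-squares fit converges;
#   # note fit_gaussian returns None if leastsq reports failure.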
def centroid_stats(steps):
stats = []
for data in steps:
stats.append(fit_gaussian(data))
return stats
def centroid_steps(centroid):
xy = zip(*centroid)
dxy = [np.diff(d) for d in xy]
return dxy
def step_distribution(centroid):
import matplotlib.pyplot as plt
f, ax = plt.subplots()
steps = centroid_steps(centroid)
stats = centroid_stats(steps)
for direction, color, meansd, data in zip(['X', 'Y'], ['red', 'green'], stats, steps):
mean, sd = meansd
print(' {0:25s} | {1:0.2e}, {2:0.2e}'.format(direction + ' stddev, mean', sd, mean))
ax.hist(data, 500, histtype='stepfilled', color=color, alpha=0.5, normed=True, label=direction)
norm_x = np.linspace(-4, 4, 100) * sd + mean
norm_y = sps.norm(mean, sd).pdf(norm_x)
ax.plot(norm_x, norm_y, color=color, ls='--', lw=3)
ax.legend()
sd_window = 3.5
max_sd = max(s[1] for s in stats)
ax.set_xlim(-sd_window * max_sd, sd_window * max_sd)
def spectrogram(centroid):
import matplotlib.pyplot as plt
f, axs = plt.subplots(2, 2, sharex=True)
for ax, data in zip(axs, zip(*centroid)):
#import pdb;pdb.set_trace()
ax1, ax2 = ax
ax1.plot(np.arange(len(data))/25, data)
ax2.specgram(data, NFFT=512, Fs=25)
def spectral(centroid):
import matplotlib.pyplot as plt
f, ax = plt.subplots()
Ellipsis
def excise_frames(blob, start, stop):
first_frame = blob['frame'][0]
start_idx = start - first_frame
end_idx = stop - first_frame
if start_idx < 0 or end_idx > len(blob['frame']):
raise ValueError('Start/stop frames outside of bounds')
return blob['centroid'][start_idx:end_idx]
def fld(fieldname, *data, **kwargs):
joiner = kwargs.get('joiner', ', ')
try:
datastr = joiner.join(
('{0:.1f}' if isinstance(pt, numbers.Real) else '{0:d}').format(pt)
for pt in data)
except TypeError:
datastr = str(data)
print(' {0:25s} | {1:s}'.format(fieldname, datastr))
def sgolay(series, window, order):
series = np.array(series)
window = int(window)
order = int(order)
return multiworm.analytics.sgolay.savitzky_golay(series, window, order)
STOCK_METHODS = [
'boxcar', 'triang', 'blackman', 'hamming', 'hann', 'bartlett',
'flattop', 'parzen', 'bohman', 'blackmanharris', 'nuttall', 'barthann',
'kaiser', 'gaussian', 'general_gaussian', 'slepian', 'chebwin'
]
SMOOTH_METHODS = {
'sgolay': sgolay,
}
def smooth(method, series, winlen, *params):
if method in SMOOTH_METHODS:
return SMOOTH_METHODS[method](series, winlen, *params)
try:
winlen = int(winlen) // 2 * 2 + 1 # make it odd, rounding up
half_win = winlen // 2
wintype = (method,) + tuple(int(x) for x in params)
fir_win = ss.get_window(wintype, winlen)
except ValueError:
raise ValueError('Unrecognized smoothing type')
b = fir_win / sum(fir_win)
a = [1]
#zi = ss.lfiltic(b, a)
#zi = series[0] * np.ones(len(b) - 1)
return ss.lfilter(b, a, series)[winlen-1:]
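# Usage sketch (added): smoothing a noisy series with a stock FIR window and
# with Savitzky-Golay (window length 11, polynomial order 3):
#   y = np.sin(np.linspace(0, 6, 200)) + np.random.normal(scale=0.1, size=200)
#   y_hann = smooth('hann', y, 9)
#   y_sg = smooth('sgolay', y, 11, 3)
# Note the FIR branch trims the first winlen-1 samples of the filtered output.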
def speed_dist(centroid):
Ellipsis
def waldo_pull(data_set, bid):
sys.path.append(WALDO_CODE)
from shared.wio.file_manager import get_timeseries
ext_bid = '{}_{:05d}'.format(data_set, bid)
return get_timeseries(ext_bid, 'xy')
def main(argv=None):
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser(description='Get basic information '
'about a particular blob.')
parser.add_argument('data_set', help='The location of the data set.')
parser.add_argument('blob_id', type=int, help='The blob ID in the '
'data set to summarize.')
parser.add_argument('-ht', '--head-and-tail', action='store_true')
parser.add_argument('--xy', action='store_true', help='Plot X and Y '
'coordinates for the blob')
parser.add_argument('--smooth', nargs='+', help='Smooth the '
'X-Y values. Must provide method (e.g. "sgolay"), and the '
'appropriate number of parameters for the filter.')
parser.add_argument('--spec', action='store_true', help='Spectrogram')
#parser.add_argument('--show', action='store_true', help='Try to show the blob using images')
parser.add_argument('--dist', action='store_true', help='Distribution of '
'steps')
parser.add_argument('--speeds', action='store_true', help='Distribution '
'of speeds (requires --smooth ...)')
parser.add_argument('--frames', type=int, nargs=2, help='Start/stop frames')
parser.add_argument('--subsample', type=int, default=1,
help='Subsample speed by this many frames')
parser.add_argument('--waldo', action='store_true', help='Pull data from '
'Waldo-processed files')
parser.add_argument('--noshow', action='store_true', help="Don't show "
"the plot")
args = parser.parse_args()
#args.data_set = where.where(args.data_set)
experiment = multiworm.Experiment(experiment_id=args.data_set)
experiment.load_summary()
if args.blob_id not in experiment.bs_mapping:
print('Blob ID {0} not found.'.format(args.blob_id), file=sys.stderr)
sys.exit(1)
ALL_METHODS = list(six.iterkeys(SMOOTH_METHODS)) + STOCK_METHODS
if args.smooth and args.smooth[0] not in ALL_METHODS:
print('Smoothing method "{}" not valid. Must be one of: {}'
.format(args.smooth[0], ', '.join(ALL_METHODS)), file=sys.stderr)
sys.exit(1)
file_no, offset = experiment.summary[['file_no', 'offset']][experiment.bs_mapping[args.blob_id]]
if args.head_and_tail:
for line in experiment.parse_blob(args.blob_id, head_and_tail):
print(line, end='')
return
blob = experiment.parse_blob(args.blob_id)
if blob is None:
print("Blob ID {} exists, but has no data.".format(args.blob_id),
file=sys.stderr)
return
print('Data in blobs file number {0}, starting at byte {1}'.format(file_no, offset))
print('Path: {0}'.format(experiment.blobs_files[file_no]))
print(' {0:^25s} | {1:^30s} '.format('Field', 'Data'))
print(' ' + '-'* 65)
life_s = blob['time'][-1] - blob['time'][0]
life_f = blob['frame'][-1] - blob['frame'][0]
fld('Lifetime (s, frames)', life_s, life_f)
fld('Time Range (s)', blob['time'][0], blob['time'][-1], joiner=' - ')
fld('Frame Range', blob['frame'][0], blob['frame'][-1], joiner=' - ')
fld('Found at', *blob['centroid'][0])
fld('Lost at', *blob['centroid'][-1])
if args.xy or args.spec or args.dist or args.smooth or args.waldo:
import matplotlib.pyplot as plt
if args.waldo:
times, centroid = waldo_pull(os.path.basename(args.data_set), args.blob_id)
if centroid is None:
print("Blob ID {} exists, but has no Waldo data."
.format(args.blob_id), file=sys.stderr)
return
else:
centroid = excise_frames(blob, *args.frames) if args.frames else blob['centroid']
if args.spec:
spectrogram(centroid)
elif args.dist:
step_distribution(centroid)
elif args.smooth and args.speeds:
f = plt.figure()
ax_x = plt.subplot2grid((3, 2), (0, 0))
ax_y = plt.subplot2grid((3, 2), (1, 0), sharex=ax_x)
ax_speed = plt.subplot2grid((3, 2), (2, 0), sharex=ax_x)
ax_distspeed = plt.subplot2grid((3, 2), (0, 1), rowspan=3)
smooth_method, smooth_params = args.smooth[0], args.smooth[1:]
xy = list(zip(*centroid))
print(xy)
xy_smoothed = [smooth(smooth_method, c, *smooth_params) for c in xy]
for ax, c, c_smoothed in zip([ax_x, ax_y], xy, xy_smoothed):
ax.plot(c, color='blue', alpha=0.5)
ax.plot(c_smoothed, lw=2, color='green')
dxy = np.diff(np.array(xy_smoothed)[...,::args.subsample], axis=1)
print(len(dxy), len(xy_smoothed))
#import pdb;pdb.set_trace()
ds = np.linalg.norm(dxy, axis=0)
ax_speed.plot(ds)
#bins = np.ceil(2 * len(ds)**(1/3)) # Rice's Rule
bins = np.ceil(np.ptp(ds) * len(ds)**(1/3) / (2 * IQR(ds))) # Freedman–Diaconis' choice
ax_distspeed.hist(ds, bins, histtype='stepfilled', alpha=0.5, normed=True)
decades = range(10, 100, 10)
deciles = np.percentile(ds, decades)
print("\n{:>7s} | {:<s}".format('%ile', 'Speed (px/frame)'))
print(" -------------------")
for pct, pctile in zip(decades, deciles):
print("{:>7.0f} | {:6.3f}".format(pct, pctile))
#ax_distspeed.set_yscale('log')
elif args.smooth:
f, axs = plt.subplots(2, sharex=True)
for ax, data in zip(axs, zip(*centroid)):
smooth_method, smooth_params = args.smooth[0], args.smooth[1:]
data_smoothed = smooth(smooth_method, data, *smooth_params)
ax.plot(data, color='blue', alpha=0.5)
ax.plot(data_smoothed, lw=2, color='green')
else:
f, axs = plt.subplots(2, sharex=True)
for ax, data in zip(axs, zip(*centroid)):
ax.plot(data, color='blue')
if not args.noshow:
plt.show()
if __name__ == '__main__':
sys.exit(main())
'''
else: # show X and Y over frames
fig, axs = plt.subplots(2, sharex=True)
speed =
for ax, data in zip(axs, zip(*centroid)):
if args.smooth:
smooth_method, smooth_params = args.smooth[0], args.smooth[1:]
data_smoothed = smooth(smooth_method, data, *smooth_params)
if args.speeds:
fig_2, ax_speeds = plt.subplots()
ax_speeds.hist()
else:
ax.plot(data, color='blue', alpha=0.5)
ax.plot(data_smoothed, lw=2, color='green')
else:
ax.plot(data, color='blue')
'''
|
nicktimko/multiworm
|
blob_info.py
|
Python
|
mit
| 12,348
|
[
"Gaussian"
] |
4aaa803771af73d0fdf18834fa4a059e26c043bf0065eace727d65547fa1ea6b
|
from .decorator import tract_math_operation, set_dictionary_from_use_filenames_as_index
from warnings import warn
import numpy
import nibabel
from nibabel.spatialimages import SpatialImage
from ..tractography import (
Tractography, tractography_to_file, tractography_from_files
)
import sys
import traceback
from . import tensor_operations
from . import tract_operations
try:
from collections import OrderedDict
except ImportError: # Python 2.6 fix
from ordereddict import OrderedDict
@tract_math_operation(': print the names of scalar data associated with each tract')
def scalars(optional_flags, tractography):
return {
'scalar attributes':
tractography.tracts_data().keys()
}
@tract_math_operation(': counts the number of tracts', needs_one_tract=False)
def count(optional_flags, tractographies):
results = OrderedDict()
for default_tractography_name, (tract_name, tract) in enumerate(tractographies):
measurement_dict = tensor_operations.compute_all_measures(tract, ['number of tracts'])
results = set_dictionary_from_use_filenames_as_index(optional_flags,
tract_name, default_tractography_name,
results, measurement_dict)
return results
@tract_math_operation(': calculates mean and std of tract length')
def length_mean_std(optional_flags, tractography):
return tensor_operations.compute_all_measures(tractography, ['length mean (mm)', 'length std (mm^2)'])
@tract_math_operation('<volume unit>: calculates the volume of a tract based on voxel occupancy of a certain voxel volume')
def tract_volume(optional_flags, tractography, resolution):
return tensor_operations.compute_all_measures(tractography, ['tract volume'], resolution=float(resolution))
@tract_math_operation('<scalar>: calculates mean and std of a scalar quantity that has been averaged along each tract', needs_one_tract=False)
def scalar_per_tract_mean_std(optional_flags, tractographies, scalar):
results = OrderedDict()
try:
for default_tract_name, (tract_name, tract) in enumerate(tractographies):
measurement_dict = tensor_operations.compute_all_measures(tract,
['per tract distance weighted mean %s',
'per tract distance weighted std %s'],
scalars=[scalar])
results = set_dictionary_from_use_filenames_as_index(optional_flags,
tract_name, default_tract_name,
results, measurement_dict)
except KeyError:
traceback.print_exc(file=sys.stdout)
raise ValueError("Tractography does not contain this scalar data")
return results
@tract_math_operation('<scalar>: calculates many DTI measurements along each tract if there are two tensor data attributes: "tensor1" and "tensor2"', needs_one_tract=False)
def scalar_compute_most(optional_flags, tractographies, scalar):
if scalar == 'all':
get_reference_tract = tractographies[0][1]
scalars = [
s for s in get_reference_tract.tracts_data().keys() if not s.startswith("tensor")]
else:
scalars = [scalar]
results = OrderedDict()
try:
for default_tract_name, (tract_name, tract) in enumerate(tractographies):
# First decorate the tract; use a per-tract copy of the scalar list so names
# are not duplicated when iterating over several tractographies
tract_scalars = list(scalars)
if 'tensor1' in tract.tracts_data().keys():
tract = tensor_operations.decorate_tract_with_measures(tract, 'tensor1')
tract_scalars.extend(
['FA_tensor1', 'MD_tensor1', 'AX_tensor1', 'RD_tensor1', 'GA_tensor1'])
if 'tensor2' in tract.tracts_data().keys():
tract = tensor_operations.decorate_tract_with_measures(tract, 'tensor2')
tract_scalars.extend(
['FA_tensor2', 'MD_tensor2', 'AX_tensor2', 'RD_tensor2', 'GA_tensor2'])
measurement_dict = tensor_operations.compute_all_measures(tract,
['per tract distance weighted mean %s',
'per tract distance weighted std %s',
'tract volume',
'length mean (mm)', 'length std (mm^2)',
'number of tracts'
],
scalars=tract_scalars, resolution=1.)
results = set_dictionary_from_use_filenames_as_index(optional_flags,
tract_name, default_tract_name,
results, measurement_dict)
except KeyError:
traceback.print_exc(file=sys.stdout)
raise ValueError("Tractography does not contain this tensor data")
return results
@tract_math_operation('<scalar>: calculates mean and std of a scalar quantity for each tract')
def scalar_tract_mean_std(optional_flags, tractography, scalar):
try:
tracts = tractography.original_tracts_data()[scalar]
result = OrderedDict((
('tract file', []),
('mean %s' % scalar, []),
('std %s' % scalar, [])
))
for i, t in enumerate(tracts):
result['tract file'].append('Tract %04d' % i)
result['mean %s' % scalar].append(t.mean())
result['std %s' % scalar].append(t.std())
return result
except KeyError:
raise ValueError("Tractography does not contain this scalar data")
@tract_math_operation('<scalar>: calculates median of a scalar quantity for each tract')
def scalar_tract_median(optional_flags, tractography, scalar):
try:
tracts = tractography.original_tracts_data()[scalar]
result = OrderedDict((
('tract file', []),
('median %s' % scalar, []),
))
for i, t in enumerate(tracts):
result['tract file'].append('Tract %04d' % i)
result['median %s' % scalar].append(float(numpy.median(t)))
return result
except KeyError:
raise ValueError("Tractography does not contain this scalar data")
@tract_math_operation('<scalar>: calculates mean and std of a scalar quantity over tracts')
def scalar_mean_std(optional_flags, tractography, scalar):
try:
scalars = tractography.tracts_data()[scalar]
all_scalars = numpy.vstack(scalars)
mean = all_scalars.mean(0)
std = all_scalars.std(0)
return OrderedDict((
('mean %s' % scalar, float(mean)),
('std %s' % scalar, float(std))
))
except KeyError:
raise ValueError("Tractography does not contain this scalar data")
@tract_math_operation('<scalar>: calculates median of a scalar quantity over tracts')
def scalar_median(optional_flags, tractography, scalar):
try:
scalars = tractography.tracts_data()[scalar]
all_scalars = numpy.vstack(scalars)
median = numpy.median(all_scalars)
return OrderedDict((
('median %s' % scalar, float(median)),
))
except KeyError:
raise ValueError("Tractography does not contain this scalar data")
@tract_math_operation(': Dumps all the data in the tractography', needs_one_tract=True)
def tract_dump(optional_flags, tractography):
res = OrderedDict()
tract_number = 'tract #'
res[tract_number] = []
res['x'] = []
res['y'] = []
res['z'] = []
data = tractography.tracts_data()
for k in data.keys():
res[k] = []
for i, tract in enumerate(tractography.tracts()):
res[tract_number] += [i] * len(tract)
res['x'] += list(tract[:, 0])
res['y'] += list(tract[:, 1])
res['z'] += list(tract[:, 2])
for k in data.keys():
res[k] += list(numpy.asarray(data[k][i]).squeeze())
return res
@tract_math_operation(': Dumps tract endpoints', needs_one_tract=True)
def tract_dump_endpoints(optional_flags, tractography):
res = OrderedDict()
tract_number = 'tract #'
res[tract_number] = []
res['x'] = []
res['y'] = []
res['z'] = []
for i, tract in enumerate(tractography.tracts()):
res[tract_number] += [i] * 2
res['x'] += list(tract[(0, -1), 0])
res['y'] += list(tract[(0, -1), 1])
res['z'] += list(tract[(0, -1), 2])
return res
@tract_math_operation(': Minimum and maximum distance between two consecutive points')
def tract_point_distance_min_max(optional_flags, tractography):
dist_min = numpy.empty(len(tractography.tracts()))
dist_max = numpy.empty(len(tractography.tracts()))
for i, tract in enumerate(tractography.tracts()):
dist = tract_operations.tract_length(tract)
dist_min[i] = dist.min()
dist_max[i] = dist.max()
print dist_min.min(), dist_max.max()
@tract_math_operation('<points per tract> <tractography_file_output>: subsamples tracts to a maximum number of points')
def tract_subsample(optional_flags, tractography, points_per_tract, file_output):
tractography.subsample_tracts(int(points_per_tract))
return Tractography(
tractography.tracts(), tractography.tracts_data(),
**tractography.extra_args
)
@tract_math_operation('<minimum length in mm> <tractography_file_output>: removes tracts shorter than the given length')
def tract_remove_short_tracts(optional_flags, tractography, min_tract_length, file_output):
min_tract_length = float(min_tract_length)
tracts = tractography.tracts()
data = tractography.tracts_data()
tract_ix_to_keep = [
i for i, tract in enumerate(tractography.tracts())
if tract_operations.tract_length(tract) > min_tract_length
]
selected_tracts = [tracts[i] for i in tract_ix_to_keep]
selected_data = dict()
for key, item in data.items():
if len(item) == len(tracts):
selected_data_items = [item[i] for i in tract_ix_to_keep]
selected_data[key] = selected_data_items
else:
selected_data[key] = item
return Tractography(
selected_tracts, selected_data,
**tractography.extra_args
)
@tract_math_operation('<image> <quantity_name> <tractography_file_output>: maps the values of an image to the tract points')
def tract_map_image(optional_flags, tractography, image, quantity_name, file_output):
from os import path
from scipy import ndimage
image = nibabel.load(image)
ijk_points = tract_operations.tract_in_ijk(image, tractography)
image_data = image.get_data()
if image_data.ndim > 3:
output_name, ext = path.splitext(file_output)
output_name = output_name + '_%04d' + ext
for i, image in enumerate(image_data):
new_scalar_data = ndimage.map_coordinates(
image, ijk_points.T
)[:, None]
tractography.original_tracts_data()[
quantity_name] = new_scalar_data
tractography_to_file(output_name % i, Tractography(
tractography.original_tracts(), tractography.original_tracts_data()))
else:
new_scalar_data_flat = ndimage.map_coordinates(
image_data, ijk_points.T
)[:, None]
start = 0
new_scalar_data = []
for tract in tractography.original_tracts():
new_scalar_data.append(
new_scalar_data_flat[start: start + len(tract)].copy()
)
start += len(tract)
tractography.original_tracts_data()[quantity_name] = new_scalar_data
return Tractography(
tractography.original_tracts(
), tractography.original_tracts_data(),
**tractography.extra_args
)
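# Note (added): tract points are first converted to voxel (ijk) coordinates so
# that scipy.ndimage.map_coordinates can interpolate the image at fractional
# tract positions; the flat interpolated array is then split back into
# per-tract slices in the original tract order.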
@tract_math_operation(
'<deformation> <tractography_file_output>: apply a '
'non-linear deformation to a tractography'
)
def tract_deform(optional_flags, tractography, image, file_output=None):
from scipy import ndimage
import numpy as numpy
image = nibabel.load(image)
coord_adjustment = numpy.sign(numpy.diag(image.get_affine())[:-1])
ijk_points = tract_operations.tract_in_ijk(image, tractography)
image_data = image.get_data().squeeze()
if image_data.ndim != 4 or image_data.shape[-1] != 3:
raise ValueError('Image is not a deformation field')
new_points = numpy.vstack(tractography.tracts()) # ijk_points.copy()
for i in (0, 1, 2):
image_ = image_data[..., i]
deformation = ndimage.map_coordinates(
image_, ijk_points.T
).squeeze()
new_points[:, i] -= coord_adjustment[i] * deformation
new_ras_points = new_points # tract_in_ras(image, new_points)
start = 0
new_tracts = []
for tract in tractography.original_tracts():
new_tracts.append(
new_ras_points[start: start + len(tract)].copy()
)
start += len(tract)
return Tractography(
new_tracts, tractography.original_tracts_data(),
**tractography.extra_args
)
@tract_math_operation(
'<transform> [invert] <tractography_file_output>: apply a '
'affine transform to a tractography. '
'transform is assumed to be in RAS format like Nifti.'
)
def tract_affine_transform(optional_flags,
tractography, transform_file, ref_image,
invert=False, file_output=None
):
import nibabel
import numpy as numpy
ref_image = nibabel.load(ref_image)
ref_affine = ref_image.get_affine()
transform = numpy.loadtxt(transform_file)
invert = bool(invert)
if invert:
print "Inverting transform"
transform = numpy.linalg.inv(transform)
orig_points = numpy.vstack(tractography.tracts())
new_points = nibabel.affines.apply_affine(transform, orig_points)
start = 0
new_tracts = []
for tract in tractography.original_tracts():
new_tracts.append(
new_points[start: start + len(tract)].copy()
)
start += len(tract)
extra_args = {
'affine': ref_affine,
'image_dims': ref_image.shape
}
# if tractography.extra_args is not None:
# tractography.extra_args.update(extra_args)
# extra_args = tractography.extra_args
return Tractography(
new_tracts, tractography.original_tracts_data(),
**extra_args
)
@tract_math_operation('<bins> <qty> <tractography_file_output>: computes per-tract probabilities of the quantity <qty>, binned by tract length')
def tract_tract_confidence(optional_flags, tractography, bins, qty, file_output=None):
bins = int(bins)
lengths = numpy.empty(len(tractography.tracts()))
tracts = tractography.tracts()
tracts_prob_data = []
tracts_length_bin = []
for i, tract in enumerate(tracts):
lengths[i] = tract_operations.tract_length(tract)
tracts_prob_data.append(numpy.zeros(len(tract)))
tracts_length_bin.append(numpy.zeros(len(tract)))
length_histogram_counts, length_histogram_bins = numpy.histogram(
lengths, normed=True, bins=bins)
for i in xrange(1, bins):
tract_log_prob = []
indices_bin = ((length_histogram_bins[
i - 1] < lengths) * (lengths < length_histogram_bins[i])).nonzero()[0]
if len(indices_bin) == 0:
continue
for j in indices_bin:
tract_log_prob.append(
numpy.log(tractography.tracts_data()[qty][j]).sum())
tract_log_prob = numpy.array(tract_log_prob)
tract_log_prob = numpy.nan_to_num(tract_log_prob)
lp_a0 = tract_log_prob[tract_log_prob < 0].max()
tract_log_prob_total = numpy.log(
numpy.exp(tract_log_prob - lp_a0).sum()) + lp_a0
tract_prob = numpy.exp(tract_log_prob - tract_log_prob_total)
for tract_number, tract_prob in zip(indices_bin, tract_prob):
tracts_prob_data[tract_number][:] = tract_prob
tracts_length_bin[tract_number][:] = length_histogram_bins[i - 1]
tractography.tracts_data()['tprob'] = tracts_prob_data
tractography.tracts_data()['tprob_bin'] = tracts_length_bin
return tractography
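# Note (added): the normalisation above is the log-sum-exp trick,
#   log(sum_j exp(l_j)) = a + log(sum_j exp(l_j - a)),
# with the shift a = lp_a0 taken near max(l_j) so that exponentiating very
# negative per-tract log-probabilities does not underflow to zero.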
@tract_math_operation('<image> <mask_out>: calculates the mask image from a tract on the space of the given image')
def tract_generate_mask(optional_flags, tractography, image, file_output):
image = nibabel.load(image)
mask = tract_operations.tract_mask(image, tractography)
return SpatialImage(mask, image.get_affine())
@tract_math_operation('<image> [smoothing] <image_out>: calculates the probabilistic tract image for these tracts', needs_one_tract=False)
def tract_generate_population_probability_map(optional_flags, tractographies, image, smoothing=0, file_output=None):
from scipy import ndimage
image = nibabel.load(image)
smoothing = float(smoothing)
# tractographies includes tuples of (tractography filename, tractography
# instance)
if isinstance(tractographies[1], Tractography):
tractographies = [tractographies]
prob_map = tract_operations.tract_mask(image, tractographies[0][1]).astype(float)
if smoothing > 0:
prob_map = ndimage.gaussian_filter(prob_map, smoothing)
for tract in tractographies[1:]:
aux_map = tract_operations.tract_mask(image, tract[1])
if smoothing > 0:
aux_map = ndimage.gaussian_filter(aux_map, smoothing)
prob_map += aux_map
prob_map /= len(tractographies)
return SpatialImage(prob_map, image.get_affine())
@tract_math_operation('<image> <image_out>: calculates the probabilistic tract image for these tracts', needs_one_tract=False)
def tract_generate_probability_map(optional_flags, tractographies, image, file_output):
image = nibabel.load(image)
prob_map = tract_operations.tract_probability_map(image, tractographies[0][1]).astype(float)
for tract in tractographies[1:]:
if len(tract[1].tracts()) == 0:
continue
new_prob_map = tract_operations.tract_mask(image, tract[1])
prob_map = prob_map + new_prob_map - (prob_map * new_prob_map)
return SpatialImage(prob_map, image.get_affine())
@tract_math_operation('<tractography_out>: strips the data from the tracts', needs_one_tract=True)
def tract_strip(optional_flags, tractography, file_output):
tractography_out = Tractography(tractography.tracts())
return tractography_out
@tract_math_operation('<tractography_out>: takes the union of all tractographies', needs_one_tract=False)
def tract_merge(optional_flags, tractographies, file_output):
all_tracts = []
all_data = {}
keys = [set(t[1].tracts_data().keys()) for t in tractographies]
common_keys = keys[0].intersection(*keys[1:])
affine = tractographies[0][1].extra_args.get('affine', None)
image_dims = tractographies[0][1].extra_args.get('image_dims', None)
for tract in tractographies:
tracts = tract[1].tracts()
if affine is not None and 'affine' in tract[1].extra_args:
if (tract[1].affine != affine).any():
affine = None
if image_dims is not None and 'image_dims' in tract[1].extra_args:
if (tract[1].image_dims != image_dims).any():
image_dims = None
all_tracts += tract[1].tracts()
data = tract[1].tracts_data()
for k in common_keys:
if len(data[k]) == len(tracts):
if k not in all_data:
all_data[k] = []
all_data[k] += data[k]
else:
all_data[k] = data[k]
return Tractography(
all_tracts, all_data,
affine=affine, image_dims=image_dims
)
@tract_math_operation('<volume unit> <tract1.vtk> ... <tractN.vtk>: calculates the kappa value of the first tract with the rest in the space of the reference image')
def tract_kappa(optional_flags, tractography, resolution, *other_tracts):
resolution = float(resolution)
voxels = tract_operations.voxelized_tract(tractography, resolution)
result = OrderedDict((
('tract file', []),
('kappa value', [])
))
for tract in other_tracts:
voxels1 = tract_operations.voxelized_tract(
tractography_from_files(tract),
resolution
)
all_voxels = numpy.array(list(voxels.union(voxels1)))
N = (all_voxels.max(0) - all_voxels.min(0)).prod()
pp = len(voxels.intersection(voxels1)) * 1.
pn = len(voxels.difference(voxels1)) * 1.
np_ = len(voxels1.difference(voxels)) * 1. # named np_ to avoid shadowing the numpy module
nn = N - pp - pn - np_
observed_agreement = (pp + nn) / N
chance_agreement = (
(pp + pn) * (pp + np_) + (nn + np_) * (nn + pn)) / (N * N)
k = (observed_agreement - chance_agreement) / (1 - chance_agreement)
result['tract file'].append(tract)
result['kappa value'].append(k)
return result
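# Note (added): with pp, pn, np_, nn the cells of the 2x2 voxel agreement
# table and N the bounding-box volume, the value computed above is Cohen's
# kappa for two binary raters:
#   observed = (pp + nn) / N
#   chance   = ((pp + pn) * (pp + np_) + (nn + np_) * (nn + pn)) / N**2
#   kappa    = (observed - chance) / (1 - chance)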
@tract_math_operation('<volume> <threshold> <resolution> <tract1.vtk> ... <tractN.vtk>: calculates the kappa value of the first tract with the rest in the space of the reference image')
def tract_kappa_volume(optional_flags, tractography, volume, threshold, resolution, *other_tracts):
resolution = float(resolution)
volume = nibabel.load(volume)
mask = (volume.get_data() > float(threshold)).astype(int)
voxels = tract_operations.tract_mask(mask, tractography)
result = OrderedDict((
('tract file', []),
('kappa value', [])
))
for tract in other_tracts:
voxels1 = tract_operations.voxelized_tract(
tractography_from_files(tract), resolution)
all_voxels = numpy.array(list(voxels.union(voxels1)))
N = (all_voxels.max(0) - all_voxels.min(0)).prod()
pp = len(voxels.intersection(voxels1)) * 1.
pn = len(voxels.difference(voxels1)) * 1.
np_ = len(voxels1.difference(voxels)) * 1. # named np_ to avoid shadowing the numpy module
nn = N - pp - pn - np_
observed_agreement = (pp + nn) / N
chance_agreement = (
(pp + pn) * (pp + np_) + (nn + np_) * (nn + pn)) / (N * N)
k = (observed_agreement - chance_agreement) / (1 - chance_agreement)
result['tract file'].append(tract)
result['kappa value'].append(k)
return result
@tract_math_operation('<volume unit> <tract1.vtk> ... <tractN.vtk>: calculates the dice coefficient of the first tract with the rest in the space of the reference image')
def tract_dice(optional_flags, tractography, resolution, *other_tracts):
resolution = float(resolution)
voxels = tract_operations.voxelized_tract(tractography, resolution)
result = OrderedDict((
('tract file', []),
('dice coefficient', [])
))
for tract in other_tracts:
voxels1 = tract_operations.voxelized_tract(
tractography_from_files(tract),
resolution
)
result['tract file'].append(tract)
result['dice coefficient'].append(
2 * len(voxels.intersection(voxels1)) * 1. /
(len(voxels) + len(voxels1))
)
return result
@tract_math_operation('<var> <tract_out>: smoothes the tract by convolving with a sliding window')
def tract_smooth(optional_flags, tractography, var, file_output):
from sklearn.neighbors import BallTree
var = float(var)
std = var ** 2 # note: despite the names, `var` is used as sigma and `std` as sigma**2 below
points = tractography.original_tracts()
all_points = numpy.vstack(points)
bt = BallTree(all_points)
N = len(all_points) / 3
I = numpy.eye(3)[None, ...]
for i, tract in enumerate(tractography.original_tracts()):
# all_points = numpy.vstack(points[:i] + points[i + 1:])
# bt = BallTree(all_points)
diff = numpy.diff(tract, axis=0)
diff = numpy.vstack((diff, diff[-1]))
lengths = numpy.sqrt((diff ** 2).sum(1))
# cum_lengths = numpy.cumsum(lengths)
diff_norm = diff / lengths[:, None]
tangent_lines = diff_norm[:, None, :] * diff_norm[:,:, None]
normal_planes = I - tangent_lines
# weight_matrices = normal_planes + 1e10 * tangent_lines
N = max(len(d) for d in bt.query_radius(tract, var * 3))
close_point_distances, close_point_indices = bt.query(
tract, N
)
close_points = all_points[close_point_indices]
difference_vectors = close_points - tract[:, None, :]
projected_vectors = (
normal_planes[:, None, :] *
difference_vectors[..., None]
).sum(-2)
projected_points = projected_vectors + tract[:, None, :]
# projected_distances2 = (projected_vectors**2).sum(-1)
# projected_weights = numpy.exp(- .5 * projected_distances2 / std)
# projected_weights /= projected_weights.sum(-1)[:, None]
weights = numpy.exp(
-.5 * close_point_distances ** 2 / std
)[..., None]
weights /= weights.sum(-2)[..., None]
# tract += (weights * projected_vectors).sum(-2)
# weighted_distances = (
# weight_matrices[:, None, :] *
# difference_vectors[..., None]
# ).sum(-2)
# weighted_distances *= difference_vectors
# weighted_distances = weighted_distances.sum(-1) ** .5
# weighted_points = (projected_points * weights).sum(1)
weighted_points = (projected_points * weights).sum(1)
tract[:] = weighted_points
# tract /= norm_term
return Tractography(
tractography.original_tracts(),
tractography.original_tracts_data(),
**tractography.extra_args
)
@tract_math_operation('<tract_out>: compute the prototype tract')
def tract_prototype_median(optional_flags, tractography, file_output=None):
from .tract_obb import prototype_tract
tracts = tractography.tracts()
data = tractography.tracts_data()
prototype_ix = prototype_tract(tracts)
selected_tracts = [tracts[prototype_ix]]
selected_data = dict()
for key, item in data.items():
if len(item) == len(tracts):
selected_data_items = [item[prototype_ix]]
selected_data[key] = selected_data_items
else:
selected_data[key] = item
return Tractography(selected_tracts, selected_data, **tractography.extra_args)
@tract_math_operation('<smooth order> <tract_out>: compute the prototype tract')
def tract_prototype_mean(optional_flags, tractography, smooth_order, file_output=None):
from .tract_obb import prototype_tract
smooth_order = int(smooth_order) # command-line arguments arrive as strings
tracts = tractography.tracts()
prototype_ix, leave_centers = prototype_tract(
tracts, return_leave_centers=True)
median_tract = tracts[prototype_ix]
mean_tract = numpy.empty_like(median_tract)
centers_used = set()
for point in median_tract:
closest_leave_center_ix = (
((leave_centers - point[None, :]) ** 2).sum(1)
).argmin()
if closest_leave_center_ix in centers_used:
continue
mean_tract[len(centers_used)] = leave_centers[closest_leave_center_ix]
centers_used.add(closest_leave_center_ix)
mean_tract = mean_tract[:len(centers_used)]
if smooth_order > 0:
try:
from scipy import interpolate
tck, u = interpolate.splprep(mean_tract.T)
mean_tract = numpy.transpose(interpolate.splev(u, tck))
except ImportError:
warn("A smooth order larger than 0 needs scipy installed")
return Tractography([mean_tract], {}, **tractography.extra_args)
@tract_math_operation('<volume unit> <tract1.vtk> ... <tractN.vtk>: calculates the Bhattacharyya coefficient of the first tract with the rest in the space of the reference image')
def tract_bhattacharyya_coefficient(optional_flags, tractography, resolution, *other_tracts):
resolution = float(resolution)
coord = ('X', 'Y', 'Z')
result = OrderedDict(
[('tract file', [])]
+ [
('bhattacharyya %s value' % coord[i], [])
for i in xrange(3)
]
)
tractography_points = numpy.vstack(tractography.tracts())
other_tracts_tractographies = [tractography_from_files(t_)
for t_ in other_tracts
]
other_tracts_points = [
numpy.vstack(t_.tracts())
for t_ in other_tracts_tractographies
]
mn_ = tractography_points.min(0)
mx_ = tractography_points.max(0)
for pts in other_tracts_points:
mn_ = numpy.minimum(mn_, pts.min(0))
mx_ = numpy.maximum(mx_, pts.max(0))
bins = numpy.ceil((mx_ - mn_) * 1. / resolution)
hists_tract = [
numpy.histogram(tractography_points[:, i], bins=bins[
i], density=True, range=(mn_[i], mx_[i]))[0]
for i in xrange(3)
]
for tract, tract_points in zip(other_tracts, other_tracts_points):
hists_other_tract = [
numpy.histogram(
tract_points[:, i], bins=bins[i], density=True, range=(mn_[i], mx_[i]))[0]
for i in xrange(3)
]
distances = [
numpy.sqrt(
hists_other_tract[i] * hists_tract[i] /
(hists_other_tract[i].sum() * hists_tract[i].sum())
).sum()
for i in xrange(3)
]
for i in xrange(3):
result['tract file'].append(tract)
result['bhattacharyya %s value' % coord[i]].append(
numpy.nan_to_num(distances[i]))
return result
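# Note (added): each per-axis value above is the Bhattacharyya coefficient
#   BC = sum_i sqrt(p_i * q_i)
# of the two normalised coordinate histograms p and q; BC is 1 for identical
# distributions and 0 for distributions with disjoint support.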
@tract_math_operation(
'<image> <label>: Flips tracts such that the first endpoint is '
'in the given label',
needs_one_tract=True
)
def tract_flip_endpoints_in_label(
optional_flags, tractography, image, label, file_output=None
):
image = nibabel.load(image)
tracts_ijk = tract_operations.each_tract_in_ijk(image, tractography)
image_data = image.get_data()
label = int(label)
print image_data.sum()
needs_flip = []
for ix, tract in enumerate(tracts_ijk):
i, j, k = numpy.round(tract[0]).astype(int)
l, m, n = numpy.round(tract[-1]).astype(int)
e1 = image_data[i, j, k] == label
e2 = image_data[l, m, n] == label
if e2 and not e1:
needs_flip.append(ix)
elif e1 and e2:
warn("At least one tract has both endpoints in the label")
elif not(e1 or e2):
warn("At least one tract none of its endpoints in the label")
tracts = list(tractography.tracts())
tracts_data = tractography.tracts_data()
print "Flipped %d tracts" % len(needs_flip)
for i in needs_flip:
tracts[i] = tracts[i][::-1]
for data_key, data_points in tracts_data.items():
data_points[i] = data_points[i][::-1]
return Tractography(
tracts, tracts_data,
**tractography.extra_args
)
|
BRAINSia/tract_querier
|
tract_querier/tract_math/operations.py
|
Python
|
bsd-3-clause
| 31,095
|
[
"VTK"
] |
25912bf665d92b1c08087e85d0e0220de8b3cd6996095bb467b4b042b93d72c0
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import numpy as np
import tables
from scipy import ndimage
from scipy.misc import imsave
import scipy.optimize as opt
from sklearn.cross_decomposition import PLSCanonical
from sklearn.linear_model import LassoCV
from braincode.util import configParser
from braincode.math import parallel_corr2_coef, corr2_coef, ridge
from braincode.math import get_pls_components, rcca
from braincode.math import LinearRegression
from braincode.math.norm import zero_one_norm, zscore
from braincode.pipeline import retinotopy
from braincode.pipeline.base import random_cross_modal_corr
from braincode.vim2 import util as vutil
def check_path(dir_path):
"""Check whether the directory does exist, if not, create it."""
if not os.path.exists(dir_path):
os.mkdir(dir_path, 0755)
def retinotopic_mapping(corr_file, data_dir, vxl_idx=None, figout=False):
"""Make the retinotopic mapping using activation map from CNN."""
if figout:
fig_dir = os.path.join(data_dir, 'fig')
check_path(fig_dir)
# load the cross-correlation matrix from file
corr_mtx = np.load(corr_file, mmap_mode='r')
# set voxel index
if not isinstance(vxl_idx, np.ndarray):
vxl_idx = np.arange(corr_mtx.shape[0])
elif len(vxl_idx) != corr_mtx.shape[0]:
print 'mismatch on voxel number!'
return
else:
print 'voxel index loaded.'
img_size = 55.0
max_n = 20 # number of strongest pixels used for the centre-of-mass estimate
pos_mtx = np.zeros((73728, 2))
pos_mtx[:] = np.nan
for i in range(len(vxl_idx)):
print 'Iter %s of %s' %(i+1, len(vxl_idx)),
tmp = corr_mtx[i, :]
tmp = np.nan_to_num(np.array(tmp))
# significance threshold for a one-tailed test
tmp[tmp <= 0.019257] = 0
if np.sum(tmp):
mmtx = tmp.reshape(55, 55)
#tmp = tmp.reshape(96, 27, 27)
#mmtx = np.max(tmp, axis=0)
print mmtx.min(), mmtx.max()
if figout:
fig_file = os.path.join(fig_dir, 'v'+str(vxl_idx[i])+'.png')
imsave(fig_file, mmtx)
# get indices of the max_n maximum values (max_n defined above)
row_idx, col_idx = np.unravel_index(
np.argsort(mmtx.ravel())[-1*max_n:],
mmtx.shape)
nmtx = np.zeros(mmtx.shape)
nmtx[row_idx, col_idx] = mmtx[row_idx, col_idx]
# center of mass
x, y = ndimage.measurements.center_of_mass(nmtx)
pos_mtx[vxl_idx[i], :] = [x, y]
else:
print ' '
#receptive_field_file = os.path.join(data_dir, 'receptive_field_pos.npy')
#np.save(receptive_field_file, pos_mtx)
#pos_mtx = np.load(receptive_field_file)
# generate retinotopic mapping
base_name = 'train_max' + str(max_n)
prf2visual_angle(pos_mtx, img_size, data_dir, base_name)
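# Note (added): the receptive-field position stored for each voxel is the
# intensity-weighted centre of mass of its max_n strongest correlation pixels;
# ndimage returns it in (row, col) order, which the code assigns to (x, y).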
def prf2visual_angle(prf_mtx, img_size, out_dir, base_name):
"""Generate retinotopic mapping based on voxels' pRF parameters.
`prf_mtx` is a #voxel x pRF-features matrix, pRF features can be 2 columns
(row, col) of image or 3 columns which adding a third pRF size parameters.
"""
feature_size = prf_mtx.shape[1]
pos_mtx = prf_mtx[:, :2]
# eccentricity
ecc = retinotopy.coord2ecc(pos_mtx, img_size, 20)
vol = ecc.reshape(18, 64, 64)
vutil.save2nifti(vol, os.path.join(out_dir, base_name+'_ecc.nii.gz'))
# angle
angle = retinotopy.coord2angle(pos_mtx, img_size)
vol = angle.reshape(18, 64, 64)
vutil.save2nifti(vol, os.path.join(out_dir, base_name+'_angle.nii.gz'))
# pRF size
if feature_size > 2:
size_angle = retinotopy.get_prf_size(prf_mtx, 55, 20)
vol = size_angle.reshape(18, 64, 64)
vutil.save2nifti(vol, os.path.join(out_dir, base_name+'_size.nii.gz'))
def visual_prf(corr_mtx, vxl_idx, prf_dir):
"""pRF visualization."""
check_path(prf_dir)
prf = np.zeros_like(corr_mtx)
for i in range(len(vxl_idx)):
orig_mtx = corr_mtx[i, :].reshape(55, 55)
orig_file = os.path.join(prf_dir, 'v'+str(vxl_idx[i])+'_orig.png')
imsave(orig_file, orig_mtx)
prf_mtx = orig_mtx.copy()
prf_mtx[prf_mtx<prf_mtx.max()*0.8] = 0
prf_file = os.path.join(prf_dir, 'v'+str(vxl_idx[i])+'_prf.png')
imsave(prf_file, prf_mtx)
prf[i, :] = prf_mtx.flatten()
np.save(os.path.join(prf_dir, 'prf.npy'), prf)
def get_roi_idx(fmri_table, vxl_idx):
"""Get ROI label for each voxel."""
rois = ['v1lh', 'v1rh', 'v2lh', 'v2rh', 'v3lh', 'v3rh', 'v3alh', 'v3arh',
'v3blh', 'v3brh', 'v4lh', 'v4rh', 'MTlh', 'MTrh']
roi_dict = {}
for roi in rois:
roi_mask = fmri_table.get_node('/roi/%s'%(roi))[:].flatten()
roi_idx = np.nonzero(roi_mask==1)[0]
roi_idx = np.intersect1d(roi_idx, vxl_idx)
if roi_idx.size: # non-empty check; .sum() would misreport an ROI whose only voxel index is 0
roi_ptr = np.array([np.where(vxl_idx==roi_idx[i])[0][0]
for i in range(len(roi_idx))])
roi_dict[roi] = roi_ptr
return roi_dict
def roi_info(corr_mtx, wt_mtx, fmri_table, mask_idx, out_dir):
"""Get ROI info."""
roi_list = ['v1lh', 'v1rh', 'v2lh', 'v2rh', 'v3lh', 'v3rh',
'v3alh', 'v3arh', 'v3blh', 'v3brh', 'v4lh', 'v4rh',
'MTlh', 'MTrh', 'MTplh', 'MTprh']
fingerprints = np.zeros((wt_mtx.shape[2], len(roi_list)))
for ridx in range(len(roi_list)):
roi_mask = fmri_table.get_node('/roi/%s'%(roi_list[ridx]))[:].flatten()
roi_idx = np.nonzero(roi_mask==1)[0]
roi_idx = np.intersect1d(roi_idx, mask_idx)
roi_ptr = np.array([np.where(mask_idx==roi_idx[i])[0][0]
for i in range(len(roi_idx))])
#-- plot pRF for each voxel
roi_dir = os.path.join(out_dir, roi_list[ridx])
os.system('mkdir %s'%(roi_dir))
for idx in roi_ptr:
tmp = corr_mtx[:, idx]
if np.sum(tmp):
tmp = tmp.reshape(13, 13)
vutil.save_imshow(tmp, os.path.join(roi_dir,
'%s.png'%(mask_idx[idx])))
else:
print 'Drop %s'%(idx)
#-- get feature response figure print
ele_num = 0
fp = np.zeros((fingerprints.shape[0]))
for idx in roi_ptr:
tmp = corr_mtx[:, idx]
# conv1+optical : 0.17419
# norm1 : 0.15906
# norm2 : 0.14636
# conv3 : 0.14502
f = tmp>=0.14502
if f.sum():
ele_num += f.sum()
fp += np.sum(wt_mtx[f, idx, :], axis=0)
fp /= ele_num
fingerprints[:, ridx] = fp
#-- plot fingerprint for each roi
#for i in range(len(roi_list)):
# plt.bar(np.arange(96), fingerprints[:96, i], 0.35)
# plt.savefig('%s.png'%(roi_list[i]))
# plt.close()
np.save(os.path.join(out_dir, 'roi_fingerprints.npy'), fingerprints)
if __name__ == '__main__':
"""Main function."""
# config parser
cf = configParser.Config('config')
root_dir = cf.get('base', 'path')
feat_dir = os.path.join(root_dir, 'sfeatures')
db_dir = os.path.join(root_dir, 'subjects')
# phase 'test': analyses are only conducted within lV1 as a code test
# phase 'work': for real analyses
phase = 'test'
# subj config
subj_id = 1
subj_dir = os.path.join(db_dir, 'vS%s'%(subj_id))
#-- load fmri data
fmri_file = os.path.join(subj_dir, 'VoxelResponses.mat')
tf = tables.open_file(fmri_file)
#tf.list_nodes
#-- roi mat to nii
#roi_file = os.path.join(subj_dir, 'S%s_small_roi.nii.gz'%(subj_id))
#vutil.roi2nifti(tf, roi_file, mode='small')
#-- get mean fmri responses
#dataset = 'rt'
#mean_file = os.path.join(subj_dir, 'S%s_mean_%s.nii.gz'%(subj_id, dataset))
#vutil.gen_mean_vol(tf, dataset, mean_file)
#-- create mask
train_fmri_ts = tf.get_node('/rt')[:]
# data.shape = (73728, 7200)
# get non-nan voxel indexs
fmri_s = train_fmri_ts.sum(axis=1)
non_nan_idx = np.nonzero(np.logical_not(np.isnan(fmri_s)))[0]
if phase == 'test':
lv1_mask = tf.get_node('/roi/v1lh')[:].flatten()
vxl_idx = np.nonzero(lv1_mask==1)[0]
# for vS1, lV1 contains 490 non-NaN voxels
vxl_idx = np.intersect1d(vxl_idx, non_nan_idx)
else:
full_mask_file = os.path.join(subj_dir, 'S%s_mask.nii.gz'%(subj_id))
full_mask = vutil.data_swap(full_mask_file).flatten()
full_vxl_idx = np.nonzero(full_mask==1)[0]
vxl_idx = np.intersect1d(full_vxl_idx, non_nan_idx)
#np.save(os.path.join(subj_dir, 'full_vxl_idx.npy'), vxl_idx)
roi_dict = get_roi_idx(tf, vxl_idx)
#np.save(os.path.join(subj_dir, 'roi_idx_pointer.npy'), roi_dict)
#roi_dict = np.load(os.path.join(subj_dir, 'roi_idx_pointer.npy')).item()
#-- load fmri response
# data shape: (#voxel, 7200/540)
train_fmri_ts = tf.get_node('/rt')[:]
train_fmri_ts = np.nan_to_num(train_fmri_ts[vxl_idx])
val_fmri_ts = tf.get_node('/rv')[:]
val_fmri_ts = np.nan_to_num(val_fmri_ts[vxl_idx])
#-- save masked data as npy file
#train_file = os.path.join(subj_dir, 'S%s_train_fmri_lV1.npy'%(subj_id))
#val_file = os.path.join(subj_dir, 'S%s_val_fmri_lV1.npy'%(subj_id))
#np.save(train_file, train_fmri_ts)
#np.save(val_file, val_fmri_ts)
#-- load cnn activation data
# data.shape = (feature_size, x, y, 7200/540)
#train_feat_file = os.path.join(feat_dir, 'conv1_train_trs.npy')
#train_feat_ts = np.load(train_feat_file, mmap_mode='r')
#val_feat_file = os.path.join(feat_dir, 'conv1_val_trs.npy')
#val_feat_ts = np.load(val_feat_file, mmap_mode='r')
#-- 2d gaussian kernel based pRF estimate
prf_dir = os.path.join(subj_dir, 'prf')
check_path(prf_dir)
# parameter config
fwhms = np.arange(1, 11)
# lasso linear regression
vxl_idx = vxl_idx[:10]
file_idx = -1
# output vars: allocate once, before the kernel loop, so per-kernel results
# accumulate instead of being re-zeroed on every iteration
paras = np.zeros((96, 30250, len(vxl_idx)))
val_corr = np.zeros((30250, len(vxl_idx)))
alphas = np.zeros((30250, len(vxl_idx)))
for i in range(30250):
print '--------------------------'
print 'Kernel %s'%(i+1)
# load CNN features modulated by Gaussian kernels
if i/550 > file_idx:
train_feat_file = os.path.join(feat_dir, 'gaussian_kernels',
'gaussian_conv1_train_trs_%s.npy'%(i/550))
train_feat_ts = np.load(train_feat_file)
val_feat_file = os.path.join(feat_dir, 'gaussian_kernels',
'gaussian_conv1_val_trs_%s.npy'%(i/550))
val_feat_ts = np.load(val_feat_file)
file_idx = i/550
train_x = train_feat_ts[..., i%550]
val_x = val_feat_ts[..., i%550]
# shape of x : (96, 7200/540)
train_x = zscore(train_x).T
val_x = zscore(val_x).T
for j in range(len(vxl_idx)):
print 'Voxel %s'%(j+1)
train_y = train_fmri_ts[j]
val_y = val_fmri_ts[j]
lasso_cv = LassoCV(cv=10, n_jobs=4)
lasso_cv.fit(train_x, train_y)
alphas[i, j] = lasso_cv.alpha_
paras[:, i, j] = lasso_cv.coef_
pred_y = lasso_cv.predict(val_x)
val_corr[i, j] = np.corrcoef(val_y, pred_y)[0][1]
print 'Alpha %s, prediction score %s'%(alphas[i, j], val_corr[i, j])
np.save(os.path.join(prf_dir, 'lassoreg_paras.npy'), paras)
np.save(os.path.join(prf_dir, 'lassoreg_pred_corr.npy'), val_corr)
np.save(os.path.join(prf_dir, 'lassoreg_alphas.npy'), alphas)
#-- pRF to retinotopy
#prf_mtx = np.load(os.path.join(prf_dir, 'vxl_prf.npy'))
## generate full voxel feature matrix
#full_prf_mtx = np.zeros((73728, 3))
#full_prf_mtx[:] = np.nan
#for i in range(len(vxl_idx)):
# full_prf_mtx[vxl_idx[i], :] = prf_mtx[i, :]
#prf2visual_angle(full_prf_mtx, 55, prf_dir, 'retinotopy')
#-- feature temporal z-score
#print 'CNN features temporal z-score ...'
## summary features across channels
#train_feat_ts = train_feat_ts.mean(axis=0)
#train_feat_m = train_feat_ts.mean(axis=2, keepdims=True)
#train_feat_s = train_feat_ts.std(axis=2, keepdims=True)
#train_feat_ts = (train_feat_ts-train_feat_m)/(1e-10+train_feat_s)
#val_feat_ts = val_feat_ts.mean(axis=0)
#val_feat_m = val_feat_ts.mean(axis=2, keepdims=True)
#val_feat_s = val_feat_ts.std(axis=2, keepdims=True)
#val_feat_ts = (val_feat_ts-val_feat_m)/(1e-10+val_feat_s)
#print 'Salience features temporal z-score ...'
#train_sal_m = train_sal_ts.mean(axis=2, keepdims=True)
#train_sal_s = train_sal_ts.std(axis=2, keepdims=True)
#train_sal_ts = (train_sal_ts-train_sal_m)/(1e-10+train_sal_s)
#val_sal_m = val_sal_ts.mean(axis=2, keepdims=True)
#val_sal_s = val_sal_ts.std(axis=2, keepdims=True)
#val_sal_ts = (val_sal_ts-val_sal_m)/(1e-10+val_sal_s)
#print 'Salience modulated features temporal z-score ...'
#train_salfeat_ts = train_salfeat_ts.mean(axis=0)
#train_salfeat_m = train_salfeat_ts.mean(axis=2, keepdims=True)
#train_salfeat_s = train_salfeat_ts.std(axis=2, keepdims=True)
#train_salfeat_ts=(train_salfeat_ts-train_salfeat_m)/(1e-10+train_salfeat_s)
#val_salfeat_ts = val_salfeat_ts.mean(axis=0)
#val_salfeat_m = val_salfeat_ts.mean(axis=2, keepdims=True)
#val_salfeat_s = val_salfeat_ts.std(axis=2, keepdims=True)
#val_salfeat_ts = (val_salfeat_ts-val_salfeat_m)/(1e-10+val_salfeat_s)
#-- voxel-wise linear regression
#cross_corr_dir = os.path.join(subj_dir, 'spatial_cross_corr', 'lv1')
#reg_dir = os.path.join(cross_corr_dir, 'linreg_l1')
#check_path(reg_dir)
#corr_mtx = np.load(os.path.join(cross_corr_dir, 'train_conv1_corr.npy'))
#corr_mtx = corr_mtx.reshape(470, 55, 55)
## voxel-wise linear regression
#wts = np.zeros((470, 55, 55, 3))
#train_corr = np.zeros((470, 55, 55))
#val_corr = np.zeros((470, 55, 55))
#wts_mask = np.zeros((470, 3))
#statsp_mask = np.zeros((470, 3))
#train_corr_mask = np.zeros(470,)
#val_corr_mask = np.zeros(470, )
#for i in range(len(vxl_idx)):
# print 'Voxel %s of %s ...'%(i+1, len(vxl_idx))
# prf = corr_mtx[i, ...].copy()
# prf = prf > prf.max()*0.8
# print '%s voxels selected'%(prf.sum())
# if not prf.sum():
# continue
# pos = np.nonzero(prf)
# wts_tmp = np.zeros((pos[0].shape[0], 3))
# statsp_tmp = np.zeros((pos[0].shape[0], 3))
# train_corr_tmp = np.zeros(pos[0].shape[0],)
# val_corr_tmp = np.zeros(pos[0].shape[0],)
# for j in range(pos[0].shape[0]):
# train_Y = train_fmri_ts[i, :]
# val_Y = val_fmri_ts[i, :]
# train_X = np.zeros((7200, 3))
# train_X[:, 0] = train_feat_ts[pos[0][j], pos[1][j], :]
# train_X[:, 1] = train_sal_ts[pos[0][j], pos[1][j], :]
# train_X[:, 2] = train_salfeat_ts[pos[0][j], pos[1][j], :]
# val_X = np.zeros((540, 3))
# val_X[:, 0] = val_feat_ts[pos[0][j], pos[1][j], :]
# val_X[:, 1] = val_sal_ts[pos[0][j], pos[1][j], :]
# val_X[:, 2] = val_salfeat_ts[pos[0][j], pos[1][j], :]
# model = LinearRegression(fit_intercept=False)
# model.fit(train_X, train_Y)
# wts[i, pos[0][j], pos[1][j], :] = model.coef_
# ptrain_Y = model.predict(train_X)
# tcorr = np.corrcoef(ptrain_Y, train_Y)[0][1]
# train_corr[i, pos[0][j], pos[1][j]] = tcorr
# pval_Y = model.predict(val_X)
# vcorr = np.corrcoef(pval_Y, val_Y)[0][1]
# val_corr[i, pos[0][j], pos[1][j]] = vcorr
# wts_tmp[j, :] = model.coef_
# statsp_tmp[j, :] = model.p
# train_corr_tmp[j] = tcorr
# val_corr_tmp[j] = vcorr
# wts_mask[i, :] = wts_tmp.mean(axis=0)
# statsp_mask[i, :] = statsp_tmp.mean(axis=0)
# train_corr_mask[i] = train_corr_tmp.mean()
# val_corr_mask[i] = val_corr_tmp.mean()
#np.save(os.path.join(reg_dir, 'wts.npy'), wts)
#np.save(os.path.join(reg_dir, 'train_corr.npy'), train_corr)
#np.save(os.path.join(reg_dir, 'val_corr.npy'), val_corr)
#np.save(os.path.join(reg_dir, 'wts_mask.npy'), wts_mask)
#np.save(os.path.join(reg_dir, 'stats_p_mask.npy'), statsp_mask)
#np.save(os.path.join(reg_dir, 'train_corr_mask.npy'), train_corr_mask)
#np.save(os.path.join(reg_dir, 'val_corr_mask.npy'), val_corr_mask)
#-- Cross-modality mapping: voxel~CNN feature position correlation
#cross_corr_dir = os.path.join(subj_dir, 'spatial_cross_corr')
#check_path(cross_corr_dir)
#-- features from CNN
#corr_file = os.path.join(cross_corr_dir, 'train_conv1_corr.npy')
#feat_ts = train_feat_ts.sum(axis=0).reshape(3025, 7200)
#parallel_corr2_coef(train_fmri_ts, feat_ts, corr_file, block_size=55)
#-- visual-pRF: select pixels which corr-coef greater than 1/2 maximum
#corr_mtx = np.load(corr_file)
#prf_dir = os.path.join(cross_corr_dir, 'prf')
#visual_prf(corr_mtx, vxl_idx, prf_dir)
#-- categorize voxels based on pRF types
#corr_file = os.path.join(cross_corr_dir, 'train_conv1_corr.npy')
#corr_mtx = np.load(corr_file)
## get pRF by remove non-significant pixels
## two-tailed p < 0.01: r > 0.0302 and r < -0.0302
#ncorr_mtx = corr_mtx.copy()
#ncorr_mtx[(corr_mtx<=0.0302)&(corr_mtx>=-0.0302)] = 0
#prf_max = ncorr_mtx.max(axis=1)
#prf_min = ncorr_mtx.min(axis=1)
#prf_type = np.zeros(corr_mtx.shape[0])
#prf_type[(prf_max>0)&(prf_min>0)] = 1
#prf_type[(prf_max>0)&(prf_min==0)] = 2
#prf_type[(prf_max>0)&(prf_min<0)] = 3
#prf_type[(prf_max==0)&(prf_min<0)] = 4
#prf_type[(prf_max<0)&(prf_min<0)] = 5
#np.save(os.path.join(cross_corr_dir, 'prf_type.npy'), prf_type)
#nii_file = os.path.join(cross_corr_dir, 'prf_type.nii.gz')
#vutil.vxl_data2nifti(prf_type, vxl_idx, nii_file)
#-- pRF stats and visualization for each ROI
#prf_dir = os.path.join(cross_corr_dir, 'prf_figs')
#check_path(prf_dir)
#for roi in roi_dict:
# print '------%s------'%(roi)
# roi_idx = roi_dict[roi]
# # pRF type stats in each ROI
# roi_prf_type = prf_type[roi_idx]
# print 'Voxel number: %s'%(roi_prf_type.shape[0])
# for i in range(5):
# vxl_num = np.sum(roi_prf_type==(i+1))
# vxl_ratio = vxl_num * 100.0 / roi_prf_type.shape[0]
# print '%s, %0.2f'%(vxl_num, vxl_ratio)
# # save pRF as figs
# roi_dir = os.path.join(prf_dir, roi)
# check_path(roi_dir)
# roi_corr_mtx = corr_mtx[roi_idx, :]
# roi_min = roi_corr_mtx.min()
# roi_max = roi_corr_mtx.max()
# for i in roi_idx:
# vxl_prf = corr_mtx[i, :].reshape(55, 55)
# filename = 'v'+str(vxl_idx[i])+'_'+str(int(prf_type[i]))+'.png'
# out_file = os.path.join(roi_dir, filename)
# vutil.save_imshow(vxl_prf, out_file, val_range=(roi_min, roi_max))
#-- get pRF parameters based on 2D Gaussian curve using model fitting
#corr_mtx = np.load(os.path.join(cross_corr_dir, 'train_conv1_corr.npy'))
## last column is curve fitting error based on squared-differnece
#paras = np.zeros((corr_mtx.shape[0], 6))
#for i in range(corr_mtx.shape[0]):
# print i,
# y = corr_mtx[i, :]
# if y.max() >= abs(y.min()):
# x0, y0 = np.unravel_index(np.argmax(y.reshape(55, 55)), (55, 55))
# else:
# x0, y0 = np.unravel_index(np.argmin(y.reshape(55, 55)), (55, 55))
# initial_guess = (x0, y0, 3, 0, 2)
# try:
# popt, pcov = opt.curve_fit(vutil.sugar_gaussian_f, 55, y,
# p0=initial_guess)
# #print popt
# paras[i, :5] = popt
# pred_y = vutil.sugar_gaussian_f(55, *popt)
# paras[i, 5] = np.square(y-pred_y).sum()
# except RuntimeError:
# print 'Error - curve_fit failed'
# paras[i, :] = np.nan
#np.save(os.path.join(cross_corr_dir, 'curve_fit_paras.npy'), paras)
#-- curve-fit pRF visualization for each ROI
#prf_dir = os.path.join(cross_corr_dir, 'fit_prf_figs')
#check_path(prf_dir)
#paras = np.load(os.path.join(cross_corr_dir, 'curve_fit_paras.npy'))
#corr_mtx = np.load(os.path.join(cross_corr_dir, 'train_conv1_corr.npy'))
#prf_type = np.load(os.path.join(cross_corr_dir, 'prf_type.npy'))
#for roi in roi_dict:
# print '------%s------'%(roi)
# roi_idx = roi_dict[roi]
# # save pRF as figs
# roi_dir = os.path.join(prf_dir, roi)
# check_path(roi_dir)
# roi_corr_mtx = corr_mtx[roi_idx, :]
# roi_min = roi_corr_mtx.min()
# roi_max = roi_corr_mtx.max()
# for i in roi_idx:
# if np.isnan(paras[i, 0]):
# continue
# p = paras[i, :]
# vxl_prf = vutil.sugar_gaussian_f(55, *p).reshape(55, 55)
# filename = 'v'+str(vxl_idx[i])+'_'+str(int(prf_type[i]))+'.png'
# out_file = os.path.join(roi_dir, filename)
# vutil.save_imshow(vxl_prf, out_file, val_range=(roi_min, roi_max))
#-- show pRF parameters on cortical surface
#paras = np.load(os.path.join(cross_corr_dir, 'curve_fit_paras.npy'))
#full_prf_mtx = np.zeros((73728, 3))
#full_prf_mtx[:] = np.nan
#for i in range(len(vxl_idx)):
# full_prf_mtx[vxl_idx[i], :] = paras[i, :3]
#prf2visual_angle(full_prf_mtx, 55, cross_corr_dir, 'curve_fit')
#err_file = os.path.join(cross_corr_dir, 'curve_fit_err.nii.gz')
#vutil.vxl_data2nifti(paras[:, 5], vxl_idx, err_file)
#-- Cross-modality mapping: voxel~CNN unit correlation
#cross_corr_dir = os.path.join(subj_dir, 'cross_corr')
#check_path(cross_corr_dir)
# features from CNN
#corr_file = os.path.join(cross_corr_dir, 'train_norm1_corr.npy')
#feat_ts = train_feat_ts.reshape(69984, 7200)
#parallel_corr2_coef(train_fmri_ts, feat_ts, corr_file, block_size=96)
# features from optical flow
#corr_file = os.path.join(cross_corr_dir, 'train_optic_mag_corr.npy')
#feat_ts = tr_mag_ts.reshape(16384, 7200)
#parallel_corr2_coef(train_fmri_ts, feat_ts, corr_file, block_size=55)
#-- random cross-modal correlation
#rand_corr_file = os.path.join(cross_corr_dir, 'rand_train_conv1_corr.npy')
#feat_ts = tr_mag_ts.reshape(16384, 7200)
#random_cross_modal_corr(train_fmri_ts, feat_ts, 1000, 1000, rand_corr_file)
#permutation_stats(np.load(rand_corr_file))
#-- retinotopic mapping based on cross-correlation with norm1
#cross_corr_dir = os.path.join(subj_dir, 'cross_corr')
#retino_dir = os.path.join(cross_corr_dir, 'retinotopic')
#check_path(retino_dir)
#corr_file = os.path.join(cross_corr_dir, 'train_norm1_corr.npy')
#retinotopic_mapping(corr_file, retino_dir, vxl_idx, figout=False)
#-- feature temporal z-score
#print 'CNN features temporal z-score ...'
#train_feat_m = train_feat_ts.mean(axis=3, keepdims=True)
#train_feat_s = train_feat_ts.std(axis=3, keepdims=True)
#train_feat_ts = (train_feat_ts-train_feat_m)/(1e-10+train_feat_s)
#val_feat_ts = (val_feat_ts-train_feat_m)/(1e-10+train_feat_s)
#tmp_train_file = os.path.join(feat_dir, 'train_conv1_trs_z.npy')
#np.save(tmp_train_file, train_feat_ts)
#del train_feat_ts
#tmp_val_file = os.path.join(feat_dir, 'val_norm1_trs_z.npy')
#np.save(tmp_val_file, val_feat_ts)
#del val_feat_ts
#train_feat_ts = np.load(tmp_train_file, mmap_mode='r')
#train_feat_ts = train_feat_ts.reshape(69984, 7200)
#val_feat_ts = np.load(tmp_val_file, mmap_mode='r')
#val_feat_ts = val_feat_ts.reshape(69984, 540)
#-- fmri data z-score
#print 'fmri data temporal z-score'
#m = np.mean(train_fmri_ts, axis=1, keepdims=True)
#s = np.std(train_fmri_ts, axis=1, keepdims=True)
#train_fmri_ts = (train_fmri_ts - m) / (1e-10 + s)
#m = np.mean(val_fmri_ts, axis=1, keepdims=True)
#s = np.std(val_fmri_ts, axis=1, keepdims=True)
#val_fmri_ts = (val_fmri_ts - m) / (1e-10 + s)
#-- Encoding: ridge regression
#ridge_dir = os.path.join(subj_dir, 'ridge')
#check_path(ridge_dir)
#-- layer-wise ridge regression: select CNN units whose correlation with
#-- the given voxel exceeds half of the maximal correlation within
#-- the layer.
#cross_corr_dir = os.path.join(subj_dir, 'cross_corr')
#cross_corr_file = os.path.join(cross_corr_dir, 'train_norm1_corr.npy')
#cross_corr = np.load(cross_corr_file, mmap_mode='r')
## output config
#ALPHA_NUM = 20
#BOOTS_NUM = 15
#full_vxl_num, feat_num = cross_corr.shape
#vxl_num = len(vxl_idx)
#wt_mtx = np.zeros((vxl_num, feat_num))
#alpha_mtx = np.zeros(vxl_num)
#val_corr_mtx = np.zeros(vxl_num)
##bootstrap_corr_mtx = np.zeros((vxl_num, ALPHA_NUM, BOOTS_NUM))
#bootstrap_corr_mtx = np.zeros((vxl_num, BOOTS_NUM))
## voxel-wise regression
#for i in range(vxl_num):
# print 'Voxel %s in %s'%(i+1, vxl_num)
# v_corr = cross_corr[np.where(full_vxl_idx==vxl_idx[i])[0][0], :]
# feat_idx = v_corr > (v_corr.max()/2)
# print 'Select %s features'%(feat_idx.sum())
# vtrain_feat = train_feat_ts[feat_idx, :]
# vval_feat = val_feat_ts[feat_idx, :]
# vtrain_fmri = np.expand_dims(train_fmri_ts[i, :], axis=0)
# vval_fmri = np.expand_dims(val_fmri_ts[i, :], axis=0)
# wt, val_corr, alpha, bscores, valinds = ridge.bootstrap_ridge(
# vtrain_feat.T, vtrain_fmri.T,
# vval_feat.T, vval_fmri.T,
# alphas=np.arange(100, 2001, 2001/ALPHA_NUM),
# #alphas=np.logspace(-2, 3, ALPHA_NUM),
# nboots=BOOTS_NUM, chunklen=72, nchunks=20,
# single_alpha=False, use_corr=True)
# print 'Alpha: %s'%(alpha)
# print 'Val Corr: %s'%(val_corr)
# wt_mtx[i, feat_idx] = wt.T
# val_corr_mtx[i] = val_corr
# alpha_mtx[i] = alpha
# alpha_idx = np.where(np.arange(100, 2001, 2001/ALPHA_NUM)==alpha)[0][0]
# #alpha_idx = np.where(np.logspace(-2, 3, ALPHA_NUM)==alpha)[0][0]
# bootstrap_corr_mtx[i, :] = bscores[alpha_idx, 0, :]
# #bootstrap_corr_mtx[i, ...] = bscores[:, 0, :]
## save output
#wt_file = os.path.join(ridge_dir, 'norm1_wt.npy')
#alpha_file = os.path.join(ridge_dir, 'norm1_alpha.npy')
#val_corr_file = os.path.join(ridge_dir, 'norm1_val_corr.npy')
#bootstrap_corr_file = os.path.join(ridge_dir, 'norm1_bootstrap_corr.npy')
#np.save(wt_file, wt_mtx)
#np.save(alpha_file, alpha_mtx)
#np.save(val_corr_file, val_corr_mtx)
#np.save(bootstrap_corr_file, bootstrap_corr_mtx)
|
sealhuang/brainCodingToolbox
|
braincode/vim2/rfencoding.py
|
Python
|
bsd-3-clause
| 27,156
|
[
"Gaussian"
] |
f28012babd46f1c57ada314ca08c0717892a7dc0d5691af16afd2e45000511fc
|
from __future__ import print_function, division
import collections
from sympy.core.add import Add
from sympy.core.basic import Basic, Atom
from sympy.core.expr import Expr
from sympy.core.function import count_ops
from sympy.core.logic import fuzzy_and
from sympy.core.power import Pow
from sympy.core.symbol import Symbol, Dummy, symbols
from sympy.core.numbers import Integer, ilcm, Float
from sympy.core.singleton import S
from sympy.core.sympify import sympify
from sympy.core.compatibility import is_sequence, default_sort_key, range, NotIterable
from sympy.polys import PurePoly, roots, cancel, gcd
from sympy.simplify import simplify as _simplify, signsimp, nsimplify
from sympy.utilities.iterables import flatten, numbered_symbols
from sympy.functions.elementary.miscellaneous import sqrt, Max, Min
from sympy.functions import exp, factorial
from sympy.printing import sstr
from sympy.core.compatibility import reduce, as_int, string_types
from sympy.assumptions.refine import refine
from types import FunctionType
def _iszero(x):
"""Returns True if x is zero."""
return x.is_zero
class MatrixError(Exception):
pass
class ShapeError(ValueError, MatrixError):
"""Wrong matrix shape"""
pass
class NonSquareMatrixError(ShapeError):
pass
class DeferredVector(Symbol, NotIterable):
"""A vector whose components are deferred (e.g. for use with lambdify)
Examples
========
>>> from sympy import DeferredVector, lambdify
>>> X = DeferredVector( 'X' )
>>> X
X
>>> expr = (X[0] + 2, X[2] + 3)
>>> func = lambdify( X, expr)
>>> func( [1, 2, 3] )
(3, 6)
"""
def __getitem__(self, i):
if i == -0:
i = 0
if i < 0:
raise IndexError('DeferredVector index out of range')
component_name = '%s[%d]' % (self.name, i)
return Symbol(component_name)
def __str__(self):
return sstr(self)
def __repr__(self):
return "DeferredVector('%s')" % (self.name)
class MatrixBase(object):
# Added just for numpy compatibility
__array_priority__ = 11
is_Matrix = True
is_Identity = None
_class_priority = 3
_sympify = staticmethod(sympify)
__hash__ = None # Mutable
@classmethod
def _handle_creation_inputs(cls, *args, **kwargs):
"""Return the number of rows, cols and flat matrix elements.
Examples
========
>>> from sympy import Matrix, I
Matrix can be constructed as follows:
* from a nested list of iterables
>>> Matrix( ((1, 2+I), (3, 4)) )
Matrix([
[1, 2 + I],
[3, 4]])
* from un-nested iterable (interpreted as a column)
>>> Matrix( [1, 2] )
Matrix([
[1],
[2]])
* from un-nested iterable with dimensions
>>> Matrix(1, 2, [1, 2] )
Matrix([[1, 2]])
* from no arguments (a 0 x 0 matrix)
>>> Matrix()
Matrix(0, 0, [])
* from a rule
>>> Matrix(2, 2, lambda i, j: i/(j + 1) )
Matrix([
[0, 0],
[1, 1/2]])
"""
from sympy.matrices.sparse import SparseMatrix
flat_list = None
if len(args) == 1:
# Matrix(SparseMatrix(...))
if isinstance(args[0], SparseMatrix):
return args[0].rows, args[0].cols, flatten(args[0].tolist())
# Matrix(Matrix(...))
elif isinstance(args[0], MatrixBase):
return args[0].rows, args[0].cols, args[0]._mat
# Matrix(MatrixSymbol('X', 2, 2))
elif isinstance(args[0], Basic) and args[0].is_Matrix:
return args[0].rows, args[0].cols, args[0].as_explicit()._mat
# Matrix(numpy.ones((2, 2)))
elif hasattr(args[0], "__array__"):
# NumPy array or matrix or some other object that implements
# __array__. So let's first use this method to get a
# numpy.array() and then make a python list out of it.
arr = args[0].__array__()
if len(arr.shape) == 2:
rows, cols = arr.shape[0], arr.shape[1]
flat_list = [cls._sympify(i) for i in arr.ravel()]
return rows, cols, flat_list
elif len(arr.shape) == 1:
rows, cols = arr.shape[0], 1
flat_list = [S.Zero]*rows
for i in range(len(arr)):
flat_list[i] = cls._sympify(arr[i])
return rows, cols, flat_list
else:
raise NotImplementedError(
"SymPy supports just 1D and 2D matrices")
# Matrix([1, 2, 3]) or Matrix([[1, 2], [3, 4]])
elif is_sequence(args[0])\
and not isinstance(args[0], DeferredVector):
in_mat = []
ncol = set()
for row in args[0]:
if isinstance(row, MatrixBase):
in_mat.extend(row.tolist())
if row.cols or row.rows: # only pay attention if it's not 0x0
ncol.add(row.cols)
else:
in_mat.append(row)
try:
ncol.add(len(row))
except TypeError:
ncol.add(1)
if len(ncol) > 1:
raise ValueError("Got rows of variable lengths: %s" %
sorted(list(ncol)))
cols = ncol.pop() if ncol else 0
rows = len(in_mat) if cols else 0
if rows:
if not is_sequence(in_mat[0]):
cols = 1
flat_list = [cls._sympify(i) for i in in_mat]
return rows, cols, flat_list
flat_list = []
for j in range(rows):
for i in range(cols):
flat_list.append(cls._sympify(in_mat[j][i]))
elif len(args) == 3:
rows = as_int(args[0])
cols = as_int(args[1])
# Matrix(2, 2, lambda i, j: i+j)
if len(args) == 3 and isinstance(args[2], collections.Callable):
op = args[2]
flat_list = []
for i in range(rows):
flat_list.extend(
[cls._sympify(op(cls._sympify(i), cls._sympify(j)))
for j in range(cols)])
# Matrix(2, 2, [1, 2, 3, 4])
elif len(args) == 3 and is_sequence(args[2]):
flat_list = args[2]
if len(flat_list) != rows*cols:
raise ValueError('List length should be equal to rows*columns')
flat_list = [cls._sympify(i) for i in flat_list]
# Matrix()
elif len(args) == 0:
# Empty Matrix
rows = cols = 0
flat_list = []
if flat_list is None:
raise TypeError("Data type not understood")
return rows, cols, flat_list
def _setitem(self, key, value):
"""Helper to set value at location given by key.
Examples
========
>>> from sympy import Matrix, I, zeros, ones
>>> m = Matrix(((1, 2+I), (3, 4)))
>>> m
Matrix([
[1, 2 + I],
[3, 4]])
>>> m[1, 0] = 9
>>> m
Matrix([
[1, 2 + I],
[9, 4]])
>>> m[1, 0] = [[0, 1]]
To replace row r you assign to position r*m where m
is the number of columns:
>>> M = zeros(4)
>>> m = M.cols
>>> M[3*m] = ones(1, m)*2; M
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[2, 2, 2, 2]])
And to replace column c you can assign to position c:
>>> M[2] = ones(m, 1)*4; M
Matrix([
[0, 0, 4, 0],
[0, 0, 4, 0],
[0, 0, 4, 0],
[2, 2, 4, 2]])
"""
from .dense import Matrix
is_slice = isinstance(key, slice)
i, j = key = self.key2ij(key)
is_mat = isinstance(value, MatrixBase)
if type(i) is slice or type(j) is slice:
if is_mat:
self.copyin_matrix(key, value)
return
if not isinstance(value, Expr) and is_sequence(value):
self.copyin_list(key, value)
return
raise ValueError('unexpected value: %s' % value)
else:
if (not is_mat and
not isinstance(value, Basic) and is_sequence(value)):
value = Matrix(value)
is_mat = True
if is_mat:
if is_slice:
key = (slice(*divmod(i, self.cols)),
slice(*divmod(j, self.cols)))
else:
key = (slice(i, i + value.rows),
slice(j, j + value.cols))
self.copyin_matrix(key, value)
else:
return i, j, self._sympify(value)
return
def copy(self):
return self._new(self.rows, self.cols, self._mat)
def trace(self):
if not self.is_square:
raise NonSquareMatrixError()
return self._eval_trace()
def inv(self, method=None, **kwargs):
if not self.is_square:
raise NonSquareMatrixError()
if method is not None:
kwargs['method'] = method
return self._eval_inverse(**kwargs)
def inv_mod(self, m):
r"""
Returns the inverse of the matrix `K` (mod `m`), if it exists.
        The method implemented in this function to find the inverse of `K` (mod `m`) is:
* Compute `\mathrm{adj}(K) = \mathrm{cof}(K)^t`, the adjoint matrix of `K`.
* Compute `r = 1/\mathrm{det}(K) \pmod m`.
* `K^{-1} = r\cdot \mathrm{adj}(K) \pmod m`.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix(2, 2, [1, 2, 3, 4])
>>> A.inv_mod(5)
Matrix([
[3, 1],
[4, 2]])
>>> A.inv_mod(3)
Matrix([
[1, 1],
[0, 1]])
"""
from sympy.ntheory import totient
if not self.is_square:
raise NonSquareMatrixError()
N = self.cols
phi = totient(m)
det_K = self.det()
if gcd(det_K, m) != 1:
raise ValueError('Matrix is not invertible (mod %d)' % m)
det_inv = pow(int(det_K), int(phi - 1), int(m))
K_adj = self.cofactorMatrix().transpose()
K_inv = self.__class__(N, N, [det_inv*K_adj[i, j] % m for i in range(N) for j in range(N)])
return K_inv
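    # A quick sanity check of the identity above (an illustrative sketch,
    # assuming a standard Matrix): K * K.inv_mod(m) reduces to the
    # identity once every entry is taken mod m.
    #     >>> from sympy import Matrix
    #     >>> A = Matrix(2, 2, [1, 2, 3, 4])
    #     >>> (A * A.inv_mod(5)).applyfunc(lambda x: x % 5)
    #     Matrix([
    #     [1, 0],
    #     [0, 1]])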
def transpose(self):
return self._eval_transpose()
T = property(transpose, None, None, "Matrix transposition.")
def conjugate(self):
return self._eval_conjugate()
C = property(conjugate, None, None, "By-element conjugation.")
def adjoint(self):
"""Conjugate transpose or Hermitian conjugation."""
return self.T.C
@property
def H(self):
"""Return Hermite conjugate.
Examples
========
>>> from sympy import Matrix, I
>>> m = Matrix((0, 1 + I, 2, 3))
>>> m
Matrix([
[ 0],
[1 + I],
[ 2],
[ 3]])
>>> m.H
Matrix([[0, 1 - I, 2, 3]])
See Also
========
conjugate: By-element conjugation
D: Dirac conjugation
"""
return self.T.C
@property
def D(self):
"""Return Dirac conjugate (if self.rows == 4).
Examples
========
>>> from sympy import Matrix, I, eye
>>> m = Matrix((0, 1 + I, 2, 3))
>>> m.D
Matrix([[0, 1 - I, -2, -3]])
>>> m = (eye(4) + I*eye(4))
>>> m[0, 3] = 2
>>> m.D
Matrix([
[1 - I, 0, 0, 0],
[ 0, 1 - I, 0, 0],
[ 0, 0, -1 + I, 0],
[ 2, 0, 0, -1 + I]])
If the matrix does not have 4 rows an AttributeError will be raised
because this property is only defined for matrices with 4 rows.
>>> Matrix(eye(2)).D
Traceback (most recent call last):
...
AttributeError: Matrix has no attribute D.
See Also
========
conjugate: By-element conjugation
        H: Hermitian conjugation
"""
from sympy.physics.matrices import mgamma
if self.rows != 4:
            # In Python 3.2, properties can only raise an AttributeError
# so we can't raise a ShapeError -- see commit which added the
# first line of this inline comment. Also, there is no need
# for a message since MatrixBase will raise the AttributeError
raise AttributeError
return self.H*mgamma(0)
def __array__(self):
from .dense import matrix2numpy
return matrix2numpy(self)
def __len__(self):
"""Return the number of elements of self.
Implemented mainly so bool(Matrix()) == False.
"""
return self.rows*self.cols
@property
def shape(self):
"""The shape (dimensions) of the matrix as the 2-tuple (rows, cols).
Examples
========
>>> from sympy.matrices import zeros
>>> M = zeros(2, 3)
>>> M.shape
(2, 3)
>>> M.rows
2
>>> M.cols
3
"""
return (self.rows, self.cols)
def __sub__(self, a):
return self + (-a)
def __rsub__(self, a):
return (-self) + a
def __mul__(self, other):
"""Return self*other where other is either a scalar or a matrix
of compatible dimensions.
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix([[1, 2, 3], [4, 5, 6]])
>>> 2*A == A*2 == Matrix([[2, 4, 6], [8, 10, 12]])
True
>>> B = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> A*B
Matrix([
[30, 36, 42],
[66, 81, 96]])
>>> B*A
Traceback (most recent call last):
...
ShapeError: Matrices size mismatch.
>>>
See Also
========
matrix_multiply_elementwise
"""
if getattr(other, 'is_Matrix', False):
A = self
B = other
if A.cols != B.rows:
raise ShapeError("Matrices size mismatch.")
if A.cols == 0:
return classof(A, B)._new(A.rows, B.cols, lambda i, j: 0)
try:
blst = B.T.tolist()
except AttributeError:
# If B is a MatrixSymbol, B.T.tolist does not exist
return NotImplemented
alst = A.tolist()
return classof(A, B)._new(A.rows, B.cols, lambda i, j:
reduce(lambda k, l: k + l,
[a_ik * b_kj for a_ik, b_kj in zip(alst[i], blst[j])]))
else:
return self._new(self.rows, self.cols,
[i*other for i in self._mat])
__matmul__ = __mul__
def __rmul__(self, a):
if getattr(a, 'is_Matrix', False):
return self._new(a)*self
return self._new(self.rows, self.cols, [a*i for i in self._mat])
__rmatmul__ = __rmul__
def __pow__(self, num):
from sympy.matrices import eye, diag, MutableMatrix
from sympy import binomial
if not self.is_square:
raise NonSquareMatrixError()
        if isinstance(num, (int, Integer)):
n = int(num)
if n < 0:
return self.inv()**-n # A**-2 = (A**-1)**2
a = eye(self.cols)
s = self
while n:
if n % 2:
a *= s
n -= 1
if not n:
break
s *= s
n //= 2
return self._new(a)
elif isinstance(num, (Expr, float)):
def jordan_cell_power(jc, n):
N = jc.shape[0]
l = jc[0, 0]
for i in range(N):
for j in range(N-i):
bn = binomial(n, i)
if isinstance(bn, binomial):
bn = bn._eval_expand_func()
jc[j, i+j] = l**(n-i)*bn
P, jordan_cells = self.jordan_cells()
# Make sure jordan_cells matrices are mutable:
jordan_cells = [MutableMatrix(j) for j in jordan_cells]
for j in jordan_cells:
jordan_cell_power(j, num)
return self._new(P*diag(*jordan_cells)*P.inv())
else:
raise TypeError(
"Only SymPy expressions or int objects are supported as exponent for matrices")
def __add__(self, other):
"""Return self + other, raising ShapeError if shapes don't match."""
if getattr(other, 'is_Matrix', False):
A = self
B = other
if A.shape != B.shape:
raise ShapeError("Matrix size mismatch.")
alst = A.tolist()
blst = B.tolist()
ret = [S.Zero]*A.rows
for i in range(A.shape[0]):
ret[i] = [j + k for j, k in zip(alst[i], blst[i])]
rv = classof(A, B)._new(ret)
if 0 in A.shape:
rv = rv.reshape(*A.shape)
return rv
raise TypeError('cannot add matrix and %s' % type(other))
def __radd__(self, other):
return self + other
def __div__(self, other):
return self*(S.One / other)
def __truediv__(self, other):
return self.__div__(other)
def __neg__(self):
return -1*self
def multiply(self, b):
"""Returns self*b
See Also
========
dot
cross
multiply_elementwise
"""
return self*b
def add(self, b):
"""Return self + b """
return self + b
def table(self, printer, rowstart='[', rowend=']', rowsep='\n',
colsep=', ', align='right'):
r"""
String form of Matrix as a table.
``printer`` is the printer to use for on the elements (generally
something like StrPrinter())
``rowstart`` is the string used to start each row (by default '[').
``rowend`` is the string used to end each row (by default ']').
``rowsep`` is the string used to separate rows (by default a newline).
``colsep`` is the string used to separate columns (by default ', ').
``align`` defines how the elements are aligned. Must be one of 'left',
'right', or 'center'. You can also use '<', '>', and '^' to mean the
same thing, respectively.
This is used by the string printer for Matrix.
Examples
========
>>> from sympy import Matrix
>>> from sympy.printing.str import StrPrinter
>>> M = Matrix([[1, 2], [-33, 4]])
>>> printer = StrPrinter()
>>> M.table(printer)
'[ 1, 2]\n[-33, 4]'
>>> print(M.table(printer))
[ 1, 2]
[-33, 4]
>>> print(M.table(printer, rowsep=',\n'))
[ 1, 2],
[-33, 4]
>>> print('[%s]' % M.table(printer, rowsep=',\n'))
[[ 1, 2],
[-33, 4]]
>>> print(M.table(printer, colsep=' '))
[ 1 2]
[-33 4]
>>> print(M.table(printer, align='center'))
[ 1 , 2]
[-33, 4]
>>> print(M.table(printer, rowstart='{', rowend='}'))
{ 1, 2}
{-33, 4}
"""
# Handle zero dimensions:
if self.rows == 0 or self.cols == 0:
return '[]'
# Build table of string representations of the elements
res = []
# Track per-column max lengths for pretty alignment
maxlen = [0] * self.cols
for i in range(self.rows):
res.append([])
for j in range(self.cols):
s = printer._print(self[i,j])
res[-1].append(s)
maxlen[j] = max(len(s), maxlen[j])
# Patch strings together
align = {
'left': 'ljust',
'right': 'rjust',
'center': 'center',
'<': 'ljust',
'>': 'rjust',
'^': 'center',
}[align]
for i, row in enumerate(res):
for j, elem in enumerate(row):
row[j] = getattr(elem, align)(maxlen[j])
res[i] = rowstart + colsep.join(row) + rowend
return rowsep.join(res)
def _format_str(self, printer=None):
if not printer:
from sympy.printing.str import StrPrinter
printer = StrPrinter()
# Handle zero dimensions:
if self.rows == 0 or self.cols == 0:
return 'Matrix(%s, %s, [])' % (self.rows, self.cols)
if self.rows == 1:
return "Matrix([%s])" % self.table(printer, rowsep=',\n')
return "Matrix([\n%s])" % self.table(printer, rowsep=',\n')
def __str__(self):
if self.rows == 0 or self.cols == 0:
return 'Matrix(%s, %s, [])' % (self.rows, self.cols)
return "Matrix(%s)" % str(self.tolist())
def __repr__(self):
return sstr(self)
def cholesky(self):
"""Returns the Cholesky decomposition L of a matrix A
such that L * L.T = A
A must be a square, symmetric, positive-definite
and non-singular matrix.
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> A.cholesky()
Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
>>> A.cholesky() * A.cholesky().T
Matrix([
[25, 15, -5],
[15, 18, 0],
[-5, 0, 11]])
See Also
========
LDLdecomposition
LUdecomposition
QRdecomposition
"""
if not self.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if not self.is_symmetric():
raise ValueError("Matrix must be symmetric.")
return self._cholesky()
def LDLdecomposition(self):
"""Returns the LDL Decomposition (L, D) of matrix A,
such that L * D * L.T == A
        This method eliminates the use of square roots.
        Further, it ensures that all the diagonal entries of L are 1.
A must be a square, symmetric, positive-definite
and non-singular matrix.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> L, D = A.LDLdecomposition()
>>> L
Matrix([
[ 1, 0, 0],
[ 3/5, 1, 0],
[-1/5, 1/3, 1]])
>>> D
Matrix([
[25, 0, 0],
[ 0, 9, 0],
[ 0, 0, 9]])
>>> L * D * L.T * A.inv() == eye(A.rows)
True
See Also
========
cholesky
LUdecomposition
QRdecomposition
"""
if not self.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if not self.is_symmetric():
raise ValueError("Matrix must be symmetric.")
return self._LDLdecomposition()
def lower_triangular_solve(self, rhs):
"""Solves Ax = B, where A is a lower triangular matrix.
See Also
========
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
if not self.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if rhs.rows != self.rows:
raise ShapeError("Matrices size mismatch.")
if not self.is_lower:
raise ValueError("Matrix must be lower triangular.")
return self._lower_triangular_solve(rhs)
def upper_triangular_solve(self, rhs):
"""Solves Ax = B, where A is an upper triangular matrix.
See Also
========
lower_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
if not self.is_square:
raise NonSquareMatrixError("Matrix must be square.")
if rhs.rows != self.rows:
            raise ShapeError("Matrices size mismatch.")
if not self.is_upper:
            raise ValueError("Matrix must be upper triangular.")
return self._upper_triangular_solve(rhs)
def cholesky_solve(self, rhs):
"""Solves Ax = B using Cholesky decomposition,
for a general square non-singular matrix.
For a non-square matrix with rows > cols,
the least squares solution is returned.
See Also
========
lower_triangular_solve
upper_triangular_solve
gauss_jordan_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
if self.is_symmetric():
L = self._cholesky()
elif self.rows >= self.cols:
L = (self.T*self)._cholesky()
rhs = self.T*rhs
else:
raise NotImplementedError('Under-determined System. '
'Try M.gauss_jordan_solve(rhs)')
Y = L._lower_triangular_solve(rhs)
return (L.T)._upper_triangular_solve(Y)
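    # Illustrative sketch of the tall-system branch above (assuming a
    # standard Matrix): for rows > cols the normal equations
    # (A.T*A)*x = A.T*b are solved by Cholesky, giving the least-squares
    # solution.
    #     >>> from sympy import Matrix
    #     >>> A = Matrix([[1, 0], [0, 1], [1, 1]])
    #     >>> A.cholesky_solve(Matrix([1, 1, 1]))
    #     Matrix([
    #     [2/3],
    #     [2/3]])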
def diagonal_solve(self, rhs):
"""Solves Ax = B efficiently, where A is a diagonal Matrix,
with non-zero diagonal entries.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> A = eye(2)*2
>>> B = Matrix([[1, 2], [3, 4]])
>>> A.diagonal_solve(B) == B/2
True
See Also
========
lower_triangular_solve
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
        if not self.is_diagonal():
            raise TypeError("Matrix should be diagonal")
        if rhs.rows != self.rows:
            raise TypeError("Size mismatch.")
return self._diagonal_solve(rhs)
def LDLsolve(self, rhs):
"""Solves Ax = B using LDL decomposition,
for a general square and non-singular matrix.
For a non-square matrix with rows > cols,
the least squares solution is returned.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> A = eye(2)*2
>>> B = Matrix([[1, 2], [3, 4]])
>>> A.LDLsolve(B) == B/2
True
See Also
========
LDLdecomposition
lower_triangular_solve
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LUsolve
QRsolve
pinv_solve
"""
if self.is_symmetric():
L, D = self.LDLdecomposition()
elif self.rows >= self.cols:
L, D = (self.T*self).LDLdecomposition()
rhs = self.T*rhs
else:
raise NotImplementedError('Under-determined System. '
'Try M.gauss_jordan_solve(rhs)')
Y = L._lower_triangular_solve(rhs)
Z = D._diagonal_solve(Y)
return (L.T)._upper_triangular_solve(Z)
def solve_least_squares(self, rhs, method='CH'):
"""Return the least-square fit to the data.
By default the cholesky_solve routine is used (method='CH'); other
methods of matrix inversion can be used. To find out which are
available, see the docstring of the .inv() method.
Examples
========
>>> from sympy.matrices import Matrix, ones
>>> A = Matrix([1, 2, 3])
>>> B = Matrix([2, 3, 4])
>>> S = Matrix(A.row_join(B))
>>> S
Matrix([
[1, 2],
[2, 3],
[3, 4]])
        If each line of S represents coefficients of Ax + By
and x and y are [2, 3] then S*xy is:
>>> r = S*Matrix([2, 3]); r
Matrix([
[ 8],
[13],
[18]])
But let's add 1 to the middle value and then solve for the
least-squares value of xy:
>>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy
Matrix([
[ 5/3],
[10/3]])
The error is given by S*xy - r:
>>> S*xy - r
Matrix([
[1/3],
[1/3],
[1/3]])
>>> _.norm().n(2)
0.58
If a different xy is used, the norm will be higher:
>>> xy += ones(2, 1)/10
>>> (S*xy - r).norm().n(2)
1.5
"""
if method == 'CH':
return self.cholesky_solve(rhs)
t = self.T
return (t*self).inv(method=method)*t*rhs
def solve(self, rhs, method='GE'):
"""Return solution to self*soln = rhs using given inversion method.
For a list of possible inversion methods, see the .inv() docstring.
"""
if not self.is_square:
if self.rows < self.cols:
raise ValueError('Under-determined system. '
'Try M.gauss_jordan_solve(rhs)')
elif self.rows > self.cols:
raise ValueError('For over-determined system, M, having '
'more rows than columns, try M.solve_least_squares(rhs).')
else:
return self.inv(method=method)*rhs
def __mathml__(self):
mml = ""
for i in range(self.rows):
mml += "<matrixrow>"
for j in range(self.cols):
mml += self[i, j].__mathml__()
mml += "</matrixrow>"
return "<matrix>" + mml + "</matrix>"
def extract(self, rowsList, colsList):
"""Return a submatrix by specifying a list of rows and columns.
Negative indices can be given. All indices must be in the range
-n <= i < n where n is the number of rows or columns.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(4, 3, range(12))
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9, 10, 11]])
>>> m.extract([0, 1, 3], [0, 1])
Matrix([
[0, 1],
[3, 4],
[9, 10]])
Rows or columns can be repeated:
>>> m.extract([0, 0, 1], [-1])
Matrix([
[2],
[2],
[5]])
Every other row can be taken by using range to provide the indices:
>>> m.extract(range(0, m.rows, 2), [-1])
Matrix([
[2],
[8]])
        rowsList or colsList can also be a list of booleans, in which case
the rows or columns corresponding to the True values will be selected:
>>> m.extract([0, 1, 2, 3], [True, False, True])
Matrix([
[0, 2],
[3, 5],
[6, 8],
[9, 11]])
"""
cols = self.cols
flat_list = self._mat
if rowsList and all(isinstance(i, bool) for i in rowsList):
rowsList = [index for index, item in enumerate(rowsList) if item]
if colsList and all(isinstance(i, bool) for i in colsList):
colsList = [index for index, item in enumerate(colsList) if item]
rowsList = [a2idx(k, self.rows) for k in rowsList]
colsList = [a2idx(k, self.cols) for k in colsList]
return self._new(len(rowsList), len(colsList),
lambda i, j: flat_list[rowsList[i]*cols + colsList[j]])
def key2bounds(self, keys):
"""Converts a key with potentially mixed types of keys (integer and slice)
into a tuple of ranges and raises an error if any index is out of self's
range.
See Also
========
key2ij
"""
islice, jslice = [isinstance(k, slice) for k in keys]
if islice:
if not self.rows:
rlo = rhi = 0
else:
rlo, rhi = keys[0].indices(self.rows)[:2]
else:
rlo = a2idx(keys[0], self.rows)
rhi = rlo + 1
if jslice:
if not self.cols:
clo = chi = 0
else:
clo, chi = keys[1].indices(self.cols)[:2]
else:
clo = a2idx(keys[1], self.cols)
chi = clo + 1
return rlo, rhi, clo, chi
def key2ij(self, key):
"""Converts key into canonical form, converting integers or indexable
items into valid integers for self's range or returning slices
unchanged.
See Also
========
key2bounds
"""
if is_sequence(key):
if not len(key) == 2:
raise TypeError('key must be a sequence of length 2')
return [a2idx(i, n) if not isinstance(i, slice) else i
for i, n in zip(key, self.shape)]
elif isinstance(key, slice):
return key.indices(len(self))[:2]
else:
return divmod(a2idx(key, len(self)), self.cols)
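    # Canonicalization sketch for the three key shapes handled above
    # (illustrative; m is assumed to be a 2 x 3 Matrix):
    #     m.key2ij((1, 2))      -> [1, 2]   two indices, kept as a pair
    #     m.key2ij(slice(0, 4)) -> (0, 4)   flat slice -> (start, stop)
    #     m.key2ij(5)           -> (1, 2)   flat index -> divmod(5, m.cols)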
def evalf(self, prec=None, **options):
"""Apply evalf() to each element of self."""
return self.applyfunc(lambda i: i.evalf(prec, **options))
n = evalf
def atoms(self, *types):
"""Returns the atoms that form the current object.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.matrices import Matrix
>>> Matrix([[x]])
Matrix([[x]])
>>> _.atoms()
set([x])
"""
if types:
types = tuple(
[t if isinstance(t, type) else type(t) for t in types])
else:
types = (Atom,)
result = set()
for i in self:
result.update( i.atoms(*types) )
return result
@property
def free_symbols(self):
"""Returns the free symbols within the matrix.
Examples
========
>>> from sympy.abc import x
>>> from sympy.matrices import Matrix
>>> Matrix([[x], [1]]).free_symbols
set([x])
"""
return set().union(*[i.free_symbols for i in self])
def subs(self, *args, **kwargs): # should mirror core.basic.subs
"""Return a new matrix with subs applied to each entry.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.matrices import SparseMatrix, Matrix
>>> SparseMatrix(1, 1, [x])
Matrix([[x]])
>>> _.subs(x, y)
Matrix([[y]])
>>> Matrix(_).subs(y, x)
Matrix([[x]])
"""
return self.applyfunc(lambda x: x.subs(*args, **kwargs))
def xreplace(self, rule): # should mirror core.basic.xreplace
"""Return a new matrix with xreplace applied to each entry.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.matrices import SparseMatrix, Matrix
>>> SparseMatrix(1, 1, [x])
Matrix([[x]])
>>> _.xreplace({x: y})
Matrix([[y]])
>>> Matrix(_).xreplace({y: x})
Matrix([[x]])
"""
return self.applyfunc(lambda x: x.xreplace(rule))
def expand(self, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
"""Apply core.function.expand to each entry of the matrix.
Examples
========
>>> from sympy.abc import x
>>> from sympy.matrices import Matrix
>>> Matrix(1, 1, [x*(x+1)])
Matrix([[x*(x + 1)]])
>>> _.expand()
Matrix([[x**2 + x]])
"""
return self.applyfunc(lambda x: x.expand(
deep, modulus, power_base, power_exp, mul, log, multinomial, basic,
**hints))
def simplify(self, ratio=1.7, measure=count_ops):
"""Apply simplify to each element of the matrix.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import sin, cos
>>> from sympy.matrices import SparseMatrix
>>> SparseMatrix(1, 1, [x*sin(y)**2 + x*cos(y)**2])
Matrix([[x*sin(y)**2 + x*cos(y)**2]])
>>> _.simplify()
Matrix([[x]])
"""
return self.applyfunc(lambda x: x.simplify(ratio, measure))
_eval_simplify = simplify
def refine(self, assumptions=True):
"""Apply refine to each element of the matrix.
Examples
========
>>> from sympy import Symbol, Matrix, Abs, sqrt, Q
>>> x = Symbol('x')
>>> Matrix([[Abs(x)**2, sqrt(x**2)],[sqrt(x**2), Abs(x)**2]])
Matrix([
[ Abs(x)**2, sqrt(x**2)],
[sqrt(x**2), Abs(x)**2]])
>>> _.refine(Q.real(x))
Matrix([
[ x**2, Abs(x)],
[Abs(x), x**2]])
"""
return self.applyfunc(lambda x: refine(x, assumptions))
def doit(self, **kwargs):
return self._new(self.rows, self.cols, [i.doit() for i in self._mat])
def print_nonzero(self, symb="X"):
"""Shows location of non-zero entries for fast shape lookup.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> m = Matrix(2, 3, lambda i, j: i*3+j)
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5]])
>>> m.print_nonzero()
[ XX]
[XXX]
>>> m = eye(4)
>>> m.print_nonzero("x")
[x ]
[ x ]
[ x ]
[ x]
"""
s = []
for i in range(self.rows):
line = []
for j in range(self.cols):
if self[i, j] == 0:
line.append(" ")
else:
line.append(str(symb))
s.append("[%s]" % ''.join(line))
print('\n'.join(s))
def LUsolve(self, rhs, iszerofunc=_iszero):
"""Solve the linear system Ax = rhs for x where A = self.
        This is for symbolic matrices; for real or complex ones use
mpmath.lu_solve or mpmath.qr_solve.
See Also
========
lower_triangular_solve
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
QRsolve
pinv_solve
LUdecomposition
"""
if rhs.rows != self.rows:
raise ShapeError("`self` and `rhs` must have the same number of rows.")
        A, perm = self.LUdecomposition_Simple(iszerofunc=iszerofunc)
n = self.rows
b = rhs.permuteFwd(perm).as_mutable()
# forward substitution, all diag entries are scaled to 1
for i in range(n):
for j in range(i):
scale = A[i, j]
b.zip_row_op(i, j, lambda x, y: x - y*scale)
# backward substitution
for i in range(n - 1, -1, -1):
for j in range(i + 1, n):
scale = A[i, j]
b.zip_row_op(i, j, lambda x, y: x - y*scale)
scale = A[i, i]
b.row_op(i, lambda x, _: x/scale)
return rhs.__class__(b)
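    # Usage sketch (assuming a standard Matrix): one factorization, then
    # the forward and backward sweeps above.
    #     >>> from sympy import Matrix
    #     >>> A = Matrix([[2, 1], [1, 3]])
    #     >>> A.LUsolve(Matrix([3, 4]))
    #     Matrix([
    #     [1],
    #     [1]])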
def LUdecomposition(self, iszerofunc=_iszero):
"""Returns the decomposition LU and the row swaps p.
Examples
========
>>> from sympy import Matrix
>>> a = Matrix([[4, 3], [6, 3]])
>>> L, U, _ = a.LUdecomposition()
>>> L
Matrix([
[ 1, 0],
[3/2, 1]])
>>> U
Matrix([
[4, 3],
[0, -3/2]])
See Also
========
cholesky
LDLdecomposition
QRdecomposition
LUdecomposition_Simple
LUdecompositionFF
LUsolve
"""
        combined, p = self.LUdecomposition_Simple(iszerofunc=iszerofunc)
L = self.zeros(self.rows)
U = self.zeros(self.rows)
for i in range(self.rows):
for j in range(self.rows):
if i > j:
L[i, j] = combined[i, j]
else:
if i == j:
L[i, i] = 1
U[i, j] = combined[i, j]
return L, U, p
def LUdecomposition_Simple(self, iszerofunc=_iszero):
"""Returns A comprised of L, U (L's diag entries are 1) and
p which is the list of the row swaps (in order).
See Also
========
LUdecomposition
LUdecompositionFF
LUsolve
"""
if not self.is_square:
raise NonSquareMatrixError("A Matrix must be square to apply LUdecomposition_Simple().")
n = self.rows
A = self.as_mutable()
p = []
# factorization
for j in range(n):
for i in range(j):
for k in range(i):
A[i, j] = A[i, j] - A[i, k]*A[k, j]
pivot = -1
for i in range(j, n):
for k in range(j):
A[i, j] = A[i, j] - A[i, k]*A[k, j]
# find the first non-zero pivot, includes any expression
if pivot == -1 and not iszerofunc(A[i, j]):
pivot = i
if pivot < 0:
# this result is based on iszerofunc's analysis of the possible pivots, so even though
# the element may not be strictly zero, the supplied iszerofunc's evaluation gave True
raise ValueError("No nonzero pivot found; inversion failed.")
if pivot != j: # row must be swapped
A.row_swap(pivot, j)
p.append([pivot, j])
scale = 1 / A[j, j]
for i in range(j + 1, n):
A[i, j] = A[i, j]*scale
return A, p
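    # The returned matrix packs L strictly below the diagonal (its unit
    # diagonal is implied) and U on and above it; LUdecomposition()
    # unpacks the two factors. Illustrative sketch (assuming a standard
    # Matrix):
    #     >>> from sympy import Matrix
    #     >>> combined, p = Matrix([[4, 3], [6, 3]]).LUdecomposition_Simple()
    #     >>> combined
    #     Matrix([
    #     [  4,    3],
    #     [3/2, -3/2]])
    #     >>> p   # no row swaps were needed
    #     []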
def LUdecompositionFF(self):
"""Compute a fraction-free LU decomposition.
Returns 4 matrices P, L, D, U such that PA = L D**-1 U.
If the elements of the matrix belong to some integral domain I, then all
elements of L, D and U are guaranteed to belong to I.
**Reference**
- W. Zhou & D.J. Jeffrey, "Fraction-free matrix factors: new forms
for LU and QR factors". Frontiers in Computer Science in China,
Vol 2, no. 1, pp. 67-80, 2008.
See Also
========
LUdecomposition
LUdecomposition_Simple
LUsolve
"""
from sympy.matrices import SparseMatrix
zeros = SparseMatrix.zeros
eye = SparseMatrix.eye
n, m = self.rows, self.cols
U, L, P = self.as_mutable(), eye(n), eye(n)
DD = zeros(n, n)
oldpivot = 1
for k in range(n - 1):
if U[k, k] == 0:
for kpivot in range(k + 1, n):
if U[kpivot, k]:
break
else:
raise ValueError("Matrix is not full rank")
U[k, k:], U[kpivot, k:] = U[kpivot, k:], U[k, k:]
L[k, :k], L[kpivot, :k] = L[kpivot, :k], L[k, :k]
P[k, :], P[kpivot, :] = P[kpivot, :], P[k, :]
L[k, k] = Ukk = U[k, k]
DD[k, k] = oldpivot*Ukk
for i in range(k + 1, n):
L[i, k] = Uik = U[i, k]
for j in range(k + 1, m):
U[i, j] = (Ukk*U[i, j] - U[k, j]*Uik) / oldpivot
U[i, k] = 0
oldpivot = Ukk
DD[n - 1, n - 1] = oldpivot
return P, L, DD, U
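    # Fraction-free sanity sketch (assuming a standard Matrix): the four
    # factors stay in the integral domain and satisfy P*A == L*D**-1*U.
    #     >>> from sympy import Matrix
    #     >>> A = Matrix([[1, 2], [3, 4]])
    #     >>> P, L, D, U = A.LUdecompositionFF()
    #     >>> (P*A - L*D.inv()*U).is_zero
    #     True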
def cofactorMatrix(self, method="berkowitz"):
"""Return a matrix containing the cofactor of each element.
See Also
========
cofactor
minorEntry
minorMatrix
adjugate
"""
out = self._new(self.rows, self.cols, lambda i, j:
self.cofactor(i, j, method))
return out
def minorEntry(self, i, j, method="berkowitz"):
"""Calculate the minor of an element.
See Also
========
minorMatrix
cofactor
cofactorMatrix
"""
if not 0 <= i < self.rows or not 0 <= j < self.cols:
            raise ValueError("`i` and `j` must satisfy 0 <= i < `self.rows` "
                             "(%d) and 0 <= j < `self.cols` (%d)." %
                             (self.rows, self.cols))
return self.minorMatrix(i, j).det(method)
def minorMatrix(self, i, j):
"""Creates the minor matrix of a given element.
See Also
========
minorEntry
cofactor
cofactorMatrix
"""
if not 0 <= i < self.rows or not 0 <= j < self.cols:
            raise ValueError("`i` and `j` must satisfy 0 <= i < `self.rows` "
                             "(%d) and 0 <= j < `self.cols` (%d)." %
                             (self.rows, self.cols))
M = self.as_mutable()
M.row_del(i)
M.col_del(j)
return self._new(M)
def cofactor(self, i, j, method="berkowitz"):
"""Calculate the cofactor of an element.
See Also
========
cofactorMatrix
minorEntry
minorMatrix
"""
if (i + j) % 2 == 0:
return self.minorEntry(i, j, method)
else:
return -1*self.minorEntry(i, j, method)
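    # The sign in cofactor() is the usual checkerboard (-1)**(i + j).
    # Illustrative sketch (assuming a standard Matrix):
    #     >>> from sympy import Matrix
    #     >>> m = Matrix([[1, 2], [3, 4]])
    #     >>> m.cofactor(0, 1)   # minor is 3, position (0, 1) flips sign
    #     -3
    #     >>> m.cofactorMatrix()
    #     Matrix([
    #     [ 4, -3],
    #     [-2,  1]])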
def jacobian(self, X):
"""Calculates the Jacobian matrix (derivative of a vectorial function).
Parameters
==========
self : vector of expressions representing functions f_i(x_1, ..., x_n).
X : set of x_i's in order, it can be a list or a Matrix
Both self and X can be a row or a column matrix in any order
(i.e., jacobian() should always work).
Examples
========
>>> from sympy import sin, cos, Matrix
>>> from sympy.abc import rho, phi
>>> X = Matrix([rho*cos(phi), rho*sin(phi), rho**2])
>>> Y = Matrix([rho, phi])
>>> X.jacobian(Y)
Matrix([
[cos(phi), -rho*sin(phi)],
[sin(phi), rho*cos(phi)],
[ 2*rho, 0]])
>>> X = Matrix([rho*cos(phi), rho*sin(phi)])
>>> X.jacobian(Y)
Matrix([
[cos(phi), -rho*sin(phi)],
[sin(phi), rho*cos(phi)]])
See Also
========
hessian
wronskian
"""
if not isinstance(X, MatrixBase):
X = self._new(X)
# Both X and self can be a row or a column matrix, so we need to make
# sure all valid combinations work, but everything else fails:
if self.shape[0] == 1:
m = self.shape[1]
elif self.shape[1] == 1:
m = self.shape[0]
else:
raise TypeError("self must be a row or a column matrix")
if X.shape[0] == 1:
n = X.shape[1]
elif X.shape[1] == 1:
n = X.shape[0]
else:
raise TypeError("X must be a row or a column matrix")
# m is the number of functions and n is the number of variables
# computing the Jacobian is now easy:
return self._new(m, n, lambda j, i: self[j].diff(X[i]))
def QRdecomposition(self):
"""Return Q, R where A = Q*R, Q is orthogonal and R is upper triangular.
Examples
========
This is the example from wikipedia:
>>> from sympy import Matrix
>>> A = Matrix([[12, -51, 4], [6, 167, -68], [-4, 24, -41]])
>>> Q, R = A.QRdecomposition()
>>> Q
Matrix([
[ 6/7, -69/175, -58/175],
[ 3/7, 158/175, 6/175],
[-2/7, 6/35, -33/35]])
>>> R
Matrix([
[14, 21, -14],
[ 0, 175, -70],
[ 0, 0, 35]])
>>> A == Q*R
True
QR factorization of an identity matrix:
>>> A = Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> Q, R = A.QRdecomposition()
>>> Q
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> R
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
cholesky
LDLdecomposition
LUdecomposition
QRsolve
"""
cls = self.__class__
mat = self.as_mutable()
if not mat.rows >= mat.cols:
            raise MatrixError(
                "The number of rows must be greater than or equal to "
                "the number of columns")
n = mat.rows
m = mat.cols
rank = n
row_reduced = mat.rref()[0]
for i in range(row_reduced.rows):
if row_reduced.row(i).norm() == 0:
rank -= 1
        if not rank == mat.cols:
            raise MatrixError("The rank of the matrix must equal the number "
                              "of columns")
Q, R = mat.zeros(n, m), mat.zeros(m)
for j in range(m): # for each column vector
tmp = mat[:, j] # take original v
for i in range(j):
                # subtract the projection of mat's column onto the new vector
tmp -= Q[:, i]*mat[:, j].dot(Q[:, i])
tmp.expand()
# normalize it
R[j, j] = tmp.norm()
Q[:, j] = tmp / R[j, j]
if Q[:, j].norm() != 1:
raise NotImplementedError(
"Could not normalize the vector %d." % j)
for i in range(j):
R[i, j] = Q[:, i].dot(mat[:, j])
return cls(Q), cls(R)
def QRsolve(self, b):
"""Solve the linear system 'Ax = b'.
'self' is the matrix 'A', the method argument is the vector
'b'. The method returns the solution vector 'x'. If 'b' is a
matrix, the system is solved for each column of 'b' and the
return value is a matrix of the same shape as 'b'.
This method is slower (approximately by a factor of 2) but
more stable for floating-point arithmetic than the LUsolve method.
        However, LUsolve usually uses exact arithmetic, so you don't need
to use QRsolve.
This is mainly for educational purposes and symbolic matrices, for real
(or complex) matrices use mpmath.qr_solve.
See Also
========
lower_triangular_solve
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
pinv_solve
QRdecomposition
"""
Q, R = self.as_mutable().QRdecomposition()
y = Q.T*b
# back substitution to solve R*x = y:
# We build up the result "backwards" in the vector 'x' and reverse it
# only in the end.
x = []
n = R.rows
for j in range(n - 1, -1, -1):
tmp = y[j, :]
for k in range(j + 1, n):
tmp -= R[j, k]*x[n - 1 - k]
x.append(tmp / R[j, j])
return self._new([row._mat for row in reversed(x)])
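    # Usage sketch (assuming a standard Matrix): compute y = Q.T*b, then
    # run the back substitution above on R*x = y.
    #     >>> from sympy import Matrix
    #     >>> A = Matrix([[2, 0], [0, 3]])
    #     >>> A.QRsolve(Matrix([4, 9]))
    #     Matrix([
    #     [2],
    #     [3]])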
def cross(self, b):
"""Return the cross product of `self` and `b` relaxing the condition
of compatible dimensions: if each has 3 elements, a matrix of the
same type and shape as `self` will be returned. If `b` has the same
shape as `self` then common identities for the cross product (like
`a x b = - b x a`) will hold.
See Also
========
dot
multiply
multiply_elementwise
"""
if not is_sequence(b):
raise TypeError("`b` must be an ordered iterable or Matrix, not %s." %
type(b))
if not (self.rows * self.cols == b.rows * b.cols == 3):
raise ShapeError("Dimensions incorrect for cross product.")
else:
return self._new(self.rows, self.cols, (
(self[1]*b[2] - self[2]*b[1]),
(self[2]*b[0] - self[0]*b[2]),
(self[0]*b[1] - self[1]*b[0])))
def dot(self, b):
"""Return the dot product of Matrix self and b relaxing the condition
of compatible dimensions: if either the number of rows or columns are
the same as the length of b then the dot product is returned. If self
is a row or column vector, a scalar is returned. Otherwise, a list
of results is returned (and in that case the number of columns in self
must match the length of b).
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> v = [1, 1, 1]
>>> M.row(0).dot(v)
6
>>> M.col(0).dot(v)
12
>>> M.dot(v)
[6, 15, 24]
See Also
========
cross
multiply
multiply_elementwise
"""
from .dense import Matrix
if not isinstance(b, MatrixBase):
if is_sequence(b):
if len(b) != self.cols and len(b) != self.rows:
raise ShapeError("Dimensions incorrect for dot product.")
return self.dot(Matrix(b))
else:
raise TypeError("`b` must be an ordered iterable or Matrix, not %s." %
type(b))
mat = self
if mat.cols == b.rows:
if b.cols != 1:
mat = mat.T
b = b.T
prod = flatten((mat*b).tolist())
if len(prod) == 1:
return prod[0]
return prod
if mat.cols == b.cols:
return mat.dot(b.T)
elif mat.rows == b.rows:
return mat.T.dot(b)
else:
raise ShapeError("Dimensions incorrect for dot product.")
def multiply_elementwise(self, b):
"""Return the Hadamard product (elementwise product) of A and B
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix([[0, 1, 2], [3, 4, 5]])
>>> B = Matrix([[1, 10, 100], [100, 10, 1]])
>>> A.multiply_elementwise(B)
Matrix([
[ 0, 10, 200],
[300, 40, 5]])
See Also
========
cross
dot
multiply
"""
from sympy.matrices import matrix_multiply_elementwise
return matrix_multiply_elementwise(self, b)
def values(self):
"""Return non-zero values of self."""
return [i for i in flatten(self.tolist()) if not i.is_zero]
def norm(self, ord=None):
"""Return the Norm of a Matrix or Vector.
        In the simplest case this is the geometric size of the vector.
        Other norms can be specified by the ord parameter.
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm - does not exist
inf -- max(abs(x))
-inf -- min(abs(x))
1 -- as below
-1 -- as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other - does not exist sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
Examples
========
>>> from sympy import Matrix, Symbol, trigsimp, cos, sin, oo
>>> x = Symbol('x', real=True)
>>> v = Matrix([cos(x), sin(x)])
>>> trigsimp( v.norm() )
1
>>> v.norm(10)
(sin(x)**10 + cos(x)**10)**(1/10)
>>> A = Matrix([[1, 1], [1, 1]])
>>> A.norm(2)# Spectral norm (max of |Ax|/|x| under 2-vector-norm)
2
>>> A.norm(-2) # Inverse spectral norm (smallest singular value)
0
>>> A.norm() # Frobenius Norm
2
>>> Matrix([1, -2]).norm(oo)
2
>>> Matrix([-1, 2]).norm(-oo)
1
See Also
========
normalized
"""
# Row or Column Vector Norms
vals = list(self.values()) or [0]
if self.rows == 1 or self.cols == 1:
if ord == 2 or ord is None: # Common case sqrt(<x, x>)
return sqrt(Add(*(abs(i)**2 for i in vals)))
elif ord == 1: # sum(abs(x))
return Add(*(abs(i) for i in vals))
elif ord == S.Infinity: # max(abs(x))
return Max(*[abs(i) for i in vals])
elif ord == S.NegativeInfinity: # min(abs(x))
return Min(*[abs(i) for i in vals])
# Otherwise generalize the 2-norm, Sum(x_i**ord)**(1/ord)
# Note that while useful this is not mathematically a norm
try:
return Pow(Add(*(abs(i)**ord for i in vals)), S(1) / ord)
except (NotImplementedError, TypeError):
raise ValueError("Expected order to be Number, Symbol, oo")
# Matrix Norms
else:
if ord == 2: # Spectral Norm
# Maximum singular value
return Max(*self.singular_values())
elif ord == -2:
# Minimum singular value
return Min(*self.singular_values())
elif (ord is None or isinstance(ord, string_types) and ord.lower() in
['f', 'fro', 'frobenius', 'vector']):
# Reshape as vector and send back to norm function
return self.vec().norm(ord=2)
else:
raise NotImplementedError("Matrix Norms under development")
def normalized(self):
"""Return the normalized version of ``self``.
See Also
========
norm
"""
if self.rows != 1 and self.cols != 1:
raise ShapeError("A Matrix must be a vector to normalize.")
norm = self.norm()
out = self.applyfunc(lambda i: i / norm)
return out
def project(self, v):
"""Return the projection of ``self`` onto the line containing ``v``.
Examples
========
>>> from sympy import Matrix, S, sqrt
>>> V = Matrix([sqrt(3)/2, S.Half])
>>> x = Matrix([[1, 0]])
>>> V.project(x)
Matrix([[sqrt(3)/2, 0]])
>>> V.project(-x)
Matrix([[sqrt(3)/2, 0]])
"""
return v*(self.dot(v) / v.dot(v))
def permuteBkwd(self, perm):
"""Permute the rows of the matrix with the given permutation in reverse.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.permuteBkwd([[0, 1], [0, 2]])
Matrix([
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]])
See Also
========
permuteFwd
"""
copy = self.copy()
for i in range(len(perm) - 1, -1, -1):
copy.row_swap(perm[i][0], perm[i][1])
return copy
def permuteFwd(self, perm):
"""Permute the rows of the matrix with the given permutation.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.permuteFwd([[0, 1], [0, 2]])
Matrix([
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
See Also
========
permuteBkwd
"""
copy = self.copy()
for i in range(len(perm)):
copy.row_swap(perm[i][0], perm[i][1])
return copy
def exp(self):
"""Return the exponentiation of a square matrix."""
if not self.is_square:
raise NonSquareMatrixError(
"Exponentiation is valid only for square matrices")
try:
P, cells = self.jordan_cells()
except MatrixError:
raise NotImplementedError("Exponentiation is implemented only for matrices for which the Jordan normal form can be computed")
def _jblock_exponential(b):
# This function computes the matrix exponential for one single Jordan block
nr = b.rows
l = b[0, 0]
if nr == 1:
res = exp(l)
else:
from sympy import eye
# extract the diagonal part
d = b[0, 0]*eye(nr)
#and the nilpotent part
n = b-d
# compute its exponential
nex = eye(nr)
for i in range(1, nr):
nex = nex+n**i/factorial(i)
# combine the two parts
res = exp(b[0, 0])*nex
return(res)
blocks = list(map(_jblock_exponential, cells))
from sympy.matrices import diag
eJ = diag(* blocks)
# n = self.rows
ret = P*eJ*P.inv()
return type(self)(ret)
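    # For a diagonalizable matrix every Jordan block is 1 x 1, so exp()
    # reduces to exponentiating the eigenvalues in the eigenbasis.
    # Illustrative sketch (assuming a standard Matrix):
    #     >>> from sympy import Matrix
    #     >>> Matrix([[0, 0], [0, 1]]).exp()
    #     Matrix([
    #     [1, 0],
    #     [0, E]])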
@property
def is_square(self):
"""Checks if a matrix is square.
A matrix is square if the number of rows equals the number of columns.
The empty matrix is square by definition, since the number of rows and
the number of columns are both zero.
Examples
========
>>> from sympy import Matrix
>>> a = Matrix([[1, 2, 3], [4, 5, 6]])
>>> b = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> c = Matrix([])
>>> a.is_square
False
>>> b.is_square
True
>>> c.is_square
True
"""
return self.rows == self.cols
@property
def is_zero(self):
"""Checks if a matrix is a zero matrix.
A matrix is zero if every element is zero. A matrix need not be square
to be considered zero. The empty matrix is zero by the principle of
vacuous truth. For a matrix that may or may not be zero (e.g.
        contains a symbol), this will be None.
Examples
========
>>> from sympy import Matrix, zeros
>>> from sympy.abc import x
>>> a = Matrix([[0, 0], [0, 0]])
>>> b = zeros(3, 4)
>>> c = Matrix([[0, 1], [0, 0]])
>>> d = Matrix([])
>>> e = Matrix([[x, 0], [0, 0]])
>>> a.is_zero
True
>>> b.is_zero
True
>>> c.is_zero
False
>>> d.is_zero
True
>>> e.is_zero
"""
        if any(i.is_zero is False for i in self):
            return False
        if any(i.is_zero is None for i in self):
            return None
return True
def is_nilpotent(self):
"""Checks if a matrix is nilpotent.
A matrix B is nilpotent if for some integer k, B**k is
a zero matrix.
Examples
========
>>> from sympy import Matrix
>>> a = Matrix([[0, 0, 0], [1, 0, 0], [1, 1, 0]])
>>> a.is_nilpotent()
True
>>> a = Matrix([[1, 0, 1], [1, 0, 0], [1, 1, 0]])
>>> a.is_nilpotent()
False
"""
if not self:
return True
if not self.is_square:
raise NonSquareMatrixError(
"Nilpotency is valid only for square matrices")
x = Dummy('x')
if self.charpoly(x).args[0] == x**self.rows:
return True
return False
@property
def is_upper(self):
"""Check if matrix is an upper triangular matrix. True can be returned
even if the matrix is not square.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [1, 0, 0, 1])
>>> m
Matrix([
[1, 0],
[0, 1]])
>>> m.is_upper
True
        >>> m = Matrix(4, 3, [5, 1, 9, 0, 4, 6, 0, 0, 5, 0, 0, 0])
>>> m
Matrix([
[5, 1, 9],
[0, 4, 6],
[0, 0, 5],
[0, 0, 0]])
>>> m.is_upper
True
>>> m = Matrix(2, 3, [4, 2, 5, 6, 1, 1])
>>> m
Matrix([
[4, 2, 5],
[6, 1, 1]])
>>> m.is_upper
False
See Also
========
is_lower
is_diagonal
is_upper_hessenberg
"""
return all(self[i, j].is_zero
for i in range(1, self.rows)
for j in range(i))
@property
def is_lower(self):
"""Check if matrix is a lower triangular matrix. True can be returned
even if the matrix is not square.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [1, 0, 0, 1])
>>> m
Matrix([
[1, 0],
[0, 1]])
>>> m.is_lower
True
        >>> m = Matrix(4, 3, [0, 0, 0, 2, 0, 0, 1, 4, 0, 6, 6, 5])
>>> m
Matrix([
[0, 0, 0],
[2, 0, 0],
[1, 4, 0],
[6, 6, 5]])
>>> m.is_lower
True
>>> from sympy.abc import x, y
>>> m = Matrix(2, 2, [x**2 + y, y**2 + x, 0, x + y])
>>> m
Matrix([
[x**2 + y, x + y**2],
[ 0, x + y]])
>>> m.is_lower
False
See Also
========
is_upper
is_diagonal
is_lower_hessenberg
"""
return all(self[i, j].is_zero
for i in range(self.rows)
for j in range(i + 1, self.cols))
@property
def is_hermitian(self):
"""Checks if the matrix is Hermitian.
In a Hermitian matrix element i,j is the complex conjugate of
element j,i.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy import I
>>> from sympy.abc import x
>>> a = Matrix([[1, I], [-I, 1]])
>>> a
Matrix([
[ 1, I],
[-I, 1]])
>>> a.is_hermitian
True
>>> a[0, 0] = 2*I
>>> a.is_hermitian
False
>>> a[0, 0] = x
>>> a.is_hermitian
>>> a[0, 1] = a[1, 0]*I
>>> a.is_hermitian
False
"""
def cond():
yield self.is_square
yield fuzzy_and(
self[i, i].is_real for i in range(self.rows))
yield fuzzy_and(
(self[i, j] - self[j, i].conjugate()).is_zero
for i in range(self.rows)
for j in range(i + 1, self.cols))
return fuzzy_and(i for i in cond())
@property
def is_upper_hessenberg(self):
"""Checks if the matrix is the upper-Hessenberg form.
The upper hessenberg matrix has zero entries
below the first subdiagonal.
Examples
========
>>> from sympy.matrices import Matrix
>>> a = Matrix([[1, 4, 2, 3], [3, 4, 1, 7], [0, 2, 3, 4], [0, 0, 1, 3]])
>>> a
Matrix([
[1, 4, 2, 3],
[3, 4, 1, 7],
[0, 2, 3, 4],
[0, 0, 1, 3]])
>>> a.is_upper_hessenberg
True
See Also
========
is_lower_hessenberg
is_upper
"""
return all(self[i, j].is_zero
for i in range(2, self.rows)
for j in range(i - 1))
@property
def is_lower_hessenberg(self):
r"""Checks if the matrix is in the lower-Hessenberg form.
The lower hessenberg matrix has zero entries
above the first superdiagonal.
Examples
========
>>> from sympy.matrices import Matrix
>>> a = Matrix([[1, 2, 0, 0], [5, 2, 3, 0], [3, 4, 3, 7], [5, 6, 1, 1]])
>>> a
Matrix([
[1, 2, 0, 0],
[5, 2, 3, 0],
[3, 4, 3, 7],
[5, 6, 1, 1]])
>>> a.is_lower_hessenberg
True
See Also
========
is_upper_hessenberg
is_lower
"""
return all(self[i, j].is_zero
for i in range(self.rows)
for j in range(i + 2, self.cols))
def is_symbolic(self):
"""Checks if any elements contain Symbols.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.is_symbolic()
True
"""
return any(element.has(Symbol) for element in self.values())
def is_symmetric(self, simplify=True):
"""Check if matrix is symmetric matrix,
that is square matrix and is equal to its transpose.
By default, simplifications occur before testing symmetry.
They can be skipped using 'simplify=False'; while speeding things a bit,
this may however induce false negatives.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [0, 1, 1, 2])
>>> m
Matrix([
[0, 1],
[1, 2]])
>>> m.is_symmetric()
True
>>> m = Matrix(2, 2, [0, 1, 2, 0])
>>> m
Matrix([
[0, 1],
[2, 0]])
>>> m.is_symmetric()
False
>>> m = Matrix(2, 3, [0, 0, 0, 0, 0, 0])
>>> m
Matrix([
[0, 0, 0],
[0, 0, 0]])
>>> m.is_symmetric()
False
>>> from sympy.abc import x, y
        >>> m = Matrix(3, 3, [1, x**2 + 2*x + 1, y, (x + 1)**2, 2, 0, y, 0, 3])
>>> m
Matrix([
[ 1, x**2 + 2*x + 1, y],
[(x + 1)**2, 2, 0],
[ y, 0, 3]])
>>> m.is_symmetric()
True
If the matrix is already simplified, you may speed-up is_symmetric()
test by using 'simplify=False'.
>>> m.is_symmetric(simplify=False)
False
>>> m1 = m.expand()
>>> m1.is_symmetric(simplify=False)
True
"""
if not self.is_square:
return False
if simplify:
delta = self - self.transpose()
delta.simplify()
return delta.equals(self.zeros(self.rows, self.cols))
else:
return self == self.transpose()
def is_anti_symmetric(self, simplify=True):
"""Check if matrix M is an antisymmetric matrix,
that is, M is a square matrix with all M[i, j] == -M[j, i].
When ``simplify=True`` (default), the sum M[i, j] + M[j, i] is
simplified before testing to see if it is zero. By default,
the SymPy simplify function is used. To use a custom function
set simplify to a function that accepts a single argument which
returns a simplified expression. To skip simplification, set
simplify to False but note that although this will be faster,
it may induce false negatives.
Examples
========
>>> from sympy import Matrix, symbols
>>> m = Matrix(2, 2, [0, 1, -1, 0])
>>> m
Matrix([
[ 0, 1],
[-1, 0]])
>>> m.is_anti_symmetric()
True
>>> x, y = symbols('x y')
>>> m = Matrix(2, 3, [0, 0, x, -y, 0, 0])
>>> m
Matrix([
[ 0, 0, x],
[-y, 0, 0]])
>>> m.is_anti_symmetric()
False
>>> from sympy.abc import x, y
>>> m = Matrix(3, 3, [0, x**2 + 2*x + 1, y,
... -(x + 1)**2 , 0, x*y,
... -y, -x*y, 0])
Simplification of matrix elements is done by default so even
though two elements which should be equal and opposite wouldn't
pass an equality test, the matrix is still reported as
anti-symmetric:
>>> m[0, 1] == -m[1, 0]
False
>>> m.is_anti_symmetric()
True
If 'simplify=False' is used for the case when a Matrix is already
simplified, this will speed things up. Here, we see that without
simplification the matrix does not appear anti-symmetric:
>>> m.is_anti_symmetric(simplify=False)
False
But if the matrix were already expanded, then it would appear
anti-symmetric and simplification in the is_anti_symmetric routine
is not needed:
>>> m = m.expand()
>>> m.is_anti_symmetric(simplify=False)
True
"""
# accept custom simplification
simpfunc = simplify if isinstance(simplify, FunctionType) else \
_simplify if simplify else False
if not self.is_square:
return False
n = self.rows
if simplify:
for i in range(n):
# diagonal
if not simpfunc(self[i, i]).is_zero:
return False
# others
for j in range(i + 1, n):
diff = self[i, j] + self[j, i]
if not simpfunc(diff).is_zero:
return False
return True
else:
for i in range(n):
for j in range(i, n):
if self[i, j] != -self[j, i]:
return False
return True
def is_diagonal(self):
"""Check if matrix is diagonal,
that is matrix in which the entries outside the main diagonal are all zero.
Examples
========
>>> from sympy import Matrix, diag
>>> m = Matrix(2, 2, [1, 0, 0, 2])
>>> m
Matrix([
[1, 0],
[0, 2]])
>>> m.is_diagonal()
True
>>> m = Matrix(2, 2, [1, 1, 0, 2])
>>> m
Matrix([
[1, 1],
[0, 2]])
>>> m.is_diagonal()
False
>>> m = diag(1, 2, 3)
>>> m
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> m.is_diagonal()
True
See Also
========
is_lower
is_upper
is_diagonalizable
diagonalize
"""
for i in range(self.rows):
for j in range(self.cols):
if i != j and self[i, j]:
return False
return True
def det(self, method="bareis"):
"""Computes the matrix determinant using the method "method".
Possible values for "method":
bareis ... det_bareis
berkowitz ... berkowitz_det
det_LU ... det_LU_decomposition
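        Examples
        ========
        A minimal usage sketch; the methods above agree on this small
        example:
        >>> from sympy import Matrix
        >>> Matrix([[1, 2], [3, 4]]).det()
        -2
        >>> Matrix([[1, 2], [3, 4]]).det(method="berkowitz")
        -2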
See Also
========
det_bareis
berkowitz_det
det_LU
"""
# if methods were made internal and all determinant calculations
# passed through here, then these lines could be factored out of
# the method routines
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
if method == "bareis":
return self.det_bareis()
elif method == "berkowitz":
return self.berkowitz_det()
elif method == "det_LU":
return self.det_LU_decomposition()
else:
raise ValueError("Determinant method '%s' unrecognized" % method)
def det_bareis(self):
"""Compute matrix determinant using Bareis' fraction-free
algorithm which is an extension of the well known Gaussian
elimination method. This approach is best suited for dense
symbolic matrices and will result in a determinant with
minimal number of fractions. It means that less term
rewriting is needed on resulting formulae.
TODO: Implement algorithm for sparse matrices (SFF),
http://www.eecis.udel.edu/~saunders/papers/sffge/it5.ps.
See Also
========
det
berkowitz_det
"""
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
M, n = self.copy().as_mutable(), self.rows
if n == 1:
det = M[0, 0]
elif n == 2:
det = M[0, 0]*M[1, 1] - M[0, 1]*M[1, 0]
elif n == 3:
det = (M[0, 0]*M[1, 1]*M[2, 2] + M[0, 1]*M[1, 2]*M[2, 0] + M[0, 2]*M[1, 0]*M[2, 1]) - \
(M[0, 2]*M[1, 1]*M[2, 0] + M[0, 0]*M[1, 2]*M[2, 1] + M[0, 1]*M[1, 0]*M[2, 2])
else:
sign = 1 # track current sign in case of column swap
for k in range(n - 1):
# look for a pivot in the current column
# and assume det == 0 if none is found
if M[k, k] == 0:
for i in range(k + 1, n):
if M[i, k]:
M.row_swap(i, k)
sign *= -1
break
else:
return S.Zero
# proceed with Bareis' fraction-free (FF)
# form of Gaussian elimination algorithm
for i in range(k + 1, n):
for j in range(k + 1, n):
D = M[k, k]*M[i, j] - M[i, k]*M[k, j]
if k > 0:
D /= M[k - 1, k - 1]
if D.is_Atom:
M[i, j] = D
else:
M[i, j] = cancel(D)
det = sign*M[n - 1, n - 1]
return det.expand()
def det_LU_decomposition(self):
"""Compute matrix determinant using LU decomposition
Note that this method fails if the LU decomposition itself
fails. In particular, if the matrix has no inverse this method
will fail.
TODO: Implement algorithm for sparse matrices (SFF),
http://www.eecis.udel.edu/~saunders/papers/sffge/it5.ps.
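        Examples
        ========
        A small sketch on an invertible matrix; the result agrees with
        ``det()``:
        >>> from sympy import Matrix
        >>> Matrix([[1, 2], [3, 4]]).det_LU_decomposition()
        -2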
See Also
========
det
det_bareis
berkowitz_det
"""
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
M, n = self.copy(), self.rows
p, prod = [], 1
l, u, p = M.LUdecomposition()
if len(p) % 2:
prod = -1
for k in range(n):
prod = prod*u[k, k]*l[k, k]
return prod.expand()
def adjugate(self, method="berkowitz"):
"""Returns the adjugate matrix.
Adjugate matrix is the transpose of the cofactor matrix.
http://en.wikipedia.org/wiki/Adjugate
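        Examples
        ========
        A small sketch; for a 2 x 2 matrix the adjugate swaps the
        diagonal entries and negates the off-diagonal ones:
        >>> from sympy import Matrix
        >>> Matrix([[1, 2], [3, 4]]).adjugate()
        Matrix([
        [ 4, -2],
        [-3,  1]])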
See Also
========
cofactorMatrix
transpose
berkowitz
"""
return self.cofactorMatrix(method).T
def inverse_LU(self, iszerofunc=_iszero):
"""Calculates the inverse using LU decomposition.
See Also
========
inv
inverse_GE
inverse_ADJ
"""
if not self.is_square:
raise NonSquareMatrixError()
ok = self.rref(simplify=True)[0]
if any(iszerofunc(ok[j, j]) for j in range(ok.rows)):
raise ValueError("Matrix det == 0; not invertible.")
return self.LUsolve(self.eye(self.rows), iszerofunc=_iszero)
def inverse_GE(self, iszerofunc=_iszero):
"""Calculates the inverse using Gaussian elimination.
See Also
========
inv
inverse_LU
inverse_ADJ
"""
from .dense import Matrix
if not self.is_square:
raise NonSquareMatrixError("A Matrix must be square to invert.")
big = Matrix.hstack(self.as_mutable(), Matrix.eye(self.rows))
red = big.rref(iszerofunc=iszerofunc, simplify=True)[0]
if any(iszerofunc(red[j, j]) for j in range(red.rows)):
raise ValueError("Matrix det == 0; not invertible.")
return self._new(red[:, big.rows:])
def inverse_ADJ(self, iszerofunc=_iszero):
"""Calculates the inverse using the adjugate matrix and a determinant.
See Also
========
inv
inverse_LU
inverse_GE
"""
if not self.is_square:
raise NonSquareMatrixError("A Matrix must be square to invert.")
d = self.berkowitz_det()
zero = d.equals(0)
if zero is None:
# if equals() can't decide, will rref be able to?
ok = self.rref(simplify=True)[0]
zero = any(iszerofunc(ok[j, j]) for j in range(ok.rows))
if zero:
raise ValueError("Matrix det == 0; not invertible.")
return self.adjugate() / d
def rref(self, iszerofunc=_iszero, simplify=False):
"""Return reduced row-echelon form of matrix and indices of pivot vars.
To simplify elements before finding nonzero pivots set simplify=True
(to use the default SymPy simplify function) or pass a custom
simplify function.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x
>>> m = Matrix([[1, 2], [x, 1 - 1/x]])
>>> m.rref()
(Matrix([
[1, 0],
[0, 1]]), [0, 1])
"""
simpfunc = simplify if isinstance(
simplify, FunctionType) else _simplify
# pivot: index of next row to contain a pivot
pivot, r = 0, self.as_mutable()
# pivotlist: indices of pivot variables (non-free)
pivotlist = []
for i in range(r.cols):
if pivot == r.rows:
break
if simplify:
r[pivot, i] = simpfunc(r[pivot, i])
if iszerofunc(r[pivot, i]):
for k in range(pivot, r.rows):
if simplify and k > pivot:
r[k, i] = simpfunc(r[k, i])
if not iszerofunc(r[k, i]):
r.row_swap(pivot, k)
break
else:
continue
scale = r[pivot, i]
r.row_op(pivot, lambda x, _: x / scale)
for j in range(r.rows):
if j == pivot:
continue
scale = r[j, i]
r.zip_row_op(j, pivot, lambda x, y: x - scale*y)
pivotlist.append(i)
pivot += 1
return self._new(r), pivotlist
def rank(self, iszerofunc=_iszero, simplify=False):
"""
        Returns the rank of a matrix
        Examples
        ========
>>> from sympy import Matrix
>>> from sympy.abc import x
>>> m = Matrix([[1, 2], [x, 1 - 1/x]])
>>> m.rank()
2
>>> n = Matrix(3, 3, range(1, 10))
>>> n.rank()
2
"""
row_reduced = self.rref(iszerofunc=iszerofunc, simplify=simplify)
rank = len(row_reduced[-1])
return rank
def nullspace(self, simplify=False):
"""Returns list of vectors (Matrix objects) that span nullspace of self
Examples
========
>>> from sympy.matrices import Matrix
>>> m = Matrix(3, 3, [1, 3, 0, -2, -6, 0, 3, 9, 6])
>>> m
Matrix([
[ 1, 3, 0],
[-2, -6, 0],
[ 3, 9, 6]])
>>> m.nullspace()
[Matrix([
[-3],
[ 1],
[ 0]])]
See Also
========
columnspace
"""
from sympy.matrices import zeros
simpfunc = simplify if isinstance(
simplify, FunctionType) else _simplify
reduced, pivots = self.rref(simplify=simpfunc)
basis = []
# create a set of vectors for the basis
for i in range(self.cols - len(pivots)):
basis.append(zeros(self.cols, 1))
# contains the variable index to which the vector corresponds
basiskey, cur = [-1]*len(basis), 0
for i in range(self.cols):
if i not in pivots:
basiskey[cur] = i
cur += 1
for i in range(self.cols):
if i not in pivots: # free var, just set vector's ith place to 1
basis[basiskey.index(i)][i, 0] = 1
else: # add negative of nonpivot entry to corr vector
for j in range(i + 1, self.cols):
line = pivots.index(i)
v = reduced[line, j]
if simplify:
v = simpfunc(v)
if v:
if j in pivots:
# XXX: Is this the correct error?
raise NotImplementedError(
"Could not compute the nullspace of `self`.")
basis[basiskey.index(j)][i, 0] = -v
return [self._new(b) for b in basis]
def columnspace(self, simplify=False):
"""Returns list of vectors (Matrix objects) that span columnspace of self
Examples
========
>>> from sympy.matrices import Matrix
>>> m = Matrix(3, 3, [1, 3, 0, -2, -6, 0, 3, 9, 6])
>>> m
Matrix([
[ 1, 3, 0],
[-2, -6, 0],
[ 3, 9, 6]])
>>> m.columnspace()
[Matrix([
[ 1],
[-2],
[ 3]]), Matrix([
[0],
[0],
[6]])]
See Also
========
nullspace
"""
simpfunc = simplify if isinstance(
simplify, FunctionType) else _simplify
reduced, pivots = self.rref(simplify=simpfunc)
basis = []
# create a set of vectors for the basis
for i in range(self.cols):
if i in pivots:
basis.append(self.col(i))
return [self._new(b) for b in basis]
def berkowitz(self):
"""The Berkowitz algorithm.
Given N x N matrix with symbolic content, compute efficiently
coefficients of characteristic polynomials of 'self' and all
its square sub-matrices composed by removing both i-th row
and column, without division in the ground domain.
This method is particularly useful for computing determinant,
principal minors and characteristic polynomial, when 'self'
has complicated coefficients e.g. polynomials. Semi-direct
usage of this algorithm is also important in computing
efficiently sub-resultant PRS.
        Assuming that M is a square matrix of dimension N x N and
        I is the N x N identity matrix, the following
        definition of the characteristic polynomial is being used:
charpoly(M) = det(t*I - M)
As a consequence, all polynomials generated by Berkowitz
        algorithm are monic.
        Examples
        ========
>>> from sympy import Matrix
>>> from sympy.abc import x, y, z
>>> M = Matrix([[x, y, z], [1, 0, 0], [y, z, x]])
>>> p, q, r, s = M.berkowitz()
>>> p # 0 x 0 M's sub-matrix
(1,)
>>> q # 1 x 1 M's sub-matrix
(1, -x)
>>> r # 2 x 2 M's sub-matrix
(1, -x, -y)
>>> s # 3 x 3 M's sub-matrix
(1, -2*x, x**2 - y*z - y, x*y - z**2)
For more information on the implemented algorithm refer to:
[1] S.J. Berkowitz, On computing the determinant in small
parallel time using a small number of processors, ACM,
Information Processing Letters 18, 1984, pp. 147-150
[2] M. Keber, Division-Free computation of sub-resultants
using Bezout matrices, Tech. Report MPI-I-2006-1-006,
Saarbrucken, 2006
See Also
========
berkowitz_det
berkowitz_minors
berkowitz_charpoly
berkowitz_eigenvals
"""
from sympy.matrices import zeros
berk = ((1,),)
if not self:
return berk
if not self.is_square:
raise NonSquareMatrixError()
A, N = self, self.rows
transforms = [0]*(N - 1)
for n in range(N, 1, -1):
T, k = zeros(n + 1, n), n - 1
R, C = -A[k, :k], A[:k, k]
A, a = A[:k, :k], -A[k, k]
items = [C]
for i in range(0, n - 2):
items.append(A*items[i])
for i, B in enumerate(items):
items[i] = (R*B)[0, 0]
items = [S.One, a] + items
for i in range(n):
T[i:, i] = items[:n - i + 1]
transforms[k - 1] = T
polys = [self._new([S.One, -A[0, 0]])]
for i, T in enumerate(transforms):
polys.append(T*polys[i])
return berk + tuple(map(tuple, polys))
def berkowitz_det(self):
"""Computes determinant using Berkowitz method.
See Also
========
det
berkowitz
"""
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
poly = self.berkowitz()[-1]
sign = (-1)**(len(poly) - 1)
return sign*poly[-1]
def berkowitz_minors(self):
"""Computes principal minors using Berkowitz method.
See Also
========
berkowitz
"""
sign, minors = S.One, []
for poly in self.berkowitz():
minors.append(sign*poly[-1])
sign = -sign
return tuple(minors)
def berkowitz_charpoly(self, x=Dummy('lambda'), simplify=_simplify):
"""Computes characteristic polynomial minors using Berkowitz method.
A PurePoly is returned so using different variables for ``x`` does
not affect the comparison or the polynomials:
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x, y
>>> A = Matrix([[1, 3], [2, 0]])
>>> A.berkowitz_charpoly(x) == A.berkowitz_charpoly(y)
True
Specifying ``x`` is optional; a Dummy with name ``lambda`` is used by
default (which looks good when pretty-printed in unicode):
>>> A.berkowitz_charpoly().as_expr()
_lambda**2 - _lambda - 6
No test is done to see that ``x`` doesn't clash with an existing
symbol, so using the default (``lambda``) or your own Dummy symbol is
the safest option:
>>> A = Matrix([[1, 2], [x, 0]])
>>> A.charpoly().as_expr()
_lambda**2 - _lambda - 2*x
>>> A.charpoly(x).as_expr()
x**2 - 3*x
See Also
========
berkowitz
"""
return PurePoly(list(map(simplify, self.berkowitz()[-1])), x)
charpoly = berkowitz_charpoly
def berkowitz_eigenvals(self, **flags):
"""Computes eigenvalues of a Matrix using Berkowitz method.
See Also
========
berkowitz
"""
return roots(self.berkowitz_charpoly(Dummy('x')), **flags)
def eigenvals(self, **flags):
"""Return eigen values using the berkowitz_eigenvals routine.
Since the roots routine doesn't always work well with Floats,
they will be replaced with Rationals before calling that
routine. If this is not desired, set flag ``rational`` to False.
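        Examples
        ========
        A minimal sketch; the result maps each eigenvalue to its
        algebraic multiplicity:
        >>> from sympy import Matrix
        >>> Matrix([[2, 1], [0, 2]]).eigenvals()
        {2: 2}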
"""
# roots doesn't like Floats, so replace them with Rationals
# unless the nsimplify flag indicates that this has already
# been done, e.g. in eigenvects
mat = self
if not mat:
return {}
if flags.pop('rational', True):
if any(v.has(Float) for v in mat):
mat = mat._new(mat.rows, mat.cols,
[nsimplify(v, rational=True) for v in mat])
flags.pop('simplify', None) # pop unsupported flag
return mat.berkowitz_eigenvals(**flags)
def eigenvects(self, **flags):
"""Return list of triples (eigenval, multiplicity, basis).
The flag ``simplify`` has two effects:
1) if bool(simplify) is True, as_content_primitive()
will be used to tidy up normalization artifacts;
2) if nullspace needs simplification to compute the
basis, the simplify flag will be passed on to the
nullspace routine which will interpret it there.
If the matrix contains any Floats, they will be changed to Rationals
for computation purposes, but the answers will be returned after being
        evaluated with evalf. If it is desired to remove small imaginary
portions during the evalf step, pass a value for the ``chop`` flag.
"""
from sympy.matrices import eye
simplify = flags.get('simplify', True)
primitive = bool(flags.get('simplify', False))
chop = flags.pop('chop', False)
flags.pop('multiple', None) # remove this if it's there
# roots doesn't like Floats, so replace them with Rationals
float = False
mat = self
if any(v.has(Float) for v in self):
float = True
mat = mat._new(mat.rows, mat.cols, [nsimplify(
v, rational=True) for v in mat])
flags['rational'] = False # to tell eigenvals not to do this
out, vlist = [], mat.eigenvals(**flags)
vlist = list(vlist.items())
vlist.sort(key=default_sort_key)
flags.pop('rational', None)
for r, k in vlist:
tmp = mat.as_mutable() - eye(mat.rows)*r
basis = tmp.nullspace()
# whether tmp.is_symbolic() is True or False, it is possible that
# the basis will come back as [] in which case simplification is
# necessary.
if not basis:
# The nullspace routine failed, try it again with simplification
basis = tmp.nullspace(simplify=simplify)
if not basis:
raise NotImplementedError(
"Can't evaluate eigenvector for eigenvalue %s" % r)
if primitive:
# the relationship A*e = lambda*e will still hold if we change the
# eigenvector; so if simplify is True we tidy up any normalization
                # artifacts with as_content_primitive (default) and remove any pure Integer
# denominators.
l = 1
for i, b in enumerate(basis[0]):
c, p = signsimp(b).as_content_primitive()
if c is not S.One:
b = c*p
l = ilcm(l, c.q)
basis[0][i] = b
if l != 1:
basis[0] *= l
if float:
out.append((r.evalf(chop=chop), k, [
mat._new(b).evalf(chop=chop) for b in basis]))
else:
out.append((r, k, [mat._new(b) for b in basis]))
return out
def left_eigenvects(self, **flags):
"""Returns left eigenvectors and eigenvalues.
This function returns the list of triples (eigenval, multiplicity,
basis) for the left eigenvectors. Options are the same as for
        eigenvects(), i.e. the ``**flags`` argument gets passed directly to
eigenvects().
Examples
========
>>> from sympy import Matrix
>>> M = Matrix([[0, 1, 1], [1, 0, 0], [1, 1, 1]])
>>> M.eigenvects()
[(-1, 1, [Matrix([
[-1],
[ 1],
[ 0]])]), (0, 1, [Matrix([
[ 0],
[-1],
[ 1]])]), (2, 1, [Matrix([
[2/3],
[1/3],
[ 1]])])]
>>> M.left_eigenvects()
[(-1, 1, [Matrix([[-2, 1, 1]])]), (0, 1, [Matrix([[-1, -1, 1]])]), (2,
1, [Matrix([[1, 1, 1]])])]
"""
mat = self
left_transpose = mat.transpose().eigenvects(**flags)
left = []
for (ev, mult, ltmp) in left_transpose:
left.append( (ev, mult, [l.transpose() for l in ltmp]) )
return left
def singular_values(self):
"""Compute the singular values of a Matrix
Examples
========
>>> from sympy import Matrix, Symbol
>>> x = Symbol('x', real=True)
>>> A = Matrix([[0, 1, 0], [0, x, 0], [-1, 0, 0]])
>>> A.singular_values()
[sqrt(x**2 + 1), 1, 0]
See Also
========
condition_number
"""
mat = self.as_mutable()
# Compute eigenvalues of A.H A
valmultpairs = (mat.H*mat).eigenvals()
# Expands result from eigenvals into a simple list
vals = []
for k, v in valmultpairs.items():
vals += [sqrt(k)]*v # dangerous! same k in several spots!
# sort them in descending order
vals.sort(reverse=True, key=default_sort_key)
return vals
def condition_number(self):
"""Returns the condition number of a matrix.
        This is the maximum singular value divided by the minimum singular value.
Examples
========
>>> from sympy import Matrix, S
>>> A = Matrix([[1, 0, 0], [0, 10, 0], [0, 0, S.One/10]])
>>> A.condition_number()
100
See Also
========
singular_values
"""
if not self:
return S.Zero
singularvalues = self.singular_values()
return Max(*singularvalues) / Min(*singularvalues)
def __getattr__(self, attr):
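        # Elementwise calculus operations are synthesized on the fly: the
        # returned closure applies the requested method to every entry of
        # the matrix via applyfunc.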
if attr in ('diff', 'integrate', 'limit'):
def doit(*args):
item_doit = lambda item: getattr(item, attr)(*args)
return self.applyfunc(item_doit)
return doit
else:
raise AttributeError(
"%s has no attribute %s." % (self.__class__.__name__, attr))
def integrate(self, *args):
"""Integrate each element of the matrix.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.integrate((x, ))
Matrix([
[x**2/2, x*y],
[ x, 0]])
>>> M.integrate((x, 0, 2))
Matrix([
[2, 2*y],
[2, 0]])
See Also
========
limit
diff
"""
return self._new(self.rows, self.cols,
lambda i, j: self[i, j].integrate(*args))
def limit(self, *args):
"""Calculate the limit of each element in the matrix.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.limit(x, 2)
Matrix([
[2, y],
[1, 0]])
See Also
========
integrate
diff
"""
return self._new(self.rows, self.cols,
lambda i, j: self[i, j].limit(*args))
def diff(self, *args):
"""Calculate the derivative of each element in the matrix.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.diff(x)
Matrix([
[1, 0],
[0, 0]])
See Also
========
integrate
limit
"""
return self._new(self.rows, self.cols,
lambda i, j: self[i, j].diff(*args))
def vec(self):
"""Return the Matrix converted into a one column matrix by stacking columns
Examples
========
>>> from sympy import Matrix
>>> m=Matrix([[1, 3], [2, 4]])
>>> m
Matrix([
[1, 3],
[2, 4]])
>>> m.vec()
Matrix([
[1],
[2],
[3],
[4]])
See Also
========
vech
"""
return self.T.reshape(len(self), 1)
def vech(self, diagonal=True, check_symmetry=True):
"""Return the unique elements of a symmetric Matrix as a one column matrix
by stacking the elements in the lower triangle.
Arguments:
diagonal -- include the diagonal cells of self or not
        check_symmetry -- check the symmetry of self (the check is not completely reliable)
Examples
========
>>> from sympy import Matrix
>>> m=Matrix([[1, 2], [2, 3]])
>>> m
Matrix([
[1, 2],
[2, 3]])
>>> m.vech()
Matrix([
[1],
[2],
[3]])
>>> m.vech(diagonal=False)
Matrix([[2]])
See Also
========
vec
"""
from sympy.matrices import zeros
c = self.cols
if c != self.rows:
raise ShapeError("Matrix must be square")
if check_symmetry:
self.simplify()
if self != self.transpose():
raise ValueError("Matrix appears to be asymmetric; consider check_symmetry=False")
count = 0
if diagonal:
v = zeros(c*(c + 1) // 2, 1)
for j in range(c):
for i in range(j, c):
v[count] = self[i, j]
count += 1
else:
v = zeros(c*(c - 1) // 2, 1)
for j in range(c):
for i in range(j + 1, c):
v[count] = self[i, j]
count += 1
return v
def get_diag_blocks(self):
"""Obtains the square sub-matrices on the main diagonal of a square matrix.
Useful for inverting symbolic matrices or solving systems of
linear equations which may be decoupled by having a block diagonal
structure.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x, y, z
>>> A = Matrix([[1, 3, 0, 0], [y, z*z, 0, 0], [0, 0, x, 0], [0, 0, 0, 0]])
>>> a1, a2, a3 = A.get_diag_blocks()
>>> a1
Matrix([
[1, 3],
[y, z**2]])
>>> a2
Matrix([[x]])
>>> a3
Matrix([[0]])
"""
sub_blocks = []
def recurse_sub_blocks(M):
i = 1
while i <= M.shape[0]:
if i == 1:
to_the_right = M[0, i:]
to_the_bottom = M[i:, 0]
else:
to_the_right = M[:i, i:]
to_the_bottom = M[i:, :i]
if any(to_the_right) or any(to_the_bottom):
i += 1
continue
else:
sub_blocks.append(M[:i, :i])
if M.shape == M[:i, :i].shape:
return
else:
recurse_sub_blocks(M[i:, i:])
return
recurse_sub_blocks(self)
return sub_blocks
def diagonalize(self, reals_only=False, sort=False, normalize=False):
"""
Return (P, D), where D is diagonal and
D = P^-1 * M * P
        where M is the current matrix.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
>>> m
Matrix([
[1, 2, 0],
[0, 3, 0],
[2, -4, 2]])
>>> (P, D) = m.diagonalize()
>>> D
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> P
Matrix([
[-1, 0, -1],
[ 0, 0, -1],
[ 2, 1, 2]])
>>> P.inv() * m * P
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
See Also
========
is_diagonal
is_diagonalizable
"""
from sympy.matrices import diag
if not self.is_square:
raise NonSquareMatrixError()
if not self.is_diagonalizable(reals_only, False):
self._diagonalize_clear_subproducts()
raise MatrixError("Matrix is not diagonalizable")
else:
if self._eigenvects is None:
self._eigenvects = self.eigenvects(simplify=True)
if sort:
self._eigenvects.sort(key=default_sort_key)
self._eigenvects.reverse()
diagvals = []
P = self._new(self.rows, 0, [])
for eigenval, multiplicity, vects in self._eigenvects:
for k in range(multiplicity):
diagvals.append(eigenval)
vec = vects[k]
if normalize:
vec = vec / vec.norm()
P = P.col_insert(P.cols, vec)
D = diag(*diagvals)
self._diagonalize_clear_subproducts()
return (P, D)
def is_diagonalizable(self, reals_only=False, clear_subproducts=True):
"""Check if matrix is diagonalizable.
        If reals_only==True, then also check that the diagonalized matrix consists only of real (non-complex) values.
        Some subproducts could be reused by other methods to avoid double calculation;
        by default (if clear_subproducts==True) they will be deleted.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
>>> m
Matrix([
[1, 2, 0],
[0, 3, 0],
[2, -4, 2]])
>>> m.is_diagonalizable()
True
>>> m = Matrix(2, 2, [0, 1, 0, 0])
>>> m
Matrix([
[0, 1],
[0, 0]])
>>> m.is_diagonalizable()
False
>>> m = Matrix(2, 2, [0, 1, -1, 0])
>>> m
Matrix([
[ 0, 1],
[-1, 0]])
>>> m.is_diagonalizable()
True
>>> m.is_diagonalizable(True)
False
See Also
========
is_diagonal
diagonalize
"""
if not self.is_square:
return False
res = False
self._is_symbolic = self.is_symbolic()
self._is_symmetric = self.is_symmetric()
self._eigenvects = None
self._eigenvects = self.eigenvects(simplify=True)
all_iscorrect = True
for eigenval, multiplicity, vects in self._eigenvects:
if len(vects) != multiplicity:
all_iscorrect = False
break
elif reals_only and not eigenval.is_real:
all_iscorrect = False
break
res = all_iscorrect
if clear_subproducts:
self._diagonalize_clear_subproducts()
return res
def _diagonalize_clear_subproducts(self):
del self._is_symbolic
del self._is_symmetric
del self._eigenvects
def jordan_cell(self, eigenval, n):
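        """Return an n x n Jordan block with ``eigenval`` on the main
        diagonal and ones on the first superdiagonal (used internally
        by ``jordan_cells``)."""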
n = int(n)
from sympy.matrices import MutableMatrix
out = MutableMatrix.zeros(n)
for i in range(n-1):
out[i, i] = eigenval
out[i, i+1] = 1
out[n-1, n-1] = eigenval
return type(self)(out)
def _jordan_block_structure(self):
# To every eigenvalue may belong `i` blocks with size s(i)
# and a chain of generalized eigenvectors
# which will be determined by the following computations:
# for every eigenvalue we will add a dictionary
# containing, for all blocks, the blocksizes and the attached chain vectors
# that will eventually be used to form the transformation P
jordan_block_structures = {}
_eigenvects = self.eigenvects()
ev = self.eigenvals()
if len(ev) == 0:
raise AttributeError("could not compute the eigenvalues")
for eigenval, multiplicity, vects in _eigenvects:
l_jordan_chains={}
geometrical = len(vects)
if geometrical == multiplicity:
                # The Jordan chains all have length 1 and consist of only one vector,
                # which is of course the eigenvector
chains = []
for v in vects:
chain=[v]
chains.append(chain)
l_jordan_chains[1] = chains
jordan_block_structures[eigenval] = l_jordan_chains
elif geometrical == 0:
raise MatrixError("Matrix has the eigen vector with geometrical multiplicity equal zero.")
else:
# Up to now we know nothing about the sizes of the blocks of our Jordan matrix.
# Note that knowledge of algebraic and geometrical multiplicity
# will *NOT* be sufficient to determine this structure.
# The blocksize `s` could be defined as the minimal `k` where
# `kernel(self-lI)^k = kernel(self-lI)^(k+1)`
# The extreme case would be that k = (multiplicity-geometrical+1)
# but the blocks could be smaller.
# Consider for instance the following matrix
# [2 1 0 0]
# [0 2 1 0]
# [0 0 2 0]
# [0 0 0 2]
                # which coincides with its own Jordan canonical form.
# It has only one eigenvalue l=2 of (algebraic) multiplicity=4.
# It has two eigenvectors, one belonging to the last row (blocksize 1)
# and one being the last part of a jordan chain of length 3 (blocksize of the first block).
                # Note again that it is not possible to obtain this from the algebraic and geometrical
                # multiplicity alone. This only gives us an upper limit for the dimension of one of
                # the subspaces (the blocksize of the corresponding Jordan block) given by
# max=(multiplicity-geometrical+1) which is reached for our matrix
# but not for
# [2 1 0 0]
# [0 2 0 0]
# [0 0 2 1]
# [0 0 0 2]
# although multiplicity=4 and geometrical=2 are the same for this matrix.
from sympy.matrices import MutableMatrix
I = MutableMatrix.eye(self.rows)
l = eigenval
M = (self-l*I)
# We will store the matrices `(self-l*I)^k` for further computations
                # for convenience only we store `Ms[0]=(self-lI)^0=I`
# so the index is the same as the power for all further Ms entries
# We also store the vectors that span these kernels (Ns[0] = [])
# and also their dimensions `a_s`
# this is mainly done for debugging since the number of blocks of a given size
# can be computed from the a_s, in order to check our result which is obtained simpler
# by counting the number of Jordan chains for `a` given `s`
# `a_0` is `dim(Kernel(Ms[0]) = dim (Kernel(I)) = 0` since `I` is regular
l_jordan_chains={}
Ms = [I]
Ns = [[]]
a = [0]
smax = 0
M_new = Ms[-1]*M
Ns_new = M_new.nullspace()
a_new = len(Ns_new)
Ms.append(M_new)
Ns.append(Ns_new)
while a_new > a[-1]: # as long as the nullspaces increase compute further powers
a.append(a_new)
M_new = Ms[-1]*M
Ns_new = M_new.nullspace()
a_new=len(Ns_new)
Ms.append(M_new)
Ns.append(Ns_new)
smax += 1
# We now have `Ms[-1]=((self-l*I)**s)=Z=0`.
# We also know the size of the biggest Jordan block
# associated with `l` to be `s`.
                # Now let us proceed with the computation of the associated part of the transformation matrix `P`.
# We already know the kernel (=nullspace) `K_l` of (self-lI) which consists of the
# eigenvectors belonging to eigenvalue `l`.
# The dimension of this space is the geometric multiplicity of eigenvalue `l`.
# For every eigenvector ev out of `K_l`, there exists a subspace that is
# spanned by the Jordan chain of ev. The dimension of this subspace is
# represented by the length `s` of the Jordan block.
# The chain itself is given by `{e_0,..,e_s-1}` where:
# `e_k+1 =(self-lI)e_k (*)`
# and
# `e_s-1=ev`
# So it would be possible to start with the already known `ev` and work backwards until one
                # reaches `e_0`. Unfortunately this cannot be done by simply solving system (*) since its matrix
                # is singular (by definition of the eigenspaces).
                # This approach would force us to choose, at every step, the degree of freedom left undetermined
                # by (*). This is difficult to implement with computer algebra systems and also quite inefficient.
# We therefore reformulate the problem in terms of nullspaces.
# To do so we start from the other end and choose `e0`'s out of
# `E=Kernel(self-lI)^s / Kernel(self-lI)^(s-1)`
# Note that `Kernel(self-lI)^s = Kernel(Z) = V` (the whole vector space).
# So in the first step `s=smax` this restriction turns out to actually restrict nothing at all
# and the only remaining condition is to choose vectors in `Kernel(self-lI)^(s-1)`.
# Subsequently we compute `e_1=(self-lI)e_0`, `e_2=(self-lI)*e_1` and so on.
# The subspace `E` can have a dimension larger than one.
# That means that we have more than one Jordan block of size `s` for the eigenvalue `l`
# and as many Jordan chains (this is the case in the second example).
# In this case we start as many Jordan chains and have as many blocks of size `s` in the jcf.
# We now have all the Jordan blocks of size `s` but there might be others attached to the same
# eigenvalue that are smaller.
# So we will do the same procedure also for `s-1` and so on until 1 (the lowest possible order
# where the Jordan chain is of length 1 and just represented by the eigenvector).
for s in reversed(range(1, smax+1)):
S = Ms[s]
# We want the vectors in `Kernel((self-lI)^s)`,
# but without those in `Kernel(self-lI)^s-1`
# so we will add their adjoints as additional equations
# to the system formed by `S` to get the orthogonal
# complement.
                    # (`S` will no longer be square.)
exclude_vectors = Ns[s-1]
for k in range(0, a[s-1]):
S = S.col_join((exclude_vectors[k]).adjoint())
# We also want to exclude the vectors
# in the chains for the bigger blocks
# that we have already computed (if there are any).
# (That is why we start with the biggest s).
# Since Jordan blocks are not orthogonal in general
# (in the original space), only those chain vectors
# that are on level s (index `s-1` in a chain)
# are added.
for chain_list in l_jordan_chains.values():
for chain in chain_list:
S = S.col_join(chain[s-1].adjoint())
e0s = S.nullspace()
# Determine the number of chain leaders
# for blocks of size `s`.
n_e0 = len(e0s)
s_chains = []
# s_cells=[]
for i in range(0, n_e0):
chain=[e0s[i]]
for k in range(1, s):
v = M*chain[k-1]
chain.append(v)
                        # We want the chain leader to appear as the last vector of the block.
chain.reverse()
s_chains.append(chain)
l_jordan_chains[s] = s_chains
jordan_block_structures[eigenval] = l_jordan_chains
return jordan_block_structures
def jordan_form(self, calc_transformation=True):
r"""Return Jordan form J of current matrix.
Also the transformation P such that
`J = P^{-1} \cdot M \cdot P`
and the jordan blocks forming J
will be calculated.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix([
... [ 6, 5, -2, -3],
... [-3, -1, 3, 3],
... [ 2, 1, -2, -3],
... [-1, 1, 5, 5]])
>>> P, J = m.jordan_form()
>>> J
Matrix([
[2, 1, 0, 0],
[0, 2, 0, 0],
[0, 0, 2, 1],
[0, 0, 0, 2]])
See Also
========
jordan_cells
"""
P, Jcells = self.jordan_cells()
from sympy.matrices import diag
J = diag(*Jcells)
return P, type(self)(J)
def jordan_cells(self, calc_transformation=True):
r"""Return a list of Jordan cells of current matrix.
        These cells form the Jordan matrix J.
If calc_transformation is specified as False, then transformation P such that
`J = P^{-1} \cdot M \cdot P`
will not be calculated.
Notes
=====
Calculation of transformation P is not implemented yet.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(4, 4, [
... 6, 5, -2, -3,
... -3, -1, 3, 3,
... 2, 1, -2, -3,
... -1, 1, 5, 5])
>>> P, Jcells = m.jordan_cells()
>>> Jcells[0]
Matrix([
[2, 1],
[0, 2]])
>>> Jcells[1]
Matrix([
[2, 1],
[0, 2]])
See Also
========
jordan_form
"""
n = self.rows
Jcells = []
Pcols_new = []
jordan_block_structures = self._jordan_block_structure()
from sympy.matrices import MutableMatrix
# Order according to default_sort_key, this makes sure the order is the same as in .diagonalize():
for eigenval in (sorted(list(jordan_block_structures.keys()), key=default_sort_key)):
l_jordan_chains = jordan_block_structures[eigenval]
for s in reversed(sorted((l_jordan_chains).keys())): # Start with the biggest block
s_chains = l_jordan_chains[s]
block = self.jordan_cell(eigenval, s)
number_of_s_chains=len(s_chains)
for i in range(0, number_of_s_chains):
Jcells.append(type(self)(block))
chain_vectors = s_chains[i]
lc = len(chain_vectors)
assert lc == s
for j in range(0, lc):
generalized_eigen_vector = chain_vectors[j]
Pcols_new.append(generalized_eigen_vector)
P = MutableMatrix.zeros(n)
for j in range(0, n):
P[:, j] = Pcols_new[j]
return type(self)(P), Jcells
def _jordan_split(self, algebraical, geometrical):
"""Return a list of integers with sum equal to 'algebraical'
and length equal to 'geometrical'"""
n1 = algebraical // geometrical
res = [n1]*geometrical
res[len(res) - 1] += algebraical % geometrical
assert sum(res) == algebraical
return res
def has(self, *patterns):
"""Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import Matrix, Float
>>> from sympy.abc import x, y
>>> A = Matrix(((1, x), (0.2, 3)))
>>> A.has(x)
True
>>> A.has(y)
False
>>> A.has(Float)
True
"""
return any(a.has(*patterns) for a in self._mat)
def dual(self):
"""Returns the dual of a matrix, which is:
`(1/2)*levicivita(i, j, k, l)*M(k, l)` summed over indices `k` and `l`
Since the levicivita method is anti_symmetric for any pairwise
exchange of indices, the dual of a symmetric matrix is the zero
matrix. Strictly speaking the dual defined here assumes that the
'matrix' `M` is a contravariant anti_symmetric second rank tensor,
so that the dual is a covariant second rank tensor.
"""
from sympy import LeviCivita
from sympy.matrices import zeros
M, n = self[:, :], self.rows
work = zeros(n)
if self.is_symmetric():
return work
for i in range(1, n):
for j in range(1, n):
acum = 0
for k in range(1, n):
acum += LeviCivita(i, j, 0, k)*M[0, k]
work[i, j] = acum
work[j, i] = -acum
for l in range(1, n):
acum = 0
for a in range(1, n):
for b in range(1, n):
acum += LeviCivita(0, l, a, b)*M[a, b]
acum /= 2
work[0, l] = -acum
work[l, 0] = acum
return work
@classmethod
def hstack(cls, *args):
"""Return a matrix formed by joining args horizontally (i.e.
by repeated application of row_join).
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> Matrix.hstack(eye(2), 2*eye(2))
Matrix([
[1, 0, 2, 0],
[0, 1, 0, 2]])
"""
return reduce(cls.row_join, args)
@classmethod
def vstack(cls, *args):
"""Return a matrix formed by joining args vertically (i.e.
by repeated application of col_join).
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> Matrix.vstack(eye(2), 2*eye(2))
Matrix([
[1, 0],
[0, 1],
[2, 0],
[0, 2]])
"""
return reduce(cls.col_join, args)
def row_join(self, rhs):
"""Concatenates two matrices along self's last and rhs's first column
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(3, 1)
>>> M.row_join(V)
Matrix([
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1]])
See Also
========
row
col_join
"""
from sympy.matrices import MutableMatrix
        # Allows you to build a matrix even if it is a null matrix
if not self:
return type(self)(rhs)
if self.rows != rhs.rows:
raise ShapeError(
"`self` and `rhs` must have the same number of rows.")
newmat = MutableMatrix.zeros(self.rows, self.cols + rhs.cols)
newmat[:, :self.cols] = self
newmat[:, self.cols:] = rhs
return type(self)(newmat)
def col_join(self, bott):
"""Concatenates two matrices along self's last and bott's first row
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(1, 3)
>>> M.col_join(V)
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[1, 1, 1]])
See Also
========
col
row_join
"""
from sympy.matrices import MutableMatrix
        # Allows you to build a matrix even if it is a null matrix
if not self:
return type(self)(bott)
if self.cols != bott.cols:
raise ShapeError(
"`self` and `bott` must have the same number of columns.")
newmat = MutableMatrix.zeros(self.rows + bott.rows, self.cols)
newmat[:self.rows, :] = self
newmat[self.rows:, :] = bott
return type(self)(newmat)
def row_insert(self, pos, mti):
"""Insert one or more rows at the given row position.
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(1, 3)
>>> M.row_insert(1, V)
Matrix([
[0, 0, 0],
[1, 1, 1],
[0, 0, 0],
[0, 0, 0]])
See Also
========
row
col_insert
"""
from sympy.matrices import MutableMatrix
        # Allows you to build a matrix even if it is a null matrix
if not self:
return type(self)(mti)
if pos == 0:
return mti.col_join(self)
elif pos < 0:
pos = self.rows + pos
if pos < 0:
pos = 0
elif pos > self.rows:
pos = self.rows
if self.cols != mti.cols:
raise ShapeError(
"`self` and `mti` must have the same number of columns.")
newmat = self.zeros(self.rows + mti.rows, self.cols)
i, j = pos, pos + mti.rows
newmat[:i, :] = self[:i, :]
newmat[i: j, :] = mti
newmat[j:, :] = self[i:, :]
return newmat
def col_insert(self, pos, mti):
"""Insert one or more columns at the given column position.
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(3, 1)
>>> M.col_insert(1, V)
Matrix([
[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0]])
See Also
========
col
row_insert
"""
from sympy.matrices import MutableMatrix
        # Allows you to build a matrix even if it is a null matrix
if not self:
return type(self)(mti)
if pos == 0:
return mti.row_join(self)
elif pos < 0:
pos = self.cols + pos
if pos < 0:
pos = 0
elif pos > self.cols:
pos = self.cols
if self.rows != mti.rows:
raise ShapeError("self and mti must have the same number of rows.")
newmat = MutableMatrix.zeros(self.rows, self.cols + mti.cols)
i, j = pos, pos + mti.cols
newmat[:, :i] = self[:, :i]
newmat[:, i:j] = mti
newmat[:, j:] = self[:, i:]
return type(self)(newmat)
def replace(self, F, G, map=False):
"""Replaces Function F in Matrix entries with Function G.
Examples
========
>>> from sympy import symbols, Function, Matrix
>>> F, G = symbols('F, G', cls=Function)
>>> M = Matrix(2, 2, lambda i, j: F(i+j)) ; M
Matrix([
[F(0), F(1)],
[F(1), F(2)]])
>>> N = M.replace(F,G)
>>> N
Matrix([
[G(0), G(1)],
[G(1), G(2)]])
"""
M = self[:, :]
return M.applyfunc(lambda x: x.replace(F, G, map))
def pinv(self):
"""Calculate the Moore-Penrose pseudoinverse of the matrix.
The Moore-Penrose pseudoinverse exists and is unique for any matrix.
If the matrix is invertible, the pseudoinverse is the same as the
inverse.
Examples
========
>>> from sympy import Matrix
>>> Matrix([[1, 2, 3], [4, 5, 6]]).pinv()
Matrix([
[-17/18, 4/9],
[ -1/9, 1/9],
[ 13/18, -2/9]])
See Also
========
inv
pinv_solve
References
==========
.. [1] https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse
"""
A = self
AH = self.H
# Trivial case: pseudoinverse of all-zero matrix is its transpose.
if A.is_zero:
return AH
try:
if self.rows >= self.cols:
return (AH * A).inv() * AH
else:
return AH * (A * AH).inv()
except ValueError:
# Matrix is not full rank, so A*AH cannot be inverted.
raise NotImplementedError('Rank-deficient matrices are not yet '
'supported.')
def pinv_solve(self, B, arbitrary_matrix=None):
"""Solve Ax = B using the Moore-Penrose pseudoinverse.
There may be zero, one, or infinite solutions. If one solution
exists, it will be returned. If infinite solutions exist, one will
be returned based on the value of arbitrary_matrix. If no solutions
exist, the least-squares solution is returned.
Parameters
==========
B : Matrix
The right hand side of the equation to be solved for. Must have
the same number of rows as matrix A.
arbitrary_matrix : Matrix
If the system is underdetermined (e.g. A has more columns than
rows), infinite solutions are possible, in terms of an arbitrary
matrix. This parameter may be set to a specific matrix to use
for that purpose; if so, it must be the same shape as x, with as
many rows as matrix A has columns, and as many columns as matrix
B. If left as None, an appropriate matrix containing dummy
symbols in the form of ``wn_m`` will be used, with n and m being
row and column position of each symbol.
Returns
=======
x : Matrix
The matrix that will satisfy Ax = B. Will have as many rows as
matrix A has columns, and as many columns as matrix B.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix([[1, 2, 3], [4, 5, 6]])
>>> B = Matrix([7, 8])
>>> A.pinv_solve(B)
Matrix([
[ _w0_0/6 - _w1_0/3 + _w2_0/6 - 55/18],
[-_w0_0/3 + 2*_w1_0/3 - _w2_0/3 + 1/9],
[ _w0_0/6 - _w1_0/3 + _w2_0/6 + 59/18]])
>>> A.pinv_solve(B, arbitrary_matrix=Matrix([0, 0, 0]))
Matrix([
[-55/18],
[ 1/9],
[ 59/18]])
See Also
========
lower_triangular_solve
upper_triangular_solve
gauss_jordan_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv
Notes
=====
This may return either exact solutions or least squares solutions.
To determine which, check ``A * A.pinv() * B == B``. It will be
True if exact solutions exist, and False if only a least-squares
solution exists. Be aware that the left hand side of that equation
may need to be simplified to correctly compare to the right hand
side.
References
==========
.. [1] https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse#Obtaining_all_solutions_of_a_linear_system
"""
from sympy.matrices import eye
A = self
A_pinv = self.pinv()
if arbitrary_matrix is None:
rows, cols = A.cols, B.cols
w = symbols('w:{0}_:{1}'.format(rows, cols), cls=Dummy)
arbitrary_matrix = self.__class__(cols, rows, w).T
return A_pinv * B + (eye(A.cols) - A_pinv*A) * arbitrary_matrix
def gauss_jordan_solve(self, b, freevar=False):
"""
Solves Ax = b using Gauss Jordan elimination.
There may be zero, one, or infinite solutions. If one solution
exists, it will be returned. If infinite solutions exist, it will
        be returned parametrically. If no solutions exist, a
        ValueError will be raised.
Parameters
==========
b : Matrix
The right hand side of the equation to be solved for. Must have
the same number of rows as matrix A.
freevar : List
If the system is underdetermined (e.g. A has more columns than
            rows), infinite solutions are possible, in terms of arbitrary
            values of the free variables. The indices of the free variables
            in the solution (column Matrix) will be returned by freevar if
            the flag `freevar` is set to `True`.
Returns
=======
x : Matrix
            The matrix that will satisfy Ax = b. Will have as many rows as
            matrix A has columns, and as many columns as matrix b.
params : Matrix
If the system is underdetermined (e.g. A has more columns than
            rows), infinite solutions are possible, in terms of arbitrary
            parameters. These arbitrary parameters are returned as the params
Matrix.
Examples
========
>>> from sympy import Matrix
>>> A = Matrix([[1, 2, 1, 1], [1, 2, 2, -1], [2, 4, 0, 6]])
>>> b = Matrix([7, 12, 4])
>>> sol, params = A.gauss_jordan_solve(b)
>>> sol
Matrix([
[-2*_tau0 - 3*_tau1 + 2],
[ _tau0],
[ 2*_tau1 + 5],
[ _tau1]])
>>> params
Matrix([
[_tau0],
[_tau1]])
>>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
>>> b = Matrix([3, 6, 9])
>>> sol, params = A.gauss_jordan_solve(b)
>>> sol
Matrix([
[-1],
[ 2],
[ 0]])
>>> params
Matrix(0, 1, [])
See Also
========
lower_triangular_solve
upper_triangular_solve
cholesky_solve
diagonal_solve
LDLsolve
LUsolve
QRsolve
pinv
References
==========
.. [1] http://en.wikipedia.org/wiki/Gaussian_elimination
"""
from sympy.matrices import Matrix, zeros
aug = self.hstack(self.copy(), b.copy())
row, col = aug[:, :-1].shape
# solve by reduced row echelon form
A, pivots = aug.rref(simplify=True)
A, v = A[:, :-1], A[:, -1]
pivots = list(filter(lambda p: p < col, pivots))
rank = len(pivots)
# Bring to block form
permutation = Matrix(range(col)).T
A = A.vstack(A, permutation)
for i, c in enumerate(pivots):
A.col_swap(i, c)
A, permutation = A[:-1, :], A[-1, :]
# check for existence of solutions
# rank of aug Matrix should be equal to rank of coefficient matrix
if not v[rank:, 0].is_zero:
raise ValueError("Linear system has no solution")
# Get index of free symbols (free parameters)
        free_var_index = permutation[len(pivots):]  # non-pivot columns are free variables
# Free parameters
dummygen = numbered_symbols("tau", Dummy)
tau = Matrix([next(dummygen) for k in range(col - rank)]).reshape(col - rank, 1)
# Full parametric solution
V = A[:rank, rank:]
vt = v[:rank, 0]
free_sol = tau.vstack(vt - V*tau, tau)
# Undo permutation
sol = zeros(col, 1)
for k, v in enumerate(free_sol):
sol[permutation[k], 0] = v
if freevar:
return sol, tau, free_var_index
else:
return sol, tau
def classof(A, B):
"""
Get the type of the result when combining matrices of different types.
Currently the strategy is that immutability is contagious.
Examples
========
>>> from sympy import Matrix, ImmutableMatrix
>>> from sympy.matrices.matrices import classof
>>> M = Matrix([[1, 2], [3, 4]]) # a Mutable Matrix
>>> IM = ImmutableMatrix([[1, 2], [3, 4]])
>>> classof(M, IM)
<class 'sympy.matrices.immutable.ImmutableMatrix'>
"""
try:
if A._class_priority > B._class_priority:
return A.__class__
else:
return B.__class__
except Exception:
pass
try:
import numpy
if isinstance(A, numpy.ndarray):
return B.__class__
if isinstance(B, numpy.ndarray):
return A.__class__
except Exception:
pass
raise TypeError("Incompatible classes %s, %s" % (A.__class__, B.__class__))
def a2idx(j, n=None):
"""Return integer after making positive and validating against n."""
if type(j) is not int:
try:
j = j.__index__()
except AttributeError:
raise IndexError("Invalid index a[%r]" % (j, ))
if n is not None:
if j < 0:
j += n
if not (j >= 0 and j < n):
raise IndexError("Index out of range: a[%s]" % (j, ))
return int(j)
|
ChristinaZografou/sympy
|
sympy/matrices/matrices.py
|
Python
|
bsd-3-clause
| 136,380
|
[
"DIRAC",
"Gaussian"
] |
bf8c50fa36cbcbcd78d6b99fe757652c27a4cc18da78f43dd7ebe5e3b9bc0c5c
|
# ################################################################
#
# Active Particles on Curved Spaces (APCS)
#
# Author: Silke Henkes
#
# ICSMB, Department of Physics
# University of Aberdeen
#
# Author: Rastko Sknepnek
#
# Division of Physics
# School of Engineering, Physics and Mathematics
# University of Dundee
#
# (c) 2013, 2014
#
# This program cannot be used, copied, or modified without
# explicit permission of the author.
#
# ################################################################
# Utility code for computing potential energy profile averaged in the
# azimuthal direction
from read_data import *
from op import *
#from inertia import *
from glob import glob
from datetime import *
from random import uniform
from math import *
import numpy as np
import argparse
import scipy.spatial.distance as sd
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
#from matplotlib import rc
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
import vtk
# setting global parameters
#matplotlib.rcParams['text.usetex'] = 'true'
matplotlib.rcParams['lines.linewidth'] = 2
matplotlib.rcParams['axes.linewidth'] = 2
matplotlib.rcParams['xtick.major.size'] = 8
matplotlib.rcParams['ytick.major.size'] = 8
#matplotlib.rcParams['font.size']=40.0
#matplotlib.rcParams['legend.fontsize']=22.0
matplotlib.rcParams['font.size']=28
matplotlib.rcParams['legend.fontsize']=14
cdict = {'red': [(0.0, 0.25, 0.25),
(0.3, 1.0, 1.0),
(0.5, 0.4, 0.0),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.0, 0.0),
(0.25, 0.0, 0.5),
(0.5, 1.0, 1.0),
(0.75, 0.5, 0.0),
(1.0, 0.0, 0.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(0.7, 1.0, 1.0),
(1.0, 0.25, 0.25)]}
RMAX=1.0
makeEdges=True
class Configuration:
def __init__(self,param,geometry,interaction,foldername,base,snap,verbose):
        self.verbose=verbose
self.geometry=geometry
self.interaction=interaction
print "Geometry of simulation = " + geometry
print "Interaction of simulation = " + interaction
self.f=foldername + base + snap + '.dat'
print "Processing file : ", self.f
self.data = ReadData(self.f)
#if writeVTK:
##outname = '.'.join((f).split('.')[:-1]) + '_data.vtk'
#print outname
#writeConfigurationVTK(data,outname)
        # get the data out of the files
        data = self.data
        x, y, z = np.array(data.data[data.keys['x']]), np.array(data.data[data.keys['y']]), np.array(data.data[data.keys['z']])
        vx, vy, vz = np.array(data.data[data.keys['vx']]), np.array(data.data[data.keys['vy']]), np.array(data.data[data.keys['vz']])
        nx, ny, nz = np.array(data.data[data.keys['nx']]), np.array(data.data[data.keys['ny']]), np.array(data.data[data.keys['nz']])
self.rval = np.column_stack((x,y,z))
self.vval = np.column_stack((vx,vy,vz))
self.nval = np.column_stack((nx,ny,nz))
# To be very, very sure that it is exactly normalized
        self.nval=((self.nval).transpose()/(np.sqrt(np.sum(self.nval**2,axis=1))).transpose()).transpose()
        # Getting the local coordinate system
        self.rhat=((self.rval).transpose()/(np.sqrt(np.sum(self.rval**2,axis=1))).transpose()).transpose()
        self.vhat=((self.vval).transpose()/(np.sqrt(np.sum(self.vval**2,axis=1))).transpose()).transpose()
def rotate_matrix_vectorial(axis,theta):
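    # Euler-Rodrigues construction: for every (normalized) axis row, build
    # the 3x3 matrix of a rotation by theta about that axis, with
    # a = cos(theta/2) and (b, c, d) = -axis*sin(theta/2).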
axlen=np.sqrt(axis[:,0]**2+axis[:,1]**2+axis[:,2]**2)
#print axlen
axis[:,0]=axis[:,0]/axlen
axis[:,1]=axis[:,1]/axlen
axis[:,2]=axis[:,2]/axlen
a=np.cos(theta/2)
b=-axis[:,0]*np.sin(theta/2)
c=-axis[:,1]*np.sin(theta/2)
d=-axis[:,2]*np.sin(theta/2)
rotmat=np.empty((len(axis[:,0]),3,3))
rotmat[:,0,0]=a*a+b*b-c*c-d*d
rotmat[:,0,1]=2*(b*c-a*d)
rotmat[:,0,2]=2*(b*d+a*c)
rotmat[:,1,0]=2*(b*c+a*d)
rotmat[:,1,1]=a*a+c*c-b*b-d*d
rotmat[:,1,2]=2*(c*d-a*b)
rotmat[:,2,0]=2*(b*d-a*c)
rotmat[:,2,1]=2*(c*d+a*b)
rotmat[:,2,2]=a*a+d*d-b*b-c*c
return rotmat
def rotate_vectorial(v,n,phi):
    vrot=np.empty(np.shape(v))
    rotmat=rotate_matrix_vectorial(n,phi)
vrot[:,0]=rotmat[:,0,0]*v[:,0]+rotmat[:,0,1]*v[:,1]+rotmat[:,0,2]*v[:,2]
vrot[:,1]=rotmat[:,1,0]*v[:,0]+rotmat[:,1,1]*v[:,1]+rotmat[:,1,2]*v[:,2]
vrot[:,2]=rotmat[:,2,0]*v[:,0]+rotmat[:,2,1]*v[:,1]+rotmat[:,2,2]*v[:,2]
return vrot
# Fully vectorial version of parallel transport
# 1.determine the cross product of the origins
# 2.compute the magnitude of all the origin and cross vectors
# 3.Compute the dot product of the origins
# 4.The rotation axis is the direction of the cross product
# 5.The rotation angle is the angle between the origin vectors, extracted from the dot product
def parallel_transport(r1,r2,a1,a2):
r2_x_r1=np.cross(r2,r1)
#len_r2_x_r1=np.sqrt(r2_x_r1[:,0]**2+r2_x_r1[:,1]**2+r2_x_r1[:,2]**2)
len_r2_x_r1=np.sqrt(np.sum(r2_x_r1**2,axis=1))
#lenr1=np.sqrt(r1[:,0]**2+r1[:,1]**2+r1[:,2]**2)
lenr1=np.sqrt(np.sum(r1**2,axis=1))
#lenr2=np.sqrt(r2[:,0]**2+r2[:,1]**2+r2[:,2]**2)
lenr2=np.sqrt(np.sum(r2**2,axis=1))
dot_r1r2=r1[:,0]*r2[:,0]+r1[:,1]*r2[:,1]+r1[:,2]*r2[:,2]
    # normalize the rotation axis; [:,None] broadcasts the (N,) norms
    # against the (N,3) cross products
    n = r2_x_r1/len_r2_x_r1[:,None]
#n[:,0] = r2_x_r1[:,0]/len_r2_x_r1
#n[:,1] = r2_x_r1[:,1]/len_r2_x_r1
#n[:,2] = r2_x_r1[:,2]/len_r2_x_r1
phi = np.arccos(dot_r1r2/(lenr1*lenr2))
a2trans=rotate_vectorial(a2,n,-phi)
return a2trans
# same thing for one vector and a set (i.e. a particle and its neighbours)
def parallel_transport_single(r1,r2,a2):
r2_x_r1=np.cross(r2,r1)
len_r2_x_r1=np.sqrt(np.sum(r2_x_r1**2,axis=1))
lenr1=np.sqrt(np.sum(r1**2,axis=1))
lenr2=np.sqrt(np.sum(r2**2,axis=1))
dot_r1r2=np.dot(r1,r2)
    n = r2_x_r1/len_r2_x_r1[:,None]
phi = np.arccos(dot_r1r2/(lenr1*lenr2))
a2trans=rotate_vectorial(a2,n,-phi)
return a2trans
# Argh. Ad hoc: here is the morse potential
# V = D*(1-np.exp(-a*(r-re)))**2
# F = 2aD exp(-a(r-re))*(1-exp(-a(r-re)))
# pair_potential morse { D = 0.2; a = 3.0; re = 2.0;
def compute_energy_and_pressure(r,k,sigma):
eng = np.zeros(len(r))
press = np.zeros(len(r))
stress = np.zeros((len(r),3,3))
Interaction='morse'
if Interaction=='harmonic':
#dist = sd.cdist(r,r)
dmax=4*sigma**2
for i in range(len(r)):
#for i in range(10):
dist=np.sum((r-r[i,:])**2,axis=1)
neighbours=[index for index,value in enumerate(dist) if value <dmax]
neighbours.remove(i)
dr=np.sqrt(dist[neighbours])
diff=2.0-dr
fact = 0.5*k*diff
eng_val = fact*diff
press_val = fact*dr
            # Stress (force moment) has to be computed element by element: r_a F_b = -k r_a dist_b
drvec=r[neighbours,:]-r[i,:]
Fvec=k*((diff/dr).transpose()*(drvec).transpose()).transpose()
for u in range(3):
for v in range(3):
stress[neighbours,u,v]+=0.5*drvec[:,u]*Fvec[:,v]
eng[neighbours]+=eng_val
press[neighbours]+=press_val
else:
# We are morse by hand right now ...
D=0.2
re=2.0
a=3.0
dmax=8*sigma**2
for i in range(len(r)):
#for i in range(10):
dist=np.sum((r-r[i,:])**2,axis=1)
neighbours=[index for index,value in enumerate(dist) if value <dmax]
neighbours.remove(i)
dr=np.sqrt(dist[neighbours])
eng_val=D*(1-np.exp(-a*(dr-re)))**2
fnorm=-2*a*D*np.exp(-a*(dr-re))*(1-np.exp(-a*(dr-re)))
drvec=r[neighbours,:]-r[i,:]
Fvec=((fnorm/dr).transpose()*(drvec).transpose()).transpose()
press_val=fnorm*dr
for u in range(3):
for v in range(3):
stress[neighbours,u,v]+=0.5*drvec[:,u]*Fvec[:,v]
eng[neighbours]+=eng_val
press[neighbours]+=press_val
return [eng, press, stress]
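# Minimal usage sketch (hypothetical values): r is an (N, 3) array of
# particle positions, k a spring constant (harmonic branch only) and
# sigma a particle radius setting the neighbour cutoff:
# eng, press, stress = compute_energy_and_pressure(rval, 1.0, 1.0)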
def findLoop(rval,sigma,etheta,ephi,dmax):
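    # A sketch of the intent: identify the dual loops (faces) of the
    # contact network. rval holds positions, (etheta, ephi) the local
    # tangent frame, sigma the particle radius and dmax the squared
    # neighbour cutoff.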
neighList=[]
Ival=[]
Jval=[]
Inei=[]
count=0
# Identify all neighbours and add them to a list. Keep i->j and j->i separate
# The label is in neighList, the particle numbers are in Ival and Jval
for i in range(len(rval)):
dist=np.sum((rval-rval[i,:])**2,axis=1)
neighbours=[index for index,value in enumerate(dist) if value <dmax]
neighbours.remove(i)
neighList.extend([u for u in range(count,count+len(neighbours))])
Ival.extend([i for k in range(len(neighbours))])
Jval.extend(neighbours)
Inei.append([u for u in range(count,count+len(neighbours))])
count+=len(neighbours)
    # Identify loops based on the neighbour list. Kick out any (one-way) contacts that have occurred so far
Jarray=np.array(Jval)
LoopList=[]
# The dual: which loops belong to which particle
ParList=[[] for k in range(len(rval))]
LoopCen=[]
l=0
while len(neighList)>0:
idx=neighList[0]
idxkeep=idx
#print idx
idx0=[]
#llist0=[]
llist=[]
goneround=False
while goneround==False:
# Sort neighbours counterclockwise according to their local angle
dr0hat=rval[Jval[idx],:]-rval[Ival[idx],:]
dr0hat/=np.sqrt(np.sum(dr0hat**2))
jnei0=Inei[Jval[idx]]
jnei=list(Jarray[jnei0])
drvec=rval[jnei,:]-rval[Jval[idx],:]
drhat=((drvec).transpose()/(np.sqrt(np.sum(drvec**2,axis=1))).transpose()).transpose()
cbeta=np.einsum('kj,j->k',drhat,ephi[Jval[idx],:])
sbeta=np.einsum('kj,j->k',drhat,etheta[Jval[idx],:])
cbeta0=np.dot(dr0hat,ephi[Jval[idx],:])
sbeta0=np.dot(dr0hat,etheta[Jval[idx],:])
# arccos returns between 0 and pi. Just multiply by the sign of the sine
beta=np.arccos(cbeta)*np.sign(sbeta)
# Determine the angles from the contact (read backwards) to the others, and pick the largest, modulo 2pi
beta0=np.arccos(cbeta0)*np.sign(sbeta0)-np.pi
dbeta=beta-beta0
dbeta-=2*np.pi*np.round((dbeta-np.pi)/(2*np.pi))
# and throwing out the particle itself
itself=jnei.index(Ival[idx])
dbeta[itself]=-1
cnt=np.argmax(dbeta)
idx=jnei0[cnt]
goneround = idx in idx0
if goneround==False:
idx0.append(idx)
llist.append(Jarray[idx])
ParList[Jarray[idx]].append(l)
#print idx0
#print llist
#print len(neighList)
for v in idx0:
try:
neighList.remove(v)
except ValueError:
pass
# There may be rare isolated cases (rattlers?) where the first contact itself is not part of the eventual loop.
# This causes problems, because the loop is only identified after that contact has been removed.
# Remove the original contact, in case it hasn't been already
try:
#print idxkeep
neighList.remove(idxkeep)
except ValueError:
pass
looppos=rval[llist]
LoopCen.append([np.mean(looppos[:,0]), np.mean(looppos[:,1]),np.mean(looppos[:,2])])
LoopList.append(llist)
l+=1
# Much prettier: if a loop is too big (as measured by the mean distance of its particles from the centre),
# deconstruct it into lots of little (virtual) loops with defined centers
if makeEdges:
for l0 in range(len(LoopList)):
llist=LoopList[l0]
looppos=rval[llist]
dlvec=looppos-LoopCen[l0]
isLong=np.sqrt(np.sum(np.sum(dlvec**2,axis=1)))/len(llist)
if len(llist)>5:
print llist
print isLong
if isLong>RMAX:
print "Loop " + str(l0) + " with particles " + str(llist) + " is too big! "
for k in range(len(llist)):
kside=k-1
if kside<0:
kside=len(llist)-1
# Attempting to catch the inward pointing loops: they have to be the global boundary, of size ~sqrt(N)
if len(llist)<0.5*np.sqrt(len(rval)):
newcen=0.5*(rval[llist[k]]+rval[llist[kside]])-sigma*dlvec[k,:]/np.sqrt(np.sum(dlvec[k,:]**2))
else:
newcen=0.5*(rval[llist[k]]+rval[llist[kside]])+sigma*dlvec[k,:]/np.sqrt(np.sum(dlvec[k,:]**2))
LoopCen.append(newcen)
try:
ParList[llist[k]].remove(l0)
except ValueError:
pass
ParList[llist[k]].append(l)
try:
ParList[llist[kside]].remove(l0)
except ValueError:
pass
ParList[llist[kside]].append(l)
l+=1
LoopCen1=np.array(LoopCen)
# While we are at it, we can construct the dual tessellation here.
# All that's missing is to order the patches for the particles counterclockwise
for i in range(len(rval)):
parray=np.array(ParList[i])
drvec=LoopCen1[ParList[i]]-rval[i,:]
# Optionally take care of irregularities (in the form of too-long bonds) here. These happen at the edges of connected stuff.
# The tessellation is correct, it's just not what we want
drlen=np.sqrt(np.sum(drvec**2,axis=1))
#if makeEdges:
#isLong=[index for index,value in enumerate(drlen) if value >RMAX]
## Replace this one by an approximation of an arc through its two next neighbours
#for j in isLong:
##print "Resizing connection to loop " + str(ParList[i][j]) + ' as new loop ' + str(l)
##jplus=
##dbeta=beta[lorder[j+1
#parray[j]=l
#LoopCen.append([rval[i,0]+0.5*RMAX*drvec[j,0]/drlen[j],rval[i,1]+0.75*RMAX*drvec[j,1]/drlen[j],rval[i,2]+0.5*RMAX*drvec[j,2]/drlen[j]])
#l+=1
#drvec=rval[jnei,:]-rval[Jval[idx],:]
drhat=((drvec).transpose()/(drlen).transpose()).transpose()
cbeta=np.einsum('kj,j->k',drhat,ephi[i,:])
sbeta=np.einsum('kj,j->k',drhat,etheta[i,:])
# arccos returns between 0 and pi. Just multiply by the sign of the sine
beta=np.arccos(cbeta)*np.sign(sbeta)
# sort by angle and put back in ParList
lorder=np.argsort(beta)
ParList[i]=parray[lorder]
# Use the new ParList structure in which the loops belonging to each particle are stored
return LoopList,LoopCen,ParList,Ival,Jval
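# Sketch of a typical call (added; mirrors the driver in getDefects below):
#   LoopList, LoopCen, ParList, Ival, Jval = findLoop(rval, sigma, etheta, ephi, (2.0*sigma)**2)
#   LoopList[u] holds the particle indices around loop u, LoopCen[u] its centroid.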
def getDefects(f,sigma,outname,outname_patch,symtype='polar',debug=False,writeVTK=False,writeVTKpatches=False):
print "Processing file : ", f
data = ReadData(f)
if writeVTK:
#outname = '.'.join((f).split('.')[:-1]) + '_data.vtk'
print outname
writeConfigurationVTK(data,outname)
# get the data out of the files
x, y, z = np.array(data.data[data.keys['x']]), np.array(data.data[data.keys['y']]), np.array(data.data[data.keys['z']])
vx, vy, vz = np.array(data.data[data.keys['vx']]), np.array(data.data[data.keys['vy']]), np.array(data.data[data.keys['vz']])
nx, ny, nz = np.array(data.data[data.keys['nx']]), np.array(data.data[data.keys['ny']]), np.array(data.data[data.keys['nz']])
rval = np.column_stack((x,y,z))
vval = np.column_stack((vx,vy,vz))
nval = np.column_stack((nx,ny,nz))
# To be very, very sure that it is exactly normalized
nval=((nval).transpose()/(np.sqrt(np.sum(nval**2,axis=1))).transpose()).transpose()
# Getting the local coordinate system
rhat=((rval).transpose()/(np.sqrt(np.sum(rval**2,axis=1))).transpose()).transpose()
vhat=((vval).transpose()/(np.sqrt(np.sum(vval**2,axis=1))).transpose()).transpose()
# We are doing this in plain local cartesian coordinates
etheta = np.empty(np.shape(rval))
etheta[:,0]=np.ones((len(rval),))
etheta[:,1]=0
etheta[:,2]=0
ephi=np.empty(np.shape(rval))
ephi[:,0]=0
ephi[:,1]=np.ones((len(rval),))
ephi[:,2]=0
# Trying a simple n^2 algorithm for the defects. Identify all loops by the old trusty Ball-Blumenfeld method
# Parallel transport each neighbor orientation vector back to it? Then compute the Burgers vector.
#dmax=(2.4*sigma)**2
dmax=(2.0*sigma)**2
LoopList,LoopCen,ParList,Ival,Jval=findLoop(rval,sigma,etheta,ephi,dmax)
#LoopList,Ival,Jval=findLoop(rval,etheta,ephi,dmax)
if writeVTKpatches:
writePatches(rval,LoopCen,ParList,outname_patch)
# Defect storage, up to 100
# For n and velocity
numdefect_n=0
numdefect_v=0
defects_n=np.zeros((100,4))
defects_v=np.zeros((100,4))
print len(LoopList)
for u in range(len(LoopList)):
# Should already be ordered counterclockwise
# Following a version of the Goldenfeld algorithm, with nx,ny,nz playing the role of the order parameter. The sphere is in cartesian space
thisLoop=LoopList[u]
# Generalized algorithm for defects of any type
# The old nematic algorithm, based on the hemispheres
# Count the defect charge. Times two, to use integers and simpler if statements
printnow=False
if symtype=='oldnematic':
# The polarization vector nval
ctheta=1
coord=[]
coord.append(nval[thisLoop[0],:])
for t in range(1,len(thisLoop)):
ctheta=np.dot(nval[thisLoop[t],:],np.sign(ctheta)*nval[thisLoop[t-1],:])
# Nematic: append the order parameter, rotated through the *smaller* angle
coord.append(np.sign(ctheta)*nval[thisLoop[t],:])
# Find out if the last point and the starting point are in the same hemisphere.
cdefect=np.dot(coord[t],coord[0])
if cdefect<0:
ndefect=0.5
else:
ndefect=0.0
# The normalized velocity vector vhat
ctheta=1
coord=[]
coord.append(vhat[thisLoop[0],:])
for t in range(1,len(thisLoop)):
ctheta=np.dot(vhat[thisLoop[t],:],np.sign(ctheta)*vhat[thisLoop[t-1],:])
# Nematic: append the order parameter, rotated through the *smaller* angle
coord.append(np.sign(ctheta)*vhat[thisLoop[t],:])
# Find out if the last point and the starting point are in the same hemisphere.
cdefect=np.dot(coord[t],coord[0])
if cdefect<0:
vdefect=0.5
else:
vdefect=0.0
elif symtype=='polar':
# nval
thetatot=0
t0=thisLoop[-1]
for t in thisLoop[0:len(thisLoop)]:
ctheta=np.dot(nval[t,:],nval[t0,:])
stheta=np.dot(rhat[t,:],np.cross(nval[t,:],nval[t0,:]))
theta=np.arccos(ctheta)*np.sign(stheta)
thetatot+=theta
t0=t
# Classify according to defects
# For a polar one, we can only have integer defects
ndefect=int(round(thetatot/(2*np.pi)))
# vhat
thetatot=0
t0=thisLoop[-1]
for t in thisLoop[0:len(thisLoop)]:
ctheta=np.dot(vhat[t,:],vhat[t0,:])
stheta=np.dot(rhat[t,:],np.cross(vhat[t,:],vhat[t0,:]))
theta=np.arccos(ctheta)*np.sign(stheta)
thetatot+=theta
t0=t
#if ctheta<0:
#print "candidate: t t0 ctheta stheta theta thetatot"
#print t, t0, ctheta, stheta, theta, thetatot
#printnow=True
# Classify according to defects
# For a polar one, we can only have integer defects
vdefect=int(round(thetatot/(2*np.pi)))
#if printnow:
#print thetatot
#print thisLoop
elif symtype=='nematic':
# nval
thetatot=0
t0=thisLoop[0]
ctheta=1
for t in thisLoop[1:-1]:
ctheta=np.dot(nval[t,:],np.sign(ctheta)*nval[t0,:])
stheta=np.dot(rhat[t,:],np.cross(nval[t,:],nval[t0,:]))
theta=np.arccos(ctheta)*np.sign(stheta)
thetatot+=theta
t0=t
ndefect=0.5*int(round(thetatot/(np.pi)))
# vhat
thetatot=0
t0=thisLoop[0]
ctheta=1
for t in thisLoop[1:-1]:
ctheta=np.dot(vhat[t,:],np.sign(ctheta)*vhat[t0,:])
stheta=np.dot(rhat[t,:],np.cross(vhat[t,:],vhat[t0,:]))
theta=np.arccos(ctheta)*np.sign(stheta)
thetatot+=theta
t0=t
vdefect=0.5*int(round(thetatot/(np.pi)))
else:
print "Unknown alignment symmetry type! Not tracking defects!"
ndefect=0.0
vdefect=0.0
if abs(ndefect)>0:
if numdefect_n<100:
print "Found Defect in orientation field!"
print ndefect
# Construct the geometric centre of the defect
rmhat=np.sum(rval[thisLoop],axis=0)
rmhat/=np.sqrt(np.sum(rmhat**2))
# Charge of the defect
defects_n[numdefect_n,0]=ndefect
# Coordinates of the defect
defects_n[numdefect_n,1:]=radius*rmhat
numdefect_n+=1
if abs(vdefect)>0:
if numdefect_v<100:
print "Found Defect in velocity field!"
print vdefect
# Construct the geometric centre of the defect
rmhat=np.sum(rval[thisLoop],axis=0)
rmhat/=np.sqrt(np.sum(rmhat**2))
# Charge of the defect
defects_v[numdefect_v,0]=vdefect
# Coordinates of the defect
defects_v[numdefect_v,1:]=radius*rmhat
numdefect_v+=1
#print defects
print 'Number of orientation field defects: ' + str(numdefect_n)
print 'Number of velocity field defects: ' + str(numdefect_v)
# Debugging output
if debug==True:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(rval[:,0], rval[:,1], rval[:,2], zdir='z', c='b',s=4)
ax.scatter(defects_n[:,1], defects_n[:,2], defects_n[:,3], zdir='z', c='r',s=50)
ax.scatter(defects_v[:,1], defects_v[:,2], defects_v[:,3], zdir='z', c='g',s=50)
# Computing the dual to the loops, i.e. (a variant of) the BB tessellation.
return defects_n, defects_v,numdefect_n,numdefect_v
def writePatches(rval,LoopCen,ParList,outname):
print outname
points = vtk.vtkPoints()
polygons = vtk.vtkCellArray()
v=0
polygon = vtk.vtkPolygon()
havePoly=[]
for k in range(len(ParList)):
nedge=len(ParList[k])
if nedge<2:
print nedge
print k
print ParList[k]
else:
havePoly.append(k)
#for k in range(300):
# Create the points of the polygon: the loop centers
polygon = vtk.vtkPolygon()
for l in ParList[k]:
points.InsertNextPoint(LoopCen[l][0],LoopCen[l][1],LoopCen[l][2])
polygon.GetPointIds().SetNumberOfIds(nedge)
for l in range(nedge):
#print l
polygon.GetPointIds().SetId(l,v+l)
polygons.InsertNextCell(polygon)
v+=nedge
# Create the matching polydata
polygonPolyData = vtk.vtkPolyData()
polygonPolyData.SetPoints(points)
polygonPolyData.SetPolys(polygons)
# Add stresses ...
eng, press,stress = compute_energy_and_pressure(rval,1.0,1.0)
pressure = vtk.vtkDoubleArray()
pressure.SetNumberOfComponents(1)
pressure.SetName('Pressure')
for k in havePoly:
pressure.InsertNextValue(press[k])
polygonPolyData.GetCellData().AddArray(pressure)
writer = vtk.vtkXMLPolyDataWriter()
writer.SetFileName(outname)
if vtk.VTK_MAJOR_VERSION <= 5:
writer.SetInput(polygonPolyData)
else:
writer.SetInputData(polygonPolyData)
writer.SetDataModeToAscii()
writer.Write()
def writeConfigurationVTK(data,outfile):
Points = vtk.vtkPoints()
has_v = False
has_n = False
if not (data.keys.has_key('x') and data.keys.has_key('y') and data.keys.has_key('z')):
raise RuntimeError("Particle coordinates not specified in the input data.")
x = np.array(data.data[data.keys['x']])
y = np.array(data.data[data.keys['y']])
z = np.array(data.data[data.keys['z']])
if (data.keys.has_key('vx') and data.keys.has_key('vy') and data.keys.has_key('vz')):
vx = np.array(data.data[data.keys['vx']])
vy = np.array(data.data[data.keys['vy']])
vz = np.array(data.data[data.keys['vz']])
has_v = True
if (data.keys.has_key('nx') and data.keys.has_key('ny') and data.keys.has_key('nz')):
nx = np.array(data.data[data.keys['nx']])
ny = np.array(data.data[data.keys['ny']])
nz = np.array(data.data[data.keys['nz']])
has_n = True
r = np.ones(len(x))
Radii = vtk.vtkDoubleArray()
Radii.SetNumberOfComponents(1)
Radii.SetName('Radius')
if has_v:
Velocities = vtk.vtkDoubleArray()
Velocities.SetNumberOfComponents(3)
Velocities.SetName("Velocity")
if has_n:
Directors = vtk.vtkDoubleArray()
Directors.SetNumberOfComponents(3)
Directors.SetName("Directors")
#NDirectors = vtk.vtkDoubleArray()
#NDirectors.SetNumberOfComponents(3)
#NDirectors.SetName("NDirectors")
for (xx,yy,zz,rr) in zip(x,y,z,r):
Points.InsertNextPoint(xx,yy,zz)
Radii.InsertNextValue(rr)
if has_v:
#vnorm=np.sqrt(vx**2+vy**2+vz**2)
#u=0
for (vvx,vvy,vvz) in zip(vx,vy,vz):
#no=vnorm[u]
#u+=1
#Velocities.InsertNextTuple3(vvx/no,vvy/no,vvz/no)
Velocities.InsertNextTuple3(vvx,vvy,vvz)
if has_n:
for (nnx,nny,nnz) in zip(nx,ny,nz):
#Directors.InsertNextTuple3(0.5*nnx,0.5*nny,0.5*nnz)
#NDirectors.InsertNextTuple3(-0.5*nnx,-0.5*nny,-0.5*nnz)
Directors.InsertNextTuple3(nnx,nny,nnz)
#if args.connected:
#Lines = vtk.vtkCellArray()
#Line = vtk.vtkLine()
#points = np.column_stack((x,y,z))
#hull = ConvexHull(points)
#edges = []
#for h in hull.simplices:
#i, j, k = h
#if not sorted([i,j]) in edges: edges.append(sorted([i,j]))
#if not sorted([i,k]) in edges: edges.append(sorted([i,k]))
#if not sorted([j,k]) in edges: edges.append(sorted([j,k]))
#for (i,j) in edges:
#Line.GetPointIds().SetId(0,i)
#Line.GetPointIds().SetId(1,j)
#Lines.InsertNextCell(Line)
polydata = vtk.vtkPolyData()
polydata.SetPoints(Points)
#if args.connected:
#polydata.SetLines(Lines)
polydata.GetPointData().AddArray(Radii)
if has_v:
polydata.GetPointData().AddArray(Velocities)
if has_n:
polydata.GetPointData().AddArray(Directors)
#polydata.GetPointData().AddArray(NDirectors)
polydata.Modified()
writer = vtk.vtkXMLPolyDataWriter()
#outname = '.'.join(f.split('.')[:-1])
writer.SetFileName(outfile)
if vtk.VTK_MAJOR_VERSION <= 5:
writer.SetInput(polydata)
else:
writer.SetInputData(polydata)
writer.SetDataModeToAscii()
writer.Write()
def writeDefects(defects_n, defects_v,numdefect_n,numdefect_v,outfile):
# Preparing the vtp output
# Create point structure in vtk
Points = vtk.vtkPoints()
print "Created Points"
# Create (something) associated to the points, with different values for each
Number = vtk.vtkDoubleArray()
Number.SetNumberOfComponents(1)
Number.SetName('Number')
Size = vtk.vtkDoubleArray()
Size.SetNumberOfComponents(1)
Size.SetName('Size')
print "Created Number"
# Put one point at the centre, and the ndefect ones around it
Points.InsertNextPoint(0,0,0)
Number.InsertNextValue(0)
Size.InsertNextValue(0)
for u in range(numdefect_n):
Points.InsertNextPoint(defects_n[u,1],defects_n[u,2],defects_n[u,3])
Number.InsertNextValue(1)
Size.InsertNextValue(1.0)
for u in range(numdefect_v):
Points.InsertNextPoint(defects_v[u,1],defects_v[u,2],defects_v[u,3])
Number.InsertNextValue(2)
Size.InsertNextValue(1.0)
print "Added Particles and Numbers"
#lines = vtk.vtkCellArray()
#line = vtk.vtkLine()
#for i in range(numdefect_n):
#line = vtk.vtkLine()
#line.GetPointIds().SetId(0,0)
#line.GetPointIds().SetId(1,i+1)
#lines.InsertNextCell(line)
#for i in range(numdefect_v):
#line = vtk.vtkLine()
#line.GetPointIds().SetId(0,0)
#line.GetPointIds().SetId(1,numdefect_n+i+1)
#lines.InsertNextCell(line)
#print "Added lines"
polydata = vtk.vtkPolyData()
polydata.SetPoints(Points)
#polydata.SetLines(lines)
polydata.GetPointData().AddArray(Number)
polydata.GetPointData().AddArray(Size)
print "Finished Polydata"
polydata.Modified()
writer = vtk.vtkXMLPolyDataWriter()
writer.SetFileName(outfile)
# VTK 5 vs. 6 API difference: SetInput was replaced by SetInputData
if vtk.VTK_MAJOR_VERSION <= 5:
writer.SetInput(polydata)
else:
writer.SetInputData(polydata)
writer.SetDataModeToAscii()
writer.Write()
print "Wrote File"
# Scripting version: Only execute if this is called as a script. Otherwise, it attempts to go through here when loading as a module
# and throws errors because some arguments aren't defined
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", type=str, help="Input file with particle velocity field")
parser.add_argument("-o", "--output", type=str, default="defects", help="Output file (text file)")
parser.add_argument("-k", "--k", type=float, default=1.0, help="soft potential strength")
parser.add_argument("-L", "--system_size", type=float, default=100, help="system size for flat system")
parser.add_argument("-r", "--particle_r", type=float, default=1.0, help="radius of particle ")
args = parser.parse_args()
print
print "\tActive Particles on Curved Spaces (APCS)"
print "\tPolar and nematic defect finding algoritm"
print
print "\tSilke Henkes"
print "\tUniversity of Aberdeen"
print "\t(c) 2014"
print "\t----------------------------------------------"
print
print "\tInput : ", args.input
print "\tOutput : ", args.output
print "\tSpring constant : ", args.k
print "\tSystem size: : ", args.system_size
print "\tRadius of the particle : ", args.particle_r
print
outname = '.'.join((args.input).split('.')[:-1]) + '_data.vtk'
print outname
outname_patch = '.'.join((args.input).split('.')[:-1]) + '_patches.vtk'
print outname_patch
# Careful, Morse interaction range is up to twice particle radius
defects_n, defects_v,numdefect_n,numdefect_v=getDefects(args.input,1.3*args.particle_r,outname,outname_patch,'polar',True,True,True)
outname = '.'.join((args.input).split('.')[:-1]) + '_defects.vtk'
print outname
#writer.SetFileName(args.output+'/'+outname+'.vtp')
#writer.SetFileName(args.output+'.vtp')
writeDefects(defects_n, defects_v,numdefect_n,numdefect_v,outname)
plt.show()
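# Example invocation (added; the input filename is hypothetical):
#   python Patches_stresses_defects_lib.py -i sphere_final.dat -o defects
# This writes <input>_data.vtk, <input>_patches.vtk and <input>_defects.vtk.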
|
sknepneklab/SAMoS
|
FormerAnalysis/Patches_stresses_defects_lib.py
|
Python
|
gpl-3.0
| 29,759
|
[
"VTK"
] |
28e4ee5d36801e93a1eaf884e4782505dcdccea6ad9e5ec4b852e6c0225e8e37
|
#Copyright (c) 2014 Sony Computer Entertainment America LLC. See License.txt.
import sys
sys.path.append("./CommonTestScripts")
import System
import Test
import CircuitEditorUtil
doc = atfDocService.OpenNewDocument(editor)
CircuitEditorUtil.SetGlobals(schemaLoader, Schema)
modules = []
annotations = []
connections = []
print "Adding annotations"
comment = editingContext.Insert[Annotation](DomNode(Schema.annotationType.Type), 300, 100)
editingContext.SetProperty(comment.DomNode, Schema.annotationType.textAttribute, "I am a comment")
comment2 = editingContext.Insert[Annotation](DomNode(Schema.annotationType.Type), 400, 100)
editingContext.SetProperty(comment2.DomNode, Schema.annotationType.textAttribute, "!@#$%^&*()_+<>/.,;[]\\")
print "Adding modules"
btn = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("buttonType", "benjamin button"), 100, 100)
light = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("lightType", "lights out"), 200, 100)
sound = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("soundType", "like a lion in zion"), 100, 200)
speaker = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("speakerType", "speakeazy"), 200, 200)
btn2 = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("buttonType", "btn2"), 100, 300)
btn3 = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("buttonType", "btn3"), 100, 400)
andObj = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("andType", "andONE"), 200, 300)
orObj = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("orType", "orca"), 200, 400)
light2 = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("lightType", "light2"), 300, 300)
light3 = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("lightType", "light3"), 300, 400)
print "Adding connections"
btnToLight = editingContext.Connect(btn, btn.Type.Outputs[0], light, light.Type.Inputs[0], None)
soundToSpeaker = editingContext.Connect(sound, sound.Type.Outputs[0], speaker, speaker.Type.Inputs[0], None)
btn2ToAnd = editingContext.Connect(btn2, btn2.Type.Outputs[0], andObj, andObj.Type.Inputs[0], None)
btn2ToOr = editingContext.Connect(btn2, btn2.Type.Outputs[0], orObj, orObj.Type.Inputs[0], None)
btn3ToAnd = editingContext.Connect(btn3, btn3.Type.Outputs[0], andObj, andObj.Type.Inputs[0], None)
btn3ToOr = editingContext.Connect(btn3, btn3.Type.Outputs[0], orObj, orObj.Type.Inputs[0], None)
btn2ToAnd = editingContext.Connect(btn2, btn2.Type.Outputs[0], andObj, andObj.Type.Inputs[0], None)
andToLight2 = editingContext.Connect(andObj, andObj.Type.Outputs[0], light2, light2.Type.Inputs[0], None)
orToLight3 = editingContext.Connect(orObj, orObj.Type.Outputs[0], light3, light3.Type.Inputs[0], None)
for annotation in circuitContainer.Annotations:
annotations.append(annotation)
for module in circuitContainer.Elements:
modules.append(module)
for connection in circuitContainer.Wires:
connections.append(connection)
filePath = Test.GetNewFilePath("EditAndSave.circuit")
atfFile.SaveAs(doc,Uri(filePath) )
Test.True(File.Exists(filePath), "Verify file saved")
atfFile.Close(doc)
docNew = atfFile.OpenExistingDocument(editor, Uri(filePath))
CircuitEditorUtil.VerifyCircuit(circuitContainer, modules, annotations, connections)
print Test.SUCCESS
|
jethac/ATF
|
Test/FunctionalTests/CircuitEditorTestScripts/EditSaveCloseAndReopen.py
|
Python
|
apache-2.0
| 3,356
|
[
"ORCA"
] |
d5eb029e1959e62b6a54686eabd82085548f47a0ec0e1e742bed8bfd998db19b
|
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jurko Gospodnetić ( jurko.gospodnetic@pke.hr )
"""
Suds Python library document caching unit tests.
Implemented using the 'pytest' testing framework.
"""
if __name__ == "__main__":
import __init__
__init__.runUsingPyTest(globals())
import suds
import suds.cache
import suds.sax.parser
import pytest
import os
import tempfile
class InvisibleMan:
"""Dummy class used for pickling related tests."""
def __init__(self, x):
self.x = x
# Hardcoded values used in different caching test cases.
value_empty = suds.byte_str("")
value_f2 = suds.byte_str("fifi2")
value_f22 = suds.byte_str("fifi22")
value_f3 = suds.byte_str("fifi3")
value_p1 = suds.byte_str("pero1")
value_p11 = suds.byte_str("pero11")
value_p111 = suds.byte_str("pero111")
value_p2 = suds.byte_str("pero2")
value_p22 = suds.byte_str("pero22")
value_unicode = suds.byte_str(u"€ 的 čćžšđČĆŽŠĐ")
def test_Cache():
cache = suds.cache.Cache()
pytest.raises(Exception, cache.get, "id")
pytest.raises(Exception, cache.put, "id", "object")
pytest.raises(Exception, cache.purge, "id")
pytest.raises(Exception, cache.clear)
def test_DocumentCache(tmpdir):
cacheFolder = tmpdir.join("puffy").strpath
cache = suds.cache.DocumentCache(cacheFolder)
assert isinstance(cache, suds.cache.FileCache)
assert cache.get("unga1") is None
# TODO: DocumentCache class interface seems silly. Its get() operation
# returns an XML document while its put() operation takes an XML element.
# The put() operation also silently ignores passed data of incorrect type.
# TODO: Update this test to no longer depend on the exact input XML data
# formatting. We currently expect it to be formatted exactly as what gets
# read back from the DocumentCache.
content = suds.byte_str("""\
<xsd:element name="Elemento">
<xsd:simpleType>
<xsd:restriction base="xsd:string">
<xsd:enumeration value="alfa"/>
<xsd:enumeration value="beta"/>
<xsd:enumeration value="gamma"/>
</xsd:restriction>
</xsd:simpleType>
</xsd:element>""")
xml = suds.sax.parser.Parser().parse(suds.BytesIO(content))
cache.put("unga1", xml.getChildren()[0])
readXML = cache.get("unga1")
assert isinstance(readXML, suds.sax.document.Document)
readXMLElements = readXML.getChildren()
assert len(readXMLElements) == 1
readXMLElement = readXMLElements[0]
assert isinstance(readXMLElement, suds.sax.element.Element)
assert suds.byte_str(str(readXMLElement)) == content
def test_FileCache():
cache = suds.cache.FileCache()
assert isinstance(cache, suds.cache.Cache)
def test_FileCache_clear(tmpdir):
cacheFolder1 = tmpdir.join("fungus").strpath
cache1 = suds.cache.FileCache(cacheFolder1)
cache1.put("unga1", value_p1)
cache1.put("unga2", value_p2)
assert cache1.get("unga1") == value_p1
assert cache1.get("unga2") == value_p2
cache1.clear()
assert _isEmptyCacheFolder(cacheFolder1)
assert cache1.get("unga1") is None
assert cache1.get("unga2") is None
cache1.put("unga1", value_p11)
cache1.put("unga2", value_p2)
assert cache1.get("unga1") == value_p11
assert cache1.get("unga2") == value_p2
cacheFolder2 = tmpdir.join("broccoli").strpath
cache2 = suds.cache.FileCache(cacheFolder2)
cache2.put("unga2", value_f2)
assert cache2.get("unga2") == value_f2
cache2.clear()
assert not _isEmptyCacheFolder(cacheFolder1)
assert _isEmptyCacheFolder(cacheFolder2)
assert cache2.get("unga2") is None
assert cache1.get("unga1") == value_p11
assert cache1.get("unga2") == value_p2
cache2.put("unga2", value_p22)
assert cache2.get("unga2") == value_p22
def test_FileCache_location(tmpdir):
defaultLocation = os.path.join(tempfile.gettempdir(), "suds")
cache = suds.cache.FileCache()
assert os.path.isdir(cache.location)
assert cache.location == defaultLocation
assert suds.cache.FileCache().location == defaultLocation
assert cache.location == defaultLocation
cacheFolder1 = tmpdir.join("flip-flop1").strpath
assert not os.path.isdir(cacheFolder1)
assert suds.cache.FileCache(location=cacheFolder1).location == cacheFolder1
assert _isEmptyCacheFolder(cacheFolder1)
cacheFolder2 = tmpdir.join("flip-flop2").strpath
assert not os.path.isdir(cacheFolder2)
assert suds.cache.FileCache(cacheFolder2).location == cacheFolder2
assert _isEmptyCacheFolder(cacheFolder2)
def test_FileCache_close_leaves_cached_files_behind(tmpdir):
cacheFolder1 = tmpdir.join("ana").strpath
cache1 = suds.cache.FileCache(cacheFolder1)
cache1.put("unga1", value_p1)
cache1.put("unga2", value_p2)
cacheFolder2 = tmpdir.join("nan").strpath
cache2 = suds.cache.FileCache(cacheFolder2)
cache2.put("unga2", value_f2)
cache2.put("unga3", value_f3)
del cache1
cache11 = suds.cache.FileCache(cacheFolder1)
assert cache11.get("unga1") == value_p1
assert cache11.get("unga2") == value_p2
assert cache2.get("unga2") == value_f2
assert cache2.get("unga3") == value_f3
def test_FileCache_get_put(tmpdir):
cacheFolder1 = tmpdir.join("firefly").strpath
cache1 = suds.cache.FileCache(cacheFolder1)
assert _isEmptyCacheFolder(cacheFolder1)
assert cache1.get("unga1") is None
cache1.put("unga1", value_p1)
assert not _isEmptyCacheFolder(cacheFolder1)
assert cache1.get("unga1") == value_p1
assert cache1.get("unga2") is None
cache1.put("unga1", value_p11)
assert cache1.get("unga1") == value_p11
assert cache1.get("unga2") is None
cache1.put("unga2", value_p2)
assert cache1.get("unga1") == value_p11
assert cache1.get("unga2") == value_p2
cacheFolder2 = tmpdir.join("semper fi").strpath
cache2 = suds.cache.FileCache(cacheFolder2)
assert _isEmptyCacheFolder(cacheFolder2)
assert cache2.get("unga2") is None
cache2.put("unga2", value_f2)
assert not _isEmptyCacheFolder(cacheFolder2)
assert cache2.get("unga2") == value_f2
assert cache2.get("unga3") is None
cache2.put("unga2", value_f22)
assert cache2.get("unga2") == value_f22
assert cache2.get("unga3") is None
cache2.put("unga3", value_f3)
assert cache2.get("unga2") == value_f22
assert cache2.get("unga3") == value_f3
assert not _isEmptyCacheFolder(cacheFolder1)
assert not _isEmptyCacheFolder(cacheFolder2)
assert cache1.get("unga1") == value_p11
assert cache1.get("unga2") == value_p2
assert cache1.get("unga3") is None
assert cache2.get("unga1") is None
assert cache2.get("unga2") == value_f22
assert cache2.get("unga3") == value_f3
def test_FileCache_purge(tmpdir):
cacheFolder1 = tmpdir.join("flamenco").strpath
cache1 = suds.cache.FileCache(cacheFolder1)
cache1.put("unga1", value_p1)
assert cache1.get("unga1") == value_p1
cache1.purge("unga1")
assert _isEmptyCacheFolder(cacheFolder1)
assert cache1.get("unga1") is None
cache1.put("unga1", value_p11)
cache1.put("unga2", value_p2)
assert cache1.get("unga1") == value_p11
assert cache1.get("unga2") == value_p2
cache1.purge("unga1")
assert cache1.get("unga1") is None
assert cache1.get("unga2") == value_p2
cache1.put("unga1", value_p111)
cacheFolder2 = tmpdir.join("shadow").strpath
cache2 = suds.cache.FileCache(cacheFolder2)
cache2.put("unga2", value_f2)
cache2.purge("unga2")
assert _isEmptyCacheFolder(cacheFolder2)
assert cache1.get("unga1") == value_p111
assert cache1.get("unga2") == value_p2
assert cache2.get("unga2") is None
def test_FileCache_reused_cache_folder(tmpdir):
cacheFolder = tmpdir.strpath
cache1 = suds.cache.FileCache(cacheFolder)
assert _isEmptyCacheFolder(cacheFolder)
assert cache1.get("unga1") is None
cache1.put("unga1", value_p1)
assert cache1.get("unga1") == value_p1
assert cache1.get("unga2") is None
cache1.put("unga1", value_p11)
assert cache1.get("unga1") == value_p11
assert cache1.get("unga2") is None
cache1.put("unga2", value_p2)
assert cache1.get("unga1") == value_p11
assert cache1.get("unga2") == value_p2
cache2 = suds.cache.FileCache(cacheFolder)
assert cache2.get("unga1") == value_p11
assert cache2.get("unga2") == value_p2
cache2.put("unga3", value_f3)
assert cache1.get("unga3") == value_f3
def test_FileCache_version(tmpdir):
fakeVersionInfo = "--- fake version info ---"
assert suds.__version__ != fakeVersionInfo
cacheFolder = tmpdir.join("hitori")
versionFile = cacheFolder.join("version")
cache = suds.cache.FileCache(cacheFolder.strpath)
assert versionFile.read() == suds.__version__
cache.put("unga1", value_p1)
versionFile.write(fakeVersionInfo)
assert cache.get("unga1") == value_p1
cache2 = suds.cache.FileCache(cacheFolder.strpath)
assert _isEmptyCacheFolder(cacheFolder.strpath)
assert cache.get("unga1") is None
assert cache2.get("unga1") is None
assert versionFile.read() == suds.__version__
cache.put("unga1", value_p11)
cache.put("unga2", value_p22)
versionFile.remove()
assert cache.get("unga1") == value_p11
assert cache.get("unga2") == value_p22
cache3 = suds.cache.FileCache(cacheFolder.strpath)
assert _isEmptyCacheFolder(cacheFolder.strpath)
assert cache.get("unga1") is None
assert cache.get("unga2") is None
assert cache2.get("unga1") is None
assert versionFile.read() == suds.__version__
def test_FileCache_with_empty_cached_content(tmpdir):
cacheFolder = tmpdir.strpath
cache = suds.cache.FileCache(cacheFolder)
cache.put("unga1", value_empty)
assert cache.get("unga1") == value_empty
assert not _isEmptyCacheFolder(cacheFolder)
def test_FileCache_with_random_utf_character_cached_content(tmpdir):
cacheFolder = tmpdir.strpath
cache = suds.cache.FileCache(cacheFolder)
cache.put("unga1", value_unicode)
assert cache.get("unga1") == value_unicode
assert not _isEmptyCacheFolder(cacheFolder)
def test_NoCache():
cache = suds.cache.NoCache()
assert isinstance(cache, suds.cache.Cache)
assert cache.get("id") == None
cache.put("id", "something")
assert cache.get("id") == None
# TODO: It should not be an error to call purge() or clear() on a NoCache
# instance.
pytest.raises(Exception, cache.purge, "id")
pytest.raises(Exception, cache.clear)
def test_ObjectCache(tmpdir):
cacheFolder = tmpdir.join("george carlin").strpath
cache = suds.cache.ObjectCache(cacheFolder)
assert isinstance(cache, suds.cache.FileCache)
assert cache.get("unga1") is None
assert cache.get("unga2") is None
cache.put("unga1", InvisibleMan(1))
cache.put("unga2", InvisibleMan(2))
read1 = cache.get("unga1")
read2 = cache.get("unga2")
assert read1.__class__ is InvisibleMan
assert read2.__class__ is InvisibleMan
assert read1.x == 1
assert read2.x == 2
def _isEmptyCacheFolder(folder):
assert os.path.isdir(folder)
def walkError(error):
pytest.fail("Error attempting to walk through cache folder contents.")
count = 0
for root, folders, files in os.walk(folder, onerror=walkError):
assert root == folder
return len(folders) == 0 and len(files) == 1 and files[0] == 'version'
return False
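# Added usage sketch outside the test suite (the folder is created on the fly):
#   cache = suds.cache.FileCache(tempfile.mkdtemp())
#   cache.put("key", suds.byte_str("payload"))
#   assert cache.get("key") == suds.byte_str("payload")  # bytes round-trip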
|
piotrpawlaczek/suds-jurko
|
tests/test_cache.py
|
Python
|
lgpl-3.0
| 12,588
|
[
"Firefly"
] |
b76e4363cb95adf0a4be4610d6bbb05e665348568f388846ed61aac6a02786cc
|
"""
Miscellaneous utility functions.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "BSD 3-clause"
import cPickle
import gzip
import numpy as np
import os
from rdkit import Chem
from rdkit.Chem.Scaffolds import MurckoScaffold
from rdkit_utils import PicklableMol, serial
def read_pickle(filename):
"""
Read pickled data from (possibly gzipped) files.
Parameters
----------
filename : str
Filename.
"""
if filename.endswith('.gz'):
f = gzip.open(filename)
else:
f = open(filename)
data = cPickle.load(f)
f.close()
return data
def write_pickle(data, filename, protocol=cPickle.HIGHEST_PROTOCOL):
"""
Write data to a (possibly gzipped) pickle.
Parameters
----------
data : object
Object to pickle.
filename : str
Filename.
protocol : int, optional (default cPickle.HIGHEST_PROTOCOL)
Pickle protocol.
"""
if filename.endswith('.gz'):
f = gzip.open(filename, 'wb')
else:
f = open(filename, 'wb')
cPickle.dump(data, f, protocol)
f.close()
class DatasetSharder(object):
"""
Split a dataset into chunks.
Parameters
----------
filename : str, optional
Input filename. One of filename or mols must be provided.
mols : iterable, optional
Molecules to shard. One of filename or mols must be provided.
shard_size : int, optional (default 1000)
Number of molecules per shard.
write_shards : bool, optional (default True)
Write shards to disk.
prefix : str, optional
Prefix for output files.
flavor : str, optional (default 'pkl.gz')
Output molecule format used as the extension for shard filenames.
start_index : int, optional (default 0)
Starting index for shard filenames.
"""
def __init__(self, filename=None, mols=None, shard_size=1000,
write_shards=True, prefix=None, flavor='pkl.gz',
start_index=0):
if filename is None and mols is None:
raise ValueError('One of filename or mols must be provided.')
self.filename = filename
self.mols = mols
self.shard_size = shard_size
self.write_shards = write_shards
if self.filename is not None and prefix is None:
prefix = self._guess_prefix()
if write_shards and prefix is None:
raise ValueError('One of filename or prefix must be provided ' +
'when writing shards.')
self.prefix = prefix
self.flavor = flavor
self.index = start_index
self.writer = serial.MolWriter()
def _guess_prefix(self):
"""
Get the prefix from a filename.
Takes everything in the basename before the first period. For example,
the prefix for '../foo.bar.gz' is 'foo'.
"""
return os.path.basename(self.filename).split('.')[0]
def _next_filename(self):
"""
Generate the next shard filename.
"""
if self.prefix is None:
raise ValueError('Prefix must be provided when writing shards.')
filename = '{}-{}.{}'.format(self.prefix, self.index, self.flavor)
self.index += 1
return filename
def read_mols_from_file(self):
"""
Read molecules from a file.
"""
with serial.MolReader().open(self.filename) as reader:
for mol in reader.get_mols():
yield mol
def shard(self):
"""
Split a dataset into chunks.
If self.write_shards is False, a shard generator is returned. Each
shard is an ndarray with dtype=object, which gives convenient access
to ndarray operations (like fancy indexing) for downstream
applications.
"""
if self.write_shards:
for shard in self._shard():
self.write_shard(shard)
else:
return self._shard()
def _shard(self):
"""
Split a dataset into chunks.
"""
if self.mols is None:
self.mols = self.read_mols_from_file()
shard = []
for mol in self.mols:
shard.append(mol)
if len(shard) >= self.shard_size:
yield np.asarray(shard) # ndarray with dtype=object
shard = []
if len(shard):
yield np.asarray(shard)
def __iter__(self):
"""
Iterate through shards.
"""
return self._shard()
def write_shard(self, mols):
"""
Write molecules to the next shard file.
Molecules are converted to PicklableMols prior to writing to preserve
properties such as molecule names.
Parameters
----------
mols : array_like
Molecules.
"""
mols = [PicklableMol(mol) for mol in mols] # preserve properties
filename = self._next_filename()
with self.writer.open(filename) as f:
f.write(mols)
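# Added usage sketch (the input filename is hypothetical):
#   sharder = DatasetSharder(filename='mols.sdf.gz', shard_size=500)
#   sharder.shard()  # writes mols-0.pkl.gz, mols-1.pkl.gz, ...
#   for shard in DatasetSharder(filename='mols.sdf.gz', write_shards=False):
#       pass         # each shard is an object ndarray of up to 500 molecules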
def pad_array(x, shape, fill=0, both=False):
"""
Pad an array with a fill value.
Parameters
----------
x : ndarray
Matrix.
shape : tuple or int
Desired shape. If int, all dimensions are padded to that size.
fill : object, optional (default 0)
Fill value.
both : bool, optional (default False)
If True, split the padding on both sides of each axis. If False,
padding is applied to the end of each axis.
"""
x = np.asarray(x)
if not isinstance(shape, tuple):
shape = tuple(shape for _ in xrange(x.ndim))
pad = []
for i in xrange(x.ndim):
diff = shape[i] - x.shape[i]
assert diff >= 0
if both:
a, b = divmod(diff, 2)
b += a
pad.append((a, b))
else:
pad.append((0, diff))
pad = tuple(pad)
x = np.pad(x, pad, mode='constant', constant_values=fill)
return x
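# Added examples (shapes for illustration; fill defaults to 0):
#   pad_array(np.ones((2, 3)), 4).shape            # -> (4, 4), padded at the end
#   pad_array(np.ones((2, 3)), 4, both=True).shape # -> (4, 4), padding split,
#                                                  #    odd remainder at the end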
class SmilesGenerator(object):
"""
Generate SMILES strings for molecules.
Parameters
----------
remove_hydrogens : bool, optional (default True)
Remove hydrogens prior to generating SMILES.
assign_stereo_from_3d : bool, optional (default False)
Assign stereochemistry from 3D coordinates. This will overwrite any
existing stereochemistry information on molecules.
"""
def __init__(self, remove_hydrogens=True, assign_stereo_from_3d=False):
self.remove_hydrogens = remove_hydrogens
self.assign_stereo_from_3d = assign_stereo_from_3d
def get_smiles(self, mol):
"""
Generate the canonical SMILES string for a molecule.
Parameters
----------
mol : RDKit Mol
Molecule.
"""
if self.assign_stereo_from_3d: # do this before removing hydrogens
Chem.AssignAtomChiralTagsFromStructure(mol)
if self.remove_hydrogens:
mol = Chem.RemoveHs(mol) # creates a copy
return Chem.MolToSmiles(mol, isomericSmiles=True, canonical=True)
def get_unique_smiles(self, mols):
"""
Get unique SMILES for a set of molecules.
Parameters
----------
mols : iterable
Molecules.
"""
return np.unique([self.get_smiles(mol) for mol in mols])
class SmilesMap(object):
"""
Map compound names to SMILES.
Parameters
----------
prefix : str, optional
Prefix to prepend to IDs.
allow_duplicates : bool, optional (default True)
Allow duplicate SMILES.
kwargs : dict, optional
Keyword arguments for SmilesGenerator.
"""
def __init__(self, prefix=None, allow_duplicates=True, **kwargs):
self.prefix = prefix
self.allow_duplicates = allow_duplicates
self.engine = SmilesGenerator(**kwargs)
self.map = {}
def add_mol(self, mol):
"""
Map a molecule name to its corresponding SMILES string and store in the
SMILES map.
Parameters
----------
mol : RDKit Mol
Molecule.
"""
name = mol.GetProp('_Name')
try:
int(name) # check if this is a bare ID
if self.prefix is None:
raise TypeError('Bare IDs are not allowed.')
except ValueError:
pass
if self.prefix is not None:
name = '{}{}'.format(self.prefix, name)
smiles = self.engine.get_smiles(mol)
# Failures:
# * Name is already mapped to a different SMILES
# * SMILES is already used for a different name
if name in self.map: # catch all cases where name is already used
if self.map[name] != smiles:
raise ValueError('ID collision for "{}".'.format(name))
elif not self.allow_duplicates and smiles in self.map.values():
other = None
for key, val in self.map.items():
if val == smiles:
other = key
break
raise ValueError(
'SMILES collision between "{}" and "{}":\n\t{}'.format(
name, other, smiles))
else:
self.map[name] = smiles
def get_map(self):
"""
Get the map.
"""
return self.map
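# Added behaviour sketch (hypothetical molecule and prefix):
#   smap = SmilesMap(prefix='CID')
#   mol = Chem.MolFromSmiles('CCO'); mol.SetProp('_Name', '42')
#   smap.add_mol(mol)
#   smap.get_map()  # -> {'CID42': 'CCO'}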
class ScaffoldGenerator(object):
"""
Generate molecular scaffolds.
Parameters
----------
include_chirality : bool, optional (default False)
Include chirality in scaffolds.
"""
def __init__(self, include_chirality=False):
self.include_chirality = include_chirality
def get_scaffold(self, mol):
"""
Get the Murcko scaffold for a molecule.
Murcko scaffolds are described in DOI: 10.1021/jm9602928. They are
essentially that part of the molecule consisting of rings and the
linker atoms between them.
Parameters
----------
mol : RDKit Mol
Molecule.
"""
return MurckoScaffold.MurckoScaffoldSmiles(
mol=mol, includeChirality=self.include_chirality)
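# Added example (assumes RDKit is available; molecule is hypothetical):
#   mol = Chem.MolFromSmiles('c1ccccc1CCN')
#   ScaffoldGenerator().get_scaffold(mol)  # -> 'c1ccccc1', the benzene core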
|
rbharath/pande-gas
|
vs_utils/utils/__init__.py
|
Python
|
bsd-3-clause
| 10,224
|
[
"RDKit"
] |
ace4e0471203b59c19d82d51de02bb2c95ed97d2ff4e531743a4ee617b5b0a58
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
urlpatterns = [
# Django Admin
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("django_todolist.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
url(r'^', include('django_todolist.api.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development; just visit
# these URLs in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
]
|
andresgz/django_todolist
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,067
|
[
"VisIt"
] |
cca160f5061f29efb4353d365589d22f798135218675a2fc0009c3a864912d55
|
import unittest
from test import support
import sys
import random
import math
import array
# Used for lazy formatting of failure messages
class Frm(object):
def __init__(self, format, *args):
self.format = format
self.args = args
def __str__(self):
return self.format % self.args
# SHIFT should match the value in longintrepr.h for best testing.
SHIFT = sys.int_info.bits_per_digit
BASE = 2 ** SHIFT
MASK = BASE - 1
KARATSUBA_CUTOFF = 70 # from longobject.c
# Max number of base BASE digits to use in test cases. Doubling
# this will more than double the runtime.
MAXDIGITS = 15
# build some special values
special = [0, 1, 2, BASE, BASE >> 1, 0x5555555555555555, 0xaaaaaaaaaaaaaaaa]
# some solid strings of one bits
p2 = 4 # 0 and 1 already added
for i in range(2*SHIFT):
special.append(p2 - 1)
p2 = p2 << 1
del p2
# add complements & negations
special += [~x for x in special] + [-x for x in special]
DBL_MAX = sys.float_info.max
DBL_MAX_EXP = sys.float_info.max_exp
DBL_MIN_EXP = sys.float_info.min_exp
DBL_MANT_DIG = sys.float_info.mant_dig
DBL_MIN_OVERFLOW = 2**DBL_MAX_EXP - 2**(DBL_MAX_EXP - DBL_MANT_DIG - 1)
# pure Python version of correctly-rounded true division
def truediv(a, b):
"""Correctly-rounded true division for integers."""
negative = a^b < 0
a, b = abs(a), abs(b)
# exceptions: division by zero, overflow
if not b:
raise ZeroDivisionError("division by zero")
if a >= DBL_MIN_OVERFLOW * b:
raise OverflowError("int/int too large to represent as a float")
# find integer d satisfying 2**(d - 1) <= a/b < 2**d
d = a.bit_length() - b.bit_length()
if d >= 0 and a >= 2**d * b or d < 0 and a * 2**-d >= b:
d += 1
# compute 2**-exp * a / b for suitable exp
exp = max(d, DBL_MIN_EXP) - DBL_MANT_DIG
a, b = a << max(-exp, 0), b << max(exp, 0)
q, r = divmod(a, b)
# round-half-to-even: fractional part is r/b, which is > 0.5 iff
# 2*r > b, and == 0.5 iff 2*r == b.
if 2*r > b or 2*r == b and q % 2 == 1:
q += 1
result = math.ldexp(q, exp)
return -result if negative else result
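# Added sanity sketch (not part of the original suite): the pure Python
# reference should agree exactly with builtin true division.
def _truediv_demo():
    for a, b in [(1, 3), (10**40, 7), (-(2**70) + 1, 3**30)]:
        assert truediv(a, b) == a / b  # both are correctly-rounded doubles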
class LongTest(unittest.TestCase):
# Get quasi-random long consisting of ndigits digits (in base BASE).
# quasi == the most-significant digit will not be 0, and the number
# is constructed to contain long strings of 0 and 1 bits. These are
# more likely than random bits to provoke digit-boundary errors.
# The sign of the number is also random.
def getran(self, ndigits):
self.assertTrue(ndigits > 0)
nbits_hi = ndigits * SHIFT
nbits_lo = nbits_hi - SHIFT + 1
answer = 0
nbits = 0
r = int(random.random() * (SHIFT * 2)) | 1 # force 1 bits to start
while nbits < nbits_lo:
bits = (r >> 1) + 1
bits = min(bits, nbits_hi - nbits)
self.assertTrue(1 <= bits <= SHIFT)
nbits = nbits + bits
answer = answer << bits
if r & 1:
answer = answer | ((1 << bits) - 1)
r = int(random.random() * (SHIFT * 2))
self.assertTrue(nbits_lo <= nbits <= nbits_hi)
if random.random() < 0.5:
answer = -answer
return answer
# Get random long consisting of ndigits random digits (relative to base
# BASE). The sign bit is also random.
def getran2(self, ndigits):
answer = 0
for i in range(ndigits):
answer = (answer << SHIFT) | random.randint(0, MASK)
if random.random() < 0.5:
answer = -answer
return answer
def check_division(self, x, y):
eq = self.assertEqual
q, r = divmod(x, y)
q2, r2 = x//y, x%y
pab, pba = x*y, y*x
eq(pab, pba, Frm("multiplication does not commute for %r and %r", x, y))
eq(q, q2, Frm("divmod returns different quotient than / for %r and %r", x, y))
eq(r, r2, Frm("divmod returns different mod than %% for %r and %r", x, y))
eq(x, q*y + r, Frm("x != q*y + r after divmod on x=%r, y=%r", x, y))
if y > 0:
self.assertTrue(0 <= r < y, Frm("bad mod from divmod on %r and %r", x, y))
else:
self.assertTrue(y < r <= 0, Frm("bad mod from divmod on %r and %r", x, y))
def test_division(self):
digits = list(range(1, MAXDIGITS+1)) + list(range(KARATSUBA_CUTOFF,
KARATSUBA_CUTOFF + 14))
digits.append(KARATSUBA_CUTOFF * 3)
for lenx in digits:
x = self.getran(lenx)
for leny in digits:
y = self.getran(leny) or 1
self.check_division(x, y)
# specific numbers chosen to exercise corner cases of the
# current long division implementation
# 30-bit cases involving a quotient digit estimate of BASE+1
self.check_division(1231948412290879395966702881,
1147341367131428698)
self.check_division(815427756481275430342312021515587883,
707270836069027745)
self.check_division(627976073697012820849443363563599041,
643588798496057020)
self.check_division(1115141373653752303710932756325578065,
1038556335171453937726882627)
# 30-bit cases that require the post-subtraction correction step
self.check_division(922498905405436751940989320930368494,
949985870686786135626943396)
self.check_division(768235853328091167204009652174031844,
1091555541180371554426545266)
# 15-bit cases involving a quotient digit estimate of BASE+1
self.check_division(20172188947443, 615611397)
self.check_division(1020908530270155025, 950795710)
self.check_division(128589565723112408, 736393718)
self.check_division(609919780285761575, 18613274546784)
# 15-bit cases that require the post-subtraction correction step
self.check_division(710031681576388032, 26769404391308)
self.check_division(1933622614268221, 30212853348836)
def test_karatsuba(self):
digits = list(range(1, 5)) + list(range(KARATSUBA_CUTOFF,
KARATSUBA_CUTOFF + 10))
digits.extend([KARATSUBA_CUTOFF * 10, KARATSUBA_CUTOFF * 100])
bits = [digit * SHIFT for digit in digits]
# Test products of long strings of 1 bits -- (2**x-1)*(2**y-1) ==
# 2**(x+y) - 2**x - 2**y + 1, so the proper result is easy to check.
for abits in bits:
a = (1 << abits) - 1
for bbits in bits:
if bbits < abits:
continue
b = (1 << bbits) - 1
x = a * b
y = ((1 << (abits + bbits)) -
(1 << abits) -
(1 << bbits) +
1)
self.assertEqual(x, y,
Frm("bad result for a*b: a=%r, b=%r, x=%r, y=%r", a, b, x, y))
def check_bitop_identities_1(self, x):
eq = self.assertEqual
eq(x & 0, 0, Frm("x & 0 != 0 for x=%r", x))
eq(x | 0, x, Frm("x | 0 != x for x=%r", x))
eq(x ^ 0, x, Frm("x ^ 0 != x for x=%r", x))
eq(x & -1, x, Frm("x & -1 != x for x=%r", x))
eq(x | -1, -1, Frm("x | -1 != -1 for x=%r", x))
eq(x ^ -1, ~x, Frm("x ^ -1 != ~x for x=%r", x))
eq(x, ~~x, Frm("x != ~~x for x=%r", x))
eq(x & x, x, Frm("x & x != x for x=%r", x))
eq(x | x, x, Frm("x | x != x for x=%r", x))
eq(x ^ x, 0, Frm("x ^ x != 0 for x=%r", x))
eq(x & ~x, 0, Frm("x & ~x != 0 for x=%r", x))
eq(x | ~x, -1, Frm("x | ~x != -1 for x=%r", x))
eq(x ^ ~x, -1, Frm("x ^ ~x != -1 for x=%r", x))
eq(-x, 1 + ~x, Frm("not -x == 1 + ~x for x=%r", x))
eq(-x, ~(x-1), Frm("not -x == ~(x-1) for x=%r", x))
for n in range(2*SHIFT):
p2 = 2 ** n
eq(x << n >> n, x,
Frm("x << n >> n != x for x=%r, n=%r", (x, n)))
eq(x // p2, x >> n,
Frm("x // p2 != x >> n for x=%r n=%r p2=%r", (x, n, p2)))
eq(x * p2, x << n,
Frm("x * p2 != x << n for x=%r n=%r p2=%r", (x, n, p2)))
eq(x & -p2, x >> n << n,
Frm("not x & -p2 == x >> n << n for x=%r n=%r p2=%r", (x, n, p2)))
eq(x & -p2, x & ~(p2 - 1),
Frm("not x & -p2 == x & ~(p2 - 1) for x=%r n=%r p2=%r", (x, n, p2)))
def check_bitop_identities_2(self, x, y):
eq = self.assertEqual
eq(x & y, y & x, Frm("x & y != y & x for x=%r, y=%r", (x, y)))
eq(x | y, y | x, Frm("x | y != y | x for x=%r, y=%r", (x, y)))
eq(x ^ y, y ^ x, Frm("x ^ y != y ^ x for x=%r, y=%r", (x, y)))
eq(x ^ y ^ x, y, Frm("x ^ y ^ x != y for x=%r, y=%r", (x, y)))
eq(x & y, ~(~x | ~y), Frm("x & y != ~(~x | ~y) for x=%r, y=%r", (x, y)))
eq(x | y, ~(~x & ~y), Frm("x | y != ~(~x & ~y) for x=%r, y=%r", (x, y)))
eq(x ^ y, (x | y) & ~(x & y),
Frm("x ^ y != (x | y) & ~(x & y) for x=%r, y=%r", (x, y)))
eq(x ^ y, (x & ~y) | (~x & y),
Frm("x ^ y == (x & ~y) | (~x & y) for x=%r, y=%r", (x, y)))
eq(x ^ y, (x | y) & (~x | ~y),
Frm("x ^ y == (x | y) & (~x | ~y) for x=%r, y=%r", (x, y)))
def check_bitop_identities_3(self, x, y, z):
eq = self.assertEqual
eq((x & y) & z, x & (y & z),
Frm("(x & y) & z != x & (y & z) for x=%r, y=%r, z=%r", (x, y, z)))
eq((x | y) | z, x | (y | z),
Frm("(x | y) | z != x | (y | z) for x=%r, y=%r, z=%r", (x, y, z)))
eq((x ^ y) ^ z, x ^ (y ^ z),
Frm("(x ^ y) ^ z != x ^ (y ^ z) for x=%r, y=%r, z=%r", (x, y, z)))
eq(x & (y | z), (x & y) | (x & z),
Frm("x & (y | z) != (x & y) | (x & z) for x=%r, y=%r, z=%r", (x, y, z)))
eq(x | (y & z), (x | y) & (x | z),
Frm("x | (y & z) != (x | y) & (x | z) for x=%r, y=%r, z=%r", (x, y, z)))
def test_bitop_identities(self):
for x in special:
self.check_bitop_identities_1(x)
digits = range(1, MAXDIGITS+1)
for lenx in digits:
x = self.getran(lenx)
self.check_bitop_identities_1(x)
for leny in digits:
y = self.getran(leny)
self.check_bitop_identities_2(x, y)
self.check_bitop_identities_3(x, y, self.getran((lenx + leny)//2))
def slow_format(self, x, base):
digits = []
sign = 0
if x < 0:
sign, x = 1, -x
while x:
x, r = divmod(x, base)
digits.append(int(r))
digits.reverse()
digits = digits or [0]
return '-'[:sign] + \
{2: '0b', 8: '0o', 10: '', 16: '0x'}[base] + \
"".join("0123456789abcdef"[i] for i in digits)
def check_format_1(self, x):
for base, mapper in (8, oct), (10, repr), (16, hex):
got = mapper(x)
expected = self.slow_format(x, base)
msg = Frm("%s returned %r but expected %r for %r",
mapper.__name__, got, expected, x)
self.assertEqual(got, expected, msg)
self.assertEqual(int(got, 0), x, Frm('int("%s", 0) != %r', got, x))
# str() has to be checked a little differently since there's no
# trailing "L"
got = str(x)
expected = self.slow_format(x, 10)
msg = Frm("%s returned %r but expected %r for %r",
mapper.__name__, got, expected, x)
self.assertEqual(got, expected, msg)
def test_format(self):
for x in special:
self.check_format_1(x)
for i in range(10):
for lenx in range(1, MAXDIGITS+1):
x = self.getran(lenx)
self.check_format_1(x)
def test_long(self):
# Check conversions from string
LL = [
('1' + '0'*20, 10**20),
('1' + '0'*100, 10**100)
]
for s, v in LL:
for sign in "", "+", "-":
for prefix in "", " ", "\t", " \t\t ":
ss = prefix + sign + s
vv = v
if sign == "-" and v is not ValueError:
vv = -v
try:
self.assertEqual(int(ss), vv)
except ValueError:
pass
# trailing L should no longer be accepted...
self.assertRaises(ValueError, int, '123L')
self.assertRaises(ValueError, int, '123l')
self.assertRaises(ValueError, int, '0L')
self.assertRaises(ValueError, int, '-37L')
self.assertRaises(ValueError, int, '0x32L', 16)
self.assertRaises(ValueError, int, '1L', 21)
# ... but it's just a normal digit if base >= 22
self.assertEqual(int('1L', 22), 43)
# tests with base 0
self.assertEqual(int('000', 0), 0)
self.assertEqual(int('0o123', 0), 83)
self.assertEqual(int('0x123', 0), 291)
self.assertEqual(int('0b100', 0), 4)
self.assertEqual(int(' 0O123 ', 0), 83)
self.assertEqual(int(' 0X123 ', 0), 291)
self.assertEqual(int(' 0B100 ', 0), 4)
self.assertEqual(int('0', 0), 0)
self.assertEqual(int('+0', 0), 0)
self.assertEqual(int('-0', 0), 0)
self.assertEqual(int('00', 0), 0)
self.assertRaises(ValueError, int, '08', 0)
self.assertRaises(ValueError, int, '-012395', 0)
# invalid bases
invalid_bases = [-909,
2**31-1, 2**31, -2**31, -2**31-1,
2**63-1, 2**63, -2**63, -2**63-1,
2**100, -2**100,
]
for base in invalid_bases:
self.assertRaises(ValueError, int, '42', base)
def test_conversion(self):
class JustLong:
# test that __long__ no longer used in 3.x
def __long__(self):
return 42
self.assertRaises(TypeError, int, JustLong())
class LongTrunc:
# __long__ should be ignored in 3.x
def __long__(self):
return 42
def __trunc__(self):
return 1729
self.assertEqual(int(LongTrunc()), 1729)
@support.requires_IEEE_754
def test_float_conversion(self):
exact_values = [0, 1, 2,
2**53-3,
2**53-2,
2**53-1,
2**53,
2**53+2,
2**54-4,
2**54-2,
2**54,
2**54+4]
for x in exact_values:
self.assertEqual(float(x), x)
self.assertEqual(float(-x), -x)
# test round-half-even
for x, y in [(1, 0), (2, 2), (3, 4), (4, 4), (5, 4), (6, 6), (7, 8)]:
for p in range(15):
self.assertEqual(int(float(2**p*(2**53+x))), 2**p*(2**53+y))
for x, y in [(0, 0), (1, 0), (2, 0), (3, 4), (4, 4), (5, 4), (6, 8),
(7, 8), (8, 8), (9, 8), (10, 8), (11, 12), (12, 12),
(13, 12), (14, 16), (15, 16)]:
for p in range(15):
self.assertEqual(int(float(2**p*(2**54+x))), 2**p*(2**54+y))
# behaviour near extremes of floating-point range
int_dbl_max = int(DBL_MAX)
top_power = 2**DBL_MAX_EXP
halfway = (int_dbl_max + top_power)//2
self.assertEqual(float(int_dbl_max), DBL_MAX)
self.assertEqual(float(int_dbl_max+1), DBL_MAX)
self.assertEqual(float(halfway-1), DBL_MAX)
self.assertRaises(OverflowError, float, halfway)
self.assertEqual(float(1-halfway), -DBL_MAX)
self.assertRaises(OverflowError, float, -halfway)
self.assertRaises(OverflowError, float, top_power-1)
self.assertRaises(OverflowError, float, top_power)
self.assertRaises(OverflowError, float, top_power+1)
self.assertRaises(OverflowError, float, 2*top_power-1)
self.assertRaises(OverflowError, float, 2*top_power)
self.assertRaises(OverflowError, float, top_power*top_power)
for p in range(100):
x = 2**p * (2**53 + 1) + 1
y = 2**p * (2**53 + 2)
self.assertEqual(int(float(x)), y)
x = 2**p * (2**53 + 1)
y = 2**p * 2**53
self.assertEqual(int(float(x)), y)
def test_float_overflow(self):
for x in -2.0, -1.0, 0.0, 1.0, 2.0:
self.assertEqual(float(int(x)), x)
shuge = '12345' * 120
huge = 1 << 30000
mhuge = -huge
namespace = {'huge': huge, 'mhuge': mhuge, 'shuge': shuge, 'math': math}
for test in ["float(huge)", "float(mhuge)",
"complex(huge)", "complex(mhuge)",
"complex(huge, 1)", "complex(mhuge, 1)",
"complex(1, huge)", "complex(1, mhuge)",
"1. + huge", "huge + 1.", "1. + mhuge", "mhuge + 1.",
"1. - huge", "huge - 1.", "1. - mhuge", "mhuge - 1.",
"1. * huge", "huge * 1.", "1. * mhuge", "mhuge * 1.",
"1. // huge", "huge // 1.", "1. // mhuge", "mhuge // 1.",
"1. / huge", "huge / 1.", "1. / mhuge", "mhuge / 1.",
"1. ** huge", "huge ** 1.", "1. ** mhuge", "mhuge ** 1.",
"math.sin(huge)", "math.sin(mhuge)",
"math.sqrt(huge)", "math.sqrt(mhuge)", # should do better
# math.floor() of an int returns an int now
##"math.floor(huge)", "math.floor(mhuge)",
]:
self.assertRaises(OverflowError, eval, test, namespace)
# XXX Perhaps float(shuge) can raise OverflowError on some box?
# The comparison should not.
self.assertNotEqual(float(shuge), int(shuge),
"float(shuge) should not equal int(shuge)")
def test_logs(self):
LOG10E = math.log10(math.e)
for exp in list(range(10)) + [100, 1000, 10000]:
value = 10 ** exp
log10 = math.log10(value)
self.assertAlmostEqual(log10, exp)
# log10(value) == exp, so log(value) == log10(value)/log10(e) ==
# exp/LOG10E
expected = exp / LOG10E
log = math.log(value)
self.assertAlmostEqual(log, expected)
for bad in -(1 << 10000), -2, 0:
self.assertRaises(ValueError, math.log, bad)
self.assertRaises(ValueError, math.log10, bad)
def test_mixed_compares(self):
eq = self.assertEqual
        # We're mostly concerned that mixing floats and longs does the
        # right thing, even when longs are too large to fit in a float.
# The safest way to check the results is to use an entirely different
# method, which we do here via a skeletal rational class (which
# represents all Python ints, longs and floats exactly).
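        # (For example, Rat(2.5) represents exactly 5/2 as an integer ratio,
        # so comparisons against arbitrarily large ints involve no rounding.)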
class Rat:
def __init__(self, value):
if isinstance(value, int):
self.n = value
self.d = 1
elif isinstance(value, float):
# Convert to exact rational equivalent.
f, e = math.frexp(abs(value))
assert f == 0 or 0.5 <= f < 1.0
# |value| = f * 2**e exactly
# Suck up CHUNK bits at a time; 28 is enough so that we suck
# up all bits in 2 iterations for all known binary double-
# precision formats, and small enough to fit in an int.
CHUNK = 28
top = 0
# invariant: |value| = (top + f) * 2**e exactly
while f:
f = math.ldexp(f, CHUNK)
digit = int(f)
assert digit >> CHUNK == 0
top = (top << CHUNK) | digit
f -= digit
assert 0.0 <= f < 1.0
e -= CHUNK
# Now |value| = top * 2**e exactly.
if e >= 0:
n = top << e
d = 1
else:
n = top
d = 1 << -e
if value < 0:
n = -n
self.n = n
self.d = d
assert float(n) / float(d) == value
else:
raise TypeError("can't deal with %r" % value)
def _cmp__(self, other):
if not isinstance(other, Rat):
other = Rat(other)
x, y = self.n * other.d, self.d * other.n
return (x > y) - (x < y)
def __eq__(self, other):
return self._cmp__(other) == 0
def __ne__(self, other):
return self._cmp__(other) != 0
def __ge__(self, other):
return self._cmp__(other) >= 0
def __gt__(self, other):
return self._cmp__(other) > 0
def __le__(self, other):
return self._cmp__(other) <= 0
def __lt__(self, other):
return self._cmp__(other) < 0
cases = [0, 0.001, 0.99, 1.0, 1.5, 1e20, 1e200]
# 2**48 is an important boundary in the internals. 2**53 is an
# important boundary for IEEE double precision.
for t in 2.0**48, 2.0**50, 2.0**53:
cases.extend([t - 1.0, t - 0.3, t, t + 0.3, t + 1.0,
int(t-1), int(t), int(t+1)])
cases.extend([0, 1, 2, sys.maxsize, float(sys.maxsize)])
# 1 << 20000 should exceed all double formats. int(1e200) is to
# check that we get equality with 1e200 above.
t = int(1e200)
cases.extend([0, 1, 2, 1 << 20000, t-1, t, t+1])
cases.extend([-x for x in cases])
for x in cases:
Rx = Rat(x)
for y in cases:
Ry = Rat(y)
Rcmp = (Rx > Ry) - (Rx < Ry)
xycmp = (x > y) - (x < y)
eq(Rcmp, xycmp, Frm("%r %r %d %d", x, y, Rcmp, xycmp))
eq(x == y, Rcmp == 0, Frm("%r == %r %d", x, y, Rcmp))
eq(x != y, Rcmp != 0, Frm("%r != %r %d", x, y, Rcmp))
eq(x < y, Rcmp < 0, Frm("%r < %r %d", x, y, Rcmp))
eq(x <= y, Rcmp <= 0, Frm("%r <= %r %d", x, y, Rcmp))
eq(x > y, Rcmp > 0, Frm("%r > %r %d", x, y, Rcmp))
eq(x >= y, Rcmp >= 0, Frm("%r >= %r %d", x, y, Rcmp))
def test__format__(self):
self.assertEqual(format(123456789, 'd'), '123456789')
self.assertEqual(format(123456789, 'd'), '123456789')
# sign and aligning are interdependent
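        # ('-' is the default: a sign only for negatives; '+' always shows a
        # sign; ' ' puts a leading space on non-negative numbers.)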
self.assertEqual(format(1, "-"), '1')
self.assertEqual(format(-1, "-"), '-1')
self.assertEqual(format(1, "-3"), ' 1')
self.assertEqual(format(-1, "-3"), ' -1')
self.assertEqual(format(1, "+3"), ' +1')
self.assertEqual(format(-1, "+3"), ' -1')
self.assertEqual(format(1, " 3"), ' 1')
self.assertEqual(format(-1, " 3"), ' -1')
self.assertEqual(format(1, " "), ' 1')
self.assertEqual(format(-1, " "), '-1')
# hex
self.assertEqual(format(3, "x"), "3")
self.assertEqual(format(3, "X"), "3")
self.assertEqual(format(1234, "x"), "4d2")
self.assertEqual(format(-1234, "x"), "-4d2")
self.assertEqual(format(1234, "8x"), " 4d2")
self.assertEqual(format(-1234, "8x"), " -4d2")
self.assertEqual(format(1234, "x"), "4d2")
self.assertEqual(format(-1234, "x"), "-4d2")
self.assertEqual(format(-3, "x"), "-3")
self.assertEqual(format(-3, "X"), "-3")
self.assertEqual(format(int('be', 16), "x"), "be")
self.assertEqual(format(int('be', 16), "X"), "BE")
self.assertEqual(format(-int('be', 16), "x"), "-be")
self.assertEqual(format(-int('be', 16), "X"), "-BE")
        # binary
self.assertEqual(format(3, "b"), "11")
self.assertEqual(format(-3, "b"), "-11")
self.assertEqual(format(1234, "b"), "10011010010")
self.assertEqual(format(-1234, "b"), "-10011010010")
self.assertEqual(format(1234, "-b"), "10011010010")
self.assertEqual(format(-1234, "-b"), "-10011010010")
self.assertEqual(format(1234, " b"), " 10011010010")
self.assertEqual(format(-1234, " b"), "-10011010010")
self.assertEqual(format(1234, "+b"), "+10011010010")
self.assertEqual(format(-1234, "+b"), "-10011010010")
# make sure these are errors
self.assertRaises(ValueError, format, 3, "1.3") # precision disallowed
self.assertRaises(ValueError, format, 3, "+c") # sign not allowed
# with 'c'
# ensure that only int and float type specifiers work
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
if not format_spec in 'bcdoxXeEfFgGn%':
self.assertRaises(ValueError, format, 0, format_spec)
self.assertRaises(ValueError, format, 1, format_spec)
self.assertRaises(ValueError, format, -1, format_spec)
self.assertRaises(ValueError, format, 2**100, format_spec)
self.assertRaises(ValueError, format, -(2**100), format_spec)
# ensure that float type specifiers work; format converts
# the int to a float
for format_spec in 'eEfFgG%':
for value in [0, 1, -1, 100, -100, 1234567890, -1234567890]:
self.assertEqual(format(value, format_spec),
format(float(value), format_spec))
def test_nan_inf(self):
self.assertRaises(OverflowError, int, float('inf'))
self.assertRaises(OverflowError, int, float('-inf'))
self.assertRaises(ValueError, int, float('nan'))
def test_true_division(self):
huge = 1 << 40000
mhuge = -huge
self.assertEqual(huge / huge, 1.0)
self.assertEqual(mhuge / mhuge, 1.0)
self.assertEqual(huge / mhuge, -1.0)
self.assertEqual(mhuge / huge, -1.0)
self.assertEqual(1 / huge, 0.0)
self.assertEqual(1 / huge, 0.0)
self.assertEqual(1 / mhuge, 0.0)
self.assertEqual(1 / mhuge, 0.0)
self.assertEqual((666 * huge + (huge >> 1)) / huge, 666.5)
self.assertEqual((666 * mhuge + (mhuge >> 1)) / mhuge, 666.5)
self.assertEqual((666 * huge + (huge >> 1)) / mhuge, -666.5)
self.assertEqual((666 * mhuge + (mhuge >> 1)) / huge, -666.5)
self.assertEqual(huge / (huge << 1), 0.5)
self.assertEqual((1000000 * huge) / huge, 1000000)
namespace = {'huge': huge, 'mhuge': mhuge}
for overflow in ["float(huge)", "float(mhuge)",
"huge / 1", "huge / 2", "huge / -1", "huge / -2",
"mhuge / 100", "mhuge / 200"]:
self.assertRaises(OverflowError, eval, overflow, namespace)
for underflow in ["1 / huge", "2 / huge", "-1 / huge", "-2 / huge",
"100 / mhuge", "200 / mhuge"]:
result = eval(underflow, namespace)
self.assertEqual(result, 0.0,
"expected underflow to 0 from %r" % underflow)
for zero in ["huge / 0", "mhuge / 0"]:
self.assertRaises(ZeroDivisionError, eval, zero, namespace)
def check_truediv(self, a, b, skip_small=True):
"""Verify that the result of a/b is correctly rounded, by
comparing it with a pure Python implementation of correctly
rounded division. b should be nonzero."""
# skip check for small a and b: in this case, the current
# implementation converts the arguments to float directly and
# then applies a float division. This can give doubly-rounded
# results on x87-using machines (particularly 32-bit Linux).
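        # (On x87, intermediates are kept in 80-bit extended precision and
        # rounded again when stored to 64 bits; this double rounding can
        # perturb the last bit of the quotient -- see the 1/2731 case below.)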
if skip_small and max(abs(a), abs(b)) < 2**DBL_MANT_DIG:
return
try:
# use repr so that we can distinguish between -0.0 and 0.0
expected = repr(truediv(a, b))
except OverflowError:
expected = 'overflow'
except ZeroDivisionError:
expected = 'zerodivision'
try:
got = repr(a / b)
except OverflowError:
got = 'overflow'
except ZeroDivisionError:
got = 'zerodivision'
self.assertEqual(expected, got, "Incorrectly rounded division {}/{}: "
"expected {}, got {}".format(a, b, expected, got))
@support.requires_IEEE_754
def test_correctly_rounded_true_division(self):
# more stringent tests than those above, checking that the
# result of true division of ints is always correctly rounded.
# This test should probably be considered CPython-specific.
# Exercise all the code paths not involving Gb-sized ints.
# ... divisions involving zero
self.check_truediv(123, 0)
self.check_truediv(-456, 0)
self.check_truediv(0, 3)
self.check_truediv(0, -3)
self.check_truediv(0, 0)
# ... overflow or underflow by large margin
self.check_truediv(671 * 12345 * 2**DBL_MAX_EXP, 12345)
self.check_truediv(12345, 345678 * 2**(DBL_MANT_DIG - DBL_MIN_EXP))
# ... a much larger or smaller than b
self.check_truediv(12345*2**100, 98765)
self.check_truediv(12345*2**30, 98765*7**81)
# ... a / b near a boundary: one of 1, 2**DBL_MANT_DIG, 2**DBL_MIN_EXP,
# 2**DBL_MAX_EXP, 2**(DBL_MIN_EXP-DBL_MANT_DIG)
bases = (0, DBL_MANT_DIG, DBL_MIN_EXP,
DBL_MAX_EXP, DBL_MIN_EXP - DBL_MANT_DIG)
for base in bases:
for exp in range(base - 15, base + 15):
self.check_truediv(75312*2**max(exp, 0), 69187*2**max(-exp, 0))
self.check_truediv(69187*2**max(exp, 0), 75312*2**max(-exp, 0))
# overflow corner case
for m in [1, 2, 7, 17, 12345, 7**100,
-1, -2, -5, -23, -67891, -41**50]:
for n in range(-10, 10):
self.check_truediv(m*DBL_MIN_OVERFLOW + n, m)
self.check_truediv(m*DBL_MIN_OVERFLOW + n, -m)
# check detection of inexactness in shifting stage
for n in range(250):
# (2**DBL_MANT_DIG+1)/(2**DBL_MANT_DIG) lies halfway
# between two representable floats, and would usually be
# rounded down under round-half-to-even. The tiniest of
# additions to the numerator should cause it to be rounded
# up instead.
self.check_truediv((2**DBL_MANT_DIG + 1)*12345*2**200 + 2**n,
2**DBL_MANT_DIG*12345)
# 1/2731 is one of the smallest division cases that's subject
# to double rounding on IEEE 754 machines working internally with
# 64-bit precision. On such machines, the next check would fail,
# were it not explicitly skipped in check_truediv.
self.check_truediv(1, 2731)
# a particularly bad case for the old algorithm: gives an
# error of close to 3.5 ulps.
self.check_truediv(295147931372582273023, 295147932265116303360)
for i in range(1000):
self.check_truediv(10**(i+1), 10**i)
self.check_truediv(10**i, 10**(i+1))
# test round-half-to-even behaviour, normal result
for m in [1, 2, 4, 7, 8, 16, 17, 32, 12345, 7**100,
-1, -2, -5, -23, -67891, -41**50]:
for n in range(-10, 10):
self.check_truediv(2**DBL_MANT_DIG*m + n, m)
# test round-half-to-even, subnormal result
for n in range(-20, 20):
self.check_truediv(n, 2**1076)
# largeish random divisions: a/b where |a| <= |b| <=
# 2*|a|; |ans| is between 0.5 and 1.0, so error should
# always be bounded by 2**-54 with equality possible only
# if the least significant bit of q=ans*2**53 is zero.
for M in [10**10, 10**100, 10**1000]:
for i in range(1000):
a = random.randrange(1, M)
b = random.randrange(a, 2*a+1)
self.check_truediv(a, b)
self.check_truediv(-a, b)
self.check_truediv(a, -b)
self.check_truediv(-a, -b)
# and some (genuinely) random tests
for _ in range(10000):
a_bits = random.randrange(1000)
b_bits = random.randrange(1, 1000)
x = random.randrange(2**a_bits)
y = random.randrange(1, 2**b_bits)
self.check_truediv(x, y)
self.check_truediv(x, -y)
self.check_truediv(-x, y)
self.check_truediv(-x, -y)
def test_small_ints(self):
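        # CPython caches the small ints in [-5, 256] as singletons, so any
        # arithmetic whose result lands in that range must return the cached
        # object; that is what the `is` checks below verify. (This behaviour
        # is CPython-specific.)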
for i in range(-5, 257):
self.assertTrue(i is i + 0)
self.assertTrue(i is i * 1)
self.assertTrue(i is i - 0)
self.assertTrue(i is i // 1)
self.assertTrue(i is i & -1)
self.assertTrue(i is i | 0)
self.assertTrue(i is i ^ 0)
self.assertTrue(i is ~~i)
self.assertTrue(i is i**1)
self.assertTrue(i is int(str(i)))
self.assertTrue(i is i<<2>>2, str(i))
# corner cases
i = 1 << 70
self.assertTrue(i - i is 0)
self.assertTrue(0 * i is 0)
def test_bit_length(self):
tiny = 1e-10
for x in range(-65000, 65000):
k = x.bit_length()
# Check equivalence with Python version
self.assertEqual(k, len(bin(x).lstrip('-0b')))
# Behaviour as specified in the docs
if x != 0:
self.assertTrue(2**(k-1) <= abs(x) < 2**k)
else:
self.assertEqual(k, 0)
# Alternative definition: x.bit_length() == 1 + floor(log_2(x))
if x != 0:
# When x is an exact power of 2, numeric errors can
# cause floor(log(x)/log(2)) to be one too small; for
# small x this can be fixed by adding a small quantity
# to the quotient before taking the floor.
self.assertEqual(k, 1 + math.floor(
math.log(abs(x))/math.log(2) + tiny))
self.assertEqual((0).bit_length(), 0)
self.assertEqual((1).bit_length(), 1)
self.assertEqual((-1).bit_length(), 1)
self.assertEqual((2).bit_length(), 2)
self.assertEqual((-2).bit_length(), 2)
for i in [2, 3, 15, 16, 17, 31, 32, 33, 63, 64, 234]:
a = 2**i
self.assertEqual((a-1).bit_length(), i)
self.assertEqual((1-a).bit_length(), i)
self.assertEqual((a).bit_length(), i+1)
self.assertEqual((-a).bit_length(), i+1)
self.assertEqual((a+1).bit_length(), i+1)
self.assertEqual((-a-1).bit_length(), i+1)
def test_round(self):
        # check the round-half-even algorithm for rounding to the nearest
        # ten; the rounding map is invariant under adding multiples of 20
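        # (e.g. round(5, -1) == 0 and round(15, -1) == 20: halfway values go
        # to the even multiple of ten.)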
test_dict = {0:0, 1:0, 2:0, 3:0, 4:0, 5:0,
6:10, 7:10, 8:10, 9:10, 10:10, 11:10, 12:10, 13:10, 14:10,
15:20, 16:20, 17:20, 18:20, 19:20}
for offset in range(-520, 520, 20):
for k, v in test_dict.items():
got = round(k+offset, -1)
expected = v+offset
self.assertEqual(got, expected)
self.assertTrue(type(got) is int)
# larger second argument
self.assertEqual(round(-150, -2), -200)
self.assertEqual(round(-149, -2), -100)
self.assertEqual(round(-51, -2), -100)
self.assertEqual(round(-50, -2), 0)
self.assertEqual(round(-49, -2), 0)
self.assertEqual(round(-1, -2), 0)
self.assertEqual(round(0, -2), 0)
self.assertEqual(round(1, -2), 0)
self.assertEqual(round(49, -2), 0)
self.assertEqual(round(50, -2), 0)
self.assertEqual(round(51, -2), 100)
self.assertEqual(round(149, -2), 100)
self.assertEqual(round(150, -2), 200)
self.assertEqual(round(250, -2), 200)
self.assertEqual(round(251, -2), 300)
self.assertEqual(round(172500, -3), 172000)
self.assertEqual(round(173500, -3), 174000)
self.assertEqual(round(31415926535, -1), 31415926540)
self.assertEqual(round(31415926535, -2), 31415926500)
self.assertEqual(round(31415926535, -3), 31415927000)
self.assertEqual(round(31415926535, -4), 31415930000)
self.assertEqual(round(31415926535, -5), 31415900000)
self.assertEqual(round(31415926535, -6), 31416000000)
self.assertEqual(round(31415926535, -7), 31420000000)
self.assertEqual(round(31415926535, -8), 31400000000)
self.assertEqual(round(31415926535, -9), 31000000000)
self.assertEqual(round(31415926535, -10), 30000000000)
self.assertEqual(round(31415926535, -11), 0)
self.assertEqual(round(31415926535, -12), 0)
self.assertEqual(round(31415926535, -999), 0)
# should get correct results even for huge inputs
for k in range(10, 100):
got = round(10**k + 324678, -3)
expect = 10**k + 325000
self.assertEqual(got, expect)
self.assertTrue(type(got) is int)
# nonnegative second argument: round(x, n) should just return x
for n in range(5):
for i in range(100):
x = random.randrange(-10000, 10000)
got = round(x, n)
self.assertEqual(got, x)
self.assertTrue(type(got) is int)
for huge_n in 2**31-1, 2**31, 2**63-1, 2**63, 2**100, 10**100:
self.assertEqual(round(8979323, huge_n), 8979323)
# omitted second argument
for i in range(100):
x = random.randrange(-10000, 10000)
got = round(x)
self.assertEqual(got, x)
self.assertTrue(type(got) is int)
# bad second argument
bad_exponents = ('brian', 2.0, 0j, None)
for e in bad_exponents:
self.assertRaises(TypeError, round, 3, e)
def test_to_bytes(self):
def check(tests, byteorder, signed=False):
for test, expected in tests.items():
try:
self.assertEqual(
test.to_bytes(len(expected), byteorder, signed=signed),
expected)
except Exception as err:
raise AssertionError(
"failed to convert {0} with byteorder={1} and signed={2}"
.format(test, byteorder, signed)) from err
# Convert integers to signed big-endian byte arrays.
tests1 = {
0: b'\x00',
1: b'\x01',
-1: b'\xff',
-127: b'\x81',
-128: b'\x80',
-129: b'\xff\x7f',
127: b'\x7f',
129: b'\x00\x81',
-255: b'\xff\x01',
-256: b'\xff\x00',
255: b'\x00\xff',
256: b'\x01\x00',
32767: b'\x7f\xff',
-32768: b'\xff\x80\x00',
65535: b'\x00\xff\xff',
-65536: b'\xff\x00\x00',
-8388608: b'\x80\x00\x00'
}
check(tests1, 'big', signed=True)
# Convert integers to signed little-endian byte arrays.
tests2 = {
0: b'\x00',
1: b'\x01',
-1: b'\xff',
-127: b'\x81',
-128: b'\x80',
-129: b'\x7f\xff',
127: b'\x7f',
129: b'\x81\x00',
-255: b'\x01\xff',
-256: b'\x00\xff',
255: b'\xff\x00',
256: b'\x00\x01',
32767: b'\xff\x7f',
-32768: b'\x00\x80',
65535: b'\xff\xff\x00',
-65536: b'\x00\x00\xff',
-8388608: b'\x00\x00\x80'
}
check(tests2, 'little', signed=True)
# Convert integers to unsigned big-endian byte arrays.
tests3 = {
0: b'\x00',
1: b'\x01',
127: b'\x7f',
128: b'\x80',
255: b'\xff',
256: b'\x01\x00',
32767: b'\x7f\xff',
32768: b'\x80\x00',
65535: b'\xff\xff',
65536: b'\x01\x00\x00'
}
check(tests3, 'big', signed=False)
# Convert integers to unsigned little-endian byte arrays.
tests4 = {
0: b'\x00',
1: b'\x01',
127: b'\x7f',
128: b'\x80',
255: b'\xff',
256: b'\x00\x01',
32767: b'\xff\x7f',
32768: b'\x00\x80',
65535: b'\xff\xff',
65536: b'\x00\x00\x01'
}
check(tests4, 'little', signed=False)
self.assertRaises(OverflowError, (256).to_bytes, 1, 'big', signed=False)
self.assertRaises(OverflowError, (256).to_bytes, 1, 'big', signed=True)
self.assertRaises(OverflowError, (256).to_bytes, 1, 'little', signed=False)
self.assertRaises(OverflowError, (256).to_bytes, 1, 'little', signed=True)
        self.assertRaises(OverflowError, (-1).to_bytes, 2, 'big', signed=False)
self.assertRaises(OverflowError, (-1).to_bytes, 2, 'little', signed=False)
self.assertEqual((0).to_bytes(0, 'big'), b'')
self.assertEqual((1).to_bytes(5, 'big'), b'\x00\x00\x00\x00\x01')
self.assertEqual((0).to_bytes(5, 'big'), b'\x00\x00\x00\x00\x00')
self.assertEqual((-1).to_bytes(5, 'big', signed=True),
b'\xff\xff\xff\xff\xff')
self.assertRaises(OverflowError, (1).to_bytes, 0, 'big')
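        # Added sanity check (illustrative, not part of the original suite):
        # to_bytes followed by from_bytes should round-trip any value that
        # fits in the requested width.
        for n in (0, 1, -1, 255, -256, 32767, -32768):
            for order in ('big', 'little'):
                self.assertEqual(
                    int.from_bytes(n.to_bytes(4, order, signed=True),
                                   order, signed=True),
                    n)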
def test_from_bytes(self):
def check(tests, byteorder, signed=False):
for test, expected in tests.items():
try:
self.assertEqual(
int.from_bytes(test, byteorder, signed=signed),
expected)
except Exception as err:
raise AssertionError(
"failed to convert {0} with byteorder={1!r} and signed={2}"
.format(test, byteorder, signed)) from err
# Convert signed big-endian byte arrays to integers.
tests1 = {
b'': 0,
b'\x00': 0,
b'\x00\x00': 0,
b'\x01': 1,
b'\x00\x01': 1,
b'\xff': -1,
b'\xff\xff': -1,
b'\x81': -127,
b'\x80': -128,
b'\xff\x7f': -129,
b'\x7f': 127,
b'\x00\x81': 129,
b'\xff\x01': -255,
b'\xff\x00': -256,
b'\x00\xff': 255,
b'\x01\x00': 256,
b'\x7f\xff': 32767,
b'\x80\x00': -32768,
b'\x00\xff\xff': 65535,
b'\xff\x00\x00': -65536,
b'\x80\x00\x00': -8388608
}
check(tests1, 'big', signed=True)
# Convert signed little-endian byte arrays to integers.
tests2 = {
b'': 0,
b'\x00': 0,
b'\x00\x00': 0,
b'\x01': 1,
b'\x00\x01': 256,
b'\xff': -1,
b'\xff\xff': -1,
b'\x81': -127,
b'\x80': -128,
b'\x7f\xff': -129,
b'\x7f': 127,
b'\x81\x00': 129,
b'\x01\xff': -255,
b'\x00\xff': -256,
b'\xff\x00': 255,
b'\x00\x01': 256,
b'\xff\x7f': 32767,
b'\x00\x80': -32768,
b'\xff\xff\x00': 65535,
b'\x00\x00\xff': -65536,
b'\x00\x00\x80': -8388608
}
check(tests2, 'little', signed=True)
# Convert unsigned big-endian byte arrays to integers.
tests3 = {
b'': 0,
b'\x00': 0,
b'\x01': 1,
b'\x7f': 127,
b'\x80': 128,
b'\xff': 255,
b'\x01\x00': 256,
b'\x7f\xff': 32767,
b'\x80\x00': 32768,
b'\xff\xff': 65535,
b'\x01\x00\x00': 65536,
}
check(tests3, 'big', signed=False)
        # Convert unsigned little-endian byte arrays to integers.
tests4 = {
b'': 0,
b'\x00': 0,
b'\x01': 1,
b'\x7f': 127,
b'\x80': 128,
b'\xff': 255,
b'\x00\x01': 256,
b'\xff\x7f': 32767,
b'\x00\x80': 32768,
b'\xff\xff': 65535,
b'\x00\x00\x01': 65536,
}
check(tests4, 'little', signed=False)
class myint(int):
pass
self.assertTrue(type(myint.from_bytes(b'\x00', 'big')) is myint)
self.assertEqual(myint.from_bytes(b'\x01', 'big'), 1)
self.assertTrue(
type(myint.from_bytes(b'\x00', 'big', signed=False)) is myint)
self.assertEqual(myint.from_bytes(b'\x01', 'big', signed=False), 1)
self.assertTrue(type(myint.from_bytes(b'\x00', 'little')) is myint)
self.assertEqual(myint.from_bytes(b'\x01', 'little'), 1)
self.assertTrue(type(myint.from_bytes(
b'\x00', 'little', signed=False)) is myint)
self.assertEqual(myint.from_bytes(b'\x01', 'little', signed=False), 1)
self.assertEqual(
int.from_bytes([255, 0, 0], 'big', signed=True), -65536)
self.assertEqual(
int.from_bytes((255, 0, 0), 'big', signed=True), -65536)
self.assertEqual(int.from_bytes(
bytearray(b'\xff\x00\x00'), 'big', signed=True), -65536)
self.assertEqual(int.from_bytes(
bytearray(b'\xff\x00\x00'), 'big', signed=True), -65536)
self.assertEqual(int.from_bytes(
array.array('B', b'\xff\x00\x00'), 'big', signed=True), -65536)
self.assertEqual(int.from_bytes(
memoryview(b'\xff\x00\x00'), 'big', signed=True), -65536)
self.assertRaises(ValueError, int.from_bytes, [256], 'big')
self.assertRaises(ValueError, int.from_bytes, [0], 'big\x00')
self.assertRaises(ValueError, int.from_bytes, [0], 'little\x00')
self.assertRaises(TypeError, int.from_bytes, "", 'big')
self.assertRaises(TypeError, int.from_bytes, "\x00", 'big')
self.assertRaises(TypeError, int.from_bytes, 0, 'big')
self.assertRaises(TypeError, int.from_bytes, 0, 'big', True)
self.assertRaises(TypeError, myint.from_bytes, "", 'big')
self.assertRaises(TypeError, myint.from_bytes, "\x00", 'big')
self.assertRaises(TypeError, myint.from_bytes, 0, 'big')
        self.assertRaises(TypeError, myint.from_bytes, 0, 'big', True)
def test_main():
support.run_unittest(LongTest)
if __name__ == "__main__":
test_main()
|
harmy/kbengine
|
kbe/res/scripts/common/Lib/test/test_long.py
|
Python
|
lgpl-3.0
| 48,711
|
[
"Brian"
] |
9734ccae67c944ab54dde8b40882422f7b60ba466c1787068079c02d2a7f245d
|
#!/usr/local/bin/python
import time, sys, os
import numpy as np
np.seterr(invalid='ignore')  # np.errstate() is a no-op unless used as a `with` context manager
from prospect.models import model_setup
from prospect.io import write_results
from prospect import fitting
from prospect.likelihood import lnlike_spec, lnlike_phot, write_log, chi_spec, chi_phot
# --------------
# Read command line arguments
# --------------
sargv = sys.argv
argdict = {'param_file': ''}
clargs = model_setup.parse_args(sargv, argdict=argdict)
run_params = model_setup.get_run_params(argv=sargv, **clargs)
# --------------
# Globals
# --------------
# GP instances as global
spec_noise, phot_noise = model_setup.load_gp(**run_params)
# Model as global
global_model = model_setup.load_model(**run_params)
# Obs as global
global_obs = model_setup.load_obs(**run_params)
# SPS Model instance as global
sps = model_setup.load_sps(**run_params)
# -----------------
# LnP function as global
# ------------------
def lnprobfn(theta, model=None, obs=None, noise=None, sps=sps,
residuals=False, verbose=run_params['verbose']):
"""Given a parameter vector and optionally a dictionary of observational
ata and a model object, return the ln of the posterior. This requires that
an sps object (and if using spectra and gaussian processes, a GP object) be
instantiated.
:param theta:
Input parameter vector, ndarray of shape (ndim,)
:param model:
bsfh.sedmodel model object, with attributes including ``params``, a
dictionary of model parameters. It must also have ``prior_product()``,
and ``mean_model()`` methods defined.
:param obs:
A dictionary of observational data. The keys should be
*``wavelength``
*``spectrum``
*``unc``
*``maggies``
*``maggies_unc``
*``filters``
* and optional spectroscopic ``mask`` and ``phot_mask``.
:returns lnp:
Ln posterior probability.
"""
if model is None:
model = global_model
if obs is None:
obs = global_obs
# Calculate prior probability and exit if not within prior
lnp_prior = model.prior_product(theta)
if not np.isfinite(lnp_prior):
return -np.infty
# Generate mean model
t1 = time.time()
try:
spec, phot, x = model.mean_model(theta, obs, sps=sps)
except(ValueError):
return -np.infty
d1 = time.time() - t1
# Return chi vectors for least-squares optimization
if residuals:
chispec = chi_spec(spec, obs)
chiphot = chi_phot(phot, obs)
return np.concatenate([chispec, chiphot])
# Noise modeling
if spec_noise is not None:
spec_noise.update(**model.params)
if phot_noise is not None:
phot_noise.update(**model.params)
vectors = {'spec': spec, 'unc': obs['unc'],
'sed': model._spec, 'cal': model._speccal,
'phot': phot, 'maggies_unc': obs['maggies_unc']}
# Calculate likelihoods
t2 = time.time()
lnp_spec = lnlike_spec(spec, obs=obs, spec_noise=spec_noise, **vectors)
lnp_phot = lnlike_phot(phot, obs=obs, phot_noise=phot_noise, **vectors)
d2 = time.time() - t2
if verbose:
write_log(theta, lnp_prior, lnp_spec, lnp_phot, d1, d2)
return lnp_prior + lnp_phot + lnp_spec
def chisqfn(theta, model, obs):
"""Negative of lnprobfn for minimization, and also handles passing in
keyword arguments which can only be postional arguments when using scipy
minimize.
"""
return -lnprobfn(theta, model=model, obs=obs)
def chivecfn(theta):
"""Return the residuals instead of a posterior probability or negative
chisq, for use with least-squares optimization methods
"""
return lnprobfn(theta, residuals=True)
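# Example (sketch, not from the original script): evaluating the posterior at
# the model's initial position, assuming the globals above loaded correctly:
#
#     lnp = lnprobfn(global_model.initial_theta)
#
# chivecfn(theta) instead returns the concatenated chi vectors, suitable for
# least-squares optimizers.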
# -----------------
# MPI pool. This must be set up *after* lnprobfn and
# chivecfn are defined, since worker processes only
# execute code up to sys.exit()
# ------------------
try:
from emcee.utils import MPIPool
pool = MPIPool(debug=False, loadbalance=True)
if not pool.is_master():
# Wait for instructions from the master process.
pool.wait()
sys.exit(0)
except(ImportError, ValueError):
pool = None
print('Not using MPI')
def halt(message):
"""Exit, closing pool safely.
"""
print(message)
    try:
        pool.close()
    except Exception:
        # pool may be None if MPI is not in use
        pass
sys.exit(0)
# --------------
# Master branch
# --------------
if __name__ == "__main__":
# --------------
# Setup
# --------------
rp = run_params
rp['sys.argv'] = sys.argv
try:
rp['sps_libraries'] = sps.ssp.libraries
except(AttributeError):
rp['sps_libraries'] = None
# Use the globals
model = global_model
obsdat = global_obs
chi2args = [None, None]
postkwargs = {}
# make zeros into tiny numbers
initial_theta = model.rectify_theta(model.initial_theta)
if rp.get('debug', False):
halt('stopping for debug')
# Try to set up an HDF5 file and write basic info to it
outroot = "{0}_{1}".format(rp['outfile'], int(time.time()))
odir = os.path.dirname(os.path.abspath(outroot))
if (not os.path.exists(odir)):
halt('Target output directory {} does not exist, please make it.'.format(odir))
    # Define the filename outside the try block so it still exists as a
    # fallback below even if h5py is unavailable.
    hfilename = outroot + '_mcmc.h5'
    try:
        import h5py
        hfile = h5py.File(hfilename, "a")
print("Writing to file {}".format(hfilename))
write_results.write_h5_header(hfile, run_params, model)
write_results.write_obs_to_h5(hfile, obsdat)
except(ImportError):
hfile = None
# -----------------------------------------
# Initial guesses using minimization
# -----------------------------------------
if rp['verbose']:
print('Starting minimization...')
if not np.isfinite(model.prior_product(model.initial_theta.copy())):
halt("Halting: initial parameter position has zero prior probability.")
nmin = rp.get('nmin', 1)
if pool is not None:
nmin = max([nmin, pool.size])
if bool(rp.get('do_powell', False)):
from prospect.fitting.fitting import run_minimize
powell_opt = {'ftol': rp['ftol'], 'xtol': 1e-6, 'maxfev': rp['maxfev']}
guesses, pdur, best = run_minimize(obsdat, model, sps, noise=None, lnprobfn=lnprobfn,
min_method='powell', min_opts={"options": powell_opt},
nmin=nmin, pool=pool)
initial_center = fitting.reinitialize(guesses[best].x, model,
edge_trunc=rp.get('edge_trunc', 0.1))
initial_prob = -guesses[best]['fun']
if rp['verbose']:
print('done Powell in {0}s'.format(pdur))
print('best Powell guess:{0}'.format(initial_center))
elif bool(rp.get('do_levenberg', False)):
from prospect.fitting.fitting import run_minimize
lm_opt = {"xtol": 5e-16, "ftol": 5e-16}
guesses, pdur, best = run_minimize(obsdat, model, sps, noise=None, lnprobfn=lnprobfn,
min_method='lm', min_opts=lm_opt,
nmin=nmin, pool=pool)
initial_center = fitting.reinitialize(guesses[best].x, model,
edge_trunc=rp.get('edge_trunc', 0.1))
initial_prob = None
if rp['verbose']:
print('done L-M in {0}s'.format(pdur))
print('best L-M guess:{0}'.format(initial_center))
else:
if rp['verbose']:
print('No minimization requested.')
guesses = None
pdur = 0.0
initial_center = initial_theta.copy()
initial_prob = None
# ---------------------
# Sampling
# -----------------------
if rp['verbose']:
print('emcee sampling...')
tstart = time.time()
out = fitting.run_emcee_sampler(lnprobfn, initial_center, model,
postkwargs=postkwargs, prob0=initial_prob,
pool=pool, hdf5=hfile, **rp)
esampler, burn_p0, burn_prob0 = out
edur = time.time() - tstart
if rp['verbose']:
print('done emcee in {0}s'.format(edur))
# -------------------------
# Output HDF5 (and pickles if asked for)
# -------------------------
print("Writing to {}".format(outroot))
if rp.get("output_pickles", False):
write_results.write_pickles(rp, model, obsdat, esampler, guesses,
outroot=outroot, toptimize=pdur, tsample=edur,
sampling_initial_center=initial_center,
post_burnin_center=burn_p0,
post_burnin_prob=burn_prob0)
if hfile is None:
hfile = hfilename
write_results.write_hdf5(hfile, rp, model, obsdat, esampler, guesses,
toptimize=pdur, tsample=edur,
sampling_initial_center=initial_center,
post_burnin_center=burn_p0,
post_burnin_prob=burn_prob0)
    try:
        hfile.close()
    except Exception:
        # hfile may be a plain filename string, or already closed
        pass
halt('Finished')
|
bd-j/prospector
|
scripts/prospector.py
|
Python
|
mit
| 9,248
|
[
"Gaussian"
] |
7b98da6e80255eaa81dd05592d1238bf59d97d816b6ba88ecfb14403fb902bfe
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorflow.keras import (
activations,
initializers,
regularizers,
constraints,
)
from tensorflow.keras import backend as K
from tensorflow.keras.layers import InputSpec
from typeguard import typechecked
from tensorflow_addons.utils import types
def _scaled_noise(size, dtype):
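    # f(x) = sign(x) * sqrt(|x|), applied elementwise to standard normal
    # samples; this is the transform used to build factorised noise in the
    # NoisyNet paper (https://arxiv.org/abs/1706.10295).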
x = tf.random.normal(shape=size, dtype=dtype)
return tf.sign(x) * tf.sqrt(tf.abs(x))
@tf.keras.utils.register_keras_serializable(package="Addons")
class NoisyDense(tf.keras.layers.Dense):
r"""Noisy dense layer that injects random noise to the weights of dense layer.
Noisy dense layers are fully connected layers whose weights and biases are
augmented by factorised Gaussian noise. The factorised Gaussian noise is
controlled through gradient descent by a second weights layer.
A `NoisyDense` layer implements the operation:
$$
\mathrm{NoisyDense}(x) =
\mathrm{activation}(\mathrm{dot}(x, \mu + (\sigma \cdot \epsilon))
+ \mathrm{bias})
$$
where $\mu$ is the standard weights layer, $\epsilon$ is the factorised
Gaussian noise, and $\sigma$ is a second weights layer which controls
$\epsilon$.
Note: bias only added if `use_bias` is `True`.
Example:
>>> # Create a `Sequential` model and add a NoisyDense
>>> # layer as the first layer.
>>> model = tf.keras.models.Sequential()
>>> model.add(tf.keras.Input(shape=(16,)))
>>> model.add(NoisyDense(32, activation='relu'))
>>> # Now the model will take as input arrays of shape (None, 16)
>>> # and output arrays of shape (None, 32).
>>> # Note that after the first layer, you don't need to specify
>>> # the size of the input anymore:
>>> model.add(NoisyDense(32))
>>> model.output_shape
(None, 32)
    Both noise variants are implemented:
    1. Independent Gaussian noise
    2. Factorised Gaussian noise
    The variant is selected with the `use_factorised` parameter.
Args:
units: Positive integer, dimensionality of the output space.
        sigma: A float between 0 and 1 used as the initial standard deviation
          scale for the Gaussian noise weights (`sigma_kernel` and
          `sigma_bias`). (Only used if `use_factorised=True`.)
        use_factorised: Boolean, whether the layer uses factorised or
          independent Gaussian noise.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
N-D tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
Output shape:
N-D tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
References:
      - [Noisy Networks for Exploration](https://arxiv.org/pdf/1706.10295.pdf)
"""
@typechecked
def __init__(
self,
units: int,
sigma: float = 0.5,
use_factorised: bool = True,
activation: types.Activation = None,
use_bias: bool = True,
kernel_regularizer: types.Regularizer = None,
bias_regularizer: types.Regularizer = None,
activity_regularizer: types.Regularizer = None,
kernel_constraint: types.Constraint = None,
bias_constraint: types.Constraint = None,
**kwargs,
):
super().__init__(
units=units,
activation=activation,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs,
)
delattr(self, "kernel_initializer")
delattr(self, "bias_initializer")
self.sigma = sigma
self.use_factorised = use_factorised
def build(self, input_shape):
# Make sure dtype is correct
dtype = tf.dtypes.as_dtype(self.dtype or K.floatx())
if not (dtype.is_floating or dtype.is_complex):
raise TypeError(
"Unable to build `Dense` layer with non-floating point "
"dtype %s" % (dtype,)
)
input_shape = tf.TensorShape(input_shape)
        self.last_dim = tf.compat.dimension_value(input_shape[-1])
        if self.last_dim is None:
            raise ValueError(
                "The last dimension of the inputs to `Dense` "
                "should be defined. Found `None`."
            )
        # compute after the None check, otherwise `None ** 0.5` raises a
        # confusing TypeError before the intended ValueError
        sqrt_dim = self.last_dim ** (1 / 2)
self.input_spec = InputSpec(min_ndim=2, axes={-1: self.last_dim})
# use factorising Gaussian variables
if self.use_factorised:
mu_init = 1.0 / sqrt_dim
sigma_init = self.sigma / sqrt_dim
# use independent Gaussian variables
else:
mu_init = (3.0 / self.last_dim) ** (1 / 2)
sigma_init = 0.017
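        # (These constants follow the NoisyNet paper: factorised noise uses
        # mu ~ U(-1/sqrt(p), 1/sqrt(p)) and sigma = sigma_0 / sqrt(p); the
        # independent variant uses mu ~ U(-sqrt(3/p), sqrt(3/p)) and
        # sigma = 0.017, with p the input dimension.)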
sigma_init = initializers.Constant(value=sigma_init)
mu_init = initializers.RandomUniform(minval=-mu_init, maxval=mu_init)
# Learnable parameters
self.sigma_kernel = self.add_weight(
"sigma_kernel",
shape=[self.last_dim, self.units],
initializer=sigma_init,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
dtype=self.dtype,
trainable=True,
)
self.mu_kernel = self.add_weight(
"mu_kernel",
shape=[self.last_dim, self.units],
initializer=mu_init,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
dtype=self.dtype,
trainable=True,
)
self.eps_kernel = self.add_weight(
"eps_kernel",
shape=[self.last_dim, self.units],
initializer=initializers.Zeros(),
regularizer=None,
constraint=None,
dtype=self.dtype,
trainable=False,
)
if self.use_bias:
self.sigma_bias = self.add_weight(
"sigma_bias",
shape=[
self.units,
],
initializer=sigma_init,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
dtype=self.dtype,
trainable=True,
)
self.mu_bias = self.add_weight(
"mu_bias",
shape=[
self.units,
],
initializer=mu_init,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
dtype=self.dtype,
trainable=True,
)
self.eps_bias = self.add_weight(
"eps_bias",
shape=[
self.units,
],
initializer=initializers.Zeros(),
regularizer=None,
constraint=None,
dtype=self.dtype,
trainable=False,
)
else:
self.sigma_bias = None
self.mu_bias = None
self.eps_bias = None
self.reset_noise()
self.built = True
@property
def kernel(self):
return self.mu_kernel + (self.sigma_kernel * self.eps_kernel)
@property
def bias(self):
if self.use_bias:
return self.mu_bias + (self.sigma_bias * self.eps_bias)
def reset_noise(self):
"""Create the factorised Gaussian noise."""
if self.use_factorised:
# Generate random noise
in_eps = _scaled_noise([self.last_dim, 1], dtype=self.dtype)
out_eps = _scaled_noise([1, self.units], dtype=self.dtype)
# Scale the random noise
self.eps_kernel.assign(tf.matmul(in_eps, out_eps))
self.eps_bias.assign(out_eps[0])
else:
# generate independent variables
self.eps_kernel.assign(
tf.random.normal(shape=[self.last_dim, self.units], dtype=self.dtype)
)
self.eps_bias.assign(
tf.random.normal(
shape=[
self.units,
],
dtype=self.dtype,
)
)
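    # Typical usage (sketch, not part of this class's API contract):
    # NoisyNet-style agents call `layer.reset_noise()` before each forward
    # pass to draw a fresh noise sample, and `layer.remove_noise()` when
    # acting greedily at evaluation time.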
def remove_noise(self):
"""Remove the factorised Gaussian noise."""
self.eps_kernel.assign(tf.zeros([self.last_dim, self.units], dtype=self.dtype))
self.eps_bias.assign(tf.zeros([self.units], dtype=self.dtype))
def call(self, inputs):
# TODO(WindQAQ): Replace this with `dense()` once public.
return super().call(inputs)
def get_config(self):
# TODO(WindQAQ): Get rid of this hacky way.
config = super(tf.keras.layers.Dense, self).get_config()
config.update(
{
"units": self.units,
"sigma": self.sigma,
"use_factorised": self.use_factorised,
"activation": activations.serialize(self.activation),
"use_bias": self.use_bias,
"kernel_regularizer": regularizers.serialize(self.kernel_regularizer),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"bias_constraint": constraints.serialize(self.bias_constraint),
}
)
return config
|
tensorflow/addons
|
tensorflow_addons/layers/noisy_dense.py
|
Python
|
apache-2.0
| 11,081
|
[
"Gaussian"
] |
7d09eb2c743b069c3baffd5b3c04392d58fdcd0b72fc751c040d0d17cc110cce
|
"""
Student Views
"""
import datetime
import logging
import uuid
import json
import warnings
from collections import defaultdict
from urlparse import urljoin
from pytz import UTC
from requests import HTTPError
from ipware.ip import get_ip
from django.conf import settings
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset_confirm
from django.contrib import messages
from django.core.context_processors import csrf
from django.core import mail
from django.core.urlresolvers import reverse, NoReverseMatch
from django.core.validators import validate_email, ValidationError
from django.db import IntegrityError, transaction
from django.http import (HttpResponse, HttpResponseBadRequest, HttpResponseForbidden,
HttpResponseServerError, Http404)
from django.shortcuts import redirect
from django.utils.encoding import force_bytes, force_text
from django.utils.translation import ungettext
from django.utils.http import base36_to_int, urlsafe_base64_encode
from django.utils.translation import ugettext as _, get_language
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_POST, require_GET
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template.response import TemplateResponse
from ratelimitbackend.exceptions import RateLimitException
from social.apps.django_app import utils as social_utils
from social.backends import oauth as social_oauth
from social.exceptions import AuthException, AuthAlreadyAssociated
from edxmako.shortcuts import render_to_response, render_to_string
from course_modes.models import CourseMode
from shoppingcart.api import order_history
from student.models import (
Registration, UserProfile,
PendingEmailChange, CourseEnrollment, CourseEnrollmentAttribute, unique_id_for_user,
CourseEnrollmentAllowed, UserStanding, LoginFailures,
create_comments_service_user, PasswordHistory, UserSignupSource,
DashboardConfiguration, LinkedInAddToProfileConfiguration, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED)
from student.forms import AccountCreationForm, PasswordResetFormNoActive, get_registration_extension_form
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification # pylint: disable=import-error
from certificates.models import CertificateStatuses, certificate_status_for_student
from certificates.api import ( # pylint: disable=import-error
get_certificate_url,
has_html_certificates_enabled,
)
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore import ModuleStoreEnum
from collections import namedtuple
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date # pylint: disable=import-error
from courseware.access import has_access
from django_comment_common.models import Role
from external_auth.models import ExternalAuthMap
import external_auth.views
from external_auth.login_and_register import (
login as external_auth_login,
register as external_auth_register
)
from bulk_email.models import Optout, CourseAuthorization
from lang_pref import LANGUAGE_KEY
import track.views
import dogstats_wrapper as dog_stats_api
from util.db import outer_atomic
from util.json_request import JsonResponse
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.milestones_helpers import (
get_pre_requisite_courses_not_completed,
)
from microsite_configuration import microsite
from util.password_policy_validators import (
validate_password_length, validate_password_complexity,
validate_password_dictionary
)
import third_party_auth
from third_party_auth import pipeline, provider
from student.helpers import (
check_verify_status_by_course,
auth_pipeline_urls, get_next_url_for_login_page,
DISABLE_UNENROLL_CERT_STATES,
)
from student.cookies import set_logged_in_cookies, delete_logged_in_cookies
from student.models import anonymous_id_for_user
from shoppingcart.models import DonationConfiguration, CourseRegistrationCode
from embargo import api as embargo_api
import analytics
from eventtracking import tracker
# Note that this lives in LMS, so this dependency should be refactored.
from notification_prefs.views import enable_notifications
# Note that this lives in openedx, so this dependency should be refactored.
from openedx.core.djangoapps.credentials.utils import get_user_program_credentials
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
from openedx.core.djangoapps.programs.utils import get_programs_for_dashboard
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")
ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display') # pylint: disable=invalid-name
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
# Disable this warning because it doesn't make sense to completely refactor tests to appease Pylint
# pylint: disable=logging-format-interpolation
def csrf_token(context):
"""A csrf token that can be included in a form."""
token = context.get('csrf_token', '')
if token == 'NOTPROVIDED':
return ''
return (u'<div style="display:none"><input type="hidden"'
' name="csrfmiddlewaretoken" value="%s" /></div>' % (token))
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=AnonymousUser()):
"""
Render the edX main page.
extra_context is used to allow immediate display of certain modal windows, eg signup,
as used by external_auth.
"""
if extra_context is None:
extra_context = {}
courses = get_courses(user)
if microsite.get_value("ENABLE_COURSE_SORTING_BY_START_DATE",
settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"]):
courses = sort_by_start_date(courses)
else:
courses = sort_by_announcement(courses)
context = {'courses': courses}
context['homepage_overlay_html'] = microsite.get_value('homepage_overlay_html')
# This appears to be an unused context parameter, at least for the master templates...
context['show_partners'] = microsite.get_value('show_partners', True)
# TO DISPLAY A YOUTUBE WELCOME VIDEO
# 1) Change False to True
context['show_homepage_promo_video'] = microsite.get_value('show_homepage_promo_video', False)
# 2) Add your video's YouTube ID (11 chars, eg "123456789xX"), or specify via microsite config
# Note: This value should be moved into a configuration setting and plumbed-through to the
# context via the microsite configuration workflow, versus living here
youtube_video_id = microsite.get_value('homepage_promo_video_youtube_id', "your-youtube-id")
context['homepage_promo_video_youtube_id'] = youtube_video_id
# allow for microsite override of the courses list
context['courses_list'] = microsite.get_template_path('courses_list.html')
# Insert additional context for use in the template
context.update(extra_context)
return render_to_response('index.html', context)
def process_survey_link(survey_link, user):
"""
If {UNIQUE_ID} appears in the link, replace it with a unique id for the user.
Currently, this is sha1(user.username). Otherwise, return survey_link.
"""
return survey_link.format(UNIQUE_ID=unique_id_for_user(user))
def cert_info(user, course_overview, course_mode):
"""
Get the certificate info needed to render the dashboard section for the given
student and course.
Arguments:
user (User): A user.
course_overview (CourseOverview): A course.
course_mode (str): The enrollment mode (honor, verified, audit, etc.)
Returns:
dict: Empty dict if certificates are disabled or hidden, or a dictionary with keys:
'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted'
'show_download_url': bool
'download_url': url, only present if show_download_url is True
'show_disabled_download_button': bool -- true if state is 'generating'
'show_survey_button': bool
'survey_url': url, only if show_survey_button is True
'grade': if status is not 'processing'
'can_unenroll': if status allows for unenrollment
"""
if not course_overview.may_certify():
return {}
return _cert_info(
user,
course_overview,
certificate_status_for_student(user, course_overview.id),
course_mode
)
def reverification_info(statuses):
"""
Returns reverification-related information for *all* of user's enrollments whose
reverification status is in statuses.
Args:
statuses (list): a list of reverification statuses we want information for
example: ["must_reverify", "denied"]
Returns:
dictionary of lists: dictionary with one key per status, e.g.
dict["must_reverify"] = []
dict["must_reverify"] = [some information]
"""
reverifications = defaultdict(list)
    # Sort each status list by date
for status in statuses:
if reverifications[status]:
reverifications[status].sort(key=lambda x: x.date)
return reverifications
def get_course_enrollments(user, org_to_include, orgs_to_exclude):
"""
Given a user, return a filtered set of his or her course enrollments.
Arguments:
user (User): the user in question.
org_to_include (str): for use in Microsites. If not None, ONLY courses
of this org will be returned.
orgs_to_exclude (list[str]): If org_to_include is not None, this
argument is ignored. Else, courses of this org will be excluded.
Returns:
generator[CourseEnrollment]: a sequence of enrollments to be displayed
on the user's dashboard.
"""
for enrollment in CourseEnrollment.enrollments_for_user(user):
# If the course is missing or broken, log an error and skip it.
course_overview = enrollment.course_overview
if not course_overview:
log.error(
"User %s enrolled in broken or non-existent course %s",
user.username,
enrollment.course_id
)
continue
# If we are in a Microsite, then filter out anything that is not
# attributed (by ORG) to that Microsite.
if org_to_include and course_overview.location.org != org_to_include:
continue
# Conversely, if we are not in a Microsite, then filter out any enrollments
# with courses attributed (by ORG) to Microsites.
elif course_overview.location.org in orgs_to_exclude:
continue
# Else, include the enrollment.
else:
yield enrollment
def _cert_info(user, course_overview, cert_status, course_mode): # pylint: disable=unused-argument
"""
Implements the logic for cert_info -- split out for testing.
Arguments:
user (User): A user.
course_overview (CourseOverview): A course.
course_mode (str): The enrollment mode (honor, verified, audit, etc.)
"""
# simplify the status for the template using this lookup table
template_state = {
CertificateStatuses.generating: 'generating',
CertificateStatuses.regenerating: 'generating',
CertificateStatuses.downloadable: 'ready',
CertificateStatuses.notpassing: 'notpassing',
CertificateStatuses.restricted: 'restricted',
CertificateStatuses.auditing: 'auditing',
CertificateStatuses.audit_passing: 'auditing',
CertificateStatuses.audit_notpassing: 'auditing',
}
default_status = 'processing'
default_info = {
'status': default_status,
'show_disabled_download_button': False,
'show_download_url': False,
'show_survey_button': False,
'can_unenroll': True,
}
if cert_status is None:
return default_info
is_hidden_status = cert_status['status'] in ('unavailable', 'processing', 'generating', 'notpassing', 'auditing')
if course_overview.certificates_display_behavior == 'early_no_info' and is_hidden_status:
return {}
status = template_state.get(cert_status['status'], default_status)
status_dict = {
'status': status,
'show_download_url': status == 'ready',
'show_disabled_download_button': status == 'generating',
'mode': cert_status.get('mode', None),
'linked_in_url': None,
'can_unenroll': status not in DISABLE_UNENROLL_CERT_STATES,
}
if (status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing') and
course_overview.end_of_course_survey_url is not None):
status_dict.update({
'show_survey_button': True,
'survey_url': process_survey_link(course_overview.end_of_course_survey_url, user)})
else:
status_dict['show_survey_button'] = False
if status == 'ready':
            # show the certificate web view button if the certificate is in the ready state and the feature flags are enabled.
if has_html_certificates_enabled(course_overview.id, course_overview):
if course_overview.has_any_active_web_certificate:
status_dict.update({
'show_cert_web_view': True,
'cert_web_view_url': get_certificate_url(course_id=course_overview.id, uuid=cert_status['uuid'])
})
else:
# don't show download certificate button if we don't have an active certificate for course
status_dict['show_download_url'] = False
elif 'download_url' not in cert_status:
log.warning(
u"User %s has a downloadable cert for %s, but no download url",
user.username,
course_overview.id
)
return default_info
else:
status_dict['download_url'] = cert_status['download_url']
# If enabled, show the LinkedIn "add to profile" button
# Clicking this button sends the user to LinkedIn where they
# can add the certificate information to their profile.
linkedin_config = LinkedInAddToProfileConfiguration.current()
# posting certificates to LinkedIn is not currently
# supported in microsites/White Labels
if linkedin_config.enabled and not microsite.is_request_in_microsite():
status_dict['linked_in_url'] = linkedin_config.add_to_profile_url(
course_overview.id,
course_overview.display_name,
cert_status.get('mode'),
cert_status['download_url']
)
if status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing'):
if 'grade' not in cert_status:
# Note: as of 11/20/2012, we know there are students in this state-- cs169.1x,
# who need to be regraded (we weren't tracking 'notpassing' at first).
# We can add a log.warning here once we think it shouldn't happen.
return default_info
else:
status_dict['grade'] = cert_status['grade']
return status_dict
@ensure_csrf_cookie
def signin_user(request):
"""Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
external_auth_response = external_auth_login(request)
if external_auth_response is not None:
return external_auth_response
# Determine the URL to redirect to following login:
redirect_to = get_next_url_for_login_page(request)
if request.user.is_authenticated():
return redirect(redirect_to)
third_party_auth_error = None
for msg in messages.get_messages(request):
if msg.extra_tags.split()[0] == "social-auth":
# msg may or may not be translated. Try translating [again] in case we are able to:
third_party_auth_error = _(unicode(msg)) # pylint: disable=translation-of-non-string
break
context = {
'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header
# Bool injected into JS to submit form if we're inside a running third-
# party auth pipeline; distinct from the actual instance of the running
# pipeline, if any.
'pipeline_running': 'true' if pipeline.running(request) else 'false',
'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to),
'platform_name': microsite.get_value(
'platform_name',
settings.PLATFORM_NAME
),
'third_party_auth_error': third_party_auth_error
}
return render_to_response('login.html', context)
@ensure_csrf_cookie
def register_user(request, extra_context=None):
"""Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
# Determine the URL to redirect to following login:
redirect_to = get_next_url_for_login_page(request)
if request.user.is_authenticated():
return redirect(redirect_to)
external_auth_response = external_auth_register(request)
if external_auth_response is not None:
return external_auth_response
context = {
'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header
'email': '',
'name': '',
'running_pipeline': None,
'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, redirect_url=redirect_to),
'platform_name': microsite.get_value(
'platform_name',
settings.PLATFORM_NAME
),
'selected_provider': '',
'username': '',
}
if extra_context is not None:
context.update(extra_context)
if context.get("extauth_domain", '').startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
return render_to_response('register-shib.html', context)
# If third-party auth is enabled, prepopulate the form with data from the
# selected provider.
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
current_provider = provider.Registry.get_from_pipeline(running_pipeline)
if current_provider is not None:
overrides = current_provider.get_register_form_data(running_pipeline.get('kwargs'))
overrides['running_pipeline'] = running_pipeline
overrides['selected_provider'] = current_provider.name
context.update(overrides)
return render_to_response('register.html', context)
def complete_course_mode_info(course_id, enrollment, modes=None):
"""
    Compute additional information from the given course modes and the
    user's current enrollment.
    Returns the following information:
    - whether to show the course upsell information
    - number of days until upselling is no longer possible
"""
if modes is None:
modes = CourseMode.modes_for_course_dict(course_id)
mode_info = {'show_upsell': False, 'days_for_upsell': None}
# we want to know if the user is already enrolled as verified or credit and
# if verified is an option.
if CourseMode.VERIFIED in modes and enrollment.mode in CourseMode.UPSELL_TO_VERIFIED_MODES:
mode_info['show_upsell'] = True
# if there is an expiration date, find out how long from now it is
if modes['verified'].expiration_datetime:
today = datetime.datetime.now(UTC).date()
mode_info['days_for_upsell'] = (modes['verified'].expiration_datetime.date() - today).days
return mode_info
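# Illustrative example (not part of the original module): for a user enrolled
# in an upsellable mode in a course whose verified mode expires in 10 days,
# complete_course_mode_info(...) returns
# {'show_upsell': True, 'days_for_upsell': 10}.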
def is_course_blocked(request, redeemed_registration_codes, course_key):
"""Checking either registration is blocked or not ."""
blocked = False
for redeemed_registration in redeemed_registration_codes:
# registration codes may be generated via Bulk Purchase Scenario
# we have to check only for the invoice generated registration codes
# that their invoice is valid or not
if redeemed_registration.invoice_item:
if not redeemed_registration.invoice_item.invoice.is_valid:
blocked = True
# disabling email notifications for unpaid registration courses
Optout.objects.get_or_create(user=request.user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
request.user.username,
request.user.email,
course_key,
)
track.views.server_track(
request,
"change-email1-settings",
{"receive_emails": "no", "course": course_key.to_deprecated_string()},
page='dashboard',
)
break
return blocked
@login_required
@ensure_csrf_cookie
def dashboard(request):
user = request.user
platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME)
# for microsites, we want to filter and only show enrollments for courses
# within the microsite's 'ORG'
course_org_filter = microsite.get_value('course_org_filter')
# Let's filter out any courses in an "org" that has been declared to be
# in a Microsite
org_filter_out_set = microsite.get_all_orgs()
# remove our current Microsite from the "filter out" list, if applicable
if course_org_filter:
org_filter_out_set.remove(course_org_filter)
# Build our (course, enrollment) list for the user, but ignore any courses that no
# longer exist (because the course IDs have changed). Still, we don't delete those
# enrollments, because it could have been a data push snafu.
course_enrollments = list(get_course_enrollments(user, course_org_filter, org_filter_out_set))
# sort the enrollment pairs by the enrollment date
course_enrollments.sort(key=lambda x: x.created, reverse=True)
# Retrieve the course modes for each course
enrolled_course_ids = [enrollment.course_id for enrollment in course_enrollments]
__, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids)
course_modes_by_course = {
course_id: {
mode.slug: mode
for mode in modes
}
for course_id, modes in unexpired_course_modes.iteritems()
}
# Check to see if the student has recently enrolled in a course.
# If so, display a notification message confirming the enrollment.
enrollment_message = _create_recent_enrollment_message(
course_enrollments, course_modes_by_course
)
course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True)
message = ""
if not user.is_active:
message = render_to_string(
'registration/activate_account_notice.html',
{'email': user.email, 'platform_name': platform_name}
)
# Global staff can see what courses errored on their dashboard
staff_access = False
errored_courses = {}
if has_access(user, 'staff', 'global'):
# Show any courses that errored on load
staff_access = True
errored_courses = modulestore().get_errored_courses()
show_courseware_links_for = frozenset(
enrollment.course_id for enrollment in course_enrollments
if has_access(request.user, 'load', enrollment.course_overview)
and has_access(request.user, 'view_courseware_with_prerequisites', enrollment.course_overview)
)
# Get any programs associated with courses being displayed.
# This is passed along in the template context to allow rendering of
# program-related information on the dashboard.
course_programs = _get_course_programs(user, [enrollment.course_id for enrollment in course_enrollments])
xseries_credentials = _get_xseries_credentials(user)
# Construct a dictionary of course mode information
# used to render the course list. We re-use the course modes dict
# we loaded earlier to avoid hitting the database.
course_mode_info = {
enrollment.course_id: complete_course_mode_info(
enrollment.course_id, enrollment,
modes=course_modes_by_course[enrollment.course_id]
)
for enrollment in course_enrollments
}
# Determine the per-course verification status
# This is a dictionary in which the keys are course locators
# and the values are one of:
#
# VERIFY_STATUS_NEED_TO_VERIFY
# VERIFY_STATUS_SUBMITTED
# VERIFY_STATUS_APPROVED
# VERIFY_STATUS_MISSED_DEADLINE
#
# Each of which correspond to a particular message to display
# next to the course on the dashboard.
#
# If a course is not included in this dictionary,
# there is no verification messaging to display.
verify_status_by_course = check_verify_status_by_course(user, course_enrollments)
cert_statuses = {
enrollment.course_id: cert_info(request.user, enrollment.course_overview, enrollment.mode)
for enrollment in course_enrollments
}
# only show email settings for Mongo courses and when bulk email is turned on
show_email_settings_for = frozenset(
enrollment.course_id for enrollment in course_enrollments if (
settings.FEATURES['ENABLE_INSTRUCTOR_EMAIL'] and
modulestore().get_modulestore_type(enrollment.course_id) != ModuleStoreEnum.Type.xml and
CourseAuthorization.instructor_email_enabled(enrollment.course_id)
)
)
# Verification Attempts
# Used to generate the "you must reverify for course x" banner
verification_status, verification_msg = SoftwareSecurePhotoVerification.user_status(user)
# Gets data for midcourse reverifications, if any are necessary or have failed
statuses = ["approved", "denied", "pending", "must_reverify"]
reverifications = reverification_info(statuses)
show_refund_option_for = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.refundable()
)
block_courses = frozenset(
enrollment.course_id for enrollment in course_enrollments
if is_course_blocked(
request,
CourseRegistrationCode.objects.filter(
course_id=enrollment.course_id,
registrationcoderedemption__redeemed_by=request.user
),
enrollment.course_id
)
)
enrolled_courses_either_paid = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.is_paid_course()
)
# If there are *any* denied reverifications that have not been toggled off,
# we'll display the banner
denied_banner = any(item.display for item in reverifications["denied"])
# Populate the Order History for the side-bar.
order_history_list = order_history(user, course_org_filter=course_org_filter, org_filter_out_set=org_filter_out_set)
# get list of courses having pre-requisites yet to be completed
courses_having_prerequisites = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.course_overview.pre_requisite_courses
)
courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites)
if 'notlive' in request.GET:
redirect_message = _("The course you are looking for does not start until {date}.").format(
date=request.GET['notlive']
)
else:
redirect_message = ''
context = {
'enrollment_message': enrollment_message,
'redirect_message': redirect_message,
'course_enrollments': course_enrollments,
'course_optouts': course_optouts,
'message': message,
'staff_access': staff_access,
'errored_courses': errored_courses,
'show_courseware_links_for': show_courseware_links_for,
'all_course_modes': course_mode_info,
'cert_statuses': cert_statuses,
'credit_statuses': _credit_statuses(user, course_enrollments),
'show_email_settings_for': show_email_settings_for,
'reverifications': reverifications,
'verification_status': verification_status,
'verification_status_by_course': verify_status_by_course,
'verification_msg': verification_msg,
'show_refund_option_for': show_refund_option_for,
'block_courses': block_courses,
'denied_banner': denied_banner,
'billing_email': settings.PAYMENT_SUPPORT_EMAIL,
'user': user,
'logout_url': reverse(logout_user),
'platform_name': platform_name,
'enrolled_courses_either_paid': enrolled_courses_either_paid,
'provider_states': [],
'order_history_list': order_history_list,
'courses_requirements_not_met': courses_requirements_not_met,
'nav_hidden': True,
'course_programs': course_programs,
'disable_courseware_js': True,
'xseries_credentials': xseries_credentials,
}
return render_to_response('dashboard.html', context)
def _create_recent_enrollment_message(course_enrollments, course_modes): # pylint: disable=invalid-name
"""
Builds a recent course enrollment message.
Constructs a new message template based on any recent course enrollments
for the student.
Args:
course_enrollments (list[CourseEnrollment]): a list of course enrollments.
course_modes (dict): Mapping of course ID's to course mode dictionaries.
Returns:
A string representing the HTML message output from the message template.
None if there are no recently enrolled courses.
"""
recently_enrolled_courses = _get_recently_enrolled_courses(course_enrollments)
if recently_enrolled_courses:
enroll_messages = [
{
"course_id": enrollment.course_overview.id,
"course_name": enrollment.course_overview.display_name,
"allow_donation": _allow_donation(course_modes, enrollment.course_overview.id, enrollment)
}
for enrollment in recently_enrolled_courses
]
platform_name = microsite.get_value('platform_name', settings.PLATFORM_NAME)
return render_to_string(
'enrollment/course_enrollment_message.html',
{'course_enrollment_messages': enroll_messages, 'platform_name': platform_name}
)
def _get_recently_enrolled_courses(course_enrollments):
"""
Given a list of enrollments, filter out all but recent enrollments.
Args:
course_enrollments (list[CourseEnrollment]): A list of course enrollments.
Returns:
list[CourseEnrollment]: A list of recent course enrollments.
"""
seconds = DashboardConfiguration.current().recent_enrollment_time_delta
time_delta = (datetime.datetime.now(UTC) - datetime.timedelta(seconds=seconds))
return [
enrollment for enrollment in course_enrollments
# If the enrollment has no created date, we are explicitly excluding the course
# from the list of recent enrollments.
if enrollment.is_active and enrollment.created > time_delta
]
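# Illustrative sketch (hypothetical configuration): with a
# recent_enrollment_time_delta of 600 seconds, only active enrollments
# created within the last ten minutes survive the filter.
#
#   >>> DashboardConfiguration.current().recent_enrollment_time_delta
#   600
#   >>> len(_get_recently_enrolled_courses(course_enrollments))
#   1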
def _allow_donation(course_modes, course_id, enrollment):
"""Determines if the dashboard will request donations for the given course.
Check if donations are configured for the platform, and if the current course is accepting donations.
Args:
course_modes (dict): Mapping of course ID's to course mode dictionaries.
course_id (str): The unique identifier for the course.
enrollment(CourseEnrollment): The enrollment object in which the user is enrolled
Returns:
True if the course is allowing donations.
"""
donations_enabled = DonationConfiguration.current().enabled
return (
donations_enabled and
enrollment.mode in course_modes[course_id] and
course_modes[course_id][enrollment.mode].min_price == 0
)
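# Illustrative sketch (hypothetical data): donations are requested only when
# the feature is enabled platform-wide and the user's current mode is free,
# e.g. an honor enrollment in a course that also sells verified certificates.
#
#   >>> DonationConfiguration.current().enabled
#   True
#   >>> course_modes[course_id][enrollment.mode].min_price
#   0
#   >>> _allow_donation(course_modes, course_id, enrollment)
#   True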
def _update_email_opt_in(request, org):
"""Helper function used to hit the profile API if email opt-in is enabled."""
email_opt_in = request.POST.get('email_opt_in')
if email_opt_in is not None:
email_opt_in_boolean = email_opt_in == 'true'
preferences_api.update_email_opt_in(request.user, org, email_opt_in_boolean)
def _credit_statuses(user, course_enrollments):
"""
Retrieve the status for credit courses.
A credit course is a course for which a user can purchase
college credit. The current flow is:
1. User becomes eligible for credit (submits verifications, passes the course, etc.)
2. User purchases credit from a particular credit provider.
3. User requests credit from the provider, usually creating an account on the provider's site.
4. The credit provider notifies us whether the user's request for credit has been accepted or rejected.
The dashboard is responsible for communicating the user's state in this flow.
Arguments:
user (User): The currently logged-in user.
course_enrollments (list[CourseEnrollment]): List of enrollments for the
user.
Returns: dict
The returned dictionary has keys that are `CourseKey`s and values that
are dictionaries with:
* eligible (bool): True if the user is eligible for credit in this course.
* deadline (datetime): The deadline for purchasing and requesting credit for this course.
* purchased (bool): Whether the user has purchased credit for this course.
* provider_name (string): The display name of the credit provider.
* provider_status_url (string): A URL the user can visit to check on their credit request status.
* request_status (string): Either "pending", "approved", or "rejected"
* error (bool): If true, an unexpected error occurred when retrieving the credit status,
so the user should contact the support team.
Example:
>>> _credit_statuses(user, course_enrollments)
{
CourseKey.from_string("edX/DemoX/Demo_Course"): {
"course_key": "edX/DemoX/Demo_Course",
"eligible": True,
"deadline": 2015-11-23 00:00:00 UTC,
"purchased": True,
"provider_name": "Hogwarts",
"provider_status_url": "http://example.com/status",
"request_status": "pending",
"error": False
}
}
"""
from openedx.core.djangoapps.credit import api as credit_api
# Feature flag off
if not settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY"):
return {}
request_status_by_course = {
request["course_key"]: request["status"]
for request in credit_api.get_credit_requests_for_user(user.username)
}
credit_enrollments = {
enrollment.course_id: enrollment
for enrollment in course_enrollments
if enrollment.mode == "credit"
}
# When a user purchases credit in a course, the user's enrollment
# mode is set to "credit" and an enrollment attribute is set
# with the ID of the credit provider. We retrieve *all* such attributes
# here to minimize the number of database queries.
purchased_credit_providers = {
attribute.enrollment.course_id: attribute.value
for attribute in CourseEnrollmentAttribute.objects.filter(
namespace="credit",
name="provider_id",
enrollment__in=credit_enrollments.values()
).select_related("enrollment")
}
provider_info_by_id = {
provider["id"]: provider
for provider in credit_api.get_credit_providers()
}
statuses = {}
for eligibility in credit_api.get_eligibilities_for_user(user.username):
course_key = CourseKey.from_string(unicode(eligibility["course_key"]))
status = {
"course_key": unicode(course_key),
"eligible": True,
"deadline": eligibility["deadline"],
"purchased": course_key in credit_enrollments,
"provider_name": None,
"provider_status_url": None,
"provider_id": None,
"request_status": request_status_by_course.get(course_key),
"error": False,
}
# If the user has purchased credit, then include information about the credit
# provider from which the user purchased credit.
# We retrieve the provider's ID from an "enrollment attribute" set on the user's
# enrollment when the user's order for credit is fulfilled by the E-Commerce service.
if status["purchased"]:
provider_id = purchased_credit_providers.get(course_key)
if provider_id is None:
status["error"] = True
log.error(
u"Could not find credit provider associated with credit enrollment "
u"for user %s in course %s. The user will not be able to see his or her "
u"credit request status on the student dashboard. This attribute should "
u"have been set when the user purchased credit in the course.",
user.id, course_key
)
else:
provider_info = provider_info_by_id.get(provider_id, {})
status["provider_name"] = provider_info.get("display_name")
status["provider_status_url"] = provider_info.get("status_url")
status["provider_id"] = provider_id
statuses[course_key] = status
return statuses
@transaction.non_atomic_requests
@require_POST
@outer_atomic(read_committed=True)
def change_enrollment(request, check_access=True):
"""
Modify the enrollment status for the logged-in user.
The request parameter must be a POST request (other methods return 405)
that specifies course_id and enrollment_action parameters. If course_id or
enrollment_action is not specified, if course_id is not valid, if
enrollment_action is something other than "enroll" or "unenroll", if
enrollment_action is "enroll" and enrollment is closed for the course, or
if enrollment_action is "unenroll" and the user is not enrolled in the
course, a 400 error will be returned. If the user is not logged in, 403
will be returned; it is important that only this case return 403 so the
front end can redirect the user to a registration or login page when this
happens. This function should only be called from an AJAX request, so
the error messages in the responses should never actually be user-visible.
Args:
request (`Request`): The Django request object
Keyword Args:
check_access (boolean): If True, we check that an accessible course actually
exists for the given course_key before we enroll the student.
The default is True. Code with non-standard flows (e.g. beta
tester invitations) may pass False, but any standard enrollment
flow should leave this check enabled.
Returns:
Response
"""
# Get the user
user = request.user
# Ensure the user is authenticated
if not user.is_authenticated():
return HttpResponseForbidden()
# Ensure we received a course_id
action = request.POST.get("enrollment_action")
if 'course_id' not in request.POST:
return HttpResponseBadRequest(_("Course id not specified"))
try:
course_id = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get("course_id"))
except InvalidKeyError:
log.warning(
u"User %s tried to %s with invalid course id: %s",
user.username,
action,
request.POST.get("course_id"),
)
return HttpResponseBadRequest(_("Invalid course id"))
if action == "enroll":
# Make sure the course exists
# We don't do this check on unenroll; otherwise a course with a bad id could never be unenrolled from
if not modulestore().has_course(course_id):
log.warning(
u"User %s tried to enroll in non-existent course %s",
user.username,
course_id
)
return HttpResponseBadRequest(_("Course id is invalid"))
# Record the user's email opt-in preference
if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
_update_email_opt_in(request, course_id.org)
available_modes = CourseMode.modes_for_course_dict(course_id)
# Check whether the user is blocked from enrolling in this course
# This can occur if the user's IP is on a global blacklist
# or if the user is enrolling in a country in which the course
# is not available.
redirect_url = embargo_api.redirect_if_blocked(
course_id, user=user, ip_address=get_ip(request),
url=request.path
)
if redirect_url:
return HttpResponse(redirect_url)
# Check that auto enrollment is allowed for this course
# (= the course is NOT behind a paywall)
if CourseMode.can_auto_enroll(course_id):
# Enroll the user using the default mode (audit)
# We're assuming that users of the course enrollment table
# will NOT try to look up the course enrollment model
# by its slug. If they do, it's possible (based on the state of the database)
# for no such model to exist, even though we've set the enrollment type
# to "audit".
try:
enroll_mode = CourseMode.auto_enroll_mode(course_id, available_modes)
if enroll_mode:
CourseEnrollment.enroll(user, course_id, check_access=check_access, mode=enroll_mode)
except Exception: # pylint: disable=broad-except
return HttpResponseBadRequest(_("Could not enroll"))
# If we have more than one course mode or professional ed is enabled,
# then send the user to the choose your track page.
# (In the case of no-id-professional/professional ed, this will redirect to a page that
# funnels users directly into the verification / payment flow)
if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes):
return HttpResponse(
reverse("course_modes_choose", kwargs={'course_id': unicode(course_id)})
)
# Otherwise, there is only one mode available (the default)
return HttpResponse()
elif action == "unenroll":
enrollment = CourseEnrollment.get_enrollment(user, course_id)
if not enrollment:
return HttpResponseBadRequest(_("You are not enrolled in this course"))
certificate_info = cert_info(user, enrollment.course_overview, enrollment.mode)
if certificate_info.get('status') in DISABLE_UNENROLL_CERT_STATES:
return HttpResponseBadRequest(_("Your certificate prevents you from unenrolling from this course"))
CourseEnrollment.unenroll(user, course_id)
return HttpResponse()
else:
return HttpResponseBadRequest(_("Enrollment action is invalid"))
# Need different levels of logging
@ensure_csrf_cookie
def login_user(request, error=""): # pylint: disable=too-many-statements,unused-argument
"""AJAX request to log in the user."""
backend_name = None
email = None
password = None
redirect_url = None
response = None
running_pipeline = None
third_party_auth_requested = third_party_auth.is_enabled() and pipeline.running(request)
third_party_auth_successful = False
trumped_by_first_party_auth = bool(request.POST.get('email')) or bool(request.POST.get('password'))
user = None
platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME)
if third_party_auth_requested and not trumped_by_first_party_auth:
# The user has already authenticated via third-party auth and has not
# asked to do first party auth by supplying a username or password. We
# now want to put them through the same logging and cookie calculation
# logic as with first-party auth.
running_pipeline = pipeline.get(request)
username = running_pipeline['kwargs'].get('username')
backend_name = running_pipeline['backend']
third_party_uid = running_pipeline['kwargs']['uid']
requested_provider = provider.Registry.get_from_pipeline(running_pipeline)
try:
user = pipeline.get_authenticated_user(requested_provider, username, third_party_uid)
third_party_auth_successful = True
except User.DoesNotExist:
AUDIT_LOG.warning(
u"Login failed - user with username {username} has no social auth "
"with backend_name {backend_name}".format(
username=username, backend_name=backend_name)
)
message = _(
"You've successfully logged into your {provider_name} account, "
"but this account isn't linked with an {platform_name} account yet."
).format(
platform_name=platform_name,
provider_name=requested_provider.name,
)
message += "<br/><br/>"
message += _(
"Use your {platform_name} username and password to log into {platform_name} below, "
"and then link your {platform_name} account with {provider_name} from your dashboard."
).format(
platform_name=platform_name,
provider_name=requested_provider.name,
)
message += "<br/><br/>"
message += _(
"If you don't have an {platform_name} account yet, "
"click <strong>Register</strong> at the top of the page."
).format(
platform_name=platform_name
)
return HttpResponse(message, content_type="text/plain", status=403)
else:
if 'email' not in request.POST or 'password' not in request.POST:
return JsonResponse({
"success": False,
# TODO: User error message
"value": _('There was an error receiving your login information. Please email us.'),
}) # TODO: this should be status code 400
email = request.POST['email']
password = request.POST['password']
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Unknown user email")
else:
AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email))
# check if the user has a linked shibboleth account, if so, redirect the user to shib-login
# This behavior is pretty much like what gmail does for shibboleth. Try entering some @stanford.edu
# address into the Gmail login.
if settings.FEATURES.get('AUTH_USE_SHIB') and user:
try:
eamap = ExternalAuthMap.objects.get(user=user)
if eamap.external_domain.startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
return JsonResponse({
"success": False,
"redirect": reverse('shib-login'),
}) # TODO: this should be status code 301 # pylint: disable=fixme
except ExternalAuthMap.DoesNotExist:
# This is actually the common case, logging in user without external linked login
AUDIT_LOG.info(u"User %s w/o external auth attempting login", user)
# see if account has been locked out due to excessive login failures
user_found_by_email_lookup = user
if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
if LoginFailures.is_user_locked_out(user_found_by_email_lookup):
lockout_message = _('This account has been temporarily locked due '
'to excessive login failures. Try again later.')
return JsonResponse({
"success": False,
"value": lockout_message,
}) # TODO: this should be status code 429 # pylint: disable=fixme
# see if the user must reset his/her password due to any policy settings
if user_found_by_email_lookup and PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup):
return JsonResponse({
"success": False,
"value": _('Your password has expired due to password policy on this account. You must '
'reset your password before you can log in again. Please click the '
'"Forgot Password" link on this page to reset your password before logging in again.'),
}) # TODO: this should be status code 403 # pylint: disable=fixme
# if the user doesn't exist, we want to set the username to an invalid
# username so that authentication is guaranteed to fail and we can take
# advantage of the ratelimited backend
username = user.username if user else ""
if not third_party_auth_successful:
try:
user = authenticate(username=username, password=password, request=request)
# this occurs when there are too many attempts from the same IP address
except RateLimitException:
return JsonResponse({
"success": False,
"value": _('Too many failed login attempts. Try again later.'),
}) # TODO: this should be status code 429 # pylint: disable=fixme
if user is None:
# tick the failed login counters if the user exists in the database
if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
LoginFailures.increment_lockout_counter(user_found_by_email_lookup)
# if we didn't find this username earlier, the account for this email
# doesn't exist, and doesn't have a corresponding password
if username != "":
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>"
AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id))
else:
AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email))
return JsonResponse({
"success": False,
"value": _('Email or password is incorrect.'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
# successful login, clear failed login attempts counters, if applicable
if LoginFailures.is_feature_enabled():
LoginFailures.clear_lockout_counter(user)
# Track the user's sign in
if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
analytics.identify(user.id, {
'email': email,
'username': username
})
analytics.track(
user.id,
"edx.bi.user.account.authenticated",
{
'category': "conversion",
'label': request.POST.get('course_id'),
'provider': None
},
context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
if user is not None and user.is_active:
try:
# We do not log here, because we have a handler registered
# to perform logging on successful logins.
login(request, user)
if request.POST.get('remember') == 'true':
request.session.set_expiry(604800)
log.debug("Setting user session to never expire")
else:
request.session.set_expiry(0)
except Exception as exc: # pylint: disable=broad-except
AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?")
log.critical("Login failed - Could not create session. Is memcached running?")
log.exception(exc)
raise
redirect_url = None # The AJAX method calling should know the default destination upon success
if third_party_auth_successful:
redirect_url = pipeline.get_complete_url(backend_name)
response = JsonResponse({
"success": True,
"redirect_url": redirect_url,
})
# Ensure that the external marketing site can
# detect that the user is logged in.
return set_logged_in_cookies(request, response, user)
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id))
else:
AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))
reactivation_email_for_user(user)
not_activated_msg = _("This account has not been activated. We have sent another activation "
"message. Please check your email for the activation instructions.")
return JsonResponse({
"success": False,
"value": not_activated_msg,
}) # TODO: this should be status code 400 # pylint: disable=fixme
@csrf_exempt
@require_POST
@social_utils.strategy("social:complete")
def login_oauth_token(request, backend):
"""
Authenticate the client using an OAuth access token by using the token to
retrieve information from a third party and matching that information to an
existing user.
"""
warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning)
backend = request.backend
if isinstance(backend, social_oauth.BaseOAuth1) or isinstance(backend, social_oauth.BaseOAuth2):
if "access_token" in request.POST:
# Tell third party auth pipeline that this is an API call
request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
user = None
try:
user = backend.do_auth(request.POST["access_token"])
except (HTTPError, AuthException):
pass
# do_auth can return a non-User object if it fails
if user and isinstance(user, User):
login(request, user)
return JsonResponse(status=204)
else:
# Ensure user does not re-enter the pipeline
request.social_strategy.clean_partial_pipeline()
return JsonResponse({"error": "invalid_token"}, status=401)
else:
return JsonResponse({"error": "invalid_request"}, status=400)
raise Http404
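# Illustrative request (hypothetical path and token) showing the possible
# outcomes of the branch above:
#
#   POST /login_oauth_token/<backend>/
#       access_token=abc123
#
#   204 -> the token resolved to an existing linked user, who is now logged in
#   401 -> {"error": "invalid_token"}   (provider rejected the token, or no
#          linked user was found)
#   400 -> {"error": "invalid_request"} (no access_token in the POST body)
#   404 -> the backend is not an OAuth1/OAuth2 backend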
@ensure_csrf_cookie
def logout_user(request):
"""
HTTP request to log out the user. Redirects to marketing page.
Deletes both the CSRF and sessionid cookies so the marketing
site can determine the logged in state of the user
"""
# We do not log here, because we have a handler registered
# to perform logging on successful logouts.
logout(request)
if settings.FEATURES.get('AUTH_USE_CAS'):
target = reverse('cas-logout')
else:
target = '/'
response = redirect(target)
delete_logged_in_cookies(response)
return response
@require_GET
@login_required
@ensure_csrf_cookie
def manage_user_standing(request):
"""
Renders the view used to manage user standing. Also displays a table
of user accounts that have been disabled and who disabled them.
"""
if not request.user.is_staff:
raise Http404
all_disabled_accounts = UserStanding.objects.filter(
account_status=UserStanding.ACCOUNT_DISABLED
)
all_disabled_users = [standing.user for standing in all_disabled_accounts]
headers = ['username', 'account_changed_by']
rows = []
for user in all_disabled_users:
row = [user.username, user.standing.changed_by]
rows.append(row)
context = {'headers': headers, 'rows': rows}
return render_to_response("manage_user_standing.html", context)
@require_POST
@login_required
@ensure_csrf_cookie
def disable_account_ajax(request):
"""
Ajax call to change user standing. Endpoint of the form
in manage_user_standing.html
"""
if not request.user.is_staff:
raise Http404
username = request.POST.get('username')
context = {}
if username is None or username.strip() == '':
context['message'] = _('Please enter a username')
return JsonResponse(context, status=400)
account_action = request.POST.get('account_action')
if account_action is None:
context['message'] = _('Please choose an option')
return JsonResponse(context, status=400)
username = username.strip()
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
context['message'] = _("User with username {} does not exist").format(username)
return JsonResponse(context, status=400)
else:
user_account, _success = UserStanding.objects.get_or_create(
user=user, defaults={'changed_by': request.user},
)
if account_action == 'disable':
user_account.account_status = UserStanding.ACCOUNT_DISABLED
context['message'] = _("Successfully disabled {}'s account").format(username)
log.info(u"%s disabled %s's account", request.user, username)
elif account_action == 'reenable':
user_account.account_status = UserStanding.ACCOUNT_ENABLED
context['message'] = _("Successfully reenabled {}'s account").format(username)
log.info(u"%s reenabled %s's account", request.user, username)
else:
context['message'] = _("Unexpected account status")
return JsonResponse(context, status=400)
user_account.changed_by = request.user
user_account.standing_last_changed_at = datetime.datetime.now(UTC)
user_account.save()
return JsonResponse(context)
@login_required
@ensure_csrf_cookie
def change_setting(request):
"""JSON call to change a profile setting: Right now, location"""
# TODO (vshnayder): location is no longer used
u_prof = UserProfile.objects.get(user=request.user) # request.user.profile_cache
if 'location' in request.POST:
u_prof.location = request.POST['location']
u_prof.save()
return JsonResponse({
"success": True,
"location": u_prof.location,
})
class AccountValidationError(Exception):
def __init__(self, message, field):
super(AccountValidationError, self).__init__(message)
self.field = field
@receiver(post_save, sender=User)
def user_signup_handler(sender, **kwargs): # pylint: disable=unused-argument
"""
Handler that saves the user's signup source when the user is created.
"""
if 'created' in kwargs and kwargs['created']:
site = microsite.get_value('SITE_NAME')
if site:
user_signup_source = UserSignupSource(user=kwargs['instance'], site=site)
user_signup_source.save()
log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id))
def _do_create_account(form, custom_form=None):
"""
Given cleaned post variables, create the User and UserProfile objects, as well as the
registration for this user.
Returns a tuple (User, UserProfile, Registration).
Note: this function is also used for creating test users.
"""
errors = {}
errors.update(form.errors)
if custom_form:
errors.update(custom_form.errors)
if errors:
raise ValidationError(errors)
user = User(
username=form.cleaned_data["username"],
email=form.cleaned_data["email"],
is_active=False
)
user.set_password(form.cleaned_data["password"])
registration = Registration()
# TODO: Rearrange so that if part of the process fails, the whole process fails.
# Right now, we can have e.g. no registration e-mail sent out and a zombie account
try:
with transaction.atomic():
user.save()
if custom_form:
custom_model = custom_form.save(commit=False)
custom_model.user = user
custom_model.save()
except IntegrityError:
# Figure out the cause of the integrity error
if len(User.objects.filter(username=user.username)) > 0:
raise AccountValidationError(
_("An account with the Public Username '{username}' already exists.").format(username=user.username),
field="username"
)
elif len(User.objects.filter(email=user.email)) > 0:
raise AccountValidationError(
_("An account with the Email '{email}' already exists.").format(email=user.email),
field="email"
)
else:
raise
# add this account creation to password history
# NOTE, this will be a NOP unless the feature has been turned on in configuration
password_history_entry = PasswordHistory()
password_history_entry.create(user)
registration.register(user)
profile_fields = [
"name", "level_of_education", "gender", "mailing_address", "city", "country", "goals",
"year_of_birth"
]
profile = UserProfile(
user=user,
**{key: form.cleaned_data.get(key) for key in profile_fields}
)
extended_profile = form.cleaned_extended_profile
if extended_profile:
profile.meta = json.dumps(extended_profile)
try:
profile.save()
except Exception: # pylint: disable=broad-except
log.exception("UserProfile creation failed for user {id}.".format(id=user.id))
raise
return (user, profile, registration)
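# Minimal usage sketch (mirrors how auto_auth below builds test users; the
# field values are hypothetical):
#
#   >>> form = AccountCreationForm(
#   ...     data={'username': 'jane', 'email': 'jane@example.com',
#   ...           'password': 'secret', 'name': 'Jane Doe'},
#   ...     tos_required=False,
#   ... )
#   >>> user, profile, registration = _do_create_account(form)
#
# A username or email collision raises AccountValidationError carrying the
# offending field name; any other invalid form data raises ValidationError.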
def create_account_with_params(request, params):
"""
Given a request and a dict of parameters (which may or may not have come
from the request), create an account for the requesting user, including
creating a comments service user object and sending an activation email.
This also takes external/third-party auth into account, updates that as
necessary, and authenticates the user for the request's session.
Does not return anything.
Raises AccountValidationError if an account with the username or email
specified by params already exists, or ValidationError if any of the given
parameters is invalid for any other reason.
Issues with this code:
* It is not transactional. If there is a failure part-way, an incomplete
account will be created and left in the database.
* Third-party auth passwords are not verified. There is a comment that
they are unused, but it would be helpful to have a sanity check that
they are sane.
* It is over 300 lines long (!) and includes disparate functionality, from
registration e-mails to all sorts of other things. It should be broken
up into semantically meaningful functions.
* The user-facing text is rather unfriendly (e.g. "Username must be a
minimum of two characters long" rather than "Please use a username of
at least two characters").
"""
# Copy params so we can modify it; we can't just do dict(params) because if
# params is request.POST, that results in a dict containing lists of values
params = dict(params.items())
# allow for microsites to define their own set of required/optional/hidden fields
extra_fields = microsite.get_value(
'REGISTRATION_EXTRA_FIELDS',
getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
)
# Boolean of whether a 3rd party auth provider and credentials were provided in
# the API so the newly created account can link with the 3rd party account.
#
# Note: this is orthogonal to the 3rd party authentication pipeline that occurs
# when the account is created via the browser and redirect URLs.
should_link_with_social_auth = third_party_auth.is_enabled() and 'provider' in params
if should_link_with_social_auth or (third_party_auth.is_enabled() and pipeline.running(request)):
params["password"] = pipeline.make_random_password()
# if doing signup for an external authorization, then get email, password, name from the eamap
# don't use the ones from the form, since the user could have hacked those
# unless originally we didn't get a valid email or name from the external auth
# TODO: We do not check whether these values meet all necessary criteria, such as email length
do_external_auth = 'ExternalAuthMap' in request.session
if do_external_auth:
eamap = request.session['ExternalAuthMap']
try:
validate_email(eamap.external_email)
params["email"] = eamap.external_email
except ValidationError:
pass
if eamap.external_name.strip() != '':
params["name"] = eamap.external_name
params["password"] = eamap.internal_password
log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"])
extended_profile_fields = microsite.get_value('extended_profile_fields', [])
enforce_password_policy = (
settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and
not do_external_auth
)
# Can't have terms of service for certain SHIB users, like at Stanford
tos_required = (
not settings.FEATURES.get("AUTH_USE_SHIB") or
not settings.FEATURES.get("SHIB_DISABLE_TOS") or
not do_external_auth or
not eamap.external_domain.startswith(
external_auth.views.SHIBBOLETH_DOMAIN_PREFIX
)
)
form = AccountCreationForm(
data=params,
extra_fields=extra_fields,
extended_profile_fields=extended_profile_fields,
enforce_username_neq_password=True,
enforce_password_policy=enforce_password_policy,
tos_required=tos_required,
)
custom_form = get_registration_extension_form(data=params)
# Perform operations within a transaction that are critical to account creation
with transaction.atomic():
# first, create the account
(user, profile, registration) = _do_create_account(form, custom_form)
# next, link the account with social auth, if provided via the API.
# (If the user is using the normal register page, the social auth pipeline does the linking, not this code)
if should_link_with_social_auth:
backend_name = params['provider']
request.social_strategy = social_utils.load_strategy(request)
redirect_uri = reverse('social:complete', args=(backend_name, ))
request.backend = social_utils.load_backend(request.social_strategy, backend_name, redirect_uri)
social_access_token = params.get('access_token')
if not social_access_token:
raise ValidationError({
'access_token': [
_("An access_token is required when passing value ({}) for provider.").format(
params['provider']
)
]
})
request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API
pipeline_user = None
error_message = ""
try:
pipeline_user = request.backend.do_auth(social_access_token, user=user)
except AuthAlreadyAssociated:
error_message = _("The provided access_token is already associated with another user.")
except (HTTPError, AuthException):
error_message = _("The provided access_token is not valid.")
if not pipeline_user or not isinstance(pipeline_user, User):
# Ensure user does not re-enter the pipeline
request.social_strategy.clean_partial_pipeline()
raise ValidationError({'access_token': [error_message]})
# Perform operations that are non-critical parts of account creation
preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())
if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
try:
enable_notifications(user)
except Exception: # pylint: disable=broad-except
log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id))
dog_stats_api.increment("common.student.account_created")
# If the user is registering via 3rd party auth, track which provider they use
third_party_provider = None
running_pipeline = None
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
third_party_provider = provider.Registry.get_from_pipeline(running_pipeline)
# Track the user's registration
if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
identity_args = [
user.id, # pylint: disable=no-member
{
'email': user.email,
'username': user.username,
'name': profile.name,
'age': profile.age,
'education': profile.level_of_education_display,
'address': profile.mailing_address,
'gender': profile.gender_display,
'country': unicode(profile.country),
}
]
if hasattr(settings, 'MAILCHIMP_NEW_USER_LIST_ID'):
identity_args.append({
"MailChimp": {
"listId": settings.MAILCHIMP_NEW_USER_LIST_ID
}
})
analytics.identify(*identity_args)
analytics.track(
user.id,
"edx.bi.user.account.registered",
{
'category': 'conversion',
'label': params.get('course_id'),
'provider': third_party_provider.name if third_party_provider else None
},
context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
create_comments_service_user(user)
# Don't send email if we are:
#
# 1. Doing load testing.
# 2. Random user generation for other forms of testing.
# 3. External auth bypassing activation.
# 4. Have the platform configured to not require e-mail activation.
# 5. Registering a new user using a trusted third party provider (with skip_email_verification=True)
#
# Note that this feature is only tested as a flag set one way or
# the other for *new* systems. We need to be careful about
# changing settings on a running system to make sure no users are
# left in an inconsistent state (or doing a migration if they are).
send_email = (
not settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None) and
not settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING') and
not (do_external_auth and settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH')) and
not (
third_party_provider and third_party_provider.skip_email_verification and
user.email == running_pipeline['kwargs'].get('details', {}).get('email')
)
)
if send_email:
context = {
'name': profile.name,
'key': registration.activation_key,
}
# compose the activation email
subject = render_to_string('emails/activation_email_subject.txt', context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.txt', context)
from_address = microsite.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
try:
if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):
dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']
message = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) +
'-' * 80 + '\n\n' + message)
mail.send_mail(subject, message, from_address, [dest_addr], fail_silently=False)
else:
user.email_user(subject, message, from_address)
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send activation email to user from "%s"', from_address, exc_info=True)
else:
registration.activate()
# Immediately after a user creates an account, we log them in. They are only
# logged in until they close the browser. They can't log in again until they click
# the activation link from the email.
new_user = authenticate(username=user.username, password=params['password'])
login(request, new_user)
request.session.set_expiry(0)
# TODO: there is no error checking here to see that the user actually logged in successfully,
# and is not yet an active user.
if new_user is not None:
AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username))
if do_external_auth:
eamap.user = new_user
eamap.dtsignup = datetime.datetime.now(UTC)
eamap.save()
AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username)
AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap)
if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
log.info('bypassing activation email')
new_user.is_active = True
new_user.save()
AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email))
return new_user
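# Illustrative params sketch (hypothetical values) for the dict consumed
# above; which fields beyond the basics are required is governed by the
# microsite's REGISTRATION_EXTRA_FIELDS configuration:
#
#   params = {
#       'username': 'jane',
#       'email': 'jane@example.com',
#       'password': 'secret',
#       'name': 'Jane Doe',
#       'honor_code': 'true',
#   }
#   new_user = create_account_with_params(request, params)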
@csrf_exempt
def create_account(request, post_override=None):
"""
JSON call to create new edX account.
Used by form in signup_modal.html, which is included into navigation.html
"""
warnings.warn("Please use RegistrationView instead.", DeprecationWarning)
try:
user = create_account_with_params(request, post_override or request.POST)
except AccountValidationError as exc:
return JsonResponse({'success': False, 'value': exc.message, 'field': exc.field}, status=400)
except ValidationError as exc:
field, error_list = next(exc.message_dict.iteritems())
return JsonResponse(
{
"success": False,
"field": field,
"value": error_list[0],
},
status=400
)
redirect_url = None # The AJAX method calling should know the default destination upon success
# Resume the third-party-auth pipeline if necessary.
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
redirect_url = pipeline.get_complete_url(running_pipeline['backend'])
response = JsonResponse({
'success': True,
'redirect_url': redirect_url,
})
set_logged_in_cookies(request, response, user)
return response
def auto_auth(request):
"""
Create or configure a user account, then log in as that user.
Enabled only when
settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true.
Accepts the following querystring parameters:
* `username`, `email`, and `password` for the user account
* `full_name` for the user profile (the user's full name; defaults to the username)
* `staff`: Set to "true" to make the user global staff.
* `superuser`: Set to "true" to make the user a superuser.
* `course_id`: Enroll the student in the course with `course_id`
* `roles`: Comma-separated list of roles to grant the student in the course with `course_id`
* `no_login`: Define this to create the user but not log them in
* `redirect`: Set to "true" to redirect to the course if `course_id` is defined; otherwise redirects to the dashboard
If username, email, or password are not provided, use
randomly generated credentials.
"""
# Generate a unique name to use if none provided
unique_name = uuid.uuid4().hex[0:30]
# Use the params from the request, otherwise use these defaults
username = request.GET.get('username', unique_name)
password = request.GET.get('password', unique_name)
email = request.GET.get('email', unique_name + "@example.com")
full_name = request.GET.get('full_name', username)
is_staff = request.GET.get('staff', None)
is_superuser = request.GET.get('superuser', None)
course_id = request.GET.get('course_id', None)
# mode has to be one of 'honor'/'professional'/'verified'/'audit'/'no-id-professional'/'credit'
enrollment_mode = request.GET.get('enrollment_mode', 'honor')
course_key = None
if course_id:
course_key = CourseLocator.from_string(course_id)
role_names = [v.strip() for v in request.GET.get('roles', '').split(',') if v.strip()]
redirect_when_done = request.GET.get('redirect', '').lower() == 'true'
login_when_done = 'no_login' not in request.GET
form = AccountCreationForm(
data={
'username': username,
'email': email,
'password': password,
'name': full_name,
},
tos_required=False
)
# Attempt to create the account.
# If successful, this will return a tuple containing
# the new user, profile, and registration objects.
try:
user, profile, reg = _do_create_account(form)
except AccountValidationError:
# Attempt to retrieve the existing user.
user = User.objects.get(username=username)
user.email = email
user.set_password(password)
user.save()
profile = UserProfile.objects.get(user=user)
reg = Registration.objects.get(user=user)
# Set the user's global staff bit
if is_staff is not None:
user.is_staff = (is_staff == "true")
user.save()
if is_superuser is not None:
user.is_superuser = (is_superuser == "true")
user.save()
# Activate the user
reg.activate()
reg.save()
# ensure parental consent threshold is met
year = datetime.date.today().year
age_limit = settings.PARENTAL_CONSENT_AGE_LIMIT
profile.year_of_birth = (year - age_limit) - 1
profile.save()
# Enroll the user in a course
if course_key is not None:
CourseEnrollment.enroll(user, course_key, mode=enrollment_mode)
# Apply the roles
for role_name in role_names:
role = Role.objects.get(name=role_name, course_id=course_key)
user.roles.add(role)
# Log in as the user
if login_when_done:
user = authenticate(username=username, password=password)
login(request, user)
create_comments_service_user(user)
# Provide the user with a valid CSRF token
# then return a 200 response unless redirect is true
if redirect_when_done:
# Redirect to course info page if course_id is known
if course_id:
try:
# redirect to course info page in LMS
redirect_url = reverse(
'info',
kwargs={'course_id': course_id}
)
except NoReverseMatch:
# redirect to course outline page in Studio
redirect_url = reverse(
'course_handler',
kwargs={'course_key_string': course_id}
)
else:
try:
# redirect to dashboard for LMS
redirect_url = reverse('dashboard')
except NoReverseMatch:
# redirect to home for Studio
redirect_url = reverse('home')
return redirect(redirect_url)
elif request.META.get('HTTP_ACCEPT') == 'application/json':
response = JsonResponse({
'created_status': u"Logged in" if login_when_done else "Created",
'username': username,
'email': email,
'password': password,
'user_id': user.id, # pylint: disable=no-member
'anonymous_id': anonymous_id_for_user(user, None),
})
else:
success_msg = u"{} user {} ({}) with password {} and user_id {}".format(
u"Logged in" if login_when_done else "Created",
username, email, password, user.id # pylint: disable=no-member
)
response = HttpResponse(success_msg)
response.set_cookie('csrftoken', csrf(request)['csrf_token'])
return response
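# Illustrative querystring (hypothetical path and values): create a staff
# user, enroll it in a course, and land on the course page:
#
#   /auto_auth?username=jane&email=jane@example.com&password=secret
#       &staff=true&course_id=org/course/run&redirect=true
#
# Omitting username/email/password falls back to the random 30-character
# unique_name generated above.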
@ensure_csrf_cookie
def activate_account(request, key):
"""When link in activation e-mail is clicked"""
regs = Registration.objects.filter(activation_key=key)
if len(regs) == 1:
user_logged_in = request.user.is_authenticated()
already_active = True
if not regs[0].user.is_active:
regs[0].activate()
already_active = False
# Enroll student in any pending courses he/she may have if auto_enroll flag is set
student = User.objects.filter(id=regs[0].user_id)
if student:
ceas = CourseEnrollmentAllowed.objects.filter(email=student[0].email)
for cea in ceas:
if cea.auto_enroll:
enrollment = CourseEnrollment.enroll(student[0], cea.course_id)
manual_enrollment_audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(student[0].email)
if manual_enrollment_audit is not None:
# get the enrolled by user and reason from the ManualEnrollmentAudit table.
# then create a new ManualEnrollmentAudit table entry for the same email
# different transition state.
ManualEnrollmentAudit.create_manual_enrollment_audit(
manual_enrollment_audit.enrolled_by, student[0].email, ALLOWEDTOENROLL_TO_ENROLLED,
manual_enrollment_audit.reason, enrollment
)
resp = render_to_response(
"registration/activation_complete.html",
{
'user_logged_in': user_logged_in,
'already_active': already_active
}
)
return resp
if len(regs) == 0:
return render_to_response(
"registration/activation_invalid.html",
{'csrf': csrf(request)['csrf_token']}
)
return HttpResponseServerError(_("Unknown error. Please e-mail us to let us know how it happened."))
@csrf_exempt
@require_POST
def password_reset(request):
""" Attempts to send a password reset e-mail. """
# Add some rate limiting here by re-using the RateLimitMixin as a helper class
limiter = BadRequestRateLimiter()
if limiter.is_rate_limit_exceeded(request):
AUDIT_LOG.warning("Rate limit exceeded in password_reset")
return HttpResponseForbidden()
form = PasswordResetFormNoActive(request.POST)
if form.is_valid():
form.save(use_https=request.is_secure(),
from_email=microsite.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL),
request=request,
domain_override=request.get_host())
# When password change is complete, an "edx.user.settings.changed" event will be emitted.
# But because changing the password is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "password",
"old": None,
"new": None,
"user_id": request.user.id,
}
)
else:
# bad user? tick the rate limiter counter
AUDIT_LOG.info("Bad password_reset user passed in.")
limiter.tick_bad_request_counter(request)
return JsonResponse({
'success': True,
'value': render_to_string('registration/password_reset_done.html', {}),
})
def password_reset_confirm_wrapper(
request,
uidb36=None,
token=None,
):
""" A wrapper around django.contrib.auth.views.password_reset_confirm.
Needed because we want to set the user as active at this step.
"""
# cribbed from django.contrib.auth.views.password_reset_confirm
try:
uid_int = base36_to_int(uidb36)
user = User.objects.get(id=uid_int)
user.is_active = True
user.save()
except (ValueError, User.DoesNotExist):
pass
# tie in password strength enforcement as an optional level of
# security protection
err_msg = None
if request.method == 'POST':
password = request.POST['new_password1']
if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):
try:
validate_password_length(password)
validate_password_complexity(password)
validate_password_dictionary(password)
except ValidationError as err:
err_msg = _('Password: ') + '; '.join(err.messages)
# also, check the password reuse policy
if not PasswordHistory.is_allowable_password_reuse(user, password):
if user.is_staff:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
else:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
# Because of how ngettext works, splitting the following into shorter lines would be ugly.
# pylint: disable=line-too-long
err_msg = ungettext(
"You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.",
"You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.",
num_distinct
).format(num=num_distinct)
# also, check to see if passwords are getting reset too frequently
if PasswordHistory.is_password_reset_too_soon(user):
num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
# Because of how ngettext works, splitting the following into shorter lines would be ugly.
# pylint: disable=line-too-long
err_msg = ungettext(
"You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.",
"You are resetting passwords too frequently. Due to security policies, {num} days must elapse between password resets.",
num_days
).format(num=num_days)
if err_msg:
# We have a password reset attempt that violates a security policy; use the
# existing Django template to communicate this back to the user
context = {
'validlink': True,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': err_msg,
'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
}
return TemplateResponse(request, 'registration/password_reset_confirm.html', context)
else:
# we also want to pass settings.PLATFORM_NAME in as extra_context
extra_context = {"platform_name": microsite.get_value('platform_name', settings.PLATFORM_NAME)}
# Support old password reset URLs that used base36 encoded user IDs.
# https://github.com/django/django/commit/1184d077893ff1bc947e45b00a4d565f3df81776#diff-c571286052438b2e3190f8db8331a92bR231
try:
uidb64 = force_text(urlsafe_base64_encode(force_bytes(base36_to_int(uidb36))))
except ValueError:
uidb64 = '1' # dummy invalid ID (incorrect padding for base64)
if request.method == 'POST':
# remember what the old password hash is before we call down
old_password_hash = user.password
result = password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=extra_context
)
# get the updated user
updated_user = User.objects.get(id=uid_int)
# did the password hash change? if so, record it in the PasswordHistory
if updated_user.password != old_password_hash:
entry = PasswordHistory()
entry.create(updated_user)
return result
else:
return password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=extra_context
)
def reactivation_email_for_user(user):
try:
reg = Registration.objects.get(user=user)
except Registration.DoesNotExist:
return JsonResponse({
"success": False,
"error": _('No inactive user with this e-mail exists'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
context = {
'name': user.profile.name,
'key': reg.activation_key,
}
subject = render_to_string('emails/activation_email_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.txt', context)
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send reactivation email from "%s"', settings.DEFAULT_FROM_EMAIL, exc_info=True)
return JsonResponse({
"success": False,
"error": _('Unable to send reactivation email')
}) # TODO: this should be status code 500 # pylint: disable=fixme
return JsonResponse({"success": True})
def validate_new_email(user, new_email):
"""
    Given a new email for a user, do some basic verification of the new address. If any
    issues are encountered during verification, a ValueError is raised.
"""
try:
validate_email(new_email)
except ValidationError:
raise ValueError(_('Valid e-mail address required.'))
if new_email == user.email:
raise ValueError(_('Old email is the same as the new email.'))
if User.objects.filter(email=new_email).count() != 0:
raise ValueError(_('An account with this e-mail already exists.'))
def do_email_change_request(user, new_email, activation_key=None):
"""
Given a new email for a user, does some basic verification of the new address and sends an activation message
to the new address. If any issues are encountered with verification or sending the message, a ValueError will
be thrown.
"""
pec_list = PendingEmailChange.objects.filter(user=user)
if len(pec_list) == 0:
pec = PendingEmailChange()
pec.user = user
else:
pec = pec_list[0]
    # if activation_key is not passed as an argument, generate a random key
if not activation_key:
activation_key = uuid.uuid4().hex
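        # (uuid4().hex is a 32-character lowercase hex string; any example
        # value, e.g. "6fa459ea...", is purely illustrative.)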
pec.new_email = new_email
pec.activation_key = activation_key
pec.save()
context = {
'key': pec.activation_key,
'old_email': user.email,
'new_email': pec.new_email
}
subject = render_to_string('emails/email_change_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/email_change.txt', context)
from_address = microsite.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
try:
mail.send_mail(subject, message, from_address, [pec.new_email])
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True)
raise ValueError(_('Unable to send email activation link. Please try again later.'))
    # When the email address change is complete, an "edx.user.settings.changed" event will be emitted.
# But because changing the email address is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "email",
"old": context['old_email'],
"new": context['new_email'],
"user_id": user.id,
}
)
@ensure_csrf_cookie
def confirm_email_change(request, key): # pylint: disable=unused-argument
"""
    User requested a new e-mail. This is called when the activation
    link is clicked. We confirm with the old e-mail, and update the account to use the new one.
"""
with transaction.atomic():
try:
pec = PendingEmailChange.objects.get(activation_key=key)
except PendingEmailChange.DoesNotExist:
response = render_to_response("invalid_email_key.html", {})
transaction.set_rollback(True)
return response
user = pec.user
address_context = {
'old_email': user.email,
'new_email': pec.new_email
}
if len(User.objects.filter(email=pec.new_email)) != 0:
response = render_to_response("email_exists.html", {})
transaction.set_rollback(True)
return response
subject = render_to_string('emails/email_change_subject.txt', address_context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/confirm_email_change.txt', address_context)
u_prof = UserProfile.objects.get(user=user)
meta = u_prof.get_meta()
if 'old_emails' not in meta:
meta['old_emails'] = []
meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])
u_prof.set_meta(meta)
u_prof.save()
# Send it to the old email...
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to old address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': user.email})
transaction.set_rollback(True)
return response
user.email = pec.new_email
user.save()
pec.delete()
# And send it to the new email...
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to new address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': pec.new_email})
transaction.set_rollback(True)
return response
response = render_to_response("email_change_successful.html", address_context)
return response
@require_POST
@login_required
@ensure_csrf_cookie
def change_email_settings(request):
"""Modify logged-in user's setting for receiving emails from a course."""
user = request.user
course_id = request.POST.get("course_id")
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
receive_emails = request.POST.get("receive_emails")
if receive_emails:
optout_object = Optout.objects.filter(user=user, course_id=course_key)
if optout_object:
optout_object.delete()
log.info(
u"User %s (%s) opted in to receive emails from course %s",
user.username,
user.email,
course_id,
)
track.views.server_track(
request,
"change-email-settings",
{"receive_emails": "yes", "course": course_id},
page='dashboard',
)
else:
Optout.objects.get_or_create(user=user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
user.username,
user.email,
course_id,
)
track.views.server_track(
request,
"change-email-settings",
{"receive_emails": "no", "course": course_id},
page='dashboard',
)
return JsonResponse({"success": True})
def _get_course_programs(user, user_enrolled_courses): # pylint: disable=invalid-name
"""Build a dictionary of program data required for display on the student dashboard.
Given a user and an iterable of course keys, find all programs relevant to the
user and return them in a dictionary keyed by course key.
Arguments:
user (User): The user to authenticate as when requesting programs.
user_enrolled_courses (list): List of course keys representing the courses in which
the given user has active enrollments.
Returns:
dict, containing programs keyed by course. Empty if programs cannot be retrieved.
"""
course_programs = get_programs_for_dashboard(user, user_enrolled_courses)
programs_data = {}
for course_key, program in course_programs.viewitems():
if program.get('status') == 'active' and program.get('category') == 'xseries':
try:
programs_data[course_key] = {
'course_count': len(program['course_codes']),
'display_name': program['name'],
'category': program.get('category'),
'program_id': program['id'],
'program_marketing_url': urljoin(
settings.MKTG_URLS.get('ROOT'), 'xseries' + '/{}'
).format(program['marketing_slug']),
'display_category': 'XSeries'
}
except KeyError:
log.warning('Program structure is invalid, skipping display: %r', program)
return programs_data
def _get_xseries_credentials(user):
"""Return program credentials data required for display on
the learner dashboard.
Given a user, find all programs for which certificates have been earned
and return list of dictionaries of required program data.
Arguments:
user (User): user object for getting programs credentials.
Returns:
list of dict, containing data corresponding to the programs for which
the user has been awarded a credential.
"""
programs_credentials = get_user_program_credentials(user)
credentials_data = []
for program in programs_credentials:
if program.get('category') == 'xseries':
try:
program_data = {
'display_name': program['name'],
'subtitle': program['subtitle'],
'credential_url': program['credential_url'],
}
credentials_data.append(program_data)
except KeyError:
log.warning('Program structure is invalid: %r', program)
return credentials_data
|
doganov/edx-platform
|
common/djangoapps/student/views.py
|
Python
|
agpl-3.0
| 100,509
|
[
"VisIt"
] |
1b139da2587637bd6691eed9831e28e4ee52b6b7088ece41d70099e8ef9521b2
|
# -*- coding: utf-8 -*-
"""
This module implements the GenDendrite class, which implements generic dendrite
logic.
"""
from neuron import h
class GenDendrite(object):
"""This is the model of a generic dendrite.
Attributes:
name - String (Default None)
Name of the dendrite
secs - list (Default None)
List of the dendrites sections
soma - nrn.Section (Default None)
The soma to which the dendrite connects
Methods:
__init__
mk_secs
conn_soma
set_diam
set_L
Use cases:
>>> myDend = GenDendrite()
Creates a default dendrite without sections
>>> myDend = GenDendrite('myTestDend',4,['prox1','prox2','dist1','dist_2'],
[5,5,8,8], [50,50,70,100])
Creates a dendrite with 4 sections and specified geometry
"""
def __init__(self, dend_name=None, n_secs=None, sec_names=None, diam=None,
L=None):
self.name = dend_name
self.secs = None
self.soma = None
self._i = 0
if n_secs:
self.mk_secs(n_secs, sec_names)
if diam:
self.set_diam(diam)
if L:
self.set_L(L)
    def mk_secs(self, n_secs=1, sec_names=None):
"""Makes sections AND connects them. This is because a dendrite is by
definition made up of connected sections. sec_names has to be a list
of section names with len = n_secs. If sec_names = None the section
names are 'sec' + str(number).
"""
self.secs = []
if sec_names:
if not (hasattr(sec_names, '__getitem__')):
raise TypeError("sec_names should be list or None")
if len(sec_names) != n_secs:
raise ValueError("The len of sec_names must equal n_secs")
for curr_n in range(n_secs):
if sec_names:
self.secs.append(h.Section(name=sec_names[curr_n]))
else:
self.secs.append(h.Section(name='sec' + str(curr_n)))
if curr_n > 0:
self.secs[curr_n].connect(self.secs[curr_n - 1](1))
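                # Each section after the first attaches to the distal end
                # (position 1) of the previous one, giving an unbranched chain.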
def conn_soma(self, soma, soma_loc=1):
"""Connect a soma to the dendrite"""
        if self.soma:
            raise RuntimeError("Soma already connected")
        if not self.secs:
            raise RuntimeError("Dendrite has no sections")
self.soma = soma
self.secs[0].connect(self.soma(soma_loc))
def set_diam(self, diam):
"""Change the diameter of the dendrite"""
        if not self.secs:
            raise RuntimeError("Can't set diameter before sections are made")
        if hasattr(diam, '__getitem__'):
            if len(diam) != len(self.secs):
                raise RuntimeError("List of diams does not fit n_secs")
for idx, curr_seg in enumerate(self.secs):
curr_seg.diam = diam[idx]
return
else:
for curr_seg in self.secs:
curr_seg.diam = diam
def set_L(self, L):
"""Change the length of the dendrite"""
        if not self.secs:
            raise RuntimeError("Can't set L before sections are made")
        if hasattr(L, '__getitem__'):
            if len(L) != len(self.secs):
                raise RuntimeError("List of Ls does not fit the number of sections")
for idx, curr_seg in enumerate(self.secs):
curr_seg.L = L[idx]
return
else:
for curr_seg in self.secs:
curr_seg.L = L
def __iter__(self):
return self
def __next__(self):
if not self.secs:
            raise RuntimeError("No sections created yet")
if self._i < (len(self.secs)):
i = self._i
self._i += 1
return self.secs[i]
else:
self._i = 0
raise StopIteration()
def next(self):
return self.__next__()
    def __getitem__(self, key):
        if isinstance(key, int):
            return self.secs[key]
        else:
            for x in self.secs:
                # nrn.Section exposes its name via the name() method
                if x.name() == key:
                    return x
            raise KeyError('Key not found')
def __len__(self):
return len(self.secs)
|
danielmuellernai/ouropy
|
gendendrite.py
|
Python
|
mit
| 4,318
|
[
"NEURON"
] |
821dfabcc3e7cc7cc448451efb45061161f95f83c3228b66ffa15bc0d556e4f9
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import logging
import os
import stat
from shutil import rmtree
from time import sleep
from dropbox import client, rest, session
# https://www.dropbox.com/static/developers/dropbox-python-sdk-1.5.1-docs/
# If you change the code or fork this project, you MUST change these lines to
# provide your own app key and app secret. You are NOT ALLOWED to use these:
APP_KEY = 'eulow8e1l6vd8rp'
APP_SECRET = '4lvtbphia79iksd'
# For more information browse to https://www.dropbox.com/developers/apps
ACCESS_TYPE = 'app_folder' # 'app_folder' or 'dropbox'
def is_dir(path):
"""Return true if the path refers to an existing directory."""
try:
st = os.stat(path)
except os.error:
return False
return stat.S_ISDIR(st.st_mode)
def mkdir(path):
'''Make directories (if they don't exist already).'''
if not is_dir(path):
os.makedirs(path)
def request_user_authentication(url):
'''This default implementation works on the console. You can implement
another version for your app. For instance, the implementation would be
different for a GUI app. Or maybe in your case you would like to use
xdg-open to open the default browser automatically.
'''
print("Please authorize in your browser and press Enter when done,\n"
"or CTRL+C to cancel:\n{}" \
.format(url))
raw_input()
return True
class AuthenticationFailure(Exception):
pass
class PoorBox(object):
'''This class can be reused in other programs!'''
def load_cache(self):
if os.path.exists(self.cache_path):
with open(self.cache_path, 'r') as f:
self.cache = json.load(f)
else:
self.cache = {}
def save_cache(self):
with open(self.cache_path, 'w') as f:
json.dump(self.cache, f)
def __init__(self, token_key=None, token_secret=None, cursor=None,
cache_path='poorbox.cache', output_dir='POORBOX',
request_user_authentication=request_user_authentication,
app_key=APP_KEY, app_secret=APP_SECRET, access_type=ACCESS_TYPE):
'''``access_type`` can be "app_folder" or "dropbox". The latter gives
you access to an entire dropbox, but is much less likely to be
approved by Dropbox.
``request_user_authentication`` is a callback your app may provide.
'''
self.cache_path = cache_path
self.load_cache()
if token_key:
self.cache['token_key'] = token_key
if token_secret:
self.cache['token_secret'] = token_secret
if cursor:
self.cache['cursor'] = cursor
if token_key or token_secret or cursor:
self.save_cache()
self.output_dir = output_dir
self.request_user_authentication = request_user_authentication
# TODO Test what happens when app_key is incorrect
sess = session.DropboxSession(app_key, app_secret, access_type)
logging.debug('Dropbox session created.')
# Unfortunately the authentication step MUST be interactive:
try:
sess.set_token(self.cache['token_key'], self.cache['token_secret'])
logging.debug('Reusing access token.')
except Exception as e: # KeyError,
print('MAKE A NOTE', type(e), e) # TODO Remove print
# We need a new token
request_token = sess.obtain_request_token()
url = sess.build_authorize_url(request_token)
# Ask user to visit a URL
if not self.request_user_authentication(url):
raise AuthenticationFailure()
# Another API request gives us access:
sess.obtain_access_token(request_token)
logging.debug('Got a new access token. Saving in cache...')
self.cache['token_key'] = sess.token.key
self.cache['token_secret'] = sess.token.secret
self.save_cache()
self.client = client.DropboxClient(sess)
RETRY_FILE = 5
def download_file(self, remote_path, local_path):
# local_path = os.path.expanduser(local_path)
directory, filename = os.path.split(local_path)
mkdir(directory) # create if it does not exist
# Try to download 5 times to handle http 5xx errors from dropbox
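        # (The back-off below grows linearly with the attempt number: 0 s, 8 s,
        # 16 s, ...; if every attempt fails, the loop falls through and the
        # method returns None.)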
for attempt in range(self.RETRY_FILE):
try:
fr = self.client.get_file(remote_path)
with open(local_path, 'wb') as fw:
fw.write(fr.read())
except (rest.ErrorResponse, rest.RESTSocketError) as error:
                logging.debug('An error occurred while downloading a file. '
                              'We attempt this up to {} times. The error was: {}'.format(
                                  self.RETRY_FILE, str(error)))
sleep(attempt * 8)
else:
return local_path
def update(self):
'''https://www.dropbox.com/static/developers/\
dropbox-python-sdk-1.5.1-docs/#dropbox.client.DropboxClient.delta
'''
cursor = self.cache.get('cursor', None)
output_dir = os.path.expanduser(self.output_dir)
mkdir(output_dir)
while True:
resp = self.client.delta(cursor)
logging.debug("Updating from cursor {}".format(cursor))
if resp['reset']:
logging.info("This time, I am deleting the whole tree first. "
"Dropbox tells me to.")
rmtree(output_dir)
mkdir(output_dir)
entries = resp['entries']
num_entries = len(entries)
logg = lambda index, char, path: logging.info("{}/{} {} {}".format(
index + 1, num_entries, char, path))
for index, (path, metadata) in enumerate(entries):
local_path = os.path.join(output_dir, path.lstrip('/'))
if metadata is None: # This means delete!
if is_dir(local_path):
logg(index, 'X', path)
rmtree(local_path)
elif os.path.exists(local_path):
logg(index, 'x', path)
os.remove(local_path)
else:
# "If your local state doesn’t have anything at path,
logg(index, 'I', path) # ignore this entry."
elif metadata['is_dir']:
logg(index, 'D', path)
if os.path.exists(local_path):
if is_dir(local_path):
# TODO Just apply the new metadata to the directory
# u'modified': u'Sat, 23 Feb 2013 20:06:30 +0000'
continue
else: # It’s a file, replace it with the new entry
os.remove(local_path)
mkdir(local_path)
else: # Download a file
logg(index, '↓', path)
self.download_file(path, local_path)
# TODO Apply time to file?
cursor = resp['cursor']
logging.debug("New cursor: {}".format(cursor))
self.cache['cursor'] = cursor
self.save_cache()
if not resp['has_more']:
break
def poorbox_from_config_file(path): # TODO Implement
raise NotImplementedError()
# TODO Read config file into adict
return PoorBox(**adict)
def create_config_file(path): # TODO Implement
'''Config: app_key, app_secret, access_type, output_dir, cache_file
OMIT app_key if it is ours!
'''
raise NotImplementedError()
def main():
import argparse
parser = argparse.ArgumentParser(
description="Downloads a directory from your dropbox. "
"Warning: this command deletes entire directories, so be careful!")
parser.add_argument('-o', '--output-dir', metavar='DIRECTORY',
help="folder to download the files into", default='POORBOX')
parser.add_argument('-c', '--cache-file', metavar='FILE',
help="file for poorbox to keep the cache in", default='poorbox.cache')
parser.add_argument('-k', '--app-key', metavar='KEY',
help="dropbox application key", default=APP_KEY)
parser.add_argument('-s', '--app-secret', metavar='SECRET',
help="dropbox application secret", default=APP_SECRET)
parser.add_argument('-a', '--access_type',
choices=('dropbox', 'app_folder'), default=ACCESS_TYPE,
help="access the whole dropbox or a directory in it")
parser.add_argument('-v', '--verbose', action='store_true',
help="show what dropbox tells poorbox to do")
parser.add_argument('-l', '--log-dir', metavar='DIRECTORY',
help='folder to store logs into', default='.')
# args = vars(parser.parse_args())
args = parser.parse_args()
if args.log_dir or args.verbose:
from .log import setup_log
setup_log(directory=args.log_dir, level='debug',
screen_level=logging.DEBUG if args.verbose else logging.WARNING)
try:
poorbox = PoorBox(cache_path=args.cache_file, app_key=args.app_key,
app_secret=args.app_secret, access_type=args.access_type,
output_dir=args.output_dir)
except AuthenticationFailure as e: # We are unauthorized, so
parser.exit(status=401, message=str(e)) # quit with an error code.
else:
poorbox.update()
if __name__ == '__main__':
main()
|
nandoflorestan/poorbox
|
poorbox/__init__.py
|
Python
|
bsd-3-clause
| 9,668
|
[
"VisIt"
] |
2afb4259ebaeedbba3a8718a1fd8f2376edcb66c8467b36cb22ef66c1e9d10de
|
#!/usr/bin/env python
"""
This script subsamples the alignments of a BAM file. For this a
likelihood (0.0 < p(keep) < 1.0) of keeping all alignments of a read
has to be provided. All alignments of a read are treated the same
(i.e. are discarded or kept).
"""
import argparse
import random
import sys
import pysam
__description__ = "Subsample BAM file entries"
__author__ = "Konrad Foerstner <konrad@foerstner.org>"
__copyright__ = "2013 by Konrad Foerstner <konrad@foerstner.org>"
__license__ = "ISC license"
__email__ = "konrad@foerstner.org"
__version__ = "0.3"
parser = argparse.ArgumentParser()
parser.add_argument("input_bam")
parser.add_argument("output_bam")
parser.add_argument("keeping_likelihood", type=float)
parser.add_argument("--seed", default=None, type=float)
args = parser.parse_args()
# Set the seed if given as a parameter
if args.seed is not None:
random.seed(args.seed)
prev_query = None
prev_keep = None
with pysam.Samfile(args.input_bam, "rb") as input_bam, pysam.Samfile(
args.output_bam, "wb", referencenames=input_bam.references,
referencelengths=input_bam.lengths, header=input_bam.header,
text=input_bam.text) as output_bam:
for alignment in input_bam:
        # This is for reads with multiple alignments. If the previous
        # alignment comes from the same read, treat the current one the
        # same way (keep or discard).
if alignment.qname == prev_query:
if prev_keep is True:
output_bam.write(alignment)
continue
else:
continue
if random.random() <= args.keeping_likelihood:
output_bam.write(alignment)
prev_keep = True
else:
prev_keep = False
prev_query = alignment.qname
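# Illustrative invocation (file names assumed):
#     python subsample_bam_file.py input.bam output.bam 0.1 --seed 42
# Each read, together with all of its alignments, is kept with probability 0.1.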
|
konrad/kuf_bio_scripts
|
subsample_bam_file.py
|
Python
|
isc
| 1,775
|
[
"pysam"
] |
5f8daa3ca32b12858376123bc461365a0099bcef56f17d40963f931418ec2eae
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides the Stress class used to create, manipulate, and
calculate relevant properties of the stress tensor.
"""
from pymatgen.core.tensors import SquareTensor
import math
import numpy as np
import warnings
__author__ = "Joseph Montoya"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Maarten de Jong, Mark Asta, Anubhav Jain"
__version__ = "1.0"
__maintainer__ = "Joseph Montoya"
__email__ = "montoyjh@lbl.gov"
__status__ = "Production"
__date__ = "July 24, 2018"
class Stress(SquareTensor):
"""
This class extends SquareTensor as a representation of the
stress
"""
symbol = "s"
def __new__(cls, stress_matrix):
"""
Create a Stress object. Note that the constructor uses __new__
rather than __init__ according to the standard method of
subclassing numpy ndarrays.
Args:
stress_matrix (3x3 array-like): the 3x3 array-like
representing the stress
"""
obj = super().__new__(cls, stress_matrix)
return obj.view(cls)
@property
def dev_principal_invariants(self):
"""
returns the principal invariants of the deviatoric stress tensor,
which is calculated by finding the coefficients of the characteristic
polynomial of the stress tensor minus the identity times the mean
stress
"""
return self.deviator_stress.principal_invariants*np.array([1, -1, 1])
@property
def von_mises(self):
"""
returns the von mises stress
"""
if not self.is_symmetric():
raise ValueError("The stress tensor is not symmetric, Von Mises "
"stress is based on a symmetric stress tensor.")
return math.sqrt(3*self.dev_principal_invariants[1])
@property
def mean_stress(self):
"""
returns the mean stress
"""
return 1./3.*self.trace()
@property
def deviator_stress(self):
"""
returns the deviatoric component of the stress
"""
        if not self.is_symmetric():
            warnings.warn("The stress tensor is not symmetric, "
                          "so deviator stress will not be either")
return self - self.mean_stress*np.eye(3)
def piola_kirchoff_1(self, def_grad):
"""
calculates the first Piola-Kirchoff stress
Args:
def_grad (3x3 array-like): deformation gradient tensor
"""
        if not self.is_symmetric():
raise ValueError("The stress tensor is not symmetric, \
PK stress is based on a symmetric stress tensor.")
def_grad = SquareTensor(def_grad)
return def_grad.det*np.dot(self, def_grad.inv.trans)
def piola_kirchoff_2(self, def_grad):
"""
calculates the second Piola-Kirchoff stress
Args:
            def_grad (3x3 array-like): deformation gradient tensor
"""
def_grad = SquareTensor(def_grad)
        if not self.is_symmetric():
raise ValueError("The stress tensor is not symmetric, \
PK stress is based on a symmetric stress tensor.")
return def_grad.det*np.dot(np.dot(def_grad.inv, self),
def_grad.inv.trans)
|
tschaume/pymatgen
|
pymatgen/analysis/elasticity/stress.py
|
Python
|
mit
| 3,451
|
[
"pymatgen"
] |
a13fb310672608767f4faabbf025ef8c0a081c9562eed0316d807f386939afe5
|
import re
import traceback
from PyQt5 import QtCore as QC
from PyQt5 import QtGui as QG
from PyQt5 import QtWidgets as QW
import numpy as n
import scipy.signal as ss
from scipy import poly1d, polyfit
from scipy import ndimage as sn
import scipy.interpolate as si
from lsjuicer.util import helpers
from lsjuicer.static import selection_types
from lsjuicer.ui.items.selection import BoundaryManager, SelectionDataModel
from lsjuicer.static.constants import TransientBoundarySelectionTypeNames as TBSTN
class Pipe(QC.QObject):
pipe_toggled = QC.pyqtSignal()
new_data_out = QC.pyqtSignal()
def _set_data_in(self, data_in):
self._data_in = data_in
#print self.name,' set data in', data_in.shape
self.process()
def process(self):
pass
def set_chain(self, chain):
self.chain=chain
def _get_data_in(self):
#print self.name,' get data in', self._data_in,self
return self._data_in
data_in = property(fset = _set_data_in, fget = _get_data_in)
@property
def data_out(self):
#print self.name,' get data out', self._data_out,self
#print self.name,' sending data out', self._data_out.shape
return self._data_out
@data_out.setter
def data_out(self, data_out):
self._data_out = data_out
#print self.name, "new data out set"
#print self.name,' set data out', self._data_out,self
self.new_data_out.emit()
#data_out = property(fset = _set_data_out, fget = _get_data_out)
def __init__(self, name=None):
super(Pipe, self).__init__()
self.name = name
self.chain = None
self.enabled = True
self.up_pipe = None
self.processed = False
self._data_in = None
self._data_out = None
self.needs_ROI = False
self.options = {}
self.values = {}
self.pixel_size = None
def set_enabled(self, enabled):
#print 'enabled ', self.name
self.enabled = enabled
self.processed = True
self.pipe_toggled.emit()
self.process()
self.new_data_out.emit()
def new_values(self):
#print 'new values ', self.name
self.processed = True
#print 'nv p',self.name
self.process()
#print 'nv ndo',self.name
#self.new_data_out.emit()
#print 'nv pt',self.name
self.pipe_toggled.emit()
def set_pixelsize(self, pixelsize):
self.pixel_size = pixelsize
def set_up_pipe(self, pipe):
#pipe that is before this one in the chain
#print 'set up',self.name
if not self.up_pipe:
#print 'no up pipe'
pass
elif self.up_pipe is pipe:
#'connection exists',self.name,pipe.name
return
else:
#print 'disconnect', self.name, self.up_pipe.name
self.up_pipe.new_data_out.disconnect(self.new_data_in)
#print 'make connection', self.name, pipe.name
self.up_pipe = pipe
self.up_pipe.new_data_out.connect(self.new_data_in)
def new_data_in(self):
#print self.name,' new data in'
self.data_in = self.up_pipe.data_out
class PassPipe(Pipe):
"""Pipe that simply passes input to output"""
def _set_data_in(self, data_in):
self._data_in = data_in
def process(self):
#print 'process',self.name
self.data_out = self.data_in
class ProcessPipe(Pipe):
def process(self):
pass
def extra_ui(self):
#pipes that needs extra ui elements can return these by this method
return None
def update_options(self):
for option in self.option_names:
self.options[option].setValue(self.values[option])
class SingleChannelProcessPipe(ProcessPipe):
"""Pipe that works on data from each channel separately"""
def process(self):
#print 'process',self.name
if self.enabled and self.processed:
q = n.zeros_like(self.data_in)
for channel in range(q.shape[0]):
q[channel] = self.do_processing(channel)
self.data_out = q
else:
self.data_out = self.data_in
def do_processing(self, channel_no):
pass
class MultiChannelProcessPipe(ProcessPipe):
"""Pipe that works on data from all channel concurrently"""
def process(self):
#print 'process',self.name
if self.enabled and self.processed:
print 'process ', self.enabled, self.processed
q = self.do_processing()
self.data_out = q
else:
print 'process ', self.enabled, self.processed
self.data_out = self.data_in
def do_processing(self):
pass
class BlurPipe(SingleChannelProcessPipe):
def __init__(self, *args, **kwargs):
super(BlurPipe, self).__init__(*args, **kwargs)
init_value = 0.6
option_2 = QW.QDoubleSpinBox()
option_2.setMaximum(20)
option_2.setMinimum(0)
option_2.setSingleStep(0.1)
option_2.setValue(init_value)
self.options['Amount x'] = option_2
self.values['Amount x'] = init_value
init_value = 0.6
option_3 = QW.QDoubleSpinBox()
option_3.setMaximum(200)
option_3.setMinimum(0)
option_3.setSingleStep(0.1)
option_3.setValue(init_value)
self.options['Amount y'] = option_3
self.values['Amount y'] = init_value
option_1 = QW.QComboBox()
option_1.addItem("Gaussian")
option_1.addItem("Uniform")
option_1.addItem("Median")
self.options['Type'] = option_1
init_value = "Uniform"
index = option_1.findText(init_value)
option_1.setCurrentIndex(index)
self.values['Type'] = init_value
def do_processing(self, channel):
#q = helpers.blur_image(self.data_in, self.values['Amount'])
#return q
data = self.data_in[channel]
blur_type = self.values['Type']
level_x = self.values['Amount x']
level_y = self.values['Amount y']
blur_x = level_x/(self.pixel_size[0])
blur_y = level_y/(self.pixel_size[1])
level = (blur_y, blur_x)
print "\n\nDoing blur",blur_type, level, self.pixel_size
if blur_type == "Median":
blurred_data = sn.median_filter(data, level)
elif blur_type == "Uniform":
blurred_data = sn.uniform_filter(data, level)
elif blur_type =="Gaussian":
blurred_data = sn.gaussian_filter(data, level)
return blurred_data
class ShearPipe(MultiChannelProcessPipe):
align_indices = None
def __init__(self, *args, **kwargs):
super(ShearPipe, self).__init__(*args, **kwargs)
self.align_indices = None
init_value = 100
option_1 = QW.QSpinBox()
option_1.setMaximum(20000)
option_1.setMinimum(1)
option_1.setValue(init_value)
self.options['Lines'] = option_1
self.values['Lines'] = init_value
init_value = 0
option_2 = QW.QSpinBox()
option_2.setMaximum(50000)
option_2.setMinimum(0)
option_2.setValue(init_value)
self.options['Start'] = option_2
self.values['Start'] = init_value
init_value = 0
option_3 = QW.QSpinBox()
option_3.setMaximum(50)
option_3.setMinimum(0)
option_3.setValue(init_value)
self.options['Order'] = option_3
self.values['Order'] = init_value
init_value = False
option_4 = QW.QCheckBox("Reuse other channel")
option_4.setChecked(init_value)
self.options['Reuse'] = option_4
        self.values['Reuse'] = init_value
init_value = 1
option_5 = QW.QSpinBox()
option_5.setMaximum(10)
option_5.setMinimum(1)
option_5.setValue(init_value)
self.options['Times'] = option_5
self.values['Times'] = init_value
self.selection = None
self.needs_ROI = True
self.option_names = ['Lines', 'Start']
def do_processing(self):
#q = helpers.blur_image(self.data_in, 8)
d=self.data_in.copy()
# align_indices = wave.argmax(axis=1)
if self.values['Reuse']:
if ShearPipe.align_indices is not None:
#print 'using indices'
align_indices = ShearPipe.align_indices
d=self.align_image(d, align_indices)
else:
pass
#print 'nothing to use'
else:
wave = d[:,self.values['Start']:self.values['Start']+self.values['Lines']]
d, ShearPipe.align_indices = self.align(d, wave,times=self.values['Times'])
# first = align_indices[0]
self.selection=None
self.roi_manager.remove_selections()
self.roi_manager.disable_builder()
return d
def align(self, image, wave, times=1):
cumulative_align_indices = None
image_0 = image.copy()
for i in range(times):
align_indices = self.get_align_indices(wave)
if cumulative_align_indices is not None:
cumulative_align_indices += align_indices
else:
cumulative_align_indices = align_indices
#cumulative_align_indices = self.fit_indices(cumulative_align_indices)
image = self.align_image(image, align_indices)
cumulative_align_indices = self.fit_indices(cumulative_align_indices)
image = self.align_image(image_0, cumulative_align_indices)
return image, cumulative_align_indices
def align_image(self, data, align_indices):
d=data
for i in range(1,d.shape[0]):
d[i] = n.roll(d[i],align_indices[i])
return d
def fit_indices(self, indices):
order = self.values['Order']
if order:
if order == -1:
import fitfun
def fitf(arg, y0,y1,y2,x1):
n=len(indices)
x=arg
ya = (y1-y0)/x1*x + y0
yb = (y2-y1)/(n-x1)*(x-x1)+y1
return ya*(x<x1)+yb*(x>=x1)
xx = n.arange(len(indices))
oo=fitfun.Optimizer(xx, indices)
oo.set_function(fitf)
oo.set_parameter_range('y0', min(indices),max(indices),0)
oo.set_parameter_range('y1', min(indices),max(indices),0)
oo.set_parameter_range('y2', min(indices),max(indices),0)
oo.set_parameter_range('x1', 2.0, len(indices)-2.,len(indices)/2.)
oo.optimize()
#print oo.solutions
#print 'old',indices.tolist()
indices=fitf(arg=xx, **oo.solutions).astype('int')
#print 'new',indices.tolist()
else:
#print 'old i', indices.tolist()
x = range(len(indices))
fit_f= poly1d( polyfit( x,indices, self.values['Order']) )
indices = fit_f(x).round().astype('int')
#print 'new i', indices.tolist()
else:
pass
return indices
def get_align_indices(self, wave):
#wave = helpers.blur_image(wave.astype('float'),1)
wave = sn.uniform_filter(wave.astype('float'), (3,3))
indices = []
w_base = wave.mean(axis=0)
w_base_n = (w_base-w_base.min())/(w_base.max()-w_base.min())
pad_left = n.ones(wave.shape[1]/2.)*w_base_n[0:10].mean()
pad_right = n.ones(wave.shape[1]/2.)*w_base_n[-10:].mean()
ww0=n.hstack((pad_left,w_base_n,pad_right))
flatten = 3
for i in range(wave.shape[0]):
if 0:
indices.append(0)
else:
ww = wave[max(0,i-flatten):min(wave.shape[0], i+flatten)]
w_i = ww.mean(axis=0)
w_i2 = helpers.smooth(wave[i])
w_i = helpers.smooth(w_i)
w_i_n = (w_i-w_i.min())/(w_i.max()-w_i.min())
w_i_n2 = (w_i2-w_i2.min())/(w_i2.max()-w_i2.min())
cc = ss.correlate(ww0, w_i_n, mode='valid')
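                # The cross-correlation peak locates this line's shift relative
                # to the padded mean profile; subtracting half the line width
                # turns the argmax position into a signed offset around zero.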
indices.append(cc.argmax()-wave.shape[1]/2.)
#make a nice polynomial fit for the indices
indices = n.array(indices).astype('int')
return indices
def set_scene(self, scene):
self.scene = scene
self.roi_manager = BoundaryManager(self.scene, selection_types.data['pipes.singleboundary'])
self.selection_model = SelectionDataModel()
self.selection_model.set_selection_manager(self.roi_manager)
self.roi_manager.selection_added.connect(self.boundary_selected)
def boundary_selected(self):
self.selection = self.roi_manager.selections[0]
self.selection.changed.connect(self.boundary_changed)
def boundary_changed(self):
#print self.selection.rectf
left = self.selection.rectf.left()
width = self.selection.rectf.width()
self.values['Start']=left
self.values['Lines']=width
self.update_options()
def extra_ui(self):
button = QW.QPushButton('Select')
button.clicked.connect(lambda:self.roi_manager.activate_builder_by_type_name(TBSTN.MANUAL))
return button
class ImageMathPipe(MultiChannelProcessPipe):
def __init__(self, *args, **kwargs):
super(ImageMathPipe, self).__init__(*args, **kwargs)
self.needs_ROI = False
self.option_names = ['Expression']
option_1 = QW.QLineEdit()
self.options['Expression'] = option_1
self.values['Expression'] = ""
def do_processing(self):
"""Process the expression. We expect it to contain channels as ch[0-9]
which will be replaced with channel[[0-9]]"""
expr = self.values['Expression']
def repl_f(match):
return "channels[%s]"%(match.group(1))
print 'expression is', expr, type(expr)
if expr:
channels = self.data_in.astype('float')
valid_expr = "res=%s"%(re.sub('ch([0-9])', repl_f, expr))
print valid_expr
import_statement = "from numpy import cos,log,sqrt,sin"
exec_statement = "\n".join([import_statement, valid_expr])
try:
exec(exec_statement)
#resize the result to the expected shape and dimension
res.shape = (1,res.shape[0], res.shape[1], res.shape[2])
#print channels[0].mean(), channels[0].min(), channels[0].max()
#print channels[1].mean(), channels[1].min(), channels[1].max()
#print res.mean(), res.min(), res.max()
return n.vstack((res,)*self.data_in.shape[0])
            except Exception as e:
QW.QMessageBox.critical(None, "Error with expression!",
"Error:\n"+traceback.format_exception_only(type(e),e)[0])
return self.data_in
class SelfRatioPipe(SingleChannelProcessPipe):
def __init__(self, *args, **kwargs):
super(SelfRatioPipe, self).__init__(*args, **kwargs)
self.needs_ROI = True
self.option_names = ['Lines', 'Start']
init_value = 100
option_1 = QW.QSpinBox()
option_1.setMaximum(10000)
option_1.setMinimum(1)
option_1.setValue(init_value)
self.options['Lines'] = option_1
self.values['Lines'] = init_value
init_value = 0
option_2 = QW.QSpinBox()
option_2.setMaximum(50000)
option_2.setMinimum(0)
option_2.setValue(init_value)
self.options['Start'] = option_2
self.values['Start'] = init_value
self.selection = None
def set_scene(self, scene):
self.scene = scene
self.roi_manager = BoundaryManager(self.scene, selection_types.data['pipes.singleboundary'])
self.selection_model = SelectionDataModel()
self.selection_model.set_selection_manager(self.roi_manager)
self.roi_manager.selection_added.connect(self.boundary_selected)
def boundary_selected(self):
self.selection = self.roi_manager.selections[0]
self.selection.changed.connect(self.boundary_changed)
def boundary_changed(self):
print self.selection.rectf
left = self.selection.rectf.left()
width = self.selection.rectf.width()
self.values['Start']=left
self.values['Lines']=width
self.update_options()
def do_processing(self,channel):
d = self.data_in[channel]
array_for_mean = d[:,:,
self.values['Start']:self.values['Start']+self.values['Lines']]
means = array_for_mean.mean(axis=2)
means_array = n.column_stack((means,)*d.shape[2])
#have to reshape with Fortran ordering to get the correct data
means_array = means_array.reshape(d.shape, order = 'F')
#FIXME 100 is to make histogram look nice
q = self.data_in/means_array*100
self.selection=None
self.roi_manager.remove_selections()
self.roi_manager.disable_builder()
return q
def extra_ui(self):
button = QW.QPushButton('Select')
button.clicked.connect(lambda:self.roi_manager.activate_builder_by_type_name(TBSTN.MANUAL))
return button
class ImageProcessPipe(ProcessPipe):
def __init__(self, *args, **kwargs):
super(ImageProcessPipe, self).__init__(*args, **kwargs)
        init_value = 2
option_1 = QW.QSpinBox()
option_1.setMaximum(10)
option_1.setMinimum(1)
option_1.setValue(init_value)
self.options['Multiplier 1'] = option_1
self.values['Multiplier 1'] = init_value
        init_value = 3
option_2 = QW.QSpinBox()
option_2.setMaximum(10)
option_2.setMinimum(1)
option_2.setValue(init_value)
self.options['Multiplier 2'] = option_2
self.values['Multiplier 2'] = init_value
#self.value = init_value
def do_processing(self):
q = self.data_in**(1./self.values["Multiplier 1"])
return q
class PipeChain(QC.QObject):
pipe_state_changed = QC.pyqtSignal()
new_histogram = QC.pyqtSignal()
def set_source_data(self, source_data):
self.source_data = source_data
#print 'source data shape', source_data.shape
if source_data.ndim == 3:
pass
elif source_data.ndim == 4:
pass
else:
raise ValueError("wrong data dimension %i"%source_data.ndim)
self.percentage_value_map = {}
self.inpipe.data_in = self.source_data
#self.calc_histogram()
def calc_histogram(self):
#ignore call if a pipe has been freshly inserted
if self.pipe_insertion:
self.pipe_insertion = False
return
for channel in range(self.source_data.shape[0]):
#data = self.source_data[channel]
data = self.get_result_data()[channel]
#if nans are in the data then use only non-nan data for histogram
nans = n.isnan(data)
if n.any(nans):
data = data[n.invert(n.isnan(data))]
histogram = n.histogram(data, bins=min(64, max(5, n.sqrt(data.size))), density=True)
counts = histogram[0]
bins = histogram[1]
percs = []
cumul = (counts*n.diff(bins)).cumsum()
percs=cumul.tolist()
percs.insert(0,0) #add first value to avoid out of interpolation range errors
self.perc_val_funcs[channel] = si.interp1d(n.array(percs)*100, bins)
self.val_perc_funcs[channel] = si.interp1d(bins, n.array(percs)*100)
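            # These interp1d objects are inverse mappings built from the
            # empirical CDF: cumulative percentage -> intensity value, and
            # intensity value -> cumulative percentage.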
self.histograms[channel] = histogram
self.new_histogram.emit()
def histogram(self, channel=0):
if not self.histograms:
self.calc_histogram()
return self.histograms[channel]
def percentage_value(self, percentage, channel = 0):
if not self.perc_val_funcs:
self.calc_histogram()
return self.perc_val_funcs[channel](100-(percentage+0.01))
def value_percentage(self, value, channel = 0):
if not self.val_perc_funcs:
self.calc_histogram()
return self.val_perc_funcs[channel](value)
def update_pixel_size(self, pixel_size):
self.pixel_size = pixel_size
for pipe in self.imagepipes:
            pipe.set_pixelsize(self.pixel_size)
def get_result_data(self):
return self.outpipe.data_out
#def set_frame(self, frame=None):
# if frame is not None:
# self.frame = frame
# if self.frame is not None:
# self.inpipe.data_in = self.source_data[:,self.frame,:,:]
# else:
# self.inpipe.data_in = self.source_data
@property
def active(self):
"""Return True if pipechain has any active elements, False otherwise"""
if len(self.pipes)<3:
return False
else:
for p in self.process_pipes:
if p.enabled and p.processed:
return True
return False
@property
def process_pipes(self):
return self.pipes[1:-1]
def __init__(self, pixel_size=None, graphicsscene=None, parent = None):
super(PipeChain, self).__init__(parent)
self.pipes = []
self.scene = graphicsscene
self.pixel_size = pixel_size
self.percentage_value_map = {}
self.histograms = {}
self.perc_val_funcs = {}
self.val_perc_funcs = {}
self.imagepipes = []
#this is needed to avoid extra histogram calculation when pipe is added
#to the chain.
self.pipe_insertion=False
self.inpipe = PassPipe("inpipe")
self.inpipe.set_chain(self)
self.outpipe = PassPipe("outpipe")
self.outpipe.set_chain(self)
self.outpipe.new_data_out.connect(self.calc_histogram)
self.do_connections()
def add_pipe(self, new_pipe):
self.pipe_insertion = True
self.imagepipes.append(new_pipe)
new_pipe.set_pixelsize(self.pixel_size)
new_pipe.set_chain(self)
if new_pipe.needs_ROI:
new_pipe.set_scene(self.scene)
self.do_connections()
new_pipe.pipe_toggled.connect(lambda:self.pipe_state_changed.emit())
def do_connections(self):
self.pipes = []
self.pipes.append(self.inpipe)
for pipe in self.imagepipes:
self.pipes.append(pipe)
self.pipes.append(self.outpipe)
for i in range(len(self.pipes)-1):
source = self.pipes[i]
sink = self.pipes[i+1]
sink.set_up_pipe(source)
if self.inpipe.data_in is not None:
self.inpipe.process()
class PipeWidget(QW.QFrame):
def __init__(self, pipe, parent = None):
super(PipeWidget, self).__init__(parent)
layout = QW.QVBoxLayout()
layout.setContentsMargins(0,0,0,0)
self.setLayout(layout)
self.setFrameStyle(QW.QFrame.StyledPanel)
self.setFrameShadow(QW.QFrame.Plain)
visible_layout = QW.QHBoxLayout()
settings_layout = QW.QGridLayout()
settings_layout.setContentsMargins(0,0,0,0)
settings_layout.setSpacing(0)
settings_frame = QW.QFrame()
settings_frame.setLayout(settings_layout)
layout.addLayout(visible_layout)
layout.addWidget(settings_frame)
name_label = QW.QLabel(pipe.name)
on_checkbox = QW.QCheckBox("Enabled")
on_checkbox.setChecked(True)
on_checkbox.toggled.connect(pipe.set_enabled)
on_checkbox.toggled.connect(settings_frame.setEnabled)
#details_pb = QG.QPushButton('Settings')
#details_pb.setCheckable(True)
#details_pb.setChecked(False)
#settings_frame.setVisible(False)
#details_pb.toggled.connect(settings_frame.setVisible)
visible_layout.addWidget(name_label)
visible_layout.addWidget(on_checkbox)
#visible_layout.addWidget(details_pb)
count = 0
apply_pb = QW.QPushButton("Apply")
for option in pipe.options:
settings_layout.addWidget(QW.QLabel(option), count, 0)
settings_layout.addWidget(pipe.options[option], count, 1)
if isinstance(pipe.options[option], QW.QLineEdit):
print 'connect lineedit'
pipe.options[option].returnPressed.connect(self.set_pipe_options)
count +=1
extra_ui = pipe.extra_ui()
if extra_ui:
settings_layout.addWidget(extra_ui, count,0,1,2)
count +=1
apply_pb = QW.QPushButton("Apply")
settings_layout.addWidget(apply_pb, count, 1)
apply_pb.clicked.connect(self.set_pipe_options)
self.pipe = pipe
def minimumSizeHint(self):
return QC.QSize(100,100)
def set_pipe_options(self):
new = False
for option in self.pipe.options:
widget = self.pipe.options[option]
if isinstance(widget, QW.QCheckBox):
new_value = widget.isChecked()
elif isinstance(widget, QW.QComboBox):
new_value = str(widget.currentText())
elif isinstance(widget, QW.QLineEdit):
new_value = str(widget.text())
if not test_string(new_value):
QW.QMessageBox.critical(self, "Bad input!",
"The expression %s is invalid!"%new_value)
new_value = ""
else:
new_value = self.pipe.options[option].value()
if new_value != self.pipe.values[option]:
self.pipe.values[option] = new_value
new = True
else:
pass
if new or not self.pipe.processed:
self.pipe.new_values()
def test_string(s):
allowed = ['ch', '+','/','-','+','*','sqrt','log','sin','cos','(',')','.']
numbers = [str(el) for el in range(10)]
allowed.extend(numbers)
s2 = s
for a in allowed:
s2 = s2.replace(a,'')
if s2:
return False
else:
return True
class PipeModel(QC.QAbstractListModel):
def __init__(self, parent = None):
super(PipeModel, self).__init__(parent)
self._pipedata = []
@property
def pipedata(self):
return self._pipedata
@pipedata.setter
def pipedata(self, pipes):
#print 'new pipe'
self.modelAboutToBeReset.emit()
self._pipedata = pipes
self.modelReset.emit()
#print self._pipedata
@property
def rows(self):
return len(self.pipedata)
def rowCount(self, parent):
return self.rows
def data(self, index, role):
pipe = self.pipedata[index.row()]
if role == QC.Qt.DisplayRole:
return pipe.name
elif role==QC.Qt.DecorationRole:
if pipe.enabled:
if pipe.processed:
return QG.QColor('lime')
else:
return QG.QColor('orange')
else:
return QG.QColor('red')
else:
return QC.QVariant()
def pipes_updated(self):
self.modelAboutToBeReset.emit()
self.layoutAboutToBeChanged.emit((),0)
self.modelReset.emit()
self.layoutChanged.emit((),0)
pipe_classes = {'SelfRatio':SelfRatioPipe,
'Shear':ShearPipe, "Blur":BlurPipe,
'Image math':ImageMathPipe}
|
ardoi/datajuicer
|
lsjuicer/data/pipes/tools.py
|
Python
|
gpl-3.0
| 27,557
|
[
"Gaussian"
] |
999373c59b712ce6382e91956b88029a088fc0eda19a2b8e8775af9dc3f2e7b0
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.dispatch import receiver
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from djblets.db.fields import CounterField, JSONField
from djblets.db.managers import ConcurrencyManager
from djblets.forms.fields import TIMEZONE_CHOICES
from reviewboard.accounts.managers import ProfileManager, TrophyManager
from reviewboard.accounts.trophies import TrophyType
from reviewboard.reviews.models import Group, ReviewRequest
from reviewboard.reviews.signals import review_request_published
from reviewboard.site.models import LocalSite
@python_2_unicode_compatible
class ReviewRequestVisit(models.Model):
"""
A recording of the last time a review request was visited by a user.
Users have one ReviewRequestVisit entry in the database per review
request they've visited. This is used to keep track of any updates
to review requests they've already seen, so that we can intelligently
inform them that new discussions have taken place.
"""
user = models.ForeignKey(User, related_name="review_request_visits")
review_request = models.ForeignKey(ReviewRequest, related_name="visits")
timestamp = models.DateTimeField(_('last visited'), default=timezone.now)
# Set this up with a ConcurrencyManager to help prevent race conditions.
objects = ConcurrencyManager()
def __str__(self):
return "Review request visit"
class Meta:
unique_together = ("user", "review_request")
@python_2_unicode_compatible
class Profile(models.Model):
"""User profile. Contains some basic configurable settings"""
user = models.ForeignKey(User, unique=True)
# This will redirect new users to the account settings page the first time
# they log in (or immediately after creating an account). This allows
# people to fix their real name and join groups.
first_time_setup_done = models.BooleanField(
default=False,
verbose_name=_("first time setup done"),
help_text=_("Indicates whether the user has already gone through "
"the first time setup process by saving their user "
"preferences."))
# Whether the user wants to receive emails
should_send_email = models.BooleanField(
default=True,
verbose_name=_("send email"),
help_text=_("Indicates whether the user wishes to receive emails."))
collapsed_diffs = models.BooleanField(
default=True,
verbose_name=_("collapsed diffs"),
help_text=_("Indicates whether diffs should be shown in their "
"collapsed state by default."))
wordwrapped_diffs = models.BooleanField(
default=True,
help_text=_("This field is unused and will be removed in a future "
"version."))
syntax_highlighting = models.BooleanField(
default=True,
verbose_name=_("syntax highlighting"),
help_text=_("Indicates whether the user wishes to see "
"syntax highlighting in the diffs."))
is_private = models.BooleanField(
default=False,
verbose_name=_("profile private"),
help_text=_("Indicates whether the user wishes to keep his/her "
"profile private."))
open_an_issue = models.BooleanField(
default=True,
verbose_name=_("opens an issue"),
help_text=_("Indicates whether the user wishes to default "
"to opening an issue or not."))
# Indicate whether closed review requests should appear in the
# review request lists (excluding the dashboard).
show_closed = models.BooleanField(default=True)
sort_review_request_columns = models.CharField(max_length=256, blank=True)
sort_dashboard_columns = models.CharField(max_length=256, blank=True)
sort_submitter_columns = models.CharField(max_length=256, blank=True)
sort_group_columns = models.CharField(max_length=256, blank=True)
review_request_columns = models.CharField(max_length=256, blank=True)
dashboard_columns = models.CharField(max_length=256, blank=True)
submitter_columns = models.CharField(max_length=256, blank=True)
group_columns = models.CharField(max_length=256, blank=True)
# A list of starred review requests. This allows users to monitor a
# review request and receive e-mails on updates without actually being
# on the reviewer list or commenting on the review. This is similar to
# adding yourself to a CC list.
starred_review_requests = models.ManyToManyField(ReviewRequest, blank=True,
related_name="starred_by")
# A list of watched groups. This is so that users can monitor groups
# without actually joining them, preventing e-mails being sent to the
# user and review requests from entering the Incoming Reviews list.
starred_groups = models.ManyToManyField(Group, blank=True,
related_name="starred_by")
# Allows per-user timezone settings
timezone = models.CharField(choices=TIMEZONE_CHOICES, default='UTC',
max_length=30)
extra_data = JSONField(null=True)
objects = ProfileManager()
def star_review_request(self, review_request):
"""Marks a review request as starred.
This will mark a review request as starred for this user and
immediately save to the database.
"""
self.starred_review_requests.add(review_request)
if (review_request.public and
(review_request.status == ReviewRequest.PENDING_REVIEW or
review_request.status == ReviewRequest.SUBMITTED)):
site_profile, is_new = LocalSiteProfile.objects.get_or_create(
user=self.user,
local_site=review_request.local_site,
profile=self)
if is_new:
site_profile.save()
site_profile.increment_starred_public_request_count()
self.save()
def unstar_review_request(self, review_request):
"""Marks a review request as unstarred.
        This will mark a review request as unstarred for this user and
immediately save to the database.
"""
q = self.starred_review_requests.filter(pk=review_request.pk)
if q.count() > 0:
self.starred_review_requests.remove(review_request)
if (review_request.public and
(review_request.status == ReviewRequest.PENDING_REVIEW or
review_request.status == ReviewRequest.SUBMITTED)):
site_profile, is_new = LocalSiteProfile.objects.get_or_create(
user=self.user,
local_site=review_request.local_site,
profile=self)
if is_new:
site_profile.save()
site_profile.decrement_starred_public_request_count()
self.save()
def star_review_group(self, review_group):
"""Marks a review group as starred.
This will mark a review group as starred for this user and
immediately save to the database.
"""
if self.starred_groups.filter(pk=review_group.pk).count() == 0:
self.starred_groups.add(review_group)
def unstar_review_group(self, review_group):
"""Marks a review group as unstarred.
        This will mark a review group as unstarred for this user and
immediately save to the database.
"""
if self.starred_groups.filter(pk=review_group.pk).count() > 0:
self.starred_groups.remove(review_group)
def __str__(self):
return self.user.username
@python_2_unicode_compatible
class LocalSiteProfile(models.Model):
"""User profile information specific to a LocalSite."""
user = models.ForeignKey(User, related_name='site_profiles')
profile = models.ForeignKey(Profile, related_name='site_profiles')
local_site = models.ForeignKey(LocalSite, null=True, blank=True,
related_name='site_profiles')
    # A dictionary of permissions that the user has been granted. Any
    # permission missing is considered to be False.
permissions = JSONField(null=True)
# Counts for quickly knowing how many review requests are incoming
# (both directly and total), outgoing (pending and total ever made),
# and starred (public).
direct_incoming_request_count = CounterField(
_('direct incoming review request count'),
initializer=lambda p: ReviewRequest.objects.to_user_directly(
p.user, local_site=p.local_site).count())
total_incoming_request_count = CounterField(
_('total incoming review request count'),
initializer=lambda p: ReviewRequest.objects.to_user(
p.user, local_site=p.local_site).count())
pending_outgoing_request_count = CounterField(
_('pending outgoing review request count'),
initializer=lambda p: ReviewRequest.objects.from_user(
p.user, p.user, local_site=p.local_site).count())
total_outgoing_request_count = CounterField(
_('total outgoing review request count'),
initializer=lambda p: ReviewRequest.objects.from_user(
p.user, p.user, None, local_site=p.local_site).count())
starred_public_request_count = CounterField(
_('starred public review request count'),
initializer=lambda p: (p.pk and
p.profile.starred_review_requests.public(
user=None,
local_site=p.local_site).count()) or 0)
class Meta:
unique_together = (('user', 'local_site'),
('profile', 'local_site'))
def __str__(self):
return '%s (%s)' % (self.user.username, self.local_site)
class Trophy(models.Model):
"""A trophy represents an achievement given to the user.
It is associated with a ReviewRequest and a User and can be associated
with a LocalSite.
"""
category = models.CharField(max_length=100)
received_date = models.DateTimeField(default=timezone.now)
review_request = models.ForeignKey(ReviewRequest, related_name="trophies")
local_site = models.ForeignKey(LocalSite, null=True,
related_name="trophies")
user = models.ForeignKey(User, related_name="trophies")
objects = TrophyManager()
@cached_property
def trophy_type(self):
"""Get the TrophyType instance for this trophy."""
return TrophyType.for_category(self.category)
def get_display_text(self):
"""Get the display text for this trophy."""
return self.trophy_type.get_display_text(self)
#
# The following functions are patched onto the User model.
#
def _is_user_profile_visible(self, user=None):
"""Returns whether or not a User's profile is viewable by a given user.
A profile is viewable if it's not marked as private, or the viewing
user owns the profile, or the user is a staff member.
"""
try:
if hasattr(self, 'is_private'):
# This is an optimization used by the web API. It will set
# is_private on this User instance through a query, saving a
# lookup for each instance.
#
# This must be done because select_related() and
# prefetch_related() won't cache reverse foreign key relations.
is_private = self.is_private
else:
is_private = self.get_profile().is_private
return ((user and (user == self or user.is_staff)) or
not is_private)
except Profile.DoesNotExist:
return True
def _should_send_email(self):
"""Returns whether a user wants to receive emails.
This is patched into the user object to make it easier to deal with missing
Profile objects."""
try:
return self.get_profile().should_send_email
except Profile.DoesNotExist:
return True
def _get_profile(self):
"""Returns the profile for the User.
The profile will be cached, preventing queries for future lookups.
"""
if not hasattr(self, '_profile'):
self._profile = Profile.objects.get(user=self)
self._profile.user = self
return self._profile
def _get_site_profile(self, local_site):
"""Returns the LocalSiteProfile for a given LocalSite for the User.
The profile will be cached, preventing queries for future lookups.
"""
if not hasattr(self, '_site_profiles'):
self._site_profiles = {}
if local_site.pk not in self._site_profiles:
site_profile = \
LocalSiteProfile.objects.get(user=self, local_site=local_site)
site_profile.user = self
site_profile.local_site = local_site
self._site_profiles[local_site.pk] = site_profile
return self._site_profiles[local_site.pk]
User.is_profile_visible = _is_user_profile_visible
User.get_profile = _get_profile
User.get_site_profile = _get_site_profile
User.should_send_email = _should_send_email
User._meta.ordering = ('username',)
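# Illustrative usage of the patched helpers above (not part of the original
# module; `some_user`, `viewer` and `site` are hypothetical objects):
#
#   if some_user.is_profile_visible(viewer):
#       profile = some_user.get_profile()              # cached after first call
#       site_profile = some_user.get_site_profile(site)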
@receiver(review_request_published)
def _call_compute_trophies(sender, review_request, **kwargs):
if review_request.changedescs.count() == 0 and review_request.public:
Trophy.objects.compute_trophies(review_request)
|
1tush/reviewboard
|
reviewboard/accounts/models.py
|
Python
|
mit
| 13,588
|
[
"VisIt"
] |
70c7dfa609f7b2ef4533f8fd9451c9aa0f57751306decc0e87d8311b44fb639e
|
import glob, os, shelve
from geneutils import *
DB_DIR = '../Proteomes/Candida_proteomes/'
BLASTP_RESULTS_DIR = 'resultsp/'
#MAIN_SPECIES = 'S288C'
#SUBJECT_DBS = ['AWRI1631_ABSV01000000', 'AWRI796_ADVS01000000', 'CBS7960_AEWL01000000', \
#'CLIB215_AEWP01000000', 'CLIB324_AEWM01000000', 'CLIB382_AFDG01000000', 'EC1118_PRJEA37863', \
#'EC9-8_AGSJ01000000', 'FL100_AEWO01000000', 'FostersB_AEHH01000000', 'FostersO_AEEZ01000000', \
#'Kyokai7_BABQ01000000', 'LalvinQA23_ADVV01000000', 'M22_ABPC01000000', 'PW5_AFDC01000000', \
#'RM11-1a_AAEG01000000', 'Sigma1278b_ACVY01000000', 'JAY291_ACFL01000000', 'T7_AFDE01000000', \
#'T73_AFDF01000000', 'UC5_AFDD01000000', 'Vin13_ADXC01000000', 'VL3_AEJS01000000', 'Y10_AEWK01000000', \
#'YJM269_AEWN01000000', 'YJM789_AAFW02000000', 'YPS163_ABPD01000000', 'W303_MPG_2011']
MAIN_SPECIES = 'S288C_cerevisiae__shift1'#, 'S288C_cerevisiae__shift2'
SUBJECT_DBS = ['candida_albicans_wo-1_1', 'candida_guilliermondii_1', 'candida_lusitaniae_1', \
'candida_parapsilosis_1', 'candida_tropicalis_3', 'debaryomyces_hansenii_1', 'lodderomyces_elongisporus_1']
#SUBJECT_DBS = ['K_waltii', 'L_kluyveri', 'S_bayanus', 'S_castellii', 'S_cerevisiae', 'S_kudriavzevii', \
#'S_mikatae', 'S_paradoxus', 'Scastellii_040406']
#SUBJECT_DBS = ['schizosaccharomyces_cryophilus_mito_3', 'schizosaccharomyces_cryophilus_oy26_3', \
#'schizosaccharomyces_japonicus_yfs275_4', 'schizosaccharomyces_japonicus_yfs275_mitochondria_1', \
#'schizosaccharomyces_octosporus_5', 'schizosaccharomyces_octosporus_mito', 'schizosaccharomyces_pombe_972h-_2', \
#'schizosaccharomyces_pombe_972h-_mitochondria']
# RUN
# GENERATE BLAST DATABASES, GIVEN FASTA FILES
generate_blast_database(DB_DIR)
# SAVE QUERY AND SUBJECT DBS INTO A TWO-KEY DICTIONARY, SEARCHABLE BY SPECIES AND ORF NAME
generate_python_pep_database(DB_DIR, SUBJECT_DBS + [MAIN_SPECIES])
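# The resulting LOCAL_PEP_DATABASE (defined in geneutils) is keyed by
# (species, orf name), as used in the reverse-BLAST loop below. An
# illustrative lookup, with a hypothetical ORF id:
#   record = LOCAL_PEP_DATABASE[MAIN_SPECIES, 'YAL001C']
#   print(record.format('fasta'))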
execute('mkdir -p ' + BLASTP_RESULTS_DIR + ' && rm -rf ' + BLASTP_RESULTS_DIR + '/*')
for entry in fasta_entries(DB_DIR + MAIN_SPECIES + '_pep.fsa'):
write_file(BLASTP_RESULTS_DIR + entry.id, entry.format('fasta'))
print("created pre-MUSCLE fasta file for " + entry.id)
# RUN FULL BLASTP BETWEEN S288C AND EACH OF THE OTHER STRAINS
for DB_NAME in SUBJECT_DBS:
blast('P', DB_DIR + MAIN_SPECIES + "_pep.fsa", DB_DIR + DB_NAME, outname=MAIN_SPECIES + "--" + DB_NAME + ".blastp.csv")
# TAKE BLASTP RESULTS, RUN REVERSE BLASTP, AND APPEND THE SEQUENCES TO THE APPROPRIATE FASTA FILES FOR MUSCLE.
# TAKES ONLY THE FIRST MATCH INTO THE PRE-MUSCLE FILE
for DB_NAME in SUBJECT_DBS:
for (query_orf, subject_orfs_set) in qseq_sseq_sets(MAIN_SPECIES + '--' + DB_NAME + '.blastp.csv'):
for subject_orf in subject_orfs_set:
sseq_fsa = LOCAL_PEP_DATABASE[DB_NAME, subject_orf].format('fasta')
if reverse_blast_check('P', DB_DIR + MAIN_SPECIES, query_orf, sseq_fsa):
append_to_file(BLASTP_RESULTS_DIR + query_orf, sseq_fsa)
break
print("Finished adding " + DB_NAME + " matches to pre-MUSCLE FASTA files")
# RUN MUSCLE
failed_files = []
for multi_fasta_file in glob.glob(os.path.join(BLASTP_RESULTS_DIR, "*")):
    try:
        muscle(multi_fasta_file)
    except Exception:
        failed_files.append(multi_fasta_file)
print('ALL DONE')
if failed_files:
print('\n\nTHE FOLLOWING PRE-MUSCLE FILES FAILED:')
for i in failed_files:
print(i)
|
q10/gene
|
testp.py
|
Python
|
bsd-3-clause
| 3,442
|
[
"BLAST"
] |
dfb05f3568b0dfbdb5a54392b33fc9cfea9c4c4fdafd8a55e8f8d335d8a348fe
|
# BEGIN_COPYRIGHT
#
# Copyright (C) 2014 CRS4.
#
# This file is part of blast-python.
#
# blast-python is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# blast-python is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# blast-python. If not, see <http://www.gnu.org/licenses/>.
#
# END_COPYRIGHT
import os, re, warnings
from distutils.core import setup, Extension
from distutils.command.build_py import build_py as du_build_py
from distutils.errors import DistutilsSetupError
NCBI_INCLUDE = os.getenv("NCBI_INCLUDE", "/usr/include/ncbi-tools++")
NCBI_LIB = os.getenv("NCBI_LIB", "/usr/lib/ncbi-tools++")
EXPECTED_NCBI_TOOLKIT_VERSION = '20100418'
try:
with open("VERSION") as f:
VERSION = f.read().strip()
except IOError:
raise DistutilsSetupError("failed to read version info")
mtime = lambda fn: os.stat(fn).st_mtime
# ncbi_toolkit_main: auto-generated in original Makefile
cpp_names = ["blast_options", "blast_sseq", "blast_sseq_factories",
"blast_blast2seq", "blast_diagnostics", "blast_hits",
"blast_sseq_loc_from_fasta", "blast_sseq_loc_from_str",
"cseq_sequence_extractor", "ncbi_toolkit_main"]
cpp_files = ["src/%s.cpp" % n for n in cpp_names]
include_dirs = [NCBI_INCLUDE]
library_dirs = [NCBI_LIB]
ver_file = os.path.join(NCBI_INCLUDE, 'common/ncbi_source_ver.h')
try:
with open(ver_file) as f:
vtext = f.read()
v = re.search(r'NCBI_PRODUCTION_VER\s+(\d+)', vtext).groups()[0]
except (IOError, AttributeError):
problem = "could not get NCBI toolkit version"
else:
if v != EXPECTED_NCBI_TOOLKIT_VERSION:
problem = "ncbi toolkit's version (%r) is not the expected one (%r)" % (
v, EXPECTED_NCBI_TOOLKIT_VERSION)
else:
problem = None
if problem:
warnings.warn(problem)
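# For reference, the matched line in ncbi_source_ver.h is assumed to look
# roughly like '#define NCBI_PRODUCTION_VER 20100418'; a version mismatch
# only warns (above) rather than aborting the build.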
blast_libs = ["xblast", "xalgoblastdbindex", "composition_adjustment",
"xalgodustmask", "seqdb", "xobjutil", "xobjread",
"blast_services", "xalgowinmask", "seqmasks_io",]
other_libs = ['biblio', 'blastdb', 'dbapi_driver', 'general', 'id1', 'id2',
'medline', 'ncbi_xloader_genbank', 'ncbi_xreader',
'ncbi_xreader_cache', 'ncbi_xreader_id1', 'ncbi_xreader_id2',
'pub', 'scoremat', 'seq', 'seqcode', 'seqset', 'seqsplit',
'sequtil', 'tables', 'xcompress', 'xconnect', 'xncbi',
'xnetblast', 'xnetblastcli', 'xobjmgr', 'xobjsimple', 'xser',
'xutil']
libraries = ['boost_python'] + blast_libs + other_libs + ['z']
blast_core_ext = Extension("ncbi_toolkit",
cpp_files,
include_dirs=include_dirs,
library_dirs=library_dirs,
runtime_library_dirs=library_dirs,
libraries=libraries,
extra_compile_args=['-O3'])
def write_version(filename="BlastPython/version.py"):
if os.path.exists(filename) and mtime("VERSION") <= mtime(filename):
return
with open(filename, "w") as f:
f.write("# GENERATED BY setup.py\n")
f.write("version='%s'\n" % VERSION)
class build_py(du_build_py):
def run(self):
write_version()
du_build_py.run(self)
setup(name="blast-python",
version=VERSION,
description='Python bindings for NCBI blast',
author='Gianluigi Zanetti',
author_email='zag@crs4.it',
maintainer='Simone Leo',
maintainer_email='simleo@crs4.it',
url='http://svn.crs4.it/blast-python/',
packages=['BlastPython'],
ext_modules=[blast_core_ext],
cmdclass={"build_py": build_py},
)
|
crs4/blast-python
|
setup.py
|
Python
|
gpl-3.0
| 4,043
|
[
"BLAST"
] |
d265ab1db73de48f7d5f96a9e6b3445948b049996207f8fa66b9523d53fef0f1
|
# modified mexican hat wavelet test.py
# spectral analysis for RADAR and WRF patterns
import os, shutil
import time
import pickle
import numpy as np
from scipy import signal, ndimage
import matplotlib.pyplot as plt
from armor import defaultParameters as dp
from armor import pattern
from armor import objects4 as ob
#from armor import misc as ms
dbz = pattern.DBZ
testScriptsFolder = dp.root + 'python/armor/tests/'
testName = "modifiedMexicanHatTest3"
timeString = str(int(time.time()))
outputFolder = dp.root + 'labLogs/%d-%d-%d-%s/' % \
(time.localtime().tm_year, time.localtime().tm_mon, time.localtime().tm_mday, testName)
if not os.path.exists(outputFolder):
os.makedirs(outputFolder)
shutil.copyfile(testScriptsFolder+testName+".py", outputFolder+ timeString + testName+".py")
kongreywrf = ob.kongreywrf
kongreywrf.fix()
kongrey = ob.kongrey
monsoon = ob.monsoon
monsoon.list= [v for v in monsoon.list if '20120612' in v.dataTime]
march2014 = ob.march2014
march2014wrf11 = ob.march2014wrf11
march2014wrf12 = ob.march2014wrf12
summaryFile = open(outputFolder + timeString + "summary.txt", 'a')
for ds in [kongrey]:
summaryFile.write("\n===============================================================\n\n\n")
streamMean = 0.
dbzCount = 0
for a in ds:
print "-------------------------------------------------"
print testName
print
print a.name
a.load()
a.setThreshold(0)
a.saveImage(imagePath=outputFolder+a.name+".png")
L = []
for sigma in [1, 2, 4, 8 ,16, 32, 64, 128, 256, 512]:
print "sigma:", sigma
a.load()
a.setThreshold(0)
arr0 = a.matrix
#arr1 = signal.convolve2d(arr0, mask_i, mode='same', boundary='fill')
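            # The sigma**2 factor below gives the scale-normalised
            # Laplacian-of-Gaussian response, keeping magnitudes comparable
            # across the different sigma values.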
arr1 = ndimage.filters.gaussian_laplace(arr0, sigma=sigma, mode="constant", cval=0.0) * sigma**2 #2014-04-29
a1 = dbz(matrix=arr1.real, name=a.name + "_" + testName + "_sigma" + str(sigma))
L.append({ 'sigma' : sigma,
'a1' : a1,
'abssum1': abs(a1.matrix).sum(),
'sum1' : a1.matrix.sum(),
})
print "abs sum", abs(a1.matrix.sum())
#a1.show()
#a2.show()
plt.close()
a1.histogram(display=False, outputPath=outputFolder+a1.name+"_histogram.png")
#pickle.dump(L, open(outputFolder+ a.name +'_test_results.pydump','w')) # no need to dump if test is easy
x = [v['sigma'] for v in L]
y1 = [v['abssum1'] for v in L]
plt.close()
plt.plot(x,y1)
plt.title(a1.name+ '\n absolute values against sigma')
plt.savefig(outputFolder+a1.name+"-spectrum-histogram.png")
plt.close()
# now update the mean
streamMeanUpdate = np.array([v['abssum1'] for v in L])
dbzCount += 1
streamMean = 1.* ((streamMean*(dbzCount -1)) + streamMeanUpdate ) / dbzCount
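        # Incremental mean: mean_n = ((n-1)*mean_{n-1} + x_n) / n; e.g. a
        # running mean of 4.0 over 2 patterns updated with 7.0 becomes
        # (2*4.0 + 7.0)/3 = 5.0.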
sigmas =[v['sigma'] for v in L]
print "Stream Count and Mean so far:", dbzCount, streamMean
# now save the mean and the plot
summaryText = '\n---------------------------------------\n'
summaryText += str(int(time.time())) + '\n'
summaryText += "dbzStream Name:" + ds.name + '\n'
summaryText += "dbzCount:\t" + str(dbzCount) + '\n'
summaryText +="sigma:\t\t" + str(sigmas) + '\n'
summaryText += "streamMean:\t" + str(streamMean.tolist()) +'\n'
print summaryText
print "saving..."
# release the memory
a.matrix = np.array([0])
summaryFile.write(summaryText)
plt.close()
plt.plot(sigmas, streamMean)
plt.title(ds.name + '- average laplacian-of-gaussian spectrum for ' +str(dbzCount) + ' DBZ patterns')
plt.savefig(outputFolder + ds.name + "_average_LoG_spectrum.png")
plt.close()
summaryFile.close()
|
yaukwankiu/armor
|
tests/modifiedMexicanHatTest3.py
|
Python
|
cc0-1.0
| 4,018
|
[
"Gaussian"
] |
7cd77cce8fcfafcae4c71004e781a762442d13b223b160976620f0f5cf9d4449
|
"""Session object for building, serializing, sending, and receiving messages in
IPython. The Session object supports serialization, HMAC signatures, and
metadata on messages.
Also defined here are utilities for working with Sessions:
* A SessionFactory to be used as a base class for configurables that work with
Sessions.
* A Message object for convenience that allows attribute-access to the msg dict.
Authors:
* Min RK
* Brian Granger
* Fernando Perez
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import hashlib
import hmac
import logging
import os
import pprint
import random
import uuid
from datetime import datetime
try:
import cPickle
pickle = cPickle
except ImportError:
cPickle = None
import pickle
try:
# We are using compare_digest to limit the surface of timing attacks
from hmac import compare_digest
except ImportError:
    # Python < 2.7.7: when digests don't match, no feedback is provided,
    # limiting the timing-attack surface
def compare_digest(a,b): return a == b
import zmq
from zmq.utils import jsonapi
from zmq.eventloop.ioloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream
from IPython.config.configurable import Configurable, LoggingConfigurable
from IPython.utils import io
from IPython.utils.importstring import import_item
from IPython.utils.jsonutil import extract_dates, squash_dates, date_default
from IPython.utils.py3compat import (str_to_bytes, str_to_unicode, unicode_type,
iteritems)
from IPython.utils.traitlets import (CBytes, Unicode, Bool, Any, Instance, Set,
DottedObjectName, CUnicode, Dict, Integer,
TraitError,
)
from IPython.utils.pickleutil import PICKLE_PROTOCOL
from IPython.kernel.zmq.serialize import MAX_ITEMS, MAX_BYTES
#-----------------------------------------------------------------------------
# utility functions
#-----------------------------------------------------------------------------
def squash_unicode(obj):
"""coerce unicode back to bytestrings."""
if isinstance(obj,dict):
        for key in list(obj.keys()):
obj[key] = squash_unicode(obj[key])
if isinstance(key, unicode_type):
obj[squash_unicode(key)] = obj.pop(key)
elif isinstance(obj, list):
for i,v in enumerate(obj):
obj[i] = squash_unicode(v)
elif isinstance(obj, unicode_type):
obj = obj.encode('utf8')
return obj
#-----------------------------------------------------------------------------
# globals and defaults
#-----------------------------------------------------------------------------
# ISO8601-ify datetime objects
json_packer = lambda obj: jsonapi.dumps(obj, default=date_default)
json_unpacker = lambda s: jsonapi.loads(s)
pickle_packer = lambda o: pickle.dumps(squash_dates(o), PICKLE_PROTOCOL)
pickle_unpacker = pickle.loads
default_packer = json_packer
default_unpacker = json_unpacker
DELIM = b"<IDS|MSG>"
# singleton dummy tracker, which will always report as done
DONE = zmq.MessageTracker()
#-----------------------------------------------------------------------------
# Mixin tools for apps that use Sessions
#-----------------------------------------------------------------------------
session_aliases = dict(
ident = 'Session.session',
user = 'Session.username',
keyfile = 'Session.keyfile',
)
session_flags = {
'secure' : ({'Session' : { 'key' : str_to_bytes(str(uuid.uuid4())),
'keyfile' : '' }},
"""Use HMAC digests for authentication of messages.
Setting this flag will generate a new UUID to use as the HMAC key.
"""),
'no-secure' : ({'Session' : { 'key' : b'', 'keyfile' : '' }},
"""Don't authenticate messages."""),
}
def default_secure(cfg):
"""Set the default behavior for a config environment to be secure.
If Session.key/keyfile have not been set, set Session.key to
a new random UUID.
"""
if 'Session' in cfg:
if 'key' in cfg.Session or 'keyfile' in cfg.Session:
return
# key/keyfile not specified, generate new UUID:
cfg.Session.key = str_to_bytes(str(uuid.uuid4()))
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class SessionFactory(LoggingConfigurable):
"""The Base class for configurables that have a Session, Context, logger,
and IOLoop.
"""
logname = Unicode('')
def _logname_changed(self, name, old, new):
self.log = logging.getLogger(new)
# not configurable:
context = Instance('zmq.Context')
def _context_default(self):
return zmq.Context.instance()
session = Instance('IPython.kernel.zmq.session.Session')
loop = Instance('zmq.eventloop.ioloop.IOLoop', allow_none=False)
def _loop_default(self):
return IOLoop.instance()
def __init__(self, **kwargs):
super(SessionFactory, self).__init__(**kwargs)
if self.session is None:
# construct the session
self.session = Session(**kwargs)
class Message(object):
"""A simple message object that maps dict keys to attributes.
A Message can be created from a dict and a dict from a Message instance
simply by calling dict(msg_obj)."""
def __init__(self, msg_dict):
dct = self.__dict__
for k, v in iteritems(dict(msg_dict)):
if isinstance(v, dict):
v = Message(v)
dct[k] = v
# Having this iterator lets dict(msg_obj) work out of the box.
def __iter__(self):
return iter(iteritems(self.__dict__))
def __repr__(self):
return repr(self.__dict__)
def __str__(self):
return pprint.pformat(self.__dict__)
def __contains__(self, k):
return k in self.__dict__
def __getitem__(self, k):
return self.__dict__[k]
def msg_header(msg_id, msg_type, username, session):
date = datetime.now()
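    # locals() packs msg_id, msg_type, username, session and the date above
    # into the header dict in a single step.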
return locals()
def extract_header(msg_or_header):
"""Given a message or header, return the header."""
if not msg_or_header:
return {}
try:
# See if msg_or_header is the entire message.
h = msg_or_header['header']
except KeyError:
try:
# See if msg_or_header is just the header
h = msg_or_header['msg_id']
except KeyError:
raise
else:
h = msg_or_header
if not isinstance(h, dict):
h = dict(h)
return h
class Session(Configurable):
"""Object for handling serialization and sending of messages.
The Session object handles building messages and sending them
with ZMQ sockets or ZMQStream objects. Objects can communicate with each
other over the network via Session objects, and only need to work with the
dict-based IPython message spec. The Session will handle
serialization/deserialization, security, and metadata.
Sessions support configurable serialization via packer/unpacker traits,
and signing with HMAC digests via the key/keyfile traits.
Parameters
----------
debug : bool
whether to trigger extra debugging statements
packer/unpacker : str : 'json', 'pickle' or import_string
importstrings for methods to serialize message parts. If just
'json' or 'pickle', predefined JSON and pickle packers will be used.
Otherwise, the entire importstring must be used.
The functions must accept at least valid JSON input, and output *bytes*.
For example, to use msgpack:
packer = 'msgpack.packb', unpacker='msgpack.unpackb'
pack/unpack : callables
You can also set the pack/unpack callables for serialization directly.
session : bytes
the ID of this Session object. The default is to generate a new UUID.
username : unicode
username added to message headers. The default is to ask the OS.
key : bytes
The key used to initialize an HMAC signature. If unset, messages
will not be signed or checked.
keyfile : filepath
The file containing a key. If this is set, `key` will be initialized
to the contents of the file.
"""
debug=Bool(False, config=True, help="""Debug output in the Session""")
packer = DottedObjectName('json',config=True,
help="""The name of the packer for serializing messages.
Should be one of 'json', 'pickle', or an import name
for a custom callable serializer.""")
def _packer_changed(self, name, old, new):
if new.lower() == 'json':
self.pack = json_packer
self.unpack = json_unpacker
self.unpacker = new
elif new.lower() == 'pickle':
self.pack = pickle_packer
self.unpack = pickle_unpacker
self.unpacker = new
else:
self.pack = import_item(str(new))
unpacker = DottedObjectName('json', config=True,
help="""The name of the unpacker for unserializing messages.
Only used with custom functions for `packer`.""")
def _unpacker_changed(self, name, old, new):
if new.lower() == 'json':
self.pack = json_packer
self.unpack = json_unpacker
self.packer = new
elif new.lower() == 'pickle':
self.pack = pickle_packer
self.unpack = pickle_unpacker
self.packer = new
else:
self.unpack = import_item(str(new))
session = CUnicode(u'', config=True,
help="""The UUID identifying this session.""")
def _session_default(self):
u = unicode_type(uuid.uuid4())
self.bsession = u.encode('ascii')
return u
def _session_changed(self, name, old, new):
self.bsession = self.session.encode('ascii')
# bsession is the session as bytes
bsession = CBytes(b'')
username = Unicode(str_to_unicode(os.environ.get('USER', 'username')),
help="""Username for the Session. Default is your system username.""",
config=True)
metadata = Dict({}, config=True,
help="""Metadata dictionary, which serves as the default top-level metadata dict for each message.""")
# message signature related traits:
key = CBytes(b'', config=True,
help="""execution key, for extra authentication.""")
def _key_changed(self, name, old, new):
if new:
self.auth = hmac.HMAC(new, digestmod=self.digest_mod)
else:
self.auth = None
signature_scheme = Unicode('hmac-sha256', config=True,
help="""The digest scheme used to construct the message signatures.
Must have the form 'hmac-HASH'.""")
def _signature_scheme_changed(self, name, old, new):
if not new.startswith('hmac-'):
raise TraitError("signature_scheme must start with 'hmac-', got %r" % new)
hash_name = new.split('-', 1)[1]
try:
self.digest_mod = getattr(hashlib, hash_name)
except AttributeError:
raise TraitError("hashlib has no such attribute: %s" % hash_name)
digest_mod = Any()
def _digest_mod_default(self):
return hashlib.sha256
auth = Instance(hmac.HMAC)
digest_history = Set()
digest_history_size = Integer(2**16, config=True,
help="""The maximum number of digests to remember.
The digest history will be culled when it exceeds this value.
"""
)
keyfile = Unicode('', config=True,
help="""path to file containing execution key.""")
def _keyfile_changed(self, name, old, new):
with open(new, 'rb') as f:
self.key = f.read().strip()
# for protecting against sends from forks
pid = Integer()
# serialization traits:
pack = Any(default_packer) # the actual packer function
def _pack_changed(self, name, old, new):
if not callable(new):
raise TypeError("packer must be callable, not %s"%type(new))
    unpack = Any(default_unpacker) # the actual unpacker function
def _unpack_changed(self, name, old, new):
        # unpacker is not checked - it is assumed to be the inverse of pack
if not callable(new):
raise TypeError("unpacker must be callable, not %s"%type(new))
# thresholds:
copy_threshold = Integer(2**16, config=True,
help="Threshold (in bytes) beyond which a buffer should be sent without copying.")
buffer_threshold = Integer(MAX_BYTES, config=True,
help="Threshold (in bytes) beyond which an object's buffer should be extracted to avoid pickling.")
item_threshold = Integer(MAX_ITEMS, config=True,
help="""The maximum number of items for a container to be introspected for custom serialization.
Containers larger than this are pickled outright.
"""
)
def __init__(self, **kwargs):
"""create a Session object
Parameters
----------
debug : bool
whether to trigger extra debugging statements
packer/unpacker : str : 'json', 'pickle' or import_string
importstrings for methods to serialize message parts. If just
'json' or 'pickle', predefined JSON and pickle packers will be used.
Otherwise, the entire importstring must be used.
The functions must accept at least valid JSON input, and output
*bytes*.
For example, to use msgpack:
packer = 'msgpack.packb', unpacker='msgpack.unpackb'
pack/unpack : callables
You can also set the pack/unpack callables for serialization
directly.
session : unicode (must be ascii)
the ID of this Session object. The default is to generate a new
UUID.
bsession : bytes
The session as bytes
username : unicode
username added to message headers. The default is to ask the OS.
key : bytes
The key used to initialize an HMAC signature. If unset, messages
will not be signed or checked.
signature_scheme : str
The message digest scheme. Currently must be of the form 'hmac-HASH',
where 'HASH' is a hashing function available in Python's hashlib.
The default is 'hmac-sha256'.
This is ignored if 'key' is empty.
keyfile : filepath
The file containing a key. If this is set, `key` will be
initialized to the contents of the file.
"""
super(Session, self).__init__(**kwargs)
self._check_packers()
self.none = self.pack({})
# ensure self._session_default() if necessary, so bsession is defined:
self.session
self.pid = os.getpid()
@property
def msg_id(self):
"""always return new uuid"""
return str(uuid.uuid4())
def _check_packers(self):
"""check packers for datetime support."""
pack = self.pack
unpack = self.unpack
# check simple serialization
msg = dict(a=[1,'hi'])
try:
packed = pack(msg)
except Exception as e:
msg = "packer '{packer}' could not serialize a simple message: {e}{jsonmsg}"
if self.packer == 'json':
jsonmsg = "\nzmq.utils.jsonapi.jsonmod = %s" % jsonapi.jsonmod
else:
jsonmsg = ""
raise ValueError(
msg.format(packer=self.packer, e=e, jsonmsg=jsonmsg)
)
# ensure packed message is bytes
if not isinstance(packed, bytes):
raise ValueError("message packed to %r, but bytes are required"%type(packed))
# check that unpack is pack's inverse
try:
unpacked = unpack(packed)
assert unpacked == msg
except Exception as e:
msg = "unpacker '{unpacker}' could not handle output from packer '{packer}': {e}{jsonmsg}"
if self.packer == 'json':
jsonmsg = "\nzmq.utils.jsonapi.jsonmod = %s" % jsonapi.jsonmod
else:
jsonmsg = ""
raise ValueError(
msg.format(packer=self.packer, unpacker=self.unpacker, e=e, jsonmsg=jsonmsg)
)
# check datetime support
msg = dict(t=datetime.now())
try:
unpacked = unpack(pack(msg))
if isinstance(unpacked['t'], datetime):
raise ValueError("Shouldn't deserialize to datetime")
except Exception:
self.pack = lambda o: pack(squash_dates(o))
self.unpack = lambda s: unpack(s)
def msg_header(self, msg_type):
return msg_header(self.msg_id, msg_type, self.username, self.session)
def msg(self, msg_type, content=None, parent=None, header=None, metadata=None):
"""Return the nested message dict.
This format is different from what is sent over the wire. The
        serialize/unserialize methods convert this nested message dict to the wire
format, which is a list of message parts.
"""
msg = {}
header = self.msg_header(msg_type) if header is None else header
msg['header'] = header
msg['msg_id'] = header['msg_id']
msg['msg_type'] = header['msg_type']
msg['parent_header'] = {} if parent is None else extract_header(parent)
msg['content'] = {} if content is None else content
msg['metadata'] = self.metadata.copy()
if metadata is not None:
msg['metadata'].update(metadata)
return msg
def sign(self, msg_list):
"""Sign a message with HMAC digest. If no auth, return b''.
Parameters
----------
msg_list : list
The [p_header,p_parent,p_content] part of the message list.
"""
if self.auth is None:
return b''
h = self.auth.copy()
for m in msg_list:
h.update(m)
return str_to_bytes(h.hexdigest())
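    # Illustrative signing round-trip (hypothetical values; a non-empty key is
    # assumed so that self.auth is configured):
    #   s = Session(key=b'secret')
    #   parts = [s.pack({'msg_id': '1'}), s.pack({}), s.pack({}), s.pack({})]
    #   sig = s.sign(parts)   # hex HMAC digest as bytes; b'' when auth is off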
def serialize(self, msg, ident=None):
"""Serialize the message components to bytes.
This is roughly the inverse of unserialize. The serialize/unserialize
methods work with full message lists, whereas pack/unpack work with
the individual message parts in the message list.
Parameters
----------
msg : dict or Message
The next message dict as returned by the self.msg method.
Returns
-------
msg_list : list
The list of bytes objects to be sent with the format::
[ident1, ident2, ..., DELIM, HMAC, p_header, p_parent,
p_metadata, p_content, buffer1, buffer2, ...]
In this list, the ``p_*`` entities are the packed or serialized
versions, so if JSON is used, these are utf8 encoded JSON strings.
"""
content = msg.get('content', {})
if content is None:
content = self.none
elif isinstance(content, dict):
content = self.pack(content)
elif isinstance(content, bytes):
# content is already packed, as in a relayed message
pass
elif isinstance(content, unicode_type):
# should be bytes, but JSON often spits out unicode
content = content.encode('utf8')
else:
raise TypeError("Content incorrect type: %s"%type(content))
real_message = [self.pack(msg['header']),
self.pack(msg['parent_header']),
self.pack(msg['metadata']),
content,
]
to_send = []
if isinstance(ident, list):
# accept list of idents
to_send.extend(ident)
elif ident is not None:
to_send.append(ident)
to_send.append(DELIM)
signature = self.sign(real_message)
to_send.append(signature)
to_send.extend(real_message)
return to_send
def send(self, stream, msg_or_type, content=None, parent=None, ident=None,
buffers=None, track=False, header=None, metadata=None):
"""Build and send a message via stream or socket.
The message format used by this function internally is as follows:
[ident1,ident2,...,DELIM,HMAC,p_header,p_parent,p_content,
buffer1,buffer2,...]
The serialize/unserialize methods convert the nested message dict into this
format.
Parameters
----------
stream : zmq.Socket or ZMQStream
The socket-like object used to send the data.
msg_or_type : str or Message/dict
Normally, msg_or_type will be a msg_type unless a message is being
sent more than once. If a header is supplied, this can be set to
None and the msg_type will be pulled from the header.
content : dict or None
The content of the message (ignored if msg_or_type is a message).
header : dict or None
            The header dict for the message (ignored if msg_or_type is a message).
parent : Message or dict or None
The parent or parent header describing the parent of this message
(ignored if msg_or_type is a message).
ident : bytes or list of bytes
The zmq.IDENTITY routing path.
metadata : dict or None
The metadata describing the message
buffers : list or None
The already-serialized buffers to be appended to the message.
track : bool
Whether to track. Only for use with Sockets, because ZMQStream
objects cannot track messages.
Returns
-------
msg : dict
The constructed message.
"""
if not isinstance(stream, zmq.Socket):
# ZMQStreams and dummy sockets do not support tracking.
track = False
if isinstance(msg_or_type, (Message, dict)):
# We got a Message or message dict, not a msg_type so don't
# build a new Message.
msg = msg_or_type
else:
msg = self.msg(msg_or_type, content=content, parent=parent,
header=header, metadata=metadata)
if not os.getpid() == self.pid:
io.rprint("WARNING: attempted to send message from fork")
io.rprint(msg)
return
buffers = [] if buffers is None else buffers
to_send = self.serialize(msg, ident)
to_send.extend(buffers)
longest = max([ len(s) for s in to_send ])
copy = (longest < self.copy_threshold)
if buffers and track and not copy:
# only really track when we are doing zero-copy buffers
tracker = stream.send_multipart(to_send, copy=False, track=True)
else:
# use dummy tracker, which will be done immediately
tracker = DONE
stream.send_multipart(to_send, copy=copy)
if self.debug:
pprint.pprint(msg)
pprint.pprint(to_send)
pprint.pprint(buffers)
msg['tracker'] = tracker
return msg
def send_raw(self, stream, msg_list, flags=0, copy=True, ident=None):
"""Send a raw message via ident path.
        This method is used to send an already serialized message.
Parameters
----------
stream : ZMQStream or Socket
The ZMQ stream or socket to use for sending the message.
msg_list : list
The serialized list of messages to send. This only includes the
[p_header,p_parent,p_metadata,p_content,buffer1,buffer2,...] portion of
the message.
ident : ident or list
A single ident or a list of idents to use in sending.
"""
to_send = []
if isinstance(ident, bytes):
ident = [ident]
if ident is not None:
to_send.extend(ident)
to_send.append(DELIM)
to_send.append(self.sign(msg_list))
to_send.extend(msg_list)
stream.send_multipart(to_send, flags, copy=copy)
def recv(self, socket, mode=zmq.NOBLOCK, content=True, copy=True):
"""Receive and unpack a message.
Parameters
----------
socket : ZMQStream or Socket
The socket or stream to use in receiving.
Returns
-------
[idents], msg
[idents] is a list of idents and msg is a nested message dict of
same format as self.msg returns.
"""
if isinstance(socket, ZMQStream):
socket = socket.socket
try:
msg_list = socket.recv_multipart(mode, copy=copy)
except zmq.ZMQError as e:
if e.errno == zmq.EAGAIN:
# We can convert EAGAIN to None as we know in this case
# recv_multipart won't return None.
return None,None
else:
raise
# split multipart message into identity list and message dict
# invalid large messages can cause very expensive string comparisons
idents, msg_list = self.feed_identities(msg_list, copy)
try:
return idents, self.unserialize(msg_list, content=content, copy=copy)
except Exception as e:
# TODO: handle it
raise e
def feed_identities(self, msg_list, copy=True):
"""Split the identities from the rest of the message.
Feed until DELIM is reached, then return the prefix as idents and
remainder as msg_list. This is easily broken by setting an IDENT to DELIM,
but that would be silly.
Parameters
----------
msg_list : a list of Message or bytes objects
The message to be split.
copy : bool
flag determining whether the arguments are bytes or Messages
Returns
-------
(idents, msg_list) : two lists
idents will always be a list of bytes, each of which is a ZMQ
identity. msg_list will be a list of bytes or zmq.Messages of the
form [HMAC,p_header,p_parent,p_content,buffer1,buffer2,...] and
should be unpackable/unserializable via self.unserialize at this
point.
"""
if copy:
idx = msg_list.index(DELIM)
return msg_list[:idx], msg_list[idx+1:]
else:
failed = True
for idx,m in enumerate(msg_list):
if m.bytes == DELIM:
failed = False
break
if failed:
raise ValueError("DELIM not in msg_list")
idents, msg_list = msg_list[:idx], msg_list[idx+1:]
return [m.bytes for m in idents], msg_list
def _add_digest(self, signature):
"""add a digest to history to protect against replay attacks"""
if self.digest_history_size == 0:
# no history, never add digests
return
self.digest_history.add(signature)
if len(self.digest_history) > self.digest_history_size:
# threshold reached, cull 10%
self._cull_digest_history()
def _cull_digest_history(self):
"""cull the digest history
Removes a randomly selected 10% of the digest history
"""
current = len(self.digest_history)
n_to_cull = max(int(current // 10), current - self.digest_history_size)
if n_to_cull >= current:
self.digest_history = set()
return
to_cull = random.sample(self.digest_history, n_to_cull)
self.digest_history.difference_update(to_cull)
def unserialize(self, msg_list, content=True, copy=True):
"""Unserialize a msg_list to a nested message dict.
This is roughly the inverse of serialize. The serialize/unserialize
methods work with full message lists, whereas pack/unpack work with
the individual message parts in the message list.
Parameters
----------
msg_list : list of bytes or Message objects
The list of message parts of the form [HMAC,p_header,p_parent,
p_metadata,p_content,buffer1,buffer2,...].
content : bool (True)
Whether to unpack the content dict (True), or leave it packed
(False).
copy : bool (True)
Whether to return the bytes (True), or the non-copying Message
object in each place (False).
Returns
-------
msg : dict
The nested message dict with top-level keys [header, parent_header,
content, buffers].
"""
minlen = 5
message = {}
if not copy:
for i in range(minlen):
msg_list[i] = msg_list[i].bytes
if self.auth is not None:
signature = msg_list[0]
if not signature:
raise ValueError("Unsigned Message")
if signature in self.digest_history:
raise ValueError("Duplicate Signature: %r" % signature)
self._add_digest(signature)
check = self.sign(msg_list[1:5])
if not compare_digest(signature, check):
raise ValueError("Invalid Signature: %r" % signature)
if not len(msg_list) >= minlen:
raise TypeError("malformed message, must have at least %i elements"%minlen)
header = self.unpack(msg_list[1])
message['header'] = extract_dates(header)
message['msg_id'] = header['msg_id']
message['msg_type'] = header['msg_type']
message['parent_header'] = extract_dates(self.unpack(msg_list[2]))
message['metadata'] = self.unpack(msg_list[3])
if content:
message['content'] = self.unpack(msg_list[4])
else:
message['content'] = msg_list[4]
message['buffers'] = msg_list[5:]
return message
def test_msg2obj():
am = dict(x=1)
ao = Message(am)
assert ao.x == am['x']
am['y'] = dict(z=1)
ao = Message(am)
assert ao.y.z == am['y']['z']
k1, k2 = 'y', 'z'
assert ao[k1][k2] == am[k1][k2]
am2 = dict(ao)
assert am['x'] == am2['x']
assert am['y']['z'] == am2['y']['z']
|
WillisXChen/django-oscar
|
oscar/lib/python2.7/site-packages/IPython/kernel/zmq/session.py
|
Python
|
bsd-3-clause
| 30,948
|
[
"Brian"
] |
62af6776910b45820618002b4c0029f82cf8a202cbe1f074c9492fae69a547f0
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pytest
from mock import MagicMock
import ansible.constants as C
from ansible.cli.galaxy import GalaxyCLI, SERVER_DEF
from ansible.galaxy.token import GalaxyToken, NoTokenSentinel
from ansible.module_utils._text import to_bytes, to_text
@pytest.fixture()
def b_token_file(request, tmp_path_factory):
b_test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Token'))
b_token_path = os.path.join(b_test_dir, b"token.yml")
token = getattr(request, 'param', None)
if token:
with open(b_token_path, 'wb') as token_fd:
token_fd.write(b"token: %s" % to_bytes(token))
orig_token_path = C.GALAXY_TOKEN_PATH
C.GALAXY_TOKEN_PATH = to_text(b_token_path)
try:
yield b_token_path
finally:
C.GALAXY_TOKEN_PATH = orig_token_path
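# Note: the fixture above only writes a token file when a value is supplied
# via indirect parametrization, e.g.:
#   @pytest.mark.parametrize('b_token_file', ['file'], indirect=True)
# as the tests below demonstrate; otherwise the path simply does not exist.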
def test_client_id(monkeypatch):
monkeypatch.setattr(C, 'GALAXY_SERVER_LIST', ['server1', 'server2'])
test_server_config = {option[0]: None for option in SERVER_DEF}
test_server_config.update(
{
'url': 'http://my_galaxy_ng:8000/api/automation-hub/',
'auth_url': 'http://my_keycloak:8080/auth/realms/myco/protocol/openid-connect/token',
'client_id': 'galaxy-ng',
'token': 'access_token',
}
)
test_server_default = {option[0]: None for option in SERVER_DEF}
test_server_default.update(
{
'url': 'https://cloud.redhat.com/api/automation-hub/',
'auth_url': 'https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token',
'token': 'access_token',
}
)
get_plugin_options = MagicMock(side_effect=[test_server_config, test_server_default])
monkeypatch.setattr(C.config, 'get_plugin_options', get_plugin_options)
cli_args = [
'ansible-galaxy',
'collection',
'install',
'namespace.collection:1.0.0',
]
galaxy_cli = GalaxyCLI(args=cli_args)
mock_execute_install = MagicMock()
monkeypatch.setattr(galaxy_cli, '_execute_install_collection', mock_execute_install)
galaxy_cli.run()
assert galaxy_cli.api_servers[0].token.client_id == 'galaxy-ng'
assert galaxy_cli.api_servers[1].token.client_id == 'cloud-services'
def test_token_explicit(b_token_file):
assert GalaxyToken(token="explicit").get() == "explicit"
@pytest.mark.parametrize('b_token_file', ['file'], indirect=True)
def test_token_explicit_override_file(b_token_file):
assert GalaxyToken(token="explicit").get() == "explicit"
@pytest.mark.parametrize('b_token_file', ['file'], indirect=True)
def test_token_from_file(b_token_file):
assert GalaxyToken().get() == "file"
def test_token_from_file_missing(b_token_file):
assert GalaxyToken().get() is None
@pytest.mark.parametrize('b_token_file', ['file'], indirect=True)
def test_token_none(b_token_file):
assert GalaxyToken(token=NoTokenSentinel).get() is None
|
mattclay/ansible
|
test/units/galaxy/test_token.py
|
Python
|
gpl-3.0
| 3,218
|
[
"Galaxy"
] |
d8fa8acce1b937febd7aba36ab2c6beeedd93ec2966c3815b9cfc095ce6195b8
|
"""Optimise the cache."""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
from zeroinstall import _, logger
import os, sys
def _already_linked(a, b):
ai = os.stat(a)
bi = os.stat(b)
return (ai.st_dev, ai.st_ino) == (bi.st_dev, bi.st_ino)
def _byte_identical(a, b):
with open(a, 'rb') as af:
with open(b, 'rb') as bf:
while True:
adata = af.read(100)
bdata = bf.read(100)
if adata != bdata:
return False
if not adata:
return True
def _link(a, b, tmpfile):
"""Keep 'a', delete 'b' and hard-link to 'a'"""
if not _byte_identical(a, b):
logger.warn(_("Files should be identical, but they're not!\n%(file_a)s\n%(file_b)s"), {'file_a': a, 'file_b': b})
b_dir = os.path.dirname(b)
old_mode = os.lstat(b_dir).st_mode
os.chmod(b_dir, old_mode | 0o200) # Need write access briefly
try:
os.link(a, tmpfile)
try:
os.rename(tmpfile, b)
except:
os.unlink(tmpfile)
raise
finally:
os.chmod(b_dir, old_mode)
def optimise(impl_dir):
"""Scan an implementation cache directory for duplicate files, and
hard-link any duplicates together to save space.
@param impl_dir: a $cache/0install.net/implementations directory
@type impl_dir: str
@return: (unique bytes, duplicated bytes, already linked, manifest size)
@rtype: (int, int, int, int)"""
first_copy = {} # TypeDigest -> Path
dup_size = uniq_size = already_linked = man_size = 0
import random
from zeroinstall.zerostore import BadDigest, parse_algorithm_digest_pair
for x in range(10):
tmpfile = os.path.join(impl_dir, 'optimise-%d' % random.randint(0, 1000000))
if not os.path.exists(tmpfile):
break
else:
raise Exception(_("Can't generate unused tempfile name!"))
dirs = os.listdir(impl_dir)
total = len(dirs)
msg = ""
def clear():
print("\r" + (" " * len(msg)) + "\r", end='')
for i, impl in enumerate(dirs):
clear()
msg = _("[%(done)d / %(total)d] Reading manifests...") % {'done': i, 'total': total}
print(msg, end='')
sys.stdout.flush()
try:
alg, manifest_digest = parse_algorithm_digest_pair(impl)
except BadDigest:
logger.warn(_("Skipping non-implementation '%s'"), impl)
continue
		if alg == 'sha1': continue
		manifest_path = os.path.join(impl_dir, impl, '.manifest')
		try:
			ms = open(manifest_path, 'rt')
		except OSError as ex:
			logger.warn(_("Failed to read manifest file '%(manifest_path)s': %(exception)s"), {'manifest_path': manifest_path, 'exception': str(ex)})
			continue
		man_size += os.path.getsize(manifest_path)
dir = ""
for line in ms:
if line[0] == 'D':
itype, path = line.split(' ', 1)
assert path.startswith('/')
dir = path[1:-1] # Strip slash and newline
continue
if line[0] == "S":
itype, digest, size, rest = line.split(' ', 3)
uniq_size += int(size)
continue
assert line[0] in "FX"
itype, digest, mtime, size, path = line.split(' ', 4)
path = path[:-1] # Strip newline
size = int(size)
key = (itype, digest, mtime, size)
loc_path = (impl, dir, path)
first_loc = first_copy.get(key, None)
if first_loc:
first_full = os.path.join(impl_dir, *first_loc)
new_full = os.path.join(impl_dir, *loc_path)
if _already_linked(first_full, new_full):
already_linked += size
else:
_link(first_full, new_full, tmpfile)
dup_size += size
else:
first_copy[key] = loc_path
uniq_size += size
ms.close()
clear()
return (uniq_size, dup_size, already_linked, man_size)
|
timdiels/0install
|
zeroinstall/zerostore/optimise.py
|
Python
|
lgpl-2.1
| 3,542
|
[
"VisIt"
] |
6561e59ddfacc37ba8988d8e57dd9ed6f59f9c7908f475525bec2c3bb31cae75
|
"""
This module contains logic for the home page of the web application, from which the user can visit any page of the
application.
"""
from pyramid.view import view_config
@view_config(route_name='home', renderer='homepage.mako')
def home_view(request):
"""
This function executes the logic for the home page, allowing the user to access Project Conway.
@param request The request sent to this page of the web application.
"""
return {'title': 'Home',
'page': 'homepage'}
|
CO600GOL/Game_of_life
|
ProjectConway/projectconway/views/home.py
|
Python
|
mit
| 507
|
[
"VisIt"
] |
f0c8cc304a2c90189e6dea539e36143302e271fd11f7e46c17d2366101c99654
|
import math
import cmath
#
#
def radians(degrees):
return math.pi * degrees / 180.0
def degrees(rad):
return 180.0 * rad / math.pi
def rotate_2d(theta, x, y):
"""Rotate point by theta"""
cangle = cmath.exp(theta * 1j)
cx = cangle * complex(x, y)
return cx.real, cx.imag
def angle(x, y):
"""return phase angle in radians"""
return cmath.phase(complex(x, y))
#
#
def distance_from_line(xy, line):
# see http://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line#Line_defined_by_two_points
x0, y0 = xy
(x1, y1), (x2, y2) = line
a = y2 - y1
b = x2 - x1
a = (a * a) + (b * b)
q = math.sqrt(a)
if q == 0.0:
# no line at all!
return 100
a = x0 * (y2 - y1)
b = y0 * (x2 - x1)
a = a - b + (x2 * y1) - (y2 * x1)
return abs(a) / q
def distance(xy0, xy1):
x0, y0 = xy0
x1, y1 = xy1
dx = x0 - x1
dy = y0 - y1
return math.sqrt((dx*dx) + (dy*dy))
#
#
class Material:
def __init__(self, w, h, t):
self.width = w
self.height = h
self.thickness = t
#
#
class Config:
cut_colour = 3
draw_colour = 4
dotted_colour = 5
engrave_colour = 2
thick_colour = 6
thin_colour = 7
def __init__(self, **kwargs):
self.data = kwargs
def cut(self):
return self.cut_colour
#
#
class Extent:
def __init__(self):
self.mina = None
self.maxa = None
def add(self, a):
if self.mina is None:
self.mina = a
self.maxa = a
return
if a < self.mina:
self.mina = a
elif a > self.maxa:
self.maxa = a
def mid(self):
return (self.maxa + self.mina) / 2.0
def __repr__(self):
return "Extent(min=%f,max=%f)" % (self.mina, self.maxa)
#
#
class Polygon:
def __init__(self, xy=(0, 0), **kwargs):
self.points = []
self.arcs = []
self.origin = xy
self.kwargs = kwargs
def add(self, x, y):
self.points.append((x, y))
def add_arc(self, arc):
self.arcs.append(arc)
def add_poly(self, poly):
self.points += poly.points
self.arcs += poly.arcs
def copy(self):
poly = Polygon(self.origin)
poly.arcs = [ arc.copy() for arc in self.arcs ]
for point in self.points:
poly.add(*point)
poly.kwargs = self.kwargs
if hasattr(self, "info"):
            poly.info = self.info
return poly
def close(self):
self.points.append(self.points[0])
def lines(self):
if self.points:
x0, y0 = self.points[0]
for x, y in self.points[1:]:
line = (x0, y0), (x, y)
x0, y0 = x, y
yield line
def rotate(self, degrees):
points = []
rad = radians(degrees)
for x, y in self.points:
points.append(rotate_2d(rad, x, y))
self.points = points
for arc in self.arcs:
arc.rotate(degrees)
if self.origin:
self.origin = rotate_2d(rad, self.origin[0], self.origin[1])
def translate(self, dx, dy):
points = []
for x, y in self.points:
points.append((x + dx, y + dy))
self.points = points
for arc in self.arcs:
arc.translate(dx, dy)
if self.origin:
self.origin = self.origin[0] + dx, self.origin[1] + dy
def move(self, x, y):
self.translate(x - self.origin[0], y - self.origin[1])
def reflect_v(self):
points = []
for point in self.points:
points.append((-point[0], point[1]))
self.points = points
for arc in self.arcs:
arc.reflect_v()
def extent(self):
xx = Extent()
yy = Extent()
for x, y in self.points:
xx.add(x)
yy.add(y)
# TODO : needs extent of arcs too
return Rectangle((xx.mina, yy.mina), (xx.maxa, yy.maxa))
def centre(self):
xx, yy = Extent(), Extent()
for x, y in self.points:
xx.add(x)
yy.add(y)
return xx.mid(), yy.mid()
def draw(self, drawing, colour):
colour = self.kwargs.get("colour", colour)
for xy0, xy1 in self.lines():
drawing.line(xy0, xy1, color=colour)
for arc in self.arcs:
arc.draw(drawing, colour)
#
#
class Rectangle(Polygon):
def __init__(self, xy0, xy1, **kwargs):
x0, y0 = xy0
x1, y1 = xy1
Polygon.__init__(self, (x0, y0), **kwargs)
self.corner = x1, y1
self.add(x0, y0)
self.add(x1, y0)
self.add(x1, y1)
self.add(x0, y1)
self.close()
self.str = "Rectangle((%f,%f),(%f,%f))" % (x0, y0, x1, y1)
def corners(self):
return self.points[:-1]
def __repr__(self):
return self.str
#
#
def normalise_angle(x):
while x >= 360:
x -= 360
while x < 0:
x += 360
return x
class Arc:
def __init__(self, xy, radius, start_angle, end_angle, **kwargs):
self.x, self.y = xy
self.radius = radius
self.start_angle = start_angle
self.end_angle = end_angle
self.kwargs = kwargs
self.hole = kwargs.get("hole", False)
def is_circle(self):
a1, a2 = normalise_angle(self.start_angle), normalise_angle(self.end_angle)
return a1 == a2
def rotate(self, degrees):
rad = radians(degrees)
self.x, self.y = rotate_2d(rad, self.x, self.y)
# rotate start/end angles
# check for < 0, or > 360 condition
if self.is_circle():
return
def rot(a):
a += degrees
while a < 0:
a += 360
while a >= 360:
a -= 360
return a
self.start_angle = rot(self.start_angle)
self.end_angle = rot(self.end_angle)
def translate(self, dx, dy):
self.x += dx
self.y += dy
def move(self, x, y):
self.x = x
self.y = y
def reflect_v(self):
self.x = -self.x
if self.is_circle():
return
def reflect_angle(a):
if 0 <= a < 180:
return 180 - a
b = a - 180
while b < 0:
b += 360
return b
self.start_angle = reflect_angle(self.start_angle)
self.end_angle = reflect_angle(self.end_angle)
self.start_angle, self.end_angle = self.end_angle, self.start_angle
def reflect_h(self):
self.rotate(90)
self.reflect_v()
self.rotate(-90)
def copy(self):
a = Arc((self.x, self.y), self.radius, self.start_angle, self.end_angle)
a.kwargs = self.kwargs
a.hole = self.hole
return a
def draw(self, drawing, colour):
colour = self.kwargs.get("colour", colour)
if self.is_circle():
drawing.circle(radius=self.radius, center=(self.x, self.y), color=colour)
else:
drawing.arc(radius=self.radius, center=(self.x, self.y), startangle=self.start_angle, endangle=self.end_angle, color=colour)
def __repr__(self):
return "Arc(%s,%s,%s,%s,%s)" % (self.x, self.y, self.radius, self.start_angle, self.end_angle)
#
#
a = Arc((0, 0), 1, 90, 180)
a.rotate(90)
assert a.start_angle == 180.0
assert a.end_angle == 270.0
a.rotate(90)
assert a.start_angle == 270.0
assert a.end_angle == 0.0, a.end_angle
#
#
class Circle(Arc):
def __init__(self, xy, radius, **kwargs):
Arc.__init__(self, xy, radius, 0, 360, **kwargs)
self.hole = kwargs.get("hole", True)
#
#
class Collection:
def __init__(self, work=None, colour=None):
self.data = []
self.origin = None
self.arcs = []
self.colour = colour
if work:
self.add(work)
def add(self, obj):
self.data.append(obj)
def draw(self, drawing, colour=None):
for data in self.data:
data.draw(drawing, colour or self.colour)
def rotate(self, degrees):
for data in self.data:
data.rotate(degrees)
def translate(self, dx, dy):
for data in self.data:
data.translate(dx, dy)
def move(self, x, y):
for data in self.data:
data.move(x, y)
def reflect_v(self):
for data in self.data:
data.reflect_v()
def lines(self):
for data in self.data:
for line in data.lines():
yield line
def copy(self):
c = Collection()
for data in self.data:
c.add(data.copy())
return c
def extent(self):
xx = Extent()
yy = Extent()
for data in self.data:
r = data.extent()
xx.add(r.origin[0])
xx.add(r.corner[0])
yy.add(r.origin[1])
yy.add(r.corner[1])
return Rectangle((xx.mina, yy.mina), (xx.maxa, yy.maxa))
#
# Text
class Text:
def __init__(self, xy, text, **kwargs):
self.origin = xy
self.text = text
self.rot = 0
self.kwargs = kwargs
def translate(self, dx, dy):
self.origin = self.origin[0] + dx, self.origin[1] + dy
def rotate(self, degrees):
rad = radians(degrees)
self.origin = rotate_2d(rad, self.origin[0], self.origin[1])
self.rot += degrees
def draw(self, drawing, colour):
colour = self.kwargs.get("colour", colour)
drawing.text(self.text, insert=self.origin, rotation=self.rot, color=colour, **self.kwargs)
#
#
class TCut:
def __init__(self, w, d, shank, nut_w, nut_t, stress_hole=None):
self.w = w
self.d = d
self.shank = shank
self.nut_w = nut_w
self.nut_t = nut_t
self.stress_hole = stress_hole
def make_elev(self, xy, orient):
shape = Polygon()
width = self.w / 2.0
n_width = self.nut_w / 2.0
shape.add(-width, 0)
shape.add(-width, -self.shank)
shape.add(-n_width, -self.shank)
shape.add(-n_width, -(self.shank + self.nut_t))
shape.add(-width, -(self.shank + self.nut_t))
shape.add(-width, -self.d)
shape.add(width, -self.d)
shape.add(width, -(self.shank + self.nut_t))
shape.add(n_width, -(self.shank + self.nut_t))
shape.add(n_width, -self.shank)
shape.add(width, -self.shank)
shape.add(width, 0)
if self.stress_hole:
shape.add_arc(Circle((-n_width, -self.shank), self.stress_hole))
shape.add_arc(Circle((n_width, -self.shank), self.stress_hole))
shape.rotate(orient)
shape.translate(*xy)
shape.origin = xy
return shape
def make_plan(self, xy, orient):
shape = Polygon()
shape.add_arc(Circle((0, 0), self.w / 2.0))
shape.rotate(orient)
shape.translate(*xy)
shape.origin = xy
return shape
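# Illustrative TCut use (dimensions in arbitrary units, values assumed):
#   t = TCut(w=3.2, d=16.0, shank=6.0, nut_w=5.7, nut_t=2.5, stress_hole=0.6)
#   slot = t.make_elev((0, 0), orient=0)   # side profile with nut pocket
#   hole = t.make_plan((0, 0), orient=0)   # matching bolt hole, plan view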
#
#
#
# Maths used for kerf calculations
def parallel(points, d, inner):
x0, y0 = points[0]
x1, y1 = points[1]
dx, dy = x1 - x0, y1 - y0
a = angle(dx, dy)
# vector at 90 degrees to line
if inner:
x, y = rotate_2d(a, 0, d)
else:
x, y = rotate_2d(a, 0, -d)
return (x0 + x, y0 + y), (x1 + x, y1 + y)
def vertical(xy0, xy1):
x0, _ = xy0
x1, _ = xy1
return x1 == x0
def equation_of_line(xy0, xy1):
x0, y0 = xy0
x1, y1 = xy1
dx, dy = x1 - x0, y1 - y0
m = (y1 - y0) / (x1 - x0)
c = y0 - (m * x0)
return m, c
def intersect_lines(e0, e1):
# given 2 equations of line
# calculate intersection point
m0, c0 = e0
m1, c1 = e1
x = (c0 - c1) / (m1 - m0)
y = (m0 * x) + c0
return x, y
def solve_for_x(x, xy):
m, b = equation_of_line(*xy)
y = (x * m) + b
return x, y
def intersect(xy0, xy1):
if vertical(*xy0):
# solve for x = x0
return solve_for_x(xy0[1][0], xy1)
elif vertical(*xy1):
# solve for x = x1
return solve_for_x(xy1[0][0], xy0)
else:
e0, e1 = equation_of_line(*xy0), equation_of_line(*xy1)
return intersect_lines(e0, e1)
def parallel_intersect(xy0, xy1, d, inner):
xy0 = parallel(xy0, d, inner)
xy1 = parallel(xy1, d, inner)
return intersect(xy0, xy1)
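# Worked example: offsetting the right angle formed by (0,0)-(10,0) and
# (10,0)-(10,10) inwards by a kerf of 0.1 gives the shifted corner point:
#   parallel_intersect(((0, 0), (10, 0)), ((10, 0), (10, 10)), 0.1, True)
#   # -> (9.9, 0.1)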
#
#
def remove_point(poly, xy, cuts):
found = False
# check first segment
if poly.points[0] == xy:
for cut in cuts:
if on_segment(cut, poly.points[:2]):
poly.points[0] = cut
found = True
# check last segment
if poly.points[-1] == xy:
for cut in cuts:
if on_segment(cut, poly.points[-2:]):
poly.points[-1] = cut
found = True
if found:
return poly
# need to split the polygon into two parts
polys = []
p = poly.copy()
p.points = []
for point in poly.points:
p.add(*point)
if point == xy:
polys.append(p)
p = Polygon()
p.add(*point)
polys.append(p)
c = Collection()
for p in polys:
c.add(remove_point(p, xy, cuts))
return c
#
#
def visit(c, fn):
if isinstance(c, Collection):
for d in c.data:
visit(d, fn)
else:
fn(c)
def has_point(poly, xy):
for point in poly.points:
if point == xy:
return True
return False
def make_unit_vector(xy1, xy2):
(x1, y1), (x2, y2) = xy1, xy2
v = complex(x2-x1, y2-y1)
v /= abs(v)
return v
def corner(shape, xy, radius, inside=False, tracker=None):
if not isinstance(shape, Collection):
c = Collection()
c.add(shape)
shape = c
class Visitor:
def __init__(self, parent):
self.parent = parent
self.cuts = None
def on_poly(self, p):
# find the polygon with the specified corner
if not isinstance(p, Polygon):
return
if not has_point(p, xy):
return
self.on_match(p)
def on_match(self, p):
# find the 2 line segments that make up the corner to curve
lines = [ None, None ]
for xy0, xy1 in p.lines():
if not ((xy0 == xy) or (xy1 == xy)):
continue
if xy0 == xy:
lines[1] = xy0, xy1
elif xy1 == xy:
lines[0] = xy0, xy1
else:
raise Exception("not found")
# arrange points as xy1, xy2, xy3, where xy2 is the corner to curve
assert lines[0][1] == lines[1][0]
data = lines[0][0], lines[0][1], lines[1][1]
# make unit vectors for the vertex
v1 = make_unit_vector(data[1], data[0])
v2 = make_unit_vector(data[1], data[2])
# generate the corner arc
p1, p2 = self.corner(v1, v2, complex(*data[1]))
# save the line segment cut points
self.cuts = [ (v.real, v.imag) for v in [ p1, p2 ] ]
def corner(self, v1, v2, xy):
s = v1 + v2 # vector at mid angle - centre of arc lies on this line
s /= abs(s) # unit vector
angle = cmath.phase(s) - cmath.phase(v1)
d = abs(radius / math.tan(angle)) # distance to start of arc from vertex
v1 *= d
v2 *= d
# distance along s vector to centre of arc
h = abs(complex(radius, d))
s *= h
v0 = s + xy # centre of arc
va = v1 - s # vectors to cut points
vb = v2 - s
# angles to cut points from arc centre
a0, a1 = [ degrees(cmath.phase(v)) for v in [ va, vb ] ]
# add the arc
if normalise_angle(a1 - a0) > 180:
a0, a1 = a1, a0
if inside:
a0, a1 = a1, a0
c = Arc((v0.real, v0.imag), radius, a0, a1)
self.parent.add(c)
# return end points of polygon
return v1 + xy, v2 + xy
arcs = Collection()
v = Visitor(arcs)
visit(shape, v.on_poly)
shape.add(arcs)
# make the cuts
def cut(c):
for i, d in enumerate(c.data):
if isinstance(d, Collection):
cut(d)
continue
if not isinstance(d, Polygon):
continue
if not has_point(d, xy):
continue
c.data[i] = remove_point(d, xy, v.cuts)
return c
cut(shape)
return shape
#
#
def replace(line, shape):
points = shape.points[:]
start, end = points[0], points[-1]
dstart = distance(line[0], start)
dend = distance(line[0], end)
if dend < dstart:
points.reverse()
start, end = end, start
poly = Polygon()
poly.add(*line[0])
for point in points:
poly.add(*point)
poly.add(*line[1])
return poly.lines()
#
#
def on_segment(xy, line, margin=0.01):
d = distance_from_line(xy, line)
if d > margin:
return False
    (x0, y0), (x1, y1) = line
    # are we on the segment? within() orders its own bounds, so the endpoints
    # need no pre-sorting.
    def within(v, lo, hi):
        if lo > hi:
            lo, hi = hi, lo
        return (lo-margin) <= v <= (hi+margin)
    return within(xy[0], x0, x1) and within(xy[1], y0, y1)
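# e.g. (illustrative): on_segment((0.5, 0.5), ((0, 0), (1, 1))) is True, while
# on_segment((0.5, 0.6), ((0, 0), (1, 1))) falls outside the default margin.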
def find_hit(parent, item):
for shape in parent.data:
if isinstance(shape, Polygon) or isinstance(shape, Rectangle):
for line in shape.lines():
if on_segment(item.origin, line):
return parent, shape
elif isinstance(shape, Collection):
p, shape = find_hit(shape, item)
if shape:
return p, shape
return parent, None
def change_shape(parent, old, new):
data = []
for d in parent.data:
if d == old:
data.append(new)
else:
data.append(d)
parent.data = data
def splice(parent, item):
p, src = find_hit(parent, item)
if src:
w = splice_inner(src, item)
change_shape(parent, src, w)
else:
print "no match found for", item
return parent
def splice_inner(src, item):
lines = []
arcs = []
for line in src.lines():
if on_segment(item.origin, line):
for subst in replace(line, item):
lines.append(subst)
arcs += item.arcs
else:
lines.append(line)
shape = Polygon(src.origin)
shape.add(*lines[0][0])
for line in lines:
shape.add(*line[1])
shape.arcs = src.arcs[:]
shape.arcs += [ arc.copy() for arc in arcs ]
return shape
#
#
def cutout(width, depth):
poly = Polygon()
width /= 2.0
poly.add(-width, 0)
poly.add(-width, depth)
poly.add(width, depth)
poly.add(width, 0)
poly.origin = 0, 0
return poly
#
#
def hinge(work, xy0, xy1, on, off, pitch):
c = Collection()
c.add(work)
def frange(a, b, step):
while a < b:
yield a
a += step
y0, y1 = xy0[1], xy1[1]
x0, x1 = xy0[0], xy1[0]
for x in frange(x0, x1, pitch*2):
y = y0
poly = Polygon()
poly.add(x, y)
y += (on+off) / 2.0 # for the first cut
poly.add(x, y)
y += off
c.add(poly)
while (y+on) < y1:
poly = Polygon()
poly.add(x, y)
y += on
poly.add(x, y)
y += off
c.add(poly)
poly = Polygon()
poly.add(x, y)
poly.add(x, y1)
c.add(poly)
for x in frange(x0+pitch, x1, pitch*2):
for y in frange(y0+off, y1, on+off):
poly = Polygon()
poly.add(x, y)
poly.add(x, min(y+on, y1-off))
c.add(poly)
return c
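# Illustrative sketch (not part of the original file): add a living-hinge cut
# pattern to a panel between x=10..40 and y=0..100, with 8-unit cuts, 2-unit
# bridges and a 4-unit column pitch:
#   hinged = hinge(panel, (10, 0), (40, 100), on=8, off=2, pitch=4)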
# FIN
|
DaveBerkeley/lasercut
|
laser/laser.py
|
Python
|
gpl-2.0
| 20,008
|
[
"VisIt"
] |
2bdc12dec0774aa969804d570076a699b29e33d988a1d93c3aeb8d84ab7553aa
|
from collections import deque
import time
import requests
# Constants
BRAZIL = 'br'
EUROPE_NORDIC_EAST = 'eune'
EUROPE_WEST = 'euw'
KOREA = 'kr'
LATIN_AMERICA_NORTH = 'lan'
LATIN_AMERICA_SOUTH = 'las'
NORTH_AMERICA = 'na'
OCEANIA = 'oce'
RUSSIA = 'ru'
TURKEY = 'tr'
# Platforms
platforms = {
BRAZIL: 'BR1',
EUROPE_NORDIC_EAST: 'EUN1',
EUROPE_WEST: 'EUW1',
KOREA: 'KR',
LATIN_AMERICA_NORTH: 'LA1',
LATIN_AMERICA_SOUTH: 'LA2',
NORTH_AMERICA: 'NA1',
OCEANIA: 'OC1',
RUSSIA: 'RU',
TURKEY: 'TR1'
}
queue_types = [
'CUSTOM', # Custom games
'NORMAL_5x5_BLIND', # Normal 5v5 blind pick
    'BOT_5x5', # Historical Summoner's Rift Coop vs AI games
    'BOT_5x5_INTRO', # Summoner's Rift Intro bots
'BOT_5x5_BEGINNER', # Summoner's Rift Coop vs AI Beginner Bot games
'BOT_5x5_INTERMEDIATE', # Historical Summoner's Rift Coop vs AI Intermediate Bot games
'NORMAL_3x3', # Normal 3v3 games
'NORMAL_5x5_DRAFT', # Normal 5v5 Draft Pick games
'ODIN_5x5_BLIND', # Dominion 5v5 Blind Pick games
'ODIN_5x5_DRAFT', # Dominion 5v5 Draft Pick games
'BOT_ODIN_5x5', # Dominion Coop vs AI games
'RANKED_SOLO_5x5', # Ranked Solo 5v5 games
'RANKED_PREMADE_3x3', # Ranked Premade 3v3 games
'RANKED_PREMADE_5x5', # Ranked Premade 5v5 games
'RANKED_TEAM_3x3', # Ranked Team 3v3 games
'RANKED_TEAM_5x5', # Ranked Team 5v5 games
'BOT_TT_3x3', # Twisted Treeline Coop vs AI games
'GROUP_FINDER_5x5', # Team Builder games
'ARAM_5x5', # ARAM games
'ONEFORALL_5x5', # One for All games
'FIRSTBLOOD_1x1', # Snowdown Showdown 1v1 games
'FIRSTBLOOD_2x2', # Snowdown Showdown 2v2 games
'SR_6x6', # Hexakill games
'URF_5x5', # Ultra Rapid Fire games
'BOT_URF_5x5', # Ultra Rapid Fire games played against AI games
'NIGHTMARE_BOT_5x5_RANK1', # Doom Bots Rank 1 games
'NIGHTMARE_BOT_5x5_RANK2', # Doom Bots Rank 2 games
'NIGHTMARE_BOT_5x5_RANK5', # Doom Bots Rank 5 games
'ASCENSION_5x5', # Ascension games
'HEXAKILL', # 6v6 games on twisted treeline
    'KING_PORO_5x5', # King Poro games
'COUNTER_PICK', # Nemesis games,
'BILGEWATER_5x5', # Black Market Brawlers games
]
game_maps = [
{'map_id': 1, 'name': "Summoner's Rift", 'notes': "Summer Variant"},
{'map_id': 2, 'name': "Summoner's Rift", 'notes': "Autumn Variant"},
{'map_id': 3, 'name': "The Proving Grounds", 'notes': "Tutorial Map"},
{'map_id': 4, 'name': "Twisted Treeline", 'notes': "Original Version"},
{'map_id': 8, 'name': "The Crystal Scar", 'notes': "Dominion Map"},
{'map_id': 10, 'name': "Twisted Treeline", 'notes': "Current Version"},
{'map_id': 11, 'name': "Summoner's Rift", 'notes': "Current Version"},
{'map_id': 12, 'name': "Howling Abyss", 'notes': "ARAM Map"},
{'map_id': 14, 'name': "Butcher's Bridge", 'notes': "ARAM Map"},
]
game_modes = [
'CLASSIC', # Classic Summoner's Rift and Twisted Treeline games
'ODIN', # Dominion/Crystal Scar games
'ARAM', # ARAM games
'TUTORIAL', # Tutorial games
'ONEFORALL', # One for All games
'ASCENSION', # Ascension games
'FIRSTBLOOD', # Snowdown Showdown games
'KINGPORO', # King Poro games
]
game_types = [
'CUSTOM_GAME', # Custom games
'TUTORIAL_GAME', # Tutorial games
'MATCHED_GAME', # All other games
]
sub_types = [
'NONE', # Custom games
'NORMAL', # Summoner's Rift unranked games
'NORMAL_3x3', # Twisted Treeline unranked games
'ODIN_UNRANKED', # Dominion/Crystal Scar games
'ARAM_UNRANKED_5v5', # ARAM / Howling Abyss games
'BOT', # Summoner's Rift and Crystal Scar games played against AI
'BOT_3x3', # Twisted Treeline games played against AI
'RANKED_SOLO_5x5', # Summoner's Rift ranked solo queue games
'RANKED_TEAM_3x3', # Twisted Treeline ranked team games
'RANKED_TEAM_5x5', # Summoner's Rift ranked team games
'ONEFORALL_5x5', # One for All games
'FIRSTBLOOD_1x1', # Snowdown Showdown 1x1 games
'FIRSTBLOOD_2x2', # Snowdown Showdown 2x2 games
'SR_6x6', # Hexakill games
'CAP_5x5', # Team Builder games
'URF', # Ultra Rapid Fire games
'URF_BOT', # Ultra Rapid Fire games against AI
'NIGHTMARE_BOT', # Nightmare bots
'ASCENSION', # Ascension games
'HEXAKILL', # Twisted Treeline 6x6 Hexakill
'KING_PORO', # King Poro games
'COUNTER_PICK', # Nemesis games
'BILGEWATER', # Black Market Brawlers games
]
player_stat_summary_types = [
'Unranked', # Summoner's Rift unranked games
'Unranked3x3', # Twisted Treeline unranked games
'OdinUnranked', # Dominion/Crystal Scar games
'AramUnranked5x5', # ARAM / Howling Abyss games
'CoopVsAI', # Summoner's Rift and Crystal Scar games played against AI
'CoopVsAI3x3', # Twisted Treeline games played against AI
'RankedSolo5x5', # Summoner's Rift ranked solo queue games
'RankedTeams3x3', # Twisted Treeline ranked team games
'RankedTeams5x5', # Summoner's Rift ranked team games
'OneForAll5x5', # One for All games
'FirstBlood1x1', # Snowdown Showdown 1x1 games
'FirstBlood2x2', # Snowdown Showdown 2x2 games
'SummonersRift6x6', # Hexakill games
'CAP5x5', # Team Builder games
'URF', # Ultra Rapid Fire games
'URFBots', # Ultra Rapid Fire games played against AI
'NightmareBot', # Summoner's Rift games played against Nightmare AI
'Hexakill', # Twisted Treeline 6x6 Hexakill games
'KingPoro', # King Poro games
'CounterPick', # Nemesis games
'Bilgewater', # Black Market Brawlers games
]
solo_queue, ranked_5s, ranked_3s = 'RANKED_SOLO_5x5', 'RANKED_TEAM_5x5', 'RANKED_TEAM_3x3'
api_versions = {
'champion': 1.2,
'current-game': 1.0,
'featured-games': 1.0,
'game': 1.3,
'league': 2.5,
'lol-static-data': 1.2,
'lol-status': 1.0,
'match': 2.2,
'matchhistory': 2.2,
'matchlist': 2.2,
'stats': 1.3,
'summoner': 1.4,
'team': 2.4
}
class LoLException(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return self.error
error_400 = LoLException("Bad request")
error_401 = LoLException("Unauthorized")
error_404 = LoLException("Game data not found")
error_429 = LoLException("Too many requests")
error_500 = LoLException("Internal server error")
error_503 = LoLException("Service unavailable")
def raise_status(response):
if response.status_code == 400:
raise error_400
elif response.status_code == 401:
raise error_401
elif response.status_code == 404:
raise error_404
elif response.status_code == 429:
raise error_429
elif response.status_code == 500:
raise error_500
elif response.status_code == 503:
raise error_503
else:
response.raise_for_status()
class RateLimit:
def __init__(self, allowed_requests, seconds):
self.allowed_requests = allowed_requests
self.seconds = seconds
self.made_requests = deque()
def __reload(self):
t = time.time()
while len(self.made_requests) > 0 and self.made_requests[0] < t:
self.made_requests.popleft()
def add_request(self):
self.made_requests.append(time.time() + self.seconds)
def request_available(self):
self.__reload()
return len(self.made_requests) < self.allowed_requests
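# Minimal illustration (not part of the original file) of the sliding-window
# limiter above: allow at most ten requests in any ten-second window.
#   limiter = RateLimit(10, 10)
#   if limiter.request_available():
#       limiter.add_request()   # records when this request stops counting
#   else:
#       time.sleep(0.5)         # back off until a slot frees up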
class RiotWatcher:
def __init__(self, key, default_region=NORTH_AMERICA, limits=(RateLimit(10, 10), RateLimit(500, 600), )):
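        # Note: the default RateLimit objects are created once, when this
        # method is defined, so every RiotWatcher constructed with the default
        # 'limits' argument shares the same request counters.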
self.key = key
self.default_region = default_region
self.limits = limits
def can_make_request(self):
for lim in self.limits:
if not lim.request_available():
return False
return True
def base_request(self, url, region, static=False, **kwargs):
if region is None:
region = self.default_region
args = {'api_key': self.key}
for k in kwargs:
if kwargs[k] is not None:
args[k] = kwargs[k]
r = requests.get(
'https://{proxy}.api.pvp.net/api/lol/{static}{region}/{url}'.format(
proxy='global' if static else region,
static='static-data/' if static else '',
region=region,
url=url
),
params=args
)
if not static:
for lim in self.limits:
lim.add_request()
raise_status(r)
return r.json()
def _observer_mode_request(self, url, proxy=None, **kwargs):
if proxy is None:
proxy = self.default_region
args = {'api_key': self.key}
for k in kwargs:
if kwargs[k] is not None:
args[k] = kwargs[k]
r = requests.get(
'https://{proxy}.api.pvp.net/observer-mode/rest/{url}'.format(
proxy=proxy,
url=url
),
params=args
)
for lim in self.limits:
lim.add_request()
raise_status(r)
return r.json()
@staticmethod
def sanitized_name(name):
return name.replace(' ', '').lower()
# champion-v1.2
def _champion_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/champion/{end_url}'.format(
version=api_versions['champion'],
end_url=end_url
),
region,
**kwargs
)
def get_all_champions(self, region=None, free_to_play=False):
return self._champion_request('', region, freeToPlay=free_to_play)
def get_champion(self, champion_id, region=None):
return self._champion_request('{id}'.format(id=champion_id), region)
# current-game-v1.0
def get_current_game(self, summoner_id, platform_id=None, region=None):
if platform_id is None:
platform_id = platforms[self.default_region]
return self._observer_mode_request(
'consumer/getSpectatorGameInfo/{platform}/{summoner_id}'.format(
platform=platform_id,
summoner_id=summoner_id
),
region
)
# featured-game-v1.0
def get_featured_games(self, proxy=None):
return self._observer_mode_request('featured', proxy)
# game-v1.3
def _game_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/game/{end_url}'.format(
version=api_versions['game'],
end_url=end_url
),
region,
**kwargs
)
def get_recent_games(self, summoner_id, region=None):
return self._game_request('by-summoner/{summoner_id}/recent'.format(summoner_id=summoner_id), region)
# league-v2.5
def _league_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/league/{end_url}'.format(
version=api_versions['league'],
end_url=end_url
),
region,
**kwargs
)
def get_league(self, summoner_ids=None, team_ids=None, region=None):
"""summoner_ids and team_ids arguments must be iterable, only one should be specified, not both"""
if (summoner_ids is None) != (team_ids is None):
if summoner_ids is not None:
return self._league_request(
'by-summoner/{summoner_ids}'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
else:
return self._league_request(
'by-team/{team_ids}'.format(team_ids=','.join([str(t) for t in team_ids])),
region
)
def get_league_entry(self, summoner_ids=None, team_ids=None, region=None):
"""summoner_ids and team_ids arguments must be iterable, only one should be specified, not both"""
if (summoner_ids is None) != (team_ids is None):
if summoner_ids is not None:
return self._league_request(
'by-summoner/{summoner_ids}/entry'.format(
summoner_ids=','.join([str(s) for s in summoner_ids])
),
region
)
else:
return self._league_request(
'by-team/{team_ids}/entry'.format(team_ids=','.join([str(t) for t in team_ids])),
region
)
def get_challenger(self, region=None, queue=solo_queue):
return self._league_request('challenger', region, type=queue)
def get_master(self, region=None, queue=solo_queue):
return self._league_request('master', region, type=queue)
# lol-static-data-v1.2
def _static_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/{end_url}'.format(
version=api_versions['lol-static-data'],
end_url=end_url
),
region,
static=True,
**kwargs
)
def static_get_champion_list(self, region=None, locale=None, version=None, data_by_id=None, champ_data=None):
return self._static_request(
'champion',
region,
locale=locale,
version=version,
dataById=data_by_id,
champData=champ_data
)
def static_get_champion(self, champ_id, region=None, locale=None, version=None, champ_data=None):
return self._static_request(
'champion/{id}'.format(id=champ_id),
region,
locale=locale,
version=version,
champData=champ_data
)
def static_get_item_list(self, region=None, locale=None, version=None, item_list_data=None):
return self._static_request('item', region, locale=locale, version=version, itemListData=item_list_data)
def static_get_item(self, item_id, region=None, locale=None, version=None, item_data=None):
return self._static_request(
'item/{id}'.format(id=item_id),
region,
locale=locale,
version=version,
itemData=item_data
)
def static_get_mastery_list(self, region=None, locale=None, version=None, mastery_list_data=None):
return self._static_request(
'mastery',
region,
locale=locale,
version=version,
masteryListData=mastery_list_data
)
def static_get_mastery(self, mastery_id, region=None, locale=None, version=None, mastery_data=None):
return self._static_request(
'mastery/{id}'.format(id=mastery_id),
region,
locale=locale,
version=version,
masteryData=mastery_data
)
def static_get_realm(self, region=None):
return self._static_request('realm', region)
def static_get_rune_list(self, region=None, locale=None, version=None, rune_list_data=None):
return self._static_request('rune', region, locale=locale, version=version, runeListData=rune_list_data)
def static_get_rune(self, rune_id, region=None, locale=None, version=None, rune_data=None):
return self._static_request(
'rune/{id}'.format(id=rune_id),
region,
locale=locale,
version=version,
runeData=rune_data
)
def static_get_summoner_spell_list(self, region=None, locale=None, version=None, data_by_id=None, spell_data=None):
return self._static_request(
'summoner-spell',
region,
locale=locale,
version=version,
dataById=data_by_id,
spellData=spell_data
)
def static_get_summoner_spell(self, spell_id, region=None, locale=None, version=None, spell_data=None):
return self._static_request(
'summoner-spell/{id}'.format(id=spell_id),
region,
locale=locale,
version=version,
spellData=spell_data
)
def static_get_versions(self, region=None):
return self._static_request('versions', region)
# match-v2.2
def _match_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/match/{end_url}'.format(
version=api_versions['match'],
end_url=end_url
),
region,
**kwargs
)
def get_match(self, match_id, region=None, include_timeline=False):
return self._match_request(
'{match_id}'.format(match_id=match_id),
region,
includeTimeline=include_timeline
)
# lol-status-v1.0
@staticmethod
def get_server_status(region=None):
if region is None:
url = 'shards'
else:
url = 'shards/{region}'.format(region=region)
r = requests.get('http://status.leagueoflegends.com/{url}'.format(url=url))
raise_status(r)
return r.json()
# match history-v2.2
def _match_history_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/matchhistory/{end_url}'.format(
version=api_versions['matchhistory'],
end_url=end_url
),
region,
**kwargs
)
def get_match_history(self, summoner_id, region=None, champion_ids=None, ranked_queues=None, begin_index=None,
end_index=None):
return self._match_history_request(
'{summoner_id}'.format(summoner_id=summoner_id),
region,
championIds=champion_ids,
rankedQueues=ranked_queues,
beginIndex=begin_index,
endIndex=end_index
)
# match list-v2.2
def _match_list_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/matchlist/by-summoner/{end_url}'.format(
version=api_versions['matchlist'],
end_url=end_url,
),
region,
**kwargs
)
def get_match_list(self, summoner_id, region=None, champion_ids=None, ranked_queues=None, seasons=None,
begin_time=None, end_time=None, begin_index=None, end_index=None):
return self._match_list_request(
'{summoner_id}'.format(summoner_id=summoner_id),
region,
            championIds=champion_ids,
rankedQueues=ranked_queues,
seasons=seasons,
beginTime=begin_time,
endTime=end_time,
beginIndex=begin_index,
endIndex=end_index
)
# stats-v1.3
def _stats_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/stats/{end_url}'.format(
version=api_versions['stats'],
end_url=end_url
),
region,
**kwargs
)
def get_stat_summary(self, summoner_id, region=None, season=None):
return self._stats_request(
'by-summoner/{summoner_id}/summary'.format(summoner_id=summoner_id),
region,
season='SEASON{}'.format(season) if season is not None else None)
def get_ranked_stats(self, summoner_id, region=None, season=None):
return self._stats_request(
'by-summoner/{summoner_id}/ranked'.format(summoner_id=summoner_id),
region,
season='SEASON{}'.format(season) if season is not None else None
)
# summoner-v1.4
def _summoner_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/summoner/{end_url}'.format(
version=api_versions['summoner'],
end_url=end_url
),
region,
**kwargs
)
def get_mastery_pages(self, summoner_ids, region=None):
return self._summoner_request(
'{summoner_ids}/masteries'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
def get_rune_pages(self, summoner_ids, region=None):
return self._summoner_request(
'{summoner_ids}/runes'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
def get_summoners(self, names=None, ids=None, region=None):
if (names is None) != (ids is None):
return self._summoner_request(
'by-name/{summoner_names}'.format(
summoner_names=','.join([self.sanitized_name(n) for n in names])) if names is not None
else '{summoner_ids}'.format(summoner_ids=','.join([str(i) for i in ids])),
region
)
else:
return None
def get_summoner(self, name=None, _id=None, region=None):
if (name is None) != (_id is None):
if name is not None:
name = self.sanitized_name(name)
return self.get_summoners(names=[name, ], region=region)[name]
else:
return self.get_summoners(ids=[_id, ], region=region)[str(_id)]
return None
def get_summoner_name(self, summoner_ids, region=None):
return self._summoner_request(
'{summoner_ids}/name'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
# team-v2.4
def _team_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/team/{end_url}'.format(
version=api_versions['team'],
end_url=end_url
),
region,
**kwargs
)
def get_teams_for_summoner(self, summoner_id, region=None):
return self.get_teams_for_summoners([summoner_id, ], region=region)[str(summoner_id)]
def get_teams_for_summoners(self, summoner_ids, region=None):
return self._team_request(
'by-summoner/{summoner_id}'.format(summoner_id=','.join([str(s) for s in summoner_ids])),
region
)
def get_team(self, team_id, region=None):
return self.get_teams([team_id, ], region=region)[str(team_id)]
def get_teams(self, team_ids, region=None):
return self._team_request('{team_ids}'.format(team_ids=','.join(str(t) for t in team_ids)), region)
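# Hedged usage sketch (not part of the original file; the key and summoner
# name are placeholders):
#   watcher = RiotWatcher('<API-KEY>')
#   if watcher.can_make_request():
#       me = watcher.get_summoner(name='SomeSummoner', region=NORTH_AMERICA)
#       stats = watcher.get_ranked_stats(me['id'])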
|
Neil511/Riot-Watcher
|
riotwatcher/riotwatcher.py
|
Python
|
mit
| 22,597
|
[
"CRYSTAL"
] |
5e78953f5cd5fcea3a7440293c01a6deec425980a9d45e812c5aa64232fd839a
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
One repository to update them all
On mbed.org the mbed SDK is split across multiple repositories; this script
takes care of updating them all.
"""
import sys
from copy import copy
from os import walk, remove, makedirs
from os.path import join, abspath, dirname, relpath, exists, isfile
from shutil import copyfile
from optparse import OptionParser
import re
import string
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from tools.settings import MBED_ORG_PATH, MBED_ORG_USER, BUILD_DIR
from tools.paths import *
from tools.utils import run_cmd
MBED_URL = "mbed.org"
MBED_USER = "mbed_official"
changed = []
push_remote = True
quiet = False
commit_msg = ''
# Code that does have a mirror in the mbed SDK
# Tuple data: (repo_name, list_of_code_dirs, [team])
# team is optional - if not specified, the code is published under mbed_official
OFFICIAL_CODE = (
("mbed-dev" , [MBED_DRIVERS, MBED_PLATFORM, MBED_HAL]),
("mbed-rtos", RTOS),
("mbed-dsp" , DSP),
("mbed-rpc" , MBED_RPC),
("lwip" , LWIP_SOURCES+"/lwip"),
("lwip-sys", LWIP_SOURCES+"/lwip-sys"),
("Socket" , LWIP_SOURCES+"/Socket"),
("lwip-eth" , ETH_SOURCES+"/lwip-eth"),
("EthernetInterface", ETH_SOURCES+"/EthernetInterface"),
("USBDevice", USB),
("USBHost" , USB_HOST),
("CellularModem", CELLULAR_SOURCES),
("CellularUSBModem", CELLULAR_USB_SOURCES),
("UbloxUSBModem", UBLOX_SOURCES),
("UbloxModemHTTPClientTest", [TEST_DIR+"/net/cellular/http/common", TEST_DIR+"/net/cellular/http/ubloxusb"]),
("UbloxModemSMSTest", [TEST_DIR+"/net/cellular/sms/common", TEST_DIR+"/net/cellular/sms/ubloxusb"]),
("FATFileSystem", FAT_FS, "mbed-official"),
)
# Code that has dependencies on libraries should point to the latest
# revision; by default, such references point to a specific revision.
CODE_WITH_DEPENDENCIES = (
# Libraries
"EthernetInterface",
# RTOS Examples
"rtos_basic",
"rtos_isr",
"rtos_mail",
"rtos_mutex",
"rtos_queue",
"rtos_semaphore",
"rtos_signals",
"rtos_timer",
# Net Examples
"TCPEchoClient",
"TCPEchoServer",
"TCPSocket_HelloWorld",
"UDPSocket_HelloWorld",
"UDPEchoClient",
"UDPEchoServer",
"BroadcastReceive",
"BroadcastSend",
# mbed sources
"mbed-src-program",
)
# A list of regular expressions that will be checked against each directory
# name and skipped if they match.
IGNORE_DIRS = (
)
IGNORE_FILES = (
'COPYING',
'\.md',
"\.lib",
"\.bld"
)
# Extensions of the repository reference files rewritten by
# update_dependencies(); ('.lib', '.bld') is an assumption based on the file
# types mbed uses to reference library code and builds.
MBED_REPO_EXT = ('.lib', '.bld')
def ignore_path(name, reg_exps):
for r in reg_exps:
if re.search(r, name):
return True
return False
class MbedRepository:
@staticmethod
def run_and_print(command, cwd):
stdout, _, _ = run_cmd(command, work_dir=cwd, redirect=True)
print(stdout)
def __init__(self, name, team = None):
self.name = name
self.path = join(MBED_ORG_PATH, name)
if team is None:
self.url = "http://" + MBED_URL + "/users/" + MBED_USER + "/code/%s/"
else:
self.url = "http://" + MBED_URL + "/teams/" + team + "/code/%s/"
if not exists(self.path):
# Checkout code
if not exists(MBED_ORG_PATH):
makedirs(MBED_ORG_PATH)
self.run_and_print(['hg', 'clone', self.url % name], cwd=MBED_ORG_PATH)
else:
# Update
self.run_and_print(['hg', 'pull'], cwd=self.path)
self.run_and_print(['hg', 'update'], cwd=self.path)
def publish(self):
# The maintainer has to evaluate the changes first and explicitly accept them
self.run_and_print(['hg', 'addremove'], cwd=self.path)
stdout, _, _ = run_cmd(['hg', 'status'], work_dir=self.path)
if stdout == '':
print "No changes"
return False
print stdout
if quiet:
commit = 'Y'
else:
commit = raw_input(push_remote and "Do you want to commit and push? Y/N: " or "Do you want to commit? Y/N: ")
if commit == 'Y':
args = ['hg', 'commit', '-u', MBED_ORG_USER]
if commit_msg:
args = args + ['-m', commit_msg]
self.run_and_print(args, cwd=self.path)
if push_remote:
self.run_and_print(['hg', 'push'], cwd=self.path)
return True
# Check if a file is a text file or a binary file
# Taken from http://code.activestate.com/recipes/173220/
text_characters = "".join(map(chr, range(32, 127)) + list("\n\r\t\b"))
_null_trans = string.maketrans("", "")
def is_text_file(filename):
block_size = 1024
def istext(s):
if "\0" in s:
return 0
if not s: # Empty files are considered text
return 1
# Get the non-text characters (maps a character to itself then
# use the 'remove' option to get rid of the text characters.)
t = s.translate(_null_trans, text_characters)
# If more than 30% non-text characters, then
# this is considered a binary file
if float(len(t))/len(s) > 0.30:
return 0
return 1
with open(filename) as f:
res = istext(f.read(block_size))
return res
# Return the line ending type for the given file: 'crlf' for Windows-style
# endings, 'cr' (as the label is used below) for plain LF endings
def get_line_endings(f):
examine_size = 1024
try:
tf = open(f, "rb")
lines, ncrlf = tf.readlines(examine_size), 0
tf.close()
for l in lines:
if l.endswith("\r\n"):
ncrlf = ncrlf + 1
return 'crlf' if ncrlf > len(lines) >> 1 else 'cr'
except:
return 'cr'
# Copy file to destination, but preserve destination line endings if possible
# This prevents very annoying issues with huge diffs that appear because of
# differences in line endings
def copy_with_line_endings(sdk_file, repo_file):
if not isfile(repo_file):
copyfile(sdk_file, repo_file)
return
is_text = is_text_file(repo_file)
if is_text:
sdk_le = get_line_endings(sdk_file)
repo_le = get_line_endings(repo_file)
if not is_text or sdk_le == repo_le:
copyfile(sdk_file, repo_file)
else:
print "Converting line endings in '%s' to '%s'" % (abspath(repo_file), repo_le)
f = open(sdk_file, "rb")
data = f.read()
f.close()
f = open(repo_file, "wb")
data = data.replace("\r\n", "\n") if repo_le == 'cr' else data.replace('\n','\r\n')
f.write(data)
f.close()
def visit_files(path, visit, ignore=None, extensions=None):
    # 'extensions' restricts the visit to files with the given suffixes (a
    # tuple, as accepted by str.endswith); update_dependencies() relies on
    # this to rewrite only repository reference files. 'ignore' is accepted
    # for call compatibility and is currently unused.
    for root, dirs, files in walk(path):
        # Ignore hidden directories
        for d in copy(dirs):
            full = join(root, d)
            if d.startswith('.'):
                dirs.remove(d)
            elif ignore_path(full, IGNORE_DIRS):
                print "Skipping '%s'" % full
                dirs.remove(d)
        for file in files:
            if extensions is not None:
                # Explicit extension lists bypass IGNORE_FILES, which would
                # otherwise skip the '.lib'/'.bld' reference files.
                if file.endswith(extensions):
                    visit(join(root, file))
                continue
            if ignore_path(file, IGNORE_FILES):
                continue
            visit(join(root, file))
def update_repo(repo_name, sdk_paths, team_name):
repo = MbedRepository(repo_name, team_name)
# copy files from mbed SDK to mbed_official repository
def visit_mbed_sdk(sdk_file):
repo_file = join(repo.path, relpath(sdk_file, sdk_path))
repo_dir = dirname(repo_file)
if not exists(repo_dir):
makedirs(repo_dir)
copy_with_line_endings(sdk_file, repo_file)
for sdk_path in sdk_paths:
visit_files(sdk_path, visit_mbed_sdk)
# remove repository files that do not exist in the mbed SDK
def visit_repo(repo_file):
for sdk_path in sdk_paths:
sdk_file = join(sdk_path, relpath(repo_file, repo.path))
if exists(sdk_file):
break
else:
remove(repo_file)
print "remove: %s" % repo_file
visit_files(repo.path, visit_repo)
if repo.publish():
changed.append(repo_name)
def update_code(repositories):
for r in repositories:
repo_name, sdk_dir = r[0], r[1]
team_name = r[2] if len(r) == 3 else None
print '\n=== Updating "%s" ===' % repo_name
        sdk_dirs = sdk_dir if isinstance(sdk_dir, list) else [sdk_dir]
update_repo(repo_name, sdk_dirs, team_name)
def update_single_repo(repo):
repos = [r for r in OFFICIAL_CODE if r[0] == repo]
if not repos:
print "Repository '%s' not found" % repo
else:
update_code(repos)
def update_dependencies(repositories):
for repo_name in repositories:
print '\n=== Updating "%s" ===' % repo_name
repo = MbedRepository(repo_name)
# point to the latest libraries
def visit_repo(repo_file):
with open(repo_file, "r") as f:
url = f.read()
with open(repo_file, "w") as f:
f.write(url[:(url.rindex('/')+1)])
visit_files(repo.path, visit_repo, None, MBED_REPO_EXT)
if repo.publish():
changed.append(repo_name)
def update_mbed():
update_repo("mbed", [join(BUILD_DIR, "mbed")], None)
def do_sync(options):
global push_remote, quiet, commit_msg, changed
push_remote = not options.nopush
quiet = options.quiet
commit_msg = options.msg
    changed = []
if options.code:
update_code(OFFICIAL_CODE)
if options.dependencies:
update_dependencies(CODE_WITH_DEPENDENCIES)
if options.mbed:
update_mbed()
if options.repo:
update_single_repo(options.repo)
if changed:
print "Repositories with changes:", changed
return changed
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c", "--code",
action="store_true", default=False,
help="Update the mbed_official code")
parser.add_option("-d", "--dependencies",
action="store_true", default=False,
help="Update the mbed_official code dependencies")
parser.add_option("-m", "--mbed",
action="store_true", default=False,
help="Release a build of the mbed library")
parser.add_option("-n", "--nopush",
action="store_true", default=False,
help="Commit the changes locally only, don't push them")
parser.add_option("", "--commit_message",
action="store", type="string", default='', dest='msg',
help="Commit message to use for all the commits")
parser.add_option("-r", "--repository",
action="store", type="string", default='', dest='repo',
help="Synchronize only the given repository")
parser.add_option("-q", "--quiet",
action="store_true", default=False,
help="Don't ask for confirmation before commiting or pushing")
(options, args) = parser.parse_args()
do_sync(options)
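# Example invocations (illustrative, assuming a configured mbed working tree):
#   python tools/synch.py -c -q              # sync all official code repos, no prompts
#   python tools/synch.py -r mbed-rtos       # synchronize a single repository
#   python tools/synch.py -d -n              # update dependencies, commit locally only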
|
maximmbed/mbed
|
tools/synch.py
|
Python
|
apache-2.0
| 11,465
|
[
"VisIt"
] |
0debb82ea2367ae1b2beddc8dc15eaad23f5ebc1ad390944c4d421651632cb54
|
"""
Output conversion for Gaussian
==============================
This module contains conversion utilities that is solely written for the
Gaussian computational chemistry program.
.. autosummary::
:toctree:
gauout2PESyaml
"""
import collections
import re
from collections import abc
import itertools
import numpy as np
try:
from yaml import CDumper as Dumper
except ImportError:
from yaml import Dumper
from yaml import dump, YAMLError
#
# The drive function
# ------------------
#
def gauout2PESyaml(gauout_name, yaml_name,
energy_patt=r'^ SCF Done[^=]+=(?P<energy>[^A]+)A\.U',
ref_energy=0.0, symbs=None, mols=None, add_info=None):
"""Converts a Gaussian output file to a PES YAML file
The atomic coordinates will be stored in the field ``atm_coords`` in input
orientation in units of Angstrom. The SCF energy will be stored as
``static_energy`` in units of eV. The forces will be stored in
    ``atm_forces`` in units of eV/Angstrom.
The atomic symbols and molecules will also be stored in ``atm_symbs`` and
``mols`` according to user input.
:param str gauout_name: The name of the Gaussian output file.
:param str yaml_name: The name of the YAML file to be written.
:param str energy_patt: The pattern that can be used to grab the raw energy
        in Hartree. The energy needs to be in the named group ``energy``; the
        last line matching the pattern (via ``re.search``) is used. Defaults to
        the SCF energy.
:param float ref_energy: The reference energy to be subtracted from the raw
energy, in Hartree.
:param symbs: The symbols for the atoms in the output. By default the
element symbol for the atomic numbers will be used. Or it can be given
as a callable which will be called with the atomic index number and the
default symbol to return the actual symbol of the atoms. An iterable
can be given directly as well.
:param mols: An iterable for the atomic indices of the molecules in the
system. Elements in the iterable can be another iterable to give the
        actual indices of the atoms, or an integer meaning that the next n
        atoms form a molecule. By default all atoms form a single molecule.
:param dict add_info: The dictionary of additional information to add.
    :raises ValueError: if the input is malformed.
:raises IOError: if something is wrong with the files.
:returns: 0 for success.
"""
# Parse the Gaussian output.
parse_res = _parse_gauout(gauout_name, energy_patt)
# The result dictionary.
res = {}
# The coordinates.
res['atm_coords'] = parse_res.atm_coords.tolist()
# The energy.
res['static_energy'] = (
parse_res.static_energy - ref_energy
) * _HARTREE2EV
# The forces.
res['atm_forces'] = (
parse_res.atm_forces * _HARTREE_P_BOHR2EV_P_ANGS
).tolist()
atm_numbs = parse_res.atm_numbs
# The symbols.
res['atm_symbs'] = _gen_symbs(atm_numbs, symbs)
# The molecules.
res['mols'] = _gen_mols(atm_numbs, mols)
if add_info is not None:
res.update(add_info)
# Dump to the YAML file.
_dump2yaml(yaml_name, res)
return 0
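# Hedged usage sketch (not part of the original file; file names, reference
# energy and molecule layout are hypothetical):
#   gauout2PESyaml('water.log', 'water.yml',
#                  ref_energy=-76.0,  # Hartree, subtracted from the raw energy
#                  mols=[3])          # a single three-atom molecule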
#
# Some unit conversion constants
# ------------------------------
#
_HARTREE2EV = 27.21139
_HARTREE_P_BOHR2EV_P_ANGS = 51.42207
#
# Gaussian output parsing
# -----------------------
#
ParseRes = collections.namedtuple(
'ParseRes', [
'atm_coords',
'static_energy',
'atm_forces',
'atm_numbs',
]
)
def _parse_gauout(gauout_name, energy_patt):
"""Parses the given Gaussian output file
The results will be put in a named tuple. All units are *not* converted.
And tensor properties like coordinates and forces will be in numpy arrays.
:param str gauout_name: The name of the Gaussian output file to parse.
:param str energy_patt: The energy pattern to grab the energy.
:returns: The parse result.
"""
    # Open and read the file; an IOError simply propagates to the caller.
    with open(gauout_name, 'r') as gauout:
        lines = gauout.readlines()
# Get the energy, the easiest one.
compiled_energy_patt = re.compile(energy_patt)
static_energy = None
    for line in lines:
        res = compiled_energy_patt.search(line)
        if res is not None:
            # Keep overwriting so that the last match in the file wins.
            static_energy = float(res.group('energy'))
if static_energy is None:
raise ValueError(
'Energy failed to be read from {}'.format(gauout_name)
)
# Get the coordinates and the atomic numbers.
coords_lines = _get_lines_under_title(
lines, r'^ +Input orientation: *$', r'^ *\d'
)
atm_numbs = []
atm_coords = []
for line in coords_lines:
fields = line.split()
atm_numbs.append(
int(fields[1])
)
atm_coords.append(
[float(i) for i in fields[3:6]]
)
continue
atm_coords = np.array(atm_coords)
# Get the forces.
forces_lines = _get_lines_under_title(
lines, r'^ +\*+ +Axes restored to original set +\*+ *$', r'^ *\d'
)
atm_forces = []
for line in forces_lines:
fields = line.split()
atm_forces.append(
[float(i) for i in fields[2:5]]
)
continue
atm_forces = np.array(atm_forces)
return ParseRes(
atm_coords=atm_coords, static_energy=static_energy,
atm_forces=atm_forces, atm_numbs=atm_numbs,
)
def _get_lines_under_title(lines, title_patt, content_patt):
"""Gets the lines under a title
If multiple titles are found, only the lines in the last section will be
returned.
:param lines: A sequence of lines.
:param title_patt: The pattern for the title.
:param content_patt: The pattern for the content lines.
:raises ValueError: If the title cannot be found.
:returns: The content lines following the title.
"""
# Compile the given patterns
compiled_title_patt = re.compile(title_patt)
compiled_content_patt = re.compile(content_patt)
# Find the location of the title.
title_loc = None
    for idx, line in enumerate(lines):
        if compiled_title_patt.search(line) is not None:
            # Keep the index of the last occurrence of the title.
            title_loc = idx
if title_loc is None:
raise ValueError(
'The given title {} failed to be found'.format(title_patt)
)
# Gather the content lines following the title.
content_lines = []
started = False
for line in lines[title_loc:]:
if compiled_content_patt.search(line) is None:
if started:
break
else:
continue
else:
content_lines.append(line)
if not started:
started = True
return content_lines
#
# Symbols and molecules generation
# --------------------------------
#
def _gen_symbs(atm_numbs, symbs):
"""Generates the atomic symbols
    By default, the element symbols are used. If an iterable is given, its
    content is used directly. If a callable is given, it is called with the
    atomic index and the default symbol to obtain the actual symbol.
"""
if isinstance(symbs, abc.Iterable):
symbs = list(symbs)
if len(symbs) != len(atm_numbs):
raise ValueError(
                'The given symbols do not match the number of atoms!'
)
else:
default_symbs = [
_ELEMENT_SYMBS[i] for i in atm_numbs
]
if symbs is None:
symbs = default_symbs
else:
symbs = [
symbs(idx, default_symb)
for idx, default_symb in enumerate(default_symbs)
]
return symbs
def _gen_mols(atm_numbs, mols):
"""Generates the nested molecules list"""
    if mols is None:
        # All atoms form a single molecule by default, as documented in
        # gauout2PESyaml.
        return [[i for i, _ in enumerate(atm_numbs)]]
else:
ret_val = []
# Get the molecules list.
curr_atm = 0
for i in mols:
if isinstance(i, int):
ret_val.append(
list(range(curr_atm, curr_atm + i))
)
curr_atm += i
            else:
                atm_idxes = list(i)
                ret_val.append(atm_idxes)
                # The next implicit block starts after the largest explicit
                # index; listing first also keeps generators safe for max().
                curr_atm = max(atm_idxes) + 1
continue
# Check the correctness.
for i, j in itertools.zip_longest(
range(0, len(atm_numbs)),
sorted(itertools.chain.from_iterable(ret_val))
):
if i != j:
raise ValueError(
'Incorrect molecule specification, atom {} not correctly '
'given!'.format(i)
)
continue
return ret_val
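# e.g. (illustrative): _gen_mols([8, 1, 1, 8, 1, 1], [3, 3]) gives
# [[0, 1, 2], [3, 4, 5]], two three-atom molecules covering all six atoms.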
_ELEMENT_SYMBS = {
1: "H",
2: "He",
3: "Li",
4: "Be",
5: "B",
6: "C",
7: "N",
8: "O",
9: "F",
10: "Ne",
11: "Na",
12: "Mg",
13: "Al",
14: "Si",
15: "P",
16: "S",
17: "Cl",
18: "Ar",
19: "K",
20: "Ca",
21: "Sc",
22: "Ti",
23: "V",
24: "Cr",
25: "Mn",
26: "Fe",
27: "Co",
28: "Ni",
29: "Cu",
30: "Zn",
31: "Ga",
32: "Ge",
33: "As",
34: "Se",
35: "Br",
36: "Kr",
37: "Rb",
38: "Sr",
39: "Y",
40: "Zr",
41: "Nb",
42: "Mo",
43: "Tc",
44: "Ru",
45: "Rh",
46: "Pd",
47: "Ag",
48: "Cd",
49: "In",
50: "Sn",
51: "Sb",
52: "Te",
53: "I",
54: "Xe",
55: "Cs",
56: "Ba",
57: "La",
58: "Ce",
59: "Pr",
60: "Nd",
61: "Pm",
62: "Sm",
63: "Eu",
64: "Gd",
65: "Tb",
66: "Dy",
67: "Ho",
68: "Er",
69: "Tm",
70: "Yb",
71: "Lu",
72: "Hf",
73: "Ta",
74: "W",
75: "Re",
76: "Os",
77: "Ir",
78: "Pt",
79: "Au",
80: "Hg",
81: "Tl",
82: "Pb",
83: "Bi",
84: "Po",
85: "At",
86: "Rn",
87: "Fr",
88: "Ra",
89: "Ac",
90: "Th",
91: "Pa",
92: "U",
93: "Np",
94: "Pu",
95: "Am",
96: "Cm",
97: "Bk",
98: "Cf",
99: "Es",
}
#
# Output generation
# -----------------
#
def _dump2yaml(yaml_name, content):
"""Dumps the content dictionary into a YAML file with the given name"""
try:
with open(yaml_name, 'w') as yaml_file:
dump(content, stream=yaml_file, Dumper=Dumper)
except IOError:
raise IOError(
'Invalid output file {}'.format(yaml_name)
)
except YAMLError:
raise ValueError(
'Invalid data to be dumped by YAML:\n{!r}'.format(content)
)
|
tschijnmo/FFOMP
|
FFOMP/ccutils/gau2yaml.py
|
Python
|
mit
| 10,855
|
[
"Gaussian"
] |
c3468c8c4d0c0649bb457f49c68d116534c6abb0bfaae90c17995ab19f153e70
|
# Autodetecting setup.py script for building the Python extensions
#
import sys, os, importlib.machinery, re, optparse
from glob import glob
import importlib._bootstrap
import importlib.util
import sysconfig
from distutils import log
from distutils.errors import *
from distutils.core import Extension, setup
from distutils.command.build_ext import build_ext
from distutils.command.install import install
from distutils.command.install_lib import install_lib
from distutils.command.build_scripts import build_scripts
from distutils.spawn import find_executable
cross_compiling = "_PYTHON_HOST_PLATFORM" in os.environ
# Add special CFLAGS reserved for building the interpreter and the stdlib
# modules (Issue #21121).
cflags = sysconfig.get_config_var('CFLAGS')
py_cflags_nodist = sysconfig.get_config_var('PY_CFLAGS_NODIST')
sysconfig.get_config_vars()['CFLAGS'] = cflags + ' ' + py_cflags_nodist
class Dummy:
"""Hack for parallel build"""
ProcessPoolExecutor = None
sys.modules['concurrent.futures.process'] = Dummy
def get_platform():
# cross build
if "_PYTHON_HOST_PLATFORM" in os.environ:
return os.environ["_PYTHON_HOST_PLATFORM"]
# Get value of sys.platform
if sys.platform.startswith('osf1'):
return 'osf1'
return sys.platform
host_platform = get_platform()
# Were we compiled --with-pydebug or with #define Py_DEBUG?
COMPILED_WITH_PYDEBUG = ('--with-pydebug' in sysconfig.get_config_var("CONFIG_ARGS"))
# This global variable is used to hold the list of modules to be disabled.
disabled_module_list = []
def add_dir_to_list(dirlist, dir):
"""Add the directory 'dir' to the list 'dirlist' (after any relative
directories) if:
1) 'dir' is not already in 'dirlist'
2) 'dir' actually exists, and is a directory.
"""
if dir is None or not os.path.isdir(dir) or dir in dirlist:
return
for i, path in enumerate(dirlist):
if not os.path.isabs(path):
dirlist.insert(i + 1, dir)
return
dirlist.insert(0, dir)
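# For example (illustrative, assuming '/opt/lib' exists and is a directory):
#   dirs = ['.', '/usr/lib']
#   add_dir_to_list(dirs, '/opt/lib')   # -> ['.', '/opt/lib', '/usr/lib']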
def macosx_sdk_root():
"""
Return the directory of the current OSX SDK,
or '/' if no SDK was specified.
"""
cflags = sysconfig.get_config_var('CFLAGS')
m = re.search(r'-isysroot\s+(\S+)', cflags)
if m is None:
sysroot = '/'
else:
sysroot = m.group(1)
return sysroot
def is_macosx_sdk_path(path):
"""
Returns True if 'path' can be located in an OSX SDK
"""
return ( (path.startswith('/usr/') and not path.startswith('/usr/local'))
or path.startswith('/System/')
or path.startswith('/Library/') )
def find_file(filename, std_dirs, paths):
"""Searches for the directory where a given file is located,
and returns a possibly-empty list of additional directories, or None
if the file couldn't be found at all.
'filename' is the name of a file, such as readline.h or libcrypto.a.
'std_dirs' is the list of standard system directories; if the
file is found in one of them, no additional directives are needed.
'paths' is a list of additional locations to check; if the file is
found in one of them, the resulting list will contain the directory.
"""
if host_platform == 'darwin':
# Honor the MacOSX SDK setting when one was specified.
# An SDK is a directory with the same structure as a real
# system, but with only header files and libraries.
sysroot = macosx_sdk_root()
# Check the standard locations
for dir in std_dirs:
f = os.path.join(dir, filename)
if host_platform == 'darwin' and is_macosx_sdk_path(dir):
f = os.path.join(sysroot, dir[1:], filename)
if os.path.exists(f): return []
# Check the additional directories
for dir in paths:
f = os.path.join(dir, filename)
if host_platform == 'darwin' and is_macosx_sdk_path(dir):
f = os.path.join(sysroot, dir[1:], filename)
if os.path.exists(f):
return [dir]
# Not found anywhere
return None
def find_library_file(compiler, libname, std_dirs, paths):
result = compiler.find_library_file(std_dirs + paths, libname)
if result is None:
return None
if host_platform == 'darwin':
sysroot = macosx_sdk_root()
# Check whether the found file is in one of the standard directories
dirname = os.path.dirname(result)
for p in std_dirs:
# Ensure path doesn't end with path separator
p = p.rstrip(os.sep)
if host_platform == 'darwin' and is_macosx_sdk_path(p):
# Note that, as of Xcode 7, Apple SDKs may contain textual stub
# libraries with .tbd extensions rather than the normal .dylib
# shared libraries installed in /. The Apple compiler tool
# chain handles this transparently but it can cause problems
# for programs that are being built with an SDK and searching
# for specific libraries. Distutils find_library_file() now
# knows to also search for and return .tbd files. But callers
# of find_library_file need to keep in mind that the base filename
# of the returned SDK library file might have a different extension
# from that of the library file installed on the running system,
# for example:
# /Applications/Xcode.app/Contents/Developer/Platforms/
# MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/
# usr/lib/libedit.tbd
# vs
# /usr/lib/libedit.dylib
if os.path.join(sysroot, p[1:]) == dirname:
return [ ]
if p == dirname:
return [ ]
# Otherwise, it must have been in one of the additional directories,
# so we have to figure out which one.
for p in paths:
# Ensure path doesn't end with path separator
p = p.rstrip(os.sep)
if host_platform == 'darwin' and is_macosx_sdk_path(p):
if os.path.join(sysroot, p[1:]) == dirname:
return [ p ]
if p == dirname:
return [p]
else:
assert False, "Internal error: Path not found in std_dirs or paths"
def module_enabled(extlist, modname):
"""Returns whether the module 'modname' is present in the list
of extensions 'extlist'."""
extlist = [ext for ext in extlist if ext.name == modname]
return len(extlist)
def find_module_file(module, dirlist):
"""Find a module in a set of possible folders. If it is not found
return the unadorned filename"""
list = find_file(module, [], dirlist)
if not list:
return module
if len(list) > 1:
log.info("WARNING: multiple copies of %s found", module)
return os.path.join(list[0], module)
class PyBuildExt(build_ext):
def __init__(self, dist):
build_ext.__init__(self, dist)
self.failed = []
self.failed_on_import = []
if '-j' in os.environ.get('MAKEFLAGS', ''):
self.parallel = True
def build_extensions(self):
# Detect which modules should be compiled
missing = self.detect_modules()
# Remove modules that are present on the disabled list
extensions = [ext for ext in self.extensions
if ext.name not in disabled_module_list]
# move ctypes to the end, it depends on other modules
ext_map = dict((ext.name, i) for i, ext in enumerate(extensions))
if "_ctypes" in ext_map:
ctypes = extensions.pop(ext_map["_ctypes"])
extensions.append(ctypes)
self.extensions = extensions
# Fix up the autodetected modules, prefixing all the source files
# with Modules/.
srcdir = sysconfig.get_config_var('srcdir')
if not srcdir:
# Maybe running on Windows but not using CYGWIN?
raise ValueError("No source directory; cannot proceed.")
srcdir = os.path.abspath(srcdir)
moddirlist = [os.path.join(srcdir, 'Modules')]
# Fix up the paths for scripts, too
self.distribution.scripts = [os.path.join(srcdir, filename)
for filename in self.distribution.scripts]
# Python header files
headers = [sysconfig.get_config_h_filename()]
headers += glob(os.path.join(sysconfig.get_path('include'), "*.h"))
# The sysconfig variable built by makesetup, listing the already
# built modules as configured by the Setup files.
modnames = sysconfig.get_config_var('MODNAMES').split()
removed_modules = []
for ext in self.extensions:
ext.sources = [ find_module_file(filename, moddirlist)
for filename in ext.sources ]
if ext.depends is not None:
ext.depends = [find_module_file(filename, moddirlist)
for filename in ext.depends]
else:
ext.depends = []
# re-compile extensions if a header file has been changed
ext.depends.extend(headers)
# If a module has already been built by the Makefile,
# don't build it here.
if ext.name in modnames:
removed_modules.append(ext)
if removed_modules:
self.extensions = [x for x in self.extensions if x not in
removed_modules]
# When you run "make CC=altcc" or something similar, you really want
# those environment variables passed into the setup.py phase. Here's
# a small set of useful ones.
compiler = os.environ.get('CC')
args = {}
# unfortunately, distutils doesn't let us provide separate C and C++
# compilers
if compiler is not None:
(ccshared,cflags) = sysconfig.get_config_vars('CCSHARED','CFLAGS')
args['compiler_so'] = compiler + ' ' + ccshared + ' ' + cflags
self.compiler.set_executables(**args)
build_ext.build_extensions(self)
for ext in self.extensions:
self.check_extension_import(ext)
longest = max([len(e.name) for e in self.extensions], default=0)
if self.failed or self.failed_on_import:
all_failed = self.failed + self.failed_on_import
longest = max(longest, max([len(name) for name in all_failed]))
def print_three_column(lst):
lst.sort(key=str.lower)
# guarantee zip() doesn't drop anything
while len(lst) % 3:
lst.append("")
for e, f, g in zip(lst[::3], lst[1::3], lst[2::3]):
print("%-*s %-*s %-*s" % (longest, e, longest, f,
longest, g))
if missing:
print()
print("Python build finished successfully!")
print("The necessary bits to build these optional modules were not "
"found:")
print_three_column(missing)
print("To find the necessary bits, look in setup.py in"
" detect_modules() for the module's name.")
print()
if removed_modules:
print("The following modules found by detect_modules() in"
" setup.py, have been")
print("built by the Makefile instead, as configured by the"
" Setup files:")
print_three_column([ext.name for ext in removed_modules])
if self.failed:
failed = self.failed[:]
print()
print("Failed to build these modules:")
print_three_column(failed)
print()
if self.failed_on_import:
failed = self.failed_on_import[:]
print()
print("Following modules built successfully"
" but were removed because they could not be imported:")
print_three_column(failed)
print()
def build_extension(self, ext):
if ext.name == '_ctypes':
if not self.configure_ctypes(ext):
return
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsError) as why:
self.announce('WARNING: building of extension "%s" failed: %s' %
(ext.name, sys.exc_info()[1]))
self.failed.append(ext.name)
return
def check_extension_import(self, ext):
# Don't try to import an extension that has failed to compile
if ext.name in self.failed:
self.announce(
'WARNING: skipping import check for failed build "%s"' %
ext.name, level=1)
return
# Workaround for Mac OS X: The Carbon-based modules cannot be
# reliably imported into a command-line Python
if 'Carbon' in ext.extra_link_args:
self.announce(
'WARNING: skipping import check for Carbon-based "%s"' %
ext.name)
return
if host_platform == 'darwin' and (
sys.maxsize > 2**32 and '-arch' in ext.extra_link_args):
# Don't bother doing an import check when an extension was
# build with an explicit '-arch' flag on OSX. That's currently
# only used to build 32-bit only extensions in a 4-way
# universal build and loading 32-bit code into a 64-bit
# process will fail.
self.announce(
'WARNING: skipping import check for "%s"' %
ext.name)
return
# Workaround for Cygwin: Cygwin currently has fork issues when many
# modules have been imported
if host_platform == 'cygwin':
self.announce('WARNING: skipping import check for Cygwin-based "%s"'
% ext.name)
return
ext_filename = os.path.join(
self.build_lib,
self.get_ext_filename(self.get_ext_fullname(ext.name)))
# If the build directory didn't exist when setup.py was
# started, sys.path_importer_cache has a negative result
# cached. Clear that cache before trying to import.
sys.path_importer_cache.clear()
# Don't try to load extensions for cross builds
if cross_compiling:
return
loader = importlib.machinery.ExtensionFileLoader(ext.name, ext_filename)
spec = importlib.util.spec_from_file_location(ext.name, ext_filename,
loader=loader)
try:
importlib._bootstrap._load(spec)
except ImportError as why:
self.failed_on_import.append(ext.name)
self.announce('*** WARNING: renaming "%s" since importing it'
' failed: %s' % (ext.name, why), level=3)
assert not self.inplace
basename, tail = os.path.splitext(ext_filename)
newname = basename + "_failed" + tail
if os.path.exists(newname):
os.remove(newname)
os.rename(ext_filename, newname)
except:
exc_type, why, tb = sys.exc_info()
self.announce('*** WARNING: importing extension "%s" '
'failed with %s: %s' % (ext.name, exc_type, why),
level=3)
self.failed.append(ext.name)
def add_multiarch_paths(self):
# Debian/Ubuntu multiarch support.
# https://wiki.ubuntu.com/MultiarchSpec
cc = sysconfig.get_config_var('CC')
tmpfile = os.path.join(self.build_temp, 'multiarch')
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
ret = os.system(
'%s -print-multiarch > %s 2> /dev/null' % (cc, tmpfile))
multiarch_path_component = ''
try:
if ret >> 8 == 0:
with open(tmpfile) as fp:
multiarch_path_component = fp.readline().strip()
finally:
os.unlink(tmpfile)
if multiarch_path_component != '':
add_dir_to_list(self.compiler.library_dirs,
'/usr/lib/' + multiarch_path_component)
add_dir_to_list(self.compiler.include_dirs,
'/usr/include/' + multiarch_path_component)
return
if not find_executable('dpkg-architecture'):
return
opt = ''
if cross_compiling:
opt = '-t' + sysconfig.get_config_var('HOST_GNU_TYPE')
tmpfile = os.path.join(self.build_temp, 'multiarch')
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
ret = os.system(
'dpkg-architecture %s -qDEB_HOST_MULTIARCH > %s 2> /dev/null' %
(opt, tmpfile))
try:
if ret >> 8 == 0:
with open(tmpfile) as fp:
multiarch_path_component = fp.readline().strip()
add_dir_to_list(self.compiler.library_dirs,
'/usr/lib/' + multiarch_path_component)
add_dir_to_list(self.compiler.include_dirs,
'/usr/include/' + multiarch_path_component)
finally:
os.unlink(tmpfile)
def add_gcc_paths(self):
gcc = sysconfig.get_config_var('CC')
tmpfile = os.path.join(self.build_temp, 'gccpaths')
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
ret = os.system('%s -E -v - </dev/null 2>%s 1>/dev/null' % (gcc, tmpfile))
is_gcc = False
in_incdirs = False
inc_dirs = []
lib_dirs = []
try:
if ret >> 8 == 0:
with open(tmpfile) as fp:
for line in fp.readlines():
if line.startswith("gcc version"):
is_gcc = True
elif line.startswith("#include <...>"):
in_incdirs = True
elif line.startswith("End of search list"):
in_incdirs = False
elif is_gcc and line.startswith("LIBRARY_PATH"):
for d in line.strip().split("=")[1].split(":"):
d = os.path.normpath(d)
if '/gcc/' not in d:
add_dir_to_list(self.compiler.library_dirs,
d)
elif is_gcc and in_incdirs and '/gcc/' not in line:
add_dir_to_list(self.compiler.include_dirs,
line.strip())
finally:
os.unlink(tmpfile)
def detect_math_libs(self):
# Check for MacOS X, which doesn't need libm.a at all
if host_platform == 'darwin':
return []
else:
return ['m']
def detect_modules(self):
# Ensure that /usr/local is always used, but the local build
# directories (i.e. '.' and 'Include') must be first. See issue
# 10520.
if not cross_compiling:
add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')
add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
# only change this for cross builds for 3.3, issues on Mageia
if cross_compiling:
self.add_gcc_paths()
self.add_multiarch_paths()
# Add paths specified in the environment variables LDFLAGS and
# CPPFLAGS for header and library files.
# We must get the values from the Makefile and not the environment
# directly since an inconsistently reproducible issue comes up where
# the environment variable is not set even though the value were passed
# into configure and stored in the Makefile (issue found on OS X 10.3).
for env_var, arg_name, dir_list in (
('LDFLAGS', '-R', self.compiler.runtime_library_dirs),
('LDFLAGS', '-L', self.compiler.library_dirs),
('CPPFLAGS', '-I', self.compiler.include_dirs)):
env_val = sysconfig.get_config_var(env_var)
if env_val:
# To prevent optparse from raising an exception about any
# options in env_val that it doesn't know about we strip out
# all double dashes and any dashes followed by a character
# that is not for the option we are dealing with.
#
# Please note that order of the regex is important! We must
# strip out double-dashes first so that we don't end up with
# substituting "--Long" to "-Long" and thus lead to "ong" being
# used for a library directory.
env_val = re.sub(r'(^|\s+)-(-|(?!%s))' % arg_name[1],
' ', env_val)
parser = optparse.OptionParser()
# Make sure that allowing args interspersed with options is
# allowed
parser.allow_interspersed_args = True
parser.error = lambda msg: None
parser.add_option(arg_name, dest="dirs", action="append")
options = parser.parse_args(env_val.split())[0]
if options.dirs:
for directory in reversed(options.dirs):
add_dir_to_list(dir_list, directory)
if os.path.normpath(sys.base_prefix) != '/usr' \
and not sysconfig.get_config_var('PYTHONFRAMEWORK'):
# OSX note: Don't add LIBDIR and INCLUDEDIR to building a framework
            # (PYTHONFRAMEWORK is set) to avoid linking problems when
# building a framework with different architectures than
# the one that is currently installed (issue #7473)
add_dir_to_list(self.compiler.library_dirs,
sysconfig.get_config_var("LIBDIR"))
add_dir_to_list(self.compiler.include_dirs,
sysconfig.get_config_var("INCLUDEDIR"))
# lib_dirs and inc_dirs are used to search for files;
# if a file is found in one of those directories, it can
# be assumed that no additional -I,-L directives are needed.
if not cross_compiling:
lib_dirs = self.compiler.library_dirs + [
'/lib64', '/usr/lib64',
'/lib', '/usr/lib',
]
inc_dirs = self.compiler.include_dirs + ['/usr/include']
else:
lib_dirs = self.compiler.library_dirs[:]
inc_dirs = self.compiler.include_dirs[:]
exts = []
missing = []
config_h = sysconfig.get_config_h_filename()
with open(config_h) as file:
config_h_vars = sysconfig.parse_config_h(file)
srcdir = sysconfig.get_config_var('srcdir')
# OSF/1 and Unixware have some stuff in /usr/ccs/lib (like -ldb)
if host_platform in ['osf1', 'unixware7', 'openunix8']:
lib_dirs += ['/usr/ccs/lib']
# HP-UX11iv3 keeps files in lib/hpux folders.
if host_platform == 'hp-ux11':
lib_dirs += ['/usr/lib/hpux64', '/usr/lib/hpux32']
if host_platform == 'darwin':
# This should work on any unixy platform ;-)
# If the user has bothered specifying additional -I and -L flags
# in OPT and LDFLAGS we might as well use them here.
#
# NOTE: using shlex.split would technically be more correct, but
# also gives a bootstrap problem. Let's hope nobody uses
# directories with whitespace in the name to store libraries.
cflags, ldflags = sysconfig.get_config_vars(
'CFLAGS', 'LDFLAGS')
for item in cflags.split():
if item.startswith('-I'):
inc_dirs.append(item[2:])
for item in ldflags.split():
if item.startswith('-L'):
lib_dirs.append(item[2:])
math_libs = self.detect_math_libs()
# XXX Omitted modules: gl, pure, dl, SGI-specific modules
#
# The following modules are all pretty straightforward, and compile
# on pretty much any POSIXish platform.
#
# array objects
exts.append( Extension('array', ['arraymodule.c']) )
shared_math = 'Modules/_math.o'
# complex math library functions
exts.append( Extension('cmath', ['cmathmodule.c'],
extra_objects=[shared_math],
depends=['_math.h', shared_math],
libraries=math_libs) )
# math library functions, e.g. sin()
exts.append( Extension('math', ['mathmodule.c'],
extra_objects=[shared_math],
depends=['_math.h', shared_math],
libraries=math_libs) )
# time libraries: librt may be needed for clock_gettime()
time_libs = []
lib = sysconfig.get_config_var('TIMEMODULE_LIB')
if lib:
time_libs.append(lib)
# time operations and variables
exts.append( Extension('time', ['timemodule.c'],
libraries=time_libs) )
# math_libs is needed by delta_new() that uses round() and by accum()
# that uses modf().
exts.append( Extension('_datetime', ['_datetimemodule.c'],
libraries=math_libs) )
# random number generator implemented in C
exts.append( Extension("_random", ["_randommodule.c"]) )
# bisect
exts.append( Extension("_bisect", ["_bisectmodule.c"]) )
# heapq
exts.append( Extension("_heapq", ["_heapqmodule.c"]) )
# C-optimized pickle replacement
exts.append( Extension("_pickle", ["_pickle.c"]) )
# atexit
exts.append( Extension("atexit", ["atexitmodule.c"]) )
# _json speedups
exts.append( Extension("_json", ["_json.c"]) )
# Python C API test module
exts.append( Extension('_testcapi', ['_testcapimodule.c'],
depends=['testcapi_long.h']) )
# Python PEP-3118 (buffer protocol) test module
exts.append( Extension('_testbuffer', ['_testbuffer.c']) )
# Test loading multiple modules from one compiled file (http://bugs.python.org/issue16421)
exts.append( Extension('_testimportmultiple', ['_testimportmultiple.c']) )
# Test multi-phase extension module init (PEP 489)
exts.append( Extension('_testmultiphase', ['_testmultiphase.c']) )
# profiler (_lsprof is for cProfile.py)
exts.append( Extension('_lsprof', ['_lsprof.c', 'rotatingtree.c']) )
# static Unicode character database
exts.append( Extension('unicodedata', ['unicodedata.c'],
depends=['unicodedata_db.h', 'unicodename_db.h']) )
# _opcode module
exts.append( Extension('_opcode', ['_opcode.c']) )
# asyncio speedups
exts.append( Extension("_asyncio", ["_asynciomodule.c"]) )
# Modules with some UNIX dependencies -- on by default:
# (If you have a really backward UNIX, select and socket may not be
# supported...)
# fcntl(2) and ioctl(2)
libs = []
if (config_h_vars.get('FLOCK_NEEDS_LIBBSD', False)):
# May be necessary on AIX for flock function
libs = ['bsd']
exts.append( Extension('fcntl', ['fcntlmodule.c'], libraries=libs) )
# pwd(3)
exts.append( Extension('pwd', ['pwdmodule.c']) )
# grp(3)
exts.append( Extension('grp', ['grpmodule.c']) )
# spwd, shadow passwords
if (config_h_vars.get('HAVE_GETSPNAM', False) or
config_h_vars.get('HAVE_GETSPENT', False)):
exts.append( Extension('spwd', ['spwdmodule.c']) )
else:
missing.append('spwd')
# select(2); not on ancient System V
exts.append( Extension('select', ['selectmodule.c']) )
# Fred Drake's interface to the Python parser
exts.append( Extension('parser', ['parsermodule.c']) )
# Memory-mapped files (also works on Win32).
exts.append( Extension('mmap', ['mmapmodule.c']) )
# Lance Ellinghaus's syslog module
# syslog daemon interface
exts.append( Extension('syslog', ['syslogmodule.c']) )
#
# Here ends the simple stuff. From here on, modules need certain
# libraries, are platform-specific, or present other surprises.
#
# Multimedia modules
# These don't work for 64-bit platforms!!!
# These represent audio samples or images as strings:
#
# Operations on audio samples
# According to #993173, this one should actually work fine on
# 64-bit platforms.
#
# audioop needs math_libs for floor() in multiple functions.
exts.append( Extension('audioop', ['audioop.c'],
libraries=math_libs) )
# readline
do_readline = self.compiler.find_library_file(lib_dirs, 'readline')
readline_termcap_library = ""
curses_library = ""
# Cannot use os.popen here in py3k.
tmpfile = os.path.join(self.build_temp, 'readline_termcap_lib')
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
# Determine if readline is already linked against curses or tinfo.
if do_readline:
if cross_compiling:
ret = os.system("%s -d %s | grep '(NEEDED)' > %s" \
% (sysconfig.get_config_var('READELF'),
do_readline, tmpfile))
elif find_executable('ldd'):
ret = os.system("ldd %s > %s" % (do_readline, tmpfile))
else:
ret = 256
if ret >> 8 == 0:
with open(tmpfile) as fp:
for ln in fp:
if 'curses' in ln:
readline_termcap_library = re.sub(
r'.*lib(n?cursesw?)\.so.*', r'\1', ln
).rstrip()
break
# termcap interface split out from ncurses
if 'tinfo' in ln:
readline_termcap_library = 'tinfo'
break
if os.path.exists(tmpfile):
os.unlink(tmpfile)
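# Illustrative sketch only (not executed): a typical ldd line and what the
# substitution above extracts from it (the sample line is hypothetical).
## ln = "libncursesw.so.6 => /lib/x86_64-linux-gnu/libncursesw.so.6 (0x00007f...)"
## re.sub(r'.*lib(n?cursesw?)\.so.*', r'\1', ln).rstrip()  # -> 'ncursesw'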
# Issue 7384: If readline is already linked against curses,
# use the same library for the readline and curses modules.
if 'curses' in readline_termcap_library:
curses_library = readline_termcap_library
elif self.compiler.find_library_file(lib_dirs, 'ncursesw'):
curses_library = 'ncursesw'
elif self.compiler.find_library_file(lib_dirs, 'ncurses'):
curses_library = 'ncurses'
elif self.compiler.find_library_file(lib_dirs, 'curses'):
curses_library = 'curses'
if host_platform == 'darwin':
os_release = int(os.uname()[2].split('.')[0])
dep_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if (dep_target and
(tuple(int(n) for n in dep_target.split('.')[0:2])
< (10, 5) ) ):
os_release = 8
if os_release < 9:
# MacOSX 10.4 has a broken readline. Don't try to build
# the readline module unless the user has installed a fixed
# readline package
if find_file('readline/rlconf.h', inc_dirs, []) is None:
do_readline = False
if do_readline:
if host_platform == 'darwin' and os_release < 9:
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entire path.
# This way a statically linked custom readline gets picked up
# before the (possibly broken) dynamic library in /usr/lib.
readline_extra_link_args = ('-Wl,-search_paths_first',)
else:
readline_extra_link_args = ()
readline_libs = ['readline']
if readline_termcap_library:
pass # Issue 7384: Already linked against curses or tinfo.
elif curses_library:
readline_libs.append(curses_library)
elif self.compiler.find_library_file(lib_dirs +
['/usr/lib/termcap'],
'termcap'):
readline_libs.append('termcap')
exts.append( Extension('readline', ['readline.c'],
library_dirs=['/usr/lib/termcap'],
extra_link_args=readline_extra_link_args,
libraries=readline_libs) )
else:
missing.append('readline')
# crypt module.
if self.compiler.find_library_file(lib_dirs, 'crypt'):
libs = ['crypt']
else:
libs = []
exts.append( Extension('_crypt', ['_cryptmodule.c'], libraries=libs) )
# CSV files
exts.append( Extension('_csv', ['_csv.c']) )
# POSIX subprocess module helper.
exts.append( Extension('_posixsubprocess', ['_posixsubprocess.c']) )
# socket(2)
exts.append( Extension('_socket', ['socketmodule.c'],
depends = ['socketmodule.h']) )
# Detect SSL support for the socket module (via _ssl)
search_for_ssl_incs_in = [
'/usr/local/ssl/include',
'/usr/contrib/ssl/include/'
]
ssl_incs = find_file('openssl/ssl.h', inc_dirs,
search_for_ssl_incs_in
)
if ssl_incs is not None:
krb5_h = find_file('krb5.h', inc_dirs,
['/usr/kerberos/include'])
if krb5_h:
ssl_incs += krb5_h
ssl_libs = find_library_file(self.compiler, 'ssl',lib_dirs,
['/usr/local/ssl/lib',
'/usr/contrib/ssl/lib/'
] )
if (ssl_incs is not None and
ssl_libs is not None):
exts.append( Extension('_ssl', ['_ssl.c'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto'],
depends = ['socketmodule.h']) )
else:
missing.append('_ssl')
# find out which version of OpenSSL we have
openssl_ver = 0
openssl_ver_re = re.compile(
r'^\s*#\s*define\s+OPENSSL_VERSION_NUMBER\s+(0x[0-9a-fA-F]+)' )
# look for the openssl version header on the compiler search path.
opensslv_h = find_file('openssl/opensslv.h', [],
inc_dirs + search_for_ssl_incs_in)
if opensslv_h:
name = os.path.join(opensslv_h[0], 'openssl/opensslv.h')
if host_platform == 'darwin' and is_macosx_sdk_path(name):
name = os.path.join(macosx_sdk_root(), name[1:])
try:
with open(name, 'r') as incfile:
for line in incfile:
m = openssl_ver_re.match(line)
if m:
openssl_ver = int(m.group(1), 16)
break
except IOError as msg:
print("IOError while reading opensshv.h:", msg)
#print('openssl_ver = 0x%08x' % openssl_ver)
min_openssl_ver = 0x00907000
have_any_openssl = ssl_incs is not None and ssl_libs is not None
have_usable_openssl = (have_any_openssl and
openssl_ver >= min_openssl_ver)
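# Illustrative sketch only (not executed): OPENSSL_VERSION_NUMBER packs the
# version as 0xMNNFFPPS (major nibble, minor byte, fix byte, patch byte,
# status nibble), so the 0x00907000 floor above means OpenSSL 0.9.7.
## ver = 0x00907000
## major = (ver >> 28) & 0xf   # -> 0
## minor = (ver >> 20) & 0xff  # -> 9
## fix = (ver >> 12) & 0xff    # -> 7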
if have_any_openssl:
if have_usable_openssl:
# The _hashlib module wraps optimized implementations
# of hash functions from the OpenSSL library.
exts.append( Extension('_hashlib', ['_hashopenssl.c'],
depends = ['hashlib.h'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto']) )
else:
print("warning: openssl 0x%08x is too old for _hashlib" %
openssl_ver)
missing.append('_hashlib')
# We always compile these even when OpenSSL is available (issue #14693).
# It's harmless and the object code is tiny (40-50 KB per module,
# only loaded when actually used).
exts.append( Extension('_sha256', ['sha256module.c'],
depends=['hashlib.h']) )
exts.append( Extension('_sha512', ['sha512module.c'],
depends=['hashlib.h']) )
exts.append( Extension('_md5', ['md5module.c'],
depends=['hashlib.h']) )
exts.append( Extension('_sha1', ['sha1module.c'],
depends=['hashlib.h']) )
blake2_deps = glob(os.path.join(os.getcwd(), srcdir,
'Modules/_blake2/impl/*'))
blake2_deps.append('hashlib.h')
blake2_macros = []
if not cross_compiling and os.uname().machine == "x86_64":
# Every x86_64 machine has at least SSE2.
blake2_macros.append(('BLAKE2_USE_SSE', '1'))
exts.append( Extension('_blake2',
['_blake2/blake2module.c',
'_blake2/blake2b_impl.c',
'_blake2/blake2s_impl.c'],
define_macros=blake2_macros,
depends=blake2_deps) )
sha3_deps = glob(os.path.join(os.getcwd(), srcdir,
'Modules/_sha3/kcp/*'))
sha3_deps.append('hashlib.h')
exts.append( Extension('_sha3',
['_sha3/sha3module.c'],
depends=sha3_deps))
# Modules that provide persistent dictionary-like semantics. You will
# probably want to arrange for at least one of them to be available on
# your machine, though none are defined by default because of library
# dependencies. The Python module dbm/__init__.py provides an
# implementation independent wrapper for these; dbm/dumb.py provides
# similar functionality (but slower of course) implemented in Python.
# Sleepycat^WOracle Berkeley DB interface.
# http://www.oracle.com/database/berkeley-db/db/index.html
#
# This requires the Sleepycat^WOracle DB code. The supported versions
# are set below. Visit the URL above to download
# a release. Most open source OSes come with one or more
# versions of BerkeleyDB already installed.
max_db_ver = (5, 3)
min_db_ver = (3, 3)
db_setup_debug = False # verbose debug prints from this script?
def allow_db_ver(db_ver):
"""Returns a boolean if the given BerkeleyDB version is acceptable.
Args:
db_ver: A tuple of the version to verify.
"""
if not (min_db_ver <= db_ver <= max_db_ver):
return False
return True
def gen_db_minor_ver_nums(major):
if major == 4:
for x in range(max_db_ver[1]+1):
if allow_db_ver((4, x)):
yield x
elif major == 3:
for x in (3,):
if allow_db_ver((3, x)):
yield x
else:
raise ValueError("unknown major BerkeleyDB version", major)
# construct a list of paths to look for the header file in,
# on top of the normal inc_dirs.
db_inc_paths = [
'/usr/include/db4',
'/usr/local/include/db4',
'/opt/sfw/include/db4',
'/usr/include/db3',
'/usr/local/include/db3',
'/opt/sfw/include/db3',
# Fink defaults (http://fink.sourceforge.net/)
'/sw/include/db4',
'/sw/include/db3',
]
# 4.x minor number specific paths
for x in gen_db_minor_ver_nums(4):
db_inc_paths.append('/usr/include/db4%d' % x)
db_inc_paths.append('/usr/include/db4.%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.4.%d/include' % x)
db_inc_paths.append('/usr/local/include/db4%d' % x)
db_inc_paths.append('/pkg/db-4.%d/include' % x)
db_inc_paths.append('/opt/db-4.%d/include' % x)
# MacPorts default (http://www.macports.org/)
db_inc_paths.append('/opt/local/include/db4%d' % x)
# 3.x minor number specific paths
for x in gen_db_minor_ver_nums(3):
db_inc_paths.append('/usr/include/db3%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.3.%d/include' % x)
db_inc_paths.append('/usr/local/include/db3%d' % x)
db_inc_paths.append('/pkg/db-3.%d/include' % x)
db_inc_paths.append('/opt/db-3.%d/include' % x)
if cross_compiling:
db_inc_paths = []
# Add some common subdirectories for Sleepycat DB to the list,
# based on the standard include directories. This way DB3/4 gets
# picked up when it is installed in a non-standard prefix and
# the user has added that prefix into inc_dirs.
std_variants = []
for dn in inc_dirs:
std_variants.append(os.path.join(dn, 'db3'))
std_variants.append(os.path.join(dn, 'db4'))
for x in gen_db_minor_ver_nums(4):
std_variants.append(os.path.join(dn, "db4%d"%x))
std_variants.append(os.path.join(dn, "db4.%d"%x))
for x in gen_db_minor_ver_nums(3):
std_variants.append(os.path.join(dn, "db3%d"%x))
std_variants.append(os.path.join(dn, "db3.%d"%x))
db_inc_paths = std_variants + db_inc_paths
db_inc_paths = [p for p in db_inc_paths if os.path.exists(p)]
db_ver_inc_map = {}
if host_platform == 'darwin':
sysroot = macosx_sdk_root()
class db_found(Exception): pass
try:
# See whether there is a Sleepycat header in the standard
# search path.
for d in inc_dirs + db_inc_paths:
f = os.path.join(d, "db.h")
if host_platform == 'darwin' and is_macosx_sdk_path(d):
f = os.path.join(sysroot, d[1:], "db.h")
if db_setup_debug: print("db: looking for db.h in", f)
if os.path.exists(f):
with open(f, 'rb') as file:
f = file.read()
m = re.search(br"#define\WDB_VERSION_MAJOR\W(\d+)", f)
if m:
db_major = int(m.group(1))
m = re.search(br"#define\WDB_VERSION_MINOR\W(\d+)", f)
db_minor = int(m.group(1))
db_ver = (db_major, db_minor)
# Avoid 4.6 prior to 4.6.21 due to a BerkeleyDB bug
if db_ver == (4, 6):
m = re.search(br"#define\WDB_VERSION_PATCH\W(\d+)", f)
db_patch = int(m.group(1))
if db_patch < 21:
print("db.h:", db_ver, "patch", db_patch,
"being ignored (4.6.x must be >= 4.6.21)")
continue
if ( (db_ver not in db_ver_inc_map) and
allow_db_ver(db_ver) ):
# save the include directory with the db.h version
# (first occurrence only)
db_ver_inc_map[db_ver] = d
if db_setup_debug:
print("db.h: found", db_ver, "in", d)
else:
# we already found a header for this library version
if db_setup_debug: print("db.h: ignoring", d)
else:
# ignore this header, it didn't contain a version number
if db_setup_debug:
print("db.h: no version number version in", d)
db_found_vers = list(db_ver_inc_map.keys())
db_found_vers.sort()
while db_found_vers:
db_ver = db_found_vers.pop()
db_incdir = db_ver_inc_map[db_ver]
# check lib directories parallel to the location of the header
db_dirs_to_check = [
db_incdir.replace("include", 'lib64'),
db_incdir.replace("include", 'lib'),
]
if host_platform != 'darwin':
db_dirs_to_check = list(filter(os.path.isdir, db_dirs_to_check))
else:
# Same as other branch, but takes OSX SDK into account
tmp = []
for dn in db_dirs_to_check:
if is_macosx_sdk_path(dn):
if os.path.isdir(os.path.join(sysroot, dn[1:])):
tmp.append(dn)
else:
if os.path.isdir(dn):
tmp.append(dn)
db_dirs_to_check = tmp
# Look for a version specific db-X.Y before an ambiguous dbX
# XXX should we -ever- look for a dbX name? Do any
# systems really not name their library by version and
# symlink to more general names?
for dblib in (('db-%d.%d' % db_ver),
('db%d%d' % db_ver),
('db%d' % db_ver[0])):
dblib_file = self.compiler.find_library_file(
db_dirs_to_check + lib_dirs, dblib )
if dblib_file:
dblib_dir = [ os.path.abspath(os.path.dirname(dblib_file)) ]
raise db_found
else:
if db_setup_debug: print("db lib: ", dblib, "not found")
except db_found:
if db_setup_debug:
print("bsddb using BerkeleyDB lib:", db_ver, dblib)
print("bsddb lib dir:", dblib_dir, " inc dir:", db_incdir)
dblibs = [dblib]
# Only add the found library and include directories if they aren't
# already being searched. This avoids an explicit runtime library
# dependency.
if db_incdir in inc_dirs:
db_incs = None
else:
db_incs = [db_incdir]
if dblib_dir[0] in lib_dirs:
dblib_dir = None
else:
if db_setup_debug: print("db: no appropriate library found")
db_incs = None
dblibs = []
dblib_dir = None
# The sqlite interface
sqlite_setup_debug = False # verbose debug prints from this script?
# We hunt for #define SQLITE_VERSION "n.n.n"
# We need to find >= sqlite version 3.0.8
sqlite_incdir = sqlite_libdir = None
sqlite_inc_paths = [ '/usr/include',
'/usr/include/sqlite',
'/usr/include/sqlite3',
'/usr/local/include',
'/usr/local/include/sqlite',
'/usr/local/include/sqlite3',
]
if cross_compiling:
sqlite_inc_paths = []
MIN_SQLITE_VERSION_NUMBER = (3, 0, 8)
MIN_SQLITE_VERSION = ".".join([str(x)
for x in MIN_SQLITE_VERSION_NUMBER])
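# Illustrative sketch only (not executed): how a sqlite3.h define is turned
# into a comparable tuple by the code below (the sample line is hypothetical).
## incf = '#define SQLITE_VERSION      "3.14.1"'
## m = re.search(r'\s*.*#\s*.*define\s.*SQLITE_VERSION\W*"([\d\.]*)"', incf)
## tuple(int(x) for x in m.group(1).split("."))  # -> (3, 14, 1), >= (3, 0, 8)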
# Scan the default include directories before the SQLite specific
# ones. This allows one to override the copy of sqlite on OSX,
# where /usr/include contains an old version of sqlite.
if host_platform == 'darwin':
sysroot = macosx_sdk_root()
for d_ in inc_dirs + sqlite_inc_paths:
d = d_
if host_platform == 'darwin' and is_macosx_sdk_path(d):
d = os.path.join(sysroot, d[1:])
f = os.path.join(d, "sqlite3.h")
if os.path.exists(f):
if sqlite_setup_debug: print("sqlite: found %s"%f)
with open(f) as file:
incf = file.read()
m = re.search(
r'\s*.*#\s*.*define\s.*SQLITE_VERSION\W*"([\d\.]*)"', incf)
if m:
sqlite_version = m.group(1)
sqlite_version_tuple = tuple([int(x)
for x in sqlite_version.split(".")])
if sqlite_version_tuple >= MIN_SQLITE_VERSION_NUMBER:
# we win!
if sqlite_setup_debug:
print("%s/sqlite3.h: version %s"%(d, sqlite_version))
sqlite_incdir = d
break
else:
if sqlite_setup_debug:
print("%s: version %d is too old, need >= %s"%(d,
sqlite_version, MIN_SQLITE_VERSION))
elif sqlite_setup_debug:
print("sqlite: %s had no SQLITE_VERSION"%(f,))
if sqlite_incdir:
sqlite_dirs_to_check = [
os.path.join(sqlite_incdir, '..', 'lib64'),
os.path.join(sqlite_incdir, '..', 'lib'),
os.path.join(sqlite_incdir, '..', '..', 'lib64'),
os.path.join(sqlite_incdir, '..', '..', 'lib'),
]
sqlite_libfile = self.compiler.find_library_file(
sqlite_dirs_to_check + lib_dirs, 'sqlite3')
if sqlite_libfile:
sqlite_libdir = [os.path.abspath(os.path.dirname(sqlite_libfile))]
if sqlite_incdir and sqlite_libdir:
sqlite_srcs = ['_sqlite/cache.c',
'_sqlite/connection.c',
'_sqlite/cursor.c',
'_sqlite/microprotocols.c',
'_sqlite/module.c',
'_sqlite/prepare_protocol.c',
'_sqlite/row.c',
'_sqlite/statement.c',
'_sqlite/util.c', ]
sqlite_defines = []
if host_platform != "win32":
sqlite_defines.append(('MODULE_NAME', '"sqlite3"'))
else:
sqlite_defines.append(('MODULE_NAME', '\\"sqlite3\\"'))
# Enable support for loadable extensions in the sqlite3 module
# if --enable-loadable-sqlite-extensions configure option is used.
if '--enable-loadable-sqlite-extensions' not in sysconfig.get_config_var("CONFIG_ARGS"):
sqlite_defines.append(("SQLITE_OMIT_LOAD_EXTENSION", "1"))
if host_platform == 'darwin':
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entire path.
# This way a statically linked custom sqlite gets picked up
# before the dynamic library in /usr/lib.
sqlite_extra_link_args = ('-Wl,-search_paths_first',)
else:
sqlite_extra_link_args = ()
include_dirs = ["Modules/_sqlite"]
# Only include the directory where sqlite was found if it does
# not already exist in set include directories, otherwise you
# can end up with a bad search path order.
if sqlite_incdir not in self.compiler.include_dirs:
include_dirs.append(sqlite_incdir)
# avoid a runtime library path for a system library dir
if sqlite_libdir and sqlite_libdir[0] in lib_dirs:
sqlite_libdir = None
exts.append(Extension('_sqlite3', sqlite_srcs,
define_macros=sqlite_defines,
include_dirs=include_dirs,
library_dirs=sqlite_libdir,
extra_link_args=sqlite_extra_link_args,
libraries=["sqlite3",]))
else:
missing.append('_sqlite3')
dbm_setup_debug = False # verbose debug prints from this script?
dbm_order = ['gdbm']
# The standard Unix dbm module:
if host_platform not in ['cygwin']:
config_args = [arg.strip("'")
for arg in sysconfig.get_config_var("CONFIG_ARGS").split()]
dbm_args = [arg for arg in config_args
if arg.startswith('--with-dbmliborder=')]
if dbm_args:
dbm_order = [arg.split('=')[-1] for arg in dbm_args][-1].split(":")
else:
dbm_order = "ndbm:gdbm:bdb".split(":")
dbmext = None
for cand in dbm_order:
if cand == "ndbm":
if find_file("ndbm.h", inc_dirs, []) is not None:
# Some systems have -lndbm, others have -lgdbm_compat,
# others don't have either
if self.compiler.find_library_file(lib_dirs,
'ndbm'):
ndbm_libs = ['ndbm']
elif self.compiler.find_library_file(lib_dirs,
'gdbm_compat'):
ndbm_libs = ['gdbm_compat']
else:
ndbm_libs = []
if dbm_setup_debug: print("building dbm using ndbm")
dbmext = Extension('_dbm', ['_dbmmodule.c'],
define_macros=[
('HAVE_NDBM_H',None),
],
libraries=ndbm_libs)
break
elif cand == "gdbm":
if self.compiler.find_library_file(lib_dirs, 'gdbm'):
gdbm_libs = ['gdbm']
if self.compiler.find_library_file(lib_dirs,
'gdbm_compat'):
gdbm_libs.append('gdbm_compat')
if find_file("gdbm/ndbm.h", inc_dirs, []) is not None:
if dbm_setup_debug: print("building dbm using gdbm")
dbmext = Extension(
'_dbm', ['_dbmmodule.c'],
define_macros=[
('HAVE_GDBM_NDBM_H', None),
],
libraries = gdbm_libs)
break
if find_file("gdbm-ndbm.h", inc_dirs, []) is not None:
if dbm_setup_debug: print("building dbm using gdbm")
dbmext = Extension(
'_dbm', ['_dbmmodule.c'],
define_macros=[
('HAVE_GDBM_DASH_NDBM_H', None),
],
libraries = gdbm_libs)
break
elif cand == "bdb":
if dblibs:
if dbm_setup_debug: print("building dbm using bdb")
dbmext = Extension('_dbm', ['_dbmmodule.c'],
library_dirs=dblib_dir,
runtime_library_dirs=dblib_dir,
include_dirs=db_incs,
define_macros=[
('HAVE_BERKDB_H', None),
('DB_DBM_HSEARCH', None),
],
libraries=dblibs)
break
if dbmext is not None:
exts.append(dbmext)
else:
missing.append('_dbm')
# Anthony Baxter's gdbm module. GNU dbm(3) will require -lgdbm:
if ('gdbm' in dbm_order and
self.compiler.find_library_file(lib_dirs, 'gdbm')):
exts.append( Extension('_gdbm', ['_gdbmmodule.c'],
libraries = ['gdbm'] ) )
else:
missing.append('_gdbm')
# Unix-only modules
if host_platform != 'win32':
# Steen Lumholt's termios module
exts.append( Extension('termios', ['termios.c']) )
# Jeremy Hylton's rlimit interface
exts.append( Extension('resource', ['resource.c']) )
# Sun yellow pages. Some systems have the functions in libc.
if (host_platform not in ['cygwin', 'qnx6'] and
find_file('rpcsvc/yp_prot.h', inc_dirs, []) is not None):
if (self.compiler.find_library_file(lib_dirs, 'nsl')):
libs = ['nsl']
else:
libs = []
exts.append( Extension('nis', ['nismodule.c'],
libraries = libs) )
else:
missing.append('nis')
else:
missing.extend(['nis', 'resource', 'termios'])
# Curses support, requiring the System V version of curses, often
# provided by the ncurses library.
curses_defines = []
curses_includes = []
panel_library = 'panel'
if curses_library == 'ncursesw':
curses_defines.append(('HAVE_NCURSESW', '1'))
curses_includes.append('/usr/include/ncursesw')
# Bug 1464056: If _curses.so links with ncursesw,
# _curses_panel.so must link with panelw.
panel_library = 'panelw'
if host_platform == 'darwin':
# On OS X, there is no separate /usr/lib/libncursesw nor
# libpanelw. If we are here, we found a locally-supplied
# version of libncursesw. There should also be a
# libpanelw. _XOPEN_SOURCE defines are usually excluded
# for OS X but we need _XOPEN_SOURCE_EXTENDED here for
# ncurses wide char support
curses_defines.append(('_XOPEN_SOURCE_EXTENDED', '1'))
elif host_platform == 'darwin' and curses_library == 'ncurses':
# Building with the system-supplied combined libncurses/libpanel
curses_defines.append(('HAVE_NCURSESW', '1'))
curses_defines.append(('_XOPEN_SOURCE_EXTENDED', '1'))
if curses_library.startswith('ncurses'):
curses_libs = [curses_library]
exts.append( Extension('_curses', ['_cursesmodule.c'],
include_dirs=curses_includes,
define_macros=curses_defines,
libraries = curses_libs) )
elif curses_library == 'curses' and host_platform != 'darwin':
# OSX has an old Berkeley curses, not good enough for
# the _curses module.
if (self.compiler.find_library_file(lib_dirs, 'terminfo')):
curses_libs = ['curses', 'terminfo']
elif (self.compiler.find_library_file(lib_dirs, 'termcap')):
curses_libs = ['curses', 'termcap']
else:
curses_libs = ['curses']
exts.append( Extension('_curses', ['_cursesmodule.c'],
define_macros=curses_defines,
libraries = curses_libs) )
else:
missing.append('_curses')
# If the curses module is enabled, check for the panel module
if (module_enabled(exts, '_curses') and
self.compiler.find_library_file(lib_dirs, panel_library)):
exts.append( Extension('_curses_panel', ['_curses_panel.c'],
include_dirs=curses_includes,
define_macros=curses_defines,
libraries = [panel_library] + curses_libs) )
else:
missing.append('_curses_panel')
# Andrew Kuchling's zlib module. Note that some versions of zlib
# 1.1.3 have security problems. See CERT Advisory CA-2002-07:
# http://www.cert.org/advisories/CA-2002-07.html
#
# zlib 1.1.4 is fixed, but at least one vendor (RedHat) has decided to
# patch its zlib 1.1.3 package instead of upgrading to 1.1.4. For
# now, we still accept 1.1.3, because we think it's difficult to
# exploit this in Python, and we'd rather make it RedHat's problem
# than our problem <wink>.
#
# You can upgrade zlib to version 1.1.4 yourself by going to
# http://www.gzip.org/zlib/
zlib_inc = find_file('zlib.h', [], inc_dirs)
have_zlib = False
if zlib_inc is not None:
zlib_h = zlib_inc[0] + '/zlib.h'
version = '"0.0.0"'
version_req = '"1.1.3"'
if host_platform == 'darwin' and is_macosx_sdk_path(zlib_h):
zlib_h = os.path.join(macosx_sdk_root(), zlib_h[1:])
with open(zlib_h) as fp:
while 1:
line = fp.readline()
if not line:
break
if line.startswith('#define ZLIB_VERSION'):
version = line.split()[2]
break
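# Note, for illustration: `version` keeps the surrounding quotes from the
# #define, so the test below is a lexicographic string comparison. That is
# fine for the 1.1.3 floor, e.g.
## '"1.2.8"' >= '"1.1.3"'   # -> True
## '"1.10.0"' < '"1.2.0"'   # -> True (string order, not numeric order)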
if version >= version_req:
if (self.compiler.find_library_file(lib_dirs, 'z')):
if host_platform == "darwin":
zlib_extra_link_args = ('-Wl,-search_paths_first',)
else:
zlib_extra_link_args = ()
exts.append( Extension('zlib', ['zlibmodule.c'],
libraries = ['z'],
extra_link_args = zlib_extra_link_args))
have_zlib = True
else:
missing.append('zlib')
else:
missing.append('zlib')
else:
missing.append('zlib')
# Helper module for various ascii-encoders. Uses zlib for an optimized
# crc32 if we have it. Otherwise binascii uses its own.
if have_zlib:
extra_compile_args = ['-DUSE_ZLIB_CRC32']
libraries = ['z']
extra_link_args = zlib_extra_link_args
else:
extra_compile_args = []
libraries = []
extra_link_args = []
exts.append( Extension('binascii', ['binascii.c'],
extra_compile_args = extra_compile_args,
libraries = libraries,
extra_link_args = extra_link_args) )
# Gustavo Niemeyer's bz2 module.
if (self.compiler.find_library_file(lib_dirs, 'bz2')):
if host_platform == "darwin":
bz2_extra_link_args = ('-Wl,-search_paths_first',)
else:
bz2_extra_link_args = ()
exts.append( Extension('_bz2', ['_bz2module.c'],
libraries = ['bz2'],
extra_link_args = bz2_extra_link_args) )
else:
missing.append('_bz2')
# LZMA compression support.
if self.compiler.find_library_file(lib_dirs, 'lzma'):
exts.append( Extension('_lzma', ['_lzmamodule.c'],
libraries = ['lzma']) )
else:
missing.append('_lzma')
# Interface to the Expat XML parser
#
# Expat was written by James Clark and is now maintained by a group of
# developers on SourceForge; see www.libexpat.org for more information.
# The pyexpat module was written by Paul Prescod after a prototype by
# Jack Jansen. The Expat source is included in Modules/expat/. Usage
# of a system shared libexpat.so is possible with --with-system-expat
# configure option.
#
# More information on Expat can be found at www.libexpat.org.
#
if '--with-system-expat' in sysconfig.get_config_var("CONFIG_ARGS"):
expat_inc = []
define_macros = []
expat_lib = ['expat']
expat_sources = []
expat_depends = []
else:
expat_inc = [os.path.join(os.getcwd(), srcdir, 'Modules', 'expat')]
define_macros = [
('HAVE_EXPAT_CONFIG_H', '1'),
]
expat_lib = []
expat_sources = ['expat/xmlparse.c',
'expat/xmlrole.c',
'expat/xmltok.c']
expat_depends = ['expat/ascii.h',
'expat/asciitab.h',
'expat/expat.h',
'expat/expat_config.h',
'expat/expat_external.h',
'expat/internal.h',
'expat/latin1tab.h',
'expat/utf8tab.h',
'expat/xmlrole.h',
'expat/xmltok.h',
'expat/xmltok_impl.h'
]
exts.append(Extension('pyexpat',
define_macros = define_macros,
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['pyexpat.c'] + expat_sources,
depends = expat_depends,
))
# Fredrik Lundh's cElementTree module. Note that this also
# uses expat (via the CAPI hook in pyexpat).
if os.path.isfile(os.path.join(srcdir, 'Modules', '_elementtree.c')):
define_macros.append(('USE_PYEXPAT_CAPI', None))
exts.append(Extension('_elementtree',
define_macros = define_macros,
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['_elementtree.c'],
depends = ['pyexpat.c'] + expat_sources +
expat_depends,
))
else:
missing.append('_elementtree')
# Hye-Shik Chang's CJKCodecs modules.
exts.append(Extension('_multibytecodec',
['cjkcodecs/multibytecodec.c']))
for loc in ('kr', 'jp', 'cn', 'tw', 'hk', 'iso2022'):
exts.append(Extension('_codecs_%s' % loc,
['cjkcodecs/_codecs_%s.c' % loc]))
# Stefan Krah's _decimal module
exts.append(self._decimal_ext())
# Thomas Heller's _ctypes module
self.detect_ctypes(inc_dirs, lib_dirs)
# Richard Oudkerk's multiprocessing module
if host_platform == 'win32': # Windows
macros = dict()
libraries = ['ws2_32']
elif host_platform == 'darwin': # Mac OSX
macros = dict()
libraries = []
elif host_platform == 'cygwin': # Cygwin
macros = dict()
libraries = []
elif host_platform in ('freebsd4', 'freebsd5', 'freebsd6', 'freebsd7', 'freebsd8'):
# FreeBSD's P1003.1b semaphore support is very experimental
# and has many known problems. (as of June 2008)
macros = dict()
libraries = []
elif host_platform.startswith('openbsd'):
macros = dict()
libraries = []
elif host_platform.startswith('netbsd'):
macros = dict()
libraries = []
else: # Linux and other unices
macros = dict()
libraries = ['rt']
if host_platform == 'win32':
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/semaphore.c',
]
else:
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
]
if (sysconfig.get_config_var('HAVE_SEM_OPEN') and not
sysconfig.get_config_var('POSIX_SEMAPHORES_NOT_ENABLED')):
multiprocessing_srcs.append('_multiprocessing/semaphore.c')
if sysconfig.get_config_var('WITH_THREAD'):
exts.append ( Extension('_multiprocessing', multiprocessing_srcs,
define_macros=list(macros.items()),
include_dirs=["Modules/_multiprocessing"]))
else:
missing.append('_multiprocessing')
# End multiprocessing
# Platform-specific libraries
if host_platform.startswith(('linux', 'freebsd', 'gnukfreebsd')):
exts.append( Extension('ossaudiodev', ['ossaudiodev.c']) )
else:
missing.append('ossaudiodev')
if host_platform == 'darwin':
exts.append(
Extension('_scproxy', ['_scproxy.c'],
extra_link_args=[
'-framework', 'SystemConfiguration',
'-framework', 'CoreFoundation',
]))
self.extensions.extend(exts)
# Call the method for detecting whether _tkinter can be compiled
self.detect_tkinter(inc_dirs, lib_dirs)
if '_tkinter' not in [e.name for e in self.extensions]:
missing.append('_tkinter')
## # Uncomment these lines if you want to play with xxmodule.c
## ext = Extension('xx', ['xxmodule.c'])
## self.extensions.append(ext)
if 'd' not in sys.abiflags:
ext = Extension('xxlimited', ['xxlimited.c'],
define_macros=[('Py_LIMITED_API', '0x03050000')])
self.extensions.append(ext)
return missing
def detect_tkinter_explicitly(self):
# Build _tkinter using explicit locations for Tcl/Tk.
#
# This is enabled when both arguments are given to ./configure:
#
# --with-tcltk-includes="-I/path/to/tclincludes \
# -I/path/to/tkincludes"
# --with-tcltk-libs="-L/path/to/tcllibs -ltclm.n \
# -L/path/to/tklibs -ltkm.n"
#
# These values can also be specified or overridden via make:
# make TCLTK_INCLUDES="..." TCLTK_LIBS="..."
#
# This can be useful for building and testing tkinter with multiple
# versions of Tcl/Tk. Note that a build of Tk depends on a particular
# build of Tcl so you need to specify both arguments and use care when
# overriding.
# The _TCLTK variables are created in the Makefile sharedmods target.
tcltk_includes = os.environ.get('_TCLTK_INCLUDES')
tcltk_libs = os.environ.get('_TCLTK_LIBS')
if not (tcltk_includes and tcltk_libs):
# Resume default configuration search.
return 0
extra_compile_args = tcltk_includes.split()
extra_link_args = tcltk_libs.split()
ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
define_macros=[('WITH_APPINIT', 1)],
extra_compile_args = extra_compile_args,
extra_link_args = extra_link_args,
)
self.extensions.append(ext)
return 1
def detect_tkinter_darwin(self, inc_dirs, lib_dirs):
# The _tkinter module, using frameworks. Since frameworks are quite
# different, the UNIX search logic is not sharable.
from os.path import join, exists
framework_dirs = [
'/Library/Frameworks',
'/System/Library/Frameworks/',
join(os.getenv('HOME'), '/Library/Frameworks')
]
sysroot = macosx_sdk_root()
# Find the directory that contains the Tcl.framework and Tk.framework
# bundles.
# XXX distutils should support -F!
for F in framework_dirs:
# both Tcl.framework and Tk.framework should be present
for fw in 'Tcl', 'Tk':
if is_macosx_sdk_path(F):
if not exists(join(sysroot, F[1:], fw + '.framework')):
break
else:
if not exists(join(F, fw + '.framework')):
break
else:
# ok, F is now a directory with both frameworks. Continue
# building
break
else:
# Tk and Tcl frameworks not found. Normal "unix" tkinter search
# will now resume.
return 0
# For 8.4a2, we must add -I options that point inside the Tcl and Tk
# frameworks. In later releases we should hopefully be able to pass
# the -F option to gcc, which specifies a framework lookup path.
#
include_dirs = [
join(F, fw + '.framework', H)
for fw in ('Tcl', 'Tk')
for H in ('Headers', 'Versions/Current/PrivateHeaders')
]
# For 8.4a2, the X11 headers are not included. Rather than include a
# complicated search, this is a hard-coded path. It could bail out
# if X11 libs are not found...
include_dirs.append('/usr/X11R6/include')
frameworks = ['-framework', 'Tcl', '-framework', 'Tk']
# All existing framework builds of Tcl/Tk don't support 64-bit
# architectures.
cflags = sysconfig.get_config_vars('CFLAGS')[0]
archs = re.findall(r'-arch\s+(\w+)', cflags)
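# Illustrative sketch only (not executed): what the -arch scan yields for a
# hypothetical universal-build CFLAGS value.
## re.findall(r'-arch\s+(\w+)', '-arch x86_64 -arch i386 -O2')
## # -> ['x86_64', 'i386']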
tmpfile = os.path.join(self.build_temp, 'tk.arch')
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
# Note: cannot use os.popen or subprocess here; that
# requires extensions that are not available here.
if is_macosx_sdk_path(F):
os.system("file %s/Tk.framework/Tk | grep 'for architecture' > %s"%(os.path.join(sysroot, F[1:]), tmpfile))
else:
os.system("file %s/Tk.framework/Tk | grep 'for architecture' > %s"%(F, tmpfile))
with open(tmpfile) as fp:
detected_archs = []
for ln in fp:
a = ln.split()[-1]
if a in archs:
detected_archs.append(ln.split()[-1])
os.unlink(tmpfile)
for a in detected_archs:
frameworks.append('-arch')
frameworks.append(a)
ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
define_macros=[('WITH_APPINIT', 1)],
include_dirs = include_dirs,
libraries = [],
extra_compile_args = frameworks[2:],
extra_link_args = frameworks,
)
self.extensions.append(ext)
return 1
def detect_tkinter(self, inc_dirs, lib_dirs):
# The _tkinter module.
# Check whether --with-tcltk-includes and --with-tcltk-libs were
# configured or passed into the make target. If so, use these values
# to build tkinter and bypass the searches for Tcl and TK in standard
# locations.
if self.detect_tkinter_explicitly():
return
# Rather than complicate the code below, detecting and building
# AquaTk is a separate method. Only one Tkinter will be built on
# Darwin - either AquaTk, if it is found, or X11 based Tk.
if (host_platform == 'darwin' and
self.detect_tkinter_darwin(inc_dirs, lib_dirs)):
return
# Assume we haven't found any of the libraries or include files
# The versions with dots are used on Unix, and the versions without
# dots on Windows, for detection by cygwin.
tcllib = tklib = tcl_includes = tk_includes = None
for version in ['8.6', '86', '8.5', '85', '8.4', '84', '8.3', '83',
'8.2', '82', '8.1', '81', '8.0', '80']:
tklib = self.compiler.find_library_file(lib_dirs,
'tk' + version)
tcllib = self.compiler.find_library_file(lib_dirs,
'tcl' + version)
if tklib and tcllib:
# Exit the loop when we've found the Tcl/Tk libraries
break
# Now check for the header files
if tklib and tcllib:
# Check for the include files on Debian and {Free,Open}BSD, where
# they're put in /usr/include/{tcl,tk}X.Y
dotversion = version
if '.' not in dotversion and "bsd" in host_platform.lower():
# OpenBSD and FreeBSD use Tcl/Tk library names like libtcl83.a,
# but the include subdirs are named like .../include/tcl8.3.
dotversion = dotversion[:-1] + '.' + dotversion[-1]
tcl_include_sub = []
tk_include_sub = []
for dir in inc_dirs:
tcl_include_sub += [dir + os.sep + "tcl" + dotversion]
tk_include_sub += [dir + os.sep + "tk" + dotversion]
tk_include_sub += tcl_include_sub
tcl_includes = find_file('tcl.h', inc_dirs, tcl_include_sub)
tk_includes = find_file('tk.h', inc_dirs, tk_include_sub)
if (tcllib is None or tklib is None or
tcl_includes is None or tk_includes is None):
self.announce("INFO: Can't locate Tcl/Tk libs and/or headers", 2)
return
# OK... everything seems to be present for Tcl/Tk.
include_dirs = []
libs = []
defs = []
added_lib_dirs = []
for dir in tcl_includes + tk_includes:
if dir not in include_dirs:
include_dirs.append(dir)
# Check for various platform-specific directories
if host_platform == 'sunos5':
include_dirs.append('/usr/openwin/include')
added_lib_dirs.append('/usr/openwin/lib')
elif os.path.exists('/usr/X11R6/include'):
include_dirs.append('/usr/X11R6/include')
added_lib_dirs.append('/usr/X11R6/lib64')
added_lib_dirs.append('/usr/X11R6/lib')
elif os.path.exists('/usr/X11R5/include'):
include_dirs.append('/usr/X11R5/include')
added_lib_dirs.append('/usr/X11R5/lib')
else:
# Assume default location for X11
include_dirs.append('/usr/X11/include')
added_lib_dirs.append('/usr/X11/lib')
# If Cygwin, then verify that X is installed before proceeding
if host_platform == 'cygwin':
x11_inc = find_file('X11/Xlib.h', [], include_dirs)
if x11_inc is None:
return
# Check for BLT extension
if self.compiler.find_library_file(lib_dirs + added_lib_dirs,
'BLT8.0'):
defs.append( ('WITH_BLT', 1) )
libs.append('BLT8.0')
elif self.compiler.find_library_file(lib_dirs + added_lib_dirs,
'BLT'):
defs.append( ('WITH_BLT', 1) )
libs.append('BLT')
# Add the Tcl/Tk libraries
libs.append('tk'+ version)
libs.append('tcl'+ version)
if host_platform in ['aix3', 'aix4']:
libs.append('ld')
# Finally, link with the X11 libraries (not appropriate on cygwin)
if host_platform != "cygwin":
libs.append('X11')
ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
define_macros=[('WITH_APPINIT', 1)] + defs,
include_dirs = include_dirs,
libraries = libs,
library_dirs = added_lib_dirs,
)
self.extensions.append(ext)
# XXX handle these, but how to detect?
# *** Uncomment and edit for PIL (TkImaging) extension only:
# -DWITH_PIL -I../Extensions/Imaging/libImaging tkImaging.c \
# *** Uncomment and edit for TOGL extension only:
# -DWITH_TOGL togl.c \
# *** Uncomment these for TOGL extension only:
# -lGL -lGLU -lXext -lXmu \
def configure_ctypes_darwin(self, ext):
# Darwin (OS X) uses preconfigured files, in
# the Modules/_ctypes/libffi_osx directory.
srcdir = sysconfig.get_config_var('srcdir')
ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules',
'_ctypes', 'libffi_osx'))
sources = [os.path.join(ffi_srcdir, p)
for p in ['ffi.c',
'x86/darwin64.S',
'x86/x86-darwin.S',
'x86/x86-ffi_darwin.c',
'x86/x86-ffi64.c',
'powerpc/ppc-darwin.S',
'powerpc/ppc-darwin_closure.S',
'powerpc/ppc-ffi_darwin.c',
'powerpc/ppc64-darwin_closure.S',
]]
# Add .S (preprocessed assembly) to C compiler source extensions.
self.compiler.src_extensions.append('.S')
include_dirs = [os.path.join(ffi_srcdir, 'include'),
os.path.join(ffi_srcdir, 'powerpc')]
ext.include_dirs.extend(include_dirs)
ext.sources.extend(sources)
return True
def configure_ctypes(self, ext):
if not self.use_system_libffi:
if host_platform == 'darwin':
return self.configure_ctypes_darwin(ext)
print('warning: building with the bundled copy of libffi is'
' deprecated on this platform. It will not be'
' distributed with Python 3.7')
srcdir = sysconfig.get_config_var('srcdir')
ffi_builddir = os.path.join(self.build_temp, 'libffi')
ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules',
'_ctypes', 'libffi'))
ffi_configfile = os.path.join(ffi_builddir, 'fficonfig.py')
from distutils.dep_util import newer_group
config_sources = [os.path.join(ffi_srcdir, fname)
for fname in os.listdir(ffi_srcdir)
if os.path.isfile(os.path.join(ffi_srcdir, fname))]
if self.force or newer_group(config_sources,
ffi_configfile):
from distutils.dir_util import mkpath
mkpath(ffi_builddir)
config_args = [arg for arg in sysconfig.get_config_var("CONFIG_ARGS").split()
if (('--host=' in arg) or ('--build=' in arg))]
if not self.verbose:
config_args.append("-q")
# Pass empty CFLAGS because we'll just append the resulting
# CFLAGS to Python's; -g or -O2 is to be avoided.
cmd = "cd %s && env CFLAGS='' '%s/configure' %s" \
% (ffi_builddir, ffi_srcdir, " ".join(config_args))
res = os.system(cmd)
if res or not os.path.exists(ffi_configfile):
print("Failed to configure _ctypes module")
return False
fficonfig = {}
with open(ffi_configfile) as f:
exec(f.read(), globals(), fficonfig)
# Add .S (preprocessed assembly) to C compiler source extensions.
self.compiler.src_extensions.append('.S')
include_dirs = [os.path.join(ffi_builddir, 'include'),
ffi_builddir,
os.path.join(ffi_srcdir, 'src')]
extra_compile_args = fficonfig['ffi_cflags'].split()
ext.sources.extend(os.path.join(ffi_srcdir, f) for f in
fficonfig['ffi_sources'])
ext.include_dirs.extend(include_dirs)
ext.extra_compile_args.extend(extra_compile_args)
return True
def detect_ctypes(self, inc_dirs, lib_dirs):
self.use_system_libffi = False
include_dirs = []
extra_compile_args = []
extra_link_args = []
sources = ['_ctypes/_ctypes.c',
'_ctypes/callbacks.c',
'_ctypes/callproc.c',
'_ctypes/stgdict.c',
'_ctypes/cfield.c']
depends = ['_ctypes/ctypes.h']
math_libs = self.detect_math_libs()
if host_platform == 'darwin':
sources.append('_ctypes/malloc_closure.c')
sources.append('_ctypes/darwin/dlfcn_simple.c')
extra_compile_args.append('-DMACOSX')
include_dirs.append('_ctypes/darwin')
# XXX Is this still needed?
## extra_link_args.extend(['-read_only_relocs', 'warning'])
elif host_platform == 'sunos5':
# XXX This shouldn't be necessary; it appears that some
# of the assembler code is non-PIC (i.e. it has relocations
# when it shouldn't). The proper fix would be to rewrite
# the assembler code to be PIC.
# This only works with GCC; the Sun compiler likely refuses
# this option. If you want to compile ctypes with the Sun
# compiler, please research a proper solution, instead of
# finding some -z option for the Sun compiler.
extra_link_args.append('-mimpure-text')
elif host_platform.startswith('hp-ux'):
extra_link_args.append('-fPIC')
ext = Extension('_ctypes',
include_dirs=include_dirs,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
libraries=[],
sources=sources,
depends=depends)
# function my_sqrt() needs math library for sqrt()
ext_test = Extension('_ctypes_test',
sources=['_ctypes/_ctypes_test.c'],
libraries=math_libs)
self.extensions.extend([ext, ext_test])
if host_platform == 'darwin':
if '--with-system-ffi' not in sysconfig.get_config_var("CONFIG_ARGS"):
return
# OS X 10.5 comes with libffi.dylib; the include files are
# in /usr/include/ffi
inc_dirs.append('/usr/include/ffi')
elif '--without-system-ffi' in sysconfig.get_config_var("CONFIG_ARGS"):
return
ffi_inc = [sysconfig.get_config_var("LIBFFI_INCLUDEDIR")]
if not ffi_inc or ffi_inc[0] == '':
ffi_inc = find_file('ffi.h', [], inc_dirs)
if ffi_inc is not None:
ffi_h = ffi_inc[0] + '/ffi.h'
with open(ffi_h) as f:
for line in f:
line = line.strip()
if line.startswith(('#define LIBFFI_H',
'#define ffi_wrapper_h')):
break
else:
ffi_inc = None
print('Header file {} does not define LIBFFI_H or '
'ffi_wrapper_h'.format(ffi_h))
ffi_lib = None
if ffi_inc is not None:
for lib_name in ('ffi', 'ffi_pic'):
if (self.compiler.find_library_file(lib_dirs, lib_name)):
ffi_lib = lib_name
break
if ffi_inc and ffi_lib:
ext.include_dirs.extend(ffi_inc)
ext.libraries.append(ffi_lib)
self.use_system_libffi = True
def _decimal_ext(self):
extra_compile_args = []
undef_macros = []
if '--with-system-libmpdec' in sysconfig.get_config_var("CONFIG_ARGS"):
include_dirs = []
libraries = [':libmpdec.so.2']
sources = ['_decimal/_decimal.c']
depends = ['_decimal/docstrings.h']
else:
srcdir = sysconfig.get_config_var('srcdir')
include_dirs = [os.path.abspath(os.path.join(srcdir,
'Modules',
'_decimal',
'libmpdec'))]
libraries = []
sources = [
'_decimal/_decimal.c',
'_decimal/libmpdec/basearith.c',
'_decimal/libmpdec/constants.c',
'_decimal/libmpdec/context.c',
'_decimal/libmpdec/convolute.c',
'_decimal/libmpdec/crt.c',
'_decimal/libmpdec/difradix2.c',
'_decimal/libmpdec/fnt.c',
'_decimal/libmpdec/fourstep.c',
'_decimal/libmpdec/io.c',
'_decimal/libmpdec/memory.c',
'_decimal/libmpdec/mpdecimal.c',
'_decimal/libmpdec/numbertheory.c',
'_decimal/libmpdec/sixstep.c',
'_decimal/libmpdec/transpose.c',
]
depends = [
'_decimal/docstrings.h',
'_decimal/libmpdec/basearith.h',
'_decimal/libmpdec/bits.h',
'_decimal/libmpdec/constants.h',
'_decimal/libmpdec/convolute.h',
'_decimal/libmpdec/crt.h',
'_decimal/libmpdec/difradix2.h',
'_decimal/libmpdec/fnt.h',
'_decimal/libmpdec/fourstep.h',
'_decimal/libmpdec/io.h',
'_decimal/libmpdec/mpalloc.h',
'_decimal/libmpdec/mpdecimal.h',
'_decimal/libmpdec/numbertheory.h',
'_decimal/libmpdec/sixstep.h',
'_decimal/libmpdec/transpose.h',
'_decimal/libmpdec/typearith.h',
'_decimal/libmpdec/umodarith.h',
]
config = {
'x64': [('CONFIG_64','1'), ('ASM','1')],
'uint128': [('CONFIG_64','1'), ('ANSI','1'), ('HAVE_UINT128_T','1')],
'ansi64': [('CONFIG_64','1'), ('ANSI','1')],
'ppro': [('CONFIG_32','1'), ('PPRO','1'), ('ASM','1')],
'ansi32': [('CONFIG_32','1'), ('ANSI','1')],
'ansi-legacy': [('CONFIG_32','1'), ('ANSI','1'),
('LEGACY_COMPILER','1')],
'universal': [('UNIVERSAL','1')]
}
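# Illustrative sketch only: the automatic choice below can be overridden for
# testing, e.g. (hypothetical invocation)
## PYTHON_DECIMAL_WITH_MACHINE=uint128 ./python setup.py build_ext
## # -> define_macros == [('CONFIG_64','1'), ('ANSI','1'), ('HAVE_UINT128_T','1')]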
cc = sysconfig.get_config_var('CC')
sizeof_size_t = sysconfig.get_config_var('SIZEOF_SIZE_T')
machine = os.environ.get('PYTHON_DECIMAL_WITH_MACHINE')
if machine:
# Override automatic configuration to facilitate testing.
define_macros = config[machine]
elif host_platform == 'darwin':
# Universal here means: build with the same options Python
# was built with.
define_macros = config['universal']
elif sizeof_size_t == 8:
if sysconfig.get_config_var('HAVE_GCC_ASM_FOR_X64'):
define_macros = config['x64']
elif sysconfig.get_config_var('HAVE_GCC_UINT128_T'):
define_macros = config['uint128']
else:
define_macros = config['ansi64']
elif sizeof_size_t == 4:
ppro = sysconfig.get_config_var('HAVE_GCC_ASM_FOR_X87')
if ppro and ('gcc' in cc or 'clang' in cc) and \
'sunos' not in host_platform:
# solaris: problems with register allocation.
# icc >= 11.0 works as well.
define_macros = config['ppro']
extra_compile_args.append('-Wno-unknown-pragmas')
else:
define_macros = config['ansi32']
else:
raise DistutilsError("_decimal: unsupported architecture")
# Workarounds for toolchain bugs:
if sysconfig.get_config_var('HAVE_IPA_PURE_CONST_BUG'):
# Some versions of gcc miscompile inline asm:
# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=46491
# http://gcc.gnu.org/ml/gcc/2010-11/msg00366.html
extra_compile_args.append('-fno-ipa-pure-const')
if sysconfig.get_config_var('HAVE_GLIBC_MEMMOVE_BUG'):
# _FORTIFY_SOURCE wrappers for memmove and bcopy are incorrect:
# http://sourceware.org/ml/libc-alpha/2010-12/msg00009.html
undef_macros.append('_FORTIFY_SOURCE')
# Faster version without thread local contexts:
if not sysconfig.get_config_var('WITH_THREAD'):
define_macros.append(('WITHOUT_THREADS', 1))
# Uncomment for extra functionality:
#define_macros.append(('EXTRA_FUNCTIONALITY', 1))
ext = Extension (
'_decimal',
include_dirs=include_dirs,
libraries=libraries,
define_macros=define_macros,
undef_macros=undef_macros,
extra_compile_args=extra_compile_args,
sources=sources,
depends=depends
)
return ext
class PyBuildInstall(install):
# Suppress the warning about installation into the lib_dynload
# directory, which is not in sys.path when running Python during
# installation:
def initialize_options (self):
install.initialize_options(self)
self.warn_dir=0
# Customize subcommands to not install an egg-info file for Python
sub_commands = [('install_lib', install.has_lib),
('install_headers', install.has_headers),
('install_scripts', install.has_scripts),
('install_data', install.has_data)]
class PyBuildInstallLib(install_lib):
# Do exactly what install_lib does but make sure correct access modes get
# set on installed directories and files. All installed files will get
# mode 644 unless they are a shared library, in which case they will get
# mode 755. All installed directories will get mode 755.
# This works for EXT_SUFFIX too, which ends with SHLIB_SUFFIX.
shlib_suffix = sysconfig.get_config_var("SHLIB_SUFFIX")
def install(self):
outfiles = install_lib.install(self)
self.set_file_modes(outfiles, 0o644, 0o755)
self.set_dir_modes(self.install_dir, 0o755)
return outfiles
def set_file_modes(self, files, defaultMode, sharedLibMode):
if not self.is_chmod_supported(): return
if not files: return
for filename in files:
if os.path.islink(filename): continue
mode = defaultMode
if filename.endswith(self.shlib_suffix): mode = sharedLibMode
log.info("changing mode of %s to %o", filename, mode)
if not self.dry_run: os.chmod(filename, mode)
def set_dir_modes(self, dirname, mode):
if not self.is_chmod_supported(): return
for dirpath, dirnames, fnames in os.walk(dirname):
if os.path.islink(dirpath):
continue
log.info("changing mode of %s to %o", dirpath, mode)
if not self.dry_run: os.chmod(dirpath, mode)
def is_chmod_supported(self):
return hasattr(os, 'chmod')
class PyBuildScripts(build_scripts):
def copy_scripts(self):
outfiles, updated_files = build_scripts.copy_scripts(self)
fullversion = '-{0[0]}.{0[1]}'.format(sys.version_info)
minoronly = '.{0[1]}'.format(sys.version_info)
newoutfiles = []
newupdated_files = []
for filename in outfiles:
if filename.endswith(('2to3', 'pyvenv')):
newfilename = filename + fullversion
else:
newfilename = filename + minoronly
log.info('renaming %s to %s', filename, newfilename)
os.rename(filename, newfilename)
newoutfiles.append(newfilename)
if filename in updated_files:
newupdated_files.append(newfilename)
return newoutfiles, newupdated_files
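# Illustrative sketch only (not executed): with sys.version_info == (3, 6),
# the renaming above produces
## 'pydoc3'  -> 'pydoc3.6'    (minoronly)
## 'idle3'   -> 'idle3.6'
## '2to3'    -> '2to3-3.6'    (fullversion)
## 'pyvenv'  -> 'pyvenv-3.6'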
SUMMARY = """
Python is an interpreted, interactive, object-oriented programming
language. It is often compared to Tcl, Perl, Scheme or Java.
Python combines remarkable power with very clear syntax. It has
modules, classes, exceptions, very high level dynamic data types, and
dynamic typing. There are interfaces to many system calls and
libraries, as well as to various windowing systems (X11, Motif, Tk,
Mac, MFC). New built-in modules are easily written in C or C++. Python
is also usable as an extension language for applications that need a
programmable interface.
The Python implementation is portable: it runs on many brands of UNIX,
on Windows, DOS, Mac, Amiga... If your favorite system isn't
listed here, it may still be supported, if there's a C compiler for
it. Ask around on comp.lang.python -- or just try compiling Python
yourself.
"""
CLASSIFIERS = """
Development Status :: 6 - Mature
License :: OSI Approved :: Python Software Foundation License
Natural Language :: English
Programming Language :: C
Programming Language :: Python
Topic :: Software Development
"""
def main():
# turn off warnings when deprecated modules are imported
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
setup(# PyPI Metadata (PEP 301)
name = "Python",
version = sys.version.split()[0],
url = "http://www.python.org/%d.%d" % sys.version_info[:2],
maintainer = "Guido van Rossum and the Python community",
maintainer_email = "python-dev@python.org",
description = "A high-level object-oriented programming language",
long_description = SUMMARY.strip(),
license = "PSF license",
classifiers = [x for x in CLASSIFIERS.split("\n") if x],
platforms = ["Many"],
# Build info
cmdclass = {'build_ext': PyBuildExt,
'build_scripts': PyBuildScripts,
'install': PyBuildInstall,
'install_lib': PyBuildInstallLib},
# The struct module is defined here, because build_ext won't be
# called unless there's at least one extension module defined.
ext_modules=[Extension('_struct', ['_struct.c'])],
# If you change the scripts installed here, you also need to
# check the PyBuildScripts command above, and change the links
# created by the bininstall target in Makefile.pre.in
scripts = ["Tools/scripts/pydoc3", "Tools/scripts/idle3",
"Tools/scripts/2to3", "Tools/scripts/pyvenv"]
)
# --install-platlib
if __name__ == '__main__':
main()
| anbangleo/NlsdeWeb | Python-3.6.0/setup.py | Python | mit | 101,041 | ["VisIt"] | 71ffa5a25db9d6e45a08f5356f778e595f62bf001cccd26bc587364a97144a1f |
#
# Copyright (c) 2014 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
import time
from behave import *
from steps import login_url, logout_url, signup_url
from common import *
from ..page_objects import ControlPanelPage
from ..page_objects import LoginPage
from ..page_objects import SignUpPage
from ..page_objects import TagList
@when(u'I visit the user-agent')
def step_impl(context):
context.browser.get(login_url())
@then(u'I should see a login button')
def step_impl(context):
context.browser.find_element_by_css_selector('button[type=submit]')
@given(u'I\'m logged in')
@when(u'I login')
def step_impl(context):
context.browser.get(login_url())
login_page = LoginPage(context)
login_page.enter_username(context.random_user.username).enter_password(context.random_user.password).login()
login_page.wait_interstitial_page()
@then(u'I see the inbox')
def step_impl(context):
# phantomjs cannot deal with the interstitial. We need to load the
# website manually after the user-agent has started
time.sleep(30)
taglist = TagList(context)
taglist.is_pixelated_loaded()
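# A possible alternative to the fixed sleep above (untested sketch, assuming
# selenium's WebDriverWait is available here and is_pixelated_loaded()
# returns a truthy value once the page is ready):
# from selenium.webdriver.support.ui import WebDriverWait
# WebDriverWait(context.browser, 30).until(
#     lambda _: TagList(context).is_pixelated_loaded())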
@when(u'I logout')
def step_impl(context):
logout_button = context.browser.find_element_by_css_selector('ul#logout')
logout_button.click()
@when(u'I visit the signup-page')
def step_impl(context):
context.browser.get(signup_url())
@then(u'I should see a signup button')
def step_impl(context):
context.browser.find_element_by_name('button')
@when(u'I register')
def step_impl(context):
signup_page = SignUpPage(context)
signup_page.enter_username(context.random_user.username)
signup_page.enter_password(context.random_user.password)
signup_page.enter_password_confirmation(context.random_user.password)
signup_page.enter_invite_code(get_invite_code())
signup_page.click_signup_button()
@then(u'I see the control-panel')
def step_impl(context):
controlpanel_page = ControlPanelPage(context)
controlpanel_page.is_control_panel_home()
|
pixelated/puppet-pixelated
|
files/functional-tests/steps/account.py
|
Python
|
agpl-3.0
| 2,635
|
[
"VisIt"
] |
51c4e2300dccd8096191249ad2f7f18d96bda423bcec54f20fda8f03711bc55d
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Bibauthorid Web Interface Logic and URL handler. """
# pylint: disable=W0105
# pylint: disable=C0301
# pylint: disable=W0613
from cgi import escape
from pprint import pformat
from operator import itemgetter
import re
try:
from invenio.jsonutils import json, json_unicode_to_utf8, CFG_JSON_AVAILABLE
except ImportError:
CFG_JSON_AVAILABLE = False
json = None
from invenio.bibauthorid_webapi import add_cname_to_hepname_record
from invenio.bibauthorid_webapi import create_new_person
from invenio.config import CFG_SITE_URL, CFG_BASE_URL
from invenio.bibauthorid_config import AID_ENABLED, PERSON_SEARCH_RESULTS_SHOW_PAPERS_PERSON_LIMIT, \
BIBAUTHORID_UI_SKIP_ARXIV_STUB_PAGE, VALID_EXPORT_FILTERS, PERSONS_PER_PAGE, \
MAX_NUM_SHOW_PAPERS, BIBAUTHORID_CFG_SITE_NAME, CFG_BIBAUTHORID_ENABLED
from invenio.config import CFG_SITE_LANG, CFG_SITE_URL, CFG_INSPIRE_SITE, CFG_SITE_SECURE_URL
from invenio.bibauthorid_name_utils import most_relevant_name, clean_string
from invenio.webpage import page, pageheaderonly, pagefooteronly
from invenio.messages import gettext_set_language # , wash_language
from invenio.template import load
from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory
from invenio.session import get_session
from invenio.urlutils import redirect_to_url, get_canonical_and_alternates_urls
from invenio.webuser import (getUid,
page_not_authorized,
collect_user_info,
set_user_preferences,
get_user_preferences,
email_valid_p,
emailUnique,
get_email_from_username,
get_uid_from_email,
isGuestUser)
from invenio.access_control_admin import acc_get_user_roles
from invenio.search_engine import perform_request_search
from invenio.search_engine_utils import get_fieldvalues
from invenio.bibauthorid_config import CREATE_NEW_PERSON
from invenio.bibsched import bibsched_task_finished_successfully, \
bibsched_task_finished_with_error, bibsched_task_running, bibsched_task_waiting, \
UnknownBibschedStatus
import invenio.webinterface_handler_config as apache
import invenio.webauthorprofile_interface as webauthorapi
import invenio.bibauthorid_webapi as webapi
from invenio.bibauthorid_general_utils import get_title_of_doi, get_title_of_arxiv_pubid, is_valid_orcid
from invenio.bibauthorid_backinterface import update_external_ids_of_authors, get_orcid_id_of_author, \
get_validated_request_tickets_for_author, get_title_of_paper, get_claimed_papers_of_author, \
get_free_author_id
from invenio.bibauthorid_dbinterface import defaultdict, remove_arxiv_papers_of_author, \
get_author_by_canonical_name, get_token, set_token, remove_rtid_from_ticket
from invenio.orcidutils import get_dois_from_orcid, get_dois_from_orcid_using_pid
from invenio.bibauthorid_webauthorprofileinterface import is_valid_canonical_id, get_person_id_from_canonical_id, \
get_person_redirect_link, author_has_papers
from invenio.bibauthorid_templates import WebProfileMenu, WebProfilePage
from invenio.bibauthorid_general_utils import get_inspire_record_url
from invenio.bibcatalog import BIBCATALOG_SYSTEM
# Imports related to hepnames update form
from invenio.bibedit_utils import get_bibrecord
from invenio.bibrecord import record_get_field_value, record_get_field_values, \
record_get_field_instances, field_get_subfield_values
from invenio.bibauthorid_name_utils import split_name_parts
from invenio.orcidutils import push_orcid_papers
TEMPLATE = load('bibauthorid')
class WebInterfaceBibAuthorIDClaimPages(WebInterfaceDirectory):
'''
Handles /author/claim pages and AJAX requests.
Supplies the methods:
/author/claim/<string>
/author/claim/action
/author/claim/claimstub
/author/claim/export
/author/claim/merge_profiles_ajax
/author/claim/search_box_ajax
/author/claim/tickets_admin
/author/claim/search
'''
_exports = ['',
'action',
'claimstub',
'export',
'merge_profiles_ajax',
'search_box_ajax',
'tickets_admin'
]
def _lookup(self, component, path):
'''
This handler parses dynamic URLs:
- /author/profile/1332 shows the page of author with id: 1332
- /author/profile/100:5522,1431 shows the page of the author
identified by the bibrefrec: '100:5522,1431'
'''
        if component not in self._exports:
return WebInterfaceBibAuthorIDClaimPages(component), path
def _is_profile_owner(self, pid):
return self.person_id == int(pid)
def _is_admin(self, pinfo):
return pinfo['ulevel'] == 'admin'
def __init__(self, identifier=None):
'''
Constructor of the web interface.
@param identifier: identifier of an author. Can be one of:
- an author id: e.g. "14"
- a canonical id: e.g. "J.R.Ellis.1"
- a bibrefrec: e.g. "100:1442,155"
@type identifier: str
'''
        self.person_id = -1  # -1 is not a valid author identifier
if identifier is None or not isinstance(identifier, str):
return
# check if it's a canonical id: e.g. "J.R.Ellis.1"
pid = int(webapi.get_person_id_from_canonical_id(identifier))
if pid >= 0:
self.person_id = pid
return
# check if it's an author id: e.g. "14"
try:
self.person_id = int(identifier)
return
except ValueError:
pass
# check if it's a bibrefrec: e.g. "100:1442,155"
if webapi.is_valid_bibref(identifier):
pid = int(webapi.get_person_id_from_paper(identifier))
if pid >= 0:
self.person_id = pid
return
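        # Resolution order illustrated (identifiers taken from the
        # docstring above):
        #   "J.R.Ellis.1"  -> canonical id lookup
        #   "14"           -> plain numeric author id
        #   "100:1442,155" -> bibrefrec lookup
        # An identifier matching none of these leaves person_id at -1.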
def __call__(self, req, form):
'''
Serve the main person page.
Will use the object's person id to get a person's information.
@param req: apache request object
@type req: apache request object
@param form: POST/GET variables of the request
@type form: dict
@return: a full page formatted in HTML
@rtype: str
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'open_claim': (str, None),
'ticketid': (int, -1),
'verbose': (int, 0)})
debug = "verbose" in argd and argd["verbose"] > 0
ln = argd['ln']
req.argd = argd # needed for perform_req_search
if self.person_id < 0:
return redirect_to_url(req, '%s/author/search' % (CFG_SITE_URL))
no_access = self._page_access_permission_wall(req, [self.person_id])
if no_access:
return no_access
pinfo['claim_in_process'] = True
user_info = collect_user_info(req)
user_info['precached_viewclaimlink'] = pinfo['claim_in_process']
session.dirty = True
if self.person_id != -1:
pinfo['claimpaper_admin_last_viewed_pid'] = self.person_id
rt_ticket_id = argd['ticketid']
if rt_ticket_id != -1:
pinfo["admin_requested_ticket_id"] = rt_ticket_id
session.dirty = True
# Create menu and page using templates
cname = webapi.get_canonical_id_from_person_id(self.person_id)
menu = WebProfileMenu(str(cname), "claim", ln, self._is_profile_owner(pinfo['pid']), self._is_admin(pinfo))
profile_page = WebProfilePage("claim", webapi.get_longest_name_from_pid(self.person_id))
profile_page.add_profile_menu(menu)
full_name = webapi.get_longest_name_from_pid(self.person_id)
page_title = '%s - Publications Management' % full_name
guest_prompt = 'true'
if not CFG_INSPIRE_SITE:
guest_prompt = 'false'
if 'prompt_shown' not in session:
session['prompt_shown'] = False
if session['prompt_shown']:
guest_prompt = 'false'
else:
session['prompt_shown'] = True
session.dirty = True
profile_page.add_bootstrapped_data(json.dumps({
"backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s", guestPrompt: %s});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel, guest_prompt)
}))
if debug:
profile_page.add_debug_info(session)
# body = self._generate_optional_menu(ulevel, req, form)
content = self._generate_tabs(ulevel, req)
content += self._generate_footer(ulevel)
content = content.decode('utf-8', 'strict')
webapi.history_log_visit(req, 'claim', pid=self.person_id)
return page(title=page_title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=profile_page.get_wrapped_body("generic", {'html': content}).encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def _page_access_permission_wall(self, req, req_pid=None, req_level=None):
'''
Display an error page if user not authorized to use the interface.
@param req: Apache Request Object for session management
@type req: Apache Request Object
@param req_pid: Requested person id
@type req_pid: int
@param req_level: Request level required for the page
@type req_level: string
'''
session = get_session(req)
uid = getUid(req)
pinfo = session["personinfo"]
uinfo = collect_user_info(req)
if 'ln' in pinfo:
ln = pinfo["ln"]
else:
ln = CFG_SITE_LANG
_ = gettext_set_language(ln)
is_authorized = True
pids_to_check = []
if not AID_ENABLED:
return page_not_authorized(req, text=_("Fatal: Author ID capabilities are disabled on this system."))
if req_level and 'ulevel' in pinfo and pinfo["ulevel"] != req_level:
return page_not_authorized(req, text=_("Fatal: You are not allowed to access this functionality."))
if req_pid and not isinstance(req_pid, list):
pids_to_check = [req_pid]
elif req_pid and isinstance(req_pid, list):
pids_to_check = req_pid
if (not (uinfo['precached_usepaperclaim']
or uinfo['precached_usepaperattribution'])
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
is_authorized = False
if is_authorized and not webapi.user_can_view_CMP(uid):
is_authorized = False
if is_authorized and 'ticket' in pinfo:
for tic in pinfo["ticket"]:
if 'pid' in tic:
pids_to_check.append(tic['pid'])
if pids_to_check and is_authorized:
user_pid = webapi.get_pid_from_uid(uid)
if not uinfo['precached_usepaperattribution']:
                if (user_pid not in pids_to_check
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
is_authorized = False
elif (user_pid in pids_to_check
and 'ulevel' in pinfo
and not pinfo["ulevel"] == "admin"):
for tic in list(pinfo["ticket"]):
if not tic["pid"] == user_pid:
pinfo['ticket'].remove(tic)
if not is_authorized:
return page_not_authorized(req, text=_("Fatal: You are not allowed to access this functionality."))
else:
return ""
def _generate_title(self, ulevel):
'''
Generates the title for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@return: title
@rtype: str
'''
def generate_title_guest():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers for: ' + str(webapi.get_person_redirect_link(self.person_id))
return title
def generate_title_user():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers (user interface) for: ' + str(webapi.get_person_redirect_link(self.person_id))
return title
def generate_title_admin():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers (administrator interface) for: ' + str(
webapi.get_person_redirect_link(self.person_id))
return title
generate_title = {'guest': generate_title_guest,
'user': generate_title_user,
'admin': generate_title_admin}
return generate_title[ulevel]()
def _generate_tabs(self, ulevel, req):
'''
Generates the tabs content for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@param req: apache request object
@type req: apache request object
@return: tabs content
@rtype: str
'''
from invenio.bibauthorid_templates import verbiage_dict as tmpl_verbiage_dict
from invenio.bibauthorid_templates import buttons_verbiage_dict as tmpl_buttons_verbiage_dict
def generate_tabs_guest(req):
links = list() # ['delete', 'commit','del_entry','commit_entry']
tabs = ['records', 'repealed', 'review']
return generate_tabs_admin(req, show_tabs=tabs, ticket_links=links,
open_tickets=list(),
verbiage_dict=tmpl_verbiage_dict['guest'],
buttons_verbiage_dict=tmpl_buttons_verbiage_dict['guest'],
show_reset_button=False)
def generate_tabs_user(req):
links = ['delete', 'del_entry']
tabs = ['records', 'repealed', 'review', 'tickets']
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
user_is_owner = 'not_owner'
if pinfo["claimpaper_admin_last_viewed_pid"] == webapi.get_pid_from_uid(uid):
user_is_owner = 'owner'
open_tickets = webapi.get_person_request_ticket(self.person_id)
tickets = list()
for t in open_tickets:
owns = False
for row in t[0]:
if row[0] == 'uid-ip' and row[1].split('||')[0] == str(uid):
owns = True
if owns:
tickets.append(t)
return generate_tabs_admin(req, show_tabs=tabs, ticket_links=links,
open_tickets=tickets,
verbiage_dict=tmpl_verbiage_dict['user'][user_is_owner],
buttons_verbiage_dict=tmpl_buttons_verbiage_dict['user'][user_is_owner])
def generate_tabs_admin(req, show_tabs=['records', 'repealed', 'review', 'comments', 'tickets', 'data'],
ticket_links=['delete', 'commit', 'del_entry', 'commit_entry'], open_tickets=None,
verbiage_dict=None, buttons_verbiage_dict=None, show_reset_button=True):
session = get_session(req)
personinfo = dict()
try:
personinfo = session["personinfo"]
except KeyError:
return ""
if 'ln' in personinfo:
ln = personinfo["ln"]
else:
ln = CFG_SITE_LANG
all_papers = webapi.get_papers_by_person_id(self.person_id, ext_out=True)
records = [{'recid': paper[0],
'bibref': paper[1],
'flag': paper[2],
'authorname': paper[3],
'authoraffiliation': paper[4],
'paperdate': paper[5],
'rt_status': paper[6],
'paperexperiment': paper[7]} for paper in all_papers]
rejected_papers = [row for row in records if row['flag'] < -1]
rest_of_papers = [row for row in records if row['flag'] >= -1]
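            # Flag convention as used by the split above: flag < -1 marks a
            # paper repealed from this profile; everything else stays in the
            # working set shown to the user.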
review_needed = webapi.get_review_needing_records(self.person_id)
if len(review_needed) < 1:
if 'review' in show_tabs:
show_tabs.remove('review')
if open_tickets is None:
open_tickets = webapi.get_person_request_ticket(self.person_id)
else:
if len(open_tickets) < 1 and 'tickets' in show_tabs:
show_tabs.remove('tickets')
rt_tickets = None
if "admin_requested_ticket_id" in personinfo:
rt_tickets = personinfo["admin_requested_ticket_id"]
if verbiage_dict is None:
verbiage_dict = translate_dict_values(tmpl_verbiage_dict['admin'], ln)
if buttons_verbiage_dict is None:
buttons_verbiage_dict = translate_dict_values(tmpl_buttons_verbiage_dict['admin'], ln)
# send data to the template function
tabs = TEMPLATE.tmpl_admin_tabs(ln, person_id=self.person_id,
rejected_papers=rejected_papers,
rest_of_papers=rest_of_papers,
review_needed=review_needed,
rt_tickets=rt_tickets,
open_rt_tickets=open_tickets,
show_tabs=show_tabs,
ticket_links=ticket_links,
verbiage_dict=verbiage_dict,
buttons_verbiage_dict=buttons_verbiage_dict,
show_reset_button=show_reset_button)
return tabs
def translate_dict_values(dictionary, ln):
def translate_str_values(dictionary, f=lambda x: x):
translated_dict = dict()
for key, value in dictionary.iteritems():
if isinstance(value, str):
translated_dict[key] = f(value)
elif isinstance(value, dict):
translated_dict[key] = translate_str_values(value, f)
else:
raise TypeError("Value should be either string or dictionary.")
return translated_dict
return translate_str_values(dictionary, f=gettext_set_language(ln))
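            # Example (hypothetical verbiage dict): passing
            # {'tabs': {'records': 'Papers'}} returns a dict of the same
            # shape with every string leaf run through gettext for ln.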
generate_tabs = {'guest': generate_tabs_guest,
'user': generate_tabs_user,
'admin': generate_tabs_admin}
return generate_tabs[ulevel](req)
def _generate_footer(self, ulevel):
'''
Generates the footer for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@return: footer
@rtype: str
'''
def generate_footer_guest():
return TEMPLATE.tmpl_invenio_search_box()
def generate_footer_user():
return generate_footer_guest()
def generate_footer_admin():
return generate_footer_guest()
generate_footer = {'guest': generate_footer_guest,
'user': generate_footer_user,
'admin': generate_footer_admin}
return generate_footer[ulevel]()
def _ticket_dispatch_end(self, req):
'''
The ticket dispatch is finished, redirect to the original page of
origin or to the last_viewed_pid or return to the papers autoassigned box to populate its data
'''
session = get_session(req)
pinfo = session["personinfo"]
webapi.session_bareinit(req)
if 'claim_in_process' in pinfo:
pinfo['claim_in_process'] = False
if "merge_ticket" in pinfo and pinfo['merge_ticket']:
pinfo['merge_ticket'] = []
user_info = collect_user_info(req)
user_info['precached_viewclaimlink'] = True
session.dirty = True
if "referer" in pinfo and pinfo["referer"]:
referer = pinfo["referer"]
del(pinfo["referer"])
session.dirty = True
return redirect_to_url(req, referer)
        # if we are coming from the autoclaim box we should not redirect and just return to the caller function
if 'autoclaim' in pinfo and pinfo['autoclaim']['review_failed'] == False and pinfo['autoclaim']['begin_autoclaim'] == True:
pinfo['autoclaim']['review_failed'] = False
pinfo['autoclaim']['begin_autoclaim'] = False
session.dirty = True
else:
redirect_page = webapi.history_get_last_visited_url(
pinfo['visit_diary'], limit_to_page=['manage_profile', 'claim'])
if not redirect_page:
redirect_page = webapi.get_fallback_redirect_link(req)
if 'autoclaim' in pinfo and pinfo['autoclaim']['review_failed'] and pinfo['autoclaim']['checkout']:
redirect_page = '%s/author/claim/action?checkout=True' % (CFG_SITE_URL,)
pinfo['autoclaim']['checkout'] = False
session.dirty = True
            elif 'manage_profile' not in redirect_page:
                pinfo['autoclaim']['review_failed'] = False
                pinfo['autoclaim']['begin_autoclaim'] = False
pinfo['autoclaim']['checkout'] = True
session.dirty = True
redirect_page = '%s/author/claim/%s?open_claim=True' % (
CFG_SITE_URL,
webapi.get_person_redirect_link(pinfo["claimpaper_admin_last_viewed_pid"]))
else:
pinfo['autoclaim']['review_failed'] = False
                pinfo['autoclaim']['begin_autoclaim'] = False
pinfo['autoclaim']['checkout'] = True
session.dirty = True
return redirect_to_url(req, redirect_page)
# redirect_link = diary('get_redirect_link', caller='_ticket_dispatch_end', parameters=[('open_claim','True')])
# return redirect_to_url(req, redirect_link)
def _check_user_fields(self, req, form):
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'user_first_name': (str, None),
'user_last_name': (str, None),
'user_email': (str, None),
'user_comments': (str, None)})
session = get_session(req)
pinfo = session["personinfo"]
ulevel = pinfo["ulevel"]
skip_checkout_faulty_fields = False
if ulevel in ['user', 'admin']:
skip_checkout_faulty_fields = True
if not ("user_first_name_sys" in pinfo and pinfo["user_first_name_sys"]):
if "user_first_name" in argd and argd['user_first_name']:
if not argd["user_first_name"] and not skip_checkout_faulty_fields:
pinfo["checkout_faulty_fields"].append("user_first_name")
else:
pinfo["user_first_name"] = escape(argd["user_first_name"])
if not ("user_last_name_sys" in pinfo and pinfo["user_last_name_sys"]):
if "user_last_name" in argd and argd['user_last_name']:
if not argd["user_last_name"] and not skip_checkout_faulty_fields:
pinfo["checkout_faulty_fields"].append("user_last_name")
else:
pinfo["user_last_name"] = escape(argd["user_last_name"])
if not ("user_email_sys" in pinfo and pinfo["user_email_sys"]):
if "user_email" in argd and argd['user_email']:
if not email_valid_p(argd["user_email"]):
pinfo["checkout_faulty_fields"].append("user_email")
else:
pinfo["user_email"] = escape(argd["user_email"])
if (ulevel == "guest"
and emailUnique(argd["user_email"]) > 0):
pinfo["checkout_faulty_fields"].append("user_email_taken")
else:
pinfo["checkout_faulty_fields"].append("user_email")
if "user_comments" in argd:
if argd["user_comments"]:
pinfo["user_ticket_comments"] = escape(argd["user_comments"])
else:
pinfo["user_ticket_comments"] = ""
session.dirty = True
def action(self, req, form):
'''
Initial step in processing of requests: ticket generation/update.
Also acts as action dispatcher for interface mass action requests.
Valid mass actions are:
- add_external_id: add an external identifier to an author
- add_missing_external_ids: add missing external identifiers of an author
- bibref_check_submit:
- cancel: clean the session (erase tickets and so on)
- cancel_rt_ticket:
- cancel_search_ticket:
- cancel_stage:
- checkout:
- checkout_continue_claiming:
- checkout_remove_transaction:
- checkout_submit:
- claim: claim papers for an author
- commit_rt_ticket:
- confirm: confirm assignments to an author
- delete_external_ids: delete external identifiers of an author
- repeal: repeal assignments from an author
- reset: reset assignments of an author
- set_canonical_name: set/swap the canonical name of an author
- to_other_person: assign a document from an author to another author
@param req: apache request object
@type req: apache request object
@param form: parameters sent via GET or POST request
@type form: dict
@return: a full page formatted in HTML
@return: str
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session["personinfo"]
argd = wash_urlargd(form,
{'autoclaim_show_review': (str, None),
'canonical_name': (str, None),
'existing_ext_ids': (list, None),
'ext_id': (str, None),
'uid': (int, None),
'ext_system': (str, None),
'ln': (str, CFG_SITE_LANG),
'pid': (int, -1),
'primary_profile': (str, None),
'search_param': (str, None),
'rt_action': (str, None),
'rt_id': (int, None),
'selection': (list, None),
'rtid': (int, None),
# permitted actions
'add_external_id': (str, None),
'set_uid': (str, None),
'add_missing_external_ids': (str, None),
'associate_profile': (str, None),
'bibref_check_submit': (str, None),
'cancel': (str, None),
'cancel_merging': (str, None),
'cancel_rt_ticket': (str, None),
'cancel_search_ticket': (str, None),
'cancel_stage': (str, None),
'checkout': (str, None),
'checkout_continue_claiming': (str, None),
'checkout_remove_transaction': (str, None),
'checkout_submit': (str, None),
'assign': (str, None),
'commit_rt_ticket': (str, None),
'close_rt_ticket': (str, None),
'confirm': (str, None),
'delete_external_ids': (str, None),
'email': (str, None),
'merge': (str, None),
'reject': (str, None),
'repeal': (str, None),
'reset': (str, None),
'send_message': (str, None),
'set_canonical_name': (str, None),
'to_other_person': (str, None)})
ulevel = pinfo["ulevel"]
ticket = pinfo["ticket"]
uid = getUid(req)
ln = argd['ln']
action = None
permitted_actions = ['add_external_id',
'set_uid',
'add_missing_external_ids',
'associate_profile',
'bibref_check_submit',
'cancel',
'cancel_merging',
'cancel_rt_ticket',
'cancel_search_ticket',
'cancel_stage',
'checkout',
'checkout_continue_claiming',
'checkout_remove_transaction',
'checkout_submit',
'assign',
'close_rt_ticket',
'commit_rt_ticket',
'confirm',
'delete_external_ids',
'merge',
'reject',
'repeal',
'reset',
'send_message',
'set_canonical_name',
'to_other_person']
for act in permitted_actions:
# one action (the most) is enabled in the form
if argd[act] is not None:
action = act
no_access = self._page_access_permission_wall(req, None)
if no_access and action not in ["assign"]:
return no_access
        # Incomplete papers (incomplete paper info or other problems) trigger
        # the action function without user interaction, in order to fix those
        # problems and then claim the papers or remove them from the ticket.
if (action is None
and "bibref_check_required" in pinfo
and pinfo["bibref_check_required"]):
if "bibref_check_reviewed_bibrefs" in pinfo:
del(pinfo["bibref_check_reviewed_bibrefs"])
session.dirty = True
def add_external_id():
'''
associates the user with pid to the external id ext_id
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot add external id to unknown person")
if argd['ext_system']:
ext_sys = argd['ext_system']
else:
return self._error_page(req, ln,
"Fatal: cannot add an external id without specifying the system")
if argd['ext_id']:
ext_id = argd['ext_id']
else:
return self._error_page(req, ln,
"Fatal: cannot add a custom external id without a suggestion")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.add_person_external_id(pid, ext_sys, ext_id, userinfo)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def set_uid():
            '''
            associates the author profile with person id pid to the web user
            with id uid
            '''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: current user is unknown")
if argd['uid'] is not None:
dest_uid = int(argd['uid'])
else:
return self._error_page(req, ln,
"Fatal: user id is not valid")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.set_person_uid(pid, dest_uid, userinfo)
# remove arxiv pubs of current pid
remove_arxiv_papers_of_author(pid)
dest_uid_pid = webapi.get_pid_from_uid(dest_uid)
if dest_uid_pid > -1:
# move the arxiv pubs of the dest_uid to the current pid
dest_uid_arxiv_papers = webapi.get_arxiv_papers_of_author(dest_uid_pid)
webapi.add_arxiv_papers_to_author(dest_uid_arxiv_papers, pid)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def add_missing_external_ids():
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot recompute external ids for an unknown person")
update_external_ids_of_authors([pid], overwrite=False)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def associate_profile():
'''
associates the user with user id to the person profile with pid
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot associate profile without a person id.")
uid = getUid(req)
pid, profile_claimed = webapi.claim_profile(uid, pid)
redirect_pid = pid
if profile_claimed:
pinfo['pid'] = pid
pinfo['should_check_to_autoclaim'] = True
pinfo["login_info_message"] = "confirm_success"
session.dirty = True
redirect_to_url(req, '%s/author/manage_profile/%s' % (CFG_SITE_URL, redirect_pid))
            # if someone has already claimed this profile it redirects to choose_profile with an error message
else:
param = ''
if 'search_param' in argd and argd['search_param']:
param = '&search_param=' + argd['search_param']
redirect_to_url(req, '%s/author/choose_profile?failed=%s%s' % (CFG_SITE_URL, True, param))
def bibref_check_submit():
pinfo["bibref_check_reviewed_bibrefs"] = list()
add_rev = pinfo["bibref_check_reviewed_bibrefs"].append
if ("bibrefs_auto_assigned" in pinfo
or "bibrefs_to_confirm" in pinfo):
person_reviews = list()
if ("bibrefs_auto_assigned" in pinfo
and pinfo["bibrefs_auto_assigned"]):
person_reviews.append(pinfo["bibrefs_auto_assigned"])
if ("bibrefs_to_confirm" in pinfo
and pinfo["bibrefs_to_confirm"]):
person_reviews.append(pinfo["bibrefs_to_confirm"])
for ref_review in person_reviews:
for person_id in ref_review:
for bibrec in ref_review[person_id]["bibrecs"]:
rec_grp = "bibrecgroup%s" % bibrec
elements = list()
if rec_grp in form:
if isinstance(form[rec_grp], str):
elements.append(form[rec_grp])
elif isinstance(form[rec_grp], list):
elements += form[rec_grp]
else:
continue
for element in elements:
test = element.split("||")
if test and len(test) > 1 and test[1]:
tref = test[1] + "," + str(bibrec)
tpid = webapi.wash_integer_id(test[0])
if (webapi.is_valid_bibref(tref)
and tpid > -1):
add_rev(element + "," + str(bibrec))
session.dirty = True
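            # Element format illustrated (hypothetical values): a form entry
            # "5||100:1442" under "bibrecgroup155" yields tref "100:1442,155"
            # and, when valid, the reviewed entry "5||100:1442,155".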
def cancel():
self.__session_cleanup(req)
return self._ticket_dispatch_end(req)
def cancel_merging():
'''
empties the session out of merge content and redirects to the manage profile page
that the user was viewing before the merge
'''
if argd['primary_profile']:
primary_cname = argd['primary_profile']
else:
return self._error_page(req, ln,
"Fatal: Couldn't redirect to the previous page")
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
if pinfo['merge_profiles']:
pinfo['merge_profiles'] = list()
session.dirty = True
redirect_url = "%s/author/manage_profile/%s" % (CFG_SITE_URL, primary_cname)
return redirect_to_url(req, redirect_url)
def cancel_rt_ticket():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln, "Fatal: cannot cancel unknown ticket")
if argd['rt_id'] is not None and argd['rt_action'] is not None:
rt_id = int(argd['rt_id'])
rt_action = argd['rt_action']
for bibrefrec in bibrefrecs:
webapi.delete_transaction_from_request_ticket(pid, rt_id, rt_action, bibrefrec)
else:
rt_id = int(bibrefrecs[0])
webapi.delete_request_ticket(pid, rt_id)
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, pid))
def cancel_search_ticket(without_return=False):
if 'search_ticket' in pinfo:
del(pinfo['search_ticket'])
session.dirty = True
if "claimpaper_admin_last_viewed_pid" in pinfo:
pid = pinfo["claimpaper_admin_last_viewed_pid"]
if not without_return:
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
if not without_return:
return self.search(req, form)
def cancel_stage():
if 'bibref_check_required' in pinfo:
del(pinfo['bibref_check_required'])
if 'bibrefs_auto_assigned' in pinfo:
del(pinfo['bibrefs_auto_assigned'])
if 'bibrefs_to_confirm' in pinfo:
del(pinfo['bibrefs_to_confirm'])
for tt in [row for row in ticket if 'incomplete' in row]:
ticket.remove(tt)
session.dirty = True
return self._ticket_dispatch_end(req)
def checkout():
pass
# return self._ticket_final_review(req)
def checkout_continue_claiming():
pinfo["checkout_faulty_fields"] = list()
self._check_user_fields(req, form)
return self._ticket_dispatch_end(req)
def checkout_remove_transaction():
bibref = argd['checkout_remove_transaction']
if webapi.is_valid_bibref(bibref):
for rmt in [row for row in ticket if row["bibref"] == bibref]:
ticket.remove(rmt)
pinfo["checkout_confirmed"] = False
session.dirty = True
# return self._ticket_final_review(req)
def checkout_submit():
pinfo["checkout_faulty_fields"] = list()
self._check_user_fields(req, form)
if not ticket:
pinfo["checkout_faulty_fields"].append("tickets")
pinfo["checkout_confirmed"] = True
if pinfo["checkout_faulty_fields"]:
pinfo["checkout_confirmed"] = False
session.dirty = True
# return self._ticket_final_review(req)
def claim():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot create ticket without any papers selected. " + \
"Please go back and select which papers would you like to claim.")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot claim papers to an unknown person")
if action == 'assign':
claimed_recs = [paper[2] for paper in get_claimed_papers_of_author(pid)]
for bibrefrec in list(bibrefrecs):
_, rec = webapi.split_bibrefrec(bibrefrec)
if rec in claimed_recs:
bibrefrecs.remove(bibrefrec)
for bibrefrec in bibrefrecs:
operation_parts = {'pid': pid,
'action': action,
'bibrefrec': bibrefrec}
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
if operation_to_be_added is None:
continue
ticket = pinfo['ticket']
webapi.add_operation_to_ticket(operation_to_be_added, ticket)
session.dirty = True
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def claim_to_other_person():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot create ticket without any papers selected. " + \
"Please go back and select which papers would you like to claim.")
return self._ticket_open_assign_to_other_person(req, bibrefrecs, form)
def commit_rt_ticket():
if argd['selection'] is not None:
tid = argd['selection'][0]
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
return self._commit_rt_ticket(req, tid, pid)
def confirm_repeal_reset():
if argd['pid'] > -1 or int(argd['pid']) == CREATE_NEW_PERSON:
pid = argd['pid']
cancel_search_ticket(without_return=True)
else:
return self._ticket_open_assign_to_other_person(req, argd['selection'], form)
# return self._error_page(req, ln, "Fatal: cannot create ticket without a
# person id! (crr %s)" %repr(argd))
bibrefrecs = argd['selection']
if argd['confirm']:
action = 'assign'
if pid == CREATE_NEW_PERSON:
pid = create_new_person(getUid(req))
elif argd['repeal']:
action = 'reject'
elif argd['reset']:
action = 'reset'
else:
return self._error_page(req, ln, "Fatal: not existent action!")
for bibrefrec in bibrefrecs:
form['jsondata'] = json.dumps({'pid': str(pid),
'action': action,
'bibrefrec': bibrefrec,
'on': 'user'})
t = WebInterfaceAuthorTicketHandling()
t.add_operation(req, form)
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def close_rt_ticket():
BIBCATALOG_SYSTEM.ticket_set_attribute(0, argd['rtid'], 'status', 'resolved')
remove_rtid_from_ticket(argd['rtid'], argd['pid'])
return redirect_to_url(req, "%s/author/claim/%s#tabTickets" % (CFG_SITE_URL, webapi.get_person_redirect_link(argd['pid'])))
def delete_external_ids():
'''
deletes association between the user with pid and the external id ext_id
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot delete external ids from an unknown person")
if argd['existing_ext_ids'] is not None:
existing_ext_ids = argd['existing_ext_ids']
else:
return self._error_page(req, ln,
"Fatal: you must select at least one external id in order to delete it")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.delete_person_external_ids(pid, existing_ext_ids, userinfo)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def none_action():
return self._error_page(req, ln,
"Fatal: cannot create ticket if no action selected.")
def merge():
'''
performs a merge if allowed on the profiles that the user chose
'''
if argd['primary_profile']:
primary_cname = argd['primary_profile']
else:
return self._error_page(req, ln,
"Fatal: cannot perform a merge without a primary profile!")
if argd['selection']:
profiles_to_merge = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot perform a merge without any profiles selected!")
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
primary_pid = webapi.get_person_id_from_canonical_id(primary_cname)
pids_to_merge = [webapi.get_person_id_from_canonical_id(cname) for cname in profiles_to_merge]
is_admin = False
if pinfo['ulevel'] == 'admin':
is_admin = True
# checking if there are restrictions regarding this merge
can_perform_merge, preventing_pid, error_message = webapi.merge_is_allowed(primary_pid, pids_to_merge, is_admin)
if not can_perform_merge:
# when redirected back to the merge profiles page display an error message
# about the currently attempted merge
session.dirty = True
req.status = apache.HTTP_CONFLICT
c_name = webapi.get_canonical_id_from_person_id(preventing_pid)
return 'Cannot merge profile: %s Reason: %s' % (c_name,
error_message)
if is_admin:
webapi.merge_profiles(primary_pid, pids_to_merge)
else:
name = ''
if 'user_last_name' in pinfo:
name = pinfo['user_last_name']
if 'user_first_name' in pinfo:
name += pinfo['user_first_name']
email = ''
if 'user_email' in pinfo:
email = pinfo['user_email']
elif 'email' in argd:
# the email was submitted in form
email = argd['email']
pinfo['form_email'] = email
selection_str = "&selection=".join(profiles_to_merge)
userinfo = {'uid-ip': "userid: %s (from %s)" % (uid, req.remote_ip),
'name': name,
'email': email,
'merge link': "%s/author/merge_profiles?primary_profile=%s&selection=%s" % (CFG_SITE_URL, primary_cname, selection_str),
'uid': uid}
# a message is sent to the admin with info regarding the currently attempted merge
webapi.create_request_message(userinfo, subj=('Merge profiles request: %s' % primary_cname))
# when redirected back to the manage profile page display a message about the merge
pinfo['merge_info_message'] = ("success", "confirm_operation")
pinfo['merge_profiles'] = list()
session.dirty = True
redirect_url = "%s/author/manage_profile/%s" % (CFG_SITE_URL, primary_cname)
return redirect_to_url(req, redirect_url)
def send_message():
'''
sends a message from the user to the admin
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
# pp = pprint.PrettyPrinter(indent=4)
# session_dump = pp.pprint(pinfo)
session_dump = str(pinfo)
name = ''
name_changed = False
name_given = ''
email = ''
email_changed = False
email_given = ''
comment = ''
last_page_visited = ''
if "user_last_name" in pinfo:
name = pinfo["user_last_name"]
if "user_first_name" in pinfo:
name += pinfo["user_first_name"]
name = name.rstrip()
if "user_email" in pinfo:
email = pinfo["user_email"]
email = email.rstrip()
if 'Name' in form:
if not name:
name = form['Name']
elif name != form['Name']:
name_given = form['Name']
name_changed = True
name = name.rstrip()
            if 'E-mail' in form:
                if not email:
                    email = form['E-mail']
                elif email != form['E-mail']:
email_given = form['E-mail']
email_changed = True
email = email.rstrip()
if 'Comment' in form:
comment = form['Comment']
comment = comment.rstrip()
if not name or not comment or not email:
redirect_to_url(req, '%s/author/help?incomplete_params=%s' % (CFG_SITE_URL, True))
if 'last_page_visited' in form:
last_page_visited = form['last_page_visited']
uid = getUid(req)
userinfo = {'uid-ip': "userid: %s (from %s)" % (uid, req.remote_ip),
'name': name,
'email': email,
'comment': comment,
'last_page_visited': last_page_visited,
'session_dump': session_dump,
'name_given': name_given,
'email_given': email_given,
'name_changed': name_changed,
'email_changed': email_changed,
'uid': uid}
webapi.create_request_message(userinfo)
def set_canonical_name():
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot set canonical name to unknown person")
if argd['canonical_name'] is not None:
cname = argd['canonical_name']
else:
return self._error_page(req, ln,
"Fatal: cannot set a custom canonical name without a suggestion")
userinfo = "%s||%s" % (uid, req.remote_ip)
if webapi.is_valid_canonical_id(cname):
webapi.swap_person_canonical_name(pid, cname, userinfo)
else:
webapi.update_person_canonical_name(pid, cname, userinfo)
return redirect_to_url(req, "%s/author/claim/%s%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid), '#tabData'))
action_functions = {'add_external_id': add_external_id,
'set_uid': set_uid,
'add_missing_external_ids': add_missing_external_ids,
'associate_profile': associate_profile,
'bibref_check_submit': bibref_check_submit,
'cancel': cancel,
'cancel_merging': cancel_merging,
'cancel_rt_ticket': cancel_rt_ticket,
'cancel_search_ticket': cancel_search_ticket,
'cancel_stage': cancel_stage,
'checkout': checkout,
'checkout_continue_claiming': checkout_continue_claiming,
'checkout_remove_transaction': checkout_remove_transaction,
'checkout_submit': checkout_submit,
'assign': claim,
'commit_rt_ticket': commit_rt_ticket,
'close_rt_ticket': close_rt_ticket,
'confirm': confirm_repeal_reset,
'delete_external_ids': delete_external_ids,
'merge': merge,
'reject': claim,
'repeal': confirm_repeal_reset,
'reset': confirm_repeal_reset,
'send_message': send_message,
'set_canonical_name': set_canonical_name,
'to_other_person': claim_to_other_person,
None: none_action}
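        # Example dispatch: a request posting 'confirm' runs
        # confirm_repeal_reset(); 'assign' and 'reject' both funnel into
        # claim(), which tells them apart via the enclosing `action`
        # variable (only 'assign' filters out already-claimed records).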
return action_functions[action]()
def _ticket_open_assign_to_other_person(self, req, bibrefs, form):
'''
Initializes search to find a person to attach the selected records to
@param req: Apache request object
@type req: Apache request object
@param bibrefs: list of record IDs to consider
@type bibrefs: list of int
@param form: GET/POST request parameters
@type form: dict
'''
session = get_session(req)
pinfo = session["personinfo"]
pinfo["search_ticket"] = dict()
search_ticket = pinfo["search_ticket"]
search_ticket['action'] = 'assign'
search_ticket['bibrefs'] = bibrefs
session.dirty = True
return self.search(req, form)
def _cancel_rt_ticket(self, req, tid, pid):
'''
deletes an RT ticket
'''
webapi.delete_request_ticket(pid, tid)
return redirect_to_url(req, "%s/author/claim/%s" %
(CFG_SITE_URL, webapi.get_person_redirect_link(str(pid))))
def _cancel_transaction_from_rt_ticket(self, tid, pid, action, bibref):
'''
deletes a transaction from an rt ticket
'''
webapi.delete_transaction_from_request_ticket(pid, tid, action, bibref)
def _commit_rt_ticket(self, req, tid, pid):
'''
Commit of an rt ticket: creates a real ticket and commits.
'''
session = get_session(req)
pinfo = session["personinfo"]
ticket = pinfo["ticket"]
uid = getUid(req)
tid = int(tid)
try:
rt_ticket = get_validated_request_tickets_for_author(pid, tid)[0]
except IndexError:
msg = """This ticket with the tid: %s has already been
removed.""" % tid
return self._error_page(req, message=msg)
for action, bibrefrec in rt_ticket['operations']:
operation_parts = {'pid': pid,
'action': action,
'bibrefrec': bibrefrec}
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
webapi.add_operation_to_ticket(operation_to_be_added, ticket)
session.dirty = True
webapi.delete_request_ticket(pid, tid)
redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, pid))
def _error_page(self, req, ln=CFG_SITE_LANG, message=None, intro=True):
'''
Create a page that contains a message explaining the error.
@param req: Apache Request Object
@type req: Apache Request Object
@param ln: language
@type ln: string
@param message: message to be displayed
@type message: string
'''
body = []
_ = gettext_set_language(ln)
if not message:
message = "No further explanation available. Sorry."
if intro:
body.append(_("<p>We're sorry. An error occurred while "
"handling your request. Please find more information "
"below:</p>"))
body.append("<p><strong>%s</strong></p>" % message)
return page(title=_("Notice"),
body="\n".join(body),
description="%s - Internal Error" % BIBAUTHORID_CFG_SITE_NAME,
keywords="%s, Internal Error" % BIBAUTHORID_CFG_SITE_NAME,
language=ln,
req=req)
def __session_cleanup(self, req):
'''
Cleans the session from all bibauthorid specific settings and
with that cancels any transaction currently in progress.
@param req: Apache Request Object
@type req: Apache Request Object
'''
session = get_session(req)
try:
pinfo = session["personinfo"]
except KeyError:
return
if "ticket" in pinfo:
pinfo['ticket'] = []
if "search_ticket" in pinfo:
pinfo['search_ticket'] = dict()
# clear up bibref checker if it's done.
if ("bibref_check_required" in pinfo
and not pinfo["bibref_check_required"]):
if 'bibrefs_to_confirm' in pinfo:
del(pinfo['bibrefs_to_confirm'])
if "bibrefs_auto_assigned" in pinfo:
del(pinfo["bibrefs_auto_assigned"])
del(pinfo["bibref_check_required"])
if "checkout_confirmed" in pinfo:
del(pinfo["checkout_confirmed"])
if "checkout_faulty_fields" in pinfo:
del(pinfo["checkout_faulty_fields"])
# pinfo['ulevel'] = ulevel
# pinfo["claimpaper_admin_last_viewed_pid"] = -1
pinfo["admin_requested_ticket_id"] = -1
session.dirty = True
def _generate_search_ticket_box(self, req):
'''
Generate the search ticket to remember a pending search for Person
entities in an attribution process
@param req: Apache request object
@type req: Apache request object
'''
session = get_session(req)
pinfo = session["personinfo"]
search_ticket = None
if 'search_ticket' in pinfo:
search_ticket = pinfo['search_ticket']
if not search_ticket:
return ''
else:
return TEMPLATE.tmpl_search_ticket_box('person_search', 'assign_papers', search_ticket['bibrefs'])
def search_box(self, query, shown_element_functions):
        '''
        Collects the persons' data returned by the search function.
        @param query: the query string
        @type query: string
        @param shown_element_functions: functions that tell the template
            which columns to show and which buttons to print
        @type shown_element_functions: dict
        @return: html body
        @rtype: string
        '''
pid_list = self._perform_search(query)
search_results = []
for pid in pid_list:
result = defaultdict(list)
result['pid'] = pid
result['canonical_id'] = webapi.get_canonical_id_from_person_id(pid)
result['name_variants'] = webapi.get_person_names_from_id(pid)
result['external_ids'] = webapi.get_external_ids_from_person_id(pid)
# this variable shows if we want to use the following data in the search template
if 'pass_status' in shown_element_functions and shown_element_functions['pass_status']:
result['status'] = webapi.is_profile_available(pid)
search_results.append(result)
body = TEMPLATE.tmpl_author_search(query, search_results, shown_element_functions)
body = TEMPLATE.tmpl_person_detail_layout(body)
return body
def search(self, req, form):
'''
        Searches for a person based on the name with which the function is
        queried.
        @param req: Apache Request Object
        @type req: Apache Request Object
        @param form: GET/POST request params
        @type form: dict
@return: a full page formatted in HTML
@rtype: string
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
person_id = self.person_id
uid = getUid(req)
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0),
'q': (str, None)})
debug = "verbose" in argd and argd["verbose"] > 0
ln = argd['ln']
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
try:
int(cname)
except ValueError:
is_owner = False
else:
is_owner = self._is_profile_owner(last_visited_pid)
menu = WebProfileMenu(str(cname), "search", ln, is_owner, self._is_admin(pinfo))
title = "Person search"
# Create Wrapper Page Markup
profile_page = WebProfilePage("search", title, no_cache=True)
profile_page.add_bootstrapped_data(json.dumps({
"backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s"});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
}))
if debug:
profile_page.add_debug_info(pinfo)
no_access = self._page_access_permission_wall(req)
shown_element_functions = dict()
shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_general_search_bar()
if no_access:
return no_access
search_ticket = None
bibrefs = []
if 'search_ticket' in pinfo:
search_ticket = pinfo['search_ticket']
for r in search_ticket['bibrefs']:
bibrefs.append(r)
if search_ticket and "ulevel" in pinfo:
if pinfo["ulevel"] == "admin":
shown_element_functions['new_person_gen'] = TEMPLATE.tmpl_assigning_search_new_person_generator(bibrefs)
content = ""
if search_ticket:
shown_element_functions['button_gen'] = TEMPLATE.tmpl_assigning_search_button_generator(bibrefs)
content = content + self._generate_search_ticket_box(req)
query = None
if 'q' in argd:
if argd['q']:
query = escape(argd['q'])
content += self.search_box(query, shown_element_functions)
body = profile_page.get_wrapped_body("generic", {'html': content})
parameter = None
if query:
            parameter = '?search_param=%s' % query
webapi.history_log_visit(req, 'search', params=parameter)
return page(title=title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def merge_profiles(self, req, form):
'''
        beginning of the process that performs the merge over multiple
        person profiles
        @param req: Apache Request Object
        @type req: Apache Request Object
        @param form: GET/POST request params
        @type form: dict
@return: a full page formatted in HTML
@rtype: string
'''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'primary_profile': (str, None),
'search_param': (str, ''),
'selection': (list, None),
'verbose': (int, 0)})
ln = argd['ln']
primary_cname = argd['primary_profile']
search_param = argd['search_param']
selection = argd['selection']
debug = 'verbose' in argd and argd['verbose'] > 0
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
profiles_to_merge = pinfo['merge_profiles']
_ = gettext_set_language(ln)
if not primary_cname:
return page_not_authorized(req, text=_('This page is not accessible directly.'))
no_access = self._page_access_permission_wall(req)
if no_access:
return no_access
if selection is not None:
profiles_to_merge_session = [cname for cname, is_available in profiles_to_merge]
for profile in selection:
if profile not in profiles_to_merge_session:
pid = webapi.get_person_id_from_canonical_id(profile)
is_available = webapi.is_profile_available(pid)
pinfo['merge_profiles'].append([profile, '1' if is_available else '0'])
session.dirty = True
primary_pid = webapi.get_person_id_from_canonical_id(primary_cname)
is_available = webapi.is_profile_available(primary_pid)
if not session['personinfo']['merge_primary_profile']:
session['personinfo']['merge_primary_profile'] = [primary_cname, '1' if is_available else '0']
session.dirty = True
body = ''
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
title = 'Merge Profiles'
menu = WebProfileMenu(str(cname), "manage_profile", ln, is_owner, self._is_admin(pinfo))
merge_page = WebProfilePage("merge_profile", title, no_cache=True)
merge_page.add_profile_menu(menu)
if debug:
merge_page.add_debug_info(pinfo)
# display status for any previously attempted merge
if pinfo['merge_info_message']:
teaser_key, message = pinfo['merge_info_message']
body += TEMPLATE.tmpl_merge_transaction_box(teaser_key, [message])
pinfo['merge_info_message'] = None
session.dirty = True
body += TEMPLATE.tmpl_merge_ticket_box('person_search', 'merge_profiles', primary_cname)
shown_element_functions = dict()
shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_merge_profiles_search_bar(primary_cname)
shown_element_functions['button_gen'] = TEMPLATE.merge_profiles_button_generator()
shown_element_functions['pass_status'] = 'True'
gFormEmail = ""
if 'form_email' in pinfo:
gFormEmail = pinfo['form_email']
merge_page.add_bootstrapped_data(json.dumps({
"other": ("var gMergeProfile = %s; var gMergeList = %s;" +
"var gUserLevel = '%s'; var gFormEmail = '%s';") %
([primary_cname, '1' if is_available else '0'],
profiles_to_merge, pinfo['ulevel'], gFormEmail)
}))
body += self.search_box(search_param, shown_element_functions)
body = merge_page.get_wrapped_body("generic", {'html': body})
return page(title=title,
metaheaderadd=merge_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def _perform_search(self, search_param):
'''
        calls the search function on search_param and returns the results
        @param search_param: query string
        @type search_param: string
        @return: list of pids matching the search query
        @rtype: list
'''
        pid_candidates_list = []
nquery = None
if search_param:
if search_param.count(":"):
try:
left, right = search_param.split(":")
try:
nsearch_param = str(right)
except (ValueError, TypeError):
try:
nsearch_param = str(left)
except (ValueError, TypeError):
nsearch_param = search_param
except ValueError:
nsearch_param = search_param
else:
nsearch_param = search_param
sorted_results = webapi.search_person_ids_by_name(nsearch_param)
for result in sorted_results:
            pid_candidates_list.append(result[0])
        return pid_candidates_list
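        # Parsing illustrated (hypothetical query): "author:ellis" keeps the
        # part right of the colon, so nsearch_param == "ellis"; a plain
        # "ellis" is passed through unchanged.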
def merge_profiles_ajax(self, req, form):
'''
Function used for handling Ajax requests used in order to add/remove profiles
in/from the merging profiles list, which is saved in the session.
@param req: Apache Request Object
@type req: Apache Request Object
@param form: Parameters sent via Ajax request
@type form: dict
@return: json data
'''
        # Warn if the simplejson module isn't available
        if not CFG_JSON_AVAILABLE:
            print "Error: JSON support (simplejson) is not available"
# If it is an Ajax request, extract any JSON data.
ajax_request = False
        # Recent papers request
if 'jsondata' in form:
json_data = json.loads(str(form['jsondata']))
# Deunicode all strings (Invenio doesn't have unicode
# support).
json_data = json_unicode_to_utf8(json_data)
ajax_request = True
json_response = {'resultCode': 0}
# Handle request.
if ajax_request:
req_type = json_data['requestType']
if req_type == 'addProfile':
if 'profile' in json_data:
profile = json_data['profile']
person_id = webapi.get_person_id_from_canonical_id(profile)
if person_id != -1:
webapi.session_bareinit(req)
session = get_session(req)
profiles_to_merge = session["personinfo"]["merge_profiles"]
profile_availability = webapi.is_profile_available(person_id)
if profile_availability:
profile_availability = "1"
else:
profile_availability = "0"
if profile not in [el[0] for el in profiles_to_merge]:
profiles_to_merge.append([profile, profile_availability])
session.dirty = True
# TODO check access rights and get profile from db
json_response.update({'resultCode': 1})
json_response.update({'addedPofile': profile})
json_response.update({'addedPofileAvailability': profile_availability})
else:
json_response.update({'result': 'Error: Profile does not exist'})
else:
json_response.update({'result': 'Error: Profile was already in the list'})
else:
json_response.update({'result': 'Error: Missing profile'})
elif req_type == 'removeProfile':
if 'profile' in json_data:
profile = json_data['profile']
if webapi.get_person_id_from_canonical_id(profile) != -1:
webapi.session_bareinit(req)
session = get_session(req)
profiles_to_merge = session["personinfo"]["merge_profiles"]
# print (str(profiles_to_merge))
if profile in [el[0] for el in profiles_to_merge]:
for prof in list(profiles_to_merge):
if prof[0] == profile:
profiles_to_merge.remove(prof)
session.dirty = True
# TODO check access rights and get profile from db
json_response.update({'resultCode': 1})
json_response.update({'removedProfile': profile})
else:
                            json_response.update({'result': 'Error: Profile was already missing from the list'})
else:
json_response.update({'result': 'Error: Profile does not exist'})
else:
json_response.update({'result': 'Error: Missing profile'})
elif req_type == 'setPrimaryProfile':
if 'profile' in json_data:
profile = json_data['profile']
profile_id = webapi.get_person_id_from_canonical_id(profile)
if profile_id != -1:
webapi.session_bareinit(req)
session = get_session(req)
profile_availability = webapi.is_profile_available(profile_id)
if profile_availability:
profile_availability = "1"
else:
profile_availability = "0"
profiles_to_merge = session["personinfo"]["merge_profiles"]
if profile in [el[0] for el in profiles_to_merge if el and el[0]]:
for prof in list(profiles_to_merge):
if prof[0] == profile:
profiles_to_merge.remove(prof)
primary_profile = session["personinfo"]["merge_primary_profile"]
if primary_profile and primary_profile not in profiles_to_merge:
profiles_to_merge.append(primary_profile)
session["personinfo"]["merge_primary_profile"] = [profile, profile_availability]
session.dirty = True
json_response.update({'resultCode': 1})
json_response.update({'primaryProfile': profile})
json_response.update({'primaryPofileAvailability': profile_availability})
else:
json_response.update({'result': 'Error: Profile was already in the list'})
else:
json_response.update({'result': 'Error: Missing profile'})
else:
json_response.update({'result': 'Error: Wrong request type'})
return json.dumps(json_response)
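        # Example exchange for the 'addProfile' branch (hypothetical
        # canonical id):
        #   request : form['jsondata'] = '{"requestType": "addProfile",
        #                                  "profile": "J.R.Ellis.1"}'
        #   response: '{"resultCode": 1, "addedPofile": "J.R.Ellis.1",
        #               "addedPofileAvailability": "1"}'
        # ('addedPofile' deliberately matches the key spelling produced
        # above.)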
def search_box_ajax(self, req, form):
'''
Function used for handling Ajax requests used in the search box.
@param req: Apache Request Object
@type req: Apache Request Object
@param form: Parameters sent via Ajax request
@type form: dict
@return: json data
'''
        # Warn if the simplejson module isn't available
        if not CFG_JSON_AVAILABLE:
            print "Error: JSON support (simplejson) is not available"
# If it is an Ajax request, extract any JSON data.
ajax_request = False
        # Recent papers request
if 'jsondata' in form:
json_data = json.loads(str(form['jsondata']))
# Deunicode all strings (Invenio doesn't have unicode
# support).
json_data = json_unicode_to_utf8(json_data)
ajax_request = True
json_response = {'resultCode': 0}
# Handle request.
if ajax_request:
req_type = json_data['requestType']
if req_type == 'getPapers':
if 'personId' in json_data:
pId = json_data['personId']
papers = sorted([[p[0]] for p in webapi.get_papers_by_person_id(int(pId), -1)],
key=itemgetter(0))
papers_html = TEMPLATE.tmpl_gen_papers(papers[0:MAX_NUM_SHOW_PAPERS])
json_response.update({'result': "\n".join(papers_html)})
json_response.update({'totalPapers': len(papers)})
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
else:
json_response.update({'result': 'Error: Missing person id'})
elif req_type == 'getNames':
if 'personId' in json_data:
pId = json_data['personId']
names = webapi.get_person_names_from_id(int(pId))
names_html = TEMPLATE.tmpl_gen_names(names)
json_response.update({'result': "\n".join(names_html)})
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
elif req_type == 'getIDs':
if 'personId' in json_data:
pId = json_data['personId']
ids = webapi.get_external_ids_from_person_id(int(pId))
ids_html = TEMPLATE.tmpl_gen_ext_ids(ids)
json_response.update({'result': "\n".join(ids_html)})
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
elif req_type == 'isProfileClaimed':
if 'personId' in json_data:
pId = json_data['personId']
isClaimed = webapi.get_uid_from_personid(pId)
if isClaimed != -1:
json_response.update({'resultCode': 1})
json_response.update({'pid': str(pId)})
else:
json_response.update({'result': 'Error: Wrong request type'})
return json.dumps(json_response)
def choose_profile(self, req, form):
'''
Generate SSO landing/choose_profile page
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'search_param': (str, None),
'failed': (str, None),
'verbose': (int, 0)})
ln = argd['ln']
debug = "verbose" in argd and argd["verbose"] > 0
req.argd = argd # needed for perform_req_search
search_param = argd['search_param']
webapi.session_bareinit(req)
session = get_session(req)
uid = getUid(req)
pinfo = session['personinfo']
failed = bool(argd['failed'])
_ = gettext_set_language(ln)
if not CFG_INSPIRE_SITE:
return page_not_authorized(req, text=_("This page is not accessible directly."))
params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
login_info = webapi.get_login_info(uid, params)
if 'arXiv' not in login_info['logged_in_to_remote_systems']:
return page_not_authorized(req, text=_("This page is not accessible directly."))
pid = webapi.get_user_pid(login_info['uid'])
# Create Wrapper Page Markup
is_owner = False
menu = WebProfileMenu('', "choose_profile", ln, is_owner, self._is_admin(pinfo))
choose_page = WebProfilePage("choose_profile", "Choose your profile", no_cache=True)
choose_page.add_profile_menu(menu)
if debug:
choose_page.add_debug_info(pinfo)
content = TEMPLATE.tmpl_choose_profile(failed)
body = choose_page.get_wrapped_body("generic", {'html': content})
# In any case, when we pass through here, an autoclaim should be performed right after!
pinfo = session["personinfo"]
pinfo['should_check_to_autoclaim'] = True
session.dirty = True
last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
# if already logged in then redirect the user to the page he was viewing
if pid != -1:
redirect_pid = pid
if last_visited_pid:
redirect_pid = last_visited_pid
redirect_to_url(req, '%s/author/manage_profile/%s' % (CFG_SITE_URL, str(redirect_pid)))
else:
# get name strings and email addresses from SSO/Oauth logins:
# {'system':{'name':[variant1,...,variantn], 'email':'blabla@bla.bla',
# 'pants_size':20}}
remote_login_systems_info = webapi.get_remote_login_systems_info(
req, login_info['logged_in_to_remote_systems'])
# get union of recids that are associated to the ids from all the external systems: set(inspire_recids_list)
recids = webapi.get_remote_login_systems_recids(req, login_info['logged_in_to_remote_systems'])
# this is the profile with the biggest intersection of papers so it's
# more probable that this is the profile the user seeks
probable_pid = webapi.match_profile(req, recids, remote_login_systems_info)
# if not search_param and probable_pid > -1 and probable_pid == last_visited_pid:
# try to assign the user to the profile he chose. If for some reason the profile is not available we assign him to an empty profile
# redirect_pid, profile_claimed = webapi.claim_profile(login_info['uid'], probable_pid)
# if profile_claimed:
# redirect_to_url(req,
# '%s/author/claim/action?associate_profile=True&redirect_pid=%s' %
# (CFG_SITE_URL, str(redirect_pid)))
probable_profile_suggestion_info = None
last_viewed_profile_suggestion_info = None
if last_visited_pid > -1 and webapi.is_profile_available(last_visited_pid):
# get information about the most probable profile and show it to the user
last_viewed_profile_suggestion_info = webapi.get_profile_suggestion_info(req, last_visited_pid, recids)
if probable_pid > -1 and webapi.is_profile_available(probable_pid):
# get information about the most probable profile and show it to the user
probable_profile_suggestion_info = webapi.get_profile_suggestion_info(req, probable_pid, recids)
if not search_param:
# we prefill the search with the most relevant of the names that we get from external systems
name_variants = webapi.get_name_variants_list_from_remote_systems_names(remote_login_systems_info)
search_param = most_relevant_name(name_variants)
body = body + TEMPLATE.tmpl_probable_profile_suggestion(
probable_profile_suggestion_info,
last_viewed_profile_suggestion_info,
search_param)
free_id = get_free_author_id()
shown_element_functions = dict()
shown_element_functions['button_gen'] = TEMPLATE.tmpl_choose_profile_search_button_generator()
shown_element_functions['new_person_gen'] = TEMPLATE.tmpl_choose_profile_search_new_person_generator(free_id)
shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_choose_profile_search_bar()
# show in the templates the column status (if profile is bound to a user or not)
shown_element_functions['show_status'] = True
# pass the status column data (whether the profile is bound to a user or not)
# to the templates; we might need the data without showing the column (e.g. in merge_profiles)
shown_element_functions['pass_status'] = True
# show search results to the user
body = body + self.search_box(search_param, shown_element_functions)
body = body + TEMPLATE.tmpl_choose_profile_footer()
title = _(' ')
return page(title=title,
metaheaderadd=choose_page.get_head().encode('utf-8'),
body=body,
req=req,
language=ln)
@staticmethod
def _arxiv_box(req, login_info, person_id, user_pid):
'''
Process and collect data for the arXiv box
@param req: Apache request object
@type req: Apache request object
@param login_info: status of login in the following format: {'logged_in': True, 'uid': 2, 'logged_in_to_remote_systems':['Arxiv', ...]}
@type login_info: dict
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@return: data required to build the arXiv box
@rtype: dict
'''
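# Illustrative shape of the dict assembled below (values are hypothetical):
#   {'login': True, 'user_pid': 7, 'user_has_pid': True, 'view_own_profile': False}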
session = get_session(req)
pinfo = session["personinfo"]
arxiv_data = dict()
# if the user is not a guest and is connected through arXiv
arxiv_data['login'] = login_info['logged_in']
arxiv_data['user_pid'] = user_pid
arxiv_data['user_has_pid'] = user_pid != -1
# if the profile the user is logged in with is the same as the profile of the page being viewed
arxiv_data['view_own_profile'] = user_pid == person_id
return arxiv_data
@staticmethod
def _orcid_box(arxiv_logged_in, person_id, user_pid, ulevel):
'''
Process and collect data for the orcid box
@param arxiv_logged_in: shows if the user is logged in through arXiv or not
@type arxiv_logged_in: boolean
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@param ulevel: user's level
@type ulevel: string
@return: data required to build the orcid box
@rtype: dict
'''
orcid_data = dict()
orcid_data['arxiv_login'] = arxiv_logged_in
orcid_data['orcids'] = None
orcid_data['add_power'] = False
orcid_data['own_profile'] = False
orcid_data['pid'] = person_id
# Indicates whether we should push the works or not.
orcid_data['push'] = not get_token(person_id)
# if the profile the user is logged in with is the same as the profile of the page being viewed
if person_id == user_pid:
orcid_data['own_profile'] = True
# if the user is an admin then he can add an existing orcid to the profile
if ulevel == "admin":
orcid_data['add_power'] = True
orcids = webapi.get_orcids_by_pid(person_id)
if orcids:
orcid_data['orcids'] = orcids
return orcid_data
@staticmethod
def _autoclaim_papers_box(req, person_id, user_pid, remote_logged_in_systems):
'''
Process and collect data for the autoclaim box
@param req: Apache request object
@type req: Apache request object
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@param remote_logged_in_systems: the remote logged in systems
@type remote_logged_in_systems: list
@return: data required to build the autoclaim box
@rtype: dict
'''
autoclaim_data = dict()
# if no autoclaim should occur, or one has already occurred and its results are shown, the box remains hidden
autoclaim_data['hidden'] = True
autoclaim_data['person_id'] = person_id
# if the profile the user is logged in with is the same as the profile of the page being viewed
if person_id == user_pid:
recids_to_autoclaim = webapi.get_remote_login_systems_recids(req, remote_logged_in_systems)
autoclaim_data['hidden'] = False
autoclaim_data['num_of_claims'] = len(recids_to_autoclaim)
return autoclaim_data
@staticmethod
def get_params_to_check_login_info(session):
def get_params_to_check_login_info_of_arxiv(session):
try:
return session['user_info']
except KeyError:
return None
def get_params_to_check_login_info_of_orcid(session):
pinfo = session['personinfo']
try:
pinfo['orcid']['has_orcid_id'] = bool(
get_orcid_id_of_author(pinfo['pid'])[0][0] and pinfo['orcid']['import_pubs'])
except:
pinfo['orcid']['has_orcid_id'] = False
session.dirty = True
return pinfo['orcid']
get_params_for_remote_system = {'arXiv': get_params_to_check_login_info_of_arxiv,
'orcid': get_params_to_check_login_info_of_orcid}
params = dict()
for system, get_params in get_params_for_remote_system.iteritems():
params[system] = get_params(session)
return params
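# A hedged sketch of the returned mapping (contents depend on the session):
#   {'arXiv': <session['user_info'] or None>,
#    'orcid': {'has_orcid_id': False, 'import_pubs': ...}}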
@staticmethod
def _claim_paper_box(person_id):
'''
Process and collect data for the claim paper box
@param person_id: person id of the current page's profile
@type person_id: int
@return: data required to build the claim paper box
@rtype: dict
'''
claim_paper_data = dict()
claim_paper_data['canonical_id'] = str(webapi.get_canonical_id_from_person_id(person_id))
return claim_paper_data
@staticmethod
def _support_box():
'''
Process and collect data for the support box
@return: data required to build the support box
@rtype: dict
'''
support_data = dict()
return support_data
@staticmethod
def _merge_box(person_id):
'''
Process and collect data for the merge box
@param person_id: person id of the current page's profile
@type person_id: int
@return: data required to build the merge box
@rtype: dict
'''
merge_data = dict()
search_param = webapi.get_canonical_id_from_person_id(person_id)
name_variants = [element[0] for element in webapi.get_person_names_from_id(person_id)]
mr_name = most_relevant_name(name_variants)
if mr_name:
search_param = mr_name.split(",")[0]
merge_data['search_param'] = search_param
merge_data['canonical_id'] = webapi.get_canonical_id_from_person_id(person_id)
return merge_data
@staticmethod
def _internal_ids_box(person_id, user_pid, ulevel):
'''
Process and collect data for the internal_ids box
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@param ulevel: user's level
@type ulevel: string
@return: data required to build the internal_ids box
@rtype: dict
'''
internal_ids_data = dict()
internal_ids_data['uid'], internal_ids_data['old_uids'] = webapi.get_internal_user_id_from_person_id(person_id)
internal_ids_data['person_id'] = person_id
internal_ids_data['user_pid'] = user_pid
internal_ids_data['ulevel'] = ulevel
return internal_ids_data
@staticmethod
def _external_ids_box(person_id, user_pid, ulevel):
'''
Process and collect data for the external_ids box
@param person_id: person id of the current page's profile
@type person_id: int
@param user_pid: person id of the user
@type user_pid: int
@param ulevel: user's level
@type ulevel: string
@return: data required to build the external_ids box
@rtype: dict
'''
external_ids_data = dict()
external_ids_data['ext_ids'] = webapi.get_external_ids_from_person_id(person_id)
external_ids_data['person_id'] = person_id
external_ids_data['user_pid'] = user_pid
external_ids_data['ulevel'] = ulevel
return external_ids_data
@staticmethod
def _hepnames_box(person_id):
return webapi.get_hepnames(person_id)
def tickets_admin(self, req, form):
'''
Generate SSO landing/welcome page
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
webapi.session_bareinit(req)
no_access = self._page_access_permission_wall(req, req_level='admin')
if no_access:
return no_access
session = get_session(req)
pinfo = session['personinfo']
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(pinfo['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
menu = WebProfileMenu(str(cname), "open_tickets", ln, is_owner, self._is_admin(pinfo))
title = "Open RT tickets"
profile_page = WebProfilePage("help", title, no_cache=True)
profile_page.add_profile_menu(menu)
tickets = webapi.get_persons_with_open_tickets_list()
tickets = [[clean_string(webapi.get_most_frequent_name_from_pid(int(t[0]))),
webapi.get_person_redirect_link(t[0]), t[0], t[1]]
for t in tickets]
content = TEMPLATE.tmpl_tickets_admin(tickets)
content = TEMPLATE.tmpl_person_detail_layout(content)
body = profile_page.get_wrapped_body("generic", {'html': content})
return page(title=title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def help(self, req, form):
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
_ = gettext_set_language(ln)
if not CFG_BIBAUTHORID_ENABLED:
return page_not_authorized(req, text=_("This page is not accessible directly."))
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(pinfo['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
title = "Help Center"
profile_page = WebProfilePage("help", title, no_cache=True)
template_parameters = {'base_url': CFG_BASE_URL}
body = profile_page.get_wrapped_body("help", template_parameters)
return page(title=title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def export(self, req, form):
'''
Generate JSONized export of Person data
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'request': (str, None),
'userid': (str, None)})
if not CFG_JSON_AVAILABLE:
return "500_json_not_found__install_package"
# session = get_session(req)
request = None
userid = None
if "userid" in argd and argd['userid']:
userid = argd['userid']
else:
return "404_user_not_found"
if "request" in argd and argd['request']:
request = argd["request"]
# find user from ID
user_email = get_email_from_username(userid)
if user_email == userid:
return "404_user_not_found"
uid = get_uid_from_email(user_email)
uinfo = collect_user_info(uid)
# find person by uid
pid = webapi.get_pid_from_uid(uid)
# find papers by pid that are confirmed by a human.
papers = webapi.get_papers_by_person_id(pid, 2)
# filter by request param, e.g. arxiv
if not request:
return "404__no_filter_selected"
if request not in VALID_EXPORT_FILTERS:
return "500_filter_invalid"
if request == "arxiv":
query = "(recid:"
query += " OR recid:".join(papers)
query += ") AND 037:arxiv"
db_docs = perform_request_search(p=query, rg=0)
nickmail = ""
nickname = ""
db_arxiv_ids = []
try:
nickname = uinfo["nickname"]
except KeyError:
pass
if not nickname:
try:
nickmail = uinfo["email"]
except KeyError:
nickmail = user_email
nickname = nickmail
db_arxiv_ids = get_fieldvalues(db_docs, "037__a")
construct = {"nickname": nickname,
"claims": ";".join(db_arxiv_ids)}
jsondmp = json.dumps(construct)
signature = webapi.sign_assertion("arXiv", jsondmp)
construct["digest"] = signature
return json.dumps(construct)
index = __call__
class WebInterfaceBibAuthorIDManageProfilePages(WebInterfaceDirectory):
_exports = ['',
'import_orcid_pubs',
'push_orcid_pubs',
'connect_author_with_hepname',
'connect_author_with_hepname_ajax',
'suggest_orcid',
'suggest_orcid_ajax']
def _lookup(self, component, path):
'''
This handler parses dynamic URLs:
- /author/profile/1332 shows the page of author with id: 1332
- /author/profile/100:5522,1431 shows the page of the author
identified by the bibrefrec: '100:5522,1431'
'''
if component not in self._exports:
return WebInterfaceBibAuthorIDManageProfilePages(component), path
def _is_profile_owner(self, pid):
return self.person_id == int(pid)
def _is_admin(self, pinfo):
return pinfo['ulevel'] == 'admin'
def __init__(self, identifier=None):
'''
Constructor of the web interface.
@param identifier: identifier of an author. Can be one of:
- an author id: e.g. "14"
- a canonical id: e.g. "J.R.Ellis.1"
- a bibrefrec: e.g. "100:1442,155"
@type identifier: str
'''
self.person_id = -1  # -1 is not a valid author identifier
if identifier is None or not isinstance(identifier, str):
self.original_identifier = str()
return
else:
self.original_identifier = identifier
# check if it's a canonical id: e.g. "J.R.Ellis.1"
try:
pid = int(identifier)
except ValueError:
pid = int(webapi.get_person_id_from_canonical_id(identifier))
if pid >= 0:
self.person_id = pid
return
# check if it's an author id: e.g. "14"
try:
pid = int(identifier)
if webapi.author_has_papers(pid):
self.person_id = pid
return
except ValueError:
pass
# check if it's a bibrefrec: e.g. "100:1442,155"
if webapi.is_valid_bibref(identifier):
pid = int(webapi.get_person_id_from_paper(identifier))
if pid >= 0:
self.person_id = pid
return
def _get_orcid_token(self, session, pinfo):
if 'oauth2_access_token' not in session:
return None
token = session['oauth2_access_token']
if token != '':
return token
return None
def __call__(self, req, form):
'''
Generate SSO landing/author management page
@param req: Apache request object
@type req: Apache request object
@param form: GET/POST request params
@type form: dict
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
person_id = self.person_id
uid = getUid(req)
pinfo['claim_in_process'] = True
argd = wash_urlargd(form, {
'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
debug = "verbose" in argd and argd["verbose"] > 0
ln = argd['ln']
_ = gettext_set_language(ln)
if not CFG_BIBAUTHORID_ENABLED or self.person_id is None:
return page_not_authorized(req, text=_("This page is not accessible directly."))
if person_id < 0:
return self._error_page(req, message=("Identifier %s is not a valid person identifier or does not exist anymore!" % self.original_identifier))
# log the visit
webapi.history_log_visit(req, 'manage_profile', pid=person_id)
# store the arxiv papers the user owns
if uid > 0 and not pinfo['arxiv_status']:
uinfo = collect_user_info(req)
arxiv_papers = list()
if 'external_arxivids' in uinfo and uinfo['external_arxivids']:
arxiv_papers = uinfo['external_arxivids'].split(';')
if arxiv_papers:
webapi.add_arxiv_papers_to_author(arxiv_papers, person_id)
pinfo['arxiv_status'] = True
params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
login_info = webapi.get_login_info(uid, params)
# Create Wrapper Page Markup
cname = webapi.get_canonical_id_from_person_id(self.person_id)
long_name = webapi.get_longest_name_from_pid(self.person_id)
# TODO: Replace dash with —
page_title = "%s - %s" % (long_name, _('Manage Profile'))
menu = WebProfileMenu(
str(cname),
"manage_profile",
ln,
self._is_profile_owner(pinfo['pid']),
self._is_admin(pinfo))
profile_page = WebProfilePage("manage_profile", long_name, no_cache=True)
profile_page.add_profile_menu(menu)
profile_page.add_bootstrapped_data(json.dumps({
"backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s"});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
}))
if debug:
profile_page.add_debug_info(pinfo)
user_pid = webapi.get_user_pid(login_info['uid'])
person_data = webapi.get_person_info_by_pid(person_id)
arxiv_data = WebInterfaceBibAuthorIDClaimPages._arxiv_box(req, login_info, person_id, user_pid)
orcid_data = WebInterfaceBibAuthorIDClaimPages._orcid_box(arxiv_data['login'], person_id, user_pid, ulevel)
orcid_data['token'] = self._get_orcid_token(session, pinfo)
claim_paper_data = WebInterfaceBibAuthorIDClaimPages._claim_paper_box(person_id)
support_data = WebInterfaceBibAuthorIDClaimPages._support_box()
ids_box_html = None
if ulevel == 'admin':
ext_ids_data = WebInterfaceBibAuthorIDClaimPages._external_ids_box(person_id, user_pid, ulevel)
int_ids_data = WebInterfaceBibAuthorIDClaimPages._internal_ids_box(person_id, user_pid, ulevel)
ids_box_html = TEMPLATE.tmpl_ext_ids_box(
person_id,
int_ids_data,
ext_ids_data,
ln,
add_box=False,
loading=False)
autoclaim_data = WebInterfaceBibAuthorIDClaimPages._autoclaim_papers_box(
req, person_id, user_pid, login_info['logged_in_to_remote_systems'])
merge_data = WebInterfaceBibAuthorIDClaimPages._merge_box(person_id)
hepnames_data = WebInterfaceBibAuthorIDClaimPages._hepnames_box(person_id)
content = ''
# display status for any previously attempted merge
if pinfo['merge_info_message']:
teaser_key, message = pinfo['merge_info_message']
content += TEMPLATE.tmpl_merge_transaction_box(teaser_key, [message])
pinfo['merge_info_message'] = None
session.dirty = True
modal = ''
if 'orcid_info' in session:
orcid_info = session['orcid_info']['status']
else:
orcid_info = ''
if CFG_INSPIRE_SITE:
html_arxiv = TEMPLATE.tmpl_arxiv_box(arxiv_data, ln, add_box=False, loading=False)
html_orcid, modal = TEMPLATE.tmpl_orcid_box(orcid_data, ln, orcid_info, add_box=False, loading=False)
if hepnames_data is not None:
hepnames_data.update({
'cname': webapi.get_canonical_id_from_person_id(person_id),
'link_to_record': ulevel == "admin",
'hepnames_link': "%s/%s/" % (CFG_BASE_URL, "record"),
'new_record_link': 'http://slac.stanford.edu/spires/hepnames/additions.shtml',
'update_link': "http://inspirehep.net/person/update?IRN=",
'profile_link': "%s/%s" % (CFG_BASE_URL, "author/profile/")
})
html_hepnames = WebProfilePage.render_template('personal_details_box', hepnames_data)
else:
html_hepnames = "Loading.."
html_support = TEMPLATE.tmpl_support_box(support_data, ln, add_box=False, loading=False)
if autoclaim_data['hidden']:
autoclaim_successful_recs = None
autoclaim_unsuccessful_recs = None
else:
if not pinfo['orcid']['import_pubs'] and pinfo['autoclaim']['res'] is not None:
autoclaim_data = pinfo['autoclaim']['res']
autoclaim_successful_recs = autoclaim_data['successful_recids']
autoclaim_unsuccessful_recs = autoclaim_data['unsuccessful_recids']
else:
login_status = webapi.get_login_info(uid, params)
autoclaim_ticket = pinfo['autoclaim']['ticket']
external_pubs_association = pinfo['autoclaim']['external_pubs_association']
remote_systems = login_status['logged_in_to_remote_systems']
papers_to_autoclaim = set(webapi.get_papers_from_remote_systems(remote_systems,
params,
external_pubs_association))
for paper in papers_to_autoclaim:
operation_parts = {'pid': person_id,
'action': 'assign',
'bibrefrec': str(paper)}
operation_to_be_added = webapi.construct_operation(operation_parts,
pinfo,
uid)
if operation_to_be_added is None:
# In case the operation could not be created (because of an
# erroneous bibrefrec) ignore it and continue with the rest
continue
webapi.add_operation_to_ticket(operation_to_be_added, autoclaim_ticket)
additional_info = {'first_name': '', 'last_name': '', 'email': '',
'comments': 'Assigned automatically when autoclaim was triggered.'}
userinfo = webapi.fill_out_userinfo(additional_info, uid, req.remote_ip, ulevel, strict_check=False)
if 'email' in session:
userinfo['email'] = session['email']
elif 'email' not in userinfo:
userinfo['email'] = None
webapi.commit_operations_from_ticket(autoclaim_ticket, userinfo, uid, ulevel)
already_claimed_recids = set(
[rec for _, _, rec in get_claimed_papers_of_author(person_id)]) & papers_to_autoclaim
successful_recids = set([op['rec'] for op in webapi.get_ticket_status(
autoclaim_ticket) if 'execution_result' in op]) | already_claimed_recids
webapi.clean_ticket(autoclaim_ticket)
unsuccessful_recids = [op['rec'] for op in webapi.get_ticket_status(autoclaim_ticket)]
autoclaim_data['recids_to_external_ids'] = dict()
for key, value in external_pubs_association.iteritems():
ext_system, ext_id = key
rec = value
title = get_title_of_paper(rec)
autoclaim_data['recids_to_external_ids'][rec] = title
autoclaim_successful_recs = [(
autoclaim_data['recids_to_external_ids'][recid],
get_inspire_record_url(recid),
recid) for recid in successful_recids]
autoclaim_unsuccessful_recs = [(
autoclaim_data['recids_to_external_ids'][recid],
get_inspire_record_url(recid),
recid) for recid in unsuccessful_recids]
# cache the result in the session
autoclaim_data['successful_recids'] = autoclaim_successful_recs
autoclaim_data['unsuccessful_recids'] = autoclaim_unsuccessful_recs
pinfo['autoclaim']['res'] = autoclaim_data
if pinfo['orcid']['import_pubs']:
pinfo['orcid']['import_pubs'] = False
session.dirty = True
template_parameters = {
"autoclaim_successful_recids": autoclaim_successful_recs,
"autoclaim_unsuccessful_recids": autoclaim_unsuccessful_recs,
"review_autoclaim_link": "%s/author/ticket/review_autoclaim" % CFG_SITE_URL,
"merge": TEMPLATE.tmpl_merge_box(merge_data, ln, add_box=False, loading=False),
"external_ids_box_html": ids_box_html,
"user_level": ulevel,
"base_url": CFG_BASE_URL,
"inspire" : CFG_INSPIRE_SITE,
"orcid_message" : self._generate_orcid_message(req, ln)
}
if 'orcid_info' in session:
session.pop('orcid_info', None)
session.dirty = True
# Inspire specific endpoints.
if CFG_INSPIRE_SITE:
template_parameters["hepnames"] = html_hepnames
template_parameters["arxiv"] = html_arxiv
template_parameters["orcid"] = html_orcid
template_parameters["contact"] = html_support
template_parameters["modal"] = modal
body = profile_page.get_wrapped_body("manage_profile", template_parameters)
# body = profile_page.get_wrapped_body("generic", {'html': content})
return page(title=page_title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
def _generate_orcid_message(self, req, ln):
'''
Generate the box which informs the user about running ORCID push.
@param req: Apache request object
@type req: Apache request object
'''
session = get_session(req)
orcid_info = None
if 'orcid_info' in session:
orcid_info = session['orcid_info']['status']
if not orcid_info:
return ''
else:
return TEMPLATE.tmpl_orcid_message(orcid_info, ln)
def import_orcid_pubs(self, req, form):
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
orcid_info = pinfo['orcid']
orcid_id, orcid_dois = get_dois_from_orcid_using_pid(pinfo['pid'])
# TODO: what to do in case some ORCID server error occurs?
if orcid_id is None or orcid_dois is None:
redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, pinfo['pid']))
# TODO: it would be smarter if:
# 1. we save in the db the orcid_dois
# 2. to expire only the external pubs box in the profile page
webauthorapi.expire_all_cache_for_personid(pinfo['pid'])
orcid_info['imported_pubs'] = orcid_dois
orcid_info['import_pubs'] = True
session.dirty = True
redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, pinfo['pid']))
def _get_identifier_from_path(self, path):
'''Return identifier from path to manage_profile page.
Example: localhost:4000/author/manage_profile/273672/wowow -> 273672
'''
tokens = path.split('/')
return tokens[tokens.index('manage_profile') + 1]
def push_orcid_pubs(self, req, form):
'''Push all claimed papers to the ORCID database.
Doesn't push papers which were already there. Needs user authentication.
When a user requests a push, this method is run twice: first the user
authenticates; then, on the second run, after the token has been received
from ORCID, the push is done.
'''
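# Rough shape of the two-pass flow described above (a sketch, not exact calls):
#   pass 1: no token in session -> remember state, redirect to /youraccount/oauth2
#   pass 2: token received      -> verify the ORCID account matches, set_token, redirect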
webapi.session_bareinit(req)
session = get_session(req)
if 'orcid_pid' not in session:
# I can't assume that pid will be available in session
identifier = self._get_identifier_from_path(req.referer)
try:
session['orcid_pid'] = get_author_by_canonical_name(identifier)[0][0]
except:
session['orcid_pid'] = identifier
session.dirty = True
if 'oauth2_access_token' not in session:
session['oauth2_access_token'] = ''
if session['oauth2_access_token'] == '':
# Authenticate
session['pushorcid'] = True
session.dirty = True
redirect_to_url(req, "%s/youraccount/oauth2?provider=%s&scope=/orcid-works/update+/orcid-works/create" % (CFG_SITE_SECURE_URL, 'orcid'))
# We expect user to have only one ORCID
assert(len(webapi.get_orcids_by_pid(session['orcid_pid'])) == 1)
if session['oauth2_orcid'] != webapi.get_orcids_by_pid(session['orcid_pid'])[0]:
# The user has authenticated, but is using a different account
session['oauth2_access_token'] = ''
session['orcid_info'] = {'status': 'wrong_account'}
person_id = session.pop('orcid_pid')
session.dirty = True
redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, person_id))
set_token(session['orcid_pid'], session['oauth2_access_token'])
session['orcid_info'] = {'status': 'finished'}
# Token may expire. It is better to get rid of it.
session['oauth2_access_token'] = ''
person_id = session.pop('orcid_pid')
session.dirty = True
redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, person_id))
def connect_author_with_hepname(self, req, form):
argd = wash_urlargd(form, {'cname': (str, None),
'hepname': (str, None),
'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
if argd['cname'] is not None:
cname = argd['cname']
else:
return self._error_page(req, ln, "Fatal: cannot associate a hepname without a person id.")
if argd['hepname'] is not None:
hepname = argd['hepname']
else:
return self._error_page(req, ln, "Fatal: cannot associate an author with a non valid hepname.")
webapi.session_bareinit(req)
session = get_session(req)
webapi.connect_author_with_hepname(cname, hepname, session['uid'])
pinfo = session['personinfo']
last_visited_page = webapi.history_get_last_visited_url(pinfo['visit_diary'], just_page=True)
redirect_to_url(req, "%s/author/%s/%s" % (CFG_SITE_URL, last_visited_page, cname))
def connect_author_with_hepname_ajax(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if 'jsondata' not in form:
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
cname = json_data['cname']
hepname = json_data['hepname']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
if not self._is_admin(pinfo):
if 'email' in json_data:
pinfo['form_email'] = json_data['email']
webapi.connect_author_with_hepname(cname, hepname,
session['uid'],
email=json_data['email'])
else:
webapi.connect_author_with_hepname(cname, hepname,
session['uid'])
else:
uid = getUid(req)
add_cname_to_hepname_record({cname: hepname}, uid)
def suggest_orcid(self, req, form):
argd = wash_urlargd(form, {'orcid': (str, None),
'pid': (int, -1),
'ln': (str, CFG_SITE_LANG)})
ln = argd['ln']
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln, "Fatal: cannot associate an orcid without a person id.")
if argd['orcid'] is not None and is_valid_orcid(argd['orcid']):
orcid = argd['orcid']
else:
return self._error_page(req, ln, "Fatal: cannot associate an author with a non valid ORCID.")
session = get_session(req)
webapi.connect_author_with_orcid(webapi.get_canonical_id_from_person_id(pid), orcid, session['uid'])
redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, pid))
def suggest_orcid_ajax(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if 'jsondata' not in form:
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
orcid = json_data['orcid']
pid = json_data['pid']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
if not is_valid_orcid(orcid):
return self._fail(req, apache.HTTP_NOT_FOUND)
session = get_session(req)
webapi.connect_author_with_orcid(webapi.get_canonical_id_from_person_id(pid), orcid, session['uid'])
def _fail(self, req, code):
req.status = code
return
def _error_page(self, req, ln=CFG_SITE_LANG, message=None, intro=True):
'''
Create a page that contains a message explaining the error.
@param req: Apache Request Object
@type req: Apache Request Object
@param ln: language
@type ln: string
@param message: message to be displayed
@type message: string
'''
body = []
_ = gettext_set_language(ln)
if not message:
message = "No further explanation available. Sorry."
if intro:
body.append(_("<p>We're sorry. An error occurred while "
"handling your request. Please find more information "
"below:</p>"))
body.append("<p><strong>%s</strong></p>" % message)
return page(title=_("Notice"),
body="\n".join(body),
description="%s - Internal Error" % BIBAUTHORID_CFG_SITE_NAME,
keywords="%s, Internal Error" % BIBAUTHORID_CFG_SITE_NAME,
language=ln,
req=req)
index = __call__
class WebInterfaceAuthorTicketHandling(WebInterfaceDirectory):
_exports = ['get_status',
'update_status',
'add_operation',
'modify_operation',
'remove_operation',
'commit',
'abort',
'review_autoclaim'
]
@staticmethod
def bootstrap_status(pinfo, on_ticket):
'''
Function used for generating get_status json bootstrapping.
@param pinfo: person_info
@type pinfo: dict
@param on_ticket: ticket target
@type on_ticket: str
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
author_ticketing = WebInterfaceAuthorTicketHandling()
ticket = author_ticketing._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return "{}"
ticket_status = webapi.get_ticket_status(ticket)
return json.dumps(ticket_status)
def get_status(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if 'jsondata' not in form:
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
ticket_status = webapi.get_ticket_status(ticket)
session.dirty = True
req.content_type = 'application/json'
req.write(json.dumps(ticket_status))
def update_status(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if 'jsondata' not in form:
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.update_ticket_status(ticket)
session.dirty = True
def add_operation(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if 'jsondata' not in form:
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
operation_parts = {'pid': int(json_data['pid']),
'action': json_data['action'],
'bibrefrec': json_data['bibrefrec']}
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
if operation_to_be_added is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.add_operation_to_ticket(operation_to_be_added, ticket)
session.dirty = True
def review_autoclaim(self, req, form):
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
try:
autoclaim = pinfo['autoclaim']['ticket']
except KeyError:
autoclaim = list()
ticket = self._get_according_ticket('user', pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
for item in autoclaim:
webapi.add_operation_to_ticket(item, ticket)
redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_BASE_URL, pinfo['pid']))
def modify_operation(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if 'jsondata' not in form:
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
operation_parts = {'pid': int(json_data['pid']),
'action': json_data['action'],
'bibrefrec': json_data['bibrefrec']}
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
operation_to_be_modified = webapi.construct_operation(operation_parts, pinfo, uid, should_have_bibref=False)
if operation_to_be_modified is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
operation_is_modified = webapi.modify_operation_from_ticket(operation_to_be_modified, ticket)
if not operation_is_modified:
# Operation couldn't be modified because it doesn't exist in the
# ticket. Wrong parameters were given hence we should fail!
return self._fail(req, apache.HTTP_NOT_FOUND)
session.dirty = True
def remove_operation(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if 'jsondata' not in form:
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
operation_parts = {'pid': int(json_data['pid']),
'action': json_data['action'],
'bibrefrec': json_data['bibrefrec']}
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
operation_to_be_removed = webapi.construct_operation(operation_parts, pinfo, uid)
if operation_to_be_removed is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
operation_is_removed = webapi.remove_operation_from_ticket(operation_to_be_removed, ticket)
if not operation_is_removed:
# Operation couldn't be removed because it doesn't exist in the
# ticket. Wrong parameters were given hence we should fail!
return self._fail(req, apache.HTTP_NOT_FOUND)
session.dirty = True
def commit(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if 'jsondata' not in form:
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
additional_info = {'first_name': json_data.get('first_name', "Default"),
'last_name': json_data.get('last_name', "Default"),
'email': json_data.get('email', "Default"),
'comments': json_data['comments']}
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
uid = getUid(req)
user_is_guest = isGuestUser(uid)
if not user_is_guest:
try:
additional_info['first_name'] = session['user_info']['external_firstname']
additional_info['last_name'] = session['user_info']['external_familyname']
additional_info['email'] = session['user_info']['email']
except KeyError:
additional_info['first_name'] = additional_info['last_name'] = additional_info['email'] = str(uid)
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
# When a guest is claiming we should not commit if he
# doesn't provide us his full personal information
strict_check = user_is_guest
userinfo = webapi.fill_out_userinfo(additional_info, uid, req.remote_ip, ulevel, strict_check=strict_check)
if userinfo is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
# Syncing is done here. Entries that have been handled are removed from
# unsuccessful_tickets so that they do not reappear in the next reload.
if pinfo['autoclaim']['res']:
if 'unsuccessful_recids' in pinfo['autoclaim']['res']:
unsuccessful_recids = pinfo['autoclaim']['res']['unsuccessful_recids']
else:
unsuccessful_recids = []
for entry in ticket:
recid = entry['rec']
unsuccessful_recids = [rec for rec in unsuccessful_recids if rec[2] != recid]
pinfo['autoclaim']['res']['unsuccessful_recids'] = unsuccessful_recids
webapi.commit_operations_from_ticket(ticket, userinfo, uid, ulevel)
session.dirty = True
def abort(self, req, form):
'''
Function used for handling Ajax requests.
@param req: apache request object
@type req: apache request object
@param form: parameters sent via Ajax request
@type form: dict
@return:
@rtype: json data
'''
# Abort if the simplejson module isn't available
assert CFG_JSON_AVAILABLE, "Json not available"
# Fail if no json data exists in the Ajax request
if 'jsondata' not in form:
return self._fail(req, apache.HTTP_NOT_FOUND)
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
try:
on_ticket = json_data['on']
except:
return self._fail(req, apache.HTTP_NOT_FOUND)
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ticket = self._get_according_ticket(on_ticket, pinfo)
if ticket is None:
return self._fail(req, apache.HTTP_NOT_FOUND)
# When a user is claiming we should completely delete his ticket if he
# aborts the claiming procedure
delete_ticket = (on_ticket == 'user')
webapi.abort_ticket(ticket, delete_ticket=delete_ticket)
session.dirty = True
def _get_according_ticket(self, on_ticket, pinfo):
ticket = None
if on_ticket == 'user':
ticket = pinfo['ticket']
elif on_ticket == 'autoclaim':
ticket = pinfo['autoclaim']['ticket']
return ticket
def _fail(self, req, code):
req.status = code
return
class WebAuthorSearch(WebInterfaceDirectory):
"""
Provides an interface to profile search using AJAX queries.
"""
_exports = ['list',
'details']
# This class requires JSON libraries
assert CFG_JSON_AVAILABLE, "[WebAuthorSearch] JSON must be enabled."
class QueryPerson(WebInterfaceDirectory):
_exports = ['']
MIN_QUERY_LENGTH = 2
QUERY_REGEX = re.compile(r"[\w\s\.\-,@]+$", re.UNICODE)
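# e.g. "Ellis, J.R." or "john.ellis@cern.ch" satisfy QUERY_REGEX above,
# while strings containing characters such as '<' or ';' are rejected.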
def __init__(self, query=None):
self.query = query
def _lookup(self, component, path):
if component not in self._exports:
return WebAuthorSearch.QueryPerson(component), path
def __call__(self, req, form):
if self.query is None or len(self.query) < self.MIN_QUERY_LENGTH:
req.status = apache.HTTP_BAD_REQUEST
return "Query too short"
if not self.QUERY_REGEX.match(self.query):
req.status = apache.HTTP_BAD_REQUEST
return "Invalid query."
pid_results = [{"pid": pid[0]} for pid in webapi.search_person_ids_by_name(self.query)]
req.content_type = 'application/json'
return json.dumps(pid_results)
# Request for index handled by __call__
index = __call__
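# Hedged usage sketch (URL and values are illustrative):
#   GET /author/search_ajax/list/Ellis  ->  '[{"pid": 10}, {"pid": 1432}]'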
def _JSON_received(self, form):
try:
return "jsondata" in form
except TypeError:
return False
def _extract_JSON(self, form):
try:
json_data = json.loads(str(form['jsondata']))
json_data = json_unicode_to_utf8(json_data)
return json_data
except ValueError:
return None
def _get_pid_details(self, pid):
details = webapi.get_person_info_by_pid(pid)
details.update({
"names": [{"name": x, "paperCount": y} for x, y in webapi.get_person_names_from_id(pid)],
"externalIds": [{x: y} for x, y in webapi.get_external_ids_from_person_id(pid).items()]
})
details['cname'] = details.pop("canonical_name", None)
return details
def details(self, req, form):
if self._JSON_received(form):
try:
json_data = self._extract_JSON(form)
pids = json_data['pids']
req.content_type = 'application/json'
details = [self._get_pid_details(pid) for pid in pids]
return json.dumps(details)
except (TypeError, KeyError):
req.status = apache.HTTP_BAD_REQUEST
return "Invalid query."
else:
req.status = apache.HTTP_BAD_REQUEST
return "Incorrect query format."
list = QueryPerson()
class WebInterfaceAuthor(WebInterfaceDirectory):
'''
Handles /author/* pages.
Supplies the methods:
/author/choose_profile
/author/claim/
/author/help
/author/manage_profile
/author/merge_profiles
/author/profile/
/author/search
/author/ticket/
'''
_exports = ['',
'choose_profile',
'claim',
'help',
'manage_profile',
'merge_profiles',
'profile',
'search',
'search_ajax',
'ticket']
from invenio.webauthorprofile_webinterface import WebAuthorPages
claim = WebInterfaceBibAuthorIDClaimPages()
profile = WebAuthorPages()
choose_profile = claim.choose_profile
help = claim.help
manage_profile = WebInterfaceBibAuthorIDManageProfilePages()
merge_profiles = claim.merge_profiles
search = claim.search
search_ajax = WebAuthorSearch()
ticket = WebInterfaceAuthorTicketHandling()
def _lookup(self, component, path):
if component not in self._exports:
return WebInterfaceAuthor(component), path
def __init__(self, component=None):
self.path = component
def __call__(self, req, form):
if self.path is None or len(self.path) < 1:
redirect_to_url(req, "%s/author/search" % CFG_BASE_URL)
if CFG_BIBAUTHORID_ENABLED:
# Check if canonical id: e.g. "J.R.Ellis.1"
pid = get_person_id_from_canonical_id(self.path)
if pid >= 0:
url = "%s/author/profile/%s" % (CFG_BASE_URL, get_person_redirect_link(pid))
redirect_to_url(req, url, redirection_type=apache.HTTP_MOVED_PERMANENTLY)
return
else:
try:
pid = int(self.path)
except ValueError:
redirect_to_url(req, "%s/author/search?q=%s" % (CFG_BASE_URL, self.path))
return
else:
if author_has_papers(pid):
cid = get_person_redirect_link(pid)
if is_valid_canonical_id(cid):
redirect_id = cid
else:
redirect_id = pid
url = "%s/author/profile/%s" % (CFG_BASE_URL, redirect_id)
redirect_to_url(req, url, redirection_type=apache.HTTP_MOVED_PERMANENTLY)
return
redirect_to_url(req, "%s/author/search" % CFG_BASE_URL)
return
else:
url = "%s/author/profile/%s" % (CFG_BASE_URL, self.path)
redirect_to_url(req, url, redirection_type=apache.HTTP_MOVED_PERMANENTLY)
return
index = __call__
class WebInterfacePerson(WebInterfaceDirectory):
'''
Handles /person/* pages.
Supplies the methods:
/person/welcome
'''
_exports = ['welcome', 'update', 'you']
def welcome(self, req, form):
redirect_to_url(req, "%s/author/choose_profile" % CFG_SITE_SECURE_URL)
def you(self, req, form):
redirect_to_url(req, "%s/author/choose_profile" % CFG_SITE_SECURE_URL)
def update(self, req, form):
"""
Generate hepnames update form
"""
argd = wash_urlargd(form,
{'ln': (str, CFG_SITE_LANG),
'email': (str, ''),
'IRN': (str, ''),
})
# Retrieve info for HEP name based on email or IRN
recids = []
if argd['email']:
recids = perform_request_search(p="371__m:%s" % argd['email'], cc="HepNames")
elif argd['IRN']:
recids = perform_request_search(p="001:%s" % argd['IRN'], cc="HepNames")
else:
redirect_to_url(req, "%s/collection/HepNames" % (CFG_SITE_URL))
if not recids:
redirect_to_url(req, "%s/collection/HepNames" % (CFG_SITE_URL))
else:
hepname_bibrec = get_bibrecord(recids[0])
# Extract all info from recid that should be included in the form
full_name = record_get_field_value(hepname_bibrec, tag="100", ind1="", ind2="", code="a")
display_name = record_get_field_value(hepname_bibrec, tag="880", ind1="", ind2="", code="a")
email = record_get_field_value(hepname_bibrec, tag="371", ind1="", ind2="", code="m")
status = record_get_field_value(hepname_bibrec, tag="100", ind1="", ind2="", code="g")
keynumber = record_get_field_value(hepname_bibrec, tag="970", ind1="", ind2="", code="a")
try:
keynumber = keynumber.split('-')[1]
except IndexError:
pass
research_field_list = record_get_field_values(hepname_bibrec, tag="650", ind1="1", ind2="7", code="a")
institution_list = []
for instance in record_get_field_instances(hepname_bibrec, tag="371", ind1="", ind2=""):
if not instance or field_get_subfield_values(instance, "m"):
continue
institution_info = ["", "", "", "", ""]
if field_get_subfield_values(instance, "a"):
institution_info[0] = field_get_subfield_values(instance, "a")[0]
if field_get_subfield_values(instance, "r"):
institution_info[1] = field_get_subfield_values(instance, "r")[0]
if field_get_subfield_values(instance, "s"):
institution_info[2] = field_get_subfield_values(instance, "s")[0]
if field_get_subfield_values(instance, "t"):
institution_info[3] = field_get_subfield_values(instance, "t")[0]
if field_get_subfield_values(instance, "z"):
institution_info[4] = field_get_subfield_values(instance, "z")[0]
institution_list.append(institution_info)
phd_advisor_list = record_get_field_values(hepname_bibrec, tag="701", ind1="", ind2="", code="a")
experiment_list = record_get_field_values(hepname_bibrec, tag="693", ind1="", ind2="", code="e")
web_page = record_get_field_value(hepname_bibrec, tag="856", ind1="1", ind2="", code="u")
# Create form and pass as parameters all the content from the record
body = TEMPLATE.tmpl_update_hep_name(full_name, display_name, email,
status, research_field_list,
institution_list, phd_advisor_list,
experiment_list, web_page)
title = "HEPNames"
return page(title=title,
metaheaderadd=TEMPLATE.tmpl_update_hep_name_headers(),
body=body,
req=req,
)
# pylint: enable=C0301
# pylint: enable=W0613
|
ioannistsanaktsidis/invenio
|
modules/bibauthorid/lib/bibauthorid_webinterface.py
|
Python
|
gpl-2.0
| 149,664
|
[
"VisIt"
] |
0dd8707efd7729d89f8166f9a2dd7e878cc9554164476388d86fe3a7b4630914
|
#==== Image Centering ====================================================#
#=========================================================================#
def set_fiber_data(self, method, **kwargs):
"""Set the fiber center, diameter, and centroid using the same method
Args
----
method : {'edge', 'radius', 'gaussian', 'circle'}
Uses the respective method to find the fiber center
**kwargs
The keyword arguments to pass to the centering method
Sets
----
_centroid.method : Pixel
The centroid of the image in the context of the given method
_center.method : Pixel
The center of the fiber face in the context of the given method
_diameter.method : float
The diameter of the fiber face in the context of the given method
"""
self.set_fiber_center(method, **kwargs)
self.set_fiber_centroid(method, **kwargs)
def set_fiber_diameter(self, method, **kwargs):
"""Set the fiber diameter using given method
Args
----
method : {'edge', 'radius', 'gaussian', 'circle'}
Uses the respective method to find the fiber center
**kwargs :
The keyword arguments to pass to the centering method
Sets
----
_diameter.method : float
The diameter of the fiber face in the context of the given method
_center.method : Pixel
The center of the fiber face in the context of the given method
Raises
------
RuntimeError
cannot accept the 'circle' method when setting the diameter since
it requires a known radius to run
"""
if method == 'circle':
raise RuntimeError('Fiber diameter cannot be set by circle method')
self.set_fiber_center(method, **kwargs)
def set_fiber_center(self, method, show_image=False, **kwargs):
"""Find fiber center using given method
Args
----
method : {'edge', 'radius', 'gaussian', 'circle'}
Uses the respective method to find the fiber center
show_image : boolean, optional (default=False)
Whether or not to show relevant fitting image
**kwargs :
The keyword arguments to pass to the centering method
Raises
------
RuntimeError
needs a valid method string to run the proper algorithm
"""
center, diameter = fiber_center_and_diameter(self, method, show_image, **kwargs)
setattr(self._center, method, center)
setattr(self._diameter, method, diameter)
# # Reset the fits due to new fiber parameters
# if method == 'radius':
# self.set_fiber_center_radius_method(**kwargs)
# elif method == 'edge':
# self.set_fiber_center_edge_method()
# elif method == 'circle':
# self.set_fiber_center_circle_method(**kwargs)
# elif method == 'gaussian':
# self.set_fiber_center_gaussian_method()
# else:
# raise RuntimeError('Incorrect string for fiber centering method')
# if show_image:
# center = getattr(self._center, method)
# r = getattr(self._diameter, method) / 2.0
# image = self.get_filtered_image()
# if method == 'gaussian':
# plot_overlaid_cross_sections(image, self.get_gaussian_fit(),
# center)
# plot_dot(image, center)
# show_plots()
# else:
# plot_image(remove_circle(image, center, r, res=1))
# plot_overlaid_cross_sections(image, image.max() / 2.0
# * circle_array(self.get_mesh_grid(),
# center.x, center.y, r, res=1),
# center)
# if method == 'edge':
# for corner in self._edges:
# plot_dot(image, corner)
# show_plots()
def set_fiber_center_gaussian_method(self):
"""Set fiber center using a Gaussian Fit
Uses Scipy.optimize.curve_fit method to fit fiber image to
gaussian_array(). The radius found extends to 2-sigma of the gaussian
therefore encompassing ~95% of the imaged light. Use previous methods
of center-finding to approximate the location of the center
Sets
----
_diameter.gaussian : float
Diameter of the fiber in the gaussian method context
_center.gaussian : {'x': float, 'y': float}
Center of the fiber in the gaussian method context
_fit.gaussian : 2D numpy.ndarray
Best gaussian fit for the fiber image
"""
_, coeffs = self.get_gaussian_fit(full_output=True)
self._center.gaussian.x = coeffs[0]
self._center.gaussian.y = coeffs[1]
self._diameter.gaussian = abs(coeffs[2]) * 2.0
self._gaussian_amp = coeffs[3]
self._gaussian_offset = coeffs[4]
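# Note (an assumption about the coefficient layout, inferred from the
# assignments above rather than stated in this file): coeffs appears to be
# ordered (x0, y0, radius, amplitude, offset), where coeffs[2] is already
# the 2-sigma radius described in the docstring, so the diameter is simply
# twice its absolute value.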
def set_fiber_center_radius_method(self, radius_tol=.03, radius_range=None, **kwargs):
"""Set fiber center using dark circle with varying radius
Uses a golden mean optimization method to find the optimal radius of the
dark circle that covers the fiber image used in
set_fiber_center_circle_method(). The optimization is for a parameter
array_sum which is weighted by the area of the circle, meaning that a
smaller circle is preferred over one that simply covers the entire image
Args
----
radius_tol : number (default=0.03)
Minimum possible range of radius values before ending iteration
radius_range: int (in pixels)
Range of tested radii, i.e. max(radius) - min(radius). If None,
uses full possible range
Sets
----
_diameter.radius : float
Diameter of the fiber in the radius method context
_center.radius : {'x': float, 'y': float}
Center of the fiber in the radius method context
_diameter.circle : float
Also uses the circle method, therefore changes this value
_center.circle : float
Also uses the circle method, therefore changes this value
"""
image = self.get_filtered_image()
# Initialize range of tested radii
r = np.zeros(4).astype(float)
if radius_range is not None:
approx_radius = self.get_fiber_radius(method='edge')
radius_range /= 2.0
r[0] = approx_radius - radius_range
if r[0] < 0.0:
r[0] = 0.0
r[3] = approx_radius + radius_range
else:
r[0] = 0
r[3] = min(self.height, self.width) / 2.0
r[1] = r[0] + (1 - self._phi) * (r[3] - r[0])
r[2] = r[0] + self._phi * (r[3] - r[0])
array_sum = np.zeros(2).astype(float)
for i in range(2):
self.set_fiber_center(method='circle', radius=r[i+1],
image=image, **kwargs)
array_sum[i] = (self._array_sum.circle
+ self.threshold
* np.pi * r[i+1]**2)
min_index = np.argmin(array_sum) # Integer 0 or 1 for min of r[1], r[2]
while abs(r[3]-r[0]) > radius_tol:
if min_index == 0:
r[3] = r[2]
r[2] = r[1]
r[1] = r[0] + (1 - self._phi) * (r[3] - r[0])
else:
r[0] = r[1]
r[1] = r[2]
r[2] = r[0] + self._phi * (r[3] - r[0])
array_sum[1 - min_index] = array_sum[min_index]
self.set_fiber_center(method='circle', radius=r[min_index+1],
image=image, **kwargs)
array_sum[min_index] = (self._array_sum.circle
+ self.threshold
* np.pi * r[min_index+1]**2)
min_index = np.argmin(array_sum) # Integer 0 or 1 for min of r[1], r[2]
self._diameter.radius = r[min_index+1] * 2
self._center.radius.y = self._center.circle.y
self._center.radius.x = self._center.circle.x
self._array_sum.radius = np.amin(array_sum)
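# --- Illustrative sketch, not part of the original file ------------------
# The radius method above is a 1-D golden-section search: it keeps a
# bracket [r[0], r[3]] with two interior probes placed at the golden ratio
# and discards the part of the bracket that cannot contain the minimum. A
# minimal standalone version of the same idea (hypothetical names, assuming
# a unimodal objective f) might look like this:
import math

def golden_section_minimize(f, lo, hi, tol=1e-3):
    """Return an x in [lo, hi] that approximately minimizes a unimodal f."""
    inv_phi = (math.sqrt(5.0) - 1.0) / 2.0  # ~0.618, the role of self._phi above
    a, b = lo, hi
    c = b - inv_phi * (b - a)
    d = a + inv_phi * (b - a)
    fc, fd = f(c), f(d)
    while abs(b - a) > tol:
        if fc < fd:
            # Minimum lies in [a, d]: shrink from the right, reuse probe c.
            b, d, fd = d, c, fc
            c = b - inv_phi * (b - a)
            fc = f(c)
        else:
            # Minimum lies in [c, b]: shrink from the left, reuse probe d.
            a, c, fc = c, d, fd
            d = a + inv_phi * (b - a)
            fd = f(d)
    return (a + b) / 2.0
# Each step reuses one of the two previous probes, mirroring how the loop
# above recycles array_sum[min_index] instead of recomputing both sums.
# --------------------------------------------------------------------------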
def set_fiber_center_circle_method(self, radius=None, center_tol=.03,
center_range=None, image=None, **kwargs):
"""Finds fiber center using a dark circle of set radius
Uses golden mean method to find the optimal center for a circle
covering the fiber image. The optimization is for a parameter array_sum
that simply sums over the entire fiber image array
Args
----
radius : float
Radius to use when creating circle
center_tol : number (default=0.03)
Minimum possible range of center values before ending iteration
center_range: int (in pixels)
Range of tested centers, i.e. max(x0) - min(x0). If None,
uses full possible range
image : 2d numpy.ndarray, optional
The image being analyzed. This is only useful for the radius_method.
Probably not for use outside the class.
Sets
----
_diameter.circle : float
Diameter of the fiber in the circle method context
_center.circle : {'x': float, 'y': float}
Center of the fiber in the circle method context
_diameter.edge : float
If center_range is not None, approximates the circle's center using
the edge method
_center.edge : float
If center_range is not None, approximates the circle's center using
the edge method
"""
res = int(1.0/center_tol)
if image is None:
image = self.get_filtered_image()
if radius is None:
radius = self.get_fiber_radius(method='edge')
# Create four "corners" to test center of the removed circle
x = np.zeros(4).astype(float)
y = np.zeros(4).astype(float)
if center_range is not None:
approx_center = self.get_fiber_center(method='edge')
center_range = center_range / 2.0
x[0] = approx_center.x - center_range
if x[0] < radius:
x[0] = radius
x[3] = approx_center.x + center_range
if x[3] > self.width - radius:
x[3] = self.width - radius
y[0] = approx_center.y - center_range
if y[0] < radius:
y[0] = radius
y[3] = approx_center.y + center_range
if y[3] > self.height - radius:
y[3] = self.height - radius
else:
x[0] = radius
x[3] = self.width - radius
y[0] = radius
y[3] = self.height - radius
x[1] = x[0] + (1 - self._phi) * (x[3] - x[0])
x[2] = x[0] + self._phi * (x[3] - x[0])
y[1] = y[0] + (1 - self._phi) * (y[3] - y[0])
y[2] = y[0] + self._phi * (y[3] - y[0])
# Initialize array sums to each corner
array_sum = np.zeros((2, 2)).astype(float)
for i in range(2):
for j in range(2):
removed_circle_array = remove_circle(image,
Pixel(x[i+1], y[j+1]),
radius, res=1)
array_sum[j, i] = sum_array(removed_circle_array)
# Find the index of the corner with minimum array_sum
min_index = np.unravel_index(np.argmin(array_sum), (2, 2)) # Tuple
while abs(x[3] - x[0]) > center_tol and abs(y[3] - y[0]) > center_tol:
# Move the other corners to smaller search area
if min_index[0] == 0:
y[3] = y[2]
y[2] = y[1]
y[1] = y[0] + (1 - self._phi) * (y[3] - y[0])
else:
y[0] = y[1]
y[1] = y[2]
y[2] = y[0] + self._phi * (y[3] - y[0])
if min_index[1] == 0:
x[3] = x[2]
x[2] = x[1]
x[1] = x[0] + (1 - self._phi) * (x[3] - x[0])
else:
x[0] = x[1]
x[1] = x[2]
x[2] = x[0] + self._phi * (x[3] - x[0])
# Replace the opposite corner array sum (so it doesn't need to be recalculated)
array_sum[1 - min_index[0], 1 - min_index[1]] = array_sum[min_index]
min_index = (1 - min_index[0], 1 - min_index[1])
# Recalculate new sums for all four corners
for i in range(2):
for j in range(2):
if i != min_index[1] or j != min_index[0]:
temp_res = 1
if abs(x[3] - x[0]) < 10*center_tol and abs(y[3] - y[0]) < 10*center_tol:
temp_res = res
removed_circle_array = remove_circle(image,
Pixel(x[i+1], y[j+1]),
radius, temp_res)
array_sum[j, i] = sum_array(removed_circle_array)
min_index = np.unravel_index(np.argmin(array_sum), (2, 2))
self._center.circle.x = x[min_index[1]+1]
self._center.circle.y = y[min_index[0]+1]
self._diameter.circle = radius * 2.0
self._array_sum.circle = np.amin(array_sum)
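# Note (illustrative): this is the 2-D analogue of the golden-section
# search sketched above, run on x and y simultaneously, using the coarse
# resolution (res=1) far from convergence and switching to the finer `res`
# only once the bracket shrinks below 10*center_tol.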
def set_fiber_center_edge_method(self, **kwargs):
"""TAverages the fiber edges to set the fiber center
Sets
----
self._center.edge.y : float
self._center.edge.x : float
"""
self.set_fiber_edges(**kwargs)
self._center.edge.y = (self._edges.top.y + self._edges.bottom.y) / 2.0
self._center.edge.x = (self._edges.left.x + self._edges.right.x) / 2.0
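# Illustrative arithmetic (hypothetical edge values, not from this file):
# with edges at top=(_, 10), bottom=(_, 110), left=(5, _), right=(105, _),
# the averages above give an edge-method center of (x, y) = (55.0, 60.0).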
|
rpetersburg/fiber_properties
|
legacy_code/image_centering.py
|
Python
|
mit
| 13,360
|
[
"Gaussian"
] |
53f70b75320fcf36b820c797f2bb48d757848abc0cd82fbd11255d48d23c41fb
|
# Orca
#
# Copyright 2004-2009 Sun Microsystems Inc.
# Copyright 2010-2013 The Orca Team
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Labels for Orca's GUIs. These have been put in their own module so that we
can present them in the correct language when users change the language on the
fly without having to reload a bunch of modules."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2004-2009 Sun Microsystems Inc." \
"Copyright (c) 2010-2013 The Orca Team"
__license__ = "LGPL"
from .orca_i18n import _, C_
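# Note (a general gettext convention, assumed about orca_i18n rather than
# stated in this file): C_ is the contextual variant (pgettext-style), which
# lets identical English strings translate differently per context, e.g.:
#
#   C_("keybindings", "Default")   # the default key-binding group
#   C_("VoiceType", "Default")     # the default speech voice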
# Translators: This string appears on a button in a dialog. "Activating" the
# selected item will perform the action that one would expect to occur if the
# object were clicked on with the mouse. If the object is a link, activating
# it will bring you to a new page. If the object is a button, activating it
# will press the button. If the object is a combobox, activating it will expand
# it to show all of its contents. And so on.
ACTIVATE = _("_Activate")
# Translators: Orca has a number of commands that override the default behavior
# within an application. For instance, on a web page Orca's Structural Navigation
# command "h" moves you to the next heading. What should happen when you press
# "h" in an entry on a web page depends: If you want to resume reading content,
# "h" should move to the next heading; if you want to enter text, "h" should not
# move you to the next heading. Because Orca doesn't know what you want to do,
# it has two modes: In browse mode, Orca treats key presses as commands to read
# the content; in focus mode, Orca treats key presses as something that should be
# handled by the focused widget. Orca optionally can attempt to detect which mode
# is appropriate for the current situation and switch automatically. This string
# is a label for a GUI option to enable such automatic switching when structural
# navigation commands are used. As an example, if this setting were enabled,
# pressing "e" to move to the next entry would move focus there and also turn
# focus mode on so that the next press of "e" would type an "e" into the entry.
# If this setting is not enabled, the second press of "e" would continue to be
# a navigation command to move amongst entries.
AUTO_FOCUS_MODE_STRUCT_NAV = _("Automatic focus mode during structural navigation")
# Translators: Orca has a number of commands that override the default behavior
# within an application. For instance, if you are at the bottom of an entry and
# press Down arrow, should you leave the entry? It depends on if you want to
# resume reading content or if you are editing the text in the entry. Because
# Orca doesn't know what you want to do, it has two modes: In browse mode, Orca
# treats key presses as commands to read the content; in focus mode, Orca treats
# key presses as something that should be handled by the focused widget. Orca
# optionally can attempt to detect which mode is appropriate for the current
# situation and switch automatically. This string is a label for a GUI option to
# enable such automatic switching when caret navigation commands are used. As an
# example, if this setting were enabled, pressing Down Arrow would allow you to
# move into an entry but once you had done so, Orca would switch to Focus mode
# and subsequent presses of Down Arrow would be controlled by the web browser
# and not by Orca. If this setting is not enabled, Orca would continue to control
# what happens when you press an arrow key, thus making it possible to arrow out
# of the entry.
AUTO_FOCUS_MODE_CARET_NAV = _("Automatic focus mode during caret navigation")
# Translators: Orca has a number of commands that override the default behavior
# within an application. For instance, if you are at the bottom of an entry and
# press Down arrow, should you leave the entry? It depends on if you want to
# resume reading content or if you are editing the text in the entry. Because
# Orca doesn't know what you want to do, it has two modes: In browse mode, Orca
# treats key presses as commands to read the content; in focus mode, Orca treats
# key presses as something that should be handled by the focused widget. Orca
# optionally can attempt to detect which mode is appropriate for the current
# situation and switch automatically. This string is a label for a GUI option to
# enable such automatic switching when native navigation commands are used.
# Here "native" means "not Orca"; it could be a browser navigation command such
# as the Tab key, or it might be a web page behavior, such as the search field
# automatically gaining focus when the page loads.
AUTO_FOCUS_MODE_NATIVE_NAV = _("Automatic focus mode during native navigation")
# Translators: A single braille cell on a refreshable braille display consists
# of 8 dots. Dot 7 is the dot in the bottom left corner. If the user selects
# this option, Dot 7 will be used to 'underline' text of interest, e.g. when
# "marking"/indicating that a given word is bold.
BRAILLE_DOT_7 = _("Dot _7")
# Translators: A single braille cell on a refreshable braille display consists
# of 8 dots. Dot 8 is the dot in the bottom right corner. If the user selects
# this option, Dot 8 will be used to 'underline' text of interest, e.g. when
# "marking"/indicating that a given word is bold.
BRAILLE_DOT_8 = _("Dot _8")
# Translators: A single braille cell on a refreshable braille display consists
# of 8 dots. Dots 7-8 are the dots at the bottom. If the user selects this
# option, Dots 7-8 will be used to 'underline' text of interest, e.g. when
# "marking"/indicating that a given word is bold.
BRAILLE_DOT_7_8 = _("Dots 7 an_d 8")
# Translators: This is the label for a button in a dialog.
BTN_CANCEL = _("_Cancel")
# Translators: This is the label for a button in a dialog.
BTN_JUMP_TO = _("_Jump to")
# Translators: This is the label for a button in a dialog.
BTN_OK = _("_OK")
# Translators: Orca uses Speech Dispatcher to present content to users via
# text-to-speech. Speech Dispatcher has a feature to control how capital
# letters are presented: Do nothing at all, say the word 'capital' prior to
# presenting a capital letter (which Speech Dispatcher refers to as 'spell'),
# or play a tone (which Speech Dispatcher refers to as a sound 'icon'.) This
# string to be translated appears as a combo box item in Orca's Preferences.
CAPITALIZATION_STYLE_ICON = C_("capitalization style", "Icon")
# Translators: Orca uses Speech Dispatcher to present content to users via
# text-to-speech. Speech Dispatcher has a feature to control how capital
# letters are presented: Do nothing at all, say the word 'capital' prior to
# presenting a capital letter (which Speech Dispatcher refers to as 'spell'),
# or play a tone (which Speech Dispatcher refers to as a sound 'icon'.) This
# string to be translated appears as a combo box item in Orca's Preferences.
CAPITALIZATION_STYLE_NONE = C_("capitalization style", "None")
# Translators: Orca uses Speech Dispatcher to present content to users via
# text-to-speech. Speech Dispatcher has a feature to control how capital
# letters are presented: Do nothing at all, say the word 'capital' prior to
# presenting a capital letter (which Speech Dispatcher refers to as 'spell'),
# or play a tone (which Speech Dispatcher refers to as a sound 'icon'.) This
# string to be translated appears as a combo box item in Orca's Preferences.
CAPITALIZATION_STYLE_SPELL = C_("capitalization style", "Spell")
# Translators: If this checkbox is checked, then Orca will tell you when one of
# your buddies is typing a message.
CHAT_ANNOUNCE_BUDDY_TYPING = _("Announce when your _buddies are typing")
# Translators: If this checkbox is checked, then Orca will provide the user with
# chat room specific message histories rather than just a single history which
# contains the latest messages from all the chat rooms that they are in.
CHAT_SEPARATE_MESSAGE_HISTORIES = _("Provide chat room specific _message histories")
# Translators: This is the label of a panel holding options for how messages in
# this application's chat rooms should be spoken. The options are: Speak messages
# from all channels (i.e. even if the chat application doesn't have focus); speak
# messages from a channel only if it is the active channel; speak messages from
# any channel, but only if the chat application has focus.
CHAT_SPEAK_MESSAGES_FROM = _("Speak messages from")
# Translators: This is the label of a radio button. If it is selected, Orca will
# speak all new chat messages as they appear irrespective of whether or not the
# chat application currently has focus. This is the default behaviour.
CHAT_SPEAK_MESSAGES_ALL = _("All cha_nnels")
# Translators: This is the label of a radio button. If it is selected, Orca will
# speak all new chat messages as they appear if and only if the chat application
# has focus. The string substitution is for the application name (e.g Pidgin).
CHAT_SPEAK_MESSAGES_ALL_IF_FOCUSED = _("All channels when an_y %s window is active")
# Translators: This is the label of a radio button. If it is selected, Orca will
# only speak new chat messages for the currently active channel, irrespective of
# whether the chat application has focus.
CHAT_SPEAK_MESSAGES_ACTIVE = _("A channel only if its _window is active")
# Translators: If this checkbox is checked, then Orca will speak the name of the
# chat room prior to presenting an incoming message.
CHAT_SPEAK_ROOM_NAME = _("_Speak Chat Room name")
# Translators: When presenting the content of a line on a web page, Orca by
# default presents the full line, including any links or form fields on that
# line, in order to reflect the on-screen layout as seen by sighted users.
# Not all users like this presentation, however, and prefer to have objects
# treated as if they were on individual lines, such as is done by Windows
# screen readers, so that unrelated objects (e.g. links in a navbar) are not
# all jumbled together. As a result, this is now configurable. If layout mode
# is enabled, Orca will present the full line as it appears on the screen; if
# it is disabled, Orca will treat each object as if it were on a separate line,
# both for presentation and navigation.
CONTENT_LAYOUT_MODE = _("Enable layout mode for content")
# Translators: Orca's keybindings support double and triple "clicks" or key
# presses, similar to using a mouse. This string appears in Orca's preferences
# dialog after a keybinding which requires a double click.
CLICK_COUNT_DOUBLE = _("double click")
# Translators: Orca's keybindings support double and triple "clicks" or key
# presses, similar to using a mouse. This string appears in Orca's preferences
# dialog after a keybinding which requires a triple click.
CLICK_COUNT_TRIPLE = _("triple click")
# Translators: This is a label which will appear in the list of available speech
# engines as a special item. It refers to the default engine configured within
# the speech subsystem. Apart from this item, the user will have a chance to
# select a particular speech engine by its real name (Festival, IBMTTS, etc.)
DEFAULT_SYNTHESIZER = _("Default Synthesizer")
# Translators: This is a label for a column header in Orca's pronunciation
# dictionary. The pronunciation dictionary allows the user to correct words
# which the speech synthesizer mispronounces (e.g. a person's name, a technical
# word) or doesn't pronounce as the user desires (e.g. an acronym) by providing
# an alternative string. The "Actual String" here refers to the word to be
# corrected as it would actually appear in text being read. Example: "LOL".
DICTIONARY_ACTUAL_STRING = _("Actual String")
# Translators: This is a label for a column header in Orca's pronunciation
# dictionary. The pronunciation dictionary allows the user to correct words
# which the speech synthesizer mispronounces (e.g. a person's name, a technical
# word) or doesn't pronounce as the user desires (e.g. an acronym) by providing
# an alternative string. The "Replacement String" here refers to how the user
# would like the "Actual String" to be pronounced by the speech synthesizer.
# Example: "L O L" or "Laughing Out Loud" (for Actual String "LOL").
DICTIONARY_REPLACEMENT_STRING = _("Replacement String")
# Translators: Orca has an "echo" feature to present text as it is being written
# by the user. While Orca's "key echo" options present the actual keyboard keys
# being pressed, "character echo" presents the character/string of length 1 that
# is inserted as a result of the keypress.
ECHO_CHARACTER = _("Enable echo by cha_racter")
# Translators: Orca has an "echo" feature to present text as it is being written
# by the user. This string refers to a "key echo" option. When this option is
# enabled, dead keys will be announced when pressed.
ECHO_DIACRITICAL = _("Enable non-spacing _diacritical keys")
# Translators: Orca has a "find" feature which allows the user to search the
# active application for on screen text and widgets. This label is associated
# with the setting to begin the search from the current location rather than
# from the top of the screen.
FIND_START_AT_CURRENT_LOCATION = _("C_urrent location")
# Translators: This is the label for a spinbutton. This option allows the user
# to specify the number of matched characters that must be present before Orca
# speaks the line that contains the results from an application's Find toolbar.
FIND_MINIMUM_MATCH_LENGTH = _("Minimum length of matched text:")
# Translators: This is the label of a panel containing options for what Orca
# presents when the user is in the Find toolbar of an application, e.g. Firefox.
FIND_OPTIONS = _("Find Options")
# Translators: This is the label for a checkbox. This option controls whether
# the line that contains the match from an application's Find toolbar should
# always be spoken, or only spoken if it is a different line than the line
# which contained the last match.
FIND_ONLY_SPEAK_CHANGED_LINES = _("Onl_y speak changed lines during find")
# Translators: This is the label for a checkbox. This option controls whether or
# not Orca will automatically speak the line that contains the match while the
# user is performing a search from the Find toolbar of an application, e.g.
# Firefox.
FIND_SPEAK_RESULTS = _("Speak results during _find")
# Translators: Command is a table column header where the cells in the column
# are a sentence that briefly describes what action Orca will take if and when
# the user invokes that keyboard command.
KB_HEADER_FUNCTION = _("Command")
# Translators: Key Binding is a table column header where the cells in the
# column represent keyboard combinations the user can press to invoke Orca
# commands.
KB_HEADER_KEY_BINDING = _("Key Binding")
# Translators: This string is a label for the group of Orca commands which
# can be used in any setting, task, or application. They are not specific
# to, for instance, web browsing.
KB_GROUP_DEFAULT = C_("keybindings", "Default")
# Translators: An external braille device has buttons on it that permit the
# user to create input gestures from the braille device. The braille bindings
# are what determine the actions Orca will take when the user presses these
# buttons.
KB_GROUP_BRAILLE = _("Braille Bindings")
# Translators: This string is a label for the group of Orca commands which
# do not currently have an associated key binding.
KB_GROUP_UNBOUND = _("Unbound")
# Translators: Modified is a table column header in Orca's preferences dialog.
# This column contains a checkbox which indicates whether a key binding
# for an Orca command has been changed by the user to something other than its
# default value.
KB_MODIFIED = C_("keybindings", "Modified")
# Translators: This label refers to the keyboard layout (desktop or laptop).
KEYBOARD_LAYOUT_DESKTOP = _("_Desktop")
# Translators: Orca's preferences can be configured on a per-application basis,
# allowing users to customize Orca's behavior, keybindings, etc. to work one
# way in LibreOffice and another way in a chat application. This string is the
# title of Orca's application-specific preferences dialog for an application.
# The string substituted in is the accessible name of the application (e.g.
# "Gedit", "Firefox", etc.
PREFERENCES_APPLICATION_TITLE = _("Screen Reader Preferences for %s")
# Translators: This is a table column header. This column consists of a single
# checkbox. If the checkbox is checked, Orca will indicate the associated item
# or attribute by "marking" it in braille. "Marking" is not the same as writing
# out the word; instead marking refers to adding some other indicator, e.g.
# "underlining" with braille dots 7-8 a word that is bold.
PRESENTATION_MARK_IN_BRAILLE = _("Mark in braille")
# Translators: "Present Unless" is a column header of the text attributes panel
# of the Orca preferences dialog. On this panel, the user can select a set of
# text attributes that they would like spoken and/or indicated in braille.
# Because the list of attributes could get quite lengthy, we provide the option
# to always speak/braille a text attribute *unless* its value is equal to the
# value given by the user in this column of the list. For example, given the
# text attribute "underline" and a present unless value of "none", the user is
# stating that he/she would like to have underlined text announced for all cases
# (single, double, low, etc.) except when the value of underline is none (i.e.
# when it's not underlined). "Present" here is being used as a verb.
PRESENTATION_PRESENT_UNLESS = _("Present Unless")
# Translators: This is a table column header. The "Speak" column consists of a
# single checkbox. If the checkbox is checked, Orca will speak the associated
# item or attribute (e.g. saying "Bold" as part of the information presented
# when the user gives the Orca command to obtain the format and font details of
# the current text).
PRESENTATION_SPEAK = _("Speak")
# Translators: This is the title of a message dialog informing the user that
# he/she attempted to save a new user profile under a name which already exists.
# A "user profile" is a collection of settings which apply to a given task, such
# as a "Spanish" profile which would use Spanish text-to-speech and Spanish
# braille and selected when reading Spanish content.
PROFILE_CONFLICT_TITLE = _("Save Profile As Conflict")
# Translators: This is the label of a message dialog informing the user that
# he/she attempted to save a new user profile under a name which already exists.
# A "user profile" is a collection of settings which apply to a given task, such
# as a "Spanish" profile which would use Spanish text-to-speech and Spanish
# braille and selected when reading Spanish content.
PROFILE_CONFLICT_LABEL = _("User Profile Conflict!")
# Translators: This is the message in a dialog informing the user that he/she
# attempted to save a new user profile under a name which already exists.
# A "user profile" is a collection of settings which apply to a given task, such
# as a "Spanish" profile which would use Spanish text-to-speech and Spanish
# braille and selected when reading Spanish content.
PROFILE_CONFLICT_MESSAGE = _("Profile %s already exists.\n" \
"Continue updating the existing profile with " \
"these new changes?")
# Translators: This text is displayed in a message dialog when a user indicates
# he/she wants to switch to a new user profile which will cause him/her to lose
# settings which have been altered but not yet saved. A "user profile" is a
# collection of settings which apply to a given task such as a "Spanish" profile
# which would use Spanish text-to-speech and Spanish braille and selected when
# reading Spanish content.
PROFILE_LOAD_LABEL = _("Load user profile")
# Translators: This text is displayed in a message dialog when a user indicates
# he/she wants to switch to a new user profile which will cause him/her to lose
# settings which have been altered but not yet saved. A "user profile" is a
# collection of settings which apply to a given task such as a "Spanish" profile
# which would use Spanish text-to-speech and Spanish braille and selected when
# reading Spanish content.
PROFILE_LOAD_MESSAGE = \
_("You are about to change the active profile. If you\n" \
"have just made changes in your preferences, they will\n" \
"be dropped at profile load.\n\n" \
"Continue loading profile discarding previous changes?")
# Translators: Profiles in Orca make it possible for users to quickly switch
# amongst a group of pre-defined settings (e.g. an 'English' profile for reading
# text written in English using an English-language speech synthesizer and
# braille rules, and a similar 'Spanish' profile for reading Spanish text. The
# following string is the title of a dialog in which users can save a newly-
# defined profile.
PROFILE_SAVE_AS_TITLE = _("Save Profile As")
# Translators: Profiles in Orca make it possible for users to quickly switch
# amongst a group of pre-defined settings (e.g. an 'English' profile for reading
# text written in English using an English-language speech synthesizer and
# braille rules, and a similar 'Spanish' profile for reading Spanish text. The
# following string is the label for a text entry in which the user enters the
# name of a new settings profile being saved via the 'Save Profile As' dialog.
PROFILE_NAME_LABEL = _("_Profile Name:")
# Translators: Profiles in Orca make it possible for users to quickly switch
# amongst a group of pre-defined settings (e.g. an 'English' profile for reading
# text written in English using an English-language speech synthesizer and
# braille rules, and a similar 'Spanish' profile for reading Spanish text.
# The following is a label in a dialog informing the user that he/she
# is about to remove a user profile, and action that cannot be undone.
PROFILE_REMOVE_LABEL = _("Remove user profile")
# Translators: Profiles in Orca make it possible for users to quickly switch
# amongst a group of pre-defined settings (e.g. an 'English' profile for reading
# text written in English using an English-language speech synthesizer and
# braille rules, and a similar 'Spanish' profile for reading Spanish text.
# The following is a message in a dialog informing the user that he/she
# is about to remove a user profile, an action that cannot be undone.
PROFILE_REMOVE_MESSAGE = _("You are about to remove profile %s. " \
"All unsaved settings and settings saved in this " \
"profile will be lost. Do you want to continue " \
"and remove this profile and all related settings?")
# Translators: Orca has a setting which determines which progress bar updates
# should be announced. Choosing "All" means that Orca will present progress bar
# updates regardless of what application and window they happen to be in.
PROGRESS_BAR_ALL = C_("ProgressBar", "All")
# Translators: Orca has a setting which determines which progress bar updates
# should be announced. Choosing "Application" means that Orca will present
# progress bar updates as long as the progress bar is in the active application
# (but not necessarily in the current window).
PROGRESS_BAR_APPLICATION = C_("ProgressBar", "Application")
# Translators: Orca has a setting which determines which progress bar updates
# should be announced. Choosing "Window" means that Orca will present progress
# bar updates as long as the progress bar is in the active window.
PROGRESS_BAR_WINDOW = C_("ProgressBar", "Window")
# Translators: If this setting is chosen, no punctuation symbols will be spoken
# as a user reads a document.
PUNCTUATION_STYLE_NONE = C_("punctuation level", "_None")
# Translators: If this setting is chosen, common punctuation symbols (like
# comma, period, question mark) will not be spoken as a user reads a document,
# but less common symbols (such as #, @, $) will.
PUNCTUATION_STYLE_SOME = _("So_me")
# Translators: If this setting is chosen, the majority of punctuation symbols
# will be spoken as a user reads a document.
PUNCTUATION_STYLE_MOST = _("M_ost")
# Translators: If this setting is chosen and the user is reading over an entire
# document, Orca will pause at the end of each line.
SAY_ALL_STYLE_LINE = _("Line")
# Translators: If this setting is chosen and the user is reading over an entire
# document, Orca will pause at the end of each sentence.
SAY_ALL_STYLE_SENTENCE = _("Sentence")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text of a blockquote.
SN_HEADER_BLOCKQUOTE = C_("structural navigation", "Blockquote")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text of a button.
SN_HEADER_BUTTON = C_("structural navigation", "Button")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the caption of a table.
SN_HEADER_CAPTION = C_("structural navigation", "Caption")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the label of a check box.
SN_HEADER_CHECK_BOX = C_("structural navigation", "Check Box")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text displayed for a web element with an "onClick" handler.
SN_HEADER_CLICKABLE = C_("structural navigation", "Clickable")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the selected item in a combo box.
SN_HEADER_COMBO_BOX = C_("structural navigation", "Combo Box")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the description of an element.
SN_HEADER_DESCRIPTION = C_("structural navigation", "Description")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text of a heading.
SN_HEADER_HEADING = C_("structural navigation", "Heading")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text (alt text, title, etc.) associated with an image.
SN_HEADER_IMAGE = C_("structural navigation", "Image")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the label of a form field.
SN_HEADER_LABEL = C_("structural navigation", "Label")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text of a landmark. ARIA role landmarks are the W3C defined HTML
# tag attribute 'role' used to identify important parts of a webpage like
# banners, main content, search, etc.
SN_HEADER_LANDMARK = C_("structural navigation", "Landmark")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of a column which
# contains the level of a heading. Level will be a "1" for <h1>, a "2" for <h2>,
# and so on.
SN_HEADER_LEVEL = C_("structural navigation", "Level")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text of a link.
SN_HEADER_LINK = C_("structural navigation", "Link")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text of a list.
SN_HEADER_LIST = C_("structural navigation", "List")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text of a list item.
SN_HEADER_LIST_ITEM = C_("structural navigation", "List Item")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text of an object.
SN_HEADER_OBJECT = C_("structural navigation", "Object")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text of a paragraph.
SN_HEADER_PARAGRAPH = C_("structural navigation", "Paragraph")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the label of a radio button.
SN_HEADER_RADIO_BUTTON = C_("structural navigation", "Radio Button")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the role of a widget. Examples include "heading", "paragraph",
# "table", "combo box", etc.
SN_HEADER_ROLE = C_("structural navigation", "Role")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the selected item of a form field.
SN_HEADER_SELECTED_ITEM = C_("structural navigation", "Selected Item")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the state of a widget. Examples include "checked"/"not checked",
# "selected"/"not selected", "visited/not visited", etc.
SN_HEADER_STATE = C_("structural navigation", "State")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the text of an entry.
SN_HEADER_TEXT = C_("structural navigation", "Text")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the URI of a link.
SN_HEADER_URI = C_("structural navigation", "URI")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title for a column which
# contains the value of a form field.
SN_HEADER_VALUE = C_("structural navigation", "Value")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_BLOCKQUOTE = C_("structural navigation", "Blockquotes")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_BUTTON = C_("structural navigation", "Buttons")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_CHECK_BOX = C_("structural navigation", "Check Boxes")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
# "Clickables" are web elements which have an "onClick" handler.
SN_TITLE_CLICKABLE = C_("structural navigation", "Clickables")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_COMBO_BOX = C_("structural navigation", "Combo Boxes")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_ENTRY = C_("structural navigation", "Entries")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_FORM_FIELD = C_("structural navigation", "Form Fields")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_HEADING = C_("structural navigation", "Headings")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_IMAGE = C_("structural navigation", "Images")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
# Level will be a "1" for <h1>, a "2" for <h2>, and so on.
SN_TITLE_HEADING_AT_LEVEL = C_("structural navigation", "Headings at Level %d")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
# ARIA role landmarks are the W3C defined HTML tag attribute 'role' used to
# identify important parts of a webpage like banners, main content, search, etc.
SN_TITLE_LANDMARK = C_("structural navigation", "Landmarks")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
# A 'large object' is a logical chunk of text, such as a paragraph, a list,
# a table, etc.
SN_TITLE_LARGE_OBJECT = C_("structural navigation", "Large Objects")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_LINK = C_("structural navigation", "Links")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_LIST = C_("structural navigation", "Lists")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_LIST_ITEM = C_("structural navigation", "List Items")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_PARAGRAPH = C_("structural navigation", "Paragraphs")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_RADIO_BUTTON = C_("structural navigation", "Radio Buttons")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_TABLE = C_("structural navigation", "Tables")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_UNVISITED_LINK = C_("structural navigation", "Unvisited Links")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is the title of such a dialog box.
SN_TITLE_VISITED_LINK = C_("structural navigation", "Visited Links")
# Translators: This is the title of a panel holding options for how to navigate
# HTML content (e.g., Orca caret navigation, positioning of caret, structural
# navigation, etc.).
PAGE_NAVIGATION = _("Page Navigation")
# Translators: When the user loads a new web page, they can optionally have Orca
# automatically start reading the page from beginning to end. This is the label
# of a checkbox in which users can indicate their preference.
READ_PAGE_UPON_LOAD = \
_("Automatically start speaking a page when it is first _loaded")
# Translators: When the user loads a new web page, they can optionally have Orca
# automatically summarize details about the page, such as the number of elements
# (landmarks, forms, links, tables, etc.).
PAGE_SUMMARY_UPON_LOAD = _("_Present summary of a page when it is first loaded")
# Translators: Different speech systems and speech engines work differently when
# it comes to handling pauses (e.g. sentence boundaries). This property allows
# the user to specify whether speech should be sent to the speech synthesis
# system immediately when a pause directive is encountered or if it should be
# queued up and sent to the speech synthesis system once the entire set of
# utterances has been calculated.
SPEECH_BREAK_INTO_CHUNKS = _("Break speech into ch_unks between pauses")
# Translators: This string will appear in the list of available voices for the
# current speech engine. "%s" will be replaced by the name of the current speech
# engine, such as "Festival default voice" or "IBMTTS default voice". It refers
# to the default voice configured for given speech engine within the speech
# subsystem. Apart from this item, the list will contain the names of all
# available "real" voices provided by the speech engine.
SPEECH_DEFAULT_VOICE = _("%s default voice")
# Translators: This refers to the voice used by Orca when presenting the content
# of the screen and other messages.
SPEECH_VOICE_TYPE_DEFAULT = C_("VoiceType", "Default")
# Translators: This refers to the voice used by Orca when presenting one or more
# characters which is part of a hyperlink.
SPEECH_VOICE_TYPE_HYPERLINK = C_("VoiceType", "Hyperlink")
# Translators: This refers to the voice used by Orca when presenting information
# which is not displayed on the screen as text, but is still being communicated
# by the system in some visual fashion. For instance, Orca says "misspelled" to
# indicate the presence of the red squiggly line found under a spelling error;
# Orca might say "3 of 6" when a user Tabs into a list of six items and the
# third item is selected. And so on.
SPEECH_VOICE_TYPE_SYSTEM = C_("VoiceType", "System")
# Translators: This refers to the voice used by Orca when presenting one or more
# characters which is written in uppercase.
SPEECH_VOICE_TYPE_UPPERCASE = C_("VoiceType", "Uppercase")
# Translators: this label refers to the name of a particular speech synthesis
# system. (http://devel.freebsoft.org/speechd)
SPEECH_DISPATCHER = _("Speech Dispatcher")
# Translators: This is a label for a group of options related to Orca's behavior
# when presenting an application's spell check dialog.
SPELL_CHECK = C_("OptionGroup", "Spell Check")
# Translators: This is a label for a checkbox associated with an Orca setting.
# When this option is enabled, Orca will spell out the current error in addition
# to speaking it. For example, if the misspelled word is "foo," enabling this
# setting would cause Orca to speak "f o o" after speaking "foo".
SPELL_CHECK_SPELL_ERROR = _("Spell _error")
# Translators: This is a label for a checkbox associated with an Orca setting.
# When this option is enabled, Orca will spell out the current suggestion in
# addition to speaking it. For example, if the misspelled word is "foo," and
# the first suggestion is "for" enabling this setting would cause Orca to speak
# "f o r" after speaking "for".
SPELL_CHECK_SPELL_SUGGESTION = _("Spell _suggestion")
# Translators: This is a label for a checkbox associated with an Orca setting.
# When this option is enabled, Orca will present the context (surrounding text,
# typically the sentence or line) in which the mistake occurred.
SPELL_CHECK_PRESENT_CONTEXT = _("Present _context of error")
# Translators: This is a label for an option to tell Orca whether or not it
# should speak the coordinates of the current spreadsheet cell. Coordinates are
# the row and column position within the spreadsheet (i.e. A1, B1, C2 ...)
SPREADSHEET_SPEAK_CELL_COORDINATES = _("Speak spreadsheet cell coordinates")
# Translators: This is a label for an option which controls what Orca speaks when
# presenting selection changes in a spreadsheet. By default, Orca will speak just
# what changed. For instance, if cells A1 through A8 are already selected, and the
# user adds A9 to the selection, Orca by default would just say "A9 selected."
# Some users, however, prefer to have Orca always announce the entire selected range,
# i.e. in the same scenario say "A1 through A9 selected." Those users should enable
# this option.
SPREADSHEET_SPEAK_SELECTED_RANGE = _("Always speak selected spreadsheet range")
# Translators: This is a label for an option for whether or not to speak the
# header of a table cell in document content.
TABLE_ANNOUNCE_CELL_HEADER = _("Announce cell _header")
# Translators: This is the title of a panel containing options for specifying
# how to navigate tables in document content.
TABLE_NAVIGATION = _("Table Navigation")
# Translators: This is a label for an option to tell Orca to skip over empty/
# blank cells when navigating tables in document content.
TABLE_SKIP_BLANK_CELLS = _("Skip _blank cells")
# Translators: When users are navigating a table, they sometimes want the entire
# row of a table read; other times they want just the current cell presented to
# them. This label is associated with the default presentation to be used.
TABLE_SPEAK_CELL = _("Speak _cell")
# Translators: This is a label for an option to tell Orca whether or not it
# should speak table cell coordinates in document content.
TABLE_SPEAK_CELL_COORDINATES = _("Speak _cell coordinates")
# Translators: This is a label for an option to tell Orca whether or not it
# should speak the span size of a table cell (e.g., how many rows and columns
# a particular table cell spans in a table).
TABLE_SPEAK_CELL_SPANS = _("Speak _multiple cell spans")
# Translators: This is a table column header. "Attribute" here refers to text
# attributes such as bold, underline, family-name, etc.
TEXT_ATTRIBUTE_NAME = _("Attribute Name")
# Translators: Gecko native caret navigation is where Firefox itself controls
# how the arrow keys move the caret around HTML content. It's often broken, so
# Orca needs to provide its own support. As such, Orca offers the user the
# ability to switch between the Firefox mode and the Orca mode. This is the
# label of a checkbox in which users can indicate their default preference.
USE_CARET_NAVIGATION = _("Control caret navigation")
# Translators: Orca provides keystrokes to navigate HTML content in a structural
# manner: go to previous/next header, list item, table, etc. This is the label
# of a checkbox in which users can indicate their default preference.
USE_STRUCTURAL_NAVIGATION = _("Enable _structural navigation")
# Translators: This refers to the amount of information Orca provides about a
# particular object that receives focus.
VERBOSITY_LEVEL_BRIEF = _("Brie_f")
|
GNOME/orca
|
src/orca/guilabels.py
|
Python
|
lgpl-2.1
| 47,461
|
[
"ORCA"
] |
de60ffa9c5855630e3bb5c24b7d298449374191fe8ca97d695e3d9d9ec0f9397
|
from __future__ import unicode_literals
from django.db import IntegrityError, connection, models, transaction
from django.test import TestCase
from .models import (
Bar, Director, Favorites, HiddenPointer, ManualPrimaryKey, MultiModel,
Place, RelatedModel, Restaurant, School, Target, UndergroundBar, Waiter,
)
class OneToOneTests(TestCase):
def setUp(self):
self.p1 = Place.objects.create(name='Demon Dogs', address='944 W. Fullerton')
self.p2 = Place.objects.create(name='Ace Hardware', address='1013 N. Ashland')
self.r1 = Restaurant.objects.create(place=self.p1, serves_hot_dogs=True, serves_pizza=False)
self.b1 = Bar.objects.create(place=self.p1, serves_cocktails=False)
def test_getter(self):
# A Restaurant can access its place.
self.assertEqual(repr(self.r1.place), '<Place: Demon Dogs the place>')
# A Place can access its restaurant, if available.
self.assertEqual(repr(self.p1.restaurant), '<Restaurant: Demon Dogs the restaurant>')
# p2 doesn't have an associated restaurant.
with self.assertRaisesMessage(Restaurant.DoesNotExist, 'Place has no restaurant'):
self.p2.restaurant
# The exception raised on attribute access when a related object
# doesn't exist should be an instance of a subclass of `AttributeError`
# refs #21563
self.assertFalse(hasattr(self.p2, 'restaurant'))
def test_setter(self):
# Set the place using assignment notation. Because place is the primary
# key on Restaurant, the save will create a new restaurant
self.r1.place = self.p2
self.r1.save()
self.assertEqual(repr(self.p2.restaurant), '<Restaurant: Ace Hardware the restaurant>')
self.assertEqual(repr(self.r1.place), '<Place: Ace Hardware the place>')
self.assertEqual(self.p2.pk, self.r1.pk)
# Set the place back again, using assignment in the reverse direction.
self.p1.restaurant = self.r1
self.assertEqual(repr(self.p1.restaurant), '<Restaurant: Demon Dogs the restaurant>')
r = Restaurant.objects.get(pk=self.p1.id)
self.assertEqual(repr(r.place), '<Place: Demon Dogs the place>')
def test_manager_all(self):
# Restaurant.objects.all() just returns the Restaurants, not the Places.
self.assertQuerysetEqual(Restaurant.objects.all(), [
'<Restaurant: Demon Dogs the restaurant>',
])
# Place.objects.all() returns all Places, regardless of whether they
# have Restaurants.
self.assertQuerysetEqual(Place.objects.order_by('name'), [
'<Place: Ace Hardware the place>',
'<Place: Demon Dogs the place>',
])
def test_manager_get(self):
def assert_get_restaurant(**params):
self.assertEqual(repr(Restaurant.objects.get(**params)),
'<Restaurant: Demon Dogs the restaurant>')
assert_get_restaurant(place__id__exact=self.p1.pk)
assert_get_restaurant(place__id=self.p1.pk)
assert_get_restaurant(place__exact=self.p1.pk)
assert_get_restaurant(place__exact=self.p1)
assert_get_restaurant(place=self.p1.pk)
assert_get_restaurant(place=self.p1)
assert_get_restaurant(pk=self.p1.pk)
assert_get_restaurant(place__pk__exact=self.p1.pk)
assert_get_restaurant(place__pk=self.p1.pk)
assert_get_restaurant(place__name__startswith="Demon")
def assert_get_place(**params):
self.assertEqual(repr(Place.objects.get(**params)),
'<Place: Demon Dogs the place>')
assert_get_place(restaurant__place__exact=self.p1.pk)
assert_get_place(restaurant__place__exact=self.p1)
assert_get_place(restaurant__place__pk=self.p1.pk)
assert_get_place(restaurant__exact=self.p1.pk)
assert_get_place(restaurant__exact=self.r1)
assert_get_place(restaurant__pk=self.p1.pk)
assert_get_place(restaurant=self.p1.pk)
assert_get_place(restaurant=self.r1)
assert_get_place(id__exact=self.p1.pk)
assert_get_place(pk=self.p1.pk)
def test_foreign_key(self):
# Add a Waiter to the Restaurant.
w = self.r1.waiter_set.create(name='Joe')
self.assertEqual(repr(w), '<Waiter: Joe the waiter at Demon Dogs the restaurant>')
# Query the waiters
def assert_filter_waiters(**params):
self.assertQuerysetEqual(Waiter.objects.filter(**params), [
'<Waiter: Joe the waiter at Demon Dogs the restaurant>'
])
assert_filter_waiters(restaurant__place__exact=self.p1.pk)
assert_filter_waiters(restaurant__place__exact=self.p1)
assert_filter_waiters(restaurant__place__pk=self.p1.pk)
assert_filter_waiters(restaurant__exact=self.r1.pk)
assert_filter_waiters(restaurant__exact=self.r1)
assert_filter_waiters(restaurant__pk=self.r1.pk)
assert_filter_waiters(restaurant=self.r1.pk)
assert_filter_waiters(restaurant=self.r1)
assert_filter_waiters(id__exact=w.pk)
assert_filter_waiters(pk=w.pk)
# Delete the restaurant; the waiter should also be removed
r = Restaurant.objects.get(pk=self.r1.pk)
r.delete()
self.assertEqual(Waiter.objects.count(), 0)
def test_multiple_o2o(self):
# One-to-one fields still work if you create your own primary key
o1 = ManualPrimaryKey(primary_key="abc123", name="primary")
o1.save()
o2 = RelatedModel(link=o1, name="secondary")
o2.save()
# You can have multiple one-to-one fields on a model, too.
x1 = MultiModel(link1=self.p1, link2=o1, name="x1")
x1.save()
self.assertEqual(repr(o1.multimodel), '<MultiModel: Multimodel x1>')
# This will fail because each one-to-one field must be unique (and
# link2=o1 was used for x1, above).
mm = MultiModel(link1=self.p2, link2=o1, name="x1")
with self.assertRaises(IntegrityError):
with transaction.atomic():
mm.save()
def test_unsaved_object(self):
"""
#10811 -- Assigning an unsaved object to a OneToOneField
should raise an exception.
"""
place = Place(name='User', address='London')
with self.assertRaisesMessage(ValueError,
'Cannot assign "%r": "%s" instance isn\'t saved in the database.'
% (place, Restaurant.place.field.remote_field.model._meta.object_name)):
Restaurant.objects.create(place=place, serves_hot_dogs=True, serves_pizza=False)
bar = UndergroundBar()
p = Place(name='User', address='London')
with self.assertRaisesMessage(ValueError,
'Cannot assign "%r": "%s" instance isn\'t saved in the database.'
% (bar, p._meta.object_name)):
p.undergroundbar = bar
def test_unsaved_object_check_override(self):
"""
#24495 - Assigning an unsaved object to a OneToOneField
should be allowed when the allow_unsaved_instance_assignment
attribute has been set to True.
"""
class UnsavedOneToOneField(models.OneToOneField):
# A OneToOneField which can point to an unsaved object
allow_unsaved_instance_assignment = True
class Band(models.Model):
name = models.CharField(max_length=50)
class BandManager(models.Model):
band = UnsavedOneToOneField(Band, models.CASCADE)
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
band = Band(name='The Beatles')
manager = BandManager(first_name='Brian', last_name='Epstein')
# This should not raise an exception as the OneToOneField between
# manager and band has allow_unsaved_instance_assignment=True.
manager.band = band
self.assertEqual(manager.band, band)
def test_reverse_relationship_cache_cascade(self):
"""
Regression test for #9023: accessing the reverse relationship shouldn't
result in a cascading delete().
"""
bar = UndergroundBar.objects.create(place=self.p1, serves_cocktails=False)
# The bug in #9023: if you access the one-to-one relation *before*
# setting to None and deleting, the cascade happens anyway.
self.p1.undergroundbar
bar.place.name = 'foo'
bar.place = None
bar.save()
self.p1.delete()
self.assertEqual(Place.objects.all().count(), 1)
self.assertEqual(UndergroundBar.objects.all().count(), 1)
def test_create_models_m2m(self):
"""
Regression test for #1064 and #1506
Check that we create models via the m2m relation if the remote model
has a OneToOneField.
"""
f = Favorites(name='Fred')
f.save()
f.restaurants = [self.r1]
self.assertQuerysetEqual(
f.restaurants.all(),
['<Restaurant: Demon Dogs the restaurant>']
)
def test_reverse_object_cache(self):
"""
Regression test for #7173
Check that the name of the cache for the reverse object is correct.
"""
self.assertEqual(self.p1.restaurant, self.r1)
self.assertEqual(self.p1.bar, self.b1)
def test_related_object_cache(self):
""" Regression test for #6886 (the related-object cache) """
# Look up the objects again so that we get "fresh" objects
p = Place.objects.get(name="Demon Dogs")
r = p.restaurant
# Accessing the related object again returns the exactly same object
self.assertIs(p.restaurant, r)
# But if we kill the cache, we get a new object
del p._restaurant_cache
self.assertIsNot(p.restaurant, r)
# Reassigning the Restaurant object results in an immediate cache update
# We can't use a new Restaurant because that'll violate one-to-one, but
# with a new *instance* the assertIs test below will fail if #6886 regresses.
r2 = Restaurant.objects.get(pk=r.pk)
p.restaurant = r2
self.assertIs(p.restaurant, r2)
# Assigning None succeeds if field is null=True.
ug_bar = UndergroundBar.objects.create(place=p, serves_cocktails=False)
ug_bar.place = None
self.assertIsNone(ug_bar.place)
# Assigning None fails: Place.restaurant is null=False
self.assertRaises(ValueError, setattr, p, 'restaurant', None)
# You also can't assign an object of the wrong type here
self.assertRaises(ValueError, setattr, p, 'restaurant', p)
# Creation using keyword argument should cache the related object.
p = Place.objects.get(name="Demon Dogs")
r = Restaurant(place=p)
self.assertIs(r.place, p)
# Creation using attname keyword argument and an id will cause the related
# object to be fetched.
p = Place.objects.get(name="Demon Dogs")
r = Restaurant(place_id=p.id)
self.assertIsNot(r.place, p)
self.assertEqual(r.place, p)
def test_filter_one_to_one_relations(self):
"""
Regression test for #9968
filtering reverse one-to-one relations with primary_key=True was
misbehaving. We test both (primary_key=True & False) cases here to
prevent any reappearance of the problem.
"""
Target.objects.create()
self.assertQuerysetEqual(
Target.objects.filter(pointer=None),
['<Target: Target object>']
)
self.assertQuerysetEqual(
Target.objects.exclude(pointer=None),
[]
)
self.assertQuerysetEqual(
Target.objects.filter(second_pointer=None),
['<Target: Target object>']
)
self.assertQuerysetEqual(
Target.objects.exclude(second_pointer=None),
[]
)
def test_reverse_object_does_not_exist_cache(self):
"""
Regression for #13839 and #17439.
DoesNotExist on a reverse one-to-one relation is cached.
"""
p = Place(name='Zombie Cats', address='Not sure')
p.save()
with self.assertNumQueries(1):
with self.assertRaises(Restaurant.DoesNotExist):
p.restaurant
with self.assertNumQueries(0):
with self.assertRaises(Restaurant.DoesNotExist):
p.restaurant
def test_reverse_object_cached_when_related_is_accessed(self):
"""
Regression for #13839 and #17439.
The target of a one-to-one relation is cached
when the origin is accessed through the reverse relation.
"""
# Use a fresh object without caches
r = Restaurant.objects.get(pk=self.r1.pk)
p = r.place
with self.assertNumQueries(0):
self.assertEqual(p.restaurant, r)
def test_related_object_cached_when_reverse_is_accessed(self):
"""
Regression for #13839 and #17439.
The origin of a one-to-one relation is cached
when the target is accessed through the reverse relation.
"""
# Use a fresh object without caches
p = Place.objects.get(pk=self.p1.pk)
r = p.restaurant
with self.assertNumQueries(0):
self.assertEqual(r.place, p)
def test_reverse_object_cached_when_related_is_set(self):
"""
Regression for #13839 and #17439.
The target of a one-to-one relation is always cached.
"""
p = Place(name='Zombie Cats', address='Not sure')
p.save()
self.r1.place = p
self.r1.save()
with self.assertNumQueries(0):
self.assertEqual(p.restaurant, self.r1)
def test_reverse_object_cached_when_related_is_unset(self):
"""
Regression for #13839 and #17439.
The target of a one-to-one relation is always cached.
"""
b = UndergroundBar(place=self.p1, serves_cocktails=True)
b.save()
with self.assertNumQueries(0):
self.assertEqual(self.p1.undergroundbar, b)
b.place = None
b.save()
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
self.p1.undergroundbar
def test_get_reverse_on_unsaved_object(self):
"""
Regression for #18153 and #19089.
Accessing the reverse relation on an unsaved object
always raises an exception.
"""
p = Place()
# When there's no instance of the origin of the one-to-one
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
p.undergroundbar
UndergroundBar.objects.create()
# When there's one instance of the origin
# (p.undergroundbar used to return that instance)
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
p.undergroundbar
# Several instances of the origin are only possible if the database
# allows inserting multiple NULL rows for a unique constraint.
if connection.features.supports_nullable_unique_constraints:
UndergroundBar.objects.create()
# When there are several instances of the origin
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
p.undergroundbar
def test_set_reverse_on_unsaved_object(self):
"""
Writing to the reverse relation on an unsaved object
is impossible too.
"""
p = Place()
b = UndergroundBar.objects.create()
with self.assertNumQueries(0):
with self.assertRaises(ValueError):
p.undergroundbar = b
def test_nullable_o2o_delete(self):
u = UndergroundBar.objects.create(place=self.p1)
u.place_id = None
u.save()
self.p1.delete()
self.assertTrue(UndergroundBar.objects.filter(pk=u.pk).exists())
self.assertIsNone(UndergroundBar.objects.get(pk=u.pk).place)
def test_hidden_accessor(self):
"""
When a '+' ending related name is specified no reverse accessor should
be added to the related model.
"""
self.assertFalse(
hasattr(Target, HiddenPointer._meta.get_field('target').remote_field.get_accessor_name())
)
def test_related_object(self):
public_school = School.objects.create(is_public=True)
public_director = Director.objects.create(school=public_school, is_temp=False)
private_school = School.objects.create(is_public=False)
private_director = Director.objects.create(school=private_school, is_temp=True)
# Only one school is available via all() due to the custom default manager.
self.assertQuerysetEqual(
School.objects.all(),
["<School: School object>"]
)
# Only one director is available via all() due to the custom default manager.
self.assertQuerysetEqual(
Director.objects.all(),
["<Director: Director object>"]
)
self.assertEqual(public_director.school, public_school)
self.assertEqual(public_school.director, public_director)
# Make sure the base manager is used so that the related object is
# still accessible even if the default manager doesn't normally
# allow it.
self.assertEqual(private_director.school, private_school)
# Make sure the base manager is used so that a school can still access
# its related director even if the default manager doesn't normally
# allow it.
self.assertEqual(private_school.director, private_director)
# If the manager is marked "use_for_related_fields", it'll get used instead
# of the "bare" queryset. Usually you'd define this as a property on the class,
# but this approximates that in a way that's easier in tests.
School.objects.use_for_related_fields = True
try:
private_director = Director._base_manager.get(pk=private_director.pk)
self.assertRaises(School.DoesNotExist, lambda: private_director.school)
finally:
School.objects.use_for_related_fields = False
Director.objects.use_for_related_fields = True
try:
private_school = School._base_manager.get(pk=private_school.pk)
self.assertRaises(Director.DoesNotExist, lambda: private_school.director)
finally:
Director.objects.use_for_related_fields = False
def test_hasattr_related_object(self):
# The exception raised on attribute access when a related object
# doesn't exist should be an instance of a subclass of `AttributeError`
# refs #21563
self.assertFalse(hasattr(Director(), 'director'))
self.assertFalse(hasattr(School(), 'school'))
def test_update_one_to_one_pk(self):
p1 = Place.objects.create()
p2 = Place.objects.create()
r1 = Restaurant.objects.create(place=p1)
r2 = Restaurant.objects.create(place=p2)
w = Waiter.objects.create(restaurant=r1)
Waiter.objects.update(restaurant=r2)
w.refresh_from_db()
self.assertEqual(w.restaurant, r2)
def test_rel_pk_subquery(self):
r = Restaurant.objects.first()
q1 = Restaurant.objects.filter(place_id=r.pk)
# Test that subquery using primary key and a query against the
# same model works correctly.
q2 = Restaurant.objects.filter(place_id__in=q1)
self.assertQuerysetEqual(q2, [r], lambda x: x)
# Test that subquery using 'pk__in' instead of 'place_id__in' works, too.
q2 = Restaurant.objects.filter(
pk__in=Restaurant.objects.filter(place__id=r.place.pk)
)
self.assertQuerysetEqual(q2, [r], lambda x: x)
def test_rel_pk_exact(self):
r = Restaurant.objects.first()
r2 = Restaurant.objects.filter(pk__exact=r).first()
self.assertEqual(r, r2)
|
mewtaylor/django
|
tests/one_to_one/tests.py
|
Python
|
bsd-3-clause
| 20,355
|
[
"Brian"
] |
e3ea417a509c11d5fe04c4dac1d6303746de891a2610c45b19e47db6bb9af95c
|
# spud - keep track of photos
# Copyright (C) 2008-2013 Brian May
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, unicode_literals
import environ
from .defaults import * # NOQA
exec(open("/etc/spud/settings.py", "rb").read())
# SETTINGS FROM DOCKER
env = environ.Env()
BUILD_DATE = env('BUILD_DATE', default=None)
VCS_REF = env('VCS_REF', default=None)
|
brianmay/spud
|
spud/settings.py
|
Python
|
gpl-3.0
| 987
|
[
"Brian"
] |
62c08bd5d600a08daec9c20e5dbc4334714b29df2022553b1caf613a41005f7a
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Initialize and configure *Flask-Script* extension."""
from __future__ import print_function
import re
import functools
from flask import flash, current_app
from flask.ext.registry import RegistryProxy, ModuleAutoDiscoveryRegistry
from flask.ext.script import Manager as FlaskExtManager
from flask.ext.script.commands import Shell, Server, ShowUrls, Clean
from six.moves import urllib
from types import FunctionType
from werkzeug.utils import import_string, find_modules
from invenio.base.signals import pre_command, post_command
def change_command_name(method=None, new_name=None):
"""Change command name to `new_name` or replace '_' by '-'."""
if method is None:
return functools.partial(change_command_name, new_name=new_name)
if new_name is None:
new_name = method.__name__.replace('_', '-')
method.__name__ = new_name
return method
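# A minimal usage sketch (illustrative, not part of the original module):
# applying the decorator with no arguments renames a command by swapping
# '_' for '-'.
#
#   @change_command_name
#   def run_tests():
#       pass          # exposed to the manager as "run-tests"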
def generate_secret_key():
"""Generate secret key."""
import string
import random
return ''.join([random.choice(string.ascii_letters + string.digits)
for dummy in range(0, 256)])
def print_progress(p, L=40, prefix='', suffix=''):
"""Print textual progress bar."""
bricks = int(p * L)
print('\r', prefix, end=' ')
print('[{0}{1}] {2}%'.format('#' * bricks, ' ' * (L - bricks),
int(p * 100)), end=' ')
print(suffix, end=' ')
def check_for_software_updates(flash_message=False):
"""Check for a new release of Invenio.
:return: True if the local version is the latest, False if an upgrade is
available, or None if the server was not reachable.
"""
from invenio.config import CFG_VERSION
from invenio.base.i18n import _
try:
find = re.compile(r'Invenio v[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9])?'
r' is released')
webFile = urllib.urlopen("http://invenio-software.org/repo"
"/invenio/tree/RELEASE-NOTES")
temp = ""
version = ""
version1 = ""
while 1:
temp = webFile.readline()
match1 = find.match(temp)
try:
version = match1.group()
break
except AttributeError:  # match1 is None until a line matches.
pass
if not temp:
break
webFile.close()
submatch = re.compile(r'[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9])?')
version1 = submatch.search(version)
web_version = version1.group().split(".")
local_version = CFG_VERSION.split(".")
# Compare release components numerically; plain string comparison
# would mis-order multi-digit components (e.g. "10" < "9").
web_nums = [int(re.match('[0-9]+', p).group()) for p in web_version[:3]]
local_nums = [int(re.match('[0-9]+', p).group()) for p in local_version[:3]]
if web_nums > local_nums:
if flash_message:
flash(_('A newer version of Invenio is available for '
'download. You may want to visit %s') %
('<a href=\"http://invenio-software.org/wiki'
'/Installation/Download\">http://invenio-software.org'
'/wiki/Installation/Download</a>'), 'warning')
return False
except Exception as e:
print(e)
if flash_message:
flash(_('Cannot download or parse release notes from http://'
'invenio-software.org/repo/invenio/tree/RELEASE-NOTES'),
'error')
return None
return True
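# Hedged walk-through of the comparison above (assumed values): a remote
# "Invenio v2.1.0 is released" line checked against CFG_VERSION "2.0.3"
# compares [2, 1, 0] > [2, 0, 3], so the function returns False,
# signalling that an upgrade is available.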
class Manager(FlaskExtManager):
"""Custom manager implementation with signaling support."""
def add_command(self, name, command):
"""Wrap default ``add_command`` method."""
sender = command.run if type(command.run) is FunctionType \
else command.__class__
class SignalingCommand(command.__class__):
def __call__(self, *args, **kwargs):
app = self.app if not len(args) else args[0]
with app.test_request_context():
pre_command.send(sender, args=args, **kwargs)
res = super(SignalingCommand, self).__call__(*args, **kwargs)
with app.test_request_context():
post_command.send(sender, args=args, **kwargs)
return res
command.__class__ = SignalingCommand
return super(Manager, self).add_command(name, command)
def set_serve_static_files(sender, *args, **kwargs):
"""Enable serving of static files for `runserver` command.
Normally Apache serves static files, but during development, when you are
using the standalone Werkzeug development server, you can set this flag to
`True` to enable static file serving.
"""
current_app.config.setdefault('CFG_FLASK_SERVE_STATIC_FILES', True)
pre_command.connect(set_serve_static_files, sender=Server)
def register_manager(manager):
"""Register all manager plugins and default commands with the manager."""
from six.moves.urllib.parse import urlparse
managers = RegistryProxy('managers', ModuleAutoDiscoveryRegistry, 'manage')
with manager.app.app_context():
for script in find_modules('invenio.base.scripts'):
manager.add_command(script.split('.')[-1],
import_string(script + ':manager'))
for script in managers:
if script.__name__ == 'invenio.base.manage':
continue
manager.add_command(script.__name__.split('.')[-2],
getattr(script, 'manager'))
manager.add_command("clean", Clean())
manager.add_command("show-urls", ShowUrls())
manager.add_command("shell", Shell())
parsed_url = urlparse(manager.app.config.get('CFG_SITE_URL'))
port = parsed_url.port or 80
host = parsed_url.hostname or '127.0.0.1'
runserver = Server(host=host, port=port)
manager.add_command("runserver", runserver)
# FIXME separation of concerns is violated here.
from invenio.ext.collect import collect
collect.init_script(manager)
from invenio.ext.assets import command, bower
manager.add_command("assets", command)
manager.add_command("bower", bower)
|
lnielsen/invenio
|
invenio/ext/script/__init__.py
|
Python
|
gpl-2.0
| 7,029
|
[
"VisIt"
] |
418f3fe8aab61df79767f861dedf85c1a345ed7e5eb838abc70984f5bad39cc3
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAims(RPackage):
"""This package contains the AIMS implementation. It contains
necessary functions to assign the five intrinsic molecular
subtypes (Luminal A, Luminal B, Her2-enriched, Basal-like,
Normal-like). Assignments can be made on individual samples
as well as on a dataset of gene expression data."""
homepage = "http://bioconductor.org/packages/AIMS/"
git = "https://git.bioconductor.org/packages/AIMS.git"
version('1.8.0', commit='86b866c20e191047492c51b43e3f73082c3f8357')
depends_on('r@3.4.0:3.4.9', when='@1.8.0')
depends_on('r-e1071', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
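# Illustrative install sketch (assumed, not part of the original recipe):
# a pinned install of this package would look like
#   spack install r-aims@1.8.0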
|
krafczyk/spack
|
var/spack/repos/builtin/packages/r-aims/package.py
|
Python
|
lgpl-2.1
| 1,934
|
[
"Bioconductor"
] |
5307ea7cd329b0b615e124e5759f84d949c43e6cf3e0879b6ad3a73c390dbb2c
|
#!/usr/bin/env python
"""Simple parsers for configuration files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
import logging
import re
from future.builtins import zip
from future.utils import iteritems
from future.utils import string_types
from typing import Text
from grr_response_core.lib import lexer
from grr_response_core.lib import parser
from grr_response_core.lib import parsers
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import anomaly as rdf_anomaly
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import config_file as rdf_config_file
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
from grr_response_core.lib.rdfvalues import standard as rdf_standard
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import precondition
def AsIter(arg):
"""Encapsulates an argument in a tuple, if it's not already iterable."""
if isinstance(arg, string_types):
rslt = [arg]
elif isinstance(arg, collections.Iterable):
rslt = arg
elif not arg:
rslt = []
else:
rslt = [arg]
return tuple(rslt)
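# Illustrative behaviour of AsIter (sketch, not part of the original module):
#   AsIter("abc")       -> ("abc",)   # strings are wrapped, not iterated
#   AsIter(["a", "b"])  -> ("a", "b")
#   AsIter(None)        -> ()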
# Grr lexer implementation of ssv parser. Considered using
# https://github.com/Eugeny/reconfigure/blob/master/reconfigure/parsers/ssv.py
# but it doesn't appear to support forward lookup.
class FieldParser(lexer.Lexer):
r"""A generalized field based parser that splits entries into fields.
Entries refer to distinct records within the text content, for example each
line of /etc/passwd or a ssh configuration attribute.
Fields are elements that make up the entry, for example the individual
parameters in /etc/passwd.
The parser supports:
- Flexible field based separators (e.g. spaces, commas, colons).
- Identification and removal of line comments. Inline comments (e.g. /*...*/)
are not supported.
- Line continuation detection.
- Multiline quotes.
The parser uses the following attributes as defaults:
- comments: #
- cont: \ (followed by any amount of whitespace)
- ml_quote: False (by default, quotes must close before newlines).
- quot: Both " and ' characters.
- sep: Whitespace
- term: Newlines.
To override default values, pass in appropriate keywords with a python
compatible regex string.
"""
def __init__(self,
comments=r"#",
cont=r"\\\s*\n",
ml_quote=False,
quot=(r"\"", r"'"),
sep=r"[ \t\f\v]+",
term=r"[\r\n]",
verbose=0):
r"""A generalized field-based parser.
Handles whitespace, csv etc.
Args:
comments: Line comment patterns (e.g. "#").
cont: Continuation patterns (e.g. "\\").
ml_quote: Boolean flag to allow quoted strings to span lines.
quot: Quotation patterns (e.g. "\\"" or "'").
sep: Field separator patterns (e.g. "[\\s,]").
term: Entry termination patterns (e.g. "\\n").
verbose: Enable verbose mode for the lexer. Useful for debugging.
"""
super(FieldParser, self).__init__()
self.entries = []
self.fields = []
self.field = ""
self.comments = AsIter(comments)
self.cont = AsIter(cont)
self.ml_quote = AsIter(ml_quote)
self.quot = AsIter(quot)
self.sep = AsIter(sep)
self.term = AsIter(term)
self.verbose = verbose
self._GenStates()
def Reset(self):
super(FieldParser, self).Reset()
self.entries = []
self.fields = []
self.field = ""
def _GenStates(self):
"""Generate the lexer states."""
self.GenCommentState()
self.GenFwdState()
self.GenQuotedState()
self.GenCatchallState()
def _AddToken(self, state_regex, regex, actions, next_state):
self._tokens.append(lexer.Token(state_regex, regex, actions, next_state))
def GenCommentState(self):
if self.comments:
self._AddToken("COMMENT", r"\n", "PushBack,PopState", None)
self._AddToken("COMMENT", ".", None, None)
def GenFwdState(self):
"""Generates forwarding state rules.
The lexer will fast forward until there is string content. The
string content will be returned to the string processor.
"""
for c in self.cont:
self._AddToken("FWD", c, None, None)
for s in self.sep:
self._AddToken("FWD", s, None, None)
self._AddToken("FWD", ".", "PushBack,PopState", None)
def GenQuotedState(self):
"""Generate string matching state rules."""
for i, q in enumerate(self.quot):
label = "%s_STRING" % i
escaped = re.escape(q)
self._AddToken(label, escaped, "PopState", None)
self._AddToken(label, q, "PopState", None)
if self.ml_quote:
self._AddToken(label, r"\n", None, None)
else:
self._AddToken(label, r"\n", "BadLine", None)
self._AddToken(label, ".", "AddToField", None)
def GenCatchallState(self):
"""Generate string matching state rules.
This sets up initial state handlers that cover both the 'INITIAL' state
and the intermediate content between fields.
The lexer acts on items with precedence:
- continuation characters: use the fast forward state rules.
- field separators: finalize processing the field.
- quotation characters: use the quotation state rules.
"""
for c in self.comments:
self._AddToken(".", c, "PushState,EndField", "COMMENT")
for c in self.cont:
self._AddToken(".", c, "PushState", "FWD")
for t in self.term:
self._AddToken(".", t, "EndEntry", None)
for s in self.sep:
self._AddToken(".", s, "EndField", None)
for i, q in enumerate(self.quot):
self._AddToken(".", q, "PushState", "%s_STRING" % i)
self._AddToken(".", ".", "AddToField", None)
def EndEntry(self, **_):
self.EndField()
if self.fields:
# Copy the fields into the processed entries.
self.entries.append(self.fields[:])
self.fields = []
def AddToField(self, string="", **_):
if string:
self.field += string
def EndField(self, **_):
if self.field:
self.fields.append(self.field[:])
self.field = ""
def BadLine(self, **_):
logging.debug("Skipped bad line in file at %s", self.processed)
self.field = ""
def ParseEntries(self, data):
precondition.AssertType(data, Text)
# Flush any old results.
self.Reset()
self.Feed(data)
self.Close()
# In case there isn't a terminating field at the end of the feed, e.g. \n
self.EndEntry()
return self.entries
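# A minimal usage sketch for FieldParser (illustrative, using the default
# whitespace separators documented above):
#   parser = FieldParser()
#   parser.ParseEntries("one two three\nfour five\n")
#   -> [["one", "two", "three"], ["four", "five"]]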
class KeyValueParser(FieldParser):
"""A generalized KeyValue parser that splits entries into key/value pairs.
Capabilities and parameters are identical to FieldParser, with one
difference: the parser also accepts the parameter "kv_sep". Patterns
specified in kv_sep are used to demarcate key/value processing. kv_sep
defaults to "=".
"""
def __init__(self,
comments=r"#",
cont=r"\\\s*\n",
kv_sep="=",
ml_quote=False,
quot=(r"\"", r"'"),
sep=r"[ \t\f\v]+",
term=r"[\r\n]",
verbose=0):
"""A generalized key-value parser.
Handles whitespace, csv etc.
Args:
comments: Line comment patterns (e.g. "#").
cont: Continuation patterns (e.g. "\\").
kv_sep: Key/Value separators (e.g. "=" or ":").
ml_quote: Boolean flag to allow quoted strings to span lines.
quot: Quotation patterns (e.g. "\\"" or "'").
sep: Field separator patterns (e.g. "[\\s,]").
term: Entry termination patterns (e.g. "\\n").
verbose: Enable verbose mode for the lexer. Useful for debugging.
"""
self.kv_sep = AsIter(kv_sep)
super(KeyValueParser, self).__init__(
comments=comments,
cont=cont,
ml_quote=ml_quote,
quot=quot,
sep=sep,
term=term,
verbose=verbose)
self.key_field = ""
def _GenStates(self):
self.GenCommentState()
self.GenFwdState()
self.GenQuotedState()
self.GenMatchFirstState()
self.GenInitialState()
self.GenKeyState()
self.GenValueState()
self.GenCatchallState()
def GenMatchFirstState(self):
for i, q in enumerate(self.quot):
self._AddToken(".", q, "PushState", "%s_STRING" % i)
for c in self.cont:
self._AddToken(".", c, "PushState", "FWD")
def GenInitialState(self):
for c in self.comments:
self._AddToken("INITIAL", c, "PushState,EndField", "COMMENT")
for t in self.term:
self._AddToken("INITIAL", t, "EndField,EndEntry", None)
for c in self.sep:
self._AddToken("INITIAL", c, "PushState", "FWD")
for k in self.kv_sep:
self._AddToken("INITIAL", k, "BadLine", None)
self._AddToken("INITIAL", ".", "PushState,PushBack", "KEY")
def GenKeyState(self):
for c in self.comments:
self._AddToken("KEY", c, "EndKeyField,EndEntry,PopState,PushBack",
"COMMENT")
for t in self.term:
self._AddToken("KEY", t, "EndKeyField,EndEntry,PopState", None)
for k in self.kv_sep:
self._AddToken("KEY", k, "EndKeyField", "VALUE")
def GenValueState(self):
for c in self.comments:
self._AddToken("VALUE", c, "EndField,EndEntry,PopState,PushBack",
"COMMENT")
for t in self.term:
self._AddToken("VALUE", t, "EndField,EndEntry,PopState", None)
for s in self.sep:
self._AddToken("VALUE", s, "EndField", None)
def GenCatchallState(self):
self._AddToken(".", ".", "AddToField", None)
def EndKeyField(self, **_):
self.key_field = self.field
self.field = ""
def EndEntry(self, **_):
# Finalize processing for non-terminated entries. Key first, then fields.
if self.field and not self.key_field:
self.EndKeyField()
else:
self.EndField()
# Set up the entry.
key_field = self.key_field.strip()
if key_field:
self.entries.append({key_field: self.fields})
self.key_field = ""
self.fields = []
def ParseToOrderedDict(self, data):
result = collections.OrderedDict()
for field in self.ParseEntries(data):
result.update(field)
return result
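# Hedged usage sketch for KeyValueParser (illustrative values):
#   KeyValueParser().ParseToOrderedDict("PATH = /usr/bin\nSHELL = /bin/sh\n")
#   -> OrderedDict([("PATH", ["/usr/bin"]), ("SHELL", ["/bin/sh"])])
# Values are collected as lists because a key may carry several fields.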
class NfsExportsParser(parsers.SingleFileParser):
"""Parser for NFS exports."""
output_types = [rdf_config_file.NfsExport]
supported_artifacts = ["NfsExportsFile"]
def __init__(self, *args, **kwargs):
super(NfsExportsParser, self).__init__(*args, **kwargs)
self._field_parser = FieldParser()
def ParseFile(self, knowledge_base, pathspec, filedesc):
del knowledge_base # Unused.
del pathspec # Unused.
for entry in self._field_parser.ParseEntries(
utils.ReadFileBytesAsUnicode(filedesc)):
if not entry:
continue
result = rdf_config_file.NfsExport()
result.share = entry[0]
for field in entry[1:]:
if field.startswith(("-", "(")):
result.defaults = field.strip("-()").split(",")
else:
client = rdf_config_file.NfsClient()
cfg = field.split("(", 1)
host = cfg[0]
if len(cfg) > 1:
options = cfg[1]
else:
options = None
client.host = host
if options:
client.options = options.strip("()").split(",")
result.clients.append(client)
yield result
class SshdFieldParser(object):
"""The base class for the ssh config parsers."""
# Specify the values that are boolean or integer. Anything else is a string.
_integers = ["clientalivecountmax",
"magicudsport",
"maxauthtries",
"maxsessions",
"port",
"protocol",
"serverkeybits",
"x11displayoffset"] # pyformat: disable
_booleans = ["allowagentforwarding",
"challengeresponseauthentication",
"dsaauthentication",
"gssapiauthentication",
"gssapicleanupcredentials",
"gssapikeyexchange",
"gssapistorecredentialsonrekey",
"gssapistrictacceptorcheck",
"hostbasedauthentication",
"ignorerhosts",
"ignoreuserknownhosts",
"kbdinteractiveauthentication",
"kerberosauthentication",
"passwordauthentication",
"permitemptypasswords",
"permittunnel",
"permituserenvironment",
"pubkeyauthentication",
"rhostsrsaauthentication",
"rsaauthentication",
"strictmodes",
"uselogin",
"usepam",
"x11forwarding",
"x11uselocalhost"] # pyformat: disable
# Valid ways that parameters can repeat
_repeated = {
"acceptenv": r"[\n\s]+",
"allowgroups": r"[\s]+",
"allowusers": r"[\s]+",
"authenticationmethods": r"[\s]+",
"authorizedkeysfile": r"[\s]+",
"ciphers": r"[,]+",
"denygroups": r"[\s]+",
"denyusers": r"[\s]+",
"forcecommand": r"[\n]+",
"hostkey": r"[\n]+",
"kexalgorithms": r"[,]+",
"listenaddress": r"[\n]+",
"macs": r"[,]+",
"permitopen": r"[\s]+",
"port": r"[,\n]+",
"protocol": r"[,]+",
"pubkeyacceptedkeytypes": r"[,]+",
"subsystem": r"[\n]+"
}
_true = ["yes", "true", "1"]
_aliases = {"dsaauthentication": "pubkeyauthentication"}
_match_keywords = [
"acceptenv", "allowagentforwarding", "allowgroups", "allowtcpforwarding",
"allowusers", "authenticationmethods", "authorizedkeyscommand",
"authorizedkeyscommanduser", "authorizedkeysfile",
"authorizedprincipalsfile", "banner", "chrootdirectory", "denygroups",
"denyusers", "forcecommand", "gatewayports", "gssapiauthentication",
"hostbasedauthentication", "hostbasedusesnamefrompacketonly",
"kbdinteractiveauthentication", "kerberosauthentication", "magicudspath",
"magicudsport", "maxauthtries", "maxsessions", "passwordauthentication",
"permitemptypasswords", "permitopen", "permitrootlogin",
"permittemphomedir", "permittty", "permittunnel",
"pubkeyacceptedkeytypes", "pubkeyauthentication", "rekeylimit",
"rhostsrsaauthentication", "rsaauthentication", "temphomedirpath",
"x11displayoffset", "x11forwarding", "x11uselocalhost"
]
def __init__(self):
super(SshdFieldParser, self).__init__()
self.Flush()
def Flush(self):
self.config = {}
self.matches = []
self.section = self.config
self.processor = self._ParseEntry
def ParseLine(self, line):
"""Extracts keyword/value settings from the sshd config.
The keyword is always the first string item.
Values are the remainder of the string. In cases where an sshd config
allows multiple values, these are split according to whatever separator(s)
sshd_config permits for that value.
Keywords and values are normalized. Keywords are converted to lowercase.
Values are converted into integers, booleans or strings. Strings are always
lowercased.
Args:
line: A line of the configuration file.
"""
kv = line.split(None, 1)
keyword = kv[0].lower()
# Safely set the argument string if it wasn't found.
values = kv[1:] or [""]
# Then split any parameters that are actually repeated items.
separators = self._repeated.get(keyword)
if separators:
repeated = []
for v in values:
repeated.extend(re.split(separators, v))
# Remove empty matches.
values = [v for v in repeated if v]
# Now convert the values to the right types.
if keyword in self._integers:
values = [int(v) for v in values]
elif keyword in self._booleans:
values = [v.lower() in self._true for v in values]
else:
values = [v.lower() for v in values]
# Only repeated arguments should be treated as a list.
if keyword not in self._repeated:
values = values[0]
# Switch sections for new match blocks.
if keyword == "match":
self._NewMatchSection(values)
# If it's an alias, resolve it.
if keyword in self._aliases:
keyword = self._aliases[keyword]
# Add the keyword/values to the section.
self.processor(keyword, values)
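# Illustrative normalization results for ParseLine (assumed input lines):
#   "Port 22"            -> self.config["port"] == [22]   # integer, repeated
#   "PermitRootLogin no" -> self.config["permitrootlogin"] == "no"
#   "UsePAM yes"         -> self.config["usepam"] is True  # boolean keyword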
def _ParseEntry(self, key, val):
"""Adds an entry for a configuration setting.
Args:
key: The name of the setting.
val: The value of the setting.
"""
if key in self._repeated:
setting = self.section.setdefault(key, [])
setting.extend(val)
else:
self.section.setdefault(key, val)
def _ParseMatchGrp(self, key, val):
"""Adds valid match group parameters to the configuration."""
if key in self._match_keywords:
self._ParseEntry(key, val)
def _NewMatchSection(self, val):
"""Create a new configuration section for each match clause.
Each match clause is added to the main config, and the criterion that will
trigger the match is recorded, as is the configuration.
Args:
val: The value following the 'match' keyword.
"""
section = {"criterion": val, "config": {}}
self.matches.append(section)
# Now add configuration items to config section of the match block.
self.section = section["config"]
# Switch to a match-specific processor on a new match_block.
self.processor = self._ParseMatchGrp
def GenerateResults(self):
matches = []
for match in self.matches:
criterion, config = match["criterion"], match["config"]
block = rdf_config_file.SshdMatchBlock(criterion=criterion, config=config)
matches.append(block)
yield rdf_config_file.SshdConfig(config=self.config, matches=matches)
class SshdConfigParser(parsers.SingleFileParser):
"""A parser for sshd_config files."""
supported_artifacts = ["SshdConfigFile"]
output_types = [rdf_config_file.SshdConfig]
def __init__(self, *args, **kwargs):
super(SshdConfigParser, self).__init__(*args, **kwargs)
self._field_parser = SshdFieldParser()
def ParseFile(self, knowledge_base, pathspec, filedesc):
del knowledge_base # Unused.
del pathspec # Unused.
# Clean out any residual state.
self._field_parser.Flush()
lines = [
l.strip() for l in utils.ReadFileBytesAsUnicode(filedesc).splitlines()
]
for line in lines:
# Remove comments (will break if it includes a quoted/escaped #)
line = line.split("#")[0].strip()
if line:
self._field_parser.ParseLine(line)
for result in self._field_parser.GenerateResults():
yield result
class SshdConfigCmdParser(parser.CommandParser):
"""A command parser for sshd -T output."""
supported_artifacts = ["SshdConfigCmd"]
output_types = [rdf_config_file.SshdConfig]
def __init__(self, *args, **kwargs):
super(SshdConfigCmdParser, self).__init__(*args, **kwargs)
self._field_parser = SshdFieldParser()
def Parse(self, cmd, args, stdout, stderr, return_val, knowledge_base):
# Clean out any residual state.
self._field_parser.Flush()
lines = [l.strip() for l in stdout.splitlines()]
for line in lines:
if line:
self._field_parser.ParseLine(line)
for result in self._field_parser.GenerateResults():
yield result
class MtabParser(parsers.SingleFileParser):
"""Parser for mounted filesystem data acquired from /proc/mounts."""
output_types = [rdf_client_fs.Filesystem]
supported_artifacts = ["LinuxProcMounts", "LinuxFstab"]
def __init__(self, *args, **kwargs):
super(MtabParser, self).__init__(*args, **kwargs)
self._field_parser = FieldParser()
def ParseFile(self, knowledge_base, pathspec, filedesc):
del knowledge_base # Unused.
del pathspec # Unused.
for entry in self._field_parser.ParseEntries(
utils.ReadFileBytesAsUnicode(filedesc)):
if not entry:
continue
result = rdf_client_fs.Filesystem()
result.device = compatibility.UnescapeString(entry[0])
result.mount_point = compatibility.UnescapeString(entry[1])
result.type = compatibility.UnescapeString(entry[2])
options = KeyValueParser(term=",").ParseToOrderedDict(entry[3])
# Keys without values get assigned [] by default. Because these keys are
# actually true, if declared, change any [] values to True.
for k, v in iteritems(options):
options[k] = v or [True]
result.options = rdf_protodict.AttributedDict(**options)
yield result
class MountCmdParser(parser.CommandParser):
"""Parser for mounted filesystem data acquired from the mount command."""
output_types = [rdf_client_fs.Filesystem]
supported_artifacts = ["LinuxMountCmd"]
mount_re = re.compile(r"(.*) on (.*) type (.*) \((.*)\)")
def __init__(self, *args, **kwargs):
super(MountCmdParser, self).__init__(*args, **kwargs)
self._field_parser = FieldParser()
def Parse(self, cmd, args, stdout, stderr, return_val, knowledge_base):
"""Parse the mount command output."""
_ = stderr, args, knowledge_base # Unused.
self.CheckReturn(cmd, return_val)
for entry in self._field_parser.ParseEntries(stdout):
line_str = " ".join(entry)
mount_rslt = self.mount_re.match(line_str)
if mount_rslt:
device, mount_point, fs_type, option_str = mount_rslt.groups()
result = rdf_client_fs.Filesystem()
result.device = device
result.mount_point = mount_point
result.type = fs_type
# Parse these options as a dict as some items may be key/values.
# KeyValue parser uses OrderedDict as the native parser method. Use it.
options = KeyValueParser(term=",").ParseToOrderedDict(option_str)
# Keys without values get assigned [] by default. Because these keys are
# actually true, if declared, change any [] values to True.
for k, v in iteritems(options):
options[k] = v or [True]
result.options = rdf_protodict.AttributedDict(**options)
yield result
class RsyslogFieldParser(FieldParser):
"""Field parser for syslog configurations."""
log_rule_re = re.compile(r"([\w,\*]+)\.([\w,!=\*]+)")
destinations = collections.OrderedDict([
("TCP", re.compile(r"(?:@@)([^;]*)")),
("UDP", re.compile(r"(?:@)([^;]*)")),
("PIPE", re.compile(r"(?:\|)([^;]*)")),
("NONE", re.compile(r"(?:~)([^;]*)")),
("SCRIPT", re.compile(r"(?:\^)([^;]*)")),
("MODULE", re.compile(r"(?::om\w:)([^;]*)")),
("FILE", re.compile(r"-?(/[^;]*)")), ("WALL", re.compile(r"(\*)"))
]) # pyformat: disable
def ParseAction(self, action):
"""Extract log configuration data from rsyslog actions.
Actions have the format:
<facility>/<severity> <type_def><destination>;<template>
e.g. *.* @@loghost.example.com.:514;RSYSLOG_ForwardFormat
Actions are selected by a type definition. These include:
"@@": TCP syslog
"@": UDP syslog
"|": Named pipe
"~": Drop to /dev/null
"^": Shell script
":om<string>:": An output module
Or a file path.
Args:
action: The action string from rsyslog.
Returns:
a rdfvalue.LogTarget message.
"""
rslt = rdf_config_file.LogTarget()
for dst_str, dst_re in iteritems(self.destinations):
dst = dst_re.match(action)
if dst:
rslt.transport = dst_str
rslt.destination = dst.group(1)
break
return rslt
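# Hedged examples of ParseAction (illustrative action strings):
#   "@@loghost.example.com:514" -> transport "TCP", destination
#                                  "loghost.example.com:514"
#   "/var/log/messages"         -> transport "FILE", destination
#                                  "/var/log/messages"
#   "*"                         -> transport "WALL", destination "*"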
class RsyslogParser(parsers.MultiFileParser):
"""Artifact parser for syslog configurations."""
output_types = [rdf_protodict.AttributedDict]
supported_artifacts = ["LinuxRsyslogConfigs"]
def __init__(self, *args, **kwargs):
super(RsyslogParser, self).__init__(*args, **kwargs)
self._field_parser = RsyslogFieldParser()
def ParseFiles(self, knowledge_base, pathspecs, filedescs):
del knowledge_base # Unused.
del pathspecs # Unused.
# TODO(user): review quoting and line continuation.
result = rdf_config_file.LogConfig()
for file_obj in filedescs:
for entry in self._field_parser.ParseEntries(
utils.ReadFileBytesAsUnicode(file_obj)):
directive = entry[0]
log_rule = self._field_parser.log_rule_re.match(directive)
if log_rule and entry[1:]:
target = self._field_parser.ParseAction(entry[1])
target.facility, target.priority = log_rule.groups()
result.targets.append(target)
return [result]
class PackageSourceParser(parsers.SingleFileParser):
"""Common code for APT and YUM source list parsing."""
output_types = [rdf_protodict.AttributedDict]
# Prevents this from automatically registering.
__abstract = True # pylint: disable=g-bad-name
def ParseFile(self, knowledge_base, pathspec, filedesc):
del knowledge_base # Unused.
uris_to_parse = self.FindPotentialURIs(filedesc)
uris = []
for url_to_parse in uris_to_parse:
url = rdf_standard.URI.FromHumanReadable(url_to_parse)
# If there is no transport, url_to_parse wasn't actually a valid URL;
# either host or path also has to exist for this to be a valid URL.
if url.transport and (url.host or url.path):
uris.append(url)
filename = pathspec.path
cfg = {"filename": filename, "uris": uris}
yield rdf_protodict.AttributedDict(**cfg)
def FindPotentialURIs(self, file_obj):
"""Stub Method to be overriden by APT and Yum source parsers."""
raise NotImplementedError("Please implement FindPotentialURIs.")
# TODO: Make sure all special cases are caught by this function.
def ParseURIFromKeyValues(self, data, separator, uri_key):
"""Parse key/value formatted source listing and return potential URLs.
The fundamental shape of this format is as follows:
key: value # here : = separator
key : value
URI: [URL] # here URI = uri_key
[URL] # this is where it becomes tricky because [URL]
[URL] # can contain 'separator', especially if separator is :
key: value
The key uri_key is of interest to us and since the next line
in the config could contain another [URL], we need to keep track of context
when we hit uri_key to be able to check if the next line(s)
have more [URL].
Args:
data: unprocessed lines from a file
separator: how the key/value pairs are separated
uri_key: starting name of the key containing URI.
Returns:
A list of potential URLs found in data
"""
precondition.AssertType(data, Text)
precondition.AssertType(separator, Text)
kv_entries = KeyValueParser(kv_sep=separator).ParseEntries(data)
spaced_entries = FieldParser().ParseEntries(data)
uris = []
check_uri_on_next_line = False
for kv_entry, sp_entry in zip(kv_entries, spaced_entries):
for k, v in iteritems(kv_entry):
# This line could be a URL if a) the value from key:value is empty, OR
# b) the separator is ":" and the first value starts with "/".
if (check_uri_on_next_line and
(not v or (separator == ":" and v[0].startswith("/")))):
uris.append(sp_entry[0])
else:
check_uri_on_next_line = False
if k.lower().startswith(uri_key) and v:
check_uri_on_next_line = True
uris.append(v[0]) # v is a list
return uris
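# Illustrative sketch of ParseURIFromKeyValues (assumed deb822-style input):
# for the data "Types: deb\nURIs: http://deb.example.org/ubuntu\nSuites: focal\n",
# a call with separator ":" and uri_key "uri" would return
# ["http://deb.example.org/ubuntu"].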
class APTPackageSourceParser(PackageSourceParser):
"""Parser for APT source lists to extract URIs only."""
supported_artifacts = ["APTSources"]
def FindPotentialURIs(self, file_obj):
"""Given a file, this will return all potenial APT source URIs."""
rfc822_format = "" # will contain all lines not in legacy format
uris_to_parse = []
for line in utils.ReadFileBytesAsUnicode(file_obj).splitlines(True):
# check if legacy style line - if it is then extract URL
m = re.search(r"^\s*deb(?:-\S+)?(?:\s+\[[^\]]*\])*\s+(\S+)(?:\s|$)", line)
if m:
uris_to_parse.append(m.group(1))
else:
rfc822_format += line
uris_to_parse.extend(self.ParseURIFromKeyValues(rfc822_format, ":", "uri"))
return uris_to_parse
class YumPackageSourceParser(PackageSourceParser):
"""Parser for Yum source lists to extract URIs only."""
supported_artifacts = ["YumSources"]
def FindPotentialURIs(self, file_obj):
"""Given a file, this will return all potenial Yum source URIs."""
return self.ParseURIFromKeyValues(
utils.ReadFileBytesAsUnicode(file_obj), "=", "baseurl")
class CronAtAllowDenyParser(parsers.SingleFileParser):
"""Parser for /etc/cron.allow /etc/cron.deny /etc/at.allow & /etc/at.deny."""
output_types = [rdf_protodict.AttributedDict]
supported_artifacts = ["CronAtAllowDenyFiles"]
def ParseFile(self, knowledge_base, pathspec, filedesc):
del knowledge_base # Unused.
lines = set([
l.strip() for l in utils.ReadFileBytesAsUnicode(filedesc).splitlines()
])
users = []
bad_lines = []
for line in lines:
# behaviour of At/Cron is undefined for lines with whitespace separated
# fields/usernames
if " " in line:
bad_lines.append(line)
elif line: # drop empty lines
users.append(line)
filename = pathspec.path
cfg = {"filename": filename, "users": users}
yield rdf_protodict.AttributedDict(**cfg)
if bad_lines:
yield rdf_anomaly.Anomaly(
type="PARSER_ANOMALY",
symptom="Dodgy entries in %s." % (filename),
reference_pathspec=pathspec,
finding=bad_lines)
class NtpdFieldParser(FieldParser):
"""Field parser for ntpd.conf file."""
output_types = [rdf_config_file.NtpConfig]
supported_artifacts = ["NtpConfFile"]
# The syntax is based on:
# https://www.freebsd.org/cgi/man.cgi?query=ntp.conf&sektion=5
# keywords with integer args.
_integers = set(["ttl", "hop"])
# keywords with floating point args.
_floats = set(["broadcastdelay", "calldelay"])
# keywords that have repeating args.
_repeated = set(["ttl", "hop"])
# keywords that set an option state, but can be "repeated" as well.
_boolean = set(["enable", "disable"])
# keywords that are keyed to their first argument, an address.
_address_based = set([
"trap", "fudge", "server", "restrict", "peer", "broadcast",
"manycastclient"
])
# keywords that append/augment the config.
_accumulators = set(["includefile", "setvar"])
# keywords that can appear multiple times, accumulating data each time.
_duplicates = _address_based | _boolean | _accumulators
# All the expected keywords.
_match_keywords = _integers | _floats | _repeated | _duplicates | set([
"autokey", "revoke", "multicastclient", "driftfile", "broadcastclient",
"manycastserver", "includefile", "interface", "disable", "includefile",
"discard", "logconfig", "logfile", "tos", "tinker", "keys", "keysdir",
"requestkey", "trustedkey", "crypto", "control", "statsdir", "filegen"
])
defaults = {
"auth": True,
"bclient": False,
"calibrate": False,
"kernel": False,
"monitor": True,
"ntp": True,
"pps": False,
"stats": False
}
def __init__(self):
super(NtpdFieldParser, self).__init__()
# ntp.conf has no line continuation. Override the default 'cont' values
# then parse up the lines.
self.cont = ""
self.config = self.defaults.copy()
self.keyed = {}
def ParseLine(self, entries):
"""Extracts keyword/value settings from the ntpd config.
The keyword is always the first entry item.
Values are the remainder of the entries. In cases where an ntpd config
allows multiple values, these are split according to whitespace or
duplicate entries.
Keywords and values are normalized. Keywords are converted to lowercase.
Values are converted into integers, floats or strings. Strings are always
lowercased.
Args:
entries: A list of items making up a single line of a ntp.conf file.
"""
# If no entries were found, short circuit.
if not entries:
return
keyword = entries[0].lower()
# Set the argument string if it wasn't found.
values = entries[1:] or [""]
# Convert any types we need too.
if keyword in self._integers:
values = [int(v) for v in values]
if keyword in self._floats:
values = [float(v) for v in values]
if keyword not in self._repeated | self._duplicates:
# We have a plain and simple single key/value config line.
if isinstance(values[0], string_types):
self.config[keyword] = " ".join(values)
else:
self.config[keyword] = values
elif keyword in self._repeated:
# The keyword can have multiple single-word options, so add them as a list
# and overwrite previous settings.
self.config[keyword] = values
elif keyword in self._duplicates:
if keyword in self._address_based:
# If we have an address keyed keyword, join the keyword and address
# together to make the complete key for this data.
address = values[0].lower()
values = values[1:] or [""]
# Add/overwrite the address in this 'keyed' keywords dictionary.
existing_keyword_config = self.keyed.setdefault(keyword, [])
# Create a dict which stores the server name and the options.
# Flatten the remaining options into a single string.
existing_keyword_config.append({
"address": address,
"options": " ".join(values)
})
# Are we toggling an option?
elif keyword in self._boolean:
for option in values:
if keyword == "enable":
self.config[option] = True
else:
# As there are only two items in this set, we can assume disable.
self.config[option] = False
else:
# We have a non-keyed & non-boolean keyword, so add to the collected
# data so far. Order matters technically.
prev_settings = self.config.setdefault(keyword, [])
prev_settings.append(" ".join(values))
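# Illustrative ParseLine outcomes (assumed entries, one per config line):
#   ["driftfile", "/var/lib/ntp/drift"] -> config["driftfile"] ==
#       "/var/lib/ntp/drift"
#   ["ttl", "127", "191"]               -> config["ttl"] == [127, 191]
#   ["server", "0.pool.ntp.org", "iburst"] -> keyed["server"] gains
#       {"address": "0.pool.ntp.org", "options": "iburst"}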
class NtpdParser(parsers.SingleFileParser):
"""Artifact parser for ntpd.conf file."""
def ParseFile(self, knowledge_base, pathspec, filedesc):
del knowledge_base # Unused.
del pathspec # Unused.
# TODO(hanuszczak): This parser only allows single use because it messes
# with its state. This should be fixed.
field_parser = NtpdFieldParser()
for line in field_parser.ParseEntries(
utils.ReadFileBytesAsUnicode(filedesc)):
field_parser.ParseLine(line)
yield rdf_config_file.NtpConfig(
config=field_parser.config,
server=field_parser.keyed.get("server"),
restrict=field_parser.keyed.get("restrict"),
fudge=field_parser.keyed.get("fudge"),
trap=field_parser.keyed.get("trap"),
peer=field_parser.keyed.get("peer"),
broadcast=field_parser.keyed.get("broadcast"),
manycastclient=field_parser.keyed.get("manycastclient"))
def ParseMultiple(self, stats, file_objects, knowledge_base):
for s, f in zip(stats, file_objects):
for rslt in self.Parse(s, f, knowledge_base):
yield rslt
class SudoersFieldParser(FieldParser):
"""Parser for privileged configuration files such as sudoers and pam.d/su."""
# Regex to remove comments from the file. The first group in the OR condition
# handles comments that cover a full line, while also ignoring #include(dir).
# The second group in the OR condition handles comments that begin partways
# through a line, without matching UIDs or GIDs which are specified with # in
# the format.
# TODO(user): this regex fails to match '#32 users', but handles quite a
# lot else.
# TODO(user): this should be rewritten as a proper lexer
COMMENTS_RE = re.compile(r"(#(?!include(?:dir)?\s+)\D+?$)", re.MULTILINE)
ALIAS_TYPES = {
"User_Alias": rdf_config_file.SudoersAlias.Type.USER,
"Runas_Alias": rdf_config_file.SudoersAlias.Type.RUNAS,
"Host_Alias": rdf_config_file.SudoersAlias.Type.HOST,
"Cmnd_Alias": rdf_config_file.SudoersAlias.Type.CMD
}
ALIAS_FIELDS = {
"User_Alias": "users",
"Runas_Alias": "runas",
"Host_Alias": "hosts",
"Cmnd_Alias": "cmds"
}
DEFAULTS_KEY = "Defaults"
INCLUDE_KEYS = ["#include", "#includedir"]
def __init__(self, *args, **kwargs):
kwargs["comments"] = []
super(SudoersFieldParser, self).__init__(*args, **kwargs)
def _ExtractList(self, fields, ignores=(",",), terminators=()):
"""Extract a list from the given fields."""
extracted = []
i = 0
for i, field in enumerate(fields):
# Space-separated comma; ignore, but this is not a finished list.
# Similar for any other specified ignores (eg, equals sign).
if field in ignores:
continue
# However, some fields are specifically meant to terminate iteration.
if field in terminators:
break
extracted.append(field.strip("".join(ignores)))
# Check for continuation; this will either be a trailing comma or the
# next field after this one being a comma. The lookahead here is a bit
# nasty.
if not (field.endswith(",") or
set(fields[i + 1:i + 2]).intersection(ignores)):
break
return extracted, fields[i + 1:]
def ParseSudoersEntry(self, entry, sudoers_config):
"""Parse an entry and add it to the given SudoersConfig rdfvalue."""
key = entry[0]
if key in SudoersFieldParser.ALIAS_TYPES:
# Alias.
alias_entry = rdf_config_file.SudoersAlias(
type=SudoersFieldParser.ALIAS_TYPES.get(key), name=entry[1])
# Members of this alias, comma-separated.
members, _ = self._ExtractList(entry[2:], ignores=(",", "="))
field = SudoersFieldParser.ALIAS_FIELDS.get(key)
getattr(alias_entry, field).Extend(members)
sudoers_config.aliases.append(alias_entry)
elif key.startswith(SudoersFieldParser.DEFAULTS_KEY):
# Default.
# Identify scope if one exists (Defaults<scope> ...)
scope = None
if len(key) > len(SudoersFieldParser.DEFAULTS_KEY):
scope = key[len(SudoersFieldParser.DEFAULTS_KEY) + 1:]
# There can be multiple defaults on a line, for the one scope.
entry = entry[1:]
defaults, _ = self._ExtractList(entry)
for default in defaults:
default_entry = rdf_config_file.SudoersDefault(scope=scope)
# Extract key name and value(s).
default_name = default
value = []
if "=" in default_name:
default_name, remainder = default_name.split("=", 1)
value = [remainder]
default_entry.name = default_name
        if value:
default_entry.value = " ".join(value)
sudoers_config.defaults.append(default_entry)
elif key in SudoersFieldParser.INCLUDE_KEYS:
# TODO(user): make #includedir more obvious in the RDFValue somewhere
target = " ".join(entry[1:])
sudoers_config.includes.append(target)
else:
users, entry = self._ExtractList(entry)
hosts, entry = self._ExtractList(entry, terminators=("=",))
# Remove = from <user> <host> = <specs>
if entry[0] == "=":
entry = entry[1:]
# Command specification.
sudoers_entry = rdf_config_file.SudoersEntry(
users=users, hosts=hosts, cmdspec=entry)
sudoers_config.entries.append(sudoers_entry)
def Preprocess(self, data):
"""Preprocess the given data, ready for parsing."""
# Add whitespace to line continuations.
data = data.replace(":\\", ": \\")
# Strip comments manually because sudoers has multiple meanings for '#'.
data = SudoersFieldParser.COMMENTS_RE.sub("", data)
return data
class SudoersParser(parsers.SingleFileParser):
"""Artifact parser for privileged configuration files."""
output_types = [rdf_config_file.SudoersConfig]
supported_artifacts = ["UnixSudoersConfiguration"]
def __init__(self, *args, **kwargs):
super(SudoersParser, self).__init__(*args, **kwargs)
self._field_parser = SudoersFieldParser()
def ParseFile(self, knowledge_base, pathspec, filedesc):
del knowledge_base # Unused.
del pathspec # Unused.
self._field_parser.ParseEntries(
self._field_parser.Preprocess(utils.ReadFileBytesAsUnicode(filedesc)))
result = rdf_config_file.SudoersConfig()
for entry in self._field_parser.entries:
# Handle multiple entries in one line, eg:
# foo bar : baz
# ... would become ...
# [[foo, bar], [foo, baz]]
key = entry[0]
nested_entries = []
if ":" not in entry:
nested_entries = [entry]
else:
runner = []
for field in entry:
if field == ":":
nested_entries.append(runner)
runner = [key]
continue
runner.append(field)
nested_entries.append(runner)
for nested_entry in nested_entries:
self._field_parser.ParseSudoersEntry(nested_entry, result)
yield result
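# Hedged usage sketch (added comment, not part of the original module);
# io.BytesIO stands in for the collected file object:
#   import io
#   raw = b"User_Alias ADMINS = alice, bob\nADMINS ALL = (ALL) ALL\n"
#   for config in SudoersParser().ParseFile(None, None, io.BytesIO(raw)):
#       print(config.aliases, config.entries)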
|
dunkhong/grr
|
grr/core/grr_response_core/lib/parsers/config_file.py
|
Python
|
apache-2.0
| 41,134
|
[
"TINKER"
] |
195fb651b1cc9a25adc88fa74d3fbe3195b76ebf0b82ef25916a94d7ab84157b
|
"""Read genome build configurations from Galaxy *.loc and bcbio-nextgen resource files.
"""
from six.moves import configparser
import glob
import os
import sys
from xml.etree import ElementTree
import toolz as tz
import yaml
from bcbio import utils
from bcbio.cwl import cwlutils
from bcbio.distributed import objectstore
from bcbio.log import logger
from bcbio.ngsalign import star
from bcbio.pipeline import alignment
from bcbio.provenance import do
from bcbio.rnaseq import gtf
# ## bcbio-nextgen genome resource files
def get_resources(genome, ref_file, data):
"""Retrieve genome information from a genome-references.yaml file.
"""
base_dir = os.path.normpath(os.path.dirname(ref_file))
resource_file = os.path.join(base_dir, "%s-resources.yaml" % genome.replace("-test", ""))
if not os.path.exists(resource_file):
raise IOError("Did not find resource file for %s: %s\n"
"To update bcbio_nextgen.py with genome resources for standard builds, run:\n"
"bcbio_nextgen.py upgrade -u skip"
% (genome, resource_file))
with open(resource_file) as in_handle:
resources = yaml.load(in_handle)
def resource_file_path(x):
if isinstance(x, basestring) and os.path.exists(os.path.join(base_dir, x)):
return os.path.normpath(os.path.join(base_dir, x))
return x
cleaned = utils.dictapply(resources, resource_file_path)
return ensure_annotations(cleaned, data)
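# Illustrative sketch (added comment, not part of the original file): for
# genome "GRCh37" with ref_file "/bcbio/genomes/GRCh37/seq/GRCh37.fa", this
# looks for "/bcbio/genomes/GRCh37/seq/GRCh37-resources.yaml"; a "-test"
# suffix on the genome name is stripped before building the file name.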
def add_required_resources(resources):
"""Add empty values for required resources referenced in CWL
"""
required = [["variation", "cosmic"], ["variation", "dbsnp"]]
for key in required:
if not tz.get_in(key, resources):
resources = tz.update_in(resources, key, lambda x: None)
return resources
def ensure_annotations(resources, data):
"""Prepare any potentially missing annotations for downstream processing in a local directory.
"""
transcript_gff = tz.get_in(["rnaseq", "transcripts"], resources)
if transcript_gff and utils.file_exists(transcript_gff):
out_dir = os.path.join(tz.get_in(["dirs", "work"], data),
"inputs", "data", "annotations")
resources["rnaseq"]["gene_bed"] = gtf.gtf_to_bed(transcript_gff, out_dir)
return resources
# ## Utilities
def abs_file_paths(xs, base_dir=None, ignore_keys=None, fileonly_keys=None, cur_key=None,
do_download=True):
"""Normalize any file paths found in a subdirectory of configuration input.
base_dir -- directory to normalize relative paths to
    ignore_keys -- algorithm key names to skip normalization for (keywords, not files/directories)
fileonly_keys -- algorithm key names to only expand files (not directories)
cur_key -- current key when calling recursively
"""
ignore_keys = set([]) if ignore_keys is None else set(ignore_keys)
fileonly_keys = set([]) if fileonly_keys is None else set(fileonly_keys)
if base_dir is None:
base_dir = os.getcwd()
orig_dir = os.getcwd()
os.chdir(base_dir)
input_dir = os.path.join(base_dir, "inputs")
if isinstance(xs, dict):
out = {}
for k, v in xs.items():
if k not in ignore_keys and v and isinstance(v, basestring):
if v.lower() == "none":
out[k] = None
else:
out[k] = abs_file_paths(v, base_dir, ignore_keys, fileonly_keys, k, do_download=do_download)
elif isinstance(v, (list, tuple)):
out[k] = [abs_file_paths(x, base_dir, ignore_keys, fileonly_keys, k, do_download=do_download)
for x in v]
else:
out[k] = v
elif isinstance(xs, basestring):
if os.path.exists(xs) or (do_download and objectstore.is_remote(xs)):
dl = objectstore.download(xs, input_dir)
if dl and cur_key not in ignore_keys and not (cur_key in fileonly_keys and not os.path.isfile(dl)):
out = os.path.normpath(os.path.join(base_dir, dl))
else:
out = xs
else:
out = xs
else:
out = xs
os.chdir(orig_dir)
return out
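# Illustrative sketch (added comment, not part of the original file): with
# base_dir="/data" and xs={"dbsnp": "dbsnp.vcf", "cosmic": "none"}, and
# assuming objectstore.download passes existing local paths through, the
# result is {"dbsnp": "/data/dbsnp.vcf", "cosmic": None}.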
# ## Galaxy integration -- *.loc files
def _get_galaxy_loc_file(name, galaxy_dt, ref_dir, galaxy_base):
"""Retrieve Galaxy *.loc file for the given reference/aligner name.
    First tries to find an aligner-specific *.loc file. If one is not
    defined or does not exist, fall back to the default reference file
    and remap from it.
"""
if "file" in galaxy_dt and os.path.exists(os.path.join(galaxy_base, galaxy_dt["file"])):
loc_file = os.path.join(galaxy_base, galaxy_dt["file"])
need_remap = False
elif alignment.TOOLS[name].galaxy_loc_file is None:
loc_file = os.path.join(ref_dir, alignment.BASE_LOCATION_FILE)
need_remap = True
else:
loc_file = os.path.join(ref_dir, alignment.TOOLS[name].galaxy_loc_file)
need_remap = False
if not os.path.exists(loc_file):
loc_file = os.path.join(ref_dir, alignment.BASE_LOCATION_FILE)
need_remap = True
return loc_file, need_remap
def _galaxy_loc_iter(loc_file, galaxy_dt, need_remap=False):
"""Iterator returning genome build and references from Galaxy *.loc file.
"""
if "column" in galaxy_dt:
dbkey_i = galaxy_dt["column"].index("dbkey")
path_i = galaxy_dt["column"].index("path")
else:
dbkey_i = None
if os.path.exists(loc_file):
with open(loc_file) as in_handle:
for line in in_handle:
if line.strip() and not line.startswith("#"):
parts = [x.strip() for x in line.strip().split("\t")]
# Detect and report spaces instead of tabs
if len(parts) == 1:
parts = [x.strip() for x in line.strip().split(" ") if x.strip()]
if len(parts) > 1:
raise IOError("Galaxy location file uses spaces instead of "
"tabs to separate fields: %s" % loc_file)
if dbkey_i is not None and not need_remap:
dbkey = parts[dbkey_i]
cur_ref = parts[path_i]
else:
if parts[0] == "index":
parts = parts[1:]
dbkey = parts[0]
cur_ref = parts[-1]
yield (dbkey, cur_ref)
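# Illustrative sketch (added comment, not part of the original file): with
# galaxy_dt["column"] == ["value", "dbkey", "name", "path"], the tab-separated
# line "hg19<TAB>hg19<TAB>Human (hg19)<TAB>/ref/hg19/seq/hg19.fa" yields
# ("hg19", "/ref/hg19/seq/hg19.fa").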
def _get_ref_from_galaxy_loc(name, genome_build, loc_file, galaxy_dt, need_remap,
galaxy_config, data):
"""Retrieve reference genome file from Galaxy *.loc file.
Reads from tool_data_table_conf.xml information for the index if it
exists, otherwise uses heuristics to find line based on most common setups.
"""
refs = [ref for dbkey, ref in _galaxy_loc_iter(loc_file, galaxy_dt, need_remap)
if dbkey == genome_build]
remap_fn = alignment.TOOLS[name].remap_index_fn
need_remap = remap_fn is not None
if len(refs) == 0:
logger.info("Downloading %s %s from AWS" % (genome_build, name))
cur_ref = download_prepped_genome(genome_build, data, name, need_remap)
# allow multiple references in a file and use the most recently added
else:
cur_ref = refs[-1]
# Find genome directory and check for packed wf tarballs
cur_ref_norm = os.path.normpath(utils.add_full_path(cur_ref, galaxy_config["tool_data_path"]))
base_dir_i = cur_ref_norm.find("/%s/" % genome_build)
base_dir = os.path.join(cur_ref_norm[:base_dir_i], genome_build)
for tarball in glob.glob(os.path.join(base_dir, "*-wf.tar.gz")):
cwlutils.unpack_tarballs(tarball, {"dirs": {"work": base_dir}}, use_subdir=False)
if need_remap:
assert remap_fn is not None, "%s requires remapping function from base location file" % name
cur_ref = os.path.normpath(utils.add_full_path(cur_ref, galaxy_config["tool_data_path"]))
cur_ref = remap_fn(os.path.abspath(cur_ref))
return cur_ref
def _get_galaxy_tool_info(galaxy_base):
"""Retrieve Galaxy tool-data information from defaults or galaxy config file.
"""
ini_file = os.path.join(galaxy_base, "universe_wsgi.ini")
info = {"tool_data_table_config_path": os.path.join(galaxy_base, "tool_data_table_conf.xml"),
"tool_data_path": os.path.join(galaxy_base, "tool-data")}
config = configparser.ConfigParser()
config.read(ini_file)
if "app:main" in config.sections():
for option in config.options("app:main"):
if option in info:
info[option] = os.path.join(galaxy_base, config.get("app:main", option))
return info
def _get_galaxy_data_table(name, dt_config_file):
"""Parse data table config file for details on tool *.loc location and columns.
"""
out = {}
if os.path.exists(dt_config_file):
tdtc = ElementTree.parse(dt_config_file)
for t in tdtc.getiterator("table"):
if t.attrib.get("name", "") in [name, "%s_indexes" % name]:
out["column"] = [x.strip() for x in t.find("columns").text.split(",")]
out["file"] = t.find("file").attrib.get("path", "")
return out
def get_refs(genome_build, aligner, galaxy_base, data):
"""Retrieve the reference genome file location from galaxy configuration.
"""
out = {}
name_remap = {"samtools": "fasta"}
if genome_build:
galaxy_config = _get_galaxy_tool_info(galaxy_base)
for name in [x for x in ("samtools", aligner) if x]:
galaxy_dt = _get_galaxy_data_table(name, galaxy_config["tool_data_table_config_path"])
loc_file, need_remap = _get_galaxy_loc_file(name, galaxy_dt, galaxy_config["tool_data_path"],
galaxy_base)
cur_ref = _get_ref_from_galaxy_loc(name, genome_build, loc_file, galaxy_dt, need_remap,
galaxy_config, data)
base = os.path.normpath(utils.add_full_path(cur_ref, galaxy_config["tool_data_path"]))
if os.path.isdir(base):
indexes = sorted(glob.glob(os.path.join(base, "*")))
elif name != "samtools":
indexes = sorted(glob.glob("%s*" % utils.splitext_plus(base)[0]))
else:
indexes = []
name = name_remap.get(name, name)
out[name] = {}
if os.path.exists(base) and os.path.isfile(base):
out[name]["base"] = base
if indexes:
out[name]["indexes"] = indexes
# For references, add compressed inputs and indexes if they exist
if name == "fasta" and "base" in out[name] and os.path.exists(out[name]["base"] + ".gz"):
indexes = [out[name]["base"] + ".gz.fai", out[name]["base"] + ".gz.gzi",
utils.splitext_plus(out[name]["base"])[0] + ".dict"]
out[name + "gz"] = {"base": out[name]["base"] + ".gz",
"indexes": [x for x in indexes if os.path.exists(x)]}
# add additional indices relative to the base
if tz.get_in(["fasta", "base"], out):
ref_dir, ref_filebase = os.path.split(out["fasta"]["base"])
out["rtg"] = os.path.normpath(os.path.join(ref_dir, os.path.pardir, "rtg",
"%s.sdf" % (os.path.splitext(ref_filebase)[0])))
twobit = os.path.normpath(os.path.join(ref_dir, os.path.pardir, "ucsc",
"%s.2bit" % (os.path.splitext(ref_filebase)[0])))
if os.path.exists(twobit):
out["twobit"] = twobit
return out
def get_builds(galaxy_base):
"""Retrieve configured genome builds and reference files, using Galaxy configuration files.
Allows multiple dbkey specifications in the same file, using the most recently added.
"""
name = "samtools"
galaxy_config = _get_galaxy_tool_info(galaxy_base)
galaxy_dt = _get_galaxy_data_table(name, galaxy_config["tool_data_table_config_path"])
loc_file, need_remap = _get_galaxy_loc_file(name, galaxy_dt, galaxy_config["tool_data_path"],
galaxy_base)
assert not need_remap, "Should not need to remap reference files"
fnames = {}
for dbkey, fname in _galaxy_loc_iter(loc_file, galaxy_dt):
fnames[dbkey] = fname
out = []
for dbkey in sorted(fnames.keys()):
out.append((dbkey, fnames[dbkey]))
return out
# ## Retrieve pre-prepared genomes
REMAP_NAMES = {"tophat2": ["bowtie2"],
"samtools": ["rtg", "seq"]}
INPLACE_INDEX = {"star": star.index}
def download_prepped_genome(genome_build, data, name, need_remap, out_dir=None):
"""Get a pre-prepared genome from S3, unpacking it locally.
Supports runs on AWS where we can retrieve the resources on demand. Upgrades
GEMINI in place if installed inside a Docker container with the biological data.
GEMINI install requires write permissions to standard data directories -- works
on AWS but not generalizable elsewhere.
"""
from bcbio.variation import population
from bcbio import install
if not out_dir:
out_dir = utils.safe_makedir(os.path.join(tz.get_in(["dirs", "work"], data),
"inputs", "data", "genomes"))
for target in REMAP_NAMES.get(name, [name]):
ref_dir = os.path.join(out_dir, genome_build, target)
if not os.path.exists(ref_dir):
if target in INPLACE_INDEX:
ref_file = glob.glob(os.path.normpath(os.path.join(ref_dir, os.pardir, "seq", "*.fa")))[0]
# Need to add genome resources so we can retrieve GTF files for STAR
data["genome_resources"] = get_resources(data["genome_build"], ref_file, data)
INPLACE_INDEX[target](ref_file, ref_dir, data)
else:
# XXX Currently only supports genomes from S3 us-east-1 bucket.
# Need to assess how slow this is from multiple regions and generalize to non-AWS.
fname = objectstore.BIODATA_INFO["s3"].format(build=genome_build, target=target)
try:
objectstore.connect(fname)
except:
raise ValueError("Could not find reference genome file %s %s" % (genome_build, name))
with utils.chdir(out_dir):
cmd = objectstore.cl_input(fname, unpack=False, anonpipe=False) + " | pigz -d -c | tar -xvp"
do.run(cmd.format(**locals()), "Download pre-prepared genome data: %s" % genome_build)
ref_file = glob.glob(os.path.normpath(os.path.join(ref_dir, os.pardir, "seq", "*.fa")))[0]
if data.get("genome_build"):
if (data.get("files") and population.do_db_build([data], need_bam=False)
and population.support_gemini_orig(data)):
# symlink base GEMINI directory to work directory, avoiding write/space issues
out_gemini_dir = utils.safe_makedir(os.path.join(os.path.dirname(ref_dir), "gemini_data"))
orig_gemini_dir = install.get_gemini_dir()
# Remove empty initial directory created by installer
if os.path.isdir(orig_gemini_dir) and len(os.listdir(orig_gemini_dir)) == 0:
if os.path.islink(orig_gemini_dir):
os.remove(orig_gemini_dir)
else:
os.rmdir(orig_gemini_dir)
if not os.path.exists(orig_gemini_dir):
os.symlink(out_gemini_dir, orig_gemini_dir)
cmd = [os.path.join(os.path.dirname(sys.executable), "gemini"), "update", "--dataonly"]
do.run(cmd, "Download GEMINI data")
genome_dir = os.path.join(out_dir, genome_build)
genome_build = genome_build.replace("-test", "")
if need_remap or name == "samtools":
return os.path.join(genome_dir, "seq", "%s.fa" % genome_build)
else:
ref_dir = os.path.join(genome_dir, REMAP_NAMES.get(name, [name])[-1])
base_name = os.path.commonprefix(os.listdir(ref_dir))
while base_name.endswith("."):
base_name = base_name[:-1]
return os.path.join(ref_dir, base_name)
|
biocyberman/bcbio-nextgen
|
bcbio/pipeline/genome.py
|
Python
|
mit
| 16,520
|
[
"Galaxy"
] |
f83c7695cb376c853981565d67b88ad348e7635f4f14d74de24d50b334cbafa4
|
###############################################################################
#
# $Id: stretcher.py 585 2010-12-15 05:21:28Z weegreenblobbie $
#
###############################################################################
from Nsound import *
# Read in the wavefile.
a1 = AudioStream("Temperature_in.wav")
# Grab sample rate.
sr = a1.getSampleRate()
# Grab the duration in seconds.
duration = a1.getDuration()
# Create a Gaussian curve for pitch/time shifting.
sin = Sine(sr)
bend = Buffer()
bend << sin.drawFatGaussian(duration, 0.15) + 1.0
# Create a Stretcher instance
stretch = Stretcher(sr, 0.08, 0.25)
# Print progress to command line.
stretch.showProgress(True)
print("Pitch Shifting Up")
# Create new output AudioStream.
out = AudioStream(sr, 2)
# Pitch shift the input AudioStream.
out << stretch.pitchShift(a1, bend)
out >> "Temperature_Pitch_Shifted_Up.wav"
print("Time Shifting Faster")
# Time shift input AudioStream
out = AudioStream(sr,2)
out << stretch.timeShift(a1, 1.0 / bend)
out >> "Temperature_Time_Shifted_Faster.wav"
bend = Buffer()
bend << 1.0 - 0.25 * sin.drawFatGaussian(duration, 0.15)
print("Pitch Shifting Down")
out = AudioStream(sr, 2)
out << stretch.pitchShift(a1, bend)
out >> "Temperature_Pitch_Shifted_Down.wav"
print("Time Shifting Slower")
bend = Buffer()
bend << 1.0 + 0.75 * sin.drawFatGaussian(duration, 0.15)
out = AudioStream(sr, 2)
out << stretch.timeShift(a1, bend)
out >> "Temperature_Time_Shifted_Slower.wav"
|
weegreenblobbie/nsound
|
src/examples/stretcher.py
|
Python
|
gpl-2.0
| 1,481
|
[
"Gaussian"
] |
af6d8b1d6777c2e7904b48798f88f487d8f26e67ef4318b4f68ff3d505e1f8e7
|
#################################################################################
# File Name: rmsdAnalysis.py
# Author: Kory Melton
# Date: 6/28/17
# Project: ProteinAnalysis
# Purpose: This file will define and implement the rmsdAnalysis class
# to allow the easy use of analyzing information for
# root mean square deviation (a measure of molecular movement)
#################################################################################
import math
class rmsdAnalysis:
#################################################################################
# Initializer
#################################################################################
def __init__(self, rmsd_file):
#################################################################################
# Function: __init__
#
# Description: Initializes the variables for the rmsdAnalysis class
#
# Parameters: rmsd_file - the log_file with the simulation data stored
#
# Returned: none
#################################################################################
#################################################################################
# File Variables
#################################################################################
self.rmsd_file = rmsd_file # this is a .dat file created from VMD to analyze RMSD of the simulation
self.rmsd_lines = self.rmsd_file.readlines() # the lines from the RMSD file
#################################################################################
# Data Variables
# Each variable is a list of a different column from the dat file. The specific
# measurement will be listed to the right
#################################################################################
self.frames = [] # this is a list of the frames from the VMD output
self.RMSDs = [] # this is a list of the RMSDs from the VMD output
self.times = [] # this is a list of the time for each corresponding frame
self.temps = [] # this is a list of the temperatures for each corresponding frame
#################################################################################
# Index variables
# Each variable represents the index for each variable in the log file
#################################################################################
self.FRAME_INDEX = 0 # the index of the frame when separating the lines from the file
        self.RMSD_INDEX = 1 # the index of the RMSD when separating the lines from the file
#################################################################################
# Functions
#################################################################################
def extractRMSD(self):
#################################################################################
# Function: extractRMSD
#
# Description: Extracts the RMSD information in two lists that can be used later
#
# Parameters: none
#
# Returned: none
#################################################################################
self.cleanRMSD()
for line in self.rmsd_lines: # step through the lines
vars = line.split() # split the line by spaces to get the two variables
# retrieve the two variables from vars
frame = int(vars[self.FRAME_INDEX])
RMSD = float(vars[self.RMSD_INDEX])
# store them in each list
self.frames.append(frame)
self.RMSDs.append(RMSD)
def combineSims(self, newSim):
#################################################################################
# Function: combineSims
#
# Description: combines the log information and the rmsd for a simulation
#
# Parameters: newSim - the new simulation object
#
# Returned: the new simulation object
#################################################################################
start = len(self.frames) + 1
end = start + len(newSim.rmsd.frames)
newSim.rmsd.frames.clear()
for count in range (start, end):
newSim.rmsd.frames.append(count)
self.frames.extend(newSim.rmsd.frames)
self.RMSDs.extend(newSim.rmsd.RMSDs)
self.times.extend(newSim.rmsd.times)
def cleanRMSD(self):
if self.rmsd_lines[0] == '0 0\n':
del self.rmsd_lines[0]
del self.rmsd_lines[0]
del self.rmsd_lines[0]
self.rmsd_lines.pop()
def calculateFrameTimes(self, simTimeStart, simTimeEnd):
#################################################################################
# Function: calculateFrameTimes
#
# Description: This will calculate the time for each frame and add it to a list
# of times. First, it will look to see how many frames there are
# and how long the simulation ran. Then, it will simply divide the
# the simulation time by the number of frames to get the time for
# each frame.
#
# Parameters: simTimeStart - the start of the sim in picoseconds
# simTimeEnd - the end of the sim in picoseconds
#
# Returned: none
#################################################################################
numFrames = len(self.frames)
simTime = simTimeEnd - simTimeStart
timePerFrame = simTime / numFrames
start = 0
end = numFrames
for frame in range(start, end):
time = simTimeStart + frame * timePerFrame
self.times.append(time)
def calculateFrameTemps(self, simFrames, temps):
#################################################################################
# Function: calculateFrameTemps
#
    # Description:  Uses the stride between the original simulation frames and
    #               the frames in the .dat file to look up the temperature for
    #               each frame, appending it to self.temps.
    #
    # Parameters:   simFrames - the number of frames in the original simulation
    #               temps - the list of temperatures from the original simulation
#
# Returned: none
#################################################################################
# a stride is the how often the frames from the original file are used
# so a stride of 3 means every 3rd frame from the original simulation was used
stride = simFrames / len(self.frames) # find the stride of the .dat file
step = math.floor(stride)
start = self.frames[0]
end = len(self.frames) + start
# use the stride and the frame in the .dat file to find the temperature
for frame in range(start, end):
tempIndex = frame * step # the index(or frame) from the original simulation
self.temps.append(temps[tempIndex - 1]) # append temps with the temperature from that index
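#################################################################################
# Hedged usage sketch (added, not part of the original file). io.StringIO
# stands in for the VMD .dat output file expected by the constructor.
#################################################################################
if __name__ == "__main__":
    import io
    fake_dat = io.StringIO("0 0\nheader\nheader\n"
                           "1 0.5\n2 0.7\n3 0.9\n"
                           "trailer\n")
    analysis = rmsdAnalysis(fake_dat)
    analysis.extractRMSD()                   # frames [1, 2, 3], RMSDs [0.5, 0.7, 0.9]
    analysis.calculateFrameTimes(0.0, 30.0)  # 10 ps per frame -> times [0.0, 10.0, 20.0]
    print(analysis.frames, analysis.RMSDs, analysis.times)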
|
melt6457/MMProteinAnalysis
|
Source/rmsdAnalysis.py
|
Python
|
mit
| 7,068
|
[
"VMD"
] |
13ff21d842f356112eeec49de7b24aa5c9d4ba4d5e8e0da84269ee225ca134b7
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.kernel import FloatTimeSeriesProperty
from mantid.simpleapi import (DeleteWorkspace, CreateSampleWorkspace,
AddSampleLog, AddTimeSeriesLog, EditInstrumentGeometry,
CloneWorkspace, CompareWorkspaces, FindEPP, SetInstrumentParameter)
from testhelpers import run_algorithm
from mantid.api import AnalysisDataService
from scipy.constants import N_A, hbar, k
import numpy as np
class ComputeCalibrationCoefVanTest(unittest.TestCase):
def setUp(self):
input_ws = CreateSampleWorkspace(
Function="User Defined",
UserDefinedFunction="name=LinearBackground, " +
"A0=0.3;name=Gaussian, PeakCentre=5, Height=10, Sigma=0.3",
NumBanks=2, BankPixelWidth=1, XMin=0, XMax=10, BinWidth=0.1,
BankDistanceFromSample=4.0)
self._input_ws = input_ws
self._table = FindEPP(input_ws, OutputWorkspace="table")
AddSampleLog(self._input_ws, LogName='wavelength', LogText='4.0',
LogType='Number', LogUnit='Angstrom')
for i in range(input_ws.getNumberHistograms()):
y = input_ws.dataY(i)
y.fill(0.)
y[51] = 100.
e = input_ws.dataE(i)
e.fill(0.)
e[51] = 10.
def test_output(self):
outputWorkspaceName = "output_ws"
alg_test = run_algorithm("ComputeCalibrationCoefVan",
VanadiumWorkspace=self._input_ws,
EPPTable=self._table,
OutputWorkspace=outputWorkspaceName)
self.assertTrue(alg_test.isExecuted())
wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)
# Output = Vanadium ws
self.assertEqual(wsoutput.getRun().getLogData('run_title').value,
self._input_ws.getRun().getLogData('run_title').value)
# Size of output workspace
self.assertEqual(wsoutput.getNumberHistograms(),
self._input_ws.getNumberHistograms())
DeleteWorkspace(wsoutput)
return
def test_sum(self):
outputWorkspaceName = "output_ws"
alg_test = run_algorithm("ComputeCalibrationCoefVan",
VanadiumWorkspace=self._input_ws,
EPPTable=self._table,
OutputWorkspace=outputWorkspaceName)
self.assertTrue(alg_test.isExecuted())
wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)
for i in range(wsoutput.getNumberHistograms()):
self.assertEqual(100., wsoutput.readY(i)[0])
self.assertEqual(10., wsoutput.readE(i)[0])
DeleteWorkspace(wsoutput)
def test_dwf_using_default_temperature(self):
outputWorkspaceName = "output_ws"
# change theta to make dwf != 1
EditInstrumentGeometry(self._input_ws, L2="4,8", Polar="0,15",
Azimuthal="0,0", DetectorIDs="1,2")
alg_test = run_algorithm("ComputeCalibrationCoefVan",
VanadiumWorkspace=self._input_ws,
EPPTable=self._table,
OutputWorkspace=outputWorkspaceName)
self.assertTrue(alg_test.isExecuted())
wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)
self._checkDWF(wsoutput, 293.0)
DeleteWorkspace(wsoutput)
def test_temperature_from_sample_log(self):
self._input_ws.mutableRun().addProperty('temperature', 0.0, True)
outputWorkspaceName = "output_ws"
EditInstrumentGeometry(self._input_ws, L2="4,8", Polar="0,15",
Azimuthal="0,0", DetectorIDs="1,2")
alg_test = run_algorithm("ComputeCalibrationCoefVan",
VanadiumWorkspace=self._input_ws,
EPPTable=self._table,
OutputWorkspace=outputWorkspaceName)
self.assertTrue(alg_test.isExecuted())
wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)
self._checkDWF(wsoutput, 0.0)
DeleteWorkspace(wsoutput)
def test_temperature_log_is_time_series(self):
outputWorkspaceName = "output_ws"
EditInstrumentGeometry(self._input_ws, L2="4,8", Polar="0,15",
Azimuthal="0,0", DetectorIDs="1,2")
AddTimeSeriesLog(
self._input_ws,
'temperature',
'2010-09-14T04:20:12',
Value='0.0')
AddTimeSeriesLog(
self._input_ws,
'temperature',
'2010-09-14T04:20:13',
Value='0.0')
AddTimeSeriesLog(
self._input_ws,
'temperature',
'2010-09-14T04:20:14',
Value='0.0')
alg_test = run_algorithm("ComputeCalibrationCoefVan",
VanadiumWorkspace=self._input_ws,
EPPTable=self._table,
OutputWorkspace=outputWorkspaceName)
self.assertTrue(alg_test.isExecuted())
wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)
self._checkDWF(wsoutput, 0.0)
def test_temperature_log_name_from_IPF(self):
self._input_ws.mutableRun().addProperty('sample.temperature', 0.0, True)
EditInstrumentGeometry(self._input_ws, L2="4,8", Polar="0,15",
Azimuthal="0,0", DetectorIDs="1,2")
SetInstrumentParameter(
Workspace=self._input_ws,
ParameterName="temperature_log_entry",
ParameterType="String",
Value="sample.temperature")
outputWorkspaceName = "output_ws"
alg_test = run_algorithm("ComputeCalibrationCoefVan",
VanadiumWorkspace=self._input_ws,
EPPTable=self._table,
OutputWorkspace=outputWorkspaceName)
self.assertTrue(alg_test.isExecuted())
wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)
self._checkDWF(wsoutput, 0.)
def test_temperature_input_overrides_sample_log(self):
self._input_ws.mutableRun().addProperty('temperature', 567.0, True)
outputWorkspaceName = "output_ws"
EditInstrumentGeometry(self._input_ws, L2="4,8", Polar="0,15",
Azimuthal="0,0", DetectorIDs="1,2")
alg_test = run_algorithm("ComputeCalibrationCoefVan",
VanadiumWorkspace=self._input_ws,
EPPTable=self._table,
OutputWorkspace=outputWorkspaceName,
Temperature=0.0)
self.assertTrue(alg_test.isExecuted())
wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)
self._checkDWF(wsoutput, 0.0)
DeleteWorkspace(wsoutput)
def test_input_not_modified(self):
backup = CloneWorkspace(self._input_ws)
outputWorkspaceName = "output_ws"
alg_test = run_algorithm("ComputeCalibrationCoefVan",
VanadiumWorkspace=self._input_ws,
EPPTable=self._table,
OutputWorkspace=outputWorkspaceName)
self.assertTrue(alg_test.isExecuted())
self.assertTrue(CompareWorkspaces(backup, self._input_ws)[0])
DeleteWorkspace(backup)
def test_disabled_debye_waller_correction(self):
outputWorkspaceName = "output_ws"
# change theta to make dwf != 1
EditInstrumentGeometry(self._input_ws, L2="4,8", Polar="0,15",
Azimuthal="0,0", DetectorIDs="1,2")
alg_test = run_algorithm("ComputeCalibrationCoefVan",
VanadiumWorkspace=self._input_ws,
EPPTable=self._table,
OutputWorkspace=outputWorkspaceName,
EnableDWF=False)
self.assertTrue(alg_test.isExecuted())
wsoutput = AnalysisDataService.retrieve(outputWorkspaceName)
for i in range(wsoutput.getNumberHistograms()):
self.assertEqual(100., wsoutput.readY(i)[0])
self.assertEqual(10., wsoutput.readE(i)[0])
DeleteWorkspace(wsoutput)
def tearDown(self):
if AnalysisDataService.doesExist(self._input_ws.name()):
DeleteWorkspace(self._input_ws)
if AnalysisDataService.doesExist(self._table.name()):
DeleteWorkspace(self._table)
def _checkDWF(self, wsoutput, temperature):
self.assertEqual(100., wsoutput.readY(0)[0])
self.assertEqual(10., wsoutput.readE(0)[0])
if temperature == 0.0:
integral = 0.5
elif temperature == 293.0:
integral = 4.736767162094296 / 3.0
else:
raise RuntimeError("Unsupported temperature supplied to " +
"_checkDWF(). Use 0K or 293K only.")
mvan = 0.001*50.942/N_A
Bcoef = 3.0*integral*1e+20*hbar*hbar/(2.0*mvan*k*389.0)
dwf = np.exp(
-1.0*Bcoef*(4.0*np.pi*np.sin(0.5*np.radians(15.0))/4.0)**2)
self.assertEqual(100./dwf, wsoutput.readY(1)[0])
self.assertEqual(10./dwf, wsoutput.readE(1)[0])
if __name__ == "__main__":
unittest.main()
|
mganeva/mantid
|
Framework/PythonInterface/test/python/plugins/algorithms/ComputeCalibrationCoefVanTest.py
|
Python
|
gpl-3.0
| 9,858
|
[
"Gaussian"
] |
1be90929626f634cba2a2660892689d38abb7cc66b2e1a4e55b63f5c0f27e1f2
|
import director.vtkAll as vtk
import director.objectmodel as om
from director import lcmUtils
# if bot_lcmgl cannot be imported then this module will not be able to
# support lcmgl, but it can still be imported in a disabled state
try:
import bot_lcmgl
import octomap as lcmOctomap
LCMGL_AVAILABLE = True
except ImportError:
LCMGL_AVAILABLE = False
class OctomapObject(om.ObjectModelItem):
def __init__(self, name, actor):
om.ObjectModelItem.__init__(self, name, om.Icons.Octomap)
self.actor = actor
self.actor.SetUseBounds(False)
self.addProperty('Visible', actor.GetVisibility())
self.addProperty('Alpha', 0.8, attributes=om.PropertyAttributes(decimals=2, minimum=0, maximum=1.0, singleStep=0.1, hidden=False))
self.addProperty('Color Mode', 2, attributes=om.PropertyAttributes(enumNames=['Flat', 'Print', 'Height', 'Gray', 'Semantic']))
self.addProperty('Occ. Space', 1, attributes=om.PropertyAttributes(enumNames=['Hide', 'Show']))
self.addProperty('Free Space', 0, attributes=om.PropertyAttributes(enumNames=['Hide', 'Show']))
self.addProperty('Structure', 0, attributes=om.PropertyAttributes(enumNames=['Hide', 'Show']))
self.addProperty('Tree Depth', 16, attributes=om.PropertyAttributes(decimals=0, minimum=1, maximum=16, singleStep=1.0))
self.views = []
def _onPropertyChanged(self, propertySet, propertyName):
om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName == 'Visible':
self.actor.SetVisibility(self.getProperty(propertyName))
self.renderAllViews()
elif propertyName == 'Alpha':
self.actor.setAlphaOccupied(self.getProperty(propertyName))
self.renderAllViews()
elif propertyName == 'Occ. Space':
self.actor.enableOcTreeCells(self.getProperty(propertyName))
self.renderAllViews()
elif propertyName == 'Free Space':
self.actor.enableFreespace(self.getProperty(propertyName))
self.renderAllViews()
elif propertyName == 'Structure':
self.actor.enableOctreeStructure(self.getProperty(propertyName))
self.renderAllViews()
elif propertyName == 'Tree Depth':
self.actor.changeTreeDepth(self.getProperty(propertyName))
self.renderAllViews()
elif propertyName == 'Color Mode':
heightColorMode = self.getProperty(propertyName)
self.actor.setColorMode(heightColorMode)
self.renderAllViews()
def addToView(self, view):
if view in self.views:
return
self.views.append(view)
view.renderer().AddActor(self.actor)
view.render()
def renderAllViews(self):
for view in self.views:
view.render()
def onRemoveFromObjectModel(self):
self.removeFromAllViews()
def removeFromAllViews(self):
for view in list(self.views):
self.removeFromView(view)
assert len(self.views) == 0
def removeFromView(self, view):
assert view in self.views
self.views.remove(view)
view.renderer().RemoveActor(self.actor)
view.render()
def onMessage(self, msgBytes):
#print "about to draw"
self.actor.UpdateOctomapData(msgBytes.data())
self.renderAllViews()
managerInstance = None
class OctomapManager(object):
def __init__(self, view):
assert LCMGL_AVAILABLE
self.view = view
self.subscriber = None
self.enable()
def isEnabled(self):
return self.subscriber is not None
def setEnabled(self, enabled):
if enabled and not self.subscriber:
#self.subscriber = lcmUtils.addSubscriber('LCMGL.*', callback=self.onMessage)
self.subscriber = lcmUtils.addSubscriber('OCTOMAP', callback=self.onMessage)
self.subscriber = lcmUtils.addSubscriber('OCTOMAP_REF', callback=self.onMessage)
self.subscriber = lcmUtils.addSubscriber('OCTOMAP_IN', callback=self.onMessage)
elif not enabled and self.subscriber:
lcmUtils.removeSubscriber(self.subscriber)
self.subscriber = None
def enable(self):
self.setEnabled(True)
def disable(self):
self.setEnabled(False)
def onMessage(self, msgBytes, channel):
#print " "
#print "got data"
msg = lcmOctomap.raw_t.decode(msgBytes.data())
drawObject = self.getDrawObject(channel)
if not drawObject:
drawObject = self.addDrawObject(channel, msgBytes)
drawObject.onMessage(msgBytes)
def getDrawObject(self, name):
parent = om.getOrCreateContainer('Octomap')
return parent.findChild(name)
def addDrawObject(self, name, msgBytes):
actor = vtk.vtkOctomap()
obj = OctomapObject(name, actor)
om.addToObjectModel(obj, om.getOrCreateContainer('Octomap'))
obj.addToView(self.view)
return obj
def init(view):
if not hasattr(vtk, 'vtkOctomap'):
return None
global managerInstance
managerInstance = OctomapManager(view)
return managerInstance
|
RobotLocomotion/director
|
src/python/director/lcmoctomap.py
|
Python
|
bsd-3-clause
| 5,236
|
[
"VTK"
] |
a2513d63f94d791f87b6885f300865c39921be2c25cdef0dd6d093d5c4b68c20
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
**********************************
espressopp.integrator.FixPositions
**********************************
.. function:: espressopp.integrator.FixPositions(system, particleGroup, fixMask)
:param system:
:param particleGroup:
:param fixMask:
:type system:
:type particleGroup:
:type fixMask:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.Extension import *
from _espressopp import integrator_FixPositions
class FixPositionsLocal(ExtensionLocal, integrator_FixPositions):
def __init__(self, system, particleGroup, fixMask):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, integrator_FixPositions, system, particleGroup, fixMask)
if pmi.isController :
class FixPositions(Extension):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.integrator.FixPositionsLocal',
pmicall = ['setFixMask', 'getFixMask'],
pmiproperty = [ 'particleGroup' ]
)
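# Hedged usage sketch (added comment, not part of the original module):
#   fix_positions = espressopp.integrator.FixPositions(system, particleGroup, fixMask)
#   integrator.addExtension(fix_positions)
# where fixMask is typically an espressopp.Int3D of per-axis flags selecting
# which coordinates to hold fixed.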
|
kkreis/espressopp
|
src/integrator/FixPositions.py
|
Python
|
gpl-3.0
| 1,965
|
[
"ESPResSo"
] |
ae502a91e27e08f19e6f7d01cfbb84722bee3ad3f5e26c934153e02b9729acb0
|
"""
@author: rhallmen
@date: 24.06.2017
"""
from antlr4 import *
from sxlLexer import sxlLexer
from sxlParser import sxlParser
from sxlVisitor import sxlVisitor
class Signal(object):
def __init__(self, name):
self.name = name
self.position = None
self.mode = None
self.isInput = None
self.reset = None
def check(self):
"""After parsing all attributes must have values.
check is a helper to facilitate this checking.
If no reset is given, the assumption of reset = 0 is made.
"""
assert self.name is not None
assert self.position is not None
assert self.mode is not None
assert self.isInput is not None
if self.reset is None:
self.reset = 0
class Position(object):
def __init__(self, isRange: bool, left: int, right: int = None):
assert isRange is not None
assert left is not None
self.isRange = isRange
self.left = left
self.right = left # intentional. len() will always work this way
if isRange:
assert left > right
self.right = right
def __str__(self):
if self.isRange:
return "({} DOWNTO {})".format(self.left, self.right)
return "({})".format(self.left)
def decl(self):
if self.isRange:
return "({} DOWNTO 0)".format(self.left-self.right)
return "({})".format(self.left)
def __len__(self):
return self.left - self.right + 1
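    # Illustrative examples (added comment, not part of the original file):
    #   Position(False, 5)   -> str(...) == "(5)",          decl() == "(5)"
    #   Position(True, 7, 4) -> str(...) == "(7 DOWNTO 4)", decl() == "(3 DOWNTO 0)", len(...) == 4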
class SignalVisitor(sxlVisitor):
"""
Parse a sxl file with focus on getting information on signals.
"""
def __init__(self):
self.sigs = {}
self.notifies = {}
self.regs = {}
self._current_reg = None
self._current_sig = None
def visitRegister(self, ctx: sxlParser.RegisterContext):
key = ctx.LABEL().getText()
self.regs[key] = []
self._current_reg = self.regs[key]
self.visitChildren(ctx)
def visitSignal(self, ctx: sxlParser.SignalContext):
key = ctx.LABEL().getText()
self.sigs[key] = Signal(key)
self._current_sig = self.sigs[key]
self.visitChildren(ctx)
self._current_sig.check()
self._current_reg.append(self._current_sig)
def visitSigPosition(self, ctx: sxlParser.SigPositionContext):
self.visit(ctx.position())
def visitPosSingle(self, ctx: sxlParser.PosSingleContext):
val = int(ctx.getText())
pos = Position(False, val)
self._current_sig.position = pos
def visitPosRange(self, ctx: sxlParser.PosRangeContext):
left, right = ctx.getText().split(':')
pos = Position(True, int(left), int(right))
self._current_sig.position = pos
def visitSigmode(self, ctx: sxlParser.SigmodeContext):
mode = ctx.key.text
if mode in ['ro']:
isInput = True
else:
isInput = False
self._current_sig.mode = mode
self._current_sig.isInput = isInput
def visitResetInt(self, ctx: sxlParser.ResetIntContext):
self._current_sig.reset = int(ctx.getText())
def visitResetHex(self, ctx: sxlParser.ResetHexContext):
self._current_sig.reset = int(ctx.getText(), 16)
def visitRegNotify(self, ctx: sxlParser.RegNotifyContext):
notify = self.visit(ctx.notify())
key = ctx.parentCtx.LABEL().getText()
self.notifies[key] = notify
def visitNotify(self, ctx: sxlParser.NotifyContext):
return ctx.key.text
@classmethod
def parse_file(cls, path):
"""Parse SXL file and return visitor."""
inp = FileStream(path)
lexer = sxlLexer(inp)
stream = CommonTokenStream(lexer)
parser = sxlParser(stream)
tree = parser.blocks()
visitor = cls()
visitor.visit(tree)
return visitor
|
nussbrot/AdvPT
|
python/SignalVisitor.py
|
Python
|
mit
| 3,895
|
[
"VisIt"
] |
c93e602c42c4df1b9988f2ff4a5e06ba7c8d77c1e2bd1b44b34086a49db6885c
|
"""
Browser set up for acceptance tests.
"""
# pylint: disable=no-member
# pylint: disable=unused-argument
from base64 import encodestring
from json import dumps
from logging import getLogger
import requests
from django.conf import settings
from django.core.management import call_command
from lettuce import after, before, world
from selenium.common.exceptions import WebDriverException
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from splinter.browser import Browser
from os import environ
from xmodule.contentstore.django import _CONTENTSTORE
LOGGER = getLogger(__name__)
LOGGER.info("Loading the lettuce acceptance testing terrain file...")
MAX_VALID_BROWSER_ATTEMPTS = 20
GLOBAL_SCRIPT_TIMEOUT = 60
def get_saucelabs_username_and_key():
"""
Returns the Sauce Labs username and access ID as set by environment variables
"""
return {"username": settings.SAUCE.get('USERNAME'), "access-key": settings.SAUCE.get('ACCESS_ID')}
def set_saucelabs_job_status(jobid, passed=True):
"""
Sets the job status on sauce labs
"""
config = get_saucelabs_username_and_key()
url = 'http://saucelabs.com/rest/v1/{}/jobs/{}'.format(config['username'], world.jobid)
body_content = dumps({"passed": passed})
base64string = encodestring('{}:{}'.format(config['username'], config['access-key']))[:-1]
headers = {"Authorization": "Basic {}".format(base64string)}
result = requests.put(url, data=body_content, headers=headers)
return result.status_code == 200
def make_saucelabs_desired_capabilities():
"""
Returns a DesiredCapabilities object corresponding to the environment sauce parameters
"""
desired_capabilities = settings.SAUCE.get('BROWSER', DesiredCapabilities.CHROME)
desired_capabilities['platform'] = settings.SAUCE.get('PLATFORM')
desired_capabilities['version'] = settings.SAUCE.get('VERSION')
desired_capabilities['device-type'] = settings.SAUCE.get('DEVICE')
desired_capabilities['name'] = settings.SAUCE.get('SESSION')
desired_capabilities['build'] = settings.SAUCE.get('BUILD')
desired_capabilities['video-upload-on-pass'] = False
desired_capabilities['sauce-advisor'] = False
desired_capabilities['capture-html'] = True
desired_capabilities['record-screenshots'] = True
desired_capabilities['selenium-version'] = "2.34.0"
desired_capabilities['max-duration'] = 3600
desired_capabilities['public'] = 'public restricted'
return desired_capabilities
@before.harvest
def initial_setup(server):
"""
Launch the browser once before executing the tests.
"""
world.absorb(settings.LETTUCE_SELENIUM_CLIENT, 'LETTUCE_SELENIUM_CLIENT')
if world.LETTUCE_SELENIUM_CLIENT == 'local':
browser_driver = getattr(settings, 'LETTUCE_BROWSER', 'chrome')
if browser_driver == 'chrome':
desired_capabilities = DesiredCapabilities.CHROME
desired_capabilities['loggingPrefs'] = {
'browser': 'ALL',
}
else:
desired_capabilities = {}
browser_options = None
if browser_driver == 'chrome' and environ.get('JENKINS_HOME'):
browser_options = webdriver.ChromeOptions()
browser_options.add_argument('--no-sandbox')
# There is an issue with ChromeDriver2 r195627 on Ubuntu
# in which we sometimes get an invalid browser session.
# This is a work-around to ensure that we get a valid session.
success = False
num_attempts = 0
while (not success) and num_attempts < MAX_VALID_BROWSER_ATTEMPTS:
# Load the browser and try to visit the main page
# If the browser couldn't be reached or
# the browser session is invalid, this will
# raise a WebDriverException
try:
if browser_driver == 'firefox':
# Lettuce initializes differently for firefox, and sending
# desired_capabilities will not work. So initialize without
# sending desired_capabilities.
world.browser = Browser(browser_driver)
else:
world.browser = Browser(
driver_name=browser_driver,
options=browser_options,
desired_capabilities=desired_capabilities,
)
world.browser.driver.set_script_timeout(GLOBAL_SCRIPT_TIMEOUT)
world.visit('/')
except WebDriverException:
LOGGER.warn("Error acquiring %s browser, retrying", browser_driver, exc_info=True)
if hasattr(world, 'browser'):
world.browser.quit()
num_attempts += 1
else:
success = True
# If we were unable to get a valid session within the limit of attempts,
# then we cannot run the tests.
if not success:
raise IOError("Could not acquire valid {driver} browser session.".format(driver=browser_driver))
world.absorb(0, 'IMPLICIT_WAIT')
world.browser.driver.set_window_size(1280, 1024)
elif world.LETTUCE_SELENIUM_CLIENT == 'saucelabs':
config = get_saucelabs_username_and_key()
world.browser = Browser(
'remote',
url="http://{}:{}@ondemand.saucelabs.com:80/wd/hub".format(config['username'], config['access-key']),
**make_saucelabs_desired_capabilities()
)
world.absorb(30, 'IMPLICIT_WAIT')
world.browser.set_script_timeout(GLOBAL_SCRIPT_TIMEOUT)
elif world.LETTUCE_SELENIUM_CLIENT == 'grid':
world.browser = Browser(
'remote',
url=settings.SELENIUM_GRID.get('URL'),
browser=settings.SELENIUM_GRID.get('BROWSER'),
)
world.absorb(30, 'IMPLICIT_WAIT')
world.browser.driver.set_script_timeout(GLOBAL_SCRIPT_TIMEOUT)
else:
raise Exception("Unknown selenium client '{}'".format(world.LETTUCE_SELENIUM_CLIENT))
world.browser.driver.implicitly_wait(world.IMPLICIT_WAIT)
world.absorb(world.browser.driver.session_id, 'jobid')
@before.each_scenario
def reset_data(scenario):
"""
Clean out the django test database defined in the
envs/acceptance.py file: edx-platform/db/test_edx.db
"""
LOGGER.debug("Flushing the test database...")
call_command('flush', interactive=False, verbosity=0)
world.absorb({}, 'scenario_dict')
@before.each_scenario
def configure_screenshots(scenario):
"""
Before each scenario, turn off automatic screenshots.
Args: str, scenario. Name of current scenario.
"""
world.auto_capture_screenshots = False
@after.each_scenario
def clear_data(scenario):
world.spew('scenario_dict')
@after.each_scenario
def reset_databases(scenario):
"""
After each scenario, all databases are cleared/dropped. Contentstore data are stored in unique databases
whereas modulestore data is in unique collection names. This data is created implicitly during the scenarios.
    If no data is created during the test, these lines effectively do nothing.
"""
import xmodule.modulestore.django
xmodule.modulestore.django.modulestore()._drop_database() # pylint: disable=protected-access
xmodule.modulestore.django.clear_existing_modulestores()
_CONTENTSTORE.clear()
@world.absorb
def capture_screenshot(image_name):
"""
Capture a screenshot outputting it to a defined directory.
This function expects only the name of the file. It will generate
the full path of the output screenshot.
    If the name contains spaces, they will be converted to underscores.
"""
output_dir = '{}/log/auto_screenshots'.format(settings.TEST_ROOT)
image_name = '{}/{}.png'.format(output_dir, image_name.replace(' ', '_'))
try:
world.browser.driver.save_screenshot(image_name)
except WebDriverException:
LOGGER.error("Could not capture a screenshot '{}'".format(image_name))
@after.each_scenario
def screenshot_on_error(scenario):
"""
Save a screenshot to help with debugging.
"""
if scenario.failed:
try:
output_dir = '{}/log'.format(settings.TEST_ROOT)
image_name = '{}/{}.png'.format(output_dir, scenario.name.replace(' ', '_'))
world.browser.driver.save_screenshot(image_name)
except WebDriverException:
LOGGER.error('Could not capture a screenshot')
@after.each_scenario
def capture_console_log(scenario):
"""
Save the console log to help with debugging.
"""
if scenario.failed:
log = world.browser.driver.get_log('browser')
try:
output_dir = '{}/log'.format(settings.TEST_ROOT)
file_name = '{}/{}.log'.format(output_dir, scenario.name.replace(' ', '_'))
with open(file_name, 'w') as output_file:
for line in log:
output_file.write("{}{}".format(dumps(line), '\n'))
except WebDriverException:
LOGGER.error('Could not capture the console log')
def capture_screenshot_for_step(step, when):
"""
Useful method for debugging acceptance tests that are run in Vagrant.
This method runs automatically before and after each step of an acceptance
test scenario. The variable:
world.auto_capture_screenshots
    either enables or disables the taking of screenshots. To change the
variable there is a convenient step defined:
I (enable|disable) auto screenshots
If you just want to capture a single screenshot at a desired point in code,
you should use the method:
world.capture_screenshot("image_name")
"""
if world.auto_capture_screenshots:
scenario_num = step.scenario.feature.scenarios.index(step.scenario) + 1
step_num = step.scenario.steps.index(step) + 1
step_func_name = step.defined_at.function.func_name
image_name = "{prefix:03d}__{num:03d}__{name}__{postfix}".format(
prefix=scenario_num,
num=step_num,
name=step_func_name,
postfix=when
)
world.capture_screenshot(image_name)
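# Example of the generated name (illustrative): the 3rd scenario's 2nd step,
# with a step function named 'i_visit_the_page', captured before the step,
# yields "003__002__i_visit_the_page__1_before".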
@before.each_step
def before_each_step(step):
capture_screenshot_for_step(step, '1_before')
@after.each_step
def after_each_step(step):
capture_screenshot_for_step(step, '2_after')
@after.harvest
def saucelabs_status(total):
"""
Collect data for saucelabs.
"""
if world.LETTUCE_SELENIUM_CLIENT == 'saucelabs':
set_saucelabs_job_status(world.jobid, total.scenarios_ran == total.scenarios_passed)
|
Edraak/edraak-platform
|
common/djangoapps/terrain/browser.py
|
Python
|
agpl-3.0
| 10,736
|
[
"VisIt"
] |
9bc2134be5314ae47d3ca7acff4a97843f174c7c14081440cc01e3d31b5b8f08
|
# """
#
# Stuff to make multi-D dispersion spectra, for instance for Mayavi ...
# For now this works only for 4 curves, to give 3 dimensions.
#
# """
#
# import sys
# import numpy as np
# #import matplotlib.pyplot as plt
#
# import pycs.gen.util as util
# import pycs.gen.lc as lc
# import pycs.gen.ml as ml
#
#
#
#
#
# def cube(lcs, fitmethod, verbose=True, timewidth=30, timestep=1.0, filename="chi2cube.pkl"):
# """
# 3D specplot, calculates the chi2 over a cube of time-delays. And writes the result in a pickle.
#
# This pickle can then be looked at with Mayavi (see example below)
# """
#
# if len(lcs)!=4:
# raise RuntimeError, "I want 4 lightcurves."
#
# lcsc = [l.copy() for l in lcs]
#
# # We apply microlensing
# for l in lcsc:
# if l.ml != None:
# l.applyml()
#
# def chi2(delays):
# lc.multisettimedelays(lcsc, delays)
# chi2 = fitmethod(lcsc)["chi2n"]
# return chi2
#
# initparams = np.concatenate([lc.multigettimedelays(lcsc)])
# print "Initial shifts : ", initparams
#
# timeshifts = np.arange(-(timewidth)*timestep/2.0, (timewidth+1)*timestep/2.0, timestep)
# cubeindexes = np.arange(timewidth + 1)
#
# print "Points to calculate :", len(timeshifts)**3
# chi2cube = np.zeros((timewidth+1, timewidth+1, timewidth+1))
#
# xshifts = timeshifts + initparams[0]
# yshifts = timeshifts + initparams[1]
# zshifts = timeshifts + initparams[2]
#
# for ix in cubeindexes:
# print "Slice %i of %i" % (ix + 1, timewidth+1)
# for iy in cubeindexes:
# for iz in cubeindexes:
# chi2cube[ix, iy, iz] = chi2([xshifts[ix], yshifts[iy], zshifts[iz]])
#
#
# beg = -(timewidth)*timestep/2.0
# end = (timewidth+1)*timestep/2.0
# step = timestep
#
# x, y, z = np.mgrid[beg:end:step, beg:end:step, beg:end:step]
# #print x, y, z
# x += initparams[0]
# y += initparams[1]
# z += initparams[2]
# #print x, y, z
#
# util.writepickle({"lcs":lcs, "x":x, "y":y, "z":z, "chi2":chi2cube}, filename)
#
#
#
# # To give an idea how to plot such a data cube with Mayavi2/mlab :
# # import sys
# # sys.path.append("../")
# # from pycs.gen import *
# # import numpy as np
# # from enthought.mayavi import mlab
# #
# # pkldict = util.readpickle("dispcube50.pkl")
# #
# # maxval = 1.5
# # minval = 1.43
# #
# # x = pkldict["x"]
# # y = pkldict["y"]
# # z = pkldict["z"]
# # d2 = pkldict["d2"]
# #
# # lcs = pkldict["lcs"]
# #
# # minpos = np.argmin(d2)
# # minpos = np.unravel_index(minpos, d2.shape)
# # min_x = x[minpos]
# # min_y = y[minpos]
# # min_z = z[minpos]
# #
# #
# # mlab.clf()
# #
# # src = mlab.pipeline.scalar_field(x, y, z, d2)
# #
# # # in green, the minimum
# # mlab.points3d([min_x], [min_y], [min_z], color=(0,1,0), mode="cube", scale_mode="none", resolution=14, scale_factor=0.15)
# #
# # mlab.pipeline.scalar_cut_plane(src, vmin=minval, vmax=maxval)
# #
# # mlab.colorbar(title='Dispersion', orientation='vertical')
# #
# # mlab.xlabel("%s%s"% (lcs[0].object, lcs[1].object))
# # mlab.ylabel("%s%s"% (lcs[0].object, lcs[2].object))
# # mlab.zlabel("%s%s"% (lcs[0].object, lcs[3].object))
# #
# #
# # mlab.show()
#
#
#
#
#
#
#
#
#
|
COSMOGRAIL/PyCS
|
pycs/play/fit/multispec.py
|
Python
|
gpl-3.0
| 3,194
|
[
"Mayavi"
] |
4b042df90cd56d08a5f15d3d739f5fc363a37be0dd43b08a9c9cc6aecf3992d9
|
#!/usr/bin/python
#
# Flickr API implementation
#
# Inspired largely by Michele Campeotto's flickrclient and Aaron Swartz'
# xmltramp... but I wanted to get a better idea of how python worked in
# those regards, so I mostly worked those components out for myself.
#
# http://micampe.it/things/flickrclient
# http://www.aaronsw.com/2002/xmltramp/
#
# Release 1: initial release
# Release 2: added upload functionality
# Release 3: code cleanup, convert to doc strings
# Release 4: better permission support
# Release 5: converted into fuller-featured "flickrapi"
# Release 6: fix upload sig bug (thanks Deepak Jois), encode test output
# Release 7: fix path construction, Manish Rai Jain's improvements, exceptions
# Release 8: change API endpoint to "api.flickr.com"
#
# Work by (or inspired by) Manish Rai Jain <manishrjain@gmail.com>:
#
# improved error reporting, proper multipart MIME boundary creation,
# use of urllib2 to allow uploads through a proxy, upload accepts
# raw data as well as a filename
#
# Copyright 2005 Brian "Beej Jorgensen" Hall <beej@beej.us>
#
# This work is licensed under the Creative Commons
# Attribution License. To view a copy of this license,
# visit http://creativecommons.org/licenses/by/2.5/ or send
# a letter to Creative Commons, 543 Howard Street, 5th
# Floor, San Francisco, California, 94105, USA.
#
# This license says that I must be credited for any derivative works.
# You do not need to credit me to simply use the FlickrAPI classes in
# your Python scripts--you only need to credit me if you're taking this
# FlickrAPI class and modifying it or redistributing it.
#
# Previous versions of this API were granted to the public domain.
# You're free to use those as you please.
#
# Beej Jorgensen, Maintainer, November 2005
# beej@beej.us
#
# May 19, 2015 -- Edited by Cecilia Mauceri to add Expat error handling
#
import sys
import hashlib
import string
import urllib.request, urllib.parse, urllib.error
import email
import http.client
import os.path
import xml.dom.minidom
import xml.parsers.expat
########################################################################
# Exceptions
########################################################################
class UploadException(Exception):
pass
########################################################################
# XML functionality
########################################################################
#-----------------------------------------------------------------------
class XMLNode:
"""XMLNode -- generic class for holding an XML node
xmlStr = \"\"\"<xml foo="32">
<name bar="10">Name0</name>
<name bar="11" baz="12">Name1</name>
</xml>\"\"\"
f = XMLNode.parseXML(xmlStr)
    print(f.elementName)           # xml
    print(f['foo'])                # 32
    print(f.name)                  # [<name XMLNode>, <name XMLNode>]
    print(f.name[0].elementName)   # name
    print(f.name[0]["bar"])        # 10
    print(f.name[0].elementText)   # Name0
    print(f.name[1].elementName)   # name
    print(f.name[1]["bar"])        # 11
    print(f.name[1]["baz"])        # 12
"""
def __init__(self):
"""Construct an empty XML node."""
self.elementName = ""
self.elementText = ""
self.attrib = {}
self.xml = ""
def __setitem__(self, key, item):
"""Store a node's attribute in the attrib hash."""
self.attrib[key] = item
def __getitem__(self, key):
"""Retrieve a node's attribute from the attrib hash."""
try:
return self.attrib[key]
        except KeyError:
return "null"
#-----------------------------------------------------------------------
#@classmethod
def parseXML(cls, xmlStr, storeXML=False):
"""Convert an XML string into a nice instance tree of XMLNodes.
xmlStr -- the XML to parse
storeXML -- if True, stores the XML string in the root XMLNode.xml
"""
def __parseXMLElement(element, thisNode):
"""Recursive call to process this XMLNode."""
thisNode.elementName = element.nodeName
#print element.nodeName
# add element attributes as attributes to this node
for i in range(element.attributes.length):
an = element.attributes.item(i)
thisNode[an.name] = an.nodeValue
for a in element.childNodes:
if a.nodeType == xml.dom.Node.ELEMENT_NODE:
child = XMLNode()
                    if not hasattr(thisNode, a.nodeName):
                        setattr(thisNode, a.nodeName, [])
                    # add the child node as an attribute of this node
                    children = getattr(thisNode, a.nodeName)
                    children.append(child)
__parseXMLElement(a, child)
elif a.nodeType == xml.dom.Node.TEXT_NODE:
thisNode.elementText += a.nodeValue
return thisNode
try:
dom = xml.dom.minidom.parseString(xmlStr)
except xml.parsers.expat.ExpatError as e:
            raise FlickrExpatError(str(e), e, xmlStr)
# get the root
rootNode = XMLNode()
if storeXML: rootNode.xml = xmlStr
return __parseXMLElement(dom.firstChild, rootNode)
parseXML = classmethod(parseXML)
########################################################################
# Custom XML Expat Exception
########################################################################
class FlickrExpatError(Exception):
def __init__(self, message, org_error, xmlstr):
super(FlickrExpatError, self).__init__(message)
self.org_error = org_error
self.xmlstr = xmlstr
########################################################################
# Flickr functionality
########################################################################
#-----------------------------------------------------------------------
class FlickrAPI:
"""Encapsulated flickr functionality.
Example usage:
flickr = FlickrAPI(flickrAPIKey, flickrSecret)
rsp = flickr.auth_checkToken(api_key=flickrAPIKey, auth_token=token)
"""
flickrHost = "api.flickr.com"
flickrRESTForm = "/services/rest/"
flickrAuthForm = "/services/auth/"
flickrUploadForm = "/services/upload/"
#-------------------------------------------------------------------
def __init__(self, apiKey, secret):
"""Construct a new FlickrAPI instance for a given API key and secret."""
self.apiKey = apiKey
self.secret = secret
self.__handlerCache = {}
#-------------------------------------------------------------------
def __sign(self, data):
"""Calculate the flickr signature for a set of params.
data -- a hash of all the params and values to be hashed, e.g.
{"api_key":"AAAA", "auth_token":"TTTT"}
"""
dataName = self.secret
keys = list(data.keys())
keys.sort()
for a in keys: dataName += (a + data[a])
#print dataName
        md5 = hashlib.md5()
        md5.update(dataName.encode('utf-8'))
        return md5.hexdigest()
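    # A worked example with hypothetical values: for secret "SS" and
    # data {"b": "2", "a": "1"}, the keys sort to ["a", "b"], the string
    # hashed is "SSa1b2", and the signature is
    # hashlib.md5("SSa1b2".encode('utf-8')).hexdigest().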
#-------------------------------------------------------------------
def __getattr__(self, method, **arg):
"""Handle all the flickr API calls.
This is Michele Campeotto's cleverness, wherein he writes a
general handler for methods not defined, and assumes they are
flickr methods. He then converts them to a form to be passed as
the method= parameter, and goes from there.
http://micampe.it/things/flickrclient
My variant is the same basic thing, except it tracks if it has
already created a handler for a specific call or not.
example usage:
flickr.auth_getFrob(api_key="AAAAAA")
rsp = flickr.favorites_getList(api_key=flickrAPIKey, \\
auth_token=token)
"""
if method not in self.__handlerCache:
def handler(_self=self, _method=method, **arg):
_method = "flickr." + _method.replace("_", ".")
url = "https://" + FlickrAPI.flickrHost + \
FlickrAPI.flickrRESTForm
arg["method"] = _method
postData = urllib.parse.urlencode(arg) + "&api_sig=" + \
_self.__sign(arg)
#print "--url---------------------------------------------"
#print url
#print "--postData----------------------------------------"
#print postData
f = urllib.request.urlopen(url, postData.encode('utf-8'))
data = f.read()
#print "--response----------------------------------------"
#print data
f.close()
return XMLNode.parseXML(data, True)
            self.__handlerCache[method] = handler
return self.__handlerCache[method]
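    # For example, a call like flickr.photos_search(api_key=KEY, tags="cat")
    # (where KEY stands for your API key) is dispatched by the handler above
    # as the REST method "flickr.photos.search": underscores become dots and
    # the "flickr." prefix is added.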
#-------------------------------------------------------------------
def __getAuthURL(self, perms, frob):
"""Return the authorization URL to get a token.
This is the URL the app will launch a browser toward if it
needs a new token.
perms -- "read", "write", or "delete"
frob -- picked up from an earlier call to FlickrAPI.auth_getFrob()
"""
data = {"api_key": self.apiKey, "frob": frob, "perms": perms}
data["api_sig"] = self.__sign(data)
return "https://%s%s?%s" % (FlickrAPI.flickrHost, \
FlickrAPI.flickrAuthForm, urllib.parse.urlencode(data))
#-------------------------------------------------------------------
def upload(self, filename=None, jpegData=None, **arg):
"""Upload a file to flickr.
Be extra careful you spell the parameters correctly, or you will
get a rather cryptic "Invalid Signature" error on the upload!
Supported parameters:
One of filename or jpegData must be specified by name when
calling this method:
filename -- name of a file to upload
jpegData -- array of jpeg data to upload
api_key
auth_token
title
description
tags -- space-delimited list of tags, "tag1 tag2 tag3"
is_public -- "1" or "0"
is_friend -- "1" or "0"
is_family -- "1" or "0"
"""
        if (filename is None) == (jpegData is None):
            raise UploadException("exactly one of filename or jpegData "
                "must be specified")
# verify key names
for a in list(arg.keys()):
if a != "api_key" and a != "auth_token" and a != "title" and \
a != "description" and a != "tags" and a != "is_public" and \
a != "is_friend" and a != "is_family":
sys.stderr.write("FlickrAPI: warning: unknown parameter " \
"\"%s\" sent to FlickrAPI.upload\n" % (a))
arg["api_sig"] = self.__sign(arg)
url = "https://" + FlickrAPI.flickrHost + FlickrAPI.flickrUploadForm
# construct POST data
boundary = email.generator._make_boundary()
body = ""
# required params
for a in ('api_key', 'auth_token', 'api_sig'):
body += "--%s\r\n" % (boundary)
body += "Content-Disposition: form-data; name=\"" + a + "\"\r\n\r\n"
body += "%s\r\n" % (arg[a])
# optional params
for a in ('title', 'description', 'tags', 'is_public', \
'is_friend', 'is_family'):
if a in arg:
body += "--%s\r\n" % (boundary)
body += "Content-Disposition: form-data; name=\"" + a + "\"\r\n\r\n"
body += "%s\r\n" % (arg[a])
body += "--%s\r\n" % (boundary)
body += "Content-Disposition: form-data; name=\"photo\";"
body += " filename=\"%s\"\r\n" % filename
body += "Content-Type: image/jpeg\r\n\r\n"
#print body
        if filename is not None:
            fp = open(filename, "rb")
            data = fp.read()
            fp.close()
else:
data = jpegData
postData = body.encode("utf_8") + data + \
("--%s--" % (boundary)).encode("utf_8")
        request = urllib.request.Request(url, data=postData)
request.add_header("Content-Type", \
"multipart/form-data; boundary=%s" % boundary)
response = urllib.request.urlopen(request)
rspXML = response.read()
return XMLNode.parseXML(rspXML)
#-----------------------------------------------------------------------
#@classmethod
def testFailure(cls, rsp, exit=True):
"""Exit app if the rsp XMLNode indicates failure."""
if rsp['stat'] == "fail":
sys.stderr.write("%s\n" % (cls.getPrintableError(rsp)))
if exit: sys.exit(1)
testFailure = classmethod(testFailure)
#-----------------------------------------------------------------------
#@classmethod
def getPrintableError(cls, rsp):
"""Return a printed error message string."""
return "%s: error %s: %s" % (rsp.elementName, \
cls.getRspErrorCode(rsp), cls.getRspErrorMsg(rsp))
getPrintableError = classmethod(getPrintableError)
#-----------------------------------------------------------------------
#@classmethod
def getRspErrorCode(cls, rsp):
"""Return the error code of a response, or 0 if no error."""
if rsp['stat'] == "fail":
return rsp.err[0]['code']
return 0
getRspErrorCode = classmethod(getRspErrorCode)
#-----------------------------------------------------------------------
#@classmethod
def getRspErrorMsg(cls, rsp):
"""Return the error message of a response, or "Success" if no error."""
if rsp['stat'] == "fail":
return rsp.err[0]['msg']
return "Success"
getRspErrorMsg = classmethod(getRspErrorMsg)
#-----------------------------------------------------------------------
def __getCachedTokenPath(self):
"""Return the directory holding the app data."""
return os.path.expanduser(os.path.sep.join(["~", ".flickr", \
self.apiKey]))
#-----------------------------------------------------------------------
def __getCachedTokenFilename(self):
"""Return the full pathname of the cached token file."""
return os.path.sep.join([self.__getCachedTokenPath(), "auth.xml"])
#-----------------------------------------------------------------------
def __getCachedToken(self):
"""Read and return a cached token, or None if not found.
The token is read from the cached token file, which is basically the
entire RSP response containing the auth element.
"""
try:
            f = open(self.__getCachedTokenFilename(), "r")
data = f.read()
f.close()
rsp = XMLNode.parseXML(data)
return rsp.auth[0].token[0].elementText
except IOError:
return None
#-----------------------------------------------------------------------
def __setCachedToken(self, xml):
"""Cache a token for later use.
The cached tag is stored by simply saving the entire RSP response
containing the auth element.
"""
path = self.__getCachedTokenPath()
if not os.path.exists(path):
os.makedirs(path)
        f = open(self.__getCachedTokenFilename(), "w")
f.write(xml)
f.close()
#-----------------------------------------------------------------------
def getToken(self, perms="read", browser="lynx"):
"""Get a token either from the cache, or make a new one from the
frob.
This first attempts to find a token in the user's token cache on
disk.
If that fails (or if the token is no longer valid based on
flickr.auth.checkToken) a new frob is acquired. The frob is
validated by having the user log into flickr (with lynx), and
subsequently a valid token is retrieved.
The newly minted token is then cached locally for the next run.
        perms -- "read", "write", or "delete"
        browser -- whatever browser should be used in the system() call
"""
# see if we have a saved token
token = self.__getCachedToken()
# see if it's valid
if token != None:
rsp = self.auth_checkToken(api_key=self.apiKey, auth_token=token)
if rsp['stat'] != "ok":
token = None
else:
# see if we have enough permissions
tokenPerms = rsp.auth[0].perms[0].elementText
if tokenPerms == "read" and perms != "read":
token = None
elif tokenPerms == "write" and perms == "delete":
token = None
# get a new token if we need one
if token == None:
# get the frob
rsp = self.auth_getFrob(api_key=self.apiKey)
self.testFailure(rsp)
frob = rsp.frob[0].elementText
# validate online
os.system("%s '%s'" % (browser, self.__getAuthURL(perms, frob)))
# get a token
rsp = self.auth_getToken(api_key=self.apiKey, frob=frob)
self.testFailure(rsp)
token = rsp.auth[0].token[0].elementText
# store the auth info for next time
self.__setCachedToken(rsp.xml)
return token
########################################################################
# App functionality
########################################################################
def main(argv):
# flickr auth information:
flickrAPIKey = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" # API key
flickrSecret = "XXXXXXXXXXXXXXXX" # shared "secret"
# make a new FlickrAPI instance
fapi = FlickrAPI(flickrAPIKey, flickrSecret)
# do the whole whatever-it-takes to get a valid token:
token = fapi.getToken(browser="firefox")
# get my favorites
rsp = fapi.favorites_getList(api_key=flickrAPIKey, auth_token=token)
fapi.testFailure(rsp)
# and print them
for a in rsp.photos[0].photo:
print(("%10s: %s" % (a['id'], a['title'].encode("ascii", "replace"))))
# upload the file foo.jpg
#rsp = fapi.upload(filename="foo.jpg", \
# api_key=flickrAPIKey, auth_token=token, \
# title="This is the title", description="This is the description", \
# tags="tag1 tag2 tag3", is_public="1")
#if rsp == None:
# sys.stderr.write("can't find file\n")
#else:
# fapi.testFailure(rsp)
return 0
# run the main if we're not being imported:
if __name__ == "__main__": sys.exit(main(sys.argv))
|
crmauceri/VisualCommonSense
|
code/crawler/flickrapi2.py
|
Python
|
mit
| 19,386
|
[
"Brian",
"VisIt"
] |
b30333f9fb4c05ea6e501a6168b97392a71c11e67e94d35d34ccb4e1d204dbb9
|
# Copyright (c) 2000-2010 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""handle diagram generation options for class diagram or default diagrams
"""
from logilab.common.compat import builtins
BUILTINS_NAME = builtins.__name__
from logilab import astng
from logilab.astng.utils import LocalsVisitor
from pylint.pyreverse.diagrams import PackageDiagram, ClassDiagram
# diagram generators ##########################################################
class DiaDefGenerator:
"""handle diagram generation options
"""
def __init__(self, linker, handler):
"""common Diagram Handler initialization"""
self.config = handler.config
self._set_default_options()
self.linker = linker
self.classdiagram = None # defined by subclasses
def get_title(self, node):
"""get title for objects"""
title = node.name
if self.module_names:
title = '%s.%s' % (node.root().name, title)
return title
def _set_option(self, option):
"""activate some options if not explicitly deactivated"""
# if we have a class diagram, we want more information by default;
# so if the option is None, we return True
        if option is None:
            return bool(self.config.classes)
        return option
def _set_default_options(self):
"""set different default options with _default dictionary"""
self.module_names = self._set_option(self.config.module_names)
all_ancestors = self._set_option(self.config.all_ancestors)
all_associated = self._set_option(self.config.all_associated)
anc_level, ass_level = (0, 0)
if all_ancestors:
anc_level = -1
if all_associated:
ass_level = -1
if self.config.show_ancestors is not None:
anc_level = self.config.show_ancestors
if self.config.show_associated is not None:
ass_level = self.config.show_associated
self.anc_level, self.ass_level = anc_level, ass_level
def _get_levels(self):
"""help function for search levels"""
return self.anc_level, self.ass_level
def show_node(self, node):
"""true if builtins and not show_builtins"""
if self.config.show_builtin:
return True
return node.root().name != BUILTINS_NAME
def add_class(self, node):
"""visit one class and add it to diagram"""
self.linker.visit(node)
self.classdiagram.add_object(self.get_title(node), node)
def get_ancestors(self, node, level):
"""return ancestor nodes of a class node"""
if level == 0:
return
for ancestor in node.ancestors(recurs=False):
if not self.show_node(ancestor):
continue
yield ancestor
def get_associated(self, klass_node, level):
"""return associated nodes of a class node"""
if level == 0:
return
for ass_nodes in list(klass_node.instance_attrs_type.values()) + \
list(klass_node.locals_type.values()):
for ass_node in ass_nodes:
if isinstance(ass_node, astng.Instance):
ass_node = ass_node._proxied
if not (isinstance(ass_node, astng.Class)
and self.show_node(ass_node)):
continue
yield ass_node
def extract_classes(self, klass_node, anc_level, ass_level):
"""extract recursively classes related to klass_node"""
if self.classdiagram.has_node(klass_node) or not self.show_node(klass_node):
return
self.add_class(klass_node)
for ancestor in self.get_ancestors(klass_node, anc_level):
self.extract_classes(ancestor, anc_level-1, ass_level)
for ass_node in self.get_associated(klass_node, ass_level):
self.extract_classes(ass_node, anc_level, ass_level-1)
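    # For instance, extract_classes(k, 1, 0) adds k and its direct ancestors
    # only: the recursive calls receive anc_level-1 == 0, so the ancestors'
    # own ancestors are not visited, and ass_level == 0 means association
    # edges are never followed (a level of -1 effectively means "no limit").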
class DefaultDiadefGenerator(LocalsVisitor, DiaDefGenerator):
"""generate minimum diagram definition for the project :
* a package diagram including project's modules
* a class diagram including project's classes
"""
def __init__(self, linker, handler):
DiaDefGenerator.__init__(self, linker, handler)
LocalsVisitor.__init__(self)
def visit_project(self, node):
"""visit an astng.Project node
create a diagram definition for packages
"""
mode = self.config.mode
if len(node.modules) > 1:
self.pkgdiagram = PackageDiagram('packages %s' % node.name, mode)
else:
self.pkgdiagram = None
self.classdiagram = ClassDiagram('classes %s' % node.name, mode)
def leave_project(self, node):
"""leave the astng.Project node
return the generated diagram definition
"""
if self.pkgdiagram:
return self.pkgdiagram, self.classdiagram
return self.classdiagram,
def visit_module(self, node):
"""visit an astng.Module node
add this class to the package diagram definition
"""
if self.pkgdiagram:
self.linker.visit(node)
self.pkgdiagram.add_object(node.name, node)
def visit_class(self, node):
"""visit an astng.Class node
add this class to the class diagram definition
"""
anc_level, ass_level = self._get_levels()
self.extract_classes(node, anc_level, ass_level)
def visit_from(self, node):
"""visit astng.From and catch modules for package diagram
"""
if self.pkgdiagram:
self.pkgdiagram.add_from_depend(node, node.modname)
class ClassDiadefGenerator(DiaDefGenerator):
"""generate a class diagram definition including all classes related to a
given class
"""
def __init__(self, linker, handler):
DiaDefGenerator.__init__(self, linker, handler)
def class_diagram(self, project, klass):
"""return a class diagram definition for the given klass and its
related klasses
"""
self.classdiagram = ClassDiagram(klass, self.config.mode)
if len(project.modules) > 1:
module, klass = klass.rsplit('.', 1)
module = project.get_module(module)
else:
module = project.modules[0]
klass = klass.split('.')[-1]
klass = next(module.ilookup(klass))
anc_level, ass_level = self._get_levels()
self.extract_classes(klass, anc_level, ass_level)
return self.classdiagram
# diagram handler #############################################################
class DiadefsHandler:
"""handle diagram definitions :
get it from user (i.e. xml files) or generate them
"""
def __init__(self, config):
self.config = config
def get_diadefs(self, project, linker):
"""get the diagrams configuration data
:param linker: astng.inspector.Linker(IdGeneratorMixIn, LocalsVisitor)
:param project: astng.manager.Project
"""
# read and interpret diagram definitions (Diadefs)
diagrams = []
generator = ClassDiadefGenerator(linker, self)
for klass in self.config.classes:
diagrams.append(generator.class_diagram(project, klass))
if not diagrams:
diagrams = DefaultDiadefGenerator(linker, self).visit(project)
for diagram in diagrams:
diagram.extract_relationships()
return diagrams
|
tlksio/tlksio
|
env/lib/python3.4/site-packages/pylint/pyreverse/diadefslib.py
|
Python
|
mit
| 8,339
|
[
"VisIt"
] |
1276239fa0d9b6036fddf9be81892391340b7fb2826f2a92a77acf4e1f23191e
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Spotify AB
import cStringIO
import signal
import unittest
from graphwalker import planning
class Thing(tuple):
id = property(lambda self: self[0])
name = property(lambda self: self[1])
outgoing = property(lambda self: self[2])
incoming = property(lambda self: self[3])
src = property(lambda self: self[2])
tgt = property(lambda self: self[3])
weight = property(lambda self: self[4] if len(self) > 4 else None)
class G(object):
def __init__(self, V, E):
self.V, self.E = V, E
del_vert = lambda s, v: v
eulerize = lambda s: s
copy = lambda s: s
def vert_degrees(self):
I = dict((v, 0) for v in self.V)
O = dict(I)
for edge in self.E.values():
O[edge.src] += 1
I[edge.tgt] += 1
return I, O
vert, edge = Thing('aa'), Thing('eeaa')
V, E = {'a': vert}, {'e': edge}
g = G(V, E)
def build_graph(spec):
V = dict((v, Thing((v, v, []))) for v in sorted(set(spec)))
E = dict((f + t, Thing((f + t, f + t, f, t))) for f, t in spec.split())
for edge in sorted(E.values()):
V[edge.src].outgoing.append(edge)
return G(V, E)
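# A small illustration of the spec format used above: build_graph('ab bc')
# yields edges 'ab' (a -> b) and 'bc' (b -> c) and one vertex per character
# in the spec (including, harmlessly, one for the space separator), with
# each source vertex's outgoing list filled in.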
class EhmNo(object):
__nonzero__ = lambda s: False
add = lambda s, x: False
start = lambda s, *al: s
class TestPlanner(unittest.TestCase):
def test_ctor_smoke(self):
self.assert_(planning.Planner())
def test_setup_results(self):
p = planning.Planner()
V, E, plan, v = p._setup(g, EhmNo().start(None), 'a', '<ctx>')
self.assert_(V is g.V)
self.assert_(E is g.E)
self.assert_(v is vert)
self.assert_(p.g is g)
self.assert_(plan is p.plan)
self.assert_(v is p.vert)
def test_setup_rng_none(self):
calls = []
class Sub(planning.Planner):
randcls = lambda *al, **kw: calls.append((al, kw))
p = Sub()
self.assertEqual(calls, [((p, None,), {})])
def test_setup_rng_some(self):
calls = []
class Sub(planning.Planner):
randcls = lambda *al, **kw: calls.append((al, kw))
p = Sub(seed='cthulhu')
self.assertEqual(calls, [((p, 'cthulhu',), {})])
def test_forced_plan(self):
g = build_graph('ab bc cd de ef fd')
p = planning.Planner(seed='cthulhu')
p._setup(g, EhmNo(), 'a', '<ctx>')
self.assertEqual(p.plan, [])
p.forced_plan()
self.assertEqual([s[0] for s in p.plan], ['ab', 'b'])
def test_visit_own(self):
p = planning.Planner(seed='cthulhu')
p.stop, p.plan = set(), []
p.visit('moo')
self.assert_('moo' in p.stop)
self.assertEqual(p.plan, ['moo'])
def test_visit_not_own(self):
p = planning.Planner(seed='cthulhu')
p.stop, p.plan, extrap = set(), [], []
p.visit('moo', extrap)
self.assert_('moo' in p.stop)
self.assertEqual(p.plan, [])
self.assertEqual(extrap, ['moo'])
def test_step(self):
visited, plan = [], []
class g:
V = {'to': 'dest'}
class e:
src = 'fm'
tgt = 'to'
class v:
id = 'fm'
p = planning.Planner(seed='cthulhu')
p.stop, p.plan, p.g = set(), [], g
p.visit = lambda thing, plan: visited.append(thing)
result = p.step(v, e, plan)
self.assertEqual(result, 'dest')
self.assertEqual(visited, [e, 'dest'])
self.assertEqual(p.plan, [])
class rng:
def __init__(self, dice=None):
self.calls = []
self.dice = dice
def choice(self, seq):
self.calls.append(('choice', seq))
return seq[self.dice.pop(0) if self.dice else -1]
def uniform(self, a, b):
self.calls.append(('uniform', a, b))
return self.dice.pop(0) if self.dice else a + (b - a) / 2
class TestEvenRandom(unittest.TestCase):
thiscls = planning.EvenRandom
def test_ctor_smoke(self):
self.assert_(self.thiscls())
self.assert_(self.thiscls(12))
self.assert_(self.thiscls(seed=12))
def test_call(self):
g = build_graph('ab bc cb')
p = self.thiscls()
p.rng = rng([-1, -1, -1])
plan = zip(p(g, EhmNo(), 'a', 'context'), '012345')
self.assertEqual(plan, [
(g.E['ab'], '0'), (g.V['b'], '1'),
(g.E['bc'], '2'), (g.V['c'], '3'),
(g.E['cb'], '4'), (g.V['b'], '5'),
])
def test_call_choices(self):
g = build_graph('ab bc cb bb cc')
p = self.thiscls()
p.rng = rng([0, 1, 0, 1, 0])
plan = zip(p(g, EhmNo(), 'a', 'context'), range(10))
l = [
(g.E['ab'], 0), (g.V['b'], 1),
(g.E['bc'], 2), (g.V['c'], 3),
(g.E['cb'], 4), (g.V['b'], 5),
(g.E['bc'], 6), (g.V['c'], 7),
(g.E['cb'], 8), (g.V['b'], 9),
]
self.assertEqual(plan, l)
calls = [
('choice', [g.E['ab']]),
('choice', [g.E['bb'], g.E['bc']]),
('choice', [g.E['cb'], g.E['cc']]),
('choice', [g.E['bb'], g.E['bc']]),
('choice', [g.E['cb'], g.E['cc']]),
('choice', [g.E['bb'], g.E['bc']]),
]
self.assertEqual(p.rng.calls, calls)
class TestRandom(TestEvenRandom):
thiscls = planning.Random
def test_call_weighted_choices(self):
g = build_graph('ab bc cb bb cc')
g.E['bb'] = Thing(('bb', 'bb', 'b', 'b', '25%'))
g.V['b'].outgoing[0] = g.E['bb']
p = self.thiscls()
p.rng = r = rng([0, 0.26, 0, 0.24, 1, 0])
plan = zip(p(g, EhmNo(), 'a', 'context'), range(10))
l = [
(g.E['ab'], 0), (g.V['b'], 1),
(g.E['bc'], 2), (g.V['c'], 3),
(g.E['cb'], 4), (g.V['b'], 5),
(g.E['bb'], 6), (g.V['b'], 7),
(g.E['bc'], 8), (g.V['c'], 9),
]
if plan != l:
for i in range(min(len(plan), len(l))):
print (i, "=!"[plan[i] != l[i]], plan[i], l[i])
self.assertEqual(plan, l)
calls = [
('choice', [g.E['ab']]),
('uniform', 0.0, 1.0),
('choice', [g.E['cb'], g.E['cc']]),
('uniform', 0.0, 1.0),
('uniform', 0.0, 1.0),
('choice', [g.E['cb'], g.E['cc']]),
]
if r.calls != calls:
for i in range(min(len(r.calls), len(calls))):
print (i, "=!"[r.calls[i] != calls[i]], r.calls[i], calls[i])
self.assertEqual(p.rng.calls, calls)
class timeout(object):
@staticmethod
def alrm(sig, frame):
assert False, "Timeout"
def __init__(self, t=1):
self.t = t
def __enter__(self):
signal.signal(signal.SIGALRM, self.alrm)
signal.alarm(self.t)
def __exit__(self, t, v, tb):
signal.alarm(0)
class TestEuler(unittest.TestCase):
def test_ctor_smoke(self):
self.assert_(planning.Euler())
def test_fail_non_euler_a(self):
p = planning.Euler()
g = build_graph('ab bc bd')
p.forced_plan = lambda *al: None
try:
p(g, EhmNo(), 'a', '<context>')
except AssertionError as e:
self.assertEqual(e.args, ("Graph is not Eulerian",))
else:
self.assert_(False, "Expected exception")
def test_fail_non_euler_b(self):
p = planning.Euler()
g = build_graph('ab ba de ed')
p.forced_plan = lambda *al: None
try:
p(g, EhmNo(), 'a', '<context>')
except AssertionError as e:
self.assertEqual(e.args, ("Graph is not connected",))
else:
self.assert_(False, "Expected exception")
def test_early_stop(self):
class Some(EhmNo):
stops = [0, 0, 0, 1]
__nonzero__ = lambda s: s.stops.pop(0)
g = build_graph('ab bc cd de ef fg gh ha')
p = planning.Euler()
p.forced_plan = lambda *al: None
plan = p(g, Some(), 'a', '<context>')
self.assertEqual([x[0] for x in plan], ['ab', 'b', 'bc'])
def test_completes(self):
g = build_graph('ab bc cb ba')
p = planning.Euler()
p.forced_plan = lambda *al: None
with timeout(1):
p(g, EhmNo(), 'a', '<context>')
class TestGoto(unittest.TestCase):
def test_ctor_smoke(self):
self.assert_(planning.Goto())
def test_shortest(self):
g = build_graph('ab ac ad bc dc')
d = {('a', 'b'): (1, 'b'), ('a', 'c'): (1, 'c'), ('a', 'd'): (1, 'd'),
('b', 'c'): (1, 'c'), ('d', 'c'): (1, 'c')}
g.all_pairs_shortest_path = lambda *al: d
g.is_stuck = lambda *al: False
p = planning.Goto('c')
plan = p(g, EhmNo(), 'a', '<context>')
self.assertEqual([x[0] for x in plan], ['ac', 'c'])
def test_each_in_turn(self):
g = build_graph('ab bc cd da')
d = {
('a', 'b'): (1, 'b'),
('a', 'c'): (2, 'bc'),
('a', 'd'): (3, 'bcd'),
('b', 'c'): (1, 'c'),
('b', 'd'): (2, 'cd'),
('b', 'a'): (3, 'cda'),
('c', 'd'): (1, 'd'),
('c', 'a'): (2, 'da'),
('c', 'b'): (3, 'dab'),
('d', 'a'): (1, 'a'),
('d', 'b'): (2, 'ab'),
('d', 'c'): (3, 'abc'),
}
g.all_pairs_shortest_path = lambda *al: d
g.is_stuck = lambda *al: False
p = planning.Goto(*'dcba')
plan = p(g, EhmNo(), 'a', '<context>')
self.assertEqual(
'-'.join(x[0] for x in plan),
'ab-b-bc-c-cd-d-da-a-ab-b-bc-c-cd-d-da-a-ab-b-bc-c-cd-d-da-a')
# a -> d -> c -> b -> a
class TestInteractive(unittest.TestCase):
def test_ctor_smoke(self):
self.assert_(planning.Interactive())
def build(self, result='9\n'):
pi = planning.Interactive()
pi.out = cStringIO.StringIO()
if isinstance(result, BaseException):
def raiser():
raise result
pi.raw_input = raiser
else:
pi.raw_input = result if callable(result) else (lambda: result)
return pi
def test_choose_choice(self):
pi = self.build('9\n')
self.assertEqual(pi.choose(pi, 'abc'), '9\n')
def test_choose_alts(self):
pi = self.build()
alts = ['fleb', 'mefl', 'blof']
pi.choose(pi, alts)
out = pi.out.getvalue()
self.assert_(all(item in out for item in alts),
'All items should be listed before prompt')
def test_choose_sigint(self):
pi = self.build(KeyboardInterrupt())
self.assertEqual(pi.choose(pi, 'abc'), None)
def test_choose_eof(self):
pi = self.build(EOFError())
self.assertEqual(pi.choose(pi, 'abc'), None)
def test_choose_other_exception(self):
l = [5, 1, 0, 0]
pi = self.build(lambda: 1 / l.pop() and '0\n')
self.assertEqual(pi.choose(pi, 'abc'), '0\n')
self.assertEqual(l, [5])
self.assert_('huh?' in pi.out.getvalue())
class TestMasterPlan(unittest.TestCase):
def test_ctor_smoke(self):
self.assert_(planning.MasterPlan([]))
self.assert_(planning.MasterPlan(['meffel']))
def test_inner(self):
calls = []
inner = lambda *al: calls.append(al) or ['step']
p = planning.MasterPlan([inner])
steps = list(p('<g>', '<h>', '<start>', '<ctx>'))
self.assertEqual(steps, ['step'])
self.assertEqual(calls, [('<g>', '<h>', '<start>', '<ctx>')])
def test_inners(self):
calls = []
abe = lambda *al: calls.append(('a', al)) or ['step_a']
ben = lambda *al: calls.append(('b', al)) or ['step_b']
p = planning.MasterPlan([abe, ben])
steps = list(p('<g>', '<h>', '<start>', '<ctx>'))
self.assertEqual(steps, ['step_a', 'step_b'])
self.assertEqual(calls, [('a', ('<g>', '<h>', '<start>', '<ctx>')),
('b', ('<g>', '<h>', 's', '<ctx>'))])
|
bartvanherck/python-graphwalker
|
graphwalker/test/planning_test.py
|
Python
|
apache-2.0
| 12,153
|
[
"VisIt"
] |
cd388e8ef1bc5008edb5bbdb1734078e3953228053566edf2cfad807932a22e9
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_hardwaresecuritymodulegroup
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of HardwareSecurityModuleGroup Avi RESTful Object
description:
- This module is used to configure HardwareSecurityModuleGroup object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
hsm:
description:
- Hardware security module configuration.
required: true
name:
description:
- Name of the hsm group configuration object.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the hsm group configuration object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create HardwareSecurityModuleGroup object
avi_hardwaresecuritymodulegroup:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_hardwaresecuritymodulegroup
"""
RETURN = '''
obj:
description: HardwareSecurityModuleGroup (api/hardwaresecuritymodulegroup) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, avi_ansible_api, HAS_AVI)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
hsm=dict(type='dict', required=True),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'hardwaresecuritymodulegroup',
set([]))
if __name__ == '__main__':
main()
|
thaim/ansible
|
lib/ansible/modules/network/avi/avi_hardwaresecuritymodulegroup.py
|
Python
|
mit
| 3,663
|
[
"VisIt"
] |
622a2448c86cf66dd469f73397a2b90d25afaae9e5fa46405cb1e7f927c9c692
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Initialization of LFPy, a Python module for simulating
extracellular potentials.
Group of Computational Neuroscience,
Department of Mathematical Sciences and Technology,
Norwegian University of Life Sciences.
Copyright (C) 2012 Computational Neuroscience Group, NMBU.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
:Classes:
* ``Cell`` - object built on top of NEURON representing biological neuron
* ``TemplateCell`` - Similar to ``Cell``, but for models using cell templates
* ``NetworkCell`` - Similar to ``TemplateCell`` with some attributes and
methods for spike communication between parallel RANKs
* ``PointProcess`` - Parent class of ``Synapse`` and ``StimIntElectrode``
* ``Synapse`` - Convenience class for inserting synapses onto ``Cell``
objects
* ``StimIntElectrode`` - Convenience class for inserting stimulating
electrodes into ``Cell`` objects
* ``Network`` - Class for creating distributed populations of cells and
handling connections between cells in populations
* ``NetworkPopulation`` - Class representing group of ``Cell`` objects
distributed across MPI RANKs
* ``RecExtElectrode`` - Class for setup of simulations of extracellular
potentials
* ``RecMEAElectrode`` - Class for setup of simulations of in vitro (slice)
extracellular potentials
* ``PointSourcePotential`` - Base forward-model for extracellular potentials
assuming point current sources in conductive media
* ``LineSourcePotential`` - Base forward-model for extracellular potentials
assuming line current sources in conductive media
* ``OneSphereVolumeConductor`` - For computing extracellular potentials
within and outside a homogeneous sphere
* ``CurrentDipoleMoment`` - For computing the current dipole moment,
* ``FourSphereVolumeConductor`` - For computing extracellular potentials in
four-sphere head model (brain, CSF, skull, scalp)
* ``InfiniteVolumeConductor`` - To compute extracellular potentials with
current dipoles in infinite volume conductor
* ``MEG`` - Class for computing magnetic field from current dipole moment
:Modules:
* ``lfpcalc`` - Misc. functions used by RecExtElectrode class
* ``tools`` - Some convenient functions
* ``inputgenerators`` - Functions for synaptic input time generation
* ``eegmegcalc`` - Classes for calculating current dipole moment vector
P and P_tot from currents and distances.
* ``run_simulations`` - Functions to run NEURON simulations
"""
from .version import version as __version__
from .pointprocess import Synapse, PointProcess, StimIntElectrode
from lfpykit import RecExtElectrode, RecMEAElectrode, CurrentDipoleMoment, \
PointSourcePotential, LineSourcePotential, OneSphereVolumeConductor, \
LaminarCurrentSourceDensity, VolumetricCurrentSourceDensity
from .cell import Cell
from .templatecell import TemplateCell
from .network import NetworkCell, NetworkPopulation, Network
from .test import _test as run_tests
from .eegmegcalc import FourSphereVolumeConductor, InfiniteVolumeConductor, \
MEG, InfiniteHomogeneousVolCondMEG, SphericallySymmetricVolCondMEG, \
NYHeadModel
from lfpykit import lfpcalc
from . import tools
from . import inputgenerators
from . import run_simulation
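# A minimal usage sketch (hypothetical file name; assumes NEURON and a
# morphology file 'cell.hoc' are available):
#
#   import numpy as np
#   import LFPy
#   cell = LFPy.Cell(morphology='cell.hoc')
#   syn = LFPy.Synapse(cell, idx=0, syntype='ExpSyn', weight=0.001)
#   syn.set_spike_times(np.array([10.]))
#   cell.simulate()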
|
LFPy/LFPy
|
LFPy/__init__.py
|
Python
|
gpl-3.0
| 3,715
|
[
"NEURON"
] |
de8143601748f1719b0076ce0090d5526623c86798ae85b49798138b78a266c5
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: kubevirt_vm
short_description: Manage KubeVirt virtual machine
description:
- Use Openshift Python SDK to manage the state of KubeVirt virtual machines.
version_added: "2.8"
author: KubeVirt Team (@kubevirt)
options:
state:
description:
- Set the virtual machine to either I(present), I(absent), I(running) or I(stopped).
- "I(present) - Create or update a virtual machine. (And run it if it's ephemeral.)"
- "I(absent) - Remove a virtual machine."
- "I(running) - Create or update a virtual machine and run it."
- "I(stopped) - Stop a virtual machine. (This deletes ephemeral VMs.)"
default: "present"
choices:
- present
- absent
- running
- stopped
type: str
name:
description:
- Name of the virtual machine.
required: true
type: str
namespace:
description:
- Namespace where the virtual machine exists.
required: true
type: str
ephemeral:
description:
            - If I(true), an ephemeral virtual machine will be created. When destroyed it won't be accessible again.
- Works only with C(state) I(present) and I(absent).
type: bool
default: false
datavolumes:
description:
- "DataVolumes are a way to automate importing virtual machine disks onto pvcs during the virtual machine's
launch flow. Without using a DataVolume, users have to prepare a pvc with a disk image before assigning
it to a VM or VMI manifest. With a DataVolume, both the pvc creation and import is automated on behalf of the user."
type: list
template:
description:
- "Name of Template to be used in creation of a virtual machine."
type: str
template_parameters:
description:
- "New values of parameters from Template."
type: dict
extends_documentation_fragment:
- k8s_auth_options
- kubevirt_vm_options
- kubevirt_common_options
requirements:
- python >= 2.7
- openshift >= 0.8.2
'''
EXAMPLES = '''
- name: Start virtual machine 'myvm'
kubevirt_vm:
state: running
name: myvm
namespace: vms
- name: Create virtual machine 'myvm' and start it
kubevirt_vm:
state: running
name: myvm
namespace: vms
memory: 64Mi
cpu_cores: 1
bootloader: efi
smbios_uuid: 5d307ca9-b3ef-428c-8861-06e72d69f223
cpu_model: Conroe
headless: true
hugepage_size: 2Mi
tablets:
- bus: virtio
name: tablet1
cpu_limit: 3
cpu_shares: 2
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
path: /custom-disk/cirros.img
disk:
bus: virtio
- name: Create virtual machine 'myvm' with multus network interface
kubevirt_vm:
name: myvm
namespace: vms
memory: 512M
interfaces:
- name: default
bridge: {}
network:
pod: {}
- name: mynet
bridge: {}
network:
multus:
networkName: mynetconf
- name: Combine inline definition with Ansible parameters
kubevirt_vm:
# Kubernetes specification:
definition:
metadata:
labels:
app: galaxy
service: web
origin: vmware
# Ansible parameters:
state: running
name: myvm
namespace: vms
memory: 64M
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
path: /custom-disk/cirros.img
disk:
bus: virtio
- name: Start ephemeral virtual machine 'myvm' and wait to be running
kubevirt_vm:
ephemeral: true
state: running
wait: true
wait_timeout: 180
name: myvm
namespace: vms
memory: 64M
labels:
kubevirt.io/vm: myvm
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
path: /custom-disk/cirros.img
disk:
bus: virtio
- name: Start fedora vm with cloud init
kubevirt_vm:
state: running
wait: true
name: myvm
namespace: vms
memory: 1024M
cloud_init_nocloud:
userData: |-
#cloud-config
password: fedora
chpasswd: { expire: False }
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/fedora-cloud-container-disk-demo:latest
path: /disk/fedora.qcow2
disk:
bus: virtio
- name: Create virtual machine with datavolume
kubevirt_vm:
name: myvm
namespace: default
memory: 1024Mi
datavolumes:
- name: mydv
source:
http:
url: https://url/disk.qcow2
pvc:
accessModes:
- ReadWriteOnce
storage: 5Gi
- name: Remove virtual machine 'myvm'
kubevirt_vm:
state: absent
name: myvm
namespace: vms
'''
RETURN = '''
kubevirt_vm:
description:
- The virtual machine dictionary specification returned by the API.
- "This dictionary contains all values returned by the KubeVirt API all options
are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachine)"
returned: success
type: complex
contains: {}
'''
import copy
import traceback
from ansible.module_utils.k8s.common import AUTH_ARG_SPEC
from ansible.module_utils.kubevirt import (
virtdict,
KubeVirtRawModule,
VM_COMMON_ARG_SPEC,
VM_SPEC_DEF_ARG_SPEC
)
VM_ARG_SPEC = {
'ephemeral': {'type': 'bool', 'default': False},
'state': {
'type': 'str',
'choices': [
'present', 'absent', 'running', 'stopped'
],
'default': 'present'
},
'datavolumes': {'type': 'list'},
'template': {'type': 'str'},
'template_parameters': {'type': 'dict'},
}
# Which params (can) modify 'spec:' contents of a VM:
VM_SPEC_PARAMS = list(VM_SPEC_DEF_ARG_SPEC.keys()) + ['datavolumes', 'template', 'template_parameters']
class KubeVirtVM(KubeVirtRawModule):
@property
def argspec(self):
""" argspec property builder """
argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
argument_spec.update(VM_COMMON_ARG_SPEC)
argument_spec.update(VM_ARG_SPEC)
return argument_spec
@staticmethod
def fix_serialization(obj):
if obj and hasattr(obj, 'to_dict'):
return obj.to_dict()
return obj
def _wait_for_vmi_running(self):
for event in self._kind_resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
entity = event['object']
if entity.metadata.name != self.name:
continue
status = entity.get('status', {})
phase = status.get('phase', None)
if phase == 'Running':
return entity
self.fail("Timeout occurred while waiting for virtual machine to start. Maybe try a higher wait_timeout value?")
def _wait_for_vm_state(self, new_state):
if new_state == 'running':
want_created = want_ready = True
else:
want_created = want_ready = False
for event in self._kind_resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
entity = event['object']
if entity.metadata.name != self.name:
continue
status = entity.get('status', {})
created = status.get('created', False)
ready = status.get('ready', False)
if (created, ready) == (want_created, want_ready):
return entity
self.fail("Timeout occurred while waiting for virtual machine to achieve '{0}' state. "
"Maybe try a higher wait_timeout value?".format(new_state))
def manage_vm_state(self, new_state, already_changed):
new_running = True if new_state == 'running' else False
changed = False
k8s_obj = {}
if not already_changed:
k8s_obj = self.get_resource(self._kind_resource)
if not k8s_obj:
self.fail("VirtualMachine object disappeared during module operation, aborting.")
if k8s_obj.spec.get('running', False) == new_running:
return False, k8s_obj
newdef = dict(metadata=dict(name=self.name, namespace=self.namespace), spec=dict(running=new_running))
k8s_obj, err = self.patch_resource(self._kind_resource, newdef, k8s_obj,
self.name, self.namespace, merge_type='merge')
if err:
self.fail_json(**err)
else:
changed = True
if self.params.get('wait'):
k8s_obj = self._wait_for_vm_state(new_state)
return changed, k8s_obj
def _process_template_defaults(self, proccess_template, processedtemplate, defaults):
def set_template_default(default_name, default_name_index, definition_spec):
default_value = proccess_template['metadata']['annotations'][default_name]
if default_value:
values = definition_spec[default_name_index]
default_values = [d for d in values if d.get('name') == default_value]
defaults[default_name_index] = default_values
if definition_spec[default_name_index] is None:
definition_spec[default_name_index] = []
definition_spec[default_name_index].extend([d for d in values if d.get('name') != default_value])
devices = processedtemplate['spec']['template']['spec']['domain']['devices']
spec = processedtemplate['spec']['template']['spec']
set_template_default('defaults.template.cnv.io/disk', 'disks', devices)
set_template_default('defaults.template.cnv.io/volume', 'volumes', spec)
set_template_default('defaults.template.cnv.io/nic', 'interfaces', devices)
set_template_default('defaults.template.cnv.io/network', 'networks', spec)
def construct_definition(self, kind, our_state, ephemeral):
definition = virtdict()
processedtemplate = {}
# Construct the API object definition:
defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}
vm_template = self.params.get('template')
if vm_template:
# Find the template the VM should be created from:
template_resource = self.client.resources.get(api_version='template.openshift.io/v1', kind='Template', name='templates')
proccess_template = template_resource.get(name=vm_template, namespace=self.params.get('namespace'))
# Set proper template values taken from module option 'template_parameters':
for k, v in self.params.get('template_parameters', {}).items():
for parameter in proccess_template.parameters:
if parameter.name == k:
parameter.value = v
            # Process the template:
processedtemplates_res = self.client.resources.get(api_version='template.openshift.io/v1', kind='Template', name='processedtemplates')
processedtemplate = processedtemplates_res.create(proccess_template.to_dict()).to_dict()['objects'][0]
# Process defaults of the template:
self._process_template_defaults(proccess_template, processedtemplate, defaults)
if not ephemeral:
definition['spec']['running'] = our_state == 'running'
template = definition if ephemeral else definition['spec']['template']
template['metadata']['labels']['vm.cnv.io/name'] = self.params.get('name')
dummy, definition = self.construct_vm_definition(kind, definition, template, defaults)
return dict(self.merge_dicts(definition, processedtemplate))
def execute_module(self):
# Parse parameters specific to this module:
ephemeral = self.params.get('ephemeral')
k8s_state = our_state = self.params.get('state')
kind = 'VirtualMachineInstance' if ephemeral else 'VirtualMachine'
_used_params = [name for name in self.params if self.params[name] is not None]
# Is 'spec:' getting changed?
vm_spec_change = True if set(VM_SPEC_PARAMS).intersection(_used_params) else False
changed = False
crud_executed = False
method = ''
# Underlying module_utils/k8s/* code knows only of state == present/absent; let's make sure not to confuse it
if ephemeral:
# Ephemerals don't actually support running/stopped; we treat those as aliases for present/absent instead
if our_state == 'running':
self.params['state'] = k8s_state = 'present'
elif our_state == 'stopped':
self.params['state'] = k8s_state = 'absent'
else:
if our_state != 'absent':
self.params['state'] = k8s_state = 'present'
self.client = self.get_api_client()
self._kind_resource = self.find_supported_resource(kind)
k8s_obj = self.get_resource(self._kind_resource)
if not self.check_mode and not vm_spec_change and k8s_state != 'absent' and not k8s_obj:
self.fail("It's impossible to create an empty VM or change state of a non-existent VM.")
# Changes in VM's spec or any changes to VMIs warrant a full CRUD, the latter because
# VMIs don't really have states to manage; they're either present or don't exist
# Also check_mode always warrants a CRUD, as that'll produce a sane result
if vm_spec_change or (ephemeral and vm_spec_change) or k8s_state == 'absent' or self.check_mode:
definition = self.construct_definition(kind, our_state, ephemeral)
result = self.execute_crud(kind, definition)
changed = result['changed']
k8s_obj = result['result']
method = result['method']
crud_executed = True
if ephemeral and self.params.get('wait') and k8s_state == 'present' and not self.check_mode:
# Waiting for k8s_state==absent is handled inside execute_crud()
k8s_obj = self._wait_for_vmi_running()
if not ephemeral and our_state in ['running', 'stopped'] and not self.check_mode:
# State==present/absent doesn't involve any additional VMI state management and is fully
# handled inside execute_crud() (including wait logic)
patched, k8s_obj = self.manage_vm_state(our_state, crud_executed)
changed = changed or patched
if changed:
method = method or 'patch'
# Return from the module:
self.exit_json(**{
'changed': changed,
'kubevirt_vm': self.fix_serialization(k8s_obj),
'method': method
})
def main():
module = KubeVirtVM()
try:
module.execute_module()
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
|
albertomurillo/ansible
|
lib/ansible/modules/cloud/kubevirt/kubevirt_vm.py
|
Python
|
gpl-3.0
| 15,977
|
[
"Galaxy"
] |
262565ff26d635f0dc001f5c784e2e5f993a9b16a929b5a27787653a08ff9dde
|
# Made by disKret, as a part of the
# Official L2J Datapack Project, please visit
# http://forum.l2jdp.com to meet the community behind it, or
# http://l2jdp.com/trac if you need to report a bug.
import sys
from com.l2scoria import Config
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "39_RedEyedInvaders"
#NPC
BABENCO = 30334
BATHIS = 30332
#MOBS
M_LIZARDMAN = 20919
M_LIZARDMAN_SCOUT = 20920
M_LIZARDMAN_GUARD = 20921
ARANEID = 20925
#QUEST DROPS
BLACK_BONE_NECKLACE,RED_BONE_NECKLACE,INCENSE_POUCH,GEM_OF_MAILLE = range(7178,7182)
NECKLACE={M_LIZARDMAN_GUARD:[RED_BONE_NECKLACE,100,BLACK_BONE_NECKLACE,"3"],
M_LIZARDMAN:[BLACK_BONE_NECKLACE,100,RED_BONE_NECKLACE,"3"],
M_LIZARDMAN_SCOUT:[BLACK_BONE_NECKLACE,100,RED_BONE_NECKLACE,"3"]
}
DROPLIST={ARANEID:[GEM_OF_MAILLE,30,INCENSE_POUCH,"5"],
M_LIZARDMAN_GUARD:[INCENSE_POUCH,30,GEM_OF_MAILLE,"5"],
M_LIZARDMAN_SCOUT:[INCENSE_POUCH,30,GEM_OF_MAILLE,"5"]
}
#REWARDS
GREEN_COLORED_LURE_HG = 6521
BABY_DUCK_RODE = 6529
FISHING_SHOT_NG = 6535
def drop(partyMember,array) :
item,max,item2,condition = array
st = partyMember.getQuestState(qn)
count = st.getQuestItemsCount(item)
numItems,chance = divmod(100*Config.RATE_QUESTS_REWARD,100)
if st.getRandom(100) < chance :
numItems = numItems + 1
if count+numItems > max :
numItems = max - count
st.giveItems(item,int(numItems))
if st.getQuestItemsCount(item) == max and st.getQuestItemsCount(item2) == max:
st.playSound("ItemSound.quest_middle")
st.set("cond",condition)
else:
st.playSound("ItemSound.quest_itemget")
return
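# For example, with Config.RATE_QUESTS_REWARD == 1.5 the divmod above yields
# numItems == 1 and chance == 50, i.e. one guaranteed quest item plus a 50%
# chance of a second one per kill (capped at the listed maximum count).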
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onEvent (self,event,st) :
htmltext = event
cond = st.getInt("cond")
if st.getState() != COMPLETED :
if event == "30334-1.htm" and cond == 0 :
st.set("cond","1")
st.setState(STARTED)
st.playSound("ItemSound.quest_accept")
elif event == "30332-1.htm" and cond == 1 :
st.set("cond","2")
elif event == "30332-3.htm" :
if st.getQuestItemsCount(BLACK_BONE_NECKLACE) == st.getQuestItemsCount(RED_BONE_NECKLACE) == 100 and cond == 3:
st.takeItems(BLACK_BONE_NECKLACE,100)
st.takeItems(RED_BONE_NECKLACE,100)
st.set("cond","4")
else :
htmltext = "You don't have required items"
elif event == "30332-5.htm" :
if st.getQuestItemsCount(INCENSE_POUCH) == st.getQuestItemsCount(GEM_OF_MAILLE) == 30 and cond == 5 :
st.takeItems(INCENSE_POUCH,30)
st.takeItems(GEM_OF_MAILLE,30)
st.giveItems(GREEN_COLORED_LURE_HG,60)
st.giveItems(BABY_DUCK_RODE,1)
st.giveItems(FISHING_SHOT_NG,500)
st.unset("cond")
st.playSound("ItemSound.quest_finish")
st.setState(COMPLETED)
else :
htmltext = "You don't have required items"
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
cond = st.getInt("cond")
if id == COMPLETED :
htmltext = "<html><body>This quest has already been completed.</body></html>"
elif npcId == BABENCO :
if id == CREATED :
if player.getLevel() >= 20 :
htmltext = "30334-0.htm"
else :
st.exitQuest(1)
htmltext = "30334-2.htm"
else :
htmltext = "30334-3.htm"
elif npcId == BATHIS and id == STARTED:
if cond == 1 :
htmltext = "30332-0.htm"
elif st.getQuestItemsCount(BLACK_BONE_NECKLACE) == st.getQuestItemsCount(RED_BONE_NECKLACE) == 100 :
htmltext = "30332-2.htm"
elif st.getQuestItemsCount(INCENSE_POUCH) == st.getQuestItemsCount(GEM_OF_MAILLE) == 30 :
htmltext = "30332-4.htm"
return htmltext
def onKill(self,npc,player,isPet):
npcId = npc.getNpcId()
partyMember = self.getRandomPartyMember(player,"2")
if (partyMember and npcId != ARANEID) :
drop(partyMember,NECKLACE[npcId])
else:
partyMember = self.getRandomPartyMember(player,"4")
if (partyMember and npcId != M_LIZARDMAN) :
drop(partyMember,DROPLIST[npcId])
return
QUEST = Quest(39,qn,"Red Eyed Invaders")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(BABENCO)
QUEST.addTalkId(BABENCO)
QUEST.addTalkId(BATHIS)
QUEST.addKillId(M_LIZARDMAN)
QUEST.addKillId(M_LIZARDMAN_SCOUT)
QUEST.addKillId(M_LIZARDMAN_GUARD)
QUEST.addKillId(ARANEID)
for item in range(7178,7182) :
STARTED.addQuestDrop(BABENCO,item,1)
|
zenn1989/scoria-interlude
|
L2Jscoria-Game/data/scripts/quests/39_RedEyedInvaders/__init__.py
|
Python
|
gpl-3.0
| 4,946
|
[
"VisIt"
] |
6cf9e05926e588343ee1675f160f476fa23fec0e142b266593838653f3622f3f
|
"""
=============================
Generic SpectralModel wrapper
=============================
.. moduleauthor:: Adam Ginsburg <adam.g.ginsburg@gmail.com>
"""
import numpy as np
from pyspeckit.mpfit import mpfit,mpfitException
from pyspeckit.spectrum.parinfo import ParinfoList,Parinfo
import copy
from astropy import log
import matplotlib.cbook as mpcb
from . import fitter
from . import mpfit_messages
from pyspeckit.specwarnings import warn
import itertools
import operator
from astropy.extern import six
try:
    from collections import OrderedDict
except ImportError:
    try:
        from ordereddict import OrderedDict
    except ImportError:
        warn("OrderedDict is required for modeling. "
             "If you have python <2.7, install the ordereddict module.")
# define the allowed guess types and the order in which they are received
valid_guess_types = ('amplitude', 'center', 'width')
class SpectralModel(fitter.SimpleFitter):
"""
    A wrapper class for a spectral model. Includes internal functions to
generate multi-component models, annotations, integrals, and individual
components. The declaration can be complex, since you should name
individual variables, set limits on them, set the units the fit will be
performed in, and set the annotations to be used. Check out some
of the hyperfine codes (hcn, n2hp) for examples.
"""
def __init__(self, modelfunc, npars,
shortvarnames=("A","\\Delta x","\\sigma"),
fitunit=None,
centroid_par=None,
fwhm_func=None,
fwhm_pars=None,
integral_func=None,
use_lmfit=False,
guess_types=('amplitude', 'center', 'width'),
**kwargs):
"""
Spectral Model Initialization
Create a Spectral Model class for data fitting
Parameters
----------
modelfunc : function
the model function to be fitted. Should take an X-axis
(spectroscopic axis) as an input followed by input parameters.
Returns an array with the same shape as the input X-axis
npars : int
number of parameters required by the model
parnames : list (optional)
a list or tuple of the parameter names
parvalues : list (optional)
the initial guesses for the input parameters (defaults to ZEROS)
parlimits : list (optional)
the upper/lower limits for each variable (defaults to ZEROS)
parfixed : list (optional)
Can declare any variables to be fixed (defaults to ZEROS)
parerror : list (optional)
technically an output parameter. Specifying it here will have no
effect. (defaults to ZEROS)
partied : list (optional)
not the past tense of party. Can declare, via text, that
some parameters are tied to each other. Defaults to zeros like the
others, but it's not clear if that's a sensible default
fitunit : str (optional)
convert X-axis to these units before passing to model
parsteps : list (optional)
            minimum step size for each parameter (defaults to ZEROS)
npeaks : list (optional)
default number of peaks to assume when fitting (can be overridden)
shortvarnames : list (optional)
TeX names of the variables to use when annotating
        guess_types : tuple
            A tuple listing the types of the different parameters when guessing.
            The valid values are 'amplitude', 'width', and 'center'. These are
            handled by parse_3par_guesses, which translates them into input
            guess lists for the fitter. For a "standard" 3-parameter Gaussian
            fitter, nothing changes, but for other models that have more than
            3 parameters, some translation is needed.
Returns
-------
        When the instance is called as a fitter, it returns a tuple of
        (model best-fit parameters, the model, parameter errors, chi^2 value)
"""
self.modelfunc = modelfunc
if self.__doc__ is None:
self.__doc__ = modelfunc.__doc__
elif modelfunc.__doc__ is not None:
self.__doc__ += modelfunc.__doc__
self.npars = npars
self.default_npars = npars
self.fitunit = fitunit
# this needs to be set once only
self.shortvarnames = shortvarnames
self.default_parinfo = None
self.default_parinfo, kwargs = self._make_parinfo(**kwargs)
self.parinfo = copy.copy(self.default_parinfo)
self.modelfunc_kwargs = kwargs
self.use_lmfit = use_lmfit
# default name of parameter that represents the profile centroid
self.centroid_par = centroid_par
# FWHM function and parameters
self.fwhm_func = fwhm_func
self.fwhm_pars = fwhm_pars
# analytic integral function
self.integral_func = integral_func
for gt in guess_types:
if not isinstance(gt, float) and not any(g in gt for g in valid_guess_types):
raise ValueError("Guess type must be one of {0} or a float"
.format(valid_guess_types))
self.guess_types = guess_types
def __copy__(self):
# http://stackoverflow.com/questions/1500718/what-is-the-right-way-to-override-the-copy-deepcopy-operations-on-an-object-in-p
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
return result
def __call__(self, *args, **kwargs):
        use_lmfit = kwargs.pop('use_lmfit', self.use_lmfit)
if use_lmfit:
return self.lmfitter(*args,**kwargs)
return self.fitter(*args,**kwargs)
@property
def npeaks(self):
return int(self._npeaks)
@npeaks.setter
def npeaks(self, value):
if int(value) != value:
raise ValueError("npeaks must be an integer")
self._npeaks = int(value)
def make_parinfo(self, **kwargs):
return self._make_parinfo(**kwargs)[0]
def _make_parinfo(self, params=None, parnames=None, parvalues=None,
parlimits=None, parlimited=None, parfixed=None,
parerror=None, partied=None, fitunit=None,
parsteps=None, npeaks=1, parinfo=None, names=None,
values=None, limits=None, limited=None, fixed=None,
error=None, tied=None, steps=None, negamp=None,
limitedmin=None, limitedmax=None, minpars=None,
maxpars=None, vheight=False, debug=False, **kwargs):
"""
Generate a `ParinfoList` that matches the inputs
This code is complicated - it can take inputs in a variety of different
forms with different priority. It will return a `ParinfoList` (and
therefore must have values within parameter ranges)
"""
# for backwards compatibility - partied = tied, etc.
locals_dict = locals()
for varname in str.split("parnames,parvalues,parsteps,parlimits,parlimited,parfixed,parerror,partied",","):
shortvarname = varname.replace("par","")
if locals_dict.get(shortvarname) is not None and locals_dict.get(varname) is not None:
raise ValueError("Cannot specify both {0} and {1}".format(varname, shortvarname))
input_pardict = {k: locals_dict.get(k)
for k in str.split("parnames,parvalues,parsteps,parlimits,parlimited,parfixed,parerror,partied",",")}
_tip = {'par'+k: locals_dict.get(k)
for k in str.split("names,values,steps,limits,limited,fixed,error,tied",",")
if locals_dict.get(k)
}
input_pardict.update(_tip)
if params is not None and parvalues is not None:
raise ValueError("parvalues and params both specified; they're redundant so that's not allowed.")
elif params is not None and parvalues is None:
input_pardict['parvalues'] = params
log.debug("Parvalues = {0}, npeaks = {1}".format(input_pardict['parvalues'], npeaks))
# this is used too many damned times to keep referencing a dict.
parnames = input_pardict['parnames']
parlimited = input_pardict['parlimited']
parlimits = input_pardict['parlimits']
parvalues = input_pardict['parvalues']
if parnames is not None:
self.parnames = parnames
elif parnames is None and hasattr(self,'parnames') and self.parnames is not None:
parnames = self.parnames
elif self.default_parinfo is not None and parnames is None:
parnames = [p['parname'] for p in self.default_parinfo]
input_pardict['parnames'] = parnames
assert input_pardict['parnames'] is not None
if limitedmin is not None:
if limitedmax is not None:
parlimited = list(zip(limitedmin,limitedmax))
else:
parlimited = list(zip(limitedmin,(False,)*len(parnames)))
elif limitedmax is not None:
parlimited = list(zip((False,)*len(parnames),limitedmax))
elif self.default_parinfo is not None and parlimited is None:
parlimited = [p['limited'] for p in self.default_parinfo]
input_pardict['parlimited'] = parlimited
if minpars is not None:
if maxpars is not None:
parlimits = list(zip(minpars,maxpars))
else:
parlimits = list(zip(minpars,(False,)*len(parnames)))
elif maxpars is not None:
parlimits = list(zip((False,)*len(parnames),maxpars))
elif limits is not None:
parlimits = limits
elif self.default_parinfo is not None and parlimits is None:
parlimits = [p['limits'] for p in self.default_parinfo]
input_pardict['parlimits'] = parlimits
self.npeaks = int(npeaks)
# the height / parvalue popping needs to be done before the temp_pardict is set in order to make sure
# that the height guess isn't assigned to the amplitude
self.vheight = vheight
if ((vheight and len(self.parinfo) == self.default_npars and
len(parvalues) == self.default_npars + 1)):
# if the right number of parameters are passed, the first is the height
self.parinfo = [{'n':0, 'value':parvalues.pop(0), 'limits':(0,0),
'limited': (False,False), 'fixed':False, 'parname':'HEIGHT',
'error': 0, 'tied':""}]
elif vheight and len(self.parinfo) == self.default_npars and len(parvalues) == self.default_npars:
# if you're one par short, guess zero
self.parinfo = [{
'n':0, 'value': 0, 'limits':(0,0), 'limited': (False,False),
'fixed':False, 'parname':'HEIGHT', 'error': 0, 'tied':""
}]
elif vheight and len(self.parinfo) == self.default_npars+1 and len(parvalues) == self.default_npars+1:
# the right numbers are passed *AND* there is already a height param
self.parinfo = [{
'n':0, 'value':parvalues.pop(0), 'limits':(0,0),
'limited': (False,False), 'fixed': False, 'parname':'HEIGHT',
'error': 0, 'tied':""
}]
#heightparnum = (i for i,s in self.parinfo if 'HEIGHT' in s['parname'])
#for hpn in heightparnum:
# self.parinfo[hpn]['value'] = parvalues[0]
elif vheight:
raise ValueError('VHEIGHT is specified but a case was found that did not allow it to be included.')
else:
self.parinfo = []
log.debug("After VHEIGHT parse len(parinfo): %i vheight: %s" % (len(self.parinfo), vheight))
# this is a clever way to turn the parameter lists into a dict of lists
# clever = hard to read
temp_pardict = OrderedDict([(varname, np.zeros(self.npars*self.npeaks,
dtype='bool'))
if input_pardict.get(varname) is None else
(varname, list(input_pardict.get(varname)))
for varname in str.split("parnames,parvalues,parsteps,parlimits,parlimited,parfixed,parerror,partied",",")])
temp_pardict['parlimits'] = parlimits if parlimits is not None else [(0,0)] * (self.npars*self.npeaks)
temp_pardict['parlimited'] = parlimited if parlimited is not None else [(False,False)] * (self.npars*self.npeaks)
for k,v in temp_pardict.items():
if (self.npars*self.npeaks) / len(v) > 1:
n_components = ((self.npars*self.npeaks) / len(v))
if n_components != int(n_components):
raise ValueError("The number of parameter values is not a "
"multiple of the number of allowed "
"parameters.")
temp_pardict[k] = list(v) * int(n_components)
# generate the parinfo dict
# note that 'tied' must be a blank string (i.e. ""), not False, if it is not set
# parlimited, parfixed, and parlimits are all two-element items (tuples or lists)
self.parinfo += [{'n':ii+self.npars*jj+vheight,
'value':float(temp_pardict['parvalues'][ii+self.npars*jj]),
'step':temp_pardict['parsteps'][ii+self.npars*jj],
'limits':temp_pardict['parlimits'][ii+self.npars*jj],
'limited':temp_pardict['parlimited'][ii+self.npars*jj],
'fixed':temp_pardict['parfixed'][ii+self.npars*jj],
'parname':temp_pardict['parnames'][ii].upper()+"%0i" % int(jj),
'error':float(temp_pardict['parerror'][ii+self.npars*jj]),
'tied':temp_pardict['partied'][ii+self.npars*jj] if temp_pardict['partied'][ii+self.npars*jj] else ""}
for jj in range(self.npeaks)
for ii in range(self.npars) ] # order matters!
log.debug("After Generation step len(parinfo): %i vheight: %s "
"parinfo: %s" % (len(self.parinfo), vheight, self.parinfo))
        if debug > 1:
import pdb; pdb.set_trace()
# special keyword to specify emission/absorption lines
if negamp is not None:
if negamp:
for p in self.parinfo:
if 'AMP' in p['parname']:
p['limited'] = (p['limited'][0], True)
p['limits'] = (p['limits'][0], 0)
else:
for p in self.parinfo:
if 'AMP' in p['parname']:
p['limited'] = (True, p['limited'][1])
p['limits'] = (0, p['limits'][1])
# This is effectively an override of all that junk above (3/11/2012)
# Much of it is probably unnecessary, but it was easier to do this than
# rewrite the above
self.parinfo = ParinfoList([Parinfo(p) for p in self.parinfo])
# New feature: scaleability
for par in self.parinfo:
if par.parname.lower().strip('0123456789') in ('amplitude','amp'):
par.scaleable = True
log.debug("Parinfo has been set: {0}".format(self.parinfo))
log.debug("kwargs {0} were passed.".format(kwargs))
assert self.parinfo != []
return self.parinfo, kwargs
def n_modelfunc(self, pars=None, debug=False, **kwargs):
"""
Simple wrapper to deal with N independent peaks for a given spectral model
"""
if pars is None:
pars = self.parinfo
elif not isinstance(pars, ParinfoList):
try:
partemp = copy.copy(self.parinfo)
partemp._from_Parameters(pars)
pars = partemp
except AttributeError:
log.log(5, "Reading pars {0} as LMPar failed.".format(pars))
if debug > 1:
import pdb; pdb.set_trace()
if hasattr(pars,'values'):
# important to treat as Dictionary, since lmfit params & parinfo both have .items
parnames,parvals = list(zip(*list(pars.items())))
parnames = [p.lower() for p in parnames]
parvals = [p.value for p in parvals]
else:
parvals = list(pars)
if np.any(np.isnan(parvals)):
raise ValueError("A parameter is NaN. Unless you gave a NaN "
"value directly, this is a bug and should be "
"reported. If you specified a NaN parameter, "
"don't do that.")
log.debug("pars to n_modelfunc: {0}, parvals:{1}".format(pars, parvals))
def L(x):
v = np.zeros(len(x))
if self.vheight:
v += parvals[0]
# use len(pars) instead of self.npeaks because we want this to work
# independent of the current best fit
for jj in range(int((len(parvals)-self.vheight)/self.npars)):
lower_parind = jj*self.npars+self.vheight
upper_parind = (jj+1)*self.npars+self.vheight
v += self.modelfunc(x, *parvals[lower_parind:upper_parind], **kwargs)
return v
return L
def mpfitfun(self,x,y,err=None):
"""
        Wrapper function to compute the fit residuals in an mpfit-friendly
        format: the returned function maps a parameter vector to
        ``[status, weighted_residuals]``, as mpfit expects
"""
if err is None:
def f(p,fjac=None):
residuals = (y-self.n_modelfunc(p, **self.modelfunc_kwargs)(x))
return [0,residuals]
else:
def f(p,fjac=None):
residuals = (y-self.n_modelfunc(p, **self.modelfunc_kwargs)(x))/err
return [0,residuals]
return f
def lmfitfun(self,x,y,err=None,debug=False):
"""
Wrapper function to compute the fit residuals in an lmfit-friendly format
"""
def f(p):
#pars = [par.value for par in p.values()]
kwargs = {}
kwargs.update(self.modelfunc_kwargs)
log.debug("Pars, kwarg keys: {0},{1}".format(p,list(kwargs.keys())))
if err is None:
return (y-self.n_modelfunc(p,**kwargs)(x))
else:
return (y-self.n_modelfunc(p,**kwargs)(x))/err
return f
def lmfitter(self, xax, data, err=None, parinfo=None, quiet=True, debug=False, **kwargs):
"""
Use lmfit instead of mpfit to do the fitting
Parameters
----------
xax : SpectroscopicAxis
The X-axis of the spectrum
data : ndarray
The data to fit
err : ndarray (optional)
The error on the data. If unspecified, will be uniform unity
parinfo : ParinfoList
The guesses, parameter limits, etc. See
`pyspeckit.spectrum.parinfo` for details
quiet : bool
If false, print out some messages about the fitting
"""
try:
import lmfit
except ImportError as e:
raise ImportError( "Could not import lmfit, try using mpfit instead." )
self.xax = xax # the 'stored' xax is just a link to the original
if hasattr(xax,'convert_to_unit') and self.fitunit is not None:
# some models will depend on the input units. For these, pass in an X-axis in those units
# (gaussian, voigt, lorentz profiles should not depend on units. Ammonia, formaldehyde,
# H-alpha, etc. should)
xax = copy.copy(xax)
xax.convert_to_unit(self.fitunit, quiet=quiet)
elif self.fitunit is not None:
raise TypeError("X axis does not have a convert method")
        if err is None:
            # documented behavior: unspecified errors are uniform unity
            err = np.ones(np.asarray(data).shape)
        if np.any(np.isnan(data)) or np.any(np.isinf(data)):
            err[np.isnan(data) + np.isinf(data)] = np.inf
            data[np.isnan(data) + np.isinf(data)] = 0
if np.any(np.isnan(err)):
raise ValueError("One or more of the error values is NaN."
" This is not allowed. Errors can be infinite "
"(which is equivalent to giving zero weight to "
"a data point), but otherwise they must be positive "
"floats.")
elif np.any(err<0):
raise ValueError("At least one error value is negative, which is "
"not allowed as negative errors are not "
"meaningful in the optimization process.")
if parinfo is None:
parinfo, kwargs = self._make_parinfo(debug=debug, **kwargs)
log.debug("Parinfo created from _make_parinfo: {0}".format(parinfo))
LMParams = parinfo.as_Parameters()
log.debug("LMParams: "+"\n".join([repr(p) for p in list(LMParams.values())]))
log.debug("parinfo: {0}".format(parinfo))
minimizer = lmfit.minimize(self.lmfitfun(xax,np.array(data),err,debug=debug),LMParams,**kwargs)
if not quiet:
log.info("There were %i function evaluations" % (minimizer.nfev))
#modelpars = [p.value for p in parinfo.values()]
#modelerrs = [p.stderr for p in parinfo.values() if p.stderr is not None else 0]
self.LMParams = minimizer.params
self.parinfo._from_Parameters(self.LMParams)
log.debug("LMParams: {0}".format(self.LMParams))
log.debug("parinfo: {0}".format(parinfo))
self.mp = minimizer
self.mpp = self.parinfo.values
self.mpperr = self.parinfo.errors
self.mppnames = self.parinfo.names
modelkwargs = {}
modelkwargs.update(self.modelfunc_kwargs)
self.model = self.n_modelfunc(self.parinfo, **modelkwargs)(xax)
if hasattr(minimizer,'chisqr'):
chi2 = minimizer.chisqr
else:
try:
chi2 = (((data-self.model)/err)**2).sum()
except TypeError:
chi2 = ((data-self.model)**2).sum()
if np.isnan(chi2):
warn( "Warning: chi^2 is nan" )
if hasattr(self.mp,'ier') and self.mp.ier not in [1,2,3,4]:
log.warning("Fitter failed: %s, %s" % (self.mp.message, self.mp.lmdif_message))
return self.mpp,self.model,self.mpperr,chi2
def fitter(self, xax, data, err=None, quiet=True, veryverbose=False,
debug=False, parinfo=None, **kwargs):
"""
Run the fitter using mpfit.
kwargs will be passed to _make_parinfo and mpfit.
Parameters
----------
xax : SpectroscopicAxis
The X-axis of the spectrum
data : ndarray
The data to fit
err : ndarray (optional)
The error on the data. If unspecified, will be uniform unity
parinfo : ParinfoList
The guesses, parameter limits, etc. See
`pyspeckit.spectrum.parinfo` for details
quiet : bool
pass to mpfit. If False, will print out the parameter values for
each iteration of the fitter
veryverbose : bool
print out a variety of mpfit output parameters
debug : bool
raise an exception (rather than a warning) if chi^2 is nan
"""
if parinfo is None:
parinfo, kwargs = self._make_parinfo(debug=debug, **kwargs)
else:
log.debug("Using user-specified parinfo dict")
# clean out disallowed kwargs (don't want to pass them to mpfit)
#throwaway, kwargs = self._make_parinfo(debug=debug, **kwargs)
self.xax = xax # the 'stored' xax is just a link to the original
if hasattr(xax,'as_unit') and self.fitunit is not None:
# some models will depend on the input units. For these, pass in an X-axis in those units
# (gaussian, voigt, lorentz profiles should not depend on units. Ammonia, formaldehyde,
# H-alpha, etc. should)
xax = copy.copy(xax)
# xax.convert_to_unit(self.fitunit, quiet=quiet)
xax = xax.as_unit(self.fitunit, quiet=quiet, **kwargs)
elif self.fitunit is not None:
raise TypeError("X axis does not have a convert method")
        if err is None:
            # documented behavior: unspecified errors are uniform unity
            err = np.ones(np.asarray(data).shape)
        if np.any(np.isnan(data)) or np.any(np.isinf(data)):
            err[np.isnan(data) + np.isinf(data)] = np.inf
            data[np.isnan(data) + np.isinf(data)] = 0
if np.any(np.isnan(err)):
raise ValueError("One or more of the error values is NaN."
" This is not allowed. Errors can be infinite "
"(which is equivalent to giving zero weight to "
"a data point), but otherwise they must be positive "
"floats.")
elif np.any(err<0):
raise ValueError("At least one error value is negative, which is "
"not allowed as negative errors are not "
"meaningful in the optimization process.")
for p in parinfo: log.debug( p )
log.debug( "\n".join(["%s %i: tied: %s value: %s" % (p['parname'],p['n'],p['tied'],p['value']) for p in parinfo]) )
mp = mpfit(self.mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet,debug=debug,**kwargs)
mpp = mp.params
if mp.perror is not None: mpperr = mp.perror
else: mpperr = mpp*0
chi2 = mp.fnorm
if mp.status == 0:
if "parameters are not within PARINFO limits" in mp.errmsg:
log.warning( parinfo )
raise mpfitException(mp.errmsg)
for i,(p,e) in enumerate(zip(mpp,mpperr)):
self.parinfo[i]['value'] = p
self.parinfo[i]['error'] = e
if veryverbose:
log.info("Fit status: {0}".format(mp.status))
log.info("Fit error message: {0}".format(mp.errmsg))
log.info("Fit message: {0}".format(mpfit_messages[mp.status]))
for i,p in enumerate(mpp):
log.info("{0}: {1} +/- {2}".format(self.parinfo[i]['parname'],
p,mpperr[i]))
log.info("Chi2: {0} Reduced Chi2: {1} DOF:{2}".format(mp.fnorm,
mp.fnorm/(len(data)-len(mpp)),
len(data)-len(mpp)))
self.mp = mp
self.mpp = self.parinfo.values
self.mpperr = self.parinfo.errors
self.mppnames = self.parinfo.names
self.model = self.n_modelfunc(self.parinfo,**self.modelfunc_kwargs)(xax)
log.debug("Modelpars: {0}".format(self.mpp))
if np.isnan(chi2):
if debug:
raise ValueError("Error: chi^2 is nan")
else:
log.warning("Warning: chi^2 is nan")
return mpp,self.model,mpperr,chi2
def slope(self, xinp):
"""
Find the local slope of the model at location x
(x must be in xax's units)
"""
if hasattr(self, 'model'):
dm = np.diff(self.model)
# convert requested x to pixels
xpix = self.xax.x_to_pix(xinp)
dmx = np.average(dm[xpix-1:xpix+1])
if np.isfinite(dmx):
return dmx
else:
return 0
def annotations(self, shortvarnames=None, debug=False):
"""
Return a list of TeX-formatted labels
The values and errors are formatted so that only the significant digits
are displayed. Rounding is performed using the decimal package.
Parameters
----------
shortvarnames : list
A list of variable names (tex is allowed) to include in the
annotations. Defaults to self.shortvarnames
Examples
--------
>>> # Annotate a Gaussian
>>> sp.specfit.annotate(shortvarnames=['A','\\Delta x','\\sigma'])
"""
from decimal import Decimal # for formatting
svn = self.shortvarnames if shortvarnames is None else shortvarnames
# if pars need to be replicated....
if len(svn) < self.npeaks*self.npars:
svn = svn * self.npeaks
parvals = self.parinfo.values
parerrs = self.parinfo.errors
loop_list = [(parvals[ii+jj*self.npars+self.vheight],
parerrs[ii+jj*self.npars+self.vheight],
svn[ii+jj*self.npars],
self.parinfo.fixed[ii+jj*self.npars+self.vheight],
jj)
for jj in range(self.npeaks) for ii in range(self.npars)]
label_list = []
for (value, error, varname, fixed, varnumber) in loop_list:
log.debug(", ".join([str(x) for x in (value, error, varname, fixed, varnumber)]))
if fixed or error==0:
label = ("$%s(%i)$=%8s" % (varname,varnumber,
Decimal("%g" % value).quantize( Decimal("%0.6g" % (value)) )))
else:
label = ("$%s(%i)$=%8s $\\pm$ %8s" % (varname,varnumber,
Decimal("%g" % value).quantize( Decimal("%0.2g" % (min(np.abs([value,error])))) ),
Decimal("%g" % error).quantize(Decimal("%0.2g" % (error))),))
label_list.append(label)
labels = tuple(mpcb.flatten(label_list))
return labels
def components(self, xarr, pars, **kwargs):
"""
Return a numpy ndarray of shape [npeaks x modelshape] of the
independent components of the fits
"""
modelcomponents = np.array(
[self.modelfunc(xarr,
*pars[i*self.npars:(i+1)*self.npars],
**dict(list(self.modelfunc_kwargs.items())+list(kwargs.items())))
for i in range(self.npeaks)])
if len(modelcomponents.shape) == 3:
newshape = [modelcomponents.shape[0]*modelcomponents.shape[1], modelcomponents.shape[2]]
modelcomponents = np.reshape(modelcomponents, newshape)
return modelcomponents
def integral(self, modelpars, dx=None, **kwargs):
"""
Extremely simple integrator:
IGNORES modelpars;
just sums self.model
"""
if not hasattr(self,'model'):
raise ValueError("Must fit (or compute) a model before computing"
" its integral.")
if dx is not None:
return (self.model*dx).sum()
else:
return self.model.sum()
def analytic_integral(self, modelpars=None, npeaks=None, npars=None):
"""
Placeholder for analyic integrals; these must be defined for individual models
"""
if self.integral_func is None:
raise NotImplementedError("Analytic integrals must be implemented independently for each model type")
# all of these parameters are allowed to be overwritten
if modelpars is None:
modelpars = self.parinfo.values
if npeaks is None:
npeaks = self.npeaks
if npars is None:
npars = self.npars
return np.sum([
self.integral_func(modelpars[npars*ii:npars*(1+ii)])
for ii in range(npeaks)])
def component_integrals(self, xarr, dx=None):
"""
Compute the integrals of each component
"""
components = self.components(xarr, self.parinfo.values)
if dx is None:
dx = 1
integrals = [com.sum()*dx for com in components]
return integrals
def analytic_fwhm(self, parinfo=None):
"""
Return the FWHMa of the model components *if* a fwhm_func has been
defined
Done with incomprehensible list comprehensions instead of nested for
loops... readability sacrificed for speed and simplicity. This is
unpythonic.
"""
        if self.fwhm_func is None or self.fwhm_pars is None:
            raise TypeError("fwhm_func not implemented for model %s" %
                            self.__class__.__name__)
if parinfo is None:
parinfo = self.parinfo
fwhm = [self.fwhm_func(
*[self.parinfo[str.upper(p+'%i' % n)] for p in self.fwhm_pars]
)
for n in range(self.npeaks)]
return fwhm
def analytic_centroids(self, centroidpar=None):
"""
Return the *analytic* centroids of the model components
Parameters
----------
centroidpar : None or string
The name of the parameter in the fit that represents the centroid
*some models have default centroid parameters - these will be used
if centroidpar is unspecified*
Returns
-------
List of the centroid values (even if there's only 1)
"""
if centroidpar is None:
centroidpar = self.centroid_par
centr = [par.value
for par in self.parinfo
if str.upper(centroidpar) in par.parname]
return centr
def computed_centroid(self, xarr=None):
"""
Return the *computed* centroid of the model
Parameters
----------
xarr : None or np.ndarray
The X coordinates of the model over which the centroid should be
computed. If unspecified, the centroid will be in pixel units
"""
if not hasattr(self, 'model'):
raise ValueError("Must fit (or compute) a model before measuring "
"its centroid")
if xarr is None:
xarr = np.arange(self.model.size)
centr = (self.model*xarr).sum() / self.model.sum()
return centr
def logp(self, xarr, data, error, pars=None):
"""
Return the log probability of the model. If the parameter is out of
range, return -inf
"""
if pars is None:
pars = self.parinfo
else:
parinfo = copy.copy(self.parinfo)
for value,parameter in zip(pars,parinfo):
try:
parameter.value = value
except ValueError:
return -np.inf
model = self.n_modelfunc(pars, **self.modelfunc_kwargs)(xarr)
difference = np.abs(data-model)
        # full Gaussian likelihood:
        #   prob = 1/(2*np.pi)**0.5/error * np.exp(-difference**2/(2.*error**2))
        #   logprob = np.log(1./(2.*np.pi)**0.5/error) - difference**2/(2.*error**2)
        # the parameter-independent normalization term is dropped here:
        logprob = (-difference**2/(2.*error**2))
totallogprob = np.sum(logprob)
return totallogprob
def get_emcee_sampler(self, xarr, data, error, **kwargs):
"""
Get an emcee walker for the data & model
Parameters
----------
xarr : pyspeckit.units.SpectroscopicAxis
data : np.ndarray
error : np.ndarray
Examples
--------
>>> import pyspeckit
>>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
>>> e = np.random.randn(50)
>>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
>>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
>>> sp.specfit(fittype='gaussian')
>>> emcee_sampler = sp.specfit.fitter.get_emcee_sampler(sp.xarr, sp.data, sp.error)
>>> p0 = sp.specfit.parinfo
>>> emcee_sampler.run_mcmc(p0,100)
"""
try:
import emcee
except ImportError:
return
def probfunc(pars):
return self.logp(xarr, data, error, pars=pars)
raise NotImplementedError("emcee's metropolis-hastings sampler is not implemented; use pymc")
sampler = emcee.MHSampler(self.npars*self.npeaks+self.vheight, probfunc, **kwargs)
return sampler
def get_emcee_ensemblesampler(self, xarr, data, error, nwalkers, **kwargs):
"""
Get an emcee walker ensemble for the data & model
Parameters
----------
data : np.ndarray
error : np.ndarray
nwalkers : int
Number of walkers to use
Examples
--------
>>> import pyspeckit
>>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
>>> e = np.random.randn(50)
>>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
>>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
>>> sp.specfit(fittype='gaussian')
>>> nwalkers = sp.specfit.fitter.npars * 2
>>> emcee_ensemble = sp.specfit.fitter.get_emcee_ensemblesampler(sp.xarr, sp.data, sp.error, nwalkers)
>>> p0 = np.array([sp.specfit.parinfo.values] * nwalkers)
>>> p0 *= np.random.randn(*p0.shape) / 10. + 1.0
>>> pos,logprob,state = emcee_ensemble.run_mcmc(p0,100)
"""
try:
import emcee
except ImportError:
return
def probfunc(pars):
return self.logp(xarr, data, error, pars=pars)
sampler = emcee.EnsembleSampler(nwalkers,
self.npars*self.npeaks+self.vheight,
probfunc, **kwargs)
return sampler
def get_pymc(self, xarr, data, error, use_fitted_values=False, inf=np.inf,
use_adaptive=False, return_dict=False, **kwargs):
"""
Create a pymc MCMC sampler. Defaults to 'uninformative' priors
Parameters
----------
data : np.ndarray
error : np.ndarray
use_fitted_values : bool
Each parameter with a measured error will have a prior defined by
the Normal distribution with sigma = par.error and mean = par.value
use_adaptive : bool
Use the Adaptive Metropolis-Hastings sampler?
Examples
--------
>>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
>>> e = np.random.randn(50)
>>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
>>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
>>> sp.specfit(fittype='gaussian')
>>> MCuninformed = sp.specfit.fitter.get_pymc(sp.xarr, sp.data, sp.error)
>>> MCwithpriors = sp.specfit.fitter.get_pymc(sp.xarr, sp.data, sp.error, use_fitted_values=True)
>>> MCuninformed.sample(1000)
>>> MCuninformed.stats()['AMPLITUDE0']
>>> # WARNING: This will fail because width cannot be set <0, but it may randomly reach that...
>>> # How do you define a likelihood distribution with a lower limit?!
>>> MCwithpriors.sample(1000)
>>> MCwithpriors.stats()['AMPLITUDE0']
"""
old_errsettings = np.geterr()
try:
import pymc
finally:
# pymc breaks error settings
np.seterr(**old_errsettings)
#def lowerlimit_like(x,lolim):
# "lower limit (log likelihood - set very positive for unacceptable values)"
# return (x>=lolim) / 1e10
#def upperlimit_like(x,uplim):
# "upper limit"
# return (x<=uplim) / 1e10
#LoLim = pymc.distributions.stochastic_from_dist('lolim', logp=lowerlimit_like, dtype=np.float, mv=False)
#UpLim = pymc.distributions.stochastic_from_dist('uplim', logp=upperlimit_like, dtype=np.float, mv=False)
funcdict = {}
# very, very worrisome: pymc changes the values of parinfo
parcopy = copy.deepcopy(self.parinfo)
for par in parcopy:
lolim = par.limits[0] if par.limited[0] else -inf
uplim = par.limits[1] if par.limited[1] else inf
if par.fixed:
funcdict[par.parname] = pymc.distributions.Uniform(par.parname, par.value, par.value, value=par.value)
elif use_fitted_values:
if par.error > 0:
if any(par.limited):
try:
funcdict[par.parname] = pymc.distributions.TruncatedNormal(par.parname, par.value, 1./par.error**2, lolim, uplim)
except AttributeError:
# old versions used this?
funcdict[par.parname] = pymc.distributions.TruncNorm(par.parname, par.value, 1./par.error**2, lolim, uplim)
else:
funcdict[par.parname] = pymc.distributions.Normal(par.parname, par.value, 1./par.error**2)
else:
if any(par.limited):
funcdict[par.parname] = pymc.distributions.Uniform(par.parname, lolim, uplim, value=par.value)
else:
funcdict[par.parname] = pymc.distributions.Uninformative(par.parname, value=par.value)
elif any(par.limited):
lolim = par.limits[0] if par.limited[0] else -1e10
uplim = par.limits[1] if par.limited[1] else 1e10
funcdict[par.parname] = pymc.distributions.Uniform(par.parname, lower=lolim, upper=uplim, value=par.value)
else:
funcdict[par.parname] = pymc.distributions.Uninformative(par.parname, value=par.value)
d = dict(funcdict)
def modelfunc(xarr, pars=parcopy, **kwargs):
for k,v in kwargs.items():
if k in list(pars.keys()):
pars[k].value = v
return self.n_modelfunc(pars, **self.modelfunc_kwargs)(xarr)
funcdict['xarr'] = xarr
funcdet=pymc.Deterministic(name='f',eval=modelfunc,parents=funcdict,doc="The model function")
d['f'] = funcdet
datamodel = pymc.distributions.Normal('data', mu=funcdet,
tau=1/np.asarray(error)**2,
observed=True,
value=np.asarray(data))
d['data']=datamodel
if return_dict:
return d
mc = pymc.MCMC(d)
if use_adaptive:
mc.use_step_method(pymc.AdaptiveMetropolis,[d[p] for p in self.parinfo.names])
return mc
def parse_3par_guesses(self, guesses):
"""
Try to convert a set of interactive guesses (peak, center, width) into
guesses appropriate to the model.
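
        Examples
        --------
        A hedged sketch: assume ``mod`` is a model whose ``guess_types`` is
        the default ``('amplitude', 'center', 'width')``, so interactive
        guesses arrive as one (amplitude, center, width) triple per peak:
        >>> mod.parse_3par_guesses([1.0, 0.0, 1.0, 5.0, 3.0, 2.0])
        [1.0, 0.0, 1.0, 5.0, 3.0, 2.0]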
"""
if len(guesses) % 3 != 0:
raise ValueError("Guesses passed to parse_3par_guesses must have "
"length % 3 == 0")
npeaks_guessed = len(guesses) // 3
gtypes = [parse_offset_guess(gtype, gval)[0]
for gtype, gval in zip(itertools.cycle(self.guess_types),
[0]*len(self.guess_types))]
guess_dict = {(valid_guess_types[ii % 3], ii // 3): gval
for ii, gval in enumerate(guesses)}
new_guesses = [guess_dict[(gtype, ii)]
if isinstance(gtype, str)
else gtype
for ii in range(npeaks_guessed)
for gtype in gtypes
]
new_guesses = [parse_offset_guess(gtype, gval)[1]
for gtype, gval in zip(itertools.cycle(self.guess_types),
new_guesses)]
assert len(new_guesses) % len(self.guess_types) == 0
return new_guesses
class AstropyModel(SpectralModel):
def __init__(self, model, shortvarnames=None, **kwargs):
"""
Override the SpectralModel initialization
"""
        if self.__doc__ is not None: # how do you extend a docstring really?
            self.__doc__ += SpectralModel.__doc__
if shortvarnames is None:
shortvarnames = model.param_names
super(AstropyModel,self).__init__(model, len(model.parameters),
shortvarnames=shortvarnames,
model=model, **kwargs)
self.mp = None
self.vheight = False
self.npeaks = 1
def _make_parinfo(self, model=None):
self.parinfo = ParinfoList([
Parinfo(parname=name,value=value)
for name,value in zip(model.param_names,model.parameters)])
return self.parinfo, {}
def _parse_parinfo(self, parinfo):
"""
Parse a ParinfoList into astropy.models parameters
"""
if len(parinfo) > self.npars:
if len(parinfo) % self.npars != 0:
raise ValueError("Need to have an integer number of models")
else:
self.modelfunc.param_names = parinfo.names
self.modelfunc.parameters = parinfo.values
else:
self.modelfunc.param_names = parinfo.names
self.modelfunc.parameters = parinfo.values
def fitter(self, xax, data, err=None, quiet=True, veryverbose=False,
debug=False, parinfo=None, params=None, npeaks=None, **kwargs):
import astropy.models as models
if npeaks is not None and npeaks > 1:
raise NotImplementedError("Astropy models cannot be used to fit multiple peaks yet")
if parinfo is not None:
self._parse_parinfo(parinfo)
if params is not None:
self.modelfunc.parameters = params
self.astropy_fitter = models.fitting.NonLinearLSQFitter(self.modelfunc)
if err is None:
self.astropy_fitter(xax, data, **kwargs)
else:
self.astropy_fitter(xax, data, weights=1./err**2, **kwargs)
mpp = self.astropy_fitter.fitpars
cov = self.astropy_fitter.covar
if cov is None:
mpperr = np.zeros(len(mpp))
else:
mpperr = cov.diagonal()
self.model = self.astropy_fitter.model(xax)
if err is None:
chi2 = ((data-self.model)**2).sum()
else:
chi2 = ((data-self.model)**2/err**2).sum()
        # update object parameters
self.modelfunc.parameters = mpp
self._make_parinfo(self.modelfunc)
return mpp,self.model,mpperr,chi2
def n_modelfunc(self, pars=None, debug=False, **kwargs):
"""
Only deals with single-peak functions
"""
try:
self._parse_parinfo(pars)
except AttributeError:
self.modelfunc.parameters = pars
return self.modelfunc
def parse_offset_guess(gname, gval):
"""
Utility function for handling guesses. Allows guess types to be specified
as 'amplitude*2' or 'width+3'.
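
    Examples
    --------
    A hedged sketch of the intended behavior:
    >>> parse_offset_guess('width', 3.0)
    ('width', 3.0)
    >>> parse_offset_guess('width*2', 3.0)
    ('width', 6.0)
    >>> parse_offset_guess('center+5', 1.0)
    ('center', 6.0)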
"""
operators = '+-*/'
if not isinstance(gname, six.string_types):
return gname, gval
ops = [x for x in operators if x in gname]
if len(ops)>1:
raise ValueError("Invalid offset guess")
elif len(ops) == 0:
return gname,gval
else:
opmap = {"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv,
}
op = ops[0]
pars = gname.split(op)
gname = [p for p in gname.split(op) if p in valid_guess_types][0]
pars = [gval if p in valid_guess_types else float(p)
for p in pars]
gval = opmap[op](*pars)
return gname, gval
|
vlas-sokolov/pyspeckit
|
pyspeckit/spectrum/models/model.py
|
Python
|
mit
| 47,916
|
[
"Gaussian"
] |
99808187106775c55e63e5d62528d02c96257d846fe436a60f170022be5e07c6
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the CCX dashboard.
"""
from nose.plugins.attrib import attr
from common.test.acceptance.fixtures.course import CourseFixture
from common.test.acceptance.tests.helpers import UniqueCourseTest, EventsTestMixin
from common.test.acceptance.pages.lms.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.ccx_dashboard_page import CoachDashboardPage
@attr('shard_7')
class CreateCCXCoachTest(EventsTestMixin, UniqueCourseTest):
"""
Test ccx end to end process.
"""
USERNAME = "coach_tester"
EMAIL = "coach_tester@example.com"
def setUp(self):
super(CreateCCXCoachTest, self).setUp()
self.course_info.update({"settings": {"enable_ccx": "true"}})
self.course_fixture = CourseFixture(**self.course_info).install()
self.coach_dashboard_page = CoachDashboardPage(self.browser, self.course_id)
def _auto_auth(self, username, email):
"""
        Log out and log in with the given credentials.
"""
AutoAuthPage(self.browser, username=username, email=email,
course_id=self.course_id, staff=True).visit()
def test_create_ccx(self):
"""
        Assert that a CCX is created.
"""
ccx_name = "Test ccx"
self._auto_auth(self.USERNAME, self.EMAIL)
self.coach_dashboard_page.visit()
self.coach_dashboard_page.fill_ccx_name_text_box(ccx_name)
self.coach_dashboard_page.wait_for_page()
# Assert that new ccx is created and we are on ccx dashboard/enrollment tab.
self.assertTrue(self.coach_dashboard_page.is_browser_on_enrollment_page())
|
ampax/edx-platform
|
common/test/acceptance/tests/lms/test_ccx.py
|
Python
|
agpl-3.0
| 1,656
|
[
"VisIt"
] |
25d8f8e7e0c7d9a050aef7590386fd9302dee6363aaada1c2a12e0892fee2047
|
#!/usr/bin/env python
## /*=========================================================================
## Program: Visualization Toolkit
## Module: HeaderTesting.py
## Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
## All rights reserved.
## See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notice for more information.
## =========================================================================*/
## .NAME HeaderTesting - a VTK style and validity checking utility
## .SECTION Description
## HeaderTesting is a script which checks the list of header files for
## validity based on VTK coding standard. It checks for proper super
## classes, number and style of include files, type macro, private
## copy constructor and assignment operator, broken constructors, and
## existence of PrintSelf method. This script should be run as a part
## of the dashboard checking of the Visualization Toolkit and related
## projects.
## .SECTION See Also
## http://www.vtk.org http://public.kitware.com/Dart/HTML/Index.shtml
## http://www.vtk.org/contribute.php#coding-standards
import sys
import re
import os
import stat
# Get the path to the directory containing this script.
if __name__ == '__main__':
selfpath = os.path.abspath(sys.path[0] or os.curdir)
else:
selfpath = os.path.abspath(os.path.dirname(__file__))
# Load the list of names mangled by windows.h.
exec(compile(open(os.path.join(selfpath, 'WindowsMangleList.py')).read(),
os.path.join(selfpath, 'WindowsMangleList.py'), 'exec'))
## If tested from dart, make sure to fix all the output strings
test_from_dart = 0
if "DART_TEST_FROM_DART" in os.environ:
test_from_dart = 1
## For backward compatibility
def StringEndsWith(str1, str2):
l1 = len(str1)
l2 = len(str2)
if l1 < l2:
return 0
return (str1[(l1-l2):] == str2)
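# Hedged usage sketch (not invoked by the checks below): StringEndsWith
# mirrors str.endswith and returns 0 when the candidate suffix is longer
# than the string itself.
def _example_string_ends_with():
    assert StringEndsWith("vtkObject.h", ".h")
    assert not StringEndsWith("a.h", "vtkObject.h")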
##
class TestVTKFiles:
def __init__(self):
self.FileName = ""
        self.ErrorValue = 0
        self.Errors = {}
        self.WarningValue = 0
self.Warnings = {}
self.FileLines = []
self.Export = ""
self.UnnecessaryIncludes = [
"stdio.h",
"stdlib.h",
"string.h",
"iostream",
"iostream.h",
"strstream",
"strstream.h",
"fstream",
"fstream.h",
"windows.h"
]
pass
def SetExport(self, export):
self.Export = export
def Print(self, text=""):
rtext = text
if test_from_dart:
rtext = rtext.replace("<", "<")
rtext = rtext.replace(">", ">")
print(rtext)
def Error(self, error):
self.ErrorValue = 1
self.Errors[error] = 1
pass
def Warning(self, warning):
self.WarningValue = 1
self.Warnings[warning] = 1
pass
def PrintErrors(self):
if self.ErrorValue:
self.Print( )
self.Print( "There were errors:" )
for a in self.Errors:
self.Print( "* %s" % a )
def PrintWarnings(self):
if self.WarningValue:
self.Print( )
self.Print( "There were warnings:" )
for a in self.Warnings:
self.Print( "* %s" % a )
def TestFile(self, filename):
self.FileName = filename
self.FileLines = []
self.ClassName = ""
self.ParentName = ""
try:
if sys.hexversion >= 0x03000000:
file = open(filename, encoding='ascii', errors='ignore')
else:
file = open(filename)
self.FileLines = file.readlines()
file.close()
except:
self.Print("Problem reading file %s:\n%s" %
(filename, str(sys.exc_info()[1])))
sys.exit(1)
return not self.CheckExclude()
def CheckExclude(self):
prefix = '// VTK-HeaderTest-Exclude:'
exclude = 0
for l in self.FileLines:
if l.startswith(prefix):
e = l[len(prefix):].strip()
if e == os.path.basename(self.FileName):
exclude += 1
else:
self.Error("Wrong exclusion: "+l.rstrip())
if exclude > 1:
self.Error("Multiple VTK-HeaderTest-Exclude lines")
return exclude > 0
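    # Illustration (hedged, not project documentation): a header opts out of
    # these checks by containing, verbatim, a line such as
    #   // VTK-HeaderTest-Exclude: vtkFooBar.h
    # where the named file must match the header's own base name; more than
    # one such line is flagged by CheckExclude above.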
def CheckIncludes(self):
count = 0
lines = []
nplines = []
unlines = []
        includere = r"^\s*#\s*include\s*[\"<]([^>\"]+)"
        ignincludere = r".*\/\/.*"
regx = re.compile(includere)
regx1 = re.compile(ignincludere)
cc = 0
includeparent = 0
for a in self.FileLines:
line = a.strip()
rm = regx.match(line)
if rm and not regx1.match(line):
lines.append(" %4d: %s" % (cc, line))
file = rm.group(1)
if file == (self.ParentName + ".h"):
includeparent = 1
if not StringEndsWith(file, ".h"):
nplines.append(" %4d: %s" % (cc, line))
if file in self.UnnecessaryIncludes:
unlines.append(" %4d: %s" % (cc, line))
cc = cc + 1
if len(lines) > 1:
self.Print()
self.Print( "File: %s has %d includes: " %
( self.FileName, len(lines)) )
for a in lines:
self.Print( a )
self.Error("Multiple includes")
if len(nplines) > 0:
self.Print( )
self.Print( "File: %s has non-portable include(s): " % self.FileName )
for a in nplines:
self.Print( a )
self.Error("Non-portable includes")
if len(unlines) > 0:
self.Print( )
self.Print( "File: %s has unnecessary include(s): " % self.FileName )
for a in unlines:
self.Print( a )
self.Error("Unnecessary includes")
if not includeparent and self.ParentName:
self.Print()
self.Print( "File: %s does not include parent \"%s.h\"" %
( self.FileName, self.ParentName ) )
self.Error("Does not include parent")
pass
def CheckParent(self):
        classre = r"^class(\s+[^\s]*_EXPORT)?\s+(vtk[A-Z0-9_][^ :\n]*)\s*:\s*public\s+(vtk[^ \n\{]*)"
cname = ""
pname = ""
classlines = []
regx = re.compile(classre)
cc = 0
lastline = ""
for a in self.FileLines:
line = a.strip()
rm = regx.match(line)
if not rm and not cname:
rm = regx.match(lastline + line)
if rm:
export = rm.group(1)
export = export.strip()
cname = rm.group(2)
pname = rm.group(3)
classlines.append(" %4d: %s" % (cc, line))
if not export:
self.Print("File: %s defines 1 class with no export macro:" % self.FileName)
self.Print(" %4d: %s" % (cc, line))
self.Error("No export macro")
elif self.Export and self.Export != export:
self.Print("File: %s defines 1 class with wrong export macro:" % self.FileName)
self.Print(" %4d: %s" % (cc, line))
self.Print(" The export macro should be: %s" % (self.Export))
self.Error("Wrong export macro")
cc = cc + 1
lastline = a
if len(classlines) > 1:
self.Print()
self.Print( "File: %s defines %d classes: " %
(self.FileName, len(classlines)) )
for a in classlines:
self.Print( a )
self.Error("Multiple classes defined")
if len(classlines) < 1:
self.Print()
self.Print( "File: %s does not define any classes" % self.FileName )
self.Error("No class defined")
return
#self.Print( "Classname: %s ParentName: %s" % (cname, pname)
self.ClassName = cname
self.ParentName = pname
pass
def CheckTypeMacro(self):
count = 0
lines = []
oldlines = []
        typere = r"^\s*vtk(Abstract)?Type(Revision)*Macro\s*\(\s*(vtk[^ ,]+)\s*,\s*(vtk[^ \)]+)\s*\)\s*"
        typesplitre = r"^\s*vtk(Abstract)?Type(Revision)*Macro\s*\("
regx = re.compile(typere)
regxs = re.compile(typesplitre)
cc = 0
found = 0
for a in range(len(self.FileLines)):
line = self.FileLines[a].strip()
rm = regx.match(line)
if rm:
found = 1
if rm.group(2) == "Revision":
oldlines.append(" %4d: %s" % (cc, line))
cname = rm.group(3)
pname = rm.group(4)
if cname != self.ClassName or pname != self.ParentName:
lines.append(" %4d: %s" % (cc, line))
else:
# Maybe it is in two lines
rm = regxs.match(line)
if rm:
                    nline = line + " " + self.FileLines[a+1].strip()
line = nline.strip()
rm = regx.match(line)
if rm:
found = 1
if rm.group(2) == "Revision":
oldlines.append(" %4d: %s" % (cc, line))
cname = rm.group(3)
pname = rm.group(4)
if cname != self.ClassName or pname != self.ParentName:
lines.append(" %4d: %s" % (cc, line))
cc = cc + 1
if len(lines) > 0:
self.Print( "File: %s has broken type macro(s):" % self.FileName )
for a in lines:
self.Print( a )
self.Print( "Should be:\n vtkTypeMacro(%s, %s)" %
(self.ClassName, self.ParentName) )
self.Error("Broken type macro")
if len(oldlines) > 0:
self.Print( "File: %s has legacy type-revision macro(s):" % self.FileName )
for a in oldlines:
self.Print( a )
self.Print( "Should be:\n vtkTypeMacro(%s, %s)" %
(self.ClassName, self.ParentName))
self.Error("Legacy style type-revision macro")
if not found:
self.Print( "File: %s does not have type macro" % self.FileName )
self.Print( "Should be:\n vtkTypeMacro(%s, %s)" %
(self.ClassName, self.ParentName))
self.Error("No type macro")
pass
def CheckForCopyAndAssignment(self):
if not self.ClassName:
return
count = 0
lines = []
oldlines = []
        copyoperator = r"^\s*%s\s*\(\s*const\s*%s\s*&\s*\)\s*;\s*\/\/\s*Not\s*[iI]mplemented(\.)*" % (self.ClassName, self.ClassName)
        asgnoperator = r"^\s*void\s*operator\s*=\s*\(\s*const\s*%s\s*&\s*\)\s*;\s*\/\/\s*Not\s*[iI]mplemented(\.)*" % self.ClassName
#self.Print( copyoperator
regx1 = re.compile(copyoperator)
regx2 = re.compile(asgnoperator)
foundcopy = 0
foundasgn = 0
for a in self.FileLines:
line = a.strip()
if regx1.match(line):
foundcopy = foundcopy + 1
if regx2.match(line):
foundasgn = foundasgn + 1
lastline = ""
if foundcopy < 1:
for a in self.FileLines:
line = a.strip()
if regx1.match(lastline + line):
foundcopy = foundcopy + 1
lastline = a
lastline = ""
if foundasgn < 1:
for a in self.FileLines:
line = a.strip()
if regx2.match(lastline + line):
foundasgn = foundasgn + 1
lastline = a
if foundcopy < 1:
self.Print( "File: %s does not define copy constructor" %
self.FileName )
self.Print( "Should be:\n%s(const %s&); // Not implemented" %
(self.ClassName, self.ClassName) )
self.Error("No private copy constructor")
if foundcopy > 1:
self.Print( "File: %s defines multiple copy constructors" %
self.FileName )
self.Error("Multiple copy constructor")
if foundasgn < 1:
self.Print( "File: %s does not define assignment operator" %
self.FileName )
self.Print( "Should be:\nvoid operator=(const %s&); // Not implemented"
% self.ClassName )
self.Error("No private assignment operator")
        if foundasgn > 1:
            self.Print( "File: %s defines multiple assignment operators" %
                        self.FileName )
            self.Error("Multiple assignment operators")
pass
def CheckWeirdConstructors(self):
count = 0
lines = []
oldlines = []
        constructor = r"^\s*%s\s*\(([^ )]*)\)" % self.ClassName
        copyoperator = r"^\s*%s\s*\(\s*const\s*%s\s*&\s*\)\s*;\s*\/\/\s*Not\s*implemented(\.)*" % (self.ClassName, self.ClassName)
regx1 = re.compile(constructor)
regx2 = re.compile(copyoperator)
cc = 0
for a in self.FileLines:
line = a.strip()
rm = regx1.match(line)
if rm:
arg = rm.group(1).strip()
if arg and not regx2.match(line):
lines.append(" %4d: %s" % (cc, line))
cc = cc + 1
if len(lines) > 0:
self.Print( "File: %s has weird constructor(s):" % self.FileName )
for a in lines:
self.Print( a )
self.Print( "There should be only:\n %s();" % self.ClassName )
self.Error("Weird constructor")
pass
def CheckPrintSelf(self):
if not self.ClassName:
return
        typere = r"^\s*void\s*PrintSelf\s*\(\s*ostream\s*&\s*os*\s*,\s*vtkIndent\s*indent\s*\)"
        newtypere = r"^\s*virtual\s*void\s*PrintSelf\s*\(\s*ostream\s*&\s*os*\s*,\s*vtkIndent\s*indent\s*\)"
regx1 = re.compile(typere)
regx2 = re.compile(newtypere)
found = 0
oldstyle = 0
for a in self.FileLines:
line = a.strip()
rm1 = regx1.match(line)
rm2 = regx2.match(line)
if rm1 or rm2:
found = 1
if rm1:
oldstyle = 1
if not found:
self.Print( "File: %s does not define PrintSelf method:" %
self.FileName )
self.Warning("No PrintSelf method")
pass
def CheckWindowsMangling(self):
lines = []
regx1 = WindowsMangleRegEx
regx2 = re.compile("^.*VTK_LEGACY.*$")
# This version will leave out comment lines but we probably do
# not want to refer to mangled (hopefully deprecated) methods
# in comments.
# regx2 = re.compile("^(\s*//|\s*\*|.*VTK_LEGACY).*$")
cc = 1
for a in self.FileLines:
line = a.strip()
rm = regx1.match(line)
if rm:
arg = rm.group(1).strip()
if arg and not regx2.match(line):
lines.append(" %4d: %s" % (cc, line))
cc = cc + 1
if len(lines) > 0:
self.Print( "File: %s has windows.h mangling violations:" % self.FileName )
for a in lines:
self.Print(a)
self.Error("Windows Mangling Violation - choose another name that does not conflict.")
pass
##
test = TestVTKFiles()
## Check command line arguments
if len(sys.argv) < 2:
print("Testing directory not specified...")
print("Usage: %s <directory> [ exception(s) ]" % sys.argv[0])
sys.exit(1)
dirname = sys.argv[1]
exceptions = sys.argv[2:]
if len(sys.argv) > 2:
export = sys.argv[2]
if export[:3] == "VTK" and export[len(export)-len("EXPORT"):] == "EXPORT":
print("Use export macro: %s" % export)
exceptions = sys.argv[3:]
test.SetExport(export)
## Traverse through the list of files
for a in os.listdir(dirname):
## Skip non-header files
if not StringEndsWith(a, ".h"):
continue
## Skip non-vtk files
if not a.startswith('vtk'):
continue
## Skip exceptions
if a in exceptions:
continue
pathname = '%s/%s' % (dirname, a)
if pathname in exceptions:
continue
mode = os.stat(pathname)[stat.ST_MODE]
## Skip directories
if stat.S_ISDIR(mode):
continue
elif stat.S_ISREG(mode) and test.TestFile(pathname):
## Do all the tests
test.CheckParent()
test.CheckIncludes()
test.CheckTypeMacro()
test.CheckForCopyAndAssignment()
test.CheckWeirdConstructors()
test.CheckPrintSelf()
test.CheckWindowsMangling()
## Summarize errors
test.PrintWarnings()
test.PrintErrors()
sys.exit(test.ErrorValue)
|
mspark93/VTK
|
Testing/Core/HeaderTesting.py
|
Python
|
bsd-3-clause
| 17,406
|
[
"VTK"
] |
26ba702f1cff3f575e3ed99367db6e260e5d37977cc61c12920bcfa1adfceb44
|
from nanopores import *
from nanopores.physics.exittime import ExitTimeProblem
from dolfin import *
import math
# @Benjamin, Gregor TODO:
# -) check permittivity and surface charge of ahem
# -) what biased voltage to use?
geo_params = dict(
l3 = 30.,
l4 = 15.,
R = 40.,
x0 = [5., 0., 10.], # |x0| > 2.2
exit_i = None,
)
phys_params = dict(
bV = .5,
ahemqs = 0.02,
rTarget = 0.5*nm,
bulkcon = 1000,
)
# TODO: discriminate upper/lower side boundary
exit1 = {"upperb"}
exit2 = {"upperb", "lowerb"}
# Assumption: the solves below reference an exit boundary set `exitb`, which
# was never defined; exit1 (upper boundary only) is used as a placeholder
# until the TODO above is resolved.
exitb = exit1
#StokesProblem.method["lusolver"] = "mumps" # doesn't work
#StokesProblem.method["iterative"] = True
print
print "--- INPUT VARIABLES:"
print
print "voltage bias: %.4f mV" %(1000.*phys_params["bV"],)
print "a-Hem surface charge: %.4f C/m^2" %(phys_params["ahemqs"],)
print "upper reservoir dimensions: %d x %d x %d nm" %(geo_params["R"], geo_params["R"], geo_params["l3"])
print "molecule position: %d nm above pore" %(geo_params["x0"][2],)
print
print "--- MESHING"
print
t = Timer("meshing")
meshdict = generate_mesh(10., "aHem", **geo_params)
print "Mesh generation time:",t.stop()
#print "Mesh file:",meshdict["fid_xml"]
#print "Mesh metadata:"
#for item in meshdict["meta"].items():
# print "%s = %s" %item
#print
t = Timer("reading geometry")
geo = geo_from_xml("aHem")
print "Geo generation time:",t.stop()
#print "Geo params:", geo.params
#print "Geo physical domains:", geo._physical_domain
#print "Geo physical boundaries:", geo._physical_boundary
#plot(geo.boundaries)
#plot(geo.submesh("pore"))
#plot(geo.submesh("exittime"))
phys = Physics("pore_molecule", geo, **phys_params)
x0 = geo.params["x0"]
r0 = math.sqrt(sum(x**2 for x in x0))
rnear = r0 - geo.params["rMolecule"]
rfar = r0 + geo.params["rMolecule"]
xnear = map(lambda x: rnear/r0*x, x0)
xfar = map(lambda x: rfar/r0*x, x0)
def avg(u, meas):
return assemble(u*meas)/assemble(Constant(1.0)*meas)
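# Added note: avg(u, meas) is the measure-weighted mean of u, i.e.
# <u> = (integral of u over the set carried by `meas`) / (measure of that set);
# with meas = geo.dS("moleculeb") below it averages over the molecule surface.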
def exit_times(tau):
Tmin = tau(xnear)
Tmax = tau(xfar)
Tavg = avg(tau, geo.dS("moleculeb"))
return (Tmin, Tavg, Tmax)
print
print "--- STATISTICS FOR F=0"
etp_noF = LinearPDE(geo, ExitTimeProblem, phys, F=Constant((0.,0.,0.)), exitb=exitb)
etp_noF.solve(verbose=False)
T_noF = exit_times(etp_noF.solution)
print "\nTime [s] to reach bottom from molecule for F=0: (min, avg, max)"
print T_noF
print "\nTime [s] to reach bottom from pore entrance for F=0:"
print etp_noF.solution([0.,0.,-3.])
t = T_noF[1]
dt = t/100
survival = TransientLinearPDE(SurvivalProblem, geo, phys, dt=dt, F=Constant((0.,0.,0.)), exitb=exitb)
survival.solve(t=t, visualize=True, verbose=False)
p = survival.solution
print
print "After mean time (%s s) to reach bottom from molecule:" %T_noF[1]
for domain in ["pore", "poretop", "porecenter", "porebottom", "fluid_bulk_top", "fluid_bulk_bottom"]:
print "Average survival rate in %s: %.3f percent"%(domain,
100.*assemble(p*geo.dx(domain))/assemble(Constant(1.0)*geo.dx(domain)))
#print "Physics:"
#for item in phys.__dict__.items():
# print "%s = %s" %item
print
print "--- CALCULATING F from PNPS"
print
pde = PNPS(geo, phys)
pde.solve()
#pde.print_results()
(v, cp, cm, u, p) = pde.solutions(deepcopy=True)
F = phys.Feff(v, u)
for domain in ["pore", "poretop", "porecenter", "porebottom", "fluid_bulk_top", "fluid_bulk_bottom"]:
print "Average F in %s:"%domain, assemble(F[2]*geo.dx(domain))/assemble(Constant(1.0)*geo.dx(domain))
VV = VectorFunctionSpace(geo.mesh, "CG", 1)
Fproj = project(F, VV)
# solve exit time problem
print
print "--- STATISTICS FOR F=F"
etp = LinearPDE(geo, ExitTimeProblem, phys, F=F, exitb=exitb)
etp.solve(verbose=False)
T = exit_times(etp.solution)
print "\nTime [s] to reach bottom from molecule: (min, avg, max)"
print T
print "\nTime [s] to reach bottom from pore entrance:"
print etp.solution([0.,0.,-3.])
#plot(F, title="F")
#etp.visualize("exittime")
# TIMESTEP
t = T[1]
dt = t/100
survival = TransientLinearPDE(SurvivalProblem, geo, phys, dt=dt, F=F, exitb=exitb)
survival.solve(t=t, visualize=True, verbose=False)
p = survival.solution
print
print "After mean time (%s s) to reach bottom from molecule:" %T[1]
for domain in ["pore", "poretop", "porecenter", "porebottom", "fluid_bulk_top", "fluid_bulk_bottom"]:
print "Average survival rate in %s: %.3f percent"%(domain,
100.*assemble(p*geo.dx(domain))/assemble(Constant(1.0)*geo.dx(domain)))
print
|
mitschabaude/nanopores
|
scripts/test_ahem.py
|
Python
|
mit
| 4,411
|
[
"FEFF"
] |
9e9ce3f8bc9c13a3e3f3486e90dfb4c3b4a31b6509d7d9e438f881595fd7e731
|
import argparse
import csv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.dummy
import sklearn.gaussian_process
import sklearn.linear_model
import sklearn.kernel_approximation
LABEL_COL = 4
INPUT_COLS = 7, 9, 11, 13, 15
INPUT_DIM = len(INPUT_COLS)
INPUT_ROW_VALID = lambda row: row[2] == "Galaxy"
DEFAULT_TRAINING_SAMPLES_NUM = 1000
DEFAULT_TESTING_SAMPLES_NUM = 1000
GAMMA = 0.05623413252 # Found by binary search
def load_gp_regressor():
kernel = sklearn.gaussian_process.kernels.RBF(length_scale=GAMMA)
return sklearn.gaussian_process.GaussianProcessRegressor(kernel=kernel)
def load_sgd_regressor():
return sklearn.linear_model.SGDRegressor()
PREDICTOR_LOADERS = {'const': sklearn.dummy.DummyRegressor,
'GP': load_gp_regressor,
'SGD': load_sgd_regressor,
'linearSGD': load_sgd_regressor}
def preprocess_sgd(x):
rbf_feature = sklearn.kernel_approximation.RBFSampler(
gamma=GAMMA,
random_state=1)
x = rbf_feature.fit_transform(x)
return x
NOOP = lambda x: x
PREPROCESSING = {'const': NOOP,
'GP': NOOP,
'SGD': preprocess_sgd,
'linearSGD': NOOP}
ADMIT_SIGMA = { 'GP' }
def compute_R_sq(predictor, X, y):
y_pred = predictor.predict(X)
observed_mean = np.mean(y)
ss_tot = (y - observed_mean).dot(y - observed_mean)
residuals = y_pred - y
ss_res = residuals.dot(residuals)
return 1 - ss_res / ss_tot
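# Hedged sanity sketch for compute_R_sq (not wired into the CLI; the helper
# name and the toy data below are illustrative, not project API).
def _example_compute_R_sq():
    class _Identity(object):
        """Toy predictor that returns its first input column unchanged."""
        def predict(self, X):
            return X[:, 0]
    X = np.array([[1.0], [2.0], [4.0]])
    y = np.array([1.0, 2.0, 4.0])
    # zero residuals give ss_res == 0, so R^2 must be exactly 1
    assert compute_R_sq(_Identity(), X, y) == 1.0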
def test_R_sq(score_a, predictor, X, y):
score_b = compute_R_sq(predictor, X, y)
if abs(score_b - score_a) < 1e-10:
print('R^2 test passed.')
else:
print('R^2 test failed. Sklearn score: {}. '
'Recomputed score: {}. Difference: {}.'.format(
score_a, score_b, abs(score_b - score_a)))
def plot(predictor, X, y, admits_sigma):
if admits_sigma:
y_pred, sigma = predictor.predict(X, return_std=True)
else:
y_pred = predictor.predict(X)
assert y.shape == y_pred.shape # Make sure sizes are the same
assert len(y.shape) == 1 # Make sure both are vectors
indices = np.argsort(y)
y = y[indices]
y_pred = y_pred[indices]
if admits_sigma:
sigma = sigma[indices]
if admits_sigma:
plt.errorbar(y, y_pred, yerr=sigma, fmt='x', ecolor='g')
else:
plt.scatter(y, y_pred, marker='x', s=10)
plt.show()
def load_data(
path,
train_samples_num,
test_samples_num,
x_cols=('psfMag_u', 'psfMag_g', 'psfMag_r', 'psfMag_i', 'psfMag_z'),
y_col='redshift',
class_col='class',
class_val='Galaxy'):
# Cast x_cols to list so Pandas doesn't complain…
x_cols_l = list(x_cols)
data_iter = pd.read_csv(
path,
iterator=True,
chunksize=100000,
usecols=x_cols_l + [y_col, class_col])
# Filter out anything that is not a galaxy without loading the whole file into memory.
data = pd.concat(chunk[chunk[class_col] == class_val]
for chunk in data_iter)
train_X = data[:train_samples_num][x_cols_l].as_matrix()
test_X = data[train_samples_num
:train_samples_num+test_samples_num][x_cols_l].as_matrix()
train_y = data[:train_samples_num][y_col].as_matrix()
test_y = data[train_samples_num
:train_samples_num+test_samples_num][y_col].as_matrix()
assert train_X.shape == (train_samples_num, len(x_cols))
assert train_y.shape == (train_samples_num,)
assert test_X.shape == (test_samples_num, len(x_cols))
assert test_y.shape == (test_samples_num,)
return train_X, train_y, test_X, test_y
def main():
parser = argparse.ArgumentParser(
description=('Perform regression on photometric '
'redshifts and report results.'),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('predictor', metavar='P',
choices=PREDICTOR_LOADERS.keys(),
help='predictor to use for the regression')
parser.add_argument('--train_n', metavar='TRAIN_N', type=int,
default=DEFAULT_TRAINING_SAMPLES_NUM,
help='number of training samples')
    parser.add_argument('--test_n', metavar='TEST_N', type=int,
                        default=DEFAULT_TESTING_SAMPLES_NUM,
                        help='number of testing samples')
parser.add_argument('path', type=str, help='data file')
parser.add_argument('-p', '--plot', action='store_true',
help='plot result of the regression')
parser.add_argument('-t', '--test', action='store_true',
help='perform tests of R^2 values')
parser.add_argument('-d', '--diffs', action='store_true',
help='investigate differences')
args = parser.parse_args()
predictor = PREDICTOR_LOADERS[args.predictor]()
preprocessor = PREPROCESSING[args.predictor]
train_X, train_y, test_X, test_y = load_data(args.path,
args.train_n,
args.test_n)
# Add differences if wanted.
    if args.diffs:
        # Augment the features with differences of consecutive magnitudes
        # (photometric colours such as u-g and g-r).
        diffs_train_X = np.empty((train_X.shape[0], train_X.shape[1] - 1))
        for i in range(train_X.shape[1] - 1):
            diffs_train_X[:,i] = train_X[:,i] - train_X[:,i+1]
        train_X = np.concatenate((train_X, diffs_train_X), axis=1)
        diffs_test_X = np.empty((test_X.shape[0], test_X.shape[1] - 1))
        for i in range(test_X.shape[1] - 1):
            diffs_test_X[:,i] = test_X[:,i] - test_X[:,i+1]
        test_X = np.concatenate((test_X, diffs_test_X), axis=1)
# Fit.
train_X = preprocessor(train_X)
predictor.fit(train_X, train_y)
# Predict and get score.
test_X = preprocessor(test_X)
score = predictor.score(test_X, test_y)
print('R^2 score: {}'.format(score))
if args.test:
test_R_sq(score, predictor, test_X, test_y)
if args.plot:
plot(predictor, test_X, test_y, args.predictor in ADMIT_SIGMA)
if __name__ == '__main__':
main()
|
alasdairtran/mclearn
|
projects/jakub/redshift_regression/photometric_redshift.py
|
Python
|
bsd-3-clause
| 6,307
|
[
"Galaxy"
] |
35e26a1d187441b683bda844f4c2eccbc8e24417c18e82a75d730b2207334094
|
# PyCal - Python web calendar
#
# Copyright (C) 2004-6 Ray Osborn
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# $Id: Event.py,v 1.11 2006/12/30 02:28:05 rosborn Exp $
#
"""PyCal: Python web calendar
Editor class defining calendar events.
"""
import os
import time
import calendar
from PyCal import *
import CGImodule
import DatabaseModule
import Editor
import GetModule
import HTML
import LogModule
import OptionModule
import PageModule
import PrintModule
from Utilities import FormatTime, FormatDate, NextDay
from Utilities import PreviousMonth, NextMonth, CopyTime
from Utilities import PathDate, StripID, StripIDs, IsValidID, IDexists, IsEmail
from Utilities import ConvertBreaks, ConvertCRLFs, StripHTML
class Event(object):
"""Event class for a calendar object.
This class is instantiated by an Add Event click.
"""
def __init__(self, ID=None):
"""Initialize an instance of the Event class."""
self.ID = ""
self.title = ""
self.type = "Event"
self.description = ""
self.location = ""
self.start = ""
self.end = ""
self.repeats = []
self.logs = []
self.organizer = ""
self.phone = ""
self.email = ""
self.reservation = {}
self.locations = []
self.resources = []
self.categories = []
self.setup = ""
self.status = ""
self.created = None
self.editor = ""
self.notifyList = []
self.dir = ""
if ID:
self.ID = ID
self.dir = os.path.join(homeDir, ID)
self.Read()
#Put in for backward compatibility
if self.location:
try:
self.locations.remove(self.location)
except ValueError:
pass
self.locations.insert(0, self.location)
if not self.reservation.has_key("start"):
self.reservation["start"] = self.start
self.reservation["end"] = self.end
self.reservation["option"] = "Same as Event"
if self.locations:
self.location = self.locations[0]
def __cmp__(self, other):
"""Sort events by their start times."""
if self.type == "Holiday":
if other.type == "Holiday":
return 0
else:
return -1
elif other.type == "Holiday":
if self.type == "Holiday":
return 0
else:
return 1
elif self.type == "Banner":
if other.type == "Holiday":
return 1
elif other.type == "Banner":
return 0
else:
return -1
elif other.type == "Banner":
if self.type == "Holiday":
return -1
elif self.type == "Banner":
return 0
else:
return 1
elif self.start == other.start:
if self.type == "Special":
if other.type == "Holiday" or other.type == "Banner":
return 1
elif other.type == "Special":
return 0
else:
return -1
elif self.type == "Event":
if other.type == "Holiday" or other.type == "Banner" or \
other.type == "Special":
return 1
elif other.type == "Event":
return 0
else:
return -1
elif self.type == "Private":
if other.type == "Holiday" or other.type == "Banner" or \
other.type == "Special" or other.type == "Event":
return 1
elif other.type == "Private":
return 0
else:
return -1
elif self.type == "Setup":
if other.type == "Setup":
return 0
else:
return 1
return cmp(self.start, other.start)
def __str__(self):
"""Output event details for a command-line session."""
output = ["%s: %s" % (self.ID, self.title)]
output.append(FormatDate(self.start, day=True))
if self.type <> "Banner" and self.type <> "Holiday":
output.append("Event Time: %s to %s"
% (FormatTime(self.start), FormatTime(self.end)))
output.append("Event Reservation: %s to %s"
% (FormatTime(self.reservation["start"]),
FormatTime(self.reservation["end"])))
output.append("Status: %s" % self.status)
output.append("Type: %s" % self.type)
if self.description:
output.append("Description:")
output.append(ConvertBreaks(self.description))
if self.locations:
output.append("Locations: %s" % ", ".join(self.locations))
if self.resources:
output.append("Resources: %s" % ", ".join(self.resources))
if self.categories:
output.append("Categories: %s" % ", ".join(self.categories))
if self.organizer:
output.append("Organizer: %s" % self.organizer)
if self.phone:
output.append("Phone: %s" % self.phone)
if self.email:
output.append("Email: %s" % self.email)
if self.repeats:
output.append("Repeats: %s" % ", ".join(self.repeats))
return "\n".join(output)
def Read(self):
"""Read the current Event database into the Event object."""
DatabaseModule.Read("event", "events", self.dir, self)
def Store(self):
"""Store the current Event object for later use and update cache."""
DatabaseModule.Store(self, "event", "events", self.dir)
def Copy(self, other):
"""Copy another event ignoring the ID and directory."""
self.title = other.title
self.type = other.type
self.description = other.description
self.location = other.location
self.start = other.start
self.end = other.end
self.repeats = other.repeats
self.logs = other.logs
self.organizer = other.organizer
self.phone = other.phone
self.email = other.email
self.reservation = other.reservation
self.locations = other.locations
self.resources = other.resources
self.categories = other.categories
self.setup = other.setup
self.status = other.status
self.editor = other.editor
self.notifyList = other.notifyList
def Remove(self):
"""Delete the event."""
try:
os.chdir(self.dir)
for file in os.listdir("."):
os.remove(file)
os.chdir("../")
os.rmdir(self.dir)
except OSError, errorText:
raise CalendarError, errorText
self.ClearRepeats()
def AddLog(self, log, save=True):
"""Add a log entry to the event database with a time stamp."""
timestamp = FormatTime(time.localtime(time.time()), "ISO8601")
user = CGImodule.CGIgetUser()
try:
self.logs.append((timestamp, user, log))
except NameError:
self.logs = [(timestamp, user, log)]
if save:
LogModule.Add(timestamp, self.ID, self.title, user, log)
def ClearRepeats(self):
"""Remove ID from other repeated events."""
for repeat in self.repeats:
if repeat <> self.ID and IDexists(repeat):
other = Event(repeat)
try:
other.repeats.remove(self.ID)
if len(other.repeats) == 1:
other.repeats = []
other.Store()
except ValueError:
pass
def AddNotification(self, email):
"""Add an email address to the notify list."""
from Utilities import IsEmail
if IsEmail(email):
if email not in self.notifyList:
self.notifyList.append(email)
self.Store()
else:
raise CalendarError, "Invalid email address"
def RemoveNotification(self, email):
"""Remove an email address from the notify list."""
if email in self.notifyList:
self.notifyList.remove(email)
self.Store()
def UpdatePages(self):
"""Add a flag to update page caches for this event and all repeats."""
if self.repeats:
dates = []
for ID in self.repeats:
y, m, d = PathDate(StripID(ID))
if ID <> self.ID:
OptionModule.Add("updates", (y, m, d))
y, m, d = PathDate(StripID(self.ID))
OptionModule.Add("updates", (y, m, d))
#Update the primary event's display right away
p = PageModule.Page(y, m, d)
p.PutEvents()
p.Format()
p.Format(private=True)
def EventView(self, message=None, updating=False):
"""Print formatted display of event."""
user = CGImodule.CGIgetUser()
title = self.title
content = HTML.Container()
year, month, day = self.start[0:3]
if not updating:
content.Add(PrintModule.NavigationBar(year, month, day, self.ID))
content.Add(HTML.Header(PrintModule.CalendarTitle(year, month, day)))
if message:
content.Add(HTML.Para(message, class_="alert"))
table = HTML.Table([500, 200], cellspacing="0", align="center")
row = HTML.Row()
if self.status == "Requested":
cell = HTML.Cell(class_="requested")
elif self.type == "Private":
cell = HTML.Cell(class_="private")
elif self.type == "Setup":
cell = HTML.Cell(class_="setup")
else:
cell = HTML.Cell()
if user and updating:
f = HTML.Form("ConfirmEvent.py")
t = HTML.Table(class_="transparent", cellspacing="0",
cellpadding="5", align="center")
r = HTML.Row()
r.Add(HTML.HeaderCell(HTML.Submit("edit", "Return to Edit"),
class_="transparent"))
r.Add(HTML.HeaderCell(HTML.Submit("confirm", "Confirm Event"),
class_="transparent"))
r.Add(HTML.HeaderCell(HTML.Submit("cancel", "Cancel Edit"),
class_="transparent"))
t.Add(r)
f.Add(t)
f.Add(HTML.Para(
"""This is a preview of the event display.%sPress "Confirm Event"
to save it to the calendar.""" % HTML.Break(), class_="alert"))
f.Add(HTML.Header(self.title))
else:
cell.Add(HTML.Header(self.title))
t = HTML.Table([120, 360], cellspacing="0", align="center")
if user:
t.Add(HTML.Row(HTML.HeaderCell("Event Description", colspan=2,
class_="empty")))
r = HTML.Row()
r.Add(HTML.HeaderCell("Type", style="padding:5px"))
r.Add(HTML.Cell(self.type, style="padding:5px"))
t.Add(r)
if self.type <> "Banner" and self.type <> "Holiday":
r = HTML.Row()
r.Add(HTML.HeaderCell("Time", style="padding:5px"))
r.Add(HTML.Cell("%s to %s" % (FormatTime(self.start),
FormatTime(self.end)),
style="padding:5px"))
t.Add(r)
r = HTML.Row()
r.Add(HTML.HeaderCell("Description", style="padding:5px"))
r.Add(HTML.Cell(self.description, style="padding:5px"))
t.Add(r)
r = HTML.Row()
r.Add(HTML.HeaderCell("Location", style="padding:5px"))
r.Add(HTML.Cell(self.location, style="padding:5px"))
t.Add(r)
r = HTML.Row()
r.Add(HTML.HeaderCell("Organizer", style="padding:5px"))
if self.organizer and self.email:
url = "%s/ComposeMessage.py?ID=%s" % (cgiURL, self.ID)
r.Add(HTML.Cell(HTML.Anchor(url, self.organizer),
style="padding:5px"))
else:
r.Add(HTML.Cell(self.organizer, style="padding:5px"))
t.Add(r)
if user:
r = HTML.Row()
r.Add(HTML.HeaderCell("Phone", style="padding:5px"))
r.Add(HTML.Cell(self.phone, style="padding:5px"))
t.Add(r)
r = HTML.Row()
r.Add(HTML.HeaderCell("Email", style="padding:5px"))
r.Add(HTML.Cell(HTML.Anchor(self.email, scheme="mailto:"),
style="padding:5px"))
t.Add(r)
t.Add(HTML.Row(HTML.HeaderCell("Event Reservation", colspan=2,
class_="empty")))
if self.type <> "Banner" and self.type <> "Holiday":
r = HTML.Row()
r.Add(HTML.HeaderCell("Reservation", style="padding:5px"))
r.Add(HTML.Cell("%s to %s"
% (FormatTime(self.reservation["start"]),
FormatTime(self.reservation["end"])),
style="padding:5px"))
t.Add(r)
r = HTML.Row()
r.Add(HTML.HeaderCell("Locations", style="padding:5px"))
r.Add(HTML.Cell(", ".join(self.locations), style="padding:5px"))
t.Add(r)
r = HTML.Row()
r.Add(HTML.HeaderCell("Resources", style="padding:5px"))
r.Add(HTML.Cell(", ".join(self.resources), style="padding:5px"))
t.Add(r)
r = HTML.Row()
r.Add(HTML.HeaderCell("Categories", style="padding:5px"))
r.Add(HTML.Cell(", ".join(self.categories), style="padding:5px"))
t.Add(r)
r = HTML.Row()
r.Add(HTML.HeaderCell("Setup", style="padding:5px"))
r.Add(HTML.Cell(self.setup, style="padding:5px"))
t.Add(r)
r = HTML.Row()
r.Add(HTML.HeaderCell("Status", style="padding:5px"))
if self.status == "Requested" and self.created:
status = "Requested on %s at %s" % (FormatDate(self.created),
FormatTime(self.created))
else:
status = self.status
r.Add(HTML.Cell(status, style="padding:5px"))
t.Add(r)
if updating:
conflicts = self.CheckConflicts(checkRepeats=True)
else:
conflicts = self.CheckConflicts()
if conflicts:
r = HTML.Row()
r.Add(HTML.HeaderCell("Conflicts",
style="padding:5px;color:red"))
c = HTML.Cell(style="padding:5px")
c.Add(ListConflicts(conflicts))
r.Add(c)
t.Add(r)
if self.repeats:
r = HTML.Row()
r.Add(HTML.HeaderCell("Repeats", style="padding:5px"))
c = HTML.Cell(class_="sunday",
style="padding:5px;text-align:center")
if updating and hasattr(self, "pattern"):
c.Add(HTML.Checkboxes("repeats", self.repeats,
self.repeats,
StripIDs(self.repeats),
columns=3))
else:
c.Add(PrintModule.RepeatList(self.repeats))
if updating:
for repeat in self.repeats:
c.Add(HTML.HiddenInput("repeats", repeat))
r.Add(c)
t.Add(r)
if updating:
f.Add(t)
f.Add(HTML.HiddenInput("ID", self.ID))
cell.Add(f)
else:
f = self.EventOptions()
cell.Add(t)
cell.Add(f)
else:
cell.Add(t)
if self.status == "Approved" and not updating:
t = HTML.Table([480], cellspacing="0", align="center")
t.Add(HTML.Row(HTML.HeaderCell("Notification List")))
if user:
c = HTML.Cell(HTML.Para("""
Add email addresses (one per line) that you wish to add
to the event notification list. If this is a repeating
event, they will be added to all the repeats """,
class_="status"))
else:
c = HTML.Cell(HTML.Para("""
If you wish to be reminded of this event or notified if
there are any changes, submit your email address here.
If this is a repeating event, your email address will be
added to all the repeats. Contact the %s Administration
if you wish to have your address removed.""" % calendarAbbr,
class_="status"))
f = HTML.Form("AddNotification.py")
if user:
f.Add(HTML.Para("%s%s%s"
% (HTML.TextArea("email", rows=5, cols=40),
HTML.Break(),
HTML.Submit(value="Add Email Addresses")),
class_="center"))
else:
f.Add(HTML.Para("%s%s%s"
% (HTML.Input("email", size=40, maxlength=80),
HTML.TAB,
HTML.Submit(value="Add Email")), class_="center"))
f.Add(HTML.HiddenInput("ID", self.ID))
c.Add(f)
f = HTML.Form("RemoveNotification.py")
if user and self.notifyList:
f.Add(HTML.Para("%s%s%s"
% (HTML.Selections("email", self.notifyList,
label=True),
HTML.TAB,
HTML.Submit(value="Remove Email")),
class_="center"))
f.Add(HTML.HiddenInput("ID", self.ID))
c.Add(f)
t.Add(HTML.Row(c))
cell.Add(t)
row.Add(cell)
row.Add(PrintModule.SideMonthsCell(year, month))
table.Add(row)
content.Add(table)
content.Add(PrintModule.CalendarOptions(year, month, day))
return HTML.Page(StripHTML(title), content)
def EventOptions(self):
"""Add links to event options."""
user = CGImodule.CGIgetUser()
if user == "admin" or user in GetModule.GetSupervisors():
supervisor = True
else:
supervisor = False
table = HTML.Table(cellspacing="0", cellpadding="5", align="center")
row = HTML.Row()
if self.status == "Requested":
if supervisor:
link = "%s/ApproveEvent.py?ID=%s" % (cgiURL, self.ID)
row.Add(HTML.HeaderCell(HTML.Anchor(link, "Approve Event")))
elif user:
link = "%s/RequestEvent.py?ID=%s" % (cgiURL, self.ID)
row.Add(HTML.HeaderCell(HTML.Anchor(link, "Request Approval")))
elif not supervisor:
link = "%s/RequestEvent.py?ID=%s" % (cgiURL, self.ID)
row.Add(HTML.HeaderCell(HTML.Anchor(link, "Request Change")))
if user:
link = "%s/EditEvent.py?ID=%s" % (cgiURL, self.ID)
row.Add(HTML.HeaderCell(HTML.Anchor(link, "Edit Event")))
link = "%s/CopyEvent.py?ID=%s" % (cgiURL, self.ID)
row.Add(HTML.HeaderCell(HTML.Anchor(link, "Copy Event")))
if supervisor:
link = "%s/RemoveEvent.py?ID=%s" % (cgiURL, self.ID)
row.Add(HTML.HeaderCell(HTML.Anchor(link, "Remove Event")))
if user:
link = "%s/ViewLog.py?ID=%s" % (cgiURL, self.ID)
row.Add(HTML.HeaderCell(HTML.Anchor(link, "View Log")))
if self.status == "Approved":
link = "%s/NotifyList.py?ID=%s" % (cgiURL, self.ID)
row.Add(HTML.HeaderCell(HTML.Anchor(link, "Notify List")))
table.Add(row)
return table
def EditPage(self, message=None, copied=False):
"""Print form to add or modify a calendar event."""
if self.ID:
new = False
else:
new = True
user = CGImodule.CGIgetUser()
if user == "admin" or user in GetModule.GetSupervisors():
supervisor = True
else:
supervisor = False
if hasattr(self, "status"):
if self.status == "Approved":
requested = False
else:
requested = True
else:
requested = True
self.status = "Requested"
content = HTML.Container()
if new:
title = "Add New Event"
content.Add(HTML.Header(title))
self.status = "Requested"
if not copied:
self.type = "Event"
else:
title = "Event : %s" % self.title
content.Add(HTML.Header(title, class_="title"))
if isinstance(self.start, time.struct_time):
content.Add(HTML.Para("%s" % FormatDate(self.start),
class_="center",
style="font-weight: bold"))
if message:
content.Add(HTML.Para(message, class_="alert"))
form = HTML.Form("ModifyEvent.py")
table = HTML.Table([150, 550], cellspacing="0", align="center")
table.Add(HTML.Row(HTML.HeaderCell("Event Description", colspan=2,
class_="empty")))
row = HTML.Row()
row.Add(HTML.HeaderCell("Event Title"))
row.Add(HTML.Cell(HTML.Input("title", self.title, size=80,
maxlength=255)))
table.Add(row)
row = HTML.Row()
row.Add(HTML.HeaderCell("Type"))
if requested or supervisor:
options = ["Event", "Special", "Banner", "Holiday", "Private",
"Setup"]
row.Add(HTML.HeaderCell(HTML.RadioButtons("type",
options, self.type),
class_="sunday"))
else:
row.Add(HTML.Cell(self.type))
table.Add(row)
row = HTML.Row()
row.Add(HTML.HeaderCell("Description"))
row.Add(HTML.Cell(HTML.TextArea("description",
ConvertBreaks(self.description),
rows=10, cols=80)))
table.Add(row)
row = HTML.Row()
row.Add(HTML.HeaderCell("Location"))
if requested or supervisor:
locations = GetModule.GetLocations()
row.Add(HTML.HeaderCell(HTML.Selections("location", locations,
self.location, label=True),
class_="sunday"))
else:
row.Add(HTML.Cell(self.location))
table.Add(row)
if requested or supervisor:
if isinstance(self.start, time.struct_time):
year = time.strftime("%Y", self.start)
month = time.strftime("%B", self.start)
day = time.strftime("%d", self.start).lstrip("0")
starthour = time.strftime("%I", self.start).lstrip("0")
startmin = time.strftime("%M", self.start)
startmeridiem = time.strftime("%p", self.start).lower()
else:
year, month, day = None, None, None
starthour, startmin, startmeridiem = None, None, None
if isinstance(self.end, time.struct_time):
endhour = time.strftime("%I", self.end).lstrip("0")
endmin = time.strftime("%M", self.end)
endmeridiem = time.strftime("%p", self.end).lower()
else:
endhour, endmin, endmeridiem = None, None, None
row = HTML.Row()
row.Add(HTML.HeaderCell("Time"))
row.Add(HTML.HeaderCell("%s:%s%s to %s:%s%s"
% (HTML.Selections("starthour", hourList, starthour),
HTML.Selections("startminute", minuteList, startmin),
HTML.Selections("startampm", meridiemList,startmeridiem),
HTML.Selections("endhour", hourList, endhour),
HTML.Selections("endminute", minuteList, endmin),
HTML.Selections("endampm", meridiemList, endmeridiem)),
class_="sunday"))
table.Add(row)
row = HTML.Row()
row.Add(HTML.HeaderCell("Date"))
row.Add(HTML.HeaderCell("%s%s, %s"
% (HTML.Selections("startmonth", monthList, month),
HTML.Selections("startday", dayList, day),
HTML.Selections("startyear", yearList, year)),
class_="sunday"))
table.Add(row)
else:
if self.type <> "Banner" and self.type <> "Holiday":
row = HTML.Row()
row.Add(HTML.HeaderCell("Time", style="padding:5px"))
row.Add(HTML.Cell("%s to %s" % (FormatTime(self.start),
FormatTime(self.end)),
style="padding:5px"))
table.Add(row)
if hasattr(self, "pattern"):
row = HTML.Row()
row.Add(HTML.HeaderCell("Repeats"))
cell = HTML.HeaderCell(class_="sunday")
options = ["Once only", "Daily", "Weekly", "Monthly",
"Annually", "Same Day Monthly"]
cell.Add("%s%s%s%s%s%s"
% (HTML.Selections("pattern", options,
selected=self.pattern),
HTML.Input("number",
value=`len(self.repeats)`, size=5,
maxlength=5),
"times OR until",
HTML.Selections("endmonth", monthList, month),
HTML.Selections("endday", dayList, day),
HTML.Selections("endyear", yearList, year)))
row.Add(cell)
table.Add(row)
elif self.repeats:
row = HTML.Row()
row.Add(HTML.HeaderCell("Repeats"))
cell = HTML.HeaderCell(class_="sunday", style="padding:10px")
options = ["single", "future", "all"]
descriptions = ["Edit this event only", "Edit future repeats",
"Edit all repeats"]
cell.Add(HTML.RadioButtons("repeat", options, "single",
descriptions))
cell.Add(PrintModule.RepeatList(self.repeats))
for repeat in self.repeats:
cell.Add(HTML.HiddenInput("repeats", repeat))
row.Add(cell)
table.Add(row)
if new and not copied:
e = Editor.Editor(user)
self.organizer = e.name
self.phone = e.phone
self.email = e.email
row = HTML.Row()
row.Add(HTML.HeaderCell("Organizer"))
row.Add(HTML.Cell("%s%sOR%s%s"
% (HTML.Input("organizer", self.organizer,
size=30, maxlength=255),
HTML.TAB, HTML.TAB,
HTML.Selections("name",
GetModule.GetOrganizers(),
label=True))))
table.Add(row)
row = HTML.Row()
row.Add(HTML.HeaderCell("Phone"))
row.Add(HTML.Cell("%s%s%s"
% (HTML.Input("phone", self.phone, size=30,
maxlength=255),
HTML.TAB,
HTML.Span("Not displayed in public calendar",
style="font-style:italic;font-size:0.75em"))))
table.Add(row)
row = HTML.Row()
row.Add(HTML.HeaderCell("Email"))
row.Add(HTML.Cell("%s%s%s"
% (HTML.Input("email", self.email, size=30,
maxlength=255),
HTML.TAB,
HTML.Span("Not displayed in public calendar",
style="font-style:italic;font-size:0.75em"))))
table.Add(row)
table.Add(HTML.Row(HTML.HeaderCell("Event Reservations", colspan=2,
class_="empty")))
if requested or supervisor:
row = HTML.Row()
row.Add(HTML.HeaderCell("Reservation Times"))
cell = HTML.HeaderCell(class_="sunday")
options = ["Same as Event", "Longer than Event", "All Day"]
if new and not copied:
self.reservation["option"] = "Same as Event"
cell.Add(HTML.RadioButtons("reserve", options,
self.reservation["option"]))
cell.Add(HTML.Break())
start = self.reservation["start"]
if isinstance(start, time.struct_time):
starthour = time.strftime("%I", start).lstrip("0")
startmin = time.strftime("%M", start)
startmeridiem = time.strftime("%p", start).lower()
else:
starthour, startmin, startmeridiem = None, None, None
end = self.reservation["end"]
if isinstance(end, time.struct_time):
endhour = time.strftime("%I", end).lstrip("0")
endmin = time.strftime("%M", end)
endmeridiem = time.strftime("%p", end).lower()
else:
endhour, endmin, endmeridiem = None, None, None
cell.Add("%s:%s%s to %s:%s%s"
% (HTML.Selections("resstarthour", hourList, starthour),
HTML.Selections("resstartminute", minuteList, startmin),
HTML.Selections("resstartampm", meridiemList, startmeridiem),
HTML.Selections("resendhour", hourList, endhour),
HTML.Selections("resendminute", minuteList, endmin),
HTML.Selections("resendampm", meridiemList, endmeridiem)))
row.Add(cell)
table.Add(row)
row = HTML.Row()
row.Add(HTML.HeaderCell("Additional Resources"))
cell = HTML.HeaderCell(class_="sunday")
t = HTML.Table([180, 180, 180], class_="transparent",
cellspacing="0", align="center")
r = HTML.Row()
c = HTML.HeaderCell("Locations", class_="transparent")
c.Add(HTML.Break())
try:
self.locations.remove(self.location)
except ValueError:
pass
c.Add(HTML.Selections("locations", locations, self.locations,
multiple=True))
r.Add(c)
c = HTML.HeaderCell("Resources", class_="transparent")
c.Add(HTML.Break())
resources = GetModule.GetResources()
c.Add(HTML.Selections("resources", resources, self.resources,
multiple=True))
r.Add(c)
c = HTML.HeaderCell("Categories", class_="transparent")
c.Add(HTML.Break())
categories = GetModule.GetCategories()
c.Add(HTML.Selections("categories", categories, self.categories,
multiple=True))
r.Add(c)
t.Add(r)
cell.Add(t)
row.Add(cell)
table.Add(row)
else:
row = HTML.Row()
row.Add(HTML.HeaderCell("Locations", style="padding:5px"))
try:
self.locations.remove(self.location)
except ValueError:
pass
row.Add(HTML.Cell(", ".join(self.locations), style="padding:5px"))
for location in self.locations:
row.Add(HTML.HiddenInput("locations", location))
table.Add(row)
row = HTML.Row()
row.Add(HTML.HeaderCell("Resources", style="padding:5px"))
row.Add(HTML.Cell(", ".join(self.resources), style="padding:5px"))
for resource in self.resources:
row.Add(HTML.HiddenInput("resources", resource))
table.Add(row)
row = HTML.Row()
categories = GetModule.GetCategories()
row.Add(HTML.HeaderCell("Categories", style="padding:5px"))
row.Add(HTML.Cell(HTML.Selections("categories", categories,
self.categories, multiple=True)))
table.Add(row)
row = HTML.Row()
row.Add(HTML.HeaderCell("Setup Instructions"))
row.Add(HTML.Cell(HTML.TextArea("setup",
ConvertBreaks(self.setup),
rows=5, cols=80)))
table.Add(row)
row = HTML.Row()
row.Add(HTML.HeaderCell("Status"))
if supervisor:
options = ["Approved", "Requested"]
row.Add(HTML.HeaderCell(HTML.RadioButtons("status", options,
self.status),
class_="sunday"))
else:
row.Add(HTML.HeaderCell(self.status, class_="sunday"))
row.Add(HTML.HiddenInput("status", self.status))
table.Add(row)
form.Add(table)
form.Add(HTML.HiddenInput("editor", user))
if new:
form.Add(HTML.Para("%s%s%s"
% (HTML.Submit(value="Add Event"),
HTML.TAB,
HTML.Submit("cancel", "Cancel")),
class_="center"))
else:
form.Add(HTML.Para("%s%s%s"
% (HTML.Submit(value="Update Event"),
HTML.TAB,
HTML.Submit("cancel", "Cancel")),
class_="center"))
form.Add(HTML.HiddenInput("ID", self.ID))
content.Add(form)
if isinstance(self.start, time.struct_time):
year, month = self.start[0:2]
else:
year, month = None, None
content.Add(PrintModule.BottomMonthsTable(year, month))
return HTML.Page(title, content)
def RemovePage(self):
"""Print form to remove a calendar event."""
user = CGImodule.CGIgetUser()
if self.status == "Approved" and user <> "admin" and \
user not in GetModule.GetSupervisors():
message = "Not authorized to remove an approved event"
return self.EventView(message)
title = "Event : %s" % self.title
content = HTML.Container()
content.Add(HTML.Header("Event : %s" % self.title, class_="title"))
if isinstance(self.start, time.struct_time):
content.Add(HTML.Para("%s" % FormatDate(self.start),
class_="center", style="font-weight: bold"))
form = HTML.Form("ConfirmRemoval.py")
table = HTML.Table([150, 550], cellspacing="0", align="center")
row = HTML.Row()
row.Add(HTML.HeaderCell("Event Title", style="padding:5px"))
row.Add(HTML.Cell(self.title, style="padding:5px"))
table.Add(row)
row = HTML.Row()
row.Add(HTML.HeaderCell("Event Type", style="padding:5px"))
row.Add(HTML.Cell(self.type, style="padding:5px"))
table.Add(row)
if self.type <> "Banner" and self.type <> "Holiday":
row = HTML.Row()
row.Add(HTML.HeaderCell("Time", style="padding:5px"))
row.Add(HTML.Cell("%s to %s" % (FormatTime(self.start),
FormatTime(self.end)),
style="padding:5px"))
table.Add(row)
if self.repeats:
row = HTML.Row()
row.Add(HTML.HeaderCell("Repeats"))
cell = HTML.HeaderCell("Remove: %s" % HTML.TAB, class_="sunday",
style="padding:10px")
options = ["single", "future", "all"]
descriptions = ["Only this event", "Future repeats", "All repeats"]
cell.Add(HTML.RadioButtons("repeat", options, "single",
descriptions))
cell.Add(PrintModule.RepeatList(self.repeats))
for repeat in self.repeats:
cell.Add(HTML.HiddenInput("repeats", repeat))
row.Add(cell)
table.Add(row)
row = HTML.Row()
row.Add(HTML.HeaderCell("Status", style="padding:5px"))
row.Add(HTML.HeaderCell(self.status, class_="sunday",
style="padding:5px"))
table.Add(row)
form.Add(table)
form.Add(HTML.HiddenInput("editor", user))
form.Add(HTML.Para("%s%s%s"
% (HTML.Submit(value="Confirm Removal"),
HTML.TAB,
HTML.Submit("cancel", "Cancel")),
class_="center"))
form.Add(HTML.HiddenInput("ID", self.ID))
content.Add(form)
if isinstance(self.start, time.struct_time):
year, month = self.start[0:2]
else:
year, month = None, None
content.Add(PrintModule.BottomMonthsTable(year, month))
return HTML.Page(title, content)
def RequestPage(self):
"""Send an email requesting approval of an event."""
user = CGImodule.CGIgetUser()
name = Editor.Editor(user).name
email = Editor.Editor(user).email
if email:
email = "<%s>" % email
else:
email = ""
title = "%s Event Request" % calendarAbbr
content = HTML.Container()
content.Add(HTML.Header("%s Event Request" % calendarAbbr,
class_="title"))
content.Add(HTML.Para("""
The following message will be sent to the %s Administration. If you
wish to add a message, please use the text box below.
""" % calendarName))
table = HTML.Table([600], align="center", cellspacing="0",
cellpadding="20")
if self.status == "Requested":
prefix = "Your approval of"
else:
prefix = "A change to"
table.Add(HTML.Row(HTML.Cell(ConvertCRLFs("""\
%s the following %s event has been requested:
Title: %s
Date: %s
Time: %s to %s
Location: %s
Resource: %s
Category: %s
Requested by: %s %s
Please visit the following URL to approve or modify the requested event:
<%s/ViewEvent.py?ID=%s>
""" % (prefix, calendarAbbr, self.title, FormatDate(self.start, day=True),
FormatTime(self.start), FormatTime(self.end),
", ".join(self.locations), ", ".join(self.resources),
", ".join(self.categories),
name, email, cgiURL, self.ID)))))
content.Add(table)
form = HTML.Form("SendRequest.py")
table = HTML.Table([150, 450], cellspacing="0", align="center")
row = HTML.Row()
row.Add(HTML.HeaderCell("Message"))
row.Add(HTML.Cell(HTML.TextArea("message")))
table.Add(row)
form.Add(table)
form.Add(HTML.Para("%s%s%s"
% (HTML.Submit(value="Send Message"),
HTML.TAB,
HTML.Submit("cancel", "Cancel")),
class_="center"))
form.Add(HTML.HiddenInput("ID", self.ID))
form.Add(HTML.HiddenInput("prefix", prefix))
content.Add(form)
content.Add(PrintModule.CalendarOptions())
year, month = self.start[0:2]
content.Add(PrintModule.BottomMonthsTable(year, month))
return HTML.Page(title, content)
def NotifyPage(self):
"""Send an email notification to those who have requested it."""
user = CGImodule.CGIgetUser()
name = Editor.Editor(user).name
email = Editor.Editor(user).email
if email:
email = "<%s>" % email
else:
email = ""
title = "Event Log: %s" % self.title
content.Add(HTML.Header("%s Event Notification" % calendarAbbr,
class_="title"))
content.Add(HTML.Para("""
The following message will be sent to the list of those who
requested notification of this event. It contains the main
details of the event. If you wish to add a message, please use
the text box below.
"""))
table = HTML.Table([600], align="center", cellspacing="0",
cellpadding="20")
table.Add(HTML.Row(HTML.Cell(ConvertCRLFs("""\
Title: %s
Date: %s
Time: %s to %s
Location: %s
%s
Please visit the following URL to view further details:
<%s/ViewEvent.py?ID=%s>
If you wish to be removed from the notification list for this event,
please contact the %s Administration.
""" % (self.title, FormatDate(self.start, day=True),
FormatTime(self.start), FormatTime(self.end), self.location,
ConvertBreaks(self.description), cgiURL, self.ID, calendarAbbr)))))
content.Add(table)
form = HTML.Form("SendNotification.py")
table = HTML.Table([150, 450], cellspacing="0", align="center")
row = HTML.Row()
row.Add(HTML.HeaderCell("Message"))
row.Add(HTML.Cell(HTML.TextArea("message")))
table.Add(row)
form.Add(table)
form.Add(HTML.Para("%s%s%s"
% (HTML.Submit(value="Send Message"),
HTML.TAB,
HTML.Submit("cancel", "Cancel")),
class_="center"))
form.Add(HTML.HiddenInput("ID", self.ID))
content.Add(form)
content.Add(PrintModule.CalendarOptions())
year, month = self.start[0:2]
content.Add(PrintModule.BottomMonthsTable(year, month))
return HTML.Page(title, content)
def MessagePage(self):
"""Send an email to the event organizer (hiding their address)."""
if IsEmail(self.email):
user = CGImodule.CGIgetUser()
if user in GetModule.GetEditors():
e = GetModule.GetEditor(user)
name = e.name
email = e.email
else:
name = ""
email = ""
title = "%s Event Message" % calendarAbbr
content = HTML.Container()
content.Add(HTML.Header("%s Event Message" % calendarAbbr,
class_="title"))
content.Add(HTML.Para("""
Use this form to send an email to the organizer of the
event. You must supply your name and a valid email address,
but this information will not be stored.
"""))
form = HTML.Form("SendMessage.py")
table = HTML.Table([150, 450], cellspacing="0", align="center")
row = HTML.Row()
row.Add(HTML.HeaderCell("Name"))
row.Add(HTML.Cell(HTML.Input("name", name, size=50)))
table.Add(row)
row = HTML.Row()
row.Add(HTML.HeaderCell("Email"))
row.Add(HTML.Cell(HTML.Input("email", email, size=50)))
table.Add(row)
row = HTML.Row()
row.Add(HTML.HeaderCell("Subject"))
row.Add(HTML.Cell(HTML.Input("subject", size=50)))
table.Add(row)
row = HTML.Row()
row.Add(HTML.HeaderCell("Message"))
row.Add(HTML.Cell(HTML.TextArea("message")))
table.Add(row)
form.Add(table)
form.Add(HTML.Para("%s%s%s"
% (HTML.Submit(value="Send Message"),
HTML.TAB,
HTML.Submit("cancel", "Cancel")),
class_="center"))
form.Add(HTML.HiddenInput("ID", self.ID))
content.Add(form)
content.Add(PrintModule.CalendarOptions())
year, month = self.start[0:2]
content.Add(PrintModule.BottomMonthsTable(year, month))
return HTML.Page(title, content)
        else:
            return self.EventView("The organizer's email address is unavailable.")
def LogPage(self):
"""Print logs of event updates."""
year, month, day = self.start[0:3]
title = "Event Log: %s" % self.title
content = HTML.Container()
content.Add(HTML.Header("Event Log: %s" % self.title, class_="title"))
table = HTML.Table([500, 200], cellspacing="0", align="center")
row = HTML.Row()
cell = HTML.Cell()
t = HTML.Table([190,90,200], cellpadding="5")
r = HTML.Row()
r.Add(HTML.HeaderCell("Time"))
r.Add(HTML.HeaderCell("User"))
r.Add(HTML.HeaderCell("Log"))
t.Add(r)
for log in self.logs:
r = HTML.Row(HTML.HeaderCell(log[0], class_="sunday",
style="font-size:0.9em"))
r.Add(HTML.Cell(log[1], style="text-align: center"))
r.Add(HTML.Cell(log[2].replace('\n','<br>\n')))
t.Add(r)
cell.Add(t)
t = HTML.Table(cellspacing="0", cellpadding="5", align="center")
r = HTML.Row()
link = "%s/ViewEvent.py?ID=%s" % (cgiURL, self.ID)
r.Add(HTML.HeaderCell(HTML.Anchor(link, "View Event")))
t.Add(r)
cell.Add(t)
row.Add(cell)
row.Add(PrintModule.SideMonthsCell(year, month))
table.Add(row)
content.Add(table)
content.Add(PrintModule.CalendarOptions(year, month, day))
return HTML.Page(title, content)
def CheckConflicts(self, checkRepeats=False):
"""Check availability of specified locations and/or resources."""
start = self.reservation["start"]
end = self.reservation["end"]
        found = []
        if checkRepeats and self.repeats:
            for repeat in self.repeats:
                found.extend(GetConflicts(CopyTime(repeat, start),
                                          CopyTime(repeat, end, end=True),
                                          self.locations, self.resources))
        else:
            try:
                found.extend(GetConflicts(start, end, self.locations,
                                          self.resources))
            except ValueError:
                raise CalendarError, \
                      "There is a problem with Event ID %s" \
                      % HTML.Anchor("%s/ViewEvent.py?ID=%s"
                                    % (cgiURL, self.ID), self.ID)
        if hasattr(self, "oldID"):
            thisID = self.oldID
        else:
            thisID = self.ID
        conflicts = []
        for conflict in found:
            ID, locations, resources = conflict
            if ID <> thisID and ID not in self.repeats:
                conflicts.append(conflict)
        return conflicts
class TemporaryEvent(Event):
"""Class for temporary events."""
def __init__(self, ID=None):
"""Open or create an events database in a temporary directory."""
self.ID = ""
self.title = ""
self.type = "Event"
self.description = ""
self.location = ""
self.start = ""
self.end = ""
self.repeats = []
self.logs = []
self.organizer = ""
self.phone = ""
self.email = ""
self.reservation = {"start":None,"end":None,"option":"Same as Event"}
self.locations = []
self.resources = []
self.categories = []
self.setup = ""
self.status = ""
self.created = None
self.editor = ""
self.notifyList = []
self.dir = ""
if ID:
self.dir = os.path.join(homeDir, ID)
self.Read()
else:
tmpDir = os.path.join(homeDir, "tmp")
if not os.path.exists(tmpDir):
omask = os.umask(0)
os.mkdir(tmpDir)
os.umask(omask)
self.ID = os.path.join("tmp",
"%03d" % GetModule.GetNextEvent("tmp"))
self.dir = os.path.join(homeDir, self.ID)
def AddLog(self, log):
"""Add a log entry to the event database with a time stamp."""
timestamp = FormatTime(time.localtime(time.time()), "ISO8601")
user = CGImodule.CGIgetUser()
try:
self.logs.append((timestamp, user, log))
except NameError:
self.logs = [(timestamp, user, log)]
def GetConflicts(start, end, locations, resources):
"""Return a list of potential location and/or resource conflicts."""
year, month, day = start[0:3]
events = GetModule.GetEvents(year, month, day)
conflicts = []
for e in events:
if start >= e["reservation"]["end"] or \
end <= e["reservation"]["start"]:
pass
else:
locationConflicts = []
for location in locations:
if location in e["locations"]:
locationConflicts.append(location)
resourceConflicts = []
for resource in resources:
if resource in e["resources"]:
resourceConflicts.append(resource)
if locationConflicts or resourceConflicts:
conflicts.append((e["ID"], locationConflicts,
resourceConflicts))
return conflicts
def ListConflicts(conflicts):
"""Output a list of potential location and/or resource conflicts."""
d = HTML.Div()
for conflict in conflicts:
ID, locations, resources = conflict
e = Event(ID)
div = HTML.Div(class_="dayview")
para = HTML.Para(class_="time")
para.Add("%s %s to %s:"
% (FormatDate(e.reservation["start"]),
FormatTime(e.reservation["start"]),
FormatTime(e.reservation["end"])))
para.Add(", ".join(locations+resources))
div.Add(para)
para = HTML.Para(HTML.Anchor("%s/ViewEvent.py?ID=%s" % (cgiURL, e.ID),
e.title),
class_="event")
div.Add(para)
d.Add(div)
return str(d)
|
rayosborn/pycal
|
src/pycal/Event.py
|
Python
|
lgpl-3.0
| 51,992
|
[
"VisIt"
] |
32006a622309cf342d2190d8089f37020dcdcc5ce3e1b091c8364b45f9e41f62
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: acl
version_added: "1.4"
short_description: Sets and retrieves file ACL information.
description:
- Sets and retrieves file ACL information.
notes:
- As of Ansible 2.0, this module only supports Linux distributions.
    - The "acl" module requires that acls are enabled on the target filesystem and that the setfacl and getfacl binaries are installed.
options:
name:
required: true
default: null
description:
- The full path of the file or object.
aliases: ['path']
state:
required: false
default: query
choices: [ 'query', 'present', 'absent' ]
description:
- defines whether the ACL should be present or not. The C(query) state gets the current acl without changing it, for use in 'register' operations.
follow:
required: false
default: yes
choices: [ 'yes', 'no' ]
description:
- whether to follow symlinks on the path if a symlink is encountered.
default:
version_added: "1.5"
required: false
default: no
choices: [ 'yes', 'no' ]
description:
- if the target is a directory, setting this to yes will make it the default acl for entities created inside the directory. It causes an error if name is a file.
entity:
version_added: "1.5"
required: false
description:
- actual user or group that the ACL applies to when matching entity types user or group are selected.
etype:
version_added: "1.5"
required: false
default: null
choices: [ 'user', 'group', 'mask', 'other' ]
description:
- the entity type of the ACL to apply, see setfacl documentation for more info.
permissions:
version_added: "1.5"
required: false
default: null
description:
- Permissions to apply/remove can be any combination of r, w and x (read, write and execute respectively)
entry:
required: false
default: null
description:
      - DEPRECATED. The acl to set or remove. This must always be quoted in the form of '<etype>:<qualifier>:<perms>'. The qualifier may be empty for some types, but the type and perms are always required. '-' can be used as a placeholder when you do not care about permissions. This is now superseded by the entity, etype and permissions fields.
recursive:
version_added: "2.0"
required: false
default: no
choices: [ 'yes', 'no' ]
description:
- Recursively sets the specified ACL (added in Ansible 2.0). Incompatible with C(state=query).
author:
- "Brian Coca (@bcoca)"
- "Jérémie Astori (@astorije)"
'''
EXAMPLES = '''
# Grant user Joe read access to a file
- acl: name=/etc/foo.conf entity=joe etype=user permissions="r" state=present
# Removes the acl for Joe on a specific file
- acl: name=/etc/foo.conf entity=joe etype=user state=absent
# Sets default acl for joe on foo.d
- acl: name=/etc/foo.d entity=joe etype=user permissions=rw default=yes state=present
# Same as previous but using entry shorthand
- acl: name=/etc/foo.d entry="default:user:joe:rw-" state=present
# Obtain the acl for a specific file
- acl: name=/etc/foo.conf
register: acl_info
'''
RETURN = '''
acl:
description: Current acl on provided path (after changes, if any)
returned: success
type: list
sample: [ "user::rwx", "group::rwx", "other::rwx" ]
'''
def split_entry(entry):
''' splits entry and ensures normalized return'''
a = entry.split(':')
a.reverse()
if len(a) == 3:
a.append(False)
try:
p, e, t, d = a
except ValueError, e:
print "wtf?? %s => %s" % (entry, a)
raise e
if d:
d = True
if t.startswith("u"):
t = "user"
elif t.startswith("g"):
t = "group"
elif t.startswith("m"):
t = "mask"
elif t.startswith("o"):
t = "other"
else:
t = None
return [d, t, e, p]
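# Illustrative calls (hypothetical entries):
#   split_entry('user:joe:rw-')         -> [False, 'user', 'joe', 'rw-']
#   split_entry('default:user:joe:rw-') -> [True, 'user', 'joe', 'rw-']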
def build_entry(etype, entity, permissions=None):
'''Builds and returns an entry string. Does not include the permissions bit if they are not provided.'''
if permissions:
return etype + ':' + entity + ':' + permissions
else:
return etype + ':' + entity
def build_command(module, mode, path, follow, default, recursive, entry=''):
    '''Builds and returns a getfacl/setfacl command.'''
if mode == 'set':
cmd = [module.get_bin_path('setfacl', True)]
cmd.append('-m "%s"' % entry)
elif mode == 'rm':
cmd = [module.get_bin_path('setfacl', True)]
cmd.append('-x "%s"' % entry)
else: # mode == 'get'
cmd = [module.get_bin_path('getfacl', True)]
# prevents absolute path warnings and removes headers
cmd.append('--omit-header')
cmd.append('--absolute-names')
if recursive:
cmd.append('--recursive')
if not follow:
cmd.append('-h')
if default:
if(mode == 'rm'):
cmd.append('-k')
else: # mode == 'set' or mode == 'get'
cmd.append('-d')
cmd.append(path)
return cmd
def acl_changed(module, cmd):
    '''Returns true if the provided command affects the existing ACLs, false otherwise.'''
    cmd = cmd[:]  # lists are mutable, so cmd would be overridden without this copy
    cmd.insert(1, '--test')
    lines = run_acl(module, cmd)
    for line in lines:
        # setfacl --test marks entries that would be left unchanged with a
        # trailing '*,*'; any other line indicates a pending change.
        if not line.endswith('*,*'):
            return True
    return False
def run_acl(module, cmd, check_rc=True):
try:
(rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc)
except Exception, e:
module.fail_json(msg=e.strerror)
lines = out.splitlines()
if lines and not lines[-1].split():
# trim last line only when it is empty
return lines[:-1]
else:
return lines
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, aliases=['path'], type='str'),
entry=dict(required=False, type='str'),
entity=dict(required=False, type='str', default=''),
etype=dict(
required=False,
choices=['other', 'user', 'group', 'mask'],
type='str'
),
permissions=dict(required=False, type='str'),
state=dict(
required=False,
default='query',
choices=['query', 'present', 'absent'],
type='str'
),
follow=dict(required=False, type='bool', default=True),
default=dict(required=False, type='bool', default=False),
recursive=dict(required=False, type='bool', default=False),
),
supports_check_mode=True,
)
    # The platform check must come after AnsibleModule is created, since
    # fail_json lives on the module object.
    if get_platform().lower() != 'linux':
        module.fail_json(msg="The acl module is only available for Linux distributions.")
path = os.path.expanduser(module.params.get('name'))
entry = module.params.get('entry')
entity = module.params.get('entity')
etype = module.params.get('etype')
permissions = module.params.get('permissions')
state = module.params.get('state')
follow = module.params.get('follow')
default = module.params.get('default')
recursive = module.params.get('recursive')
if not os.path.exists(path):
module.fail_json(msg="Path not found or not accessible.")
if state == 'query' and recursive:
module.fail_json(msg="'recursive' MUST NOT be set when 'state=query'.")
if not entry:
if state == 'absent' and permissions:
module.fail_json(msg="'permissions' MUST NOT be set when 'state=absent'.")
if state == 'absent' and not entity:
module.fail_json(msg="'entity' MUST be set when 'state=absent'.")
if state in ['present', 'absent'] and not etype:
module.fail_json(msg="'etype' MUST be set when 'state=%s'." % state)
if entry:
if etype or entity or permissions:
module.fail_json(msg="'entry' MUST NOT be set when 'entity', 'etype' or 'permissions' are set.")
if state == 'present' and entry.count(":") != 3:
module.fail_json(msg="'entry' MUST have 3 sections divided by ':' when 'state=present'.")
if state == 'absent' and entry.count(":") != 2:
module.fail_json(msg="'entry' MUST have 2 sections divided by ':' when 'state=absent'.")
default, etype, entity, permissions = split_entry(entry)
changed = False
msg = ""
if state == 'present':
entry = build_entry(etype, entity, permissions)
command = build_command(
module, 'set', path, follow,
default, recursive, entry
)
changed = acl_changed(module, command)
if changed and not module.check_mode:
run_acl(module, command)
msg = "%s is present" % entry
elif state == 'absent':
entry = build_entry(etype, entity)
command = build_command(
module, 'rm', path, follow,
default, recursive, entry
)
changed = acl_changed(module, command)
if changed and not module.check_mode:
run_acl(module, command, False)
msg = "%s is absent" % entry
elif state == 'query':
msg = "current acl"
acl = run_acl(
module,
build_command(module, 'get', path, follow, default, recursive)
)
module.exit_json(changed=changed, msg=msg, acl=acl)
# import module snippets
from ansible.module_utils.basic import *
main()
|
yannh/ansible-modules-core
|
files/acl.py
|
Python
|
gpl-3.0
| 10,175
|
[
"Brian"
] |
fe50431a562629d7f2fd993e12abaed68494c1d9c4aa2e6776921806e357110b
|
"""Module containing physical constants and `NamedTuple`s to store molecular orbitals, shell, etc.
Index
-----
.. currentmodule:: nanoqm.common
.. autosummary::
DictConfig
change_mol_units
getmass
number_spherical_functions_per_atom
retrieve_hdf5_data
is_data_in_hdf5
store_arrays_in_hdf5
API
---
.. autoclass:: DictConfig
.. autofunction:: is_data_in_hdf5
.. autofunction:: retrieve_hdf5_data
.. autofunction:: number_spherical_functions_per_atom
.. autofunction:: store_arrays_in_hdf5
"""
__all__ = ['DictConfig', 'Matrix', 'Tensor3D', 'Vector',
'change_mol_units', 'getmass', 'h2ev', 'hardness',
'number_spherical_functions_per_atom', 'retrieve_hdf5_data',
'is_data_in_hdf5', 'store_arrays_in_hdf5', 'UniqueSafeLoader']
import os
from itertools import chain, repeat
from pathlib import Path
from typing import (Any, Dict, Iterable, List, Mapping, NamedTuple, Tuple,
Union, overload)
import h5py
import mendeleev
import numpy as np
from scipy.constants import physical_constants
from qmflows.common import AtomXYZ
from qmflows.type_hints import PathLike
from scm.plams import Atom, Molecule
from qmflows.yaml_utils import UniqueSafeLoader
class DictConfig(dict):
"""Class to extend the Dict class with `.` dot notation."""
def __getattr__(self, attr):
"""Extract key using dot notation."""
return self.get(attr)
def __setattr__(self, key, value):
"""Set value using dot notation."""
self.__setitem__(key, value)
def __deepcopy__(self, _):
"""Deepcopy of the Settings object."""
return DictConfig(self.copy())
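# Usage sketch (hypothetical keys): items double as attributes in both
# directions, e.g.
#     cfg = DictConfig({"active_space": [10, 10]})
#     cfg.active_space          # -> [10, 10]
#     cfg.dt = 1.0              # same as cfg["dt"] = 1.0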
class BasisFormats(NamedTuple):
"""NamedTuple that contains the name/value for the basis formats."""
name: str
value: List[str]
def concat(xss: Iterable) -> List[Any]:
"""Concatenate of all the elements of a list."""
return list(chain(*xss))
# ================> Constants <================
#: Angstrom to a.u
angs2au = 1e-10 / physical_constants['atomic unit of length'][0]
#: from femtoseconds to au
femtosec2au = 1e-15 / physical_constants['atomic unit of time'][0]
#: hartrees to electronvolts
h2ev = physical_constants['Hartree energy in eV'][0]
#: conversion from rydberg to meV
r2meV = 1e3 * physical_constants['Rydberg constant times hc in eV'][0]
#: conversion from fs to cm-1
fs_to_cm = 1e13 * physical_constants['hertz-inverse meter relationship'][0]
#: conversion from fs to nm
fs_to_nm = 299.79246
#: reduced Planck constant (hbar) in eV * fs
hbar = 1e15 * physical_constants['Planck constant over 2 pi in eV s'][0]
# type hints
MolXYZ = List[AtomXYZ]
Vector = np.ndarray
Matrix = np.ndarray
Tensor3D = np.ndarray
def path_to_posix(path: Union[str, Path]) -> str:
"""Convert a Path to posix string."""
if isinstance(path, Path):
return path.absolute().as_posix()
else:
return path
def getmass(s: str) -> int:
"""Get the atomic mass for a given element s."""
element = mendeleev.element(s.capitalize())
return element.mass_number
def hardness(s: str) -> float:
"""Get the element hardness."""
d = {
'h': 6.4299, 'he': 12.5449, 'li': 2.3746, 'be': 3.4968, 'b': 4.619, 'c': 5.7410,
'n': 6.8624, 'o': 7.9854, 'f': 9.1065, 'ne': 10.2303, 'na': 2.4441, 'mg': 3.0146,
'al': 3.5849, 'si': 4.1551, 'p': 4.7258, 's': 5.2960, 'cl': 5.8662, 'ar': 6.4366,
'k': 2.3273, 'ca': 2.7587, 'sc': 2.8582, 'ti': 2.9578, 'v': 3.0573, 'cr': 3.1567,
'mn': 3.2564, 'fe': 3.3559, 'co': 3.4556, 'ni': 3.555, 'cu': 3.6544, 'zn': 3.7542,
'ga': 4.1855, 'ge': 4.6166, 'as': 5.0662, 'se': 5.4795, 'br': 5.9111, 'kr': 6.3418,
'rb': 2.1204, 'sr': 2.5374, 'y': 2.6335, 'zr': 2.7297, 'nb': 2.8260, 'mo': 2.9221,
'tc': 3.0184, 'ru': 3.1146, 'rh': 3.2107, 'pd': 3.3069, 'ag': 3.4032, 'cd': 3.4994,
'in': 3.9164, 'sn': 4.3332, 'sb': 4.7501, 'te': 5.167, 'i': 5.5839, 'xe': 6.0009,
'cs': 0.6829, 'ba': 0.9201, 'la': 1.1571, 'ce': 1.3943, 'pr': 1.6315, 'nd': 1.8686,
'pm': 2.1056, 'sm': 2.3427, 'eu': 2.5798, 'gd': 2.8170, 'tb': 3.0540, 'dy': 3.2912,
'ho': 3.5283, 'er': 3.7655, 'tm': 4.0026, 'yb': 4.2395, 'lu': 4.4766, 'hf': 4.7065,
'ta': 4.9508, 'w': 5.1879, 're': 5.4256, 'os': 5.6619, 'ir': 5.900, 'pt': 6.1367,
'au': 6.3741, 'hg': 6.6103, 'tl': 1.7043, 'pb': 1.9435, 'bi': 2.1785, 'po': 2.4158,
'at': 2.6528, 'rn': 2.8899, 'fr': 0.9882, 'ra': 1.2819, 'ac': 1.3497, 'th': 1.4175,
'pa': 1.9368, 'u': 2.2305, 'np': 2.5241, 'pu': 3.0436, 'am': 3.4169, 'cm': 3.4050,
'bk': 3.9244, 'cf': 4.2181, 'es': 4.5116, 'fm': 4.8051, 'md': 5.0100, 'no': 5.3926,
'lr': 5.4607}
return d[s] / 27.211
def xc(s: str) -> Dict[str, Any]:
"""Return the exchange functional composition."""
d = {
'pbe': {
'type': 'pure', 'alpha1': 1.42, 'alpha2': 0.48, 'ax': 0, 'beta1': 0.2, 'beta2': 1.83},
'blyp': {
'type': 'pure', 'alpha1': 1.42, 'alpha2': 0.48, 'ax': 0, 'beta1': 0.2, 'beta2': 1.83},
'bp': {
'type': 'pure', 'alpha1': 1.42, 'alpha2': 0.48, 'ax': 0, 'beta1': 0.2, 'beta2': 1.83},
'pbe0': {
'type': 'hybrid', 'alpha1': 1.42, 'alpha2': 0.48, 'ax': 0.25, 'beta1': 0.2, 'beta2': 1.83},
'b3lyp': {
'type': 'hybrid', 'alpha1': 1.42, 'alpha2': 0.48, 'ax': 0.20, 'beta1': 0.2, 'beta2': 1.83},
'bhlyp': {
'type': 'hybrid', 'alpha1': 1.42, 'alpha2': 0.48, 'ax': 0.50, 'beta1': 0.2, 'beta2': 1.83},
'cam-b3lyp': {
'type': 'rhs', 'alpha1': 1.86, 'alpha2': 0.00, 'ax': 0.38, 'beta1': 0.90, 'beta2': 0},
'lc-blyp': {
'type': 'rhs', 'alpha1': 8.0, 'alpha2': 0.00, 'ax': 0.53, 'beta1': 4.50, 'beta2': 0},
'wb97': {
'type': 'rhs', 'alpha1': 8.0, 'alpha2': 0.00, 'ax': 0.61, 'beta1': 4.41, 'beta2': 0.0}}
return d[s]
@overload
def retrieve_hdf5_data(path_hdf5: Union[str, Path], paths_to_prop: str) -> np.ndarray:
...
@overload
def retrieve_hdf5_data(path_hdf5: Union[str, Path], paths_to_prop: List[str]) -> List[np.ndarray]:
...
def retrieve_hdf5_data(path_hdf5, paths_to_prop):
"""Read Numerical properties from ``paths_hdf5``.
Parameters
----------
path_hdf5
path to the HDF5
path_to_prop
str or list of str to data
Returns
-------
np.ndarray
array or list of array
Raises
------
    KeyError
        The property has not been found
    RuntimeError
        There is no HDF5 file containing the numerical results
"""
path_hdf5 = path_to_posix(path_hdf5)
try:
with h5py.File(path_hdf5, 'r') as f5:
if isinstance(paths_to_prop, list):
return [f5[path][()] for path in paths_to_prop]
else:
return f5[paths_to_prop][()]
except KeyError:
msg = f"There is not {paths_to_prop} stored in the HDF5\n"
raise KeyError(msg)
except FileNotFoundError:
msg = "there is not HDF5 file containing the numerical results"
raise RuntimeError(msg)
def is_data_in_hdf5(path_hdf5: Union[str, Path], xs: Union[str, List[str]]) -> bool:
"""Search if the node exists in the HDF5 file.
Parameters
----------
path_hdf5
path to the HDF5
xs
either Node path or a list of paths to the stored data
Returns
-------
bool
Whether the data is stored
"""
path_hdf5 = path_to_posix(path_hdf5)
if os.path.exists(path_hdf5):
        with h5py.File(path_hdf5, 'r') as f5:  # read-only access is enough for a lookup
if isinstance(xs, list):
return all(path in f5 for path in xs)
else:
return xs in f5
else:
return False
@overload
def store_arrays_in_hdf5(
path_hdf5: PathLike, paths: str, tensor: np.ndarray,
dtype: float = np.float32, attribute: Union[BasisFormats, None] = None) -> None:
...
@overload
def store_arrays_in_hdf5(
path_hdf5: PathLike, paths: List[str], tensor: np.ndarray,
dtype: float = np.float32, attribute: Union[BasisFormats, None] = None) -> None:
...
def store_arrays_in_hdf5(
path_hdf5, paths, tensor, dtype=np.float32, attribute=None):
"""Store a tensor in the HDF5.
Parameters
----------
path_hdf5
path to the HDF5
paths
str or list of nodes where the data is going to be stored
tensor
Numpy array or list of array to store
dtype
Data type use to store the numerical array
attribute
Attribute associated with the tensor
"""
path_hdf5 = path_to_posix(path_hdf5)
def add_attribute(data_set, k: int = 0):
if attribute is not None:
data_set.attrs[attribute.name] = attribute.value[k]
with h5py.File(path_hdf5, 'r+') as f5:
if isinstance(paths, list):
for k, path in enumerate(paths):
data = tensor[k]
dset = f5.require_dataset(path, shape=np.shape(data),
data=data, dtype=dtype)
add_attribute(dset, k)
else:
dset = f5.require_dataset(paths, shape=np.shape(
tensor), data=tensor, dtype=dtype)
add_attribute(dset)
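# Round-trip sketch (hypothetical file and node names; the file is opened in
# 'r+' mode, so it must already exist):
#     store_arrays_in_hdf5('quantum.hdf5', 'cp2k/mo/energies', np.zeros(10))
#     energies = retrieve_hdf5_data('quantum.hdf5', 'cp2k/mo/energies')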
def change_mol_units(mol: List[AtomXYZ], factor: float = angs2au) -> List[AtomXYZ]:
"""Change the units of the molecular coordinates."""
new_molecule = []
for atom in mol:
coord = tuple(map(lambda x: x * factor, atom.xyz))
new_molecule.append(AtomXYZ(atom.symbol, coord))
return new_molecule
def tuplesXYZ_to_plams(xs: List[AtomXYZ]) -> Molecule:
"""Transform a list of namedTuples to a Plams molecule."""
plams_mol = Molecule()
for at in xs:
symb = at.symbol
cs = at.xyz
plams_mol.add_atom(Atom(symbol=symb, coords=tuple(cs)))
return plams_mol
def number_spherical_functions_per_atom(
mol: List[AtomXYZ], package_name: str, basis_name: str, path_hdf5: PathLike) -> np.ndarray:
"""Compute the number of spherical shells per atom."""
with h5py.File(path_hdf5, 'r') as f5:
xs = [f5[f'{package_name}/basis/{atom[0]}/{basis_name}/coefficients']
for atom in mol]
ys = [calc_orbital_Slabels(
read_basis_format(path.attrs['basisFormat'])) for path in xs]
return np.stack([sum(len(x) for x in ys[i]) for i in range(len(mol))])
@overload
def calc_orbital_Slabels(fss: List[int]) -> List[Tuple[str, ...]]:
...
@overload
def calc_orbital_Slabels(fss: List[List[int]]) -> List[Tuple[str, ...]]:
...
def calc_orbital_Slabels(fss):
"""Compute the spherical CGFs for a given basis set.
Most quantum packages use standard basis sets whose contraction is
usually presented in a format like:
c def2-SV(P)
# c (7s4p1d) / [3s2p1d] {511/31/1}
This means that this basis set for the carbon atom uses 7 ``s`` CGFs,
4 ``p`` CGFs and 1 ``d`` CGF, contracted into groups of 5-1-1
``s`` functions, 3-1 ``p`` functions and 1 ``d`` function. Therefore
the basis set format can be represented by [[5,1,1], [3,1], [1]].
On the other hand, CP2K uses the special ``MOLOPT`` basis sets, whose
format is explained at: `CP2K
<https://github.com/cp2k/cp2k/blob/e392d1509d7623f3ebb6b451dab00d1dceb9a248/cp2k/data/BASIS_MOLOPT>`_.
Parameters
----------
fss
Basis set format
Returns
-------
list
containing tuples with the spherical CGFs
"""
angular_momentum = ['s', 'p', 'd', 'f', 'g']
return concat([funSlabels(dict_cp2k_order_sphericals, label, fs)
for label, fs in zip(angular_momentum, fss)])
@overload
def funSlabels(d: Mapping[str, Tuple[str, ...]], label: str, fs: int) -> List[Tuple[str, ...]]:
...
@overload
def funSlabels(d: Mapping[str, Tuple[str, ...]], label: str, fs: List[int]) -> List[Tuple[str, ...]]:
...
def funSlabels(data, label, fs):
"""Search for the spherical functions for each orbital type `label`."""
if isinstance(fs, list):
fs = sum(fs)
labels = repeat(data[label], fs)
return labels
def read_basis_format(basis_format: str) -> List[int]:
"""Read the basis set using the specified format."""
s = basis_format.replace('[', '').split(']')[0]
fss = list(map(int, s.split(',')))
fss = fss[4:] # cp2k coefficient formats start in column 5
return fss
#: Ordering of the Spherical shells
dict_cp2k_order_sphericals: Mapping[str, Tuple[str, ...]] = {
's': ('s',),
'p': ('py', 'pz', 'px'),
'd': ('d-2', 'd-1', 'd0', 'd+1', 'd+2'),
'f': ('f-3', 'f-2', 'f-1', 'f0', 'f+1', 'f+2', 'f+3')
}
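# A hedged sketch of how these helpers compose; the basisFormat string below is
# hypothetical (the real attribute is stored alongside the basis coefficients):
#
# fss = read_basis_format('[1, 2, 3, 4, 5, 3, 1]')  # -> [5, 3, 1]
# labels = calc_orbital_Slabels(fss)
# # labels contains five ('s',) tuples, three ('py', 'pz', 'px') tuples and
# # one ('d-2', 'd-1', 'd0', 'd+1', 'd+2') tuple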
def read_cell_parameters_as_array(file_cell_parameters: PathLike) -> Tuple[str, np.ndarray]:
"""Read the cell parameters as a numpy array."""
arr = np.loadtxt(file_cell_parameters, skiprows=1)
with open(file_cell_parameters, 'r') as f:
header = f.readline()
return header, arr
|
SCM-NV/qmworks-namd
|
nanoqm/common.py
|
Python
|
mit
| 13,045
|
[
"CP2K"
] |
58ee3b28729a9b4c28590bb3f815d51b62caf83abd7a3a5f72d7f5872550a86e
|
#
# Copyright (C) 2000 Greg Landrum
#
""" unit testing code for cross validation """
from __future__ import print_function
import os
import unittest
from rdkit import RDConfig
from rdkit import RDRandom
from rdkit.ML.DecTree import CrossValidate
from rdkit.ML.DecTree import randomtest
from rdkit.TestRunner import redirect_stdout
from rdkit.six import BytesIO, StringIO
from rdkit.six.moves import cPickle
class XValTestCase(unittest.TestCase):
def setUp(self):
self.origTreeName = RDConfig.RDCodeDir + '/ML/DecTree/test_data/XValTree.pkl'
self.randomSeed = 23
self.randomArraySeed = (23, 42)
def testRun(self):
# " test that the CrossValidationDriver runs "
examples, attrs, nPossibleVals = randomtest.GenRandomExamples(nExamples=200)
f = StringIO()
with redirect_stdout(f):
tree, frac = CrossValidate.CrossValidationDriver(examples, attrs, nPossibleVals, silent=False)
self.assertGreater(frac, 0)
self.assertEqual('Var: 1', tree.GetName())
self.assertIn('Validation error', f.getvalue())
CrossValidate.CrossValidationDriver(examples, attrs, nPossibleVals, lessGreedy=True,
calcTotalError=True, silent=True)
def testResults(self):
# " test the results of CrossValidation "
RDRandom.seed(self.randomSeed)
examples, attrs, nPossibleVals = randomtest.GenRandomExamples(nExamples=200,
seed=self.randomArraySeed)
tree, frac = CrossValidate.CrossValidationDriver(examples, attrs, nPossibleVals, silent=1)
self.assertGreater(frac, 0)
with open(self.origTreeName, 'r') as inTFile:
buf = inTFile.read().replace('\r\n', '\n').encode('utf-8')
inFile = BytesIO(buf)
oTree = cPickle.load(inFile)
assert oTree == tree, 'Random CrossValidation test failed'
def testReplacementSelection(self):
# " use selection with replacement "
RDRandom.seed(self.randomSeed)
examples, attrs, nPossibleVals = randomtest.GenRandomExamples(nExamples=200,
seed=self.randomArraySeed)
tree, frac = CrossValidate.CrossValidationDriver(examples, attrs, nPossibleVals, silent=1,
replacementSelection=1)
self.assertTrue(tree)
self.assertAlmostEqual(frac, 0.01666, 4)
def test_TestRun(self):
try:
f = StringIO()
with redirect_stdout(f):
CrossValidate.TestRun()
self.assertTrue(os.path.isfile('save.pkl'))
s = f.getvalue()
self.assertIn('t1 == t2 True', s)
finally:
if os.path.isfile('save.pkl'):
os.remove('save.pkl')
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
rvianello/rdkit
|
rdkit/ML/DecTree/UnitTestXVal.py
|
Python
|
bsd-3-clause
| 2,793
|
[
"RDKit"
] |
f5324324af795008ff42df2cef4d709f3ab3840ecd56bb38a007055c78d237b2
|
#!/usr/bin/env python
#
# $File: utils.py $
# $LastChangedDate$
# $Rev$
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
simuPOP utilities.
This module provides some commonly used operators
and format conversion utilities.
"""
__all__ = [
'viewVars',
'migrIslandRates',
'migrHierarchicalIslandRates',
'migrSteppingStoneRates',
'saveCSV',
'Exporter',
'export',
'importPopulation',
'ProgressBar',
'Trajectory',
'TrajectorySimulator',
'simulateBackwardTrajectory',
'simulateForwardTrajectory',
]
import sys
import time
from simuOpt import simuOptions
from simuPOP import moduleInfo, MALE, FEMALE, Population, PointMutator, getRNG,\
ALL_AVAIL, PyOperator, stat
import collections
def viewVars(var, gui=None):
'''
list a variable in tree format, either in text format or in a
wxPython window.
var
A dictionary variable to be viewed. Dictionary wrapper objects returned
by ``Population.dvars()`` and ``Simulator.dvars()`` are also acceptable.
gui
If gui is ``False`` or ``'Tkinter'``, a text representation (using the
pprint module) of the variable will be printed to the screen. If gui is
``'wxPython'`` and wxPython is available, a wxPython window will be
used. The default mode is determined by the global gui mode (see also
``simuOpt.setOptions``).
'''
if gui is None:
gui = simuOptions['GUI']
#
if gui in [False, 'batch', 'interactive', 'Tkinter']:
import pprint
try:
# a dvars() object
pprint.pprint(var.__dict__)
except:
pprint.pprint(var)
return
try:
import wx, wx.py.filling as fill
except ImportError:
import pprint
pprint.pprint(var)
return
app = wx.PySimpleApp()
wx.InitAllImageHandlers()
if var is None:
fillFrame = fill.FillingFrame()
else:
try:
# a dvars() object?
fillFrame = fill.FillingFrame(rootObject=var.__dict__,
rootLabel='var')
except:
fillFrame = fill.FillingFrame(rootObject=var,
rootLabel='var')
fillFrame.Show(True)
app.MainLoop()
# migration rate matrix generators
def migrIslandRates(r, n):
'''migration rate matrix for an island model
::
x r/(n-1) r/(n-1) ....
r/(n-1) x ............
.....
.... r/(n-1) r/(n-1) x
where x = 1-r and r is the migration rate
'''
# n==1?
if n == 1:
return [[1]]
#
m = []
for i in range(0,n):
m.append([r/(n-1.)]*n)
m[-1][i] = 1-r
return m
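# A quick sketch (not in the original module): migrIslandRates(0.1, 3) returns
#
# [[0.9, 0.05, 0.05],
#  [0.05, 0.9, 0.05],
#  [0.05, 0.05, 0.9]]
#
# since each of the n-1 = 2 other islands receives r/(n-1) = 0.05.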
def migrHierarchicalIslandRates(r1, r2, n):
'''
Return the migration rate matrix for a hierarchical island model
where there are different migration rate within and across groups
of islands.
r1
Within group migration rates. It can be a number or a list of numbers
for each group of the islands.
r2
Across group migration rates which is the probability that someone will
migrate to a subpopulation outside of his group. A list of r2 could be
specified for each group of the islands.
n
Number of islands in each group. E.g. n=[5, 4] specifies two groups of
islands with 5 and 4 islands each.
For an individual on an island, the probability that it remains on the
same island is 1-r1-r2 (r1 and r2 may vary across island groups), the
probability that it migrates to another island in the same group is r1,
and the probability that it migrates to an island outside of its group
is r2. The migration rate to a specific island depends on the size of
its group.
'''
if type(n) not in [type(()), type([])]:
raise ValueError('A list of size of island groups is expected for parameter n')
nIslands = sum(n)
if type(r1) in [type(0), type(1.)]:
r1 = [r1] * len(n)
elif len(r1) != len(n):
raise ValueError('If multiple r1 is given, it should be given to all island groups.')
#
if type(r2) in [type(0), type(1.)]:
r2 = [r2] * len(n)
elif len(r2) != len(n):
raise ValueError('If multiple r2 is given, it should be given to all island groups.')
#
m = []
for groupIdx, groupSize in enumerate(n):
nOther = nIslands - groupSize
groupStart = sum(n[:groupIdx])
groupEnd = groupStart + groupSize
for island in range(groupStart, groupEnd):
m.append([])
for i in range(groupStart):
m[-1].append(r2[groupIdx] * 1.0 / nOther)
for i in range(groupStart, groupEnd):
if i == island:
m[-1].append(1 - r1[groupIdx] - r2[groupIdx])
else:
m[-1].append(r1[groupIdx] * 1.0 / groupSize)
for i in range(groupEnd, nIslands):
m[-1].append(r2[groupIdx] * 1.0 / nOther)
return m
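# A hedged sketch with assumed values: migrHierarchicalIslandRates(0.1, 0.05,
# [2, 2]) builds a 4x4 matrix whose first row is
#
# [0.85, 0.05, 0.025, 0.025]
#
# i.e. 1 - r1 - r2 = 0.85 on the diagonal, r1/groupSize = 0.05 within the
# group, and r2/nOther = 0.025 to each island outside the group.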
def migrSteppingStoneRates(r, n, circular=False):
'''migration rate matrix for a circular stepping stone model (X = 1-r, m = r)
::
X m/2 m/2
m/2 X m/2 0
0 m/2 X m/2 ......0
...
m/2 0 .... m/2 X
or non-circular
::
X m/2 m/2
m/2 X m/2 0
0 m/2 X m/2 ......0
...
... m X
This function returns [[1]] when there is only one subpopulation.
'''
if n < 2:
return [[1]]
elif n == 2:
return [[1-r,r],[r,1-r]]
# the normal case (n>2)
m = []
for i in range(0, n):
m.append([0]*n)
m[i][i] = 1-r
m[i][(i+1)%n] = r/2.
m[i][(i+n-1)%n] = r/2.
if not circular:
m[0][1] = r
m[0][-1] = 0
m[n-1][0] = 0
m[n-1][n-2] = r
return m
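# A sketch of the non-circular case with assumed values:
# migrSteppingStoneRates(0.1, 3, circular=False) returns
#
# [[0.9, 0.1, 0.0],
#  [0.05, 0.9, 0.05],
#  [0.0, 0.1, 0.9]]
#
# where the two end demes send their full rate r to their single neighbour.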
def saveCSV(pop, filename='', infoFields=[], loci=ALL_AVAIL, header=True,
subPops=ALL_AVAIL, genoFormatter=None, infoFormatter=None,
sexFormatter={MALE: 'M', FEMALE: 'F'},
affectionFormatter={True: 'A', False: 'U'}, sep=', ', **kwargs):
'''This function is deprecated. Please use ``export(format='csv')`` instead.
Save a simuPOP population ``pop`` in csv format. Columns of this
file is arranged in the order of information fields (``infoFields``),
sex (if ``sexFormatter`` is not ``None``), affection status (if
``affectionFormatter`` is not ``None``), and genotype (if ``genoFormatter`` is
not ``None``). This function only output individuals in the present
generation of population ``pop``. This function accepts the following
parameters:
pop
A simuPOP population object.
filename
Output filename. Leading '>' characters are ignored. However, if the first
character of this filename is '!', the rest of the name will be evaluated
in the population's local namespace. If ``filename`` is empty, the content
will be written to the standard output.
infoFields
Information fields to be outputted. Default to none.
loci
If a list of loci is given, only genotype at these loci will be
written. Default to ``ALL_AVAIL``, meaning all available loci. You can
set this parameter to ``[]`` if you do not want to output any genotype.
header
Whether or not a header should be written. These headers will include
information fields, sex (if ``sexFormatter`` is not ``None``), affection
status (if ``affectionFormatter`` is not ``None``) and loci names. If
genotype at a locus needs more than one column, ``_1``, ``_2`` etc will
be appended to loci names. Alternatively, a complete header (a string)
or a list of column names could be specified directly.
subPops
A list of (virtual) subpopulations. If specified, only individuals
from these subpopulations will be outputted.
infoFormatter
A format string that is used to format all information fields. If
unspecified, ``str(value)`` will be used for each information field.
genoFormatter
How to output genotype at specified loci. Acceptable values include
``None`` (output allele names), a dictionary with genotype as keys,
(e.g. ``genoFormatter={(0,0):1, (0,1):2, (1,0):2, (1,1):3}``, or a function
with genotype (as a tuple of integers) as inputs. The dictionary value
or the return value of this function can be a single or a list of
number or strings.
sexFormatter
How to output individual sex. Acceptable values include ``None`` (no
output) or a dictionary with keys ``MALE`` and ``FEMALE``.
affectionFormatter
How to output individual affection status. Acceptable values include
``None`` (no output) or a dictionary with keys ``True`` and ``False``.
Parameters ``genoCode``, ``sexCode``, and ``affectionCode`` from version
1.0.0 have been renamed to ``genoFormatter``, ``sexFormatter`` and
``affectionFormatter`` but can still be used.
'''
if moduleInfo()['debug']['DBG_COMPATIBILITY']:
print('WARNING: Function saveCSV is deprecated. Use export(format="csv") instead.', file=sys.stderr)
# handle obsolete parameters affectionCode, sexCode and genoCode
if 'genoCode' in kwargs:
if moduleInfo()['debug']['DBG_COMPATIBILITY']:
print('WARNING: Parameter genoCode is obsolete. Use genoFormatter instead.', file=sys.stderr)
genoFormatter = kwargs['genoCode']
if 'sexCode' in kwargs:
if moduleInfo()['debug']['DBG_COMPATIBILITY']:
print('WARNING: Parameter sexCode is obsolete. Use sexFormatter instead.', file=sys.stderr)
sexFormatter = kwargs['sexCode']
if 'affectionCode' in kwargs:
if moduleInfo()['debug']['DBG_COMPATIBILITY']:
print('WARNING: Parameter affectionCode is obsolete. Use affectionFormatter instead.', file=sys.stderr)
affectionFormatter = kwargs['affectionCode']
for key in list(kwargs.keys()):
if key not in ('genoCode', 'sexCode', 'affectionCode'):
raise ValueError("Unrecognized keyword parameter %s" % key)
# parameter pop
if not isinstance(pop, Population):
raise ValueError("Passed population should either be a population object")
# parameter loci
if loci is ALL_AVAIL:
loci = list(range(0, pop.totNumLoci()))
elif type(loci) == type(1):
loci = [loci]
if type(loci) not in [type([]), type(())]:
raise ValueError("Passed loci should be ALL_AVAIL or a list of loci.")
# parameter infoFields (allow single input)
if type(infoFields) == type(''):
infoFields = [infoFields]
# parameter filename
if filename.startswith('!'):
filename = str(pop.evaluate(filename[1:]))
if filename.startswith('>'):
filename = filename.lstrip('>')
#
try:
if filename:
out = open(filename, "w")
else:
out = sys.stdout
except IOError:
raise IOError("Can not open file " + filename +" to write.")
# parameter subPops
if subPops is ALL_AVAIL:
subPops = list(range(pop.numSubPop()))
#
# figure out columns per genotype
ploidy = pop.ploidy()
colPerGenotype = 0
if len(loci) > 0 and pop.totNumLoci() > 0 and pop.popSize() > 0:
if genoFormatter is None:
value = [0]*ploidy
elif isinstance(genoFormatter, dict):
if len(genoFormatter) == 0:
raise ValueError("genoFormatter cannot be empty")
value = list(genoFormatter.values())[0]
else:
if not isinstance(genoFormatter, collections.Callable):
raise ValueError("genoFormatter should be a None, a dictionary or a callable function")
value = genoFormatter(tuple([pop.individual(0).allele(0, p) for p in range(ploidy)]))
try:
if type(value) == type(''):
colPerGenotype = 1
else: # a sequence?
colPerGenotype = len(value)
except:
colPerGenotype = 1
# header
if header is True:
names = [x for x in infoFields]
if sexFormatter is not None:
names.append('sex')
if affectionFormatter is not None:
names.append('aff')
if colPerGenotype == 1:
names.extend([pop.locusName(loc) for loc in loci])
elif colPerGenotype > 1:
for loc in loci:
names.extend(['%s_%d' % (pop.locusName(loc), x+1) for x in range(colPerGenotype)])
# output header
print(sep.join(names), file=out)
elif type(header) == type(''):
print(header, file=out)
elif type(header) in [type(()), type([])]:
print(sep.join(header), file=out)
for subPop in subPops:
for ind in pop.individuals(subPop):
# information fields
if infoFormatter is None:
values = [str(ind.info(x)) for x in infoFields]
elif type(infoFormatter) == type(''):
values = [infoFormatter % tuple([ind.info(x) for x in infoFields])]
else:
raise ValueError('Parameter infoFormatter can only be None or a format string.')
# sex
if sexFormatter is not None:
values.append(str(sexFormatter[ind.sex()]))
# affection status
if affectionFormatter is not None:
values.append(str(affectionFormatter[ind.affected()]))
# genotype
for loc in loci:
if genoFormatter is None:
values.extend([ind.alleleChar(loc, p) for p in range(ploidy)])
else:
genotype = [ind.allele(loc, p) for p in range(ploidy)]
if isinstance(genoFormatter, dict):
code = genoFormatter[tuple(genotype)]
else:
code = genoFormatter(genotype)
if type(code) in [type([]), type(())]:
values.extend(['%s' % x for x in code])
else:
values.append(str(code))
# output
print(sep.join(values), file=out)
# close output file
if filename:
out.close()
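# A hedged usage sketch for saveCSV (hypothetical population and field names):
#
# from simuPOP import Population
# pop = Population(size=10, loci=2, infoFields=['age'])
# saveCSV(pop, 'sample.csv', infoFields=['age'],
#         genoFormatter={(0, 0): 1, (0, 1): 2, (1, 0): 2, (1, 1): 3})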
class _baseProgressBar:
def __init__(self, message, totalCount):
'''
message
Title of the progress bar
totalCount
Total expected steps.
'''
self.message = message
self.totalCount = totalCount
self.count = 0
self.percent = 0
self.completed = False
def update(self, count=None):
'''
Update the progress bar with ``count`` progress. If ``count`` is ``None``,
it updates by 1 count (not percent).
'''
if count is None:
self.count += 1
else:
self.count = min(count, self.totalCount)
self.progress = int(round(100*self.count/self.totalCount))
if self.progress <= self.percent:
return False
else:
return True
def done(self):
'''
Finish progressbar, print 'done' message.
'''
if self.completed:
return False
else:
self.completed = True
return True
class _textProgressBar(_baseProgressBar):
def __init__(self, message, totalCount, progressChar='.', block=2, done=' Done.\n'):
'''
message
Title of the progress bar
totalCount
Total expected steps.
progressChar
Character to be displayed for each progress.
block
Interval (in percent) at which progress is displayed.
done
Message displayed when the job is finished.
'''
_baseProgressBar.__init__(self, message, totalCount)
self.percent = 0
self.progressChar = progressChar
self.block = block
self.doneMsg = done
sys.stdout.write(message)
sys.stdout.flush()
def update(self, count):
''' Update the progress bar.'''
if not _baseProgressBar.update(self, count):
return
for p in range(self.percent + 1, self.progress + 1):
if p == 100:
self.done()
elif p % 10 == 0:
sys.stdout.write(str(p//10))
elif p % self.block == 0:
sys.stdout.write(self.progressChar)
sys.stdout.flush()
self.percent = self.progress
if self.percent == 100:
self.done()
def done(self):
'''
Finish progressbar, print 'done' message.
'''
if not _baseProgressBar.done(self):
return
sys.stdout.write(self.doneMsg)
sys.stdout.flush()
class _tkProgressBar(_baseProgressBar):
def __init__(self, message, totalCount):
'''
message
Title of the progress bar
totalCount
Total expected steps.
'''
_baseProgressBar.__init__(self, message, totalCount)
import tkinter as tk
self.width = 300
self.height = 30
self.max = 100
self.fillColor = 'blue'
self.labelColor = 'black'
self.label = 'Progress'
#
self.app = tk.Tk()
self.app.title(self.label)
self.frame = tk.Frame(self.app, bd=0)
self.canvas = tk.Canvas(self.frame, bd=0, width=self.width+40,
height = self.height + 70, highlightthickness=0)
self.label = self.canvas.create_text(20, 20,
text='', anchor="w", fill=self.labelColor, font=('Verdana', 10))
self.scale = self.canvas.create_rectangle(
20, 50, self.width + 20, 50 + self.height, fill=self.fillColor)
self.rect = self.canvas.create_rectangle(
20, 50, self.width + 20, 50 + self.height)
self.canvas.pack(side='top', fill='x', expand='yes', padx=0)
self.update(0)
self.frame.pack(padx=0, pady=0)
def update(self, count):
'''Update the progress bar.'''
if not _baseProgressBar.update(self, count):
return
#
self.canvas.coords(self.scale, 20, 50,
20 + self.progress * 1.0 / self.max * self.width, 50 + self.height)
# Now update the colors
self.canvas.itemconfig(self.scale, fill=self.fillColor)
self.canvas.itemconfig(self.label, fill=self.labelColor)
# And update the label
if self.progress > 0:
self.canvas.itemconfig(self.label, text=self.message + "\n%d%% completed." % self.progress)
else:
self.canvas.itemconfig(self.label, text=self.message)
self.canvas.update_idletasks()
self.app.update()
#
self.percent = self.progress
if self.percent == 100:
self.done()
def done(self):
'''
Finish progressbar, print 'done' message.
'''
if not _baseProgressBar.done(self):
return
self.app.destroy()
del self.app
class _wxProgressBar(_baseProgressBar):
def __init__(self, message, totalCount):
'''
message
Title of the progress bar
totalCount
Total expected steps.
'''
_baseProgressBar.__init__(self, message, totalCount)
import wx
self.app = wx.PySimpleApp(0)
self.dialog = wx.ProgressDialog(
'Progress', self.message + '\n', self.totalCount,
style = \
# wx.PD_CAN_ABORT | \
# wx.PD_CAN_SKIP | \
wx.PD_ELAPSED_TIME | \
# wx.PD_ESTIMATED_TIME | \
wx.PD_AUTO_HIDE | \
wx.PD_REMAINING_TIME
)
self.dialog.Update(0)
def update(self, count):
'''Update the progress bar.'''
if not _baseProgressBar.update(self, count):
return
self.dialog.Update(self.count, self.message + "\n%d%% completed." % self.progress)
self.percent = self.progress
if self.percent == 100:
self.done()
def done(self):
'''
Finish progressbar, print 'done' message.
'''
if not _baseProgressBar.done(self):
return
self.dialog.Destroy()
del self.app
class ProgressBar:
'''The ``ProgressBar`` class defines a progress bar. This class will use a
text-based progress bar that outputs progressing dots (.) with intermediate
numbers (e.g. 5 for 50%) under a non-GUI mode (``gui=False``) or not displaying
any progress bar if ``gui='batch'``. In the GUI mode, a Tkinter or wxPython
progress dialog will be used (``gui=Tkinter`` or ``gui=wxPython``). The default
mode is determined by the global gui mode of simuPOP
(see also ``simuOpt.setOptions``).
This class is usually used as follows::
progress = ProgressBar("Start simulation", 500)
for i in range(500):
# i+1 can be ignored if the progress bar is updated by 1 step
progress.update(i+1)
# if you would like to make sure the done message is displayed.
progress.done()
'''
def __init__(self, message, totalCount, progressChar='.', block=2, done=' Done.\n', gui=None):
'''Create a progress bar with ``message``, which will be the title of
a progress dialog or a message for a text-based progress bar. Parameter
``totalCount`` specifies the total number of expected steps. If a text-based
progress bar is used, you can specify the progress character and the
interval at which progress is displayed using parameters ``progressChar``
and ``block``. An ending message will also be displayed in text mode.
'''
if gui is None:
self.gui = simuOptions['GUI']
else:
self.gui = gui
if self.gui == 'batch':
self.update = lambda count=None: None
self.done = lambda : None
return
if self.gui in ['wxPython', True]:
try:
import wx
self.gui = 'wxPython'
except ImportError:
self.gui = 'Tkinter'
if self.gui == 'Tkinter':
try:
import tkinter
except ImportError:
self.gui = False
if self.gui == 'wxPython':
self.progressBar = _wxProgressBar(message, totalCount)
elif self.gui == 'Tkinter':
self.progressBar = _tkProgressBar(message, totalCount)
else:
self.progressBar = _textProgressBar(message, totalCount, progressChar, block, done)
def update(self, count=None):
'''
Update the progress bar with ``count`` steps done. The dialog or text bar
may only be updated when progress advances by at least one full percent.
If ``count`` is ``None``, the progress bar increases by one step (not percent).
'''
self.progressBar.update(count)
def done(self):
'''
Finish progressbar, print 'done' message if in text-mode.
'''
self.progressBar.done()
class Trajectory:
'''A ``Trajectory`` object contains frequencies of one or more loci in one
or more subpopulations over several generations. It is usually returned by
member functions of class ``TrajectorySimulator`` or equivalent global
functions ``simulateForwardTrajectory`` and ``simulateBackwardTrajectory``.
The ``Trajectory`` object provides several member functions to facilitate
the use of Trajectory-simulation techniques. For example,
``Trajectory.func()`` returns a trajectory function that can be provided
directly to a ``ControlledOffspringGenerator``; ``Trajectory.mutators()``
provides a list of ``PointMutator`` that insert mutants at the right
generations to initialize a trajectory.
For more information about Trajectory simulation techniques and related
controlled random mating scheme, please refer to the simuPOP user's guide,
and Peng et al (PLoS Genetics 3(3), 2007).
'''
def __init__(self, endGen, nLoci):
'''Create a ``Trajectory`` object of alleles at *nLoci* loci with
ending generation *endGen*. *endGen* is the generation when expected
allele frequencies are reached after mating. Therefore, a trajectory
for 1000 generations should have ``endGen=999``.
'''
# self.traj stores a list of frequencies for each loci.
# at each generation, the frequencies are saved as
# [[loc0_sp0, loc1_sp0], [loc0_sp1, loc1_sp1], ]
# and so on. That is to say, the frequencies should be accessed as
# self.traj[gen][sp][loc]
self.traj = {}
self.endGen = endGen
self.nLoci = nLoci
def _beginGen(self):
'''Return starting generation of all trajectories'''
return min(self.traj.keys())
def _freq(self, gen):
'''Return frequencies at all subpopulations at generation *gen*.'''
if gen not in self.traj:
# assuming no subpopulations
return [[0.] * self.nLoci]
assert len(self.traj[gen][0]) == self.nLoci
return self.traj[gen]
def freq(self, gen, subPop):
'''Return frequencies of all loci in subpopulation *subPop* at
generation *gen* of the simulated Trajectory. Allele frequencies are
assumed to be zero if *gen* is out of range of the simulated
Trajectory.
'''
if gen not in self.traj:
# assuming no subpopulations
return [0.] * self.nLoci
assert len(self.traj[gen][subPop]) == self.nLoci
return self.traj[gen][subPop]
def func(self):
'''Return a Python function that returns allele frequencies at all
simulated loci at a given generation. If there are multiple subpopulations, allele
frequencies are arranged in the order of ``loc0_sp0``, ``loc1_sp0``,
..., ``loc0_sp1``, ``loc1_sp1``, ... and so on. The returned function
can be supplied directly to the ``freqFunc`` parameter of a controlled
random mating scheme (``ControlledRandomMating``) or a homogeneous
mating scheme that uses a controlled offspring generator
(``ControlledOffspringGenerator``).
'''
def trajFunc(gen):
if gen not in self.traj:
return [0.] * self.nLoci
freq = []
for spFreq in self.traj[gen]:
freq.extend(spFreq)
return freq
return trajFunc
def mutants(self):
'''Return a list of mutants in the form of (loc, gen, subPop)'''
gens = list(self.traj.keys())
gens.sort()
if len(gens) == 0:
return []
mut = []
for gen in gens[:-1]:
# no introduction of mutants with Population merge or split.
if len(self.traj[gen]) != len(self.traj[gen + 1]):
continue
# we may need to introduce mutant at each subpopulation.
for sp in range(len(self.traj[gen])):
for loc in range(self.nLoci):
if self.traj[gen][sp][loc] == 0 and self.traj[gen + 1][sp][loc] > 0:
mut.append((loc, gen + 1, sp))
return mut
def mutators(self, loci, inds=0, allele=1, *args, **kwargs):
'''Return a list of ``PointMutator`` operators that introduce mutants
at the beginning of simulated trajectories. These mutators should be
added to the ``preOps`` parameter of ``Simulator.evolve`` function to
introduce a mutant at the beginning of a generation with zero allele
frequency before mating, and a positive allele frequency after mating.
A parameter ``loci`` is needed to specify actual loci indexes in the
real forward simulation. Other than default parameters ``inds=0`` and
``allele=1``, additional parameters could be passed to the point mutator
as keyword parameters.
'''
ops = []
if hasattr(loci, '__iter__') and len(loci) != self.nLoci:
raise ValueError('%d loci are expected' % self.nLoci)
for loc, gen, sp in self.mutants():
if self.nLoci == 1 and type(loci) == type(0):
ops.append(PointMutator(inds=inds, loci=loci, allele=allele,
subPops=sp, at=gen, *args, **kwargs))
elif hasattr(loci, '__iter__'):
ops.append(PointMutator(inds=inds, loci=loci[loc], allele=allele,
subPops=sp, at=gen, *args, **kwargs))
else:
raise ValueError('Invalid value for parameter loci')
return ops
def _setFreq(self, freq, gen):
'''This function sets frequency *freq* at specified generation *gen*.
*freq* should be a nested list of frequencies, one list per subpopulation.
'''
assert type(freq) in [type(()), type([])]
# deep copy to avoid trouble.
self.traj[gen] = []
for spFreq in freq:
assert len(spFreq) == self.nLoci
assert type(spFreq[0]) not in [type(()), type([])]
self.traj[gen].append([x for x in spFreq])
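# A hedged sketch of how a Trajectory is typically consumed; the mating-scheme
# names follow the simuPOP user's guide and are assumptions here:
#
# traj = simulateBackwardTrajectory(N=1000, endGen=500, endFreq=0.1)
# # traj.func() can be passed as freqFunc to a ControlledRandomMating scheme,
# # and traj.mutators(loci=[0]) can be added to preOps of Simulator.evolve to
# # introduce the mutant at the right generation.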
class TrajectorySimulator:
'''A Trajectory Simulator takes basic demographic and genetic (natural
selection) information of an evolutionary process of a diploid population
and allows the simulation of trajectories of allele frequencies at one or
more loci. Trajectories could be simulated in two ways: forward-time and
backward-time. In a forward-time simulation, the simulation starts from
certain allele frequency and simulates the frequency at the next generation
using given demographic and genetic information. The simulation continues
until an ending generation is reached. A Trajectory is successfully
simulated if the allele frequency at the ending generation falls into a
specified range. In a backward-time simulation, the simulation starts from
the ending generation with a desired allele frequency and simulate the
allele frequency at previous generations one by one until the allele gets
lost (allele frequency equals zero).
The result of a trajectory simulation is a trajectory object which can be
used to direct the simulation of a special random mating process that
controls the evolution of one or more disease alleles so that allele
frequencies are consistent across replicate simulations. For more
information about Trajectory simulation techniques and related controlled
random mating scheme, please refer to the simuPOP user's guide, and Peng et
al (PLoS Genetics 3(3), 2007).
'''
def __init__(self, N, nLoci=1, fitness=None, logger=None):
'''Create a trajectory Simulator using provided demographic and genetic
(natural selection) parameters. Member functions *simuForward* and
*simuBackward* can then be used to simulate trajectories within a certain
range of generations. This class accepts the following parameters:
N
Parameter *N* accepts either a constant number for population size
(e.g. N=1000), a list of subpopulation sizes (e.g. N=[1000, 2000]),
or a demographic function that returns population or subpopulation
sizes at each generation. During the evolution, multiple
subpopulations can be merged into one, and one population can be
split into several subpopulations. The number of subpopulation is
determined by the return value of the demographic function. Note
that *N* should be considered as the population size at the end of
specified generation.
nLoci
Number of unlinked loci for which trajectories of allele
frequencies are simulated. We assume a diploid population with
diallelic loci. The Trajectory represents frequencies of the derived
(mutant) allele at each locus.
fitness
Parameter fitness can be ``None`` (no selection), a list of fitness
values for genotype with 0, 1, and 2 disease alleles (*AA*, *Aa*,
and *aa*) at one or more loci; or a function that returns fitness
values at each generation. When multiple loci are involved
(*nLoci*), *fitness* can be a list of 3 (the same fitness values
for all loci), a list of 3*nLoci (different fitness values for each
locus) or a list of 3**nLoci (fitness value for each combination of
genotype). The fitness function should accept generation number and
a subpopulation index. The latter parameter allows, and is the only
way to specify, different fitness values in each subpopulation.
logger
A logging object (see Python module ``logging``) that can be used
to output intermediate results with debug information.
'''
# a vector of subpopulation sizes is needed
if isinstance(N, int):
self.N = [N]
else: # N is a list or a function
self.N = N
if fitness is None:
self.fitness = [1, 1, 1]
else:
# fitness is a list or a function
if type(fitness) in [type(()), type([])] and len(fitness) not in [3, 3*nLoci, 3**nLoci]:
raise ValueError('Invalid list of fitness.')
self.fitness = fitness
self.logger = logger
self.nLoci = nLoci
self.maxMutAge = 0
self.minMutAge = 0
def _Nt(self, gen):
'Get Nt(gen) depending on the type of N'
# _Nt() expects parameter gen
if isinstance(self.N, collections.Callable):
nt = self.N(gen)
# the return value of a demographic function sometimes is not integer.
if type(nt) in [int, float]:
return [int(nt)]
else:
return [int(x) for x in nt]
else:
# a constant list
return self.N
def _marginalFitness(self, fitness, freq):
'''Convert interaction fitness (3**n elements) to marginal fitness
(3*n elements) using given allele frequency. The marginal fitnesses
are calculated using formula:
f(X=Aa) = Sum_g P(Y=g) * f(X=Aa, Y=g)
where g is genotype at all other loci.
'''
assert len(freq) == 2
assert len(fitness) == 3 ** self.nLoci
s = [0] * (3 * self.nLoci)
# each locus
for loc in range(self.nLoci):
# each genotype AA, Aa and aa (geno is the number of disease allele)
for geno in range(3):
# iterate through OTHER DSL
allgeno = [0] * self.nLoci
# set myself
allgeno[loc] = geno
# iterate through genotype at other loci
f = 0.
for it in range(3**(self.nLoci - 1)):
# assign allgeno, using it as a 3-based integer.
num = it
for l in range(self.nLoci):
if l != loc:
allgeno[l] = num % 3
num //= 3
# calculate P(Y=g) and f(X=Aa, Y=g)
index = 0
fq = 1.
for i in range(len(allgeno)):
if i != loc:
if allgeno[i] == 0:
fq *= (1 - freq[i]) * (1 - freq[i])
elif allgeno[i] == 1:
fq *= 2 * (1 - freq[i]) * freq[i]
else:
fq *= freq[i] * freq[i]
# index is determined by genotype.
index = index * 3 + allgeno[i]
f += fitness[index] * fq
# sum over other genotype
s[loc * 3 + geno] = f
# convert to form 0, s1, s2
s[3 * loc + 1] = float(s[3 * loc + 1]) / s[3 * loc] - 1.
s[3 * loc + 2] = float(s[3 * loc + 2]) / s[3 * loc] - 1.
s[3 * loc] = 0.
return s
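# A worked sketch with assumed values: for nLoci=2 and freq=[0.1, 0.2], the
# unnormalized marginal fitness of genotype Aa at locus 0 is
# 0.64*fitness[3] + 0.32*fitness[4] + 0.04*fitness[5],
# i.e. the fitnesses of (Aa,BB), (Aa,Bb) and (Aa,bb) weighted by the
# Hardy-Weinberg probabilities at locus 1 (0.8**2, 2*0.8*0.2, 0.2**2),
# before the final conversion to selection coefficients 0, s1, s2.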
def _getS(self, gen, subPop, freq):
'''Get s1, s2 for subpopulation *subPop* at generation *gen*. If
self.fitness is a function, it is called with *gen* and *subPop* to get
a generation and subpopulation specific fitness value. The fitness
value is then translated to 0, s1, s2. If interactions are involved,
marginal fitness is calculated using allele frequency (``freq``) in
subpopulation *subPop*.
'''
assert len(freq) == self.nLoci
# _fitness() expects parameters gen and a subpopulation index
if isinstance(self.fitness, collections.Callable):
fit = self.fitness(gen, subPop)
else:
fit = self.fitness
s = []
# simplest case when fitness only depends on gen if defined in fitness func:
# case 1: 3x self.nLoci no interaction
if len(fit) == 3 * self.nLoci:
for i in range(self.nLoci):
if fit[3 * i] == 0:
raise ValueError('fitness['+ str(3 * i) + '] should be a non zero value.')
s.append(0.)
s.append(float(fit[3 * i + 1]) / float(fit[3 * i]) - 1.)
s.append(float(fit[3 * i + 2]) / float(fit[3 * i]) - 1.)
# case 2: same fitness for multiple loci
elif len(fit) == 3 and self.nLoci > 1:
if fit[0] == 0:
raise ValueError('fitness[0] should be a non zero value.')
s.append(0.)
s.append(float(fit[1]) / float(fit[0]) - 1.)
s.append(float(fit[2]) / float(fit[0]) - 1.)
s = s * self.nLoci
# case 3: 3**self.nLoci, interaction
elif len(fit) == 3**self.nLoci:
# from fitness list, get s using allele frequency
# Allele frequency for each subpopulation is passed and there will be
# different s for each subpopulation because different allele frequencies.
s.extend(self._marginalFitness(fit, freq))
else:
raise ValueError('Wrong length of list of fitness: ' + str(len(fit)))
return s
def _getNextXt(self, curXt, Nt, s):
'''Solve y from the formula and simulate allele frequencies in the next
generation. All parameters are assumed to be for one subpopulation. Nt
is the population size at the end of the current generation (or the
next generation).'''
assert len(curXt) == self.nLoci
assert type(Nt) not in [type(()), type([])]
it = []
xt = []
for loc in range(self.nLoci):
# if current allele freq in subpop sp at locus loc has already been 0 or 1,
# set it to be 0 or 1 for next gens
x = curXt[loc]
if x in [0, 1]:
xt.append(x)
continue
s1 = s[3 * loc + 1]
s2 = s[3 * loc + 2]
# with s1 and s2 on hand, calculate freq at the next generation
y = x * (1 + s2 * x + s1 * (1 - x)) / (1 + s2 * x * x + 2 * s1 * x * (1 - x))
# y is the expected allele frequency for the next generation t+1
it = getRNG().randBinomial(2 * Nt, y)
xt.append(float(it) / (2 * Nt))
return xt
def _getPrevXt(self, curXt, Nt, s):
'''Solve y from the backward formula and simulate allele frequencies in
the previous generation. All parameters are assumed to be for one
subpopulation. Nt is the population size at the beginning of the current
generation, i.e. the population size of the previous generation.
'''
assert type(Nt) not in [type(()), type([])]
assert len(curXt) == self.nLoci
#
# given x(t)
# calculate y=x(t-1)' by solving an equation
#
# x_t = y*(1 + s2*y + s1*(1-y)) / (1 + s2*y*y + 2*s1*y*(1-y))
it = []
xt = []
for loc in range(self.nLoci):
x = curXt[loc]
# if current allele freq in subpop sp at locus loc has already been 0,
# set it to 0 for previous gens
if x == 0:
xt.append(x)
continue
# if current allele freq in subpop sp is 1, we assume that it just
# reached here by losing one allele
if x == 1:
xt.append(float(2 * Nt - 1) / (2 * Nt))
continue
# In the interaction case, s1, s2 will be different
# from subpopulation to subpopulation.
s1 = s[3 * loc + 1]
s2 = s[3 * loc + 2]
# with s1 and s2 on hand, calculate freq at the previous generation
if s1 == 0 and s2 == 0:
# special case when a = 0
y = x
else:
a = s2 * x - 2 * s1 * x - s2 + s1
b = 2 * s1 * x - 1 - s1
c = float(x)
b2_4ac = b * b - 4 * a * c
if abs(a) < 1e-8:
y1 = float(-c) / float(b)
# y1 should be valid
y2 = 1000.
else:
y1 = (-b + b2_4ac**0.5) / (2 * a)
y2 = (-b - b2_4ac**0.5) / (2 * a)
#
# choose the solution that is a valid allele frequency
if 0 <= y1 <= 1:
y = y1
else:
y = y2
# y is the expected allele frequency for the previous generation t-1
it = getRNG().randBinomial(int(2 * Nt), y)
xt.append(float(it) / (2 * Nt))
return xt
def _simuForward(self, freq, endFreq, beginGen, endGen):
'''Simulates a trajectory forward in time, starting from frequency
``freq`` at generation ``beginGen`` to frequency ranges specified in
``endFreq`` at generation ``endGen``. During the evolution, multiple
subpopulations can be merged into one, and one population can be split
into several subpopulations. The number of subpopulation is determined
by the demographic function. The function returns the ending allele
frequency if the simulated Trajectory does not fall into ``endFreq``,
and a ``Trajectory`` object otherwise.
'''
# initialize a trajectory
# freq is assumed to be the frequency at the beginning of beginGen,
# so it does not count toward the Trajectory.
xt = Trajectory(endGen = endGen, nLoci = self.nLoci)
# go through each generation
for gen in range(beginGen, endGen + 1):
# first get beginXt, N(t+1), then calculate nextXt.
if gen == beginGen:
beginXt = freq
else:
# current Xt is the frequency at the previous generation.
beginXt = xt._freq(gen - 1)
# _Nt(gen) is the population size at the end of this generation.
Nt = self._Nt(gen)
#
if len(Nt) > len(beginXt):
# split (forward sense) from one population to nSP subpopulations
if len(beginXt) != 1:
raise RuntimeError('Can only split from one subpopulation.')
#
# get NextXt using one subpopulation, then split...
tmpXt = self._getNextXt(beginXt[0], sum(Nt), self._getS(gen, 0, beginXt[0]))
# split tmpXt to multiple subpopulations and assign endingXt.
# here we assume a multinomial distribution of disease alleles.
endingXt = [[0]*self.nLoci for x in Nt]
p = [float(x) / sum(Nt) for x in Nt]
for loc in range(self.nLoci):
it = getRNG().randMultinomial(int(tmpXt[loc]*sum(Nt)), p)
for sp in range(len(Nt)):
endingXt[sp][loc] = float(it[sp]) / Nt[sp]
elif len(Nt) < len(beginXt):
# check length of next Nt.
if len(Nt) != 1:
raise RuntimeError('Can only merge into one subpopulation')
# merge (forward sense) from multiple subpopulations to one pop.
Nt_prev = self._Nt(gen - 1)
if len(beginXt) != len(Nt_prev):
raise RuntimeError('Subpopulation size and allele frequency mismatch.')
Nt_tmp = [int(x * 1.0 / sum(Nt_prev) * Nt[0]) for x in Nt_prev]
#
endingXt = [[0] * self.nLoci]
for sp in range(len(Nt_prev)):
# simulate frequency in each subpopulation
tmpXt = self._getNextXt(beginXt[sp], Nt_tmp[sp], self._getS(gen, sp, beginXt[sp]))
# and accumulate alleles in the final merged frequency
for loc in range(self.nLoci):
endingXt[0][loc] += tmpXt[loc] * Nt_tmp[sp] / Nt[0]
else:
endingXt = [self._getNextXt(beginXt[sp], Nt[sp], self._getS(gen, sp, beginXt[sp]))
for sp in range(len(Nt))]
#
assert len(endingXt) == len(Nt)
# set frequency at the end of this generation
#if self.logger:
# self.logger.debug('Gen=%d, xt=%s' % (gen, endingXt))
xt._setFreq(endingXt, gen)
# and now we go to the next generation...
# now we have a trajectory... is it valid?
freq = xt._freq(endGen)
Nt = self._Nt(endGen)
for loc in range(self.nLoci):
# case 1: allele frequency at each subpopulation
if len(endFreq) == self.nLoci * len(Nt):
for sp in range(len(Nt)):
if freq[sp][loc] < endFreq[sp * self.nLoci + loc][0] or \
freq[sp][loc] > endFreq[sp * self.nLoci + loc][1]:
if self.logger:
self.logger.debug('Forward Trajectory restarted, hitting allele frequency ' + str(freq))
return freq
# case 2: combined allele frequency
else:
allFreq = 0
for sp in range(len(Nt)):
allFreq += freq[sp][loc] * Nt[sp]
allFreq /= sum(Nt)
if allFreq < endFreq[loc][0] or allFreq > endFreq[loc][1]:
if self.logger:
self.logger.debug('Forward Trajectory restarted, hitting allele frequency %s (combined %.3f)' \
% (freq, allFreq))
return allFreq
if self.logger:
self.logger.info('Forward Trajectory succeeded, hitting allele frequency ' + str(freq))
return xt
def _avgOfNestedList(self, value):
'''Take average of each element of a nested list of the same shape. For
example, _avgOfNestedList([[1,[2,3]], [2,[3,4]]]) would return
[1.5, [2.5, 3.5]]. This is used to return summary statistics of failed
attempts.
'''
if len(value) == 0:
return []
if type(value[0]) in [type(()), type([])]:
avg = []
for i in range(len(value[0])):
avg.append(self._avgOfNestedList([val[i] for val in value]))
else:
return float(sum(value)) / len(value)
return avg
def _simuBackward(self, endGen, freq, minMutAge, maxMutAge):
'''Simulates a trajectory backward from allele frequency ``freq`` at
generation ``endGen``. During the evolution, multiple subpopulations can
be merged into one, and one population can be split into several
subpopulations. The number of subpopulation is determined by the
demographic function. If a simulated Trajectory is shorter than
``minMutAge`` or is longer than ``maxMutAge``, the function will raise
an exception.
'''
if endGen <= 0:
raise ValueError("A positive ending generation is needed.")
# done[i] is used to track at which generation a trajectory
# is successfully generated at locus i.
done = [False] * self.nLoci
# initialize a trajectory
xt = Trajectory(endGen=endGen, nLoci = self.nLoci)
# because freq is the allele frequency at the end of the last generation,
# it is part of the Trajectory.
xt._setFreq(freq, gen=endGen)
# start from endGen, go backward.
for gen in range(endGen, -1, -1):
# first get curXt, N(t-1), then calculate prevXt
endingXt = xt._freq(gen)
# Nt is the size at the beginning of the current generation.
Nt = self._Nt(gen - 1)
Nt_end = self._Nt(gen)
if len(Nt) > len(endingXt):
if len(endingXt) != 1:
raise RuntimeError('Can only merge to one subpopulation')
# merge (forward sense)
tmpXt = self._getPrevXt(endingXt[0], sum(Nt), self._getS(gen, 0, endingXt[0]))
assert len(tmpXt) == self.nLoci
# SPLIT tmpXt to multiple subpopulations and assign an expanded beginXt.
beginXt = [[0]*self.nLoci for x in Nt]
p = [float(x)/sum(Nt) for x in Nt]
for loc in range(self.nLoci):
it = getRNG().randMultinomial(int(tmpXt[loc]*sum(Nt)), p)
for sp in range(len(Nt)):
beginXt[sp][loc] = float(it[sp]) / Nt[sp]
elif len(Nt) < len(endingXt):
# check length of previous Nt.
if len(Nt) != 1:
raise ValueError('Can only split from one subpopulation.')
# split (forward sense)
Nt_tmp = [int(float(x) / sum(Nt_end) * Nt[0]) for x in Nt_end]
beginXt = [[0] * self.nLoci]
for sp in range(len(Nt)):
tmpXt = self._getPrevXt(endingXt[sp], Nt_tmp[sp], self._getS(gen, sp, endingXt[sp]))
for loc in range(self.nLoci):
beginXt[0][loc] += tmpXt[loc] * Nt_tmp[sp] / Nt[0]
else:
beginXt = [self._getPrevXt(endingXt[sp], Nt[sp], self._getS(gen, sp, endingXt[sp]))
for sp in range(len(Nt))]
#
assert len(beginXt) == len(Nt)
# set frequency at the end of this generation
if self.logger:
self.logger.debug('Gen=%d, xt=%s' % (gen - 1, beginXt))
#
xt._setFreq(beginXt, gen - 1)
# check all loci and see if beginXt is 0
for loc in range(self.nLoci):
doneSP = [False] * len(Nt)
if done[loc]:
continue
# loop over subpopulation
for sp in range(len(Nt)):
if (len(Nt_end) == 1 and endingXt[0][loc] == 0.) or \
(len(Nt_end) > 1 and endingXt[sp][loc] == 0.):
# already done in a previous generation
doneSP[sp] = True
continue
if beginXt[sp][loc] == 0.:
# success
doneSP[sp] = True
if endGen - gen < minMutAge:
if self.logger:
self.logger.debug('Backward failed - Trajectory too short. gen = %d subPop=%d locus = %d' \
% (gen, sp, loc))
return (gen, beginXt)
if self.logger:
self.logger.debug('Backward success: gen = %d subPop=%d locus = %d' % (gen, sp, loc))
break
elif beginXt[sp][loc] == 1: # fixed
if self.logger:
self.logger.debug('Backward failed - allele gets fixed. gen = %d subPop=%d locus = %d' \
% (gen, sp, loc))
return (gen, beginXt)
if False not in doneSP:
done[loc] = True
if False not in done:
# success
if self.logger:
self.logger.info('Backward Trajectory succeeded at gen = %d' % gen)
return xt
# went back to gen == 0 without success, or the Trajectory is too long
if gen == 0 or gen + self.maxMutAge < endGen:
if self.logger:
self.logger.debug('Backward failed - Trajectory too long. gen = %d' % gen)
return (gen, beginXt)
def simuForward(self, beginGen, endGen, beginFreq, endFreq, maxAttempts=10000):
'''Simulate trajectories of multiple disease susceptibility loci using a
forward time approach. This function accepts allele frequencies of
alleles at multiple unlinked loci (``beginFreq``) at generation
``beginGen``, and expected *ranges* of allele frequencies of these
alleles (``endFreq``) at the end of generation ``endGen``.
Depending on the number of loci and subpopulations, these parameters
accept the following inputs:
beginGen
Starting generation. The initial frequencies are considered as
frequencies at the *beginning* of this generation.
endGen
Ending generation. The ending frequencies are considered as
frequencies at the *end* of this generation.
beginFreq
The initial allele frequency of involved loci in all subpopulations.
It can be a number (same frequency for all loci in all
subpopulations), or a list of frequencies for each locus (same
frequency in all subpopulations), or a list of frequencies for each
locus in each subpopulation in the order of ``loc0_sp0``,
``loc1_sp0``, ..., ``loc0_sp1``, ``loc1_sp1``, ... and so on.
endFreq
The range of acceptable allele frequencies at the ending generation.
The ranges can be specified for each locus (the combined allele
frequency in the whole population is considered) or for each locus in
each subpopulation, in the order of ``loc0_sp0``, ``loc1_sp0``, ...,
``loc0_sp1``, ... and so on.
This simulator will simulate a trajectory generation by generation and
restart if the resulting frequencies do not fall into the specified
ranges of frequencies. This simulator will return ``None`` if no valid
Trajectory is found after ``maxAttempts`` attempts.
'''
#
# This function wraps around _simuForward. It handles parameter
# validation and maxAttempts.
#
# endGen
if not beginGen <= endGen or endGen <= 0:
raise ValueError('The beginning generation should not be larger than the (positive) ending generation')
# beginFreq
if type(beginFreq) in [type(0), type(0.)]:
freq = [[beginFreq] * self.nLoci for sp in self._Nt(beginGen)]
elif type(beginFreq) in [type(()), type([])]:
if len(beginFreq) == self.nLoci:
freq = [beginFreq for sp in self._Nt(beginGen)]
elif len(beginFreq) == self.nLoci * len(self._Nt(beginGen)):
freq = []
for sp in range(len(self._Nt(endGen))):
freq.append(beginFreq[self.nLoci*sp : self.nLoci * (sp+1)])
else:
raise ValueError("Initial frequency should be provided for each locus (nLoci) or each locus at each subpopulation (nLoci * len(N)).")
else:
raise ValueError("Invalid initial frequency list")
#
# endFreq
if type(endFreq) not in [type(()), type([])] or len(endFreq) == 0:
raise ValueError('A list of frequency range is expected')
elif type(endFreq[0]) not in [type(()), type([])]:
if len(endFreq) == 2:
endFreq = [endFreq]
else:
raise ValueError('A list of frequency range is expected.')
if len(endFreq) not in [self.nLoci, self.nLoci * len(self._Nt(endGen))]:
raise ValueError('Please specify a frequency range for each locus')
for rng in endFreq:
if len(rng) != 2:
raise ValueError('Please specify frequency range of each marker')
if rng[0] > rng[1]:
raise ValueError('Invalid frequency range %f - %f' % (rng[0], rng[1]))
failedFreq = []
for failedCount in range(maxAttempts):
xt = self._simuForward(freq, endFreq, beginGen, endGen)
if isinstance(xt, Trajectory):
if self.logger:
self.logger.info('Simulation succeeded after %d attempts with average ending frequencies %s.' \
% (failedCount, self._avgOfNestedList(failedFreq)))
return xt
else:
failedFreq.append(xt)
if self.logger:
self.logger.debug('Ending frequencies:')
for freq in failedFreq:
self.logger.debug(' ' + str(freq))
self.logger.info(('Simulation failed after %d attempts with average frequencies ' % failedCount) \
+ str(self._avgOfNestedList(failedFreq)))
return None
def simuBackward(self, endGen, endFreq, minMutAge=None, maxMutAge=None,
maxAttempts = 1000):
'''Simulate trajectories of multiple disease susceptibility loci using
a backward time approach. This function accepts allele frequencies of
alleles at multiple unlinked loci (*endFreq*) at the end of generation
*endGen*. Depending on the number of loci and subpopulations, parameter
*endFreq* can be a number (same frequency for all loci in all
subpopulations), or a list of frequencies for each locus (same
frequency in all subpopulations), or a list of frequencies for each
locus in each subpopulation in the order of ``loc0_sp0``, ``loc1_sp0``,
..., ``loc0_sp1``, ``loc1_sp1``, ... and so on.
This simulator will simulate a trajectory generation by generation and
restart if the disease allele gets fixed (instead of lost), or if the
length of the simulated Trajectory does not fall between *minMutAge* and
*maxMutAge* (ignored if ``None`` is given). This simulator will return
``None`` if no valid Trajectory is found after ``maxAttempts`` attempts.
'''
#
# This function wraps around _simuBackward. It handles parameter
# validation and maxAttempts.
#
if endGen <= 0:
raise ValueError('A positive ending generation number is expected.')
if minMutAge is not None and minMutAge > endGen:
raise ValueError('Minimal mutation age is larger than ending generation.')
#
if minMutAge is None:
self.minMutAge = 0
else:
self.minMutAge = minMutAge
#
if maxMutAge is None:
self.maxMutAge = endGen
else:
self.maxMutAge = maxMutAge
if not self.maxMutAge >= self.minMutAge:
raise ValueError('maxMutAge should be >= minMutAge')
if endGen == 0 and (isinstance(self.N, collections.Callable) or isinstance(self.fitness, collections.Callable)):
raise ValueError('endGen should be > 0 if N or fitness is defined in the form of function')
if endGen > 0 and endGen < self.maxMutAge:
raise ValueError('endGen should be >= maxMutAge')
#
# endFreq
if type(endFreq) in [type(0), type(0.)]:
freq = [[endFreq] * self.nLoci for sp in self._Nt(endGen)]
elif type(endFreq) in [type(()), type([])]:
if len(endFreq) == self.nLoci:
freq = [endFreq for sp in self._Nt(endGen)]
elif len(endFreq) == self.nLoci * len(self._Nt(endGen)):
freq = []
for sp in range(len(self._Nt(endGen))):
freq.append(endFreq[self.nLoci*sp : self.nLoci * (sp+1)])
else:
raise ValueError("Invalid ending frequency list")
else:
raise ValueError("Invalid ending frequency list")
#
failedFreq = []
for failedCount in range(maxAttempts):
xt = self._simuBackward(endGen, freq, self.minMutAge, self.maxMutAge)
if isinstance(xt, Trajectory):
if self.logger:
self.logger.info(('Simulation succeeded after %d attempts with average generation and frequencies' \
% failedCount) + str(self._avgOfNestedList(failedFreq)))
return xt
else:
failedFreq.append(xt)
if self.logger:
self.logger.debug('Beginning generation and frequencies:')
for freq in failedFreq:
self.logger.debug(' ' + str(freq))
self.logger.info(('Simulation failed after %d attempts with average starting generation and frequencies ' % failedCount) \
+ str(self._avgOfNestedList(failedFreq)))
return None
def simulateForwardTrajectory(N, beginGen, endGen, beginFreq, endFreq, nLoci=1,
fitness=None, maxAttempts=10000, logger=None):
'''Given a demographic model (*N*) and the fitness of genotype at one or
more loci (*fitness*), this function simulates a trajectory of one or more
unlinked loci (*nLoci*) from allele frequency *beginFreq* at generation
*beginGen* forward in time, until it reaches generation *endGen*. A
``Trajectory`` object will be returned if the allele frequency falls
into specified ranges (*endFreq*). ``None`` will be returned if no valid
Trajectory is simulated after ``maxAttempts`` attempts. Please refer to
class ``Trajectory``, ``TrajectorySimulator`` and their member functions
for more details about allowed input for these parameters. If a *logger*
object is given, it will send detailed debug information at ``DEBUG``
level and ending allele frequencies at the ``INFO`` level. The latter
can be used to adjust your fitness model and/or ending allele frequency
if a trajectory is difficult to obtain because of parameter mismatch.
'''
return TrajectorySimulator(N, nLoci, fitness, logger).simuForward(
beginGen, endGen, beginFreq, endFreq, maxAttempts)
def simulateBackwardTrajectory(N, endGen, endFreq, nLoci=1, fitness=None,
minMutAge=None, maxMutAge=None, maxAttempts=1000, logger=None):
'''Given a demographic model (*N*) and the fitness of genotype at one or
more loci (*fitness*), this function simulates a trajectory of one or more
unlinked loci (*nLoci*) from allele frequency *endFreq* at generation *endGen*
backward in time, until all alleles get lost. A ``Trajectory`` object will
be returned if the length of the simulated Trajectory falls between
``minMutAge`` and ``maxMutAge`` (if specified). ``None`` will be returned if no valid
Trajectory is simulated after ``maxAttempts`` attempts. Please refer to
class ``Trajectory``, ``TrajectorySimulator`` and their member functions
for more details about allowed input for these parameters. If a *logger*
object is given, it will send detailed debug information at ``DEBUG``
level and ending generation and frequency at the ``INFO`` level. The latter
can be used to adjust your fitness model and/or ending allele frequency
if a trajectory is difficult to obtain because of parameter mismatch.
'''
return TrajectorySimulator(N, nLoci, fitness, logger).simuBackward(
endGen, endFreq, minMutAge, maxMutAge, maxAttempts)
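# A hedged usage sketch for the two wrappers above (all values arbitrary):
#
# traj = simulateForwardTrajectory(N=1000, beginGen=0, endGen=100,
#     beginFreq=0.2, endFreq=[[0.1, 0.3]])
# if traj is not None:
#     freqFunc = traj.func()  # suitable for a controlled mating scheme
#
# traj = simulateBackwardTrajectory(N=1000, endGen=800, endFreq=0.1)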
#
# STRUCTURE format (no import yet)
#
class StructureExporter:
'''An exporter to export a given population in STRUCTURE format'''
def __init__(self, markerNames=True, recessiveAlleles=None, interMarkerDistances=True,
phaseInformation=None, label=True, popData=True, popFlag=None, locData=None,
phenotype=None):
self.markerNames = markerNames
self.recessiveAlleles = recessiveAlleles
self.interMarkerDistances = interMarkerDistances
self.phaseInformation = phaseInformation
self.label = label
self.popData = popData
self.popFlag = popFlag
self.locData = locData
self.phenotype = phenotype
def export(self, pop, output, subPops, infoFields, gui):
'''Export in STRUCTURE format.'''
# http://pritch.bsd.uchicago.edu/structure_software/release_versions/v2.3.4/structure_doc.pdf
#
# first line: marker names
#
if self.markerNames is True:
names = pop.lociNames()
if names:
output('\t'.join(names) + '\n')
elif hasattr(self.markerNames, '__iter__'):
if len(self.markerNames) != pop.totNumLoci():
raise ValueError('%d names are provided for %d markers' % (len(self.markerNames), pop.totNumLoci()))
output('\t'.join(self.markerNames) + '\n')
else:
raise ValueError('Please provide a list of marker names for parameter markerNames')
#
# second line: recessive alleles
#
if self.recessiveAlleles is not None:
if self.recessiveAlleles not in [0, 1]:
                raise ValueError('Only 0 or 1 is acceptable for parameter recessiveAlleles')
output('%d\n' % self.recessiveAlleles)
#
# third line: inter marker distance
#
if self.interMarkerDistances is True:
loci_pos = pop.lociPos()
# get difference
loci_dist = [-1] + [loci_pos[i] - loci_pos[i-1] for i in range(1, len(loci_pos))]
# set beginning of each chromosome to -1
for ch in range(pop.numChrom()):
loci_dist[pop.chromBegin(ch)] = -1
output('\t'.join(['%s' % x for x in loci_dist]) + '\n')
#
# fourth line: phase information
#
if self.phaseInformation is not None:
if self.phaseInformation not in [0, 1]:
                raise ValueError('Only 0 or 1 is acceptable for parameter phaseInformation')
output('%d\n' % self.phaseInformation)
#
        # remaining lines: genotype lines
#
# progress bar might be wrong with subPops parameter...
prog = ProgressBar('Exporting', pop.popSize(), gui=gui)
count = 0
for vsp in subPops:
sp = vsp if type(vsp) == type(0) else vsp[0]
for idx, ind in enumerate(pop.individuals(vsp)):
items = []
#
# label
#
if self.label:
items.append(str(idx + 1))
#
# popData
#
if self.popData:
items.append(str(sp + 1))
#
# popFlag
#
if self.popFlag is not None:
if self.popFlag not in [0, 1]:
raise ValueError('Only 0 or 1 is acceptable for parameter popFlag')
items.append(str(self.popFlag))
#
# locData
#
if self.locData is not None:
try:
items.append(str(int(ind.info(self.locData))))
except:
raise ValueError('Population does not have information field %s as locData' % self.locData)
#
# phenotype
#
if self.phenotype is not None:
try:
items.append(str(int(ind.info(self.phenotype))))
except:
                        raise ValueError('Population does not have information field %s as phenotype' % self.phenotype)
#
# genotype
#
for p in range(pop.ploidy()):
if items:
output('%s\t%s\n' % ('\t'.join(items), '\t'.join([str(x) for x in ind.genotype(p)])))
else:
output('%s\n' % '\t'.join([str(x) for x in ind.genotype(p)]))
#
# update progress bar
#
count += 1
prog.update(count)
prog.done()
#
# GenePop format
#
class GenePopExporter:
    '''An exporter to export given population in GenePop format'''
def __init__(self, title=None, adjust=1):
self.title = title.rstrip() if title is not None else None
self.adjust = adjust
def export(self, pop, output, subPops, infoFields, gui):
''' Export in genepop format '''
# http://genepop.curtin.edu.au/help_input.html
if pop.ploidy() != 2:
raise ValueError('simuPOP currently can only export diploid populations in GenePop format.')
#
#
# first line: title
#
if self.title is not None:
output(self.title + '\n')
else:
output('Outputted by simuPOP at %s\n' % (
time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime())))
#
# second line: allele names
#
names = pop.lociNames()
if names:
# if names are specified
output(', '.join(names) + '\n')
else:
names = []
for ch in range(pop.numChrom()):
for loc in range(pop.numLoci(ch)):
names.append('ch%d-loc%d' % (ch + 1, loc + 1))
output(', '.join(names) + '\n')
#
# output genotype
#
# progress bar might be wrong with subPops parameter...
alleleWidth = 3 if max(pop.genotype()) >= 99 else 2
format_string = '%%0%dd%%0%dd' % (alleleWidth, alleleWidth)
prog = ProgressBar('Exporting', pop.popSize(), gui=gui)
count = 0
numLoci = pop.totNumLoci()
for vsp in subPops:
#
# for each subpopulation, output pop
#
output('POP\n')
# the name might contain space etc
name = ''.join([x for x in pop.subPopName(vsp) if x.isalnum()])
if not name:
name = 'SubPop%d' % (vsp if type(vsp) == type(0) else vsp[0])
#
for idx, ind in enumerate(pop.individuals(vsp)):
#
# label
#
output('%s-%d, ' % (name, idx + 1))
#
# genotype
#
geno = ind.genotype()
output(' '.join([format_string % (geno[x] + self.adjust, geno[numLoci + x] + self.adjust) for x in range(numLoci)]) + '\n')
#
# update progress bar
#
count += 1
prog.update(count)
prog.done()
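# A two-step worked example of the allele-width logic used above: with
# alleleWidth == 2 the format string expands to '%02d%02d', so raw alleles
# 0 and 1 become, after the default adjust of 1, the four-character
# GenePop token '0102'.
# >>> '%%0%dd%%0%dd' % (2, 2)
# '%02d%02d'
# >>> '%02d%02d' % (0 + 1, 1 + 1)
# '0102'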
class GenePopImporter:
def __init__(self, adjust=0):
self.adjust = adjust
def importFrom(self, filename):
with open(filename, 'r') as input:
#
# ignore the first line
#
input.readline()
#
# read all loci names
#
loci_names = []
while True:
line = input.readline()
if not line.rstrip():
                    raise ValueError('No POP line is found. The file does not appear to be in GenePop format')
if line.lower().rstrip() == 'pop':
break
loci_names.extend([x.strip() for x in line.split(',')])
#
# read genotypes
#
popSize = [0]
genotypes = []
while True:
line = input.readline()
if not line.rstrip():
break
# new subpopulation
if line.lower().rstrip() == 'pop':
popSize.append(0)
continue
# increase pop size count
popSize[-1] = popSize[-1] + 1
#
try:
# ignore ind name
name, geno = line.split(',', 1)
# split genotype into pieces
geno = [x.strip() for x in geno.split() if x.strip()]
# get alleles (adjusted with self.adjust)
alleles = [(int(x[:3]) + self.adjust, int(x[3:]) + self.adjust) if len(x) == 6 else \
(int(x[:2]) + self.adjust, int(x[2:]) + self.adjust) for x in geno]
# append alleles in simuPOP order
genotypes.extend([x[0] for x in alleles])
genotypes.extend([x[1] for x in alleles])
if len(geno) != len(loci_names):
                        raise ValueError('Incorrect number of genotypes (%d expected)' % len(loci_names))
except Exception as e:
                    raise ValueError('Invalid input genotype line (%s). The file does not appear to be in GenePop format. %s' % (line, e))
#
# create a population
pop = Population(size=popSize, loci = len(loci_names), lociNames=loci_names)
pop.setGenotype(genotypes)
return pop
#
# FSTAT format
#
# The first line contains 4 numbers: the number of samples, np , the
# number of loci, nl, the highest number used to label an allele, nu,
# and a 1 if the code for alleles is a one digit number (1-9), a 2 if
# code for alleles is a 2 digit number (01-99) or a 3 if code for
# alleles is a 3 digit number (001-999). These 4 numbers need to be
# separated by any number of spaces.
#
# The first line is immediately followed by nl lines, each containing the
# name of a locus, in the order they will appear in the rest of the file.
#
# On line nl+2, a series of numbers as follow:
# 1 0102 0103 0101 0203 0 0303
#
# The first number identifies the sample to which the individual belongs,
# the second is the genotype of the individual at the first locus, coded
# with a 2 digits number for each allele, the third is the genotype at the
# second locus, until locus nl is entered (in the example above, nl=6).
# Missing genotypes are encoded with 0. Note that 0001 or 0100 are not
# a valid format, that is, both alleles at a locus have to be known,
# otherwise, the genotype is considered as missing. No empty lines
# are needed between samples.
#
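# A minimal decoding sketch for a single genotype token under the layout
# described above: each token concatenates two nd-digit alleles, and a bare
# '0' marks a missing genotype. This mirrors the parsing performed by
# FStatImporter below; the helper itself is illustrative only.
def _decode_fstat_token(token, nd, adjust=0):
    if token == '0':
        # missing genotype: both alleles are unknown
        return (adjust, adjust)
    return (int(token[:nd]) + adjust, int(token[nd:]) + adjust)
# e.g. _decode_fstat_token('0102', nd=2) == (1, 2)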
class FStatExporter:
'''An exporter to export given population in fstat format'''
def __init__(self, lociNames=None, adjust=1):
self.lociNames = lociNames
self.adjust = adjust
def export(self, pop, output, subPops, infoFields, gui):
'''Export in FSTAT format
'''
#
# first line: np, nl, nu and nd
#
np = pop.numSubPop()
nl = pop.totNumLoci()
nu = max(pop.genotype()) + self.adjust
if nu < 10:
nd = 1
elif nu < 100:
nd = 2
elif nu < 1000:
nd = 3
        else:  # more than 3 digits; FSTAT may not handle this, use the actual width
nd = len(str(nu))
#
output( '%d %d %d %d\n' % (np, nl, nu, nd))
#
# loci names
#
if self.lociNames:
if len(self.lociNames) != pop.totNumLoci():
raise ValueError('Parameter lociNames, if specified, should give all %d loci a name' % pop.totNumLoci())
[output(x + '\n') for x in self.lociNames]
else:
names = pop.lociNames()
if names:
[output(x + '\n') for x in names]
else:
# cook up some name
for ch in range(pop.numChrom()):
for loc in range(pop.numLoci(ch)):
output('chr%d_%d\n' % (ch, loc))
#
# genotype
#
format_string = '%%0%dd%%0%dd' % (nd, nd)
numLoci = pop.totNumLoci()
prog = ProgressBar('Exporting', pop.popSize(), gui=gui)
count = 0
for vsp in subPops:
sp = vsp if type(vsp) == type(0) else vsp[0]
for ind in pop.individuals(vsp):
geno = ind.genotype()
output("%d " % (sp + 1) + ' '.join([format_string % (geno[x] + self.adjust, geno[numLoci + x] + self.adjust) for x in range(numLoci)]) + '\n')
count += 1
prog.update(count)
prog.done()
class FStatImporter:
def __init__(self, adjust=0):
self.adjust = adjust
def importFrom(self, filename):
with open(filename, 'r') as input:
# file is opened. get basic parameters
try:
# get numSubPop(), totNumLoci(), maxAllele(), digit
[np, nl, nu, nd] = list(map(int, input.readline().split()))
except ValueError:
raise ValueError("The first line does not have 4 numbers. Are you sure this is a FSTAT file?")
            # the first line is followed by nl lines, each containing
            # the name of a locus
lociNames = []
for al in range(nl):
lociNames.append(input.readline().strip())
#
# get all the genotypes
subPopIndex = []
genotypes = []
for line in input.readlines():
try:
items = line.split()
if len(items) != nl + 1:
raise ValueError('Genotype line (%s) has incorrect number of items' % line)
subPopIndex.append(int(items[0]))
#
# split genotype into pieces
geno = [x.strip() for x in items[1:]]
# get alleles (adjusted with self.adjust)
alleles = [(int(x[:nd]) + self.adjust, int(x[nd:]) + self.adjust) if x != '0' else (self.adjust, self.adjust) for x in geno]
# append alleles in simuPOP order
genotypes.extend([x[0] for x in alleles])
genotypes.extend([x[1] for x in alleles])
if len(geno) != nl:
                        raise ValueError('Incorrect number of genotypes (%d expected)' % nl)
except Exception as e:
                    raise ValueError('Invalid input genotype line (%s). The file does not appear to be in FSTAT format. %s' % (line, e))
# subpop size?
# count number of subpopulations
subPopSize = [0] * (max(subPopIndex) + 1)
for idx in subPopIndex:
subPopSize[idx] += 1
if len([x for x in subPopSize if x != 0]) != np:
raise ValueError("Number of subpop does not match")
# we have all the information, create a population
pop = Population(size=[x for x in subPopSize if x != 0],
subPopNames=[str(idx) for idx,x in enumerate(subPopSize) if x != 0],
loci=len(lociNames), lociNames=lociNames)
# set genotype
pop.setGenotype(genotypes)
return pop
#
# Format MAP
#
class MapExporter:
'''An exporter to export loci information in MAP format'''
def __init__(self, posMultiplier = 1):
self.posMultiplier = posMultiplier
def export(self, pop, output, subPops, infoFields, gui):
'''Export in MAP format
'''
# progress bar
prog = ProgressBar('Exporting', pop.totNumLoci(), gui=gui)
count = 0
for ch in range(pop.numChrom()):
for loc in range(pop.chromBegin(ch), pop.chromEnd(ch)):
chName = pop.chromName(ch)
if chName == '':
chName = str(ch + 1)
locusName = pop.locusName(loc)
if locusName == '':
locusName = '.'
locusPos = str(pop.locusPos(loc) * self.posMultiplier)
if locusPos.endswith('.0'):
locusPos = locusPos[:-2]
output('%s %s %s\n' % (chName, locusName, locusPos))
count += 1
prog.update(count)
prog.done()
#
# Format PED
#
class PEDExporter:
'''An exporter to export given population in PED format'''
def __init__(self, idField = 'ind_id', fatherField = 'father_id',
motherField = 'mother_id', phenoField = None,
adjust = 1):
self.idField = idField
self.fatherField = fatherField
self.motherField = motherField
self.phenoField = phenoField
self.adjust = adjust
self.sexCode = {MALE: '1', FEMALE: '2'}
self.affectedCode = {True: '2', False: '1'}
def _exportUnrelated(self, pop, output, subPops, gui):
'''Export unrelated individuals, this is easy...'''
#
ploidy = pop.ploidy()
# progress bar
prog = ProgressBar('Exporting', pop.popSize(), gui=gui)
count = 0
hasID = self.idField in pop.infoFields()
for vsp in subPops:
for ind in pop.individuals(vsp):
values = [str(count + 1), '0', '0', '0', self.sexCode[ind.sex()], self.affectedCode[ind.affected()]]
if hasID:
values[1] = str(int(ind.info(self.idField)))
if self.phenoField is not None:
values[5] = str(ind.info(self.phenoField))
for geno in zip(*[ind.genotype(p) for p in range(ploidy)]):
values.extend([str(geno[0] + self.adjust), str(geno[1] + self.adjust)])
output(' '.join(values) + '\n')
count += 1
prog.update(count)
prog.done()
def _exportPedigree(self, pop, output, subPops, gui):
# find set of families
pop.asPedigree(idField=self.idField, fatherField=self.fatherField,
motherField=self.motherField)
pop.addInfoFields('ped_index')
sizes = pop.identifyFamilies(pedField='ped_index', subPops=subPops)
# group ind_id by sizes
fam_ids = [[] for x in sizes]
for ind in pop.allIndividuals(subPops=subPops):
try:
fam_ids[int(ind.ped_index)].append(int(ind.info(self.idField)))
except:
# unacceptable ped_index will be ignored
pass
#
# progress bar
prog = ProgressBar('Exporting', len(sizes), gui=gui)
count = 0
for fam_id in fam_ids:
for ind_id in fam_id:
ind = pop.indByID(ind_id)
try:
father = pop.indByID(ind.info(self.fatherField))
fa = int(father.info(self.idField))
mother = pop.indByID(ind.info(self.motherField))
mo = int(mother.info(self.idField))
except IndexError:
fa = 0
mo = 0
values = [str(count + 1), str(ind_id), str(fa), str(mo), self.sexCode[ind.sex()], self.affectedCode[ind.affected()]]
if self.phenoField is not None:
values[5] = str(ind.info(self.phenoField))
for geno in zip(*[ind.genotype(p) for p in range(2)]):
values.extend([str(geno[0] + self.adjust), str(geno[1] + self.adjust)])
output(' '.join(values) + '\n')
count += 1
prog.update(count)
prog.done()
# change ped to a population again
pop.removeInfoFields('ped_index')
pop.asPopulation()
def export(self, pop, output, subPops, infoFields, gui):
'''Export in PED format
'''
fields = pop.infoFields()
if self.idField not in fields or self.fatherField not in fields or self.motherField not in fields:
# output as unrelated individuals
self._exportUnrelated(pop, output, subPops, gui)
else:
# output pedigree
if pop.ploidy() != 2:
raise ValueError('Exporting non-diploid population in PED format is not currently supported.')
self._exportPedigree(pop, output, subPops, gui)
#
# Format Phylip
#
class PhylipExporter:
'''An exporter to export sequence data in Phylip format'''
def __init__(self, alleleNames = None, seqNames = None, style='sequential'):
self.alleleNames = alleleNames
self.seqNames = seqNames
self.style = style
if self.style not in ['sequential', 'interleaved']:
raise ValueError('Style of phylip file has to be sequential or interleaved')
def export(self, pop, output, subPops, infoFields, gui):
'''Export in Phylip format
'''
if self.style == 'sequential':
self._exportSequential(pop, output, subPops, infoFields, gui)
else:
self._exportInterleaved(pop, output, subPops, infoFields, gui)
def _exportSequential(self, pop, output, subPops, infoFields, gui):
# count the number of sequences
ploidy = pop.ploidy()
nLoci = pop.totNumLoci()
nSeq = 0
for vsp in subPops:
nSeq += pop.subPopSize(vsp)
nSeq *= ploidy
locusSpecific = False
if self.alleleNames is not None:
alleleNames = self.alleleNames
else:
alleleNames = pop.alleleNames()
if len(alleleNames) > 1:
locusSpecific = True
if len(alleleNames) != nLoci:
                    raise ValueError('If allele names are specified for each locus, they should be specified for all loci.')
#
if self.seqNames is not None:
if len(self.seqNames) != nSeq and len(self.seqNames) * ploidy != nSeq:
                raise ValueError('If sequence names are specified, they should be specified for all individuals or sequences.')
#
output('%d %d\n' % (nSeq, nLoci))
# progress bar
prog = ProgressBar('Exporting', nSeq, gui=gui)
count = 0
for vsp in subPops:
for ind in pop.individuals(vsp):
for p in range(ploidy):
if self.seqNames is None:
if ploidy == 1:
name = 'S%d' % (count + 1)
else:
name = 'S%d_%d' % (count + 1, p + 1)
else:
if len(self.seqNames) == nSeq:
name = self.seqNames[count * ploidy + p]
else:
name = '%s_%d' % (self.seqNames[count], p + 1)
#
# pick the first 10 ...
output(('%-10s' % name)[:10])
try:
if locusSpecific:
seq = ''.join([alleleNames[i][x] for i,x in enumerate(ind.genotype(p))])
else:
seq = ''.join([alleleNames[x] for x in ind.genotype(p)])
except IndexError:
for i,x in enumerate(ind.genotype(p)):
if locusSpecific:
try:
alleleNames[i][x]
except IndexError:
                                    raise ValueError('Allele %d at locus %d does not have a name. Please specify a name for each allele using parameter alleleNames.' % (x, i))
else:
try:
alleleNames[x]
except IndexError:
                                    raise ValueError('Allele %d does not have a name. Please specify a name for each allele using parameter alleleNames.' % x)
# output sequence
output(seq[:90] + '\n')
# 0 - 89
# 90 - 189
# 190 - 289
#
# length = 100,
if nLoci > 90:
for line in range(((nLoci-90) // 100) + 1):
output(seq[(90 + line*100) : (190 + line*100)] + '\n')
count += 1
prog.update(count)
prog.done()
def _exportInterleaved(self, pop, output, subPops, infoFields, gui):
# count the number of sequences
ploidy = pop.ploidy()
nLoci = pop.totNumLoci()
nSeq = 0
for vsp in subPops:
nSeq += pop.subPopSize(vsp)
nSeq *= ploidy
locusSpecific = False
if self.alleleNames is not None:
alleleNames = self.alleleNames
else:
alleleNames = pop.alleleNames()
if len(alleleNames) > 1:
locusSpecific = True
if len(alleleNames) != nLoci:
                    raise ValueError('If allele names are specified for each locus, they should be specified for all loci.')
#
if self.seqNames is not None:
if len(self.seqNames) != nSeq and len(self.seqNames) * ploidy != nSeq:
                raise ValueError('If sequence names are specified, they should be specified for all individuals or sequences.')
#
output('%d %d\n' % (nSeq, nLoci))
# progress bar
prog = ProgressBar('Exporting', nSeq * nLoci, gui=gui)
count = 0
# first block
for vsp in subPops:
for ind in pop.individuals(vsp):
for p in range(ploidy):
if self.seqNames is None:
if ploidy == 1:
name = 'S%d' % (count + 1)
else:
name = 'S%d_%d' % (count + 1, p + 1)
else:
if len(self.seqNames) == nSeq:
name = self.seqNames[count * ploidy + p]
else:
name = '%s_%d' % (self.seqNames[count], p + 1)
#
# pick the first 10 ...
output(('%-10s' % name)[:10])
try:
if locusSpecific:
seq = ''.join([alleleNames[i][x] for i,x in enumerate(ind.genotype(p)[:90])])
else:
seq = ''.join([alleleNames[x] for x in ind.genotype(p)[:90]])
except IndexError:
for i,x in enumerate(ind.genotype(p)):
if locusSpecific:
try:
alleleNames[i][x]
except IndexError:
                                    raise ValueError('Allele %d at locus %d does not have a name. Please specify a name for each allele using parameter alleleNames.' % (x, i))
else:
try:
alleleNames[x]
except IndexError:
                                    raise ValueError('Allele %d does not have a name. Please specify a name for each allele using parameter alleleNames.' % x)
# output sequence
output(seq + '\n')
count += 1
prog.update(count * len(seq))
#
count *= len(seq)
# other blocks
#
if nLoci > 90:
for line in range(((nLoci-90) // 100) + 1):
output('\n')
s = 90 + line*100
e = 190 + line*100
for vsp in subPops:
for ind in pop.individuals(vsp):
for p in range(ploidy):
try:
if locusSpecific:
seq = ''.join([alleleNames[i][x] for i,x in enumerate(ind.genotype(p)[s:e])])
else:
seq = ''.join([alleleNames[x] for x in ind.genotype(p)[s:e]])
except IndexError:
for i,x in enumerate(ind.genotype(p)):
if locusSpecific:
try:
alleleNames[i][x]
except IndexError:
                                            raise ValueError('Allele %d at locus %d does not have a name. Please specify a name for each allele using parameter alleleNames.' % (x, i))
else:
try:
alleleNames[x]
except IndexError:
                                            raise ValueError('Allele %d does not have a name. Please specify a name for each allele using parameter alleleNames.' % x)
# output sequence
output(seq + '\n')
count += len(seq)
prog.update(count)
prog.done()
class PhylipImporter:
def __init__(self, alleleNames, ploidy=1):
self.alleleNames = alleleNames
self.nameMap = {}
for idx, name in enumerate(alleleNames):
self.nameMap[name] = idx
#
self.ploidy = ploidy
def importFrom(self, filename):
with open(filename, 'r') as input:
# file is opened. get basic parameters
try:
[nSeq, nLoci] = list(map(int, input.readline().split()))
except ValueError:
raise ValueError("The first line does not have 2 numbers for number of sequence and loci. Are you sure this is a Phylip file?")
if nSeq // self.ploidy * self.ploidy != nSeq:
raise ValueError('Inconsistent number of sequences %d for ploidy %d' % (nSeq, self.ploidy))
# determine the style of the input file, first read nSeq lines
for i in range(nSeq):
input.readline()
# if there is next line?
try:
line = input.readline()
if line.rstrip() == '':
style = 'interleaved'
else:
style = 'sequential'
except:
# no next line
style = 'sequential'
#
# create a population
pop = Population(size=nSeq // self.ploidy, ploidy=self.ploidy, loci=nLoci, alleleNames=self.alleleNames)
if style == 'sequential':
with open(filename, 'r') as input:
# skip the first line
input.readline()
# for each sequence
idx = 0
p = 0
for seq in range(nSeq):
# first line, start from column 11, remove space
alleles = input.readline()[10:].rstrip().replace(' ', '')
while len(alleles) < nLoci:
alleles += input.readline().rstrip().replace(' ', '')
# ok?
if len(alleles) != nLoci:
raise ValueError('Could not read %d symbols for sequence %d. %s (length %d) obtained' % (nLoci, seq, alleles, len(alleles)))
# translate to numbers
try:
geno = [self.nameMap[x] for x in alleles]
except KeyError:
for x in alleles:
try:
self.nameMap[x]
except KeyError:
raise ValueError('Could not locate allele %s in provided allele names.' % x)
# set genotype
pop.individual(idx).setGenotype(geno, p)
if p + 1 < self.ploidy:
p += 1
else:
p = 0
idx += 1
else:
# interleaved
with open(filename, 'r') as input:
# skip the first line
input.readline()
# for each sequence
nAlleles = 0
idx = 0
p = 0
for seq in range(nSeq):
# first line, start from column 11, remove space
alleles = input.readline()[10:].rstrip().replace(' ', '')
if nAlleles != 0 and nAlleles != len(alleles):
                        raise ValueError('Inconsistent number of alleles between sequences (previous: %d, current: %d)' % (nAlleles, len(alleles)))
nAlleles = len(alleles)
# translate to numbers
try:
geno = [self.nameMap[x] for x in alleles]
except KeyError:
for x in alleles:
try:
self.nameMap[x]
except KeyError:
raise ValueError('Could not locate allele %s in provided allele names.' % x)
                    # set genotype for the first block of alleles; later blocks fill the rest
pop.individual(idx).genotype(p)[:nAlleles] = geno
if p + 1 < self.ploidy:
p += 1
else:
p = 0
idx += 1
# other lines
while nAlleles < nLoci:
#
line = input.readline().strip()
if line != '':
raise ValueError('An empty line between blocks is expected')
blockAlleles = 0
idx = 0
p = 0
for seq in range(nSeq):
alleles = input.readline().rstrip().replace(' ', '')
if blockAlleles != 0 and blockAlleles != len(alleles):
                            raise ValueError('Inconsistent number of alleles between sequences (previous: %d, current: %d)' % (blockAlleles, len(alleles)))
blockAlleles = len(alleles)
# translate to numbers
try:
geno = [self.nameMap[x] for x in alleles]
except KeyError:
for x in alleles:
try:
self.nameMap[x]
except KeyError:
raise ValueError('Could not locate allele %s in provided allele names.' % x)
                        # set genotype for this block of alleles
pop.individual(idx).genotype(p)[nAlleles : (nAlleles + blockAlleles)] = geno
if p + 1 < self.ploidy:
p += 1
else:
p = 0
idx += 1
# total number of alleles read
nAlleles += blockAlleles
# finally
if nAlleles != nLoci:
                    raise ValueError('Inconsistent number of alleles read: expected %d, read %d.' % (nLoci, nAlleles))
return pop
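# A minimal usage sketch for the Phylip importer above; the filename is
# illustrative only. Allele symbols in the file are translated through the
# required list of allele names, here the four nucleotides.
def _phylip_import_example(filename='data.phy'):
    importer = PhylipImporter(alleleNames=['A', 'C', 'G', 'T'], ploidy=1)
    return importer.importFrom(filename)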
#
# Format CSV
#
class CSVExporter:
'''An exporter to export given population in csv format'''
def __init__(self, header=True, genoFormatter=None, infoFormatter=None,
sexFormatter={MALE: 'M', FEMALE: 'F'},
affectionFormatter={True: 'A', False: 'U'}, delimiter=',',
subPopFormatter=None):
self.header = header
self.genoFormatter = genoFormatter
self.infoFormatter = infoFormatter
self.sexFormatter = sexFormatter
self.affectionFormatter = affectionFormatter
self.delimiter = delimiter
self.subPopFormatter = subPopFormatter
def _genoFromDict(self, geno):
return self.genoFormatter[geno]
def _genoDirect(self, geno):
return geno
def _genoCallable(self, geno):
return self.genoFormatter(geno)
def export(self, pop, output, subPops, infoFields, gui):
'''Export in CSV format
'''
ploidy = pop.ploidy()
colPerGenotype = 0
if pop.totNumLoci() > 0 and pop.popSize() > 0:
if self.genoFormatter is None:
_genoFunc = self._genoDirect
colPerGenotype = ploidy
elif isinstance(self.genoFormatter, dict):
value = list(self.genoFormatter.values())[0]
                colPerGenotype = 1 if type(value) in [type(''), type(1), type(1.0)] else len(value)
_genoFunc = self._genoFromDict
else:
if not isinstance(self.genoFormatter, collections.Callable):
raise ValueError("genoFormatter should be a None, a dictionary or a callable function")
value = self.genoFormatter(tuple([pop.individual(0).allele(0, p) for p in range(ploidy)]))
                colPerGenotype = 1 if type(value) in [type(''), type(1), type(1.0)] else len(value)
_genoFunc = self._genoCallable
#
# header
if self.header is True:
names = list(infoFields)
if self.sexFormatter is not None:
names.append('sex')
if self.affectionFormatter is not None:
names.append('aff')
if colPerGenotype == 1:
names.extend([pop.locusName(loc) for loc in range(pop.totNumLoci())])
elif colPerGenotype > 1:
for loc in range(pop.totNumLoci()):
names.extend(['%s_%d' % (pop.locusName(loc), x+1) for x in range(colPerGenotype)])
if self.subPopFormatter is not None:
if type(self.subPopFormatter) is bool:
names.append('pop')
elif type(self.subPopFormatter) is str:
names.append(self.subPopFormatter)
# output header
output(self.delimiter.join(names) + '\n')
elif type(self.header) == type(''):
output(self.header + '\n')
elif type(self.header) in [type(()), type([])]:
output(self.delimiter.join([str(x) for x in self.header]) + '\n')
# progress bar
prog = ProgressBar('Exporting', pop.popSize(), gui=gui)
count = 0
for vsp in subPops:
for ind in pop.individuals(vsp):
# information fields
if self.infoFormatter is None:
values = [str(ind.info(x)) for x in infoFields]
elif type(self.infoFormatter) == type(''):
values = [self.infoFormatter % tuple([ind.info(x) for x in infoFields])]
else:
raise ValueError('Parameter infoFormatter can only be None or a format string.')
# sex
if self.sexFormatter is not None:
values.append(str(self.sexFormatter[ind.sex()]))
# affection status
if self.affectionFormatter is not None:
values.append(str(self.affectionFormatter[ind.affected()]))
# genotype
for geno in zip(*[ind.genotype(p) for p in range(ploidy)]):
val = _genoFunc(geno)
if type(val) in [type([]), type(())]:
values.extend(['%s' % x for x in val])
else:
values.append(str(val))
if self.subPopFormatter is not None:
values.append(str(vsp))
# output
output(self.delimiter.join(values) + '\n')
count += 1
prog.update(count)
prog.done()
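# A minimal sketch of the accepted genoFormatter styles for the exporter
# above: None (raw allele values), a dictionary keyed by genotype tuples, or
# a callable. The recoding to minor-allele counts is illustrative only.
def _csv_formatter_examples():
    # dictionary form: recode diploid genotypes to minor allele counts
    by_dict = CSVExporter(genoFormatter={(0, 0): 0, (0, 1): 1, (1, 0): 1, (1, 1): 2})
    # callable form: the same recoding computed from the genotype tuple
    by_func = CSVExporter(genoFormatter=lambda geno: sum(geno))
    return by_dict, by_func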
#
#
# Format MS
#
class MSExporter:
'''An exporter to export given population in MS format'''
def __init__(self, splitBy=None):
self.splitBy = splitBy
def export(self, pop, output, subPops, infoFields, gui):
'''Export in MS format
'''
# all ...
if self.splitBy is None:
#
# first line: command, nseq, nblocks
#
stat(pop, popSize=True, alleleFreq=list(range(pop.numLoci(0))), vars=['alleleNum'],
subPops=subPops)
output('simuPOP_export %d 1\n' % (pop.dvars().popSize * pop.ploidy()))
            # arbitrary random number seeds
output('30164 48394 29292\n')
#
prog = ProgressBar('Exporting', pop.dvars().popSize, gui=gui)
count = 0
# find segregating sites
seg_sites = [x for x in range(pop.numLoci(0)) if len(pop.dvars().alleleNum[x]) != 1]
output('\n//\nsegsites: %d\n' % len(seg_sites))
output('positions: %s\n' % ' '.join([str(pop.locusPos(x)) for x in seg_sites]))
#
# genotype
for vsp in subPops:
for ind in pop.individuals(vsp):
for p in range(pop.ploidy()):
geno = ind.genotype(p, 0)
output(''.join([str(0 if geno[x] == 0 else 1) for x in seg_sites]) + '\n')
count += 1
prog.update(count)
prog.done()
elif self.splitBy == 'subPop':
#
# first line: command, nseq, nblocks
#
stat(pop, popSize=True, subPops=subPops)
sz = pop.dvars().subPopSize
if False in [sz[i] == sz[i-1] for i in range(1, len(sz))]:
raise ValueError('Subpopulations should have the same size if splitBy="subPop" is specified.')
output('simuPOP_export %d %d\n' % (sz[0] * pop.ploidy(), len(sz)))
            # arbitrary random number seeds
output('30164 48394 29292\n')
#
prog = ProgressBar('Exporting', sum(sz), gui=gui)
count = 0
# find segregating sites
stat(pop, alleleFreq=list(range(pop.numLoci(0))), subPops=subPops, vars='alleleNum_sp')
for vsp in subPops:
seg_sites = [x for x in range(pop.numLoci(0)) if len(pop.dvars(vsp).alleleNum[x]) != 1]
output('\n//\nsegsites: %d\n' % len(seg_sites))
output('positions: %s\n' % ' '.join([str(pop.locusPos(x)) for x in seg_sites]))
#
# genotype
for ind in pop.individuals(vsp):
for p in range(pop.ploidy()):
geno = ind.genotype(p, 0)
output(''.join([str(0 if geno[x] == 0 else 1) for x in seg_sites]) + '\n')
count += 1
prog.update(count)
prog.done()
elif self.splitBy == 'chrom':
#
# first line: command, nseq, nblocks
#
stat(pop, popSize=True, alleleFreq=ALL_AVAIL, vars=['alleleNum'],
subPops=subPops)
output('simuPOP_export %d %d\n' % (pop.dvars().popSize * pop.ploidy(), pop.numChrom()))
            # arbitrary random number seeds
output('30164 48394 29292\n')
#
prog = ProgressBar('Exporting', pop.dvars().popSize, gui=gui)
count = 0
for ch in range(pop.numChrom()):
b = pop.chromBegin(ch)
# find segregating sites
seg_sites = [x for x in range(pop.chromBegin(ch), pop.chromEnd(ch)) \
if len(pop.dvars().alleleNum[x]) != 1]
output('\n//\nsegsites: %d\n' % len(seg_sites))
output('positions: %s\n' % ' '.join([str(pop.locusPos(x)) for x in seg_sites]))
#
# genotype
for vsp in subPops:
for ind in pop.individuals(vsp):
for p in range(pop.ploidy()):
geno = ind.genotype(p, ch)
output(''.join([str(0 if geno[x - b] == 0 else 1) for x in seg_sites]) + '\n')
count += 1
prog.update(count)
prog.done()
else:
raise ValueError('Parameter splitBy can only take values None (default), '
'subPop, and chrom')
class MSImporter:
def __init__(self, ploidy=1, mergeBy='subPop'):
self.ploidy = ploidy
self.mergeBy = mergeBy
def importFrom(self, filename):
with open(filename, 'r') as input:
            # parse the first line to get population size and sample info
cmd = input.readline().split()
            # the first item should be ms, ./ms, ms.exe, etc.
try:
numChrom = int(cmd[1])
except ValueError as e:
raise ValueError('Failed to get number of chromosomes from command line: %s' \
% (' '.join(cmd)))
#
if numChrom // self.ploidy * self.ploidy != numChrom:
                raise ValueError('Failed to pair %d haploid chromosomes for ploidy %d' \
% (numChrom, self.ploidy))
#
sz = numChrom // self.ploidy
#
try:
numSP = int(cmd[2])
except ValueError as e:
raise ValueError('Failed to get number of populations from command line: %s' \
% (' '.join(cmd)))
#
# now, we need to know the loci positions and import genotype
idx = 0
pops = []
for line in input:
if idx == 0:
# waiting
if line.startswith('//'):
idx = 1
elif idx == 1:
# segsites:
if not line.startswith('segsites:'):
raise ValueError('Incorrect input file: No segsites line after //')
idx = 2
elif idx == 2:
# segsites:
if not line.startswith('positions:'):
                        raise ValueError('Incorrect input file: No positions line after segsites')
try:
pos = [float(x) for x in line[10:].split()]
except Exception as e:
raise ValueError('Failed to import loci positions from %s' \
% line)
pop = Population(size=sz, loci=len(pos), lociPos=pos, ploidy=self.ploidy)
idx = 3
elif idx >= 3:
iidx = (idx - 3) // self.ploidy
                    pidx = idx - 3 - self.ploidy * iidx
geno = [int(x) for x in line.strip()]
pop.individual(iidx).setGenotype(geno, pidx)
if idx == numChrom + 2:
idx = 0
pops.append(pop.clone())
else:
idx += 1
# merge populations
if len(pops) == 1:
return pops[0]
elif self.mergeBy == 'chrom':
pop = pops[0]
for p in pops[1:]:
pop.addChromFrom(p)
elif self.mergeBy == 'subPop':
for i in range(len(pops)):
for j in range(len(pops)):
if i == j:
continue
newPos = [x for x in pops[j].lociPos() if x not in pops[i].lociPos()]
pops[i].addLoci([0]*len(newPos), newPos)
# every population should have the same structure now
pop = pops[0]
for p in pops[1:]:
pop.addIndFrom(p)
return pop
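# A minimal usage sketch for the MS importer above: pair haploid sequences
# into diploid individuals and merge replicate blocks as chromosomes rather
# than subpopulations. The filename is illustrative only.
def _ms_import_example(filename='ms_output.txt'):
    importer = MSImporter(ploidy=2, mergeBy='chrom')
    return importer.importFrom(filename)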
class _binaryWriter:
def __init__(self, func):
self.func = func
def __call__(self, item):
self.func(item.encode('ISO8859-1'))
class Exporter(PyOperator):
'''An operator to export the current population in specified format.
Currently supported file formats include:
STRUCTURE (http://pritch.bsd.uchicago.edu/structure.html). This format
accepts the following parameters:
markerNames
If set to True (default), output names of loci that are specified by parameter
*lociNames* of the ``Population`` class. No names will be outputted if loci are
        anonymous. A list of marker names is also acceptable and will be outputted directly.
recessiveAlleles
If specified, value of this parameter will be outputted after the marker names
line.
interMarkerDistances
If set to True (default), output distances between markers. The first marker
of each chromosome has distance -1, as required by this format.
phaseInformation
If specified, output the value (0 or 1) of this parameter after the inter marker
distances line. Note that simuPOP populations always have phase information.
label
Output 1-based indexes of individuals if this parameter is true (default)
popData
Output 1-based index of subpopulation if this parameter is set to true (default).
popFlag
        Output value of this parameter (0 or 1) after popData if this parameter is specified.
locData
Name of an information field with location information of each individual. Default
to None (no location data)
phenotype
Name of an information field with phenotype information of each individual. Default
to None (no phenotype)
    Genotype information is always outputted. Alleles are coded the same way (0, 1, 2, etc)
as they are stored in simuPOP.
GENEPOP (http://genepop.curtin.edu.au/). The genepop format accepts the following
parameters:
    title
        The title line. If unspecified, a line similar to 'Outputted by simuPOP
        at XXX' will be outputted.
adjust
Adjust values of alleles by specified value (1 as default). This adjustment is
necessary in many cases because GENEPOP treats allele 0 as missing values, and
simuPOP treats allele 0 as a valid allele. Exporting alleles 0 and 1 as 1 and 2
will allow GENEPOP to analyze simuPOP-exported files correctly.
Because 0 is reserved as missing data in this format, allele A is outputted as A+adjust.
simuPOP will use subpopulation names (if available) and 1-based individual index
to output individual label (e.g. SubPop2-3). If parameter subPops is used to output
selected individuals, each subpop will be outputted as a separate subpopulation even
if there are multiple virtual subpopulations from the same subpopulation. simuPOP
currently only export diploid populations to this format.
FSTAT (http://www2.unil.ch/popgen/softwares/fstat.htm). The fstat format accepts
the following parameters:
lociNames
Names of loci that will be outputted. If unspecified, simuPOP will try to use
names of loci that are specified by parameter *lociNames* of the ``Population``
class, or names in the form of chrX-Y.
adjust
Adjust values of alleles by specified value (1 as default). This adjustment is
necessary in many cases because FSTAT treats allele 0 as missing values, and
simuPOP treats allele 0 as a valid allele. Exporting alleles 0 and 1 as 1 and 2
will allow FSTAT to analyze simuPOP-exported files correctly.
    MAP (marker information format) outputs information about each locus. Each line of
the map file describes a single marker and contains chromosome name, locus name,
and position. Chromosome and loci names will be the names specified by parameters
``chromNames`` and ``lociNames`` of the ``Population`` object, and will be
chromosome index + 1, and '.' if these parameters are not specified. This
    format outputs loci positions in the third column. If the unit assumed in your
population does not match the intended unit in the MAP file, (e.g. you would like
to output position in basepair while the population uses Mbp), you can use parameter
``posMultiplier`` to adjust it. This format accepts the following parameters:
posMultiplier
A number that will be multiplied to loci positions (default to 1). The result
will be outputted in the third column of the output.
PED (Linkage Pedigree pre MAKEPED format), with columns of family, individual,
    father, mother, gender, affection status and genotypes. The output should be
acceptable by HaploView or plink, which provides more details of this format in
their documentation. If a population does not have ``ind_id``, ``father_id`` or
``mother_id``, this format will output individuals in specified (virtual)
subpopulations in the current generation (parental generations are ignored)
as unrelated individuals with 0, 0 as parent IDs. An incremental family
    ID will be assigned for each individual. If a population has ``ind_id``,
``father_id`` and ``mother_id``, parents will be recursively traced to separate
all individuals in a (multigenerational) population into families of related
individuals. father and mother id will be set to zero if one of them does not
exist. This format uses 1 for MALE, 2 for FEMALE. If phenoField is ``None``,
individual affection status will be outputted with 1 for Unaffected and 2
for affected. Otherwise, values of an information field will be outputted as
phenotype. Because 0 value indicates missing value, values of alleles will
be adjusted by 1 by default, which should be avoided if you are using non-zero
alleles to model ACTG alleles in simuPOP. This format will ignore subpopulation
structure because parents might belong to different subpopulations. This format
accepts the following parameters:
idField
A field for individual id, default to ``ind_id``. Value at this field will be
individual ID inside a pedigree.
fatherField
A field for father id, default to ``father_id``. Value at this field will be
used to output father of an individual, if an individual with this ID exists
in the population.
motherField
A field for mother id, default to ``mother_id``. Value at this field will be
used to output mother of an individual, if an individual with this ID exists
in the population.
phenoField
A field for individual phenotype that will be outputted as the sixth column of
the PED file. If ``None`` is specified (default), individual affection status
will be outputted (1 for unaffected and 2 for affected).
adjust
Adjust values of alleles by specified value (1 as default). This adjustment
is necessary in many cases because LINKAGE/PED format treats allele 0 as
missing values, and simuPOP treats allele 0 as a valid allele. You should set
        this parameter to zero if you have already used alleles 1, 2, 3, 4 to model
A, C, T, and G alleles.
    Phylip (Joseph Felsenstein's Phylip format). Phylip is generally used for nucleotide
sequences and protein sequences. This makes this format suitable for simulations
of haploid populations (ploidy=1) with nucleotide or protein sequences (number of
alleles = 4 or 24 with alleleNames as nucleotide or amino acid names). If your
    population does not satisfy these conditions, you can still export it, with homologous
chromosomes in a diploid population as two sequences, and with specified allele
names for allele 0, 1, 2, .... This function outputs sequence name as SXXX where
XXX is the 1-based index of individual and SXXX_Y (Y=1 or 2) for diploid individuals,
unless names of sequences are provided by parameter seqNames. This format supports
the following parameters:
alleleNames
Names of alleles 0, 1, 2, ... as a single string (e.g. 'ACTG') or a list of
single-character strings (e.g. ['A', 'C', 'T', 'G']). If this parameter is
unspecified (default), this program will try to use names of alleles
specified in alleleNames parameter of a Population, and raise an error if no
name could be found.
seqNames
Names of each sequence outputted, for each individual, or for each sequences
for non-haploid population. If unspecified, default names such as SXXX or
SXXX_Y will be used.
style
Output style, can be 'sequential' (default) or 'interleaved'. For sequential
        output, the first line of each sequence consists of a name followed by
        90 symbols starting from column 11, with subsequent lines of 100
        symbols. The interleaved style has the subsequent lines as separate blocks.
MS (output from Richard R. Hudson's MS or msHOT program). This format records
    genotypes of SNP markers at segregating sites so all non-zero genotypes are
recorded as 1. simuPOP by default outputs a single block of genotypes at
all loci on the first chromosome, and for all individuals, unless parameter
``splitBy`` is specified to separate genotypes by chromosome or subpopulations.
    splitBy
simuPOP by default output segregating sites at all loci on the first
chromosome for all individuals. If ``splitBy`` is set to ``'subPop'``,
genotypes for individuals in all or specified (parameter ``subPops``)
subpopulations are outputted in separate blocks. The subpopulations should
have the same number of individuals to produce blocks of the same number
of sequences. Alternatively, ``splitBy`` can be set to ``chrom``, for
which genotypes on different chromosomes will be outputted separately.
CSV (comma separated values). This is a general format that output genotypes in
comma (or tab etc) separated formats. The function form of this operator
``export(format='csv')`` is similar to the now-deprecated ``saveCSV`` function,
but its interface has been adjusted to match other formats supported by this
    operator. This format outputs a header (optional), and one line for each individual
with values of specified information fields, sex, affection status, and genotypes.
All fields except for genotypes are optional. The output format is controlled by the
following parameters:
    infoFields
Information fields to be outputted. Default to none.
header
Whether or not a header should be written. These headers will include
information fields, sex (if ``sexFormatter`` is not ``None``), affection
status (if ``affectionFormatter`` is not ``None``) and loci names. If
genotype at a locus needs more than one column, ``_1``, ``_2`` etc will
be appended to loci names. Alternatively, a complete header (a string)
or a list of column names could be specified directly.
infoFormatter
A format string that is used to format all information fields. If
unspecified, ``str(value)`` will be used for each information field.
genoFormatter
How to output genotype at specified loci. Acceptable values include
``None`` (output allele values), a dictionary with genotype as keys,
        (e.g. ``genoFormatter={(0,0):1, (0,1):2, (1,0):2, (1,1):3}``), or a function
with genotype (as a tuple of integers) as inputs. The dictionary value
or the return value of this function can be a single or a list of
number or strings.
sexFormatter
How to output individual sex. Acceptable values include ``None`` (no
output) or a dictionary with keys ``MALE`` and ``FEMALE``.
affectionFormatter
How to output individual affection status. Acceptable values include
``None`` (no output) or a dictionary with keys ``True`` and ``False``.
delimiter
Delimiter used to separate values, default to ','.
subPopFormatter
How to output population membership. Acceptable values include
``None`` (no output), a string that will be used for the column name, or
``True`` which uses 'pop' as the column name. If present, the column is
        written with the string representation of the (virtual) subpopulation.
This operator supports the usual applicability parameters such as begin,
end, step, at, reps, and subPops. If subPops are specified, only
individuals from specified (virtual) subPops are exported. Similar to
other operators, parameter ``output`` can be an output specification string
    (``filename``, ``>>filename``, ``!expr``), a filehandle (or any Python object
    with a ``write`` function), or any Python function. Unless explicitly stated for
a particular format, this operator exports individuals from the current
generation if there are multiple ancestral generations in the population.
The Exporter class will make use of a progress bar to show the progress. The
interface of the progress bar is by default determined by the global GUI status
but you can also set it to, for example, ``gui=False`` to forcefully use a
text-based progress bar, or ``gui='batch'`` to suppress the progress bar.
'''
def __init__(self, format, output, begin=0, end=-1, step=1, at=[],
reps=ALL_AVAIL, subPops=ALL_AVAIL, infoFields=[], gui=None, *args, **kwargs):
self.output = output
self.subPops = subPops
self.infoFields = [infoFields] if type(infoFields) == type('') else infoFields
self.gui = gui
if format.lower() == 'structure':
self.exporter = StructureExporter(*args, **kwargs)
elif format.lower() == 'genepop':
self.exporter = GenePopExporter(*args, **kwargs)
elif format.lower() == 'fstat':
self.exporter = FStatExporter(*args, **kwargs)
elif format.lower() == 'map':
self.exporter = MapExporter(*args, **kwargs)
elif format.lower() == 'ped':
self.exporter = PEDExporter(*args, **kwargs)
elif format.lower() == 'phylip':
self.exporter = PhylipExporter(*args, **kwargs)
elif format.lower() == 'csv':
self.exporter = CSVExporter(*args, **kwargs)
elif format.lower() == 'ms':
self.exporter = MSExporter(*args, **kwargs)
else:
            raise ValueError('Unrecognized file format: {}.'.format(format))
PyOperator.__init__(self, func=self._export, begin=begin, end=end,
step=step, at=at, reps=reps, subPops=ALL_AVAIL, infoFields=[])
def _determineSubPops(self, pop):
# this is basically subPopList::expandFrom(pop)
if self.subPops is ALL_AVAIL:
return list(range(pop.numSubPop()))
elif type(self.subPops) == type(0):
return [self.subPops]
elif type(self.subPops) == type(''):
try:
return [pop.subPopNames().index(self.subPops)]
except:
raise ValueError('%s is not a valid subpop name' % self.subPops)
# handle vsps such as (ALL_AVAIL, vsp)
subPops = []
for vsp in self.subPops:
# is it a number?
if type(vsp) == type(0):
subPops.append(vsp)
elif type(vsp) == type(''):
subPops.append(pop.subPopNames().index(vsp))
            else:
                # vsp is a (subPop, virtualSubPop) pair; copy it into a list
                # so that the name lookups below can modify it in place
                vsp = list(vsp)
                if type(vsp[0]) == type(''):
                    try:
                        vsp[0] = pop.subPopNames().index(vsp[0])
except:
raise ValueError('%s is not a valid subpop name' % vsp[0])
if type(vsp[1]) == type(''):
try:
vsp[1] = pop.virtualSplitter().vspByName(vsp[1])
except:
raise ValueError('Population does not have any virtual subpopulation %s' % vsp[1])
if vsp[0] is ALL_AVAIL:
for u in range(pop.numSubPop()):
if vsp[1] is ALL_AVAIL:
for v in range(pop.numVirtualSubPops()):
subPops.append([u, v])
else:
subPops.append([u, vsp[1]])
else:
if vsp[1] is ALL_AVAIL:
for v in range(pop.numVirtualSubPops()):
subPops.append([vsp[0], v])
else:
subPops.append(vsp)
return subPops
def _export(self, pop):
bin_mode = False
if hasattr(self.output, '_with_output') and hasattr(self.output, '_with_mode'):
bin_mode = 'b' in self.output._with_mode
self.output = self.output._with_output
if isinstance(self.output, str):
if self.output.startswith('!'):
output = eval(self.output[1:], pop.vars(), pop.vars())
else:
output = self.output
if output.startswith('>>'):
mode = 'a'
else:
mode = 'w'
if bin_mode:
mode += 'b'
with open(output.lstrip('>'), mode) as out:
self.exporter.export(pop, out.write,
self._determineSubPops(pop), self.infoFields, gui=self.gui)
elif isinstance(self.output, collections.Callable):
# it is a regular python function, call it with output
if bin_mode:
self.exporter.export(pop, _binaryWriter(self.output),
self._determineSubPops(pop), self.infoFields, gui=self.gui)
else:
self.exporter.export(pop, self.output,
self._determineSubPops(pop), self.infoFields, gui=self.gui)
elif hasattr(self.output, 'write'):
# this must be a file handle
if bin_mode:
self.exporter.export(pop, _binaryWriter(self.output.write),
self._determineSubPops(pop), self.infoFields, gui=self.gui)
else:
self.exporter.export(pop, self.output.write,
self._determineSubPops(pop), self.infoFields, gui=self.gui)
else:
raise ValueError('Invalid output specification.')
return True
def export(pop, format, *args, **kwargs):
'''Apply operator ``Exporter`` to population *pop* in format *format*.'''
Exporter(format, *args, **kwargs).apply(pop)
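# A minimal usage sketch for the function form above; the filenames, the
# information field, and the CSV recoding are illustrative only.
def _export_examples(pop):
    # export the current generation in GenePop format
    export(pop, format='genepop', output='pop.gen')
    # export in CSV format, with one minor-allele count column per locus
    export(pop, format='csv', output='pop.csv', infoFields=['age'],
        genoFormatter=lambda geno: sum(geno))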
def importPopulation(format, filename, *args, **kwargs):
    '''This function imports and returns a population from a file *filename* in
    the specified *format*. Format-specific parameters can be used to define how
    the input should be interpreted and imported. This function supports the
    following file formats.
GENEPOP (http://genepop.curtin.edu.au/). For input file of this format, this
function ignores the first title line, load the second line as loci names,
and import genotypes of different POP sections as different subpopulations.
This format accepts the following parameters:
adjust
Adjust alleles by specified value (default to 0 for no adjustment). This
parameter is mostly used to convert alleles 1 and 2 in a GenePop file to
alleles 0 and 1 (with adjust=-1) in simuPOP. Negative allele (e.g. missing
value 0) will be imported as regular allele with module-dependent values
(e.g. -1 imported as 255 for standard module).
FSTAT (http://www2.unil.ch/popgen/softwares/fstat.htm). This format accepts
the following parameters:
adjust
Adjust alleles by specified value (default to 0 for no adjustment). This
        parameter is mostly used to convert alleles 1 and 2 in an FSTAT file to
alleles 0 and 1 (with adjust=-1) in simuPOP. Negative allele (e.g. missing
value 0) will be imported as regular allele with module-dependent values
(e.g. -1 imported as 255 for standard module).
    Phylip (Joseph Felsenstein's Phylip format). This function ignores sequence
    names and imports sequences into a haploid (default) or diploid population (if
    there is an even number of sequences). A list of allele names is required to
translate symbols to allele names. This format accepts the following
parameters:
alleleNames
Names of alleles 0, 1, 2, ... as a single string (e.g. 'ACTG') or a list of
single-character strings (e.g. ['A', 'C', 'T', 'G']). This will be used to
translate symbols into numeric alleles in simuPOP. Allele names will continue
to be used as allele names of the returned population.
ploidy
        Ploidy of the returned population, default to 1 (haploid). There should be
        an even number of sequences if ploidy=2 (diploid) is specified.
MS (output from Richard R. Hudson's MS or msHOT program). The ms program generates
    nrepeat blocks of nsample haploid chromosomes for a command starting with
    ``ms nsample nrepeat``. By default, the result is imported as a haploid
    population of size nsample. The population will have nrepeat subpopulations,
    each with the same number of loci but a different number of segregating sites.
This behavior could be changed by the following parameters:
ploidy
        If ``ploidy`` is set to 2, the sequences will be paired so the population
will have ``nseq/2`` individuals. An error will be raised if an odd number
of sequences are simulated.
mergeBy
By default, replicate samples will be presented as subpopulations. All
individuals have the same number of loci but individuals in different
subpopulations have different segregating sites. If ``mergeBy`` is set
to ``"chrom"``, the replicates will be presented as separate chromosomes,
each with a different set of loci determined by segregating sites.
'''
if format.lower() == 'genepop':
importer = GenePopImporter(*args, **kwargs)
elif format.lower() == 'fstat':
importer = FStatImporter(*args, **kwargs)
elif format.lower() == 'phylip':
importer = PhylipImporter(*args, **kwargs)
elif format.lower() == 'ms':
importer = MSImporter(*args, **kwargs)
else:
raise ValueError('Importing genotypes in format %s is currently not supported' % format)
return importer.importFrom(filename)
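# A minimal usage sketch for importPopulation; the filenames and the adjust
# value are illustrative only.
def _import_examples():
    # GenePop file with alleles coded 1 and 2, shifted back to 0 and 1
    pop1 = importPopulation('genepop', 'pop.gen', adjust=-1)
    # Phylip nucleotide file read as a diploid population
    pop2 = importPopulation('phylip', 'data.phy', alleleNames='ACGT', ploidy=2)
    return pop1, pop2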
if __name__ == "__main__":
pass
|
BoPeng/simuPOP
|
src/utils.py
|
Python
|
gpl-2.0
| 136,911
|
[
"VisIt"
] |
d9d39e3c5e5241c8c9b1c7a4c5c75a892dc2b1f0b7eed1dd8f203163b64cadf8
|
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
__all__ = ['Bloch3d']
import numpy as np
from qutip.qobj import Qobj
from qutip.expect import expect
from qutip.operators import sigmax, sigmay, sigmaz
class Bloch3d():
"""Class for plotting data on a 3D Bloch sphere using mayavi.
    Valid data can be either points, vectors, or Qobj objects
    corresponding to state vectors or density matrices for
    a two-state system (or subsystem).
Attributes
----------
fig : instance {None}
        User supplied mayavi Figure instance for plotting Bloch sphere.
font_color : str {'black'}
Color of font used for Bloch sphere labels.
font_scale : float {0.08}
Scale for font used for Bloch sphere labels.
frame : bool {True}
Draw frame for Bloch sphere
frame_alpha : float {0.05}
Sets transparency of Bloch sphere frame.
frame_color : str {'gray'}
Color of sphere wireframe.
frame_num : int {8}
Number of frame elements to draw.
frame_radius : floats {0.005}
Width of wireframe.
    point_color : list {['r', 'g', 'b', 'y']}
        List of colors for Bloch sphere point markers to cycle through,
        i.e. by default points 0 and 4 will both be red ('r').
point_mode : string {'sphere','cone','cube','cylinder','point'}
Point marker shapes.
point_size : float {0.075}
Size of points on Bloch sphere.
sphere_alpha : float {0.1}
Transparency of Bloch sphere itself.
sphere_color : str {'#808080'}
Color of Bloch sphere.
size : list {[500,500]}
Size of Bloch sphere plot in pixels. Best to have both numbers the same
otherwise you will have a Bloch sphere that looks like a football.
vector_color : list {['r', 'g', 'b', 'y']}
List of vector colors to cycle through.
vector_width : int {3}
Width of displayed vectors.
view : list {[45,65]}
Azimuthal and Elevation viewing angles.
xlabel : list {['|x>', '']}
List of strings corresponding to +x and -x axes labels, respectively.
xlpos : list {[1.07,-1.07]}
Positions of +x and -x labels respectively.
ylabel : list {['|y>', '']}
List of strings corresponding to +y and -y axes labels, respectively.
ylpos : list {[1.07,-1.07]}
Positions of +y and -y labels respectively.
zlabel : list {['|0>', '|1>']}
List of strings corresponding to +z and -z axes labels, respectively.
zlpos : list {[1.07,-1.07]}
Positions of +z and -z labels respectively.
Notes
-----
    The use of mayavi for 3D rendering of the Bloch sphere comes with
    a few limitations: I) You cannot embed a Bloch3d figure into a
    matplotlib window. II) The use of LaTeX is not supported by the
    mayavi rendering engine. Therefore all labels must be defined using
    standard text. Of course you can post-process the generated figures
    later to add LaTeX using other software if needed.
"""
def __init__(self, fig=None):
# ----check for mayavi-----
try:
from mayavi import mlab
        except ImportError:
            raise Exception("This class requires the mayavi module.")
# ---Image options---
self.fig = None
self.user_fig = None
# check if user specified figure or axes.
if fig:
self.user_fig = fig
        # The size of the figure in pixels, default = [500, 500].
self.size = [500, 500]
        # Azimuthal and Elevation viewing angles, default = [45, 65].
self.view = [45, 65]
# Image background color
self.bgcolor = 'white'
# Image foreground color. Other options can override.
self.fgcolor = 'black'
# ---Sphere options---
# Color of Bloch sphere, default = #808080
self.sphere_color = '#808080'
# Transparency of Bloch sphere, default = 0.1
self.sphere_alpha = 0.1
# ---Frame options---
# Draw frame?
self.frame = True
# number of lines to draw for frame
self.frame_num = 8
# Color of wireframe, default = 'gray'
        self.frame_color = 'gray'
        # Transparency of wireframe, default = 0.05
        self.frame_alpha = 0.05
# Radius of frame lines
self.frame_radius = 0.005
# --Axes---
# Axes color
self.axes_color = 'black'
# Transparency of axes
self.axes_alpha = 0.4
# Radius of axes lines
self.axes_radius = 0.005
# ---Labels---
        # Labels for x-axis, default = ['|x>', '']
        self.xlabel = ['|x>', '']
        # Position of x-axis labels, default = [1.07, -1.07]
        self.xlpos = [1.07, -1.07]
        # Labels for y-axis, default = ['|y>', '']
        self.ylabel = ['|y>', '']
        # Position of y-axis labels, default = [1.07, -1.07]
        self.ylpos = [1.07, -1.07]
        # Labels for z-axis, default = ['|0>', '|1>']
        self.zlabel = ['|0>', '|1>']
        # Position of z-axis labels, default = [1.07, -1.07]
        self.zlpos = [1.07, -1.07]
# ---Font options---
# Color of fonts, default = 'black'
self.font_color = 'black'
        # Scale of fonts, default = 0.08
self.font_scale = 0.08
# ---Vector options---
        # List of colors for Bloch vectors, default = ['r', 'g', 'b', 'y']
self.vector_color = ['r', 'g', 'b', 'y']
# Transparency of vectors
self.vector_alpha = 1.0
# Width of Bloch vectors, default = 2
self.vector_width = 2.0
# Height of vector head
self.vector_head_height = 0.15
# Radius of vector head
self.vector_head_radius = 0.075
# ---Point options---
        # List of colors for Bloch point markers, default = ['r', 'g', 'b', 'y']
self.point_color = ['r', 'g', 'b', 'y']
# Size of point markers
self.point_size = 0.06
# Shape of point markers
# Options: 'cone' or 'cube' or 'cylinder' or 'point' or 'sphere'.
# Default = 'sphere'
self.point_mode = 'sphere'
# ---Data lists---
# Data for point markers
self.points = []
# Data for Bloch vectors
self.vectors = []
# Number of times sphere has been saved
self.savenum = 0
# Style of points, 'm' for multiple colors, 's' for single color
self.point_style = []
def __str__(self):
s = ""
s += "Bloch3D data:\n"
s += "-----------\n"
s += "Number of points: " + str(len(self.points)) + "\n"
s += "Number of vectors: " + str(len(self.vectors)) + "\n"
s += "\n"
s += "Bloch3D sphere properties:\n"
s += "--------------------------\n"
s += "axes_alpha: " + str(self.axes_alpha) + "\n"
s += "axes_color: " + str(self.axes_color) + "\n"
s += "axes_radius: " + str(self.axes_radius) + "\n"
s += "bgcolor: " + str(self.bgcolor) + "\n"
s += "fgcolor: " + str(self.fgcolor) + "\n"
s += "font_color: " + str(self.font_color) + "\n"
s += "font_scale: " + str(self.font_scale) + "\n"
s += "frame: " + str(self.frame) + "\n"
s += "frame_alpha: " + str(self.frame_alpha) + "\n"
s += "frame_color: " + str(self.frame_color) + "\n"
s += "frame_num: " + str(self.frame_num) + "\n"
s += "frame_radius: " + str(self.frame_radius) + "\n"
s += "point_color: " + str(self.point_color) + "\n"
s += "point_mode: " + str(self.point_mode) + "\n"
s += "point_size: " + str(self.point_size) + "\n"
s += "sphere_alpha: " + str(self.sphere_alpha) + "\n"
s += "sphere_color: " + str(self.sphere_color) + "\n"
s += "size: " + str(self.size) + "\n"
s += "vector_alpha: " + str(self.vector_alpha) + "\n"
s += "vector_color: " + str(self.vector_color) + "\n"
s += "vector_width: " + str(self.vector_width) + "\n"
s += "vector_head_height: " + str(self.vector_head_height) + "\n"
s += "vector_head_radius: " + str(self.vector_head_radius) + "\n"
s += "view: " + str(self.view) + "\n"
s += "xlabel: " + str(self.xlabel) + "\n"
s += "xlpos: " + str(self.xlpos) + "\n"
s += "ylabel: " + str(self.ylabel) + "\n"
s += "ylpos: " + str(self.ylpos) + "\n"
s += "zlabel: " + str(self.zlabel) + "\n"
s += "zlpos: " + str(self.zlpos) + "\n"
return s
def clear(self):
"""Resets the Bloch sphere data sets to empty.
"""
self.points = []
self.vectors = []
self.point_style = []
def add_points(self, points, meth='s'):
"""Add a list of data points to bloch sphere.
Parameters
----------
points : array/list
Collection of data points.
meth : str {'s','m'}
Type of points to plot, use 'm' for multicolored.
"""
if not isinstance(points[0], (list, np.ndarray)):
points = [[points[0]], [points[1]], [points[2]]]
points = np.array(points)
if meth == 's':
if len(points[0]) == 1:
pnts = np.array(
[[points[0][0]], [points[1][0]], [points[2][0]]])
pnts = np.append(pnts, points, axis=1)
else:
pnts = points
self.points.append(pnts)
self.point_style.append('s')
else:
self.points.append(points)
self.point_style.append('m')
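    # A usage sketch for the two point styles (illustrative only):
    #     b.add_points([0, 0, 1])                         # single point, 's'
    #     th = np.linspace(0, 2 * np.pi, 20)
    #     b.add_points([np.cos(th), np.sin(th), 0 * th], meth='m')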
def add_states(self, state, kind='vector'):
"""Add a state vector Qobj to Bloch sphere.
Parameters
----------
state : qobj
Input state vector.
kind : str {'vector','point'}
Type of object to plot.
"""
if isinstance(state, Qobj):
state = [state]
for st in state:
if kind == 'vector':
vec = [expect(sigmax(), st), expect(sigmay(), st),
expect(sigmaz(), st)]
self.add_vectors(vec)
elif kind == 'point':
pnt = [expect(sigmax(), st), expect(sigmay(), st),
expect(sigmaz(), st)]
self.add_points(pnt)
def add_vectors(self, vectors):
"""Add a list of vectors to Bloch sphere.
Parameters
----------
vectors : array/list
Array with vectors of unit length or smaller.
"""
if isinstance(vectors[0], (list, np.ndarray)):
for vec in vectors:
self.vectors.append(vec)
else:
self.vectors.append(vectors)
def plot_vectors(self):
"""
Plots vectors on the Bloch sphere.
"""
from mayavi import mlab
from tvtk.api import tvtk
import matplotlib.colors as colors
for k in range(len(self.vectors)):
            vec = np.array(self.vectors[k], dtype=float)
norm = np.linalg.norm(vec)
theta = np.arccos(vec[2] / norm)
phi = np.arctan2(vec[1], vec[0])
vec -= 0.5 * self.vector_head_height * \
np.array([np.sin(theta) * np.cos(phi),
np.sin(theta) * np.sin(phi), np.cos(theta)])
color = colors.colorConverter.to_rgb(
self.vector_color[np.mod(k, len(self.vector_color))])
            mlab.plot3d([0, vec[0]], [0, vec[1]], [0, vec[2]],
                        name='vector' + str(k), tube_sides=100,
line_width=self.vector_width,
opacity=self.vector_alpha,
color=color)
cone = tvtk.ConeSource(height=self.vector_head_height,
radius=self.vector_head_radius,
resolution=100)
cone_mapper = tvtk.PolyDataMapper(input=cone.output)
prop = tvtk.Property(opacity=self.vector_alpha, color=color)
cc = tvtk.Actor(mapper=cone_mapper, property=prop)
cc.rotate_z(np.degrees(phi))
cc.rotate_y(-90 + np.degrees(theta))
cc.position = vec
self.fig.scene.add_actor(cc)
def plot_points(self):
"""
Plots points on the Bloch sphere.
"""
from mayavi import mlab
import matplotlib.colors as colors
        for k in range(len(self.points)):
            num = len(self.points[k][0])
            dist = np.array([np.sqrt(self.points[k][0][j] ** 2 +
                                     self.points[k][1][j] ** 2 +
                                     self.points[k][2][j] ** 2)
                             for j in range(num)])
            if np.any(np.abs(dist - dist[0]) / dist[0] > 1e-12):
                # sort points from closest to farthest so they are drawn
                # in a consistent depth order
                zipped = sorted(zip(dist, range(num)))
                dist, indperm = zip(*zipped)
                indperm = np.array(indperm)
            else:
                indperm = np.arange(num)
if self.point_style[k] == 's':
color = colors.colorConverter.to_rgb(
self.point_color[np.mod(k, len(self.point_color))])
mlab.points3d(
self.points[k][0][indperm], self.points[k][1][indperm],
self.points[k][2][indperm], figure=self.fig,
resolution=100, scale_factor=self.point_size,
mode=self.point_mode, color=color)
            elif self.point_style[k] == 'm':
                pnt_colors = np.array(self.point_color * int(np.ceil(
                    num / float(len(self.point_color)))))
                pnt_colors = pnt_colors[0:num]
                pnt_colors = list(pnt_colors[indperm])
for kk in range(num):
mlab.points3d(
self.points[k][0][
indperm[kk]], self.points[k][1][indperm[kk]],
self.points[k][2][
indperm[kk]], figure=self.fig, resolution=100,
scale_factor=self.point_size, mode=self.point_mode,
color=colors.colorConverter.to_rgb(pnt_colors[kk]))
def make_sphere(self):
"""
Plots Bloch sphere and data sets.
"""
# setup plot
# Figure instance for Bloch sphere plot
from mayavi import mlab
import matplotlib.colors as colors
if self.user_fig:
self.fig = self.user_fig
else:
self.fig = mlab.figure(
1, size=self.size,
bgcolor=colors.colorConverter.to_rgb(self.bgcolor),
fgcolor=colors.colorConverter.to_rgb(self.fgcolor))
sphere = mlab.points3d(
0, 0, 0, figure=self.fig, scale_mode='none', scale_factor=2,
color=colors.colorConverter.to_rgb(self.sphere_color),
resolution=100, opacity=self.sphere_alpha, name='bloch_sphere')
        # These commands make the sphere look better
sphere.actor.property.specular = 0.45
sphere.actor.property.specular_power = 5
sphere.actor.property.backface_culling = True
# make frame for sphere surface
if self.frame:
theta = np.linspace(0, 2 * np.pi, 100)
for angle in np.linspace(-np.pi, np.pi, self.frame_num):
xlat = np.cos(theta) * np.cos(angle)
ylat = np.sin(theta) * np.cos(angle)
zlat = np.ones_like(theta) * np.sin(angle)
xlon = np.sin(angle) * np.sin(theta)
ylon = np.cos(angle) * np.sin(theta)
zlon = np.cos(theta)
mlab.plot3d(
xlat, ylat, zlat,
color=colors.colorConverter.to_rgb(self.frame_color),
opacity=self.frame_alpha, tube_radius=self.frame_radius)
mlab.plot3d(
xlon, ylon, zlon,
color=colors.colorConverter.to_rgb(self.frame_color),
opacity=self.frame_alpha, tube_radius=self.frame_radius)
# add axes
axis = np.linspace(-1.0, 1.0, 10)
other = np.zeros_like(axis)
mlab.plot3d(
axis, other, other,
color=colors.colorConverter.to_rgb(self.axes_color),
tube_radius=self.axes_radius, opacity=self.axes_alpha)
mlab.plot3d(
other, axis, other,
color=colors.colorConverter.to_rgb(self.axes_color),
tube_radius=self.axes_radius, opacity=self.axes_alpha)
mlab.plot3d(
other, other, axis,
color=colors.colorConverter.to_rgb(self.axes_color),
tube_radius=self.axes_radius, opacity=self.axes_alpha)
# add data to sphere
self.plot_points()
self.plot_vectors()
        # add labels
mlab.text3d(0, 0, self.zlpos[0], self.zlabel[0],
color=colors.colorConverter.to_rgb(self.font_color),
scale=self.font_scale)
mlab.text3d(0, 0, self.zlpos[1], self.zlabel[1],
color=colors.colorConverter.to_rgb(self.font_color),
scale=self.font_scale)
mlab.text3d(self.xlpos[0], 0, 0, self.xlabel[0],
color=colors.colorConverter.to_rgb(self.font_color),
scale=self.font_scale)
mlab.text3d(self.xlpos[1], 0, 0, self.xlabel[1],
color=colors.colorConverter.to_rgb(self.font_color),
scale=self.font_scale)
mlab.text3d(0, self.ylpos[0], 0, self.ylabel[0],
color=colors.colorConverter.to_rgb(self.font_color),
scale=self.font_scale)
mlab.text3d(0, self.ylpos[1], 0, self.ylabel[1],
color=colors.colorConverter.to_rgb(self.font_color),
scale=self.font_scale)
def show(self):
"""
Display the Bloch sphere and corresponding data sets.
"""
from mayavi import mlab
self.make_sphere()
mlab.view(azimuth=self.view[0], elevation=self.view[1], distance=5)
if self.fig:
mlab.show()
def save(self, name=None, format='png', dirc=None):
"""Saves Bloch sphere to file of type ``format`` in directory ``dirc``.
Parameters
----------
name : str
            Name of saved image. Must include path and format as well,
            e.g. '/Users/Paul/Desktop/bloch.png'.
            This overrides the 'format' and 'dirc' arguments.
format : str
Format of output image. Default is 'png'.
dirc : str
Directory for output images. Defaults to current working directory.
Returns
-------
File containing plot of Bloch sphere.
"""
from mayavi import mlab
import os
self.make_sphere()
mlab.view(azimuth=self.view[0], elevation=self.view[1], distance=5)
        if dirc:
            if not os.path.isdir(os.path.join(os.getcwd(), str(dirc))):
                os.makedirs(os.path.join(os.getcwd(), str(dirc)))
        if name is None:
            if dirc:
                mlab.savefig(os.path.join(os.getcwd(), str(dirc),
                                          'bloch_' + str(self.savenum) +
                                          '.' + format))
            else:
                mlab.savefig(os.path.join(os.getcwd(),
                                          'bloch_' + str(self.savenum) +
                                          '.' + format))
else:
mlab.savefig(name)
self.savenum += 1
if self.fig:
mlab.close(self.fig)
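# A minimal save() sketch (file names follow the logic above; the paths
# shown are illustrative):
#     b = Bloch3d()
#     b.add_vectors([0, 0, 1])
#     b.save()                # ./bloch_0.png
#     b.save(dirc='figs')     # ./figs/bloch_1.png
#     b.save(name='/tmp/sphere.png')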
|
zasdfgbnm/qutip
|
qutip/bloch3d.py
|
Python
|
bsd-3-clause
| 21,732
|
[
"Mayavi"
] |
cc1bfdf8202a9117b116e6edea1a88f74d6e221b4805c966b665668699bc072f
|
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from django.db.models import Q
from epl.models import Teamrecord, Players, Teams, Stats, Games
from epl.serializers import TeamRecordSerializer, PlayerRecordSerializer, TeamsSerializer, StatsSerializer, GamesSerializer, ScheduleSerializer, PerformanceSerializer
import datetime
import random
from calendar import monthrange
class epl(APIView):
def getTeamRecord(params, format=None):
data = []
if params["FC"][0] == "리그":
params["FC"] = ["Arsenal", "Bournemouth", "Brighton", "Burnley", "Chelsea", "Crystal Palace",
"Everton", "Huddersfield Town", "Leicester City", "Liverpool", "Manchester City", "Manchester United",
"Newcastle United", "Southampton", "Stoke City", "Swansea City", "Tottenham Hotspur", "Watford",
"West Bromwich Albion", "West Ham United"]
print(params["FC"])
for param in params["FC"]:
obj = Teams.objects.filter(Q(team_nickname__icontains=param))
teamserializer = TeamsSerializer(obj, many=True)
if len(teamserializer.data) == 0:
obj = Teams.objects.filter(Q(team_name=param))
teamserializer = TeamsSerializer(obj, many=True)
teamname = teamserializer.data[0]["team_name"]
teamrecord = Teamrecord.objects.filter(Q(pk=teamname))
serializer = TeamRecordSerializer(teamrecord, many=True)
team_pic = teamserializer.data[0]["team_pic"]
serializer.data[0]["team_pic"] = team_pic
data.extend(serializer.data)
message = ["검색하신 팀(들)의 향후 일정이 궁금하세요? '향후 일정도 알려줘!' 라고 하시면 알려드릴께요!",
"검색하신 팀(들)의 최근 경기 결과가 궁금하시면, '최근 경기 결과가 어떻게 돼?' 라고 쳐보세요 :D",
"다른 팀(들)의 승점도 궁금하신가요? '[팀 이름] 도 부탁해!', 라고 말씀하세요!"]
n = random.randrange(0, len(message))
return data, message[n]
def getPlayerInfo(params, format=None):
data = []
players = params["Players"]
for p in players:
try:
playerrecord = Players.objects.filter(pl_nic__icontains=p)
except Players.DoesNotExist:
continue
else:
playerserializer = PlayerRecordSerializer(playerrecord, many=True)
player_name = playerserializer.data[0]["pl_name"]
player_id = playerserializer.data[0]["pl_id"]
teamid = playerserializer.data[0]["team"]
teamlist = Teams.objects.get(pk=teamid)
teamserializer = TeamsSerializer(teamlist)
playerserializer.data[0]["team_name"] = teamserializer.data["team_name"]
del playerserializer.data[0]["team"]
stats_data = Stats.objects.filter(fk_pl__exact=player_id)
statsserializer = StatsSerializer(stats_data, many=True)
goals, assists, shots, min_played, card_yellow, card_red, passes, touches, fouls, playedcount = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for i in statsserializer.data:
goals = goals + int(i["goals"])
assists = assists + int(i["assists"])
shots = shots + int(i["shots"])
min_played = min_played + int(i["min_played"])
card_yellow = card_yellow + int(i["card_yellow"])
card_red = card_red + int(i["card_red"])
passes = passes + int(i["passes"])
touches = touches + int(i["touches"])
fouls = fouls + int(i["fouls"])
if int(i["min_played"]) > 0:
playedcount = playedcount + 1
playerserializer.data[0]["goals"] = goals
playerserializer.data[0]["assists"] = assists
playerserializer.data[0]["shots"] = shots
playerserializer.data[0]["min_played"] = min_played
playerserializer.data[0]["card_yellow"] = card_yellow
playerserializer.data[0]["card_red"] = card_red
playerserializer.data[0]["passes"] = passes
playerserializer.data[0]["touches"] = touches
playerserializer.data[0]["fouls"] = fouls
playerserializer.data[0]["played_games"] = playedcount
data.extend(playerserializer.data)
message = ["다른 선수의 성적도 궁금하시면 '[선수 이름] 은? 라고 해주세요! 알려드리겠습니다 :D",
"[선수 이름] 도 부탁해! 라고 하시면 그 선수 성적도 보여 드릴께요! XD"]
n = random.randrange(0, len(message))
return data, message[n]
def getGameRecord(params, format=None):
        try:
            params["Date"]
        except KeyError:
            # "00-00" is a sentinel meaning "no date was given"; month and
            # day of 0 select the most recent games further below.
            params["Date"] = ["00-00"]
message = []
date = params["Date"][0].split('-')
print("at getGameRecord", date)
month = int(date[0])
day = int(date[1])
if len(params["FC"]) == 2:
data = []
team1 = params["FC"][0]
team2 = params["FC"][1]
t1 = Teams.objects.filter(Q(team_nickname__icontains=team1))
t2 = Teams.objects.filter(Q(team_nickname__icontains=team2))
t1serializer = TeamsSerializer(t1, many=True)
t2serializer = TeamsSerializer(t2, many=True)
t1_id = t1serializer.data[0]["team_id"]
t2_id = t2serializer.data[0]["team_id"]
teamname1 = t1serializer.data[0]["team_name"]
teamname2 = t2serializer.data[0]["team_name"]
home_pic = t1serializer.data[0]["team_pic"]
away_pic = t2serializer.data[0]["team_pic"]
if month == 0 and day == 0:
d = datetime.datetime.today()
obj = Games.objects.filter((Q(home_team=t1_id) | Q(away_team=t1_id)) & (Q(home_team=t2_id) | Q(away_team=t2_id)) & Q(game_date__lt=d)).order_by('-game_date')
gameserializer = GamesSerializer(obj, many=True)
elif month != 0 and day == 0:
startd = datetime.date(2017, month, 1)
endd = datetime.date(2017, month, monthrange(2017, month)[1])
                obj = Games.objects.filter((Q(home_team=t1_id) | Q(away_team=t1_id)) & (Q(home_team=t2_id) | Q(away_team=t2_id)) & Q(game_date__range=(startd, endd)))
gameserializer = GamesSerializer(obj, many=True)
else:
d = datetime.date(2017, month, day)
obj = Games.objects.filter((Q(home_team=t1_id) | Q(away_team=t1_id)) & (Q(home_team=t2_id) | Q(away_team=t2_id)) & Q(game_date__startswith=d))
gameserializer = GamesSerializer(obj, many=True)
for i in gameserializer.data:
home_id = i["home_team"]
away_id = i["away_team"]
homeObj = Teams.objects.filter(Q(team_id=home_id))
awayObj = Teams.objects.filter(Q(team_id=away_id))
h_serializer = TeamsSerializer(homeObj, many=True)
aw_serializer = TeamsSerializer(awayObj, many=True)
home_name = h_serializer.data[0]["team_name"]
away_name = aw_serializer.data[0]["team_name"]
home_pic = h_serializer.data[0]["team_pic"]
away_pic = aw_serializer.data[0]["team_pic"]
i["home_team"] = home_name
i["away_team"] = away_name
i["home_pic"] = home_pic
i["away_pic"] = away_pic
del i["game_id"]
del i["round_id"]
data.extend(gameserializer.data)
message = ["검색하신 팀의 향후 일정도 궁금하세요? '향후 일정도 알려줘!' 라고 하시면 알려드릴께요!",
"검색하신 팀의 현재 순위가 궁금하시면, '순위 알려줘!' 라고 쳐보세요 :D",
"다른 팀의 경기 결과도 궁금하신가요? '[팀 이름] 은?', 라고 말씀하세요!",
]
n = random.randrange(0, len(message))
return data, message[n]
elif len(params["FC"]) == 1:
data = []
team = params["FC"][0]
t = Teams.objects.filter(Q(team_nickname__icontains=team))
teamserializer = TeamsSerializer(t, many=True)
t_id = teamserializer.data[0]["team_id"]
if month == 0 and day == 0:
d = datetime.datetime.today()
obj = Games.objects.filter((Q(home_team=t_id) | Q(away_team=t_id)) & Q(game_date__lt=d)).order_by('-game_date')
gameserializer = GamesSerializer(obj, many=True)
elif month != 0 and day == 0:
startd = datetime.date(2017, month, 1)
endd = datetime.date(2017, month, monthrange(2017, month)[1])
obj = Games.objects.filter((Q(home_team=t_id) | Q(away_team=t_id)) & Q(game_date__range=(startd, endd))).order_by('-game_date')
gameserializer = GamesSerializer(obj, many=True)
else:
d = datetime.date(2017, month, day)
obj = Games.objects.filter((Q(home_team=t_id) | Q(away_team=t_id)) & Q(game_date__startswith=d))
gameserializer = GamesSerializer(obj, many=True)
for i in gameserializer.data:
home_id = i["home_team"]
away_id = i["away_team"]
homeObj = Teams.objects.filter(Q(team_id=home_id))
awayObj = Teams.objects.filter(Q(team_id=away_id))
h_serializer = TeamsSerializer(homeObj, many=True)
aw_serializer = TeamsSerializer(awayObj, many=True)
home_name = h_serializer.data[0]["team_name"]
away_name = aw_serializer.data[0]["team_name"]
home_pic = h_serializer.data[0]["team_pic"]
away_pic = aw_serializer.data[0]["team_pic"]
i["home_team"] = home_name
i["away_team"] = away_name
i["home_pic"] = home_pic
i["away_pic"] = away_pic
del i["game_id"]
del i["round_id"]
data.extend(gameserializer.data)
message = ["검색하신 팀의 향후 일정도 궁금하세요? '향후 일정도 알려줘!' 라고 하시면 알려드릴께요!",
"검색하신 팀의 현재 순위가 궁금하시면, '순위 알려줘!' 라고 쳐보세요 :D",
"다른 팀의 경기 결과도 궁금하신가요? '[팀 이름] 은?', 라고 말씀하세요!"]
n = random.randrange(0, len(message))
return data, message[n]
def getSchedule(params, format=None):
try:
params["Date"]
except KeyError:
params["Date"] = ["00-00"]
date = params["Date"][0].split('-')
month = int(date[0])
day = int(date[1])
data = []
team = params["FC"][0]
if team == "리그":
t = Teams.objects.all()
teamserializer = TeamsSerializer(t, many=True)
d = datetime.datetime.today()
obj = Games.objects.filter(Q(game_date__gt=d)).order_by('game_date')
scdserializer = ScheduleSerializer(obj, many=True)
else:
for team in params["FC"]:
t = Teams.objects.filter(Q(team_nickname__icontains=team))
teamserializer = TeamsSerializer(t, many=True)
t_id = teamserializer.data[0]["team_id"]
if month == 0 and day == 0:
d = datetime.datetime.today()
obj = Games.objects.filter((Q(home_team=t_id) | Q(away_team=t_id)) & Q(game_date__gt=d)).order_by('game_date')
scdserializer = ScheduleSerializer(obj, many=True)
elif month != 0 and day == 0:
startd = datetime.date(2017, month, 1)
endd = datetime.date(2017, month, monthrange(2017, month)[1])
obj = Games.objects.filter((Q(home_team=t_id) | Q(away_team=t_id)) & Q(game_date__range=(startd, endd))).order_by('game_date')
scdserializer = ScheduleSerializer(obj, many=True)
else:
d = datetime.date(2017, month, day)
obj = Games.objects.filter((Q(home_team=t_id) | Q(away_team=t_id)) & Q(game_date__startswith=d))
scdserializer = ScheduleSerializer(obj, many=True)
for i in scdserializer.data:
home_id = i["home_team"]
away_id = i["away_team"]
homeObj = Teams.objects.filter(Q(team_id=home_id))
awayObj = Teams.objects.filter(Q(team_id=away_id))
h_serializer = TeamsSerializer(homeObj, many=True)
aw_serializer = TeamsSerializer(awayObj, many=True)
home_name = h_serializer.data[0]["team_name"]
away_name = aw_serializer.data[0]["team_name"]
home_pic = h_serializer.data[0]["team_pic"]
away_pic = aw_serializer.data[0]["team_pic"]
i["home_team"] = home_name
i["away_team"] = away_name
i["home_pic"] = home_pic
i["away_pic"] = away_pic
data.extend(scdserializer.data)
message = ["이 팀의 현재 승점이 궁금하시면 '승점은 몇 점이야?' 라고 해주세요! 알려드릴께요 :D",
"다른 팀의 일정도 알고 싶으시면 '[팀 이름] 은? 라고 물어보세요! 안내하겟읍니다 ( _ _)",
"'최근 경기 어떻게 됬어?' 라고 물어보시면 경기 결과를 알려드리겠습니다 :)"]
n = random.randrange(0, len(message))
return data, message[n]
def playerPerformance(params, format=None):
players = params["Players"]
playerObj = Players.objects.filter(Q(pl_nic__icontains=players[0]))
playerserializer = PlayerRecordSerializer(playerObj, many=True)
player_id = playerserializer.data[0]["pl_id"]
player_name = playerserializer.data[0]["pl_name"]
player_pic = playerserializer.data[0]["pl_pic"]
teamid = playerserializer.data[0]["team"]
player_in_team = False
if len(params["FC"]) == 2:
data = []
team1 = params["FC"][0]
team2 = params["FC"][1]
t1 = Teams.objects.filter(Q(team_nickname__icontains=team1))
t2 = Teams.objects.filter(Q(team_nickname__icontains=team2))
t1serializer = TeamsSerializer(t1, many=True)
t2serializer = TeamsSerializer(t2, many=True)
t1_id = t1serializer.data[0]["team_id"]
t2_id = t2serializer.data[0]["team_id"]
obj = Games.objects.filter((Q(home_team=t1_id) | Q(away_team=t1_id)) & (Q(home_team=t2_id) | Q(away_team=t2_id))).order_by('-game_date')
gameserializer = GamesSerializer(obj, many=True)
home_id = gameserializer.data[0]["home_team"]
away_id = gameserializer.data[0]["away_team"]
t1 = Teams.objects.filter(Q(team_id=home_id))
t2 = Teams.objects.filter(Q(team_id=away_id))
t1serializer = TeamsSerializer(t1, many=True)
t2serializer = TeamsSerializer(t2, many=True)
teamname1 = t1serializer.data[0]["team_name"]
teamname2 = t2serializer.data[0]["team_name"]
t1pic = t1serializer.data[0]["team_pic"]
t2pic = t2serializer.data[0]["team_pic"]
if teamid == t1_id or teamid == t2_id:
player_in_team = True
elif len(params["FC"]) == 1:
data = []
team = params["FC"][0]
t = Teams.objects.filter(Q(team_nickname__icontains=team))
teamserializer = TeamsSerializer(t, many=True)
t_id = teamserializer.data[0]["team_id"]
            obj = Games.objects.filter(Q(home_team=t_id) | Q(away_team=t_id)).order_by('-game_date')
gameserializer = GamesSerializer(obj, many=True)
home_id = gameserializer.data[0]["home_team"]
away_id = gameserializer.data[0]["away_team"]
t1 = Teams.objects.filter(Q(team_id=home_id))
t2 = Teams.objects.filter(Q(team_id=away_id))
t1serializer = TeamsSerializer(t1, many=True)
t2serializer = TeamsSerializer(t2, many=True)
teamname1 = t1serializer.data[0]["team_name"]
teamname2 = t2serializer.data[0]["team_name"]
t1pic = t1serializer.data[0]["team_pic"]
t2pic = t2serializer.data[0]["team_pic"]
if teamid == t_id:
player_in_team = True
for i in gameserializer.data:
gameid = i["game_id"]
gamedate = i["game_date"]
home_score = i["home_score"]
away_score = i["away_score"]
statsObj = Stats.objects.filter(Q(fk_game=gameid) & Q(fk_pl=player_id))
sSerializer = PerformanceSerializer(statsObj, many=True)
if len(sSerializer.data) == 0:
continue
if sSerializer.data[0]["sub_with_id"] != None:
subObj = Players.objects.filter(Q(pl_id=sSerializer.data[0]["sub_with_id"]))
subserializer = PlayerRecordSerializer(subObj, many=True)
sSerializer.data[0]["sub_with_id"] = subserializer.data[0]["pl_name"]
sSerializer.data[0]["game_date"]=gamedate
sSerializer.data[0]["pl_name"]=player_name
sSerializer.data[0]["pl_pic"]=player_pic
sSerializer.data[0]["home_team"]=teamname1
sSerializer.data[0]["away_team"]=teamname2
sSerializer.data[0]["home_score"]=home_score
sSerializer.data[0]["away_score"]=away_score
sSerializer.data[0]["home_pic"]=t1pic
sSerializer.data[0]["away_pic"]=t2pic
del sSerializer.data[0]["fk_game"]
del sSerializer.data[0]["fk_team"]
del sSerializer.data[0]["fk_pl"]
data.extend(sSerializer.data)
if len(data) == 0 and player_in_team:
message = "검색하신 선수는 선발/후보 명단에 들지 못했어요"
return data, message
elif len(data) == 0 and not player_in_team:
message = "검색하신 선수는 검색하신 팀(들)에 소속되지 않은 선수입니다."
return data, message
else:
message = ["이 경기의 다른 선수의 퍼포먼스도 궁금하시다면 [선수 이름] 도 알려줘! 라고 말해주세요 :D",
"'성적 종합해서 보여줘' 라고 말씀하시면 이번 시즌 이 선수의 종합 스텟을 보여드릴게요 XD",
"이 게임의 결과는 '경기는 어떻게 됬어?' 로 검색해보세요!"]
n = random.randrange(0, len(message))
return data[0], message[n]
|
TeamEmily/Emily_server
|
epl/views.py
|
Python
|
mit
| 19,515
|
[
"CRYSTAL"
] |
dcf4726f2d067e89807a30e1df7d5857b63dd6dee73039a2ac06c23627c2bda2
|
#!/usr/bin/env python2
# Copyright (C) 2012 W. Trevor King <wking@drexel.edu>
# Copyright (C) 2012 Sebastian Pipping <sebastian@pipping.org>
# Copyright (C) 2013 Brian dolbec <dolsen@gentoo.org>
# Licensed under GPL v2 or later
# This script should be run from the root of the catalyst source.
# source the testpath file then run "doc/make_target_table.py"
from __future__ import print_function
import sys as _sys
import glob
import re
def key_netboot_before_netboot2((target_name, module)):
    # Appending '1' makes 'netboot' sort as 'netboot1', ahead of
    # 'netboot2' (which sorts as 'netboot21').
    return target_name + '1'
if __name__ == '__main__':
    extractor = re.compile(r'^catalyst/targets/(([^ ]+))\.py$')
targets = list()
for filename in sorted(glob.glob('catalyst/targets/*.py')):
if '__init__' in filename:
continue
match = extractor.match(filename)
target_name = match.group(2).replace('_', '-')
module_name = 'catalyst.targets.' + match.group(1)
__import__(module_name)
module = _sys.modules[module_name]
targets.append((target_name, module))
for target_name, module in sorted(targets, key=key_netboot_before_netboot2):
print('`%s`;;' % target_name)
# Replace blank lines with `+` (asciidoc list item continuation)
print(module.__doc__.strip().replace('\n\n', '\n+\n'))
print('')
|
proneetv/catalyst
|
doc/make_target_table.py
|
Python
|
gpl-2.0
| 1,224
|
[
"Brian"
] |
dfbcf32394afc0ada4ac14435d9918c22429841f30c331509b2df3f8dcdaf3a3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Johan Gonqvist <johan.gronqvist@gmail.com>
# Copyright (C) 2007-2009 Gary Burton <gary.burton@zen.co.uk>
# Copyright (C) 2007-2009 Stephane Charette <stephanecharette@gmail.com>
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2008 Jason M. Simanek <jason@bohemianalps.com>
# Copyright (C) 2008-2011 Rob G. Healey <robhealey1@gmail.com>
# Copyright (C) 2010 Doug Blank <doug.blank@gmail.com>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010-2017 Serge Noiraud
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Benny Malengier
# Copyright (C) 2016 Allen Crider
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Narrative Web Page generator.
Classes:
EventPage - Event index page and individual Event pages
"""
#------------------------------------------------
# python modules
#------------------------------------------------
from collections import defaultdict
from operator import itemgetter
from decimal import getcontext
import logging
#------------------------------------------------
# Gramps module
#------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.lib import (Date, Event)
from gramps.gen.plug.report import Bibliography
from gramps.plugins.lib.libhtml import Html
#------------------------------------------------
# specific narrative web import
#------------------------------------------------
from gramps.plugins.webreport.basepage import BasePage
from gramps.plugins.webreport.common import (get_first_letters, _ALPHAEVENT,
_EVENTMAP, alphabet_navigation,
FULLCLEAR, sort_event_types,
primary_difference,
get_index_letter)
_ = glocale.translation.sgettext
LOG = logging.getLogger(".NarrativeWeb")
getcontext().prec = 8
#################################################
#
# creates the Event List Page and EventPages
#
#################################################
class EventPages(BasePage):
"""
This class is responsible for displaying information about the 'Person'
database objects. It displays this information under the 'Events'
tab. It is told by the 'add_instances' call which 'Person's to display,
and remembers the list of persons. A single call to 'display_pages'
displays both the Event List (Index) page and all the Event
pages.
The base class 'BasePage' is initialised once for each page that is
displayed.
"""
def __init__(self, report):
"""
@param: report -- The instance of the main report class for
this report
"""
BasePage.__init__(self, report, title="")
self.event_handle_list = []
self.event_types = []
self.event_dict = defaultdict(set)
def display_pages(self, title):
"""
Generate and output the pages under the Event tab, namely the event
index and the individual event pages.
@param: title -- Is the title of the web page
"""
LOG.debug("obj_dict[Event]")
for item in self.report.obj_dict[Event].items():
LOG.debug(" %s", str(item))
event_handle_list = self.report.obj_dict[Event].keys()
event_types = []
for event_handle in event_handle_list:
event = self.r_db.get_event_from_handle(event_handle)
event_types.append(self._(event.get_type().xml_str()))
with self.r_user.progress(_("Narrated Web Site Report"),
_("Creating event pages"),
len(event_handle_list) + 1
) as step:
self.eventlistpage(self.report, title, event_types,
event_handle_list)
for event_handle in event_handle_list:
step()
self.eventpage(self.report, title, event_handle)
def eventlistpage(self, report, title, event_types, event_handle_list):
"""
Will create the event list page
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
@param: event_types -- A list of the type in the events database
@param: event_handle_list -- A list of event handles
"""
BasePage.__init__(self, report, title)
ldatec = 0
prev_letter = " "
output_file, sio = self.report.create_file("events")
eventslistpage, head, body = self.write_header(self._("Events"))
# begin events list division
with Html("div", class_="content", id="EventList") as eventlist:
body += eventlist
msg = self._("This page contains an index of all the events in the "
"database, sorted by their type and date (if one is "
"present). Clicking on an event’s Gramps ID "
"will open a page for that event.")
eventlist += Html("p", msg, id="description")
# get alphabet navigation...
index_list = get_first_letters(self.r_db, event_types,
_ALPHAEVENT)
alpha_nav = alphabet_navigation(index_list, self.rlocale)
if alpha_nav:
eventlist += alpha_nav
# begin alphabet event table
with Html("table",
class_="infolist primobjlist alphaevent") as table:
eventlist += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
trow.extend(
Html("th", label, class_=colclass, inline=True)
for (label, colclass) in [(self._("Letter"),
"ColumnRowLabel"),
(self._("Type"), "ColumnType"),
(self._("Date"), "ColumnDate"),
(self._("Gramps ID"),
"ColumnGRAMPSID"),
(self._("Person"), "ColumnPerson")
]
)
tbody = Html("tbody")
table += tbody
            # separate events by their type and then their event handles
for (evt_type,
data_list) in sort_event_types(self.r_db,
event_types,
event_handle_list,
self.rlocale):
first = True
_event_displayed = []
# sort datalist by date of event and by event handle...
data_list = sorted(data_list, key=itemgetter(0, 1))
first_event = True
for (sort_value, event_handle) in data_list:
event = self.r_db.get_event_from_handle(event_handle)
_type = event.get_type()
gid = event.get_gramps_id()
if event.get_change_time() > ldatec:
ldatec = event.get_change_time()
# check to see if we have listed this gramps_id yet?
if gid not in _event_displayed:
# family event
if int(_type) in _EVENTMAP:
handle_list = set(
self.r_db.find_backlink_handles(
event_handle,
include_classes=['Family', 'Person']))
else:
handle_list = set(
self.r_db.find_backlink_handles(
event_handle,
include_classes=['Person']))
if handle_list:
trow = Html("tr")
tbody += trow
# set up hyperlinked letter for
# alphabet_navigation
tcell = Html("td", class_="ColumnLetter",
inline=True)
trow += tcell
if evt_type and not evt_type.isspace():
letter = get_index_letter(
self._(str(evt_type)[0].capitalize()),
index_list, self.rlocale)
else:
letter = " "
if first or primary_difference(letter,
prev_letter,
self.rlocale):
first = False
prev_letter = letter
t_a = 'class = "BeginLetter BeginType"'
trow.attr = t_a
ttle = self._("Event types beginning "
"with letter %s") % letter
tcell += Html("a", letter, name=letter,
id_=letter, title=ttle,
inline=True)
else:
tcell += " "
# display Event type if first in the list
tcell = Html("td", class_="ColumnType",
title=self._(evt_type),
inline=True)
trow += tcell
if first_event:
tcell += self._(evt_type)
if trow.attr == "":
trow.attr = 'class = "BeginType"'
else:
tcell += " "
# event date
tcell = Html("td", class_="ColumnDate",
inline=True)
trow += tcell
date = Date.EMPTY
if event:
date = event.get_date_object()
if date and date is not Date.EMPTY:
tcell += self.rlocale.get_date(date)
else:
tcell += " "
# Gramps ID
trow += Html("td", class_="ColumnGRAMPSID") + (
self.event_grampsid_link(event_handle,
gid, None)
)
# Person(s) column
tcell = Html("td", class_="ColumnPerson")
trow += tcell
# classname can either be a person or a family
first_person = True
# get person(s) for ColumnPerson
sorted_list = sorted(handle_list)
self.complete_people(tcell, first_person,
sorted_list,
uplink=False)
_event_displayed.append(gid)
first_event = False
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
body += (FULLCLEAR, footer)
        # send page out for processing
# and close the file
self.xhtml_writer(eventslistpage, output_file, sio, ldatec)
def _geteventdate(self, event_handle):
"""
Get the event date
@param: event_handle -- The handle for the event to use
"""
event_date = Date.EMPTY
event = self.r_db.get_event_from_handle(event_handle)
if event:
date = event.get_date_object()
if date:
                # return the date converted to the Gregorian calendar
return Date(date.get_year_calendar("Gregorian"),
date.get_month(), date.get_day())
# return empty date string
return event_date
def event_grampsid_link(self, handle, grampsid, uplink):
"""
Create a hyperlink from event handle, but show grampsid
@param: handle -- The handle for the event
@param: grampsid -- The gramps ID to display
@param: uplink -- If True, then "../../../" is inserted in front of
the result.
"""
url = self.report.build_url_fname_html(handle, "evt", uplink)
# return hyperlink to its caller
return Html("a", grampsid, href=url, title=grampsid, inline=True)
def eventpage(self, report, title, event_handle):
"""
Creates the individual event page
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
@param: event_handle -- The event handle for the database
"""
        event = report.database.get_event_from_handle(event_handle)
        if not event:
            return None
        BasePage.__init__(self, report, title, event.get_gramps_id())
ldatec = event.get_change_time()
event_media_list = event.get_media_list()
self.uplink = True
subdirs = True
evt_type = self._(event.get_type().xml_str())
self.page_title = "%(eventtype)s" % {'eventtype' : evt_type}
self.bibli = Bibliography()
output_file, sio = self.report.create_file(event_handle, "evt")
eventpage, head, body = self.write_header(self._("Events"))
# start event detail division
with Html("div", class_="content", id="EventDetail") as eventdetail:
body += eventdetail
thumbnail = self.disp_first_img_as_thumbnail(event_media_list,
event)
if thumbnail is not None:
eventdetail += thumbnail
# display page title
eventdetail += Html("h3", self.page_title, inline=True)
# begin eventdetail table
with Html("table", class_="infolist eventlist") as table:
eventdetail += table
tbody = Html("tbody")
table += tbody
evt_gid = event.get_gramps_id()
if not self.noid and evt_gid:
trow = Html("tr") + (
Html("td", self._("Gramps ID"),
class_="ColumnAttribute", inline=True),
Html("td", evt_gid,
class_="ColumnGRAMPSID", inline=True)
)
tbody += trow
# get event data
#
# for more information: see get_event_data()
#
event_data = self.get_event_data(event, event_handle,
subdirs, evt_gid)
for (label, colclass, data) in event_data:
if data:
trow = Html("tr") + (
Html("td", label, class_="ColumnAttribute",
inline=True),
Html('td', data, class_="Column" + colclass)
)
tbody += trow
# Narrative subsection
notelist = event.get_note_list()
notelist = self.display_note_list(notelist)
if notelist is not None:
eventdetail += notelist
# get attribute list
attrlist = event.get_attribute_list()
if attrlist:
attrsection, attrtable = self.display_attribute_header()
self.display_attr_list(attrlist, attrtable)
eventdetail += attrsection
# event source references
srcrefs = self.display_ind_sources(event)
if srcrefs is not None:
eventdetail += srcrefs
# display additional images as gallery
if self.create_media:
addgallery = self.disp_add_img_as_gallery(event_media_list,
event)
if addgallery:
eventdetail += addgallery
# References list
ref_list = self.display_bkref_list(Event, event_handle)
if ref_list is not None:
eventdetail += ref_list
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the page
self.xhtml_writer(eventpage, output_file, sio, ldatec)
|
jralls/gramps
|
gramps/plugins/webreport/event.py
|
Python
|
gpl-2.0
| 18,906
|
[
"Brian"
] |
9893129f17c02551f80dd049b0d9b0e9a49837c5babef7542c74bb2a9d7421aa
|
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
# James D. McClain
# Jason Yu
# Shining Sun
# Mario Motta
# Chong Sun
#
import numpy as np
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
from pyscf.cc import ccsd
from pyscf.cc import uccsd
from pyscf.cc import eom_rccsd
from pyscf.cc import uintermediates
########################################
# EOM-IP-CCSD
########################################
def vector_to_amplitudes_ip(vector, nmo, nocc):
'''For spin orbitals'''
nocca, noccb = nocc
nmoa, nmob = nmo
nvira, nvirb = nmoa-nocca, nmob-noccb
sizes = (nocca, noccb, nocca*(nocca-1)//2*nvira, noccb*nocca*nvira,
nocca*noccb*nvirb, noccb*(noccb-1)//2*nvirb)
sections = np.cumsum(sizes[:-1])
r1a, r1b, r2a, r2baa, r2abb, r2b = np.split(vector, sections)
r2a = r2a.reshape(nocca*(nocca-1)//2,nvira)
r2b = r2b.reshape(noccb*(noccb-1)//2,nvirb)
r2baa = r2baa.reshape(noccb,nocca,nvira).copy()
r2abb = r2abb.reshape(nocca,noccb,nvirb).copy()
idxa = np.tril_indices(nocca, -1)
idxb = np.tril_indices(noccb, -1)
r2aaa = np.zeros((nocca,nocca,nvira), vector.dtype)
r2bbb = np.zeros((noccb,noccb,nvirb), vector.dtype)
    r2aaa[idxa[0],idxa[1]] = r2a
    r2aaa[idxa[1],idxa[0]] = -r2a
    r2bbb[idxb[0],idxb[1]] = r2b
    r2bbb[idxb[1],idxb[0]] = -r2b
r1 = (r1a.copy(), r1b.copy())
r2 = (r2aaa, r2baa, r2abb, r2bbb)
return r1, r2
def amplitudes_to_vector_ip(r1, r2):
'''For spin orbitals'''
r1a, r1b = r1
r2aaa, r2baa, r2abb, r2bbb = r2
nocca, noccb, nvirb = r2abb.shape
idxa = np.tril_indices(nocca, -1)
idxb = np.tril_indices(noccb, -1)
return np.hstack((r1a, r1b,
r2aaa[idxa].ravel(), r2baa.ravel(),
r2abb.ravel(), r2bbb[idxb].ravel()))
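# A round-trip sanity-check sketch for the IP packing above (the sizes are
# illustrative): with nocc = (3, 2) and nmo = (6, 6), nvira = 3 and nvirb = 4,
# so the packed vector has 3+2+9+18+24+4 = 60 elements.
#     vec = np.random.rand(60)
#     r1, r2 = vector_to_amplitudes_ip(vec, (6, 6), (3, 2))
#     assert np.allclose(amplitudes_to_vector_ip(r1, r2), vec)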
def spatial2spin_ip(r1, r2, orbspin=None):
'''Convert R1/R2 of spatial orbital representation to R1/R2 of
spin-orbital representation
'''
r1a, r1b = r1
r2aaa, r2baa, r2abb, r2bbb = r2
nocc_a, nvir_a = r2aaa.shape[1:]
nocc_b, nvir_b = r2bbb.shape[1:]
if orbspin is None:
orbspin = np.zeros((nocc_a+nvir_a)*2, dtype=int)
orbspin[1::2] = 1
nocc = nocc_a + nocc_b
nvir = nvir_a + nvir_b
idxoa = np.where(orbspin[:nocc] == 0)[0]
idxob = np.where(orbspin[:nocc] == 1)[0]
idxva = np.where(orbspin[nocc:] == 0)[0]
idxvb = np.where(orbspin[nocc:] == 1)[0]
r1 = np.zeros((nocc), dtype=r1a.dtype)
r1[idxoa] = r1a
r1[idxob] = r1b
r2 = np.zeros((nocc**2, nvir), dtype=r2aaa.dtype)
idxoaa = idxoa[:,None] * nocc + idxoa
idxoab = idxoa[:,None] * nocc + idxob
idxoba = idxob[:,None] * nocc + idxoa
idxobb = idxob[:,None] * nocc + idxob
# idxvaa = idxva[:,None] * nvir + idxva
# idxvab = idxva[:,None] * nvir + idxvb
# idxvba = idxvb[:,None] * nvir + idxva
# idxvbb = idxvb[:,None] * nvir + idxvb
r2aaa = r2aaa.reshape(nocc_a*nocc_a, nvir_a)
r2baa = r2baa.reshape(nocc_b*nocc_a, nvir_a)
r2abb = r2abb.reshape(nocc_a*nocc_b, nvir_b)
r2bbb = r2bbb.reshape(nocc_b*nocc_b, nvir_b)
lib.takebak_2d(r2, r2aaa, idxoaa.ravel(), idxva.ravel())
lib.takebak_2d(r2, r2baa, idxoba.ravel(), idxva.ravel())
lib.takebak_2d(r2, r2abb, idxoab.ravel(), idxvb.ravel())
lib.takebak_2d(r2, r2bbb, idxobb.ravel(), idxvb.ravel())
r2aba = -r2baa
r2bab = -r2abb
lib.takebak_2d(r2, r2aba, idxoab.T.ravel(), idxva.ravel())
lib.takebak_2d(r2, r2bab, idxoba.T.ravel(), idxvb.ravel())
return r1, r2.reshape(nocc, nocc, nvir)
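# Note (illustrative): with orbspin=None the code above assumes interleaved
# alpha/beta spin orbitals, e.g. two spatial orbitals give
# orbspin = [0, 1, 0, 1].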
def spin2spatial_ip(r1, r2, orbspin):
nocc, nvir = r2.shape[1:]
idxoa = np.where(orbspin[:nocc] == 0)[0]
idxob = np.where(orbspin[:nocc] == 1)[0]
idxva = np.where(orbspin[nocc:] == 0)[0]
idxvb = np.where(orbspin[nocc:] == 1)[0]
nocc_a = len(idxoa)
nocc_b = len(idxob)
nvir_a = len(idxva)
nvir_b = len(idxvb)
r1a = r1[idxoa]
r1b = r1[idxob]
idxoaa = idxoa[:,None] * nocc + idxoa
idxoab = idxoa[:,None] * nocc + idxob
idxoba = idxob[:,None] * nocc + idxoa
idxobb = idxob[:,None] * nocc + idxob
#idxvaa = idxva[:,None] * nvir + idxva
#idxvab = idxva[:,None] * nvir + idxvb
#idxvba = idxvb[:,None] * nvir + idxva
#idxvbb = idxvb[:,None] * nvir + idxvb
r2 = r2.reshape(nocc**2, nvir)
r2aaa = lib.take_2d(r2, idxoaa.ravel(), idxva.ravel())
r2baa = lib.take_2d(r2, idxoba.ravel(), idxva.ravel())
r2abb = lib.take_2d(r2, idxoab.ravel(), idxvb.ravel())
r2bbb = lib.take_2d(r2, idxobb.ravel(), idxvb.ravel())
r2aaa = r2aaa.reshape(nocc_a, nocc_a, nvir_a)
r2baa = r2baa.reshape(nocc_b, nocc_a, nvir_a)
r2abb = r2abb.reshape(nocc_a, nocc_b, nvir_b)
r2bbb = r2bbb.reshape(nocc_b, nocc_b, nvir_b)
return [r1a, r1b], [r2aaa, r2baa, r2abb, r2bbb]
def ipccsd_matvec(eom, vector, imds=None, diag=None):
'''For spin orbitals
R2 operators of the form s_{ij}^{ b}, i.e. indices jb are coupled.'''
# Ref: Tu, Wang, and Li, J. Chem. Phys. 136, 174102 (2012) Eqs.(8)-(9)
if imds is None: imds = eom.make_imds()
t1, t2 = imds.t1, imds.t2
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
nmoa, nmob = nocca+nvira, noccb+nvirb
r1, r2 = vector_to_amplitudes_ip(vector, (nmoa,nmob), (nocca,noccb))
r1a, r1b = r1
r2aaa, r2baa, r2abb, r2bbb = r2
#Foo, Fov, and Wooov
Hr1a = np.einsum('me,mie->i', imds.Fov, r2aaa)
Hr1a -= np.einsum('ME,iME->i', imds.FOV, r2abb)
Hr1b = np.einsum('ME,MIE->I', imds.FOV, r2bbb)
Hr1b -= np.einsum('me,Ime->I', imds.Fov, r2baa)
Hr1a += -np.einsum('mi,m->i', imds.Foo, r1a)
Hr1b += -np.einsum('MI,M->I', imds.FOO, r1b)
Hr1a += -0.5*np.einsum('nime,mne->i', imds.Wooov, r2aaa)
Hr1b += np.einsum('NIme,Nme->I', imds.WOOov, r2baa)
Hr1b += -0.5*np.einsum('NIME,MNE->I', imds.WOOOV, r2bbb)
Hr1a += np.einsum('niME,nME->i', imds.WooOV, r2abb)
# Fvv term
Hr2aaa = lib.einsum('be,ije->ijb', imds.Fvv, r2aaa)
Hr2abb = lib.einsum('BE,iJE->iJB', imds.FVV, r2abb)
Hr2bbb = lib.einsum('BE,IJE->IJB', imds.FVV, r2bbb)
Hr2baa = lib.einsum('be,Ije->Ijb', imds.Fvv, r2baa)
# Foo term
tmpa = lib.einsum('mi,mjb->ijb', imds.Foo, r2aaa)
Hr2aaa -= tmpa - tmpa.transpose((1,0,2))
Hr2abb -= lib.einsum('mi,mJB->iJB', imds.Foo, r2abb)
Hr2abb -= lib.einsum('MJ,iMB->iJB', imds.FOO, r2abb)
Hr2baa -= lib.einsum('MI,Mjb->Ijb', imds.FOO, r2baa)
Hr2baa -= lib.einsum('mj,Imb->Ijb', imds.Foo, r2baa)
tmpb = lib.einsum('MI,MJB->IJB', imds.FOO, r2bbb)
Hr2bbb -= tmpb - tmpb.transpose((1,0,2))
# Wovoo term
Hr2aaa -= np.einsum('mjbi,m->ijb', imds.Woovo, r1a)
Hr2abb += np.einsum('miBJ,m->iJB', imds.WooVO, r1a)
Hr2baa += np.einsum('MIbj,M->Ijb', imds.WOOvo, r1b)
Hr2bbb -= np.einsum('MJBI,M->IJB', imds.WOOVO, r1b)
# Woooo term
Hr2aaa += .5 * lib.einsum('minj,mnb->ijb', imds.Woooo, r2aaa)
Hr2abb += lib.einsum('miNJ,mNB->iJB', imds.WooOO, r2abb)
Hr2bbb += .5 * lib.einsum('MINJ,MNB->IJB', imds.WOOOO, r2bbb)
Hr2baa += lib.einsum('njMI,Mnb->Ijb', imds.WooOO, r2baa)
# Wovvo terms
tmp = lib.einsum('mebj,ime->ijb', imds.Wovvo, r2aaa)
tmp += lib.einsum('MEbj,iME->ijb', imds.WOVvo, r2abb)
Hr2aaa += tmp - tmp.transpose(1, 0, 2)
WooVV = -imds.WoVVo.transpose(0,3,2,1)
WOOvv = -imds.WOvvO.transpose(0,3,2,1)
Hr2abb += lib.einsum('MEBJ,iME->iJB', imds.WOVVO, r2abb)
Hr2abb += lib.einsum('meBJ,ime->iJB', imds.WovVO, r2aaa)
Hr2abb += -lib.einsum('miBE,mJE->iJB', WooVV, r2abb)
Hr2baa += lib.einsum('meaj,Ime->Ija', imds.Wovvo, r2baa)
Hr2baa += lib.einsum('MEaj,IME->Ija', imds.WOVvo, r2bbb)
Hr2baa += -lib.einsum('MIab,Mjb->Ija', WOOvv, r2baa)
tmp = lib.einsum('MEBJ,IME->IJB', imds.WOVVO, r2bbb)
tmp += lib.einsum('meBJ,Ime->IJB', imds.WovVO, r2baa)
Hr2bbb += tmp - tmp.transpose(1, 0, 2)
# T2 term
Hr2aaa -= 0.5 * lib.einsum('menf,mnf,jibe->ijb', imds.Wovov, r2aaa, t2aa)
Hr2aaa -= lib.einsum('meNF,mNF,jibe->ijb', imds.WovOV, r2abb, t2aa)
Hr2abb -= 0.5 * lib.einsum('menf,mnf,iJeB->iJB', imds.Wovov, r2aaa, t2ab)
Hr2abb -= lib.einsum('meNF,mNF,iJeB->iJB', imds.WovOV, r2abb, t2ab)
Hr2baa -= 0.5 * lib.einsum('MENF,MNF,jIbE->Ijb', imds.WOVOV, r2bbb, t2ab)
Hr2baa -= lib.einsum('nfME,Mnf,jIbE->Ijb', imds.WovOV, r2baa, t2ab)
Hr2bbb -= 0.5 * lib.einsum('MENF,MNF,JIBE->IJB', imds.WOVOV, r2bbb, t2bb)
Hr2bbb -= lib.einsum('nfME,Mnf,JIBE->IJB', imds.WovOV, r2baa, t2bb)
vector = amplitudes_to_vector_ip([Hr1a, Hr1b], [Hr2aaa, Hr2baa, Hr2abb, Hr2bbb])
return vector
def ipccsd_diag(eom, imds=None):
if imds is None: imds = eom.make_imds()
t1, t2 = imds.t1, imds.t2
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocc_a, nvir_a = t1a.shape
nocc_b, nvir_b = t1b.shape
Hr1a = -np.diag(imds.Foo)
Hr1b = -np.diag(imds.FOO)
Fvv_diag = np.diag(imds.Fvv)
Foo_diag = np.diag(imds.Foo)
FOO_diag = np.diag(imds.FOO)
FVV_diag = np.diag(imds.FVV)
Woooo_slice = np.einsum('iijj->ij',imds.Woooo)
Wovvo_slice = np.einsum('iaai->ia',imds.Wovvo)
WooOO_slice = np.einsum('jjii->ij',imds.WooOO)
WOvvO_slice = np.einsum('iaai->ia',imds.WOvvO)
WooOO_slice_T = np.einsum('iijj->ij',imds.WooOO)
WoVVo_slice = np.einsum('iaai->ia',imds.WoVVo)
WOVVO_slice = np.einsum('jaaj->ja',imds.WOVVO)
WOOOO_slice = np.einsum('iijj->ij',imds.WOOOO)
Wovov_t2_dot = np.einsum('jaib,jiab->ija',imds.Wovov,t2aa)
WovOV_t2_dot = np.einsum('ibja,ijba->ija',imds.WovOV,t2ab)
WovOV_t2_dot_T = np.einsum('jaib,jiab->ija',imds.WovOV,t2ab)
WOVOV_t2_dot = np.einsum('jaib,jiab->ija',imds.WOVOV,t2bb)
Hr2aaa = Fvv_diag[None,None,:] - Foo_diag[:,None,None] - Foo_diag[None,:,None] \
+ Woooo_slice[:,:,None] + Wovvo_slice[:,None,:] + Wovvo_slice[None,:,:] \
- Wovov_t2_dot
Hr2baa = Fvv_diag[None,None,:] - FOO_diag[:,None,None] - Foo_diag[None,:,None] \
+ WooOO_slice[:,:,None] + WOvvO_slice[:,None,:] + Wovvo_slice[None,:,:] \
- WovOV_t2_dot_T
Hr2abb = FVV_diag[None,None,:] - Foo_diag[:,None,None] - FOO_diag[None,:,None] \
+ WooOO_slice_T[:,:,None] + WoVVo_slice[:,None,:] + WOVVO_slice[None,:,:] \
- WovOV_t2_dot
Hr2bbb = FVV_diag[None,None,:] - FOO_diag[:,None,None] - FOO_diag[None,:,None] \
+ WOOOO_slice[:,:,None] + WOVVO_slice[:,None,:] + WOVVO_slice[None,:,:] \
- WOVOV_t2_dot
vector = amplitudes_to_vector_ip([Hr1a, Hr1b], [Hr2aaa, Hr2baa, Hr2abb, Hr2bbb])
return vector
class EOMIP(eom_rccsd.EOMIP):
matvec = ipccsd_matvec
l_matvec = None
get_diag = ipccsd_diag
ipccsd_star = None
ccsd_star_contract = None
def __init__(self, cc):
eom_rccsd.EOMIP.__init__(self, cc)
self.nocc = cc.get_nocc()
self.nmo = cc.get_nmo()
def get_init_guess(self, nroots=1, koopmans=True, diag=None):
if koopmans:
nocca, noccb = self.nocc
idx = diag[:nocca+noccb].argsort()
else:
idx = diag.argsort()
size = self.vector_size()
dtype = getattr(diag, 'dtype', np.double)
nroots = min(nroots, size)
guess = []
for i in idx[:nroots]:
g = np.zeros(size, dtype)
g[i] = 1.0
guess.append(g)
return guess
def vector_to_amplitudes(self, vector, nmo=None, nocc=None):
if nmo is None: nmo = self.nmo
if nocc is None: nocc = self.nocc
return vector_to_amplitudes_ip(vector, nmo, nocc)
def amplitudes_to_vector(self, r1, r2):
return amplitudes_to_vector_ip(r1, r2)
def vector_size(self):
'''size of the vector based on spin-orbital basis'''
nocca, noccb = self.nocc
nmoa, nmob = self.nmo
nvira, nvirb = nmoa-nocca, nmob-noccb
return (nocca + noccb
+ nocca*(nocca-1)//2*nvira + noccb*nocca*nvira
+ nocca*noccb*nvirb + noccb*(noccb-1)//2*nvirb)
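    # For example (a worked case, not from the original source):
    # nocc = (2, 2) and nmo = (4, 4) give nvira = nvirb = 2, so
    # vector_size() = 2 + 2 + 1*2 + 2*2*2 + 2*2*2 + 1*2 = 24.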
def make_imds(self, eris=None):
imds = _IMDS(self._cc, eris)
imds.make_ip()
return imds
########################################
# EOM-EA-CCSD
########################################
def vector_to_amplitudes_ea(vector, nmo, nocc):
nocca, noccb = nocc
nmoa, nmob = nmo
nvira, nvirb = nmoa-nocca, nmob-noccb
sizes = (nvira, nvirb, nocca*nvira*(nvira-1)//2, nocca*nvirb*nvira,
noccb*nvira*nvirb, noccb*nvirb*(nvirb-1)//2)
sections = np.cumsum(sizes[:-1])
r1a, r1b, r2a, r2aba, r2bab, r2b = np.split(vector, sections)
r2a = r2a.reshape(nocca,nvira*(nvira-1)//2)
r2b = r2b.reshape(noccb,nvirb*(nvirb-1)//2)
r2aba = r2aba.reshape(nocca,nvirb,nvira).copy()
r2bab = r2bab.reshape(noccb,nvira,nvirb).copy()
idxa = np.tril_indices(nvira, -1)
idxb = np.tril_indices(nvirb, -1)
r2aaa = np.zeros((nocca,nvira,nvira), vector.dtype)
r2bbb = np.zeros((noccb,nvirb,nvirb), vector.dtype)
    r2aaa[:,idxa[0],idxa[1]] = r2a
    r2aaa[:,idxa[1],idxa[0]] = -r2a
    r2bbb[:,idxb[0],idxb[1]] = r2b
    r2bbb[:,idxb[1],idxb[0]] = -r2b
r1 = (r1a.copy(), r1b.copy())
r2 = (r2aaa, r2aba, r2bab, r2bbb)
return r1, r2
def amplitudes_to_vector_ea(r1, r2):
r1a, r1b = r1
r2aaa, r2aba, r2bab, r2bbb = r2
nocca, nvirb, nvira = r2aba.shape
idxa = np.tril_indices(nvira, -1)
idxb = np.tril_indices(nvirb, -1)
return np.hstack((r1a, r1b,
r2aaa[:,idxa[0],idxa[1]].ravel(),
r2aba.ravel(), r2bab.ravel(),
r2bbb[:,idxb[0],idxb[1]].ravel()))
def spatial2spin_ea(r1, r2, orbspin=None):
'''Convert R1/R2 of spatial orbital representation to R1/R2 of
spin-orbital representation
'''
r1a, r1b = r1
r2aaa, r2aba, r2bab, r2bbb = r2
nocc_a, nvir_a = r2aaa.shape[:2]
nocc_b, nvir_b = r2bbb.shape[:2]
if orbspin is None:
orbspin = np.zeros((nocc_a+nvir_a)*2, dtype=int)
orbspin[1::2] = 1
nocc = nocc_a + nocc_b
nvir = nvir_a + nvir_b
idxoa = np.where(orbspin[:nocc] == 0)[0]
idxob = np.where(orbspin[:nocc] == 1)[0]
idxva = np.where(orbspin[nocc:] == 0)[0]
idxvb = np.where(orbspin[nocc:] == 1)[0]
r1 = np.zeros((nvir), dtype=r1a.dtype)
r1[idxva] = r1a
r1[idxvb] = r1b
r2 = np.zeros((nocc, nvir**2), dtype=r2aaa.dtype)
#idxoaa = idxoa[:,None] * nocc + idxoa
#idxoab = idxoa[:,None] * nocc + idxob
#idxoba = idxob[:,None] * nocc + idxoa
#idxobb = idxob[:,None] * nocc + idxob
idxvaa = idxva[:,None] * nvir + idxva
idxvab = idxva[:,None] * nvir + idxvb
idxvba = idxvb[:,None] * nvir + idxva
idxvbb = idxvb[:,None] * nvir + idxvb
r2aaa = r2aaa.reshape(nocc_a, nvir_a*nvir_a)
r2aba = r2aba.reshape(nocc_a, nvir_b*nvir_a)
r2bab = r2bab.reshape(nocc_b, nvir_a*nvir_b)
r2bbb = r2bbb.reshape(nocc_b, nvir_b*nvir_b)
lib.takebak_2d(r2, r2aaa, idxoa.ravel(), idxvaa.ravel())
lib.takebak_2d(r2, r2aba, idxoa.ravel(), idxvba.ravel())
lib.takebak_2d(r2, r2bab, idxob.ravel(), idxvab.ravel())
lib.takebak_2d(r2, r2bbb, idxob.ravel(), idxvbb.ravel())
r2aab = -r2aba
r2bba = -r2bab
lib.takebak_2d(r2, r2bba, idxob.ravel(), idxvba.T.ravel())
lib.takebak_2d(r2, r2aab, idxoa.ravel(), idxvab.T.ravel())
r2 = r2.reshape(nocc, nvir, nvir)
return r1, r2
def spin2spatial_ea(r1, r2, orbspin):
nocc, nvir = r2.shape[:2]
idxoa = np.where(orbspin[:nocc] == 0)[0]
idxob = np.where(orbspin[:nocc] == 1)[0]
idxva = np.where(orbspin[nocc:] == 0)[0]
idxvb = np.where(orbspin[nocc:] == 1)[0]
nocc_a = len(idxoa)
nocc_b = len(idxob)
nvir_a = len(idxva)
nvir_b = len(idxvb)
r1a = r1[idxva]
r1b = r1[idxvb]
#idxoaa = idxoa[:,None] * nocc + idxoa
#idxoab = idxoa[:,None] * nocc + idxob
#idxoba = idxob[:,None] * nocc + idxoa
#idxobb = idxob[:,None] * nocc + idxob
idxvaa = idxva[:,None] * nvir + idxva
idxvab = idxva[:,None] * nvir + idxvb
idxvba = idxvb[:,None] * nvir + idxva
idxvbb = idxvb[:,None] * nvir + idxvb
r2 = r2.reshape(nocc, nvir**2)
r2aaa = lib.take_2d(r2, idxoa.ravel(), idxvaa.ravel())
r2aba = lib.take_2d(r2, idxoa.ravel(), idxvba.ravel())
r2bab = lib.take_2d(r2, idxob.ravel(), idxvab.ravel())
r2bbb = lib.take_2d(r2, idxob.ravel(), idxvbb.ravel())
r2aaa = r2aaa.reshape(nocc_a, nvir_a, nvir_a)
r2aba = r2aba.reshape(nocc_a, nvir_b, nvir_a)
r2bab = r2bab.reshape(nocc_b, nvir_a, nvir_b)
r2bbb = r2bbb.reshape(nocc_b, nvir_b, nvir_b)
return [r1a, r1b], [r2aaa, r2aba, r2bab, r2bbb]
def eaccsd_matvec(eom, vector, imds=None, diag=None):
'''For spin orbitals.
R2 operators of the form s_{ j}^{ab}, i.e. indices jb are coupled.'''
# Ref: Nooijen and Bartlett, J. Chem. Phys. 102, 3629 (1994) Eqs.(30)-(31)
if imds is None: imds = eom.make_imds()
t1, t2, eris = imds.t1, imds.t2, imds.eris
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
nmoa, nmob = nocca+nvira, noccb+nvirb
r1, r2 = vector_to_amplitudes_ea(vector, (nmoa,nmob), (nocca,noccb))
r1a, r1b = r1
r2aaa, r2aba, r2bab, r2bbb = r2
# Fov terms
Hr1a = np.einsum('ld,lad->a', imds.Fov, r2aaa)
Hr1a += np.einsum('LD,LaD->a', imds.FOV, r2bab)
Hr1b = np.einsum('ld,lAd->A', imds.Fov, r2aba)
Hr1b += np.einsum('LD,LAD->A', imds.FOV, r2bbb)
# Fvv terms
Hr1a += np.einsum('ac,c->a', imds.Fvv, r1a)
Hr1b += np.einsum('AC,C->A', imds.FVV, r1b)
# Wvovv
Hr1a += 0.5*lib.einsum('acld,lcd->a', imds.Wvvov, r2aaa)
Hr1a += lib.einsum('acLD,LcD->a', imds.WvvOV, r2bab)
Hr1b += 0.5*lib.einsum('ACLD,LCD->A', imds.WVVOV, r2bbb)
Hr1b += lib.einsum('ACld,lCd->A', imds.WVVov, r2aba)
#** Wvvvv term
#:Hr2aaa = lib.einsum('acbd,jcd->jab', eris_vvvv, r2aaa)
#:Hr2aba = lib.einsum('bdac,jcd->jab', eris_vvVV, r2aba)
#:Hr2bab = lib.einsum('acbd,jcd->jab', eris_vvVV, r2bab)
#:Hr2bbb = lib.einsum('acbd,jcd->jab', eris_VVVV, r2bbb)
u2 = (r2aaa + np.einsum('c,jd->jcd', r1a, t1a) - np.einsum('d,jc->jcd', r1a, t1a),
r2aba + np.einsum('c,jd->jcd', r1b, t1a),
r2bab + np.einsum('c,jd->jcd', r1a, t1b),
r2bbb + np.einsum('c,jd->jcd', r1b, t1b) - np.einsum('d,jc->jcd', r1b, t1b))
Hr2aaa, Hr2aba, Hr2bab, Hr2bbb = _add_vvvv_ea(eom._cc, u2, eris)
u2 = None
tauaa, tauab, taubb = uccsd.make_tau(t2, t1, t1)
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
tmpaaa = lib.einsum('menf,jef->mnj', eris_ovov, r2aaa) * .5
Hr2aaa += lib.einsum('mnj,mnab->jab', tmpaaa, tauaa)
tmpaaa = tauaa = None
tmpbbb = lib.einsum('menf,jef->mnj', eris_OVOV, r2bbb) * .5
Hr2bbb += lib.einsum('mnj,mnab->jab', tmpbbb, taubb)
tmpbbb = taubb = None
tmpabb = lib.einsum('menf,jef->mnj', eris_ovOV, r2bab)
Hr2bab += lib.einsum('mnj,mnab->jab', tmpabb, tauab)
tmpaba = lib.einsum('nfme,jef->nmj', eris_ovOV, r2aba)
Hr2aba += lib.einsum('nmj,nmba->jab', tmpaba, tauab)
tmpaba = tauab = None
eris_ovov = eris_OVOV = eris_ovOV = None
eris_ovvv = imds.eris.get_ovvv(slice(None))
tmpaaa = lib.einsum('mebf,jef->mjb', eris_ovvv, r2aaa)
tmpaaa = lib.einsum('mjb,ma->jab', tmpaaa, t1a)
Hr2aaa-= tmpaaa - tmpaaa.transpose(0,2,1)
tmpaaa = eris_ovvv = None
eris_OVVV = imds.eris.get_OVVV(slice(None))
tmpbbb = lib.einsum('mebf,jef->mjb', eris_OVVV, r2bbb)
tmpbbb = lib.einsum('mjb,ma->jab', tmpbbb, t1b)
Hr2bbb-= tmpbbb - tmpbbb.transpose(0,2,1)
tmpbbb = eris_OVVV = None
eris_ovVV = imds.eris.get_ovVV(slice(None))
eris_OVvv = imds.eris.get_OVvv(slice(None))
tmpaab = lib.einsum('meBF,jFe->mjB', eris_ovVV, r2aba)
Hr2aba-= lib.einsum('mjB,ma->jBa', tmpaab, t1a)
tmpabb = lib.einsum('meBF,JeF->mJB', eris_ovVV, r2bab)
Hr2bab-= lib.einsum('mJB,ma->JaB', tmpabb, t1a)
tmpaab = tmpabb = eris_ovVV = None
tmpbaa = lib.einsum('MEbf,jEf->Mjb', eris_OVvv, r2aba)
Hr2aba-= lib.einsum('Mjb,MA->jAb', tmpbaa, t1b)
tmpbba = lib.einsum('MEbf,JfE->MJb', eris_OVvv, r2bab)
Hr2bab-= lib.einsum('MJb,MA->JbA', tmpbba, t1b)
tmpbaa = tmpbba = eris_OVvv = None
#** Wvvvv term end
# Wvvvo
Hr2aaa += np.einsum('acbj,c->jab', imds.Wvvvo, r1a)
Hr2bbb += np.einsum('ACBJ,C->JAB', imds.WVVVO, r1b)
Hr2bab += np.einsum('acBJ,c->JaB', imds.WvvVO, r1a)
Hr2aba += np.einsum('ACbj,C->jAb', imds.WVVvo, r1b)
# Wovvo
tmp2aa = lib.einsum('ldbj,lad->jab', imds.Wovvo, r2aaa)
tmp2aa += lib.einsum('ldbj,lad->jab', imds.WOVvo, r2bab)
Hr2aaa += tmp2aa - tmp2aa.transpose(0,2,1)
Hr2bab += lib.einsum('ldbj,lad->jab', imds.WovVO, r2aaa)
Hr2bab += lib.einsum('ldbj,lad->jab', imds.WOVVO, r2bab)
Hr2bab += lib.einsum('ldaj,ldb->jab', imds.WOvvO, r2bab)
Hr2aba += lib.einsum('ldbj,lad->jab', imds.WOVvo, r2bbb)
Hr2aba += lib.einsum('ldbj,lad->jab', imds.Wovvo, r2aba)
Hr2aba += lib.einsum('ldaj,ldb->jab', imds.WoVVo, r2aba)
tmp2bb = lib.einsum('ldbj,lad->jab', imds.WOVVO, r2bbb)
tmp2bb += lib.einsum('ldbj,lad->jab', imds.WovVO, r2aba)
Hr2bbb += tmp2bb - tmp2bb.transpose(0,2,1)
#Fvv Term
tmpa = lib.einsum('ac,jcb->jab', imds.Fvv, r2aaa)
Hr2aaa += tmpa - tmpa.transpose((0,2,1))
Hr2aba += lib.einsum('AC,jCb->jAb', imds.FVV, r2aba)
Hr2bab += lib.einsum('ac,JcB->JaB', imds.Fvv, r2bab)
    Hr2aba += lib.einsum('bc,jAc->jAb', imds.Fvv, r2aba)
    Hr2bab += lib.einsum('BC,JaC->JaB', imds.FVV, r2bab)
tmpb = lib.einsum('AC,JCB->JAB', imds.FVV, r2bbb)
Hr2bbb += tmpb - tmpb.transpose((0,2,1))
#Foo Term
Hr2aaa -= lib.einsum('lj,lab->jab', imds.Foo, r2aaa)
Hr2bbb -= lib.einsum('LJ,LAB->JAB', imds.FOO, r2bbb)
Hr2bab -= lib.einsum('LJ,LaB->JaB', imds.FOO, r2bab)
Hr2aba -= lib.einsum('lj,lAb->jAb', imds.Foo, r2aba)
# Woovv term
Hr2aaa -= 0.5 * lib.einsum('kcld,lcd,kjab->jab', imds.Wovov, r2aaa, t2aa)
Hr2bab -= 0.5 * lib.einsum('kcld,lcd,kJaB->JaB', imds.Wovov, r2aaa, t2ab)
Hr2aba -= lib.einsum('ldKC,lCd,jKbA->jAb', imds.WovOV, r2aba, t2ab)
Hr2aaa -= lib.einsum('kcLD,LcD,kjab->jab', imds.WovOV, r2bab, t2aa)
Hr2aba -= 0.5 * lib.einsum('KCLD,LCD,jKbA->jAb', imds.WOVOV, r2bbb, t2ab)
Hr2bbb -= 0.5 * lib.einsum('KCLD,LCD,KJAB->JAB', imds.WOVOV, r2bbb, t2bb)
Hr2bbb -= lib.einsum('ldKC,lCd,KJAB->JAB', imds.WovOV, r2aba, t2bb)
Hr2bab -= lib.einsum('kcLD,LcD,kJaB->JaB', imds.WovOV, r2bab, t2ab)
vector = amplitudes_to_vector_ea([Hr1a, Hr1b], [Hr2aaa, Hr2aba, Hr2bab, Hr2bbb])
return vector
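# Hedged sketch of how the sigma routine above is consumed: a Davidson solver
# calls it once per trial vector, typically through a wrapper of the same form
# as gen_matvec in the EE classes below.
#
#   matvec = lambda xs: [eaccsd_matvec(myeom, x, imds) for x in xs]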
def _add_vvvv_ea(mycc, r2, eris):
time0 = logger.process_clock(), logger.perf_counter()
log = logger.Logger(mycc.stdout, mycc.verbose)
r2aaa, r2aba, r2bab, r2bbb = r2
nocca, noccb = mycc.nocc
if mycc.direct:
if getattr(eris, 'mo_coeff', None) is not None:
mo_a, mo_b = eris.mo_coeff
else:
moidxa, moidxb = mycc.get_frozen_mask()
mo_a = mycc.mo_coeff[0][:,moidxa]
mo_b = mycc.mo_coeff[1][:,moidxb]
r2aaa = lib.einsum('xab,pa->xpb', r2aaa, mo_a[:,nocca:])
r2aaa = lib.einsum('xab,pb->xap', r2aaa, mo_a[:,nocca:])
r2aba = lib.einsum('xab,pa->xpb', r2aba, mo_b[:,noccb:])
r2aba = lib.einsum('xab,pb->xap', r2aba, mo_a[:,nocca:])
r2bab = lib.einsum('xab,pa->xpb', r2bab, mo_a[:,nocca:])
r2bab = lib.einsum('xab,pb->xap', r2bab, mo_b[:,noccb:])
r2bbb = lib.einsum('xab,pa->xpb', r2bbb, mo_b[:,noccb:])
r2bbb = lib.einsum('xab,pb->xap', r2bbb, mo_b[:,noccb:])
r2 = np.vstack((r2aaa, r2aba, r2bab, r2bbb))
r2aaa = r2aba = r2bab = r2bbb = None
time0 = log.timer_debug1('vvvv-tau', *time0)
buf = ccsd._contract_vvvv_t2(mycc, mycc.mol, None, r2, verbose=log)
sections = np.cumsum([nocca,nocca,noccb])
Hr2aaa, Hr2aba, Hr2bab, Hr2bbb = np.split(buf, sections)
buf = None
Hr2aaa = lib.einsum('xpb,pa->xab', Hr2aaa, mo_a[:,nocca:])
Hr2aaa = lib.einsum('xap,pb->xab', Hr2aaa, mo_a[:,nocca:])
Hr2aba = lib.einsum('xpb,pa->xab', Hr2aba, mo_b[:,noccb:])
Hr2aba = lib.einsum('xap,pb->xab', Hr2aba, mo_a[:,nocca:])
Hr2bab = lib.einsum('xpb,pa->xab', Hr2bab, mo_a[:,nocca:])
Hr2bab = lib.einsum('xap,pb->xab', Hr2bab, mo_b[:,noccb:])
Hr2bbb = lib.einsum('xpb,pa->xab', Hr2bbb, mo_b[:,noccb:])
Hr2bbb = lib.einsum('xap,pb->xab', Hr2bbb, mo_b[:,noccb:])
elif r2aaa.dtype == np.double:
r2aab = np.asarray(r2aba.transpose(0,2,1), order='C')
Hr2aab = eris._contract_vvVV_t2(mycc, r2aab, mycc.direct, None)
Hr2aba = np.asarray(Hr2aab.transpose(0,2,1), order='C')
r2aab = Hr2aab = None
Hr2bab = eris._contract_vvVV_t2(mycc, r2bab, mycc.direct, None)
Hr2aaa = eris._contract_vvvv_t2(mycc, r2aaa, mycc.direct, None)
Hr2bbb = eris._contract_VVVV_t2(mycc, r2bbb, mycc.direct, None)
else:
noccb, nvira, nvirb = r2bab.shape
eris_vvvv = ao2mo.restore(1, np.asarray(eris.vvvv), nvira)
Hr2aaa = lib.einsum('acbd,jcd->jab', eris_vvvv, r2aaa)
eris_vvvv = None
eris_VVVV = ao2mo.restore(1, np.asarray(eris.VVVV), nvirb)
Hr2bbb = lib.einsum('acbd,jcd->jab', eris_VVVV, r2bbb)
eris_VVVV = None
sqa = lib.square_mat_in_trilu_indices(nvira)
sqb = lib.square_mat_in_trilu_indices(nvirb)
eris_vvVV = np.asarray(eris.vvVV)[:,sqb][sqa]
Hr2aba = lib.einsum('bdac,jcd->jab', eris_vvVV, r2aba)
Hr2bab = lib.einsum('acbd,jcd->jab', eris_vvVV, r2bab)
eris_vvVV = None
return Hr2aaa, Hr2aba, Hr2bab, Hr2bbb
def eaccsd_diag(eom, imds=None):
if imds is None: imds = eom.make_imds()
eris = imds.eris
t1, t2 = imds.t1, imds.t2
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
t2ba = t2ab.transpose(1,0,3,2)
nocca, nvira = t1a.shape
noccb, nvirb = t1b.shape
Hr1a = np.diag(imds.Fvv)
Hr1b = np.diag(imds.FVV)
#-------------- intermediates
Fvv_diag = np.diag(imds.Fvv)
Foo_diag = np.diag(imds.Foo)
FOO_diag = np.diag(imds.FOO)
FVV_diag = np.diag(imds.FVV)
Wovvo_slice = np.einsum('jbbj->jb',imds.Wovvo)
Wovov_t2_dot = np.einsum('iajb,ijab->jab',imds.Wovov,t2aa)
WoVVo_slice = np.einsum('jaaj->ja',imds.WoVVo)
WovOV_t2_dot = np.einsum('jbia,ijab->jab',imds.WovOV,t2ba)
WOVVO_slice = np.einsum('jaaj->ja',imds.WOVVO)
WOvvO_slice = np.einsum('jbbj->jb',imds.WOvvO)
WovOV_t2_dot_T = np.einsum('ibja,ijba->jab',imds.WovOV,t2ab)
WOVOV_t2_dot = np.einsum('iajb,ijab->jab',imds.WOVOV,t2bb)
#-------------- contraction
Hr2aaa = Fvv_diag[None,:,None]+Fvv_diag[None,None,:]-Foo_diag[:,None,None]+ \
Wovvo_slice[:,None,:]+Wovvo_slice[:,:,None]-Wovov_t2_dot
Hr2aba = FVV_diag[None,:,None]+Fvv_diag[None,None,:]-Foo_diag[:,None,None]+ \
Wovvo_slice[:,None,:]+WoVVo_slice[:,:,None]-WovOV_t2_dot
Hr2bab = -FOO_diag[:,None,None]+FVV_diag[None,:,None]+Fvv_diag[None,None,:]+ \
WOVVO_slice[:,:,None]+WOvvO_slice[:,None,:]-WovOV_t2_dot_T
Hr2bab = Hr2bab.transpose(0,2,1)
Hr2bbb = -FOO_diag[:,None,None]+FVV_diag[None,:,None]+FVV_diag[None,None,:]+ \
WOVVO_slice[:,:,None]+WOVVO_slice[:,None,:]-WOVOV_t2_dot
# if imds.Wvvvv is not None:
# Wvvvv_slice = np.einsum('aabb->ab',imds.Wvvvv)
# Hr2aaa += 0.5 * Wvvvv_slice[None,:,:]
# WVVvv_slice = np.einsum('aabb->ba',imds.WvvVV)
# Hr2aba += WVVvv_slice[None,:,:]
# WvvVV_slice = np.einsum('aabb->ab',imds.WvvVV)
# Hr2bab += WvvVV_slice[None,:,:]
# WVVVV_slice = np.einsum('aabb->ab',imds.WVVVV)
# Hr2bbb += 0.5 * WVVVV_slice[None,:,:]
# TODO: test Wvvvv contribution
# See also the code for Wvvvv contribution in function eeccsd_diag
tauaa, tauab, taubb = uccsd.make_tau(t2, t1, t1)
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
Wvvaa = .5*np.einsum('mnab,manb->ab', tauaa, eris_ovov)
Wvvbb = .5*np.einsum('mnab,manb->ab', taubb, eris_OVOV)
Wvvab = np.einsum('mNaB,maNB->aB', tauab, eris_ovOV)
eris_ovov = eris_OVOV = eris_ovOV = None
mem_now = lib.current_memory()[0]
max_memory = max(0, eom.max_memory - mem_now)
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira**3*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = eris.get_ovvv(slice(p0,p1)) # ovvv = eris.ovvv[p0:p1]
Wvvaa += np.einsum('mb,maab->ab', t1a[p0:p1], ovvv)
Wvvaa -= np.einsum('mb,mbaa->ab', t1a[p0:p1], ovvv)
ovvv = None
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb**3*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = eris.get_OVVV(slice(p0,p1)) # OVVV = eris.OVVV[p0:p1]
Wvvbb += np.einsum('mb,maab->ab', t1b[p0:p1], OVVV)
Wvvbb -= np.einsum('mb,mbaa->ab', t1b[p0:p1], OVVV)
OVVV = None
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira*nvirb**2*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
Wvvab -= np.einsum('mb,mbaa->ba', t1a[p0:p1], ovVV)
ovVV = None
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb*nvira**2*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
Wvvab -= np.einsum('mb,mbaa->ab', t1b[p0:p1], OVvv)
OVvv = None
Wvvaa = Wvvaa + Wvvaa.T
Wvvbb = Wvvbb + Wvvbb.T
if eris.vvvv is not None:
for i in range(nvira):
i0 = i*(i+1)//2
vvv = lib.unpack_tril(np.asarray(eris.vvvv[i0:i0+i+1]))
tmp = np.einsum('bb->b', vvv[i])
Wvvaa[i] += tmp
tmp = np.einsum('bb->b', vvv[:,:i+1,i])
Wvvaa[i,:i+1] -= tmp
Wvvaa[:i ,i] -= tmp[:i]
vvv = lib.unpack_tril(np.asarray(eris.vvVV[i0:i0+i+1]))
Wvvab[i] += np.einsum('bb->b', vvv[i])
vvv = None
for i in range(nvirb):
i0 = i*(i+1)//2
vvv = lib.unpack_tril(np.asarray(eris.VVVV[i0:i0+i+1]))
tmp = np.einsum('bb->b', vvv[i])
Wvvbb[i] += tmp
tmp = np.einsum('bb->b', vvv[:,:i+1,i])
Wvvbb[i,:i+1] -= tmp
Wvvbb[:i ,i] -= tmp[:i]
vvv = None
Wvvba = Wvvab.T
Hr2aaa += Wvvaa[None,:,:]
Hr2aba += Wvvba[None,:,:]
Hr2bab += Wvvab[None,:,:]
Hr2bbb += Wvvbb[None,:,:]
# Wvvvv contribution end
vector = amplitudes_to_vector_ea((Hr1a,Hr1b), (Hr2aaa,Hr2aba,Hr2bab,Hr2bbb))
return vector
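# Hedged sketch: the diagonal computed above serves both as the Davidson
# preconditioner and as the scoring vector for Koopmans-like unit guesses.
#
#   diag = eaccsd_diag(myeom)
#   guesses = myeom.get_init_guess(nroots=3, koopmans=True, diag=diag)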
class EOMEA(eom_rccsd.EOMEA):
matvec = eaccsd_matvec
l_matvec = None
get_diag = eaccsd_diag
eaccsd_star = None
ccsd_star_contract = None
def __init__(self, cc):
eom_rccsd.EOMEA.__init__(self, cc)
self.nocc = cc.get_nocc()
self.nmo = cc.get_nmo()
def get_init_guess(self, nroots=1, koopmans=True, diag=None):
if koopmans:
nocca, noccb = self.nocc
nmoa, nmob = self.nmo
nvira, nvirb = nmoa-nocca, nmob-noccb
idx = diag[:nvira+nvirb].argsort()
else:
idx = diag.argsort()
size = self.vector_size()
dtype = getattr(diag, 'dtype', np.double)
nroots = min(nroots, size)
guess = []
for i in idx[:nroots]:
g = np.zeros(size, dtype)
g[i] = 1.0
guess.append(g)
return guess
def vector_to_amplitudes(self, vector, nmo=None, nocc=None):
if nmo is None: nmo = self.nmo
if nocc is None: nocc = self.nocc
return vector_to_amplitudes_ea(vector, nmo, nocc)
def amplitudes_to_vector(self, r1, r2):
return amplitudes_to_vector_ea(r1, r2)
def vector_size(self):
        '''Size of the vector in the spin-orbital basis.'''
nocca, noccb = self.nocc
nmoa, nmob = self.nmo
nvira, nvirb = nmoa-nocca, nmob-noccb
return (nvira + nvirb
+ nocca*nvira*(nvira-1)//2 + nocca*nvirb*nvira
+ noccb*nvira*nvirb + noccb*nvirb*(nvirb-1)//2)
def make_imds(self, eris=None):
imds = _IMDS(self._cc, eris=eris)
imds.make_ea()
return imds
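# Minimal EOMEA usage sketch (hedged; assumes the standard pyscf gto/scf/cc
# drivers and a converged UCCSD reference):
#
#   from pyscf import gto, scf, cc
#   mol = gto.M(atom='O 0 0 0; H 0 0 0.96; H 0.93 0.3 0', basis='sto-3g')
#   mf = scf.UHF(mol).run()
#   mycc = cc.UCCSD(mf).run()
#   e_ea, v_ea = EOMEA(mycc).kernel(nroots=2)   # electron-attachment energies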
########################################
# EOM-EE-CCSD
########################################
def eeccsd(eom, nroots=1, koopmans=False, guess=None, eris=None, imds=None):
    '''Calculate N-electron neutral excitations via EOM-EE-CCSD.

    Kwargs:
nroots : int
Number of roots (eigenvalues) requested
koopmans : bool
Calculate Koopmans'-like (1p1h) excitations only, targeting via
overlap.
guess : list of ndarray
List of guess vectors to use for targeting via overlap.
'''
if eris is None: eris = eom._cc.ao2mo()
if imds is None: imds = eom.make_imds(eris)
spinvec_size = eom.vector_size()
nroots = min(nroots, spinvec_size)
diag_ee, diag_sf = eom.get_diag(imds)
guess_ee = []
guess_sf = []
if guess and guess[0].size == spinvec_size:
raise NotImplementedError
#TODO: initial guess from GCCSD EOM amplitudes
#from pyscf.cc import addons
#from pyscf.cc import eom_gccsd
#orbspin = scf.addons.get_ghf_orbspin(eris.mo_coeff)
#nmo = np.sum(eom.nmo)
#nocc = np.sum(eom.nocc)
#for g in guess:
# r1, r2 = eom_gccsd.vector_to_amplitudes_ee(g, nmo, nocc)
# r1aa = r1[orbspin==0][:,orbspin==0]
# r1ab = r1[orbspin==0][:,orbspin==1]
# if abs(r1aa).max() > 1e-7:
# r1 = addons.spin2spatial(r1, orbspin)
# r2 = addons.spin2spatial(r2, orbspin)
# guess_ee.append(eom.amplitudes_to_vector(r1, r2))
# else:
# r1 = spin2spatial_eomsf(r1, orbspin)
# r2 = spin2spatial_eomsf(r2, orbspin)
# guess_sf.append(amplitudes_to_vector_eomsf(r1, r2))
# r1 = r2 = r1aa = r1ab = g = None
#nroots_ee = len(guess_ee)
#nroots_sf = len(guess_sf)
elif guess:
for g in guess:
if g.size == diag_ee.size:
guess_ee.append(g)
else:
guess_sf.append(g)
nroots_ee = len(guess_ee)
nroots_sf = len(guess_sf)
else:
dee = np.sort(diag_ee)[:nroots]
dsf = np.sort(diag_sf)[:nroots]
dmax = np.sort(np.hstack([dee,dsf]))[nroots-1]
nroots_ee = np.count_nonzero(dee <= dmax)
nroots_sf = np.count_nonzero(dsf <= dmax)
guess_ee = guess_sf = None
def eomee_sub(cls, nroots, guess, diag):
ee_sub = cls(eom._cc)
ee_sub.__dict__.update(eom.__dict__)
e, v = ee_sub.kernel(nroots, koopmans, guess, eris, imds, diag=diag)
if nroots == 1:
e, v = [e], [v]
ee_sub.converged = [ee_sub.converged]
return list(ee_sub.converged), list(e), list(v)
e0 = e1 = []
v0 = v1 = []
conv0 = conv1 = []
if nroots_ee > 0:
conv0, e0, v0 = eomee_sub(EOMEESpinKeep, nroots_ee, guess_ee, diag_ee)
if nroots_sf > 0:
conv1, e1, v1 = eomee_sub(EOMEESpinFlip, nroots_sf, guess_sf, diag_sf)
e = np.hstack([e0,e1])
idx = e.argsort()
e = e[idx]
conv = conv0 + conv1
conv = [conv[x] for x in idx]
v = v0 + v1
v = [v[x] for x in idx]
if nroots == 1:
conv = conv[0]
e = e[0]
v = v[0]
eom.converged = conv
eom.e = e
eom.v = v
return eom.e, eom.v
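# Hedged usage sketch for the combined driver above: the spin-conserving and
# spin-flip blocks are diagonalized separately and the roots merged by energy.
#
#   myeom = EOMEE(mycc)
#   e_ee, v_ee = myeom.kernel(nroots=4)   # lowest 4 roots across both blocks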
def eomee_ccsd(eom, nroots=1, koopmans=False, guess=None,
eris=None, imds=None, diag=None):
if eris is None: eris = eom._cc.ao2mo()
if imds is None: imds = eom.make_imds(eris)
eom.converged, eom.e, eom.v \
= eom_rccsd.kernel(eom, nroots, koopmans, guess, imds=imds, diag=diag)
return eom.e, eom.v
def eomsf_ccsd(eom, nroots=1, koopmans=False, guess=None,
eris=None, imds=None, diag=None):
    '''Spin-flip EOM-EE-CCSD.  Delegates to eomee_ccsd; the spin-flip matvec
    and diagonal are supplied by the calling EOMEESpinFlip object.
    '''
return eomee_ccsd(eom, nroots, koopmans, guess, eris, imds, diag)
amplitudes_to_vector_ee = uccsd.amplitudes_to_vector
vector_to_amplitudes_ee = uccsd.vector_to_amplitudes
def amplitudes_to_vector_eomsf(t1, t2, out=None):
t1ab, t1ba = t1
t2baaa, t2aaba, t2abbb, t2bbab = t2
nocca, nvirb = t1ab.shape
noccb, nvira = t1ba.shape
otrila = np.tril_indices(nocca, k=-1)
otrilb = np.tril_indices(noccb, k=-1)
vtrila = np.tril_indices(nvira, k=-1)
vtrilb = np.tril_indices(nvirb, k=-1)
baaa = np.take(t2baaa.reshape(noccb*nocca,nvira*nvira),
vtrila[0]*nvira+vtrila[1], axis=1)
abbb = np.take(t2abbb.reshape(nocca*noccb,nvirb*nvirb),
vtrilb[0]*nvirb+vtrilb[1], axis=1)
vector = np.hstack((t1ab.ravel(), t1ba.ravel(),
baaa.ravel(), t2aaba[otrila].ravel(),
abbb.ravel(), t2bbab[otrilb].ravel()))
return vector
def vector_to_amplitudes_eomsf(vector, nmo, nocc):
nocca, noccb = nocc
nmoa, nmob = nmo
nvira, nvirb = nmoa-nocca, nmob-noccb
nbaaa = noccb*nocca*nvira*(nvira-1)//2
naaba = nocca*(nocca-1)//2*nvirb*nvira
nabbb = nocca*noccb*nvirb*(nvirb-1)//2
nbbab = noccb*(noccb-1)//2*nvira*nvirb
sizes = (nocca*nvirb, noccb*nvira, nbaaa, naaba, nabbb, nbbab)
sections = np.cumsum(sizes[:-1])
t1ab, t1ba, vbaaa, vaaba, vabbb, vbbab = np.split(vector, sections)
t1ab = t1ab.reshape(nocca,nvirb).copy()
t1ba = t1ba.reshape(noccb,nvira).copy()
t2baaa = np.zeros((noccb*nocca,nvira*nvira), dtype=vector.dtype)
t2aaba = np.zeros((nocca*nocca,nvirb*nvira), dtype=vector.dtype)
t2abbb = np.zeros((nocca*noccb,nvirb*nvirb), dtype=vector.dtype)
t2bbab = np.zeros((noccb*noccb,nvira*nvirb), dtype=vector.dtype)
otrila = np.tril_indices(nocca, k=-1)
otrilb = np.tril_indices(noccb, k=-1)
vtrila = np.tril_indices(nvira, k=-1)
vtrilb = np.tril_indices(nvirb, k=-1)
oidxab = np.arange(nocca*noccb, dtype=np.int32)
vidxab = np.arange(nvira*nvirb, dtype=np.int32)
vbaaa = vbaaa.reshape(noccb*nocca,-1)
lib.takebak_2d(t2baaa, vbaaa, oidxab, vtrila[0]*nvira+vtrila[1])
lib.takebak_2d(t2baaa,-vbaaa, oidxab, vtrila[1]*nvira+vtrila[0])
vaaba = vaaba.reshape(-1,nvirb*nvira)
lib.takebak_2d(t2aaba, vaaba, otrila[0]*nocca+otrila[1], vidxab)
lib.takebak_2d(t2aaba,-vaaba, otrila[1]*nocca+otrila[0], vidxab)
vabbb = vabbb.reshape(nocca*noccb,-1)
lib.takebak_2d(t2abbb, vabbb, oidxab, vtrilb[0]*nvirb+vtrilb[1])
lib.takebak_2d(t2abbb,-vabbb, oidxab, vtrilb[1]*nvirb+vtrilb[0])
vbbab = vbbab.reshape(-1,nvira*nvirb)
lib.takebak_2d(t2bbab, vbbab, otrilb[0]*noccb+otrilb[1], vidxab)
lib.takebak_2d(t2bbab,-vbbab, otrilb[1]*noccb+otrilb[0], vidxab)
t2baaa = t2baaa.reshape(noccb,nocca,nvira,nvira)
t2aaba = t2aaba.reshape(nocca,nocca,nvirb,nvira)
t2abbb = t2abbb.reshape(nocca,noccb,nvirb,nvirb)
t2bbab = t2bbab.reshape(noccb,noccb,nvira,nvirb)
return (t1ab,t1ba), (t2baaa, t2aaba, t2abbb, t2bbab)
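# Hedged round-trip check for the spin-flip packing helpers above: packing and
# unpacking are mutual inverses for vectors with the proper antisymmetry.
#
#   r1, r2 = vector_to_amplitudes_eomsf(vec, (nmoa, nmob), (nocca, noccb))
#   assert np.allclose(amplitudes_to_vector_eomsf(r1, r2), vec)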
def spatial2spin_eomsf(rx, orbspin):
    '''Convert spin-flip EOM R1, R2 from spatial-orbital to spin-orbital form.'''
if len(rx) == 2: # r1
r1ab, r1ba = rx
nocca, nvirb = r1ab.shape
noccb, nvira = r1ba.shape
else:
r2baaa,r2aaba,r2abbb,r2bbab = rx
noccb, nocca, nvira = r2baaa.shape[:3]
nvirb = r2aaba.shape[2]
nocc = nocca + noccb
nvir = nvira + nvirb
idxoa = np.where(orbspin[:nocc] == 0)[0]
idxob = np.where(orbspin[:nocc] == 1)[0]
idxva = np.where(orbspin[nocc:] == 0)[0]
idxvb = np.where(orbspin[nocc:] == 1)[0]
if len(rx) == 2: # r1
r1 = np.zeros((nocc,nvir), dtype=r1ab.dtype)
lib.takebak_2d(r1, r1ab, idxoa, idxvb)
lib.takebak_2d(r1, r1ba, idxob, idxva)
return r1
else:
r2 = np.zeros((nocc**2,nvir**2), dtype=r2aaba.dtype)
idxoaa = idxoa[:,None] * nocc + idxoa
idxoab = idxoa[:,None] * nocc + idxob
idxoba = idxob[:,None] * nocc + idxoa
idxobb = idxob[:,None] * nocc + idxob
idxvaa = idxva[:,None] * nvir + idxva
idxvab = idxva[:,None] * nvir + idxvb
idxvba = idxvb[:,None] * nvir + idxva
idxvbb = idxvb[:,None] * nvir + idxvb
r2baaa = r2baaa.reshape(noccb*nocca,nvira*nvira)
r2aaba = r2aaba.reshape(nocca*nocca,nvirb*nvira)
r2abbb = r2abbb.reshape(nocca*noccb,nvirb*nvirb)
r2bbab = r2bbab.reshape(noccb*noccb,nvira*nvirb)
lib.takebak_2d(r2, r2baaa, idxoba.ravel(), idxvaa.ravel())
lib.takebak_2d(r2, r2aaba, idxoaa.ravel(), idxvba.ravel())
lib.takebak_2d(r2, r2abbb, idxoab.ravel(), idxvbb.ravel())
lib.takebak_2d(r2, r2bbab, idxobb.ravel(), idxvab.ravel())
lib.takebak_2d(r2, r2baaa, idxoab.T.ravel(), idxvaa.T.ravel())
lib.takebak_2d(r2, r2aaba, idxoaa.T.ravel(), idxvab.T.ravel())
lib.takebak_2d(r2, r2abbb, idxoba.T.ravel(), idxvbb.T.ravel())
lib.takebak_2d(r2, r2bbab, idxobb.T.ravel(), idxvba.T.ravel())
return r2.reshape(nocc,nocc,nvir,nvir)
def spin2spatial_eomsf(rx, orbspin):
    '''Convert spin-flip EOM R1, R2 from spin-orbital to spatial-orbital form.'''
if rx.ndim == 2: # r1
nocc, nvir = rx.shape
else:
nocc, nvir = rx.shape[1:3]
idxoa = np.where(orbspin[:nocc] == 0)[0]
idxob = np.where(orbspin[:nocc] == 1)[0]
idxva = np.where(orbspin[nocc:] == 0)[0]
idxvb = np.where(orbspin[nocc:] == 1)[0]
nocca = len(idxoa)
noccb = len(idxob)
nvira = len(idxva)
nvirb = len(idxvb)
if rx.ndim == 2:
r1ab = lib.take_2d(rx, idxoa, idxvb)
r1ba = lib.take_2d(rx, idxob, idxva)
return r1ab, r1ba
else:
idxoaa = idxoa[:,None] * nocc + idxoa
idxoab = idxoa[:,None] * nocc + idxob
idxoba = idxob[:,None] * nocc + idxoa
idxobb = idxob[:,None] * nocc + idxob
idxvaa = idxva[:,None] * nvir + idxva
idxvab = idxva[:,None] * nvir + idxvb
idxvba = idxvb[:,None] * nvir + idxva
idxvbb = idxvb[:,None] * nvir + idxvb
r2 = rx.reshape(nocc**2,nvir**2)
r2baaa = lib.take_2d(r2, idxoba.ravel(), idxvaa.ravel())
r2aaba = lib.take_2d(r2, idxoaa.ravel(), idxvba.ravel())
r2abbb = lib.take_2d(r2, idxoab.ravel(), idxvbb.ravel())
r2bbab = lib.take_2d(r2, idxobb.ravel(), idxvab.ravel())
r2baaa = r2baaa.reshape(noccb,nocca,nvira,nvira)
r2aaba = r2aaba.reshape(nocca,nocca,nvirb,nvira)
r2abbb = r2abbb.reshape(nocca,noccb,nvirb,nvirb)
r2bbab = r2bbab.reshape(noccb,noccb,nvira,nvirb)
return r2baaa,r2aaba,r2abbb,r2bbab
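# Hedged sketch: spatial2spin_eomsf and spin2spatial_eomsf invert each other,
# e.g. for the R1 block of a spin-flip vector (whose aa/bb blocks vanish):
#
#   r1ab, r1ba = spin2spatial_eomsf(r1_spin, orbspin)
#   assert np.allclose(spatial2spin_eomsf((r1ab, r1ba), orbspin), r1_spin)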
# Ref: Wang, Tu, and Wang, J. Chem. Theory Comput. 10, 5567 (2014), Eqs. (9)-(10)
# Note: the last line in Eq. (10) is superfluous.
# See, e.g., Gwaltney, Nooijen, and Bartlett, Chem. Phys. Lett. 248, 189 (1996)
def eomee_ccsd_matvec(eom, vector, imds=None):
if imds is None: imds = eom.make_imds()
t1, t2, eris = imds.t1, imds.t2, imds.eris
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
nmoa, nmob = nocca+nvira, noccb+nvirb
r1, r2 = vector_to_amplitudes_ee(vector, (nmoa,nmob), (nocca,noccb))
r1a, r1b = r1
r2aa, r2ab, r2bb = r2
#:Hr2aa += lib.einsum('ijef,aebf->ijab', tau2aa, eris.vvvv) * .5
#:Hr2bb += lib.einsum('ijef,aebf->ijab', tau2bb, eris.VVVV) * .5
#:Hr2ab += lib.einsum('iJeF,aeBF->iJaB', tau2ab, eris.vvVV)
tau2aa, tau2ab, tau2bb = uccsd.make_tau(r2, r1, t1, 2)
Hr2aa, Hr2ab, Hr2bb = eom._cc._add_vvvv(None, (tau2aa,tau2ab,tau2bb), eris)
Hr2aa *= .5
Hr2bb *= .5
tau2aa = tau2ab = tau2bb = None
Hr1a = lib.einsum('ae,ie->ia', imds.Fvva, r1a)
Hr1a -= lib.einsum('mi,ma->ia', imds.Fooa, r1a)
Hr1a += np.einsum('me,imae->ia',imds.Fova, r2aa)
Hr1a += np.einsum('ME,iMaE->ia',imds.Fovb, r2ab)
Hr1b = lib.einsum('ae,ie->ia', imds.Fvvb, r1b)
Hr1b -= lib.einsum('mi,ma->ia', imds.Foob, r1b)
Hr1b += np.einsum('me,imae->ia',imds.Fovb, r2bb)
Hr1b += np.einsum('me,mIeA->IA',imds.Fova, r2ab)
Hr2aa += lib.einsum('minj,mnab->ijab', imds.woooo, r2aa) * .25
Hr2bb += lib.einsum('minj,mnab->ijab', imds.wOOOO, r2bb) * .25
Hr2ab += lib.einsum('miNJ,mNaB->iJaB', imds.wooOO, r2ab)
Hr2aa += lib.einsum('be,ijae->ijab', imds.Fvva, r2aa)
Hr2bb += lib.einsum('be,ijae->ijab', imds.Fvvb, r2bb)
Hr2ab += lib.einsum('BE,iJaE->iJaB', imds.Fvvb, r2ab)
Hr2ab += lib.einsum('be,iJeA->iJbA', imds.Fvva, r2ab)
Hr2aa -= lib.einsum('mj,imab->ijab', imds.Fooa, r2aa)
Hr2bb -= lib.einsum('mj,imab->ijab', imds.Foob, r2bb)
Hr2ab -= lib.einsum('MJ,iMaB->iJaB', imds.Foob, r2ab)
Hr2ab -= lib.einsum('mj,mIaB->jIaB', imds.Fooa, r2ab)
#:tau2aa, tau2ab, tau2bb = uccsd.make_tau(r2, r1, t1, 2)
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:Hr1a += lib.einsum('mfae,imef->ia', eris_ovvv, r2aa)
#:tmpaa = lib.einsum('meaf,ijef->maij', eris_ovvv, tau2aa)
#:Hr2aa+= lib.einsum('mb,maij->ijab', t1a, tmpaa)
#:tmpa = lib.einsum('mfae,me->af', eris_ovvv, r1a)
#:tmpa-= lib.einsum('meaf,me->af', eris_ovvv, r1a)
#:Hr1b += lib.einsum('mfae,imef->ia', eris_OVVV, r2bb)
#:tmpbb = lib.einsum('meaf,ijef->maij', eris_OVVV, tau2bb)
#:Hr2bb+= lib.einsum('mb,maij->ijab', t1b, tmpbb)
#:tmpb = lib.einsum('mfae,me->af', eris_OVVV, r1b)
#:tmpb-= lib.einsum('meaf,me->af', eris_OVVV, r1b)
#:Hr1b += lib.einsum('mfAE,mIfE->IA', eris_ovVV, r2ab)
#:tmpab = lib.einsum('meAF,iJeF->mAiJ', eris_ovVV, tau2ab)
#:Hr2ab-= lib.einsum('mb,mAiJ->iJbA', t1a, tmpab)
#:tmpb-= lib.einsum('meAF,me->AF', eris_ovVV, r1a)
#:Hr1a += lib.einsum('MFae,iMeF->ia', eris_OVvv, r2ab)
#:tmpba =-lib.einsum('MEaf,iJfE->MaiJ', eris_OVvv, tau2ab)
#:Hr2ab+= lib.einsum('MB,MaiJ->iJaB', t1b, tmpba)
#:tmpa-= lib.einsum('MEaf,ME->af', eris_OVvv, r1b)
tau2aa = uccsd.make_tau_aa(r2aa, r1a, t1a, 2)
mem_now = lib.current_memory()[0]
max_memory = max(0, eom.max_memory - mem_now)
tmpa = np.zeros((nvira,nvira))
tmpb = np.zeros((nvirb,nvirb))
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira**3*3))))
for p0, p1 in lib.prange(0, nocca, blksize):
ovvv = eris.get_ovvv(slice(p0,p1)) # ovvv = eris.ovvv[p0:p1]
Hr1a += lib.einsum('mfae,imef->ia', ovvv, r2aa[:,p0:p1])
tmpaa = lib.einsum('meaf,ijef->maij', ovvv, tau2aa)
Hr2aa+= lib.einsum('mb,maij->ijab', t1a[p0:p1], tmpaa)
tmpa+= lib.einsum('mfae,me->af', ovvv, r1a[p0:p1])
tmpa-= lib.einsum('meaf,me->af', ovvv, r1a[p0:p1])
ovvv = tmpaa = None
tau2aa = None
tau2bb = uccsd.make_tau_aa(r2bb, r1b, t1b, 2)
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb**3*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = eris.get_OVVV(slice(p0,p1)) # OVVV = eris.OVVV[p0:p1]
Hr1b += lib.einsum('mfae,imef->ia', OVVV, r2bb[:,p0:p1])
tmpbb = lib.einsum('meaf,ijef->maij', OVVV, tau2bb)
Hr2bb+= lib.einsum('mb,maij->ijab', t1b[p0:p1], tmpbb)
tmpb+= lib.einsum('mfae,me->af', OVVV, r1b[p0:p1])
tmpb-= lib.einsum('meaf,me->af', OVVV, r1b[p0:p1])
OVVV = tmpbb = None
tau2bb = None
tau2ab = uccsd.make_tau_ab(r2ab, r1 , t1 , 2)
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira*nvirb**2*3))))
for p0, p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
Hr1b += lib.einsum('mfAE,mIfE->IA', ovVV, r2ab[p0:p1])
tmpab = lib.einsum('meAF,iJeF->mAiJ', ovVV, tau2ab)
Hr2ab-= lib.einsum('mb,mAiJ->iJbA', t1a[p0:p1], tmpab)
tmpb-= lib.einsum('meAF,me->AF', ovVV, r1a[p0:p1])
ovVV = tmpab = None
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb*nvira**2*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
Hr1a += lib.einsum('MFae,iMeF->ia', OVvv, r2ab[:,p0:p1])
tmpba = lib.einsum('MEaf,iJfE->MaiJ', OVvv, tau2ab)
Hr2ab-= lib.einsum('MB,MaiJ->iJaB', t1b[p0:p1], tmpba)
tmpa-= lib.einsum('MEaf,ME->af', OVvv, r1b[p0:p1])
OVvv = tmpba = None
tau2ab = None
Hr2aa-= lib.einsum('af,ijfb->ijab', tmpa, t2aa)
Hr2bb-= lib.einsum('af,ijfb->ijab', tmpb, t2bb)
Hr2ab-= lib.einsum('af,iJfB->iJaB', tmpa, t2ab)
Hr2ab-= lib.einsum('AF,iJbF->iJbA', tmpb, t2ab)
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
tau2aa = uccsd.make_tau_aa(r2aa, r1a, t1a, 2)
tauaa = uccsd.make_tau_aa(t2aa, t1a, t1a)
tmpaa = lib.einsum('menf,ijef->mnij', eris_ovov, tau2aa)
Hr2aa += lib.einsum('mnij,mnab->ijab', tmpaa, tauaa) * 0.25
tmpaa = tau2aa = tauaa = None
tau2bb = uccsd.make_tau_aa(r2bb, r1b, t1b, 2)
taubb = uccsd.make_tau_aa(t2bb, t1b, t1b)
tmpbb = lib.einsum('menf,ijef->mnij', eris_OVOV, tau2bb)
Hr2bb += lib.einsum('mnij,mnab->ijab', tmpbb, taubb) * 0.25
tmpbb = tau2bb = taubb = None
tau2ab = uccsd.make_tau_ab(r2ab, r1 , t1 , 2)
tauab = uccsd.make_tau_ab(t2ab, t1 , t1)
tmpab = lib.einsum('meNF,iJeF->mNiJ', eris_ovOV, tau2ab)
Hr2ab += lib.einsum('mNiJ,mNaB->iJaB', tmpab, tauab)
tmpab = tau2ab = tauab = None
tmpa = lib.einsum('menf,imef->ni', eris_ovov, r2aa)
tmpa-= lib.einsum('neMF,iMeF->ni', eris_ovOV, r2ab)
tmpb = lib.einsum('menf,imef->ni', eris_OVOV, r2bb)
tmpb-= lib.einsum('mfNE,mIfE->NI', eris_ovOV, r2ab)
Hr1a += lib.einsum('na,ni->ia', t1a, tmpa)
Hr1b += lib.einsum('na,ni->ia', t1b, tmpb)
Hr2aa+= lib.einsum('mj,imab->ijab', tmpa, t2aa)
Hr2bb+= lib.einsum('mj,imab->ijab', tmpb, t2bb)
Hr2ab+= lib.einsum('MJ,iMaB->iJaB', tmpb, t2ab)
Hr2ab+= lib.einsum('mj,mIaB->jIaB', tmpa, t2ab)
tmp1a = np.einsum('menf,mf->en', eris_ovov, r1a)
tmp1a-= np.einsum('mfne,mf->en', eris_ovov, r1a)
tmp1a-= np.einsum('neMF,MF->en', eris_ovOV, r1b)
tmp1b = np.einsum('menf,mf->en', eris_OVOV, r1b)
tmp1b-= np.einsum('mfne,mf->en', eris_OVOV, r1b)
tmp1b-= np.einsum('mfNE,mf->EN', eris_ovOV, r1a)
tmpa = np.einsum('en,nb->eb', tmp1a, t1a)
tmpa+= lib.einsum('menf,mnfb->eb', eris_ovov, r2aa)
tmpa-= lib.einsum('meNF,mNbF->eb', eris_ovOV, r2ab)
tmpb = np.einsum('en,nb->eb', tmp1b, t1b)
tmpb+= lib.einsum('menf,mnfb->eb', eris_OVOV, r2bb)
tmpb-= lib.einsum('nfME,nMfB->EB', eris_ovOV, r2ab)
Hr2aa+= lib.einsum('eb,ijae->ijab', tmpa, t2aa)
Hr2bb+= lib.einsum('eb,ijae->ijab', tmpb, t2bb)
Hr2ab+= lib.einsum('EB,iJaE->iJaB', tmpb, t2ab)
Hr2ab+= lib.einsum('eb,iJeA->iJbA', tmpa, t2ab)
eris_ovOV = eris_OVOV = None
Hr2aa-= lib.einsum('mbij,ma->ijab', imds.wovoo, r1a)
Hr2bb-= lib.einsum('mbij,ma->ijab', imds.wOVOO, r1b)
Hr2ab-= lib.einsum('mBiJ,ma->iJaB', imds.woVoO, r1a)
Hr2ab-= lib.einsum('MbJi,MA->iJbA', imds.wOvOo, r1b)
Hr1a-= 0.5*lib.einsum('mine,mnae->ia', imds.wooov, r2aa)
Hr1a-= lib.einsum('miNE,mNaE->ia', imds.wooOV, r2ab)
Hr1b-= 0.5*lib.einsum('mine,mnae->ia', imds.wOOOV, r2bb)
Hr1b-= lib.einsum('MIne,nMeA->IA', imds.wOOov, r2ab)
tmpa = lib.einsum('mine,me->ni', imds.wooov, r1a)
tmpa-= lib.einsum('niME,ME->ni', imds.wooOV, r1b)
tmpb = lib.einsum('mine,me->ni', imds.wOOOV, r1b)
tmpb-= lib.einsum('NIme,me->NI', imds.wOOov, r1a)
Hr2aa+= lib.einsum('ni,njab->ijab', tmpa, t2aa)
Hr2bb+= lib.einsum('ni,njab->ijab', tmpb, t2bb)
Hr2ab+= lib.einsum('ni,nJaB->iJaB', tmpa, t2ab)
Hr2ab+= lib.einsum('NI,jNaB->jIaB', tmpb, t2ab)
for p0, p1 in lib.prange(0, nvira, nocca):
Hr2aa+= lib.einsum('ejab,ie->ijab', imds.wvovv[p0:p1], r1a[:,p0:p1])
Hr2ab+= lib.einsum('eJaB,ie->iJaB', imds.wvOvV[p0:p1], r1a[:,p0:p1])
for p0, p1 in lib.prange(0, nvirb, noccb):
Hr2bb+= lib.einsum('ejab,ie->ijab', imds.wVOVV[p0:p1], r1b[:,p0:p1])
Hr2ab+= lib.einsum('EjBa,IE->jIaB', imds.wVoVv[p0:p1], r1b[:,p0:p1])
Hr1a += np.einsum('maei,me->ia',imds.wovvo,r1a)
Hr1a += np.einsum('MaEi,ME->ia',imds.wOvVo,r1b)
Hr1b += np.einsum('maei,me->ia',imds.wOVVO,r1b)
Hr1b += np.einsum('mAeI,me->IA',imds.woVvO,r1a)
Hr2aa+= lib.einsum('mbej,imae->ijab', imds.wovvo, r2aa) * 2
Hr2aa+= lib.einsum('MbEj,iMaE->ijab', imds.wOvVo, r2ab) * 2
Hr2bb+= lib.einsum('mbej,imae->ijab', imds.wOVVO, r2bb) * 2
Hr2bb+= lib.einsum('mBeJ,mIeA->IJAB', imds.woVvO, r2ab) * 2
Hr2ab+= lib.einsum('mBeJ,imae->iJaB', imds.woVvO, r2aa)
Hr2ab+= lib.einsum('MBEJ,iMaE->iJaB', imds.wOVVO, r2ab)
Hr2ab+= lib.einsum('mBEj,mIaE->jIaB', imds.woVVo, r2ab)
Hr2ab+= lib.einsum('mbej,mIeA->jIbA', imds.wovvo, r2ab)
Hr2ab+= lib.einsum('MbEj,IMAE->jIbA', imds.wOvVo, r2bb)
Hr2ab+= lib.einsum('MbeJ,iMeA->iJbA', imds.wOvvO, r2ab)
Hr2aa *= .5
Hr2bb *= .5
Hr2aa = Hr2aa - Hr2aa.transpose(0,1,3,2)
Hr2aa = Hr2aa - Hr2aa.transpose(1,0,2,3)
Hr2bb = Hr2bb - Hr2bb.transpose(0,1,3,2)
Hr2bb = Hr2bb - Hr2bb.transpose(1,0,2,3)
vector = amplitudes_to_vector_ee((Hr1a,Hr1b), (Hr2aa,Hr2ab,Hr2bb))
return vector
def eomsf_ccsd_matvec(eom, vector, imds=None):
    '''Matrix-vector product (sigma equations) for spin-flip EOM-EE-CCSD.'''
if imds is None: imds = eom.make_imds()
t1, t2, eris = imds.t1, imds.t2, imds.eris
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
nmoa, nmob = nocca+nvira, noccb+nvirb
r1, r2 = vector_to_amplitudes_eomsf(vector, (nmoa,nmob), (nocca,noccb))
r1ab, r1ba = r1
r2baaa, r2aaba, r2abbb, r2bbab = r2
Hr1ab = np.einsum('ae,ie->ia', imds.Fvvb, r1ab)
Hr1ab -= np.einsum('mi,ma->ia', imds.Fooa, r1ab)
Hr1ab += np.einsum('me,imae->ia', imds.Fovb, r2abbb)
Hr1ab += np.einsum('me,imae->ia', imds.Fova, r2aaba)
Hr1ba = np.einsum('ae,ie->ia', imds.Fvva, r1ba)
Hr1ba -= np.einsum('mi,ma->ia', imds.Foob, r1ba)
Hr1ba += np.einsum('me,imae->ia', imds.Fova, r2baaa)
Hr1ba += np.einsum('me,imae->ia', imds.Fovb, r2bbab)
Hr2baaa = .5 *lib.einsum('njMI,Mnab->Ijab', imds.wooOO, r2baaa)
Hr2aaba = .25*lib.einsum('minj,mnAb->ijAb', imds.woooo, r2aaba)
Hr2abbb = .5 *lib.einsum('miNJ,mNAB->iJAB', imds.wooOO, r2abbb)
Hr2bbab = .25*lib.einsum('MINJ,MNaB->IJaB', imds.wOOOO, r2bbab)
Hr2baaa += lib.einsum('be,Ijae->Ijab', imds.Fvva , r2baaa)
Hr2baaa -= lib.einsum('mj,imab->ijab', imds.Fooa*.5, r2baaa)
Hr2baaa -= lib.einsum('MJ,Miab->Jiab', imds.Foob*.5, r2baaa)
Hr2bbab -= lib.einsum('mj,imab->ijab', imds.Foob , r2bbab)
Hr2bbab += lib.einsum('BE,IJaE->IJaB', imds.Fvvb*.5, r2bbab)
Hr2bbab += lib.einsum('be,IJeA->IJbA', imds.Fvva*.5, r2bbab)
Hr2aaba -= lib.einsum('mj,imab->ijab', imds.Fooa , r2aaba)
Hr2aaba += lib.einsum('be,ijAe->ijAb', imds.Fvva*.5, r2aaba)
Hr2aaba += lib.einsum('BE,ijEa->ijBa', imds.Fvvb*.5, r2aaba)
Hr2abbb += lib.einsum('BE,iJAE->iJAB', imds.Fvvb , r2abbb)
Hr2abbb -= lib.einsum('mj,imab->ijab', imds.Foob*.5, r2abbb)
Hr2abbb -= lib.einsum('mj,mIAB->jIAB', imds.Fooa*.5, r2abbb)
tau2baaa = np.einsum('ia,jb->ijab', r1ba, t1a)
tau2baaa = tau2baaa - tau2baaa.transpose(0,1,3,2)
tau2abbb = np.einsum('ia,jb->ijab', r1ab, t1b)
tau2abbb = tau2abbb - tau2abbb.transpose(0,1,3,2)
tau2aaba = np.einsum('ia,jb->ijab', r1ab, t1a)
tau2aaba = tau2aaba - tau2aaba.transpose(1,0,2,3)
tau2bbab = np.einsum('ia,jb->ijab', r1ba, t1b)
tau2bbab = tau2bbab - tau2bbab.transpose(1,0,2,3)
tau2baaa += r2baaa
tau2bbab += r2bbab
tau2abbb += r2abbb
tau2aaba += r2aaba
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:Hr1ba += lib.einsum('mfae,Imef->Ia', eris_ovvv, r2baaa)
#:tmp1aaba = lib.einsum('meaf,Ijef->maIj', eris_ovvv, tau2baaa)
#:Hr2baaa += lib.einsum('mb,maIj->Ijab', t1a , tmp1aaba)
mem_now = lib.current_memory()[0]
max_memory = max(0, eom.max_memory - mem_now)
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira**3*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = eris.get_ovvv(slice(p0,p1)) # ovvv = eris.ovvv[p0:p1]
Hr1ba += lib.einsum('mfae,Imef->Ia', ovvv, r2baaa[:,p0:p1])
tmp1aaba = lib.einsum('meaf,Ijef->maIj', ovvv, tau2baaa)
Hr2baaa += lib.einsum('mb,maIj->Ijab', t1a[p0:p1], tmp1aaba)
ovvv = tmp1aaba = None
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:Hr1ab += lib.einsum('MFAE,iMEF->iA', eris_OVVV, r2abbb)
#:tmp1bbab = lib.einsum('MEAF,iJEF->MAiJ', eris_OVVV, tau2abbb)
#:Hr2abbb += lib.einsum('MB,MAiJ->iJAB', t1b , tmp1bbab)
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb**3*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = eris.get_OVVV(slice(p0,p1)) # OVVV = eris.OVVV[p0:p1]
Hr1ab += lib.einsum('MFAE,iMEF->iA', OVVV, r2abbb[:,p0:p1])
tmp1bbab = lib.einsum('MEAF,iJEF->MAiJ', OVVV, tau2abbb)
Hr2abbb += lib.einsum('MB,MAiJ->iJAB', t1b[p0:p1], tmp1bbab)
OVVV = tmp1bbab = None
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:Hr1ab += lib.einsum('mfAE,imEf->iA', eris_ovVV, r2aaba)
#:tmp1abaa = lib.einsum('meAF,ijFe->mAij', eris_ovVV, tau2aaba)
#:tmp1abbb = lib.einsum('meAF,IJeF->mAIJ', eris_ovVV, tau2bbab)
#:tmp1ba = lib.einsum('mfAE,mE->Af', eris_ovVV, r1ab)
#:Hr2bbab -= lib.einsum('mb,mAIJ->IJbA', t1a*.5, tmp1abbb)
#:Hr2aaba -= lib.einsum('mb,mAij->ijAb', t1a*.5, tmp1abaa)
tmp1ba = np.zeros((nvirb,nvira))
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira*nvirb**2*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
Hr1ab += lib.einsum('mfAE,imEf->iA', ovVV, r2aaba[:,p0:p1])
tmp1abaa = lib.einsum('meAF,ijFe->mAij', ovVV, tau2aaba)
tmp1abbb = lib.einsum('meAF,IJeF->mAIJ', ovVV, tau2bbab)
tmp1ba += lib.einsum('mfAE,mE->Af', ovVV, r1ab[p0:p1])
Hr2bbab -= lib.einsum('mb,mAIJ->IJbA', t1a[p0:p1]*.5, tmp1abbb)
Hr2aaba -= lib.einsum('mb,mAij->ijAb', t1a[p0:p1]*.5, tmp1abaa)
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:Hr1ba += lib.einsum('MFae,IMeF->Ia', eris_OVvv, r2bbab)
#:tmp1baaa = lib.einsum('MEaf,ijEf->Maij', eris_OVvv, tau2aaba)
#:tmp1babb = lib.einsum('MEaf,IJfE->MaIJ', eris_OVvv, tau2bbab)
#:tmp1ab = lib.einsum('MFae,Me->aF', eris_OVvv, r1ba)
#:Hr2aaba -= lib.einsum('MB,Maij->ijBa', t1b*.5, tmp1baaa)
#:Hr2bbab -= lib.einsum('MB,MaIJ->IJaB', t1b*.5, tmp1babb)
tmp1ab = np.zeros((nvira,nvirb))
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb*nvira**2*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
Hr1ba += lib.einsum('MFae,IMeF->Ia', OVvv, r2bbab[:,p0:p1])
tmp1baaa = lib.einsum('MEaf,ijEf->Maij', OVvv, tau2aaba)
tmp1babb = lib.einsum('MEaf,IJfE->MaIJ', OVvv, tau2bbab)
tmp1ab+= lib.einsum('MFae,Me->aF', OVvv, r1ba[p0:p1])
Hr2aaba -= lib.einsum('MB,Maij->ijBa', t1b[p0:p1]*.5, tmp1baaa)
Hr2bbab -= lib.einsum('MB,MaIJ->IJaB', t1b[p0:p1]*.5, tmp1babb)
Hr2baaa += lib.einsum('aF,jIbF->Ijba', tmp1ab , t2ab)
Hr2bbab -= lib.einsum('aF,IJFB->IJaB', tmp1ab*.5, t2bb)
Hr2abbb += lib.einsum('Af,iJfB->iJBA', tmp1ba , t2ab)
Hr2aaba -= lib.einsum('Af,ijfb->ijAb', tmp1ba*.5, t2aa)
Hr2baaa -= lib.einsum('MbIj,Ma->Ijab', imds.wOvOo, r1ba )
Hr2bbab -= lib.einsum('MBIJ,Ma->IJaB', imds.wOVOO, r1ba*.5)
Hr2abbb -= lib.einsum('mBiJ,mA->iJAB', imds.woVoO, r1ab )
Hr2aaba -= lib.einsum('mbij,mA->ijAb', imds.wovoo, r1ab*.5)
Hr1ab -= 0.5*lib.einsum('mine,mnAe->iA', imds.wooov, r2aaba)
Hr1ab -= lib.einsum('miNE,mNAE->iA', imds.wooOV, r2abbb)
Hr1ba -= 0.5*lib.einsum('MINE,MNaE->Ia', imds.wOOOV, r2bbab)
Hr1ba -= lib.einsum('MIne,Mnae->Ia', imds.wOOov, r2baaa)
tmp1ab = lib.einsum('MIne,Me->nI', imds.wOOov, r1ba)
tmp1ba = lib.einsum('miNE,mE->Ni', imds.wooOV, r1ab)
Hr2baaa += lib.einsum('nI,njab->Ijab', tmp1ab*.5, t2aa)
Hr2bbab += lib.einsum('nI,nJaB->IJaB', tmp1ab , t2ab)
Hr2abbb += lib.einsum('Ni,NJAB->iJAB', tmp1ba*.5, t2bb)
Hr2aaba += lib.einsum('Ni,jNbA->ijAb', tmp1ba , t2ab)
for p0, p1 in lib.prange(0, nvira, nocca):
Hr2baaa += lib.einsum('ejab,Ie->Ijab', imds.wvovv[p0:p1], r1ba[:,p0:p1]*.5)
Hr2bbab += lib.einsum('eJaB,Ie->IJaB', imds.wvOvV[p0:p1], r1ba[:,p0:p1] )
for p0, p1 in lib.prange(0, nvirb, noccb):
Hr2abbb += lib.einsum('EJAB,iE->iJAB', imds.wVOVV[p0:p1], r1ab[:,p0:p1]*.5)
Hr2aaba += lib.einsum('EjAb,iE->ijAb', imds.wVoVv[p0:p1], r1ab[:,p0:p1] )
Hr1ab += np.einsum('mAEi,mE->iA', imds.woVVo, r1ab)
Hr1ba += np.einsum('MaeI,Me->Ia', imds.wOvvO, r1ba)
Hr2baaa += lib.einsum('mbej,Imae->Ijab', imds.wovvo, r2baaa)
Hr2baaa += lib.einsum('MbeJ,Miae->Jiab', imds.wOvvO, r2baaa)
Hr2baaa += lib.einsum('MbEj,IMaE->Ijab', imds.wOvVo, r2bbab)
Hr2bbab += lib.einsum('MBEJ,IMaE->IJaB', imds.wOVVO, r2bbab)
Hr2bbab += lib.einsum('MbeJ,IMeA->IJbA', imds.wOvvO, r2bbab)
Hr2bbab += lib.einsum('mBeJ,Imae->IJaB', imds.woVvO, r2baaa)
Hr2aaba += lib.einsum('mbej,imAe->ijAb', imds.wovvo, r2aaba)
Hr2aaba += lib.einsum('mBEj,imEa->ijBa', imds.woVVo, r2aaba)
Hr2aaba += lib.einsum('MbEj,iMAE->ijAb', imds.wOvVo, r2abbb)
Hr2abbb += lib.einsum('MBEJ,iMAE->iJAB', imds.wOVVO, r2abbb)
Hr2abbb += lib.einsum('mBEj,mIAE->jIAB', imds.woVVo, r2abbb)
Hr2abbb += lib.einsum('mBeJ,imAe->iJAB', imds.woVvO, r2aaba)
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
tauaa, tauab, taubb = uccsd.make_tau(t2, t1, t1)
tmp1baaa = lib.einsum('nfME,ijEf->Mnij', eris_ovOV, tau2aaba)
tmp1aaba = lib.einsum('menf,Ijef->mnIj', eris_ovov, tau2baaa)
tmp1abbb = lib.einsum('meNF,IJeF->mNIJ', eris_ovOV, tau2bbab)
tmp1bbab = lib.einsum('MENF,iJEF->MNiJ', eris_OVOV, tau2abbb)
Hr2baaa += 0.5*.5*lib.einsum('mnIj,mnab->Ijab', tmp1aaba, tauaa)
Hr2bbab += .5*lib.einsum('nMIJ,nMaB->IJaB', tmp1abbb, tauab)
Hr2aaba += .5*lib.einsum('Nmij,mNbA->ijAb', tmp1baaa, tauab)
Hr2abbb += 0.5*.5*lib.einsum('MNiJ,MNAB->iJAB', tmp1bbab, taubb)
tauaa = tauab = taubb = None
tmpab = lib.einsum('menf,Imef->nI', eris_ovov, r2baaa)
tmpab -= lib.einsum('nfME,IMfE->nI', eris_ovOV, r2bbab)
tmpba = lib.einsum('MENF,iMEF->Ni', eris_OVOV, r2abbb)
tmpba -= lib.einsum('meNF,imFe->Ni', eris_ovOV, r2aaba)
Hr1ab += np.einsum('NA,Ni->iA', t1b, tmpba)
Hr1ba += np.einsum('na,nI->Ia', t1a, tmpab)
Hr2baaa -= lib.einsum('mJ,imab->Jiab', tmpab*.5, t2aa)
Hr2bbab -= lib.einsum('mJ,mIaB->IJaB', tmpab*.5, t2ab) * 2
Hr2aaba -= lib.einsum('Mj,iMbA->ijAb', tmpba*.5, t2ab) * 2
Hr2abbb -= lib.einsum('Mj,IMAB->jIAB', tmpba*.5, t2bb)
tmp1ab = np.einsum('meNF,mF->eN', eris_ovOV, r1ab)
tmp1ba = np.einsum('nfME,Mf->En', eris_ovOV, r1ba)
tmpab = np.einsum('eN,NB->eB', tmp1ab, t1b)
tmpba = np.einsum('En,nb->Eb', tmp1ba, t1a)
tmpab -= lib.einsum('menf,mnBf->eB', eris_ovov, r2aaba)
tmpab += lib.einsum('meNF,mNFB->eB', eris_ovOV, r2abbb)
tmpba -= lib.einsum('MENF,MNbF->Eb', eris_OVOV, r2bbab)
tmpba += lib.einsum('nfME,Mnfb->Eb', eris_ovOV, r2baaa)
Hr2baaa -= lib.einsum('Eb,jIaE->Ijab', tmpba*.5, t2ab) * 2
Hr2bbab -= lib.einsum('Eb,IJAE->IJbA', tmpba*.5, t2bb)
Hr2aaba -= lib.einsum('eB,ijae->ijBa', tmpab*.5, t2aa)
Hr2abbb -= lib.einsum('eB,iJeA->iJAB', tmpab*.5, t2ab) * 2
eris_ovov = eris_OVOV = eris_ovOV = None
#:Hr2baaa += .5*lib.einsum('Ijef,aebf->Ijab', tau2baaa, eris.vvvv)
#:Hr2abbb += .5*lib.einsum('iJEF,AEBF->iJAB', tau2abbb, eris.VVVV)
#:Hr2bbab += .5*lib.einsum('IJeF,aeBF->IJaB', tau2bbab, eris.vvVV)
#:Hr2aaba += .5*lib.einsum('ijEf,bfAE->ijAb', tau2aaba, eris.vvVV)
fakeri = uccsd._ChemistsERIs()
fakeri.mol = eris.mol
if eom._cc.direct:
orbva = eris.mo_coeff[0][:,nocca:]
orbvb = eris.mo_coeff[1][:,noccb:]
tau2baaa = lib.einsum('ijab,pa,qb->ijpq', tau2baaa, .5*orbva, orbva)
tmp = eris._contract_vvvv_t2(eom._cc, tau2baaa, True)
Hr2baaa += lib.einsum('ijpq,pa,qb->ijab', tmp, orbva.conj(), orbva.conj())
tmp = None
tau2abbb = lib.einsum('ijab,pa,qb->ijpq', tau2abbb, .5*orbvb, orbvb)
tmp = eris._contract_VVVV_t2(eom._cc, tau2abbb, True)
Hr2abbb += lib.einsum('ijpq,pa,qb->ijab', tmp, orbvb.conj(), orbvb.conj())
tmp = None
else:
tau2baaa *= .5
Hr2baaa += eris._contract_vvvv_t2(eom._cc, tau2baaa, False)
tau2abbb *= .5
Hr2abbb += eris._contract_VVVV_t2(eom._cc, tau2abbb, False)
tau2bbab *= .5
Hr2bbab += eom._cc._add_vvVV(None, tau2bbab, eris)
tau2aaba = tau2aaba.transpose(0,1,3,2)*.5
Hr2aaba += eom._cc._add_vvVV(None, tau2aaba, eris).transpose(0,1,3,2)
Hr2baaa = Hr2baaa - Hr2baaa.transpose(0,1,3,2)
Hr2bbab = Hr2bbab - Hr2bbab.transpose(1,0,2,3)
Hr2abbb = Hr2abbb - Hr2abbb.transpose(0,1,3,2)
Hr2aaba = Hr2aaba - Hr2aaba.transpose(1,0,2,3)
vector = amplitudes_to_vector_eomsf((Hr1ab, Hr1ba), (Hr2baaa,Hr2aaba,Hr2abbb,Hr2bbab))
return vector
def eeccsd_diag(eom, imds=None):
if imds is None: imds = eom.make_imds()
eris = imds.eris
t1, t2 = imds.t1, imds.t2
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
tauaa, tauab, taubb = uccsd.make_tau(t2, t1, t1)
nocca, noccb, nvira, nvirb = t2ab.shape
Foa = imds.Fooa.diagonal()
Fob = imds.Foob.diagonal()
Fva = imds.Fvva.diagonal()
Fvb = imds.Fvvb.diagonal()
Wovaa = np.einsum('iaai->ia', imds.wovvo)
Wovbb = np.einsum('iaai->ia', imds.wOVVO)
Wovab = np.einsum('iaai->ia', imds.woVVo)
Wovba = np.einsum('iaai->ia', imds.wOvvO)
Hr1aa = lib.direct_sum('-i+a->ia', Foa, Fva)
Hr1bb = lib.direct_sum('-i+a->ia', Fob, Fvb)
Hr1ab = lib.direct_sum('-i+a->ia', Foa, Fvb)
Hr1ba = lib.direct_sum('-i+a->ia', Fob, Fva)
Hr1aa += Wovaa
Hr1bb += Wovbb
Hr1ab += Wovab
Hr1ba += Wovba
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
Wvvaa = .5*np.einsum('mnab,manb->ab', tauaa, eris_ovov)
Wvvbb = .5*np.einsum('mnab,manb->ab', taubb, eris_OVOV)
Wvvab = np.einsum('mNaB,maNB->aB', tauab, eris_ovOV)
ijb = np.einsum('iejb,ijbe->ijb', ovov, t2aa)
IJB = np.einsum('iejb,ijbe->ijb', OVOV, t2bb)
iJB =-np.einsum('ieJB,iJeB->iJB', eris_ovOV, t2ab)
Ijb =-np.einsum('jbIE,jIbE->Ijb', eris_ovOV, t2ab)
iJb =-np.einsum('ibJE,iJbE->iJb', eris_ovOV, t2ab)
jab = np.einsum('kajb,jkab->jab', ovov, t2aa)
JAB = np.einsum('kajb,jkab->jab', OVOV, t2bb)
jAb =-np.einsum('jbKA,jKbA->jAb', eris_ovOV, t2ab)
JaB =-np.einsum('kaJB,kJaB->JaB', eris_ovOV, t2ab)
jaB =-np.einsum('jaKB,jKaB->jaB', eris_ovOV, t2ab)
eris_ovov = eris_ovOV = eris_OVOV = ovov = OVOV = None
Hr2aa = lib.direct_sum('ijb+a->ijba', ijb, Fva)
Hr2bb = lib.direct_sum('ijb+a->ijba', IJB, Fvb)
Hr2ab = lib.direct_sum('iJb+A->iJbA', iJb, Fvb)
Hr2ab+= lib.direct_sum('iJB+a->iJaB', iJB, Fva)
Hr2aa+= lib.direct_sum('-i+jab->ijab', Foa, jab)
Hr2bb+= lib.direct_sum('-i+jab->ijab', Fob, JAB)
Hr2ab+= lib.direct_sum('-i+JaB->iJaB', Foa, JaB)
Hr2ab+= lib.direct_sum('-I+jaB->jIaB', Fob, jaB)
Hr2aa = Hr2aa + Hr2aa.transpose(0,1,3,2)
Hr2aa = Hr2aa + Hr2aa.transpose(1,0,2,3)
Hr2bb = Hr2bb + Hr2bb.transpose(0,1,3,2)
Hr2bb = Hr2bb + Hr2bb.transpose(1,0,2,3)
Hr2aa *= .5
Hr2bb *= .5
Hr2baaa = lib.direct_sum('Ijb+a->Ijba', Ijb, Fva)
Hr2aaba = lib.direct_sum('ijb+A->ijAb', ijb, Fvb)
Hr2aaba+= Fva.reshape(1,1,1,-1)
Hr2abbb = lib.direct_sum('iJB+A->iJBA', iJB, Fvb)
Hr2bbab = lib.direct_sum('IJB+a->IJaB', IJB, Fva)
Hr2bbab+= Fvb.reshape(1,1,1,-1)
Hr2baaa = Hr2baaa + Hr2baaa.transpose(0,1,3,2)
Hr2abbb = Hr2abbb + Hr2abbb.transpose(0,1,3,2)
Hr2baaa+= lib.direct_sum('-I+jab->Ijab', Fob, jab)
Hr2baaa-= Foa.reshape(1,-1,1,1)
tmpaaba = lib.direct_sum('-i+jAb->ijAb', Foa, jAb)
Hr2abbb+= lib.direct_sum('-i+JAB->iJAB', Foa, JAB)
Hr2abbb-= Fob.reshape(1,-1,1,1)
tmpbbab = lib.direct_sum('-I+JaB->IJaB', Fob, JaB)
Hr2aaba+= tmpaaba + tmpaaba.transpose(1,0,2,3)
Hr2bbab+= tmpbbab + tmpbbab.transpose(1,0,2,3)
tmpaaba = tmpbbab = None
Hr2aa += Wovaa.reshape(1,nocca,1,nvira)
Hr2aa += Wovaa.reshape(nocca,1,1,nvira)
Hr2aa += Wovaa.reshape(nocca,1,nvira,1)
Hr2aa += Wovaa.reshape(1,nocca,nvira,1)
Hr2ab += Wovbb.reshape(1,noccb,1,nvirb)
Hr2ab += Wovab.reshape(nocca,1,1,nvirb)
Hr2ab += Wovaa.reshape(nocca,1,nvira,1)
Hr2ab += Wovba.reshape(1,noccb,nvira,1)
Hr2bb += Wovbb.reshape(1,noccb,1,nvirb)
Hr2bb += Wovbb.reshape(noccb,1,1,nvirb)
Hr2bb += Wovbb.reshape(noccb,1,nvirb,1)
Hr2bb += Wovbb.reshape(1,noccb,nvirb,1)
Hr2baaa += Wovaa.reshape(1,nocca,1,nvira)
Hr2baaa += Wovba.reshape(noccb,1,1,nvira)
Hr2baaa += Wovba.reshape(noccb,1,nvira,1)
Hr2baaa += Wovaa.reshape(1,nocca,nvira,1)
Hr2aaba += Wovaa.reshape(1,nocca,1,nvira)
Hr2aaba += Wovaa.reshape(nocca,1,1,nvira)
Hr2aaba += Wovab.reshape(nocca,1,nvirb,1)
Hr2aaba += Wovab.reshape(1,nocca,nvirb,1)
Hr2abbb += Wovbb.reshape(1,noccb,1,nvirb)
Hr2abbb += Wovab.reshape(nocca,1,1,nvirb)
Hr2abbb += Wovab.reshape(nocca,1,nvirb,1)
Hr2abbb += Wovbb.reshape(1,noccb,nvirb,1)
Hr2bbab += Wovbb.reshape(1,noccb,1,nvirb)
Hr2bbab += Wovbb.reshape(noccb,1,1,nvirb)
Hr2bbab += Wovba.reshape(noccb,1,nvira,1)
Hr2bbab += Wovba.reshape(1,noccb,nvira,1)
Wooaa = np.einsum('iijj->ij', imds.woooo).copy()
Wooaa -= np.einsum('ijji->ij', imds.woooo)
Woobb = np.einsum('iijj->ij', imds.wOOOO).copy()
Woobb -= np.einsum('ijji->ij', imds.wOOOO)
Wooab = np.einsum('iijj->ij', imds.wooOO)
Wooba = Wooab.T
Wooaa *= .5
Woobb *= .5
Hr2aa += Wooaa.reshape(nocca,nocca,1,1)
Hr2ab += Wooab.reshape(nocca,noccb,1,1)
Hr2bb += Woobb.reshape(noccb,noccb,1,1)
Hr2baaa += Wooba.reshape(noccb,nocca,1,1)
Hr2aaba += Wooaa.reshape(nocca,nocca,1,1)
Hr2abbb += Wooab.reshape(nocca,noccb,1,1)
Hr2bbab += Woobb.reshape(noccb,noccb,1,1)
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:Wvvaa += np.einsum('mb,maab->ab', t1a, eris_ovvv)
#:Wvvaa -= np.einsum('mb,mbaa->ab', t1a, eris_ovvv)
mem_now = lib.current_memory()[0]
max_memory = max(0, eom.max_memory - mem_now)
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira**3*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = eris.get_ovvv(slice(p0,p1)) # ovvv = eris.ovvv[p0:p1]
Wvvaa += np.einsum('mb,maab->ab', t1a[p0:p1], ovvv)
Wvvaa -= np.einsum('mb,mbaa->ab', t1a[p0:p1], ovvv)
ovvv = None
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:Wvvbb += np.einsum('mb,maab->ab', t1b, eris_OVVV)
#:Wvvbb -= np.einsum('mb,mbaa->ab', t1b, eris_OVVV)
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb**3*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = eris.get_OVVV(slice(p0,p1)) # OVVV = eris.OVVV[p0:p1]
Wvvbb += np.einsum('mb,maab->ab', t1b[p0:p1], OVVV)
Wvvbb -= np.einsum('mb,mbaa->ab', t1b[p0:p1], OVVV)
OVVV = None
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:Wvvab -= np.einsum('mb,mbaa->ba', t1a, eris_ovVV)
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira*nvirb**2*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
Wvvab -= np.einsum('mb,mbaa->ba', t1a[p0:p1], ovVV)
ovVV = None
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb*nvira**2*3))))
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:Wvvab -= np.einsum('mb,mbaa->ab', t1b, eris_OVvv)
#idxa = np.arange(nvira)
#idxa = idxa*(idxa+1)//2+idxa
#for p0, p1 in lib.prange(0, noccb, blksize):
# OVvv = np.asarray(eris.OVvv[p0:p1])
# Wvvab -= np.einsum('mb,mba->ab', t1b[p0:p1], OVvv[:,:,idxa])
# OVvv = None
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
Wvvab -= np.einsum('mb,mbaa->ab', t1b[p0:p1], OVvv)
OVvv = None
Wvvaa = Wvvaa + Wvvaa.T
Wvvbb = Wvvbb + Wvvbb.T
#:eris_vvvv = ao2mo.restore(1, np.asarray(eris.vvvv), nvirb)
#:eris_VVVV = ao2mo.restore(1, np.asarray(eris.VVVV), nvirb)
#:eris_vvVV = _restore(np.asarray(eris.vvVV), nvira, nvirb)
#:Wvvaa += np.einsum('aabb->ab', eris_vvvv) - np.einsum('abba->ab', eris_vvvv)
#:Wvvbb += np.einsum('aabb->ab', eris_VVVV) - np.einsum('abba->ab', eris_VVVV)
#:Wvvab += np.einsum('aabb->ab', eris_vvVV)
if eris.vvvv is not None:
for i in range(nvira):
i0 = i*(i+1)//2
vvv = lib.unpack_tril(np.asarray(eris.vvvv[i0:i0+i+1]))
tmp = np.einsum('bb->b', vvv[i])
Wvvaa[i] += tmp
tmp = np.einsum('bb->b', vvv[:,:i+1,i])
Wvvaa[i,:i+1] -= tmp
Wvvaa[:i ,i] -= tmp[:i]
vvv = lib.unpack_tril(np.asarray(eris.vvVV[i0:i0+i+1]))
Wvvab[i] += np.einsum('bb->b', vvv[i])
vvv = None
for i in range(nvirb):
i0 = i*(i+1)//2
vvv = lib.unpack_tril(np.asarray(eris.VVVV[i0:i0+i+1]))
tmp = np.einsum('bb->b', vvv[i])
Wvvbb[i] += tmp
tmp = np.einsum('bb->b', vvv[:,:i+1,i])
Wvvbb[i,:i+1] -= tmp
Wvvbb[:i ,i] -= tmp[:i]
vvv = None
Wvvba = Wvvab.T
Hr2aa += Wvvaa.reshape(1,1,nvira,nvira)
Hr2ab += Wvvab.reshape(1,1,nvira,nvirb)
Hr2bb += Wvvbb.reshape(1,1,nvirb,nvirb)
Hr2baaa += Wvvaa.reshape(1,1,nvira,nvira)
Hr2aaba += Wvvba.reshape(1,1,nvirb,nvira)
Hr2abbb += Wvvbb.reshape(1,1,nvirb,nvirb)
Hr2bbab += Wvvab.reshape(1,1,nvira,nvirb)
vec_ee = amplitudes_to_vector_ee((Hr1aa,Hr1bb), (Hr2aa,Hr2ab,Hr2bb))
vec_sf = amplitudes_to_vector_eomsf((Hr1ab,Hr1ba), (Hr2baaa,Hr2aaba,Hr2abbb,Hr2bbab))
return vec_ee, vec_sf
class EOMEE(eom_rccsd.EOMEE):
def __init__(self, cc):
eom_rccsd.EOMEE.__init__(self, cc)
self.nocc = cc.get_nocc()
self.nmo = cc.get_nmo()
kernel = eeccsd
eeccsd = eeccsd
get_diag = eeccsd_diag
def vector_size(self):
        '''Size of the vector in the spin-orbital basis.'''
nocc = np.sum(self.nocc)
nvir = np.sum(self.nmo) - nocc
return nocc*nvir + nocc*(nocc-1)//2*nvir*(nvir-1)//2
def make_imds(self, eris=None):
imds = _IMDS(self._cc, eris=eris)
imds.make_ee()
return imds
class EOMEESpinKeep(EOMEE):
kernel = eomee_ccsd
eomee_ccsd = eomee_ccsd
matvec = eomee_ccsd_matvec
get_diag = eeccsd_diag
def get_init_guess(self, nroots=1, koopmans=True, diag=None):
if koopmans:
nocca, noccb = self.nocc
nmoa, nmob = self.nmo
nvira, nvirb = nmoa-nocca, nmob-noccb
            # Amplitudes are compressed by amplitudes_to_vector_ee.  sizea is
            # the offset in the compressed vector that points to the R1_beta
            # amplitudes; the addresses of R1_alpha and R1_beta are not
            # contiguous in the compressed vector.
sizea = nocca * nvira + nocca*(nocca-1)//2*nvira*(nvira-1)//2
diag = np.append(diag[:nocca*nvira], diag[sizea:sizea+noccb*nvirb])
addr = np.append(np.arange(nocca*nvira),
np.arange(sizea,sizea+noccb*nvirb))
idx = addr[diag.argsort()]
else:
idx = diag.argsort()
size = self.vector_size()
dtype = getattr(diag, 'dtype', np.double)
nroots = min(nroots, size)
guess = []
for i in idx[:nroots]:
g = np.zeros(size, dtype)
g[i] = 1.0
guess.append(g)
return guess
def gen_matvec(self, imds=None, diag=None, **kwargs):
if imds is None: imds = self.make_imds()
if diag is None: diag = self.get_diag(imds)[0]
matvec = lambda xs: [self.matvec(x, imds) for x in xs]
return matvec, diag
def vector_to_amplitudes(self, vector, nmo=None, nocc=None):
if nmo is None: nmo = self.nmo
if nocc is None: nocc = self.nocc
return vector_to_amplitudes_ee(vector, nmo, nocc)
def amplitudes_to_vector(self, r1, r2):
return amplitudes_to_vector_ee(r1, r2)
def vector_size(self):
        '''Size of the vector in the spin-orbital basis.'''
nocca, noccb = self.nocc
nmoa, nmob = self.nmo
nvira, nvirb = nmoa-nocca, nmob-noccb
sizea = nocca * nvira + nocca*(nocca-1)//2*nvira*(nvira-1)//2
sizeb = noccb * nvirb + noccb*(noccb-1)//2*nvirb*(nvirb-1)//2
sizeab = nocca * noccb * nvira * nvirb
return sizea+sizeb+sizeab
class EOMEESpinFlip(EOMEE):
kernel = eomsf_ccsd
eomsf_ccsd = eomsf_ccsd
matvec = eomsf_ccsd_matvec
def get_init_guess(self, nroots=1, koopmans=True, diag=None):
if koopmans:
nocca, noccb = self.nocc
nmoa, nmob = self.nmo
nvira, nvirb = nmoa-nocca, nmob-noccb
idx = diag[:nocca*nvirb+noccb*nvira].argsort()
else:
idx = diag.argsort()
size = self.vector_size()
dtype = getattr(diag, 'dtype', np.double)
nroots = min(nroots, size)
guess = []
for i in idx[:nroots]:
g = np.zeros(size, dtype)
g[i] = 1.0
guess.append(g)
return guess
def gen_matvec(self, imds=None, diag=None, **kwargs):
if imds is None: imds = self.make_imds()
if diag is None: diag = self.get_diag(imds)[1]
matvec = lambda xs: [self.matvec(x, imds) for x in xs]
return matvec, diag
def vector_to_amplitudes(self, vector, nmo=None, nocc=None):
if nmo is None: nmo = self.nmo
if nocc is None: nocc = self.nocc
return vector_to_amplitudes_eomsf(vector, nmo, nocc)
def amplitudes_to_vector(self, r1, r2):
return amplitudes_to_vector_eomsf(r1, r2)
def vector_size(self):
        '''Size of the vector in the spin-orbital basis.'''
nocca, noccb = self.nocc
nmoa, nmob = self.nmo
nvira, nvirb = nmoa-nocca, nmob-noccb
nbaaa = noccb*nocca*nvira*(nvira-1)//2
naaba = nocca*(nocca-1)//2*nvirb*nvira
nabbb = nocca*noccb*nvirb*(nvirb-1)//2
nbbab = noccb*(noccb-1)//2*nvira*nvirb
return nocca*nvirb + noccb*nvira + nbaaa + naaba + nabbb + nbbab
uccsd.UCCSD.EOMIP = lib.class_as_method(EOMIP)
uccsd.UCCSD.EOMEA = lib.class_as_method(EOMEA)
uccsd.UCCSD.EOMEE = lib.class_as_method(EOMEE)
uccsd.UCCSD.EOMEESpinKeep = lib.class_as_method(EOMEESpinKeep)
uccsd.UCCSD.EOMEESpinFlip = lib.class_as_method(EOMEESpinFlip)
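# With the registrations above, the EOM drivers hang off UCCSD objects directly
# (hedged sketch; standard pyscf workflow assumed):
#
#   mycc = cc.UCCSD(mf).run()
#   e_ip, v_ip = mycc.EOMIP().kernel(nroots=2)
#   e_ee, v_ee = mycc.EOMEE().kernel(nroots=4)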
class _IMDS:
# Exactly the same as RCCSD IMDS except
# -- rintermediates --> uintermediates
# -- Loo, Lvv, cc_Fov --> Foo, Fvv, Fov
# -- One less 2-virtual intermediate
def __init__(self, cc, eris=None):
self.verbose = cc.verbose
self.stdout = cc.stdout
self.t1 = cc.t1
self.t2 = cc.t2
if eris is None:
eris = cc.ao2mo()
self.eris = eris
self._made_shared = False
self.made_ip_imds = False
self.made_ea_imds = False
self.made_ee_imds = False
def _make_shared(self):
cput0 = (logger.process_clock(), logger.perf_counter())
t1, t2, eris = self.t1, self.t2, self.eris
self.Foo, self.FOO = uintermediates.Foo(t1, t2, eris)
self.Fvv, self.FVV = uintermediates.Fvv(t1, t2, eris)
self.Fov, self.FOV = uintermediates.Fov(t1, t2, eris)
# 2 virtuals
self.Wovvo, self.WovVO, self.WOVvo, self.WOVVO, self.WoVVo, self.WOvvO = \
uintermediates.Wovvo(t1, t2, eris)
Wovov = np.asarray(eris.ovov)
WOVOV = np.asarray(eris.OVOV)
Wovov = Wovov - Wovov.transpose(0,3,2,1)
WOVOV = WOVOV - WOVOV.transpose(0,3,2,1)
self.Wovov = Wovov
self.WovOV = eris.ovOV
self.WOVov = None
self.WOVOV = WOVOV
self._made_shared = True
logger.timer_debug1(self, 'EOM-CCSD shared intermediates', *cput0)
return self
def make_ip(self):
if not self._made_shared:
self._make_shared()
cput0 = (logger.process_clock(), logger.perf_counter())
t1, t2, eris = self.t1, self.t2, self.eris
# 0 or 1 virtuals
self.Woooo, self.WooOO, _ , self.WOOOO = uintermediates.Woooo(t1, t2, eris)
self.Wooov, self.WooOV, self.WOOov, self.WOOOV = uintermediates.Wooov(t1, t2, eris)
self.Woovo, self.WooVO, self.WOOvo, self.WOOVO = uintermediates.Woovo(t1, t2, eris)
self.made_ip_imds = True
logger.timer_debug1(self, 'EOM-UCCSD IP intermediates', *cput0)
return self
def make_ea(self):
if not self._made_shared:
self._make_shared()
cput0 = (logger.process_clock(), logger.perf_counter())
t1, t2, eris = self.t1, self.t2, self.eris
# 3 or 4 virtuals
self.Wvvov, self.WvvOV, self.WVVov, self.WVVOV = uintermediates.Wvvov(t1, t2, eris)
self.Wvvvv = None # too expensive to hold Wvvvv
self.Wvvvo, self.WvvVO, self.WVVvo, self.WVVVO = uintermediates.Wvvvo(t1, t2, eris)
# The contribution of Wvvvv
t1a, t1b = t1
# The contraction to eris.vvvv is included in eaccsd_matvec
#:vvvv = eris.vvvv - eris.vvvv.transpose(0,3,2,1)
#:VVVV = eris.VVVV - eris.VVVV.transpose(0,3,2,1)
#:self.Wvvvo += lib.einsum('abef,if->abei', vvvv, t1a)
#:self.WvvVO += lib.einsum('abef,if->abei', eris_vvVV, t1b)
#:self.WVVvo += lib.einsum('efab,if->abei', eris_vvVV, t1a)
#:self.WVVVO += lib.einsum('abef,if->abei', VVVV, t1b)
tauaa, tauab, taubb = uccsd.make_tau(t2, t1, t1)
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
tmp = lib.einsum('menf,if->meni', ovov, t1a) * .5
self.Wvvvo += lib.einsum('meni,mnab->aebi', tmp, tauaa)
tmp = tauaa = None
tmp = lib.einsum('menf,if->meni', OVOV, t1b) * .5
self.WVVVO += lib.einsum('meni,mnab->aebi', tmp, taubb)
tmp = taubb = None
tmp = lib.einsum('menf,if->meni', eris_ovOV, t1b)
self.WvvVO += lib.einsum('meni,mnab->aebi', tmp, tauab)
tmp = lib.einsum('nfme,if->meni', eris_ovOV, t1a)
self.WVVvo += lib.einsum('meni,nmba->aebi', tmp, tauab)
tauab = None
ovov = OVOV = eris_ovov = eris_OVOV = eris_ovOV = None
eris_ovvv = eris.get_ovvv(slice(None))
ovvv = eris_ovvv - eris_ovvv.transpose(0,3,2,1)
tmp = lib.einsum('mebf,if->mebi', ovvv, t1a)
tmp = lib.einsum('mebi,ma->aebi', tmp, t1a)
self.Wvvvo -= tmp - tmp.transpose(2,1,0,3)
tmp = eris_ovvv = ovvv = None
eris_OVVV = eris.get_OVVV(slice(None))
OVVV = eris_OVVV - eris_OVVV.transpose(0,3,2,1)
tmp = lib.einsum('mebf,if->mebi', OVVV, t1b)
tmp = lib.einsum('mebi,ma->aebi', tmp, t1b)
self.WVVVO -= tmp - tmp.transpose(2,1,0,3)
tmp = eris_OVVV = OVVV = None
eris_ovVV = eris.get_ovVV(slice(None))
eris_OVvv = eris.get_OVvv(slice(None))
tmpaabb = lib.einsum('mebf,if->mebi', eris_ovVV, t1b)
tmpbaab = lib.einsum('mebf,ie->mfbi', eris_OVvv, t1b)
tmp = lib.einsum('mebi,ma->aebi', tmpaabb, t1a)
tmp += lib.einsum('mfbi,ma->bfai', tmpbaab, t1b)
self.WvvVO -= tmp
tmp = tmpaabb = tmpbaab = None
tmpbbaa = lib.einsum('mebf,if->mebi', eris_OVvv, t1a)
tmpabba = lib.einsum('mebf,ie->mfbi', eris_ovVV, t1a)
tmp = lib.einsum('mebi,ma->aebi', tmpbbaa, t1b)
tmp += lib.einsum('mfbi,ma->bfai', tmpabba, t1a)
self.WVVvo -= tmp
tmp = tmpbbaa = tmpabba = None
eris_ovVV = eris_OVvv = None
# The contribution of Wvvvv end
self.made_ea_imds = True
logger.timer_debug1(self, 'EOM-UCCSD EA intermediates', *cput0)
return self
def make_ee(self):
cput0 = (logger.process_clock(), logger.perf_counter())
log = logger.Logger(self.stdout, self.verbose)
t1, t2, eris = self.t1, self.t2, self.eris
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
dtype = np.result_type(t1a, t1b, t2aa, t2ab, t2bb)
fooa = eris.focka[:nocca,:nocca]
foob = eris.fockb[:noccb,:noccb]
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
fvva = eris.focka[nocca:,nocca:]
fvvb = eris.fockb[noccb:,noccb:]
self.Fooa = np.zeros((nocca,nocca), dtype=dtype)
self.Foob = np.zeros((noccb,noccb), dtype=dtype)
self.Fvva = np.zeros((nvira,nvira), dtype=dtype)
self.Fvvb = np.zeros((nvirb,nvirb), dtype=dtype)
wovvo = np.zeros((nocca,nvira,nvira,nocca), dtype=dtype)
wOVVO = np.zeros((noccb,nvirb,nvirb,noccb), dtype=dtype)
woVvO = np.zeros((nocca,nvirb,nvira,noccb), dtype=dtype)
woVVo = np.zeros((nocca,nvirb,nvirb,nocca), dtype=dtype)
wOvVo = np.zeros((noccb,nvira,nvirb,nocca), dtype=dtype)
wOvvO = np.zeros((noccb,nvira,nvira,noccb), dtype=dtype)
wovoo = np.zeros((nocca,nvira,nocca,nocca), dtype=dtype)
wOVOO = np.zeros((noccb,nvirb,noccb,noccb), dtype=dtype)
woVoO = np.zeros((nocca,nvirb,nocca,noccb), dtype=dtype)
wOvOo = np.zeros((noccb,nvira,noccb,nocca), dtype=dtype)
tauaa, tauab, taubb = uccsd.make_tau(t2, t1, t1)
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:ovvv = eris_ovvv - eris_ovvv.transpose(0,3,2,1)
#:self.Fvva = np.einsum('mf,mfae->ae', t1a, ovvv)
#:self.wovvo = lib.einsum('jf,mebf->mbej', t1a, ovvv)
#:self.wovoo = 0.5 * lib.einsum('mebf,ijef->mbij', eris_ovvv, tauaa)
#:self.wovoo -= 0.5 * lib.einsum('mfbe,ijef->mbij', eris_ovvv, tauaa)
mem_now = lib.current_memory()[0]
max_memory = max(0, lib.param.MAX_MEMORY - mem_now)
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira**3*3))))
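        # Block over the occupied index so that roughly three nvira**3 work
        # arrays (8 bytes per element) fit within the memory still available
        # under lib.param.MAX_MEMORY.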
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = eris.get_ovvv(slice(p0,p1)) # ovvv = eris.ovvv[p0:p1]
ovvv = ovvv - ovvv.transpose(0,3,2,1)
self.Fvva += np.einsum('mf,mfae->ae', t1a[p0:p1], ovvv)
wovvo[p0:p1] = lib.einsum('jf,mebf->mbej', t1a, ovvv)
wovoo[p0:p1] = 0.5 * lib.einsum('mebf,ijef->mbij', ovvv, tauaa)
ovvv = None
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:OVVV = eris_OVVV - eris_OVVV.transpose(0,3,2,1)
#:self.Fvvb = np.einsum('mf,mfae->ae', t1b, OVVV)
#:self.wOVVO = lib.einsum('jf,mebf->mbej', t1b, OVVV)
#:self.wOVOO = 0.5 * lib.einsum('mebf,ijef->mbij', OVVV, taubb)
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb**3*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = eris.get_OVVV(slice(p0,p1)) # OVVV = eris.OVVV[p0:p1]
OVVV = OVVV - OVVV.transpose(0,3,2,1)
self.Fvvb += np.einsum('mf,mfae->ae', t1b[p0:p1], OVVV)
wOVVO[p0:p1] = lib.einsum('jf,mebf->mbej', t1b, OVVV)
wOVOO[p0:p1] = 0.5 * lib.einsum('mebf,ijef->mbij', OVVV, taubb)
OVVV = None
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:self.Fvvb += np.einsum('mf,mfAE->AE', t1a, eris_ovVV)
#:self.woVvO = lib.einsum('JF,meBF->mBeJ', t1b, eris_ovVV)
#:self.woVVo = lib.einsum('jf,mfBE->mBEj',-t1a, eris_ovVV)
#:self.woVoO = 0.5 * lib.einsum('meBF,iJeF->mBiJ', eris_ovVV, tauab)
#:self.woVoO += 0.5 * lib.einsum('mfBE,iJfE->mBiJ', eris_ovVV, tauab)
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira*nvirb**2*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
self.Fvvb += np.einsum('mf,mfAE->AE', t1a[p0:p1], ovVV)
woVvO[p0:p1] = lib.einsum('JF,meBF->mBeJ', t1b, ovVV)
woVVo[p0:p1] = lib.einsum('jf,mfBE->mBEj',-t1a, ovVV)
woVoO[p0:p1] = 0.5 * lib.einsum('meBF,iJeF->mBiJ', ovVV, tauab)
woVoO[p0:p1]+= 0.5 * lib.einsum('mfBE,iJfE->mBiJ', ovVV, tauab)
ovVV = None
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:self.Fvva += np.einsum('MF,MFae->ae', t1b, eris_OVvv)
#:self.wOvVo = lib.einsum('jf,MEbf->MbEj', t1a, eris_OVvv)
#:self.wOvvO = lib.einsum('JF,MFbe->MbeJ',-t1b, eris_OVvv)
#:self.wOvOo = 0.5 * lib.einsum('MEbf,jIfE->MbIj', eris_OVvv, tauab)
#:self.wOvOo += 0.5 * lib.einsum('MFbe,jIeF->MbIj', eris_OVvv, tauab)
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb*nvira**2*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
self.Fvva += np.einsum('MF,MFae->ae', t1b[p0:p1], OVvv)
wOvVo[p0:p1] = lib.einsum('jf,MEbf->MbEj', t1a, OVvv)
wOvvO[p0:p1] = lib.einsum('JF,MFbe->MbeJ',-t1b, OVvv)
wOvOo[p0:p1] = 0.5 * lib.einsum('MEbf,jIfE->MbIj', OVvv, tauab)
wOvOo[p0:p1]+= 0.5 * lib.einsum('MFbe,jIeF->MbIj', OVvv, tauab)
OVvv = None
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
self.Fova = np.einsum('nf,menf->me', t1a, ovov)
self.Fova+= np.einsum('NF,meNF->me', t1b, eris_ovOV)
self.Fova += fova
self.Fovb = np.einsum('nf,menf->me', t1b, OVOV)
self.Fovb+= np.einsum('nf,nfME->ME', t1a, eris_ovOV)
self.Fovb += fovb
tilaa, tilab, tilbb = uccsd.make_tau(t2,t1,t1,fac=0.5)
self.Fooa = lib.einsum('inef,menf->mi', tilaa, eris_ovov)
self.Fooa += lib.einsum('iNeF,meNF->mi', tilab, eris_ovOV)
self.Foob = lib.einsum('inef,menf->mi', tilbb, eris_OVOV)
self.Foob += lib.einsum('nIfE,nfME->MI', tilab, eris_ovOV)
self.Fvva -= lib.einsum('mnaf,menf->ae', tilaa, eris_ovov)
self.Fvva -= lib.einsum('mNaF,meNF->ae', tilab, eris_ovOV)
self.Fvvb -= lib.einsum('mnaf,menf->ae', tilbb, eris_OVOV)
self.Fvvb -= lib.einsum('nMfA,nfME->AE', tilab, eris_ovOV)
wovvo -= lib.einsum('jnfb,menf->mbej', t2aa, ovov)
wovvo += lib.einsum('jNbF,meNF->mbej', t2ab, eris_ovOV)
wOVVO -= lib.einsum('jnfb,menf->mbej', t2bb, OVOV)
wOVVO += lib.einsum('nJfB,nfME->MBEJ', t2ab, eris_ovOV)
woVvO += lib.einsum('nJfB,menf->mBeJ', t2ab, ovov)
woVvO -= lib.einsum('JNFB,meNF->mBeJ', t2bb, eris_ovOV)
wOvVo -= lib.einsum('jnfb,nfME->MbEj', t2aa, eris_ovOV)
wOvVo += lib.einsum('jNbF,MENF->MbEj', t2ab, OVOV)
woVVo += lib.einsum('jNfB,mfNE->mBEj', t2ab, eris_ovOV)
wOvvO += lib.einsum('nJbF,neMF->MbeJ', t2ab, eris_ovOV)
eris_ovoo = np.asarray(eris.ovoo)
eris_OVOO = np.asarray(eris.OVOO)
eris_OVoo = np.asarray(eris.OVoo)
eris_ovOO = np.asarray(eris.ovOO)
self.Fooa += np.einsum('ne,nemi->mi', t1a, eris_ovoo)
self.Fooa -= np.einsum('ne,meni->mi', t1a, eris_ovoo)
self.Fooa += np.einsum('NE,NEmi->mi', t1b, eris_OVoo)
self.Foob += np.einsum('ne,nemi->mi', t1b, eris_OVOO)
self.Foob -= np.einsum('ne,meni->mi', t1b, eris_OVOO)
self.Foob += np.einsum('ne,neMI->MI', t1a, eris_ovOO)
eris_ovoo = eris_ovoo + np.einsum('nfme,jf->menj', eris_ovov, t1a)
eris_OVOO = eris_OVOO + np.einsum('nfme,jf->menj', eris_OVOV, t1b)
eris_OVoo = eris_OVoo + np.einsum('nfme,jf->menj', eris_ovOV, t1a)
eris_ovOO = eris_ovOO + np.einsum('menf,jf->menj', eris_ovOV, t1b)
ovoo = eris_ovoo - eris_ovoo.transpose(2,1,0,3)
OVOO = eris_OVOO - eris_OVOO.transpose(2,1,0,3)
wovvo += lib.einsum('nb,nemj->mbej', t1a, ovoo)
wOVVO += lib.einsum('nb,nemj->mbej', t1b, OVOO)
woVvO -= lib.einsum('NB,meNJ->mBeJ', t1b, eris_ovOO)
wOvVo -= lib.einsum('nb,MEnj->MbEj', t1a, eris_OVoo)
woVVo += lib.einsum('NB,NEmj->mBEj', t1b, eris_OVoo)
wOvvO += lib.einsum('nb,neMJ->MbeJ', t1a, eris_ovOO)
self.Fooa += fooa + 0.5*lib.einsum('me,ie->mi', self.Fova+fova, t1a)
self.Foob += foob + 0.5*lib.einsum('me,ie->mi', self.Fovb+fovb, t1b)
self.Fvva += fvva - 0.5*lib.einsum('me,ma->ae', self.Fova+fova, t1a)
self.Fvvb += fvvb - 0.5*lib.einsum('me,ma->ae', self.Fovb+fovb, t1b)
# 0 or 1 virtuals
eris_ovoo = np.asarray(eris.ovoo)
eris_OVOO = np.asarray(eris.OVOO)
eris_OVoo = np.asarray(eris.OVoo)
eris_ovOO = np.asarray(eris.ovOO)
ovoo = eris_ovoo - eris_ovoo.transpose(2,1,0,3)
OVOO = eris_OVOO - eris_OVOO.transpose(2,1,0,3)
woooo = lib.einsum('je,nemi->minj', t1a, ovoo)
wOOOO = lib.einsum('je,nemi->minj', t1b, OVOO)
wooOO = lib.einsum('JE,NEmi->miNJ', t1b, eris_OVoo)
woOOo = lib.einsum('je,meNI->mINj',-t1a, eris_ovOO)
tmpaa = lib.einsum('nemi,jnbe->mbij', ovoo, t2aa)
tmpaa+= lib.einsum('NEmi,jNbE->mbij', eris_OVoo, t2ab)
tmpbb = lib.einsum('nemi,jnbe->mbij', OVOO, t2bb)
tmpbb+= lib.einsum('neMI,nJeB->MBIJ', eris_ovOO, t2ab)
woVoO += lib.einsum('nemi,nJeB->mBiJ', ovoo, t2ab)
woVoO += lib.einsum('NEmi,JNBE->mBiJ', eris_OVoo, t2bb)
woVoO -= lib.einsum('meNI,jNeB->mBjI', eris_ovOO, t2ab)
wOvOo += lib.einsum('NEMI,jNbE->MbIj', OVOO, t2ab)
wOvOo += lib.einsum('neMI,jnbe->MbIj', eris_ovOO, t2aa)
wOvOo -= lib.einsum('MEni,nJbE->MbJi', eris_OVoo, t2ab)
wovoo += tmpaa - tmpaa.transpose(0,1,3,2)
wOVOO += tmpbb - tmpbb.transpose(0,1,3,2)
self.wooov = np.array( ovoo.transpose(2,3,0,1), dtype=dtype)
self.wOOOV = np.array( OVOO.transpose(2,3,0,1), dtype=dtype)
self.wooOV = np.array(eris_OVoo.transpose(2,3,0,1), dtype=dtype)
self.wOOov = np.array(eris_ovOO.transpose(2,3,0,1), dtype=dtype)
#X self.wOooV =-np.array(eris_OVoo.transpose(0,3,2,1), dtype=dtype)
#X self.woOOv =-np.array(eris_ovOO.transpose(0,3,2,1), dtype=dtype)
eris_ovoo = eris_OVOO = eris_ovOO = eris_OVoo = None
woooo += np.asarray(eris.oooo)
wOOOO += np.asarray(eris.OOOO)
wooOO += np.asarray(eris.ooOO)
self.woooo = woooo - woooo.transpose(0,3,2,1)
self.wOOOO = wOOOO - wOOOO.transpose(0,3,2,1)
self.wooOO = wooOO - woOOo.transpose(0,3,2,1)
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
tauaa, tauab, taubb = uccsd.make_tau(t2,t1,t1)
self.woooo += 0.5*lib.einsum('ijef,menf->minj', tauaa, ovov)
self.wOOOO += 0.5*lib.einsum('ijef,menf->minj', taubb, OVOV)
self.wooOO += lib.einsum('iJeF,meNF->miNJ', tauab, eris_ovOV)
self.wooov += lib.einsum('if,mfne->mine', t1a, ovov)
self.wOOOV += lib.einsum('if,mfne->mine', t1b, OVOV)
self.wooOV += lib.einsum('if,mfNE->miNE', t1a, eris_ovOV)
self.wOOov += lib.einsum('IF,neMF->MIne', t1b, eris_ovOV)
#X self.wOooV -= lib.einsum('if,nfME->MinE', t1a, eris_ovOV)
#X self.woOOv -= lib.einsum('IF,meNF->mINe', t1b, eris_ovOV)
tmp1aa = lib.einsum('njbf,menf->mbej', t2aa, ovov)
tmp1aa-= lib.einsum('jNbF,meNF->mbej', t2ab, eris_ovOV)
tmp1bb = lib.einsum('njbf,menf->mbej', t2bb, OVOV)
tmp1bb-= lib.einsum('nJfB,nfME->MBEJ', t2ab, eris_ovOV)
tmp1ab = lib.einsum('NJBF,meNF->mBeJ', t2bb, eris_ovOV)
tmp1ab-= lib.einsum('nJfB,menf->mBeJ', t2ab, ovov)
tmp1ba = lib.einsum('njbf,nfME->MbEj', t2aa, eris_ovOV)
tmp1ba-= lib.einsum('jNbF,MENF->MbEj', t2ab, OVOV)
tmp1abba =-lib.einsum('jNfB,mfNE->mBEj', t2ab, eris_ovOV)
tmp1baab =-lib.einsum('nJbF,neMF->MbeJ', t2ab, eris_ovOV)
tmpaa = lib.einsum('ie,mbej->mbij', t1a, tmp1aa)
tmpbb = lib.einsum('ie,mbej->mbij', t1b, tmp1bb)
tmpab = lib.einsum('ie,mBeJ->mBiJ', t1a, tmp1ab)
tmpab-= lib.einsum('IE,mBEj->mBjI', t1b, tmp1abba)
tmpba = lib.einsum('IE,MbEj->MbIj', t1b, tmp1ba)
tmpba-= lib.einsum('ie,MbeJ->MbJi', t1a, tmp1baab)
wovoo -= tmpaa - tmpaa.transpose(0,1,3,2)
wOVOO -= tmpbb - tmpbb.transpose(0,1,3,2)
woVoO -= tmpab
wOvOo -= tmpba
eris_ovov = eris_OVOV = eris_ovOV = None
eris_ovoo = np.asarray(eris.ovoo)
eris_OVOO = np.asarray(eris.OVOO)
eris_ovOO = np.asarray(eris.ovOO)
eris_OVoo = np.asarray(eris.OVoo)
wovoo += eris_ovoo.transpose(3,1,2,0) - eris_ovoo.transpose(2,1,0,3)
wOVOO += eris_OVOO.transpose(3,1,2,0) - eris_OVOO.transpose(2,1,0,3)
woVoO += eris_OVoo.transpose(3,1,2,0)
wOvOo += eris_ovOO.transpose(3,1,2,0)
eris_ovoo = eris_OVOO = eris_ovOO = eris_OVoo = None
eris_ovvo = np.asarray(eris.ovvo)
eris_OVVO = np.asarray(eris.OVVO)
eris_OVvo = np.asarray(eris.OVvo)
eris_ovVO = np.asarray(eris.ovVO)
eris_oovv = np.asarray(eris.oovv)
eris_OOVV = np.asarray(eris.OOVV)
eris_OOvv = np.asarray(eris.OOvv)
eris_ooVV = np.asarray(eris.ooVV)
wovvo += eris_ovvo.transpose(0,2,1,3)
wOVVO += eris_OVVO.transpose(0,2,1,3)
woVvO += eris_ovVO.transpose(0,2,1,3)
wOvVo += eris_OVvo.transpose(0,2,1,3)
wovvo -= eris_oovv.transpose(0,2,3,1)
wOVVO -= eris_OOVV.transpose(0,2,3,1)
woVVo -= eris_ooVV.transpose(0,2,3,1)
wOvvO -= eris_OOvv.transpose(0,2,3,1)
tmpaa = lib.einsum('ie,mebj->mbij', t1a, eris_ovvo)
tmpbb = lib.einsum('ie,mebj->mbij', t1b, eris_OVVO)
tmpaa-= lib.einsum('ie,mjbe->mbij', t1a, eris_oovv)
tmpbb-= lib.einsum('ie,mjbe->mbij', t1b, eris_OOVV)
woVoO += lib.einsum('ie,meBJ->mBiJ', t1a, eris_ovVO)
woVoO -= lib.einsum('IE,mjBE->mBjI',-t1b, eris_ooVV)
wOvOo += lib.einsum('IE,MEbj->MbIj', t1b, eris_OVvo)
wOvOo -= lib.einsum('ie,MJbe->MbJi',-t1a, eris_OOvv)
wovoo += tmpaa - tmpaa.transpose(0,1,3,2)
wOVOO += tmpbb - tmpbb.transpose(0,1,3,2)
wovoo -= lib.einsum('me,ijbe->mbij', self.Fova, t2aa)
wOVOO -= lib.einsum('me,ijbe->mbij', self.Fovb, t2bb)
woVoO += lib.einsum('me,iJeB->mBiJ', self.Fova, t2ab)
wOvOo += lib.einsum('ME,jIbE->MbIj', self.Fovb, t2ab)
wovoo -= lib.einsum('nb,minj->mbij', t1a, self.woooo)
wOVOO -= lib.einsum('nb,minj->mbij', t1b, self.wOOOO)
woVoO -= lib.einsum('NB,miNJ->mBiJ', t1b, self.wooOO)
wOvOo -= lib.einsum('nb,njMI->MbIj', t1a, self.wooOO)
eris_ovvo = eris_OVVO = eris_OVvo = eris_ovVO = None
eris_oovv = eris_OOVV = eris_OOvv = eris_ooVV = None
self.saved = lib.H5TmpFile()
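        # Stash the large W intermediates in a temporary HDF5 file; the
        # attributes assigned below are h5py datasets, sliced on demand.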
self.saved['ovvo'] = wovvo
self.saved['OVVO'] = wOVVO
self.saved['oVvO'] = woVvO
self.saved['OvVo'] = wOvVo
self.saved['oVVo'] = woVVo
self.saved['OvvO'] = wOvvO
self.wovvo = self.saved['ovvo']
self.wOVVO = self.saved['OVVO']
self.woVvO = self.saved['oVvO']
self.wOvVo = self.saved['OvVo']
self.woVVo = self.saved['oVVo']
self.wOvvO = self.saved['OvvO']
self.saved['ovoo'] = wovoo
self.saved['OVOO'] = wOVOO
self.saved['oVoO'] = woVoO
self.saved['OvOo'] = wOvOo
self.wovoo = self.saved['ovoo']
self.wOVOO = self.saved['OVOO']
self.woVoO = self.saved['oVoO']
self.wOvOo = self.saved['OvOo']
self.wvovv = self.saved.create_dataset('vovv', (nvira,nocca,nvira,nvira), t1a.dtype.char)
self.wVOVV = self.saved.create_dataset('VOVV', (nvirb,noccb,nvirb,nvirb), t1a.dtype.char)
self.wvOvV = self.saved.create_dataset('vOvV', (nvira,noccb,nvira,nvirb), t1a.dtype.char)
self.wVoVv = self.saved.create_dataset('VoVv', (nvirb,nocca,nvirb,nvira), t1a.dtype.char)
# 3 or 4 virtuals
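        # The 3- and 4-virtual intermediates are assembled in slices and
        # written straight into the HDF5 datasets above to bound peak memory.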
eris_ovoo = np.asarray(eris.ovoo)
eris_ovov = np.asarray(eris.ovov)
eris_ovOV = np.asarray(eris.ovOV)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
eris_oovv = np.asarray(eris.oovv)
eris_ovvo = np.asarray(eris.ovvo)
oovv = eris_oovv - eris_ovvo.transpose(0,3,2,1)
eris_oovv = eris_ovvo = None
#:wvovv = .5 * lib.einsum('meni,mnab->eiab', eris_ovoo, tauaa)
#:wvovv -= .5 * lib.einsum('me,miab->eiab', self.Fova, t2aa)
#:tmp1aa = lib.einsum('nibf,menf->mbei', t2aa, ovov)
#:tmp1aa-= lib.einsum('iNbF,meNF->mbei', t2ab, eris_ovOV)
#:wvovv+= lib.einsum('ma,mbei->eiab', t1a, tmp1aa)
#:wvovv+= lib.einsum('ma,mibe->eiab', t1a, oovv)
for p0, p1 in lib.prange(0, nvira, nocca):
wvovv = .5*lib.einsum('meni,mnab->eiab', eris_ovoo[:,p0:p1], tauaa)
wvovv -= .5*lib.einsum('me,miab->eiab', self.Fova[:,p0:p1], t2aa)
tmp1aa = lib.einsum('nibf,menf->mbei', t2aa, ovov[:,p0:p1])
tmp1aa-= lib.einsum('iNbF,meNF->mbei', t2ab, eris_ovOV[:,p0:p1])
wvovv += lib.einsum('ma,mbei->eiab', t1a, tmp1aa)
wvovv += lib.einsum('ma,mibe->eiab', t1a, oovv[:,:,:,p0:p1])
self.wvovv[p0:p1] = wvovv
tmp1aa = None
eris_ovov = eris_ovoo = eris_ovOV = None
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:ovvv = eris_ovvv - eris_ovvv.transpose(0,3,2,1)
#:wvovv += lib.einsum('mebf,miaf->eiab', ovvv, t2aa)
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:wvovv += lib.einsum('MFbe,iMaF->eiab', eris_OVvv, t2ab)
#:wvovv += eris_ovvv.transpose(2,0,3,1).conj()
#:self.wvovv -= wvovv - wvovv.transpose(0,1,3,2)
mem_now = lib.current_memory()[0]
max_memory = max(0, lib.param.MAX_MEMORY - mem_now)
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira**3*6))))
for i0,i1 in lib.prange(0, nocca, blksize):
wvovv = self.wvovv[:,i0:i1]
for p0,p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
wvovv -= lib.einsum('MFbe,iMaF->eiab', OVvv, t2ab[i0:i1,p0:p1])
OVvv = None
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = eris.get_ovvv(slice(p0,p1)) # ovvv = eris.ovvv[p0:p1]
if p0 == i0:
wvovv += ovvv.transpose(2,0,3,1).conj()
ovvv = ovvv - ovvv.transpose(0,3,2,1)
wvovv -= lib.einsum('mebf,miaf->eiab', ovvv, t2aa[p0:p1,i0:i1])
ovvv = None
wvovv = wvovv - wvovv.transpose(0,1,3,2)
self.wvovv[:,i0:i1] = wvovv
eris_OVOO = np.asarray(eris.OVOO)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
eris_OOVV = np.asarray(eris.OOVV)
eris_OVVO = np.asarray(eris.OVVO)
OOVV = eris_OOVV - eris_OVVO.transpose(0,3,2,1)
eris_OOVV = eris_OVVO = None
#:wVOVV = .5*lib.einsum('meni,mnab->eiab', eris_OVOO, taubb)
#:wVOVV -= .5*lib.einsum('me,miab->eiab', self.Fovb, t2bb)
#:tmp1bb = lib.einsum('nibf,menf->mbei', t2bb, OVOV)
#:tmp1bb-= lib.einsum('nIfB,nfME->MBEI', t2ab, eris_ovOV)
#:wVOVV += lib.einsum('ma,mbei->eiab', t1b, tmp1bb)
#:wVOVV += lib.einsum('ma,mibe->eiab', t1b, OOVV)
for p0, p1 in lib.prange(0, nvirb, noccb):
wVOVV = .5*lib.einsum('meni,mnab->eiab', eris_OVOO[:,p0:p1], taubb)
wVOVV -= .5*lib.einsum('me,miab->eiab', self.Fovb[:,p0:p1], t2bb)
tmp1bb = lib.einsum('nibf,menf->mbei', t2bb, OVOV[:,p0:p1])
tmp1bb-= lib.einsum('nIfB,nfME->MBEI', t2ab, eris_ovOV[:,:,:,p0:p1])
wVOVV += lib.einsum('ma,mbei->eiab', t1b, tmp1bb)
wVOVV += lib.einsum('ma,mibe->eiab', t1b, OOVV[:,:,:,p0:p1])
self.wVOVV[p0:p1] = wVOVV
tmp1bb = None
eris_OVOV = eris_OVOO = eris_ovOV = None
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:OVVV = eris_OVVV - eris_OVVV.transpose(0,3,2,1)
#:wVOVV -= lib.einsum('MEBF,MIAF->EIAB', OVVV, t2bb)
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:wVOVV -= lib.einsum('mfBE,mIfA->EIAB', eris_ovVV, t2ab)
#:wVOVV += eris_OVVV.transpose(2,0,3,1).conj()
#:self.wVOVV += wVOVV - wVOVV.transpose(0,1,3,2)
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb**3*6))))
for i0,i1 in lib.prange(0, noccb, blksize):
wVOVV = self.wVOVV[:,i0:i1]
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
wVOVV -= lib.einsum('mfBE,mIfA->EIAB', ovVV, t2ab[p0:p1,i0:i1])
ovVV = None
for p0,p1 in lib.prange(0, noccb, blksize):
OVVV = eris.get_OVVV(slice(p0,p1)) # OVVV = eris.OVVV[p0:p1]
if p0 == i0:
wVOVV += OVVV.transpose(2,0,3,1).conj()
OVVV = OVVV - OVVV.transpose(0,3,2,1)
wVOVV -= lib.einsum('mebf,miaf->eiab', OVVV, t2bb[p0:p1,i0:i1])
OVVV = None
wVOVV = wVOVV - wVOVV.transpose(0,1,3,2)
self.wVOVV[:,i0:i1] = wVOVV
eris_ovOV = np.asarray(eris.ovOV)
eris_ovOO = np.asarray(eris.ovOO)
eris_OOvv = np.asarray(eris.OOvv)
eris_ovVO = np.asarray(eris.ovVO)
#:self.wvOvV = lib.einsum('meNI,mNaB->eIaB', eris_ovOO, tauab)
#:self.wvOvV -= lib.einsum('me,mIaB->eIaB', self.Fova, t2ab)
#:tmp1ab = lib.einsum('NIBF,meNF->mBeI', t2bb, eris_ovOV)
#:tmp1ab-= lib.einsum('nIfB,menf->mBeI', t2ab, ovov)
#:tmp1baab = lib.einsum('nIbF,neMF->MbeI', t2ab, eris_ovOV)
#:tmpab = lib.einsum('ma,mBeI->eIaB', t1a, tmp1ab)
#:tmpab+= lib.einsum('MA,MbeI->eIbA', t1b, tmp1baab)
#:tmpab-= lib.einsum('MA,MIbe->eIbA', t1b, eris_OOvv)
#:tmpab-= lib.einsum('ma,meBI->eIaB', t1a, eris_ovVO)
#:self.wvOvV += tmpab
for p0, p1 in lib.prange(0, nvira, nocca):
wvOvV = lib.einsum('meNI,mNaB->eIaB', eris_ovOO[:,p0:p1], tauab)
wvOvV -= lib.einsum('me,mIaB->eIaB', self.Fova[:,p0:p1], t2ab)
tmp1ab = lib.einsum('NIBF,meNF->mBeI', t2bb, eris_ovOV[:,p0:p1])
tmp1ab-= lib.einsum('nIfB,menf->mBeI', t2ab, ovov[:,p0:p1])
wvOvV+= lib.einsum('ma,mBeI->eIaB', t1a, tmp1ab)
tmp1ab = None
tmp1baab = lib.einsum('nIbF,neMF->MbeI', t2ab, eris_ovOV[:,p0:p1])
wvOvV+= lib.einsum('MA,MbeI->eIbA', t1b, tmp1baab)
tmp1baab = None
wvOvV-= lib.einsum('MA,MIbe->eIbA', t1b, eris_OOvv[:,:,:,p0:p1])
wvOvV-= lib.einsum('ma,meBI->eIaB', t1a, eris_ovVO[:,p0:p1])
self.wvOvV[p0:p1] = wvOvV
eris_ovOV = eris_ovOO = eris_OOvv = eris_ovVO = None
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:ovvv = eris_ovvv - eris_ovvv.transpose(0,3,2,1)
#:self.wvOvV -= lib.einsum('mebf,mIfA->eIbA', ovvv, t2ab)
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:self.wvOvV -= lib.einsum('meBF,mIaF->eIaB', eris_ovVV, t2ab)
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:self.wvOvV -= lib.einsum('MFbe,MIAF->eIbA', eris_OVvv, t2bb)
#:self.wvOvV += eris_OVvv.transpose(2,0,3,1).conj()
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira**3*6))))
for i0,i1 in lib.prange(0, noccb, blksize):
wvOvV = self.wvOvV[:,i0:i1]
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
wvOvV -= lib.einsum('meBF,mIaF->eIaB', ovVV, t2ab[p0:p1,i0:i1])
ovVV = None
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = eris.get_ovvv(slice(p0,p1)) # ovvv = eris.ovvv[p0:p1]
ovvv = ovvv - ovvv.transpose(0,3,2,1)
wvOvV -= lib.einsum('mebf,mIfA->eIbA',ovvv, t2ab[p0:p1,i0:i1])
ovvv = None
self.wvOvV[:,i0:i1] = wvOvV
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb*nvira**2*3))))
for i0,i1 in lib.prange(0, noccb, blksize):
wvOvV = self.wvOvV[:,i0:i1]
for p0,p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
if p0 == i0:
wvOvV += OVvv.transpose(2,0,3,1).conj()
wvOvV -= lib.einsum('MFbe,MIAF->eIbA', OVvv, t2bb[p0:p1,i0:i1])
OVvv = None
self.wvOvV[:,i0:i1] = wvOvV
eris_ovOV = np.asarray(eris.ovOV)
eris_OVoo = np.asarray(eris.OVoo)
eris_ooVV = np.asarray(eris.ooVV)
eris_OVvo = np.asarray(eris.OVvo)
#:self.wVoVv = lib.einsum('MEni,nMbA->EiAb', eris_OVoo, tauab)
#:self.wVoVv -= lib.einsum('ME,iMbA->EiAb', self.Fovb, t2ab)
#:tmp1ba = lib.einsum('nibf,nfME->MbEi', t2aa, eris_ovOV)
#:tmp1ba-= lib.einsum('iNbF,MENF->MbEi', t2ab, OVOV)
#:tmp1abba = lib.einsum('iNfB,mfNE->mBEi', t2ab, eris_ovOV)
#:tmpba = lib.einsum('MA,MbEi->EiAb', t1b, tmp1ba)
#:tmpba+= lib.einsum('ma,mBEi->EiBa', t1a, tmp1abba)
#:tmpba-= lib.einsum('ma,miBE->EiBa', t1a, eris_ooVV)
#:tmpba-= lib.einsum('MA,MEbi->EiAb', t1b, eris_OVvo)
#:self.wVoVv += tmpba
for p0, p1 in lib.prange(0, nvirb, noccb):
wVoVv = lib.einsum('MEni,nMbA->EiAb', eris_OVoo[:,p0:p1], tauab)
wVoVv -= lib.einsum('ME,iMbA->EiAb', self.Fovb[:,p0:p1], t2ab)
tmp1ba = lib.einsum('nibf,nfME->MbEi', t2aa, eris_ovOV[:,:,:,p0:p1])
tmp1ba-= lib.einsum('iNbF,MENF->MbEi', t2ab, OVOV[:,p0:p1])
wVoVv += lib.einsum('MA,MbEi->EiAb', t1b, tmp1ba)
tmp1ba = None
tmp1abba = lib.einsum('iNfB,mfNE->mBEi', t2ab, eris_ovOV[:,:,:,p0:p1])
wVoVv += lib.einsum('ma,mBEi->EiBa', t1a, tmp1abba)
tmp1abba = None
wVoVv -= lib.einsum('ma,miBE->EiBa', t1a, eris_ooVV[:,:,:,p0:p1])
wVoVv -= lib.einsum('MA,MEbi->EiAb', t1b, eris_OVvo[:,p0:p1])
self.wVoVv[p0:p1] = wVoVv
eris_ovOV = eris_OVoo = eris_ooVV = eris_OVvo = None
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:OVVV = eris_OVVV - eris_OVVV.transpose(0,3,2,1)
#:self.wVoVv -= lib.einsum('MEBF,iMaF->EiBa', OVVV, t2ab)
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:self.wVoVv -= lib.einsum('MEbf,iMfA->EiAb', eris_OVvv, t2ab)
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:self.wVoVv -= lib.einsum('mfBE,miaf->EiBa', eris_ovVV, t2aa)
#:self.wVoVv += eris_ovVV.transpose(2,0,3,1).conj()
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb**3*6))))
for i0,i1 in lib.prange(0, nocca, blksize):
wVoVv = self.wVoVv[:,i0:i1]
for p0,p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
wVoVv -= lib.einsum('MEbf,iMfA->EiAb', OVvv, t2ab[i0:i1,p0:p1])
OVvv = None
for p0,p1 in lib.prange(0, noccb, blksize):
OVVV = eris.get_OVVV(slice(p0,p1)) # OVVV = eris.OVVV[p0:p1]
OVVV = OVVV - OVVV.transpose(0,3,2,1)
wVoVv -= lib.einsum('MEBF,iMaF->EiBa', OVVV, t2ab[i0:i1,p0:p1])
OVVV = None
self.wVoVv[:,i0:i1] = wVoVv
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira*nvirb**2*3))))
for i0,i1 in lib.prange(0, nocca, blksize):
wVoVv = self.wVoVv[:,i0:i1]
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
if p0 == i0:
wVoVv += ovVV.transpose(2,0,3,1).conj()
wVoVv -= lib.einsum('mfBE,miaf->EiBa', ovVV, t2aa[p0:p1,i0:i1])
ovVV = None
self.wVoVv[:,i0:i1] = wVoVv
self.made_ee_imds = True
log.timer('EOM-UCCSD EE intermediates', *cput0)
def rand_mf(mol, seed=1):
from pyscf import scf
from pyscf import gto
from pyscf import lo
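    # NOTE: the incoming mol argument is ignored; a fresh water/STO-3G
    # molecule is built locally.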
mol = gto.Mole()
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = 'sto-3g'
mol.verbose = 0
mol.spin = 0
mol.build()
np.random.seed(seed)
mf = scf.UHF(mol).run(conv_tol=1e-14)
nmo = mol.nao_nr()
mf.mo_occ = np.zeros((2,nmo))
mf.mo_occ[0,:4] = 1
mf.mo_occ[1,:2] = 1
mf.mo_energy = np.arange(nmo) + np.random.random((2,nmo)) * .3
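    # Shift the virtual orbital energies up so there is a clear gap between
    # the (randomized) occupied and virtual levels.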
mf.mo_energy[mf.mo_occ == 0] += 2
mo = np.random.random((2,nmo,nmo))
s = mf.get_ovlp()
mf.mo_coeff = np.empty_like(mo)
mf.mo_coeff[0] = lo.orth.vec_lowdin(mo[0], s)
mf.mo_coeff[1] = lo.orth.vec_lowdin(mo[1], s)
return mf
def rand_cc_t1_t2(mf, seed=1):
from pyscf import ao2mo
from pyscf.cc import uccsd
mycc = uccsd.UCCSD(mf)
nocca, noccb = mycc.nocc
nmoa, nmob = mycc.nmo
nvira, nvirb = nmoa - nocca, nmob - noccb
def my_ao2mo(mo):
eris = ao2mo.kernel(mycc._scf._eri, mo, compact=False)
eris = ao2mo.restore(1, eris, mf.mol.nao_nr())
eris = eris + np.cos(eris)*1j
eris = eris + eris.transpose(1, 0, 3, 2)
eris = eris + eris.conj().transpose(2, 3, 0, 1)
return eris
eris = uccsd._make_eris_incore(mycc)#, ao2mofn=my_ao2mo)
np.random.seed(seed)
t1a = (np.random.random((nocca,nvira)) +
np.random.random((nocca,nvira))*1j - .5 - .5j)
t1b = (np.random.random((noccb,nvirb)) +
np.random.random((noccb,nvirb))*1j - .5 - .5j)
t2aa = (np.random.random((nocca,nocca,nvira,nvira)) +
np.random.random((nocca,nocca,nvira,nvira))*1j - .5 - .5j)
t2aa = t2aa - t2aa.transpose(1, 0, 2, 3)
t2aa = t2aa - t2aa.transpose(0, 1, 3, 2)
t2ab = (np.random.random((nocca,noccb,nvira,nvirb)) +
np.random.random((nocca,noccb,nvira,nvirb))*1j - .5 - .5j)
t2bb = (np.random.random((noccb,noccb,nvirb,nvirb)) +
np.random.random((noccb,noccb,nvirb,nvirb))*1j - .5 - .5j)
t2bb = t2bb - t2bb.transpose(1, 0, 2, 3)
t2bb = t2bb - t2bb.transpose(0, 1, 3, 2)
t1 = (t1a, t1b)
t2 = (t2aa, t2ab, t2bb)
return mycc, eris, t1, t2
def enforce_symm_2p_spin(r1, r2, orbspin, excitation):
assert(excitation in ['ip', 'ea'])
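    # Antisymmetrize the repeated-particle index pair of r2 and zero the
    # spin-forbidden blocks, so the random vector has the same symmetry as a
    # physical IP (i,j,a) or EA (i,a,b) amplitude in the spin-orbital basis.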
if excitation == 'ip':
nocc, nvir = r2.shape[1:]
elif excitation == 'ea':
nocc, nvir = r2.shape[:2]
else:
raise NotImplementedError
idxoa = np.where(orbspin[:nocc] == 0)[0]
idxob = np.where(orbspin[:nocc] == 1)[0]
idxva = np.where(orbspin[nocc:] == 0)[0]
idxvb = np.where(orbspin[nocc:] == 1)[0]
idxoaa = idxoa[:,None] * nocc + idxoa
idxobb = idxob[:,None] * nocc + idxob
idxvaa = idxva[:,None] * nvir + idxva
idxvbb = idxvb[:,None] * nvir + idxvb
if excitation == 'ip':
r2 = r2 - r2.transpose(1, 0, 2)
r2 = r2.reshape(nocc**2, nvir)
r2[idxobb.ravel()[:, None], idxva.ravel()] = 0.0
r2[idxoaa.ravel()[:, None], idxvb.ravel()] = 0.0
r2 = r2.reshape(nocc, nocc, nvir)
if excitation == 'ea':
r2 = r2 - r2.transpose(0, 2, 1)
r2 = r2.reshape(nocc, nvir**2)
r2[idxoa.ravel(), idxvbb.ravel()[:, None]] = 0.0
r2[idxob.ravel(), idxvaa.ravel()[:, None]] = 0.0
r2 = r2.reshape(nocc, nvir, nvir)
return r1, r2
def enforce_symm_2p_spin_ip(r1, r2, orbspin):
return enforce_symm_2p_spin(r1, r2, orbspin, 'ip')
def enforce_symm_2p_spin_ea(r1, r2, orbspin):
return enforce_symm_2p_spin(r1, r2, orbspin, 'ea')
if __name__ == '__main__':
from pyscf import gto
#from pyscf import scf
#from pyscf.cc import rccsd
mol = gto.Mole()
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = 'sto-3g'
mol.verbose = 0
mol.spin = 0
mol.build()
mf = rand_mf(mol)
mycc, eris, t1, t2 = rand_cc_t1_t2(mf)
mycc.t1 = t1
mycc.t2 = t2
nocca, noccb = mycc.nocc
nmoa, nmob = mycc.nmo
nvira, nvirb = nmoa - nocca, nmob - noccb
nocc = nocca + noccb
nvir = nvira + nvirb
nmo = nocc + nvir
def my_ao2mo(mo):
nao, nmo = mo.shape
orbspin = mo.orbspin
# eris = ao2mo.kernel(mygcc._scf._eri, mo_a + mo_b)
# sym_forbid = (orbspin[:,None] != orbspin)[np.tril_indices(nmo)]
# eris[sym_forbid,:] = 0
# eris[:,sym_forbid] = 0
# eris = ao2mo.restore(1, eris, nao)
# return eris
eris =(np.random.random((nmo,nmo,nmo,nmo)) +
np.random.random((nmo,nmo,nmo,nmo)) * 1j)
eris = eris + np.cos(eris)*1j
eris = eris + eris.transpose(1, 0, 3, 2)
eris = eris + eris.conj().transpose(2, 3, 0, 1)
eris[orbspin[:,None] != orbspin] = 0
eris[:,:,orbspin[:,None] != orbspin] = 0
return eris
import pyscf.cc.addons
from pyscf.cc import gccsd
mygcc = pyscf.cc.addons.convert_to_gccsd(mycc)
mygcc._ucc = mycc
mygcc._ucc_eris = eris
eris = gccsd._make_eris_incore(mygcc)#, ao2mofn=my_ao2mo)
orbspin = eris.orbspin
## EOM-IP
myeom = EOMIP(mycc)
imds = myeom.make_imds()
np.random.seed(1)
r1 = np.random.rand(nocc)*1j + np.random.rand(nocc) - 0.5 - 0.5*1j
r2 = np.random.rand(nocc**2 * nvir)*1j + np.random.rand(nocc**2 * nvir) - 0.5 - 0.5*1j
r2 = r2.reshape(nocc, nocc, nvir)
r1, r2 = enforce_symm_2p_spin_ip(r1, r2, orbspin)
r1, r2 = spin2spatial_ip(r1, r2, orbspin)
vector = myeom.amplitudes_to_vector(r1, r2)
r1x, r2x = myeom.vector_to_amplitudes(vector)
print(abs(r1[0]-r1x[0]).max() < 1e-13 and
abs(r1[1]-r1x[1]).max() < 1e-13 and
abs(r2[0]-r2x[0]).max() < 1e-13 and
abs(r2[1]-r2x[1]).max() < 1e-13 and
abs(r2[2]-r2x[2]).max() < 1e-13 and
abs(r2[3]-r2x[3]).max() < 1e-13)
Hvector = myeom.matvec(vector, imds=imds)
print('ip', lib.finger(Hvector) - (21.67127462317093-19.068987454261908j))
print('diag', lib.finger(myeom.get_diag()) - (-9.6676217223549763+9.325219825942975j))
# EOM-EA
myeom = EOMEA(mycc)
imds = myeom.make_imds()
np.random.seed(1)
r1 = np.random.rand(nvir)*1j + np.random.rand(nvir) - 0.5 - 0.5*1j
r2 = np.random.rand(nocc * nvir**2)*1j + np.random.rand(nocc * nvir**2) - 0.5 - 0.5*1j
r2 = r2.reshape(nocc, nvir, nvir)
r1, r2 = enforce_symm_2p_spin_ea(r1, r2, orbspin)
r1, r2 = spin2spatial_ea(r1, r2, orbspin)
vector = myeom.amplitudes_to_vector(r1, r2)
r1x, r2x = myeom.vector_to_amplitudes(vector)
print(abs(r1[0]-r1x[0]).max() < 1e-13 and
abs(r1[1]-r1x[1]).max() < 1e-13 and
abs(r2[0]-r2x[0]).max() < 1e-13 and
abs(r2[1]-r2x[1]).max() < 1e-13 and
abs(r2[2]-r2x[2]).max() < 1e-13 and
abs(r2[3]-r2x[3]).max() < 1e-13)
Hvector = myeom.matvec(vector, imds=imds)
print('ea', lib.finger(Hvector) - (6.5543877287461187-13.175055314063574j))
print('diag', lib.finger(myeom.get_diag()) - (-57.353207240857785+1.4052857730841204j))
mycc = uccsd.UCCSD(mol.UHF().run())
ecc, t1, t2 = mycc.kernel()
print(ecc - -0.04946750711013597)
e,v = mycc.ipccsd(nroots=6)
print(e[0] - 0.3092874511803249)
print(e[1] - 0.3092874511803249)
print(e[2] - 0.4011171373779585)
print(e[3] - 0.4011171373779585)
print(e[4] - 0.6107409208314764)
print(e[5] - 0.6107409208314764)
| sunqm/pyscf | pyscf/cc/eom_uccsd.py | Python | apache-2.0 | 124,148 | ["PySCF"] | f76fd934800ee28b38e22d1295b79245097dbdc55d57010e15f8d51bb402baf4 |
#
# Copyright (C) 2010-2011, 2011 Canonical Ltd. All Rights Reserved
#
# This file is part of txzookeeper.
#
# Authors:
# Kapil Thangavelu
#
# txzookeeper is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# txzookeeper is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with txzookeeper. If not, see <http://www.gnu.org/licenses/>.
#
import base64
import hashlib
from twisted.internet.defer import Deferred, maybeDeferred
from twisted.internet.base import DelayedCall
from twisted.python.failure import Failure
import zookeeper
from mocker import ANY, MATCH, ARGS
from txzookeeper.tests import ZookeeperTestCase, utils
from txzookeeper.client import (
ZookeeperClient, ZOO_OPEN_ACL_UNSAFE, ConnectionTimeoutException,
ConnectionException, NotConnectedException, ClientEvent)
PUBLIC_ACL = ZOO_OPEN_ACL_UNSAFE
def match_deferred(arg):
return isinstance(arg, Deferred)
DEFERRED_MATCH = MATCH(match_deferred)
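# Matches any Deferred argument in a recorded mocker expectation.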
class ClientTests(ZookeeperTestCase):
def setUp(self):
super(ClientTests, self).setUp()
self.client = ZookeeperClient("127.0.0.1:2181", 3000)
self.client2 = None
def tearDown(self):
if self.client.connected:
utils.deleteTree(handle=self.client.handle)
self.client.close()
if self.client2 and self.client2.connected:
self.client2.close()
super(ClientTests, self).tearDown()
def test_wb_connect_after_timeout(self):
"""
Test an odd error scenario. If the zookeeper client succeeds in
connecting after a timeout, the connection should be closed, as
the connect deferred has already fired.
"""
mock_client = self.mocker.patch(self.client)
mock_client.close()
def close_state():
# Ensure the client state variable is correct after the close call.
self.client.connected = False
self.mocker.call(close_state)
self.mocker.replay()
task = DelayedCall(1, lambda: 1, None, None, None, None)
task.called = True
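        # Simulate the connect-timeout DelayedCall having already fired.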
d = Deferred()
d.errback(ConnectionTimeoutException())
self.client._cb_connected(
task, d, None, zookeeper.CONNECTED_STATE, "/")
self.failUnlessFailure(d, ConnectionTimeoutException)
return d
def test_wb_reconnect_after_timeout_and_close(self):
"""
        Another odd error scenario: if a client instance has its
        connect and close methods invoked in succession multiple
        times, and a previous connect timed out, the callback of a
        previous connect can be invoked by a subsequent connect with
        a CONNECTING_STATE. Verify this does not attempt to fire the
        connect deferred again.
"""
d = Deferred()
d.callback(True)
task = DelayedCall(1, lambda: 1, None, None, None, None)
task.called = True
self.assertEqual(
self.client._cb_connected(
task, d, None, zookeeper.CONNECTING_STATE, ""),
None)
def test_connect(self):
"""
The client can connect to a zookeeper instance.
"""
d = self.client.connect()
def check_connected(client):
self.assertEquals(client.connected, True)
self.assertEquals(client.state, zookeeper.CONNECTED_STATE)
d.addCallback(check_connected)
return d
def test_close(self):
"""
Test that the connection is closed, also for the first
connection when the zookeeper handle is 0.
"""
def _fake_init(*_):
return 0
mock_init = self.mocker.replace("zookeeper.init")
mock_init(ARGS)
self.mocker.call(_fake_init)
def _fake_close(handle):
return zookeeper.OK
mock_close = self.mocker.replace("zookeeper.close")
mock_close(0)
self.mocker.call(_fake_close)
self.mocker.replay()
# Avoid unclean reactor by letting the callLater go through,
# but we do not care about the timeout.
def _silence_timeout(failure):
failure.trap(ConnectionTimeoutException)
self.client.connect(timeout=0).addErrback(_silence_timeout)
d = maybeDeferred(self.client.close)
def _verify(result):
self.mocker.verify()
d.addCallback(_verify)
return d
def test_client_event_repr(self):
event = ClientEvent(zookeeper.SESSION_EVENT,
zookeeper.EXPIRED_SESSION_STATE, '')
self.assertEqual(repr(event),
"<ClientEvent session at '' state: expired>")
def test_client_event_attributes(self):
event = ClientEvent(4, 'state', 'path')
self.assertEqual(event.type, 4)
self.assertEqual(event.connection_state, 'state')
self.assertEqual(event.path, 'path')
self.assertEqual(event, (4, 'state', 'path'))
def test_client_use_while_disconnected_returns_failure(self):
return self.assertFailure(
self.client.exists("/"), NotConnectedException)
def test_create_ephemeral_node_and_close_connection(self):
"""
The client can create transient nodes that are destroyed when the
client is closed and the session is destroyed on the zookeeper servers.
"""
d = self.client.connect()
def test_create_ephemeral_node(client):
d = self.client.create(
"/foobar-transient", "rabbit", flags=zookeeper.EPHEMERAL)
return d
def check_node_path(path):
self.assertEqual(path, "/foobar-transient")
return path
def close_connection(path):
return self.client.close()
def new_connection(close_result):
self.client2 = new_client = ZookeeperClient("127.0.0.1:2181")
return new_client.connect()
def check_node_doesnt_exist(connected):
self.assertRaises(
zookeeper.NoNodeException,
zookeeper.get,
connected.handle,
"/foobar-transient")
self.client2.close()
d.addCallback(test_create_ephemeral_node)
d.addCallback(check_node_path)
d.addCallback(close_connection)
d.addCallback(new_connection)
d.addCallback(check_node_doesnt_exist)
return d
def test_create_node(self):
"""
We can create a node in zookeeper, with a given path
"""
d = self.client.connect()
def create_ephemeral_node(connected):
d = self.client.create(
"/foobar", "rabbit", flags=zookeeper.EPHEMERAL)
return d
def verify_node_path_and_content(path):
self.assertEqual(path, "/foobar")
self.assertNotEqual(
zookeeper.exists(self.client.handle, path), None)
data, stat = zookeeper.get(self.client.handle, path)
self.assertEqual(data, "rabbit")
d.addCallback(create_ephemeral_node)
d.addCallback(verify_node_path_and_content)
return d
def test_create_persistent_node_and_close(self):
"""
        By default, the client creates persistent nodes that exist
        independently of the client session.
"""
d = self.client.connect()
        def create_persistent_node(client):
d = self.client.create(
"/foobar-persistent", "rabbit")
return d
def check_node_path(path):
self.assertEqual(path, "/foobar-persistent")
self.assertNotEqual(
zookeeper.exists(self.client.handle, path), None)
return path
def close_connection(path):
self.client.close()
self.client2 = new_client = ZookeeperClient("127.0.0.1:2181")
return new_client.connect()
def check_node_exists(client):
data, stat = zookeeper.get(client.handle, "/foobar-persistent")
self.assertEqual(data, "rabbit")
        d.addCallback(create_persistent_node)
d.addCallback(check_node_path)
d.addCallback(close_connection)
d.addCallback(check_node_exists)
return d
def test_get(self):
"""
The client can retrieve a node's data via its get method.
"""
d = self.client.connect()
def create_node(client):
d = self.client.create(
"/foobar-transient", "rabbit", flags=zookeeper.EPHEMERAL)
return d
def get_contents(path):
return self.client.get(path)
def verify_contents((data, stat)):
self.assertEqual(data, "rabbit")
d.addCallback(create_node)
d.addCallback(get_contents)
d.addCallback(verify_contents)
return d
def test_get_with_error(self):
"""
        On a get error, the deferred's errback is invoked with the exception.
"""
d = self.client.connect()
def get_contents(client):
return client.get("/foobar-transient")
def verify_failure(failure):
self.assertTrue(
isinstance(failure.value, zookeeper.NoNodeException))
def assert_failure(extra):
self.fail("get should have failed")
d.addCallback(get_contents)
        d.addCallback(assert_failure)
d.addErrback(verify_failure)
return d
def test_get_with_watcher(self):
"""
The client can specify a callable watcher when invoking get. The
watcher will be called back when the client path is modified in
another session.
"""
d = self.client.connect()
watch_deferred = Deferred()
def create_node(client):
return self.client.create("/foobar-watched", "rabbit")
def get_node(path):
data, watch = self.client.get_and_watch(path)
watch.chainDeferred(watch_deferred)
return data
def new_connection(data):
self.client2 = ZookeeperClient("127.0.0.1:2181")
return self.client2.connect()
def trigger_watch(client):
zookeeper.set(self.client2.handle, "/foobar-watched", "abc")
return watch_deferred
def verify_watch(event):
self.assertEqual(event.path, "/foobar-watched")
self.assertEqual(event.type, zookeeper.CHANGED_EVENT)
d.addCallback(create_node)
d.addCallback(get_node)
d.addCallback(new_connection)
d.addCallback(trigger_watch)
d.addCallback(verify_watch)
return d
def test_get_with_watcher_and_delete(self):
"""
The client can specify a callable watcher when invoking get. The
        watcher will be called back when the node is deleted in
another session.
"""
d = self.client.connect()
def create_node(client):
return self.client.create("/foobar-watched", "rabbit")
def get_node(path):
data, watch = self.client.get_and_watch(path)
return data.addCallback(lambda x: (watch,))
def new_connection((watch,)):
self.client2 = ZookeeperClient("127.0.0.1:2181")
return self.client2.connect().addCallback(
lambda x, y=None, z=None: (x, watch))
def trigger_watch((client, watch)):
zookeeper.delete(self.client2.handle, "/foobar-watched")
self.client2.close()
return watch
def verify_watch(event):
self.assertEqual(event.path, "/foobar-watched")
self.assertEqual(event.type, zookeeper.DELETED_EVENT)
d.addCallback(create_node)
d.addCallback(get_node)
d.addCallback(new_connection)
d.addCallback(trigger_watch)
d.addCallback(verify_watch)
return d
def test_delete(self):
"""
The client can delete a node via its delete method.
"""
d = self.client.connect()
def create_node(client):
return self.client.create(
"/foobar-transient", "rabbit", flags=zookeeper.EPHEMERAL)
def verify_exists(path):
self.assertNotEqual(
zookeeper.exists(self.client.handle, path), None)
return path
def delete_node(path):
return self.client.delete(path)
def verify_not_exists(*args):
self.assertEqual(
zookeeper.exists(self.client.handle, "/foobar-transient"),
None)
d.addCallback(create_node)
d.addCallback(verify_exists)
d.addCallback(delete_node)
d.addCallback(verify_not_exists)
return d
def test_exists_with_existing(self):
"""
The exists method returns node stat information for an existing node.
"""
d = self.client.connect()
def create_node(client):
return self.client.create(
"/foobar-transient", "rabbit", flags=zookeeper.EPHEMERAL)
def check_exists(path):
return self.client.exists(path)
def verify_exists(node_stat):
self.assertEqual(node_stat["dataLength"], 6)
self.assertEqual(node_stat["version"], 0)
d.addCallback(create_node)
d.addCallback(check_exists)
d.addCallback(verify_exists)
return d
def test_exists_with_error(self):
"""
On error exists invokes the errback with the exception.
"""
d = self.client.connect()
def inject_error(result_code, d, extra_codes=None, path=None):
error = SyntaxError()
d.errback(error)
return error
def check_exists(client):
mock_client = self.mocker.patch(client)
mock_client._check_result(
ANY, DEFERRED_MATCH, extra_codes=(zookeeper.NONODE,),
path="/zebra-moon")
self.mocker.call(inject_error)
self.mocker.replay()
return client.exists("/zebra-moon")
def verify_failure(failure):
self.assertTrue(isinstance(failure.value, SyntaxError))
d.addCallback(check_exists)
d.addErrback(verify_failure)
return d
def test_exists_with_nonexistant(self):
"""
        The exists method returns None when the node doesn't exist.
"""
d = self.client.connect()
def check_exists(client):
return self.client.exists("/abcdefg")
def verify_exists(node_stat):
self.assertEqual(node_stat, None)
d.addCallback(check_exists)
d.addCallback(verify_exists)
return d
def test_exist_watch_with_node_change(self):
"""
        Setting an exists watch on an existing node will also respond to
node changes.
"""
d = self.client.connect()
def create_node(client):
return client.create("/rome")
def check_exists(path):
existsd, w = self.client.exists_and_watch(path)
w.addCallback(node_watcher)
return existsd
def node_watcher(event):
self.assertEqual(event.type_name, "changed")
def verify_exists(node_stat):
self.assertTrue(node_stat)
return self.client.set("/rome", "magic")
d.addCallback(create_node)
d.addCallback(check_exists)
d.addCallback(verify_exists)
return d
def test_exists_with_watcher_and_close(self):
"""
        Closing a connection with a watch outstanding behaves correctly.
"""
d = self.client.connect()
def node_watcher(event):
client = getattr(self, "client", None)
if client is not None and client.connected:
self.fail("Client should be disconnected")
def create_node(client):
return client.create("/syracuse")
def check_exists(path):
# shouldn't fire till unit test cleanup
d, w = self.client.exists_and_watch(path)
w.addCallback(node_watcher)
return d
def verify_exists(result):
self.assertTrue(result)
d.addCallback(create_node)
d.addCallback(check_exists)
d.addCallback(verify_exists)
return d
def test_exists_with_nonexistant_watcher(self):
"""
The exists method can also be used to set an optional watcher on a
node. The watch can be set on a node that does not yet exist.
"""
d = self.client.connect()
node_path = "/animals"
watcher_deferred = Deferred()
def create_container(path):
return self.client.create(node_path, "")
def check_exists(path):
exists, watch = self.client.exists_and_watch(
"%s/wooly-mammoth" % node_path)
watch.chainDeferred(watcher_deferred)
return exists
def new_connection(node_stat):
self.assertFalse(node_stat)
self.client2 = ZookeeperClient("127.0.0.1:2181")
return self.client2.connect()
def create_node(client):
self.assertEqual(client.connected, True)
return self.client2.create(
"%s/wooly-mammoth" % node_path, "extinct")
def shim(path):
return watcher_deferred
def verify_watch(event):
self.assertEqual(event.path, "%s/wooly-mammoth" % node_path)
self.assertEqual(event.type, zookeeper.CREATED_EVENT)
d.addCallback(create_container)
d.addCallback(check_exists)
d.addCallback(new_connection)
d.addCallback(create_node)
d.addCallback(shim)
d.addCallback(verify_watch)
return d
def test_create_sequence_node(self):
"""
        The client can create monotonically increasing sequence nodes.
"""
d = self.client.connect()
def create_node(client):
return self.client.create("/seq-a")
def create_seq_node(path):
return self.client.create(
"/seq-a/seq-", flags=zookeeper.EPHEMERAL | zookeeper.SEQUENCE)
def get_children(path):
return self.client.get_children("/seq-a")
def verify_children(children):
self.assertEqual(children, ["seq-0000000000", "seq-0000000001"])
d.addCallback(create_node)
d.addCallback(create_seq_node)
d.addCallback(create_seq_node)
d.addCallback(get_children)
d.addCallback(verify_children)
return d
def test_create_duplicate_node(self):
"""
Attempting to create a node that already exists results in a failure.
"""
d = self.client.connect()
def create_node(client):
return self.client.create("/abc")
def create_duplicate(path):
return self.client.create("/abc")
def verify_fails(*args):
self.fail("Invoked Callback")
def verify_succeeds(failure):
self.assertTrue(failure)
self.assertEqual(failure.value.args, ("node exists",))
d.addCallback(create_node)
d.addCallback(create_duplicate)
d.addCallback(verify_fails)
d.addErrback(verify_succeeds)
return d
def test_delete_nonexistant_node(self):
"""
        Attempting to delete a node that does not exist results in a failure.
"""
d = self.client.connect()
def delete_node(client):
return client.delete("/abcd")
def verify_fails(*args):
self.fail("Invoked Callback")
def verify_succeeds(failure):
self.assertTrue(failure)
self.assertEqual(
failure.value.args, ("no node /abcd",))
d.addCallback(delete_node)
d.addCallback(verify_fails)
d.addErrback(verify_succeeds)
return d
def test_set(self):
"""
The client can be used to set contents of a node.
"""
d = self.client.connect()
def create_node(client):
return client.create("/zebra", "horse")
def set_node(path):
return self.client.set("/zebra", "mammal")
def verify_contents(junk):
self.assertEqual(zookeeper.get(self.client.handle, "/zebra")[0],
"mammal")
d.addCallback(create_node)
d.addCallback(set_node)
d.addCallback(verify_contents)
return d
def test_set_nonexistant(self):
"""
        If the client is used to set the contents of a nonexistent node,
        an error is raised.
"""
d = self.client.connect()
def set_node(client):
return client.set("/xy1")
def verify_fails(*args):
self.fail("Invoked Callback")
def verify_succeeds(failure):
self.assertTrue(failure)
            self.assertEqual(
                failure.value.args, ("no node /xy1",))
d.addCallback(set_node)
d.addCallback(verify_fails)
d.addErrback(verify_succeeds)
return d
def test_get_children(self):
d = self.client.connect()
def create_nodes(client):
zookeeper.create(
self.client.handle, "/tower", "", [PUBLIC_ACL], 0)
zookeeper.create(
self.client.handle, "/tower/london", "", [PUBLIC_ACL], 0)
zookeeper.create(
self.client.handle, "/tower/paris", "", [PUBLIC_ACL], 0)
return client
def get_children(client):
return client.get_children("/tower")
def verify_children(children):
self.assertEqual(children, ["paris", "london"])
d.addCallback(create_nodes)
d.addCallback(get_children)
d.addCallback(verify_children)
return d
def test_get_children_with_error(self):
"""If the result of an api call is an error, its propgated.
"""
d = self.client.connect()
def get_children(client):
            # Get the children of a nonexistent node
return client.get_children("/tower")
def verify_failure(failure):
self.assertTrue(isinstance(failure, Failure))
self.assertTrue(
isinstance(failure.value, zookeeper.NoNodeException))
d.addCallback(get_children)
d.addBoth(verify_failure)
return d
# seems to be a segfault on this one, must be running latest zk
def test_get_children_with_watch(self):
"""
The get_children method optionally takes a watcher callable which will
be notified when the node is modified, or a child deleted or added.
"""
d = self.client.connect()
watch_deferred = Deferred()
def create_node(client):
return client.create("/jupiter")
def get_children(path):
ids, watch = self.client.get_children_and_watch(path)
watch.chainDeferred(watch_deferred)
return ids
def new_connection(children):
self.assertFalse(children)
self.client2 = ZookeeperClient("127.0.0.1:2181")
return self.client2.connect()
def trigger_watch(client):
zookeeper.create(
self.client2.handle, "/jupiter/io", "", [PUBLIC_ACL], 0)
return watch_deferred
def verify_observed(data):
self.assertTrue(data)
d.addCallback(create_node)
d.addCallback(get_children)
d.addCallback(new_connection)
d.addCallback(trigger_watch)
d.addCallback(verify_observed)
return d
def test_get_children_with_watch_container_deleted(self):
"""
Establishing a child watch on a path, and then deleting the path,
will fire a child event watch on the container. This seems a little
counterintutive, but zookeeper docs state they do this as a signal
the container will never have any children. And logically you'd
would want to fire, so that in case the container node gets recreated
later and the watch fires, you don't want to the watch to fire then,
as its a technically a different container.
"""
d = self.client.connect()
watch_deferred = Deferred()
def create_node(client):
return self.client.create("/prison")
def get_children(path):
childd, w = self.client.get_children_and_watch(path)
w.addCallback(verify_watch)
return childd
def delete_node(children):
return self.client.delete("/prison")
def verify_watch(event):
self.assertTrue(event.type_name, "child")
watch_deferred.callback(None)
d.addCallback(create_node)
d.addCallback(get_children)
d.addCallback(delete_node)
return watch_deferred
test_get_children_with_watch_container_deleted.timeout = 5
def test_get_no_children(self):
"""
Getting children of a node without any children returns an empty list.
"""
d = self.client.connect()
def create_node(client):
return self.client.create("/tower")
def get_children(path):
return self.client.get_children(path)
def verify_children(children):
self.assertEqual(children, [])
d.addCallback(create_node)
d.addCallback(get_children)
d.addCallback(verify_children)
return d
def test_get_children_nonexistant(self):
"""
        Getting children of a nonexistent node raises a no node exception.
"""
d = self.client.connect()
def get_children(client):
return client.get_children("/tower")
d.addCallback(get_children)
self.failUnlessFailure(d, zookeeper.NoNodeException)
return d
def xtest_add_auth(self):
"""
        The connection can have zero or more authentication infos. These
        authentication infos are used when accessing nodes to verify access
        against the node's ACL.
"""
d = self.client.connect()
credentials = "mary:apples"
user, password = credentials.split(":")
identity = "%s:%s" % (
user,
base64.b64encode(hashlib.new('sha1', credentials).digest()))
acl = {'id': identity, 'scheme': 'digest', 'perms': zookeeper.PERM_ALL}
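        # A digest ACL identity has the form "user:base64(sha1(user:password))",
        # matching the credentials constructed above.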
failed = []
def add_auth_one(client):
d = client.add_auth("digest", "bob:martini")
# a little hack to avoid slowness around adding auth
# see https://issues.apache.org/jira/browse/ZOOKEEPER-770
# by pushing an additional message send/response cycle
# we don't have to wait for the io thread to timeout
# on the socket.
client.exists("/orchard")
return d
def create_node(client):
return client.create("/orchard", "apple trees", acls=[acl])
def try_node_access(path):
return self.client.set("/orchard", "bar")
def node_access_failed(failure):
self.assertEqual(failure.value.args, ("not authenticated /orchard",))
failed.append(True)
return
def add_auth_two(result):
d = self.client.add_auth("digest", credentials)
# a little hack to avoid slowness around adding auth
# see https://issues.apache.org/jira/browse/ZOOKEEPER-770
self.client.get_children("/orchard")
return d
def verify_node_access(stat):
self.assertEqual(stat['version'], 1)
self.assertEqual(stat['dataLength'], 3)
self.assertTrue(failed) # we should have hit the errback
d.addCallback(add_auth_one)
d.addCallback(create_node)
d.addCallback(try_node_access)
d.addErrback(node_access_failed)
d.addCallback(add_auth_two)
d.addCallback(try_node_access)
d.addCallback(verify_node_access)
return d
def test_add_auth_with_error(self):
"""
On add_auth error the deferred errback is invoked with the exception.
"""
d = self.client.connect()
def _fake_auth(handle, scheme, identity, callback):
callback(0, zookeeper.AUTHFAILED)
return 0
mock_auth = self.mocker.replace("zookeeper.add_auth")
mock_auth(ANY, ANY, ANY, ANY)
self.mocker.call(_fake_auth)
self.mocker.replay()
def add_auth(client):
d = self.client.add_auth("digest", "mary:lamb")
return d
def verify_failure(failure):
self.assertTrue(
isinstance(failure.value, zookeeper.AuthFailedException))
def assert_failed(result):
self.fail("should not get here")
d.addCallback(add_auth)
d.addCallback(assert_failed)
d.addErrback(verify_failure)
return d
def test_set_acl(self):
"""
The client can be used to set an ACL on a node.
"""
d = self.client.connect()
acl = [PUBLIC_ACL,
dict(scheme="digest",
id="zebra:moon",
perms=zookeeper.PERM_ALL)]
def create_node(client):
return client.create("/moose")
def set_acl(path):
return self.client.set_acl(path, acl)
def verify_acl(junk):
self.assertEqual(
zookeeper.get_acl(self.client.handle, "/moose")[1],
acl)
d.addCallback(create_node)
d.addCallback(set_acl)
d.addCallback(verify_acl)
return d
def test_set_acl_with_error(self):
"""
on error set_acl invokes the deferred's errback with an exception.
"""
d = self.client.connect()
acl = dict(scheme="digest", id="a:b", perms=zookeeper.PERM_ALL)
def set_acl(client):
return client.set_acl("/zebra-moon22", [acl])
def verify_failure(failure):
self.assertTrue(
isinstance(failure.value, zookeeper.NoNodeException))
d.addCallback(set_acl)
d.addErrback(verify_failure)
return d
def test_get_acl(self):
"""
The client can be used to get an ACL on a node.
"""
d = self.client.connect()
def create_node(client):
return client.create("/moose")
def get_acl(path):
return self.client.get_acl(path)
def verify_acl((acls, stat)):
self.assertEqual(acls, [PUBLIC_ACL])
d.addCallback(create_node)
d.addCallback(get_acl)
d.addCallback(verify_acl)
return d
def test_get_acl_error(self):
"""
On error the acl callback invokes the deferred errback with the
exception.
"""
d = self.client.connect()
def inject_error(result, d):
error = zookeeper.ZooKeeperException()
d.errback(error)
return error
def get_acl(path):
# Get the ACL of a nonexistent node
return self.client.get_acl("/moose")
def verify_failure(failure):
self.assertTrue(isinstance(failure, Failure))
self.assertTrue(
isinstance(failure.value, zookeeper.ZooKeeperException))
d.addCallback(get_acl)
d.addBoth(verify_failure)
return d
def test_client_id(self):
"""
The client exposes a client id which is useful when examining
the server logs.
"""
# if we're not connected returns none
self.assertEqual(self.client.client_id, None)
d = self.client.connect()
def verify_client_id(client):
self.assertTrue(isinstance(self.client.client_id, tuple))
self.assertTrue(isinstance(self.client.client_id[0], long))
self.assertTrue(isinstance(self.client.client_id[1], str))
d.addCallback(verify_client_id)
return d
def test_sync(self):
"""
The sync method on the client flushes the connection to leader.
In practice this seems hard to test functionally, but we at
least verify the method executes without issue.
"""
d = self.client.connect()
def create_node(client):
return client.create("/abc")
def client_sync(path):
return self.client.sync(path)
def verify_sync(result):
self.assertTrue(
zookeeper.exists(self.client.handle, "/abc"))
d.addCallback(create_node)
d.addCallback(client_sync)
d.addCallback(verify_sync)
return d
def test_property_servers(self):
"""
The servers property of the client shows which servers, if any, it
is connected to; when not connected it returns None.
"""
self.assertEqual(self.client.servers, None)
d = self.client.connect()
def verify_servers(client):
self.assertEqual(client.servers, "127.0.0.1:2181")
d.addCallback(verify_servers)
return d
def test_property_session_timeout(self):
"""
The negotiated session timeout is available as a property on the
client. If the client isn't connected, the value is None.
"""
self.assertEqual(self.client.session_timeout, None)
d = self.client.connect()
def verify_session_timeout(client):
self.assertIn(client.session_timeout, (4000, 10000))
d.addCallback(verify_session_timeout)
return d
def test_property_unrecoverable(self):
"""
The unrecoverable property specifies whether the connection can be
recovered or must be discarded.
"""
d = self.client.connect()
def verify_recoverable(client):
self.assertEqual(client.unrecoverable, False)
return client
d.addCallback(verify_recoverable)
return d
def test_invalid_watcher(self):
"""
Setting an invalid watcher raises a SyntaxError.
"""
d = self.client.connect()
def set_invalid_watcher(client):
return client.set_connection_watcher(1)
def verify_invalid(failure):
self.assertEqual(failure.value.args, ("Invalid Watcher 1",))
self.assertTrue(isinstance(failure.value, SyntaxError))
d.addCallback(set_invalid_watcher)
d.addErrback(verify_invalid)
return d
def test_connect_with_server(self):
"""
A client's servers can be specified in the connect method.
"""
d = self.client.connect("127.0.0.1:2181")
def verify_connected(client):
self.assertTrue(client.connected)
d.addCallback(verify_connected)
return d
def test_connect_with_error(self):
"""
An error in the connect invokes the deferred errback with exception.
"""
def _fake_init(handle, callback, timeout):
callback(0, 0, zookeeper.ASSOCIATING_STATE, "")
return 0
mock_init = self.mocker.replace("zookeeper.init")
mock_init(ANY, ANY, ANY)
self.mocker.call(_fake_init)
self.mocker.replay()
d = self.client.connect()
def verify_error(failure):
self.assertFalse(self.client.connected)
self.assertTrue(isinstance(failure.value, ConnectionException))
self.assertEqual(failure.value.args[0], "connection error")
def assert_failed(any):
self.fail("should not be invoked")
d.addCallback(assert_failed)
d.addErrback(verify_error)
return d
test_connect_with_error.timeout = 5
def test_connect_timeout(self):
"""
A timeout in seconds can be specified on connect; if the client hasn't
connected before it elapses, the errback is invoked with a timeout
exception.
"""
mock_init = self.mocker.replace("zookeeper.init")
mock_init(ANY, ANY, ANY)
self.mocker.result(0)
self.mocker.replay()
d = self.client.connect(timeout=0.1)
def verify_timeout(failure):
self.assertTrue(
isinstance(failure.value, ConnectionTimeoutException))
def assert_failure(any):
self.fail("should not be reached")
d.addCallback(assert_failure)
d.addErrback(verify_timeout)
return d
def test_connect_ensured(self):
"""
All of the client apis (with the exception of connect) attempt
to ensure the client is connected before executing an operation.
"""
self.assertFailure(
self.client.get_children("/abc"), zookeeper.ZooKeeperException)
self.assertFailure(
self.client.create("/abc"), zookeeper.ZooKeeperException)
self.assertFailure(
self.client.set("/abc", "123"), zookeeper.ZooKeeperException)
def test_connect_multiple_raises(self):
"""
Attempting to connect on a client that is already connected raises
an exception.
"""
d = self.client.connect()
def connect_again(client):
d = client.connect()
self.failUnlessFailure(d, zookeeper.ZooKeeperException)
return d
d.addCallback(connect_again)
return d
def test_bad_result_raises_error(self):
"""
A non-OK return code from a zookeeper api method raises an exception.
"""
mock_acreate = self.mocker.replace("zookeeper.acreate")
mock_acreate(ANY, ANY, ANY, ANY, ANY, ANY)
self.mocker.result(-100)
self.mocker.replay()
d = self.client.connect()
def verify_failure(client):
d = client.create("/abc")
self.failUnlessFailure(d, zookeeper.ZooKeeperException)
d.addCallback(verify_failure)
return d
def test_connection_watcher(self):
"""
A connection watcher can be set that receives notices when the
connection state changes. Technically zookeeper would also use this
as a global watcher for node watches, but zkpython doesn't expose
that api, as it's mostly considered legacy. It's out of scope to
simulate a connection-level event (such as the server restarting)
within unit tests.
"""
d = self.client.connect()
observed = []
def watch(*args):
observed.append(args)
def set_global_watcher(client):
client.set_connection_watcher(watch)
return client
def close_connection(client):
return client.close()
def verify_observed(stat):
self.assertFalse(observed)
d.addCallback(set_global_watcher)
d.addCallback(close_connection)
d.addCallback(verify_observed)
return d
def test_close_not_connected(self):
"""
If the client is not connected, closing returns None.
"""
self.assertEqual(self.client.close(), None)
def test_invalid_connection_error_callback(self):
self.assertRaises(TypeError,
self.client.set_connection_error_callback,
None)
def test_invalid_session_callback(self):
self.assertRaises(TypeError,
self.client.set_session_callback,
None)
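# Usage sketch (illustrative only, mirroring the deferred-chaining pattern the
# tests above rely on; use() and handle_failure() are hypothetical
# placeholders, not part of the suite):
#
#   d = client.connect()
#   d.addCallback(lambda c: c.create("/example", "data"))
#   d.addCallback(lambda path: client.get(path))
#   d.addCallback(lambda (data, stat): use(data))  # py2 tuple-unpack
#   d.addErrback(handle_failure)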
|
racker/txzookeeper
|
txzookeeper/tests/test_client.py
|
Python
|
gpl-3.0
| 40,264
|
[
"MOOSE"
] |
d33407e58cd42d396dbdcdbe145738c8d49b9817d96e027545d24ece709abf9c
|
import os, string, tempfile, shutil
from subprocess import Popen
from ase.io import write
from ase.units import Bohr
class Bader:
'''class for running bader analysis and extracting data from it.
The class runs bader, extracts the charge density and outputs it
to a cube file. Then you call different functions of the class to
extract the charges, volumes, etc...
ACF.dat contains the coordinates of each atom, the charge
associated with it according to Bader partitioning, percentage of
the whole according to Bader partitioning and the minimum distance
to the surface. This distance should be compared to maximum
cut-off radius for the core region if pseudo potentials have been
used.
BCF.dat contains the coordinates of each Bader maxima, the charge
within that volume, the nearest atom and the distance to that
atom.
AtomVolumes.dat contains the number of each volume that has been
assigned to each atom. These numbers correspond to the number of
the BvAtxxxx.dat files.
The options for the executable are::
bader [ -c bader | voronoi ]
[ -n bader | voronoi ]
[ -b neargrid | ongrid ]
[ -r refine_edge_iterations ]
[ -ref reference_charge ]
[ -p all_atom | all_bader ]
[ -p sel_atom | sel_bader ] [volume list]
[ -p atom_index | bader_index ]
[ -i cube | chgcar ]
[ -h ] [ -v ]
chargefile
References:
G. Henkelman, A. Arnaldsson, and H. Jonsson, A fast and robust
algorithm for Bader decomposition of charge density,
Comput. Mater. Sci. 36, 354-360 (2006).
E. Sanville, S. D. Kenny, R. Smith, and G. Henkelman An improved
grid-based algorithm for Bader charge allocation,
J. Comp. Chem. 28 899-908 (2007).
W. Tang, E. Sanville, and G. Henkelman A grid-based Bader analysis
algorithm without lattice bias, J. Phys.: Condens. Matter 21
084204 (2009).
'''
def __init__(self, atoms):
'''atoms: an ASE Atoms object whose attached calculator provides
get_nc() and get_charge_density() (e.g. Jacapo).'''
self.atoms = atoms
#get density and write cube file
calc = atoms.get_calculator()
ncfile = calc.get_nc()
base, ext = os.path.splitext(ncfile)
x, y, z, density = calc.get_charge_density()
cubefile = base + '_charge_density.cube'
self.densityfile = cubefile
if not os.path.exists(cubefile):
write(cubefile, atoms, data=density * Bohr ** 3)
#cmd to run for bader analysis. check if output exists so we
#don't run this too often.
acf_file = base + '_ACF.dat'
if not os.path.exists(acf_file):
#mk tempdir
tempdir = tempfile.mkdtemp()
cwd = os.getcwd()
abscubefile = os.path.abspath(cubefile)
os.chdir(tempdir)
cmd = 'bader %s' % abscubefile
process = Popen(cmd, shell=True)
status = process.wait()
if status != 0:
print 'bader failed with exit status %s' % status
shutil.copy2('ACF.dat', os.path.join(cwd, acf_file))
os.chdir(cwd)
shutil.rmtree(tempdir)
self.charges = []
self.volumes = []
#now parse the output
f = open(acf_file, 'r')
#skip 2 lines
f.readline()
f.readline()
for i, atom in enumerate(self.atoms):
line = f.readline()
fields = line.split()
n = int(fields[0])
x = float(fields[1])
y = float(fields[2])
z = float(fields[3])
chg = float(fields[4])
mindist = float(fields[5])
vol = float(fields[6])
self.charges.append(chg)
self.volumes.append(vol)
f.close()
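# ACF.dat row layout assumed by the loop above (one row per atom):
#   #   X   Y   Z   CHARGE   MIN_DIST   ATOMIC_VOL
# Charges are in electrons and volumes in Bohr**3; get_bader_volumes()
# converts the volumes to Ang**3.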
def get_bader_charges(self):
return self.charges
def get_bader_volumes(self):
'return volumes in Ang**3'
return [x * Bohr ** 3 for x in self.volumes]
def write_atom_volume(self, atomlist):
'''write bader atom volumes to cube files.
atomlist = [0,2] #for example
-p sel_atom Write the selected atomic volumes, read from the
subsequent list of volumes.
'''
alist = string.join([str(x) for x in atomlist], ' ')
cmd = 'bader -p sel_atom %s %s' % (alist, self.densityfile)
print cmd
os.system(cmd)
def write_bader_volume(self, atomlist):
"""write bader atom volumes to cube files.
::
atomlist = [0,2] # for example
-p sel_bader Write the selected Bader volumes, read from the
subsequent list of volumes.
"""
alist = string.join([str(x) for x in atomlist], ' ')
cmd = 'bader -p sel_bader %s %s' % (alist, self.densityfile)
print cmd
os.system(cmd)
def write_atom_index(self):
''' -p atom_index Write the atomic volume index to a charge
density file.
'''
cmd = 'bader -p atom_index %s' % (self.densityfile)
print cmd
os.system(cmd)
def write_bader_index(self):
'''
-p bader_index Write the Bader volume index to a charge
density file.
'''
cmd = 'bader -p bader_index %s' % (self.densityfile)
print cmd
os.system(cmd)
def write_all_atom(self):
'''
-p all_atom Combine all volumes associated with an atom and
write to file. This is done for all atoms and written to files
named BvAtxxxx.dat. The volumes associated with atoms are
those for which the maximum in charge density within the
volume is closest to the atom.
'''
cmd = 'bader -p all_atom %s' % (self.densityfile)
print cmd
os.system(cmd)
def write_all_bader(self):
'''
-p all_bader Write all Bader volumes (containing charge above
threshold of 0.0001) to a file. The charge distribution in
each volume is written to a separate file, named
Bvolxxxx.dat. It will either be of a CHGCAR format or a CUBE
file format, depending on the format of the initial charge
density file. These files can be quite large, so this option
should be used with caution.
'''
cmd = 'bader -p all_bader %s' % (self.densityfile)
print cmd
os.system(cmd)
if __name__ == '__main__':
from ase.calculators.jacapo import Jacapo
atoms = Jacapo.read_atoms('ethylene.nc')
b = Bader(atoms)
print b.get_bader_charges()
print b.get_bader_volumes()
b.write_atom_volume([3, 4])
|
askhl/ase
|
ase/calculators/jacapo/utils/bader.py
|
Python
|
gpl-2.0
| 6,698
|
[
"ASE"
] |
8cb1222ce09b579db35de20dee5a211ffc122d7ae3c0d80708808ff68f6f3ddc
|
''' CacheFeederAgent
This agent feeds the Cache tables with the outputs of the cache commands.
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN CacheFeederAgent
:end-before: ##END
:dedent: 2
:caption: CacheFeederAgent options
'''
__RCSID__ = '$Id$'
from DIRAC import S_OK
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.LCG.GOCDBClient import GOCDBClient
from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
from DIRAC.AccountingSystem.Client.ReportsClient import ReportsClient
from DIRAC.WorkloadManagementSystem.Client.WMSAdministratorClient import WMSAdministratorClient
from DIRAC.ResourceStatusSystem.Command import CommandCaller
from DIRAC.WorkloadManagementSystem.Client.PilotManagerClient import PilotManagerClient
AGENT_NAME = 'ResourceStatus/CacheFeederAgent'
class CacheFeederAgent(AgentModule):
'''
The CacheFeederAgent feeds the cache tables for the client and the accounting.
It periodically runs a set of commands and stores their results in the
tables.
'''
def __init__(self, *args, **kwargs):
AgentModule.__init__(self, *args, **kwargs)
self.commands = {}
self.clients = {}
self.cCaller = None
self.rmClient = None
def initialize(self):
""" Define the commands to be executed, and instantiate the clients that will be used.
"""
self.am_setOption('shifterProxy', 'DataManager')
res = ObjectLoader().loadObject('DIRAC.ResourceStatusSystem.Client.ResourceStatusClient',
'ResourceStatusClient')
if not res['OK']:
self.log.error('Failed to load ResourceStatusClient class: %s' % res['Message'])
return res
rsClass = res['Value']
res = ObjectLoader().loadObject('DIRAC.ResourceStatusSystem.Client.ResourceManagementClient',
'ResourceManagementClient')
if not res['OK']:
self.log.error('Failed to load ResourceManagementClient class: %s' % res['Message'])
return res
rmClass = res['Value']
self.commands['Downtime'] = [{'Downtime': {}}]
self.commands['GOCDBSync'] = [{'GOCDBSync': {}}]
self.commands['FreeDiskSpace'] = [{'FreeDiskSpace': {}}]
# PilotsCommand
# self.commands[ 'Pilots' ] = [
# { 'PilotsWMS' : { 'element' : 'Site', 'siteName' : None } },
# { 'PilotsWMS' : { 'element' : 'Resource', 'siteName' : None } }
# ]
# FIXME: do not forget about hourly vs Always ...etc
# AccountingCacheCommand
# self.commands[ 'AccountingCache' ] = [
# {'SuccessfullJobsBySiteSplitted' :{'hours' :24, 'plotType' :'Job' }},
# {'FailedJobsBySiteSplitted' :{'hours' :24, 'plotType' :'Job' }},
# {'SuccessfullPilotsBySiteSplitted' :{'hours' :24, 'plotType' :'Pilot' }},
# {'FailedPilotsBySiteSplitted' :{'hours' :24, 'plotType' :'Pilot' }},
# {'SuccessfullPilotsByCESplitted' :{'hours' :24, 'plotType' :'Pilot' }},
# {'FailedPilotsByCESplitted' :{'hours' :24, 'plotType' :'Pilot' }},
# {'RunningJobsBySiteSplitted' :{'hours' :24, 'plotType' :'Job' }},
# # {'RunningJobsBySiteSplitted' :{'hours' :168, 'plotType' :'Job' }},
# # {'RunningJobsBySiteSplitted' :{'hours' :720, 'plotType' :'Job' }},
# # {'RunningJobsBySiteSplitted' :{'hours' :8760, 'plotType' :'Job' }},
# ]
# VOBOXAvailability
# self.commands[ 'VOBOXAvailability' ] = [
# { 'VOBOXAvailability' : {} }
# ]
# Reuse clients for the commands
self.clients['GOCDBClient'] = GOCDBClient()
self.clients['ReportGenerator'] = RPCClient('Accounting/ReportGenerator')
self.clients['ReportsClient'] = ReportsClient()
self.clients['ResourceStatusClient'] = rsClass()
self.clients['ResourceManagementClient'] = rmClass()
self.clients['WMSAdministrator'] = WMSAdministratorClient()
self.clients['Pilots'] = PilotManagerClient()
self.cCaller = CommandCaller
return S_OK()
def loadCommand(self, commandModule, commandDict):
""" Loads and executes commands.
:param commandModule: Name of the command (e.g. 'Downtime')
:type commandModule: basestring
:param commandDict: dictionary of {'CommandClass':{arguments}}
:type commandDict: dict
"""
commandName = commandDict.keys()[0]
commandArgs = commandDict[commandName]
commandTuple = ('%sCommand' % commandModule, '%sCommand' % commandName)
commandObject = self.cCaller.commandInvocation(commandTuple, pArgs=commandArgs,
clients=self.clients)
if not commandObject['OK']:
self.log.error('Error initializing %s' % commandName)
return commandObject
commandObject = commandObject['Value']
# Set master mode
commandObject.masterMode = True
self.log.info('%s/%s' % (commandModule, commandName))
return S_OK(commandObject)
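# Illustrative walk-through (inferred from the code above, not extra
# configuration): for self.commands['Downtime'] = [{'Downtime': {}}],
# loadCommand builds commandTuple = ('DowntimeCommand', 'DowntimeCommand')
# and hands it to CommandCaller.commandInvocation together with the shared
# self.clients, so each cache command is instantiated once per cycle and
# runs in master mode.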
def execute(self):
""" Just executes, via `loadCommand`, the commands in self.commands one after the other
"""
for commandModule, commandList in self.commands.iteritems():
self.log.info('%s module initialization' % commandModule)
for commandDict in commandList:
commandObject = self.loadCommand(commandModule, commandDict)
if not commandObject['OK']:
self.log.error(commandObject['Message'])
continue
commandObject = commandObject['Value']
try:
results = commandObject.doCommand()
if not results['OK']:
self.log.error('Failed to execute command', '%s: %s' % (commandModule, results['Message']))
continue
results = results['Value']
if not results:
self.log.info('Empty results')
continue
self.log.verbose('Command OK Results')
self.log.verbose(results)
except Exception as excp: # pylint: disable=broad-except
self.log.exception("Failed to execute command, with exception: %s" % commandModule, lException=excp)
return S_OK()
|
fstagni/DIRAC
|
ResourceStatusSystem/Agent/CacheFeederAgent.py
|
Python
|
gpl-3.0
| 6,604
|
[
"DIRAC"
] |
aecd72fce5d4b0574e386c742d44772f219c7163d4644efb65d6d90c6f606b6e
|
# !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2018-10-11 17:51:43
# @Last modified by: Brian Cherinka
# @Last Modified time: 2018-11-29 17:23:15
from __future__ import print_function, division, absolute_import
import marvin.tools
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from marvin import log
from .base import VACMixIn, VACTarget
class VMORPHOVAC(VACMixIn):
"""Provides access to the MaNGA-VISUAL-MORPHOLOGY VAC.
VAC name: manga_visual_morpho
URL: https://www.sdss.org/dr17/data_access/value-added-catalogs/?vac_id=manga-visual-morphologies-from-sdss-and-desi-images
Description: A new morphology catalogue is presented in this VAC, based on a pure visual
morphological classification. This catalogue contains the T-Type morphology, visual
attributes (barred, edge-on, tidal debris) and the CAS parameters (Concentration, Asymmetry
and Clumpiness) from the DESI images.
Authors: J. Antonio Vazquez-Mata and Hector Hernandez-Toledo
"""
# hidden from DR17 until future notice
_hidden = True
_hidden_for = 'DR17'
# Required parameters
name = 'visual_morphology'
description = 'Returns visual morphology data'
version = {'DR16': '1.0.1', 'DR17': '2.0.1', 'MPL-11': '2.0.1'}
display_name = 'Visual Morphology'
url = 'https://www.sdss.org/dr17/data_access/value-added-catalogs/?vac_id=manga-visual-morphologies-from-sdss-and-desi-images'
# optional Marvin Tools to attach your vac to
include = (marvin.tools.cube.Cube, marvin.tools.maps.Maps)
# Required method
def set_summary_file(self, release):
''' Sets the path to the Visual Morphology summary file '''
# define the variables to build a unique path to your VAC file
self.path_params = {"vmver": self.version[release]}
# get_path returns False if the files do not exist locally
self.summary_file = self.get_path('mangaVmorpho', path_params=self.path_params)
# Required method
def get_target(self, parent_object):
''' Accesses VAC data for a specific target from a Marvin Tool object '''
if parent_object.release == 'DR16':
log.warning('You are accessing outdated DR16 data for this VAC. This target has updated data in DR17. We recommend using the new data release instead.')
# get any parameters you need from the parent object
plateifu = parent_object.plateifu
# download the vac from the SAS if it does not already exist locally
if not self.file_exists(self.summary_file):
self.summary_file = self.download_vac('mangaVmorpho', path_params=self.path_params)
# get path to ancillary VAC files
if parent_object.release == 'DR16':
# for DR16 SDSS/DESI mosaic images
self.update_path_params({'plateifu': plateifu, 'survey': '*'})
sdss_mos, desi_mos = self._get_mosaics(self.path_params)
# create container for more complex return data
vmdata = VizMorphTarget(plateifu, vacfile=self.summary_file, sdss=sdss_mos, desi=desi_mos)
elif parent_object.release in ['DR17', 'MPL-11']:
# for DR17 combined mosaic images
self.update_path_params({'plateifu': plateifu})
mos_mos = self._check_mosaic('mos', self.path_params)
# create container for more complex return data
vmdata = VizMorphTarget(plateifu, vacfile=self.summary_file, mos=mos_mos)
return vmdata
def _get_mosaics(self, path_params):
''' Get the mosaic images for SDSS and DESI surveys for DR16
Parameters:
path_params (dict):
The sdss_access keyword parameters to define a file path
Returns:
The SDSS and DESI local image filepaths
'''
sdss_mosaic = self._check_mosaic('sdss', path_params)
desi_mosaic = self._check_mosaic('desi', path_params)
return sdss_mosaic, desi_mosaic
def _check_mosaic(self, survey, path_params):
''' Get a mosaic image file for a survey path
Checks for local existence of the mosaic image filepath.
If it does not exist, it downloads it.
Parameters:
survey (str):
The survey to download. Either sdss or desi in DR16; or mos in DR17
path_params (dict):
The sdss_access keyword parameters to define a file path
Returns:
The mosaic image file path
'''
path_params['survey'] = survey
mosaic = self.get_path('mangaVmorphoImgs', path_params=path_params)
# download the mosaic file (downloads both surveys at once)
if not self.file_exists(mosaic):
pp = path_params.copy()
pp['survey'] = '*'
mosaics = self.download_vac('mangaVmorphoImgs', path_params=pp)
# get the path again for the single survey
mosaic = self.get_path('mangaVmorphoImgs', path_params=path_params)
return mosaic
class VizMorphTarget(VACTarget):
''' A customized target class to also display morphology mosaics
This class handles data from both the Visual Morphology summary file and the
individual image files. Row data from the summary file for the given target
is returned via the `data` property. Images can be displayed via
the `show_mosaic` method.
Parameters:
targetid (str):
The plateifu or mangaid designation
vacfile (str):
The path of the VAC summary file
sdss (str):
The path to the DR16 SDSS image mosaic
desi (str):
The path to the DR16 DESI image mosaic
mos (str):
The path to the DR17 combined image mosaic
Attributes:
data:
The target row data from the main VAC file
targetid (str):
The target identifier
'''
def __init__(self, targetid, vacfile, sdss=None, desi=None, mos=None):
super(VizMorphTarget, self).__init__(targetid, vacfile)
self._sdss_img = sdss
self._desi_img = desi
self._mos_img = mos
def show_mosaic(self, survey=None):
''' Show the mosaic image for the given survey in DR16 or the combined in DR17
Displays the mosaic image of visual morphology classification
for the given survey as a Matplotlib Figure/Axis object.
Parameters:
survey (str):
The survey name. Can be either "sdss" or "desi" for DR16; or "mos" for DR17
Returns:
A matplotlib axis object
'''
#print('NOTE: For DR16, must specify either survey: sdss or desi. For DR17 must write: mos')
if survey == 'sdss':
impath = self._sdss_img
fsize = (15,5)
elif survey == 'desi':
impath = self._desi_img
fsize = (10,5)
elif survey == 'mos':
impath = self._mos_img
fsize = (20,5)
else:
raise ValueError('survey must be either "sdss" or "desi" for DR16, or "mos" for DR17')
imdata = mpimg.imread(impath)
fig, ax = plt.subplots(figsize = fsize)
ax.imshow(imdata)
title = '{0} Mosaic'.format(survey.upper())
fig.suptitle(title)
return ax
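# Minimal usage sketch (assumes a configured Marvin install with SAS access;
# the plate-ifu below is hypothetical):
#
#   import marvin.tools
#   cube = marvin.tools.Cube('8485-1901')
#   vm = cube.vacs.visual_morphology   # attached via VACMixIn.include above
#   print(vm.data)                     # summary-file row for this target
#   ax = vm.show_mosaic('mos')         # DR17 combined mosaic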
|
sdss/marvin
|
python/marvin/contrib/vacs/visual_morph.py
|
Python
|
bsd-3-clause
| 7,457
|
[
"Brian"
] |
04df6468800c578d044bb0d7cd44d86b4241f8f6e236db29a61e9b186e97b8cd
|
# Copyright (c) 2013 Huan Do, http://huan.do
import ast
import environment
from future_finder import FutureFinder
class FutureVisitor(object):
def __init__(self, env):
self.env = env
self.tree = env.tree
def traverse(self):
future_finder = FutureFinder(self.env)
future_finder.visit(self.tree)
if future_finder.future_import_nodes:
self.bring_nodes_to_top(future_finder.future_import_nodes)
def bring_nodes_to_top(self, nodes):
for node in nodes:
self.tree.body.remove(node)
self.tree.body = [node] + self.tree.body
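# Note: prepending one node at a time reverses the relative order of multiple
# __future__ imports. Each such import is independent, so this is harmless
# here, but it is worth knowing if ordering ever starts to matter.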
|
huan/Underscore
|
underscore/future_visitor.py
|
Python
|
mit
| 618
|
[
"VisIt"
] |
98545aa9418ebc68e7bb75ba5bebc643dc1a56b6b4d1ca00a69ca7518983d14a
|
#!/usr/bin/env python
# This example shows how to load a 3D image into VTK and then reformat
# that image into a different orientation for viewing. It uses
# vtkImageReslice for reformatting the image, and uses vtkImageActor
# and vtkInteractorStyleImage to display the image. This InteractorStyle
# forces the camera to stay perpendicular to the XY plane.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Start by loading some data.
reader = vtk.vtkImageReader2()
reader.SetFilePrefix(VTK_DATA_ROOT + "/Data/headsq/quarter")
reader.SetDataExtent(0, 63, 0, 63, 1, 93)
reader.SetDataSpacing(3.2, 3.2, 1.5)
reader.SetDataOrigin(0.0, 0.0, 0.0)
reader.SetDataScalarTypeToUnsignedShort()
reader.UpdateWholeExtent()
# Calculate the center of the volume
reader.GetOutput().UpdateInformation()
(xMin, xMax, yMin, yMax, zMin, zMax) = reader.GetOutput().GetWholeExtent()
(xSpacing, ySpacing, zSpacing) = reader.GetOutput().GetSpacing()
(x0, y0, z0) = reader.GetOutput().GetOrigin()
center = [x0 + xSpacing * 0.5 * (xMin + xMax),
y0 + ySpacing * 0.5 * (yMin + yMax),
z0 + zSpacing * 0.5 * (zMin + zMax)]
# Matrices for axial, coronal, sagittal, oblique view orientations
axial = vtk.vtkMatrix4x4()
axial.DeepCopy((1, 0, 0, center[0],
0, 1, 0, center[1],
0, 0, 1, center[2],
0, 0, 0, 1))
coronal = vtk.vtkMatrix4x4()
coronal.DeepCopy((1, 0, 0, center[0],
0, 0, 1, center[1],
0,-1, 0, center[2],
0, 0, 0, 1))
sagittal = vtk.vtkMatrix4x4()
sagittal.DeepCopy((0, 0,-1, center[0],
1, 0, 0, center[1],
0,-1, 0, center[2],
0, 0, 0, 1))
oblique = vtk.vtkMatrix4x4()
oblique.DeepCopy((1, 0, 0, center[0],
0, 0.866025, -0.5, center[1],
0, 0.5, 0.866025, center[2],
0, 0, 0, 1))
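# Note on the matrices above: vtkImageReslice's ResliceAxes uses the first
# three columns as the direction cosines of the output x, y, z axes and the
# fourth column as the point mapped to the output origin (here the volume
# center). The oblique matrix is the axial orientation rotated 30 degrees
# about x (cos 30 ~ 0.866025, sin 30 = 0.5).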
# Extract a slice in the desired orientation
reslice = vtk.vtkImageReslice()
reslice.SetInputConnection(reader.GetOutputPort())
reslice.SetOutputDimensionality(2)
reslice.SetResliceAxes(sagittal)
reslice.SetInterpolationModeToLinear()
# Create a greyscale lookup table
table = vtk.vtkLookupTable()
table.SetRange(0, 2000) # image intensity range
table.SetValueRange(0.0, 1.0) # from black to white
table.SetSaturationRange(0.0, 0.0) # no color saturation
table.SetRampToLinear()
table.Build()
# Map the image through the lookup table
color = vtk.vtkImageMapToColors()
color.SetLookupTable(table)
color.SetInputConnection(reslice.GetOutputPort())
# Display the image
actor = vtk.vtkImageActor()
actor.SetInput(color.GetOutput())
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
window = vtk.vtkRenderWindow()
window.AddRenderer(renderer)
# Set up the interaction
interactorStyle = vtk.vtkInteractorStyleImage()
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetInteractorStyle(interactorStyle)
window.SetInteractor(interactor)
window.Render()
# Create callbacks for slicing the image
actions = {}
actions["Slicing"] = 0
def ButtonCallback(obj, event):
if event == "LeftButtonPressEvent":
actions["Slicing"] = 1
else:
actions["Slicing"] = 0
def MouseMoveCallback(obj, event):
(lastX, lastY) = interactor.GetLastEventPosition()
(mouseX, mouseY) = interactor.GetEventPosition()
if actions["Slicing"] == 1:
deltaY = mouseY - lastY
reslice.GetOutput().UpdateInformation()
sliceSpacing = reslice.GetOutput().GetSpacing()[2]
matrix = reslice.GetResliceAxes()
# move the center point that we are slicing through
center = matrix.MultiplyPoint((0, 0, sliceSpacing*deltaY, 1))
matrix.SetElement(0, 3, center[0])
matrix.SetElement(1, 3, center[1])
matrix.SetElement(2, 3, center[2])
window.Render()
else:
interactorStyle.OnMouseMove()
interactorStyle.AddObserver("MouseMoveEvent", MouseMoveCallback)
interactorStyle.AddObserver("LeftButtonPressEvent", ButtonCallback)
interactorStyle.AddObserver("LeftButtonReleaseEvent", ButtonCallback)
# Start interaction
interactor.Start()
|
naucoin/VTKSlicerWidgets
|
Examples/ImageProcessing/Python/ImageSlicing.py
|
Python
|
bsd-3-clause
| 4,168
|
[
"VTK"
] |
6a599f4d09525cb4aa861abfface71388a2c9079846e0649fc2a94310944231e
|
# pylint: disable=missing-docstring
from lettuce import step, world
SELECTORS = {
'spinner': '.video-wrapper .spinner',
'controls': '.video-controls',
}
# We should wait 300 ms for event handler invocation + 200ms for safety.
DELAY = 0.5
@step('I have uploaded subtitles "([^"]*)"$')
def i_have_uploaded_subtitles(_step, sub_id):
_step.given('I go to the files and uploads page')
_step.given('I upload the test file "subs_{}.srt.sjson"'.format(sub_id.strip()))
@step('I have created a Video component$')
def i_created_a_video_component(step):
step.given('I am in Studio editing a new unit')
world.create_component_instance(
step=step,
category='video',
)
world.wait_for_xmodule()
world.disable_jquery_animations()
world.wait_for_present('.is-initialized')
world.wait(DELAY)
world.wait_for_invisible(SELECTORS['spinner'])
if not world.youtube.config.get('youtube_api_blocked'):
world.wait_for_visible(SELECTORS['controls'])
@step('I have created a Video component with subtitles$')
def i_created_a_video_with_subs(_step):
_step.given('I have created a Video component with subtitles "3_yD_cEKoCk"')
@step('I have created a Video component with subtitles "([^"]*)"$')
def i_created_a_video_with_subs_with_name(_step, sub_id):
_step.given('I have created a Video component')
# Store the current URL so we can return here
video_url = world.browser.url
# Upload subtitles for the video using the upload interface
_step.given('I have uploaded subtitles "{}"'.format(sub_id))
# Return to the video
world.visit(video_url)
world.wait_for_xmodule()
# update .sub field with proper subs name (which mimics real Studio/XML behavior)
# this is needed only for videos which are created in acceptance tests.
_step.given('I edit the component')
world.wait_for_ajax_complete()
_step.given('I save changes')
world.disable_jquery_animations()
world.wait_for_present('.is-initialized')
world.wait_for_invisible(SELECTORS['spinner'])
|
fintech-circle/edx-platform
|
cms/djangoapps/contentstore/features/video.py
|
Python
|
agpl-3.0
| 2,082
|
[
"VisIt"
] |
bbe92b381904249ad9ef4368d501293ca996b0d4972e8167491cf61fa6bc89cc
|
import commands
import os
import Queue
import settings
import time
import threading
import serial
import subprocess
import sys
import traceback
from thirtybirds_2_0.Network.manager import init as network_init
from thirtybirds_2_0.Updates.manager import init as updates_init
class Network(object):
def __init__(self, hostname, network_message_handler, network_status_handler):
self.hostname = hostname
self.thirtybirds = network_init(
hostname=hostname,
role="client",
discovery_multicastGroup=settings.discovery_multicastGroup,
discovery_multicastPort=settings.discovery_multicastPort,
discovery_responsePort=settings.discovery_responsePort,
pubsub_pubPort=settings.pubsub_pubPort,
message_callback=network_message_handler,
status_callback=network_status_handler
)
########################
## UTILS
########################
class Utils(object):
def __init__(self, hostname):
self.hostname = hostname
def reboot(self):
os.system("sudo reboot now")
def remote_update_git(self, oratio, thirtybirds, update, upgrade):
if oratio:
subprocess.call(['sudo', 'git', 'pull'], cwd='/home/pi/oratio')
if thirtybirds:
subprocess.call(['sudo', 'git', 'pull'], cwd='/home/pi/thirtybirds_2_0')
return
def remote_update_scripts(self):
updates_init("/home/pi/oratio", False, True)
return
def get_update_script_version(self):
(updates, ghStatus, bsStatus) = updates_init("/home/pi/oratio", False, False)
return updates.read_version_pickle()
def get_git_timestamp(self):
return commands.getstatusoutput("cd /home/pi/oratio/; git log -1 --format=%cd")[1]
def get_temp(self):
return commands.getstatusoutput("/opt/vc/bin/vcgencmd measure_temp")[1]
def get_cpu(self):
bash_output = commands.getstatusoutput("uptime")[1]
split_output = bash_output.split(" ")
return split_output[12]
def get_uptime(self):
bash_output = commands.getstatusoutput("uptime")[1]
split_output = bash_output.split(" ")
return split_output[4]
def get_disk(self):
# stub for now
return "0"
def get_client_status(self):
return (self.hostname, self.get_update_script_version(), self.get_git_timestamp(), self.get_temp(), self.get_cpu(), self.get_uptime(), self.get_disk())
class Poller(threading.Thread):
def __init__(self, _main_, poll_delay_time):
threading.Thread.__init__(self)
self._main_ = _main_
self.poll_delay_time = poll_delay_time
def set_poll_period(self, period):
self.poll_delay_time = period
def run(self):
while True:
print "Poller Thread"
self._main_.network.thirtybirds.send("mandala_device_request", True)
self._main_.queue.put(("mandala_device_status", "('avl-medulla','pass')"))
self._main_.queue.put(("mandala_check_finished", ""))
time.sleep(self.poll_delay_time)
# Main handles network send/recv and can see all other classes directly
class Main(threading.Thread):
def __init__(self, hostname):
threading.Thread.__init__(self)
time.sleep(1)
print os.system("stty -F /dev/ttyACM0 -hupcl 9600")
time.sleep(1)
self.network = Network(hostname, self.network_message_handler, self.network_status_handler)
self.queue = Queue.Queue()
#self.arduino_connection = open("/dev/ttyACM0",'w')
self.arduino_connection = serial.Serial('/dev/ttyACM0', 9600, timeout=.1)
time.sleep(1) #give the connection a second to settle
self.utils = Utils(hostname)
self.network.thirtybirds.subscribe_to_topic("mandala_device_status")
self.finished = False
self.UNSET = 0
self.FAIL = 500
self.PASS = 4000
self.QUIET = 2000
self.mandala_device_status = None
self.mandala_tlc_ids = {
"avl-controller":39,
"avl-formant-1":15,
"avl-formant-1-amplifier":4,
"avl-formant-2":16,
"avl-formant-2-amplifier":5,
"avl-formant-3":17,
"avl-formant-3-amplifier":6,
"avl-layer-1":40,
"avl-layer-2":11,
"avl-layer-3":12,
"avl-medulla":35,
"avl-pitch-keys":18,
"avl-pitch-keys-sensor-1":7,
"avl-pitch-keys-sensor-2":8,
"avl-pitch-keys-sensor-3":9,
"avl-pitch-keys-sensor-4":10,
"avl-settings":34,
"avl-settings-adcs":24,
"avl-transport":13,
"avl-transport-encoder":0,
"avl-voice-1":36,
"avl-voice-1-crystal-frequency-counter":25,
"avl-voice-1-harmonic-generators":26,
"avl-voice-1-harmonic-volume":27,
"avl-voice-2":37,
"avl-voice-2-crystal-frequency-counter":28,
"avl-voice-2-harmonic-generators":29,
"avl-voice-2-harmonic-volume":30,
"avl-voice-3":38,
"avl-voice-3-crystal-frequency-counter":31,
"avl-voice-3-harmonic-generators":32,
"avl-voice-3-harmonic-volume":33,
"avl-voice-keys":14,
"avl-voice-keys-encoder-1":1,
"avl-voice-keys-encoder-2":2,
"avl-voice-keys-encoder-3":3
}
self.mandala_status = {
"avl-controller":"pass", # because if this is sending data, it's online.
"avl-formant-1":"unset",
"avl-formant-1-amplifier":"unset",
"avl-formant-2":"unset",
"avl-formant-2-amplifier":"unset",
"avl-formant-3":"unset",
"avl-formant-3-amplifier":"unset",
"avl-layer-1":"unset",
"avl-layer-2":"unset",
"avl-layer-3":"unset",
"avl-medulla":"pass",# because if this is sending data, it's online.
"avl-pitch-keys":"unset",
"avl-pitch-keys-sensor-1":"unset",
"avl-pitch-keys-sensor-2":"unset",
"avl-pitch-keys-sensor-3":"unset",
"avl-pitch-keys-sensor-4":"unset",
"avl-settings":"unset",
"avl-settings-adcs":"unset",
"avl-transport":"unset",
"avl-transport-encoder":"unset",
"avl-voice-1":"unset",
"avl-voice-1-crystal-frequency-counter":"unset",
"avl-voice-1-harmonic-generators":"unset",
"avl-voice-1-harmonic-volume":"unset",
"avl-voice-2":"unset",
"avl-voice-2-crystal-frequency-counter":"unset",
"avl-voice-2-harmonic-generators":"unset",
"avl-voice-2-harmonic-volume":"unset",
"avl-voice-3":"unset",
"avl-voice-3-crystal-frequency-counter":"unset",
"avl-voice-3-harmonic-generators":"unset",
"avl-voice-3-harmonic-volume":"unset",
"avl-voice-keys":"unset",
"avl-voice-keys-encoder-1":"unset",
"avl-voice-keys-encoder-2":"unset",
"avl-voice-keys-encoder-3":"unset"
}
self.arduino_delay_time = 0.05
self.poller = Poller(self, 5)
self.poller.start()
def network_message_handler(self, topic_msg):
# this method runs in the thread of the caller, not the thread of Main
topic, msg = topic_msg # separating just to eval msg. best to do it early. it should be done in TB.
#print "network_message_handler", topic, msg
#if len(msg) > 0:
# msg = eval(msg)
self.add_to_queue(topic, msg)
def network_status_handler(self, topic_msg):
# this method runs in the thread of the caller, not the thread of Main
print "Main.network_status_handler", topic_msg
def add_to_queue(self, topic, msg):
self.queue.put((topic, msg))
def update_mandala_status(self, devicename, status):
#print "update_mandala_status", devicename, self.mandala_status[devicename], status, self.mandala_status[devicename] == status
#if str(self.mandala_status[devicename]) != str(status):
self.mandala_status[devicename] = status
tlc_id_int = self.mandala_tlc_ids[devicename] + 5000
tlc_id_str = "{}\n".format(tlc_id_int)
if self.mandala_status[devicename] == "unset":
tlc_level_int = 0
if self.mandala_status[devicename] == "fail":
tlc_level_int = self.FAIL
if self.mandala_status[devicename] == "pass":
tlc_level_int = self.PASS
#tlc_level_int = self.QUIET if self.finished else self.PASS
tlc_level_str = "{}\n".format(tlc_level_int)
self.write_to_arduino(tlc_id_str,tlc_level_str)
def check_finished(self):
return all(status == "pass" for status in self.mandala_status.values())
def write_to_arduino(self, id, level):
#print "write_to_arduino", repr(id), repr(level)
time.sleep(self.arduino_delay_time)
self.arduino_connection.write(id)
time.sleep(self.arduino_delay_time)
self.arduino_connection.write(level)
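# Wire-protocol sketch (inferred from write_to_arduino and its callers, not a
# documented spec): each update is two newline-terminated integers, channel id
# first and brightness level second. E.g. marking avl-medulla (tlc id 35) as
# pass sends "5035\n" followed by "4000\n".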
def run(self):
devicenames = self.mandala_tlc_ids.keys()
devicenames.sort()
for devicename in devicenames:
tlc_id_int = self.mandala_tlc_ids[devicename] + 5000
tlc_id_str = "{}\n".format(tlc_id_int)
tlc_level_str = "0/n"
self.write_to_arduino(tlc_id_str,tlc_level_str)
time.sleep(0.01)
self.write_to_arduino("5035\n", "4000\n") # set medulla as pass
#self.write_to_arduino("5039\n", "4000\n") # set medulla as pass
while True:
# self.network.thirtybirds.send("mandala_device_request", True)
try:
topic, msg_str = self.queue.get(True)
if topic == "mandala_device_status":
msg = eval(msg_str)
#print topic, msg
devicename, status = msg
self.update_mandala_status(devicename, status)
if topic == "mandala_check_finished":
print "self.check_finished()",self.check_finished()
if self.check_finished():
self.finished = True
devicenames = self.mandala_tlc_ids.keys()
for devicename in devicenames:
tlc_id_int = self.mandala_tlc_ids[devicename] + 5000
tlc_id_str = "{}\n".format(self.QUIET)
tlc_level_str = "0/n"
self.write_to_arduino(tlc_id_str,tlc_level_str)
time.sleep(0.01)
self.poller.set_poll_period(60)
time.sleep(0.01)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
print e, repr(traceback.format_exception(exc_type, exc_value,exc_traceback))
def init(hostname):
main = Main(hostname)
main.daemon = True
main.start()
return main
|
andycavatorta/oratio
|
Roles/avl-medulla/main.py
|
Python
|
mit
| 11,212
|
[
"CRYSTAL"
] |
a51d99384cf1886ad35779380db898c44ec3e3e240a8daf530855bb7feb388f0
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create a rendering window and renderer
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
renWin.SetSize(400, 400)
puzzle = vtk.vtkSpherePuzzle()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(puzzle.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
arrows = vtk.vtkSpherePuzzleArrows()
mapper2 = vtk.vtkPolyDataMapper()
mapper2.SetInputConnection(arrows.GetOutputPort())
actor2 = vtk.vtkActor()
actor2.SetMapper(mapper2)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(actor)
ren1.AddActor(actor2)
ren1.SetBackground(0.1, 0.2, 0.4)
LastVal = -1
def MotionCallback (x, y):
global LastVal
WindowY = 400
y = WindowY - y
z = ren1.GetZ(x, y)
ren1.SetDisplayPoint(x, y, z)
ren1.DisplayToWorld()
pt = ren1.GetWorldPoint()
print pt ###############
x = pt[0]
y = pt[1]
z = pt[2]
val = puzzle.SetPoint(x, y, z)
if (val != LastVal):
renWin.Render()
LastVal = val
def ButtonCallback (x, y):
WindowY = 400
y = WindowY - y
z = ren1.GetZ(x, y)
ren1.SetDisplayPoint(x, y, z)
ren1.DisplayToWorld()
pt = ren1.GetWorldPoint()
# print pt
x = pt[0]
y = pt[1]
z = pt[2]
i = 0
while i <= 100:
puzzle.SetPoint(x, y, z)
puzzle.MovePoint(i)
renWin.Render()
i += 5
renWin.Render()
cam = ren1.GetActiveCamera()
cam.Elevation(-40)
ButtonCallback(261, 272)
arrows.SetPermutation(puzzle)
renWin.Render()
iren.Initialize()
#iren.Start()
|
timkrentz/SunTracker
|
IMU/VTK-6.2.0/Filters/Modeling/Testing/Python/TestSpherePuzzleArrows.py
|
Python
|
mit
| 1,865
|
[
"VTK"
] |
31505c93199947928204cf9e460cc45b404bed9826630a892b869222dd0c14bb
|
#
# Author: Henrique Pereira Coutada Miranda
# Run a GW calculation using yambo
#
from __future__ import print_function
from builtins import range
from yambopy import *
from qepy import *
import argparse
yambo = "yambo"
p2y = "p2y"
folder='bse'
def doublegrid():
global folder
folder = "%s_dbg"%folder
database()
#check if the nscf cycle is present
if os.path.isdir('nscf_double/mos2.save'):
print('nscf_double calculation found!')
else:
print('nscf_double calculation not found!')
exit()
#check if the SAVE folder is present
if not os.path.isdir('database_double/SAVE'):
if not os.path.isdir('database_double'):
os.mkdir('database_double')
print('preparing yambo database')
# we don't need to read the wavefunctions for the double grid
os.system('cd nscf_double/mos2.save; %s -w > p2y.log'%p2y)
os.system('cd nscf_double/mos2.save; %s > yambo.log'%yambo)
os.system('mv nscf_double/mos2.save/SAVE database_double')
#copy databases
if not os.path.isdir(folder):
os.mkdir(folder)
os.system('cp -r database/SAVE %s'%folder)
#initialize the double grid
print("creating double grid")
f = open('%s/ypp.in'%folder,'w')
f.write("""kpts_map
%DbGd_DB1_paths
"../database_double"
%""")
f.close()
os.system('cd %s; ypp'%folder)
def database():
#check if the nscf cycle is present
if os.path.isdir('nscf/mos2.save'):
print('nscf calculation found!')
else:
print('nscf calculation not found!')
#check if the SAVE folder is present
if not os.path.isdir('database/SAVE'):
if not os.path.isdir('database'):
os.mkdir('database')
print('preparing yambo database')
# we don't need to read the wavefunctions for the double grid
os.system('cd nscf/mos2.save; %s > p2y.log'%p2y)
os.system('cd nscf/mos2.save; %s > yambo.log'%yambo)
os.system('mv nscf/mos2.save/SAVE database')
#copy databases
if not os.path.isdir(folder):
os.mkdir(folder)
os.system('cp -r database/SAVE %s'%folder)
def run():
database()
#check if the SAVE folder is present
if not os.path.isdir('database/SAVE'):
if not os.path.isdir('database'):
os.mkdir('database')
print('preparing yambo database')
os.system('cd nscf/mos2.save; %s > p2y.log'%p2y)
os.system('cd nscf/mos2.save; %s > yambo.log'%yambo)
os.system('mv nscf/mos2.save/SAVE database')
#create the yambo input file
y = YamboIn('%s -b -o b -k sex -y d -V all'%yambo,folder=folder)
y['FFTGvecs'] = [20,'Ry']
y['NGsBlkXs'] = [1,'Ry']
y['BndsRnXs'] = [1,40]
y['BSEBands'] = [8,11]
y['BEnSteps'] = [500,'']
y['BEnRange'] = [[0.0,6.0],'eV']
y.arguments.append('WRbsWF')
y.write('%s/yambo_run.in'%folder)
print('running yambo')
os.system('cd %s; %s -F yambo_run.in -J yambo'%(folder,yambo))
def analyse():
#pack in a json file
y = YamboOut('bse')
y.pack()
#get the absorption spectra
a = YamboBSEAbsorptionSpectra('yambo',path='bse')
excitons = a.get_excitons(min_intensity=0.5,max_energy=5,Degen_Step=0.001)
print( "nexcitons: %d"%len(excitons) )
print( "excitons:" )
print( excitons )
a.get_wavefunctions(Degen_Step=0.001,repx=list(range(-1,2)),repy=list(range(-1,2)),repz=list(range(1)),
Cells=[13,13,1],Hole=[0,0,9+.5], FFTGvecs=10,wf=True)
a.write_json()
if __name__ == '__main__':
#parse options
parser = argparse.ArgumentParser(description='Test the yambopy script.')
parser.add_argument('-r', '--run', action="store_true", help='run the BSE calculation')
parser.add_argument('-dg' ,'--doublegrid', action="store_true", help='Use double grid')
parser.add_argument('-a', '--analyse', action="store_true", help='plot the results')
args = parser.parse_args()
if args.doublegrid:
doublegrid()
if args.run: run()
if args.analyse: analyse()
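# Typical invocation sketch (an assumption based on the argparse options
# above; the Quantum ESPRESSO scf/nscf runs must already exist):
#
#   python bse_mos2.py -r        # build the yambo database and run the BSE
#   python bse_mos2.py -dg -r    # same, mapped onto the double k-point grid
#   python bse_mos2.py -a        # pack the results and analyse the excitons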
|
henriquemiranda/yambo-py
|
tutorial/mos2/bse_mos2.py
|
Python
|
bsd-3-clause
| 4,093
|
[
"Yambo"
] |
ade5c6561fd66873a40f35a86582f9936447ade18f37d61dba751b9cac38e99f
|
#!/usr/bin/env python
from __future__ import division
from optparse import OptionParser
import rospy
import rosparam
import copy
# import cv: open cv 1 not used
import cv2
import numpy as np
import threading
import dynamic_reconfigure.server
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
from std_msgs.msg import Float32, Header, String
from multi_tracker.msg import Contourinfo, Contourlist, DeltaVid
from multi_tracker.msg import Trackedobject, Trackedobjectlist
from multi_tracker.srv import resetBackgroundService
import time
import os
import image_processing
import matplotlib.pyplot as plt
# for basler ace cameras, use camera_aravis
# https://github.com/ssafarik/camera_aravis
# rosrun camera_aravis camnode
# default image: /camera/image_raw
# for firefley cameras, camera1394 does not provide timestamps but otherwise works
# use point grey drivers
# http://wiki.ros.org/pointgrey_camera_driver
# rosrun pointgrey_camera_driver camera_node
# default image: /camera/image_mono
# The main tracking class, a ROS node
class Compressor:
def __init__(self, nodenum):
'''
Default image_topic for:
Basler ace cameras with camera_aravis driver: camera/image_raw
Pt Grey Firefly cameras with pt grey driver : camera/image_mono
'''
# default parameters (parameter server overides them)
self.nodenum = nodenum
self.params = { 'image_topic' : '/camera/image_raw',
'threshold' : 10,
'camera_encoding' : 'mono8', # fireflies are bgr8, basler gige cams are mono8
'max_change_in_frame' : 0.2,
'roi_l' : 0,
'roi_r' : -1,
'roi_b' : 0,
'roi_t' : -1,
'circular_mask_x' : 'none',
'circular_mask_y' : 'none',
'circular_mask_r' : 'none',
}
for parameter, value in self.params.items():
try:
p = '/multi_tracker/' + nodenum + '/delta_video/' + parameter
self.params[parameter] = rospy.get_param(p)
except:
print 'Using default parameter: ', parameter, ' = ', value
# initialize the node
rospy.init_node('delta_compressor_' + nodenum)
self.nodename = rospy.get_name().rstrip('/')
self.time_start = time.time()
# experiment basename
self.experiment_basename = rospy.get_param('/multi_tracker/' + nodenum + '/experiment_basename', 'none')
if self.experiment_basename == 'none':
self.experiment_basename = time.strftime("%Y%m%d_%H%M%S_N" + nodenum, time.localtime())
# Publishers - publish pixel changes
self.pubDeltaVid = rospy.Publisher('/multi_tracker/' + nodenum + '/delta_video', DeltaVid, queue_size=30)
# background reset service
self.reset_background_flag = False
self.reset_background_service = rospy.Service('/multi_tracker/' + nodenum + '/reset_background', resetBackgroundService, self.reset_background)
self.cvbridge = CvBridge()
self.imgScaled = None
self.backgroundImage = None
self.background_img_filename = 'none'
# buffer locking
self.lockBuffer = threading.Lock()
self.image_buffer = []
self.framestamp = None
self.current_background_img = 0
# Subscriptions - subscribe to images, and tracked objects
self.image_mask = None
sizeImage = 128+1024*1024*3 # Size of header + data.
self.subImage = rospy.Subscriber(self.params['image_topic'], Image, self.image_callback, queue_size=5, buff_size=2*sizeImage, tcp_nodelay=True)
def reset_background(self, service_call):
self.reset_background_flag = True
return 1
def image_callback(self, rosimg):
with self.lockBuffer:
self.image_buffer.append(rosimg)
def process_image_buffer(self, rosimg):
if self.framestamp is not None:
self.dtCamera = (rosimg.header.stamp - self.framestamp).to_sec()
else:
self.dtCamera = 0.03
self.framenumber = rosimg.header.seq
self.framestamp = rosimg.header.stamp
# Convert the image.
try:
img = self.cvbridge.imgmsg_to_cv2(rosimg, 'passthrough') # might need to change to bgr for color cameras
except CvBridgeError, e:
rospy.logwarn ('Exception converting background image from ROS to opencv: %s' % e)
img = np.zeros((320,240))
self.imgScaled = img #[self.params['roi_b']:self.params['roi_t'], self.params['roi_l']:self.params['roi_r']]
self.shapeImage = self.imgScaled.shape # (height,width)
# add roi as mask
if self.image_mask is None:
self.image_mask = np.zeros_like(self.imgScaled)
self.image_mask[self.params['roi_b']:self.params['roi_t'], self.params['roi_l']:self.params['roi_r']] = 1
self.imgScaled = self.image_mask*self.imgScaled
if self.params['circular_mask_x'] != 'none':
if self.image_mask is None:
self.image_mask = np.zeros_like(self.imgScaled)
cv2.circle(self.image_mask,(self.params['circular_mask_x'], self.params['circular_mask_y']),int(self.params['circular_mask_r']),[1,1,1],-1)
self.imgScaled = self.image_mask*self.imgScaled
########### image processing function ##############################################################
# If there is no background image, grab one, and move on to the next frame
if self.backgroundImage is None:
self.backgroundImage = copy.copy(self.imgScaled)
self.background_img_filename = self.experiment_basename + '_deltavideo_bgimg_' + time.strftime("%Y%m%d_%H%M.png", time.localtime())
data_directory = os.path.expanduser( rospy.get_param('/multi_tracker/' + self.nodenum + '/data_directory') )
self.background_img_filename = os.path.join(data_directory, self.background_img_filename)
cv2.imwrite(self.background_img_filename, self.backgroundImage)
self.current_background_img += 1
return
if self.reset_background_flag:
self.backgroundImage = copy.copy(self.imgScaled)
self.background_img_filename = time.strftime("%Y%m%d_%H%M_deltavideo_bgimg_N" + self.nodenum, time.localtime()) + '.png'
data_directory = os.path.expanduser( rospy.get_param('/multi_tracker/' + self.nodenum + '/data_directory') )
self.background_img_filename = os.path.join(data_directory, self.background_img_filename)
cv2.imwrite(self.background_img_filename, self.backgroundImage)
self.current_background_img += 1
self.reset_background_flag = False
return
# Absdiff
self.absdiff = cv2.absdiff(self.imgScaled, self.backgroundImage)
changed_pixels = np.where(self.absdiff>self.params['threshold'])
delta_msg = DeltaVid()
header = Header(stamp=self.framestamp,frame_id=str(self.framenumber))
delta_msg.header = header
delta_msg.background_image = self.background_img_filename
if len(changed_pixels[0]) > 0:
delta_msg.xpixels = changed_pixels[0].tolist()
delta_msg.ypixels = changed_pixels[1].tolist()
delta_msg.values = self.imgScaled[changed_pixels].reshape(len(changed_pixels[0])).tolist()
else:
delta_msg.xpixels = [0]
delta_msg.ypixels = [0]
#delta_msg.values = [0]
self.pubDeltaVid.publish(delta_msg)
# if the thresholded absolute difference is too large, reset the background
if len(changed_pixels[0]) / (self.absdiff.shape[0]*self.absdiff.shape[1])>self.params['max_change_in_frame']:
self.reset_background_flag = True
#self.backgroundImage[delta_msg.xpixels, delta_msg.ypixels] = delta_msg.values
def Main(self):
while (not rospy.is_shutdown()):
t = time.time() - self.time_start
if t > 24*3600:
cv2.destroyAllWindows()
return
with self.lockBuffer:
time_now = rospy.Time.now()
if len(self.image_buffer) > 0:
self.process_image_buffer(self.image_buffer.pop(0))
pt = (rospy.Time.now()-time_now).to_sec()
if len(self.image_buffer) > 3:
rospy.logwarn("Delta video processing time exceeds acquisition rate. Processing time: %f, Buffer: %d", pt, len(self.image_buffer))
cv2.destroyAllWindows()
#####################################################################################################
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("--nodenum", type="str", dest="nodenum", default='1',
help="node number, for example, if running multiple tracker instances on one computer")
(options, args) = parser.parse_args()
compressor = Compressor(options.nodenum)
compressor.Main()
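# Decoding sketch (an assumption that mirrors the encoder above; not part of
# this node): a subscriber can rebuild full frames from DeltaVid messages by
# painting the changed pixels onto the referenced background image.
#
#   import cv2
#   def reconstruct_frame(delta_msg, background_cache={}):
#       bg = background_cache.setdefault(
#           delta_msg.background_image,
#           cv2.imread(delta_msg.background_image, cv2.IMREAD_GRAYSCALE))
#       frame = bg.copy()
#       if delta_msg.values:
#           frame[delta_msg.xpixels, delta_msg.ypixels] = delta_msg.values
#       return frame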
|
florisvb/multi_tracker
|
nodes/delta_video_simplebuffer.py
|
Python
|
mit
| 9,513
|
[
"Firefly"
] |
52dde3f4c5a5c950124675743edfffef24c2b249ba9d1ac73e4d940bfe3e31a8
|
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# If extension plugins are installed in pyscf, search and load the pbc
# submodule in all plugins if applicable
if len(__import__('pyscf').__path__) > 1:
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
from pyscf.pbc import gto
from pyscf.pbc import scf
#from pyscf.pbc import tools
DEBUG = False
M = gto.M
|
sunqm/pyscf
|
pyscf/pbc/__init__.py
|
Python
|
apache-2.0
| 943
|
[
"PySCF"
] |
f30fdd7b0c166a9f187489e01235c54974aa5bf7d32bdfe77edb0d5e8f32090f
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A MultivariateNormalLinearOperator parametrized by a precision."""
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import invert
from tensorflow_probability.python.bijectors import scale_matvec_linear_operator
from tensorflow_probability.python.bijectors import shift as shift_bijector
from tensorflow_probability.python.distributions import mvn_diag
from tensorflow_probability.python.distributions import transformed_distribution
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
__all__ = ['MultivariateNormalPrecisionFactorLinearOperator']
class MultivariateNormalPrecisionFactorLinearOperator(
transformed_distribution.TransformedDistribution):
"""A multivariate normal on `R^k`, parametrized by a precision factor.
The multivariate normal distribution is defined over `R^k` and parameterized
by a (batch of) length-`k` `loc` vector (aka "mu") and a (batch of) `k x k`
`precision_factor` `LinearOperator`, and optionally a `precision`.
The precision of this distribution is the inverse of its covariance matrix.
The `precision_factor` is a matrix such that,
```
precision = precision_factor @ precision_factor.T,
```
where `@` denotes matrix-multiplication and `.T` transposition.
Providing `precision` may improve efficiency in computation of the log
probability density. This will be the case if matrix-vector products with
the `precision` linear operator are more efficient than with
`precision_factor`. For example, if `precision` has a sparse structure
`D + X @ X.T`, where `D` is diagonal and `X` is low rank, then one may use a
`LinearOperatorLowRankUpdate` for the `precision` arg.
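A hedged sketch (illustrative values only; not from the original docs):
```python
d_op = tf.linalg.LinearOperatorDiag(tf.ones([3]) * 2.)  # D, diagonal part
x_lr = tf.random.normal([3, 1])                         # X, low-rank part
precision = tf.linalg.LinearOperatorLowRankUpdate(base_operator=d_op, u=x_lr)
# Any valid factor pairs with it; here, a dense Cholesky of `precision`.
precision_factor = tf.linalg.LinearOperatorLowerTriangular(
tf.linalg.cholesky(precision.to_dense()))
```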
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, precision_factor) = exp(-0.5 ||y||**2) / Z,
y = precision_factor @ (x - loc),
Z = (2 pi)**(0.5 k) / |det(precision_factor)|,
```
where:
* `loc` is a vector in `R^k`,
* `Z` denotes the normalization constant, and,
* `||y||**2` denotes the squared Euclidean norm of `y`.
#### Examples
```python
tfd_e = tfp.experimental.distributions
# Initialize a single 3-variate Gaussian.
mu = [1., 2, 3]
cov = [[ 0.36, 0.12, 0.06],
[ 0.12, 0.29, -0.13],
[ 0.06, -0.13, 0.26]]
precision = tf.linalg.inv(cov)
precision_factor = tf.linalg.cholesky(precision)
mvn = tfd_e.MultivariateNormalPrecisionFactorLinearOperator(
loc=mu,
precision_factor=tf.linalg.LinearOperatorFullMatrix(precision_factor),
)
# Covariance is equal to `cov`.
mvn.covariance()
# ==> [[ 0.36, 0.12, 0.06],
# [ 0.12, 0.29, -0.13],
# [ 0.06, -0.13, 0.26]]
# Compute the pdf of an `R^3` observation; return a scalar.
mvn.prob([-1., 0, 1]) # shape: []
# Initialize a 2-batch of 3-variate Gaussians.
mu = [[1., 2, 3],
[11, 22, 33]] # shape: [2, 3]
variance = [[1., 2, 3],
[0.5, 1, 1.5]] # shape: [2, 3]
inverse_variance = 1. / tf.constant(variance)
diagonal_precision_factors = tf.sqrt(inverse_variance)
mvn = tfd_e.MultivariateNormalPrecisionFactorLinearOperator(
loc=mu,
precision_factor=tf.linalg.LinearOperatorDiag(diagonal_precision_factors),
)
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[-0.9, 0, 0.1],
[-10, 0, 9]] # shape: [2, 3]
mvn.prob(x) # shape: [2]
```
"""
def __init__(self,
loc=None,
precision_factor=None,
precision=None,
validate_args=False,
allow_nan_stats=True,
name='MultivariateNormalPrecisionFactorLinearOperator'):
"""Initialize distribution.
Precision is the inverse of the covariance matrix, and
`precision_factor @ precision_factor.T = precision`.
The `batch_shape` of this distribution is the broadcast of
`loc.shape[:-1]` and `precision_factor.batch_shape`.
The `event_shape` of this distribution is determined by `loc.shape[-1:]`,
OR `precision_factor.shape[-1:]`, which must match.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
precision_factor: Required nonsingular `tf.linalg.LinearOperator` instance
with same `dtype` and shape compatible with `loc`.
precision: Optional square `tf.linalg.LinearOperator` instance with same
`dtype` and shape compatible with `loc` and `precision_factor`.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
if precision_factor is None:
raise ValueError(
'Argument `precision_factor` must be provided. Found `None`')
dtype = dtype_util.common_dtype([loc, precision_factor, precision],
dtype_hint=tf.float32)
loc = tensor_util.convert_nonref_to_tensor(loc, dtype=dtype, name='loc')
self._loc = loc
self._precision_factor = precision_factor
self._precision = precision
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, precision_factor)
# Proof of factors (used throughout code):
# Let,
# C = covariance,
# P = inv(covariance) = precision
# P = F @ F.T (so F is the `precision_factor`).
#
# Then, the log prob term is
# x.T @ inv(C) @ x
# = x.T @ P @ x
# = x.T @ F @ F.T @ x
# = || F.T @ x ||**2
# notice it involves F.T, which is why we set adjoint=True in various
# places.
#
# Also, if w ~ Normal(0, I), then we can sample by setting
# x = inv(F.T) @ w + loc,
# since then
# E[(x - loc) @ (x - loc).T]
# = E[inv(F.T) @ w @ w.T @ inv(F)]
# = inv(F.T) @ inv(F)
# = inv(F @ F.T)
# = inv(P)
# = C.
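# Illustrative check of the identity above (hypothetical, commented-out;
# nothing here is executed by this class):
# F = np.linalg.cholesky(P); w = np.random.randn(k)
# x = np.linalg.solve(F.T, w) + loc
# stacking many such draws gives an empirical covariance approaching inv(P).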
if precision is not None:
precision.shape.assert_is_compatible_with(precision_factor.shape)
bijector = invert.Invert(
scale_matvec_linear_operator.ScaleMatvecLinearOperator(
scale=precision_factor,
validate_args=validate_args,
adjoint=True)
)
if loc is not None:
shift = shift_bijector.Shift(shift=loc, validate_args=validate_args)
bijector = shift(bijector)
super(MultivariateNormalPrecisionFactorLinearOperator, self).__init__(
distribution=mvn_diag.MultivariateNormalDiag(
loc=tf.zeros(
ps.concat([batch_shape, event_shape], axis=0), dtype=dtype)),
bijector=bijector,
validate_args=validate_args,
name=name)
self._parameters = parameters
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
return dict(
loc=parameter_properties.ParameterProperties(event_ndims=1),
precision_factor=parameter_properties.BatchedComponentProperties(),
precision=parameter_properties.BatchedComponentProperties())
@property
def loc(self):
# Note: if the `loc` kwarg is None, this is `None`.
return self._loc
@property
def precision_factor(self):
return self._precision_factor
@property
def precision(self):
return self._precision
experimental_is_sharded = False
def _mean(self):
shape = tensorshape_util.concatenate(self.batch_shape, self.event_shape)
has_static_shape = tensorshape_util.is_fully_defined(shape)
if not has_static_shape:
shape = tf.concat([
self.batch_shape_tensor(),
self.event_shape_tensor(),
], 0)
if self.loc is None:
return tf.zeros(shape, self.dtype)
return tf.broadcast_to(self.loc, shape)
def _covariance(self):
if self._precision is None:
inv_precision_factor = self._precision_factor.inverse()
cov = inv_precision_factor.matmul(inv_precision_factor, adjoint=True)
else:
cov = self._precision.inverse()
return cov.to_dense()
def _variance(self):
if self._precision is None:
precision = self._precision_factor.matmul(
self._precision_factor, adjoint_arg=True)
else:
precision = self._precision
variance = precision.inverse().diag_part()
return tf.broadcast_to(
variance,
ps.broadcast_shape(ps.shape(variance),
ps.shape(self.loc)))
def _stddev(self):
return tf.sqrt(self._variance())
def _mode(self):
return self._mean()
def _log_prob_unnormalized(self, value):
"""Unnormalized log probability.
Costs a matvec and reduce_sum over a squared (batch of) vector(s).
Args:
value: Floating point `Tensor`.
Returns:
Floating point `Tensor` with batch shape.
"""
# We override log prob functions in order to make use of self._precision.
if self._loc is None:
dx = value
else:
dx = value - self._loc
if self._precision is None:
# See "Proof of factors" above for use of adjoint=True.
dy = self._precision_factor.matvec(dx, adjoint=True)
return -0.5 * tf.reduce_sum(dy**2, axis=-1)
return -0.5 * tf.einsum('...i,...i->...', dx, self._precision.matvec(dx))
def _log_prob(self, value):
"""Log probability of multivariate normal.
Costs a log_abs_determinant, matvec, and a reduce_sum over a squared
(batch of) vector(s)
Args:
value: Floating point `Tensor`.
Returns:
Floating point `Tensor` with batch shape.
"""
dim = self.precision_factor.domain_dimension_tensor()
return (ps.cast(-0.5 * np.log(2 * np.pi), self.dtype) *
ps.cast(dim, self.dtype) +
# Notice the sign on the LinearOperator.log_abs_determinant is
# positive, since it is precision_factor not scale.
self._precision_factor.log_abs_determinant() +
self._log_prob_unnormalized(value))
|
tensorflow/probability
|
tensorflow_probability/python/experimental/distributions/mvn_precision_factor_linop.py
|
Python
|
apache-2.0
| 11,578
|
[
"Gaussian"
] |
7ae6e0a91f670692f32f39eaa088b399936d7bb8cdd4261c7cb03cf7e4fdad75
|
# coding=utf-8
# Root node for octopus in etcd
ROOT_NODE = '/octopus'
# etcd node for service information
SERVICE_NODE = ROOT_NODE + '/service'
# etcd node for config information
CONFIG_NODE = ROOT_NODE + '/config'
# etcd node for locker information
LOCKER_NODE = ROOT_NODE + '/locker'
LOCKER_NODE = ROOT_NODE + '/locker'
# logger_name
LOGGER_NAME = 'octopus'
class SERVICE_ACTION:
"""
Service change actions
"""
ADD = 'add'
DEL = 'del'
UPDATE = 'update'
NONE = 'none'
class CONFIG_ACTION:
"""
config action
"""
ADD = 'add'
DEL = 'del'
UPDATE = 'update'
NONE = 'none'
# Interval between refreshes of the service node
SERVICE_REFRESH_INTERVAL = 20
# TTL (expiry time) of the service node
SERVICE_TTL = 30
# Timeout for watch operations
WATCH_TIMEOUT = 10
# Server side: interval between attempts to reconnect to etcd
ETCD_RECONNECT_INTERVAL = 3
# Number of attempts to connect to etcd during initialization
ETCD_RECONNECT_MAX_RETRY_INIT = 5
# Wait time for a connection attempt
ETCD_CONNECT_TIMEOUT = 3
# election
class Election:
MAX_RETRY = 3 # maximum number of attempts during an election
TIMEOUT = 3 # wait timeout during an election
LOCKER_TTL = 5 # TTL of the locker used during an election
LOCK_INTERVAL = 3 # interval between locker refreshes
|
ideascf/octopus
|
constant.py
|
Python
|
mit
| 1,208
|
[
"Octopus"
] |
5573b8ba8bc9ff30015b85facfbc48b6d0d4352798062d0191f97dd8fbbf965c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
=================================================
Parsing Service Description Tables in DVB streams
=================================================
ParseServiceDescriptionTable parses a reconstructed PSI table from a DVB MPEG
Transport Stream, and outputs a dictionary containing the data in the table.
The purpose of the SDT and details of the fields within it are defined in the
DVB SI specification, including the possible 'descriptor' fields that feature in
the table:
- ETSI EN 300 468
"Digital Video Broadcasting (DVB); Specification for Service Information (SI)
in DVB systems"
ETSI / EBU (DVB group)
See Kamaelia.Support.DVB.Descriptors for information on how they are parsed.
Example Usage
~~~~~~~~~~~~~
A simple pipeline to receive, parse and display the Service Description Table
applying to the transport stream (MUX) being received ("actual TS")::
FREQUENCY = 505.833330
feparams = {
"inversion" : dvb3.frontend.INVERSION_AUTO,
"constellation" : dvb3.frontend.QAM_16,
"code_rate_HP" : dvb3.frontend.FEC_3_4,
"code_rate_LP" : dvb3.frontend.FEC_3_4,
}
SID_Actual_PID = 0x11
Pipeline( DVB_Multiplex(FREQUENCY, [SID_Actual_PID], feparams),
DVB_Demuxer({ SID_Actual_PID:["outbox"]}),
ReassemblePSITables(),
ParseServiceDescriptionTable_ActualTS(),
PrettifyServiceDescriptionTable(),
ConsoleEchoer(),
).run()
A simple pipeline to receive and parse the Service Description Table then
convert it to a simple list mapping service names to service ids::
Pipeline( DVB_Multiplex(FREQUENCY, [SID_Actual_PID], feparams),
DVB_Demuxer({ SID_Actual_PID:["outbox"]}),
ReassemblePSITables(),
ParseServiceDescriptionTable_ActualTS(),
SDT_to_SimpleServiceList(),
ConsoleEchoer(),
).run()
ParseServiceDescriptionTable
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Behaviour
---------
At initialisation, specify whether you want ParseServiceDescriptionTables to
parse 'actual' or 'other' tables (or both). 'Actual' tables describe services
within the actual transport stream the table is in. 'Other' tables describe
services carried in other transport streams - ie. broadcast in a different MUX
in the same network. For example::
ParseServiceDescriptionTable(acceptTables = {0x42:"ACTUAL",0x46:"OTHER"})
There are shorthands available for the various combinations::
ParseServiceDescriptionTable_ActualTS()
ParseServiceDescriptionTable_OtherTS()
ParseServiceDescriptionTable_ActualAndOtherTS():
Send reconstructed PSI table 'sections' to the "inbox" inbox. When all sections
of the table have arrived, ParseServiceDescriptionTable will parse the table and
send it out of its "outbox" outbox.
If the table is unchanged since last time it was parsed, then it will not be
sent out. Parsed tables are only sent out when they are new or have just
changed.
The parsed table is sent out as a dictionary data structure, similar to this
(the 'services' list here is abridged for brevity)::
{
'actual_other' : 'ACTUAL',
'table_type' : 'SDT',
'current' : 1,
'original_network_id' : 9018,
'table_id' : 66,
'services': {
4228: { 'running_status' : 4,
'free_CA_mode' : 0,
'eit_present_following': 1,
'eit_schedule' : 2,
'descriptors': [
( 72, { 'type': 'service',
'service_name': 'BBC TWO',
'service_type': 'digital television service',
'service_provider_name': 'BBC'
} ),
(115, { 'type': 'UNKNOWN',
'contents': 'fp.bbc.co.uk'
} )
] },
4164: { 'running_status' : 4,
'free_CA_mode' : 0,
'eit_present_following': 1,
'eit_schedule' : 2,
'descriptors': [
( 72, { 'type': 'service',
'service_name': 'BBC ONE',
'service_type': 'digital television service',
'service_provider_name': 'BBC'
} ),
(115, { 'type': 'UNKNOWN',
'contents': 'fp.bbc.co.uk'
} )
] },
.....
4671: { 'running_status': 4,
'free_CA_mode' : 0,
'eit_present_following': 1,
'eit_schedule' : 2,
'descriptors': [
( 72, { 'type': 'service',
'service_name': 'CBBC Channel',
'service_type': 'digital television service',
'service_provider_name': 'BBC'
} ),
(115, { 'type': 'UNKNOWN',
'contents': 'fp.bbc.co.uk'
} )
] }
},
'transport_stream_id': 4100
}
This table contains information about the services within the transport stream.
It lists the services (channels) including their names, types, and the fact that
there is now & next data (eit_present_following) and Electronic Programme Guide
(eit_schedule) data available for each of them.
This is part of an instantaneous snapshot of the SDT broadcast from Crystal
Palace MUX 1 (505.8MHz) in the UK on 21st Dec 2006.
If this data is sent on through a PrettifyServiceDescriptionTable component,
then the equivalent output is a string containing the following (again, abridged
here for brevity)::
Table ID : 66
Table is valid for : CURRENT (valid)
Actual or Other n/w: ACTUAL
Transport stream id: 4100
Original network id: 9018
Services:
Service id : 4228
EIT present_following? : YES
EIT schedule? : YES
Running status : 4 (RUNNING)
Scrambled? : NO
Service descriptors:
Descriptor 0x48 : service
service_name : 'BBC TWO'
service_provider_name : 'BBC'
service_type : 'digital television service'
Descriptor 0x73 : UNKNOWN
contents : 'fp.bbc.co.uk'
Service id : 4164
EIT present_following? : YES
EIT schedule? : YES
Running status : 4 (RUNNING)
Scrambled? : NO
Service descriptors:
Descriptor 0x48 : service
service_name : 'BBC ONE'
service_provider_name : 'BBC'
service_type : 'digital television service'
Descriptor 0x73 : UNKNOWN
contents : 'fp.bbc.co.uk'
.....
Service id : 4671
EIT present_following? : YES
EIT schedule? : YES
Running status : 4 (RUNNING)
Scrambled? : NO
Service descriptors:
Descriptor 0x48 : service
service_name : 'CBBC Channel'
service_provider_name : 'BBC'
service_type : 'digital television service'
Descriptor 0x73 : UNKNOWN
contents : 'fp.bbc.co.uk'
ParseServiceDescriptionTable can collect the sections of, and then parse, both
'current' and 'next' tables simultaneously.
See the "DVB SI" specifications for information on the purposes of the
descriptor fields that appear in various parts of this table.
See Kamaelia.Support.DVB.Descriptors for information on how each is parsed.
If a shutdownMicroprocess or producerFinished message is received on the
"control" inbox, then it will immediately be sent on out of the "signal" outbox
and the component will then immediately terminate.
How does it work?
-----------------
ParseServiceDescriptionTable logs all the table sections it receives, until it
determines it has the complete set; then it parses them.
If the version number field in any table section changes, then the log is
cleared, and the component starts collecting the sections again from scratch.
SDT_to_SimpleServiceList
~~~~~~~~~~~~~~~~~~~~~~~~
Behaviour
---------
Send parsed service description tables to this component's "inbox" inbox and
a dictionary mapping service names to service ids will be sent out the "outbox"
outbox. For example::
{ 'BBCi' : 4479,
'BBC ONE' : 4164,
'BBC TWO' : 4228,
'CBBC Channel': 4671,
'BBC NEWS 24' : 4415,
'BBC THREE' : 4351
}
If a shutdownMicroprocess or producerFinished message is received on the
"control" inbox, then it will immediately be sent on out of the "signal" outbox
and the component will then immediately terminate.
"""
from Axon.Component import component
from Axon.Ipc import producerFinished,shutdownMicroprocess
from Kamaelia.Support.DVB.Descriptors import parseDescriptor
from Kamaelia.Support.DVB.CRC import dvbcrc
SDT_PID = 0x11
def ParseServiceDescriptionTable_ActualTS():
"""\
ParseServiceDescriptionTable_ActualTS() -> new ParseServiceDescriptionTable component.
Instantiates a ParseServiceDescriptionTable component configured to parse
'ACTUAL TS' tables only (table id 0x42)
"""
return ParseServiceDescriptionTable(acceptTables = {0x42:"ACTUAL"})
def ParseServiceDescriptionTable_OtherTS():
"""\
ParseServiceDescriptionTable_OtherTS() -> new ParseServiceDescriptionTable component.
Instantiates a ParseServiceDescriptionTable component configured to parse
'OTHER TS' tables only (table id 0x46)
"""
return ParseServiceDescriptionTable(acceptTables = {0x46:"OTHER"})
def ParseServiceDescriptionTable_ActualAndOtherTS():
"""\
ParseServiceDescriptionTable_ActualAndOtherTS() -> new ParseServiceDescriptionTable component.
Instantiates a ParseServiceDescriptionTable component configured to parse
both 'ACTUAL' and 'OTHER TS' tables (table ids 0x42 and 0x46)
"""
return ParseServiceDescriptionTable(acceptTables = {0x42:"ACTUAL",0x46:"OTHER"})
class ParseServiceDescriptionTable(component):
"""\
ParseServiceDescriptionTable([acceptTables]) -> new ParseServiceDescriptionTable component.
Send reconstructed PSI table sections to the "inbox" inbox. When a complete
table is assembled and parsed, the result is sent out of the "outbox" outbox
as a dictionary.
Doesn't emit anything again until the version number of the table changes.
Keyword arguments::
- acceptTables - dict of (table_id,string_description) mappings for tables to be accepted (default={0x42:"ACTUAL",0x46:"OTHER"})
"""
Inboxes = { "inbox" : "DVB PSI Packets from a single PID containing SDT table sections",
"control" : "Shutdown signalling",
}
Outboxes = { "outbox" : "Parsed PMT table (only when it changes)",
"signal" : "Shutdown signalling",
}
def __init__(self, acceptTables = {0x42:"ACTUAL",0x46:"OTHER"}):
super(ParseServiceDescriptionTable,self).__init__()
self.acceptTables = acceptTables
def parseTable(self, index, sections):
(table_id, current_next, transport_stream_id, original_network_id) = index
msg = { "table_type" : "SDT",
"table_id" : table_id,
"actual_other" : self.acceptTables[table_id],
"current" : current_next,
"transport_stream_id" : transport_stream_id,
"original_network_id" : original_network_id,
}
services = {}
for (data,section_length) in sections:
i=11
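# Descriptive note: bytes 0..10 are the SDT section header; the loop below
# walks the service loop entries and stops 4 bytes early to skip the
# trailing CRC_32.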
while i < section_length+3-4:
service_id = (ord(data[i])<<8) + ord(data[i+1])
service = {}
lo = ord(data[i+2])
service['eit_schedule'] = lo & 0x02
service['eit_present_following'] = lo & 0x01
hi = ord(data[i+3])
service['running_status'] = hi >> 5
service['free_CA_mode'] = hi & 0x10
descriptors_length = ((hi<<8) + ord(data[i+4])) & 0x0fff
i = i + 5
descriptors_end = i + descriptors_length
service['descriptors'] = []
while i < descriptors_end:
descriptor,i = parseDescriptor(i,data)
service['descriptors'].append(descriptor)
services[service_id] = service
msg['services'] = services
return msg
def shutdown(self):
while self.dataReady("control"):
msg = self.recv("control")
self.send(msg,"signal")
if isinstance(msg, (shutdownMicroprocess, producerFinished)):
return True
return False
def main(self):
# initialise buffers
# ...for holding table sections (until we get complete table)
# indexed by (table_id, current_next, transport_stream_id, original_network_id)
sections = {}
latest_versions = {}
last_section_numbers = {}
missing_sections_count = {}
while not self.shutdown():
while self.dataReady("inbox"):
data = self.recv("inbox")
# extract basic info from this PSI packet - enough to work
# out what table it is; what section, and the version
e = [ord(data[i]) for i in range(0,3) ]
table_id = e[0]
if table_id not in self.acceptTables.keys():
continue
syntax = e[1] & 0x80
if not syntax:
continue
section_length = ((e[1]<<8) + e[2]) & 0x0fff
# now we're reasonably certain we've got a correct packet
# we'll convert the rest of the packet
e = [ord(data[i]) for i in range(0,10) ]
version = (e[5] & 0x3e) # no need to >> 1
current_next = e[5] & 0x01
section_number = e[6]
last_section_number = e[7]
transport_stream_id = (e[3]<<8) + e[4]
original_network_id = (e[8]<<8) + e[9]
index = (table_id, current_next, transport_stream_id, original_network_id)
# if version number has changed, flush out all previously fetched tables
crcpass = False
if version != latest_versions.get(index,-1):
if not dvbcrc(data[:3+section_length]):
continue
else:
crcpass = True
latest_versions[index] = version
sections[index] = [None]*(last_section_number+1)
missing_sections_count[index] = last_section_number+1
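# crcpass records that this packet's CRC was already verified when the
# new version was detected, so the store below can skip a second dvbcrc call.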
if sections[index][section_number] is None:
if crcpass or dvbcrc(data[:3+section_length]):
sections[index][section_number] = (data, section_length)
missing_sections_count[index] -= 1
# see if we have all sections of the table
# if we do, send the whole bundle onwards
if missing_sections_count[index] == 0:
table = self.parseTable(index, sections[index])
self.send( table, "outbox")
self.pause()
yield 1
class SDT_to_SimpleServiceList(component):
"""\
SDT_to_SimpleServiceList() -> new SDT_to_SimpleServiceList component.
Converts parsed Service Description Tables to a simplified list of services.
"""
def shutdown(self):
while self.dataReady("control"):
msg = self.recv("control")
self.send(msg,"signal")
if isinstance(msg, (shutdownMicroprocess, producerFinished)):
return True
return False
def main(self):
while not self.shutdown():
while self.dataReady("inbox"):
sdt = self.recv("inbox")
s = dict([(service['descriptors'][0][1]['service_name'], sid) for (sid, service) in sdt['services'].items()])
self.send(s,"outbox")
self.pause()
yield 1
__kamaelia_components__ = ( ParseServiceDescriptionTable,
SDT_to_SimpleServiceList )
__kamaelia_prefabs__ = ( ParseServiceDescriptionTable_ActualTS,
ParseServiceDescriptionTable_OtherTS,
ParseServiceDescriptionTable_ActualAndOtherTS, )
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Util.Console import ConsoleEchoer
from Kamaelia.Device.DVB.Core import DVB_Multiplex, DVB_Demuxer
from Kamaelia.Device.DVB.Parse.ReassemblePSITables import ReassemblePSITables
from Kamaelia.Device.DVB.Parse.PrettifyTables import PrettifyServiceDescriptionTable
import dvb3.frontend
feparams = {
"inversion" : dvb3.frontend.INVERSION_AUTO,
"constellation" : dvb3.frontend.QAM_16,
"code_rate_HP" : dvb3.frontend.FEC_3_4,
"code_rate_LP" : dvb3.frontend.FEC_3_4,
}
Pipeline( DVB_Multiplex(505833330.0/1000000.0, [SDT_PID], feparams),
DVB_Demuxer({ SDT_PID:["outbox"]}),
ReassemblePSITables(),
ParseServiceDescriptionTable_ActualAndOtherTS(),
PrettifyServiceDescriptionTable(),
ConsoleEchoer(),
).run()
|
sparkslabs/kamaelia
|
Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Device/DVB/Parse/ParseServiceDescriptionTable.py
|
Python
|
apache-2.0
| 19,571
|
[
"CRYSTAL"
] |
c2b6893bf70942efa4eaed7aab4bad87267f126785bb5c92951738a36d5b3cae
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-05-19 16:08:47
# @Last modified by: Brian Cherinka
# @Last Modified time: 2017-07-30 19:12:14
from __future__ import print_function, division, absolute_import
from tests.api.conftest import ApiPage
import pytest
@pytest.mark.parametrize('page', [('api', 'PlateView:index')], ids=['plate'], indirect=True)
class TestPlateView(object):
def test_get_map_success(self, page, params):
page.load_page('get', page.url, params=params)
data = 'this is a plate'
page.assert_success(data)
@pytest.mark.parametrize('page', [('api', 'getPlate')], ids=['getPlate'], indirect=True)
class TestGetPlate(object):
@pytest.mark.parametrize('reqtype', [('get'), ('post')])
def test_plate_success(self, galaxy, page, params, reqtype):
params.update({'plateid': galaxy.plate})
data = {'plateid': str(galaxy.plate)}
page.load_page(reqtype, page.url.format(**params), params=params)
page.assert_success(data)
assert data['plateid'] == page.json['data']['plateid']
@pytest.mark.parametrize('plateid, missing, errmsg',
[(None, 'release', 'Missing data for required field.'),
('5000', 'plateid', 'Plateid must be > 6500'),
('84', 'plateid', ['Length must be between 4 and 5.', 'Plateid must be > 6500'])],
ids=['norelease', 'badplate', 'shortplate'])
def test_plate_failures(self, galaxy, page, params, plateid, missing, errmsg):
params.update({'plateid': plateid})
if plateid is None:
page.route_no_valid_params(page.url.format(plateid=galaxy.plate), missing, reqtype='post', errmsg=errmsg)
else:
url = page.url.format(**params)
page.route_no_valid_params(url, missing, reqtype='post', params=params, errmsg=errmsg)
@pytest.mark.parametrize('page', [('api', 'getPlateCubes')], ids=['getPlateCubes'], indirect=True)
class TestGetPlateCubes(object):
@pytest.mark.parametrize('reqtype', [('get'), ('post')])
def test_plate_success(self, galaxy, page, params, reqtype):
params.update({'plateid': galaxy.plate})
#data = {"plateifus": ["8485-1902", "8485-12702", "8485-12701", "8485-1901"]}
data = {'plateifus': [galaxy.plateifu]}
page.load_page(reqtype, page.url.format(**params), params=params)
page.assert_success(data)
@pytest.mark.parametrize('plateid, missing, errmsg',
[(None, 'release', 'Missing data for required field.'),
('5000', 'plateid', 'Plateid must be > 6500'),
('84', 'plateid', ['Length must be between 4 and 5.', 'Plateid must be > 6500'])],
ids=['norelease', 'badplate', 'shortplate'])
def test_plate_failures(self, galaxy, page, params, plateid, missing, errmsg):
params.update({'plateid': plateid})
if plateid is None:
page.route_no_valid_params(page.url.format(plateid=galaxy.plate), missing, reqtype='post', errmsg=errmsg)
else:
url = page.url.format(**params)
page.route_no_valid_params(url, missing, reqtype='post', params=params, errmsg=errmsg)
|
sdss/marvin
|
tests/api/test_plate.py
|
Python
|
bsd-3-clause
| 3,385
|
[
"Brian",
"Galaxy"
] |
03183b7ce6b852cf8f52977226d3f097015d3a5032bf6c5de8f95afc95b19d89
|
"""
Encoding and decoding for DIRAC, Ids:
i -> int
I -> long
f -> float
b -> bool
s -> string
z -> datetime
n -> none
l -> list
t -> tuple
d -> dictionary
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
from past.builtins import long
import six
import datetime
import os
import functools
import inspect
import traceback
from collections import defaultdict
from pprint import pprint
def _ord(char):
""" Convert a single character string to it's byte value
In Python 2 a single byte is represented as a string whereas in Python 3
it is an integer. This function converts it as appropriate.
"""
if six.PY2:
return char
else:
return ord(char)
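# e.g. _ord('i') returns 'i' on Python 2 but 105 on Python 3, mirroring how
# indexing into a bytes object behaves on each version.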
# This is a hack for Python 3 to make it possible to import DEncode
# There is no point in porting DEncode to Python 3 as it will be removed as
# part of the HTTPS transition.
class types(object):
IntType = int
LongType = long if six.PY2 else int
FloatType = float
BooleanType = bool
StringType = str
UnicodeType = type(u"")
NoneType = type(None)
ListType = list
TupleType = tuple
DictType = dict
# Setting this environment variable to any value will enable the dump of the debugging
# call stack
DIRAC_DEBUG_DENCODE_CALLSTACK = bool(os.environ.get('DIRAC_DEBUG_DENCODE_CALLSTACK', False))
# This global dictionary contains
# {<method name>: set(<class names>)}
# (a method name can be reused in other classes)
# DO NOT EDIT BY HAND, use ignoreEncodeWarning decorator
DENCODE_WARNING_IGNORED_METHODS = defaultdict(set)
# Depth of the stack to look for with inspect
CONTEXT_DEPTH = 100
def ignoreEncodeWarning(meth):
""" Decorator to put around method that should not anymore throw warnings
:warning: do not use around functions
:warning: for a class method, put it after the @classmethod decorator
:param meth: decorated method
"""
@functools.wraps(meth)
def inner(*args, **kwargs):
""" Add the method and the class name to the DENCODE_WARNING_IGNORED_METHODS dict """
# The first parameter in args is "self"
# Find out the class Name
objInst = args[0]
className = objInst.__class__.__name__
if className == 'type': # This happens for class method
className = objInst.__name__
# if the decorated method is an exported method, just remove the 'export_' bit
methName = meth.__name__.replace('export_', '')
# Add the method name and the object name to the dictionary
DENCODE_WARNING_IGNORED_METHODS[methName].add(className)
return meth(*args, **kwargs)
return inner
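# Illustrative usage (hypothetical handler class, not part of this module):
#
#   class SomeHandler(object):
#     @classmethod
#     @ignoreEncodeWarning
#     def export_listJobs(cls, selectDict):
#       ...
#
# The 'export_' prefix is stripped, so calls to 'listJobs' stop warning.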
def printDebugCallstack(headerMessage):
""" Prints information about the current stack as well as the caller parameters.
The purpose of this method is to track down all the places in DIRAC that might
not survive the change to JSON encoding.
Some methods are ignored:
* all the AccountingDB method: https://github.com/DIRACGrid/DIRAC/issues/4319
* all the method in DENCODE_WARNING_IGNORED_METHODS (see ignoreEncodeWarning)
:param headerMessage: message to be displayed first
:returns: None
"""
def stripArgs(frame):
""" Keeps only the parameters and their values from a frame
:param frame: frame object
:returns: dict {param name: value}
"""
# Get all the arguments of the call
allArgs = inspect.getargvalues(frame)
# Keep only the arguments that are parameters of the call, as well as their value
return dict([(argName, allArgs.locals[argName]) for argName in allArgs.args])
tb = traceback.format_stack()
frames = inspect.stack(context=CONTEXT_DEPTH)
# Flag set to true only if we figure it's an RPC call
# In that case, we display more info
isRPCCall = False
# For each entry in the stack, check if the method name is in the list of method to be ignored
for frameRecord in reversed(frames):
frameFuncName = frameRecord[3]
# If the method is in the list of ignored method,
# check that the method is from the good class
if frameFuncName in DENCODE_WARNING_IGNORED_METHODS:
try:
# Take the frame object https://docs.python.org/2.7/reference/datamodel.html
frameObj = frameRecord[0]
# Check that the self attribute of the function points to a class which is listed
# as to be ignored
className = frameObj.f_locals['self'].__class__.__name__
# if that is the case, then we return
if className in DENCODE_WARNING_IGNORED_METHODS[frameFuncName]:
return
# Exception may be thrown when trying to get the className
except (KeyError, AttributeError):
pass
# Else, if we are answering an RPC call
elif frameFuncName == '_executeAction':
# This requires special handling because the only way to know
# which method was called server side is to check at the proposalTuple
frameObj = frameRecord[0]
# The _executeAction method takes as parameter the handlerObj and the proposalTuple
# Extract the method name from the proposalTuple
funcName = frameObj.f_locals['proposalTuple'][1][1]
# Extract the class name from the handlerObj
className = frameObj.f_locals['handlerObj'].__class__.__name__
if funcName in DENCODE_WARNING_IGNORED_METHODS and className in DENCODE_WARNING_IGNORED_METHODS[funcName]:
return
else:
# If it is not to be ignored, save the parameters to display them
isRPCCall = True
rpcDetails = "RPC call service %s method %s" % (className, funcName)
break
# The datetime are encoded as tuple. Since datetime are taken care of
# in JSerializer, just don't print a warning here
# Note: -3 because we have to go past de/encodeTuple and the traceback module
if 'encodeDateTime' in tb[-3] or 'decodeDateTime' in tb[-3]:
return
# The accountingDB stores a encoding of the bucketsLength
# this is ok for now, so silent all the AccountingDB error
if any(['AccountingDB' in tr for tr in reversed(tb)]):
return
print('=' * 45, headerMessage, '=' * 45)
# print the traceback that leads us here
# remove the last element which is the traceback module call
for line in tb[:-1]:
print(line)
# Now we try to navigate up to the caller of dEncode.
# For this, we find the frame in which we enter dEncode.
# We keep the parameters to display it.
# Then we navigate to the parent frame, and we display the file
# and line number where this call was done
try:
framesIter = iter(frames)
for frame in framesIter:
# First check that we are using either 'encode' or 'decode' function
if frame[3] in ('encode', 'decode'):
# Then check it is the good file
if frame[1].endswith('DIRAC/Core/Utilities/DEncode.py'):
# Keep the arguments of the DEncode call
dencArgs = stripArgs(frame[0])
# Take the calling frame
frame = next(framesIter)
print("Calling frame: %s" % (frame[1:3],))
if isRPCCall:
print(rpcDetails)
print("With arguments ", end=' ')
pprint(dencArgs)
break
except Exception:
pass
print("=" * 100)
print()
print()
_dateTimeObject = datetime.datetime.utcnow()
_dateTimeType = type(_dateTimeObject)
_dateType = type(_dateTimeObject.date())
_timeType = type(_dateTimeObject.time())
g_dEncodeFunctions = {}
g_dDecodeFunctions = {}
def encodeInt(iValue, eList):
"""Encoding ints """
eList.extend((b"i", str(iValue).encode(), b"e"))
def decodeInt(data, i):
"""Decoding ints """
i += 1
end = data.index(b'e', i)
value = int(data[i:end])
return (value, end + 1)
g_dEncodeFunctions[types.IntType] = encodeInt
g_dDecodeFunctions[_ord("i")] = decodeInt
def encodeLong(iValue, eList):
""" Encoding longs """
# corrected by KGG eList.extend( ( "l", str( iValue ), "e" ) )
eList.extend((b"I", str(iValue).encode(), b"e"))
def decodeLong(data, i):
""" Decoding longs """
i += 1
end = data.index(_ord('e'), i)
value = long(data[i:end])
return (value, end + 1)
if not six.PY3:
g_dEncodeFunctions[types.LongType] = encodeLong
g_dDecodeFunctions[_ord("I")] = decodeLong
def encodeFloat(iValue, eList):
""" Encoding floats """
eList.extend((b"f", str(iValue).encode(), b"e"))
def decodeFloat(data, i):
""" Decoding floats """
i += 1
end = data.index(b'e', i)
if end + 1 < len(data) and data[end + 1] in (_ord('+'), _ord('-')):
eI = end
end = data.index(b'e', end + 1)
value = float(data[i:eI].decode()) * 10 ** int(data[eI + 1:end].decode())
else:
value = float(data[i:end].decode())
return (value, end + 1)
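# Note (illustrative): scientific notation round-trips through the exponent
# branch above; e.g. 2e+20 encodes to b"f2e+20e" and decodes back as
# float("2") * 10 ** int("+20").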
g_dEncodeFunctions[types.FloatType] = encodeFloat
g_dDecodeFunctions[_ord("f")] = decodeFloat
def encodeBool(bValue, eList):
""" Encoding booleans """
if bValue:
eList.append(b"b1")
else:
eList.append(b"b0")
def decodeBool(data, i):
""" Decoding booleans """
if data[i + 1] == _ord("0"):
return (False, i + 2)
else:
return (True, i + 2)
g_dEncodeFunctions[types.BooleanType] = encodeBool
g_dDecodeFunctions[_ord("b")] = decodeBool
def encodeString(sValue, eList):
""" Encoding strings """
if six.PY3 and not isinstance(sValue, bytes):
sValue = sValue.encode()
eList.extend((b's', str(len(sValue)).encode(), b':', sValue))
def decodeString(data, i):
""" Decoding strings """
i += 1
colon = data.index(b":", i)
value = int(data[i: colon].decode())
colon += 1
end = colon + value
retVal = data[colon: end]
if six.PY3:
retVal = retVal.decode(errors="surrogateescape")
return (retVal, end)
g_dEncodeFunctions[types.StringType] = encodeString
g_dEncodeFunctions[bytes] = encodeString
g_dDecodeFunctions[_ord("s")] = decodeString
def encodeUnicode(sValue, eList):
""" Encoding unicode strings """
valueStr = sValue.encode('utf-8')
eList.extend((b'u', str(len(valueStr)).encode(), b':', valueStr))
def decodeUnicode(data, i):
""" Decoding unicode strings """
i += 1
colon = data.index(b":", i)
value = int(data[i: colon])
colon += 1
end = colon + value
return (six.text_type(data[colon: end].decode('utf-8')), end)
if six.PY2:
g_dEncodeFunctions[types.UnicodeType] = encodeUnicode
g_dDecodeFunctions[_ord("u")] = decodeUnicode
else:
g_dDecodeFunctions[_ord("u")] = decodeString
def encodeDateTime(oValue, eList):
""" Encoding datetime """
if isinstance(oValue, _dateTimeType):
tDateTime = (oValue.year, oValue.month, oValue.day,
oValue.hour, oValue.minute, oValue.second,
oValue.microsecond, oValue.tzinfo)
eList.append(b"za")
# corrected by KGG encode( tDateTime, eList )
g_dEncodeFunctions[type(tDateTime)](tDateTime, eList)
elif isinstance(oValue, _dateType):
tData = (oValue.year, oValue.month, oValue.day)
eList.append(b"zd")
# corrected by KGG encode( tData, eList )
g_dEncodeFunctions[type(tData)](tData, eList)
elif isinstance(oValue, _timeType):
tTime = (oValue.hour, oValue.minute, oValue.second, oValue.microsecond, oValue.tzinfo)
eList.append(b"zt")
# corrected by KGG encode( tTime, eList )
g_dEncodeFunctions[type(tTime)](tTime, eList)
else:
raise Exception("Unexpected type %s while encoding a datetime object" % str(type(oValue)))
def decodeDateTime(data, i):
""" Decoding datetime """
i += 1
dataType = data[i]
# corrected by KGG tupleObject, i = decode( data, i + 1 )
tupleObject, i = g_dDecodeFunctions[data[i + 1]](data, i + 1)
if dataType == _ord('a'):
dtObject = datetime.datetime(*tupleObject)
elif dataType == _ord('d'):
dtObject = datetime.date(*tupleObject)
elif dataType == _ord('t'):
dtObject = datetime.time(*tupleObject)
else:
raise Exception("Unexpected type %s while decoding a datetime object" % dataType)
return (dtObject, i)
g_dEncodeFunctions[_dateTimeType] = encodeDateTime
g_dEncodeFunctions[_dateType] = encodeDateTime
g_dEncodeFunctions[_timeType] = encodeDateTime
g_dDecodeFunctions[_ord("z")] = decodeDateTime
def encodeNone(_oValue, eList):
""" Encoding None """
eList.append(b"n")
def decodeNone(_data, i):
""" Decoding None """
return (None, i + 1)
g_dEncodeFunctions[types.NoneType] = encodeNone
g_dDecodeFunctions[_ord("n")] = decodeNone
def encodeList(lValue, eList):
""" Encoding list """
eList.append(b"l")
for uObject in lValue:
g_dEncodeFunctions[type(uObject)](uObject, eList)
eList.append(b"e")
def decodeList(data, i):
""" Decoding list """
oL = []
i += 1
while data[i] != _ord("e"):
ob, i = g_dDecodeFunctions[data[i]](data, i)
oL.append(ob)
return(oL, i + 1)
g_dEncodeFunctions[types.ListType] = encodeList
g_dDecodeFunctions[_ord("l")] = decodeList
def encodeTuple(lValue, eList):
""" Encoding tuple """
if DIRAC_DEBUG_DENCODE_CALLSTACK:
printDebugCallstack('Encoding tuples')
eList.append(b"t")
for uObject in lValue:
g_dEncodeFunctions[type(uObject)](uObject, eList)
eList.append(b"e")
def decodeTuple(data, i):
""" Decoding tuple """
if DIRAC_DEBUG_DENCODE_CALLSTACK:
printDebugCallstack('Decoding tuples')
oL, i = decodeList(data, i)
return (tuple(oL), i)
g_dEncodeFunctions[types.TupleType] = encodeTuple
g_dDecodeFunctions[_ord("t")] = decodeTuple
def encodeDict(dValue, eList):
""" Encoding dictionary """
if DIRAC_DEBUG_DENCODE_CALLSTACK:
# If we have numbers as keys
if any([isinstance(x, six.integer_types + (float,)) for x in dValue]):
printDebugCallstack("Encoding dict with numeric keys")
eList.append(b"d")
for key in sorted(dValue):
g_dEncodeFunctions[type(key)](key, eList)
g_dEncodeFunctions[type(dValue[key])](dValue[key], eList)
eList.append(b"e")
def decodeDict(data, i):
""" Decoding dictionary """
oD = {}
i += 1
while data[i] != _ord("e"):
if DIRAC_DEBUG_DENCODE_CALLSTACK:
# If we have numbers as keys
if data[i] in (_ord('i'), _ord('I'), _ord('f')):
printDebugCallstack("Decoding dict with numeric keys")
k, i = g_dDecodeFunctions[data[i]](data, i)
oD[k], i = g_dDecodeFunctions[data[i]](data, i)
return (oD, i + 1)
g_dEncodeFunctions[types.DictType] = encodeDict
g_dDecodeFunctions[_ord("d")] = decodeDict
# Encode function
def encode(uObject):
""" Generic encoding function """
eList = []
# print("ENCODE FUNCTION : %s" % g_dEncodeFunctions[ type( uObject ) ])
g_dEncodeFunctions[type(uObject)](uObject, eList)
return b"".join(eList)
def decode(data):
""" Generic decoding function """
if not data:
return data
# print("DECODE FUNCTION : %s" % g_dDecodeFunctions[ sStream [ iIndex ] ])
if not isinstance(data, bytes):
raise NotImplementedError("This should never happen")
return g_dDecodeFunctions[data[0]](data, 0)
if __name__ == "__main__":
gObject = {2: "3", True: (3, None), 2.0 * 10 ** 20: 2.0 * 10 ** -10}
print("Initial: %s" % gObject)
gData = encode(gObject)
print("Encoded: %s" % gData)
print("Decoded: %s, [%s]" % decode(gData))
|
yujikato/DIRAC
|
src/DIRAC/Core/Utilities/DEncode.py
|
Python
|
gpl-3.0
| 15,157
|
[
"DIRAC"
] |
72a85d8051e6e7356b4bf26fe2e04512ed16f56a89b49e3daa7170125cdd9c8f
|
# -*- coding: utf-8 -*-
'''
*GSASIIstrMath - structure math routines*
-----------------------------------------
'''
########### SVN repository information ###################
# $Date: 2018-06-13 20:58:28 +0300 (Wed, 13 Jun 2018) $
# $Author: vondreele $
# $Revision: 3433 $
# $URL: https://subversion.xray.aps.anl.gov/pyGSAS/trunk/GSASIIstrMath.py $
# $Id: GSASIIstrMath.py 3433 2018-06-13 17:58:28Z vondreele $
########### SVN repository information ###################
from __future__ import division, print_function
import time
import copy
import numpy as np
import numpy.ma as ma
import numpy.linalg as nl
import scipy.stats as st
import multiprocessing as mp
import GSASIIpath
GSASIIpath.SetVersionNumber("$Revision: 3433 $")
import GSASIIElem as G2el
import GSASIIlattice as G2lat
import GSASIIspc as G2spc
import GSASIIpwd as G2pwd
import GSASIImapvars as G2mv
import GSASIImath as G2mth
# </ Anton Gagin
import config_example
# Anton Gagin />
import GSASIIobj as G2obj
import GSASIImpsubs as G2mp
#G2mp.InitMP(False) # This disables multiprocessing
sind = lambda x: np.sin(x*np.pi/180.)
cosd = lambda x: np.cos(x*np.pi/180.)
tand = lambda x: np.tan(x*np.pi/180.)
asind = lambda x: 180.*np.arcsin(x)/np.pi
acosd = lambda x: 180.*np.arccos(x)/np.pi
atan2d = lambda y,x: 180.*np.arctan2(y,x)/np.pi
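# Descriptive note: these wrappers take/return degrees rather than radians,
# e.g. sind(30.) -> 0.5 and acosd(0.) -> 90.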
ateln2 = 8.0*np.log(2.0)
twopi = 2.0*np.pi
twopisq = 2.0*np.pi**2
nxs = np.newaxis
################################################################################
##### Rigid Body Models
################################################################################
def ApplyRBModels(parmDict,Phases,rigidbodyDict,Update=False):
''' Takes RB info from RBModels in Phase and RB data in rigidbodyDict along with
current RB values in parmDict & modifies atom contents (xyz & Uij) of parmDict
'''
atxIds = ['Ax:','Ay:','Az:']
atuIds = ['AU11:','AU22:','AU33:','AU12:','AU13:','AU23:']
RBIds = rigidbodyDict.get('RBIds',{'Vector':[],'Residue':[]}) #these are lists of rbIds
if not RBIds['Vector'] and not RBIds['Residue']:
return
VRBIds = RBIds['Vector']
RRBIds = RBIds['Residue']
if Update:
RBData = rigidbodyDict
else:
RBData = copy.deepcopy(rigidbodyDict) # don't mess with original!
if RBIds['Vector']: # first update the vector magnitudes
VRBData = RBData['Vector']
for i,rbId in enumerate(VRBIds):
if VRBData[rbId]['useCount']:
for j in range(len(VRBData[rbId]['VectMag'])):
name = '::RBV;'+str(j)+':'+str(i)
VRBData[rbId]['VectMag'][j] = parmDict[name]
for phase in Phases:
Phase = Phases[phase]
General = Phase['General']
cx,ct,cs,cia = General['AtomPtrs']
cell = General['Cell'][1:7]
Amat,Bmat = G2lat.cell2AB(cell)
AtLookup = G2mth.FillAtomLookUp(Phase['Atoms'],cia+8)
pfx = str(Phase['pId'])+'::'
if Update:
RBModels = Phase['RBModels']
else:
RBModels = copy.deepcopy(Phase['RBModels']) # again don't mess with original!
for irb,RBObj in enumerate(RBModels.get('Vector',[])):
jrb = VRBIds.index(RBObj['RBId'])
rbsx = str(irb)+':'+str(jrb)
for i,px in enumerate(['RBVPx:','RBVPy:','RBVPz:']):
RBObj['Orig'][0][i] = parmDict[pfx+px+rbsx]
for i,po in enumerate(['RBVOa:','RBVOi:','RBVOj:','RBVOk:']):
RBObj['Orient'][0][i] = parmDict[pfx+po+rbsx]
RBObj['Orient'][0] = G2mth.normQ(RBObj['Orient'][0])
TLS = RBObj['ThermalMotion']
if 'T' in TLS[0]:
for i,pt in enumerate(['RBVT11:','RBVT22:','RBVT33:','RBVT12:','RBVT13:','RBVT23:']):
TLS[1][i] = parmDict[pfx+pt+rbsx]
if 'L' in TLS[0]:
for i,pt in enumerate(['RBVL11:','RBVL22:','RBVL33:','RBVL12:','RBVL13:','RBVL23:']):
TLS[1][i+6] = parmDict[pfx+pt+rbsx]
if 'S' in TLS[0]:
for i,pt in enumerate(['RBVS12:','RBVS13:','RBVS21:','RBVS23:','RBVS31:','RBVS32:','RBVSAA:','RBVSBB:']):
TLS[1][i+12] = parmDict[pfx+pt+rbsx]
if 'U' in TLS[0]:
TLS[1][0] = parmDict[pfx+'RBVU:'+rbsx]
XYZ,Cart = G2mth.UpdateRBXYZ(Bmat,RBObj,RBData,'Vector')
UIJ = G2mth.UpdateRBUIJ(Bmat,Cart,RBObj)
for i,x in enumerate(XYZ):
atId = RBObj['Ids'][i]
for j in [0,1,2]:
parmDict[pfx+atxIds[j]+str(AtLookup[atId])] = x[j]
if UIJ[i][0] == 'A':
for j in range(6):
parmDict[pfx+atuIds[j]+str(AtLookup[atId])] = UIJ[i][j+2]
elif UIJ[i][0] == 'I':
parmDict[pfx+'AUiso:'+str(AtLookup[atId])] = UIJ[i][1]
for irb,RBObj in enumerate(RBModels.get('Residue',[])):
jrb = RRBIds.index(RBObj['RBId'])
rbsx = str(irb)+':'+str(jrb)
for i,px in enumerate(['RBRPx:','RBRPy:','RBRPz:']):
RBObj['Orig'][0][i] = parmDict[pfx+px+rbsx]
for i,po in enumerate(['RBROa:','RBROi:','RBROj:','RBROk:']):
RBObj['Orient'][0][i] = parmDict[pfx+po+rbsx]
RBObj['Orient'][0] = G2mth.normQ(RBObj['Orient'][0])
TLS = RBObj['ThermalMotion']
if 'T' in TLS[0]:
for i,pt in enumerate(['RBRT11:','RBRT22:','RBRT33:','RBRT12:','RBRT13:','RBRT23:']):
RBObj['ThermalMotion'][1][i] = parmDict[pfx+pt+rbsx]
if 'L' in TLS[0]:
for i,pt in enumerate(['RBRL11:','RBRL22:','RBRL33:','RBRL12:','RBRL13:','RBRL23:']):
RBObj['ThermalMotion'][1][i+6] = parmDict[pfx+pt+rbsx]
if 'S' in TLS[0]:
for i,pt in enumerate(['RBRS12:','RBRS13:','RBRS21:','RBRS23:','RBRS31:','RBRS32:','RBRSAA:','RBRSBB:']):
RBObj['ThermalMotion'][1][i+12] = parmDict[pfx+pt+rbsx]
if 'U' in TLS[0]:
RBObj['ThermalMotion'][1][0] = parmDict[pfx+'RBRU:'+rbsx]
for itors,tors in enumerate(RBObj['Torsions']):
tors[0] = parmDict[pfx+'RBRTr;'+str(itors)+':'+rbsx]
XYZ,Cart = G2mth.UpdateRBXYZ(Bmat,RBObj,RBData,'Residue')
UIJ = G2mth.UpdateRBUIJ(Bmat,Cart,RBObj)
for i,x in enumerate(XYZ):
atId = RBObj['Ids'][i]
for j in [0,1,2]:
parmDict[pfx+atxIds[j]+str(AtLookup[atId])] = x[j]
if UIJ[i][0] == 'A':
for j in range(6):
parmDict[pfx+atuIds[j]+str(AtLookup[atId])] = UIJ[i][j+2]
elif UIJ[i][0] == 'I':
parmDict[pfx+'AUiso:'+str(AtLookup[atId])] = UIJ[i][1]
def ApplyRBModelDervs(dFdvDict,parmDict,rigidbodyDict,Phase):
'Propagates atom position & Uij derivatives in dFdvDict into derivatives with respect to rigid body origin, orientation, torsion & TLS parameters'
atxIds = ['dAx:','dAy:','dAz:']
atuIds = ['AU11:','AU22:','AU33:','AU12:','AU13:','AU23:']
OIds = ['Oa:','Oi:','Oj:','Ok:']
RBIds = rigidbodyDict.get('RBIds',{'Vector':[],'Residue':[]}) #these are lists of rbIds
if not RBIds['Vector'] and not RBIds['Residue']:
return
VRBIds = RBIds['Vector']
RRBIds = RBIds['Residue']
RBData = rigidbodyDict
for item in parmDict:
if 'RB' in item:
dFdvDict[item] = 0. #NB: this is a vector which is no. refl. long & must be filled!
General = Phase['General']
cx,ct,cs,cia = General['AtomPtrs']
cell = General['Cell'][1:7]
Amat,Bmat = G2lat.cell2AB(cell)
rpd = np.pi/180.
rpd2 = rpd**2
g = nl.inv(np.inner(Bmat,Bmat))
gvec = np.sqrt(np.array([g[0][0]**2,g[1][1]**2,g[2][2]**2,
g[0][0]*g[1][1],g[0][0]*g[2][2],g[1][1]*g[2][2]]))
AtLookup = G2mth.FillAtomLookUp(Phase['Atoms'],cia+8)
pfx = str(Phase['pId'])+'::'
RBModels = Phase['RBModels']
for irb,RBObj in enumerate(RBModels.get('Vector',[])):
VModel = RBData['Vector'][RBObj['RBId']]
Q = RBObj['Orient'][0]
jrb = VRBIds.index(RBObj['RBId'])
rbsx = str(irb)+':'+str(jrb)
dXdv = []
for iv in range(len(VModel['VectMag'])):
dCdv = []
for vec in VModel['rbVect'][iv]:
dCdv.append(G2mth.prodQVQ(Q,vec))
dXdv.append(np.inner(Bmat,np.array(dCdv)).T)
XYZ,Cart = G2mth.UpdateRBXYZ(Bmat,RBObj,RBData,'Vector')
for ia,atId in enumerate(RBObj['Ids']):
atNum = AtLookup[atId]
dx = 0.00001
for iv in range(len(VModel['VectMag'])):
for ix in [0,1,2]:
dFdvDict['::RBV;'+str(iv)+':'+str(jrb)] += dXdv[iv][ia][ix]*dFdvDict[pfx+atxIds[ix]+str(atNum)]
for i,name in enumerate(['RBVPx:','RBVPy:','RBVPz:']):
dFdvDict[pfx+name+rbsx] += dFdvDict[pfx+atxIds[i]+str(atNum)]
for iv in range(4):
Q[iv] -= dx
XYZ1 = G2mth.RotateRBXYZ(Bmat,Cart,G2mth.normQ(Q))
Q[iv] += 2.*dx
XYZ2 = G2mth.RotateRBXYZ(Bmat,Cart,G2mth.normQ(Q))
Q[iv] -= dx
dXdO = (XYZ2[ia]-XYZ1[ia])/(2.*dx)
for ix in [0,1,2]:
dFdvDict[pfx+'RBV'+OIds[iv]+rbsx] += dXdO[ix]*dFdvDict[pfx+atxIds[ix]+str(atNum)]
X = G2mth.prodQVQ(Q,Cart[ia])
dFdu = np.array([dFdvDict[pfx+Uid+str(AtLookup[atId])] for Uid in atuIds]).T/gvec
dFdu = G2lat.U6toUij(dFdu.T)
dFdu = np.tensordot(Amat,np.tensordot(Amat,dFdu,([1,0])),([0,1]))
dFdu = G2lat.UijtoU6(dFdu)
atNum = AtLookup[atId]
if 'T' in RBObj['ThermalMotion'][0]:
for i,name in enumerate(['RBVT11:','RBVT22:','RBVT33:','RBVT12:','RBVT13:','RBVT23:']):
dFdvDict[pfx+name+rbsx] += dFdu[i]
if 'L' in RBObj['ThermalMotion'][0]:
dFdvDict[pfx+'RBVL11:'+rbsx] += rpd2*(dFdu[1]*X[2]**2+dFdu[2]*X[1]**2-dFdu[5]*X[1]*X[2])
dFdvDict[pfx+'RBVL22:'+rbsx] += rpd2*(dFdu[0]*X[2]**2+dFdu[2]*X[0]**2-dFdu[4]*X[0]*X[2])
dFdvDict[pfx+'RBVL33:'+rbsx] += rpd2*(dFdu[0]*X[1]**2+dFdu[1]*X[0]**2-dFdu[3]*X[0]*X[1])
dFdvDict[pfx+'RBVL12:'+rbsx] += rpd2*(-dFdu[3]*X[2]**2-2.*dFdu[2]*X[0]*X[1]+
dFdu[4]*X[1]*X[2]+dFdu[5]*X[0]*X[2])
dFdvDict[pfx+'RBVL13:'+rbsx] += rpd2*(-dFdu[4]*X[1]**2-2.*dFdu[1]*X[0]*X[2]+
dFdu[3]*X[1]*X[2]+dFdu[5]*X[0]*X[1])
dFdvDict[pfx+'RBVL23:'+rbsx] += rpd2*(-dFdu[5]*X[0]**2-2.*dFdu[0]*X[1]*X[2]+
dFdu[3]*X[0]*X[2]+dFdu[4]*X[0]*X[1])
if 'S' in RBObj['ThermalMotion'][0]:
dFdvDict[pfx+'RBVS12:'+rbsx] += rpd*(dFdu[5]*X[1]-2.*dFdu[1]*X[2])
dFdvDict[pfx+'RBVS13:'+rbsx] += rpd*(-dFdu[5]*X[2]+2.*dFdu[2]*X[1])
dFdvDict[pfx+'RBVS21:'+rbsx] += rpd*(-dFdu[4]*X[0]+2.*dFdu[0]*X[2])
dFdvDict[pfx+'RBVS23:'+rbsx] += rpd*(dFdu[4]*X[2]-2.*dFdu[2]*X[0])
dFdvDict[pfx+'RBVS31:'+rbsx] += rpd*(dFdu[3]*X[0]-2.*dFdu[0]*X[1])
dFdvDict[pfx+'RBVS32:'+rbsx] += rpd*(-dFdu[3]*X[1]+2.*dFdu[1]*X[0])
dFdvDict[pfx+'RBVSAA:'+rbsx] += rpd*(dFdu[4]*X[1]-dFdu[3]*X[2])
dFdvDict[pfx+'RBVSBB:'+rbsx] += rpd*(dFdu[5]*X[0]-dFdu[3]*X[2])
if 'U' in RBObj['ThermalMotion'][0]:
dFdvDict[pfx+'RBVU:'+rbsx] += dFdvDict[pfx+'AUiso:'+str(AtLookup[atId])]
for irb,RBObj in enumerate(RBModels.get('Residue',[])):
Q = RBObj['Orient'][0]
jrb = RRBIds.index(RBObj['RBId'])
torData = RBData['Residue'][RBObj['RBId']]['rbSeq']
rbsx = str(irb)+':'+str(jrb)
XYZ,Cart = G2mth.UpdateRBXYZ(Bmat,RBObj,RBData,'Residue')
for itors,tors in enumerate(RBObj['Torsions']): #derivative error?
tname = pfx+'RBRTr;'+str(itors)+':'+rbsx
orId,pvId = torData[itors][:2]
pivotVec = Cart[orId]-Cart[pvId]
QA = G2mth.AVdeg2Q(-0.001,pivotVec)
QB = G2mth.AVdeg2Q(0.001,pivotVec)
for ir in torData[itors][3]:
atNum = AtLookup[RBObj['Ids'][ir]]
rVec = Cart[ir]-Cart[pvId]
dR = G2mth.prodQVQ(QB,rVec)-G2mth.prodQVQ(QA,rVec)
dRdT = np.inner(Bmat,G2mth.prodQVQ(Q,dR))/.002
for ix in [0,1,2]:
dFdvDict[tname] += dRdT[ix]*dFdvDict[pfx+atxIds[ix]+str(atNum)]
for ia,atId in enumerate(RBObj['Ids']):
atNum = AtLookup[atId]
dx = 0.00001
for i,name in enumerate(['RBRPx:','RBRPy:','RBRPz:']):
dFdvDict[pfx+name+rbsx] += dFdvDict[pfx+atxIds[i]+str(atNum)]
for iv in range(4):
Q[iv] -= dx
XYZ1 = G2mth.RotateRBXYZ(Bmat,Cart,G2mth.normQ(Q))
Q[iv] += 2.*dx
XYZ2 = G2mth.RotateRBXYZ(Bmat,Cart,G2mth.normQ(Q))
Q[iv] -= dx
dXdO = (XYZ2[ia]-XYZ1[ia])/(2.*dx)
for ix in [0,1,2]:
dFdvDict[pfx+'RBR'+OIds[iv]+rbsx] += dXdO[ix]*dFdvDict[pfx+atxIds[ix]+str(atNum)]
X = G2mth.prodQVQ(Q,Cart[ia])
dFdu = np.array([dFdvDict[pfx+Uid+str(AtLookup[atId])] for Uid in atuIds]).T/gvec
dFdu = G2lat.U6toUij(dFdu.T)
dFdu = np.tensordot(Amat.T,np.tensordot(Amat,dFdu,([1,0])),([0,1]))
dFdu = G2lat.UijtoU6(dFdu)
atNum = AtLookup[atId]
if 'T' in RBObj['ThermalMotion'][0]:
for i,name in enumerate(['RBRT11:','RBRT22:','RBRT33:','RBRT12:','RBRT13:','RBRT23:']):
dFdvDict[pfx+name+rbsx] += dFdu[i]
if 'L' in RBObj['ThermalMotion'][0]:
dFdvDict[pfx+'RBRL11:'+rbsx] += rpd2*(dFdu[1]*X[2]**2+dFdu[2]*X[1]**2-dFdu[5]*X[1]*X[2])
dFdvDict[pfx+'RBRL22:'+rbsx] += rpd2*(dFdu[0]*X[2]**2+dFdu[2]*X[0]**2-dFdu[4]*X[0]*X[2])
dFdvDict[pfx+'RBRL33:'+rbsx] += rpd2*(dFdu[0]*X[1]**2+dFdu[1]*X[0]**2-dFdu[3]*X[0]*X[1])
dFdvDict[pfx+'RBRL12:'+rbsx] += rpd2*(-dFdu[3]*X[2]**2-2.*dFdu[2]*X[0]*X[1]+
dFdu[4]*X[1]*X[2]+dFdu[5]*X[0]*X[2])
dFdvDict[pfx+'RBRL13:'+rbsx] += rpd2*(dFdu[4]*X[1]**2-2.*dFdu[1]*X[0]*X[2]+
dFdu[3]*X[1]*X[2]+dFdu[5]*X[0]*X[1])
dFdvDict[pfx+'RBRL23:'+rbsx] += rpd2*(dFdu[5]*X[0]**2-2.*dFdu[0]*X[1]*X[2]+
dFdu[3]*X[0]*X[2]+dFdu[4]*X[0]*X[1])
if 'S' in RBObj['ThermalMotion'][0]:
dFdvDict[pfx+'RBRS12:'+rbsx] += rpd*(dFdu[5]*X[1]-2.*dFdu[1]*X[2])
dFdvDict[pfx+'RBRS13:'+rbsx] += rpd*(-dFdu[5]*X[2]+2.*dFdu[2]*X[1])
dFdvDict[pfx+'RBRS21:'+rbsx] += rpd*(-dFdu[4]*X[0]+2.*dFdu[0]*X[2])
dFdvDict[pfx+'RBRS23:'+rbsx] += rpd*(dFdu[4]*X[2]-2.*dFdu[2]*X[0])
dFdvDict[pfx+'RBRS31:'+rbsx] += rpd*(dFdu[3]*X[0]-2.*dFdu[0]*X[1])
dFdvDict[pfx+'RBRS32:'+rbsx] += rpd*(-dFdu[3]*X[1]+2.*dFdu[1]*X[0])
dFdvDict[pfx+'RBRSAA:'+rbsx] += rpd*(dFdu[4]*X[1]-dFdu[3]*X[2])
dFdvDict[pfx+'RBRSBB:'+rbsx] += rpd*(dFdu[5]*X[0]-dFdu[3]*X[2])
if 'U' in RBObj['ThermalMotion'][0]:
dFdvDict[pfx+'RBRU:'+rbsx] += dFdvDict[pfx+'AUiso:'+str(AtLookup[atId])]
################################################################################
##### Penalty & restraint functions
################################################################################
def penaltyFxn(HistoPhases,calcControls,parmDict,varyList):
'Computes restraint (penalty) function values & weights for all phases; returns pNames,pVals,pWt,pWsum,pWnum'
Histograms,Phases,restraintDict,rigidbodyDict = HistoPhases
pNames = []
pVals = []
pWt = []
negWt = {}
pWsum = {}
pWnum = {}
for phase in Phases:
pId = Phases[phase]['pId']
negWt[pId] = Phases[phase]['General']['Pawley neg wt']
General = Phases[phase]['General']
cx,ct,cs,cia = General['AtomPtrs']
textureData = General['SH Texture']
SGData = General['SGData']
Atoms = Phases[phase]['Atoms']
AtLookup = G2mth.FillAtomLookUp(Phases[phase]['Atoms'],cia+8)
cell = General['Cell'][1:7]
Amat,Bmat = G2lat.cell2AB(cell)
if phase not in restraintDict:
continue
phaseRest = restraintDict[phase]
names = [['Bond','Bonds'],['Angle','Angles'],['Plane','Planes'],
['Chiral','Volumes'],['Torsion','Torsions'],['Rama','Ramas'],
['ChemComp','Sites'],['Texture','HKLs'],]
for name,rest in names:
pWsum[name] = 0.
pWnum[name] = 0
if name not in phaseRest:
continue
itemRest = phaseRest[name]
if itemRest[rest] and itemRest['Use']:
wt = itemRest['wtFactor']
if name in ['Bond','Angle','Plane','Chiral']:
for i,[indx,ops,obs,esd] in enumerate(itemRest[rest]):
pNames.append(str(pId)+':'+name+':'+str(i))
XYZ = np.array(G2mth.GetAtomCoordsByID(pId,parmDict,AtLookup,indx))
XYZ = G2mth.getSyXYZ(XYZ,ops,SGData)
if name == 'Bond':
calc = G2mth.getRestDist(XYZ,Amat)
elif name == 'Angle':
calc = G2mth.getRestAngle(XYZ,Amat)
elif name == 'Plane':
calc = G2mth.getRestPlane(XYZ,Amat)
elif name == 'Chiral':
calc = G2mth.getRestChiral(XYZ,Amat)
pVals.append(obs-calc)
pWt.append(wt/esd**2)
pWsum[name] += wt*((obs-calc)/esd)**2
pWnum[name] += 1
elif name in ['Torsion','Rama']:
coeffDict = itemRest['Coeff']
for i,[indx,ops,cofName,esd] in enumerate(itemRest[rest]):
pNames.append(str(pId)+':'+name+':'+str(i))
XYZ = np.array(G2mth.GetAtomCoordsByID(pId,parmDict,AtLookup,indx))
XYZ = G2mth.getSyXYZ(XYZ,ops,SGData)
if name == 'Torsion':
tor = G2mth.getRestTorsion(XYZ,Amat)
restr,calc = G2mth.calcTorsionEnergy(tor,coeffDict[cofName])
else:
phi,psi = G2mth.getRestRama(XYZ,Amat)
restr,calc = G2mth.calcRamaEnergy(phi,psi,coeffDict[cofName])
pVals.append(restr)
pWt.append(wt/esd**2)
pWsum[name] += wt*(restr/esd)**2
pWnum[name] += 1
elif name == 'ChemComp':
for i,[indx,factors,obs,esd] in enumerate(itemRest[rest]):
pNames.append(str(pId)+':'+name+':'+str(i))
mul = np.array(G2mth.GetAtomItemsById(Atoms,AtLookup,indx,cs+1))
frac = np.array(G2mth.GetAtomFracByID(pId,parmDict,AtLookup,indx))
calc = np.sum(mul*frac*factors)
pVals.append(obs-calc)
pWt.append(wt/esd**2)
pWsum[name] += wt*((obs-calc)/esd)**2
pWnum[name] += 1
elif name == 'Texture':
SHkeys = list(textureData['SH Coeff'][1].keys())
SHCoef = G2mth.GetSHCoeff(pId,parmDict,SHkeys)
shModels = ['cylindrical','none','shear - 2/m','rolling - mmm']
SamSym = dict(zip(shModels,['0','-1','2/m','mmm']))
for i,[hkl,grid,esd1,ifesd2,esd2] in enumerate(itemRest[rest]):
PH = np.array(hkl)
phi,beta = G2lat.CrsAng(np.array(hkl),cell,SGData)
ODFln = G2lat.Flnh(False,SHCoef,phi,beta,SGData)
R,P,Z = G2mth.getRestPolefig(ODFln,SamSym[textureData['Model']],grid)
Z1 = ma.masked_greater(Z,0.0) #is this + or -?
IndZ1 = np.array(ma.nonzero(Z1))
for ind in IndZ1.T:
pNames.append('%d:%s:%d:%.2f:%.2f'%(pId,name,i,R[ind[0],ind[1]],P[ind[0],ind[1]]))
pVals.append(Z1[ind[0]][ind[1]])
pWt.append(wt/esd1**2)
pWsum[name] += wt*(-Z1[ind[0]][ind[1]]/esd1)**2
pWnum[name] += 1
if ifesd2:
Z2 = 1.-Z
for ind in np.ndindex(grid,grid):
pNames.append('%d:%s:%d:%.2f:%.2f'%(pId,name+'-unit',i,R[ind[0],ind[1]],P[ind[0],ind[1]]))
pVals.append(Z2[ind[0]][ind[1]])
pWt.append(wt/esd2**2)
pWsum[name] += wt*(Z2/esd2)**2
pWnum[name] += 1
for phase in Phases:
name = 'SH-Pref.Ori.'
pId = Phases[phase]['pId']
General = Phases[phase]['General']
SGData = General['SGData']
cell = General['Cell'][1:7]
pWsum[name] = 0.0
pWnum[name] = 0
for hist in Phases[phase]['Histograms']:
if not Phases[phase]['Histograms'][hist]['Use']:
continue
if hist in Histograms and 'PWDR' in hist:
hId = Histograms[hist]['hId']
phfx = '%d:%d:'%(pId,hId)
if calcControls[phfx+'poType'] == 'SH':
toler = calcControls[phfx+'SHtoler']
wt = 1./toler**2
HKLs = np.array(calcControls[phfx+'SHhkl'])
SHnames = calcControls[phfx+'SHnames']
SHcof = dict(zip(SHnames,[parmDict[phfx+cof] for cof in SHnames]))
for i,PH in enumerate(HKLs):
phi,beta = G2lat.CrsAng(PH,cell,SGData)
SH3Coef = {}
for item in SHcof:
L,N = eval(item.strip('C'))
SH3Coef['C%d,0,%d'%(L,N)] = SHcof[item]
ODFln = G2lat.Flnh(False,SH3Coef,phi,beta,SGData)
X = np.linspace(0,90.0,26)
Y = ma.masked_greater(G2lat.polfcal(ODFln,'0',X,0.0),0.0) #+ or -?
IndY = ma.nonzero(Y)
for ind in IndY[0]:
pNames.append('%d:%d:%s:%d:%.2f'%(pId,hId,name,i,X[ind]))
pVals.append(Y[ind])
pWt.append(wt)
pWsum[name] += wt*(Y[ind])**2
pWnum[name] += 1
pWsum['PWLref'] = 0.
pWnum['PWLref'] = 0
for item in varyList:
if 'PWLref' in item and parmDict[item] < 0.:
pId = int(item.split(':')[0])
if negWt[pId]:
pNames.append(item)
pVals.append(parmDict[item])
pWt.append(negWt[pId])
pWsum['PWLref'] += negWt[pId]*(parmDict[item])**2
pWnum['PWLref'] += 1
pVals = np.array(pVals)
pWt = np.array(pWt) #should this be np.sqrt?
return pNames,pVals,pWt,pWsum,pWnum
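#Illustrative sketch (not part of GSAS-II): the restraint weighting above
#stores pWt = wt/esd**2 for each penalty value, so sum(pWt*pVals**2) equals
#the per-type pWsum accumulator wt*((obs-calc)/esd)**2 summed over restraints.
#All numbers here are arbitrary toy values.
def _demo_penalty_weights():
    import numpy as np
    wt,esd = 2.0,0.02                       #toy restraint weight factor & esd
    obs = np.array([1.54,1.33,2.10])        #toy target values
    calc = np.array([1.56,1.30,2.05])       #toy calculated values
    pVals = obs-calc
    pWt = np.full_like(pVals,wt/esd**2)     #same convention as pWt.append above
    pWsum = np.sum(wt*((obs-calc)/esd)**2)
    assert np.allclose(np.sum(pWt*pVals**2),pWsum)
    return pWsum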
def penaltyDeriv(pNames,pVal,HistoPhases,calcControls,parmDict,varyList):
'''Compute derivatives of the penalty function values from :func:`penaltyFxn`
with respect to the varied parameters.

:returns: np.array pDerv: derivative matrix, shape (len(varyList),len(pVal))
'''
Histograms,Phases,restraintDict,rigidbodyDict = HistoPhases
pDerv = np.zeros((len(varyList),len(pVal)))
for phase in Phases:
# if phase not in restraintDict:
# continue
pId = Phases[phase]['pId']
General = Phases[phase]['General']
cx,ct,cs,cia = General['AtomPtrs']
SGData = General['SGData']
Atoms = Phases[phase]['Atoms']
AtLookup = G2mth.FillAtomLookUp(Phases[phase]['Atoms'],cia+8)
cell = General['Cell'][1:7]
Amat,Bmat = G2lat.cell2AB(cell)
textureData = General['SH Texture']
SHkeys = list(textureData['SH Coeff'][1].keys())
SHCoef = G2mth.GetSHCoeff(pId,parmDict,SHkeys)
shModels = ['cylindrical','none','shear - 2/m','rolling - mmm']
SamSym = dict(zip(shModels,['0','-1','2/m','mmm']))
sam = SamSym[textureData['Model']]
phaseRest = restraintDict.get(phase,{})
names = {'Bond':'Bonds','Angle':'Angles','Plane':'Planes',
'Chiral':'Volumes','Torsion':'Torsions','Rama':'Ramas',
'ChemComp':'Sites','Texture':'HKLs'}
lasthkl = np.array([0,0,0])
for ip,pName in enumerate(pNames):
pnames = pName.split(':')
if pId == int(pnames[0]):
name = pnames[1]
if 'PWL' in pName:
pDerv[varyList.index(pName)][ip] += 1.
continue
elif 'SH-' in pName:
continue
id = int(pnames[2])
itemRest = phaseRest[name]
if name in ['Bond','Angle','Plane','Chiral']:
indx,ops,obs,esd = itemRest[names[name]][id]
dNames = []
for ind in indx:
dNames += [str(pId)+'::dA'+Xname+':'+str(AtLookup[ind]) for Xname in ['x','y','z']]
XYZ = np.array(G2mth.GetAtomCoordsByID(pId,parmDict,AtLookup,indx))
if name == 'Bond':
deriv = G2mth.getRestDeriv(G2mth.getRestDist,XYZ,Amat,ops,SGData)
elif name == 'Angle':
deriv = G2mth.getRestDeriv(G2mth.getRestAngle,XYZ,Amat,ops,SGData)
elif name == 'Plane':
deriv = G2mth.getRestDeriv(G2mth.getRestPlane,XYZ,Amat,ops,SGData)
elif name == 'Chiral':
deriv = G2mth.getRestDeriv(G2mth.getRestChiral,XYZ,Amat,ops,SGData)
elif name in ['Torsion','Rama']:
coffDict = itemRest['Coeff']
indx,ops,cofName,esd = itemRest[names[name]][id]
dNames = []
for ind in indx:
dNames += [str(pId)+'::dA'+Xname+':'+str(AtLookup[ind]) for Xname in ['x','y','z']]
XYZ = np.array(G2mth.GetAtomCoordsByID(pId,parmDict,AtLookup,indx))
if name == 'Torsion':
deriv = G2mth.getTorsionDeriv(XYZ,Amat,coffDict[cofName])
else:
deriv = G2mth.getRamaDeriv(XYZ,Amat,coffDict[cofName])
elif name == 'ChemComp':
indx,factors,obs,esd = itemRest[names[name]][id]
dNames = []
for ind in indx:
dNames += [str(pId)+'::Afrac:'+str(AtLookup[ind])]
mul = np.array(G2mth.GetAtomItemsById(Atoms,AtLookup,indx,cs+1))
deriv = mul*factors
elif 'Texture' in name:
deriv = []
dNames = []
hkl,grid,esd1,ifesd2,esd2 = itemRest[names[name]][id]
hkl = np.array(hkl)
if np.any(lasthkl-hkl):
phi,beta = G2lat.CrsAng(np.array(hkl),cell,SGData)
ODFln = G2lat.Flnh(False,SHCoef,phi,beta,SGData)
lasthkl = copy.copy(hkl)
if 'unit' in name:
pass
else:
gam = float(pnames[3])
psi = float(pnames[4])
for SHname in ODFln:
l,m,n = eval(SHname[1:])
Ksl = G2lat.GetKsl(l,m,sam,psi,gam)[0]
dNames += [str(pId)+'::'+SHname]
deriv.append(-ODFln[SHname][0]*Ksl/SHCoef[SHname])
for dName,drv in zip(dNames,deriv):
try:
ind = varyList.index(dName)
pDerv[ind][ip] += drv
except ValueError:
pass
lasthkl = np.array([0,0,0])
for ip,pName in enumerate(pNames):
deriv = []
dNames = []
pnames = pName.split(':')
if 'SH-' in pName and pId == int(pnames[0]):
hId = int(pnames[1])
phfx = '%d:%d:'%(pId,hId)
psi = float(pnames[4])
HKLs = calcControls[phfx+'SHhkl']
SHnames = calcControls[phfx+'SHnames']
SHcof = dict(zip(SHnames,[parmDict[phfx+cof] for cof in SHnames]))
hkl = np.array(HKLs[int(pnames[3])])
if np.any(lasthkl-hkl):
phi,beta = G2lat.CrsAng(np.array(hkl),cell,SGData)
SH3Coef = {}
for item in SHcof:
L,N = eval(item.strip('C'))
SH3Coef['C%d,0,%d'%(L,N)] = SHcof[item]
ODFln = G2lat.Flnh(False,SH3Coef,phi,beta,SGData)
lasthkl = copy.copy(hkl)
for SHname in SHnames:
l,n = eval(SHname[1:])
SH3name = 'C%d,0,%d'%(l,n)
Ksl = G2lat.GetKsl(l,0,'0',psi,0.0)[0]
dNames += [phfx+SHname]
deriv.append(ODFln[SH3name][0]*Ksl/SHcof[SHname])
for dName,drv in zip(dNames,deriv):
try:
ind = varyList.index(dName)
pDerv[ind][ip] += drv
except ValueError:
pass
return pDerv
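#Illustrative sketch (not part of GSAS-II): the Bond restraint derivatives
#above come from G2mth.getRestDeriv; the analytic gradient of an interatomic
#distance, d|x1-x2|/dx1 = (x1-x2)/|x1-x2|, should match a central finite
#difference. Cartesian coordinates (identity Amat) are assumed here purely to
#keep the example self-contained.
def _demo_bond_deriv():
    import numpy as np
    x1 = np.array([0.1,0.2,0.3])            #toy atom positions
    x2 = np.array([0.9,0.4,0.1])
    dist = lambda a,b: np.sqrt(np.sum((a-b)**2))
    analytic = (x1-x2)/dist(x1,x2)
    step = 1.e-5
    numeric = np.zeros(3)
    for i in range(3):
        dx = np.zeros(3)
        dx[i] = step
        numeric[i] = (dist(x1+dx,x2)-dist(x1-dx,x2))/(2.*step)
    assert np.allclose(analytic,numeric,atol=1.e-6)
    return analytic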
################################################################################
##### Function & derivative calculations
################################################################################
def GetAtomFXU(pfx,calcControls,parmDict):
'''Unpack the atom parameters for a phase (type, multiplicity, site fraction,
coordinates & coordinate shifts, Uiso/Uij thermal parameters & magnetic
moments) from parmDict into arrays indexed by atom number.
'''
Natoms = calcControls['Natoms'][pfx]
Tdata = Natoms*[' ',]
Mdata = np.zeros(Natoms)
IAdata = Natoms*[' ',]
Fdata = np.zeros(Natoms)
Xdata = np.zeros((3,Natoms))
dXdata = np.zeros((3,Natoms))
Uisodata = np.zeros(Natoms)
Uijdata = np.zeros((6,Natoms))
Gdata = np.zeros((3,Natoms))
keys = {'Atype:':Tdata,'Amul:':Mdata,'Afrac:':Fdata,'AI/A:':IAdata,
'dAx:':dXdata[0],'dAy:':dXdata[1],'dAz:':dXdata[2],
'Ax:':Xdata[0],'Ay:':Xdata[1],'Az:':Xdata[2],'AUiso:':Uisodata,
'AU11:':Uijdata[0],'AU22:':Uijdata[1],'AU33:':Uijdata[2],
'AU12:':Uijdata[3],'AU13:':Uijdata[4],'AU23:':Uijdata[5],
'AMx:':Gdata[0],'AMy:':Gdata[1],'AMz:':Gdata[2],}
for iatm in range(Natoms):
for key in keys:
parm = pfx+key+str(iatm)
if parm in parmDict:
keys[key][iatm] = parmDict[parm]
Fdata = np.where(Fdata,Fdata,1.e-8) #avoid divide by zero in derivative calc.
return Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata
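#Illustrative sketch (not part of GSAS-II): the unpacking loop above relies on
#the parameter naming convention pfx+key+str(iatm); keys absent from parmDict
#simply leave the preset default in place. The prefix '0::' and the values
#below are hypothetical.
def _demo_atom_unpack():
    import numpy as np
    pfx = '0::'                             #hypothetical phase prefix
    parmDict = {'0::Ax:0':0.25,'0::Ay:0':0.5,'0::Afrac:1':0.75}
    Natoms = 2
    Fdata = np.zeros(Natoms)
    Xdata = np.zeros((3,Natoms))
    keys = {'Afrac:':Fdata,'Ax:':Xdata[0],'Ay:':Xdata[1],'Az:':Xdata[2]}
    for iatm in range(Natoms):
        for key in keys:
            parm = pfx+key+str(iatm)
            if parm in parmDict:
                keys[key][iatm] = parmDict[parm]
    Fdata = np.where(Fdata,Fdata,1.e-8)     #same divide-by-zero guard as above
    return Fdata,Xdata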
def GetAtomSSFXU(pfx,calcControls,parmDict):
'''Unpack the atom modulation wave parameters for a superspace phase (site
fraction, position, thermal displacement & magnetic moment waves) from
parmDict into arrays indexed by atom & wave number.
'''
Natoms = calcControls['Natoms'][pfx]
maxSSwave = calcControls['maxSSwave'][pfx]
Nwave = {'F':maxSSwave['Sfrac'],'X':maxSSwave['Spos'],'Y':maxSSwave['Spos'],'Z':maxSSwave['Spos'],
'U':maxSSwave['Sadp'],'M':maxSSwave['Smag'],'T':maxSSwave['Spos']}
XSSdata = np.zeros((6,maxSSwave['Spos'],Natoms))
FSSdata = np.zeros((2,maxSSwave['Sfrac'],Natoms))
USSdata = np.zeros((12,maxSSwave['Sadp'],Natoms))
MSSdata = np.zeros((6,maxSSwave['Smag'],Natoms))
waveTypes = []
keys = {'Fsin:':FSSdata[0],'Fcos:':FSSdata[1],'Fzero:':FSSdata[0],'Fwid:':FSSdata[1],
'Tmin:':XSSdata[0],'Tmax:':XSSdata[1],'Xmax:':XSSdata[2],'Ymax:':XSSdata[3],'Zmax:':XSSdata[4],
'Xsin:':XSSdata[0],'Ysin:':XSSdata[1],'Zsin:':XSSdata[2],'Xcos:':XSSdata[3],'Ycos:':XSSdata[4],'Zcos:':XSSdata[5],
'U11sin:':USSdata[0],'U22sin:':USSdata[1],'U33sin:':USSdata[2],'U12sin:':USSdata[3],'U13sin:':USSdata[4],'U23sin:':USSdata[5],
'U11cos:':USSdata[6],'U22cos:':USSdata[7],'U33cos:':USSdata[8],'U12cos:':USSdata[9],'U13cos:':USSdata[10],'U23cos:':USSdata[11],
'MXsin:':MSSdata[0],'MYsin:':MSSdata[1],'MZsin:':MSSdata[2],'MXcos:':MSSdata[3],'MYcos:':MSSdata[4],'MZcos:':MSSdata[5]}
for iatm in range(Natoms):
for kind in ['F','P','A','M']:
wavetype = []
wavetype += [parmDict.get(pfx+kind+'waveType:'+str(iatm),''),]
waveTypes.append(wavetype)
for key in keys:
for m in range(Nwave[key[0]]):
parm = pfx+key+str(iatm)+':%d'%(m)
if parm in parmDict:
keys[key][m][iatm] = parmDict[parm]
return np.array(waveTypes),FSSdata,XSSdata,USSdata,MSSdata
def StructureFactor2(refDict,G,hfx,pfx,SGData,calcControls,parmDict):
''' Compute structure factors for all h,k,l for phase
puts the result, F^2, in each ref[9] in refList
operates on blocks of 100 reflections for speed
input:
:param dict refDict: where
'RefList' list where each ref = h,k,l,it,d,...
'FF' dict of form factors - filled in below
:param np.array G: reciprocal metric tensor
:param str hfx: histogram id string
:param str pfx: phase id string
:param dict SGData: space group info. dictionary output from SpcGroup
:param dict calcControls:
:param dict parmDict:
'''
phfx = pfx.split(':')[0]+hfx
ast = np.sqrt(np.diag(G))
Mast = twopisq*np.multiply.outer(ast,ast)
SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
SGT = np.array([ops[1] for ops in SGData['SGOps']])
FFtables = calcControls['FFtables']
BLtables = calcControls['BLtables']
Amat,Bmat = G2lat.Gmat2AB(G)
Flack = 1.0
if not SGData['SGInv'] and 'S' in calcControls[hfx+'histType'] and phfx+'Flack' in parmDict:
Flack = 1.-2.*parmDict[phfx+'Flack']
TwinLaw = np.array([[[1,0,0],[0,1,0],[0,0,1]],])
TwDict = refDict.get('TwDict',{})
if 'S' in calcControls[hfx+'histType']:
NTL = calcControls[phfx+'NTL']
NM = calcControls[phfx+'TwinNMN']+1
TwinLaw = calcControls[phfx+'TwinLaw']
TwinFr = np.array([parmDict[phfx+'TwinFr:'+str(i)] for i in range(len(TwinLaw))])
TwinInv = list(np.where(calcControls[phfx+'TwinInv'],-1,1))
Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata = \
GetAtomFXU(pfx,calcControls,parmDict)
if not Xdata.size: #no atoms in phase!
return
if 'NC' in calcControls[hfx+'histType']:
FP,FPP = G2el.BlenResCW(Tdata,BLtables,parmDict[hfx+'Lam'])
elif 'X' in calcControls[hfx+'histType']:
FP = np.array([FFtables[El][hfx+'FP'] for El in Tdata])
FPP = np.array([FFtables[El][hfx+'FPP'] for El in Tdata])
Uij = np.array(G2lat.U6toUij(Uijdata))
bij = Mast*Uij.T
blkSize = 100 #no. of reflections in a block - size seems optimal
nRef = refDict['RefList'].shape[0]
SQ = 1./(2.*refDict['RefList'].T[4])**2
if 'N' in calcControls[hfx+'histType']:
dat = G2el.getBLvalues(BLtables)
refDict['FF']['El'] = list(dat.keys())
refDict['FF']['FF'] = np.ones((nRef,len(dat)))*list(dat.values())
else: #'X'
dat = G2el.getFFvalues(FFtables,0.)
refDict['FF']['El'] = list(dat.keys())
refDict['FF']['FF'] = np.zeros((nRef,len(dat)))
for iel,El in enumerate(refDict['FF']['El']):
refDict['FF']['FF'].T[iel] = G2el.ScatFac(FFtables[El],SQ)
#reflection processing begins here - big arrays!
iBeg = 0
while iBeg < nRef:
iFin = min(iBeg+blkSize,nRef)
refl = refDict['RefList'][iBeg:iFin] #array(blkSize,nItems)
H = refl.T[:3] #array(blkSize,3)
H = np.squeeze(np.inner(H.T,TwinLaw)) #maybe array(blkSize,nTwins,3) or (blkSize,3)
TwMask = np.any(H,axis=-1)
if TwinLaw.shape[0] > 1 and TwDict: #need np.inner(TwinLaw[?],TwDict[iref][i])*TwinInv[i]
for ir in range(blkSize):
iref = ir+iBeg
if iref in TwDict:
for i in TwDict[iref]:
for n in range(NTL):
H[ir][i+n*NM] = np.inner(TwinLaw[n*NM],np.array(TwDict[iref][i])*TwinInv[i+n*NM])
TwMask = np.any(H,axis=-1)
SQ = 1./(2.*refl.T[4])**2 #array(blkSize)
SQfactor = 4.0*SQ*twopisq #ditto prev.
if 'T' in calcControls[hfx+'histType']:
if 'P' in calcControls[hfx+'histType']:
FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[14])
else:
FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[12])
FP = np.repeat(FP.T,len(SGT)*len(TwinLaw),axis=0)
FPP = np.repeat(FPP.T,len(SGT)*len(TwinLaw),axis=0)
Uniq = np.inner(H,SGMT)
Phi = np.inner(H,SGT)
phase = twopi*(np.inner(Uniq,(dXdata+Xdata).T).T+Phi.T).T
sinp = np.sin(phase)
cosp = np.cos(phase)
biso = -SQfactor*Uisodata[:,nxs]
Tiso = np.repeat(np.where(biso<1.,np.exp(biso),1.0),len(SGT)*len(TwinLaw),axis=1).T
HbH = -np.sum(Uniq.T*np.swapaxes(np.inner(bij,Uniq),2,-1),axis=1)
Tuij = np.where(HbH<1.,np.exp(HbH),1.0).T
Tcorr = np.reshape(Tiso,Tuij.shape)*Tuij*Mdata*Fdata/len(SGMT)
Tindx = np.array([refDict['FF']['El'].index(El) for El in Tdata])
FF = np.repeat(refDict['FF']['FF'][iBeg:iFin].T[Tindx].T,len(SGT)*len(TwinLaw),axis=0)
Bab = np.repeat(parmDict[phfx+'BabA']*np.exp(-parmDict[phfx+'BabU']*SQfactor),len(SGT)*len(TwinLaw))
if 'T' in calcControls[hfx+'histType']: #fa,fb are 2 X blkSize X nTwin X nOps x nAtoms
fa = np.array([np.reshape(((FF+FP).T-Bab).T,cosp.shape)*cosp*Tcorr,-np.reshape(Flack*FPP,sinp.shape)*sinp*Tcorr])
fb = np.array([np.reshape(((FF+FP).T-Bab).T,sinp.shape)*sinp*Tcorr,np.reshape(Flack*FPP,cosp.shape)*cosp*Tcorr])
else:
fa = np.array([np.reshape(((FF+FP).T-Bab).T,cosp.shape)*cosp*Tcorr,-Flack*FPP*sinp*Tcorr])
fb = np.array([np.reshape(((FF+FP).T-Bab).T,sinp.shape)*sinp*Tcorr,Flack*FPP*cosp*Tcorr])
fas = np.sum(np.sum(fa,axis=-1),axis=-1) #real 2 x blkSize x nTwin; sum over atoms & uniq hkl
fbs = np.sum(np.sum(fb,axis=-1),axis=-1) #imag
if SGData['SGInv']: #centrosymmetric; B=0
fbs[0] *= 0.
fas[1] *= 0.
if 'P' in calcControls[hfx+'histType']: #PXC, PNC & PNT: F^2 = A[0]^2 + A[1]^2 + B[0]^2 + B[1]^2
refl.T[9] = np.sum(fas**2,axis=0)+np.sum(fbs**2,axis=0) #add fam**2 & fbm**2 here
refl.T[10] = atan2d(fbs[0],fas[0]) #ignore f' & f"
else: #HKLF: F^2 = (A[0]+A[1])^2 + (B[0]+B[1])^2
if len(TwinLaw) > 1:
refl.T[9] = np.sum(fas[:,:,0],axis=0)**2+np.sum(fbs[:,:,0],axis=0)**2 #FcT from primary twin element
refl.T[7] = np.sum(TwinFr*TwMask*np.sum(fas,axis=0)**2,axis=-1)+ \
np.sum(TwinFr*TwMask*np.sum(fbs,axis=0)**2,axis=-1) #Fc sum over twins
refl.T[10] = atan2d(fbs[0].T[0],fas[0].T[0]) #ignore f' & f" & use primary twin
else: # checked correct!!
refl.T[9] = np.sum(fas,axis=0)**2+np.sum(fbs,axis=0)**2
refl.T[7] = np.copy(refl.T[9])
refl.T[10] = atan2d(fbs[0],fas[0]) #ignore f' & f"
# refl.T[10] = atan2d(np.sum(fbs,axis=0),np.sum(fas,axis=0)) #include f' & f"
iBeg += blkSize
# print 'sf time %.4f, nref %d, blkSize %d'%(time.time()-time0,nRef,blkSize)
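#Illustrative sketch (not part of GSAS-II): fa and fb above are the cosine
#(real, A) and sine (imaginary, B) parts of F, so F^2 = A^2 + B^2 equals
#|sum_j f_j exp(2*pi*i h.x_j)|^2. The toy atoms and scattering factors below
#are arbitrary.
def _demo_fsq_identity():
    import numpy as np
    rng = np.random.default_rng(42)
    h = np.array([1,2,3])
    xyz = rng.random((5,3))                 #5 toy atoms, fractional coords
    f = rng.random(5)+1.                    #toy scattering factors
    phase = 2.*np.pi*np.inner(xyz,h)
    A = np.sum(f*np.cos(phase))
    B = np.sum(f*np.sin(phase))
    Fc = np.sum(f*np.exp(1j*phase))
    assert np.allclose(A**2+B**2,np.abs(Fc)**2)
    return A**2+B**2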
def StructureFactorDerv2(refDict,G,hfx,pfx,SGData,calcControls,parmDict):
'''Compute structure factor derivatives on blocks of reflections - for powders/nontwins only
faster than StructureFactorDerv - correct for powders/nontwins!!
input:
:param dict refDict: where
'RefList' list where each ref = h,k,l,it,d,...
'FF' dict of form factors - filled in below
:param np.array G: reciprocal metric tensor
:param str hfx: histogram id string
:param str pfx: phase id string
:param dict SGData: space group info. dictionary output from SpcGroup
:param dict calcControls:
:param dict parmDict:
:returns: dict dFdvDict: dictionary of derivatives
'''
phfx = pfx.split(':')[0]+hfx
ast = np.sqrt(np.diag(G))
Mast = twopisq*np.multiply.outer(ast,ast)
SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
SGT = np.array([ops[1] for ops in SGData['SGOps']])
FFtables = calcControls['FFtables']
BLtables = calcControls['BLtables']
Amat,Bmat = G2lat.Gmat2AB(G)
nRef = len(refDict['RefList'])
Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata = \
GetAtomFXU(pfx,calcControls,parmDict)
if not Xdata.size: #no atoms in phase!
return {}
mSize = len(Mdata)
FF = np.zeros(len(Tdata))
if 'NC' in calcControls[hfx+'histType']:
FP,FPP = G2el.BlenResCW(Tdata,BLtables,parmDict[hfx+'Lam'])
elif 'X' in calcControls[hfx+'histType']:
FP = np.array([FFtables[El][hfx+'FP'] for El in Tdata])
FPP = np.array([FFtables[El][hfx+'FPP'] for El in Tdata])
Uij = np.array(G2lat.U6toUij(Uijdata))
bij = Mast*Uij.T
dFdvDict = {}
dFdfr = np.zeros((nRef,mSize))
dFdx = np.zeros((nRef,mSize,3))
dFdui = np.zeros((nRef,mSize))
dFdua = np.zeros((nRef,mSize,6))
dFdbab = np.zeros((nRef,2))
dFdfl = np.zeros((nRef))
Flack = 1.0
if not SGData['SGInv'] and 'S' in calcControls[hfx+'histType'] and phfx+'Flack' in parmDict:
Flack = 1.-2.*parmDict[phfx+'Flack']
time0 = time.time()
#reflection processing begins here - big arrays!
iBeg = 0
blkSize = 32 #no. of reflections in a block - optimized for speed
while iBeg < nRef:
iFin = min(iBeg+blkSize,nRef)
refl = refDict['RefList'][iBeg:iFin] #array(blkSize,nItems)
H = refl.T[:3].T
SQ = 1./(2.*refl.T[4])**2 # or (sin(theta)/lambda)**2
SQfactor = 8.0*SQ*np.pi**2
if 'T' in calcControls[hfx+'histType']:
if 'P' in calcControls[hfx+'histType']:
FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[14])
else:
FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[12])
FP = np.repeat(FP.T,len(SGT),axis=0)
FPP = np.repeat(FPP.T,len(SGT),axis=0)
dBabdA = np.exp(-parmDict[phfx+'BabU']*SQfactor)
Bab = np.repeat(parmDict[phfx+'BabA']*np.exp(-parmDict[phfx+'BabU']*SQfactor),len(SGT))
Tindx = np.array([refDict['FF']['El'].index(El) for El in Tdata])
FF = np.repeat(refDict['FF']['FF'][iBeg:iFin].T[Tindx].T,len(SGT),axis=0)
Uniq = np.inner(H,SGMT) # array(nSGOp,3)
Phi = np.inner(H,SGT)
phase = twopi*(np.inner(Uniq,(dXdata+Xdata).T).T+Phi.T).T
sinp = np.sin(phase) #refBlk x nOps x nAtoms
cosp = np.cos(phase)
occ = Mdata*Fdata/len(SGT)
biso = -SQfactor*Uisodata[:,nxs]
Tiso = np.repeat(np.where(biso<1.,np.exp(biso),1.0),len(SGT),axis=1).T
HbH = np.sum(Uniq.T*np.swapaxes(np.inner(bij,Uniq),2,-1),axis=1)
Tuij = np.where(HbH<1.,np.exp(-HbH),1.0).T
Tcorr = np.reshape(Tiso,Tuij.shape)*Tuij*Mdata*Fdata/len(SGMT)
Hij = np.array([Mast*np.multiply.outer(U,U) for U in np.reshape(Uniq,(-1,3))]) #Nref*Nops,3,3
Hij = np.reshape(np.array([G2lat.UijtoU6(uij) for uij in Hij]),(-1,len(SGT),6)) #Nref,Nops,6
fot = np.reshape(((FF+FP).T-Bab).T,cosp.shape)*Tcorr
if len(FPP.shape) > 1:
fotp = np.reshape(FPP,cosp.shape)*Tcorr
else:
fotp = FPP*Tcorr
if 'T' in calcControls[hfx+'histType']:
fa = np.array([fot*cosp,-np.reshape(Flack*FPP,sinp.shape)*sinp*Tcorr])
fb = np.array([fot*sinp,np.reshape(Flack*FPP,cosp.shape)*cosp*Tcorr])
else:
fa = np.array([fot*cosp,-Flack*FPP*sinp*Tcorr])
fb = np.array([fot*sinp,Flack*FPP*cosp*Tcorr])
fas = np.sum(np.sum(fa,axis=-1),axis=-1) #real sum over atoms & unique hkl array(2,refBlk,nTwins)
fbs = np.sum(np.sum(fb,axis=-1),axis=-1) #imag sum over atoms & uniq hkl
fax = np.array([-fot*sinp,-fotp*cosp]) #positions array(2,refBlk,nEqv,nAtoms)
fbx = np.array([fot*cosp,-fotp*sinp])
#sum below is over Uniq
dfadfr = np.sum(fa/occ,axis=-2) #array(2,refBlk,nAtom) Fdata != 0 avoids /0. problem
dfadba = np.sum(-cosp*Tcorr,axis=-2) #array(refBlk,nAtom)
dfadx = np.sum(twopi*Uniq[nxs,:,nxs,:,:]*np.swapaxes(fax,-2,-1)[:,:,:,:,nxs],axis=-2)
dfadui = np.sum(-SQfactor[nxs,:,nxs,nxs]*fa,axis=-2) #array(Ops,refBlk,nAtoms)
dfadua = np.sum(-Hij[nxs,:,nxs,:,:]*np.swapaxes(fa,-2,-1)[:,:,:,:,nxs],axis=-2)
# array(2,refBlk,nAtom,3) & array(2,refBlk,nAtom,6)
if not SGData['SGInv']:
dfbdfr = np.sum(fb/occ,axis=-2) #Fdata != 0 avoids /0. problem
dfbdba = np.sum(-sinp*Tcorr,axis=-2)
dfadfl = np.sum(np.sum(-fotp*sinp,axis=-1),axis=-1)
dfbdfl = np.sum(np.sum(fotp*cosp,axis=-1),axis=-1)
dfbdx = np.sum(twopi*Uniq[nxs,:,nxs,:,:]*np.swapaxes(fbx,-2,-1)[:,:,:,:,nxs],axis=-2)
dfbdui = np.sum(-SQfactor[nxs,:,nxs,nxs]*fb,axis=-2)
dfbdua = np.sum(-Hij[nxs,:,nxs,:,:]*np.swapaxes(fb,-2,-1)[:,:,:,:,nxs],axis=-2)
else:
dfbdfr = np.zeros_like(dfadfr)
dfbdx = np.zeros_like(dfadx)
dfbdui = np.zeros_like(dfadui)
dfbdua = np.zeros_like(dfadua)
dfbdba = np.zeros_like(dfadba)
dfadfl = 0.0
dfbdfl = 0.0
#NB: the above have been checked against PA(1:10,1:2) in strfctr.for for Al2O3!
SA = fas[0]+fas[1]
SB = fbs[0]+fbs[1]
if 'P' in calcControls[hfx+'histType']: #checked perfect for centro & noncentro
dFdfr[iBeg:iFin] = 2.*np.sum(fas[:,:,nxs]*dfadfr+fbs[:,:,nxs]*dfbdfr,axis=0)*Mdata/len(SGMT)
dFdx[iBeg:iFin] = 2.*np.sum(fas[:,:,nxs,nxs]*dfadx+fbs[:,:,nxs,nxs]*dfbdx,axis=0)
dFdui[iBeg:iFin] = 2.*np.sum(fas[:,:,nxs]*dfadui+fbs[:,:,nxs]*dfbdui,axis=0)
dFdua[iBeg:iFin] = 2.*np.sum(fas[:,:,nxs,nxs]*dfadua+fbs[:,:,nxs,nxs]*dfbdua,axis=0)
else:
dFdfr[iBeg:iFin] = (2.*SA[:,nxs]*(dfadfr[0]+dfadfr[1])+2.*SB[:,nxs]*(dfbdfr[0]+dfbdfr[1]))*Mdata/len(SGMT)
dFdx[iBeg:iFin] = 2.*SA[:,nxs,nxs]*(dfadx[0]+dfadx[1])+2.*SB[:,nxs,nxs]*(dfbdx[0]+dfbdx[1])
dFdui[iBeg:iFin] = 2.*SA[:,nxs]*(dfadui[0]+dfadui[1])+2.*SB[:,nxs]*(dfbdui[0]+dfbdui[1])
dFdua[iBeg:iFin] = 2.*SA[:,nxs,nxs]*(dfadua[0]+dfadua[1])+2.*SB[:,nxs,nxs]*(dfbdua[0]+dfbdua[1])
dFdfl[iBeg:iFin] = -SA*dfadfl-SB*dfbdfl #array(nRef,)
dFdbab[iBeg:iFin] = 2.*(fas[0,nxs]*np.array([np.sum(dfadba.T*dBabdA,axis=0),np.sum(-dfadba.T*parmDict[phfx+'BabA']*SQfactor*dBabdA,axis=0)])+ \
fbs[0,nxs]*np.array([np.sum(dfbdba.T*dBabdA,axis=0),np.sum(-dfbdba.T*parmDict[phfx+'BabA']*SQfactor*dBabdA,axis=0)])).T
iBeg += blkSize
# print 'derv time %.4f, nref %d, blkSize %d'%(time.time()-time0,nRef,blkSize)
#loop over atoms - each dict entry is list of derivatives for all the reflections
for i in range(len(Mdata)):
dFdvDict[pfx+'Afrac:'+str(i)] = dFdfr.T[i]
dFdvDict[pfx+'dAx:'+str(i)] = dFdx.T[0][i]
dFdvDict[pfx+'dAy:'+str(i)] = dFdx.T[1][i]
dFdvDict[pfx+'dAz:'+str(i)] = dFdx.T[2][i]
dFdvDict[pfx+'AUiso:'+str(i)] = dFdui.T[i]
dFdvDict[pfx+'AU11:'+str(i)] = dFdua.T[0][i]
dFdvDict[pfx+'AU22:'+str(i)] = dFdua.T[1][i]
dFdvDict[pfx+'AU33:'+str(i)] = dFdua.T[2][i]
dFdvDict[pfx+'AU12:'+str(i)] = 2.*dFdua.T[3][i]
dFdvDict[pfx+'AU13:'+str(i)] = 2.*dFdua.T[4][i]
dFdvDict[pfx+'AU23:'+str(i)] = 2.*dFdua.T[5][i]
dFdvDict[phfx+'Flack'] = 4.*dFdfl.T
dFdvDict[phfx+'BabA'] = dFdbab.T[0]
dFdvDict[phfx+'BabU'] = dFdbab.T[1]
return dFdvDict
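#Illustrative sketch (not part of GSAS-II): the derivative blocks above apply
#the chain rule d(F^2)/dp = 2*A*dA/dp + 2*B*dB/dp (see the dFdx sums); a
#central finite difference on a toy two-atom structure factor confirms it.
#All positions and scattering factors are arbitrary toy values.
def _demo_fsq_deriv():
    import numpy as np
    h = np.array([2.,1.,3.])
    f = np.array([1.7,2.3])                 #toy scattering factors
    x2 = np.array([0.11,0.47,0.31])         #fixed second atom
    def AB(x1):
        phases = 2.*np.pi*np.dot(np.array([x1,x2]),h)
        return np.sum(f*np.cos(phases)),np.sum(f*np.sin(phases))
    def Fsq(x1):
        A,B = AB(x1)
        return A**2+B**2
    x1 = np.array([0.25,0.35,0.15])
    A,B = AB(x1)
    p1 = 2.*np.pi*np.dot(h,x1)
    step = 1.e-6
    for i in range(3):
        dx = np.zeros(3)
        dx[i] = step
        numeric = (Fsq(x1+dx)-Fsq(x1-dx))/(2.*step)
        dA = -2.*np.pi*h[i]*f[0]*np.sin(p1) #analytic dA/dx1[i]
        dB = 2.*np.pi*h[i]*f[0]*np.cos(p1)  #analytic dB/dx1[i]
        assert np.allclose(numeric,2.*A*dA+2.*B*dB,rtol=1.e-4,atol=1.e-6)
    return A,B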
def MagStructureFactor2(refDict,G,hfx,pfx,SGData,calcControls,parmDict):
''' Compute neutron magnetic structure factors for all h,k,l for phase
puts the result, F^2, in each ref[9] in refList
operates on blocks of 100 reflections for speed
input:
:param dict refDict: where
'RefList' list where each ref = h,k,l,it,d,...
'FF' dict of form factors - filled in below
:param np.array G: reciprocal metric tensor
:param str hfx: histogram id string
:param str pfx: phase id string
:param dict SGData: space group info. dictionary output from SpcGroup
:param dict calcControls:
:param dict parmDict:
'''
g = nl.inv(G)
ast = np.sqrt(np.diag(G))
ainv = np.sqrt(np.diag(g))
GS = G/np.outer(ast,ast)
Ginv = g/np.outer(ainv,ainv)
uAmat = G2lat.Gmat2AB(GS)[0]
Mast = twopisq*np.multiply.outer(ast,ast)
SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
SGT = np.array([ops[1] for ops in SGData['SGOps']])
Ncen = len(SGData['SGCen'])
Nops = len(SGMT)*Ncen
if not SGData['SGFixed']:
Nops *= (1+SGData['SGInv'])
MFtables = calcControls['MFtables']
Bmat = G2lat.Gmat2AB(G)[1]
TwinLaw = np.ones(1)
# TwinLaw = np.array([[[1,0,0],[0,1,0],[0,0,1]],])
# TwDict = refDict.get('TwDict',{})
# if 'S' in calcControls[hfx+'histType']:
# NTL = calcControls[phfx+'NTL']
# NM = calcControls[phfx+'TwinNMN']+1
# TwinLaw = calcControls[phfx+'TwinLaw']
# TwinFr = np.array([parmDict[phfx+'TwinFr:'+str(i)] for i in range(len(TwinLaw))])
# TwinInv = list(np.where(calcControls[phfx+'TwinInv'],-1,1))
Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata = \
GetAtomFXU(pfx,calcControls,parmDict)
if not Xdata.size: #no atoms in phase!
return
Mag = np.array([np.sqrt(np.inner(mag,np.inner(mag,Ginv))) for mag in Gdata.T])
Gdata = np.inner(Gdata.T,SGMT).T #apply sym. ops.
if SGData['SGInv'] and not SGData['SGFixed']:
Gdata = np.hstack((Gdata,-Gdata)) #inversion if any
Gdata = np.hstack([Gdata for icen in range(Ncen)]) #dup over cell centering--> [Mxyz,nops,natms]
Gdata = SGData['MagMom'][nxs,:,nxs]*Gdata #flip vectors according to spin flip * det(opM)
Mag = np.tile(Mag[:,nxs],Nops).T #make Mag same length as Gdata
VGi = np.sqrt(nl.det(Ginv))
Kdata = np.inner(Gdata.T,uAmat).T*VGi/Mag #Cartesian unit vectors
Uij = np.array(G2lat.U6toUij(Uijdata))
bij = Mast*Uij.T
blkSize = 100 #no. of reflections in a block - size seems optimal
nRef = refDict['RefList'].shape[0]
SQ = 1./(2.*refDict['RefList'].T[4])**2
refDict['FF']['El'] = list(MFtables.keys())
refDict['FF']['MF'] = np.zeros((nRef,len(MFtables)))
for iel,El in enumerate(refDict['FF']['El']):
refDict['FF']['MF'].T[iel] = G2el.MagScatFac(MFtables[El],SQ)
#reflection processing begins here - big arrays!
iBeg = 0
while iBeg < nRef:
iFin = min(iBeg+blkSize,nRef)
refl = refDict['RefList'][iBeg:iFin] #array(blkSize,nItems)
H = refl.T[:3].T #array(blkSize,3)
# H = np.squeeze(np.inner(H.T,TwinLaw)) #maybe array(blkSize,nTwins,3) or (blkSize,3)
# TwMask = np.any(H,axis=-1)
# if TwinLaw.shape[0] > 1 and TwDict: #need np.inner(TwinLaw[?],TwDict[iref][i])*TwinInv[i]
# for ir in range(blkSize):
# iref = ir+iBeg
# if iref in TwDict:
# for i in TwDict[iref]:
# for n in range(NTL):
# H[ir][i+n*NM] = np.inner(TwinLaw[n*NM],np.array(TwDict[iref][i])*TwinInv[i+n*NM])
# TwMask = np.any(H,axis=-1)
SQ = 1./(2.*refl.T[4])**2 #array(blkSize)
SQfactor = 4.0*SQ*twopisq #ditto prev.
Uniq = np.inner(H,SGMT)
Phi = np.inner(H,SGT)
phase = twopi*(np.inner(Uniq,(dXdata+Xdata).T).T+Phi.T).T
biso = -SQfactor*Uisodata[:,nxs]
Tiso = np.repeat(np.where(biso<1.,np.exp(biso),1.0),len(SGT)*len(TwinLaw),axis=1).T
HbH = -np.sum(Uniq.T*np.swapaxes(np.inner(bij,Uniq),2,-1),axis=1)
Tuij = np.where(HbH<1.,np.exp(HbH),1.0).T
Tindx = np.array([refDict['FF']['El'].index(El) for El in Tdata])
MF = refDict['FF']['MF'][iBeg:iFin].T[Tindx].T #Nref,Natm
TMcorr = 0.539*(np.reshape(Tiso,Tuij.shape)*Tuij)[:,0,:]*Fdata*Mdata*MF/(2*Nops) #Nref,Natm
if SGData['SGInv']:
if not SGData['SGFixed']:
mphase = np.hstack((phase,-phase)) #OK
else:
mphase = phase
else:
mphase = phase #
mphase = np.array([mphase+twopi*np.inner(cen,H)[:,nxs,nxs] for cen in SGData['SGCen']])
mphase = np.concatenate(mphase,axis=1) #Nref,full Nop,Natm
sinm = np.sin(mphase) #ditto - match magstrfc.for
cosm = np.cos(mphase) #ditto
HM = np.inner(Bmat.T,H) #put into cartesian space
HM = HM/np.sqrt(np.sum(HM**2,axis=0)) #Kdata = MAGS & HM = UVEC in magstrfc.for both OK
eDotK = np.sum(HM[:,:,nxs,nxs]*Kdata[:,nxs,:,:],axis=0)
Q = HM[:,:,nxs,nxs]*eDotK[nxs,:,:,:]-Kdata[:,nxs,:,:] #xyz,Nref,Nop,Natm = BPM in magstrfc.for OK
fam = Q*TMcorr[nxs,:,nxs,:]*cosm[nxs,:,:,:]*Mag[nxs,nxs,:,:] #ditto
fbm = Q*TMcorr[nxs,:,nxs,:]*sinm[nxs,:,:,:]*Mag[nxs,nxs,:,:] #ditto
fams = np.sum(np.sum(fam,axis=-1),axis=-1) #xyz,Nref
fbms = np.sum(np.sum(fbm,axis=-1),axis=-1) #ditto
refl.T[9] = np.sum(fams**2,axis=0)+np.sum(fbms**2,axis=0)
refl.T[7] = np.copy(refl.T[9])
refl.T[10] = atan2d(fbms[0],fams[0]) #- what is phase for mag refl?
# if 'P' in calcControls[hfx+'histType']: #PXC, PNC & PNT: F^2 = A[0]^2 + A[1]^2 + B[0]^2 + B[1]^2
# refl.T[9] = np.sum(fas**2,axis=0)+np.sum(fbs**2,axis=0) #add fam**2 & fbm**2 here
# refl.T[10] = atan2d(fbs[0],fas[0]) #ignore f' & f"
# else: #HKLF: F^2 = (A[0]+A[1])^2 + (B[0]+B[1])^2
# if len(TwinLaw) > 1:
# refl.T[9] = np.sum(fas[:,:,0],axis=0)**2+np.sum(fbs[:,:,0],axis=0)**2 #FcT from primary twin element
# refl.T[7] = np.sum(TwinFr*TwMask*np.sum(fas,axis=0)**2,axis=-1)+ \
# np.sum(TwinFr*TwMask*np.sum(fbs,axis=0)**2,axis=-1) #Fc sum over twins
# refl.T[10] = atan2d(fbs[0].T[0],fas[0].T[0]) #ignore f' & f" & use primary twin
# else: # checked correct!!
# refl.T[9] = np.sum(fas,axis=0)**2+np.sum(fbs,axis=0)**2
# refl.T[7] = np.copy(refl.T[9])
# refl.T[10] = atan2d(fbs[0],fas[0]) #ignore f' & f"
## refl.T[10] = atan2d(np.sum(fbs,axis=0),np.sum(fas,axis=0)) #include f' & f"
iBeg += blkSize
# print 'sf time %.4f, nref %d, blkSize %d'%(time.time()-time0,nRef,blkSize)
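#Illustrative sketch (not part of GSAS-II): Q above (BPM in magstrfc.for) is
#the magnetic interaction vector, Q = hm*(hm.K) - K with hm the unit
#scattering vector and K a unit moment direction; it is minus the component
#of K perpendicular to hm, so hm.Q = 0 and only the moment component normal
#to the scattering vector contributes to magnetic intensity.
def _demo_mag_projection():
    import numpy as np
    rng = np.random.default_rng(7)
    hm = rng.random(3)
    hm /= np.linalg.norm(hm)                #unit scattering vector
    K = rng.random(3)
    K /= np.linalg.norm(K)                  #unit moment direction
    Q = hm*np.dot(hm,K)-K
    assert np.allclose(np.dot(hm,Q),0.)     #Q is normal to hm
    assert np.allclose(Q,-(K-hm*np.dot(hm,K)))
    return Q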
def MagStructureFactorDerv(refDict,G,hfx,pfx,SGData,calcControls,parmDict):
'''Compute magnetic structure factor derivatives on blocks of reflections - for powders/nontwins only
input:
:param dict refDict: where
'RefList' list where each ref = h,k,l,it,d,...
'FF' dict of form factors - filled in below
:param np.array G: reciprocal metric tensor
:param str hfx: histogram id string
:param str pfx: phase id string
:param dict SGData: space group info. dictionary output from SpcGroup
:param dict calcControls:
:param dict parmDict:
:returns: dict dFdvDict: dictionary of derivatives
'''
g = nl.inv(G)
ast = np.sqrt(np.diag(G))
ainv = np.sqrt(np.diag(g))
GS = G/np.outer(ast,ast)
Ginv = g/np.outer(ainv,ainv)
uAmat = G2lat.Gmat2AB(GS)[0]
Mast = twopisq*np.multiply.outer(ast,ast)
SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
SGT = np.array([ops[1] for ops in SGData['SGOps']])
Ncen = len(SGData['SGCen'])
Nops = len(SGMT)*Ncen
if not SGData['SGFixed']:
Nops *= (1+SGData['SGInv'])
Bmat = G2lat.Gmat2AB(G)[1]
nRef = len(refDict['RefList'])
Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata = \
GetAtomFXU(pfx,calcControls,parmDict)
if not Xdata.size: #no atoms in phase!
return {}
mSize = len(Mdata)
Mag = np.array([np.sqrt(np.inner(mag,np.inner(mag,Ginv))) for mag in Gdata.T])
dMdm = np.inner(Gdata.T,Ginv).T/Mag
Gones = np.ones_like(Gdata)
Gdata = np.inner(Gdata.T,SGMT).T #apply sym. ops.
Gones = np.inner(Gones.T,SGMT).T
if SGData['SGInv'] and not SGData['SGFixed']:
Gdata = np.hstack((Gdata,-Gdata)) #inversion if any
Gones = np.hstack((Gones,-Gones)) #inversion if any
Gdata = np.hstack([Gdata for icen in range(Ncen)]) #dup over cell centering
Gones = np.hstack([Gones for icen in range(Ncen)]) #dup over cell centering
Gdata = SGData['MagMom'][nxs,:,nxs]*Gdata #flip vectors according to spin flip
Gones = SGData['MagMom'][nxs,:,nxs]*Gones #flip vectors according to spin flip
Mag = np.tile(Mag[:,nxs],Nops).T #make Mag same length as Gdata
VGi = np.sqrt(nl.det(Ginv))
Kdata = np.inner(Gdata.T,uAmat).T*VGi/Mag #make unit vectors in Cartesian space
dkdG = (np.inner(Gones.T,uAmat).T*VGi)/Mag
dkdm = dkdG-Kdata*dMdm[:,nxs,:]/Mag[nxs,:,:]
dFdMx = np.zeros((nRef,mSize,3))
Uij = np.array(G2lat.U6toUij(Uijdata))
bij = Mast*Uij.T
dFdvDict = {}
dFdfr = np.zeros((nRef,mSize))
dFdx = np.zeros((nRef,mSize,3))
dFdMx = np.zeros((3,nRef,mSize))
dFdui = np.zeros((nRef,mSize))
dFdua = np.zeros((nRef,mSize,6))
time0 = time.time()
#reflection processing begins here - big arrays!
iBeg = 0
blkSize = 5 #no. of reflections in a block - optimized for speed
while iBeg < nRef:
iFin = min(iBeg+blkSize,nRef)
refl = refDict['RefList'][iBeg:iFin] #array(blkSize,nItems)
H = refl.T[:3].T
SQ = 1./(2.*refl.T[4])**2 # or (sin(theta)/lambda)**2
SQfactor = 8.0*SQ*np.pi**2
Uniq = np.inner(H,SGMT) # array(nSGOp,3)
Phi = np.inner(H,SGT)
phase = twopi*(np.inner(Uniq,(dXdata+Xdata).T).T+Phi.T).T
occ = Mdata*Fdata/Nops
biso = -SQfactor*Uisodata[:,nxs]
Tiso = np.repeat(np.where(biso<1.,np.exp(biso),1.0),len(SGT),axis=1).T
HbH = -np.sum(Uniq.T*np.swapaxes(np.inner(bij,Uniq),2,-1),axis=1)
Tuij = np.where(HbH<1.,np.exp(HbH),1.0).T
Hij = np.array([Mast*np.multiply.outer(U,U) for U in np.reshape(Uniq,(-1,3))])
Hij = np.reshape(np.array([G2lat.UijtoU6(uij) for uij in Hij]),(-1,len(SGT),6))
Tindx = np.array([refDict['FF']['El'].index(El) for El in Tdata])
MF = refDict['FF']['MF'][iBeg:iFin].T[Tindx].T #Nref,Natm
TMcorr = 0.539*(np.reshape(Tiso,Tuij.shape)*Tuij)[:,0,:]*Fdata*Mdata*MF/(2*Nops) #Nref,Natm
if SGData['SGInv']:
if not SGData['SGFixed']:
mphase = np.hstack((phase,-phase)) #OK
Uniq = np.hstack((Uniq,-Uniq)) #Nref,Nops,hkl
Hij = np.hstack((Hij,Hij))
else:
mphase = phase
else:
mphase = phase #
Hij = np.concatenate(np.array([Hij for cen in SGData['SGCen']]),axis=1)
Uniq = np.hstack([Uniq for cen in SGData['SGCen']])
mphase = np.array([mphase+twopi*np.inner(cen,H)[:,nxs,nxs] for cen in SGData['SGCen']])
mphase = np.concatenate(mphase,axis=1) #Nref,Nop,Natm
sinm = np.sin(mphase) #ditto - match magstrfc.for
cosm = np.cos(mphase) #ditto
HM = np.inner(Bmat.T,H) #put into cartesian space
HM = HM/np.sqrt(np.sum(HM**2,axis=0)) #unit cartesian vector for H
eDotK = np.sum(HM[:,:,nxs,nxs]*Kdata[:,nxs,:,:],axis=0)
Q = HM[:,:,nxs,nxs]*eDotK[nxs,:,:,:]-Kdata[:,nxs,:,:] #Mxyz,Nref,Nop,Natm = BPM in magstrfc.for OK
dqdk = np.array([np.outer(hm,hm)-np.eye(3) for hm in HM.T]).T #Mxyz**2,Nref
dqdm = dqdk[:,:,:,nxs,nxs]*dkdm[:,nxs,nxs,:,:] #Mxyz**2,Nref,Nops,Natms
dmx = Q*dMdm[:,nxs,nxs,:]
dmx = dmx[nxs,:,:,:,:]+dqdm*Mag[nxs,nxs,nxs,:,:]
dmx /= 2.
fam = Q*TMcorr[nxs,:,nxs,:]*cosm[nxs,:,:,:]*Mag[nxs,nxs,:,:] #Mxyz,Nref,Nop,Natm
fbm = Q*TMcorr[nxs,:,nxs,:]*sinm[nxs,:,:,:]*Mag[nxs,nxs,:,:]
fams = np.sum(np.sum(fam,axis=-1),axis=-1) #Mxyz,Nref
fbms = np.sum(np.sum(fbm,axis=-1),axis=-1)
famx = -Q*TMcorr[nxs,:,nxs,:]*Mag[nxs,nxs,:,:]*sinm[nxs,:,:,:] #Mxyz,Nref,Nops,Natom
fbmx = Q*TMcorr[nxs,:,nxs,:]*Mag[nxs,nxs,:,:]*cosm[nxs,:,:,:]
#sums below are over Nops - real part
dfadfr = np.sum(fam/occ,axis=2) #array(Mxyz,refBlk,nAtom) Fdata != 0 avoids /0. problem deriv OK
dfadx = np.sum(twopi*Uniq[nxs,:,:,nxs,:]*famx[:,:,:,:,nxs],axis=2) #deriv OK
dfadmx = np.sum(dmx*TMcorr[nxs,nxs,:,nxs,:]*cosm[nxs,nxs,:,:,:],axis=3)
dfadui = np.sum(-SQfactor[:,nxs,nxs]*fam,axis=2) #array(Ops,refBlk,nAtoms) deriv OK
dfadua = np.sum(-Hij[nxs,:,:,nxs,:]*fam[:,:,:,:,nxs],axis=2) #deriv OK
# imaginary part; array(3,refBlk,nAtom,3) & array(3,refBlk,nAtom,6)
dfbdfr = np.sum(fbm/occ,axis=2) #array(mxyz,refBlk,nAtom) Fdata != 0 avoids /0. problem
dfbdx = np.sum(twopi*Uniq[nxs,:,:,nxs,:]*fbmx[:,:,:,:,nxs],axis=2)
dfbdmx = np.sum(dmx*TMcorr[nxs,nxs,:,nxs,:]*sinm[nxs,nxs,:,:,:],axis=3)
dfbdui = np.sum(-SQfactor[:,nxs,nxs]*fbm,axis=2) #array(Ops,refBlk,nAtoms)
dfbdua = np.sum(-Hij[nxs,:,:,nxs,:]*fbm[:,:,:,:,nxs],axis=2)
#accumulate derivatives
dFdfr[iBeg:iFin] = 2.*np.sum((fams[:,:,nxs]*dfadfr+fbms[:,:,nxs]*dfbdfr)*Mdata/Nops,axis=0) #ok
dFdx[iBeg:iFin] = 2.*np.sum(fams[:,:,nxs,nxs]*dfadx+fbms[:,:,nxs,nxs]*dfbdx,axis=0) #ok
dFdMx[:,iBeg:iFin,:] = 2.*np.sum(fams[:,:,nxs]*dfadmx+fbms[:,:,nxs]*dfbdmx,axis=0) #problems
dFdui[iBeg:iFin] = 2.*np.sum(fams[:,:,nxs]*dfadui+fbms[:,:,nxs]*dfbdui,axis=0) #ok
dFdua[iBeg:iFin] = 2.*np.sum(fams[:,:,nxs,nxs]*dfadua+fbms[:,:,nxs,nxs]*dfbdua,axis=0) #ok
iBeg += blkSize
print (' %d derivative time %.4f\r'%(nRef,time.time()-time0))
#loop over atoms - each dict entry is list of derivatives for all the reflections
for i in range(len(Mdata)):
dFdvDict[pfx+'Afrac:'+str(i)] = dFdfr.T[i]
dFdvDict[pfx+'dAx:'+str(i)] = dFdx.T[0][i]
dFdvDict[pfx+'dAy:'+str(i)] = dFdx.T[1][i]
dFdvDict[pfx+'dAz:'+str(i)] = dFdx.T[2][i]
dFdvDict[pfx+'AMx:'+str(i)] = dFdMx[0,:,i]
dFdvDict[pfx+'AMy:'+str(i)] = dFdMx[1,:,i]
dFdvDict[pfx+'AMz:'+str(i)] = dFdMx[2,:,i]
dFdvDict[pfx+'AUiso:'+str(i)] = dFdui.T[i]
dFdvDict[pfx+'AU11:'+str(i)] = dFdua.T[0][i]
dFdvDict[pfx+'AU22:'+str(i)] = dFdua.T[1][i]
dFdvDict[pfx+'AU33:'+str(i)] = dFdua.T[2][i]
dFdvDict[pfx+'AU12:'+str(i)] = 2.*dFdua.T[3][i]
dFdvDict[pfx+'AU13:'+str(i)] = 2.*dFdua.T[4][i]
dFdvDict[pfx+'AU23:'+str(i)] = 2.*dFdua.T[5][i]
return dFdvDict
def StructureFactorDervTw2(refDict,G,hfx,pfx,SGData,calcControls,parmDict):
'''Compute structure factor derivatives on blocks of reflections - for twins only
faster than StructureFactorDervTw
input:
:param dict refDict: where
'RefList' list where each ref = h,k,l,it,d,...
'FF' dict of form factors - filled in below
:param np.array G: reciprocal metric tensor
:param str hfx: histogram id string
:param str pfx: phase id string
:param dict SGData: space group info. dictionary output from SpcGroup
:param dict calcControls:
:param dict parmDict:
:returns: dict dFdvDict: dictionary of derivatives
'''
phfx = pfx.split(':')[0]+hfx
ast = np.sqrt(np.diag(G))
Mast = twopisq*np.multiply.outer(ast,ast)
SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
SGT = np.array([ops[1] for ops in SGData['SGOps']])
FFtables = calcControls['FFtables']
BLtables = calcControls['BLtables']
TwDict = refDict.get('TwDict',{})
NTL = calcControls[phfx+'NTL']
NM = calcControls[phfx+'TwinNMN']+1
TwinLaw = calcControls[phfx+'TwinLaw']
TwinFr = np.array([parmDict[phfx+'TwinFr:'+str(i)] for i in range(len(TwinLaw))])
TwinInv = list(np.where(calcControls[phfx+'TwinInv'],-1,1))
nTwin = len(TwinLaw)
nRef = len(refDict['RefList'])
Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata = \
GetAtomFXU(pfx,calcControls,parmDict)
if not Xdata.size: #no atoms in phase!
return {}
mSize = len(Mdata)
FF = np.zeros(len(Tdata))
if 'NC' in calcControls[hfx+'histType']:
FP,FPP = G2el.BlenResCW(Tdata,BLtables,parmDict[hfx+'Lam'])
elif 'X' in calcControls[hfx+'histType']:
FP = np.array([FFtables[El][hfx+'FP'] for El in Tdata])
FPP = np.array([FFtables[El][hfx+'FPP'] for El in Tdata])
Uij = np.array(G2lat.U6toUij(Uijdata))
bij = Mast*Uij.T
dFdvDict = {}
dFdfr = np.zeros((nRef,nTwin,mSize))
dFdx = np.zeros((nRef,nTwin,mSize,3))
dFdui = np.zeros((nRef,nTwin,mSize))
dFdua = np.zeros((nRef,nTwin,mSize,6))
dFdbab = np.zeros((nRef,nTwin,2))
dFdtw = np.zeros((nRef,nTwin))
time0 = time.time()
#reflection processing begins here - big arrays!
iBeg = 0
blkSize = 16 #no. of reflections in a block - optimized for speed
while iBeg < nRef:
iFin = min(iBeg+blkSize,nRef)
refl = refDict['RefList'][iBeg:iFin] #array(blkSize,nItems)
H = refl.T[:3]
H = np.inner(H.T,TwinLaw) #array(3,nTwins)
TwMask = np.any(H,axis=-1)
for ir in range(blkSize):
iref = ir+iBeg
if iref in TwDict:
for i in TwDict[iref]:
for n in range(NTL):
H[ir][i+n*NM] = np.inner(TwinLaw[n*NM],np.array(TwDict[iref][i])*TwinInv[i+n*NM])
TwMask = np.any(H,axis=-1)
SQ = 1./(2.*refl.T[4])**2 # or (sin(theta)/lambda)**2
SQfactor = 8.0*SQ*np.pi**2
if 'T' in calcControls[hfx+'histType']:
if 'P' in calcControls[hfx+'histType']:
FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[14])
else:
FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[12])
FP = np.repeat(FP.T,len(SGT)*len(TwinLaw),axis=0)
FPP = np.repeat(FPP.T,len(SGT)*len(TwinLaw),axis=0)
dBabdA = np.exp(-parmDict[phfx+'BabU']*SQfactor)
Bab = np.repeat(parmDict[phfx+'BabA']*dBabdA,len(SGT)*nTwin)
Tindx = np.array([refDict['FF']['El'].index(El) for El in Tdata])
FF = np.repeat(refDict['FF']['FF'][iBeg:iFin].T[Tindx].T,len(SGT)*len(TwinLaw),axis=0)
Uniq = np.inner(H,SGMT) # (nTwin,nSGOp,3)
Phi = np.inner(H,SGT)
phase = twopi*(np.inner(Uniq,(dXdata+Xdata).T).T+Phi.T).T
sinp = np.sin(phase)
cosp = np.cos(phase)
occ = Mdata*Fdata/len(SGT)
biso = -SQfactor*Uisodata[:,nxs]
Tiso = np.repeat(np.where(biso<1.,np.exp(biso),1.0),len(SGT)*nTwin,axis=1)
HbH = -np.sum(Uniq.T*np.swapaxes(np.inner(bij,Uniq),2,-1),axis=1)
Hij = np.array([Mast*np.multiply.outer(U,U) for U in np.reshape(Uniq,(-1,3))])
Hij = np.reshape(np.array([G2lat.UijtoU6(uij) for uij in Hij]),(-1,nTwin,len(SGT),6))
Tuij = np.where(HbH<1.,np.exp(HbH),1.0)
Tcorr = (np.reshape(Tiso,Tuij.shape)*Tuij).T*Mdata*Fdata/len(SGMT)
fot = np.reshape(((FF+FP).T-Bab).T,cosp.shape)*Tcorr
fotp = FPP*Tcorr
if 'T' in calcControls[hfx+'histType']: #fa,fb are 2 X blkSize X nTwin X nOps x nAtoms
fa = np.array([np.reshape(((FF+FP).T-Bab).T,cosp.shape)*cosp*Tcorr,-np.reshape(FPP,sinp.shape)*sinp*Tcorr])
fb = np.array([np.reshape(((FF+FP).T-Bab).T,sinp.shape)*sinp*Tcorr,np.reshape(FPP,cosp.shape)*cosp*Tcorr])
else:
fa = np.array([np.reshape(((FF+FP).T-Bab).T,cosp.shape)*cosp*Tcorr,-FPP*sinp*Tcorr])
fb = np.array([np.reshape(((FF+FP).T-Bab).T,sinp.shape)*sinp*Tcorr,FPP*cosp*Tcorr])
fas = np.sum(np.sum(fa,axis=-1),axis=-1) #real sum over atoms & unique hkl array(2,nTwins)
fbs = np.sum(np.sum(fb,axis=-1),axis=-1) #imag sum over atoms & uniq hkl
if SGData['SGInv']: #centrosymmetric; B=0
fbs[0] *= 0.
fas[1] *= 0.
fax = np.array([-fot*sinp,-fotp*cosp]) #positions array(2,nRef,ntwi,nEqv,nAtoms)
fbx = np.array([fot*cosp,-fotp*sinp])
#sum below is over Uniq
dfadfr = np.sum(np.sum(fa/occ,axis=-2),axis=0) #array(2,nRef,ntwin,nAtom) Fdata != 0 avoids /0. problem
dfadba = np.sum(-cosp*Tcorr[:,nxs],axis=1)
dfadui = np.sum(np.sum(-SQfactor[nxs,:,nxs,nxs,nxs]*fa,axis=-2),axis=0)
dfadx = np.sum(np.sum(twopi*Uniq[nxs,:,:,:,nxs,:]*fax[:,:,:,:,:,nxs],axis=-3),axis=0) # nRef x nTwin x nAtoms x xyz; sum on ops & A,A'
dfadua = np.sum(np.sum(-Hij[nxs,:,:,:,nxs,:]*fa[:,:,:,:,:,nxs],axis=-3),axis=0)
if not SGData['SGInv']:
dfbdfr = np.sum(np.sum(fb/occ,axis=-2),axis=0) #Fdata != 0 avoids /0. problem
dfadba /= 2.
# dfbdba = np.sum(-sinp*Tcorr[:,nxs],axis=1)/2.
dfbdui = np.sum(np.sum(-SQfactor[nxs,:,nxs,nxs,nxs]*fb,axis=-2),axis=0)
dfbdx = np.sum(np.sum(twopi*Uniq[nxs,:,:,:,nxs,:]*fbx[:,:,:,:,:,nxs],axis=-3),axis=0)
dfbdua = np.sum(np.sum(-Hij[nxs,:,:,:,nxs,:]*fb[:,:,:,:,:,nxs],axis=-3),axis=0)
else:
dfbdfr = np.zeros_like(dfadfr)
dfbdx = np.zeros_like(dfadx)
dfbdui = np.zeros_like(dfadui)
dfbdua = np.zeros_like(dfadua)
# dfbdba = np.zeros_like(dfadba)
SA = fas[0]+fas[1]
SB = fbs[0]+fbs[1]
# GSASIIpath.IPyBreak()
dFdfr[iBeg:iFin] = ((2.*TwMask*SA)[:,:,nxs]*dfadfr+(2.*TwMask*SB)[:,:,nxs]*dfbdfr)*Mdata[nxs,nxs,:]/len(SGMT)
dFdx[iBeg:iFin] = (2.*TwMask*SA)[:,:,nxs,nxs]*dfadx+(2.*TwMask*SB)[:,:,nxs,nxs]*dfbdx
dFdui[iBeg:iFin] = (2.*TwMask*SA)[:,:,nxs]*dfadui+(2.*TwMask*SB)[:,:,nxs]*dfbdui
dFdua[iBeg:iFin] = (2.*TwMask*SA)[:,:,nxs,nxs]*dfadua+(2.*TwMask*SB)[:,:,nxs,nxs]*dfbdua
if SGData['SGInv']: #centrosymmetric; B=0
dFdtw[iBeg:iFin] = np.sum(TwMask[nxs,:]*fas,axis=0)**2
else:
dFdtw[iBeg:iFin] = np.sum(TwMask[nxs,:]*fas,axis=0)**2+np.sum(TwMask[nxs,:]*fbs,axis=0)**2
# dFdbab[iBeg:iFin] = fas[0,:,nxs]*np.array([np.sum(dfadba*dBabdA),np.sum(-dfadba*parmDict[phfx+'BabA']*SQfactor*dBabdA)]).T+ \
# fbs[0,:,nxs]*np.array([np.sum(dfbdba*dBabdA),np.sum(-dfbdba*parmDict[phfx+'BabA']*SQfactor*dBabdA)]).T
iBeg += blkSize
# GSASIIpath.IPyBreak()
print (' %d derivative time %.4f\r'%(len(refDict['RefList']),time.time()-time0))
#loop over atoms - each dict entry is list of derivatives for all the reflections
for i in range(len(Mdata)): #these all OK
dFdvDict[pfx+'Afrac:'+str(i)] = np.sum(dFdfr.T[i]*TwinFr[:,nxs],axis=0)
dFdvDict[pfx+'dAx:'+str(i)] = np.sum(dFdx.T[0][i]*TwinFr[:,nxs],axis=0)
dFdvDict[pfx+'dAy:'+str(i)] = np.sum(dFdx.T[1][i]*TwinFr[:,nxs],axis=0)
dFdvDict[pfx+'dAz:'+str(i)] = np.sum(dFdx.T[2][i]*TwinFr[:,nxs],axis=0)
dFdvDict[pfx+'AUiso:'+str(i)] = np.sum(dFdui.T[i]*TwinFr[:,nxs],axis=0)
dFdvDict[pfx+'AU11:'+str(i)] = np.sum(dFdua.T[0][i]*TwinFr[:,nxs],axis=0)
dFdvDict[pfx+'AU22:'+str(i)] = np.sum(dFdua.T[1][i]*TwinFr[:,nxs],axis=0)
dFdvDict[pfx+'AU33:'+str(i)] = np.sum(dFdua.T[2][i]*TwinFr[:,nxs],axis=0)
dFdvDict[pfx+'AU12:'+str(i)] = 2.*np.sum(dFdua.T[3][i]*TwinFr[:,nxs],axis=0)
dFdvDict[pfx+'AU13:'+str(i)] = 2.*np.sum(dFdua.T[4][i]*TwinFr[:,nxs],axis=0)
dFdvDict[pfx+'AU23:'+str(i)] = 2.*np.sum(dFdua.T[5][i]*TwinFr[:,nxs],axis=0)
dFdvDict[phfx+'BabA'] = dFdbab.T[0]
dFdvDict[phfx+'BabU'] = dFdbab.T[1]
for i in range(nTwin):
dFdvDict[phfx+'TwinFr:'+str(i)] = dFdtw.T[i]
return dFdvDict
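#Illustrative sketch (not part of GSAS-II): the dFdvDict sums above combine the
#per-twin atom derivatives with the twin fraction weights TwinFr (which sum to
#1), matching Fc^2(total) = sum_i TwinFr_i*Fc_i^2 for the twinned structure
#factor. The fractions and per-component values below are arbitrary.
def _demo_twin_weighting():
    import numpy as np
    TwinFr = np.array([0.6,0.3,0.1])        #toy twin fractions, sum to 1
    Fsq = np.array([120.,95.,150.])         #toy per-component F^2, one reflection
    dFsq = np.array([2.1,-0.7,1.3])         #toy per-component dF^2/dp
    total = np.sum(TwinFr*Fsq)
    dtotal = np.sum(TwinFr*dFsq)            #same weighting as the dFdvDict sums
    return total,dtotal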
def SStructureFactor(refDict,G,hfx,pfx,SGData,SSGData,calcControls,parmDict):
'''
Compute super structure factors for all h,k,l,m for phase - no twins
puts the result, F^2, in each ref[9] in refList
works on blocks of 32 reflections for speed
input:
:param dict refDict: where
'RefList' list where each ref = h,k,l,m,it,d,...
'FF' dict of form factors - filled in below
:param np.array G: reciprocal metric tensor
:param str hfx: histogram id string
:param str pfx: phase id string
:param dict SGData: space group info. dictionary output from SpcGroup
:param dict SSGData: super space group info.
:param dict calcControls:
:param dict parmDict:
'''
phfx = pfx.split(':')[0]+hfx
ast = np.sqrt(np.diag(G))
Mast = twopisq*np.multiply.outer(ast,ast)
SGInv = SGData['SGInv']
SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
Ncen = len(SGData['SGCen'])
Nops = len(SGMT)*Ncen*(1+SGData['SGInv'])
SSGMT = np.array([ops[0].T for ops in SSGData['SSGOps']])
SSGT = np.array([ops[1] for ops in SSGData['SSGOps']])
FFtables = calcControls['FFtables']
BLtables = calcControls['BLtables']
MFtables = calcControls['MFtables']
Amat,Bmat = G2lat.Gmat2AB(G)
Flack = 1.0
if not SGData['SGInv'] and 'S' in calcControls[hfx+'histType'] and phfx+'Flack' in parmDict:
Flack = 1.-2.*parmDict[phfx+'Flack']
Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata = \
GetAtomFXU(pfx,calcControls,parmDict)
if not Xdata.size: #no atoms in phase!
return
if parmDict[pfx+'isMag']: #TODO: fix the math
Mag = np.sqrt(np.sum(Gdata**2,axis=0)) #magnitude of moments for uniq atoms
Gdata = np.where(Mag>0.,Gdata/Mag,0.) #normalize mag. moments
Gdata = np.inner(Gdata.T,SGMT).T #apply sym. ops.
if SGData['SGInv'] and not SGData['SGFixed']:
Gdata = np.hstack((Gdata,-Gdata)) #inversion if any
Gdata = np.hstack([Gdata for icen in range(Ncen)]) #dup over cell centering
Gdata = SGData['MagMom'][nxs,:,nxs]*Gdata #flip vectors according to spin flip * det(opM)
Mag = np.tile(Mag[:,nxs],len(SGMT)*Ncen).T
if SGData['SGInv'] and not SGData['SGFixed']:
Mag = np.repeat(Mag,2,axis=0) #Mag same shape as Gdata
waveTypes,FSSdata,XSSdata,USSdata,MSSdata = GetAtomSSFXU(pfx,calcControls,parmDict)
ngl,nWaves,Fmod,Xmod,Umod,Mmod,glTau,glWt = G2mth.makeWaves(waveTypes,FSSdata,XSSdata,USSdata,MSSdata,Mast)
modQ = np.array([parmDict[pfx+'mV0'],parmDict[pfx+'mV1'],parmDict[pfx+'mV2']])
FF = np.zeros(len(Tdata))
if 'NC' in calcControls[hfx+'histType']:
FP,FPP = G2el.BlenResCW(Tdata,BLtables,parmDict[hfx+'Lam'])
elif 'X' in calcControls[hfx+'histType']:
FP = np.array([FFtables[El][hfx+'FP'] for El in Tdata])
FPP = np.array([FFtables[El][hfx+'FPP'] for El in Tdata])
Uij = np.array(G2lat.U6toUij(Uijdata)).T
bij = Mast*Uij
blkSize = 32 #no. of reflections in a block
nRef = refDict['RefList'].shape[0]
SQ = 1./(2.*refDict['RefList'].T[5])**2
if 'N' in calcControls[hfx+'histType']:
dat = G2el.getBLvalues(BLtables)
refDict['FF']['El'] = list(dat.keys())
refDict['FF']['FF'] = np.ones((nRef,len(dat)))*list(dat.values())
refDict['FF']['MF'] = np.zeros((nRef,len(dat)))
for iel,El in enumerate(refDict['FF']['El']):
if El in MFtables:
refDict['FF']['MF'].T[iel] = G2el.MagScatFac(MFtables[El],SQ)
else:
dat = G2el.getFFvalues(FFtables,0.)
refDict['FF']['El'] = list(dat.keys())
refDict['FF']['FF'] = np.zeros((nRef,len(dat)))
for iel,El in enumerate(refDict['FF']['El']):
refDict['FF']['FF'].T[iel] = G2el.ScatFac(FFtables[El],SQ)
time0 = time.time()
#reflection processing begins here - big arrays!
iBeg = 0
while iBeg < nRef:
iFin = min(iBeg+blkSize,nRef)
refl = refDict['RefList'][iBeg:iFin] #array(blkSize,nItems)
H = refl.T[:4] #array(blkSize,4)
HP = H[:3]+modQ[:,nxs]*H[3:] #projected hklm to hkl
SQ = 1./(2.*refl.T[5])**2 #array(blkSize)
SQfactor = 4.0*SQ*twopisq #ditto prev.
Uniq = np.inner(H.T,SSGMT)
UniqP = np.inner(HP.T,SGMT)
Phi = np.inner(H.T,SSGT)
if SGInv: #if centro - expand HKL sets
Uniq = np.hstack((Uniq,-Uniq))
Phi = np.hstack((Phi,-Phi))
UniqP = np.hstack((UniqP,-UniqP))
if 'T' in calcControls[hfx+'histType']:
if 'P' in calcControls[hfx+'histType']:
FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[14])
else:
FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[12])
FP = np.repeat(FP.T,Uniq.shape[1],axis=0)
FPP = np.repeat(FPP.T,Uniq.shape[1],axis=0)
Bab = np.repeat(parmDict[phfx+'BabA']*np.exp(-parmDict[phfx+'BabU']*SQfactor),Uniq.shape[1])
Tindx = np.array([refDict['FF']['El'].index(El) for El in Tdata])
FF = np.repeat(refDict['FF']['FF'][iBeg:iFin].T[Tindx].T,Uniq.shape[1],axis=0)
phase = twopi*(np.inner(Uniq[:,:,:3],(dXdata.T+Xdata.T))-Phi[:,:,nxs])
sinp = np.sin(phase)
cosp = np.cos(phase)
biso = -SQfactor*Uisodata[:,nxs]
Tiso = np.repeat(np.where(biso<1.,np.exp(biso),1.0),Uniq.shape[1],axis=1).T
HbH = -np.sum(UniqP[:,:,nxs,:]*np.inner(UniqP[:,:,:],bij),axis=-1) #use hklt proj to hkl
Tuij = np.where(HbH<1.,np.exp(HbH),1.0)
Tcorr = np.reshape(Tiso,Tuij.shape)*Tuij*Mdata*Fdata/Uniq.shape[1] #refBlk x ops x atoms
if 'N' in calcControls[hfx+'histType'] and parmDict[pfx+'isMag']: #TODO: mag math here??
MF = refDict['FF']['MF'][iBeg:iFin].T[Tindx].T #Nref,Natm
TMcorr = 0.539*(np.reshape(Tiso,Tuij.shape)*Tuij)[:,0,:]*Fdata*Mdata*MF/(2*Nops) #Nref,Natm
if SGData['SGInv'] and not SGData['SGFixed']:
mphase = np.hstack((phase,-phase))
else:
mphase = phase
mphase = np.array([mphase+twopi*np.inner(cen,H)[:,nxs,nxs] for cen in SGData['SGCen']])
mphase = np.concatenate(mphase,axis=1) #Nref,full Nop,Natm
sinm = np.sin(mphase) #ditto - match magstrfc.for
cosm = np.cos(mphase) #ditto
HM = np.inner(Bmat,H) #put into cartesian space
HM = HM/np.sqrt(np.sum(HM**2,axis=0)) #Gdata = MAGS & HM = UVEC in magstrfc.for both OK
eDotK = np.sum(HM[:,:,nxs,nxs]*Gdata[:,nxs,:,:],axis=0)
Q = HM[:,:,nxs,nxs]*eDotK[nxs,:,:,:]-Gdata[:,nxs,:,:] #xyz,Nref,Nop,Natm = BPM in magstrfc.for OK
fam = Q*TMcorr[nxs,:,nxs,:]*cosm[nxs,:,:,:]*Mag[nxs,nxs,:,:] #ditto
fbm = Q*TMcorr[nxs,:,nxs,:]*sinm[nxs,:,:,:]*Mag[nxs,nxs,:,:] #ditto
fams = np.sum(np.sum(fam,axis=-1),axis=-1) #xyz,Nref
fbms = np.sum(np.sum(fbm,axis=-1),axis=-1) #ditto
refl.T[9] = np.sum(fams**2,axis=0)+np.sum(fbms**2,axis=0)
refl.T[7] = np.copy(refl.T[9])
refl.T[10] = 0.0 #atan2d(fbs[0],fas[0]) - what is phase for mag refl?
else:
if 'T' in calcControls[hfx+'histType']:
fa = np.array([np.reshape(((FF+FP).T-Bab).T,cosp.shape)*cosp*Tcorr,-np.reshape(Flack*FPP,sinp.shape)*sinp*Tcorr])
fb = np.array([np.reshape(Flack*FPP,cosp.shape)*cosp*Tcorr,np.reshape(((FF+FP).T-Bab).T,sinp.shape)*sinp*Tcorr])
else:
fa = np.array([np.reshape(((FF+FP).T-Bab).T,cosp.shape)*cosp*Tcorr,-Flack*FPP*sinp*Tcorr])
fb = np.array([Flack*FPP*cosp*Tcorr,np.reshape(((FF+FP).T-Bab).T,sinp.shape)*sinp*Tcorr])
GfpuA = G2mth.Modulation(Uniq,UniqP,nWaves,Fmod,Xmod,Umod,glTau,glWt) #2 x refBlk x sym X atoms
fag = fa*GfpuA[0]-fb*GfpuA[1] #real; 2 x refBlk x sym x atoms
fbg = fb*GfpuA[0]+fa*GfpuA[1]
fas = np.sum(np.sum(fag,axis=-1),axis=-1) #2 x refBlk; sum sym & atoms
fbs = np.sum(np.sum(fbg,axis=-1),axis=-1)
if 'P' in calcControls[hfx+'histType']:
refl.T[10] = np.sum(fas,axis=0)**2+np.sum(fbs,axis=0)**2 #square of sums
refl.T[11] = atan2d(fbs[0],fas[0]) #ignore f' & f"
else:
refl.T[10] = np.sum(fas,axis=0)**2+np.sum(fbs,axis=0)**2 #square of sums
refl.T[8] = np.copy(refl.T[10])
refl.T[11] = atan2d(fbs[0],fas[0]) #ignore f' & f"
iBeg += blkSize
print ('nRef %d time %.4f\r'%(nRef,time.time()-time0))
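#Illustrative sketch (not part of GSAS-II): superstructure reflections carry a
#4th index m; the projection HP = hkl + m*modQ used above maps each (h,k,l,m)
#satellite onto an effective hkl for the 3D parts of the calculation. The
#modulation vector below is an arbitrary toy value.
def _demo_hklm_projection():
    import numpy as np
    modQ = np.array([0.0,0.0,0.347])        #toy incommensurate modulation vector
    H = np.array([[1,0,2,1],[1,0,2,-1],[0,1,3,0]],dtype=float)  #(h,k,l,m) rows
    HP = H[:,:3]+modQ[np.newaxis,:]*H[:,3:] #same projection as HP above
    assert np.allclose(HP[2],H[2,:3])       #m = 0 reflections project to themselves
    return HP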
def SStructureFactorTw(refDict,G,hfx,pfx,SGData,SSGData,calcControls,parmDict):
'''
Compute super structure factors for all h,k,l,m for phase - twins only
puts the result, F^2, in each ref[8+im] in refList
works on blocks of 32 reflections for speed
input:
:param dict refDict: where
'RefList' list where each ref = h,k,l,m,it,d,...
'FF' dict of form factors - filled in below
:param np.array G: reciprocal metric tensor
:param str hfx: histogram id string
:param str pfx: phase id string
:param dict SGData: space group info. dictionary output from SpcGroup
:param dict SSGData: super space group info.
:param dict calcControls:
:param dict parmDict:
'''
phfx = pfx.split(':')[0]+hfx
ast = np.sqrt(np.diag(G))
Mast = twopisq*np.multiply.outer(ast,ast)
SGInv = SGData['SGInv']
SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
SSGMT = np.array([ops[0].T for ops in SSGData['SSGOps']])
SSGT = np.array([ops[1] for ops in SSGData['SSGOps']])
FFtables = calcControls['FFtables']
BLtables = calcControls['BLtables']
MFtables = calcControls['MFtables']
Flack = 1.0
if not SGData['SGInv'] and 'S' in calcControls[hfx+'histType'] and phfx+'Flack' in parmDict:
Flack = 1.-2.*parmDict[phfx+'Flack']
TwinLaw = np.array([[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]],]) #4D?
TwDict = refDict.get('TwDict',{})
if 'S' in calcControls[hfx+'histType']:
NTL = calcControls[phfx+'NTL']
NM = calcControls[phfx+'TwinNMN']+1
TwinLaw = calcControls[phfx+'TwinLaw'] #this'll have to be 4D also...
TwinFr = np.array([parmDict[phfx+'TwinFr:'+str(i)] for i in range(len(TwinLaw))])
TwinInv = list(np.where(calcControls[phfx+'TwinInv'],-1,1))
Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata = \
GetAtomFXU(pfx,calcControls,parmDict)
if not Xdata.size: #no atoms in phase!
return
waveTypes,FSSdata,XSSdata,USSdata,MSSdata = GetAtomSSFXU(pfx,calcControls,parmDict)
ngl,nWaves,Fmod,Xmod,Umod,Mmod,glTau,glWt = G2mth.makeWaves(waveTypes,FSSdata,XSSdata,USSdata,MSSdata,Mast)
modQ = np.array([parmDict[pfx+'mV0'],parmDict[pfx+'mV1'],parmDict[pfx+'mV2']])
FF = np.zeros(len(Tdata))
if 'NC' in calcControls[hfx+'histType']:
FP,FPP = G2el.BlenResCW(Tdata,BLtables,parmDict[hfx+'Lam'])
elif 'X' in calcControls[hfx+'histType']:
FP = np.array([FFtables[El][hfx+'FP'] for El in Tdata])
FPP = np.array([FFtables[El][hfx+'FPP'] for El in Tdata])
Uij = np.array(G2lat.U6toUij(Uijdata)).T
bij = Mast*Uij
blkSize = 32 #no. of reflections in a block
nRef = refDict['RefList'].shape[0]
if not len(refDict['FF']): #no form factors - 1st time thru StructureFactor
SQ = 1./(2.*refDict['RefList'].T[5])**2
if 'N' in calcControls[hfx+'histType']:
dat = G2el.getBLvalues(BLtables)
refDict['FF']['El'] = list(dat.keys())
refDict['FF']['FF'] = np.ones((nRef,len(dat)))*list(dat.values())
refDict['FF']['MF'] = np.zeros((nRef,len(dat)))
for iel,El in enumerate(refDict['FF']['El']):
if El in MFtables:
refDict['FF']['MF'].T[iel] = G2el.MagScatFac(MFtables[El],SQ)
else:
dat = G2el.getFFvalues(FFtables,0.)
refDict['FF']['El'] = list(dat.keys())
refDict['FF']['FF'] = np.zeros((nRef,len(dat)))
for iel,El in enumerate(refDict['FF']['El']):
refDict['FF']['FF'].T[iel] = G2el.ScatFac(FFtables[El],SQ)
time0 = time.time()
#reflection processing begins here - big arrays!
iBeg = 0
while iBeg < nRef:
iFin = min(iBeg+blkSize,nRef)
refl = refDict['RefList'][iBeg:iFin] #array(blkSize,nItems)
H = refl[:,:4] #array(blkSize,4)
H3 = refl[:,:3]
HP = H[:,:3]+modQ[nxs,:]*H[:,3:] #projected hklm to hkl
HP = np.inner(HP,TwinLaw) #array(blkSize,nTwins,4)
H3 = np.inner(H3,TwinLaw)
TwMask = np.any(HP,axis=-1)
if TwinLaw.shape[0] > 1 and TwDict: #need np.inner(TwinLaw[?],TwDict[iref][i])*TwinInv[i]
for ir in range(blkSize):
iref = ir+iBeg
if iref in TwDict:
for i in TwDict[iref]:
for n in range(NTL):
HP[ir][i+n*NM] = np.inner(TwinLaw[n*NM],np.array(TwDict[iref][i])*TwinInv[i+n*NM])
H3[ir][i+n*NM] = np.inner(TwinLaw[n*NM],np.array(TwDict[iref][i])*TwinInv[i+n*NM])
TwMask = np.any(HP,axis=-1)
SQ = 1./(2.*refl.T[5])**2 #array(blkSize)
SQfactor = 4.0*SQ*twopisq #ditto prev.
Uniq = np.inner(H,SSGMT)
Uniq3 = np.inner(H3,SGMT)
UniqP = np.inner(HP,SGMT)
Phi = np.inner(H,SSGT)
if SGInv: #if centro - expand HKL sets
Uniq = np.hstack((Uniq,-Uniq))
Uniq3 = np.hstack((Uniq3,-Uniq3))
Phi = np.hstack((Phi,-Phi))
UniqP = np.hstack((UniqP,-UniqP))
if 'T' in calcControls[hfx+'histType']:
if 'P' in calcControls[hfx+'histType']:
FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[14])
else:
FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[12])
FP = np.repeat(FP.T,Uniq.shape[1]*len(TwinLaw),axis=0)
FPP = np.repeat(FPP.T,Uniq.shape[1]*len(TwinLaw),axis=0)
Bab = np.repeat(parmDict[phfx+'BabA']*np.exp(-parmDict[phfx+'BabU']*SQfactor),Uniq.shape[1]*len(TwinLaw))
Tindx = np.array([refDict['FF']['El'].index(El) for El in Tdata])
FF = np.repeat(refDict['FF']['FF'][iBeg:iFin].T[Tindx].T,Uniq.shape[1]*len(TwinLaw),axis=0)
phase = twopi*(np.inner(Uniq3,(dXdata.T+Xdata.T))-Phi[:,nxs,:,nxs])
sinp = np.sin(phase)
cosp = np.cos(phase)
biso = -SQfactor*Uisodata[:,nxs]
Tiso = np.repeat(np.where(biso<1.,np.exp(biso),1.0),Uniq.shape[1]*len(TwinLaw),axis=1).T
HbH = -np.sum(UniqP[:,:,:,nxs]*np.inner(UniqP[:,:,:],bij),axis=-1) #use hklt proj to hkl
Tuij = np.where(HbH<1.,np.exp(HbH),1.0)
Tcorr = np.reshape(Tiso,Tuij.shape)*Tuij*Mdata*Fdata/Uniq.shape[1] #refBlk x ops x atoms
# GSASIIpath.IPyBreak()
if 'T' in calcControls[hfx+'histType']:
fa = np.array([np.reshape(((FF+FP).T-Bab).T,cosp.shape)*cosp*Tcorr,-np.reshape(Flack*FPP,sinp.shape)*sinp*Tcorr])
fb = np.array([np.reshape(Flack*FPP,cosp.shape)*cosp*Tcorr,np.reshape(((FF+FP).T-Bab).T,sinp.shape)*sinp*Tcorr])
else:
fa = np.array([np.reshape(((FF+FP).T-Bab).T,cosp.shape)*cosp*Tcorr,-Flack*FPP*sinp*Tcorr])
fb = np.array([Flack*FPP*cosp*Tcorr,np.reshape(((FF+FP).T-Bab).T,sinp.shape)*sinp*Tcorr])
GfpuA = G2mth.ModulationTw(Uniq,UniqP,nWaves,Fmod,Xmod,Umod,glTau,glWt) #2 x refBlk x sym X atoms
fag = fa*GfpuA[0]-fb*GfpuA[1] #real; 2 x refBlk x sym x atoms
fbg = fb*GfpuA[0]+fa*GfpuA[1]
fas = np.sum(np.sum(fag,axis=-1),axis=-1) #2 x refBlk; sum sym & atoms
fbs = np.sum(np.sum(fbg,axis=-1),axis=-1)
refl.T[10] = np.sum(fas[:,:,0],axis=0)**2+np.sum(fbs[:,:,0],axis=0)**2 #FcT from primary twin element
refl.T[8] = np.sum(TwinFr*np.sum(TwMask[nxs,:,:]*fas,axis=0)**2,axis=-1)+ \
np.sum(TwinFr*np.sum(TwMask[nxs,:,:]*fbs,axis=0)**2,axis=-1) #Fc sum over twins
refl.T[11] = atan2d(fbs[0].T[0],fas[0].T[0]) #ignore f' & f"
iBeg += blkSize
print ('nRef %d time %.4f\r'%(nRef,time.time()-time0))
def SStructureFactorDerv(refDict,im,G,hfx,pfx,SGData,SSGData,calcControls,parmDict):
'''
Compute super structure factor derivatives for all h,k,l,m for phase - no twins
input:

:param dict refDict: where
'RefList' list where each ref = h,k,l,m,it,d,...
'FF' dict of form factors - filled in below
:param int im: = 1 (could be eliminated)
:param np.array G: reciprocal metric tensor
:param str hfx: histogram id string
:param str pfx: phase id string
:param dict SGData: space group info. dictionary output from SpcGroup
:param dict SSGData: super space group info.
:param dict calcControls: dictionary of calculation controls
:param dict parmDict: dictionary of parameter values

:returns: dict dFdvDict: dictionary of derivatives
'''
phfx = pfx.split(':')[0]+hfx
ast = np.sqrt(np.diag(G))
Mast = twopisq*np.multiply.outer(ast,ast)
SGInv = SGData['SGInv']
SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
SSGMT = np.array([ops[0].T for ops in SSGData['SSGOps']])
SSGT = np.array([ops[1] for ops in SSGData['SSGOps']])
FFtables = calcControls['FFtables']
BLtables = calcControls['BLtables']
nRef = len(refDict['RefList'])
Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata = \
GetAtomFXU(pfx,calcControls,parmDict)
if not Xdata.size: #no atoms in phase!
return {}
mSize = len(Mdata) #no. atoms
waveTypes,FSSdata,XSSdata,USSdata,MSSdata = GetAtomSSFXU(pfx,calcControls,parmDict)
ngl,nWaves,Fmod,Xmod,Umod,Mmod,glTau,glWt = G2mth.makeWaves(waveTypes,FSSdata,XSSdata,USSdata,MSSdata,Mast)
waveShapes,SCtauF,SCtauX,SCtauU,UmodAB = G2mth.makeWavesDerv(ngl,waveTypes,FSSdata,XSSdata,USSdata,MSSdata,Mast)
modQ = np.array([parmDict[pfx+'mV0'],parmDict[pfx+'mV1'],parmDict[pfx+'mV2']])
FF = np.zeros(len(Tdata))
if 'NC' in calcControls[hfx+'histType']:
FP,FPP = G2el.BlenResCW(Tdata,BLtables,parmDict[hfx+'Lam'])
elif 'X' in calcControls[hfx+'histType']:
FP = np.array([FFtables[El][hfx+'FP'] for El in Tdata])
FPP = np.array([FFtables[El][hfx+'FPP'] for El in Tdata])
Uij = np.array(G2lat.U6toUij(Uijdata)).T
bij = Mast*Uij
if not len(refDict['FF']):
if 'N' in calcControls[hfx+'histType']:
dat = G2el.getBLvalues(BLtables) #will need wave here for anom. neutron b's
else:
dat = G2el.getFFvalues(FFtables,0.)
refDict['FF']['El'] = list(dat.keys())
refDict['FF']['FF'] = np.zeros((len(refDict['RefList']),len(dat)))
dFdvDict = {}
dFdfr = np.zeros((nRef,mSize))
dFdx = np.zeros((nRef,mSize,3))
dFdui = np.zeros((nRef,mSize))
dFdua = np.zeros((nRef,mSize,6))
dFdbab = np.zeros((nRef,2))
dFdfl = np.zeros((nRef))
dFdGf = np.zeros((nRef,mSize,FSSdata.shape[1],2))
dFdGx = np.zeros((nRef,mSize,XSSdata.shape[1],6))
dFdGz = np.zeros((nRef,mSize,5))
dFdGu = np.zeros((nRef,mSize,USSdata.shape[1],12))
Flack = 1.0
if not SGData['SGInv'] and 'S' in calcControls[hfx+'histType'] and phfx+'Flack' in parmDict:
Flack = 1.-2.*parmDict[phfx+'Flack']
time0 = time.time()
nRef = len(refDict['RefList'])//100 #integer division (Py3); progress counter only, not used below
for iref,refl in enumerate(refDict['RefList']):
if 'T' in calcControls[hfx+'histType']:
FP,FPP = G2el.BlenResCW(Tdata,BLtables,refl.T[12+im])
H = np.array(refl[:4])
HP = H[:3]+modQ*H[3:] #projected hklm to hkl
SQ = 1./(2.*refl[4+im])**2 # or (sin(theta)/lambda)**2
SQfactor = 8.0*SQ*np.pi**2
dBabdA = np.exp(-parmDict[phfx+'BabU']*SQfactor)
Bab = parmDict[phfx+'BabA']*dBabdA
Tindx = np.array([refDict['FF']['El'].index(El) for El in Tdata])
FF = refDict['FF']['FF'][iref].T[Tindx]
Uniq = np.inner(H,SSGMT)
Phi = np.inner(H,SSGT)
UniqP = np.inner(HP,SGMT)
if SGInv: #if centro - expand HKL sets
Uniq = np.vstack((Uniq,-Uniq))
Phi = np.hstack((Phi,-Phi))
UniqP = np.vstack((UniqP,-UniqP))
phase = twopi*(np.inner(Uniq[:,:3],(dXdata+Xdata).T)+Phi[:,nxs])
sinp = np.sin(phase)
cosp = np.cos(phase)
occ = Mdata*Fdata/Uniq.shape[0]
biso = -SQfactor*Uisodata[:,nxs]
Tiso = np.repeat(np.where(biso<1.,np.exp(biso),1.0),Uniq.shape[0],axis=1).T #ops x atoms
HbH = -np.sum(UniqP[:,nxs,:3]*np.inner(UniqP[:,:3],bij),axis=-1) #ops x atoms
Hij = np.array([Mast*np.multiply.outer(U[:3],U[:3]) for U in UniqP]) #atoms x 3x3
Hij = np.array([G2lat.UijtoU6(uij) for uij in Hij]) #atoms x 6
Tuij = np.where(HbH<1.,np.exp(HbH),1.0) #ops x atoms
Tcorr = np.reshape(Tiso,Tuij.shape)*Tuij*Mdata*Fdata/Uniq.shape[0] #ops x atoms
fot = (FF+FP-Bab)*Tcorr #ops x atoms
fotp = FPP*Tcorr #ops x atoms
GfpuA = G2mth.Modulation(Uniq,UniqP,nWaves,Fmod,Xmod,Umod,glTau,glWt) #2 x sym X atoms
dGdf,dGdx,dGdu,dGdz = G2mth.ModulationDerv(Uniq,UniqP,Hij,nWaves,waveShapes,Fmod,Xmod,UmodAB,SCtauF,SCtauX,SCtauU,glTau,glWt)
# GfpuA is 2 x ops x atoms
# derivs are: ops x atoms x waves x 2,6,12, or 5 parms as [real,imag] parts
fa = np.array([((FF+FP).T-Bab).T*cosp*Tcorr,-Flack*FPP*sinp*Tcorr]) # array(2,nEqv,nAtoms)
fb = np.array([((FF+FP).T-Bab).T*sinp*Tcorr,Flack*FPP*cosp*Tcorr]) #or array(2,nEqv,nAtoms)
fag = fa*GfpuA[0]-fb*GfpuA[1]
fbg = fb*GfpuA[0]+fa*GfpuA[1]
fas = np.sum(np.sum(fag,axis=1),axis=1) # 2 x twin
fbs = np.sum(np.sum(fbg,axis=1),axis=1)
fax = np.array([-fot*sinp,-fotp*cosp]) #positions; 2 x ops x atoms
fbx = np.array([fot*cosp,-fotp*sinp])
fax,fbx = fax*GfpuA[0]-fbx*GfpuA[1],fbx*GfpuA[0]+fax*GfpuA[1] #tuple assignment so fbx uses the original fax (cf. fag/fbg above)
#sum below is over Uniq
dfadfr = np.sum(fag/occ,axis=1) #Fdata != 0 ever avoids /0. problem
dfbdfr = np.sum(fbg/occ,axis=1) #Fdata != 0 avoids /0. problem
dfadba = np.sum(-cosp*Tcorr[:,nxs],axis=1)
dfbdba = np.sum(-sinp*Tcorr[:,nxs],axis=1)
dfadui = np.sum(-SQfactor*fag,axis=1)
dfbdui = np.sum(-SQfactor*fbg,axis=1)
dfadx = np.sum(twopi*Uniq[:,:3]*np.swapaxes(fax,-2,-1)[:,:,:,nxs],axis=-2) #2 x nAtom x 3xyz; sum nOps
dfbdx = np.sum(twopi*Uniq[:,:3]*np.swapaxes(fbx,-2,-1)[:,:,:,nxs],axis=-2)
dfadua = np.sum(-Hij*np.swapaxes(fag,-2,-1)[:,:,:,nxs],axis=-2) #2 x nAtom x 6Uij; sum nOps
dfbdua = np.sum(-Hij*np.swapaxes(fbg,-2,-1)[:,:,:,nxs],axis=-2) #these are correct also for twins above
# array(2,nAtom,nWave,2) & array(2,nAtom,nWave,6) & array(2,nAtom,nWave,12); sum on nOps
dfadGf = np.sum(fa[:,:,:,nxs,nxs]*dGdf[0][nxs,:,:,:,:]-fb[:,:,:,nxs,nxs]*dGdf[1][nxs,:,:,:,:],axis=1)
dfbdGf = np.sum(fb[:,:,:,nxs,nxs]*dGdf[0][nxs,:,:,:,:]+fa[:,:,:,nxs,nxs]*dGdf[1][nxs,:,:,:,:],axis=1)
dfadGx = np.sum(fa[:,:,:,nxs,nxs]*dGdx[0][nxs,:,:,:,:]-fb[:,:,:,nxs,nxs]*dGdx[1][nxs,:,:,:,:],axis=1)
dfbdGx = np.sum(fb[:,:,:,nxs,nxs]*dGdx[0][nxs,:,:,:,:]+fa[:,:,:,nxs,nxs]*dGdx[1][nxs,:,:,:,:],axis=1)
dfadGz = np.sum(fa[:,:,0,nxs,nxs]*dGdz[0][nxs,:,:,:]-fb[:,:,0,nxs,nxs]*dGdz[1][nxs,:,:,:],axis=1)
dfbdGz = np.sum(fb[:,:,0,nxs,nxs]*dGdz[0][nxs,:,:,:]+fa[:,:,0,nxs,nxs]*dGdz[1][nxs,:,:,:],axis=1)
dfadGu = np.sum(fa[:,:,:,nxs,nxs]*dGdu[0][nxs,:,:,:,:]-fb[:,:,:,nxs,nxs]*dGdu[1][nxs,:,:,:,:],axis=1)
dfbdGu = np.sum(fb[:,:,:,nxs,nxs]*dGdu[0][nxs,:,:,:,:]+fa[:,:,:,nxs,nxs]*dGdu[1][nxs,:,:,:,:],axis=1)
if not SGData['SGInv']: #Flack derivative
dfadfl = np.sum(-FPP*Tcorr*sinp)
dfbdfl = np.sum(FPP*Tcorr*cosp)
else:
dfadfl = 1.0
dfbdfl = 1.0
# GSASIIpath.IPyBreak()
#NB: the above have been checked against PA(1:10,1:2) in strfctr.for for Al2O3!
SA = fas[0]+fas[1] #float = A+A'
SB = fbs[0]+fbs[1] #float = B+B'
if 'P' in calcControls[hfx+'histType']: #checked perfect for centro & noncentro?
dFdfl[iref] = -SA*dfadfl-SB*dfbdfl #array(nRef,)
dFdfr[iref] = 2.*(fas[0]*dfadfr[0]+fas[1]*dfadfr[1])*Mdata/len(Uniq)+ \
2.*(fbs[0]*dfbdfr[0]-fbs[1]*dfbdfr[1])*Mdata/len(Uniq)
dFdx[iref] = 2.*(fas[0]*dfadx[0]+fas[1]*dfadx[1])+ \
2.*(fbs[0]*dfbdx[0]+fbs[1]*dfbdx[1])
dFdui[iref] = 2.*(fas[0]*dfadui[0]+fas[1]*dfadui[1])+ \
2.*(fbs[0]*dfbdui[0]-fbs[1]*dfbdui[1])
dFdua[iref] = 2.*(fas[0]*dfadua[0]+fas[1]*dfadua[1])+ \
2.*(fbs[0]*dfbdua[0]+fbs[1]*dfbdua[1])
dFdGf[iref] = 2.*(fas[0]*dfadGf[0]+fas[1]*dfadGf[1])+ \
2.*(fbs[0]*dfbdGf[0]+fbs[1]*dfbdGf[1])
dFdGx[iref] = 2.*(fas[0]*dfadGx[0]+fas[1]*dfadGx[1])+ \
2.*(fbs[0]*dfbdGx[0]-fbs[1]*dfbdGx[1])
dFdGz[iref] = 2.*(fas[0]*dfadGz[0]+fas[1]*dfadGz[1])+ \
2.*(fbs[0]*dfbdGz[0]+fbs[1]*dfbdGz[1])
dFdGu[iref] = 2.*(fas[0]*dfadGu[0]+fas[1]*dfadGu[1])+ \
2.*(fbs[0]*dfbdGu[0]+fbs[1]*dfbdGu[1])
else: #OK, I think
dFdfr[iref] = 2.*(SA*dfadfr[0]+SA*dfadfr[1]+SB*dfbdfr[0]+SB*dfbdfr[1])*Mdata/len(Uniq) #array(nRef,nAtom)
dFdx[iref] = 2.*(SA*dfadx[0]+SA*dfadx[1]+SB*dfbdx[0]+SB*dfbdx[1]) #array(nRef,nAtom,3)
dFdui[iref] = 2.*(SA*dfadui[0]+SA*dfadui[1]+SB*dfbdui[0]+SB*dfbdui[1]) #array(nRef,nAtom)
dFdua[iref] = 2.*(SA*dfadua[0]+SA*dfadua[1]+SB*dfbdua[0]+SB*dfbdua[1]) #array(nRef,nAtom,6)
dFdfl[iref] = -SA*dfadfl-SB*dfbdfl #array(nRef,)
dFdGf[iref] = 2.*(SA*dfadGf[0]+SB*dfbdGf[1]) #array(nRef,natom,nwave,2)
dFdGx[iref] = 2.*(SA*dfadGx[0]+SB*dfbdGx[1]) #array(nRef,natom,nwave,6)
dFdGz[iref] = 2.*(SA*dfadGz[0]+SB*dfbdGz[1]) #array(nRef,natom,5)
dFdGu[iref] = 2.*(SA*dfadGu[0]+SB*dfbdGu[1]) #array(nRef,natom,nwave,12)
# GSASIIpath.IPyBreak()
dFdbab[iref] = 2.*fas[0]*np.array([np.sum(dfadba*dBabdA),np.sum(-dfadba*parmDict[phfx+'BabA']*SQfactor*dBabdA)]).T+ \
2.*fbs[0]*np.array([np.sum(dfbdba*dBabdA),np.sum(-dfbdba*parmDict[phfx+'BabA']*SQfactor*dBabdA)]).T
#loop over atoms - each dict entry is list of derivatives for all the reflections
if not iref%100:
print (' %d derivative time %.4f\r'%(iref,time.time()-time0),end='')
for i in range(len(Mdata)): #loop over atoms
dFdvDict[pfx+'Afrac:'+str(i)] = dFdfr.T[i]
dFdvDict[pfx+'dAx:'+str(i)] = dFdx.T[0][i]
dFdvDict[pfx+'dAy:'+str(i)] = dFdx.T[1][i]
dFdvDict[pfx+'dAz:'+str(i)] = dFdx.T[2][i]
dFdvDict[pfx+'AUiso:'+str(i)] = dFdui.T[i]
dFdvDict[pfx+'AU11:'+str(i)] = dFdua.T[0][i]
dFdvDict[pfx+'AU22:'+str(i)] = dFdua.T[1][i]
dFdvDict[pfx+'AU33:'+str(i)] = dFdua.T[2][i]
dFdvDict[pfx+'AU12:'+str(i)] = 2.*dFdua.T[3][i]
dFdvDict[pfx+'AU13:'+str(i)] = 2.*dFdua.T[4][i]
dFdvDict[pfx+'AU23:'+str(i)] = 2.*dFdua.T[5][i]
for j in range(FSSdata.shape[1]): #loop over waves Fzero & Fwid?
dFdvDict[pfx+'Fsin:'+str(i)+':'+str(j)] = dFdGf.T[0][j][i]
dFdvDict[pfx+'Fcos:'+str(i)+':'+str(j)] = dFdGf.T[1][j][i]
nx = 0
if waveTypes[i] in ['Block','ZigZag']:
nx = 1
dFdvDict[pfx+'Tmin:'+str(i)+':0'] = dFdGz.T[0][i] #ZigZag/Block waves (if any)
dFdvDict[pfx+'Tmax:'+str(i)+':0'] = dFdGz.T[1][i]
dFdvDict[pfx+'Xmax:'+str(i)+':0'] = dFdGz.T[2][i]
dFdvDict[pfx+'Ymax:'+str(i)+':0'] = dFdGz.T[3][i]
dFdvDict[pfx+'Zmax:'+str(i)+':0'] = dFdGz.T[4][i]
for j in range(XSSdata.shape[1]-nx): #loop over waves
dFdvDict[pfx+'Xsin:'+str(i)+':'+str(j+nx)] = dFdGx.T[0][j][i]
dFdvDict[pfx+'Ysin:'+str(i)+':'+str(j+nx)] = dFdGx.T[1][j][i]
dFdvDict[pfx+'Zsin:'+str(i)+':'+str(j+nx)] = dFdGx.T[2][j][i]
dFdvDict[pfx+'Xcos:'+str(i)+':'+str(j+nx)] = dFdGx.T[3][j][i]
dFdvDict[pfx+'Ycos:'+str(i)+':'+str(j+nx)] = dFdGx.T[4][j][i]
dFdvDict[pfx+'Zcos:'+str(i)+':'+str(j+nx)] = dFdGx.T[5][j][i]
for j in range(USSdata.shape[1]): #loop over waves
dFdvDict[pfx+'U11sin:'+str(i)+':'+str(j)] = dFdGu.T[0][j][i]
dFdvDict[pfx+'U22sin:'+str(i)+':'+str(j)] = dFdGu.T[1][j][i]
dFdvDict[pfx+'U33sin:'+str(i)+':'+str(j)] = dFdGu.T[2][j][i]
dFdvDict[pfx+'U12sin:'+str(i)+':'+str(j)] = 2.*dFdGu.T[3][j][i]
dFdvDict[pfx+'U13sin:'+str(i)+':'+str(j)] = 2.*dFdGu.T[4][j][i]
dFdvDict[pfx+'U23sin:'+str(i)+':'+str(j)] = 2.*dFdGu.T[5][j][i]
dFdvDict[pfx+'U11cos:'+str(i)+':'+str(j)] = dFdGu.T[6][j][i]
dFdvDict[pfx+'U22cos:'+str(i)+':'+str(j)] = dFdGu.T[7][j][i]
dFdvDict[pfx+'U33cos:'+str(i)+':'+str(j)] = dFdGu.T[8][j][i]
dFdvDict[pfx+'U12cos:'+str(i)+':'+str(j)] = 2.*dFdGu.T[9][j][i]
dFdvDict[pfx+'U13cos:'+str(i)+':'+str(j)] = 2.*dFdGu.T[10][j][i]
dFdvDict[pfx+'U23cos:'+str(i)+':'+str(j)] = 2.*dFdGu.T[11][j][i]
# GSASIIpath.IPyBreak()
dFdvDict[phfx+'Flack'] = 4.*dFdfl.T
dFdvDict[phfx+'BabA'] = dFdbab.T[0]
dFdvDict[phfx+'BabU'] = dFdbab.T[1]
return dFdvDict
def SStructureFactorDerv2(refDict,im,G,hfx,pfx,SGData,SSGData,calcControls,parmDict):
'''Compute super structure factor derivatives for all h,k,l,m for phase - no twins.
Block-wise version of SStructureFactorDerv; same arguments & returns.
'''
phfx = pfx.split(':')[0]+hfx
ast = np.sqrt(np.diag(G))
Mast = twopisq*np.multiply.outer(ast,ast)
SGInv = SGData['SGInv']
SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
SGT = np.array([ops[1] for ops in SGData['SGOps']])
SSGMT = np.array([ops[0].T for ops in SSGData['SSGOps']])
SSGT = np.array([ops[1] for ops in SSGData['SSGOps']])
FFtables = calcControls['FFtables']
BLtables = calcControls['BLtables']
nRef = len(refDict['RefList'])
Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata = \
GetAtomFXU(pfx,calcControls,parmDict)
if not Xdata.size: #no atoms in phase!
return {}
mSize = len(Mdata) #no. atoms
waveTypes,FSSdata,XSSdata,USSdata,MSSdata = GetAtomSSFXU(pfx,calcControls,parmDict)
ngl,nWaves,Fmod,Xmod,Umod,Mmod,glTau,glWt = G2mth.makeWaves(waveTypes,FSSdata,XSSdata,USSdata,MSSdata,Mast)
waveShapes,SCtauF,SCtauX,SCtauU,UmodAB = G2mth.makeWavesDerv(ngl,waveTypes,FSSdata,XSSdata,USSdata,MSSdata,Mast)
modQ = np.array([parmDict[pfx+'mV0'],parmDict[pfx+'mV1'],parmDict[pfx+'mV2']])
FF = np.zeros(len(Tdata))
if 'NC' in calcControls[hfx+'histType']:
FP,FPP = G2el.BlenResCW(Tdata,BLtables,parmDict[hfx+'Lam'])
elif 'X' in calcControls[hfx+'histType']:
FP = np.array([FFtables[El][hfx+'FP'] for El in Tdata])
FPP = np.array([FFtables[El][hfx+'FPP'] for El in Tdata])
Uij = np.array(G2lat.U6toUij(Uijdata)).T
bij = Mast*Uij
if not len(refDict['FF']):
if 'N' in calcControls[hfx+'histType']:
dat = G2el.getBLvalues(BLtables) #will need wave here for anom. neutron b's
else:
dat = G2el.getFFvalues(FFtables,0.)
refDict['FF']['El'] = list(dat.keys())
refDict['FF']['FF'] = np.zeros((len(refDict['RefList']),len(dat)))
dFdvDict = {}
dFdfr = np.zeros((nRef,mSize))
dFdx = np.zeros((nRef,mSize,3))
dFdui = np.zeros((nRef,mSize))
dFdua = np.zeros((nRef,mSize,6))
dFdbab = np.zeros((nRef,2))
dFdfl = np.zeros((nRef))
dFdGf = np.zeros((nRef,mSize,FSSdata.shape[1],2))
dFdGx = np.zeros((nRef,mSize,XSSdata.shape[1],6))
dFdGz = np.zeros((nRef,mSize,5))
dFdGu = np.zeros((nRef,mSize,USSdata.shape[1],12))
Flack = 1.0
if not SGData['SGInv'] and 'S' in calcControls[hfx+'histType'] and phfx+'Flack' in parmDict:
Flack = 1.-2.*parmDict[phfx+'Flack']
time0 = time.time()
iBeg = 0
blkSize = 4 #no. of reflections in a block - optimized for speed
while iBeg < nRef:
iFin = min(iBeg+blkSize,nRef)
refl = refDict['RefList'][iBeg:iFin] #array(blkSize,nItems)
H = refl.T[:4]
HP = H[:3].T+modQ*H.T[:,3:] #projected hklm to hkl
SQ = 1./(2.*refl.T[4+im])**2 # or (sin(theta)/lambda)**2
SQfactor = 8.0*SQ*np.pi**2
if 'T' in calcControls[hfx+'histType']:
if 'P' in calcControls[hfx+'histType']:
FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[15])
else:
FP,FPP = G2el.BlenResTOF(Tdata,BLtables,refl.T[13])
FP = np.repeat(FP.T,len(SGT),axis=0)
FPP = np.repeat(FPP.T,len(SGT),axis=0)
# dBabdA = np.exp(-parmDict[phfx+'BabU']*SQfactor)
Bab = np.repeat(parmDict[phfx+'BabA']*np.exp(-parmDict[phfx+'BabU']*SQfactor),len(SGT))
Tindx = np.array([refDict['FF']['El'].index(El) for El in Tdata])
FF = np.repeat(refDict['FF']['FF'][iBeg:iFin].T[Tindx].T,len(SGT),axis=0)
Uniq = np.inner(H.T,SSGMT)
Phi = np.inner(H.T,SSGT)
UniqP = np.inner(HP,SGMT)
if SGInv: #if centro - expand HKL sets
Uniq = np.hstack((Uniq,-Uniq))
Phi = np.hstack((Phi,-Phi))
UniqP = np.hstack((UniqP,-UniqP))
FF = np.vstack((FF,FF))
Bab = np.concatenate((Bab,Bab))
phase = twopi*(np.inner(Uniq[:,:,:3],(dXdata+Xdata).T)+Phi[:,:,nxs])
sinp = np.sin(phase)
cosp = np.cos(phase)
occ = Mdata*Fdata/Uniq.shape[1]
biso = -SQfactor*Uisodata[:,nxs]
Tiso = np.repeat(np.where(biso<1.,np.exp(biso),1.0),Uniq.shape[1],axis=1).T #ops x atoms
HbH = -np.sum(UniqP[:,:,nxs,:3]*np.inner(UniqP[:,:,:3],bij),axis=-1) #ops x atoms
Hij = np.array([Mast*np.multiply.outer(U[:3],U[:3]) for U in np.reshape(UniqP,(-1,3))]) #atoms x 3x3
Hij = np.reshape(np.array([G2lat.UijtoU6(uij) for uij in Hij]),(iFin-iBeg,-1,6)) #atoms x 6
Tuij = np.where(HbH<1.,np.exp(HbH),1.0) #ops x atoms
# GSASIIpath.IPyBreak()
Tcorr = np.reshape(Tiso,Tuij.shape)*Tuij*Mdata*Fdata/Uniq.shape[1] #ops x atoms; shape[1] is the ops count for blocked Uniq (shape[0] is the block size)
fot = np.reshape(FF+FP[nxs,:]-Bab[:,nxs],cosp.shape)*Tcorr #ops x atoms
fotp = FPP*Tcorr #ops x atoms
GfpuA = G2mth.Modulation(Uniq,UniqP,nWaves,Fmod,Xmod,Umod,glTau,glWt) #2 x sym X atoms
dGdf,dGdx,dGdu,dGdz = G2mth.ModulationDerv2(Uniq,UniqP,Hij,nWaves,waveShapes,Fmod,Xmod,UmodAB,SCtauF,SCtauX,SCtauU,glTau,glWt)
# GfpuA is 2 x ops x atoms
# derivs are: ops x atoms x waves x 2,6,12, or 5 parms as [real,imag] parts
fa = np.array([fot*cosp,-Flack*FPP*sinp*Tcorr]) # array(2,nEqv,nAtoms)
fb = np.array([fot*sinp,Flack*FPP*cosp*Tcorr]) #or array(2,nEqv,nAtoms)
fag = fa*GfpuA[0]-fb*GfpuA[1]
fbg = fb*GfpuA[0]+fa*GfpuA[1]
fas = np.sum(np.sum(fag,axis=-1),axis=-1) # 2 x refBlk
fbs = np.sum(np.sum(fbg,axis=-1),axis=-1)
fax = np.array([-fot*sinp,-fotp*cosp]) #positions; 2 x ops x atoms
fbx = np.array([fot*cosp,-fotp*sinp])
fax,fbx = fax*GfpuA[0]-fbx*GfpuA[1],fbx*GfpuA[0]+fax*GfpuA[1] #tuple assignment so fbx uses the original fax (cf. fag/fbg above)
#sum below is over Uniq
dfadfr = np.sum(fag/occ,axis=-2) #Fdata != 0 ever avoids /0. problem
dfbdfr = np.sum(fbg/occ,axis=-2) #Fdata != 0 avoids /0. problem
# dfadba = np.sum(-cosp*Tcorr,axis=-2)
# dfbdba = np.sum(-sinp*Tcorr,axis=-2)
dfadui = np.sum(-SQfactor[nxs,:,nxs,nxs]*fag,axis=-2)
dfbdui = np.sum(-SQfactor[nxs,:,nxs,nxs]*fbg,axis=-2)
dfadx = np.sum(twopi*Uniq[nxs,:,:,nxs,:3]*fax[:,:,:,:,nxs],axis=-3) #2 refBlk x x nAtom x 3xyz; sum nOps
dfbdx = np.sum(twopi*Uniq[nxs,:,:,nxs,:3]*fbx[:,:,:,:,nxs],axis=-3) #2 refBlk x x nAtom x 3xyz; sum nOps
dfadua = np.sum(-Hij[nxs,:,:,nxs,:]*fag[:,:,:,:,nxs],axis=-3) #2 x nAtom x 6Uij; sum nOps
dfbdua = np.sum(-Hij[nxs,:,:,nxs,:]*fbg[:,:,:,:,nxs],axis=-3) #2 x nAtom x 6Uij; sum nOps
# array(2,nAtom,nWave,2) & array(2,nAtom,nWave,6) & array(2,nAtom,nWave,12); sum on nOps
dfadGf = np.sum(fa[:,:,:,:,nxs,nxs]*dGdf[0][nxs,:,nxs,:,:,:]-fb[:,:,:,:,nxs,nxs]*dGdf[1][nxs,:,nxs,:,:,:],axis=2)
dfbdGf = np.sum(fb[:,:,:,:,nxs,nxs]*dGdf[0][nxs,:,nxs,:,:,:]+fa[:,:,:,:,nxs,nxs]*dGdf[1][nxs,:,nxs,:,:,:],axis=2)
dfadGx = np.sum(fa[:,:,:,:,nxs,nxs]*dGdx[0][nxs,:,:,:,:,:]-fb[:,:,:,:,nxs,nxs]*dGdx[1][nxs,:,:,:,:,:],axis=2)
dfbdGx = np.sum(fb[:,:,:,:,nxs,nxs]*dGdx[0][nxs,:,:,:,:,:]+fa[:,:,:,:,nxs,nxs]*dGdx[1][nxs,:,:,:,:,:],axis=2)
dfadGz = np.sum(fa[:,:,:,:,nxs]*dGdz[0][nxs,:,:,:,:]-fb[:,:,:,:,nxs]*dGdz[1][nxs,:,:,:,:],axis=2)
dfbdGz = np.sum(fb[:,:,:,:,nxs]*dGdz[0][nxs,:,:,:,:]+fa[:,:,:,:,nxs]*dGdz[1][nxs,:,:,:,:],axis=2)
dfadGu = np.sum(fa[:,:,:,:,nxs,nxs]*dGdu[0][nxs,:,:,:,:]-fb[:,:,:,:,nxs,nxs]*dGdu[1][nxs,:,:,:,:],axis=2)
dfbdGu = np.sum(fb[:,:,:,:,nxs,nxs]*dGdu[0][nxs,:,:,:,:]+fa[:,:,:,:,nxs,nxs]*dGdu[1][nxs,:,:,:,:],axis=2)
if not SGData['SGInv']: #Flack derivative
dfadfl = np.sum(np.sum(-FPP*Tcorr*sinp,axis=-1),axis=-1)
dfbdfl = np.sum(np.sum(FPP*Tcorr*cosp,axis=-1),axis=-1)
else:
dfadfl = 1.0
dfbdfl = 1.0
#NB: the above have been checked against PA(1:10,1:2) in strfctr.for for Al2O3!
SA = fas[0]+fas[1] #float = A+A' (might be array[nTwin])
SB = fbs[0]+fbs[1] #float = B+B' (might be array[nTwin])
if 'P' in calcControls[hfx+'histType']: #checked perfect for centro & noncentro?
dFdfl[iBeg:iFin] = -SA*dfadfl-SB*dfbdfl #array(nRef,)
dFdfr[iBeg:iFin] = 2.*(fas[0,:,nxs]*dfadfr[0]+fas[1,:,nxs]*dfadfr[1])*Mdata/len(Uniq)+ \
2.*(fbs[0,:,nxs]*dfbdfr[0]-fbs[1,:,nxs]*dfbdfr[1])*Mdata/len(Uniq)
dFdx[iBeg:iFin] = 2.*(fas[0,:,nxs,nxs]*dfadx[0]+fas[1,:,nxs,nxs]*dfadx[1])+ \
2.*(fbs[0,:,nxs,nxs]*dfbdx[0]+fbs[1,:,nxs,nxs]*dfbdx[1])
dFdui[iBeg:iFin] = 2.*(fas[0,:,nxs]*dfadui[0]+fas[1,:,nxs]*dfadui[1])+ \
2.*(fbs[0,:,nxs]*dfbdui[0]-fbs[1,:,nxs]*dfbdui[1])
dFdua[iBeg:iFin] = 2.*(fas[0,:,nxs,nxs]*dfadua[0]+fas[1,:,nxs,nxs]*dfadua[1])+ \
2.*(fbs[0,:,nxs,nxs]*dfbdua[0]+fbs[1,:,nxs,nxs]*dfbdua[1])
dFdGf[iBeg:iFin] = 2.*(fas[0,:,nxs,nxs,nxs]*dfadGf[0]+fas[1,:,nxs,nxs,nxs]*dfadGf[1])+ \
2.*(fbs[0,:,nxs,nxs,nxs]*dfbdGf[0]+fbs[1,:,nxs,nxs,nxs]*dfbdGf[1])
dFdGx[iBeg:iFin] = 2.*(fas[0,:,nxs,nxs,nxs]*dfadGx[0]+fas[1,:,nxs,nxs,nxs]*dfadGx[1])+ \
2.*(fbs[0,:,nxs,nxs,nxs]*dfbdGx[0]-fbs[1,:,nxs,nxs,nxs]*dfbdGx[1])
dFdGz[iBeg:iFin] = 2.*(fas[0,:,nxs,nxs]*dfadGz[0]+fas[1,:,nxs,nxs]*dfadGz[1])+ \
2.*(fbs[0,:,nxs,nxs]*dfbdGz[0]+fbs[1,:,nxs,nxs]*dfbdGz[1])
dFdGu[iBeg:iFin] = 2.*(fas[0,:,nxs,nxs,nxs]*dfadGu[0]+fas[1,:,nxs,nxs,nxs]*dfadGu[1])+ \
2.*(fbs[0,:,nxs,nxs,nxs]*dfbdGu[0]+fbs[1,:,nxs,nxs,nxs]*dfbdGu[1])
else: #OK, I think
dFdfr[iBeg:iFin] = 2.*(SA[:,nxs]*(dfadfr[0]+dfadfr[1])+SB[:,nxs]*(dfbdfr[0]+dfbdfr[1]))*Mdata/len(Uniq) #array(nRef,nAtom)
dFdx[iBeg:iFin] = 2.*(SA[:,nxs,nxs]*(dfadx[0]+dfadx[1])+SB[:,nxs,nxs]*(dfbdx[0]+dfbdx[1])) #array(nRef,nAtom,3)
dFdui[iBeg:iFin] = 2.*(SA[:,nxs]*(dfadui[0]+dfadui[1])+SB[:,nxs]*(dfbdui[0]+dfbdui[1])) #array(nRef,nAtom)
dFdua[iBeg:iFin] = 2.*(SA[:,nxs,nxs]*(dfadua[0]+dfadua[1])+SB[:,nxs,nxs]*(dfbdua[0]+dfbdua[1])) #array(nRef,nAtom,6)
dFdfl[iBeg:iFin] = -SA*dfadfl-SB*dfbdfl #array(nRef,)
dFdGf[iBeg:iFin] = 2.*(SA[:,nxs,nxs,nxs]*dfadGf[0]+SB[:,nxs,nxs,nxs]*dfbdGf[1]) #array(nRef,natom,nwave,2)
dFdGx[iBeg:iFin] = 2.*(SA[:,nxs,nxs,nxs]*dfadGx[0]+SB[:,nxs,nxs,nxs]*dfbdGx[1]) #array(nRef,natom,nwave,6)
dFdGz[iBeg:iFin] = 2.*(SA[:,nxs,nxs]*dfadGz[0]+SB[:,nxs,nxs]*dfbdGz[1]) #array(nRef,natom,5)
dFdGu[iBeg:iFin] = 2.*(SA[:,nxs,nxs,nxs]*dfadGu[0]+SB[:,nxs,nxs,nxs]*dfbdGu[1]) #array(nRef,natom,nwave,12)
# GSASIIpath.IPyBreak()
# dFdbab[iBeg:iFin] = 2.*fas[0,:,nxs]*np.array([np.sum(dfadba*dBabdA),np.sum(-dfadba*parmDict[phfx+'BabA']*SQfactor*dBabdA)]).T+ \
# 2.*fbs[0,:,nxs]*np.array([np.sum(dfbdba*dBabdA),np.sum(-dfbdba*parmDict[phfx+'BabA']*SQfactor*dBabdA)]).T
#loop over atoms - each dict entry is list of derivatives for all the reflections
print (' %d derivative time %.4f\r'%(iBeg,time.time()-time0),end='')
iBeg += blkSize
for i in range(len(Mdata)): #loop over atoms
dFdvDict[pfx+'Afrac:'+str(i)] = dFdfr.T[i]
dFdvDict[pfx+'dAx:'+str(i)] = dFdx.T[0][i]
dFdvDict[pfx+'dAy:'+str(i)] = dFdx.T[1][i]
dFdvDict[pfx+'dAz:'+str(i)] = dFdx.T[2][i]
dFdvDict[pfx+'AUiso:'+str(i)] = dFdui.T[i]
dFdvDict[pfx+'AU11:'+str(i)] = dFdua.T[0][i]
dFdvDict[pfx+'AU22:'+str(i)] = dFdua.T[1][i]
dFdvDict[pfx+'AU33:'+str(i)] = dFdua.T[2][i]
dFdvDict[pfx+'AU12:'+str(i)] = 2.*dFdua.T[3][i]
dFdvDict[pfx+'AU13:'+str(i)] = 2.*dFdua.T[4][i]
dFdvDict[pfx+'AU23:'+str(i)] = 2.*dFdua.T[5][i]
for j in range(FSSdata.shape[1]): #loop over waves Fzero & Fwid?
dFdvDict[pfx+'Fsin:'+str(i)+':'+str(j)] = dFdGf.T[0][j][i]
dFdvDict[pfx+'Fcos:'+str(i)+':'+str(j)] = dFdGf.T[1][j][i]
nx = 0
if waveTypes[i] in ['Block','ZigZag']:
nx = 1
dFdvDict[pfx+'Tmin:'+str(i)+':0'] = dFdGz.T[0][i] #ZigZag/Block waves (if any)
dFdvDict[pfx+'Tmax:'+str(i)+':0'] = dFdGz.T[1][i]
dFdvDict[pfx+'Xmax:'+str(i)+':0'] = dFdGz.T[2][i]
dFdvDict[pfx+'Ymax:'+str(i)+':0'] = dFdGz.T[3][i]
dFdvDict[pfx+'Zmax:'+str(i)+':0'] = dFdGz.T[4][i]
for j in range(XSSdata.shape[1]-nx): #loop over waves
dFdvDict[pfx+'Xsin:'+str(i)+':'+str(j+nx)] = dFdGx.T[0][j][i]
dFdvDict[pfx+'Ysin:'+str(i)+':'+str(j+nx)] = dFdGx.T[1][j][i]
dFdvDict[pfx+'Zsin:'+str(i)+':'+str(j+nx)] = dFdGx.T[2][j][i]
dFdvDict[pfx+'Xcos:'+str(i)+':'+str(j+nx)] = dFdGx.T[3][j][i]
dFdvDict[pfx+'Ycos:'+str(i)+':'+str(j+nx)] = dFdGx.T[4][j][i]
dFdvDict[pfx+'Zcos:'+str(i)+':'+str(j+nx)] = dFdGx.T[5][j][i]
for j in range(USSdata.shape[1]): #loop over waves
dFdvDict[pfx+'U11sin:'+str(i)+':'+str(j)] = dFdGu.T[0][j][i]
dFdvDict[pfx+'U22sin:'+str(i)+':'+str(j)] = dFdGu.T[1][j][i]
dFdvDict[pfx+'U33sin:'+str(i)+':'+str(j)] = dFdGu.T[2][j][i]
dFdvDict[pfx+'U12sin:'+str(i)+':'+str(j)] = 2.*dFdGu.T[3][j][i]
dFdvDict[pfx+'U13sin:'+str(i)+':'+str(j)] = 2.*dFdGu.T[4][j][i]
dFdvDict[pfx+'U23sin:'+str(i)+':'+str(j)] = 2.*dFdGu.T[5][j][i]
dFdvDict[pfx+'U11cos:'+str(i)+':'+str(j)] = dFdGu.T[6][j][i]
dFdvDict[pfx+'U22cos:'+str(i)+':'+str(j)] = dFdGu.T[7][j][i]
dFdvDict[pfx+'U33cos:'+str(i)+':'+str(j)] = dFdGu.T[8][j][i]
dFdvDict[pfx+'U12cos:'+str(i)+':'+str(j)] = 2.*dFdGu.T[9][j][i]
dFdvDict[pfx+'U13cos:'+str(i)+':'+str(j)] = 2.*dFdGu.T[10][j][i]
dFdvDict[pfx+'U23cos:'+str(i)+':'+str(j)] = 2.*dFdGu.T[11][j][i]
# GSASIIpath.IPyBreak()
dFdvDict[phfx+'BabA'] = dFdbab.T[0]
dFdvDict[phfx+'BabU'] = dFdbab.T[1]
return dFdvDict
def SStructureFactorDervTw(refDict,im,G,hfx,pfx,SGData,SSGData,calcControls,parmDict):
'''Compute super structure factor derivatives for all h,k,l,m for phase - with twins.
Same arguments & returns as SStructureFactorDerv.
'''
phfx = pfx.split(':')[0]+hfx
ast = np.sqrt(np.diag(G))
Mast = twopisq*np.multiply.outer(ast,ast)
SGInv = SGData['SGInv']
SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
SSGMT = np.array([ops[0].T for ops in SSGData['SSGOps']])
SSGT = np.array([ops[1] for ops in SSGData['SSGOps']])
FFtables = calcControls['FFtables']
BLtables = calcControls['BLtables']
TwinLaw = np.array([[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]],])
TwDict = refDict.get('TwDict',{})
if 'S' in calcControls[hfx+'histType']:
NTL = calcControls[phfx+'NTL']
NM = calcControls[phfx+'TwinNMN']+1
TwinLaw = calcControls[phfx+'TwinLaw']
TwinInv = list(np.where(calcControls[phfx+'TwinInv'],-1,1))
nTwin = len(TwinLaw)
nRef = len(refDict['RefList'])
Tdata,Mdata,Fdata,Xdata,dXdata,IAdata,Uisodata,Uijdata,Gdata = \
GetAtomFXU(pfx,calcControls,parmDict)
if not Xdata.size: #no atoms in phase!
return {}
mSize = len(Mdata) #no. atoms
waveTypes,FSSdata,XSSdata,USSdata,MSSdata = GetAtomSSFXU(pfx,calcControls,parmDict)
ngl,nWaves,Fmod,Xmod,Umod,Mmod,glTau,glWt = G2mth.makeWaves(waveTypes,FSSdata,XSSdata,USSdata,MSSdata,Mast)
waveShapes,SCtauF,SCtauX,SCtauU,UmodAB = G2mth.makeWavesDerv(ngl,waveTypes,FSSdata,XSSdata,USSdata,MSSdata,Mast)
modQ = np.array([parmDict[pfx+'mV0'],parmDict[pfx+'mV1'],parmDict[pfx+'mV2']])
FF = np.zeros(len(Tdata))
if 'NC' in calcControls[hfx+'histType']:
FP,FPP = G2el.BlenResCW(Tdata,BLtables,parmDict[hfx+'Lam'])
elif 'X' in calcControls[hfx+'histType']:
FP = np.array([FFtables[El][hfx+'FP'] for El in Tdata])
FPP = np.array([FFtables[El][hfx+'FPP'] for El in Tdata])
Uij = np.array(G2lat.U6toUij(Uijdata)).T
bij = Mast*Uij
if not len(refDict['FF']):
if 'N' in calcControls[hfx+'histType']:
dat = G2el.getBLvalues(BLtables) #will need wave here for anom. neutron b's
else:
dat = G2el.getFFvalues(FFtables,0.)
refDict['FF']['El'] = list(dat.keys())
refDict['FF']['FF'] = np.zeros((len(refDict['RefList']),len(dat)))
dFdvDict = {}
dFdfr = np.zeros((nRef,nTwin,mSize))
dFdx = np.zeros((nRef,nTwin,mSize,3))
dFdui = np.zeros((nRef,nTwin,mSize))
dFdua = np.zeros((nRef,nTwin,mSize,6))
dFdbab = np.zeros((nRef,nTwin,2))
dFdtw = np.zeros((nRef,nTwin))
dFdGf = np.zeros((nRef,nTwin,mSize,FSSdata.shape[1]))
dFdGx = np.zeros((nRef,nTwin,mSize,XSSdata.shape[1],3))
dFdGz = np.zeros((nRef,nTwin,mSize,5))
dFdGu = np.zeros((nRef,nTwin,mSize,USSdata.shape[1],6))
Flack = 1.0
if not SGData['SGInv'] and 'S' in calcControls[hfx+'histType'] and phfx+'Flack' in parmDict:
Flack = 1.-2.*parmDict[phfx+'Flack']
time0 = time.time()
nRef = len(refDict['RefList'])//100 #integer division (Py3); progress counter only, not used below
for iref,refl in enumerate(refDict['RefList']):
if 'T' in calcControls[hfx+'histType']:
FP,FPP = G2el.BlenResCW(Tdata,BLtables,refl.T[12+im])
H = np.array(refl[:4])
HP = H[:3]+modQ*H[3:] #projected hklm to hkl
H = np.inner(H.T,TwinLaw) #maybe array(4,nTwins) or (4)
TwMask = np.any(H,axis=-1)
if TwinLaw.shape[0] > 1 and TwDict:
if iref in TwDict:
for i in TwDict[iref]:
for n in range(NTL):
H[i+n*NM] = np.inner(TwinLaw[n*NM],np.array(TwDict[iref][i])*TwinInv[i+n*NM])
TwMask = np.any(H,axis=-1)
SQ = 1./(2.*refl[4+im])**2 # or (sin(theta)/lambda)**2
SQfactor = 8.0*SQ*np.pi**2
dBabdA = np.exp(-parmDict[phfx+'BabU']*SQfactor)
Bab = parmDict[phfx+'BabA']*dBabdA
Tindx = np.array([refDict['FF']['El'].index(El) for El in Tdata])
FF = refDict['FF']['FF'][iref].T[Tindx]
Uniq = np.inner(H,SSGMT)
Phi = np.inner(H,SSGT)
UniqP = np.inner(HP,SGMT)
if SGInv: #if centro - expand HKL sets
Uniq = np.vstack((Uniq,-Uniq))
Phi = np.hstack((Phi,-Phi))
UniqP = np.vstack((UniqP,-UniqP))
phase = twopi*(np.inner(Uniq[:,:3],(dXdata+Xdata).T)+Phi[:,nxs])
sinp = np.sin(phase)
cosp = np.cos(phase)
occ = Mdata*Fdata/Uniq.shape[0]
biso = -SQfactor*Uisodata[:,nxs]
Tiso = np.repeat(np.where(biso<1.,np.exp(biso),1.0),Uniq.shape[0]*len(TwinLaw),axis=1).T #ops x atoms
HbH = -np.sum(UniqP[:,nxs,:3]*np.inner(UniqP[:,:3],bij),axis=-1) #ops x atoms
Hij = np.array([Mast*np.multiply.outer(U[:3],U[:3]) for U in UniqP]) #atoms x 3x3
Hij = np.squeeze(np.reshape(np.array([G2lat.UijtoU6(uij) for uij in Hij]),(nTwin,-1,6)))
Tuij = np.where(HbH<1.,np.exp(HbH),1.0) #ops x atoms
Tcorr = np.reshape(Tiso,Tuij.shape)*Tuij*Mdata*Fdata/Uniq.shape[0] #ops x atoms
fot = (FF+FP-Bab)*Tcorr #ops x atoms
fotp = FPP*Tcorr #ops x atoms
GfpuA = G2mth.Modulation(Uniq,UniqP,nWaves,Fmod,Xmod,Umod,glTau,glWt) #2 x sym X atoms
dGdf,dGdx,dGdu,dGdz = G2mth.ModulationDerv(Uniq,UniqP,Hij,nWaves,waveShapes,Fmod,Xmod,UmodAB,SCtauF,SCtauX,SCtauU,glTau,glWt)
# GfpuA is 2 x ops x atoms
# derivs are: ops x atoms x waves x 2,6,12, or 5 parms as [real,imag] parts
fa = np.array([((FF+FP).T-Bab).T*cosp*Tcorr,-Flack*FPP*sinp*Tcorr]) # array(2,nTwin,nEqv,nAtoms)
fb = np.array([((FF+FP).T-Bab).T*sinp*Tcorr,Flack*FPP*cosp*Tcorr]) #or array(2,nEqv,nAtoms)
fag = fa*GfpuA[0]-fb*GfpuA[1]
fbg = fb*GfpuA[0]+fa*GfpuA[1]
fas = np.sum(np.sum(fag,axis=1),axis=1) # 2 x twin
fbs = np.sum(np.sum(fbg,axis=1),axis=1)
fax = np.array([-fot*sinp,-fotp*cosp]) #positions; 2 x twin x ops x atoms
fbx = np.array([fot*cosp,-fotp*sinp])
fax,fbx = fax*GfpuA[0]-fbx*GfpuA[1],fbx*GfpuA[0]+fax*GfpuA[1] #tuple assignment so fbx uses the original fax (cf. fag/fbg above)
#sum below is over Uniq
dfadfr = np.sum(fag/occ,axis=1) #Fdata != 0 ever avoids /0. problem
dfbdfr = np.sum(fbg/occ,axis=1) #Fdata != 0 avoids /0. problem
dfadba = np.sum(-cosp*Tcorr[:,nxs],axis=1)
dfbdba = np.sum(-sinp*Tcorr[:,nxs],axis=1)
dfadui = np.sum(-SQfactor*fag,axis=1)
dfbdui = np.sum(-SQfactor*fbg,axis=1)
dfadx = np.array([np.sum(twopi*Uniq[it,:,:3]*np.swapaxes(fax,-2,-1)[:,it,:,:,nxs],axis=-2) for it in range(nTwin)])
dfbdx = np.array([np.sum(twopi*Uniq[it,:,:3]*np.swapaxes(fbx,-2,-1)[:,it,:,:,nxs],axis=-2) for it in range(nTwin)])
dfadua = np.array([np.sum(-Hij[it]*np.swapaxes(fag,-2,-1)[:,it,:,:,nxs],axis=-2) for it in range(nTwin)])
dfbdua = np.array([np.sum(-Hij[it]*np.swapaxes(fbg,-2,-1)[:,it,:,:,nxs],axis=-2) for it in range(nTwin)])
# array(2,nTwin,nAtom,3) & array(2,nTwin,nAtom,6) & array(2,nTwin,nAtom,12)
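#NB: 'it' in the eight sums below is undefined at this point in Python 3
# (comprehension loop variables do not leak scope); these per-twin terms
# look unfinished and would need an explicit loop over range(nTwin) as in
# the dfadx/dfadua terms above.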
dfadGf = np.sum(fa[:,it,:,:,nxs,nxs]*dGdf[0][nxs,nxs,:,:,:,:]-fb[:,it,:,:,nxs,nxs]*dGdf[1][nxs,nxs,:,:,:,:],axis=1)
dfbdGf = np.sum(fb[:,it,:,:,nxs,nxs]*dGdf[0][nxs,nxs,:,:,:,:]+fa[:,it,:,:,nxs,nxs]*dGdf[1][nxs,nxs,:,:,:,:],axis=1)
dfadGx = np.sum(fa[:,it,:,:,nxs,nxs]*dGdx[0][nxs,nxs,:,:,:,:]-fb[:,it,:,:,nxs,nxs]*dGdx[1][nxs,nxs,:,:,:,:],axis=1)
dfbdGx = np.sum(fb[:,it,:,:,nxs,nxs]*dGdx[0][nxs,nxs,:,:,:,:]+fa[:,it,:,:,nxs,nxs]*dGdx[1][nxs,nxs,:,:,:,:],axis=1)
dfadGz = np.sum(fa[:,it,:,0,nxs,nxs]*dGdz[0][nxs,nxs,:,:,:]-fb[:,it,:,0,nxs,nxs]*dGdz[1][nxs,nxs,:,:,:],axis=1)
dfbdGz = np.sum(fb[:,it,:,0,nxs,nxs]*dGdz[0][nxs,nxs,:,:,:]+fa[:,it,:,0,nxs,nxs]*dGdz[1][nxs,nxs,:,:,:],axis=1)
dfadGu = np.sum(fa[:,it,:,:,nxs,nxs]*dGdu[0][nxs,nxs,:,:,:,:]-fb[:,it,:,:,nxs,nxs]*dGdu[1][nxs,nxs,:,:,:,:],axis=1)
dfbdGu = np.sum(fb[:,it,:,:,nxs,nxs]*dGdu[0][nxs,nxs,:,:,:,:]+fa[:,it,:,:,nxs,nxs]*dGdu[1][nxs,nxs,:,:,:,:],axis=1)
# GSASIIpath.IPyBreak()
#NB: the above have been checked against PA(1:10,1:2) in strfctr.for for Al2O3!
SA = fas[0]+fas[1] #float = A+A' (might be array[nTwin])
SB = fbs[0]+fbs[1] #float = B+B' (might be array[nTwin])
dFdfr[iref] = [2.*TwMask[it]*(SA[it]*dfadfr[0][it]+SA[it]*dfadfr[1][it]+SB[it]*dfbdfr[0][it]+SB[it]*dfbdfr[1][it])*Mdata/len(Uniq[it]) for it in range(nTwin)]
dFdx[iref] = [2.*TwMask[it]*(SA[it]*dfadx[it][0]+SA[it]*dfadx[it][1]+SB[it]*dfbdx[it][0]+SB[it]*dfbdx[it][1]) for it in range(nTwin)]
dFdui[iref] = [2.*TwMask[it]*(SA[it]*dfadui[it][0]+SA[it]*dfadui[it][1]+SB[it]*dfbdui[it][0]+SB[it]*dfbdui[it][1]) for it in range(nTwin)]
dFdua[iref] = [2.*TwMask[it]*(SA[it]*dfadua[it][0]+SA[it]*dfadua[it][1]+SB[it]*dfbdua[it][0]+SB[it]*dfbdua[it][1]) for it in range(nTwin)]
dFdtw[iref] = np.sum(TwMask*fas,axis=0)**2+np.sum(TwMask*fbs,axis=0)**2
dFdGf[iref] = [2.*TwMask[it]*(SA[it]*dfadGf[1]+SB[it]*dfbdGf[1]) for it in range(nTwin)]
dFdGx[iref] = [2.*TwMask[it]*(SA[it]*dfadGx[1]+SB[it]*dfbdGx[1]) for it in range(nTwin)]
dFdGz[iref] = [2.*TwMask[it]*(SA[it]*dfadGz[1]+SB[it]*dfbdGz[1]) for it in range(nTwin)]
dFdGu[iref] = [2.*TwMask[it]*(SA[it]*dfadGu[1]+SB[it]*dfbdGu[1]) for it in range(nTwin)]
# GSASIIpath.IPyBreak()
dFdbab[iref] = 2.*fas[0]*np.array([np.sum(dfadba*dBabdA),np.sum(-dfadba*parmDict[phfx+'BabA']*SQfactor*dBabdA)]).T+ \
2.*fbs[0]*np.array([np.sum(dfbdba*dBabdA),np.sum(-dfbdba*parmDict[phfx+'BabA']*SQfactor*dBabdA)]).T
#loop over atoms - each dict entry is list of derivatives for all the reflections
if not iref%100:
print (' %d derivative time %.4f\r'%(iref,time.time()-time0),end='')
for i in range(len(Mdata)): #loop over atoms
dFdvDict[pfx+'Afrac:'+str(i)] = dFdfr.T[i]
dFdvDict[pfx+'dAx:'+str(i)] = dFdx.T[0][i]
dFdvDict[pfx+'dAy:'+str(i)] = dFdx.T[1][i]
dFdvDict[pfx+'dAz:'+str(i)] = dFdx.T[2][i]
dFdvDict[pfx+'AUiso:'+str(i)] = dFdui.T[i]
dFdvDict[pfx+'AU11:'+str(i)] = dFdua.T[0][i]
dFdvDict[pfx+'AU22:'+str(i)] = dFdua.T[1][i]
dFdvDict[pfx+'AU33:'+str(i)] = dFdua.T[2][i]
dFdvDict[pfx+'AU12:'+str(i)] = 2.*dFdua.T[3][i]
dFdvDict[pfx+'AU13:'+str(i)] = 2.*dFdua.T[4][i]
dFdvDict[pfx+'AU23:'+str(i)] = 2.*dFdua.T[5][i]
for j in range(FSSdata.shape[1]): #loop over waves Fzero & Fwid?
dFdvDict[pfx+'Fsin:'+str(i)+':'+str(j)] = dFdGf.T[0][j][i]
dFdvDict[pfx+'Fcos:'+str(i)+':'+str(j)] = dFdGf.T[1][j][i]
nx = 0
if waveTypes[i] in ['Block','ZigZag']:
nx = 1
dFdvDict[pfx+'Tmin:'+str(i)+':0'] = dFdGz.T[0][i] #ZigZag/Block waves (if any)
dFdvDict[pfx+'Tmax:'+str(i)+':0'] = dFdGz.T[1][i]
dFdvDict[pfx+'Xmax:'+str(i)+':0'] = dFdGz.T[2][i]
dFdvDict[pfx+'Ymax:'+str(i)+':0'] = dFdGz.T[3][i]
dFdvDict[pfx+'Zmax:'+str(i)+':0'] = dFdGz.T[4][i]
for j in range(XSSdata.shape[1]-nx): #loop over waves
dFdvDict[pfx+'Xsin:'+str(i)+':'+str(j+nx)] = dFdGx.T[0][j][i]
dFdvDict[pfx+'Ysin:'+str(i)+':'+str(j+nx)] = dFdGx.T[1][j][i]
dFdvDict[pfx+'Zsin:'+str(i)+':'+str(j+nx)] = dFdGx.T[2][j][i]
dFdvDict[pfx+'Xcos:'+str(i)+':'+str(j+nx)] = dFdGx.T[3][j][i]
dFdvDict[pfx+'Ycos:'+str(i)+':'+str(j+nx)] = dFdGx.T[4][j][i]
dFdvDict[pfx+'Zcos:'+str(i)+':'+str(j+nx)] = dFdGx.T[5][j][i]
for j in range(USSdata.shape[1]): #loop over waves
dFdvDict[pfx+'U11sin:'+str(i)+':'+str(j)] = dFdGu.T[0][j][i]
dFdvDict[pfx+'U22sin:'+str(i)+':'+str(j)] = dFdGu.T[1][j][i]
dFdvDict[pfx+'U33sin:'+str(i)+':'+str(j)] = dFdGu.T[2][j][i]
dFdvDict[pfx+'U12sin:'+str(i)+':'+str(j)] = 2.*dFdGu.T[3][j][i]
dFdvDict[pfx+'U13sin:'+str(i)+':'+str(j)] = 2.*dFdGu.T[4][j][i]
dFdvDict[pfx+'U23sin:'+str(i)+':'+str(j)] = 2.*dFdGu.T[5][j][i]
dFdvDict[pfx+'U11cos:'+str(i)+':'+str(j)] = dFdGu.T[6][j][i]
dFdvDict[pfx+'U22cos:'+str(i)+':'+str(j)] = dFdGu.T[7][j][i]
dFdvDict[pfx+'U33cos:'+str(i)+':'+str(j)] = dFdGu.T[8][j][i]
dFdvDict[pfx+'U12cos:'+str(i)+':'+str(j)] = 2.*dFdGu.T[9][j][i]
dFdvDict[pfx+'U13cos:'+str(i)+':'+str(j)] = 2.*dFdGu.T[10][j][i]
dFdvDict[pfx+'U23cos:'+str(i)+':'+str(j)] = 2.*dFdGu.T[11][j][i]
# GSASIIpath.IPyBreak()
dFdvDict[phfx+'BabA'] = dFdbab.T[0]
dFdvDict[phfx+'BabU'] = dFdbab.T[1]
return dFdvDict
def SCExtinction(ref,im,phfx,hfx,pfx,calcControls,parmDict,varyList):
''' Single crystal extinction function; returns extinction & derivative
'''
extCor = 1.0
dervDict = {}
dervCor = 1.0
if calcControls[phfx+'EType'] != 'None':
SQ = 1/(4.*ref[4+im]**2)
if 'C' in parmDict[hfx+'Type']:
cos2T = 1.0-2.*SQ*parmDict[hfx+'Lam']**2 #cos(2theta)
else: #'T'
cos2T = 1.0-2.*SQ*ref[12+im]**2 #cos(2theta)
if 'SXC' in parmDict[hfx+'Type']:
AV = 7.9406e5/parmDict[pfx+'Vol']**2
PL = np.sqrt(1.0-cos2T**2)/parmDict[hfx+'Lam']
P12 = (calcControls[phfx+'Cos2TM']+cos2T**4)/(calcControls[phfx+'Cos2TM']+cos2T**2)
PLZ = AV*P12*ref[9+im]*parmDict[hfx+'Lam']**2
elif 'SNT' in parmDict[hfx+'Type']:
AV = 1.e7/parmDict[pfx+'Vol']**2
PL = SQ
PLZ = AV*ref[9+im]*ref[12+im]**2
elif 'SNC' in parmDict[hfx+'Type']:
AV = 1.e7/parmDict[pfx+'Vol']**2
PL = np.sqrt(1.0-cos2T**2)/parmDict[hfx+'Lam']
PLZ = AV*ref[9+im]*parmDict[hfx+'Lam']**2
if 'Primary' in calcControls[phfx+'EType']:
PLZ *= 1.5
else:
if 'C' in parmDict[hfx+'Type']:
PLZ *= calcControls[phfx+'Tbar']
else: #'T'
PLZ *= ref[13+im] #t-bar
if 'Primary' in calcControls[phfx+'EType']:
PLZ *= 1.5
PSIG = parmDict[phfx+'Ep']
elif 'I & II' in calcControls[phfx+'EType']:
PSIG = parmDict[phfx+'Eg']/np.sqrt(1.+(parmDict[phfx+'Es']*PL/parmDict[phfx+'Eg'])**2)
elif 'Type II' in calcControls[phfx+'EType']:
PSIG = parmDict[phfx+'Es']
else: # 'Secondary Type I'
PSIG = parmDict[phfx+'Eg']/PL
AG = 0.58+0.48*cos2T+0.24*cos2T**2
AL = 0.025+0.285*cos2T
BG = 0.02-0.025*cos2T
BL = 0.15-0.2*(0.75-cos2T)**2
if cos2T < 0.:
BL = -0.45*cos2T
CG = 2.
CL = 2.
PF = PLZ*PSIG
if 'Gaussian' in calcControls[phfx+'EApprox']:
PF4 = 1.+CG*PF+AG*PF**2/(1.+BG*PF)
extCor = np.sqrt(PF4)
PF3 = 0.5*(CG+2.*AG*PF/(1.+BG*PF)-AG*PF**2*BG/(1.+BG*PF)**2)/(PF4*extCor)
else:
PF4 = 1.+CL*PF+AL*PF**2/(1.+BL*PF)
extCor = np.sqrt(PF4)
PF3 = 0.5*(CL+2.*AL*PF/(1.+BL*PF)-AL*PF**2*BL/(1.+BL*PF)**2)/(PF4*extCor)
dervCor = (1.+PF)*PF3 #extinction corr for other derivatives
if 'Primary' in calcControls[phfx+'EType'] and phfx+'Ep' in varyList:
dervDict[phfx+'Ep'] = -ref[7+im]*PLZ*PF3
if 'II' in calcControls[phfx+'EType'] and phfx+'Es' in varyList:
dervDict[phfx+'Es'] = -ref[7+im]*PLZ*PF3*(PSIG/parmDict[phfx+'Es'])**3
if 'I' in calcControls[phfx+'EType'] and phfx+'Eg' in varyList:
dervDict[phfx+'Eg'] = -ref[7+im]*PLZ*PF3*(PSIG/parmDict[phfx+'Eg'])**3*PL**2
return 1./extCor,dervDict,dervCor
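# Note the return convention: the first item is 1/extCor (applied as a
# multiplier to the calculated intensity), dervDict holds the Ep/Es/Eg
# derivatives for varied parameters, and dervCor rescales the remaining
# derivatives for the extinction effect.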
def Dict2Values(parmdict, varylist):
'''Use before call to leastsq to set up a list of values for the parameters
in parmdict, as selected by key in varylist'''
return [parmdict[key] for key in varylist]
def Values2Dict(parmdict, varylist, values):
''' Use after call to leastsq to update the parameter dictionary with
values corresponding to keys in varylist'''
parmdict.update(zip(varylist,values))
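# Minimal sketch (hypothetical parameter names) of the leastsq round trip:
#   parmDict = {'0::A0':0.05,'0::AUiso:0':0.025,':0:Scale':1.0}
#   varyList = ['0::A0',':0:Scale']
#   values = Dict2Values(parmDict,varyList)      # -> [0.05, 1.0]
#   Values2Dict(parmDict,varyList,[0.051,1.02])  # updates only the varied keys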
def GetNewCellParms(parmDict,varyList):
'''Compute updated cell parameters (A terms) from hydrostatic strain shifts
(Dij) found in varyList; returns dict of {Dij parm name: [Aij parm name, new value]}
'''
newCellDict = {}
Anames = ['A'+str(i) for i in range(6)]
Ddict = dict(zip(['D11','D22','D33','D12','D13','D23'],Anames))
for item in varyList:
keys = item.split(':')
if keys[2] in Ddict:
key = keys[0]+'::'+Ddict[keys[2]] #key is e.g. '0::A0'
parm = keys[0]+'::'+keys[2] #parm is e.g. '0::D11'
newCellDict[parm] = [key,parmDict[key]+parmDict[item]]
return newCellDict # is e.g. {'0::D11':['0::A0',A0+D11]}
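# Minimal sketch (hypothetical names): with parmDict = {'0::A0':0.040,
# '0::D11':0.001} and varyList = ['0::D11'] the result is
#   {'0::D11':['0::A0',0.041]}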
def ApplyXYZshifts(parmDict,varyList):
'''
takes atom x,y,z shift and applies it to corresponding atom x,y,z value
:param dict parmDict: parameter dictionary
:param list varyList: list of variables (not used!)
:returns: newAtomDict - dictionary of new atomic coordinate names & values; key is parameter shift name
'''
newAtomDict = {}
for item in parmDict:
if 'dA' in item:
parm = ''.join(item.split('d'))
parmDict[parm] += parmDict[item]
newAtomDict[item] = [parm,parmDict[parm]]
return newAtomDict
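# Minimal sketch (hypothetical parameter names): with
#   parmDict = {'0::Ax:3':0.25,'0::dAx:3':0.01}
# the shift is folded in, so parmDict['0::Ax:3'] becomes 0.26 and the
# function returns {'0::dAx:3':['0::Ax:3',0.26]}.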
def SHTXcal(refl,im,g,pfx,hfx,SGData,calcControls,parmDict):
'Spherical harmonics texture'
IFCoup = 'Bragg' in calcControls[hfx+'instType']
if 'T' in calcControls[hfx+'histType']:
tth = parmDict[hfx+'2-theta']
else:
tth = refl[5+im]
odfCor = 1.0
H = refl[:3]
cell = G2lat.Gmat2cell(g)
Sangls = [parmDict[pfx+'SH omega'],parmDict[pfx+'SH chi'],parmDict[pfx+'SH phi']]
Gangls = [parmDict[hfx+'Phi'],parmDict[hfx+'Chi'],parmDict[hfx+'Omega'],parmDict[hfx+'Azimuth']]
phi,beta = G2lat.CrsAng(H,cell,SGData)
psi,gam,x,x = G2lat.SamAng(tth/2.,Gangls,Sangls,IFCoup) #ignore 2 sets of angle derivs.
SHnames = G2lat.GenSHCoeff(SGData['SGLaue'],parmDict[pfx+'SHmodel'],parmDict[pfx+'SHorder'])
for item in SHnames:
L,M,N = eval(item.strip('C'))
Kcl = G2lat.GetKcl(L,N,SGData['SGLaue'],phi,beta)
Ksl,x,x = G2lat.GetKsl(L,M,parmDict[pfx+'SHmodel'],psi,gam)
Lnorm = G2lat.Lnorm(L)
odfCor += parmDict[pfx+item]*Lnorm*Kcl*Ksl
return odfCor
def SHTXcalDerv(refl,im,g,pfx,hfx,SGData,calcControls,parmDict):
'Spherical harmonics texture derivatives'
if 'T' in calcControls[hfx+'histType']:
tth = parmDict[hfx+'2-theta']
else:
tth = refl[5+im]
IFCoup = 'Bragg' in calcControls[hfx+'instType']
odfCor = 1.0
dFdODF = {}
dFdSA = [0,0,0]
H = refl[:3]
cell = G2lat.Gmat2cell(g)
Sangls = [parmDict[pfx+'SH omega'],parmDict[pfx+'SH chi'],parmDict[pfx+'SH phi']]
Gangls = [parmDict[hfx+'Phi'],parmDict[hfx+'Chi'],parmDict[hfx+'Omega'],parmDict[hfx+'Azimuth']]
phi,beta = G2lat.CrsAng(H,cell,SGData)
psi,gam,dPSdA,dGMdA = G2lat.SamAng(tth/2.,Gangls,Sangls,IFCoup)
SHnames = G2lat.GenSHCoeff(SGData['SGLaue'],parmDict[pfx+'SHmodel'],parmDict[pfx+'SHorder'])
for item in SHnames:
L,M,N = eval(item.strip('C'))
Kcl = G2lat.GetKcl(L,N,SGData['SGLaue'],phi,beta)
Ksl,dKsdp,dKsdg = G2lat.GetKsl(L,M,parmDict[pfx+'SHmodel'],psi,gam)
Lnorm = G2lat.Lnorm(L)
odfCor += parmDict[pfx+item]*Lnorm*Kcl*Ksl
dFdODF[pfx+item] = Lnorm*Kcl*Ksl
for i in range(3):
dFdSA[i] += parmDict[pfx+item]*Lnorm*Kcl*(dKsdp*dPSdA[i]+dKsdg*dGMdA[i])
return odfCor,dFdODF,dFdSA
def SHPOcal(refl,im,g,phfx,hfx,SGData,calcControls,parmDict):
'spherical harmonics preferred orientation (cylindrical symmetry only)'
if 'T' in calcControls[hfx+'histType']:
tth = parmDict[hfx+'2-theta']
else:
tth = refl[5+im]
odfCor = 1.0
H = refl[:3]
cell = G2lat.Gmat2cell(g)
Sangls = [0.,0.,0.]
if 'Bragg' in calcControls[hfx+'instType']:
Gangls = [0.,90.,0.,parmDict[hfx+'Azimuth']]
IFCoup = True
else:
Gangls = [parmDict[hfx+'Phi'],parmDict[hfx+'Chi'],parmDict[hfx+'Omega'],parmDict[hfx+'Azimuth']]
IFCoup = False
phi,beta = G2lat.CrsAng(H,cell,SGData)
psi,gam,x,x = G2lat.SamAng(tth/2.,Gangls,Sangls,IFCoup) #ignore 2 sets of angle derivs.
SHnames = calcControls[phfx+'SHnames']
for item in SHnames:
L,N = eval(item.strip('C'))
Kcl = G2lat.GetKcl(L,N,SGData['SGLaue'],phi,beta)
Ksl,x,x = G2lat.GetKsl(L,0,'0',psi,gam)
Lnorm = G2lat.Lnorm(L)
odfCor += parmDict[phfx+item]*Lnorm*Kcl*Ksl
return np.squeeze(odfCor)
def SHPOcalDerv(refl,im,g,phfx,hfx,SGData,calcControls,parmDict):
'spherical harmonics preferred orientation derivatives (cylindrical symmetry only)'
if 'T' in calcControls[hfx+'histType']:
tth = parmDict[hfx+'2-theta']
else:
tth = refl[5+im]
odfCor = 1.0
dFdODF = {}
H = refl[:3]
cell = G2lat.Gmat2cell(g)
Sangls = [0.,0.,0.]
if 'Bragg' in calcControls[hfx+'instType']:
Gangls = [0.,90.,0.,parmDict[hfx+'Azimuth']]
IFCoup = True
else:
Gangls = [parmDict[hfx+'Phi'],parmDict[hfx+'Chi'],parmDict[hfx+'Omega'],parmDict[hfx+'Azimuth']]
IFCoup = False
phi,beta = G2lat.CrsAng(H,cell,SGData)
psi,gam,x,x = G2lat.SamAng(tth/2.,Gangls,Sangls,IFCoup) #ignore 2 sets of angle derivs.
SHnames = calcControls[phfx+'SHnames']
for item in SHnames:
L,N = eval(item.strip('C'))
Kcl = G2lat.GetKcl(L,N,SGData['SGLaue'],phi,beta)
Ksl,x,x = G2lat.GetKsl(L,0,'0',psi,gam)
Lnorm = G2lat.Lnorm(L)
odfCor += parmDict[phfx+item]*Lnorm*Kcl*Ksl
dFdODF[phfx+item] = Kcl*Ksl*Lnorm
return odfCor,dFdODF
def GetPrefOri(uniq,G,g,phfx,hfx,SGData,calcControls,parmDict):
'March-Dollase preferred orientation correction'
POcorr = 1.0
MD = parmDict[phfx+'MD']
if MD != 1.0:
MDAxis = calcControls[phfx+'MDAxis']
sumMD = 0
for H in uniq:
cosP,sinP = G2lat.CosSinAngle(H,MDAxis,G)
A = 1.0/np.sqrt((MD*cosP)**2+sinP**2/MD)
sumMD += A**3
POcorr = sumMD/len(uniq)
return POcorr
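# The March-Dollase weight for each equivalent reflection is
#   A(alpha) = 1/sqrt((MD*cos(alpha))**2 + sin(alpha)**2/MD)
# and POcorr is the mean of A**3 over the equivalents; MD = 1 (no texture)
# gives POcorr = 1 exactly, hence the short-circuit above.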
def GetPrefOriDerv(refl,im,uniq,G,g,phfx,hfx,SGData,calcControls,parmDict):
'''March-Dollase or spherical harmonics preferred orientation correction & derivatives'''
POcorr = 1.0
POderv = {}
if calcControls[phfx+'poType'] == 'MD':
MD = parmDict[phfx+'MD']
MDAxis = calcControls[phfx+'MDAxis']
sumMD = 0
sumdMD = 0
for H in uniq:
cosP,sinP = G2lat.CosSinAngle(H,MDAxis,G)
A = 1.0/np.sqrt((MD*cosP)**2+sinP**2/MD)
sumMD += A**3
sumdMD -= (1.5*A**5)*(2.0*MD*cosP**2-(sinP/MD)**2)
POcorr = sumMD/len(uniq)
POderv[phfx+'MD'] = sumdMD/len(uniq)
else: #spherical harmonics
if calcControls[phfx+'SHord']:
POcorr,POderv = SHPOcalDerv(refl,im,g,phfx,hfx,SGData,calcControls,parmDict)
return POcorr,POderv
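# (For reference: the MD term accumulated in sumdMD above is
#  d(A**3)/dMD = -1.5*A**5*(2.*MD*cosP**2 - (sinP/MD)**2),
# i.e. the analytic derivative of the March-Dollase weight.)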
def GetAbsorb(refl,im,hfx,calcControls,parmDict):
'''Sample absorption correction: cylindrical sample for Debye-Scherrer,
surface roughness for Bragg-Brentano'''
if 'Debye' in calcControls[hfx+'instType']:
if 'T' in calcControls[hfx+'histType']:
return G2pwd.Absorb('Cylinder',parmDict[hfx+'Absorption']*refl[14+im],abs(parmDict[hfx+'2-theta']),0,0)
else:
return G2pwd.Absorb('Cylinder',parmDict[hfx+'Absorption'],refl[5+im],0,0)
else:
return G2pwd.SurfaceRough(parmDict[hfx+'SurfRoughA'],parmDict[hfx+'SurfRoughB'],refl[5+im])
def GetAbsorbDerv(refl,im,hfx,calcControls,parmDict):
'''Derivatives of the sample absorption correction (see GetAbsorb)'''
if 'Debye' in calcControls[hfx+'instType']:
if 'T' in calcControls[hfx+'histType']:
return G2pwd.AbsorbDerv('Cylinder',parmDict[hfx+'Absorption']*refl[14+im],abs(parmDict[hfx+'2-theta']),0,0)
else:
return G2pwd.AbsorbDerv('Cylinder',parmDict[hfx+'Absorption'],refl[5+im],0,0)
else:
return np.array(G2pwd.SurfaceRoughDerv(parmDict[hfx+'SurfRoughA'],parmDict[hfx+'SurfRoughB'],refl[5+im]))
def GetPwdrExt(refl,im,pfx,phfx,hfx,calcControls,parmDict):
'''Powder extinction correction; combines Bragg & Laue components
weighted by sin^2(theta) (see return value)'''
coef = np.array([-0.5,0.25,-0.10416667,0.036458333,-0.0109375,2.8497409E-3])
pi2 = np.sqrt(2./np.pi)
if 'T' in calcControls[hfx+'histType']:
sth2 = sind(abs(parmDict[hfx+'2-theta'])/2.)**2
wave = refl[14+im]
else: #CW
sth2 = sind(refl[5+im]/2.)**2
wave = parmDict.get(hfx+'Lam',parmDict.get(hfx+'Lam1',1.0))
c2th = 1.-2.0*sth2
flv2 = refl[9+im]*(wave/parmDict[pfx+'Vol'])**2
if 'X' in calcControls[hfx+'histType']:
flv2 *= 0.079411*(1.0+c2th**2)/2.0
xfac = flv2*parmDict[phfx+'Extinction']
exb = 1.0
if xfac > -1.:
exb = 1./np.sqrt(1.+xfac)
exl = 1.0
if 0 < xfac <= 1.:
xn = np.array([xfac**(i+1) for i in range(6)])
exl += np.sum(xn*coef)
elif xfac > 1.:
xfac2 = 1./np.sqrt(xfac)
exl = pi2*(1.-0.125/xfac)*xfac2
return exb*sth2+exl*(1.-sth2)
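# The return value blends the Bragg (exb) and Laue (exl) extinction limits as
#   exb*sin^2(theta) + exl*cos^2(theta)
# with xfac = Extinction*F^2*(wave/Vol)^2 (plus a polarization factor for
# X-rays) as the extinction argument.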
def GetPwdrExtDerv(refl,im,pfx,phfx,hfx,calcControls,parmDict):
'''Derivative of the powder extinction correction w/r the Extinction parameter'''
coef = np.array([-0.5,0.25,-0.10416667,0.036458333,-0.0109375,2.8497409E-3])
pi2 = np.sqrt(2./np.pi)
if 'T' in calcControls[hfx+'histType']:
sth2 = sind(abs(parmDict[hfx+'2-theta'])/2.)**2
wave = refl[14+im]
else: #CW
sth2 = sind(refl[5+im]/2.)**2
wave = parmDict.get(hfx+'Lam',parmDict.get(hfx+'Lam1',1.0))
c2th = 1.-2.0*sth2
flv2 = refl[9+im]*(wave/parmDict[pfx+'Vol'])**2
if 'X' in calcControls[hfx+'histType']:
flv2 *= 0.079411*(1.0+c2th**2)/2.0
xfac = flv2*parmDict[phfx+'Extinction']
dbde = -500.*flv2
if xfac > -1.:
dbde = -0.5*flv2/np.sqrt(1.+xfac)**3
dlde = 0.
if 0 < xfac <= 1.:
xn = np.array([i*flv2*xfac**i for i in [1,2,3,4,5,6]])
dlde = np.sum(xn*coef)
elif xfac > 1.:
xfac2 = 1./np.sqrt(xfac)
dlde = flv2*pi2*xfac2*(-1./xfac+0.375/xfac**2)
return dbde*sth2+dlde*(1.-sth2)
def GetIntensityCorr(refl,im,uniq,G,g,pfx,phfx,hfx,SGData,calcControls,parmDict):
'''Full powder intensity correction: scale*multiplicity times polarization,
preferred orientation, absorption & extinction corrections'''
Icorr = parmDict[phfx+'Scale']*parmDict[hfx+'Scale']*refl[3+im] #scale*multiplicity
if 'X' in parmDict[hfx+'Type']:
Icorr *= G2pwd.Polarization(parmDict[hfx+'Polariz.'],refl[5+im],parmDict[hfx+'Azimuth'])[0]
POcorr = 1.0
if pfx+'SHorder' in parmDict: #generalized spherical harmonics texture - takes precedence
POcorr = SHTXcal(refl,im,g,pfx,hfx,SGData,calcControls,parmDict)
elif calcControls[phfx+'poType'] == 'MD': #March-Dollase
POcorr = GetPrefOri(uniq,G,g,phfx,hfx,SGData,calcControls,parmDict)
elif calcControls[phfx+'SHord']: #cylindrical spherical harmonics
POcorr = SHPOcal(refl,im,g,phfx,hfx,SGData,calcControls,parmDict)
Icorr *= POcorr
AbsCorr = 1.0
AbsCorr = GetAbsorb(refl,im,hfx,calcControls,parmDict)
Icorr *= AbsCorr
ExtCorr = GetPwdrExt(refl,im,pfx,phfx,hfx,calcControls,parmDict)
Icorr *= ExtCorr
return Icorr,POcorr,AbsCorr,ExtCorr
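# The correction is purely multiplicative:
#   Icorr = phase Scale * histogram Scale * multiplicity * polarization
#           * POcorr * AbsCorr * ExtCorr
# so GetIntensityDerv below can work with relative (d(corr)/corr) derivatives.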
def GetIntensityDerv(refl,im,wave,uniq,G,g,pfx,phfx,hfx,SGData,calcControls,parmDict):
'''Relative derivatives of the powder intensity corrections (each divided
by its correction; see GetIntensityCorr)'''
dIdsh = 1./parmDict[hfx+'Scale']
dIdsp = 1./parmDict[phfx+'Scale']
if 'X' in parmDict[hfx+'Type']:
pola,dIdPola = G2pwd.Polarization(parmDict[hfx+'Polariz.'],refl[5+im],parmDict[hfx+'Azimuth'])
dIdPola /= pola
else: #'N'
dIdPola = 0.0
dFdODF = {}
dFdSA = [0,0,0]
dIdPO = {}
if pfx+'SHorder' in parmDict:
odfCor,dFdODF,dFdSA = SHTXcalDerv(refl,im,g,pfx,hfx,SGData,calcControls,parmDict)
for iSH in dFdODF:
dFdODF[iSH] /= odfCor
for i in range(3):
dFdSA[i] /= odfCor
elif calcControls[phfx+'poType'] == 'MD' or calcControls[phfx+'SHord']:
POcorr,dIdPO = GetPrefOriDerv(refl,im,uniq,G,g,phfx,hfx,SGData,calcControls,parmDict)
for iPO in dIdPO:
dIdPO[iPO] /= POcorr
if 'T' in parmDict[hfx+'Type']:
dFdAb = GetAbsorbDerv(refl,im,hfx,calcControls,parmDict)*wave/refl[16+im] #wave/abs corr
dFdEx = GetPwdrExtDerv(refl,im,pfx,phfx,hfx,calcControls,parmDict)/refl[17+im] #/ext corr
else:
dFdAb = GetAbsorbDerv(refl,im,hfx,calcControls,parmDict)*wave/refl[13+im] #wave/abs corr
dFdEx = GetPwdrExtDerv(refl,im,pfx,phfx,hfx,calcControls,parmDict)/refl[14+im] #/ext corr
return dIdsh,dIdsp,dIdPola,dIdPO,dFdODF,dFdSA,dFdAb,dFdEx
def GetSampleSigGam(refl,im,wave,G,GB,SGData,hfx,phfx,calcControls,parmDict):
'''Sample broadening contributions sig & gam from crystallite size
(isotropic/uniaxial/ellipsoidal) and microstrain (isotropic/uniaxial/
generalized Stephens) models, for CW or TOF data'''
if 'C' in calcControls[hfx+'histType']: #All checked & OK
costh = cosd(refl[5+im]/2.)
#crystallite size
if calcControls[phfx+'SizeType'] == 'isotropic':
Sgam = 1.8*wave/(np.pi*parmDict[phfx+'Size;i']*costh)
elif calcControls[phfx+'SizeType'] == 'uniaxial':
H = np.array(refl[:3])
P = np.array(calcControls[phfx+'SizeAxis'])
cosP,sinP = G2lat.CosSinAngle(H,P,G)
Sgam = (1.8*wave/np.pi)/(parmDict[phfx+'Size;i']*parmDict[phfx+'Size;a']*costh)
Sgam *= np.sqrt((sinP*parmDict[phfx+'Size;a'])**2+(cosP*parmDict[phfx+'Size;i'])**2)
else: #ellipsoidal crystallites
Sij =[parmDict[phfx+'Size;%d'%(i)] for i in range(6)]
H = np.array(refl[:3])
lenR = G2pwd.ellipseSize(H,Sij,GB)
Sgam = 1.8*wave/(np.pi*costh*lenR)
#microstrain
if calcControls[phfx+'MustrainType'] == 'isotropic':
Mgam = 0.018*parmDict[phfx+'Mustrain;i']*tand(refl[5+im]/2.)/np.pi
elif calcControls[phfx+'MustrainType'] == 'uniaxial':
H = np.array(refl[:3])
P = np.array(calcControls[phfx+'MustrainAxis'])
cosP,sinP = G2lat.CosSinAngle(H,P,G)
Si = parmDict[phfx+'Mustrain;i']
Sa = parmDict[phfx+'Mustrain;a']
Mgam = 0.018*Si*Sa*tand(refl[5+im]/2.)/(np.pi*np.sqrt((Si*cosP)**2+(Sa*sinP)**2))
else: #generalized - P.W. Stephens model
Strms = G2spc.MustrainCoeff(refl[:3],SGData)
Sum = 0
for i,strm in enumerate(Strms):
Sum += parmDict[phfx+'Mustrain;'+str(i)]*strm
Mgam = 0.018*refl[4+im]**2*tand(refl[5+im]/2.)*np.sqrt(Sum)/np.pi
elif 'T' in calcControls[hfx+'histType']: #All checked & OK
#crystallite size
if calcControls[phfx+'SizeType'] == 'isotropic': #OK
Sgam = 1.e-4*parmDict[hfx+'difC']*refl[4+im]**2/parmDict[phfx+'Size;i']
elif calcControls[phfx+'SizeType'] == 'uniaxial': #OK
H = np.array(refl[:3])
P = np.array(calcControls[phfx+'SizeAxis'])
cosP,sinP = G2lat.CosSinAngle(H,P,G)
Sgam = 1.e-4*parmDict[hfx+'difC']*refl[4+im]**2/(parmDict[phfx+'Size;i']*parmDict[phfx+'Size;a'])
Sgam *= np.sqrt((sinP*parmDict[phfx+'Size;a'])**2+(cosP*parmDict[phfx+'Size;i'])**2)
else: #ellipsoidal crystallites #OK
Sij =[parmDict[phfx+'Size;%d'%(i)] for i in range(6)]
H = np.array(refl[:3])
lenR = G2pwd.ellipseSize(H,Sij,GB)
Sgam = 1.e-4*parmDict[hfx+'difC']*refl[4+im]**2/lenR
#microstrain
if calcControls[phfx+'MustrainType'] == 'isotropic': #OK
Mgam = 1.e-6*parmDict[hfx+'difC']*refl[4+im]*parmDict[phfx+'Mustrain;i']
elif calcControls[phfx+'MustrainType'] == 'uniaxial': #OK
H = np.array(refl[:3])
P = np.array(calcControls[phfx+'MustrainAxis'])
cosP,sinP = G2lat.CosSinAngle(H,P,G)
Si = parmDict[phfx+'Mustrain;i']
Sa = parmDict[phfx+'Mustrain;a']
Mgam = 1.e-6*parmDict[hfx+'difC']*refl[4+im]*Si*Sa/np.sqrt((Si*cosP)**2+(Sa*sinP)**2)
else: #generalized - P.W. Stephens model OK
Strms = G2spc.MustrainCoeff(refl[:3],SGData)
Sum = 0
for i,strm in enumerate(Strms):
Sum += parmDict[phfx+'Mustrain;'+str(i)]*strm
Mgam = 1.e-6*parmDict[hfx+'difC']*np.sqrt(Sum)*refl[4+im]**3
gam = Sgam*parmDict[phfx+'Size;mx']+Mgam*parmDict[phfx+'Mustrain;mx']
sig = (Sgam*(1.-parmDict[phfx+'Size;mx']))**2+(Mgam*(1.-parmDict[phfx+'Mustrain;mx']))**2
sig /= ateln2
return sig,gam
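# Sanity sketch for the isotropic CW size term (Scherrer-type):
#   Sgam = 1.8*wave/(pi*Size;i*cos(theta))
# where 1.8/pi = (180/pi)/100 scaled for Size in microns, so Sgam comes out
# in centidegrees 2-theta; e.g. wave = 1.5406, Size;i = 1.0 um and
# 2-theta = 40 deg give 1.8*1.5406/(pi*1.0*cos(20 deg)) ~ 0.94 cdeg.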
def GetSampleSigGamDerv(refl,im,wave,G,GB,SGData,hfx,phfx,calcControls,parmDict):
'''Derivatives of the sample broadening sig & gam (see GetSampleSigGam);
returns sigDict,gamDict keyed by parameter name'''
gamDict = {}
sigDict = {}
if 'C' in calcControls[hfx+'histType']: #All checked & OK
costh = cosd(refl[5+im]/2.)
tanth = tand(refl[5+im]/2.)
#crystallite size derivatives
if calcControls[phfx+'SizeType'] == 'isotropic':
Sgam = 1.8*wave/(np.pi*costh*parmDict[phfx+'Size;i'])
gamDict[phfx+'Size;i'] = -1.8*wave*parmDict[phfx+'Size;mx']/(np.pi*costh*parmDict[phfx+'Size;i']**2)
sigDict[phfx+'Size;i'] = -3.6*Sgam*wave*(1.-parmDict[phfx+'Size;mx'])**2/(np.pi*costh*ateln2)
elif calcControls[phfx+'SizeType'] == 'uniaxial':
H = np.array(refl[:3])
P = np.array(calcControls[phfx+'SizeAxis'])
cosP,sinP = G2lat.CosSinAngle(H,P,G)
Si = parmDict[phfx+'Size;i']
Sa = parmDict[phfx+'Size;a']
gami = 1.8*wave/(costh*np.pi*Si*Sa)
sqtrm = np.sqrt((sinP*Sa)**2+(cosP*Si)**2)
Sgam = gami*sqtrm
dsi = gami*Si*cosP**2/sqtrm-Sgam/Si
dsa = gami*Sa*sinP**2/sqtrm-Sgam/Sa
gamDict[phfx+'Size;i'] = dsi*parmDict[phfx+'Size;mx']
gamDict[phfx+'Size;a'] = dsa*parmDict[phfx+'Size;mx']
sigDict[phfx+'Size;i'] = 2.*dsi*Sgam*(1.-parmDict[phfx+'Size;mx'])**2/ateln2
sigDict[phfx+'Size;a'] = 2.*dsa*Sgam*(1.-parmDict[phfx+'Size;mx'])**2/ateln2
else: #ellipsoidal crystallites
const = 1.8*wave/(np.pi*costh)
Sij =[parmDict[phfx+'Size;%d'%(i)] for i in range(6)]
H = np.array(refl[:3])
lenR,dRdS = G2pwd.ellipseSizeDerv(H,Sij,GB)
Sgam = const/lenR
for i,item in enumerate([phfx+'Size;%d'%(j) for j in range(6)]):
gamDict[item] = -(const/lenR**2)*dRdS[i]*parmDict[phfx+'Size;mx']
sigDict[item] = -2.*Sgam*(const/lenR**2)*dRdS[i]*(1.-parmDict[phfx+'Size;mx'])**2/ateln2
gamDict[phfx+'Size;mx'] = Sgam
sigDict[phfx+'Size;mx'] = -2.*Sgam**2*(1.-parmDict[phfx+'Size;mx'])/ateln2
#microstrain derivatives
if calcControls[phfx+'MustrainType'] == 'isotropic':
Mgam = 0.018*parmDict[phfx+'Mustrain;i']*tand(refl[5+im]/2.)/np.pi
gamDict[phfx+'Mustrain;i'] = 0.018*tanth*parmDict[phfx+'Mustrain;mx']/np.pi
sigDict[phfx+'Mustrain;i'] = 0.036*Mgam*tanth*(1.-parmDict[phfx+'Mustrain;mx'])**2/(np.pi*ateln2)
elif calcControls[phfx+'MustrainType'] == 'uniaxial':
H = np.array(refl[:3])
P = np.array(calcControls[phfx+'MustrainAxis'])
cosP,sinP = G2lat.CosSinAngle(H,P,G)
Si = parmDict[phfx+'Mustrain;i']
Sa = parmDict[phfx+'Mustrain;a']
gami = 0.018*Si*Sa*tanth/np.pi
sqtrm = np.sqrt((Si*cosP)**2+(Sa*sinP)**2)
Mgam = gami/sqtrm
dsi = -gami*Si*cosP**2/sqtrm**3
dsa = -gami*Sa*sinP**2/sqtrm**3
gamDict[phfx+'Mustrain;i'] = (Mgam/Si+dsi)*parmDict[phfx+'Mustrain;mx']
gamDict[phfx+'Mustrain;a'] = (Mgam/Sa+dsa)*parmDict[phfx+'Mustrain;mx']
sigDict[phfx+'Mustrain;i'] = 2*(Mgam/Si+dsi)*Mgam*(1.-parmDict[phfx+'Mustrain;mx'])**2/ateln2
sigDict[phfx+'Mustrain;a'] = 2*(Mgam/Sa+dsa)*Mgam*(1.-parmDict[phfx+'Mustrain;mx'])**2/ateln2
else: #generalized - P.W. Stephens model
const = 0.018*refl[4+im]**2*tanth/np.pi
Strms = G2spc.MustrainCoeff(refl[:3],SGData)
Sum = 0
for i,strm in enumerate(Strms):
Sum += parmDict[phfx+'Mustrain;'+str(i)]*strm
gamDict[phfx+'Mustrain;'+str(i)] = strm*parmDict[phfx+'Mustrain;mx']/2.
sigDict[phfx+'Mustrain;'+str(i)] = strm*(1.-parmDict[phfx+'Mustrain;mx'])**2
Mgam = const*np.sqrt(Sum)
for i in range(len(Strms)):
gamDict[phfx+'Mustrain;'+str(i)] *= Mgam/Sum
sigDict[phfx+'Mustrain;'+str(i)] *= const**2/ateln2
gamDict[phfx+'Mustrain;mx'] = Mgam
sigDict[phfx+'Mustrain;mx'] = -2.*Mgam**2*(1.-parmDict[phfx+'Mustrain;mx'])/ateln2
else: #'T'OF - All checked & OK
if calcControls[phfx+'SizeType'] == 'isotropic': #OK
Sgam = 1.e-4*parmDict[hfx+'difC']*refl[4+im]**2/parmDict[phfx+'Size;i']
gamDict[phfx+'Size;i'] = -Sgam*parmDict[phfx+'Size;mx']/parmDict[phfx+'Size;i']
sigDict[phfx+'Size;i'] = -2.*Sgam**2*(1.-parmDict[phfx+'Size;mx'])**2/(ateln2*parmDict[phfx+'Size;i'])
elif calcControls[phfx+'SizeType'] == 'uniaxial': #OK
const = 1.e-4*parmDict[hfx+'difC']*refl[4+im]**2
H = np.array(refl[:3])
P = np.array(calcControls[phfx+'SizeAxis'])
cosP,sinP = G2lat.CosSinAngle(H,P,G)
Si = parmDict[phfx+'Size;i']
Sa = parmDict[phfx+'Size;a']
gami = const/(Si*Sa)
sqtrm = np.sqrt((sinP*Sa)**2+(cosP*Si)**2)
Sgam = gami*sqtrm
dsi = gami*Si*cosP**2/sqtrm-Sgam/Si
dsa = gami*Sa*sinP**2/sqtrm-Sgam/Sa
gamDict[phfx+'Size;i'] = dsi*parmDict[phfx+'Size;mx']
gamDict[phfx+'Size;a'] = dsa*parmDict[phfx+'Size;mx']
sigDict[phfx+'Size;i'] = 2.*dsi*Sgam*(1.-parmDict[phfx+'Size;mx'])**2/ateln2
sigDict[phfx+'Size;a'] = 2.*dsa*Sgam*(1.-parmDict[phfx+'Size;mx'])**2/ateln2
else: #OK ellipsoidal crystallites
const = 1.e-4*parmDict[hfx+'difC']*refl[4+im]**2
Sij =[parmDict[phfx+'Size;%d'%(i)] for i in range(6)]
H = np.array(refl[:3])
lenR,dRdS = G2pwd.ellipseSizeDerv(H,Sij,GB)
Sgam = const/lenR
for i,item in enumerate([phfx+'Size;%d'%(j) for j in range(6)]):
gamDict[item] = -(const/lenR**2)*dRdS[i]*parmDict[phfx+'Size;mx']
sigDict[item] = -2.*Sgam*(const/lenR**2)*dRdS[i]*(1.-parmDict[phfx+'Size;mx'])**2/ateln2
gamDict[phfx+'Size;mx'] = Sgam #OK
sigDict[phfx+'Size;mx'] = -2.*Sgam**2*(1.-parmDict[phfx+'Size;mx'])/ateln2 #OK
#microstrain derivatives
if calcControls[phfx+'MustrainType'] == 'isotropic':
Mgam = 1.e-6*parmDict[hfx+'difC']*refl[4+im]*parmDict[phfx+'Mustrain;i']
gamDict[phfx+'Mustrain;i'] = 1.e-6*refl[4+im]*parmDict[hfx+'difC']*parmDict[phfx+'Mustrain;mx'] #OK
sigDict[phfx+'Mustrain;i'] = 2.*Mgam**2*(1.-parmDict[phfx+'Mustrain;mx'])**2/(ateln2*parmDict[phfx+'Mustrain;i'])
elif calcControls[phfx+'MustrainType'] == 'uniaxial':
H = np.array(refl[:3])
P = np.array(calcControls[phfx+'MustrainAxis'])
cosP,sinP = G2lat.CosSinAngle(H,P,G)
Si = parmDict[phfx+'Mustrain;i']
Sa = parmDict[phfx+'Mustrain;a']
gami = 1.e-6*parmDict[hfx+'difC']*refl[4+im]*Si*Sa
sqtrm = np.sqrt((Si*cosP)**2+(Sa*sinP)**2)
Mgam = gami/sqtrm
dsi = -gami*Si*cosP**2/sqtrm**3
dsa = -gami*Sa*sinP**2/sqtrm**3
gamDict[phfx+'Mustrain;i'] = (Mgam/Si+dsi)*parmDict[phfx+'Mustrain;mx']
gamDict[phfx+'Mustrain;a'] = (Mgam/Sa+dsa)*parmDict[phfx+'Mustrain;mx']
sigDict[phfx+'Mustrain;i'] = 2*(Mgam/Si+dsi)*Mgam*(1.-parmDict[phfx+'Mustrain;mx'])**2/ateln2
sigDict[phfx+'Mustrain;a'] = 2*(Mgam/Sa+dsa)*Mgam*(1.-parmDict[phfx+'Mustrain;mx'])**2/ateln2
else: #generalized - P.W. Stephens model OK
Strms = G2spc.MustrainCoeff(refl[:3],SGData)
const = 1.e-6*parmDict[hfx+'difC']*refl[4+im]**3
Sum = 0
for i,strm in enumerate(Strms):
Sum += parmDict[phfx+'Mustrain;'+str(i)]*strm
gamDict[phfx+'Mustrain;'+str(i)] = strm*parmDict[phfx+'Mustrain;mx']/2.
sigDict[phfx+'Mustrain;'+str(i)] = strm*(1.-parmDict[phfx+'Mustrain;mx'])**2
Mgam = const*np.sqrt(Sum)
for i in range(len(Strms)):
gamDict[phfx+'Mustrain;'+str(i)] *= Mgam/Sum
sigDict[phfx+'Mustrain;'+str(i)] *= const**2/ateln2
gamDict[phfx+'Mustrain;mx'] = Mgam
sigDict[phfx+'Mustrain;mx'] = -2.*Mgam**2*(1.-parmDict[phfx+'Mustrain;mx'])/ateln2
return sigDict,gamDict
def GetReflPos(refl,im,wave,A,pfx,hfx,calcControls,parmDict):
    '''Compute the corrected reflection position and store the d-spacing in refl[4+im]:
    2-theta (deg, with zero-point & sample displacement corrections) for CW data, or
    TOF (from the difC/difA/difB calibration plus zero point) for TOF data.
    '''
if im:
h,k,l,m = refl[:4]
vec = np.array([parmDict[pfx+'mV0'],parmDict[pfx+'mV1'],parmDict[pfx+'mV2']])
d = 1./np.sqrt(G2lat.calc_rDsqSS(np.array([h,k,l,m]),A,vec))
else:
h,k,l = refl[:3]
d = 1./np.sqrt(G2lat.calc_rDsq(np.array([h,k,l]),A))
refl[4+im] = d
if 'C' in calcControls[hfx+'histType']:
pos = 2.0*asind(wave/(2.0*d))+parmDict[hfx+'Zero']
const = 9.e-2/(np.pi*parmDict[hfx+'Gonio. radius']) #shifts in microns
if 'Bragg' in calcControls[hfx+'instType']:
pos -= const*(4.*parmDict[hfx+'Shift']*cosd(pos/2.0)+ \
parmDict[hfx+'Transparency']*sind(pos)*100.0) #trans(=1/mueff) in cm
else: #Debye-Scherrer - simple but maybe not right
pos -= const*(parmDict[hfx+'DisplaceX']*cosd(pos)+parmDict[hfx+'DisplaceY']*sind(pos))
elif 'T' in calcControls[hfx+'histType']:
pos = parmDict[hfx+'difC']*d+parmDict[hfx+'difA']*d**2+parmDict[hfx+'difB']/d+parmDict[hfx+'Zero']
#do I need sample position effects - maybe?
return pos
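# Illustrative check (not part of the refinement path): for Cu K-alpha
# (wave = 1.5406 A) and d = 2.0 A the uncorrected CW position is
# 2.0*asind(1.5406/(2.0*2.0)) ~= 45.3 deg 2-theta, before the Zero and
# displacement/transparency corrections above are applied.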
def GetReflPosDerv(refl,im,wave,A,pfx,hfx,calcControls,parmDict):
    '''Compute derivatives of the reflection position with respect to the reciprocal
    metric terms A, wavelength (or difC/difA/difB), zero point, sample displacement
    terms and, for modulated phases, the modulation vector components.
    '''
dpr = 180./np.pi
if im:
h,k,l,m = refl[:4]
vec = np.array([parmDict[pfx+'mV0'],parmDict[pfx+'mV1'],parmDict[pfx+'mV2']])
dstsq = G2lat.calc_rDsqSS(np.array([h,k,l,m]),A,vec)
h,k,l = [h+m*vec[0],k+m*vec[1],l+m*vec[2]] #do proj of hklm to hkl so dPdA & dPdV come out right
else:
m = 0
h,k,l = refl[:3]
dstsq = G2lat.calc_rDsq(np.array([h,k,l]),A)
dst = np.sqrt(dstsq)
dsp = 1./dst
if 'C' in calcControls[hfx+'histType']:
pos = refl[5+im]-parmDict[hfx+'Zero']
const = dpr/np.sqrt(1.0-wave**2*dstsq/4.0)
dpdw = const*dst
dpdA = np.array([h**2,k**2,l**2,h*k,h*l,k*l])*const*wave/(2.0*dst)
dpdZ = 1.0
dpdV = np.array([2.*h*A[0]+k*A[3]+l*A[4],2*k*A[1]+h*A[3]+l*A[5],
2*l*A[2]+h*A[4]+k*A[5]])*m*const*wave/(2.0*dst)
shft = 9.e-2/(np.pi*parmDict[hfx+'Gonio. radius']) #shifts in microns
if 'Bragg' in calcControls[hfx+'instType']:
dpdSh = -4.*shft*cosd(pos/2.0)
dpdTr = -shft*sind(pos)*100.0
return dpdA,dpdw,dpdZ,dpdSh,dpdTr,0.,0.,dpdV
else: #Debye-Scherrer - simple but maybe not right
dpdXd = -shft*cosd(pos)
dpdYd = -shft*sind(pos)
return dpdA,dpdw,dpdZ,0.,0.,dpdXd,dpdYd,dpdV
elif 'T' in calcControls[hfx+'histType']:
dpdA = -np.array([h**2,k**2,l**2,h*k,h*l,k*l])*parmDict[hfx+'difC']*dsp**3/2.
dpdZ = 1.0
dpdDC = dsp
dpdDA = dsp**2
dpdDB = 1./dsp
dpdV = np.array([2.*h*A[0]+k*A[3]+l*A[4],2*k*A[1]+h*A[3]+l*A[5],
2*l*A[2]+h*A[4]+k*A[5]])*m*parmDict[hfx+'difC']*dsp**3/2.
return dpdA,dpdZ,dpdDC,dpdDA,dpdDB,dpdV
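# Note the two return signatures above: CW data yields
# (dpdA,dpdw,dpdZ,dpdSh,dpdTr,dpdX,dpdY,dpdV) while TOF data yields
# (dpdA,dpdZ,dpdDC,dpdDA,dpdDB,dpdV); callers unpack according to histType.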
def GetHStrainShift(refl,im,SGData,phfx,hfx,calcControls,parmDict):
    '''Compute the peak shift from the hydrostatic strain (Dij) terms appropriate to
    the Laue class; returned in deg 2-theta for CW data or TOF units for TOF data.
    '''
laue = SGData['SGLaue']
uniq = SGData['SGUniq']
h,k,l = refl[:3]
if laue in ['m3','m3m']:
Dij = parmDict[phfx+'D11']*(h**2+k**2+l**2)+ \
refl[4+im]**2*parmDict[phfx+'eA']*((h*k)**2+(h*l)**2+(k*l)**2)/(h**2+k**2+l**2)**2
elif laue in ['6/m','6/mmm','3m1','31m','3']:
Dij = parmDict[phfx+'D11']*(h**2+k**2+h*k)+parmDict[phfx+'D33']*l**2
elif laue in ['3R','3mR']:
Dij = parmDict[phfx+'D11']*(h**2+k**2+l**2)+parmDict[phfx+'D12']*(h*k+h*l+k*l)
elif laue in ['4/m','4/mmm']:
Dij = parmDict[phfx+'D11']*(h**2+k**2)+parmDict[phfx+'D33']*l**2
elif laue in ['mmm']:
Dij = parmDict[phfx+'D11']*h**2+parmDict[phfx+'D22']*k**2+parmDict[phfx+'D33']*l**2
elif laue in ['2/m']:
Dij = parmDict[phfx+'D11']*h**2+parmDict[phfx+'D22']*k**2+parmDict[phfx+'D33']*l**2
if uniq == 'a':
Dij += parmDict[phfx+'D23']*k*l
elif uniq == 'b':
Dij += parmDict[phfx+'D13']*h*l
elif uniq == 'c':
Dij += parmDict[phfx+'D12']*h*k
else:
Dij = parmDict[phfx+'D11']*h**2+parmDict[phfx+'D22']*k**2+parmDict[phfx+'D33']*l**2+ \
parmDict[phfx+'D12']*h*k+parmDict[phfx+'D13']*h*l+parmDict[phfx+'D23']*k*l
if 'C' in calcControls[hfx+'histType']:
return -180.*Dij*refl[4+im]**2*tand(refl[5+im]/2.0)/np.pi
else:
return -Dij*parmDict[hfx+'difC']*refl[4+im]**2/2.
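# The shifts above restate the coded expressions: since the Dij terms perturb
# the reciprocal metric (1/d^2), the induced peak shift is
# -(180/pi)*Dij*d^2*tan(theta) in deg 2-theta for CW data and -difC*Dij*d^2/2
# in TOF units for TOF data.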
def GetHStrainShiftDerv(refl,im,SGData,phfx,hfx,calcControls,parmDict):
    '''Compute derivatives of the hydrostatic strain peak shift with respect to the
    Dij (and, for cubic Laue classes, eA) terms.
    '''
laue = SGData['SGLaue']
uniq = SGData['SGUniq']
h,k,l = refl[:3]
if laue in ['m3','m3m']:
dDijDict = {phfx+'D11':h**2+k**2+l**2,
phfx+'eA':refl[4+im]**2*((h*k)**2+(h*l)**2+(k*l)**2)/(h**2+k**2+l**2)**2}
elif laue in ['6/m','6/mmm','3m1','31m','3']:
dDijDict = {phfx+'D11':h**2+k**2+h*k,phfx+'D33':l**2}
elif laue in ['3R','3mR']:
dDijDict = {phfx+'D11':h**2+k**2+l**2,phfx+'D12':h*k+h*l+k*l}
elif laue in ['4/m','4/mmm']:
dDijDict = {phfx+'D11':h**2+k**2,phfx+'D33':l**2}
elif laue in ['mmm']:
dDijDict = {phfx+'D11':h**2,phfx+'D22':k**2,phfx+'D33':l**2}
elif laue in ['2/m']:
dDijDict = {phfx+'D11':h**2,phfx+'D22':k**2,phfx+'D33':l**2}
if uniq == 'a':
dDijDict[phfx+'D23'] = k*l
elif uniq == 'b':
dDijDict[phfx+'D13'] = h*l
elif uniq == 'c':
dDijDict[phfx+'D12'] = h*k
else:
dDijDict = {phfx+'D11':h**2,phfx+'D22':k**2,phfx+'D33':l**2,
phfx+'D12':h*k,phfx+'D13':h*l,phfx+'D23':k*l}
if 'C' in calcControls[hfx+'histType']:
for item in dDijDict:
dDijDict[item] *= 180.0*refl[4+im]**2*tand(refl[5+im]/2.0)/np.pi
else:
for item in dDijDict:
dDijDict[item] *= -parmDict[hfx+'difC']*refl[4+im]**3/2.
return dDijDict
def GetDij(phfx,SGData,parmDict):
HSvals = [parmDict[phfx+name] for name in G2spc.HStrainNames(SGData)]
return G2spc.HStrainVals(HSvals,SGData)
def GetFobsSq(Histograms,Phases,parmDict,calcControls):
'''Compute the observed structure factors for Powder histograms and store in reflection array
Multiprocessing support added
'''
if GSASIIpath.GetConfigValue('Show_timing',False):
starttime = time.time() #; print 'start GetFobsSq'
histoList = list(Histograms.keys())
histoList.sort()
Ka2 = shl = lamRatio = kRatio = None
for histogram in histoList:
if 'PWDR' in histogram[:4]:
Histogram = Histograms[histogram]
hId = Histogram['hId']
hfx = ':%d:'%(hId)
Limits = calcControls[hfx+'Limits']
if 'C' in calcControls[hfx+'histType']:
shl = max(parmDict[hfx+'SH/L'],0.0005)
Ka2 = False
kRatio = 0.0
if hfx+'Lam1' in list(parmDict.keys()):
Ka2 = True
lamRatio = 360*(parmDict[hfx+'Lam2']-parmDict[hfx+'Lam1'])/(np.pi*parmDict[hfx+'Lam1'])
kRatio = parmDict[hfx+'I(L2)/I(L1)']
x,y,w,yc,yb,yd = Histogram['Data']
xMask = ma.getmaskarray(x)
xB = np.searchsorted(x,Limits[0])
xF = np.searchsorted(x,Limits[1])
ymb = np.array(y-yb)
ymb = np.where(ymb,ymb,1.0)
ycmb = np.array(yc-yb)
ratio = 1./np.where(ycmb,ycmb/ymb,1.e10)
refLists = Histogram['Reflection Lists']
for phase in refLists:
if phase not in Phases: #skips deleted or renamed phases silently!
continue
Phase = Phases[phase]
im = 0
if Phase['General'].get('Modulated',False):
im = 1
pId = Phase['pId']
phfx = '%d:%d:'%(pId,hId)
refDict = refLists[phase]
sumFo = 0.0
sumdF = 0.0
sumFosq = 0.0
sumdFsq = 0.0
sumInt = 0.0
nExcl = 0
# test to see if we are using multiprocessing below
useMP,ncores = G2mp.InitMP()
if len(refDict['RefList']) < 100: useMP = False
if useMP: # multiprocessing: create a set of initialized Python processes
MPpool = mp.Pool(G2mp.ncores,G2mp.InitFobsSqGlobals,
[x,ratio,shl,xB,xF,im,lamRatio,kRatio,xMask,Ka2])
profArgs = [[] for i in range(G2mp.ncores)]
else:
G2mp.InitFobsSqGlobals(x,ratio,shl,xB,xF,im,lamRatio,kRatio,xMask,Ka2)
if 'C' in calcControls[hfx+'histType']:
# are we multiprocessing?
for iref,refl in enumerate(refDict['RefList']):
if useMP:
profArgs[iref%G2mp.ncores].append((refl,iref))
else:
icod= G2mp.ComputeFobsSqCW(refl,iref)
if type(icod) is tuple:
refl[8+im] = icod[0]
sumInt += icod[1]
if parmDict[phfx+'LeBail']: refl[9+im] = refl[8+im]
elif icod == -1:
refl[3+im] *= -1
nExcl += 1
elif icod == -2:
break
if useMP:
for sInt,resList in MPpool.imap_unordered(G2mp.ComputeFobsSqCWbatch,profArgs):
sumInt += sInt
for refl8im,irefl in resList:
if refl8im is None:
refDict['RefList'][irefl][3+im] *= -1
nExcl += 1
else:
refDict['RefList'][irefl][8+im] = refl8im
if parmDict[phfx+'LeBail']:
refDict['RefList'][irefl][9+im] = refDict['RefList'][irefl][8+im]
elif 'T' in calcControls[hfx+'histType']:
for iref,refl in enumerate(refDict['RefList']):
if useMP:
profArgs[iref%G2mp.ncores].append((refl,iref))
else:
icod= G2mp.ComputeFobsSqTOF(refl,iref)
if type(icod) is tuple:
refl[8+im] = icod[0]
sumInt += icod[1]
if parmDict[phfx+'LeBail']: refl[9+im] = refl[8+im]
elif icod == -1:
refl[3+im] *= -1
nExcl += 1
elif icod == -2:
break
if useMP:
for sInt,resList in MPpool.imap_unordered(G2mp.ComputeFobsSqTOFbatch,profArgs):
sumInt += sInt
for refl8im,irefl in resList:
if refl8im is None:
refDict['RefList'][irefl][3+im] *= -1
nExcl += 1
else:
refDict['RefList'][irefl][8+im] = refl8im
if parmDict[phfx+'LeBail']:
refDict['RefList'][irefl][9+im] = refDict['RefList'][irefl][8+im]
if useMP: MPpool.terminate()
sumFo = 0.0
sumdF = 0.0
sumFosq = 0.0
sumdFsq = 0.0
for iref,refl in enumerate(refDict['RefList']):
Fo = np.sqrt(np.abs(refl[8+im]))
                    Fc = np.sqrt(np.abs(refl[9+im]))
sumFo += Fo
sumFosq += refl[8+im]**2
sumdF += np.abs(Fo-Fc)
sumdFsq += (refl[8+im]-refl[9+im])**2
if sumFo:
Histogram['Residuals'][phfx+'Rf'] = min(100.,(sumdF/sumFo)*100.)
Histogram['Residuals'][phfx+'Rf^2'] = min(100.,np.sqrt(sumdFsq/sumFosq)*100.)
else:
Histogram['Residuals'][phfx+'Rf'] = 100.
Histogram['Residuals'][phfx+'Rf^2'] = 100.
Histogram['Residuals'][phfx+'sumInt'] = sumInt
Histogram['Residuals'][phfx+'Nref'] = len(refDict['RefList'])-nExcl
Histogram['Residuals']['hId'] = hId
elif 'HKLF' in histogram[:4]:
Histogram = Histograms[histogram]
Histogram['Residuals']['hId'] = Histograms[histogram]['hId']
if GSASIIpath.GetConfigValue('Show_timing',False):
print ('GetFobsSq t=',time.time()-starttime)
def getPowderProfile(parmDict,x,varylist,Histogram,Phases,calcControls,pawleyLookup):
'Computes the powder pattern for a histogram based on contributions from all used phases'
# </ Anton Gagin
fwhm = []
xfwhm = []
# Anton Gagin />
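    # fwhm/xfwhm collect, for each generated peak, an approximate full width
    # (Gaussian FWHM 2.355*Wd[0] plus Lorentzian FWHM 2.*Wd[1]) and its position;
    # they are exported through config_example.xyFWHM at the end of this function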
if GSASIIpath.GetConfigValue('Show_timing',False): starttime = time.time()
def GetReflSigGamCW(refl,im,wave,G,GB,phfx,calcControls,parmDict):
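        #Gaussian variance follows the Caglioti relation U*tan^2(th)+V*tan(th)+W plus
        #the sample contribution Ssig; Lorentzian width is X/cos(th)+Y*tan(th)+Z plus Sgam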
U = parmDict[hfx+'U']
V = parmDict[hfx+'V']
W = parmDict[hfx+'W']
X = parmDict[hfx+'X']
Y = parmDict[hfx+'Y']
Z = parmDict[hfx+'Z']
tanPos = tand(refl[5+im]/2.0)
Ssig,Sgam = GetSampleSigGam(refl,im,wave,G,GB,SGData,hfx,phfx,calcControls,parmDict)
sig = U*tanPos**2+V*tanPos+W+Ssig #save peak sigma
sig = max(0.001,sig)
gam = X/cosd(refl[5+im]/2.0)+Y*tanPos+Sgam+Z #save peak gamma
gam = max(0.001,gam)
return sig,gam
def GetReflSigGamTOF(refl,im,G,GB,phfx,calcControls,parmDict):
sig = parmDict[hfx+'sig-0']+parmDict[hfx+'sig-1']*refl[4+im]**2+ \
parmDict[hfx+'sig-2']*refl[4+im]**4+parmDict[hfx+'sig-q']*refl[4+im]
gam = parmDict[hfx+'X']*refl[4+im]+parmDict[hfx+'Y']*refl[4+im]**2+parmDict[hfx+'Z']
Ssig,Sgam = GetSampleSigGam(refl,im,0.0,G,GB,SGData,hfx,phfx,calcControls,parmDict)
sig += Ssig
gam += Sgam
return sig,gam
def GetReflAlpBet(refl,im,hfx,parmDict):
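        #alpha is the rise and beta the decay constant of the back-to-back exponential
        #TOF peak shape; both are simple functions of the d-spacing refl[4+im]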
alp = parmDict[hfx+'alpha']/refl[4+im]
bet = parmDict[hfx+'beta-0']+parmDict[hfx+'beta-1']/refl[4+im]**4+parmDict[hfx+'beta-q']/refl[4+im]**2
return alp,bet
hId = Histogram['hId']
hfx = ':%d:'%(hId)
bakType = calcControls[hfx+'bakType']
yb,Histogram['sumBk'] = G2pwd.getBackground(hfx,parmDict,bakType,calcControls[hfx+'histType'],x)
yc = np.zeros_like(yb)
cw = np.diff(ma.getdata(x))
cw = np.append(cw,cw[-1])
if 'C' in calcControls[hfx+'histType']:
shl = max(parmDict[hfx+'SH/L'],0.002)
Ka2 = False
if hfx+'Lam1' in (parmDict.keys()):
wave = parmDict[hfx+'Lam1']
Ka2 = True
lamRatio = 360*(parmDict[hfx+'Lam2']-parmDict[hfx+'Lam1'])/(np.pi*parmDict[hfx+'Lam1'])
kRatio = parmDict[hfx+'I(L2)/I(L1)']
else:
wave = parmDict[hfx+'Lam']
else:
shl = 0.
for phase in Histogram['Reflection Lists']:
refDict = Histogram['Reflection Lists'][phase]
if phase not in Phases: #skips deleted or renamed phases silently!
continue
Phase = Phases[phase]
pId = Phase['pId']
pfx = '%d::'%(pId)
phfx = '%d:%d:'%(pId,hId)
hfx = ':%d:'%(hId)
SGData = Phase['General']['SGData']
SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
im = 0
if Phase['General'].get('Modulated',False):
SSGData = Phase['General']['SSGData']
im = 1 #offset in SS reflection list
#??
Dij = GetDij(phfx,SGData,parmDict)
        A = [parmDict[pfx+'A%d'%(i)]+Dij[i] for i in range(6)] #TODO: need to do something if Dij << 0.
G,g = G2lat.A2Gmat(A) #recip & real metric tensors
if np.any(np.diag(G)<0.) or np.any(np.isnan(A)):
raise G2obj.G2Exception('invalid metric tensor \n cell/Dij refinement not advised')
        GA,GB = G2lat.Gmat2AB(G)    #Orthogonalization matrices
Vst = np.sqrt(nl.det(G)) #V*
if not Phase['General'].get('doPawley') and not parmDict[phfx+'LeBail']:
if im:
SStructureFactor(refDict,G,hfx,pfx,SGData,SSGData,calcControls,parmDict)
elif parmDict[pfx+'isMag'] and 'N' in calcControls[hfx+'histType']:
MagStructureFactor2(refDict,G,hfx,pfx,SGData,calcControls,parmDict)
else:
StructureFactor2(refDict,G,hfx,pfx,SGData,calcControls,parmDict)
badPeak = False
# test to see if we are using multiprocessing here
useMP,ncores = G2mp.InitMP()
if len(refDict['RefList']) < 100: useMP = False
if useMP: # multiprocessing: create a set of initialized Python processes
MPpool = mp.Pool(ncores,G2mp.InitPwdrProfGlobals,[im,shl,x])
profArgs = [[] for i in range(ncores)]
if 'C' in calcControls[hfx+'histType']:
for iref,refl in enumerate(refDict['RefList']):
if im:
h,k,l,m = refl[:4]
else:
h,k,l = refl[:3]
Uniq = np.inner(refl[:3],SGMT)
refl[5+im] = GetReflPos(refl,im,wave,A,pfx,hfx,calcControls,parmDict) #corrected reflection position
Lorenz = 1./(2.*sind(refl[5+im]/2.)**2*cosd(refl[5+im]/2.)) #Lorentz correction
refl[6+im:8+im] = GetReflSigGamCW(refl,im,wave,G,GB,phfx,calcControls,parmDict) #peak sig & gam
refl[11+im:15+im] = GetIntensityCorr(refl,im,Uniq,G,g,pfx,phfx,hfx,SGData,calcControls,parmDict)
refl[11+im] *= Vst*Lorenz
if Phase['General'].get('doPawley'):
try:
if im:
pInd = pfx+'PWLref:%d'%(pawleyLookup[pfx+'%d,%d,%d,%d'%(h,k,l,m)])
else:
pInd = pfx+'PWLref:%d'%(pawleyLookup[pfx+'%d,%d,%d'%(h,k,l)])
refl[9+im] = parmDict[pInd]
except KeyError:
# print ' ***Error %d,%d,%d missing from Pawley reflection list ***'%(h,k,l)
continue
Wd,fmin,fmax = G2pwd.getWidthsCW(refl[5+im],refl[6+im],refl[7+im],shl)
# </ Anton Gagin
fwhm.append(2.355*Wd[0]+2.*Wd[1])
xfwhm.append(refl[5+im])
# Anton Gagin />
iBeg = np.searchsorted(x,refl[5+im]-fmin)
iFin = np.searchsorted(x,refl[5+im]+fmax)
if not iBeg+iFin: #peak below low limit - skip peak
continue
elif not iBeg-iFin: #peak above high limit - done
break
elif iBeg > iFin: #bad peak coeff - skip
badPeak = True
continue
if useMP:
profArgs[iref%ncores].append((refl[5+im],refl,iBeg,iFin,1.))
else:
yc[iBeg:iFin] += refl[11+im]*refl[9+im]*G2pwd.getFCJVoigt3(refl[5+im],refl[6+im],refl[7+im],shl,ma.getdata(x[iBeg:iFin])) #>90% of time spent here
if Ka2:
pos2 = refl[5+im]+lamRatio*tand(refl[5+im]/2.0) # + 360/pi * Dlam/lam * tan(th)
Wd,fmin,fmax = G2pwd.getWidthsCW(pos2,refl[6+im],refl[7+im],shl)
# </ Anton Gagin
fwhm.append(2.355*Wd[0]+2.*Wd[1])
xfwhm.append(refl[5+im])
# Anton Gagin />
iBeg = np.searchsorted(x,pos2-fmin)
iFin = np.searchsorted(x,pos2+fmax)
if not iBeg+iFin: #peak below low limit - skip peak
continue
elif not iBeg-iFin: #peak above high limit - done
return yc,yb
elif iBeg > iFin: #bad peak coeff - skip
continue
if useMP:
profArgs[iref%ncores].append((pos2,refl,iBeg,iFin,kRatio))
else:
yc[iBeg:iFin] += refl[11+im]*refl[9+im]*kRatio*G2pwd.getFCJVoigt3(pos2,refl[6+im],refl[7+im],shl,ma.getdata(x[iBeg:iFin])) #and here
elif 'T' in calcControls[hfx+'histType']:
for iref,refl in enumerate(refDict['RefList']):
if im:
h,k,l,m = refl[:4]
else:
h,k,l = refl[:3]
Uniq = np.inner(refl[:3],SGMT)
                refl[5+im] = GetReflPos(refl,im,0.0,A,pfx,hfx,calcControls,parmDict)         #corrected reflection position - #TODO - what about tabulated offset?
Lorenz = sind(abs(parmDict[hfx+'2-theta'])/2)*refl[4+im]**4 #TOF Lorentz correction
# refl[5+im] += GetHStrainShift(refl,im,SGData,phfx,hfx,calcControls,parmDict) #apply hydrostatic strain shift
refl[6+im:8+im] = GetReflSigGamTOF(refl,im,G,GB,phfx,calcControls,parmDict) #peak sig & gam
refl[12+im:14+im] = GetReflAlpBet(refl,im,hfx,parmDict) #TODO - skip if alp, bet tabulated?
refl[11+im],refl[15+im],refl[16+im],refl[17+im] = GetIntensityCorr(refl,im,Uniq,G,g,pfx,phfx,hfx,SGData,calcControls,parmDict)
refl[11+im] *= Vst*Lorenz
if Phase['General'].get('doPawley'):
try:
if im:
pInd =pfx+'PWLref:%d'%(pawleyLookup[pfx+'%d,%d,%d,%d'%(h,k,l,m)])
else:
pInd =pfx+'PWLref:%d'%(pawleyLookup[pfx+'%d,%d,%d'%(h,k,l)])
refl[9+im] = parmDict[pInd]
except KeyError:
# print ' ***Error %d,%d,%d missing from Pawley reflection list ***'%(h,k,l)
continue
Wd,fmin,fmax = G2pwd.getWidthsTOF(refl[5+im],refl[12+im],refl[13+im],refl[6+im],refl[7+im])
# </ Anton Gagin
fwhm.append(2.355*Wd[0]+2.*Wd[1])
xfwhm.append(refl[5+im])
# Anton Gagin />
iBeg = np.searchsorted(x,refl[5+im]-fmin)
iFin = np.searchsorted(x,refl[5+im]+fmax)
if not iBeg+iFin: #peak below low limit - skip peak
continue
elif not iBeg-iFin: #peak above high limit - done
break
elif iBeg > iFin: #bad peak coeff - skip
badPeak = True
continue
if useMP:
profArgs[iref%ncores].append((refl[5+im],refl,iBeg,iFin))
else:
yc[iBeg:iFin] += refl[11+im]*refl[9+im]*G2pwd.getEpsVoigt(refl[5+im],refl[12+im],refl[13+im],refl[6+im],refl[7+im],ma.getdata(x[iBeg:iFin]))/cw[iBeg:iFin]
# print 'profile calc time: %.3fs'%(time.time()-time0)
if useMP and 'C' in calcControls[hfx+'histType']:
for y in MPpool.imap_unordered(G2mp.ComputePwdrProfCW,profArgs):
yc += y
MPpool.terminate()
elif useMP:
for y in MPpool.imap_unordered(G2mp.ComputePwdrProfTOF,profArgs):
yc += y
MPpool.terminate()
if badPeak:
print ('ouch #4 bad profile coefficients yield negative peak width; some reflections skipped')
if GSASIIpath.GetConfigValue('Show_timing',False):
        print ('getPowderProfile t=%.3f'%(time.time()-starttime))
# </ Anton Gagin
config_example.xyFWHM[0][hId] = xfwhm
config_example.xyFWHM[1][hId] = fwhm
# Anton Gagin />
return yc,yb
def getPowderProfileDervMP(args):
'''Computes the derivatives of the computed powder pattern with respect to all
refined parameters.
Multiprocessing version.
'''
import pytexture as ptx
ptx.pyqlmninit() #initialize fortran arrays for spherical harmonics for each processor
parmDict,x,varylist,Histogram,Phases,rigidbodyDict,calcControls,pawleyLookup,dependentVars = args[:9]
prc=0
tprc=1
if len(args) >= 10: prc=args[9]
if len(args) >= 11: tprc=args[10]
def cellVaryDerv(pfx,SGData,dpdA):
if SGData['SGLaue'] in ['-1',]:
return [[pfx+'A0',dpdA[0]],[pfx+'A1',dpdA[1]],[pfx+'A2',dpdA[2]],
[pfx+'A3',dpdA[3]],[pfx+'A4',dpdA[4]],[pfx+'A5',dpdA[5]]]
elif SGData['SGLaue'] in ['2/m',]:
if SGData['SGUniq'] == 'a':
return [[pfx+'A0',dpdA[0]],[pfx+'A1',dpdA[1]],[pfx+'A2',dpdA[2]],[pfx+'A5',dpdA[5]]]
elif SGData['SGUniq'] == 'b':
return [[pfx+'A0',dpdA[0]],[pfx+'A1',dpdA[1]],[pfx+'A2',dpdA[2]],[pfx+'A4',dpdA[4]]]
else:
return [[pfx+'A0',dpdA[0]],[pfx+'A1',dpdA[1]],[pfx+'A2',dpdA[2]],[pfx+'A3',dpdA[3]]]
elif SGData['SGLaue'] in ['mmm',]:
return [[pfx+'A0',dpdA[0]],[pfx+'A1',dpdA[1]],[pfx+'A2',dpdA[2]]]
elif SGData['SGLaue'] in ['4/m','4/mmm']:
return [[pfx+'A0',dpdA[0]],[pfx+'A2',dpdA[2]]]
elif SGData['SGLaue'] in ['6/m','6/mmm','3m1', '31m', '3']:
return [[pfx+'A0',dpdA[0]],[pfx+'A2',dpdA[2]]]
elif SGData['SGLaue'] in ['3R', '3mR']:
return [[pfx+'A0',dpdA[0]+dpdA[1]+dpdA[2]],[pfx+'A3',dpdA[3]+dpdA[4]+dpdA[5]]]
elif SGData['SGLaue'] in ['m3m','m3']:
return [[pfx+'A0',dpdA[0]]]
# create a list of dependent variables and set up a dictionary to hold their derivatives
# dependentVars = G2mv.GetDependentVars()
depDerivDict = {}
for j in dependentVars:
depDerivDict[j] = np.zeros(shape=(len(x)))
# print 'dependent vars',dependentVars
hId = Histogram['hId']
hfx = ':%d:'%(hId)
bakType = calcControls[hfx+'bakType']
dMdv = np.zeros(shape=(len(varylist),len(x)))
dMdb,dMddb,dMdpk = G2pwd.getBackgroundDerv(hfx,parmDict,bakType,calcControls[hfx+'histType'],x)
    if prc == 0 and hfx+'Back;0' in varylist: # for now assume that Back;x vars do not appear in constraints
bBpos = varylist.index(hfx+'Back;0')
dMdv[bBpos:bBpos+len(dMdb)] += dMdb #TODO crash if bck parms tossed
names = [hfx+'DebyeA',hfx+'DebyeR',hfx+'DebyeU']
for name in varylist:
if prc == 0 and 'Debye' in name:
id = int(name.split(';')[-1])
parm = name[:int(name.rindex(';'))]
ip = names.index(parm)
dMdv[varylist.index(name)] += dMddb[3*id+ip]
names = [hfx+'BkPkpos',hfx+'BkPkint',hfx+'BkPksig',hfx+'BkPkgam']
for name in varylist:
if prc == 0 and 'BkPk' in name:
parm,id = name.split(';')
id = int(id)
if parm in names:
ip = names.index(parm)
dMdv[varylist.index(name)] += dMdpk[4*id+ip]
cw = np.diff(ma.getdata(x))
cw = np.append(cw,cw[-1])
Ka2 = False #also for TOF!
if 'C' in calcControls[hfx+'histType']:
shl = max(parmDict[hfx+'SH/L'],0.002)
if hfx+'Lam1' in (parmDict.keys()):
wave = parmDict[hfx+'Lam1']
Ka2 = True
lamRatio = 360*(parmDict[hfx+'Lam2']-parmDict[hfx+'Lam1'])/(np.pi*parmDict[hfx+'Lam1'])
kRatio = parmDict[hfx+'I(L2)/I(L1)']
else:
wave = parmDict[hfx+'Lam']
for phase in Histogram['Reflection Lists']:
refDict = Histogram['Reflection Lists'][phase]
if phase not in Phases: #skips deleted or renamed phases silently!
continue
Phase = Phases[phase]
SGData = Phase['General']['SGData']
SGMT = np.array([ops[0].T for ops in SGData['SGOps']])
im = 0
if Phase['General'].get('Modulated',False):
SSGData = Phase['General']['SSGData']
im = 1 #offset in SS reflection list
#??
pId = Phase['pId']
pfx = '%d::'%(pId)
phfx = '%d:%d:'%(pId,hId)
Dij = GetDij(phfx,SGData,parmDict)
A = [parmDict[pfx+'A%d'%(i)]+Dij[i] for i in range(6)]
G,g = G2lat.A2Gmat(A) #recip & real metric tensors
        GA,GB = G2lat.Gmat2AB(G)    #Orthogonalization matrices
if not Phase['General'].get('doPawley') and not parmDict[phfx+'LeBail']:
if im:
dFdvDict = SStructureFactorDerv(refDict,im,G,hfx,pfx,SGData,SSGData,calcControls,parmDict)
else:
if Phase['General']['Type'] == 'magnetic':
dFdvDict = MagStructureFactorDerv(refDict,G,hfx,pfx,SGData,calcControls,parmDict)
else:
dFdvDict = StructureFactorDerv2(refDict,G,hfx,pfx,SGData,calcControls,parmDict)
ApplyRBModelDervs(dFdvDict,parmDict,rigidbodyDict,Phase)
# determine the parameters that will have derivatives computed only at end
nonatomvarylist = []
for name in varylist:
if '::RBV;' not in name:
try:
aname = name.split(pfx)[1][:2]
if aname not in ['Af','dA','AU','RB','AM','Xs','Xc','Ys','Yc','Zs','Zc', \
'Tm','Xm','Ym','Zm','U1','U2','U3']: continue # skip anything not an atom or rigid body param
except IndexError:
continue
nonatomvarylist.append(name)
nonatomdependentVars = []
for name in dependentVars:
if '::RBV;' not in name:
try:
aname = name.split(pfx)[1][:2]
if aname not in ['Af','dA','AU','RB','AM','Xs','Xc','Ys','Yc','Zs','Zc', \
'Tm','Xm','Ym','Zm','U1','U2','U3']: continue # skip anything not an atom or rigid body param
except IndexError:
continue
nonatomdependentVars.append(name)
#==========================================================================================
#==========================================================================================
for iref in range(prc,len(refDict['RefList']),tprc):
refl = refDict['RefList'][iref]
if im:
h,k,l,m = refl[:4]
else:
h,k,l = refl[:3]
Uniq = np.inner(refl[:3],SGMT)
if 'T' in calcControls[hfx+'histType']:
wave = refl[14+im]
dIdsh,dIdsp,dIdpola,dIdPO,dFdODF,dFdSA,dFdAb,dFdEx = GetIntensityDerv(refl,im,wave,Uniq,G,g,pfx,phfx,hfx,SGData,calcControls,parmDict)
if 'C' in calcControls[hfx+'histType']: #CW powder
Wd,fmin,fmax = G2pwd.getWidthsCW(refl[5+im],refl[6+im],refl[7+im],shl)
else: #'T'OF
Wd,fmin,fmax = G2pwd.getWidthsTOF(refl[5+im],refl[12+im],refl[13+im],refl[6+im],refl[7+im])
iBeg = np.searchsorted(x,refl[5+im]-fmin)
iFin = np.searchsorted(x,refl[5+im]+fmax)
if not iBeg+iFin: #peak below low limit - skip peak
continue
elif not iBeg-iFin: #peak above high limit - done
break
pos = refl[5+im]
if 'C' in calcControls[hfx+'histType']:
tanth = tand(pos/2.0)
costh = cosd(pos/2.0)
lenBF = iFin-iBeg
dMdpk = np.zeros(shape=(6,lenBF))
dMdipk = G2pwd.getdFCJVoigt3(refl[5+im],refl[6+im],refl[7+im],shl,ma.getdata(x[iBeg:iFin]))
for i in range(5):
dMdpk[i] += 100.*cw[iBeg:iFin]*refl[11+im]*refl[9+im]*dMdipk[i]
dervDict = {'int':dMdpk[0],'pos':dMdpk[1],'sig':dMdpk[2],'gam':dMdpk[3],'shl':dMdpk[4],'L1/L2':np.zeros_like(dMdpk[0])}
if Ka2:
pos2 = refl[5+im]+lamRatio*tanth # + 360/pi * Dlam/lam * tan(th)
iBeg2 = np.searchsorted(x,pos2-fmin)
iFin2 = np.searchsorted(x,pos2+fmax)
if iBeg2-iFin2:
lenBF2 = iFin2-iBeg2
dMdpk2 = np.zeros(shape=(6,lenBF2))
dMdipk2 = G2pwd.getdFCJVoigt3(pos2,refl[6+im],refl[7+im],shl,ma.getdata(x[iBeg2:iFin2]))
for i in range(5):
dMdpk2[i] = 100.*cw[iBeg2:iFin2]*refl[11+im]*refl[9+im]*kRatio*dMdipk2[i]
dMdpk2[5] = 100.*cw[iBeg2:iFin2]*refl[11+im]*dMdipk2[0]
dervDict2 = {'int':dMdpk2[0],'pos':dMdpk2[1],'sig':dMdpk2[2],'gam':dMdpk2[3],'shl':dMdpk2[4],'L1/L2':dMdpk2[5]*refl[9]}
else: #'T'OF
lenBF = iFin-iBeg
if lenBF < 0: #bad peak coeff
break
dMdpk = np.zeros(shape=(6,lenBF))
dMdipk = G2pwd.getdEpsVoigt(refl[5+im],refl[12+im],refl[13+im],refl[6+im],refl[7+im],ma.getdata(x[iBeg:iFin]))
for i in range(6):
dMdpk[i] += refl[11+im]*refl[9+im]*dMdipk[i] #cw[iBeg:iFin]*
dervDict = {'int':dMdpk[0],'pos':dMdpk[1],'alp':dMdpk[2],'bet':dMdpk[3],'sig':dMdpk[4],'gam':dMdpk[5]}
if Phase['General'].get('doPawley'):
dMdpw = np.zeros(len(x))
try:
if im:
pIdx = pfx+'PWLref:'+str(pawleyLookup[pfx+'%d,%d,%d,%d'%(h,k,l,m)])
else:
pIdx = pfx+'PWLref:'+str(pawleyLookup[pfx+'%d,%d,%d'%(h,k,l)])
idx = varylist.index(pIdx)
dMdpw[iBeg:iFin] = dervDict['int']/refl[9+im]
if Ka2: #not for TOF either
dMdpw[iBeg2:iFin2] += dervDict2['int']/refl[9+im]
dMdv[idx] = dMdpw
except: # ValueError:
pass
if 'C' in calcControls[hfx+'histType']:
dpdA,dpdw,dpdZ,dpdSh,dpdTr,dpdX,dpdY,dpdV = GetReflPosDerv(refl,im,wave,A,pfx,hfx,calcControls,parmDict)
names = {hfx+'Scale':[dIdsh,'int'],hfx+'Polariz.':[dIdpola,'int'],phfx+'Scale':[dIdsp,'int'],
hfx+'U':[tanth**2,'sig'],hfx+'V':[tanth,'sig'],hfx+'W':[1.0,'sig'],
hfx+'X':[1.0/costh,'gam'],hfx+'Y':[tanth,'gam'],hfx+'Z':[1.0,'gam'],hfx+'SH/L':[1.0,'shl'],
hfx+'I(L2)/I(L1)':[1.0,'L1/L2'],hfx+'Zero':[dpdZ,'pos'],hfx+'Lam':[dpdw,'pos'],
hfx+'Shift':[dpdSh,'pos'],hfx+'Transparency':[dpdTr,'pos'],hfx+'DisplaceX':[dpdX,'pos'],
hfx+'DisplaceY':[dpdY,'pos'],}
if 'Bragg' in calcControls[hfx+'instType']:
names.update({hfx+'SurfRoughA':[dFdAb[0],'int'],
hfx+'SurfRoughB':[dFdAb[1],'int'],})
else:
names.update({hfx+'Absorption':[dFdAb,'int'],})
else: #'T'OF
dpdA,dpdZ,dpdDC,dpdDA,dpdDB,dpdV = GetReflPosDerv(refl,im,0.0,A,pfx,hfx,calcControls,parmDict)
names = {hfx+'Scale':[dIdsh,'int'],phfx+'Scale':[dIdsp,'int'],
hfx+'difC':[dpdDC,'pos'],hfx+'difA':[dpdDA,'pos'],hfx+'difB':[dpdDB,'pos'],
hfx+'Zero':[dpdZ,'pos'],hfx+'X':[refl[4+im],'gam'],hfx+'Y':[refl[4+im]**2,'gam'],hfx+'Z':[1.0,'gam'],
hfx+'alpha':[1./refl[4+im],'alp'],hfx+'beta-0':[1.0,'bet'],hfx+'beta-1':[1./refl[4+im]**4,'bet'],
hfx+'beta-q':[1./refl[4+im]**2,'bet'],hfx+'sig-0':[1.0,'sig'],hfx+'sig-1':[refl[4+im]**2,'sig'],
hfx+'sig-2':[refl[4+im]**4,'sig'],hfx+'sig-q':[refl[4+im],'sig'],
hfx+'Absorption':[dFdAb,'int'],phfx+'Extinction':[dFdEx,'int'],}
for name in names:
item = names[name]
if name in varylist:
dMdv[varylist.index(name)][iBeg:iFin] += item[0]*dervDict[item[1]]
if Ka2 and iFin2-iBeg2:
dMdv[varylist.index(name)][iBeg2:iFin2] += item[0]*dervDict2[item[1]]
elif name in dependentVars:
depDerivDict[name][iBeg:iFin] += item[0]*dervDict[item[1]]
if Ka2 and iFin2-iBeg2:
depDerivDict[name][iBeg2:iFin2] += item[0]*dervDict2[item[1]]
for iPO in dIdPO:
if iPO in varylist:
dMdv[varylist.index(iPO)][iBeg:iFin] += dIdPO[iPO]*dervDict['int']
if Ka2 and iFin2-iBeg2:
dMdv[varylist.index(iPO)][iBeg2:iFin2] += dIdPO[iPO]*dervDict2['int']
elif iPO in dependentVars:
depDerivDict[iPO][iBeg:iFin] += dIdPO[iPO]*dervDict['int']
if Ka2 and iFin2-iBeg2:
depDerivDict[iPO][iBeg2:iFin2] += dIdPO[iPO]*dervDict2['int']
for i,name in enumerate(['omega','chi','phi']):
aname = pfx+'SH '+name
if aname in varylist:
dMdv[varylist.index(aname)][iBeg:iFin] += dFdSA[i]*dervDict['int']
if Ka2 and iFin2-iBeg2:
dMdv[varylist.index(aname)][iBeg2:iFin2] += dFdSA[i]*dervDict2['int']
elif aname in dependentVars:
depDerivDict[aname][iBeg:iFin] += dFdSA[i]*dervDict['int']
if Ka2 and iFin2-iBeg2:
depDerivDict[aname][iBeg2:iFin2] += dFdSA[i]*dervDict2['int']
for iSH in dFdODF:
if iSH in varylist:
dMdv[varylist.index(iSH)][iBeg:iFin] += dFdODF[iSH]*dervDict['int']
if Ka2 and iFin2-iBeg2:
dMdv[varylist.index(iSH)][iBeg2:iFin2] += dFdODF[iSH]*dervDict2['int']
elif iSH in dependentVars:
depDerivDict[iSH][iBeg:iFin] += dFdODF[iSH]*dervDict['int']
if Ka2 and iFin2-iBeg2:
depDerivDict[iSH][iBeg2:iFin2] += dFdODF[iSH]*dervDict2['int']
cellDervNames = cellVaryDerv(pfx,SGData,dpdA)
for name,dpdA in cellDervNames:
if name in varylist:
dMdv[varylist.index(name)][iBeg:iFin] += dpdA*dervDict['pos']
if Ka2 and iFin2-iBeg2:
dMdv[varylist.index(name)][iBeg2:iFin2] += dpdA*dervDict2['pos']
elif name in dependentVars: #need to scale for mixed phase constraints?
depDerivDict[name][iBeg:iFin] += dpdA*dervDict['pos']
if Ka2 and iFin2-iBeg2:
depDerivDict[name][iBeg2:iFin2] += dpdA*dervDict2['pos']
dDijDict = GetHStrainShiftDerv(refl,im,SGData,phfx,hfx,calcControls,parmDict)
for name in dDijDict:
if name in varylist:
dMdv[varylist.index(name)][iBeg:iFin] += dDijDict[name]*dervDict['pos']
if Ka2 and iFin2-iBeg2:
dMdv[varylist.index(name)][iBeg2:iFin2] += dDijDict[name]*dervDict2['pos']
elif name in dependentVars:
depDerivDict[name][iBeg:iFin] += dDijDict[name]*dervDict['pos']
if Ka2 and iFin2-iBeg2:
depDerivDict[name][iBeg2:iFin2] += dDijDict[name]*dervDict2['pos']
for i,name in enumerate([pfx+'mV0',pfx+'mV1',pfx+'mV2']):
if name in varylist:
dMdv[varylist.index(name)][iBeg:iFin] += dpdV[i]*dervDict['pos']
if Ka2 and iFin2-iBeg2:
dMdv[varylist.index(name)][iBeg2:iFin2] += dpdV[i]*dervDict2['pos']
elif name in dependentVars:
depDerivDict[name][iBeg:iFin] += dpdV[i]*dervDict['pos']
if Ka2 and iFin2-iBeg2:
depDerivDict[name][iBeg2:iFin2] += dpdV[i]*dervDict2['pos']
if 'C' in calcControls[hfx+'histType']:
sigDict,gamDict = GetSampleSigGamDerv(refl,im,wave,G,GB,SGData,hfx,phfx,calcControls,parmDict)
else: #'T'OF
sigDict,gamDict = GetSampleSigGamDerv(refl,im,0.0,G,GB,SGData,hfx,phfx,calcControls,parmDict)
for name in gamDict:
if name in varylist:
dMdv[varylist.index(name)][iBeg:iFin] += gamDict[name]*dervDict['gam']
if Ka2 and iFin2-iBeg2:
dMdv[varylist.index(name)][iBeg2:iFin2] += gamDict[name]*dervDict2['gam']
elif name in dependentVars:
depDerivDict[name][iBeg:iFin] += gamDict[name]*dervDict['gam']
if Ka2 and iFin2-iBeg2:
depDerivDict[name][iBeg2:iFin2] += gamDict[name]*dervDict2['gam']
for name in sigDict:
if name in varylist:
dMdv[varylist.index(name)][iBeg:iFin] += sigDict[name]*dervDict['sig']
if Ka2 and iFin2-iBeg2:
dMdv[varylist.index(name)][iBeg2:iFin2] += sigDict[name]*dervDict2['sig']
elif name in dependentVars:
depDerivDict[name][iBeg:iFin] += sigDict[name]*dervDict['sig']
if Ka2 and iFin2-iBeg2:
depDerivDict[name][iBeg2:iFin2] += sigDict[name]*dervDict2['sig']
for name in ['BabA','BabU']:
if refl[9+im]:
if phfx+name in varylist:
dMdv[varylist.index(phfx+name)][iBeg:iFin] += parmDict[phfx+'Scale']*dFdvDict[phfx+name][iref]*dervDict['int']/refl[9+im]
if Ka2 and iFin2-iBeg2:
dMdv[varylist.index(phfx+name)][iBeg2:iFin2] += parmDict[phfx+'Scale']*dFdvDict[phfx+name][iref]*dervDict2['int']/refl[9+im]
elif phfx+name in dependentVars:
depDerivDict[phfx+name][iBeg:iFin] += parmDict[phfx+'Scale']*dFdvDict[phfx+name][iref]*dervDict['int']/refl[9+im]
if Ka2 and iFin2-iBeg2:
depDerivDict[phfx+name][iBeg2:iFin2] += parmDict[phfx+'Scale']*dFdvDict[phfx+name][iref]*dervDict2['int']/refl[9+im]
if not Phase['General'].get('doPawley') and not parmDict[phfx+'LeBail']:
#do atom derivatives - for RB,F,X & U so far - how do I scale mixed phase constraints?
corr = 0.
corr2 = 0.
if refl[9+im]:
corr = dervDict['int']/refl[9+im]
#if Ka2 and iFin2-iBeg2:
# corr2 = dervDict2['int']/refl[9+im]
for name in nonatomvarylist:
dMdv[varylist.index(name)][iBeg:iFin] += dFdvDict[name][iref]*corr
if Ka2 and iFin2-iBeg2:
dMdv[varylist.index(name)][iBeg2:iFin2] += dFdvDict[name][iref]*corr2
for name in nonatomdependentVars:
depDerivDict[name][iBeg:iFin] += dFdvDict[name][iref]*corr
if Ka2 and iFin2-iBeg2:
depDerivDict[name][iBeg2:iFin2] += dFdvDict[name][iref]*corr2
# now process derivatives in constraints
dMdv[:,ma.getmaskarray(x)] = 0. # instead of masking, zero out masked values
#G2mv.Dict2Deriv(varylist,depDerivDict,dMdv)
return dMdv,depDerivDict
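# Under multiprocessing each worker receives (prc,tprc) and handles every tprc-th
# reflection starting at prc (the range(prc,...,tprc) loop above); the per-worker
# dMdv and depDerivDict results are summed by the caller (see HessRefine).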
def UserRejectHKL(ref,im,userReject):
if ref[5+im]/ref[6+im] < userReject['minF/sig']:
return False
    elif ref[4+im] > userReject['MaxD'] or ref[4+im] < userReject['MinD']: #reject if d outside [MinD,MaxD]
return False
elif ref[11+im] < userReject['MinExt']:
return False
elif abs(ref[5+im]-ref[7+im])/ref[6+im] > userReject['MaxDF/F']:
return False
return True
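# Illustrative call (key names as used above; the threshold values are examples only):
#   userReject = {'minF/sig':2., 'MinExt':0.01, 'MaxDF/F':100., 'MaxD':500., 'MinD':0.05}
#   keep = UserRejectHKL(ref,im,userReject)    #False -> reflection is rejected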
def dervHKLF(Histogram,Phase,calcControls,varylist,parmDict,rigidbodyDict):
'''Loop over reflections in a HKLF histogram and compute derivatives of the fitting
    model (M) with respect to all parameters. Independent and dependent dM/dp arrays
    are returned to either dervRefine or HessRefine.
    :returns: dMdvh,depDerivDict,wdf
'''
hId = Histogram['hId']
hfx = ':%d:'%(hId)
pfx = '%d::'%(Phase['pId'])
phfx = '%d:%d:'%(Phase['pId'],hId)
SGData = Phase['General']['SGData']
im = 0
if Phase['General'].get('Modulated',False):
SSGData = Phase['General']['SSGData']
im = 1 #offset in SS reflection list
A = [parmDict[pfx+'A%d'%(i)] for i in range(6)]
G,g = G2lat.A2Gmat(A) #recip & real metric tensors
TwinLaw = calcControls[phfx+'TwinLaw']
refDict = Histogram['Data']
if parmDict[phfx+'Scale'] < 0.:
parmDict[phfx+'Scale'] = .001
if im: # split to nontwin/twin versions
if len(TwinLaw) > 1:
dFdvDict = SStructureFactorDervTw(refDict,im,G,hfx,pfx,SGData,SSGData,calcControls,parmDict) #?
else:
dFdvDict = SStructureFactorDerv(refDict,im,G,hfx,pfx,SGData,SSGData,calcControls,parmDict) #OK
else:
if len(TwinLaw) > 1:
dFdvDict = StructureFactorDervTw2(refDict,G,hfx,pfx,SGData,calcControls,parmDict)
else: #correct!!
if Phase['General']['Type'] == 'magnetic': #is this going to work for single crystal mag data?
dFdvDict = MagStructureFactorDerv2(refDict,G,hfx,pfx,SGData,calcControls,parmDict)
else:
dFdvDict = StructureFactorDerv2(refDict,G,hfx,pfx,SGData,calcControls,parmDict)
ApplyRBModelDervs(dFdvDict,parmDict,rigidbodyDict,Phase)
dMdvh = np.zeros((len(varylist),len(refDict['RefList'])))
dependentVars = G2mv.GetDependentVars()
depDerivDict = {}
for j in dependentVars:
depDerivDict[j] = np.zeros(shape=(len(refDict['RefList'])))
wdf = np.zeros(len(refDict['RefList']))
if calcControls['F**2']:
for iref,ref in enumerate(refDict['RefList']):
if ref[6+im] > 0:
dervDict,dervCor = SCExtinction(ref,im,phfx,hfx,pfx,calcControls,parmDict,varylist+dependentVars)[1:]
w = 1.0/ref[6+im]
if ref[3+im] > 0:
wdf[iref] = w*(ref[5+im]-ref[7+im])
for j,var in enumerate(varylist):
if var in dFdvDict:
dMdvh[j][iref] = w*dFdvDict[var][iref]*parmDict[phfx+'Scale']*ref[11+im]
for var in dependentVars:
if var in dFdvDict:
depDerivDict[var][iref] = w*dFdvDict[var][iref]*parmDict[phfx+'Scale']*ref[11+im]
if phfx+'Scale' in varylist:
dMdvh[varylist.index(phfx+'Scale')][iref] = w*ref[7+im]*ref[11+im]/parmDict[phfx+'Scale'] #OK
elif phfx+'Scale' in dependentVars:
depDerivDict[phfx+'Scale'][iref] = w*ref[7+im]*ref[11+im]/parmDict[phfx+'Scale'] #OK
for item in ['Ep','Es','Eg']:
if phfx+item in varylist and phfx+item in dervDict:
dMdvh[varylist.index(phfx+item)][iref] = w*dervDict[phfx+item]/ref[11+im] #OK
elif phfx+item in dependentVars and phfx+item in dervDict:
depDerivDict[phfx+item][iref] = w*dervDict[phfx+item]/ref[11+im] #OK
for item in ['BabA','BabU']:
if phfx+item in varylist:
dMdvh[varylist.index(phfx+item)][iref] = w*dFdvDict[phfx+item][iref]*parmDict[phfx+'Scale']*ref[11+im]
elif phfx+item in dependentVars:
depDerivDict[phfx+item][iref] = w*dFdvDict[phfx+item][iref]*parmDict[phfx+'Scale']*ref[11+im]
else: #F refinement
for iref,ref in enumerate(refDict['RefList']):
if ref[5+im] > 0.:
dervDict,dervCor = SCExtinction(ref,im,phfx,hfx,pfx,calcControls,parmDict,varylist+dependentVars)[1:]
Fo = np.sqrt(ref[5+im])
Fc = np.sqrt(ref[7+im])
w = 1.0/ref[6+im]
if ref[3+im] > 0:
wdf[iref] = 2.0*Fc*w*(Fo-Fc)
for j,var in enumerate(varylist):
if var in dFdvDict:
dMdvh[j][iref] = w*dFdvDict[var][iref]*parmDict[phfx+'Scale']*ref[11+im]
for var in dependentVars:
if var in dFdvDict:
depDerivDict[var][iref] = w*dFdvDict[var][iref]*parmDict[phfx+'Scale']*ref[11+im]
if phfx+'Scale' in varylist:
dMdvh[varylist.index(phfx+'Scale')][iref] = w*ref[7+im]*ref[11+im]/parmDict[phfx+'Scale'] #OK
elif phfx+'Scale' in dependentVars:
depDerivDict[phfx+'Scale'][iref] = w*ref[7+im]*ref[11+im]/parmDict[phfx+'Scale'] #OK
for item in ['Ep','Es','Eg']: #OK!
if phfx+item in varylist and phfx+item in dervDict:
dMdvh[varylist.index(phfx+item)][iref] = w*dervDict[phfx+item]/ref[11+im]
elif phfx+item in dependentVars and phfx+item in dervDict:
depDerivDict[phfx+item][iref] = w*dervDict[phfx+item]/ref[11+im]
for item in ['BabA','BabU']:
if phfx+item in varylist:
dMdvh[varylist.index(phfx+item)][iref] = w*dFdvDict[phfx+item][iref]*parmDict[phfx+'Scale']*ref[11+im]
elif phfx+item in dependentVars:
depDerivDict[phfx+item][iref] = w*dFdvDict[phfx+item][iref]*parmDict[phfx+'Scale']*ref[11+im]
return dMdvh,depDerivDict,wdf
def dervRefine(values,HistoPhases,parmDict,varylist,calcControls,pawleyLookup,dlg):
'''Loop over histograms and compute derivatives of the fitting
model (M) with respect to all parameters. Results are returned in
a Jacobian matrix (aka design matrix) of dimensions (n by m) where
n is the number of parameters and m is the number of data
points. This can exceed memory when m gets large. This routine is
    used when refinement derivatives are selected as "analytic
    Jacobian" in Controls.
    :returns: Jacobian numpy.array dMdv for all histograms concatenated
'''
parmDict.update(zip(varylist,values))
G2mv.Dict2Map(parmDict,varylist)
Histograms,Phases,restraintDict,rigidbodyDict = HistoPhases
dependentVars = G2mv.GetDependentVars()
histoList = list(Histograms.keys())
histoList.sort()
First = True
for histogram in histoList:
if 'PWDR' in histogram[:4]:
Histogram = Histograms[histogram]
hId = Histogram['hId']
hfx = ':%d:'%(hId)
wtFactor = calcControls[hfx+'wtFactor']
Limits = calcControls[hfx+'Limits']
x,y,w,yc,yb,yd = Histogram['Data']
xB = np.searchsorted(x,Limits[0])
xF = np.searchsorted(x,Limits[1])+1
dMdv,depDerivDict = getPowderProfileDervMP([parmDict,x[xB:xF],
varylist,Histogram,Phases,rigidbodyDict,calcControls,pawleyLookup,dependentVars])
G2mv.Dict2Deriv(varylist,depDerivDict,dMdv)
dMdvh = np.sqrt(w[xB:xF])*dMdv
elif 'HKLF' in histogram[:4]:
Histogram = Histograms[histogram]
phase = Histogram['Reflection Lists']
Phase = Phases[phase]
dMdvh,depDerivDict,wdf = dervHKLF(Histogram,Phase,calcControls,varylist,parmDict,rigidbodyDict)
hfx = ':%d:'%(Histogram['hId'])
wtFactor = calcControls[hfx+'wtFactor']
# now process derivatives in constraints
G2mv.Dict2Deriv(varylist,depDerivDict,dMdvh)
else:
continue #skip non-histogram entries
if First:
dMdv_joint = np.sqrt(wtFactor)*dMdvh
First = False
else:
dMdv_joint = np.concatenate((dMdv_joint.T,np.sqrt(wtFactor)*dMdvh.T)).T
GetFobsSq(Histograms,Phases,parmDict,calcControls)
pNames,pVals,pWt,pWsum,pWnum = penaltyFxn(HistoPhases,calcControls,parmDict,varylist)
if np.any(pVals):
dpdv = penaltyDeriv(pNames,pVals,HistoPhases,calcControls,parmDict,varylist)
dMdv_joint = np.concatenate((dMdv_joint.T,(np.sqrt(pWt)*dpdv).T)).T
return dMdv_joint
def HessRefine(values,HistoPhases,parmDict,varylist,calcControls,pawleyLookup,dlg):
'''Loop over histograms and compute derivatives of the fitting
model (M) with respect to all parameters. For each histogram, the
Jacobian matrix, dMdv, with dimensions (n by m) where n is the
number of parameters and m is the number of data points *in the
histogram*. The (n by n) Hessian is computed from each Jacobian
and it is returned. This routine is used when refinement
    derivatives are selected as "analytic Hessian" in Controls.
:returns: Vec,Hess where Vec is the least-squares vector and Hess is the Hessian
'''
parmDict.update(zip(varylist,values))
G2mv.Dict2Map(parmDict,varylist)
Histograms,Phases,restraintDict,rigidbodyDict = HistoPhases
dependentVars = G2mv.GetDependentVars()
#fixup H atom positions here?
ApplyRBModels(parmDict,Phases,rigidbodyDict) #,Update=True??
Hess = np.empty(0)
Vec = np.empty(0)
histoList = list(Histograms.keys())
histoList.sort()
for histogram in histoList:
if 'PWDR' in histogram[:4]:
Histogram = Histograms[histogram]
hId = Histogram['hId']
hfx = ':%d:'%(hId)
wtFactor = calcControls[hfx+'wtFactor']
Limits = calcControls[hfx+'Limits']
x,y,w,yc,yb,yd = Histogram['Data']
W = wtFactor*w
dy = y-yc
xB = np.searchsorted(x,Limits[0])
xF = np.searchsorted(x,Limits[1])+1
useMP,ncores = G2mp.InitMP()
if GSASIIpath.GetConfigValue('Show_timing',False): starttime = time.time()
if useMP:
MPpool = mp.Pool(ncores)
dMdvh = None
depDerivDict = None
profArgs = [
(parmDict,x[xB:xF],varylist,Histogram,Phases,rigidbodyDict,calcControls,pawleyLookup,dependentVars,
i,ncores) for i in range(ncores)]
for dmdv,depDerivs in MPpool.imap_unordered(getPowderProfileDervMP,profArgs):
if dMdvh is None:
dMdvh = dmdv
depDerivDict = depDerivs
else:
dMdvh += dmdv
for key in depDerivs.keys(): depDerivDict[key] += depDerivs[key]
MPpool.terminate()
else:
dMdvh,depDerivDict = getPowderProfileDervMP([parmDict,x[xB:xF],
varylist,Histogram,Phases,rigidbodyDict,calcControls,pawleyLookup,dependentVars])
#dMdvh = getPowderProfileDerv(parmDict,x[xB:xF],
# varylist,Histogram,Phases,rigidbodyDict,calcControls,pawleyLookup,dependentVars)
G2mv.Dict2Deriv(varylist,depDerivDict,dMdvh)
            if GSASIIpath.GetConfigValue('Show_timing',False): print ('getPowderProfileDerv t=%.3f'%(time.time()-starttime))
Wt = ma.sqrt(W[xB:xF])[nxs,:]
Dy = dy[xB:xF][nxs,:]
dMdvh *= Wt
if dlg:
dlg.Update(Histogram['Residuals']['wR'],newmsg='Hessian for histogram %d\nAll data Rw=%8.3f%s'%(hId,Histogram['Residuals']['wR'],'%'))
dlg.Raise()
if len(Hess):
Hess += np.inner(dMdvh,dMdvh)
dMdvh *= Wt*Dy
Vec += np.sum(dMdvh,axis=1)
else:
Hess = np.inner(dMdvh,dMdvh)
dMdvh *= Wt*Dy
Vec = np.sum(dMdvh,axis=1)
elif 'HKLF' in histogram[:4]:
Histogram = Histograms[histogram]
phase = Histogram['Reflection Lists']
Phase = Phases[phase]
dMdvh,depDerivDict,wdf = dervHKLF(Histogram,Phase,calcControls,varylist,parmDict,rigidbodyDict)
hId = Histogram['hId']
hfx = ':%d:'%(Histogram['hId'])
wtFactor = calcControls[hfx+'wtFactor']
# now process derivatives in constraints
G2mv.Dict2Deriv(varylist,depDerivDict,dMdvh)
# print 'matrix build time: %.3f'%(time.time()-time0)
if dlg:
dlg.Update(Histogram['Residuals']['wR'],newmsg='Hessian for histogram %d Rw=%8.3f%s'%(hId,Histogram['Residuals']['wR'],'%'))[0]
dlg.Raise()
if len(Hess):
Vec += wtFactor*np.sum(dMdvh*wdf,axis=1)
Hess += wtFactor*np.inner(dMdvh,dMdvh)
else:
Vec = wtFactor*np.sum(dMdvh*wdf,axis=1)
Hess = wtFactor*np.inner(dMdvh,dMdvh)
else:
continue #skip non-histogram entries
GetFobsSq(Histograms,Phases,parmDict,calcControls)
pNames,pVals,pWt,pWsum,pWnum = penaltyFxn(HistoPhases,calcControls,parmDict,varylist)
if np.any(pVals):
dpdv = penaltyDeriv(pNames,pVals,HistoPhases,calcControls,parmDict,varylist)
Vec -= np.sum(dpdv*pWt*pVals,axis=1)
Hess += np.inner(dpdv*pWt,dpdv)
return Vec,Hess
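# HessRefine builds the Gauss-Newton normal equations: with J the weight-scaled
# Jacobian (dMdvh), Hess = J.J^T and Vec = J.(sqrt(w)*dy), so one refinement step
# solves Hess*dp = Vec for the parameter shifts dp.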
def errRefine(values,HistoPhases,parmDict,varylist,calcControls,pawleyLookup,dlg=None):
    '''Computes the point-by-point discrepancies between the computed pattern and the
    observed data for every point in every histogram. Used by the Jacobian, Hessian &
    numeric least-squares routines to compute the function residuals.
:returns: an np array of differences between observed and computed diffraction values.
'''
Values2Dict(parmDict, varylist, values)
G2mv.Dict2Map(parmDict,varylist)
Histograms,Phases,restraintDict,rigidbodyDict = HistoPhases
M = np.empty(0)
SumwYo = 0
Nobs = 0
Nrej = 0
Next = 0
ApplyRBModels(parmDict,Phases,rigidbodyDict)
#fixup Hatom positions here....
histoList = list(Histograms.keys())
histoList.sort()
for histogram in histoList:
if 'PWDR' in histogram[:4]:
Histogram = Histograms[histogram]
hId = Histogram['hId']
hfx = ':%d:'%(hId)
wtFactor = calcControls[hfx+'wtFactor']
Limits = calcControls[hfx+'Limits']
x,y,w,yc,yb,yd = Histogram['Data']
yc *= 0.0 #zero full calcd profiles
yb *= 0.0
yd *= 0.0
xB = np.searchsorted(x,Limits[0])
xF = np.searchsorted(x,Limits[1])+1
yc[xB:xF],yb[xB:xF] = getPowderProfile(parmDict,x[xB:xF],
varylist,Histogram,Phases,calcControls,pawleyLookup)
yc[xB:xF] += yb[xB:xF]
if not np.any(y): #fill dummy data
try:
rv = st.poisson(yc[xB:xF])
y[xB:xF] = rv.rvs()
except ValueError:
y[xB:xF] = yc[xB:xF]
Z = np.ones_like(yc[xB:xF])
Z[1::2] *= -1
y[xB:xF] = yc[xB:xF]+np.abs(y[xB:xF]-yc[xB:xF])*Z
w[xB:xF] = np.where(y[xB:xF]>0.,1./y[xB:xF],1.0)
yd[xB:xF] = y[xB:xF]-yc[xB:xF]
W = wtFactor*w
wdy = -ma.sqrt(w[xB:xF])*(yd[xB:xF])
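            #Durbin-Watson statistic: ~2 for serially uncorrelated residuals; <2 indicates positive serial correlation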
Histogram['Residuals']['Durbin-Watson'] = ma.sum(ma.diff(wdy)**2)/ma.sum(wdy**2)
wdy *= wtFactor
Histogram['Residuals']['Nobs'] = ma.count(x[xB:xF])
Nobs += Histogram['Residuals']['Nobs']
Histogram['Residuals']['sumwYo'] = ma.sum(W[xB:xF]*y[xB:xF]**2)
SumwYo += Histogram['Residuals']['sumwYo']
Histogram['Residuals']['R'] = min(100.,ma.sum(ma.abs(yd[xB:xF]))/ma.sum(y[xB:xF])*100.)
Histogram['Residuals']['wR'] = min(100.,ma.sqrt(ma.sum(wdy**2)/Histogram['Residuals']['sumwYo'])*100.)
sumYmB = ma.sum(ma.where(yc[xB:xF]!=yb[xB:xF],ma.abs(y[xB:xF]-yb[xB:xF]),0.))
sumwYmB2 = ma.sum(ma.where(yc[xB:xF]!=yb[xB:xF],W[xB:xF]*(y[xB:xF]-yb[xB:xF])**2,0.))
sumYB = ma.sum(ma.where(yc[xB:xF]!=yb[xB:xF],ma.abs(y[xB:xF]-yc[xB:xF])*ma.abs(y[xB:xF]-yb[xB:xF])/y[xB:xF],0.))
sumwYB2 = ma.sum(ma.where(yc[xB:xF]!=yb[xB:xF],W[xB:xF]*(ma.abs(y[xB:xF]-yc[xB:xF])*ma.abs(y[xB:xF]-yb[xB:xF])/y[xB:xF])**2,0.))
Histogram['Residuals']['Rb'] = min(100.,100.*sumYB/sumYmB)
Histogram['Residuals']['wRb'] = min(100.,100.*ma.sqrt(sumwYB2/sumwYmB2))
Histogram['Residuals']['wRmin'] = min(100.,100.*ma.sqrt(Histogram['Residuals']['Nobs']/Histogram['Residuals']['sumwYo']))
if dlg:
dlg.Update(Histogram['Residuals']['wR'],newmsg='For histogram %d Rw=%8.3f%s'%(hId,Histogram['Residuals']['wR'],'%'))[0]
dlg.Raise()
M = np.concatenate((M,wdy))
#end of PWDR processing
elif 'HKLF' in histogram[:4]:
Histogram = Histograms[histogram]
Histogram['Residuals'] = {}
phase = Histogram['Reflection Lists']
Phase = Phases[phase]
hId = Histogram['hId']
hfx = ':%d:'%(hId)
wtFactor = calcControls[hfx+'wtFactor']
pfx = '%d::'%(Phase['pId'])
phfx = '%d:%d:'%(Phase['pId'],hId)
SGData = Phase['General']['SGData']
TwinLaw = calcControls[phfx+'TwinLaw']
im = 0
if parmDict[phfx+'Scale'] < 0.:
parmDict[phfx+'Scale'] = .001
if Phase['General'].get('Modulated',False):
SSGData = Phase['General']['SSGData']
im = 1 #offset in SS reflection list
A = [parmDict[pfx+'A%d'%(i)] for i in range(6)]
G,g = G2lat.A2Gmat(A) #recip & real metric tensors
refDict = Histogram['Data']
if im:
if len(TwinLaw) > 1:
SStructureFactorTw(refDict,G,hfx,pfx,SGData,SSGData,calcControls,parmDict)
else:
SStructureFactor(refDict,G,hfx,pfx,SGData,SSGData,calcControls,parmDict)
else:
StructureFactor2(refDict,G,hfx,pfx,SGData,calcControls,parmDict)
# print 'sf-calc time: %.3f'%(time.time()-time0)
df = np.zeros(len(refDict['RefList']))
sumwYo = 0
sumFo = 0
sumFo2 = 0
sumFc2 = 0
sumdF = 0
sumdF2 = 0
if im:
sumSSFo = np.zeros(10)
sumSSFo2 = np.zeros(10)
sumSSdF = np.zeros(10)
sumSSdF2 = np.zeros(10)
sumSSwYo = np.zeros(10)
sumSSwdf2 = np.zeros(10)
SSnobs = np.zeros(10)
nobs = 0
nrej = 0
next = 0
maxH = 0
if calcControls['F**2']:
for i,ref in enumerate(refDict['RefList']):
if ref[6+im] > 0:
ref[11+im] = SCExtinction(ref,im,phfx,hfx,pfx,calcControls,parmDict,varylist)[0]
w = 1.0/ref[6+im] # 1/sig(F^2)
ref[7+im] *= parmDict[phfx+'Scale']*ref[11+im] #correct Fc^2 for extinction
ref[8+im] = ref[5+im]/(parmDict[phfx+'Scale']*ref[11+im])
if UserRejectHKL(ref,im,calcControls['UsrReject']) and ref[3+im]: #skip sp.gp. absences (mul=0)
ref[3+im] = abs(ref[3+im]) #mark as allowed
Fo = np.sqrt(ref[5+im])
sumFo += Fo
sumFo2 += ref[5+im]
sumFc2 += ref[7+im]
sumdF += abs(Fo-np.sqrt(ref[7+im]))
sumdF2 += abs(ref[5+im]-ref[7+im])
nobs += 1
df[i] = -w*(ref[5+im]-ref[7+im])
sumwYo += (w*ref[5+im])**2 #w*Fo^2
if im: #accumulate super lattice sums
ind = int(abs(ref[3]))
sumSSFo[ind] += Fo
sumSSFo2[ind] += ref[5+im]
sumSSdF[ind] += abs(Fo-np.sqrt(ref[7+im]))
sumSSdF2[ind] += abs(ref[5+im]-ref[7+im])
sumSSwYo[ind] += (w*ref[5+im])**2 #w*Fo^2
sumSSwdf2[ind] += df[i]**2
SSnobs[ind] += 1
maxH = max(maxH,ind)
else:
if ref[3+im]:
ref[3+im] = -abs(ref[3+im]) #mark as rejected
nrej += 1
else: #sp.gp.extinct
next += 1
else:
for i,ref in enumerate(refDict['RefList']):
if ref[5+im] > 0.:
ref[11+im] = SCExtinction(ref,im,phfx,hfx,pfx,calcControls,parmDict,varylist)[0]
ref[7+im] *= parmDict[phfx+'Scale']*ref[11+im] #correct Fc^2 for extinction
ref[8+im] = ref[5+im]/(parmDict[phfx+'Scale']*ref[11+im])
Fo = np.sqrt(ref[5+im])
Fc = np.sqrt(ref[7+im])
w = 2.0*Fo/ref[6+im] # 1/sig(F)?
if UserRejectHKL(ref,im,calcControls['UsrReject']) and ref[3+im]: #skip sp.gp. absences (mul=0)
ref[3+im] = abs(ref[3+im]) #mark as allowed
sumFo += Fo
sumFo2 += ref[5+im]
sumFc2 += ref[7+im]
sumdF += abs(Fo-Fc)
sumdF2 += abs(ref[5+im]-ref[7+im])
nobs += 1
df[i] = -w*(Fo-Fc)
sumwYo += (w*Fo)**2
if im:
ind = int(abs(ref[3]))
sumSSFo[ind] += Fo
sumSSFo2[ind] += ref[5+im]
sumSSdF[ind] += abs(Fo-Fc)
sumSSdF2[ind] += abs(ref[5+im]-ref[7+im])
sumSSwYo[ind] += (w*Fo)**2
sumSSwdf2[ind] += df[i]**2
SSnobs[ind] += 1
maxH = max(maxH,ind)
else:
if ref[3+im]:
ref[3+im] = -abs(ref[3+im]) #mark as rejected
nrej += 1
else: #sp.gp.extinct
next += 1
Scale = sumFo2/sumFc2
if (Scale < 0.8 or Scale > 1.2) and phfx+'Scale' in varylist:
print ('New scale: %.4f'%(Scale*parmDict[phfx+'Scale']))
indx = varylist.index(phfx+'Scale')
values[indx] = Scale*parmDict[phfx+'Scale']
Histogram['Residuals']['Nobs'] = nobs
Histogram['Residuals']['sumwYo'] = sumwYo
SumwYo += sumwYo
Histogram['Residuals']['wR'] = min(100.,np.sqrt(np.sum(df**2)/sumwYo)*100.)
Histogram['Residuals'][phfx+'Rf'] = 100.*sumdF/sumFo
Histogram['Residuals'][phfx+'Rf^2'] = 100.*sumdF2/sumFo2
Histogram['Residuals'][phfx+'Nref'] = nobs
Histogram['Residuals'][phfx+'Nrej'] = nrej
Histogram['Residuals'][phfx+'Next'] = next
if im:
Histogram['Residuals'][phfx+'SSRf'] = 100.*sumSSdF[:maxH+1]/sumSSFo[:maxH+1]
Histogram['Residuals'][phfx+'SSRf^2'] = 100.*sumSSdF2[:maxH+1]/sumSSFo2[:maxH+1]
Histogram['Residuals'][phfx+'SSNref'] = SSnobs[:maxH+1]
Histogram['Residuals']['SSwR'] = np.sqrt(sumSSwdf2[:maxH+1]/sumSSwYo[:maxH+1])*100.
Nobs += nobs
Nrej += nrej
Next += next
if dlg:
dlg.Update(Histogram['Residuals']['wR'],newmsg='For histogram %d Rw=%8.3f%s'%(hId,Histogram['Residuals']['wR'],'%'))[0]
dlg.Raise()
M = np.concatenate((M,wtFactor*df))
# end of HKLF processing
# GetFobsSq(Histograms,Phases,parmDict,calcControls)
Histograms['sumwYo'] = SumwYo
Histograms['Nobs'] = Nobs
Histograms['Nrej'] = Nrej
Histograms['Next'] = Next
Rw = min(100.,np.sqrt(np.sum(M**2)/SumwYo)*100.)
if dlg:
GoOn = dlg.Update(Rw,newmsg='%s%8.3f%s'%('All data Rw =',Rw,'%'))[0]
if not GoOn:
parmDict['saved values'] = values
dlg.Destroy()
raise G2obj.G2Exception('User abort') #Abort!!
pDict,pVals,pWt,pWsum,pWnum = penaltyFxn(HistoPhases,calcControls,parmDict,varylist)
if len(pVals):
pSum = np.sum(pWt*pVals**2)
for name in pWsum:
if pWsum[name]:
print (' Penalty function for %5d %8ss = %12.5g'%(pWnum[name],name,pWsum[name]))
print ('Total penalty function: %12.5g on %d terms'%(pSum,len(pVals)))
Nobs += len(pVals)
M = np.concatenate((M,np.sqrt(pWt)*pVals))
return M
# </ Anton Gagin
from scipy import interpolate
from scipy.interpolate import interp1d
def extrap1d(interpolator):
xs = interpolator.x
ys = interpolator.y
def pointwise(x):
if x < xs[0]:
return ys[0]+(x-xs[0])*(ys[1]-ys[0])/(xs[1]-xs[0])
elif x > xs[-1]:
return ys[-1]+(x-xs[-1])*(ys[-1]-ys[-2])/(xs[-1]-xs[-2])
else:
return interpolator(x)
def ufunclike(xs):
        return np.array(list(map(pointwise, np.array(xs))))    #list() needed: map returns an iterator in Python 3
return ufunclike
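# Usage sketch (illustrative only): wrap an interp1d object so that points outside
# its x-range are linearly extrapolated from the end segments:
#   f = extrap1d(interp1d([1.,2.,3.],[10.,20.,30.]))
#   f([0.,1.5,4.])  ->  array([ 0., 15., 40.])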
# Repeats the code of errRefine, apart from the corrections applied below
def errRefine_opt(values, optCor, HistoPhases,parmDict,varylist,calcControls,pawleyLookup,dlg):
    '''Variant of errRefine used by the systematic-error correction patch: the computed
    profile is shifted by the optimal peak-position correction (dx_opt), scaled by the
    multiplicative correction (cc_opt) and offset by the additive correction (bb_opt)
    before the residuals are formed.
    '''
Values2Dict(parmDict, varylist, values)
G2mv.Dict2Map(parmDict,varylist)
Histograms,Phases,restraintDict,rigidbodyDict = HistoPhases
M = np.empty(0)
SumwYo = 0
Nobs = 0
ApplyRBModels(parmDict,Phases,rigidbodyDict)
    histoList = list(Histograms.keys())
    histoList.sort()
for histogram in histoList:
if 'PWDR' in histogram[:4]:
Histogram = Histograms[histogram]
hId = Histogram['hId']
hfx = ':%d:'%(hId)
wtFactor = calcControls[hfx+'wtFactor']
Limits = calcControls[hfx+'Limits']
x,y,w,yc,yb,yd = Histogram['Data']
yc *= 0.0 #zero full calcd profiles
yb *= 0.0
yd *= 0.0
xB = np.searchsorted(x,Limits[0])
xF = np.searchsorted(x,Limits[1])+1
yc[xB:xF],yb[xB:xF] = getPowderProfile(parmDict,x[xB:xF],
varylist,Histogram,Phases,calcControls,pawleyLookup)
yc[xB:xF] += yb[xB:xF]
if not np.any(y): #fill dummy data
rv = st.poisson(yc[xB:xF])
y[xB:xF] = rv.rvs()
Z = np.ones_like(yc[xB:xF])
Z[1::2] *= -1
y[xB:xF] = yc[xB:xF]+np.abs(y[xB:xF]-yc[xB:xF])*Z
w[xB:xF] = np.where(y[xB:xF]>0.,1./y[xB:xF],1.0)
#
# This part was changed -->
#
if (bool(optCor)):
x_cor = x[xB:xF] - optCor['dx_opt'][hId]
yc_func = interp1d(x_cor, yc[xB:xF])
yc_func = extrap1d(yc_func)
yc[xB:xF] = yc_func(x[xB:xF])
yc[xB:xF] = np.multiply(optCor['cc_opt'][hId], yc[xB:xF])
# yc[xB:xF] = np.multiply(optCor['cc_opt'][hId], (yc[xB:xF] - yb[xB:xF])) + yb[xB:xF]
yc[xB:xF] = yc[xB:xF] + optCor['bb_opt'][hId]
#
# <--
#
yd[xB:xF] = y[xB:xF]-yc[xB:xF]
W = wtFactor*w
wdy = -ma.sqrt(W[xB:xF])*(yd[xB:xF])
Histogram['Residuals']['Nobs'] = ma.count(x[xB:xF])
Nobs += Histogram['Residuals']['Nobs']
Histogram['Residuals']['sumwYo'] = ma.sum(W[xB:xF]*y[xB:xF]**2)
SumwYo += Histogram['Residuals']['sumwYo']
Histogram['Residuals']['R'] = min(100.,ma.sum(ma.abs(yd[xB:xF]))/ma.sum(y[xB:xF])*100.)
Histogram['Residuals']['wR'] = min(100.,ma.sqrt(ma.sum(wdy**2)/Histogram['Residuals']['sumwYo'])*100.)
sumYmB = ma.sum(ma.where(yc[xB:xF]!=yb[xB:xF],ma.abs(y[xB:xF]-yb[xB:xF]),0.))
sumwYmB2 = ma.sum(ma.where(yc[xB:xF]!=yb[xB:xF],W[xB:xF]*(y[xB:xF]-yb[xB:xF])**2,0.))
sumYB = ma.sum(ma.where(yc[xB:xF]!=yb[xB:xF],ma.abs(y[xB:xF]-yc[xB:xF])*ma.abs(y[xB:xF]-yb[xB:xF])/y[xB:xF],0.))
sumwYB2 = ma.sum(ma.where(yc[xB:xF]!=yb[xB:xF],W[xB:xF]*(ma.abs(y[xB:xF]-yc[xB:xF])*ma.abs(y[xB:xF]-yb[xB:xF])/y[xB:xF])**2,0.))
Histogram['Residuals']['Rb'] = min(100.,100.*sumYB/sumYmB)
Histogram['Residuals']['wRb'] = min(100.,100.*ma.sqrt(sumwYB2/sumwYmB2))
Histogram['Residuals']['wRmin'] = min(100.,100.*ma.sqrt(Histogram['Residuals']['Nobs']/Histogram['Residuals']['sumwYo']))
if dlg:
dlg.Update(Histogram['Residuals']['wR'],newmsg='For histogram %d Rw=%8.3f%s'%(hId,Histogram['Residuals']['wR'],'%'))[0]
M = np.concatenate((M,wdy))
#end of PWDR processing
elif 'HKLF' in histogram[:4]:
Histogram = Histograms[histogram]
Histogram['Residuals'] = {}
phase = Histogram['Reflection Lists']
Phase = Phases[phase]
hId = Histogram['hId']
hfx = ':%d:'%(hId)
wtFactor = calcControls[hfx+'wtFactor']
pfx = '%d::'%(Phase['pId'])
phfx = '%d:%d:'%(Phase['pId'],hId)
SGData = Phase['General']['SGData']
im = 0
if Phase['General']['Type'] in ['modulated','magnetic']:
SSGData = Phase['General']['SSGData']
SSGMT = np.array([ops[0].T for ops in SSGData['SSGOps']])
im = 1 #offset in SS reflection list
#??
A = [parmDict[pfx+'A%d'%(i)] for i in range(6)]
G,g = G2lat.A2Gmat(A) #recip & real metric tensors
refDict = Histogram['Data']
time0 = time.time()
if im:
SStructureFactor(refDict,im,G,hfx,pfx,SGData,SSGData,calcControls,parmDict)
else:
StructureFactor2(refDict,G,hfx,pfx,SGData,calcControls,parmDict)
# StructureFactor2(refDict,G,hfx,pfx,SGData,calcControls,parmDict)
# print 'sf-calc time: %.3f'%(time.time()-time0)
df = np.zeros(len(refDict['RefList']))
sumwYo = 0
sumFo = 0
sumFo2 = 0
sumdF = 0
sumdF2 = 0
nobs = 0
if calcControls['F**2']:
for i,ref in enumerate(refDict['RefList']):
if ref[6+im] > 0:
ref[11+im] = SCExtinction(ref,im,phfx,hfx,pfx,calcControls,parmDict,varylist)[0]
w = 1.0/ref[6+im]
ref[7+im] = parmDict[phfx+'Scale']*ref[9+im]*ref[11+im] #correct Fc^2 for extinction
ref[8+im] = ref[5+im]/(parmDict[phfx+'Scale']*ref[11+im])
if w*ref[5+im] >= calcControls['minF/sig']:
Fo = np.sqrt(ref[5+im])
sumFo += Fo
sumFo2 += ref[5+im]
sumdF += abs(Fo-np.sqrt(ref[7+im]))
sumdF2 += abs(ref[5+im]-ref[7+im])
nobs += 1
df[i] = -w*(ref[5+im]-ref[7+im])
sumwYo += (w*ref[5+im])**2
else:
for i,ref in enumerate(refDict['RefList']):
if ref[5+im] > 0.:
ref[11+im] = SCExtinction(ref,im,phfx,hfx,pfx,calcControls,parmDict,varylist)[0]
ref[7+im] = parmDict[phfx+'Scale']*ref[9+im]*ref[11+im] #correct Fc^2 for extinction
ref[8+im] = ref[5+im]/(parmDict[phfx+'Scale']*ref[11+im])
Fo = np.sqrt(ref[5+im])
Fc = np.sqrt(ref[7+im])
w = 2.0*Fo/ref[6+im]
if w*Fo >= calcControls['minF/sig']:
sumFo += Fo
sumFo2 += ref[5+im]
sumdF += abs(Fo-Fc)
sumdF2 += abs(ref[5+im]-ref[7+im])
nobs += 1
df[i] = -w*(Fo-Fc)
sumwYo += (w*Fo)**2
Histogram['Residuals']['Nobs'] = nobs
Histogram['Residuals']['sumwYo'] = sumwYo
SumwYo += sumwYo
Histogram['Residuals']['wR'] = min(100.,np.sqrt(np.sum(df**2)/Histogram['Residuals']['sumwYo'])*100.)
Histogram['Residuals'][phfx+'Rf'] = 100.*sumdF/sumFo
Histogram['Residuals'][phfx+'Rf^2'] = 100.*sumdF2/sumFo2
Histogram['Residuals'][phfx+'Nref'] = nobs
Nobs += nobs
if dlg:
dlg.Update(Histogram['Residuals']['wR'],newmsg='For histogram %d Rw=%8.3f%s'%(hId,Histogram['Residuals']['wR'],'%'))[0]
M = np.concatenate((M,wtFactor*df))
# end of HKLF processing
Histograms['sumwYo'] = SumwYo
Histograms['Nobs'] = Nobs
Rw = min(100.,np.sqrt(np.sum(M**2)/SumwYo)*100.)
if dlg:
GoOn = dlg.Update(Rw,newmsg='%s%8.3f%s'%('All data Rw =',Rw,'%'))[0]
if not GoOn:
parmDict['saved values'] = values
dlg.Destroy()
            raise G2obj.G2Exception('User abort') #Abort!!
    pNames,pVals,pWt,pWsum,pWnum = penaltyFxn(HistoPhases,calcControls,parmDict,varylist)
    if len(pVals):
        pSum = np.sum(pWt*pVals**2)
        for name in pWsum:
            if pWsum[name]:
                print (' Penalty function for %5d %8ss = %12.5g'%(pWnum[name],name,pWsum[name]))
        print ('Total penalty function: %12.5g on %d terms'%(pSum,len(pVals)))
Nobs += len(pVals)
M = np.concatenate((M,np.sqrt(pWt)*pVals))
return M
# Anton Gagin />
| AntonGagin/GSAS_USE | patchSystErrors/modifiedOld/GSASIIstrMath.py | Python | gpl-3.0 | 243,705 | ["CRYSTAL", "Gaussian"] | 9ab9a1b9cb89c0a43c16fc673f7ead9ea561f11cd96b54c7fb7ebd826533268f |