| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| stringlengths 12–1.05M | stringlengths 5–86 | stringlengths 4–191 | stringclasses 1 value | stringclasses 15 values | int32 12–1.05M | listlengths 1–23 | stringlengths 64–64 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Random distribution generator NDArray API of MXNet."""
from ..base import numeric_types, _Null
from ..context import current_context
from . import _internal
from .ndarray import NDArray
__all__ = ['uniform', 'normal', 'randn', 'poisson', 'exponential', 'gamma', 'binomial',
'categorical', 'multinomial', 'negative_binomial', 'generalized_negative_binomial',
'shuffle', 'randint']
def _random_helper(random, sampler, params, shape, dtype, ctx, out, kwargs):
"""Helper function for random generators."""
if isinstance(params[0], NDArray):
for i in params[1:]:
assert isinstance(i, NDArray), \
"Distribution parameters must all have the same type, but got " \
"both %s and %s."%(type(params[0]), type(i))
return sampler(*params, shape=shape, dtype=dtype, out=out, **kwargs)
elif isinstance(params[0], numeric_types):
if ctx is None:
ctx = current_context()
if shape is _Null and out is None:
shape = 1
for i in params[1:]:
assert isinstance(i, numeric_types), \
"Distribution parameters must all have the same type, but got " \
"both %s and %s."%(type(params[0]), type(i))
return random(*params, shape=shape, dtype=dtype, ctx=ctx, out=out, **kwargs)
raise ValueError("Distribution parameters must be either NDArray or numbers, "
"but got %s."%type(params[0]))
def uniform(low=0, high=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs):
"""Draw random samples from a uniform distribution.
Samples are uniformly distributed over the half-open interval *[low, high)*
(includes *low*, but excludes *high*).
Parameters
----------
low : float or NDArray, optional
Lower boundary of the output interval. All values generated will be
greater than or equal to low. The default value is 0.
high : float or NDArray, optional
Upper boundary of the output interval. All values generated will be
less than high. The default value is 1.0.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `low` and
`high` are scalars, output shape will be `(m, n)`. If `low` and `high`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `[low, high)` pair.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`low.context` when `low` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
        An NDArray of type `dtype`. If the given `shape` is, e.g.,
        `(m, n)` and `low` and `high` are scalars, output shape will be `(m, n)`.
        If `low` and `high` are NDArrays with shape, e.g., `(x, y)`, then the
        returned NDArray will have shape `(x, y, m, n)`, where `m*n` uniformly distributed
        samples are drawn for each `[low, high)` pair.
Examples
--------
>>> mx.nd.random.uniform(0, 1)
[ 0.54881352]
    <NDArray 1 @cpu(0)>
>>> mx.nd.random.uniform(0, 1, ctx=mx.gpu(0))
[ 0.92514056]
<NDArray 1 @gpu(0)>
>>> mx.nd.random.uniform(-1, 1, shape=(2,))
[ 0.71589124 0.08976638]
<NDArray 2 @cpu(0)>
>>> low = mx.nd.array([1,2,3])
>>> high = mx.nd.array([2,3,4])
>>> mx.nd.random.uniform(low, high, shape=2)
[[ 1.78653979 1.93707538]
[ 2.01311183 2.37081361]
[ 3.30491424 3.69977832]]
<NDArray 3x2 @cpu(0)>
"""
return _random_helper(_internal._random_uniform, _internal._sample_uniform,
[low, high], shape, dtype, ctx, out, kwargs)
def normal(loc=0, scale=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs):
"""Draw random samples from a normal (Gaussian) distribution.
Samples are distributed according to a normal distribution parametrized
by *loc* (mean) and *scale* (standard deviation).
Parameters
----------
loc : float or NDArray, optional
Mean (centre) of the distribution.
scale : float or NDArray, optional
Standard deviation (spread or width) of the distribution.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `loc` and
`scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
        `(x, y, m, n)`, where `m*n` samples are drawn for each `(loc, scale)` pair.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`loc.context` when `loc` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
        An NDArray of type `dtype`. If the given `shape` is, e.g., `(m, n)` and
        `loc` and `scale` are scalars, output shape will be `(m, n)`. If `loc` and
        `scale` are NDArrays with shape, e.g., `(x, y)`, then output will have shape
        `(x, y, m, n)`, where `m*n` samples are drawn for each `(loc, scale)` pair.
Examples
--------
>>> mx.nd.random.normal(0, 1)
[ 2.21220636]
<NDArray 1 @cpu(0)>
>>> mx.nd.random.normal(0, 1, ctx=mx.gpu(0))
[ 0.29253659]
<NDArray 1 @gpu(0)>
>>> mx.nd.random.normal(-1, 1, shape=(2,))
[-0.2259962 -0.51619542]
<NDArray 2 @cpu(0)>
>>> loc = mx.nd.array([1,2,3])
>>> scale = mx.nd.array([2,3,4])
>>> mx.nd.random.normal(loc, scale, shape=2)
[[ 0.55912292 3.19566321]
[ 1.91728961 2.47706747]
[ 2.79666662 5.44254589]]
<NDArray 3x2 @cpu(0)>
"""
return _random_helper(_internal._random_normal, _internal._sample_normal,
[loc, scale], shape, dtype, ctx, out, kwargs)
def randn(*shape, **kwargs):
"""Draw random samples from a normal (Gaussian) distribution.
Samples are distributed according to a normal distribution parametrized
by *loc* (mean) and *scale* (standard deviation).
Parameters
----------
loc : float or NDArray
Mean (centre) of the distribution.
scale : float or NDArray
Standard deviation (spread or width) of the distribution.
shape : int or tuple of ints
The number of samples to draw. If shape is, e.g., `(m, n)` and `loc` and
`scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
        `(x, y, m, n)`, where `m*n` samples are drawn for each `(loc, scale)` pair.
dtype : {'float16', 'float32', 'float64'}
Data type of output samples. Default is 'float32'
ctx : Context
Device context of output. Default is current context. Overridden by
`loc.context` when `loc` is an NDArray.
out : NDArray
Store output to an existing NDArray.
Returns
-------
NDArray
        If the given `shape` is, e.g., `(m, n)` and `loc` and `scale` are scalars, output
        shape will be `(m, n)`. If `loc` and `scale` are NDArrays with shape, e.g., `(x, y)`,
        then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for
        each `(loc, scale)` pair.
Examples
--------
>>> mx.nd.random.randn()
    [ 2.21220636]
<NDArray 1 @cpu(0)>
>>> mx.nd.random.randn(2, 2)
[[-1.856082 -1.9768796 ]
[-0.20801921 0.2444218 ]]
<NDArray 2x2 @cpu(0)>
>>> mx.nd.random.randn(2, 3, loc=5, scale=1)
[[4.19962 4.8311777 5.936328 ]
[5.357444 5.7793283 3.9896927]]
<NDArray 2x3 @cpu(0)>
"""
loc = kwargs.pop('loc', 0)
scale = kwargs.pop('scale', 1)
dtype = kwargs.pop('dtype', _Null)
ctx = kwargs.pop('ctx', None)
out = kwargs.pop('out', None)
assert isinstance(loc, (int, float, NDArray))
assert isinstance(scale, (int, float, NDArray))
return _random_helper(_internal._random_normal, _internal._sample_normal,
[loc, scale], shape, dtype, ctx, out, kwargs)
def poisson(lam=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs):
"""Draw random samples from a Poisson distribution.
Samples are distributed according to a Poisson distribution parametrized
by *lambda* (rate). Samples will always be returned as a floating point data type.
Parameters
----------
lam : float or NDArray, optional
        Expectation of the distribution (the rate parameter), should be >= 0.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `lam` is
a scalar, output shape will be `(m, n)`. If `lam`
is an NDArray with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`lam.context` when `lam` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
        If the given `shape` is, e.g., `(m, n)` and `lam` is
        a scalar, output shape will be `(m, n)`. If `lam`
is an NDArray with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`.
Examples
--------
>>> mx.nd.random.poisson(1)
[ 1.]
<NDArray 1 @cpu(0)>
>>> mx.nd.random.poisson(1, shape=(2,))
[ 0. 2.]
<NDArray 2 @cpu(0)>
>>> lam = mx.nd.array([1,2,3])
>>> mx.nd.random.poisson(lam, shape=2)
[[ 1. 3.]
[ 3. 2.]
[ 2. 3.]]
<NDArray 3x2 @cpu(0)>
"""
return _random_helper(_internal._random_poisson, _internal._sample_poisson,
[lam], shape, dtype, ctx, out, kwargs)
def exponential(scale=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs):
r"""Draw samples from an exponential distribution.
Its probability density function is
.. math:: f(x; \frac{1}{\beta}) = \frac{1}{\beta} \exp(-\frac{x}{\beta}),
    for :math:`x > 0` and 0 elsewhere. :math:`\beta` is the scale parameter, which is the
    inverse of the rate parameter :math:`\lambda = 1/\beta`.
Parameters
----------
scale : float or NDArray, optional
        The scale parameter, :math:`\beta = 1/\lambda`.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `scale` is
a scalar, output shape will be `(m, n)`. If `scale`
is an NDArray with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `scale`.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`scale.context` when `scale` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
        If the given `shape` is, e.g., `(m, n)` and `scale` is a scalar, output shape will
be `(m, n)`. If `scale` is an NDArray with shape, e.g., `(x, y)`, then `output`
will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each entry in scale.
Examples
--------
>>> mx.nd.random.exponential(1)
[ 0.79587454]
<NDArray 1 @cpu(0)>
>>> mx.nd.random.exponential(1, shape=(2,))
[ 0.89856035 1.25593066]
<NDArray 2 @cpu(0)>
>>> scale = mx.nd.array([1,2,3])
>>> mx.nd.random.exponential(scale, shape=2)
[[ 0.41063145 0.42140478]
[ 2.59407091 10.12439728]
[ 2.42544937 1.14260709]]
<NDArray 3x2 @cpu(0)>
"""
return _random_helper(_internal._random_exponential, _internal._sample_exponential,
[1.0/scale], shape, dtype, ctx, out, kwargs)
def gamma(alpha=1, beta=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs):
"""Draw random samples from a gamma distribution.
Samples are distributed according to a gamma distribution parametrized
by *alpha* (shape) and *beta* (scale).
Parameters
----------
alpha : float or NDArray, optional
The shape of the gamma distribution. Should be greater than zero.
beta : float or NDArray, optional
The scale of the gamma distribution. Should be greater than zero.
Default is equal to 1.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `alpha` and
`beta` are scalars, output shape will be `(m, n)`. If `alpha` and `beta`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
        `(x, y, m, n)`, where `m*n` samples are drawn for each `(alpha, beta)` pair.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`alpha.context` when `alpha` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
        If the given `shape` is, e.g., `(m, n)` and `alpha` and `beta` are scalars, output
        shape will be `(m, n)`. If `alpha` and `beta` are NDArrays with shape, e.g.,
        `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are
        drawn for each `(alpha, beta)` pair.
Examples
--------
>>> mx.nd.random.gamma(1, 1)
[ 1.93308783]
<NDArray 1 @cpu(0)>
>>> mx.nd.random.gamma(1, 1, shape=(2,))
[ 0.48216391 2.09890771]
<NDArray 2 @cpu(0)>
>>> alpha = mx.nd.array([1,2,3])
>>> beta = mx.nd.array([2,3,4])
>>> mx.nd.random.gamma(alpha, beta, shape=2)
[[ 3.24343276 0.94137681]
[ 3.52734375 0.45568955]
[ 14.26264095 14.0170126 ]]
<NDArray 3x2 @cpu(0)>
"""
return _random_helper(_internal._random_gamma, _internal._sample_gamma,
[alpha, beta], shape, dtype, ctx, out, kwargs)
def binomial(n=1, p=0.5, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs):
"""Draw random samples from a binomial distribution.
Samples are distributed according to a binomial distribution parametrized
by *n* (number of trials) and *p* (success probability).
Parameters
----------
n : float or NDArray, optional
Number of experiments, > 0.
p : float or NDArray, optional
Success probability in each experiment, >= 0 and <= 1.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `n` and
`p` are scalars, output shape will be `(m, n)`. If `n` and `p`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
        `(x, y, m, n)`, where `m*n` samples are drawn for each `(n, p)` pair.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`n.context` when `n` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
        If the given `shape` is, e.g., `(m, n)` and `n` and `p` are scalars, output
        shape will be `(m, n)`. If `n` and `p` are NDArrays with shape, e.g.,
        `(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are
        drawn for each `(n, p)` pair.
Examples
--------
>>> mx.nd.random.binomial(10, 0.1)
[ 1.]
<NDArray 1 @cpu(0)>
>>> mx.nd.random.binomial(10, 0.6, shape=(2,))
[ 4. 6.]
<NDArray 2 @cpu(0)>
>>> n = mx.nd.array([10,2,3])
>>> p = mx.nd.array([0.2,0.3,0.4])
>>> mx.nd.random.binomial(n, p, shape=2)
[[ 1. 4.]
[ 0. 2.]
[ 1. 1.]]
<NDArray 3x2 @cpu(0)>
"""
return _random_helper(_internal._random_binomial, _internal._sample_binomial,
[n, p], shape, dtype, ctx, out, kwargs)
def negative_binomial(k=1, p=1, shape=_Null, dtype=_Null, ctx=None,
out=None, **kwargs):
"""Draw random samples from a negative binomial distribution.
Samples are distributed according to a negative binomial distribution
parametrized by *k* (limit of unsuccessful experiments) and *p* (failure
probability in each experiment). Samples will always be returned as a
floating point data type.
Parameters
----------
k : float or NDArray, optional
Limit of unsuccessful experiments, > 0.
p : float or NDArray, optional
Failure probability in each experiment, >= 0 and <=1.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `k` and
`p` are scalars, output shape will be `(m, n)`. If `k` and `p`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
        `(x, y, m, n)`, where `m*n` samples are drawn for each `(k, p)` pair.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`k.context` when `k` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
        If the given `shape` is, e.g., `(m, n)` and `k` and `p` are scalars, output shape
        will be `(m, n)`. If `k` and `p` are NDArrays with shape, e.g., `(x, y)`, then
        output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `(k, p)` pair.
Examples
--------
>>> mx.nd.random.negative_binomial(10, 0.5)
[ 4.]
<NDArray 1 @cpu(0)>
>>> mx.nd.random.negative_binomial(10, 0.5, shape=(2,))
[ 3. 4.]
<NDArray 2 @cpu(0)>
>>> k = mx.nd.array([1,2,3])
>>> p = mx.nd.array([0.2,0.4,0.6])
>>> mx.nd.random.negative_binomial(k, p, shape=2)
[[ 3. 2.]
[ 4. 4.]
[ 0. 5.]]
<NDArray 3x2 @cpu(0)>
"""
return _random_helper(_internal._random_negative_binomial,
_internal._sample_negative_binomial,
[k, p], shape, dtype, ctx, out, kwargs)
def generalized_negative_binomial(mu=1, alpha=1, shape=_Null, dtype=_Null, ctx=None,
out=None, **kwargs):
"""Draw random samples from a generalized negative binomial distribution.
Samples are distributed according to a generalized negative binomial
distribution parametrized by *mu* (mean) and *alpha* (dispersion).
*alpha* is defined as *1/k* where *k* is the failure limit of the
number of unsuccessful experiments (generalized to real numbers).
Samples will always be returned as a floating point data type.
Parameters
----------
mu : float or NDArray, optional
Mean of the negative binomial distribution.
alpha : float or NDArray, optional
Alpha (dispersion) parameter of the negative binomial distribution.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `mu` and
`alpha` are scalars, output shape will be `(m, n)`. If `mu` and `alpha`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
        `(x, y, m, n)`, where `m*n` samples are drawn for each `(mu, alpha)` pair.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`mu.context` when `mu` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
        If the given `shape` is, e.g., `(m, n)` and `mu` and `alpha` are scalars, output
        shape will be `(m, n)`. If `mu` and `alpha` are NDArrays with shape, e.g., `(x, y)`,
        then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for
        each `(mu, alpha)` pair.
Examples
--------
>>> mx.nd.random.generalized_negative_binomial(10, 0.5)
[ 19.]
<NDArray 1 @cpu(0)>
>>> mx.nd.random.generalized_negative_binomial(10, 0.5, shape=(2,))
[ 30. 21.]
<NDArray 2 @cpu(0)>
>>> mu = mx.nd.array([1,2,3])
>>> alpha = mx.nd.array([0.2,0.4,0.6])
>>> mx.nd.random.generalized_negative_binomial(mu, alpha, shape=2)
[[ 4. 0.]
[ 3. 2.]
[ 6. 2.]]
<NDArray 3x2 @cpu(0)>
"""
return _random_helper(_internal._random_generalized_negative_binomial,
_internal._sample_generalized_negative_binomial,
[mu, alpha], shape, dtype, ctx, out, kwargs)
def categorical(data, shape=_Null, get_prob=False, out=None, dtype='int32', **kwargs):
"""Concurrent sampling from multiple categorical distributions.
.. note:: The input distribution must be normalized, i.e. `data` must sum to
1 along its last dimension.
Parameters
----------
data : NDArray
An *n* dimensional array whose last dimension has length `k`, where
`k` is the number of possible outcomes of each categorical distribution.
For example, data with shape `(m, n, k)` specifies `m*n` categorical
distributions each with `k` possible outcomes.
shape : int or tuple of ints, optional
The number of samples to draw from each distribution. If shape is empty
one sample will be drawn from each distribution.
get_prob : bool, optional
If true, a second array containing log likelihood of the drawn
samples will also be returned.
This is usually used for reinforcement learning, where you can provide
reward as head gradient w.r.t. this array to estimate gradient.
out : NDArray, optional
Store output to an existing NDArray.
dtype : str or numpy.dtype, optional
Data type of the sample output array. The default is int32.
Note that the data type of the log likelihood array is the same with that of `data`.
Returns
-------
List, or NDArray
For input `data` with `n` dimensions and shape `(d1, d2, ..., dn-1, k)`, and input
`shape` with shape `(s1, s2, ..., sx)`, returns an NDArray with shape
`(d1, d2, ... dn-1, s1, s2, ..., sx)`. The `s1, s2, ... sx` dimensions of the
returned NDArray consist of 0-indexed values sampled from each respective categorical
distribution provided in the `k` dimension of `data`.
For the case `n`=1, and `x`=1 (one shape dimension), returned NDArray has shape `(s1,)`.
If `get_prob` is set to True, this function returns a list of format:
`[ndarray_output, log_likelihood_output]`, where `log_likelihood_output` is an NDArray of the
same shape as the sampled outputs.
Examples
--------
>>> probs = mx.nd.array([0, 0.1, 0.2, 0.3, 0.4])
>>> mx.nd.random.categorical(probs)
[3]
<NDArray 1 @cpu(0)>
>>> probs = mx.nd.array([[0, 0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1, 0]])
>>> mx.nd.random.categorical(probs)
[3 1]
<NDArray 2 @cpu(0)>
>>> mx.nd.random.categorical(probs, shape=2)
[[4 4]
[1 2]]
<NDArray 2x2 @cpu(0)>
>>> mx.nd.random.categorical(probs, get_prob=True)
[3 2]
<NDArray 2 @cpu(0)>
[-1.20397282 -1.60943794]
<NDArray 2 @cpu(0)>
"""
return _internal._sample_categorical(data, shape, get_prob, out=out, dtype=dtype, **kwargs)
def multinomial(n=[1], p=[[1.0]], shape=_Null, dtype='float32', ctx=None, out=None, **kwargs):
"""Concurrent sampling from multiple multinomial distributions.
.. note:: The input distribution must be normalized, i.e. `p` must sum to
1 along its last dimension.
Parameters
----------
n : NDArray
An *n* dimensional array containing the number of trials of each
multinomial distribution.
p : NDArray
An *n+1* dimensional array containing the probabilities of each multinomial
distribution. Its last dimension has length `k`, where `k` is the number
of possible outcomes of each multinomial distribution.
For example, p with shape `(m, n, k)` specifies `m*n` multinomial
distributions each with `k` possible outcomes.
shape : int or tuple of ints, optional
The number of samples to draw from each distribution. If shape is empty
one sample will be drawn from each distribution.
out : NDArray, optional
Store output to an existing NDArray.
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`n.context` when `n` is an NDArray.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
Returns
-------
NDArray
        If the given `shape` is, e.g., `(m, n)` and `n` and `p` are a scalar and an array of length k
        respectively, output shape will be `(m, n, k)`. If `n` and `p` are NDArrays with shape, e.g.,
        `(x, y)` and `(x, y, k)`, then output will have shape `(x, y, m, n, k)`, where `m*n`
        samples are drawn for each `(n, p)` pair.
Examples
--------
>>> mx.nd.random.multinomial(mx.nd.array([10]), mx.nd.array([[0.1, 0.9]]))
[[ 1. 9.]]
<NDArray 1x2 @cpu(0)>
>>> mx.nd.random.multinomial(mx.nd.array([10]), mx.nd.array([[0.6, 0.4]]), shape=(2,))
[[[ 5. 5.]
[ 6. 4.]]]
<NDArray 1x2x2 @cpu(0)>
>>> n = mx.nd.array([10, 2, 3])
>>> p = mx.nd.array([[0.2, 0.8], [0.3, 0.7], [0.4, 0.6]])
    >>> mx.nd.random.multinomial(n, p)
[[ 2. 8.]
[ 1. 1.]
[ 1. 2.]]
<NDArray 3x2 @cpu(0)>
"""
return _internal._sample_multinomial(n, p, shape=shape, out=out, ctx=ctx, dtype=dtype, **kwargs)
def shuffle(data, **kwargs):
"""Shuffle the elements randomly.
This shuffles the array along the first axis.
The order of the elements in each subarray does not change.
For example, if a 2D array is given, the order of the rows randomly changes,
but the order of the elements in each row does not change.
Parameters
----------
data : NDArray
Input data array.
out : NDArray, optional
Array to store the result.
Returns
-------
NDArray
A new NDArray with the same shape and type as input `data`, but
with items in the first axis of the returned NDArray shuffled randomly.
The original input `data` is not modified.
Examples
--------
>>> data = mx.nd.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
>>> mx.nd.random.shuffle(data)
[[ 0. 1. 2.]
[ 6. 7. 8.]
[ 3. 4. 5.]]
    <NDArray 3x3 @cpu(0)>
>>> mx.nd.random.shuffle(data)
[[ 3. 4. 5.]
[ 0. 1. 2.]
[ 6. 7. 8.]]
    <NDArray 3x3 @cpu(0)>
"""
return _internal._shuffle(data, **kwargs)
def randint(low, high, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs):
"""Draw random samples from a discrete uniform distribution.
Samples are uniformly distributed over the half-open interval *[low, high)*
(includes *low*, but excludes *high*).
Parameters
----------
low : int, required
Lower boundary of the output interval. All values generated will be
greater than or equal to low.
high : int, required
Upper boundary of the output interval. All values generated will be
less than high.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `low` and
`high` are scalars, output shape will be `(m, n)`.
dtype : {'int32', 'int64'}, optional
Data type of output samples. Default is 'int32'
ctx : Context, optional
        Device context of output. Default is current context.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
        An NDArray of type `dtype`. If the given `shape` is, e.g.,
        `(m, n)`, the returned NDArray will have shape `(m, n)`, with its
        contents drawn from the interval `[low, high)`.
Examples
--------
>>> mx.nd.random.randint(5, 100)
[ 90]
    <NDArray 1 @cpu(0)>
>>> mx.nd.random.randint(-10, 2, ctx=mx.gpu(0))
[ -8]
<NDArray 1 @gpu(0)>
>>> mx.nd.random.randint(-10, 10, shape=(2,))
[ -5 4]
<NDArray 2 @cpu(0)>
"""
return _random_helper(_internal._random_randint, None,
[low, high], shape, dtype, ctx, out, kwargs)
| szha/mxnet | python/mxnet/ndarray/random.py | Python | apache-2.0 | 30,169 | ["Gaussian"] | 8faef5cc4fc9ca8f13556f84c80bf2f1fe1adf67792c29da769a58f8d10ed24a |
import sympy
from sympy.functions import DiracDelta, Heaviside
from sympy.solvers import solve
def change_mul(node,x):
"""change_mul(node,x)
Rearranges the operands of a product, bringing to front any simple
DiracDelta expression.
If no simple DiracDelta expression was found, then all the DiracDelta
expressions are simplified (using DiracDelta.simplify).
    Return: (dirac, nnode)
    Where:
    dirac is a simple DiracDelta expression (None if no simple expression was found).
    nnode is a new node where all the DiracDelta expressions were simplified and
    the node was finally expanded. If nnode is None, no DiracDelta expression
    could be simplified.
    Examples
    --------
    >>> change_mul(x*y*DiracDelta(x)*cos(x), x)
    (DiracDelta(x), x*y*cos(x))
    >>> change_mul(x*y*DiracDelta(x**2 - 1)*cos(x), x)
    (None, x*y*DiracDelta(1 + x)*cos(x)/2 + x*y*DiracDelta(-1 + x)*cos(x)/2)
    >>> change_mul(x*y*DiracDelta(cos(x))*cos(x), x)
    (None, None)
"""
    if not node.is_Mul:
        # Non-Mul nodes are returned unchanged; the recursive calls below rely
        # on this, since the args of a Mul are never Muls themselves.
        return node
    new_args = []
    dirac = None
    for arg in node.args:
        if arg.func == DiracDelta and arg.is_simple(x) \
                and (len(arg.args) <= 1 or arg.args[1] == 0):
            dirac = arg
        else:
            new_args.append(change_mul(arg, x))
    if not dirac:  # we didn't find any simple dirac
        new_args = []
        for arg in node.args:
            if arg.func == DiracDelta:
                new_args.append(arg.simplify(x))
            else:
                new_args.append(change_mul(arg, x))
        if tuple(new_args) != node.args:
            nnode = node.__class__(*new_args).expand()
        else:  # if the node didn't change there is nothing to do
            nnode = None
        return (None, nnode)
    return (dirac, node.func(*new_args))
def deltaintegrate(f, x):
"""The idea for integration is the following:
-If we are dealing with a DiracDelta expression, i.e.:
DiracDelta(g(x)), we try to simplify it.
If we could simplify it, then we integrate the resulting expression.
We already know we can integrate a simplified expression, because only
simple DiracDelta expressions are involved.
If we couldn't simplify it, there are two cases:
1) The expression is a simple expression, then we return the integral
Taking care if we are dealing with a Derivative or with a proper DiracDelta
2) The expression is not simple(i.e. DiracDelta(cos(x))), we can do nothing at all
-If the node is a multiplication node having a DiracDelta term
First we expand it.
If the expansion did work, the we try to integrate the expansion
If not, we try to extract a simple DiracDelta term, then we have two cases
1)We have a simple DiracDelta term, so we return the integral
2)We didn't have a simple term, but we do have an expression with simplified
DiracDelta terms, so we integrate this expression
"""
    if not f.has(DiracDelta):
        return None
    # g(x) = DiracDelta(h(x))
    if f.func == DiracDelta:
        h = f.simplify(x)
        if h == f:  # can't simplify the expression
            # FIXME: the second term tells whether it is a DiracDelta or a Derivative
            # For integrating derivatives of DiracDelta we need the chain rule
            if f.is_simple(x):
                if (len(f.args) <= 1 or f.args[1] == 0):
                    return Heaviside(f.args[0])
                else:
                    return (DiracDelta(f.args[0], f.args[1] - 1) / f.args[0].as_poly().LC())
        else:  # let's try to integrate the simplified expression
            fh = sympy.integrals.integrate(h, x)
            return fh
    elif f.is_Mul:  # g(x) = a*b*c*f(DiracDelta(h(x)))*d*e
        g = f.expand()
        if f != g:  # the expansion worked
            fh = sympy.integrals.integrate(g, x)
            if fh and not isinstance(fh, sympy.integrals.Integral):
                return fh
        else:  # no expansion performed, try to extract a simple DiracDelta term
            dg, rest_mult = change_mul(f, x)
            if not dg:
                if rest_mult:
                    fh = sympy.integrals.integrate(rest_mult, x)
                    return fh
            else:
                point = solve(dg.args[0], x)[0]
                return (rest_mult.subs(x, point)*Heaviside(dg.args[0]))
    return None
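# A doctest-style sketch of the two main branches (hypothetical session; it
# relies on the old DiracDelta.simplify/is_simple API used above):
#
#   >>> from sympy import DiracDelta
#   >>> from sympy.abc import x
#   >>> deltaintegrate(DiracDelta(x), x)         # bare delta -> Heaviside
#   Heaviside(x)
#   >>> deltaintegrate(x*DiracDelta(x - 1), x)   # Mul branch: substitute the root x = 1
#   Heaviside(x - 1)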
| mattpap/sympy-polys | sympy/integrals/deltafunctions.py | Python | bsd-3-clause | 4,414 | ["DIRAC"] | f602c328652087f9ee634dfaf47269cf58cdc3b2f2ca7f6eb0dcb3065f24f311 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines the events signaled by abinit during the execution. It also
provides a parser to extract these events from the main output file and the log file.
"""
import sys
import os.path
import datetime
import collections
import ruamel.yaml as yaml
import abc
import logging
import numpy as np
from monty.string import indent, is_string, list_strings
from monty.fnmatch import WildCard
from monty.termcolor import colored
from monty.inspect import all_subclasses
from monty.json import MontyDecoder
from pymatgen.core.structure import Structure
from monty.json import MSONable
from pymatgen.util.serialization import pmg_serialize
from .abiinspect import YamlTokenizer
logger = logging.getLogger(__name__)
__all__ = [
"EventsParser",
"get_event_handler_classes",
"ScfConvergenceWarning",
"NscfConvergenceWarning",
"RelaxConvergenceWarning",
"Correction",
"DilatmxError",
"DilatmxErrorHandler",
]
def straceback():
"""Returns a string with the traceback."""
import traceback
return traceback.format_exc()
class AbinitEvent(yaml.YAMLObject):
"""
Example (YAML syntax)::
Normal warning without any handler:
--- !Warning
message: |
This is a normal warning that won't
trigger any handler in the python code!
src_file: routine_name
src_line: 112
...
Critical warning that will trigger some action in the python code.
    --- !ScfConvergenceWarning
message: |
The human-readable message goes here!
src_file: foo.F90
src_line: 112
tolname: tolwfr
actual_tol: 1.0e-8
required_tol: 1.0e-10
nstep: 50
...
The algorithm to extract the YAML sections is very simple.
1) We use YamlTokenizer to extract the documents from the output file
    2) If we have a tag that ends with "Warning", "Error", "Bug", or "Comment",
we know we have encountered a new ABINIT event
3) We parse the document with yaml.safe_load(doc.text) and we get the object
Note that:
    # --- and ... become reserved words (when they are placed at
    the beginning of a line) since they are used to mark the beginning and
the end of YAML documents.
# All the possible events should subclass `AbinitEvent` and define
the class attribute yaml_tag so that yaml.safe_load will know how to
build the instance.
"""
color = None
def __init__(self, src_file, src_line, message):
"""
Basic constructor for :class:`AbinitEvent`.
Args:
message: String with human-readable message providing info on the event.
src_file: String with the name of the Fortran file where the event is raised.
            src_line: Integer giving the line number in src_file.
"""
#print("src_file", src_file, "src_line", src_line)
self.message = message
self.src_file = src_file
self.src_line = src_line
@pmg_serialize
def as_dict(self):
        # This is needed because the events printed in the main output file do not define src_file and src_line
src_file = getattr(self, "src_file", "Unknown")
src_line = getattr(self, "src_line", 0)
return dict(message=self.message, src_file=src_file, src_line=src_line, yaml_tag=self.yaml_tag)
@classmethod
def from_dict(cls, d):
cls = as_event_class(d.get("yaml_tag"))
return cls(**{k: v for k, v in d.items() if k != "yaml_tag" and not k.startswith("@")})
@property
def header(self):
try:
return "<%s at %s:%s>" % (self.name, self.src_file, self.src_line)
except AttributeError:
            # This is needed because the events printed in the main output file do not define src_file and src_line
return "<%s at %s:%s>" % (self.name, "Unknown", 0)
def __repr__(self):
return self.header
def __str__(self):
return "\n".join((self.header, self.message))
def __eq__(self, other):
if other is None: return False
return self.message == other.message
def __ne__(self, other):
return not self.__eq__(other)
@property
def name(self):
"""Name of the event (class name)"""
return self.__class__.__name__
@property
def baseclass(self):
"""The baseclass of self."""
for cls in _BASE_CLASSES:
if isinstance(self, cls):
return cls
raise ValueError("Cannot determine the base class of %s" % self.__class__.__name__)
def correct(self, task):
"""
This method is called when an error is detected in a :class:`Task`
It should perform any corrective measures relating to the detected error.
The idea is similar to the one used in custodian but the handler receives
a :class:`Task` object so that we have access to its methods.
Returns:
(dict) JSON serializable dict that describes the errors and actions taken. E.g.
{"errors": list_of_errors, "actions": list_of_actions_taken}.
If this is an unfixable error, actions should be set to None.
"""
return 0
class AbinitComment(AbinitEvent):
"""Base class for Comment events"""
yaml_tag = '!COMMENT'
color = "blue"
class AbinitError(AbinitEvent):
"""Base class for Error events"""
yaml_tag = '!ERROR'
color = "red"
class AbinitYamlError(AbinitError):
"""
Raised if the YAML parser cannot parse the document and the doc tag is an Error.
It's an AbinitError because the msg produced by the code is not valid YAML!
"""
class AbinitBug(AbinitEvent):
"""Base class for Bug events"""
yaml_tag = '!BUG'
color = "red"
class AbinitWarning(AbinitEvent):
"""
Base class for Warning events (the most important class).
Developers should subclass this class to define the different exceptions
raised by the code and the possible actions that can be performed.
"""
yaml_tag = '!WARNING'
color = "magenta"
class AbinitCriticalWarning(AbinitWarning):
color = "red"
class AbinitYamlWarning(AbinitCriticalWarning):
"""
    Raised if the YAML parser cannot parse the document and the doc tag is a Warning.
"""
###############################
# Warnings triggering restart #
###############################
class ScfConvergenceWarning(AbinitCriticalWarning):
"""Warning raised when the GS SCF cycle did not converge."""
yaml_tag = '!ScfConvergenceWarning'
class NscfConvergenceWarning(AbinitCriticalWarning):
"""Warning raised when the GS NSCF cycle did not converge."""
yaml_tag = '!NscfConvergenceWarning'
class RelaxConvergenceWarning(AbinitCriticalWarning):
"""Warning raised when the structural relaxation did not converge."""
yaml_tag = '!RelaxConvergenceWarning'
# TODO: for the time being we don't discern between GS and PhononCalculations.
#class PhononConvergenceWarning(AbinitCriticalWarning):
# """Warning raised when the phonon calculation did not converge."""
# yaml_tag = u'!PhononConvergenceWarning'
class QPSConvergenceWarning(AbinitCriticalWarning):
"""Warning raised when the QPS iteration (GW) did not converge."""
yaml_tag = '!QPSConvergenceWarning'
class HaydockConvergenceWarning(AbinitCriticalWarning):
"""Warning raised when the Haydock method (BSE) did not converge."""
yaml_tag = '!HaydockConvergenceWarning'
# Error classes providing a correct method.
# Register the concrete base classes.
_BASE_CLASSES = [
AbinitComment,
AbinitError,
AbinitBug,
AbinitWarning,
]
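# A hedged round-trip sketch (hypothetical values): events serialize through
# as_dict() and are rebuilt by AbinitEvent.from_dict, which dispatches on the
# stored yaml_tag:
#
#   ev = ScfConvergenceWarning(src_file="m_scf.F90", src_line=112,
#                              message="SCF cycle did not converge")
#   same = AbinitEvent.from_dict(ev.as_dict())
#   assert isinstance(same, ScfConvergenceWarning) and same == ev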
class EventReport(collections.abc.Iterable, MSONable):
"""
Iterable storing the events raised by an ABINIT calculation.
Attributes::
stat: information about a file as returned by os.stat
"""
def __init__(self, filename, events=None):
"""
List of ABINIT events.
Args:
filename: Name of the file
events: List of Event objects
"""
self.filename = os.path.abspath(filename)
self.stat = os.stat(self.filename)
self.start_datetime, self.end_datetime = None, None
self._events = []
self._events_by_baseclass = collections.defaultdict(list)
if events is not None:
for ev in events:
self.append(ev)
def __len__(self):
return len(self._events)
def __iter__(self):
return self._events.__iter__()
def __getitem__(self, slice):
return self._events[slice]
def __str__(self):
#has_colours = stream_has_colours(stream)
has_colours = True
lines = []
app = lines.append
app("Events found in %s\n" % self.filename)
for i, event in enumerate(self):
if has_colours:
app("[%d] %s" % (i+1, colored(event.header, color=event.color)))
app(indent(event.message, 4))
else:
app("[%d] %s" % (i+1, str(event)))
app("num_errors: %s, num_warnings: %s, num_comments: %s, completed: %s\n" % (
self.num_errors, self.num_warnings, self.num_comments, self.run_completed))
return "\n".join(lines)
def append(self, event):
"""Add an event to the list."""
self._events.append(event)
self._events_by_baseclass[event.baseclass].append(event)
def set_run_completed(self, boolean, start_datetime, end_datetime):
"""Set the value of _run_completed."""
self._run_completed = boolean
if (start_datetime, end_datetime) != (None, None):
# start_datetime: Sat Feb 28 23:54:27 2015
# end_datetime: Sat Feb 28 23:54:30 2015
try:
fmt = "%a %b %d %H:%M:%S %Y"
self.start_datetime = datetime.datetime.strptime(start_datetime, fmt)
self.end_datetime = datetime.datetime.strptime(end_datetime, fmt)
except Exception as exc:
# Maybe LOCALE != en_US
logger.warning(str(exc))
@property
def run_etime(self):
"""Wall-time of the run as `timedelta` object."""
if self.start_datetime is None or self.end_datetime is None:
return None
return self.end_datetime - self.start_datetime
@property
def run_completed(self):
"""True if the calculation terminated."""
try:
return self._run_completed
except AttributeError:
return False
@property
def comments(self):
"""List of comments found."""
return self.select(AbinitComment)
@property
def errors(self):
"""List of errors + bugs found."""
return self.select(AbinitError) + self.select(AbinitBug)
@property
def warnings(self):
"""List of warnings found."""
return self.select(AbinitWarning)
@property
def num_warnings(self):
"""Number of warnings reported."""
return len(self.warnings)
@property
def num_errors(self):
"""Number of errors reported."""
return len(self.errors)
@property
def num_comments(self):
"""Number of comments reported."""
return len(self.comments)
def select(self, base_class):
"""
Return the list of events that inherits from class base_class
"""
return self._events_by_baseclass[base_class]
def filter_types(self, event_types):
events = []
for ev in self:
if type(ev) in event_types: events.append(ev)
return self.__class__(filename=self.filename, events=events)
def get_events_of_type(self, event_class):
"""Return a list of events of the given class."""
return [ev for ev in self if type(ev) == event_class]
@pmg_serialize
def as_dict(self):
return dict(filename=self.filename, events=[e.as_dict() for e in self._events])
@classmethod
def from_dict(cls, d):
return cls(filename=d["filename"], events=[AbinitEvent.from_dict(e) for e in d["events"]])
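# A minimal usage sketch (hypothetical events): build a report by hand and
# inspect the tallies. The constructor stats the file, so the filename must
# point at an existing file (this module is used here only for that reason):
#
#   report = EventReport(__file__, events=[
#       AbinitComment(src_file="m_scf.F90", src_line=10, message="all good"),
#   ])
#   assert report.num_comments == 1 and not report.run_completed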
class EventsParserError(Exception):
"""Base class for the exceptions raised by :class:`EventsParser`."""
class EventsParser:
"""
Parses the output or the log file produced by ABINIT and extract the list of events.
"""
Error = EventsParserError
def parse(self, filename, verbose=0):
"""
Parse the given file. Return :class:`EventReport`.
"""
run_completed, start_datetime, end_datetime = False, None, None
filename = os.path.abspath(filename)
report = EventReport(filename)
w = WildCard("*Error|*Warning|*Comment|*Bug|*ERROR|*WARNING|*COMMENT|*BUG")
import warnings
warnings.simplefilter('ignore', yaml.error.UnsafeLoaderWarning)
with YamlTokenizer(filename) as tokens:
for doc in tokens:
if w.match(doc.tag):
#print("got doc.tag", doc.tag,"--")
try:
#print(doc.text)
event = yaml.load(doc.text) # Can't use ruamel safe_load!
#yaml.load(doc.text, Loader=ruamel.yaml.Loader)
#print(event.yaml_tag, type(event))
                except Exception:
                    #raise
                    # Wrong YAML doc. Check the doc tag and instantiate the proper event.
message = "Malformatted YAML document at line: %d\n" % doc.lineno
message += doc.text
# This call is very expensive when we have many exceptions due to malformatted YAML docs.
if verbose:
message += "Traceback:\n %s" % straceback()
if "error" in doc.tag.lower():
print("It seems an error. doc.tag:", doc.tag)
event = AbinitYamlError(message=message, src_file=__file__, src_line=0)
else:
event = AbinitYamlWarning(message=message, src_file=__file__, src_line=0)
event.lineno = doc.lineno
report.append(event)
# Check whether the calculation completed.
if doc.tag == "!FinalSummary":
#print(doc)
run_completed = True
d = doc.as_dict()
#print(d)
start_datetime, end_datetime = d["start_datetime"], d["end_datetime"]
report.set_run_completed(run_completed, start_datetime, end_datetime)
return report
def report_exception(self, filename, exc):
"""
This method is used when self.parser raises an Exception so that
we can report a customized :class:`EventReport` object with info the exception.
"""
# Build fake event.
event = AbinitError(src_file="Unknown", src_line=0, message=str(exc))
return EventReport(filename, events=[event])
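# Typical use of the parser (hypothetical filename): parse an ABINIT log and
# iterate over the extracted events:
#
#   report = EventsParser().parse("run.abo")
#   for event in report:
#       print(event.header)
#   if report.errors:
#       ...  # hand them to the matching EventHandler subclasses defined below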
class EventHandler(MSONable, metaclass=abc.ABCMeta):
"""
Abstract base class defining the interface for an EventHandler.
    The __init__ should always provide default values for its arguments so that we can
easily instantiate the handlers with:
handlers = [cls() for cls in get_event_handler_classes()]
    The default values should be chosen to cover the most typical cases.
Each EventHandler should define the class attribute `can_change_physics`
that is true if the handler changes `important` parameters of the
run that are tightly connected to the physics of the system.
    For example, an `EventHandler` that changes the value of `dilatmx` and
    prepares the restart is not changing the physics, and neither is a handler
    that changes the mixing algorithm. On the contrary, a handler that
    changes the value of the smearing is modifying an important physical
    parameter, and the user should be made aware of this so that
    there's an explicit agreement between the user and the code.
    The default handlers are those that do not change the physics;
    other handlers can be installed by the user when constructing the flow.
    TODO
.. warning::
The EventHandler should perform any action at the level of the input files
needed to solve the problem and then prepare the task for a new submission
The handler should never try to resubmit the task. The submission must be
delegated to the scheduler or Fireworks.
"""
event_class = AbinitEvent
"""AbinitEvent subclass associated to this handler."""
#can_change_physics
FIXED = 1
NOT_FIXED = 0
def __init__(self):
"""Simple init for compatibility with introspection in as_dict/from_dict"""
return super().__init__()
@classmethod
def cls2str(cls):
lines = []
app = lines.append
ecls = cls.event_class
app("event name = %s" % ecls.yaml_tag)
app("event documentation: ")
lines.extend(ecls.__doc__.split("\n"))
app("handler documentation: ")
lines.extend(cls.__doc__.split("\n"))
return "\n".join(lines)
def __str__(self):
return "<%s>" % self.__class__.__name__
def can_handle(self, event):
"""True if this handler is associated to the given :class:`AbinitEvent`"""
return self.event_class == event.__class__
# TODO: defined CorrectionRecord object and provide helper functions to build it
def count(self, task):
"""
Return the number of times the event associated to this handler
        has already been fixed in the :class:`Task`.
        """
        return len([c for c in task.corrections if c["event"]["@class"] == self.event_class.__name__])
@abc.abstractmethod
def handle_task_event(self, task, event):
"""
Method to handle Abinit events.
Args:
task: :class:`Task` object.
event: :class:`AbinitEvent` found in the log file.
Return:
0 if no action has been applied, 1 if the problem has been fixed.
"""
@pmg_serialize
def as_dict(self):
"""
Basic implementation of as_dict if __init__ has no arguments. Subclasses may need to overwrite.
"""
d = {}
return d
@classmethod
def from_dict(cls, d):
"""
Basic implementation of from_dict if __init__ has no arguments. Subclasses may need to overwrite.
"""
return cls()
@classmethod
def compare_inputs(cls, new_input, old_input):
def vars_dict(d):
"""
make a simple dictionary and convert numpy arrays to lists
"""
new_d = {}
for key, value in d.items():
if isinstance(value, np.ndarray): value = value.tolist()
new_d[key] = value
return new_d
new_vars = vars_dict(new_input)
old_vars = vars_dict(old_input)
new_keys = set(new_vars.keys())
old_keys = set(old_vars.keys())
intersect = new_keys.intersection(old_keys)
added_keys = new_keys - intersect
removed_keys = old_keys - intersect
changed_keys = set(v for v in intersect if new_vars[v] != old_vars[v])
log_diff = {}
if added_keys:
log_diff['_set'] = {k: new_vars[k] for k in added_keys}
if changed_keys:
log_diff['_update'] = ({k: {'new': new_vars[k], 'old': old_vars[k]} for k in changed_keys})
if new_input.structure != old_input.structure:
log_diff['_change_structure'] = new_input.structure.as_dict()
if removed_keys:
log_diff['_pop'] = {k: old_vars[k] for k in removed_keys}
return log_diff
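# A hedged sketch of the diff format produced by compare_inputs, using a stub
# input (hypothetical; real inputs are AbinitInput objects, the stub mimics only
# the dict-style items() and the .structure attribute that compare_inputs reads):
#
#   class _StubInput(dict):
#       structure = None  # identical structures -> no '_change_structure' entry
#   old = _StubInput(ecut=10, tolsym=1e-8)
#   new = _StubInput(ecut=12, nband=8)
#   EventHandler.compare_inputs(new, old)
#   # -> {'_set': {'nband': 8},
#   #     '_update': {'ecut': {'new': 12, 'old': 10}},
#   #     '_pop': {'tolsym': 1e-08}}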
class Correction(MSONable):
def __init__(self, handler, actions, event, reset=False):
self.handler = handler
self.actions = actions
self.event = event
self.reset = reset
@pmg_serialize
def as_dict(self):
return dict(handler=self.handler.as_dict(), actions=self.actions, event=self.event.as_dict(), reset=self.reset)
@classmethod
def from_dict(cls, d):
dec = MontyDecoder()
return cls(handler=dec.process_decoded(d['handler']), actions=d['actions'],
event=dec.process_decoded(d['event']), reset=d['reset'])
#class WarningHandler(EventHandler):
# """Base class for handlers associated to ABINIT warnings."""
# event_class = AbinitWarning
#
#class BugHandler(EventHandler):
# """Base class for handlers associated to ABINIT bugs."""
# event_class = AbinitBug
class ErrorHandler(EventHandler):
"""Base class for handlers associated to ABINIT errors."""
event_class = AbinitError
_ABC_EVHANDLER_CLASSES = set([ErrorHandler,])
# Public API
def autodoc_event_handlers(stream=sys.stdout):
"""
    Print to the given stream the documentation for the events
    and the associated handlers.
"""
lines = []
for cls in all_subclasses(EventHandler):
if cls in _ABC_EVHANDLER_CLASSES: continue
event_class = cls.event_class
lines.extend(cls.cls2str().split("\n"))
# Here we enforce the abstract protocol of the class
# The unit test in tests_events will detect the problem.
if not hasattr(cls, "can_change_physics"):
raise RuntimeError("%s: can_change_physics must be defined" % cls)
stream.write("\n".join(lines) + "\n")
def get_event_handler_classes(categories=None):
"""Return the list of handler classes."""
classes = [c for c in all_subclasses(EventHandler) if c not in _ABC_EVHANDLER_CLASSES]
return classes
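# A wiring sketch grounded in the class docstring above: instantiate every
# default handler and dispatch an event to the one that can handle it (`task`
# and `event` are hypothetical objects here):
#
#   handlers = [cls() for cls in get_event_handler_classes()]
#   for h in handlers:
#       if h.can_handle(event):
#           status = h.handle_task_event(task, event)  # FIXED or NOT_FIXED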
def as_event_class(obj):
"""
Convert obj into a subclass of AbinitEvent.
obj can be either a class or a string with the class name or the YAML tag
"""
if is_string(obj):
for c in all_subclasses(AbinitEvent):
if c.__name__ == obj or c.yaml_tag == obj: return c
raise ValueError("Cannot find event class associated to %s" % obj)
# Assume class.
assert obj in all_subclasses(AbinitEvent)
return obj
############################################
########## Concrete classes ################
############################################
class DilatmxError(AbinitError):
"""
This Error occurs in variable cell calculations when the increase in the
unit cell volume is too large.
"""
yaml_tag = '!DilatmxError'
class DilatmxErrorHandler(ErrorHandler):
"""
    Handle DilatmxError. Abinit produces a netcdf file with the last structure before aborting.
    The handler replaces the structure in the input with that last configuration and modifies the value of dilatmx.
"""
event_class = DilatmxError
can_change_physics = False
def __init__(self, max_dilatmx=1.3):
self.max_dilatmx = max_dilatmx
@pmg_serialize
def as_dict(self):
return {'max_dilatmx': self.max_dilatmx}
@classmethod
def from_dict(cls, d):
return cls(max_dilatmx=d['max_dilatmx'])
def handle_task_event(self, task, event):
# Read the last structure dumped by ABINIT before aborting.
filepath = task.outdir.has_abiext("DILATMX_STRUCT.nc")
last_structure = Structure.from_file(filepath)
task._change_structure(last_structure)
#read the suggested dilatmx
# new_dilatmx = 1.05
# if new_dilatmx > self.max_dilatmx:
# msg = "Suggested dilatmx ({}) exceeds maximux configured value ({}).".format(new_dilatmx, self.max_dilatmx)
# return self.NOT_FIXED
# task.strategy.abinit_input.set_vars(dilatmx=new_dilatmx)
msg = "Take last structure from DILATMX_STRUCT.nc, will try to restart with dilatmx %s" % task.get_inpvar("dilatmx")
task.log_correction(event, msg)
# Note that we change the structure but we don't try restart from the previous WFK|DEN file
# because Abinit called mpi_abort and therefore no final WFK|DEN file has been produced.
return self.FIXED
def handle_input_event(self, abiinput, outdir, event):
try:
old_abiinput = abiinput.deepcopy()
# Read the last structure dumped by ABINIT before aborting.
filepath = outdir.has_abiext("DILATMX_STRUCT.nc")
last_structure = Structure.from_file(filepath)
abiinput.set_structure(last_structure)
#FIXME restart from DEN files not always working with interpolation
return Correction(self, self.compare_inputs(abiinput, old_abiinput), event, reset=True)
# return Correction(self, self.compare_inputs(abiinput, old_abiinput), event, event=False)
except Exception as exc:
            logger.warning('Error while trying to apply the handler %s: %s' % (self, exc))
return None
class TolSymError(AbinitError):
"""
Class of errors raised by Abinit when it cannot detect the symmetries of the system.
The handler assumes the structure makes sense and the error is just due to numerical inaccuracies.
    We increase the value of tolsym in the input file (default 1e-8) so that Abinit can find the space group
and re-symmetrize the input structure.
"""
yaml_tag = '!TolSymError'
class TolSymErrorHandler(ErrorHandler):
"""
Increase the value of tolsym in the input file.
"""
event_class = TolSymError
can_change_physics = False
def __init__(self, max_nfixes=3):
self.max_nfixes = max_nfixes
@pmg_serialize
def as_dict(self):
return {'max_nfixes': self.max_nfixes}
@classmethod
def from_dict(cls, d):
return cls(max_nfixes=d['max_nfixes'])
def handle_task_event(self, task, event):
# TODO: Add limit on the number of fixes one can do for the same error
# For example in this case, the scheduler will stop after 20 submissions
if self.count(task) > self.max_nfixes:
return self.NOT_FIXED
old_tolsym = task.get_inpvar("tolsym")
new_tolsym = 1e-6 if old_tolsym is None else old_tolsym * 10
task.set_vars(tolsym=new_tolsym)
task.log_correction(event, "Increasing tolsym from %s to %s" % (old_tolsym, new_tolsym))
return self.FIXED
def handle_input_event(self, abiinput, outdir, event):
try:
old_abiinput = abiinput.deepcopy()
old_tolsym = abiinput["tolsym"]
new_tolsym = 1e-6 if old_tolsym is None else old_tolsym * 10
abiinput.set_vars(tolsym=new_tolsym)
return Correction(self, self.compare_inputs(abiinput, old_abiinput), event, reset=False)
except Exception as exc:
            logger.warning('Error while trying to apply the handler %s: %s' % (self, exc))
return None
class MemanaError(AbinitError):
"""
Class of errors raised by the memory analyzer.
(the section that estimates the memory requirements from the input parameters).
"""
yaml_tag = '!MemanaError'
class MemanaErrorHandler(ErrorHandler):
"""
Set mem_test to 0 to bypass the memory check.
"""
event_class = MemanaError
can_change_physics = False
def handle_task_event(self, task, event):
task.set_vars(mem_test=0)
task.log_correction(event, "Find MemanaError. Setting mem_test to 0 in input file.")
return self.FIXED
def handle_input_event(self, abiinput, outdir, event):
try:
old_abiinput = abiinput.deepcopy()
abiinput.set_vars(mem_test=0)
return Correction(self, self.compare_inputs(abiinput, old_abiinput), event, reset=False)
except Exception as exc:
            logger.warning('Error while trying to apply the handler %s: %s' % (self, exc))
return None
class MemoryError(AbinitError):
"""
    This error occurs when a checked allocation fails in Abinit.
    The only way to go is to increase the memory available to the job.
"""
yaml_tag = '!MemoryError'
class MemoryErrorHandler(ErrorHandler):
"""
    Handle MemoryError. Increase the resource requirements.
"""
event_class = MemoryError
can_change_physics = False
def handle_task_event(self, task, event):
task.manager.increase_resources()
return self.FIXED
def handle_input_event(self, abiinput, outdir, event):
"""
Shouldn't do anything on the input
"""
return None
| dongsenfo/pymatgen | pymatgen/io/abinit/events.py | Python | mit | 28,778 | ["ABINIT", "NetCDF", "pymatgen"] | ecce0cbcaee6c9e573c630db008db8b02d290b6793fe5790ca166b716f93f4f4 |
import numpy as np
import scipy.stats as ss
import scipy.special as sp
from .family import Family
from .flat import Flat
from .gas_recursions import gas_recursion_normal_orderone, gas_recursion_normal_ordertwo
from .gas_recursions import gasx_recursion_normal_orderone, gasx_recursion_normal_ordertwo
from .gas_recursions import gas_llev_recursion_normal_orderone, gas_llev_recursion_normal_ordertwo
from .gas_recursions import gas_llt_recursion_normal_orderone, gas_llt_recursion_normal_ordertwo
from .gas_recursions import gas_reg_recursion_normal_orderone, gas_reg_recursion_normal_ordertwo
class Normal(Family):
"""
Normal Distribution
----
This class contains methods relating to the normal distribution for time series.
"""
def __init__(self, mu=0.0, sigma=1.0, transform=None, **kwargs):
"""
Parameters
----------
mu : float
Mean parameter for the Normal distribution
sigma : float
Standard deviation for the Normal distribution
transform : str
            The transformation to apply to the location latent variable, e.g. 'exp' or 'logit' (None applies no transformation)
"""
super(Normal, self).__init__(transform)
self.mu0 = mu
self.sigma0 = sigma
self.param_no = 2
self.covariance_prior = False
self.gradient_only = kwargs.get('gradient_only', False) # used for GAS Normal models
if self.gradient_only is True:
self.score_function = self.first_order_score
else:
self.score_function = self.second_order_score
def approximating_model(self, beta, T, Z, R, Q, h_approx, data):
""" Creates approximating Gaussian state space model for the Normal measurement density
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
T, Z, R, Q : np.array
State space matrices used in KFS algorithm
h_approx : float
The variance of the measurement density
data: np.array
The univariate time series data
Returns
----------
H : np.array
Approximating measurement variance matrix
mu : np.array
Approximating measurement constants
"""
H = np.ones(data.shape[0])*h_approx
mu = np.zeros(data.shape[0])
return H, mu
def approximating_model_reg(self, beta, T, Z, R, Q, h_approx, data, X, state_no):
""" Creates approximating Gaussian state space model for the Normal measurement density
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
T, Z, R, Q : np.array
State space matrices used in KFS algorithm
h_approx : float
The variance of the measurement density
data: np.array
The univariate time series data
X: np.array
The regressors
state_no : int
Number of states
Returns
----------
H : np.array
Approximating measurement variance matrix
mu : np.array
Approximating measurement constants
"""
H = np.ones(data.shape[0])*h_approx
mu = np.zeros(data.shape[0])
return H, mu
@staticmethod
def build_latent_variables():
""" Builds additional latent variables for this family in a probabilistic model
Returns
----------
- A list of lists (each sub-list contains latent variable information)
"""
lvs_to_build = []
lvs_to_build.append(['Normal Scale', Flat(transform='exp'), Normal(0, 3), 0.0])
return lvs_to_build
@staticmethod
def draw_variable(loc, scale, shape, skewness, nsims):
""" Draws random variables from this distribution with new latent variables
Parameters
----------
loc : float
location parameter for the distribution
scale : float
scale parameter for the distribution
shape : float
tail thickness parameter for the distribution
skewness : float
skewness parameter for the distribution
nsims : int or list
number of draws to take from the distribution
Returns
----------
- Random draws from the distribution
"""
return np.random.normal(loc, scale, nsims)
def draw_variable_local(self, size):
""" Simulate from the Normal distribution using instance values
Parameters
----------
size : int
How many simulations to perform
Returns
----------
np.ndarray of Normal random variable
"""
return ss.norm.rvs(loc=self.mu0, scale=self.sigma0, size=size)
@staticmethod
def first_order_score(y, mean, scale, shape, skewness):
""" GAS Normal Update term using gradient only - native Python function
Parameters
----------
y : float
datapoint for the time series
mean : float
location parameter for the Normal distribution
scale : float
scale parameter for the Normal distribution
shape : float
tail thickness parameter for the Normal distribution
skewness : float
skewness parameter for the Normal distribution
Returns
----------
- Score of the Normal family
"""
return (y-mean)/np.power(scale,2)
def logpdf(self, mu):
"""
Log PDF for Normal prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- log(p(mu)), up to an additive constant (the normalizing term is omitted)
"""
if self.transform is not None:
mu = self.transform(mu)
return -np.log(float(self.sigma0)) - (0.5*(mu-self.mu0)**2)/float(self.sigma0**2)
@staticmethod
def markov_blanket(y, mean, scale, shape, skewness):
""" Markov blanket for each likelihood term - used for state space models
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Normal distribution
scale : float
scale parameter for the Normal distribution
shape : float
tail thickness parameter for the Normal distribution
skewness : float
skewness parameter for the Normal distribution
Returns
----------
- Markov blanket of the Normal family
"""
return ss.norm.logpdf(y, loc=mean, scale=scale)
@staticmethod
def setup():
""" Returns the attributes of this family if using in a probabilistic model
Notes
----------
- scale notes whether family has a variance parameter (sigma)
- shape notes whether family has a tail thickness parameter (nu)
- skewness notes whether family has a skewness parameter (gamma)
- mean_transform is a function which transforms the location parameter
- cythonized notes whether the family has cythonized routines
Returns
----------
- model name, link function, scale, shape, skewness, mean_transform, cythonized
"""
name = "Normal"
link = np.array
scale = True
shape = False
skewness = False
mean_transform = np.array
cythonized = True
return name, link, scale, shape, skewness, mean_transform, cythonized
@staticmethod
def neg_loglikelihood(y, mean, scale, shape, skewness):
""" Negative loglikelihood function for this distribution
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Normal distribution
scale : float
scale parameter for the Normal distribution
shape : float
tail thickness parameter for the Normal distribution
skewness : float
skewness parameter for the Normal distribution
Returns
----------
- Negative loglikelihood of the Normal family
"""
return -np.sum(ss.norm.logpdf(y, loc=mean, scale=scale))
def pdf(self, mu):
"""
PDF for Normal prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- p(mu), up to the 1/sqrt(2*pi) normalizing constant
"""
if self.transform is not None:
mu = self.transform(mu)
return (1.0/float(self.sigma0))*np.exp(-(0.5*(mu-self.mu0)**2)/float(self.sigma0**2))
@staticmethod
def reg_score_function(X, y, mean, scale, shape, skewness):
""" GAS Normal Regression Update term using gradient only - native Python function
Parameters
----------
X : float
datapoint for the right hand side variable
y : float
datapoint for the time series
mean : float
location parameter for the Normal distribution
scale : float
scale parameter for the Normal distribution
shape : float
tail thickness parameter for the Normal distribution
skewness : float
skewness parameter for the Normal distribution
Returns
----------
- Score of the Normal family
"""
return X*(y-mean)
@staticmethod
def second_order_score(y, mean, scale, shape, skewness):
""" GAS Normal Update term potentially using second-order information - native Python function
Parameters
----------
y : float
datapoint for the time series
mean : float
location parameter for the Normal distribution
scale : float
scale parameter for the Normal distribution
shape : float
tail thickness parameter for the Normal distribution
skewness : float
skewness parameter for the Normal distribution
Returns
----------
- Adjusted score of the Normal family
"""
return y-mean
def vi_change_param(self, index, value):
""" Wrapper function for changing latent variables - variational inference
Parameters
----------
index : int
0 or 1 depending on which latent variable
value : float
What to change the latent variable to
"""
if index == 0:
self.mu0 = value
elif index == 1:
self.sigma0 = np.exp(value)
def vi_return_param(self, index):
""" Wrapper function for selecting appropriate latent variable for variational inference
Parameters
----------
index : int
0 or 1 depending on which latent variable
Returns
----------
The appropriate indexed parameter
"""
if index == 0:
return self.mu0
elif index == 1:
return np.log(self.sigma0)
def vi_loc_score(self,x):
""" The gradient of the location latent variable mu - used for variational inference
Parameters
----------
x : float
A random variable
Returns
----------
The gradient of the location latent variable mu at x
"""
return (x-self.mu0)/(self.sigma0**2)
def vi_scale_score(self,x):
""" The score of the scale, where scale = exp(x) - used for variational inference
Parameters
----------
x : float
A random variable
Returns
----------
The gradient of the scale latent variable at x
"""
return np.exp(-2.0*np.log(self.sigma0))*(x-self.mu0)**2 - 1.0
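# Note (hedged): the expression above equals d/d(log sigma) of the Normal
# log-density, ((x - self.mu0)**2 / self.sigma0**2) - 1.0, rewritten using
# exp(-2*log(sigma)) = 1/sigma**2.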
def vi_score(self, x, index):
""" Wrapper function for selecting appropriate score
Parameters
----------
x : float
A random variable
index : int
0 or 1 depending on which latent variable
Returns
----------
The gradient of the scale latent variable at x
"""
if index == 0:
return self.vi_loc_score(x)
elif index == 1:
return self.vi_scale_score(x)
# Optional Cythonized recursions below for GAS Normal models
@staticmethod
def gradient_recursion():
""" GAS Normal Model Recursion - gradient only
Returns
----------
- Recursion function for GAS Normal model - gradient only
"""
return gas_recursion_normal_orderone
@staticmethod
def newton_recursion():
""" GAS Normal Model Recursion - adjusted score
Returns
----------
- Recursion function for GAS Normal model - adjusted score
"""
return gas_recursion_normal_ordertwo
@staticmethod
def gradientx_recursion():
""" GASX Normal Model Recursion - gradient only
Returns
----------
- Recursion function for GASX Normal model - gradient only
"""
return gasx_recursion_normal_orderone
@staticmethod
def newtonx_recursion():
""" GASX Normal Model Recursion - adjusted score
Returns
----------
- Recursion function for GASX Normal model - adjusted score
"""
return gasx_recursion_normal_ordertwo
@staticmethod
def gradientllev_recursion():
""" GAS Local Level Normal Model Recursion - gradient only
Returns
----------
- Recursion function for GAS Local Level Normal model - gradient only
"""
return gas_llev_recursion_normal_orderone
@staticmethod
def newtonllev_recursion():
""" GAS Local Level Normal Model Recursion - adjusted score
Returns
----------
- Recursion function for GAS Local Level Normal model - adjusted score
"""
return gas_llev_recursion_normal_ordertwo
@staticmethod
def gradientllt_recursion():
""" GAS Local Linear Trend Normal Model Recursion - gradient only
Returns
----------
- Recursion function for GAS Local Linear Trend Normal model - gradient only
"""
return gas_llt_recursion_normal_orderone
@staticmethod
def newtonllt_recursion():
""" GAS Local Linear Trend Normal Model Recursion - adjusted score
Returns
----------
- Recursion function for GAS Local Linear Trend Normal model - adjusted score
"""
return gas_llt_recursion_normal_ordertwo
@staticmethod
def gradientreg_recursion():
""" GAS Dynamic Regression Normal Model Recursion - gradient only
Returns
----------
- Recursion function for GAS Dynamic Regression Normal model - gradient only
"""
return gas_reg_recursion_normal_orderone
@staticmethod
def newtonreg_recursion():
""" GAS Dynamic Regression Normal Model Recursion - adjusted score
Returns
----------
- Recursion function for GAS Dynamic Regression Normal model - adjusted score
"""
return gas_reg_recursion_normal_ordertwo
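# --- Illustrative sanity check (not part of the original module) ---
# A minimal sketch, assuming only the numpy/scipy imports above: the
# first-order score in Normal.first_order_score is the derivative of the
# Normal log-density with respect to the location,
# d/dmu log N(y | mu, scale) = (y - mu) / scale**2.
# Verified here by a central finite difference with illustrative step `eps`.
def _check_first_order_score(y=1.3, mean=0.4, scale=2.0, eps=1e-6):
    analytic = Normal.first_order_score(y, mean, scale, None, None)
    numeric = (ss.norm.logpdf(y, loc=mean + eps, scale=scale)
               - ss.norm.logpdf(y, loc=mean - eps, scale=scale)) / (2.0 * eps)
    return np.isclose(analytic, numeric)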
|
RJT1990/pyflux
|
pyflux/families/normal.py
|
Python
|
bsd-3-clause
| 15,629
|
[
"Gaussian"
] |
7c4b8d8563e8adbe888ade37e34be1d2ff125bc894afac11b5964fc4238839f0
|
"""This module defines an ASE interface to SIESTA.
http://www.uam.es/departamentos/ciencias/fismateriac/siesta
"""
from __future__ import print_function
import os
from os.path import join, isfile, islink, getmtime
from cmath import exp
import array
import numpy as np
from ase.data import chemical_symbols
from ase.units import Rydberg, fs
from ase.io.siesta import read_rho, read_fdf, read_struct
from ase.io.cube import read_cube_data
class Siesta:
"""Class for doing SIESTA calculations.
The default parameters are very close to those that the SIESTA
Fortran code would use. These are the exceptions::
calc = Siesta(label='siesta', xc='LDA', pulay=5, mix=0.1)
Use the set_fdf method to set extra FDF parameters::
calc.set_fdf('PAO.EnergyShift', 0.01 * Rydberg)
"""
def __init__(self, label='siesta', xc='LDA', kpts=None, nbands=None,
width=None, meshcutoff=None, charge=None,
pulay=5, mix=0.1, maxiter=120,
basis=None, ghosts=[],
write_fdf=True):
"""Construct SIESTA-calculator object.
Parameters
==========
label: str
Prefix to use for filenames (label.fdf, label.txt, ...).
Default is 'siesta'.
xc: str
Exchange-correlation functional. Must be one of LDA, PBE,
revPBE, RPBE.
kpts: list of three int
Monkhorst-Pack sampling.
nbands: int
Number of bands.
width: float
Fermi-distribution width in eV.
meshcutoff: float
Cutoff energy in eV for grid.
charge: float
Total charge of the system.
pulay: int
Number of old densities to use for Pulay mixing.
mix: float
Mixing parameter between zero and one for density mixing.
write_fdf: bool
Use write_fdf=False to use your own fdf-file.
Examples
========
Use default values:
>>> h = Atoms('H', calculator=Siesta())
>>> h.center(vacuum=3.0)
>>> e = h.get_potential_energy()
"""
self.name = 'Siesta'
self.label = label
self.xc = xc
self.kpts = kpts
self.nbands = nbands
self.width = width
self.meshcutoff = meshcutoff
self.charge = charge
self.pulay = pulay
self.mix = mix
self.maxiter = maxiter
self.basis = basis
self.ghosts = ghosts
self.write_fdf_file = write_fdf
self.converged = False
self.fdf = {}
self.e_fermi = None
def update(self, atoms):
if (not self.converged or
len(self.numbers) != len(atoms) or
(self.numbers != atoms.get_atomic_numbers()).any()):
self.initialize(atoms)
self.calculate(atoms)
elif ((self.positions != atoms.get_positions()).any() or
(self.pbc != atoms.get_pbc()).any() or
(self.cell != atoms.get_cell()).any()):
self.calculate(atoms)
def initialize(self, atoms):
self.numbers = atoms.get_atomic_numbers().copy()
self.species = []
for a, Z in enumerate(self.numbers):
if a in self.ghosts:
Z = -Z
if Z not in self.species:
self.species.append(Z)
if 'SIESTA_PP_PATH' in os.environ:
pppaths = os.environ['SIESTA_PP_PATH'].split(':')
else:
pppaths = []
for Z in self.species:
symbol = chemical_symbols[abs(Z)]
name = symbol + '.vps'
name1 = symbol + '.psf'
found = False
for path in pppaths:
filename = join(path, name)
filename1 = join(path, name1)
if isfile(filename) or islink(filename):
found = True
if path != '.':
if islink(name) or isfile(name):
os.remove(name)
os.symlink(filename, name)
elif isfile(filename1) or islink(filename1):
found = True
if path != '.':
if islink(name1) or isfile(name1):
os.remove(name1)
os.symlink(filename1, name1)
if not found:
raise RuntimeError('No pseudopotential for %s!' % symbol)
self.converged = False
def get_potential_energy(self, atoms, force_consistent=False):
self.update(atoms)
if force_consistent:
return self.efree
else:
# Energy extrapolated to zero Kelvin:
return (self.etotal + self.efree) / 2
def get_forces(self, atoms):
self.update(atoms)
return self.forces.copy()
def get_stress(self, atoms):
self.update(atoms)
return self.stress.copy()
def get_dipole_moment(self, atoms):
"""Returns total dipole moment of the system."""
self.update(atoms)
return self.dipole
def read_dipole(self):
dipolemoment = np.zeros([1, 3])
for line in open(self.label + '.txt', 'r'):
if line.rfind('Electric dipole (Debye)') > -1:
dipolemoment = np.array([float(f) for f in line.split()[5:8]])
#debye to e*Ang (the units of VASP)
dipolemoment = dipolemoment*0.2081943482534
return dipolemoment
def get_pseudo_density(self, spin=None, pad=True):
"""Return pseudo-density array.
If *spin* is not given, then the total density is returned.
Otherwise, the spin up or down density is returned (spin=0 or 1).
"""
filename = self.label + '.RHO'
if not isfile(filename):
raise RuntimeError('Could not find rho-file (make sure to add fdf-option '
'"SaveRho=True" to your calculation)')
rho = read_rho(filename)
if spin is None:
return rho.sum(axis=3)
elif rho.shape[3] != 2:
raise RuntimeError('Explicit spin-value requested. '
'Only total density is available.')
elif spin == 0 or spin == 1:
return rho[:, :, :, spin]
else:
raise RuntimeError('Invalid spin-value requested. '
'Expected 0 or 1, got %s' % spin)
def get_pseudo_wave_function(self, band=0, kpt=0, spin=None):
"""Return pseudo-wave-function array.
The method is limited to the gamma point, and is implemented
as a wrapper to denchar (a tool shipped with siesta);
denchar must be available in the command path.
When retrieving a p_w_f from a non-spin-polarized calculation,
spin must be None (default), and for spin-polarized
calculations, spin must be set to either 0 (up) or 1 (down).
As long as the necessary files are present and named
correctly, old p_w_fs can be read provided the
calculator label is set. E.g.
>>> c = Siesta(label='name_of_old_calculation')
>>> pwf = c.get_pseudo_wave_function()
The broadcast and pad options are not implemented.
"""
# Not implemented: kpt=0, broadcast=True, pad=True
# kpoint must be Gamma
assert kpt == 0, \
"siesta.get_pseudo_wave_function is unfortunately limited " \
"to the gamma point only. kpt must be 0."
# In denchar, band numbering starts from 1
assert isinstance(band, int) and band >= 0
band = band+1
if spin is None:
spin_name = ""
elif spin == 0:
spin_name = ".UP"
elif spin == 1:
spin_name = ".DOWN"
label = self.label
# If <label>.WF<band>.cube already exist and is newer than <label>.fdf,
# just return it
fn_wf = label+('.WF%i%s.cube'%(band,spin_name))
fn_fdf = label+'.fdf'
if isfile(fn_wf) and isfile(fn_fdf) and (getmtime(fn_wf) > getmtime(fn_fdf)):
x, _ = read_cube_data(fn_wf)
return x
if not isfile(fn_fdf):
raise RuntimeError('Could not find the fdf-file. It is required as '
'part of the input for denchar.')
fdf_mtime = getmtime(fn_fdf)
for suf in ['.WFS', '.PLD', '.DM', '.DIM']:
if not isfile(label+suf):
raise RuntimeError('Could not find file "%s%s" which is required '
'when extracting wave functions '
'(make sure the fdf option "WriteDenchar" is '
'True, and WaveFuncKpoints is [0.0 0.0 0.0])' %
(label, suf))
if not getmtime(label+suf) > fdf_mtime:
# This should be handled in a better way, e.g. by implementing
# a "calculation_required() and calculate()"
raise RuntimeError('The calculation is not up to date.')
# Simply read the old fdf-file and pick some meta info from there.
# However, strictly speaking it's not always necessary
fdf = read_fdf(fn_fdf)
if 'latticeconstant' in fdf:
const = float(fdf['latticeconstant'][0])
unit = fdf['latticeconstant'][1]
else:
const = 1.0
unit = 'Ang'
if 'latticevectors' in fdf:
cell = np.array(fdf['latticevectors'], dtype='d')
else:
raise RuntimeError('Failed to find the lattice vectors in the fdf-file.')
if 'spinpolarized' in fdf and \
fdf['spinpolarized'][0].lower() in ['yes', 'true', '.true.', 't', '']:
if spin is None:
raise RuntimeError('The calculation was spin polarized, pick either '
'spin=0 or 1.')
else:
if spin is not None:
raise RuntimeError('The calculation was not spin polarized, '
'spin argument must be None.')
denc_fdf = open(fn_fdf).readlines()
denc_fdf.append('Denchar.TypeOfRun 3D\n')
denc_fdf.append('Denchar.PlotWaveFunctions T\n')
for dim, dir in zip(cell.transpose(), ['X', 'Y', 'Z']):
# Naive square box limits to denchar
denc_fdf.append('Denchar.Min%s %f %s\n' % (dir, const*dim.min(), unit))
denc_fdf.append('Denchar.Max%s %f %s\n' % (dir, const*dim.max(), unit))
# denchar rewinds stdin and fails if stdin is a pipe
denc_fdf_file = open(label+'.denchar.fdf', 'w')
denc_fdf_file.write(''.join(denc_fdf))
denc_fdf_file.close()
try:
from subprocess import Popen, PIPE
p = Popen('denchar', shell=True, stdin=open(label+'.denchar.fdf'),
stdout=PIPE, stderr=PIPE, close_fds=True)
exitcode = p.wait()
except ImportError:
raise RuntimeError('get_pseudo_wave_function implemented only with subprocess.')
if exitcode == 0:
if not isfile(fn_wf):
raise RuntimeError('Could not find the requested file (%s)'%fn_wf)
x, _ = read_cube_data(fn_wf)
return x
elif exitcode == 127:
raise RuntimeError('No denchar executable found. Make sure it is in the path.')
else:
import sys
print(''.join(p.stderr.readlines()), file=sys.stderr)
raise RuntimeError('Execution of denchar failed!')
def calculate(self, atoms):
self.positions = atoms.get_positions().copy()
self.cell = atoms.get_cell().copy()
self.pbc = atoms.get_pbc().copy()
if self.write_fdf_file:
self.write_fdf(atoms)
siesta = os.environ['SIESTA_SCRIPT']
locals = {'label': self.label}
exec(compile(open(siesta).read(), siesta, 'exec'), {}, locals)
exitcode = locals['exitcode']
if exitcode != 0:
raise RuntimeError(('Siesta exited with exit code: %d. ' +
'Check %s.txt for more information.') %
(exitcode, self.label))
self.dipole = self.read_dipole()
self.read()
atoms_structout = read_struct('%s.STRUCT_OUT' % self.label)
atoms.cell = atoms_structout.cell
atoms.positions = atoms_structout.positions
self.converged = True
def set_fdf(self, key, value):
"""Set FDF parameter."""
self.fdf[key] = value
def write_fdf(self, atoms):
"""Write input parameters to fdf-file."""
fh = open(self.label + '.fdf', 'w')
fdf = {
'SystemLabel': self.label,
'AtomicCoordinatesFormat': 'Ang',
'LatticeConstant': 1.0,
'NumberOfAtoms': len(atoms),
'MeshCutoff': self.meshcutoff,
'NetCharge': self.charge,
'ElectronicTemperature': self.width,
'NumberOfEigenStates': self.nbands,
'DM.UseSaveDM': self.converged,
'PAO.BasisSize': self.basis,
'SolutionMethod': 'diagon',
'DM.NumberPulay': self.pulay,
'DM.MixingWeight': self.mix,
'MaxSCFIterations': self.maxiter
}
if self.xc != 'LDA':
fdf['xc.functional'] = 'GGA'
fdf['xc.authors'] = self.xc
magmoms = atoms.get_initial_magnetic_moments()
if magmoms.any():
fdf['SpinPolarized'] = True
fh.write('%block InitSpin\n')
for n, M in enumerate(magmoms):
if M != 0:
fh.write('%d %.14f\n' % (n + 1, M))
fh.write('%endblock InitSpin\n')
fdf['Number_of_species'] = len(self.species)
fdf.update(self.fdf)
for key, value in fdf.items():
if value is None:
continue
if isinstance(value, list):
fh.write('%%block %s\n' % key)
for line in value:
fh.write(line + '\n')
fh.write('%%endblock %s\n' % key)
else:
unit = keys_with_units.get(fdfify(key))
if unit is None:
fh.write('%s %s\n' % (key, value))
else:
if 'fs**2' in unit:
value /= fs**2
elif 'fs' in unit:
value /= fs
fh.write('%s %f %s\n' % (key, value, unit))
fh.write('%block LatticeVectors\n')
for v in self.cell:
fh.write('%.14f %.14f %.14f\n' % tuple(v))
fh.write('%endblock LatticeVectors\n')
fh.write('%block Chemical_Species_label\n')
for n, Z in enumerate(self.species):
fh.write('%d %s %s\n' % (n + 1, Z, chemical_symbols[abs(Z)]))
fh.write('%endblock Chemical_Species_label\n')
fh.write('%block AtomicCoordinatesAndAtomicSpecies\n')
a = 0
for pos, Z in zip(self.positions, self.numbers):
if a in self.ghosts:
Z = -Z
a += 1
fh.write('%.14f %.14f %.14f' % tuple(pos))
fh.write(' %d\n' % (self.species.index(Z) + 1))
fh.write('%endblock AtomicCoordinatesAndAtomicSpecies\n')
if self.kpts is not None:
fh.write('%block kgrid_Monkhorst_Pack\n')
for i in range(3):
for j in range(3):
if i == j:
fh.write('%d ' % self.kpts[i])
else:
fh.write('0 ')
fh.write('%.1f\n' % (((self.kpts[i] + 1) % 2) * 0.5))
fh.write('%endblock kgrid_Monkhorst_Pack\n')
fh.close()
def read(self):
"""Read results from SIESTA's text-output file."""
text = open(self.label + '.txt', 'r').read().lower()
assert 'error' not in text
lines = iter(text.split('\n'))
# Get the number of grid points used:
for line in lines:
if line.startswith('initmesh: mesh ='):
self.grid = [int(word) for word in line.split()[3:8:2]]
break
# Stress (fixed so it's compatible with a MD run from siesta):
for line in lines:
if line.startswith('siesta: stress tensor '):
stress = np.empty((3, 3))
for i in range(3):
tmp = next(lines).split()
if len(tmp) == 4:
stress[i] = [float(word) for word in tmp[1:]]
else:
stress[i] = [float(word) for word in tmp]
self.stress = np.array(
[stress[0, 0], stress[1, 1], stress[2, 2],
stress[1, 2], stress[0, 2], stress[0, 1]])
break
else:
raise RuntimeError
text = open(self.label + '.txt', 'r').read().lower()
lines = iter(text.split('\n'))
# Energy (again a fix to make it compatible with a MD run from siesta):
counter = 0
for line in lines:
if line.startswith('siesta: etot =') and counter == 0:
counter += 1
elif line.startswith('siesta: etot ='):
self.etotal = float(line.split()[-1])
self.efree = float(next(lines).split()[-1])
break
else:
raise RuntimeError
# Forces (changed so forces smaller than -999eV/A can be fetched):
lines = open(self.label + '.FA', 'r').readlines()
assert int(lines[0]) == len(self.numbers)
assert len(lines) == len(self.numbers) + 1
lines = lines[1:]
self.forces = np.zeros((len(lines), 3))
for i in range(len(lines)):
self.forces[i, 0] = float(lines[i][6:18].strip())
self.forces[i, 1] = float(lines[i][18:30].strip())
self.forces[i, 2] = float(lines[i][30:42].strip())
def read_eig(self):
if self.e_fermi is not None:
return
assert os.access(self.label + '.EIG', os.F_OK)
assert os.access(self.label + '.KP', os.F_OK)
# Read k point weights
text = open(self.label + '.KP', 'r').read()
lines = text.split('\n')
n_kpts = int(lines[0].strip())
self.weights = np.zeros((n_kpts,))
for i in range(n_kpts):
l = lines[i + 1].split()
self.weights[i] = float(l[4])
# Read eigenvalues and fermi-level
text = open(self.label+'.EIG','r').read()
lines = text.split('\n')
self.e_fermi = float(lines[0].split()[0])
tmp = lines[1].split()
self.n_bands = int(tmp[0])
n_spin_bands = int(tmp[1])
self.spin_pol = n_spin_bands == 2
lines = lines[2:-1]
lines_per_kpt = (self.n_bands * n_spin_bands // 10 +
int((self.n_bands * n_spin_bands) % 10 != 0))
self.eig = dict()
for i in range(len(self.weights)):
tmp = lines[i * lines_per_kpt:(i + 1) * lines_per_kpt]
v = [float(v) for v in tmp[0].split()[1:]]
for l in tmp[1:]:
v.extend([float(t) for t in l.split()])
if self.spin_pol:
self.eig[(i, 0)] = np.array(v[0:self.n_bands])
self.eig[(i, 1)] = np.array(v[self.n_bands:])
else:
self.eig[(i, 0)] = np.array(v)
def get_k_point_weights(self):
self.read_eig()
return self.weights
def get_fermi_level(self):
self.read_eig()
return self.e_fermi
def get_eigenvalues(self, kpt=0, spin=0):
self.read_eig()
return self.eig[(kpt, spin)]
def get_number_of_spins(self):
self.read_eig()
if self.spin_pol:
return 2
else:
return 1
def read_hs(self, filename, is_gamma_only=False, magnus=False):
"""Read the Hamiltonian and overlap matrix from a Siesta
calculation in sparse format.
Parameters
==========
filename: str
The filename should be on the form jobname.HS
is_gamma_only: {False, True}, optional
Is it a gamma point calculation?
magnus: bool
The fileformat was changed by Magnus in Siesta at some
point around version 2.xxx.
Use magnus=False to use the old file format.
Note
====
Data read in is put in self._dat.
Examples
========
>>> calc = Siesta()
>>> calc.read_hs('jobname.HS')
>>> print(calc._dat.fermi_level)
>>> print('Number of orbitals: %i' % calc._dat.nuotot)
"""
assert not magnus, 'Not implemented; changes by Magnus to file io'
assert not is_gamma_only, 'Not implemented. Only works for k-points.'
class Dummy:
pass
self._dat = dat = Dummy()
# Try to read supercell and atom data from a jobname.XV file
filename_xv = filename[:-2] + 'XV'
#assert isfile(filename_xv), 'Missing jobname.XV file'
if isfile(filename_xv):
print('Reading supercell and atom data from ' + filename_xv)
fd = open(filename_xv, 'r')
dat.cell = np.zeros((3, 3)) # Supercell
for a_vec in dat.cell:
a_vec[:] = np.array(fd.readline().split()[:3], float)
dat.rcell = 2 * np.pi * np.linalg.inv(dat.cell.T)
dat.natoms = int(fd.readline().split()[0])
dat.symbols = []
dat.pos_ac = np.zeros((dat.natoms, 3))
for a in range(dat.natoms):
line = fd.readline().split()
dat.symbols.append(chemical_symbols[int(line[1])])
dat.pos_ac[a, :] = [float(line[i]) for i in range(2, 2 + 3)]
# Read in the jobname.HS file
fileobj = open(filename, 'rb')
fileobj.seek(0)
dat.fermi_level = float(open(filename[:-3] + '.EIG', 'r').readline())
dat.is_gamma_only = is_gamma_only
dat.nuotot, dat.ns, dat.mnh = getrecord(fileobj, 'l')
nuotot, ns, mnh = dat.nuotot, dat.ns, dat.mnh
print('Number of orbitals found: %i' % nuotot)
dat.numh = numh = np.array([getrecord(fileobj, 'l')
for i in range(nuotot)], 'l')
dat.maxval = max(numh)
dat.listhptr = listhptr = np.zeros(nuotot, 'l')
listhptr[0] = 0
for oi in range(1, nuotot):
listhptr[oi] = listhptr[oi - 1] + numh[oi - 1]
dat.listh = listh = np.zeros(mnh, 'l')
print('Reading sparse info')
for oi in range(nuotot):
for mi in range(numh[oi]):
listh[listhptr[oi] + mi] = getrecord(fileobj, 'l')
dat.nuotot_sc = max(listh)
dat.h_sparse = h_sparse = np.zeros((mnh, ns), float)
dat.s_sparse = s_sparse = np.zeros(mnh, float)
print('Reading H')
for si in range(ns):
for oi in range(nuotot):
for mi in range(numh[oi]):
h_sparse[listhptr[oi] + mi, si] = getrecord(fileobj, 'd')
print('Reading S')
for oi in range(nuotot):
for mi in range(numh[oi]):
s_sparse[listhptr[oi] + mi] = getrecord(fileobj, 'd')
dat.qtot, dat.temperature = getrecord(fileobj, 'd')
if not is_gamma_only:
print('Reading X')
dat.xij_sparse = xij_sparse = np.zeros([3, mnh], float)
for oi in range(nuotot):
for mi in range(numh[oi]):
xij_sparse[:, listhptr[oi] + mi] = getrecord(fileobj, 'd')
fileobj.close()
def get_hs(self, kpt=(0, 0, 0), spin=0, remove_pbc=None, kpt_scaled=True):
"""Hamiltonian and overlap matrices for an arbitrary k-point.
The default values correspond to the Gamma point for
spin 0 and periodic boundary conditions.
Parameters
==========
kpt : {(0, 0, 0), (3,) array_like}, optional
k-point in scaled or absolute coordinates.
For the latter the units should be Bohr^-1.
spin : {0, 1}, optional
Spin index
remove_pbc : {None, ({'x', 'y', 'z'}, basis)}, optional
Use remove_pbc to truncate h and s along a cartesian
axis.
basis: {str, dict}
The basis specification as either a string or a dictionary.
kpt_scaled : {True, bool}, optional
Use kpt_scaled=False if `kpt` is in absolute units (Bohr^-1).
Note
====
read_hs should be called before get_hs gets called.
Examples
========
>>> calc = Siesta()
>>> calc.read_hs('jobname.HS')
>>> h, s = calc.get_hs((0.0, 0.375, 0.375))
>>> h -= s * calc._dat.fermi_level # fermi level is now at 0.0
>>> basis = 'szp'
>>> h, s = calc.get_hs((0.0, 0.375, 0.375), remove_pbc=('x', basis))
>>> basis = {'Au': 'sz', 'C': 'dzp', None: 'szp'}
>>> h, s = calc.get_hs((0.0, 0.375, 0.375), remove_pbc=('x', basis))
"""
if not hasattr(self, '_dat'):# XXX Crude check if data is avail.
print('Please read in data first by calling the method read_hs.')
return None, None
dot = np.dot
dat = self._dat
kpt_c = np.array(kpt, float)
if kpt_scaled:
kpt_c = dot(kpt_c, dat.rcell)
h_MM = np.zeros((dat.nuotot, dat.nuotot), complex)
s_MM = np.zeros((dat.nuotot, dat.nuotot), complex)
h_sparse, s_sparse = dat.h_sparse, dat.s_sparse
x_sparse = dat.xij_sparse
numh, listhptr, listh = dat.numh, dat.listhptr, dat.listh
indxuo = np.mod(np.arange(dat.nuotot_sc), dat.nuotot)
for iuo in range(dat.nuotot):
for j in range(numh[iuo]):
ind = listhptr[iuo] + j
jo = listh[ind] - 1
juo = indxuo[jo]
kx = dot(kpt_c, x_sparse[:, ind])
phasef = exp(1.0j * kx)
h_MM[iuo, juo] += phasef * h_sparse[ind, spin]
s_MM[iuo, juo] += phasef * s_sparse[ind]
if remove_pbc is not None:
direction, basis = remove_pbc
centers_ic = get_bf_centers(dat.symbols, dat.pos_ac, basis)
d = 'xyz'.index(direction)
cutoff = dat.cell[d, d] * 0.5
truncate_along_axis(h_MM, s_MM, direction, centers_ic, cutoff)
h_MM *= complex(Rydberg)
return h_MM, s_MM
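# --- Illustrative sketch (not part of the original module) ---
# A minimal standalone version of the Bloch-sum step used in Siesta.get_hs
# above: each sparse matrix element is multiplied by exp(i k . x_ij) and
# accumulated into the dense k-dependent matrix. `rows`, `cols` and `values`
# are hypothetical flattened sparse arrays; `n` is the number of orbitals
# in the unit cell.
def _bloch_accumulate(kpt_c, x_sparse, rows, cols, values, n):
    m = np.zeros((n, n), complex)
    for ind in range(len(values)):
        phase = np.exp(1.0j * np.dot(kpt_c, x_sparse[:, ind]))
        m[rows[ind], cols[ind]] += phase * values[ind]
    return m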
def getrecord(fileobj, dtype):
"""Used to read in binary files.
"""
typetosize = {'l':4, 'f':4, 'd':8}# XXX np.int, np.float32, np.float64
assert dtype in typetosize # XXX
size = typetosize[dtype]
record = array.array('l')
trunk = array.array(dtype)
record.fromfile(fileobj, 1)
nofelements = int(record[-1]) // size
trunk.fromfile(fileobj, nofelements)
record.fromfile(fileobj, 1)
data = np.array(trunk, dtype=dtype)
if len(data)==1:
data = data[0]
return data
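# Usage note (hedged): getrecord assumes the Fortran unformatted record
# layout written by Siesta, where each record is framed by 4-byte length
# markers. read_hs above reads the .HS header record as
#     nuotot, ns, mnh = getrecord(fileobj, 'l')
# with `fileobj` opened in binary mode.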
def truncate_along_axis(h, s, direction, centers_ic, cutoff):
"""Truncate h and s such along a cartesian axis.
Parameters:
h: (N, N) ndarray
Hamiltonian matrix.
s: (N, N) ndarray
Overlap matrix.
direction: {'x', 'y', 'z'}
Truncate along a cartesian axis.
centers_ic: (N, 3) ndarray
Centers of the basis functions.
cutoff: float
The (direction-axis projected) cutoff distance.
"""
dtype = h.dtype
ni = len(centers_ic)
d = 'xyz'.index(direction)
pos_i = centers_ic[:, d]
for i in range(ni):
dpos_i = abs(pos_i - pos_i[i])
mask_i = (dpos_i < cutoff).astype(dtype)
h[i, :] *= mask_i
h[:, i] *= mask_i
s[i, :] *= mask_i
s[:, i] *= mask_i
def get_nao(symbol, basis):
"""Number of basis functions.
Parameters
==========
symbol: str
The chemical symbol.
basis: str
Basis function type.
"""
ls = valence_config[symbol]
nao = 0
zeta = {'s':1, 'd':2, 't':3, 'q':4}
nzeta = zeta[basis[0]]
is_pol = 'p' in basis
for l in ls:
nao += (2 * l + 1) * nzeta
if is_pol:
l_pol = None
l = -1
while l_pol is None:
l += 1
if l not in ls:
l_pol = l
nao += 2 * l_pol + 1
return nao
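# Worked example (values follow from the zeta/polarization rules above):
# carbon has valence channels l = (0, 1), so a 'dzp' basis gives
# 2*(1 + 3) = 8 double-zeta s and p functions plus 2*2 + 1 = 5 functions
# from the lowest unoccupied (l = 2) polarization shell:
# >>> get_nao('C', 'dzp')
# 13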
def get_bf_centers(symbols, positions, basis):
"""Centers of basis functions.
Parameters
==========
symbols: str, (N, ) array_like
chemical symbol for each atom.
positions: float, (N, 3) array_like
Positions of the atoms.
basis: {str, dict}
Basis set specification as either a string or a dictionary
Examples
========
>>> symbols = ['O', 'H']
>>> positions = [(0, 0, 0), (0, 0, 1)]
>>> basis = 'sz'
>>> print(get_bf_centers(symbols, positions, basis))
[[0 0 0]
[0 0 0]
[0 0 0]
[0 0 0]
[0 0 1]]
>>> basis = {'H':'dz', None:'sz'}
>>> print(get_bf_centers(symbols, positions, basis))
[[0 0 0]
[0 0 0]
[0 0 0]
[0 0 0]
[0 0 1]
[0 0 1]]
"""
centers_ic = []
dict_basis = False
if isinstance(basis, dict):
dict_basis = True
for symbol, pos in zip(symbols, positions):
if dict_basis:
if symbol not in basis:
bas = basis[None]
else:
bas = basis[symbol]
else:
bas = basis
for i in range(get_nao(symbol, bas)):
centers_ic.append(pos)
return np.asarray(centers_ic)
def fdfify(key):
return key.lower().replace('_', '').replace('.', '').replace('-', '')
valence_config = {
'H': (0,),
'C': (0, 1),
'N': (0, 1),
'O': (0, 1),
'S': (0, 1),
'Li': (0,),
'Na': (0,),
'Ni': (0, 2),
'Cu': (0, 2),
'Pd': (0, 2),
'Ag': (0, 2),
'Pt': (0, 2),
'Au': (0, 2)}
keys_with_units = {
'paoenergyshift': 'eV',
'zmunitslength': 'Bohr',
'zmunitsangle': 'rad',
'zmforcetollength': 'eV/Ang',
'zmforcetolangle': 'eV/rad',
'zmmaxdispllength': 'Ang',
'zmmaxdisplangle': 'rad',
'meshcutoff': 'eV',
'dmenergytolerance': 'eV',
'electronictemperature': 'eV',
'oneta': 'eV',
'onetaalpha': 'eV',
'onetabeta': 'eV',
'onrclwf': 'Ang',
'onchemicalpotentialrc': 'Ang',
'onchemicalpotentialtemperature': 'eV',
'mdmaxcgdispl': 'Ang',
'mdmaxforcetol': 'eV/Ang',
'mdmaxstresstol': 'eV/Ang**3',
'mdlengthtimestep': 'fs',
'mdinitialtemperature': 'eV',
'mdtargettemperature': 'eV',
'mdtargetpressure': 'eV/Ang**3',
'mdnosemass': 'eV*fs**2',
'mdparrinellorahmanmass': 'eV*fs**2',
'mdtaurelax': 'fs',
'mdbulkmodulus': 'eV/Ang**3',
'mdfcdispl': 'Ang',
'warningminimumatomicdistance': 'Ang',
'rcspatial': 'Ang',
'kgridcutoff': 'Ang',
'latticeconstant': 'Ang'}
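# --- Illustrative sketch (not part of the original module) ---
# A minimal helper showing how write_fdf above resolves units: keys are
# normalized with fdfify before lookup, so 'MeshCutoff', 'Mesh.Cutoff' and
# 'mesh_cutoff' all resolve to 'eV', while unknown keys yield None and are
# written without a unit.
def _unit_for(key):
    return keys_with_units.get(fdfify(key))
# e.g. _unit_for('Mesh.Cutoff') == 'eV'; _unit_for('SolutionMethod') is None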
|
suttond/MODOI
|
ase/calculators/siesta.py
|
Python
|
lgpl-3.0
| 31,261
|
[
"ASE",
"SIESTA",
"VASP"
] |
69c7858da9e71d66479df16ee6fbdcf546c1ef4cc2e21bbaef298c1b28022024
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS Instructor Dashboard.
"""
import ddt
from nose.plugins.attrib import attr
from bok_choy.promise import EmptyPromise
from flaky import flaky
from common.test.acceptance.tests.helpers import UniqueCourseTest, get_modal_alert, EventsTestMixin
from common.test.acceptance.pages.common.logout import LogoutPage
from common.test.acceptance.pages.lms.auto_auth import AutoAuthPage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage
from common.test.acceptance.pages.lms.create_mode import ModeCreationPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.instructor_dashboard import InstructorDashboardPage, EntranceExamAdmin
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.lms.dashboard import DashboardPage
from common.test.acceptance.pages.lms.problem import ProblemPage
from common.test.acceptance.pages.lms.track_selection import TrackSelectionPage
from common.test.acceptance.pages.lms.pay_and_verify import PaymentAndVerificationFlow, FakePaymentPage
from common.test.acceptance.pages.lms.login_and_register import CombinedLoginAndRegisterPage
from common.test.acceptance.tests.helpers import disable_animations
from common.test.acceptance.fixtures.certificates import CertificateConfigFixture
class BaseInstructorDashboardTest(EventsTestMixin, UniqueCourseTest):
"""
Mixin class for testing the instructor dashboard.
"""
def log_in_as_instructor(self):
"""
Logs in as an instructor and returns the id.
"""
username = "test_instructor_{uuid}".format(uuid=self.unique_id[0:6])
auto_auth_page = AutoAuthPage(self.browser, username=username, course_id=self.course_id, staff=True)
return username, auto_auth_page.visit().get_user_id()
def visit_instructor_dashboard(self):
"""
Visits the instructor dashboard.
"""
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
instructor_dashboard_page.visit()
return instructor_dashboard_page
@attr('a11y')
class LMSInstructorDashboardA11yTest(BaseInstructorDashboardTest):
"""
Instructor dashboard base accessibility test.
"""
def setUp(self):
super(LMSInstructorDashboardA11yTest, self).setUp()
self.course_fixture = CourseFixture(**self.course_info).install()
self.log_in_as_instructor()
self.instructor_dashboard_page = self.visit_instructor_dashboard()
def test_instructor_dashboard_a11y(self):
self.instructor_dashboard_page.a11y_audit.check_for_accessibility_errors()
@ddt.ddt
class BulkEmailTest(BaseInstructorDashboardTest):
"""
End-to-end tests for bulk emailing from instructor dash.
"""
def setUp(self):
super(BulkEmailTest, self).setUp()
self.course_fixture = CourseFixture(**self.course_info).install()
self.log_in_as_instructor()
instructor_dashboard_page = self.visit_instructor_dashboard()
self.send_email_page = instructor_dashboard_page.select_bulk_email()
@ddt.data(["myself"], ["staff"], ["learners"], ["myself", "staff", "learners"])
def test_email_queued_for_sending(self, recipient):
self.send_email_page.send_message(recipient)
self.send_email_page.verify_message_queued_successfully()
@attr('a11y')
def test_bulk_email_a11y(self):
"""
Bulk email accessibility tests
"""
self.send_email_page.a11y_audit.config.set_scope([
'#section-send-email'
])
self.send_email_page.a11y_audit.config.set_rules({
"ignore": [
'button-name', # TODO: TNL-5830
]
})
self.send_email_page.a11y_audit.check_for_accessibility_errors()
@attr(shard=10)
class AutoEnrollmentWithCSVTest(BaseInstructorDashboardTest):
"""
End-to-end tests for Auto-Registration and enrollment functionality via CSV file.
"""
def setUp(self):
super(AutoEnrollmentWithCSVTest, self).setUp()
self.course_fixture = CourseFixture(**self.course_info).install()
self.log_in_as_instructor()
instructor_dashboard_page = self.visit_instructor_dashboard()
self.auto_enroll_section = instructor_dashboard_page.select_membership().select_auto_enroll_section()
# Initialize the page objects
self.register_page = CombinedLoginAndRegisterPage(self.browser, start_page="register")
self.dashboard_page = DashboardPage(self.browser)
def test_browse_and_upload_buttons_are_visible(self):
"""
Scenario: On the Membership tab of the Instructor Dashboard, Auto-Enroll Browse and Upload buttons are visible.
Given that I am on the Membership tab on the Instructor Dashboard
Then I see the 'REGISTER/ENROLL STUDENTS' section on the page with the 'Browse' and 'Upload' buttons
"""
self.assertTrue(self.auto_enroll_section.is_file_attachment_browse_button_visible())
self.assertTrue(self.auto_enroll_section.is_upload_button_visible())
def test_enroll_unregister_student(self):
"""
Scenario: On the Membership tab of the Instructor Dashboard, Batch Enrollment div is visible.
Given that I am on the Membership tab on the Instructor Dashboard
Then I enter the email address and enroll it.
And I log out of the current session.
And navigate to the registration page and register the student.
Then I see the course in which the student was enrolled.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
email = "{user}@example.com".format(user=username)
self.auto_enroll_section.fill_enrollment_batch_text_box(email)
self.assertIn(
'Successfully sent enrollment emails to the following users. '
'They will be enrolled once they register:',
self.auto_enroll_section.get_notification_text()
)
LogoutPage(self.browser).visit()
self.register_page.visit()
self.register_page.register(
email=email,
password="123456",
username=username,
full_name="Test User",
terms_of_service=True,
country="US",
favorite_movie="Harry Potter",
)
course_names = self.dashboard_page.wait_for_page().available_courses
self.assertEquals(len(course_names), 1)
self.assertIn(self.course_info["display_name"], course_names)
def test_clicking_file_upload_button_without_file_shows_error(self):
"""
Scenario: Clicking on the upload button without specifying a CSV file results in error.
Given that I am on the Membership tab on the Instructor Dashboard
When I click the Upload Button without specifying a CSV file
Then I should be shown an Error Notification
And The Notification message should read 'File is not attached.'
"""
self.auto_enroll_section.click_upload_file_button()
self.assertTrue(self.auto_enroll_section.is_notification_displayed(section_type=self.auto_enroll_section.NOTIFICATION_ERROR))
self.assertEqual(self.auto_enroll_section.first_notification_message(section_type=self.auto_enroll_section.NOTIFICATION_ERROR), "File is not attached.")
def test_uploading_correct_csv_file_results_in_success(self):
"""
Scenario: Uploading a CSV with correct data results in Success.
Given that I am on the Membership tab on the Instructor Dashboard
When I select a csv file with correct data and click the Upload Button
Then I should be shown a Success Notification.
"""
self.auto_enroll_section.upload_correct_csv_file()
self.assertTrue(self.auto_enroll_section.is_notification_displayed(section_type=self.auto_enroll_section.NOTIFICATION_SUCCESS))
def test_uploading_csv_file_with_bad_data_results_in_errors_and_warnings(self):
"""
Scenario: Uploading a CSV with incorrect data results in error and warnings.
Given that I am on the Membership tab on the Instructor Dashboard
When I select a csv file with incorrect data and click the Upload Button
Then I should be shown an Error Notification
And a corresponding Error Message.
And I should be shown a Warning Notification
And a corresponding Warning Message.
"""
self.auto_enroll_section.upload_csv_file_with_errors_warnings()
self.assertTrue(self.auto_enroll_section.is_notification_displayed(section_type=self.auto_enroll_section.NOTIFICATION_ERROR))
self.assertEqual(self.auto_enroll_section.first_notification_message(section_type=self.auto_enroll_section.NOTIFICATION_ERROR), "Data in row #2 must have exactly four columns: email, username, full name, and country")
self.assertTrue(self.auto_enroll_section.is_notification_displayed(section_type=self.auto_enroll_section.NOTIFICATION_WARNING))
self.assertEqual(self.auto_enroll_section.first_notification_message(section_type=self.auto_enroll_section.NOTIFICATION_WARNING), "ename (d@a.com): (An account with email d@a.com exists but the provided username ename is different. Enrolling anyway with d@a.com.)")
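# Illustrative note (hedged, format inferred from the error message asserted
# above): a well-formed auto-enroll CSV row has exactly four columns, e.g.
#   jdoe@example.com,jdoe,John Doe,US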
def test_uploading_non_csv_file_results_in_error(self):
"""
Scenario: Uploading an image file for auto-enrollment results in error.
Given that I am on the Membership tab on the Instructor Dashboard
When I select an image file (a non-csv file) and click the Upload Button
Then I should be shown an Error Notification
And The Notification message should read 'Make sure that the file you upload is in CSV format with no extraneous characters or rows.'
"""
self.auto_enroll_section.upload_non_csv_file()
self.assertTrue(self.auto_enroll_section.is_notification_displayed(section_type=self.auto_enroll_section.NOTIFICATION_ERROR))
self.assertEqual(self.auto_enroll_section.first_notification_message(section_type=self.auto_enroll_section.NOTIFICATION_ERROR), "Make sure that the file you upload is in CSV format with no extraneous characters or rows.")
@attr('a11y')
def test_auto_enroll_csv_a11y(self):
"""
Auto-enrollment with CSV accessibility tests
"""
self.auto_enroll_section.a11y_audit.config.set_scope([
'#member-list-widget-template'
])
self.auto_enroll_section.a11y_audit.check_for_accessibility_errors()
@attr(shard=10)
class ProctoredExamsTest(BaseInstructorDashboardTest):
"""
End-to-end tests for Proctoring Sections of the Instructor Dashboard.
"""
USERNAME = "STUDENT_TESTER"
EMAIL = "student101@example.com"
def setUp(self):
super(ProctoredExamsTest, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.course_outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
course_fixture = CourseFixture(**self.course_info)
course_fixture.add_advanced_settings({
"enable_proctored_exams": {"value": "true"}
})
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1')
)
)
).install()
self.track_selection_page = TrackSelectionPage(self.browser, self.course_id)
self.payment_and_verification_flow = PaymentAndVerificationFlow(self.browser, self.course_id)
self.immediate_verification_page = PaymentAndVerificationFlow(
self.browser, self.course_id, entry_point='verify-now'
)
self.upgrade_page = PaymentAndVerificationFlow(self.browser, self.course_id, entry_point='upgrade')
self.fake_payment_page = FakePaymentPage(self.browser, self.course_id)
self.dashboard_page = DashboardPage(self.browser)
self.problem_page = ProblemPage(self.browser)
# Add a verified mode to the course
ModeCreationPage(
self.browser, self.course_id, mode_slug=u'verified', mode_display_name=u'Verified Certificate',
min_price=10, suggested_prices='10,20'
).visit()
# Auto-auth register for the course.
self._auto_auth(self.USERNAME, self.EMAIL, False)
def _auto_auth(self, username, email, staff):
"""
Logout and login with given credentials.
"""
AutoAuthPage(self.browser, username=username, email=email,
course_id=self.course_id, staff=staff).visit()
def _login_as_a_verified_user(self):
"""
Log in as a verified user.
"""
self._auto_auth(self.USERNAME, self.EMAIL, False)
# The track selection page cannot be visited directly; see the other tests for any prerequisites.
# Navigate to the track selection page
self.track_selection_page.visit()
# Enter the payment and verification flow by choosing to enroll as verified
self.track_selection_page.enroll('verified')
# Proceed to the fake payment page
self.payment_and_verification_flow.proceed_to_payment()
# Submit payment
self.fake_payment_page.submit_payment()
def _create_a_proctored_exam_and_attempt(self):
"""
Creates a proctored exam and makes the student attempt it so that
the associated allowance and attempts are visible on the Instructor Dashboard.
"""
# Visit the course outline page in studio
LogoutPage(self.browser).visit()
self._auto_auth("STAFF_TESTER", "staff101@example.com", True)
self.course_outline.visit()
# open the exam settings to make it a proctored exam.
self.course_outline.open_subsection_settings_dialog()
# select advanced settings tab
self.course_outline.select_advanced_tab()
self.course_outline.make_exam_proctored()
# login as a verified student and visit the courseware.
LogoutPage(self.browser).visit()
self._login_as_a_verified_user()
self.courseware_page.visit()
# Start the proctored exam.
self.courseware_page.start_proctored_exam()
def _create_a_timed_exam_and_attempt(self):
"""
Creates a timed exam and makes the student attempt it so that
the associated allowance and attempts are visible on the Instructor Dashboard.
"""
# Visit the course outline page in studio
LogoutPage(self.browser).visit()
self._auto_auth("STAFF_TESTER", "staff101@example.com", True)
self.course_outline.visit()
# open the exam settings to make it a timed exam.
self.course_outline.open_subsection_settings_dialog()
# select advanced settings tab
self.course_outline.select_advanced_tab()
self.course_outline.make_exam_timed()
# login as a verified student and visit the courseware.
LogoutPage(self.browser).visit()
self._login_as_a_verified_user()
self.courseware_page.visit()
# Start the timed exam.
self.courseware_page.start_timed_exam()
# Stop the timed exam.
self.courseware_page.stop_timed_exam()
def test_can_add_remove_allowance(self):
"""
Make sure that allowances can be added and removed.
"""
# Given that an exam has been configured to be a timed exam.
self._create_a_timed_exam_and_attempt()
# When I log in as an instructor,
__, __ = self.log_in_as_instructor()
# And visit the Allowance Section of Instructor Dashboard's Special Exams tab
instructor_dashboard_page = self.visit_instructor_dashboard()
allowance_section = instructor_dashboard_page.select_special_exams().select_allowance_section()
# Then I can add Allowance to that exam for a student
self.assertTrue(allowance_section.is_add_allowance_button_visible)
# When I click the Add Allowance button
allowance_section.click_add_allowance_button()
# Then popup should be visible
self.assertTrue(allowance_section.is_add_allowance_popup_visible)
# When I fill and submit the allowance form
allowance_section.submit_allowance_form('10', self.USERNAME)
# Then, the added record should be visible
self.assertTrue(allowance_section.is_allowance_record_visible)
@flaky # TNL-5832
def test_can_reset_attempts(self):
"""
Make sure that Exam attempts are visible and can be reset.
"""
# Given that an exam has been configured to be a timed exam.
self._create_a_timed_exam_and_attempt()
# When I log in as an instructor,
__, __ = self.log_in_as_instructor()
# And visit the Student Proctored Exam Attempts Section of Instructor Dashboard's Special Exams tab
instructor_dashboard_page = self.visit_instructor_dashboard()
exam_attempts_section = instructor_dashboard_page.select_special_exams().select_exam_attempts_section()
# Then I can see the search text field
self.assertTrue(exam_attempts_section.is_search_text_field_visible)
# And I can see one attempt by a student.
self.assertTrue(exam_attempts_section.is_student_attempt_visible)
# And I can remove the attempt by clicking the "x" at the end of the row.
exam_attempts_section.remove_student_attempt()
self.assertFalse(exam_attempts_section.is_student_attempt_visible)
@attr(shard=10)
@ddt.ddt
class EntranceExamGradeTest(BaseInstructorDashboardTest):
"""
Tests for Entrance exam specific student grading tasks.
"""
admin_buttons = (
'reset_attempts_button',
'rescore_button',
'rescore_if_higher_button',
'delete_state_button',
)
def setUp(self):
super(EntranceExamGradeTest, self).setUp()
self.course_info.update({"settings": {"entrance_exam_enabled": "true"}})
CourseFixture(**self.course_info).install()
self.student_identifier = "johndoe_saee@example.com"
# Create the user (automatically logs us in)
AutoAuthPage(
self.browser,
username="johndoe_saee",
email=self.student_identifier,
course_id=self.course_id,
staff=False
).visit()
LogoutPage(self.browser).visit()
# go to the student admin page on the instructor dashboard
self.log_in_as_instructor()
self.entrance_exam_admin = self.visit_instructor_dashboard().select_student_admin(EntranceExamAdmin)
def test_input_text_and_buttons_are_visible(self):
"""
Scenario: On the Student admin tab of the Instructor Dashboard, Student Email input box,
Reset Student Attempt, Rescore Student Submission, Delete Student State for entrance exam
and Show Background Task History for Student buttons are visible
Given that I am on the Student Admin tab on the Instructor Dashboard
Then I see Student Email input box, Reset Student Attempt, Rescore Student Submission,
Delete Student State for entrance exam and Show Background Task History for Student buttons
"""
self.assertTrue(self.entrance_exam_admin.are_all_buttons_visible())
@ddt.data(*admin_buttons)
def test_admin_button_without_email_shows_error(self, button_to_test):
"""
Scenario: Clicking on the requested button without entering student email
address or username results in error.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the requested button under Entrance Exam Grade
Adjustment without entering an email address
Then I should be shown an Error Notification
And The Notification message should read 'Please enter a student email address or username.'
"""
getattr(self.entrance_exam_admin, button_to_test).click()
self.assertEqual(
'Please enter a student email address or username.',
self.entrance_exam_admin.top_notification.text[0]
)
@ddt.data(*admin_buttons)
def test_admin_button_with_success(self, button_to_test):
"""
Scenario: Clicking on the requested button with valid student email
address or username should result in success prompt.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the requested button under Entrance Exam Grade
Adjustment after entering a valid student
email address or username
Then I should be shown an alert with success message
"""
self.entrance_exam_admin.set_student_email_or_username(self.student_identifier)
getattr(self.entrance_exam_admin, button_to_test).click()
alert = get_modal_alert(self.entrance_exam_admin.browser)
alert.dismiss()
@ddt.data(*admin_buttons)
def test_admin_button_with_error(self, button_to_test):
"""
Scenario: Clicking on the requested button with email address or username
of a non-existing student should result in an error message.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the requested Button under Entrance Exam Grade
Adjustment after entering the email address or username of a non-existing student
Then I should be shown an error message
"""
self.entrance_exam_admin.set_student_email_or_username('non_existing@example.com')
getattr(self.entrance_exam_admin, button_to_test).click()
self.entrance_exam_admin.wait_for_ajax()
self.assertGreater(len(self.entrance_exam_admin.top_notification.text[0]), 0)
def test_skip_entrance_exam_button_with_success(self):
"""
Scenario: Clicking on the Let Student Skip Entrance Exam button with
valid student email address or username should result in success prompt.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the Let Student Skip Entrance Exam Button under
Entrance Exam Grade Adjustment after entering a valid student
email address or username
Then I should be shown an alert with success message
"""
self.entrance_exam_admin.set_student_email_or_username(self.student_identifier)
self.entrance_exam_admin.skip_entrance_exam_button.click()
#first we have window.confirm
alert = get_modal_alert(self.entrance_exam_admin.browser)
alert.accept()
# then we have alert confirming action
alert = get_modal_alert(self.entrance_exam_admin.browser)
alert.dismiss()
def test_skip_entrance_exam_button_with_error(self):
"""
Scenario: Clicking on the Let Student Skip Entrance Exam button with
email address or username of a non-existing student should result in an error message.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the Let Student Skip Entrance Exam Button under
Entrance Exam Grade Adjustment after entering a non-existing
student email address or username
Then I should be shown an error message
"""
self.entrance_exam_admin.set_student_email_or_username('non_existing@example.com')
self.entrance_exam_admin.skip_entrance_exam_button.click()
#first we have window.confirm
alert = get_modal_alert(self.entrance_exam_admin.browser)
alert.accept()
self.entrance_exam_admin.wait_for_ajax()
self.assertGreater(len(self.entrance_exam_admin.top_notification.text[0]), 0)
def test_task_history_button_with_success(self):
"""
Scenario: Clicking on the Show Background Task History for Student
with valid student email address or username should result in table of tasks.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the Show Background Task History for Student Button
under Entrance Exam Grade Adjustment after entering a valid student
email address or username
Then I should be shown a table listing all background tasks
"""
self.entrance_exam_admin.set_student_email_or_username(self.student_identifier)
self.entrance_exam_admin.task_history_button.click()
self.entrance_exam_admin.wait_for_task_history_table()
@attr(shard=10)
class DataDownloadsTest(BaseInstructorDashboardTest):
"""
Bok Choy tests for the "Data Downloads" tab.
"""
def setUp(self):
super(DataDownloadsTest, self).setUp()
self.course_fixture = CourseFixture(**self.course_info).install()
self.instructor_username, self.instructor_id = self.log_in_as_instructor()
instructor_dashboard_page = self.visit_instructor_dashboard()
self.data_download_section = instructor_dashboard_page.select_data_download()
def verify_report_requested_event(self, report_type):
"""
Verifies that the correct event is emitted when a report is requested.
"""
self.assert_matching_events_were_emitted(
event_filter={'name': u'edx.instructor.report.requested', 'report_type': report_type}
)
def verify_report_downloaded_event(self, report_url):
"""
Verifies that the correct event is emitted when a report is downloaded.
"""
self.assert_matching_events_were_emitted(
event_filter={'name': u'edx.instructor.report.downloaded', 'report_url': report_url}
)
def verify_report_download(self, report_name):
"""
Verifies that a report can be downloaded and an event fired.
"""
download_links = self.data_download_section.report_download_links
self.assertEquals(len(download_links), 1)
download_links[0].click()
expected_url = download_links.attrs('href')[0]
self.assertIn(report_name, expected_url)
self.verify_report_downloaded_event(expected_url)
def test_student_profiles_report_download(self):
"""
Scenario: Verify that an instructor can download a student profiles report
Given that I am an instructor
And I visit the instructor dashboard's "Data Downloads" tab
And I click on the "Download profile information as a CSV" button
Then a report should be generated
And a report requested event should be emitted
When I click on the report
Then a report downloaded event should be emitted
"""
report_name = u"student_profile_info"
self.data_download_section.generate_student_report_button.click()
self.data_download_section.wait_for_available_report()
self.verify_report_requested_event(report_name)
self.verify_report_download(report_name)
def test_grade_report_download(self):
"""
Scenario: Verify that an instructor can download a grade report
Given that I am an instructor
And I visit the instructor dashboard's "Data Downloads" tab
And I click on the "Generate Grade Report" button
Then a report should be generated
And a report requested event should be emitted
When I click on the report
Then a report downloaded event should be emitted
"""
report_name = u"grade_report"
self.data_download_section.generate_grade_report_button.click()
self.data_download_section.wait_for_available_report()
self.verify_report_requested_event(report_name)
self.verify_report_download(report_name)
def test_problem_grade_report_download(self):
"""
Scenario: Verify that an instructor can download a problem grade report
Given that I am an instructor
And I visit the instructor dashboard's "Data Downloads" tab
And I click on the "Generate Problem Grade Report" button
Then a report should be generated
And a report requested event should be emitted
When I click on the report
Then a report downloaded event should be emitted
"""
report_name = u"problem_grade_report"
self.data_download_section.generate_problem_report_button.click()
self.data_download_section.wait_for_available_report()
self.verify_report_requested_event(report_name)
self.verify_report_download(report_name)
def test_ora2_response_report_download(self):
"""
        Scenario: Verify that an instructor can download an ORA2 response report
Given that I am an instructor
And I visit the instructor dashboard's "Data Downloads" tab
And I click on the "Download ORA2 Responses" button
Then a report should be generated
"""
report_name = u"ORA_data"
self.data_download_section.generate_ora2_response_report_button.click()
self.data_download_section.wait_for_available_report()
self.verify_report_download(report_name)
@attr('a11y')
def test_data_download_a11y(self):
"""
Data download page accessibility tests
"""
self.data_download_section.a11y_audit.config.set_scope([
'.data-download-container'
])
self.data_download_section.a11y_audit.check_for_accessibility_errors()
@attr(shard=10)
@ddt.ddt
class CertificatesTest(BaseInstructorDashboardTest):
"""
Tests for Certificates functionality on instructor dashboard.
"""
def setUp(self):
super(CertificatesTest, self).setUp()
self.test_certificate_config = {
'id': 1,
'name': 'Certificate name',
'description': 'Certificate description',
'course_title': 'Course title override',
'signatories': [],
'version': 1,
'is_active': True
}
CourseFixture(**self.course_info).install()
self.cert_fixture = CertificateConfigFixture(self.course_id, self.test_certificate_config)
self.cert_fixture.install()
self.user_name, self.user_id = self.log_in_as_instructor()
self.instructor_dashboard_page = self.visit_instructor_dashboard()
self.certificates_section = self.instructor_dashboard_page.select_certificates()
disable_animations(self.certificates_section)
    def test_generate_certificates_button_is_disabled(self):
        """
        Scenario: On the Certificates tab of the Instructor Dashboard, the Generate Certificates button is disabled.
        Given that I am on the Certificates tab on the Instructor Dashboard
        And the instructor-generation and cert_html_view_enabled feature flags have been enabled
        But the certificate is not active in settings.
        Then I see a disabled 'Generate Certificates' button
"""
self.test_certificate_config['is_active'] = False
self.cert_fixture.update_certificate(1)
self.browser.refresh()
self.assertFalse(self.certificates_section.generate_certificates_button.visible)
self.assertTrue(self.certificates_section.generate_certificates_disabled_button.visible)
    def test_generate_certificates_button_is_visible(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Generate Certificates button is visible.
Given that I am on the Certificates tab on the Instructor Dashboard
And the instructor-generation feature flag has been enabled
Then I see a 'Generate Certificates' button
And when I click on the 'Generate Certificates' button
Then I should see a status message and 'Generate Certificates' button should be disabled.
"""
self.assertTrue(self.certificates_section.generate_certificates_button.visible)
self.certificates_section.generate_certificates_button.click()
alert = get_modal_alert(self.certificates_section.browser)
alert.accept()
self.certificates_section.wait_for_ajax()
EmptyPromise(
lambda: self.certificates_section.certificate_generation_status.visible,
'Certificate generation status shown'
).fulfill()
disabled = self.certificates_section.generate_certificates_button.attrs('disabled')
self.assertEqual(disabled[0], 'true')
def test_pending_tasks_section_is_visible(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Pending Instructor Tasks section is visible.
Given that I am on the Certificates tab on the Instructor Dashboard
Then I see 'Pending Instructor Tasks' section
"""
self.assertTrue(self.certificates_section.pending_tasks_section.visible)
def test_certificate_exceptions_section_is_visible(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Certificate Exceptions section is visible.
Given that I am on the Certificates tab on the Instructor Dashboard
Then I see 'CERTIFICATE EXCEPTIONS' section
"""
self.assertTrue(self.certificates_section.certificate_exceptions_section.visible)
def test_instructor_can_add_certificate_exception(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Instructor can add new certificate
exception to list.
Given that I am on the Certificates tab on the Instructor Dashboard
When I fill in student username and notes fields and click 'Add Exception' button
Then new certificate exception should be visible in certificate exceptions list
"""
notes = 'Test Notes'
# Add a student to Certificate exception list
self.certificates_section.add_certificate_exception(self.user_name, notes)
self.assertIn(self.user_name, self.certificates_section.last_certificate_exception.text)
self.assertIn(notes, self.certificates_section.last_certificate_exception.text)
# Verify that added exceptions are also synced with backend
# Revisit Page
self.certificates_section.refresh()
# wait for the certificate exception section to render
self.certificates_section.wait_for_certificate_exceptions_section()
# validate certificate exception synced with server is visible in certificate exceptions list
self.assertIn(self.user_name, self.certificates_section.last_certificate_exception.text)
self.assertIn(notes, self.certificates_section.last_certificate_exception.text)
def test_remove_certificate_exception_on_page_reload(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Instructor can remove added certificate
exceptions from the list.
Given that I am on the Certificates tab on the Instructor Dashboard
When I fill in student username and notes fields and click 'Add Exception' button
Then new certificate exception should be visible in certificate exceptions list
Revisit the page to make sure exceptions are synced.
        Then removing the user from the exception list should remove them from the displayed list.
"""
notes = 'Test Notes'
# Add a student to Certificate exception list
self.certificates_section.add_certificate_exception(self.user_name, notes)
self.assertIn(self.user_name, self.certificates_section.last_certificate_exception.text)
self.assertIn(notes, self.certificates_section.last_certificate_exception.text)
# Verify that added exceptions are also synced with backend
# Revisit Page
self.certificates_section.refresh()
# Remove Certificate Exception
self.certificates_section.remove_first_certificate_exception()
self.assertNotIn(self.user_name, self.certificates_section.last_certificate_exception.text)
self.assertNotIn(notes, self.certificates_section.last_certificate_exception.text)
def test_instructor_can_remove_certificate_exception(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Instructor can remove added certificate
exceptions from the list.
Given that I am on the Certificates tab on the Instructor Dashboard
When I fill in student username and notes fields and click 'Add Exception' button
Then new certificate exception should be visible in certificate exceptions list
"""
notes = 'Test Notes'
# Add a student to Certificate exception list
self.certificates_section.add_certificate_exception(self.user_name, notes)
self.assertIn(self.user_name, self.certificates_section.last_certificate_exception.text)
self.assertIn(notes, self.certificates_section.last_certificate_exception.text)
# Remove Certificate Exception
self.certificates_section.remove_first_certificate_exception()
self.assertNotIn(self.user_name, self.certificates_section.last_certificate_exception.text)
self.assertNotIn(notes, self.certificates_section.last_certificate_exception.text)
# Verify that added exceptions are also synced with backend
# Revisit Page
self.certificates_section.refresh()
# wait for the certificate exception section to render
self.certificates_section.wait_for_certificate_exceptions_section()
# validate certificate exception synced with server is visible in certificate exceptions list
self.assertNotIn(self.user_name, self.certificates_section.last_certificate_exception.text)
self.assertNotIn(notes, self.certificates_section.last_certificate_exception.text)
def test_error_on_duplicate_certificate_exception(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard,
        Error message appears if the student being added already exists in the certificate exceptions list
        Given that I am on the Certificates tab on the Instructor Dashboard
        When I fill in a student username that is already in the list and click the 'Add Exception' button
Then Error Message should say 'User (username/email={user}) already in exception list.'
"""
# Add a student to Certificate exception list
self.certificates_section.add_certificate_exception(self.user_name, '')
# Add duplicate student to Certificate exception list
self.certificates_section.add_certificate_exception(self.user_name, '')
self.assertIn(
'{user} already in exception list.'.format(user=self.user_name),
self.certificates_section.message.text
)
def test_error_on_empty_user_name(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard,
Error message appears if no username/email is entered while clicking "Add Exception" button
Given that I am on the Certificates tab on the Instructor Dashboard
When I click on 'Add Exception' button
AND student username/email field is empty
Then Error Message should say
'Student username/email field is required and can not be empty. '
        'Kindly fill in username/email and then press "Add to Exception List" button.'
"""
# Click 'Add Exception' button without filling username/email field
self.certificates_section.wait_for_certificate_exceptions_section()
self.certificates_section.click_add_exception_button()
self.assertIn(
'Student username/email field is required and can not be empty. '
'Kindly fill in username/email and then press "Add to Exception List" button.',
self.certificates_section.message.text
)
def test_error_on_non_existing_user(self):
"""
        Scenario: On the Certificates tab of the Instructor Dashboard,
        Error message appears if the username/email does not exist in the system when clicking the "Add Exception" button
        Given that I am on the Certificates tab on the Instructor Dashboard
        When I click on 'Add Exception' button
        AND student username/email does not exist
        Then Error Message should say
        '{user} does not exist in the LMS. Please check your spelling and retry.'
"""
invalid_user = 'test_user_non_existent'
# Click 'Add Exception' button with invalid username/email field
self.certificates_section.wait_for_certificate_exceptions_section()
self.certificates_section.fill_user_name_field(invalid_user)
self.certificates_section.click_add_exception_button()
self.certificates_section.wait_for_ajax()
self.assertIn(
"{user} does not exist in the LMS. Please check your spelling and retry.".format(user=invalid_user),
self.certificates_section.message.text
)
def test_user_not_enrolled_error(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard,
Error message appears if user is not enrolled in the course while trying to add a new exception.
Given that I am on the Certificates tab on the Instructor Dashboard
When I click on 'Add Exception' button
AND student is not enrolled in the course
        Then Error Message should say
        "{user} is not enrolled in this course. Please check your spelling and retry."
"""
new_user = 'test_user_{uuid}'.format(uuid=self.unique_id[6:12])
new_email = 'test_user_{uuid}@example.com'.format(uuid=self.unique_id[6:12])
# Create a new user who is not enrolled in the course
AutoAuthPage(self.browser, username=new_user, email=new_email).visit()
# Login as instructor and visit Certificate Section of Instructor Dashboard
self.user_name, self.user_id = self.log_in_as_instructor()
self.instructor_dashboard_page.visit()
self.certificates_section = self.instructor_dashboard_page.select_certificates()
# Click 'Add Exception' button with invalid username/email field
self.certificates_section.wait_for_certificate_exceptions_section()
self.certificates_section.fill_user_name_field(new_user)
self.certificates_section.click_add_exception_button()
self.certificates_section.wait_for_ajax()
self.assertIn(
"{user} is not enrolled in this course. Please check your spelling and retry.".format(user=new_user),
self.certificates_section.message.text
)
def test_generate_certificate_exception(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, when user clicks
'Generate Exception Certificates' newly added certificate exceptions should be synced on server
Given that I am on the Certificates tab on the Instructor Dashboard
When I click 'Generate Exception Certificates'
Then newly added certificate exceptions should be synced on server
"""
# Add a student to Certificate exception list
self.certificates_section.add_certificate_exception(self.user_name, '')
# Click 'Generate Exception Certificates' button
self.certificates_section.click_generate_certificate_exceptions_button()
self.certificates_section.wait_for_ajax()
self.assertIn(
self.user_name + ' has been successfully added to the exception list. Click Generate Exception Certificate'
' below to send the certificate.',
self.certificates_section.message.text
)
@ddt.data(
('Test \nNotes', 'Test Notes'),
('<Test>Notes</Test>', '<Test>Notes</Test>'),
)
@ddt.unpack
def test_notes_escaped_in_add_certificate_exception(self, notes, expected_notes):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Instructor can add new certificate
exception to list.
Given that I am on the Certificates tab on the Instructor Dashboard
        When I fill in the student username and notes (which contain characters that need to be escaped)
        and click 'Add Exception' button, then the new certificate exception should be visible in the
        certificate exceptions list.
"""
# Add a student to Certificate exception list
self.certificates_section.add_certificate_exception(self.user_name, notes)
self.assertIn(self.user_name, self.certificates_section.last_certificate_exception.text)
self.assertIn(expected_notes, self.certificates_section.last_certificate_exception.text)
# Revisit Page & verify that added exceptions are also synced with backend
self.certificates_section.refresh()
# Wait for the certificate exception section to render
self.certificates_section.wait_for_certificate_exceptions_section()
# Validate certificate exception synced with server is visible in certificate exceptions list
self.assertIn(self.user_name, self.certificates_section.last_certificate_exception.text)
self.assertIn(expected_notes, self.certificates_section.last_certificate_exception.text)
@attr('a11y')
def test_certificates_a11y(self):
"""
Certificates page accessibility tests
"""
self.certificates_section.a11y_audit.config.set_scope([
'.certificates-wrapper'
])
self.certificates_section.a11y_audit.check_for_accessibility_errors()
@attr(shard=10)
class CertificateInvalidationTest(BaseInstructorDashboardTest):
"""
    Tests for certificate invalidation functionality on the instructor dashboard.
"""
@classmethod
def setUpClass(cls):
super(CertificateInvalidationTest, cls).setUpClass()
# Create course fixture once each test run
CourseFixture(
org='test_org',
number='335535897951379478207964576572017930000',
run='test_run',
display_name='Test Course 335535897951379478207964576572017930000',
).install()
def setUp(self):
super(CertificateInvalidationTest, self).setUp()
        # Set the same course number as in the fixture JSON.
        self.course_info['number'] = "335535897951379478207964576572017930000"
        # A user with this id was created in the fixture, along with a generated certificate for it.
self.student_id = "99"
self.student_name = "testcert"
self.student_email = "cert@example.com"
# Enroll above test user in the course
AutoAuthPage(
self.browser,
username=self.student_name,
email=self.student_email,
course_id=self.course_id,
).visit()
self.test_certificate_config = {
'id': 1,
'name': 'Certificate name',
'description': 'Certificate description',
'course_title': 'Course title override',
'signatories': [],
'version': 1,
'is_active': True
}
self.cert_fixture = CertificateConfigFixture(self.course_id, self.test_certificate_config)
self.cert_fixture.install()
self.user_name, self.user_id = self.log_in_as_instructor()
self.instructor_dashboard_page = self.visit_instructor_dashboard()
self.certificates_section = self.instructor_dashboard_page.select_certificates()
disable_animations(self.certificates_section)
def test_instructor_can_invalidate_certificate(self):
"""
        Scenario: On the Certificates tab of the Instructor Dashboard, Instructor can add a certificate
        invalidation to the invalidation list.
        Given that I am on the Certificates tab on the Instructor Dashboard
        When I fill in student username and notes fields and click 'Invalidate Certificate' button
        Then the new certificate invalidation should be visible in the certificate invalidation list
"""
notes = 'Test Notes'
# Add a student to certificate invalidation list
self.certificates_section.add_certificate_invalidation(self.student_name, notes)
self.assertIn(self.student_name, self.certificates_section.last_certificate_invalidation.text)
self.assertIn(notes, self.certificates_section.last_certificate_invalidation.text)
# Validate success message
self.assertIn(
"Certificate has been successfully invalidated for {user}.".format(user=self.student_name),
self.certificates_section.certificate_invalidation_message.text
)
# Verify that added invalidations are also synced with backend
# Revisit Page
self.certificates_section.refresh()
# wait for the certificate invalidations section to render
self.certificates_section.wait_for_certificate_invalidations_section()
# validate certificate invalidation is visible in certificate invalidation list
self.assertIn(self.student_name, self.certificates_section.last_certificate_invalidation.text)
self.assertIn(notes, self.certificates_section.last_certificate_invalidation.text)
def test_instructor_can_re_validate_certificate(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Instructor can re-validate certificate.
Given that I am on the certificates tab on the Instructor Dashboard
AND there is a certificate invalidation in certificate invalidation table
When I click "Remove from Invalidation Table" button
Then certificate is re-validated and removed from certificate invalidation table.
"""
notes = 'Test Notes'
# Add a student to certificate invalidation list
self.certificates_section.add_certificate_invalidation(self.student_name, notes)
self.assertIn(self.student_name, self.certificates_section.last_certificate_invalidation.text)
self.assertIn(notes, self.certificates_section.last_certificate_invalidation.text)
# Verify that added invalidations are also synced with backend
# Revisit Page
self.certificates_section.refresh()
# wait for the certificate invalidations section to render
self.certificates_section.wait_for_certificate_invalidations_section()
# click "Remove from Invalidation Table" button next to certificate invalidation
self.certificates_section.remove_first_certificate_invalidation()
# validate certificate invalidation is removed from the list
self.assertNotIn(self.student_name, self.certificates_section.last_certificate_invalidation.text)
self.assertNotIn(notes, self.certificates_section.last_certificate_invalidation.text)
self.assertIn(
"The certificate for this learner has been re-validated and the system is "
"re-running the grade for this learner.",
self.certificates_section.certificate_invalidation_message.text
)
def test_error_on_empty_user_name_or_email(self):
"""
        Scenario: On the Certificates tab of the Instructor Dashboard, Instructor should see an error message if they
        click the "Invalidate Certificate" button without entering a student username or email.
Given that I am on the certificates tab on the Instructor Dashboard
When I click "Invalidate Certificate" button without entering student username/email.
Then I see following error message
"Student username/email field is required and can not be empty."
"Kindly fill in username/email and then press "Invalidate Certificate" button."
"""
# Click "Invalidate Certificate" with empty student username/email field
self.certificates_section.fill_certificate_invalidation_user_name_field("")
self.certificates_section.click_invalidate_certificate_button()
self.certificates_section.wait_for_ajax()
self.assertIn(
u'Student username/email field is required and can not be empty. '
u'Kindly fill in username/email and then press "Invalidate Certificate" button.',
self.certificates_section.certificate_invalidation_message.text
)
def test_error_on_invalid_user(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Instructor should see error message if
the student entered for certificate invalidation does not exist.
Given that I am on the certificates tab on the Instructor Dashboard
When I click "Invalidate Certificate"
AND the username entered does not exist in the system
        Then I see following error message
        "{user} does not exist in the LMS. Please check your spelling and retry."
"""
invalid_user = "invalid_test_user"
# Click "Invalidate Certificate" with invalid student username/email
self.certificates_section.fill_certificate_invalidation_user_name_field(invalid_user)
self.certificates_section.click_invalidate_certificate_button()
self.certificates_section.wait_for_ajax()
self.assertIn(
u"{user} does not exist in the LMS. Please check your spelling and retry.".format(user=invalid_user),
self.certificates_section.certificate_invalidation_message.text
)
def test_user_not_enrolled_error(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Instructor should see error message if
the student entered for certificate invalidation is not enrolled in the course.
Given that I am on the certificates tab on the Instructor Dashboard
When I click "Invalidate Certificate"
AND the username entered is not enrolled in the current course
Then I see following error message
"{user} is not enrolled in this course. Please check your spelling and retry."
"""
new_user = 'test_user_{uuid}'.format(uuid=self.unique_id[6:12])
new_email = 'test_user_{uuid}@example.com'.format(uuid=self.unique_id[6:12])
# Create a new user who is not enrolled in the course
AutoAuthPage(self.browser, username=new_user, email=new_email).visit()
# Login as instructor and visit Certificate Section of Instructor Dashboard
self.user_name, self.user_id = self.log_in_as_instructor()
self.instructor_dashboard_page.visit()
self.certificates_section = self.instructor_dashboard_page.select_certificates()
# Click 'Invalidate Certificate' button with not enrolled student
self.certificates_section.wait_for_certificate_invalidations_section()
self.certificates_section.fill_certificate_invalidation_user_name_field(new_user)
self.certificates_section.click_invalidate_certificate_button()
self.certificates_section.wait_for_ajax()
self.assertIn(
u"{user} is not enrolled in this course. Please check your spelling and retry.".format(user=new_user),
self.certificates_section.certificate_invalidation_message.text
)
@attr('a11y')
def test_invalidate_certificates_a11y(self):
"""
Certificate invalidation accessibility tests
"""
self.certificates_section.a11y_audit.config.set_scope([
'.certificates-wrapper'
])
self.certificates_section.a11y_audit.check_for_accessibility_errors()
|
caesar2164/edx-platform
|
common/test/acceptance/tests/lms/test_lms_instructor_dashboard.py
|
Python
|
agpl-3.0
| 56,702
|
[
"VisIt"
] |
d3d382b29d0bbc8cf9d5f6553742d65950f289c9265861d968e3689dde78e4d9
|
import numpy as np # numpy for numerical code (arrays, etc.)
from read_in_ascii import read_in_ascii #from filename import function
from make_ERDASimg import generate_ERDASimg_grid
#Degree day base for boreal forest photosynthesis 5.0 degrees C
ddbase = 5.0
#######################################################################
def GrowingDegreeDays_calc(ddbase, monthly_temperature_avgs_lst, monthly_temperature_stds_lst,
monthly_temperature_mins_lst, monthly_temperature_maxs_lst, lapse_rate_adj_mat):
"""
Simulate monthly temperature based off of driver monthly means and standard deviations;
add elev/lapse rate adjustment value from GIS to simulated monthly temp for each square in grid;
subtract off ddbase;
multiply positive temps by days in month and sum up for total growing degrees in the year.
Compute growing season length.
Parameters : ddbase -- degree day base
monthly_temperature_avgs_lst -- 12 monthly averages for temperature each month
monthly_temperature_stds_lst -- 12 monthly standard deviation for how each
monthly average deviates from year to year
monthly_temperature_mins_lst -- 12 min temperature values; 1 for each month from 50+ yrs of
daily WMO record of temps in the region
monthly_temperature_maxs_lst -- 12 max temperature values; 1 for each month from 50+ yrs of
daily WMO record of temps in the region
                 lapse_rate_adj_mat -- matrix of adjustment values to correct temps based on elev/lapse rate
Returns : GDD_mat -- a numpy matrix of growing degrees accumulated over the entire year for each plot
monthly_temp_lst -- a list of 12 matrices of monthly temperatures for each plot
"""
#### CONSTANTS
DAYS_IN_MONTH_lst = [31.,28.,31.,30.,31.,30.,31.,31.,30.,31.,30.,31.] # days in each month
monthly_Tmaxs_vec = np.array(monthly_temperature_maxs_lst)
####
def generate_daily_temperatures(month_avg, month_std, num_days):
# generate X numbers for a gaussian distribution
return month_std * np.random.randn(num_days) + month_avg
def generate_month_temperature(month_avg, month_std, minT, maxT, month_number):
        ndays = int(DAYS_IN_MONTH_lst[month_number])  # cast to int so it can size the randn draw and slice arrays
daily_temp_vec = generate_daily_temperatures(month_avg, month_std, ndays)
# if any of the daily values are outside of the range of allowed values, pick new numbers
# this removes the long tail from the right and left side of the distribution
while np.any( daily_temp_vec > maxT ) or np.any( daily_temp_vec < minT ):
new_temps_vec = generate_daily_temperatures(month_avg, month_std, ndays)
daily_temp_vec[daily_temp_vec > maxT] = new_temps_vec[daily_temp_vec > maxT]
daily_temp_vec[daily_temp_vec < minT] = new_temps_vec[daily_temp_vec < minT]
return daily_temp_vec
# generate a temperature value for each day of the year, but return them in a matrix (MONTH, DAY)
# Note : some of the returned values will be nan due to different number of days in each month
def generate_temperatures_matrix(monthly_temperature_avgs_lst, monthly_temperature_stds_lst,
monthly_minT_vec, monthly_maxT_vec):
NMONTH = 12
NDAY = 31
# start with all nans and then we will fill in data as we go
daily_temperature_mat = np.zeros((NMONTH, NDAY)) + np.nan
month_index = 0
        for month_avg, month_std, month_minT, month_maxT in zip(monthly_temperature_avgs_lst, monthly_temperature_stds_lst,
                                                                 monthly_minT_vec, monthly_maxT_vec):
daily_temp_vec = generate_month_temperature(month_avg, month_std, month_minT, month_maxT, month_index)
            ndays = int(DAYS_IN_MONTH_lst[month_index])
            daily_temperature_mat[month_index, 0:ndays] = daily_temp_vec
month_index += 1
return daily_temperature_mat
# 0) start with the daily temperature values (daily weather for all of geographical grid)
daily_temperature_mat = generate_temperatures_matrix(monthly_temperature_avgs_lst, monthly_temperature_stds_lst,
monthly_temperature_mins_lst, monthly_temperature_maxs_lst)
## start geographic specific
# for each geographic grid location and each day subtract DDBASE and add the lapse rate adjustment
# 1) add temp to adj_val for each square in geographic grid;
    # 2) subtract DDBASE for each square in geographic grid;
# 3) sum growing degree days for the year
nx, ny = lapse_rate_adj_mat.shape
GDD_mat = np.zeros((nx, ny))
total_growing_season_mat = np.zeros((nx, ny))
for x in range(nx):
for y in range(ny):
lapse_rate_adj = lapse_rate_adj_mat[x,y]
growing_degree_mat = daily_temperature_mat - ddbase + lapse_rate_adj
# every day that is below 0 set to 0
growing_degree_mat[np.less(growing_degree_mat, 0)] = 0.
# compute the growing degree days for the year
growing_degree_days = np.nansum(growing_degree_mat)
# compute the growing season length as the number of days that have a temperature above DDBASE
growing_season_ndays = np.sum( np.greater(growing_degree_mat, 0) )
# store the growing degree days value for this geographic grid point
GDD_mat[x,y] = growing_degree_days
total_growing_season_mat[x,y] = growing_season_ndays
# build up a list by month where each value in the list is a 2D matrix that hold the adjusted month temperature for
# each point on the geographic grid
monthly_temp_mat_lst = []
for month in range(12):
this_month_avg_temperature = np.nanmean( daily_temperature_mat[month] )
monthly_temp_mat_lst.append( this_month_avg_temperature + lapse_rate_adj_mat)
# # 1) add temp to adj_val for each square in grid; 2) subtract DDBASE (5.5C); 3) multiply by days in month
# # 4) sum growing degree days for the year
# monthly_temp_mat_lst = []
# GDD_mat = np.zeros(adj_val_mat.shape)
# growing_season_mat = np.zeros(adj_val_mat.shape)
# for this_months_temp, days_this_month in zip(month_simtemp_vec, DAYS_IN_MONTH_lst): #this is i,j in zip(a,b)
# lapse_rate_adj_mat = this_months_temp + adj_val_mat - ddbase
# lapse_rate_adj_mat[lapse_rate_adj_mat<0] = 0 #sets all negatives to zero, now can sum everything
# boolean_growing_season_mat = np.where(lapse_rate_adj_mat<=0,0,1) #assigns 0 to every plot where T<ddbase, 1 to every plot where T>ddbase
# growing_season_this_month_mat = boolean_growing_season_mat * days_this_month
# growing_season_mat = growing_season_mat + growing_season_this_month_mat #tallies growing days this year for each plot
# monthly_temp_mat_lst.append(this_months_temp + adj_val_mat)
# GDD_mat = GDD_mat + lapse_rate_adj_mat * days_this_month #don't use += to increment with numpy!!!
return GDD_mat, monthly_temp_mat_lst, total_growing_season_mat
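# Hedged usage sketch (not called by the model run): exercises GrowingDegreeDays_calc
# on a toy 2x2 grid. All monthly statistics below are illustrative assumptions,
# not calibrated driver values.
def _example_GDD_usage():
    toy_avgs = [-20., -15., -5., 2., 8., 13., 16., 14., 8., 1., -10., -18.]
    toy_stds = [1.] * 12
    toy_mins = [t - 15. for t in toy_avgs]  # generous bounds so the
    toy_maxs = [t + 15. for t in toy_avgs]  # resampling loop terminates quickly
    toy_lapse_mat = np.zeros((2, 2))        # flat terrain: no elevation adjustment
    GDD_mat, monthly_temp_mat_lst, season_mat = GrowingDegreeDays_calc(
        ddbase, toy_avgs, toy_stds, toy_mins, toy_maxs, toy_lapse_mat)
    return GDD_mat, season_mat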
'''
def GrowingDegreeDays_calc(ddbase, monthly_temperature_avgs_lst, monthly_temperature_stds_lst, monthly_temperature_maxs_lst, adj_val_mat, month_simtemp_vec=None):
"""
Simulate monthly temperature based off of driver monthly means and standard deviations;
add elev/lapse rate adjustment value from GIS to simulated monthly temp for each square in grid;
subtract off ddbase;
multiply positive temps by days in month and sum up for total growing degrees in the year.
Compute growing season length.
Parameters : ddbase -- degree day base
monthly_temperature_avgs_lst -- 12 monthly averages for temperature each month
monthly_temperature_stds_lst -- 12 monthly standard deviation for how each
monthly average deviates from year to year
monthly_temperature_maxs_lst -- 12 daily max temperature values from 50+ yrs of
WMO record of temps in the region
adj_val_mat -- matrix of adjustment values to correct temps based on elev/lapse rate
month_simtemp_vec -- optional parameter, if not passed in then generate random temps,
if pass in, this is from an hdf file record
Returns : GDD_mat -- a numpy matrix of growing degrees accumulated over the entire year for each plot
monthly_temp_lst -- a list of 12 matrices of monthly temperatures for each plot
"""
#### CONSTANTS
DAYS_IN_MONTH_lst = [31.,28.,31.,30.,31.,30.,31.,31.,30.,31.,30.,31.] # days in each month
monthly_Tmaxs_vec = np.array(monthly_temperature_maxs_lst)
####
def generate_temperature(monthly_temperature_avgs_lst, monthly_temperature_stds_lst):
        # simulate temps for each month using statistics from driver and numpy math
normal_randn_vec = np.random.randn(12) # get 12 random numbers with zero mean and std=1
monthly_avgs_vec = np.array(monthly_temperature_avgs_lst)
monthly_stds_vec = np.array(monthly_temperature_stds_lst)
month_simtemp_vec = monthly_avgs_vec + monthly_stds_vec * normal_randn_vec
return month_simtemp_vec
    if month_simtemp_vec is None: #generate 12 months of random temperature (weather)
month_simtemp_vec = generate_temperature(monthly_temperature_avgs_lst, monthly_temperature_stds_lst)
while np.any(month_simtemp_vec > monthly_Tmaxs_vec): #if any of 12 values return true, generate 12 new T values
month_simtemp_vec = generate_temperature(monthly_temperature_avgs_lst, monthly_temperature_stds_lst)
        #while loop will continue until 12 monthly temp values are simulated such that all monthlies are less
#than or equal to the allowed Tmax for that month.
# 1) add temp to adj_val for each square in grid; 2) subtract 5.5C; 3) multiply by days in month
# 4) sum growing degree days for the year
monthly_temp_mat_lst = []
GDD_mat = np.zeros(adj_val_mat.shape)
growing_season_mat = np.zeros(adj_val_mat.shape)
for this_months_temp, days_this_month in zip(month_simtemp_vec, DAYS_IN_MONTH_lst): #this is i,j in zip(a,b)
lapse_rate_adj_mat = this_months_temp + adj_val_mat - ddbase
lapse_rate_adj_mat[lapse_rate_adj_mat<0] = 0 #sets all negatives to zero, now can sum everything
boolean_growing_season_mat = np.where(lapse_rate_adj_mat<=0,0,1) #assigns 0 to every plot where T<ddbase, 1 to every plot where T>ddbase
growing_season_this_month_mat = boolean_growing_season_mat * days_this_month
growing_season_mat = growing_season_mat + growing_season_this_month_mat #tallies growing days this year for each plot
monthly_temp_mat_lst.append(this_months_temp + adj_val_mat)
GDD_mat = GDD_mat + lapse_rate_adj_mat * days_this_month #don't use += to increment with numpy!!!
return month_simtemp_vec, GDD_mat, monthly_temp_mat_lst, growing_season_mat
'''
#########################################################################################
def one_time_radiation_readin(monthly_radiation_files_path,expected_nx,expected_ny):
"""
    Activated in year 1 of the sim to read in the radiation files computed in GIS for the simulated terrain;
generates a list of matrices to be called during PET and soil moisture and light calculations.
Parameters: monthly_radiation_files_path -- folder location for the 12 monthly radiation matrices
expected_nx, expected_ny -- define the DEM matrix (obtained from DEM.shape)
Returns: radiation_rasters_lst = a list of 12 matrices containing accumulated radiation for each month on each plot
"""
radiation_rasters_lst = []
for i in range(12):
        filename = monthly_radiation_files_path+'/monthlyradiation%d.txt' % (i+1)  # monthlyradiation1.txt is January ... monthlyradiation12.txt is December
months_rad_mat = read_in_ascii(filename)
        if months_rad_mat.shape != (expected_nx, expected_ny):
            raise ValueError("Monthly radiation file wrong shape: %s" % filename)
radiation_rasters_lst.append(months_rad_mat)
return radiation_rasters_lst
#loop through calling the PET function on 12 radiation matrices to get PET for each month
#compute soil moisture for each month
#compute the dry days in growing season for the Dry Day Factor constraint on growth
def PET(monthly_temp_mat, rad_raster_mat):
"""
Using a modified Priestly-Taylor equation as described in Campbell (1977, p140)
The temperature- and radiation-based PET calculations is recommended for boreal regions by Fisher et al., (2011)
Simulate monthly PET based on monthly temperature and GIS-computed monthly accumulated radiation (in WH/m2);
PET is assumed to occur anytime air temperatures are >0C, because conifers can begin transpiration whenever air temp>0C;
The units work out as follows:
GIS returns a raster in WH/m2
total energy = WH/m2 * 3600s/hr = J/m2
lambda = latent heat of vaporization = 2430 J/g
a = 0.025 1/deg C
b = 3 deg C
PET calculation = a*(avg_monthly_temperature + b)/lambda = g/m2s
convert to cm/month via = (PET in g/m2s) * (1m3/1000000g) * (100cm/1m)
Parameters : monthly_temp_PET_lst -- 12 monthly averages for temperature each month
Returns : ddays = dry day index for each plot, which is a fraction of drought days within growing season
"""
total_energy = rad_raster_mat * 3600
    monthly_temp_mat = np.where(monthly_temp_mat <= 0., -3., monthly_temp_mat)  # -3 forces (T + 3) = 0, i.e. PET = 0, for temps <=0C, when PET should not be occurring; np.where avoids mutating the caller's matrix
PET_mat = (0.025 * (monthly_temp_mat + 3.0) * (total_energy))/(2430.0*10000.0)
print "PET mat:", PET_mat[0,0]
return PET_mat
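# Hedged unit check for the conversion above (illustrative numbers only):
# a plot receiving 100000 WH/m2 in a month at an average of 10 C gives
#   total_energy = 100000 * 3600 = 3.6e8 J/m2
#   PET = 0.025 * (10 + 3) * 3.6e8 / 2430 ~= 48148 g/m2 of water
# and since 1 g of water over 1 m2 is 1e-4 cm of depth, dividing by 10000
# yields PET ~= 4.8 cm for the month.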
##############################################################################
def rain_sim(rainfall_monthly_avgs, rainfall_monthly_stds):
"""
    Simulate this year's 12 monthly rainfall totals from the driver's monthly means and standard deviations.
Parameters: rainfall_monthly_avgs -- 12 monthly averages for rainfall
rainfall_monthly_stds -- 12 monthly standard deviation for how each
monthly average deviates from year to year
Returns : rainfall_vec -- a list of 12 simulated monthly rainfall values for this year of sim
"""
# store the monthly rainfall statistics
mean_rain_by_month_vec = np.array(rainfall_monthly_avgs)
std_rain_by_month_vec = np.array(rainfall_monthly_stds)
normal_randn_vec = np.random.randn(12) # get 12 random numbers with zero mean and std=1
monthly_sim_rain_vec = mean_rain_by_month_vec + std_rain_by_month_vec * normal_randn_vec
monthly_sim_rain_vec[monthly_sim_rain_vec<0] = 0
    # Observed precipitation is typically underestimated because of wind-induced gauge losses. Following Bonan,
    # increase the observed rain by 10% (for Alaskan boreal forest; maybe more for Siberia?) to better represent actual precipitation:
    monthly_sim_rain_vec = monthly_sim_rain_vec * 1.1
print "annual rain: ", np.sum(monthly_sim_rain_vec)
return monthly_sim_rain_vec
def soil_moisture(monthly_sim_rain, PET_mat, last_months_soil_water_mat, field_capacity, wilting_point):
"""
Computes soil moisture based on equation: old_water + rain - PET = current soil moisture
old water is soil moisture from pervious month
Parameters: monthly_rain -- simulated precip amount in cm for this month
PET -- computed in the PET function, this is the list of matrices of PET computed for each plot for each month
last_months_soil_water -- this is input for the previous month and output for this month
Returns: last_months_soil_water -- soil moisture computed for this month
"""
soil_moisture_mat = (last_months_soil_water_mat + monthly_sim_rain) - PET_mat
runoff_mat = soil_moisture_mat - field_capacity #computing runoff for now, but not using it outside the loop
runoff_mat[runoff_mat<0] = 0 #sets negative runoff values to zero
soil_moisture_mat[soil_moisture_mat>field_capacity] = field_capacity #if more soil moist than field capacity, make that excess run off and set the soil moisture to field capacity (saturated soil)
soil_moisture_mat[soil_moisture_mat<(wilting_point - 5.)] = wilting_point - 5. #so soil water can still recharge over the winter, otherwise becomes very negative
# print "soil water: ", soil_moisture_mat
return soil_moisture_mat
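# Hedged worked example for the bucket model above (illustrative numbers only):
# with last month's water = 20 cm, rain = 5 cm, PET = 3 cm, FC = 22 cm, WP = 8 cm:
#   soil_moisture = 20 + 5 - 3 = 22 cm -> exactly at field capacity, runoff = 0
# a following dry month (rain = 0, PET = 6 cm) would give 22 - 6 = 16 cm, still > WP.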
def drydays(total_growing_season_mat, soil_moisture_mat, wilting_point, monthly_temp_mat_lst, radiation_mat_lst, field_capacity, monthly_sim_rain_vec, ddbase):
"""
Take growing season length and soil moisture=f(FC,rain,lastmonthssoilmoist,PET{T&radiation})
and compute the fraction of dry days within the growing season
Parameters: total_growing_season_mat -- sum of days within the growing season this year
                soil_moisture_mat -- last month's soil moisture, used to initiate a whole year of soil moisture computations
                wilting_point -- specified in the driver; however, soil is allowed to dry out below the wilting point, unlike ZELIG v1.0
monthly_temp_mat_lst -- list of matrices of monthly average temperatures for each plot for each month
radiation_mat_lst -- list of matrices of monthly cumulative incident radiation for each plot
field_capacity -- specified in driver, soil moisture in excess of this is considered runoff
monthly_sim_rain_vec -- list of monthly precip (each plot in simulated area is considered to receive same amount of precip, due to how small the
simulated area is)
    Returns: soil_moisture_mat -- December's soil moisture from this year, used to initiate the soil moisture computation next year
             drydays_fraction_mat -- the fraction of the growing season spent in drought this year (0 to 1)
"""
days_this_month_lst = [31.,28.,31.,30.,31.,30.,31.,31.,30.,31.,30.,31.] # days in each month
dry_days_accumulator_mat = np.zeros(total_growing_season_mat.shape)
for month in range(12):
PET_mat = PET(monthly_temp_mat = monthly_temp_mat_lst[month], rad_raster_mat = radiation_mat_lst[month])
# since actual evapotranspiration (AET) is about 70% of PET, scale down the computed PET:
PET_mat = 0.7 * PET_mat
soil_moisture_mat = soil_moisture(monthly_sim_rain = monthly_sim_rain_vec[month], PET_mat = PET_mat,
last_months_soil_water_mat = soil_moisture_mat,
field_capacity = field_capacity, wilting_point = wilting_point)
        boolean_drought_mat = np.where(((soil_moisture_mat<=wilting_point) & (monthly_temp_mat_lst[month]>=ddbase)),1,0) #assigns 1 to every plot that is drier than the wilting point during the growing season, 0 otherwise
        drought_this_month_mat = boolean_drought_mat * days_this_month_lst[month]  # number of days in drought
dry_days_accumulator_mat = dry_days_accumulator_mat + drought_this_month_mat #tallies drought days this year for each plot
# print "PET monthly sum for all plots: ", PET_mat.sum(), "monthly rain summed up over all plots: ", monthly_sim_rain_vec[month]*900.
drydays_fraction_mat = dry_days_accumulator_mat/total_growing_season_mat
# print "rain: ", monthly_sim_rain_vec
# print "dry days fraction: ", drydays_fraction_mat
return soil_moisture_mat, drydays_fraction_mat
######################################################################################################
#if __name__ == '__main__':
def compute_GDD():
from load_driver import load_driver_json
driver_file = 'driver_boreal.json' #for testing, comparing against ZELIG v1.0 from Urban 1990
# load the species specific parameters from the driver file into a dictionary called driver
driver = load_driver_json(driver_file)
# define the range of years to simulate over
start = 0; stop = driver["NYRS"]-1
nplots = driver["NPLOTS"]
    # NOTE: the driver key names for the monthly min/max temperature records ("TMIN"/"TMAX") are assumed here
    GDD_matrix, monthly_temp_mat_lst, total_growing_season_mat = GrowingDegreeDays_calc(ddbase = 5.5, monthly_temperature_avgs_lst = driver["XT"],
                                                                                        monthly_temperature_stds_lst = driver["VT"],
                                                                                        monthly_temperature_mins_lst = driver["TMIN"],
                                                                                        monthly_temperature_maxs_lst = driver["TMAX"],
                                                                                        lapse_rate_adj_mat = read_in_ascii('elev_adj_factor.txt'))
generate_ERDASimg_grid(metadata_file = 'elev_adj_factor.txt', matrix_file = 'GDD_grid.img',
numpy_raster = GDD_matrix)
    # NOTE: the radiation folder path here is an assumption; the expected grid shape comes from the GDD matrix
    radiation_mat_lst = one_time_radiation_readin(monthly_radiation_files_path = 'radiation',
                                                  expected_nx = GDD_matrix.shape[0], expected_ny = GDD_matrix.shape[1])
monthly_sim_rain_vec = rain_sim(rainfall_monthly_avgs = driver['XR'], rainfall_monthly_stds = driver['VR'])
    initial_soil_water_mat = np.zeros(GDD_matrix.shape) + driver['FC']  # start sim with field capacity as soil water content
soil_moisture_mat, drydays_fraction_mat = drydays(total_growing_season_mat = total_growing_season_mat,soil_moisture_mat = initial_soil_water_mat,
wilting_point = driver['WP'], monthly_temp_mat_lst = monthly_temp_mat_lst,
radiation_mat_lst = radiation_mat_lst, field_capacity = driver['FC'],
                                                      monthly_sim_rain_vec = monthly_sim_rain_vec, ddbase = 5.56)  # TODO: specify ddbase in the driver? (note: 5.5 is used for the GDD call above)
generate_ERDASimg_grid(metadata_file = 'elev_adj_factor.txt', matrix_file = 'DryDays_grid.img',
numpy_raster = drydays_fraction_mat)
|
SIBBORK/SIBBORK
|
source/weather.py
|
Python
|
gpl-2.0
| 22,080
|
[
"Gaussian"
] |
923cd8be7b8c176dfbe63aba84a3f79a206995eaab03e05e6d82492dbc472db8
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky
from tensorflow.contrib.distributions.python.ops import operator_pd_diag
from tensorflow.contrib.distributions.python.ops import operator_pd_full
from tensorflow.contrib.distributions.python.ops import operator_pd_vdvt_update
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
__all__ = [
"MultivariateNormalDiag",
"MultivariateNormalDiagWithSoftplusStDev",
"MultivariateNormalCholesky",
"MultivariateNormalFull",
"MultivariateNormalDiagPlusVDVT",
]
_mvn_prob_note = """
`x` is a batch vector with compatible shape if `x` is a `Tensor` whose
shape can be broadcast up to either:
```
self.batch_shape + self.event_shape
```
or
```
[M1,...,Mm] + self.batch_shape + self.event_shape
```
"""
class _MultivariateNormalOperatorPD(distribution.Distribution):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and an instance of
`OperatorPDBase`, which provides access to a symmetric positive definite
operator, which defines the covariance.
#### Mathematical details
With `C` the covariance matrix represented by the operator, the PDF of this
distribution is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian.
mu = [1, 2, 3]
chol = [[1, 0, 0.], [1, 3, 0], [1, 2, 3]]
cov = tf.contrib.distributions.OperatorPDCholesky(chol)
dist = tf.contrib.distributions._MultivariateNormalOperatorPD(mu, cov)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1.])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33.]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
cov = tf.contrib.distributions.OperatorPDCholesky(chol)
dist = tf.contrib.distributions._MultivariateNormalOperatorPD(mu, cov)
  # Evaluate this on two observations, each in R^3, returning a length two
  # tensor.
x = [[-1, 0, 1], [-11, 0, 11.]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(self,
mu,
cov,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalCov"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu`, and an instance of `OperatorPDBase`, `cov`,
which determines the covariance.
Args:
mu: Floating point tensor with shape `[N1,...,Nb, k]`, `b >= 0`.
cov: Instance of `OperatorPDBase` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `cov` are different dtypes.
"""
parameters = locals()
parameters.pop("self")
with ops.name_scope(name) as ns:
with ops.name_scope("init", values=[mu] + cov.inputs):
self._mu = array_ops.identity(mu, name="mu")
self._cov = cov
self._validate_args = validate_args # Needed by _assert_valid_mu.
self._mu = self._assert_valid_mu(self._mu)
super(_MultivariateNormalOperatorPD, self).__init__(
dtype=self._mu.dtype,
is_reparameterized=True,
is_continuous=True,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._mu] + cov.inputs,
name=ns)
def _assert_valid_mu(self, mu):
"""Return `mu` after validity checks and possibly with assertations."""
cov = self._cov
if mu.dtype != cov.dtype:
raise TypeError(
"mu and cov must have the same dtype. Found mu.dtype = %s, "
"cov.dtype = %s" % (mu.dtype, cov.dtype))
# Try to validate with static checks.
mu_shape = mu.get_shape()
cov_shape = cov.get_shape()
if mu_shape.is_fully_defined() and cov_shape.is_fully_defined():
if mu_shape != cov_shape[:-1]:
raise ValueError(
"mu.shape and cov.shape[:-1] should match. Found: mu.shape=%s, "
"cov.shape=%s" % (mu_shape, cov_shape))
else:
return mu
# Static checks could not be run, so possibly do dynamic checks.
if not self.validate_args:
return mu
else:
assert_same_rank = check_ops.assert_equal(
array_ops.rank(mu) + 1,
cov.rank(),
data=["mu should have rank 1 less than cov. Found: rank(mu) = ",
array_ops.rank(mu), " rank(cov) = ", cov.rank()],
)
with ops.control_dependencies([assert_same_rank]):
assert_same_shape = check_ops.assert_equal(
array_ops.shape(mu),
cov.vector_shape(),
data=["mu.shape and cov.shape[:-1] should match. "
"Found: shape(mu) = "
, array_ops.shape(mu), " shape(cov) = ", cov.shape()],
)
return control_flow_ops.with_dependencies([assert_same_shape], mu)
@property
def mu(self):
return self._mu
@property
def sigma(self):
"""Dense (batch) covariance matrix, if available."""
with ops.name_scope(self.name):
return self._cov.to_dense()
def log_sigma_det(self, name="log_sigma_det"):
"""Log of determinant of covariance matrix."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=self._cov.inputs):
return self._cov.log_det()
def sigma_det(self, name="sigma_det"):
"""Determinant of covariance matrix."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=self._cov.inputs):
return math_ops.exp(self._cov.log_det())
def _batch_shape(self):
return self._cov.batch_shape()
def _get_batch_shape(self):
return self._cov.get_batch_shape()
def _event_shape(self):
return array_ops.pack([self._cov.vector_space_dimension()])
def _get_event_shape(self):
return self._cov.get_shape()[-1:]
def _sample_n(self, n, seed=None):
# Recall _assert_valid_mu ensures mu and self._cov have same batch shape.
shape = array_ops.concat(0, [self._cov.vector_shape(), [n]])
white_samples = random_ops.random_normal(shape=shape,
mean=0.,
stddev=1.,
dtype=self.dtype,
seed=seed)
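    # If z ~ N(0, I), then mu + S z ~ N(mu, S S^T) = N(mu, C): multiplying the
    # white samples by the covariance square root correlates them.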
correlated_samples = self._cov.sqrt_matmul(white_samples)
# Move the last dimension to the front
perm = array_ops.concat(0, (
array_ops.pack([array_ops.rank(correlated_samples) - 1]),
math_ops.range(0, array_ops.rank(correlated_samples) - 1)))
# TODO(ebrevdo): Once we get a proper tensor contraction op,
# perform the inner product using that instead of batch_matmul
# and this slow transpose can go away!
correlated_samples = array_ops.transpose(correlated_samples, perm)
samples = correlated_samples + self.mu
return samples
@distribution_util.AppendDocstring(_mvn_prob_note)
def _log_prob(self, x):
# Q: Why are shape requirements as stated above?
# A: The compatible shapes are precisely the ones that will broadcast to
# a shape compatible with self._cov.
# See Operator base class for notes about shapes compatible with self._cov.
x = ops.convert_to_tensor(x)
contrib_tensor_util.assert_same_float_dtype((self._mu, x))
# _assert_valid_mu asserts that self.mu has same batch shape as self.cov.
# so batch shape of self.mu = that of self._cov and self, and the
# batch shape of x_centered is a broadcast version of these. If this
# broadcast results in a shape like
# [M1,...,Mm] + self.batch_shape + self.event_shape
# OR
# self.batch_shape + self.event_shape
# then subsequent operator calls are guaranteed to work.
x_centered = x - self.mu
    # Compute the quadratic form (x - mu)^T sigma^{-1} (x - mu), which appears
    # in the exponent of the pdf.
x_whitened_norm = self._cov.inv_quadratic_form_on_vectors(x_centered)
k = math_ops.cast(self._cov.vector_space_dimension(), self.dtype)
log_prob_value = -0.5 * (self.log_sigma_det() +
k * math.log(2. * math.pi) +
x_whitened_norm)
output_static_shape = x_centered.get_shape()[:-1]
log_prob_value.set_shape(output_static_shape)
return log_prob_value
@distribution_util.AppendDocstring(_mvn_prob_note)
def _prob(self, x):
return math_ops.exp(self.log_prob(x))
def _entropy(self):
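    # Differential entropy of N(mu, Sigma) on R^k:
    #   H = 0.5 * (k * (1 + log(2 pi)) + log|Sigma|)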
log_sigma_det = self.log_sigma_det()
one_plus_log_two_pi = constant_op.constant(1 + math.log(2 * math.pi),
dtype=self.dtype)
    # k is the event-space dimension; the entropy broadcasts over the batch shape.
k = math_ops.cast(self._cov.vector_space_dimension(), dtype=self.dtype)
entropy_value = (k * one_plus_log_two_pi + log_sigma_det) / 2
entropy_value.set_shape(log_sigma_det.get_shape())
return entropy_value
def _mean(self):
return array_ops.identity(self._mu)
def _variance(self):
return self.sigma
def _mode(self):
return array_ops.identity(self._mu)
class MultivariateNormalDiag(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and a 1-D diagonal
`diag_stdev`, representing the standard deviations. This distribution
  assumes the random variables `(X_1,...,X_k)` are independent, so no
  off-diagonal terms of the covariance matrix are needed.
This allows for `O(k)` pdf evaluation, sampling, and storage.
#### Mathematical details
The PDF of this distribution is defined in terms of the diagonal covariance
determined by `diag_stdev`: `C_{ii} = diag_stdev[i]**2`.
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
  A single multi-variate Gaussian distribution is defined by a vector of means
  of length `k`, and the standard deviations of the `k` (independent) random variables.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal standard deviation.
mu = [1, 2, 3.]
diag_stdev = [4, 5, 6.]
dist = tf.contrib.distributions.MultivariateNormalDiag(mu, diag_stdev)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33]] # shape 2 x 3
diag_stdev = ... # shape 2 x 3, positive.
dist = tf.contrib.distributions.MultivariateNormalDiag(mu, diag_stdev)
  # Evaluate this on two observations, each in R^3, returning a length two
  # tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(
self,
mu,
diag_stdev,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiag"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and standard deviations `diag_stdev`.
Each batch member represents a random vector `(X_1,...,X_k)` of independent
random normals.
The mean of `X_i` is `mu[i]`, and the standard deviation is `diag_stdev[i]`.
Args:
mu: Rank `N + 1` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
diag_stdev: Rank `N + 1` `Tensor` with same `dtype` and shape as `mu`,
representing the standard deviations. Must be positive.
validate_args: `Boolean`, default `False`. Whether to validate
input with asserts. If `validate_args` is `False`,
and the inputs are invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc.) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `diag_stdev` are different dtypes.
"""
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[diag_stdev]) as ns:
cov = operator_pd_diag.OperatorPDSqrtDiag(diag_stdev,
verify_pd=validate_args)
super(MultivariateNormalDiag, self).__init__(
mu, cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=ns)
self._parameters = parameters
class MultivariateNormalDiagWithSoftplusStDev(MultivariateNormalDiag):
"""MultivariateNormalDiag with `diag_stddev = softplus(diag_stddev)`."""
def __init__(self,
mu,
diag_stdev,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiagWithSoftplusStdDev"):
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[diag_stdev]) as ns:
super(MultivariateNormalDiagWithSoftplusStDev, self).__init__(
mu=mu,
diag_stdev=nn.softplus(diag_stdev),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=ns)
self._parameters = parameters
class MultivariateNormalDiagPlusVDVT(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
Every batch member of this distribution is defined by a mean and a lightweight
parameterization of its covariance matrix `C`.
#### Mathematical details
The PDF of this distribution in terms of the mean `mu` and covariance `C` is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
For every batch member, this distribution represents `k` random variables
`(X_1,...,X_k)`, with mean `E[X_i] = mu[i]`, and covariance matrix
`C_{ij} := E[(X_i - mu[i])(X_j - mu[j])]`
The user initializes this class by providing the mean `mu`, and a lightweight
definition of `C`:
```
C = SS^T = SS = (M + V D V^T) (M + V D V^T)
M is diagonal (k x k)
V is shape (k x r), typically r << k
D is diagonal (r x r), optional (defaults to identity).
```
This allows for `O(kr + r^3)` pdf evaluation and determinant, and `O(kr)`
sampling and storage (per batch member).
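A minimal numpy sketch (illustrative only, with hypothetical values) of
assembling `S` and `C` from this parameterization for `k = 3`, `r = 2`:
```python
import numpy as np

k, r = 3, 2
diag_large = np.array([1.1, 2.2, 3.3])  # Diagonal of M.
v = np.ones((k, r))                     # V, shape (k, r).
diag_small = np.array([4., 5.])         # Diagonal of D.
s = np.diag(diag_large) + v.dot(np.diag(diag_small)).dot(v.T)  # S = M + V D V^T
c = s.dot(s)  # C = SS, since S is symmetric.
```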
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and square root of the covariance `S = M + V D V^T`. Extra
leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with covariance square root
# S = M + V D V^T, where V D V^T is a rank-2 update.
mu = [1, 2, 3.]
diag_large = [1.1, 2.2, 3.3]
v = ... # shape 3 x 2
diag_small = [4., 5.]
dist = tf.contrib.distributions.MultivariateNormalDiagPlusVDVT(
mu, diag_large, v, diag_small=diag_small)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians. This time, don't provide
# diag_small. This means S = M + V V^T.
mu = [[1, 2, 3], [11, 22, 33.]] # shape 2 x 3
diag_large = ... # shape 2 x 3
v = ... # shape 2 x 3 x 1, a rank-1 update.
dist = tf.contrib.distributions.MultivariateNormalDiagPlusVDVT(
mu, diag_large, v)
# Evaluate this on two observations, each in R^3, returning a length-two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(
self,
mu,
diag_large,
v,
diag_small=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiagPlusVDVT"):
"""Multivariate Normal distributions on `R^k`.
For every batch member, this distribution represents `k` random variables
`(X_1,...,X_k)`, with mean `E[X_i] = mu[i]`, and covariance matrix
`C_{ij} := E[(X_i - mu[i])(X_j - mu[j])]`
The user initializes this class by providing the mean `mu`, and a
lightweight definition of `C`:
```
C = SS^T = SS = (M + V D V^T) (M + V D V^T)
M is diagonal (k x k)
V is shape (k x r), typically r << k
D is diagonal (r x r), optional (defaults to identity).
```
Args:
mu: Rank `n + 1` floating point tensor with shape `[N1,...,Nn, k]`,
`n >= 0`. The means.
diag_large: Rank `n + 1` floating point tensor, shape
`[N1,...,Nn, k]`, `n >= 0`. Defines the diagonal matrix `M`.
v: Rank `n + 1` floating point tensor, shape `[N1,...,Nn, k, r]`,
`n >= 0`. Defines the matrix `V`.
diag_small: Rank `n + 1` floating point tensor, shape
`[N1,...,Nn, k]`, `n >= 0`. Defines the diagonal matrix `D`. Default
is `None`, which means `D` will be the identity matrix.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`,
and the inputs are invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc.) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
"""
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[diag_large, v, diag_small]) as ns:
cov = operator_pd_vdvt_update.OperatorPDSqrtVDVTUpdate(
operator_pd_diag.OperatorPDDiag(
diag_large, verify_pd=validate_args),
v,
diag=diag_small,
verify_pd=validate_args,
verify_shapes=validate_args)
super(MultivariateNormalDiagPlusVDVT, self).__init__(
mu, cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=ns)
self._parameters = parameters
class MultivariateNormalCholesky(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and a Cholesky factor `chol`.
Providing the Cholesky factor allows for `O(k^2)` pdf evaluation and sampling,
and requires `O(k^2)` storage.
#### Mathematical details
The Cholesky factor `chol` defines the covariance matrix: `C = chol chol^T`.
The PDF of this distribution is then:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
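A minimal sketch of this `O(k^2)` evaluation (numpy/scipy, not this class's
API; names are hypothetical), using one triangular solve against `chol`:
```python
import numpy as np
from scipy.linalg import solve_triangular

def chol_mvn_log_pdf(x, mu, chol):
  # C = chol chol^T, so (x - mu)^T C^{-1} (x - mu) = ||chol^{-1} (x - mu)||^2.
  z = solve_triangular(chol, x - mu, lower=True)  # O(k^2) back-substitution.
  k = mu.shape[-1]
  log_det_c = 2. * np.sum(np.log(np.diag(chol)))
  return -0.5 * (k * np.log(2. * np.pi) + log_det_c + z.dot(z))
```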
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal covariance.
# Note, this would be more efficient with MultivariateNormalDiag.
mu = [1, 2, 3.]
chol = [[1, 0, 0], [0, 3, 0], [0, 0, 2.]]
dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33.]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)
# Evaluate this on two observations, each in R^3, returning a length-two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
Trainable (batch) Cholesky matrices can be created with
`tf.contrib.distributions.matrix_diag_transform()`.
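For example (a sketch; `matrix` and `mu` are assumed to be float tensors
produced elsewhere, with `matrix` of shape `[..., k, k]`):
```python
# Softplus the diagonal so the result is a valid Cholesky factor; the
# upper triangle is ignored by this class.
chol = tf.contrib.distributions.matrix_diag_transform(
    matrix, transform=tf.nn.softplus)
dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)
```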
"""
def __init__(self,
mu,
chol,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalCholesky"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and `chol` which holds the (batch) Cholesky
factors, such that the covariance of each batch member is `chol chol^T`.
Args:
mu: `(b+1)-D` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
chol: `(b+2)-D` `Tensor` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`. The upper triangular part is ignored (treated as
though it is zero), and the diagonal must be positive.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc.) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `chol` are different dtypes.
"""
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[chol]) as ns:
cov = operator_pd_cholesky.OperatorPDCholesky(chol,
verify_pd=validate_args)
super(MultivariateNormalCholesky, self).__init__(
mu, cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=ns)
self._parameters = parameters
class MultivariateNormalFull(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and covariance matrix `sigma`.
Evaluation of the pdf, determinant, and sampling are all `O(k^3)` operations.
#### Mathematical details
With `C = sigma`, the PDF of this distribution is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal covariance.
mu = [1, 2, 3.]
sigma = [[1, 0, 0], [0, 3, 0], [0, 0, 2.]]
dist = tf.contrib.distributions.MultivariateNormalFull(mu, sigma)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33.]]
sigma = ... # shape 2 x 3 x 3, positive definite.
dist = tf.contrib.distributions.MultivariateNormalFull(mu, sigma)
# Evaluate this on two observations, each in R^3, returning a length-two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11.]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(self,
mu,
sigma,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalFull"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and `sigma`, the mean and covariance.
Args:
mu: `(b+1)-D` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
sigma: `(b+2)-D` `Tensor` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`. Each batch member must be positive definite.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc.) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `sigma` are different dtypes.
"""
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[sigma]) as ns:
cov = operator_pd_full.OperatorPDFull(sigma, verify_pd=validate_args)
super(MultivariateNormalFull, self).__init__(
mu, cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=ns)
self._parameters = parameters
def _kl_mvn_mvn_brute_force(mvn_a, mvn_b, name=None):
"""Batched KL divergence `KL(mvn_a || mvn_b)` for multivariate normals.
With `X`, `Y` both multivariate normals in `R^k` with means `mu_a`, `mu_b` and
covariance `C_a`, `C_b` respectively,
```
KL(X || Y) = 0.5 * ( T + Q - k + L ),
T := trace(C_b^{-1} C_a),
Q := (mu_b - mu_a)^T C_b^{-1} (mu_b - mu_a),
L := Log[Det(C_b)] - Log[Det(C_a)]
```
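For concreteness, a hypothetical numpy check of the formula for `k = 2`
(illustrative only):
```python
import numpy as np

mu_a, mu_b = np.zeros(2), np.ones(2)
c_a, c_b = np.diag([1., 2.]), np.diag([2., 3.])
t = np.trace(np.linalg.solve(c_b, c_a))
q = (mu_b - mu_a).dot(np.linalg.solve(c_b, mu_b - mu_a))
l = np.log(np.linalg.det(c_b)) - np.log(np.linalg.det(c_a))
kl = 0.5 * (t + q - 2. + l)  # k = 2.
```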
This `Op` computes the trace by solving `C_b^{-1} C_a`. Although efficient
methods for solving systems with `C_b` may be available, a dense version of
(the square root of) `C_a` is used, so performance is `O(B s k^2)` where `B`
is the batch size, and `s` is the cost of solving `C_b x = y` for vectors `x`
and `y`.
Args:
mvn_a: Instance of subclass of `_MultivariateNormalOperatorPD`.
mvn_b: Instance of subclass of `_MultivariateNormalOperatorPD`.
name: (optional) name to use for created ops. Default "kl_mvn_mvn".
Returns:
Batchwise `KL(mvn_a || mvn_b)`.
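Typical use goes through the registered dispatcher rather than calling this
function directly; a sketch, assuming this package's `kl` dispatcher and two
already-constructed distributions:
```python
kl = tf.contrib.distributions.kl(mvn_a, mvn_b)
```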
"""
# Access the "private" OperatorPD that each mvn is built from.
cov_a = mvn_a._cov # pylint: disable=protected-access
cov_b = mvn_b._cov # pylint: disable=protected-access
mu_a = mvn_a.mu
mu_b = mvn_b.mu
inputs = [mu_a, mu_b] + cov_a.inputs + cov_b.inputs
with ops.name_scope(name, "kl_mvn_mvn", inputs):
# If Ca = AA', Cb = BB', then
# tr[inv(Cb) Ca] = tr[inv(B)' inv(B) A A']
# = tr[inv(B) A A' inv(B)']
# = tr[(inv(B) A) (inv(B) A)']
# = sum_{ik} (inv(B) A)_{ik}^2
# The second equality follows from the cyclic permutation property.
b_inv_a = cov_b.sqrt_solve(cov_a.sqrt_to_dense())
t = math_ops.reduce_sum(
math_ops.square(b_inv_a),
reduction_indices=[-1, -2])
q = cov_b.inv_quadratic_form_on_vectors(mu_b - mu_a)
k = math_ops.cast(cov_a.vector_space_dimension(), mvn_a.dtype)
one_half_l = cov_b.sqrt_log_det() - cov_a.sqrt_log_det()
return 0.5 * (t + q - k) + one_half_l
# Register KL divergences.
kl_classes = [
MultivariateNormalFull,
MultivariateNormalCholesky,
MultivariateNormalDiag,
MultivariateNormalDiagPlusVDVT,
]
for mvn_aa in kl_classes:
# Register the same-class (mvn_aa, mvn_aa) pair here, and skip it in the inner
# loop below so the pair is not registered twice.
kullback_leibler.RegisterKL(mvn_aa, mvn_aa)(_kl_mvn_mvn_brute_force)
for mvn_bb in kl_classes:
if mvn_bb != mvn_aa:
kullback_leibler.RegisterKL(mvn_aa, mvn_bb)(_kl_mvn_mvn_brute_force)
|
tongwang01/tensorflow
|
tensorflow/contrib/distributions/python/ops/mvn.py
|
Python
|
apache-2.0
| 29,127
|
[
"Gaussian"
] |
e65478ffce617b26f0937ae5a225cc3ad3d5cb412c4cd0a285d37eac747782f4
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dataproc_v1.services.autoscaling_policy_service import (
AutoscalingPolicyServiceAsyncClient,
)
from google.cloud.dataproc_v1.services.autoscaling_policy_service import (
AutoscalingPolicyServiceClient,
)
from google.cloud.dataproc_v1.services.autoscaling_policy_service import pagers
from google.cloud.dataproc_v1.services.autoscaling_policy_service import transports
from google.cloud.dataproc_v1.types import autoscaling_policies
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
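# For instance (hypothetical values): a client whose DEFAULT_ENDPOINT is
# "localhost:7469" yields "foo.googleapis.com" here, while the production
# endpoint "dataproc.googleapis.com" is returned unchanged.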
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(None) is None
assert (
AutoscalingPolicyServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
AutoscalingPolicyServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
AutoscalingPolicyServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
AutoscalingPolicyServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
AutoscalingPolicyServiceClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize(
"client_class",
[AutoscalingPolicyServiceClient, AutoscalingPolicyServiceAsyncClient,],
)
def test_autoscaling_policy_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dataproc.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.AutoscalingPolicyServiceGrpcTransport, "grpc"),
(transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_autoscaling_policy_service_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class",
[AutoscalingPolicyServiceClient, AutoscalingPolicyServiceAsyncClient,],
)
def test_autoscaling_policy_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dataproc.googleapis.com:443"
def test_autoscaling_policy_service_client_get_transport_class():
transport = AutoscalingPolicyServiceClient.get_transport_class()
available_transports = [
transports.AutoscalingPolicyServiceGrpcTransport,
]
assert transport in available_transports
transport = AutoscalingPolicyServiceClient.get_transport_class("grpc")
assert transport == transports.AutoscalingPolicyServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
AutoscalingPolicyServiceClient,
transports.AutoscalingPolicyServiceGrpcTransport,
"grpc",
),
(
AutoscalingPolicyServiceAsyncClient,
transports.AutoscalingPolicyServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
AutoscalingPolicyServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(AutoscalingPolicyServiceClient),
)
@mock.patch.object(
AutoscalingPolicyServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(AutoscalingPolicyServiceAsyncClient),
)
def test_autoscaling_policy_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(
AutoscalingPolicyServiceClient, "get_transport_class"
) as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(
AutoscalingPolicyServiceClient, "get_transport_class"
) as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
AutoscalingPolicyServiceClient,
transports.AutoscalingPolicyServiceGrpcTransport,
"grpc",
"true",
),
(
AutoscalingPolicyServiceAsyncClient,
transports.AutoscalingPolicyServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
AutoscalingPolicyServiceClient,
transports.AutoscalingPolicyServiceGrpcTransport,
"grpc",
"false",
),
(
AutoscalingPolicyServiceAsyncClient,
transports.AutoscalingPolicyServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
AutoscalingPolicyServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(AutoscalingPolicyServiceClient),
)
@mock.patch.object(
AutoscalingPolicyServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(AutoscalingPolicyServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_autoscaling_policy_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. The endpoint is autoswitched to
# the default mTLS endpoint if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and
# a client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class",
[AutoscalingPolicyServiceClient, AutoscalingPolicyServiceAsyncClient],
)
@mock.patch.object(
AutoscalingPolicyServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(AutoscalingPolicyServiceClient),
)
@mock.patch.object(
AutoscalingPolicyServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(AutoscalingPolicyServiceAsyncClient),
)
def test_autoscaling_policy_service_client_get_mtls_endpoint_and_cert_source(
client_class,
):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
AutoscalingPolicyServiceClient,
transports.AutoscalingPolicyServiceGrpcTransport,
"grpc",
),
(
AutoscalingPolicyServiceAsyncClient,
transports.AutoscalingPolicyServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_autoscaling_policy_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
AutoscalingPolicyServiceClient,
transports.AutoscalingPolicyServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
AutoscalingPolicyServiceAsyncClient,
transports.AutoscalingPolicyServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_autoscaling_policy_service_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_autoscaling_policy_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.dataproc_v1.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = AutoscalingPolicyServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
AutoscalingPolicyServiceClient,
transports.AutoscalingPolicyServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
AutoscalingPolicyServiceAsyncClient,
transports.AutoscalingPolicyServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_autoscaling_policy_service_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"dataproc.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=None,
default_host="dataproc.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"request_type", [autoscaling_policies.CreateAutoscalingPolicyRequest, dict,]
)
def test_create_autoscaling_policy(request_type, transport: str = "grpc"):
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_autoscaling_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = autoscaling_policies.AutoscalingPolicy(
id="id_value",
name="name_value",
basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm(
yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig(
graceful_decommission_timeout=duration_pb2.Duration(seconds=751)
)
),
)
response = client.create_autoscaling_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == autoscaling_policies.CreateAutoscalingPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, autoscaling_policies.AutoscalingPolicy)
assert response.id == "id_value"
assert response.name == "name_value"
def test_create_autoscaling_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_autoscaling_policy), "__call__"
) as call:
client.create_autoscaling_policy()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == autoscaling_policies.CreateAutoscalingPolicyRequest()
@pytest.mark.asyncio
async def test_create_autoscaling_policy_async(
transport: str = "grpc_asyncio",
request_type=autoscaling_policies.CreateAutoscalingPolicyRequest,
):
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_autoscaling_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
autoscaling_policies.AutoscalingPolicy(id="id_value", name="name_value",)
)
response = await client.create_autoscaling_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == autoscaling_policies.CreateAutoscalingPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, autoscaling_policies.AutoscalingPolicy)
assert response.id == "id_value"
assert response.name == "name_value"
@pytest.mark.asyncio
async def test_create_autoscaling_policy_async_from_dict():
await test_create_autoscaling_policy_async(request_type=dict)
def test_create_autoscaling_policy_field_headers():
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = autoscaling_policies.CreateAutoscalingPolicyRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_autoscaling_policy), "__call__"
) as call:
call.return_value = autoscaling_policies.AutoscalingPolicy()
client.create_autoscaling_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_autoscaling_policy_field_headers_async():
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = autoscaling_policies.CreateAutoscalingPolicyRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_autoscaling_policy), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
autoscaling_policies.AutoscalingPolicy()
)
await client.create_autoscaling_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_autoscaling_policy_flattened():
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_autoscaling_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = autoscaling_policies.AutoscalingPolicy()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_autoscaling_policy(
parent="parent_value",
policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].policy
mock_val = autoscaling_policies.AutoscalingPolicy(id="id_value")
assert arg == mock_val
def test_create_autoscaling_policy_flattened_error():
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_autoscaling_policy(
autoscaling_policies.CreateAutoscalingPolicyRequest(),
parent="parent_value",
policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
)
@pytest.mark.asyncio
async def test_create_autoscaling_policy_flattened_async():
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_autoscaling_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
autoscaling_policies.AutoscalingPolicy()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_autoscaling_policy(
parent="parent_value",
policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].policy
mock_val = autoscaling_policies.AutoscalingPolicy(id="id_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_autoscaling_policy_flattened_error_async():
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_autoscaling_policy(
autoscaling_policies.CreateAutoscalingPolicyRequest(),
parent="parent_value",
policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
)
@pytest.mark.parametrize(
"request_type", [autoscaling_policies.UpdateAutoscalingPolicyRequest, dict,]
)
def test_update_autoscaling_policy(request_type, transport: str = "grpc"):
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_autoscaling_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = autoscaling_policies.AutoscalingPolicy(
id="id_value",
name="name_value",
basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm(
yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig(
graceful_decommission_timeout=duration_pb2.Duration(seconds=751)
)
),
)
response = client.update_autoscaling_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == autoscaling_policies.UpdateAutoscalingPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, autoscaling_policies.AutoscalingPolicy)
assert response.id == "id_value"
assert response.name == "name_value"
def test_update_autoscaling_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_autoscaling_policy), "__call__"
) as call:
client.update_autoscaling_policy()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == autoscaling_policies.UpdateAutoscalingPolicyRequest()
@pytest.mark.asyncio
async def test_update_autoscaling_policy_async(
transport: str = "grpc_asyncio",
request_type=autoscaling_policies.UpdateAutoscalingPolicyRequest,
):
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_autoscaling_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
autoscaling_policies.AutoscalingPolicy(id="id_value", name="name_value",)
)
response = await client.update_autoscaling_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == autoscaling_policies.UpdateAutoscalingPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, autoscaling_policies.AutoscalingPolicy)
assert response.id == "id_value"
assert response.name == "name_value"
@pytest.mark.asyncio
async def test_update_autoscaling_policy_async_from_dict():
await test_update_autoscaling_policy_async(request_type=dict)
def test_update_autoscaling_policy_field_headers():
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = autoscaling_policies.UpdateAutoscalingPolicyRequest()
request.policy.name = "policy.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_autoscaling_policy), "__call__"
) as call:
call.return_value = autoscaling_policies.AutoscalingPolicy()
client.update_autoscaling_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "policy.name=policy.name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_autoscaling_policy_field_headers_async():
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = autoscaling_policies.UpdateAutoscalingPolicyRequest()
request.policy.name = "policy.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_autoscaling_policy), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
autoscaling_policies.AutoscalingPolicy()
)
await client.update_autoscaling_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "policy.name=policy.name/value",) in kw["metadata"]
def test_update_autoscaling_policy_flattened():
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_autoscaling_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = autoscaling_policies.AutoscalingPolicy()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_autoscaling_policy(
policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].policy
mock_val = autoscaling_policies.AutoscalingPolicy(id="id_value")
assert arg == mock_val
def test_update_autoscaling_policy_flattened_error():
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_autoscaling_policy(
autoscaling_policies.UpdateAutoscalingPolicyRequest(),
policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
)
@pytest.mark.asyncio
async def test_update_autoscaling_policy_flattened_async():
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_autoscaling_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
autoscaling_policies.AutoscalingPolicy()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_autoscaling_policy(
policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].policy
mock_val = autoscaling_policies.AutoscalingPolicy(id="id_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_autoscaling_policy_flattened_error_async():
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_autoscaling_policy(
autoscaling_policies.UpdateAutoscalingPolicyRequest(),
policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
)
@pytest.mark.parametrize(
"request_type", [autoscaling_policies.GetAutoscalingPolicyRequest, dict,]
)
def test_get_autoscaling_policy(request_type, transport: str = "grpc"):
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_autoscaling_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = autoscaling_policies.AutoscalingPolicy(
id="id_value",
name="name_value",
basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm(
yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig(
graceful_decommission_timeout=duration_pb2.Duration(seconds=751)
)
),
)
response = client.get_autoscaling_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == autoscaling_policies.GetAutoscalingPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, autoscaling_policies.AutoscalingPolicy)
assert response.id == "id_value"
assert response.name == "name_value"
def test_get_autoscaling_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_autoscaling_policy), "__call__"
) as call:
client.get_autoscaling_policy()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == autoscaling_policies.GetAutoscalingPolicyRequest()
@pytest.mark.asyncio
async def test_get_autoscaling_policy_async(
transport: str = "grpc_asyncio",
request_type=autoscaling_policies.GetAutoscalingPolicyRequest,
):
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_autoscaling_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
autoscaling_policies.AutoscalingPolicy(id="id_value", name="name_value",)
)
response = await client.get_autoscaling_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == autoscaling_policies.GetAutoscalingPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, autoscaling_policies.AutoscalingPolicy)
assert response.id == "id_value"
assert response.name == "name_value"
@pytest.mark.asyncio
async def test_get_autoscaling_policy_async_from_dict():
await test_get_autoscaling_policy_async(request_type=dict)
def test_get_autoscaling_policy_field_headers():
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = autoscaling_policies.GetAutoscalingPolicyRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_autoscaling_policy), "__call__"
) as call:
call.return_value = autoscaling_policies.AutoscalingPolicy()
client.get_autoscaling_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_autoscaling_policy_field_headers_async():
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = autoscaling_policies.GetAutoscalingPolicyRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_autoscaling_policy), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
autoscaling_policies.AutoscalingPolicy()
)
await client.get_autoscaling_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_autoscaling_policy_flattened():
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_autoscaling_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = autoscaling_policies.AutoscalingPolicy()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_autoscaling_policy(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_autoscaling_policy_flattened_error():
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_autoscaling_policy(
autoscaling_policies.GetAutoscalingPolicyRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_autoscaling_policy_flattened_async():
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_autoscaling_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
autoscaling_policies.AutoscalingPolicy()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_autoscaling_policy(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_autoscaling_policy_flattened_error_async():
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_autoscaling_policy(
autoscaling_policies.GetAutoscalingPolicyRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [autoscaling_policies.ListAutoscalingPoliciesRequest, dict,]
)
def test_list_autoscaling_policies(request_type, transport: str = "grpc"):
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_autoscaling_policies), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse(
next_page_token="next_page_token_value",
)
response = client.list_autoscaling_policies(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == autoscaling_policies.ListAutoscalingPoliciesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAutoscalingPoliciesPager)
assert response.next_page_token == "next_page_token_value"
def test_list_autoscaling_policies_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_autoscaling_policies), "__call__"
) as call:
client.list_autoscaling_policies()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == autoscaling_policies.ListAutoscalingPoliciesRequest()
@pytest.mark.asyncio
async def test_list_autoscaling_policies_async(
transport: str = "grpc_asyncio",
request_type=autoscaling_policies.ListAutoscalingPoliciesRequest,
):
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_autoscaling_policies), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
autoscaling_policies.ListAutoscalingPoliciesResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_autoscaling_policies(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == autoscaling_policies.ListAutoscalingPoliciesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAutoscalingPoliciesAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_autoscaling_policies_async_from_dict():
await test_list_autoscaling_policies_async(request_type=dict)
def test_list_autoscaling_policies_field_headers():
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = autoscaling_policies.ListAutoscalingPoliciesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_autoscaling_policies), "__call__"
) as call:
call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse()
client.list_autoscaling_policies(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_autoscaling_policies_field_headers_async():
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = autoscaling_policies.ListAutoscalingPoliciesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_autoscaling_policies), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
autoscaling_policies.ListAutoscalingPoliciesResponse()
)
await client.list_autoscaling_policies(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_autoscaling_policies_flattened():
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_autoscaling_policies), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_autoscaling_policies(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_autoscaling_policies_flattened_error():
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_autoscaling_policies(
autoscaling_policies.ListAutoscalingPoliciesRequest(),
parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_autoscaling_policies_flattened_async():
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_autoscaling_policies), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
autoscaling_policies.ListAutoscalingPoliciesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_autoscaling_policies(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_autoscaling_policies_flattened_error_async():
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_autoscaling_policies(
autoscaling_policies.ListAutoscalingPoliciesRequest(),
parent="parent_value",
)
def test_list_autoscaling_policies_pager(transport_name: str = "grpc"):
client = AutoscalingPolicyServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_autoscaling_policies), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
autoscaling_policies.ListAutoscalingPoliciesResponse(
policies=[
autoscaling_policies.AutoscalingPolicy(),
autoscaling_policies.AutoscalingPolicy(),
autoscaling_policies.AutoscalingPolicy(),
],
next_page_token="abc",
),
autoscaling_policies.ListAutoscalingPoliciesResponse(
policies=[], next_page_token="def",
),
autoscaling_policies.ListAutoscalingPoliciesResponse(
policies=[autoscaling_policies.AutoscalingPolicy(),],
next_page_token="ghi",
),
autoscaling_policies.ListAutoscalingPoliciesResponse(
policies=[
autoscaling_policies.AutoscalingPolicy(),
autoscaling_policies.AutoscalingPolicy(),
],
),
RuntimeError,
)
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
pager = client.list_autoscaling_policies(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(
isinstance(i, autoscaling_policies.AutoscalingPolicy) for i in results
)
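# Illustrative usage, not part of the generated suite: outside of tests, the
# synchronous pager returned by ``list_autoscaling_policies`` is normally
# consumed with a plain for-loop, which fetches successive pages on demand.
# ``client`` and ``parent`` below are placeholders for real values.
#
#   for policy in client.list_autoscaling_policies(parent=parent):
#       print(policy.name)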
def test_list_autoscaling_policies_pages(transport_name: str = "grpc"):
client = AutoscalingPolicyServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_autoscaling_policies), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
autoscaling_policies.ListAutoscalingPoliciesResponse(
policies=[
autoscaling_policies.AutoscalingPolicy(),
autoscaling_policies.AutoscalingPolicy(),
autoscaling_policies.AutoscalingPolicy(),
],
next_page_token="abc",
),
autoscaling_policies.ListAutoscalingPoliciesResponse(
policies=[], next_page_token="def",
),
autoscaling_policies.ListAutoscalingPoliciesResponse(
policies=[autoscaling_policies.AutoscalingPolicy(),],
next_page_token="ghi",
),
autoscaling_policies.ListAutoscalingPoliciesResponse(
policies=[
autoscaling_policies.AutoscalingPolicy(),
autoscaling_policies.AutoscalingPolicy(),
],
),
RuntimeError,
)
pages = list(client.list_autoscaling_policies(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_autoscaling_policies_async_pager():
client = AutoscalingPolicyServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_autoscaling_policies),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
autoscaling_policies.ListAutoscalingPoliciesResponse(
policies=[
autoscaling_policies.AutoscalingPolicy(),
autoscaling_policies.AutoscalingPolicy(),
autoscaling_policies.AutoscalingPolicy(),
],
next_page_token="abc",
),
autoscaling_policies.ListAutoscalingPoliciesResponse(
policies=[], next_page_token="def",
),
autoscaling_policies.ListAutoscalingPoliciesResponse(
policies=[autoscaling_policies.AutoscalingPolicy(),],
next_page_token="ghi",
),
autoscaling_policies.ListAutoscalingPoliciesResponse(
policies=[
autoscaling_policies.AutoscalingPolicy(),
autoscaling_policies.AutoscalingPolicy(),
],
),
RuntimeError,
)
async_pager = await client.list_autoscaling_policies(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, autoscaling_policies.AutoscalingPolicy) for i in responses
)
@pytest.mark.asyncio
async def test_list_autoscaling_policies_async_pages():
client = AutoscalingPolicyServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_autoscaling_policies),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
autoscaling_policies.ListAutoscalingPoliciesResponse(
policies=[
autoscaling_policies.AutoscalingPolicy(),
autoscaling_policies.AutoscalingPolicy(),
autoscaling_policies.AutoscalingPolicy(),
],
next_page_token="abc",
),
autoscaling_policies.ListAutoscalingPoliciesResponse(
policies=[], next_page_token="def",
),
autoscaling_policies.ListAutoscalingPoliciesResponse(
policies=[autoscaling_policies.AutoscalingPolicy(),],
next_page_token="ghi",
),
autoscaling_policies.ListAutoscalingPoliciesResponse(
policies=[
autoscaling_policies.AutoscalingPolicy(),
autoscaling_policies.AutoscalingPolicy(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_autoscaling_policies(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
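# Illustrative usage, not part of the generated suite: the async variant is
# awaited once to obtain an ``AsyncPager`` and then iterated with
# ``async for``; pages are fetched lazily. ``client`` and ``parent`` are
# placeholders.
#
#   async def dump_policies(client, parent):
#       pager = await client.list_autoscaling_policies(parent=parent)
#       async for policy in pager:
#           print(policy.name)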
@pytest.mark.parametrize(
"request_type", [autoscaling_policies.DeleteAutoscalingPolicyRequest, dict,]
)
def test_delete_autoscaling_policy(request_type, transport: str = "grpc"):
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_autoscaling_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_autoscaling_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == autoscaling_policies.DeleteAutoscalingPolicyRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_autoscaling_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_autoscaling_policy), "__call__"
) as call:
client.delete_autoscaling_policy()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == autoscaling_policies.DeleteAutoscalingPolicyRequest()
@pytest.mark.asyncio
async def test_delete_autoscaling_policy_async(
transport: str = "grpc_asyncio",
request_type=autoscaling_policies.DeleteAutoscalingPolicyRequest,
):
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_autoscaling_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_autoscaling_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == autoscaling_policies.DeleteAutoscalingPolicyRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_autoscaling_policy_async_from_dict():
await test_delete_autoscaling_policy_async(request_type=dict)
def test_delete_autoscaling_policy_field_headers():
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = autoscaling_policies.DeleteAutoscalingPolicyRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_autoscaling_policy), "__call__"
) as call:
call.return_value = None
client.delete_autoscaling_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_autoscaling_policy_field_headers_async():
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = autoscaling_policies.DeleteAutoscalingPolicyRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_autoscaling_policy), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_autoscaling_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_autoscaling_policy_flattened():
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_autoscaling_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_autoscaling_policy(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_autoscaling_policy_flattened_error():
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_autoscaling_policy(
autoscaling_policies.DeleteAutoscalingPolicyRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_autoscaling_policy_flattened_async():
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_autoscaling_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_autoscaling_policy(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_autoscaling_policy_flattened_error_async():
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_autoscaling_policy(
autoscaling_policies.DeleteAutoscalingPolicyRequest(), name="name_value",
)
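# Illustrative, not part of the generated suite: the two mutually exclusive
# calling conventions exercised by the flattened-error tests above. Values
# are placeholders.
#
#   # request-object style:
#   client.delete_autoscaling_policy(
#       request=autoscaling_policies.DeleteAutoscalingPolicyRequest(name=name)
#   )
#   # flattened style (keyword arguments only):
#   client.delete_autoscaling_policy(name=name)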
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.AutoscalingPolicyServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.AutoscalingPolicyServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = AutoscalingPolicyServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.AutoscalingPolicyServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = AutoscalingPolicyServiceClient(
client_options=options, transport=transport,
)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = AutoscalingPolicyServiceClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.AutoscalingPolicyServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = AutoscalingPolicyServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.AutoscalingPolicyServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = AutoscalingPolicyServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.AutoscalingPolicyServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.AutoscalingPolicyServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.AutoscalingPolicyServiceGrpcTransport,
transports.AutoscalingPolicyServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport, transports.AutoscalingPolicyServiceGrpcTransport,
)
def test_autoscaling_policy_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.AutoscalingPolicyServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_autoscaling_policy_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.dataproc_v1.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.AutoscalingPolicyServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"create_autoscaling_policy",
"update_autoscaling_policy",
"get_autoscaling_policy",
"list_autoscaling_policies",
"delete_autoscaling_policy",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_autoscaling_policy_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dataproc_v1.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.AutoscalingPolicyServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_autoscaling_policy_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.dataproc_v1.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.AutoscalingPolicyServiceTransport()
adc.assert_called_once()
def test_autoscaling_policy_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
AutoscalingPolicyServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
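# Illustrative sketch of the ADC lookup that the test above mocks out: when no
# credentials are supplied, ``google.auth.default()`` resolves them from the
# environment (GOOGLE_APPLICATION_CREDENTIALS, gcloud user credentials, or the
# GCE metadata server).
#
#   import google.auth
#   credentials, project_id = google.auth.default(
#       scopes=["https://www.googleapis.com/auth/cloud-platform"],
#   )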
@pytest.mark.parametrize(
"transport_class",
[
transports.AutoscalingPolicyServiceGrpcTransport,
transports.AutoscalingPolicyServiceGrpcAsyncIOTransport,
],
)
def test_autoscaling_policy_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.AutoscalingPolicyServiceGrpcTransport, grpc_helpers),
(transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_autoscaling_policy_service_transport_create_channel(
transport_class, grpc_helpers
):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"dataproc.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="dataproc.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.AutoscalingPolicyServiceGrpcTransport,
transports.AutoscalingPolicyServiceGrpcAsyncIOTransport,
],
)
def test_autoscaling_policy_service_grpc_transport_client_cert_source_for_mtls(
transport_class,
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_autoscaling_policy_service_host_no_port():
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dataproc.googleapis.com"
),
)
assert client.transport._host == "dataproc.googleapis.com:443"
def test_autoscaling_policy_service_host_with_port():
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dataproc.googleapis.com:8000"
),
)
assert client.transport._host == "dataproc.googleapis.com:8000"
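# Illustrative, not part of the generated suite: the endpoint override that the
# two host tests exercise is how a client is pointed at a regional or private
# endpoint in practice (the endpoint value below is a placeholder).
#
#   client = AutoscalingPolicyServiceClient(
#       credentials=creds,
#       client_options=client_options.ClientOptions(
#           api_endpoint="us-central1-dataproc.googleapis.com"
#       ),
#   )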
def test_autoscaling_policy_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.AutoscalingPolicyServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_autoscaling_policy_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.AutoscalingPolicyServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.AutoscalingPolicyServiceGrpcTransport,
transports.AutoscalingPolicyServiceGrpcAsyncIOTransport,
],
)
def test_autoscaling_policy_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.AutoscalingPolicyServiceGrpcTransport,
transports.AutoscalingPolicyServiceGrpcAsyncIOTransport,
],
)
def test_autoscaling_policy_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_autoscaling_policy_path():
project = "squid"
location = "clam"
autoscaling_policy = "whelk"
expected = "projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}".format(
project=project, location=location, autoscaling_policy=autoscaling_policy,
)
actual = AutoscalingPolicyServiceClient.autoscaling_policy_path(
project, location, autoscaling_policy
)
assert expected == actual
def test_parse_autoscaling_policy_path():
expected = {
"project": "octopus",
"location": "oyster",
"autoscaling_policy": "nudibranch",
}
path = AutoscalingPolicyServiceClient.autoscaling_policy_path(**expected)
# Check that the path construction is reversible.
actual = AutoscalingPolicyServiceClient.parse_autoscaling_policy_path(path)
assert expected == actual
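# Illustrative, not part of the generated suite: the path helpers above are
# typically used to build the ``name``/``parent`` arguments for RPCs (values
# are placeholders).
#
#   name = AutoscalingPolicyServiceClient.autoscaling_policy_path(
#       "my-project", "us-central1", "my-policy"
#   )
#   policy = client.get_autoscaling_policy(name=name)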
def test_common_billing_account_path():
billing_account = "cuttlefish"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = AutoscalingPolicyServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "mussel",
}
path = AutoscalingPolicyServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = AutoscalingPolicyServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "winkle"
expected = "folders/{folder}".format(folder=folder,)
actual = AutoscalingPolicyServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nautilus",
}
path = AutoscalingPolicyServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = AutoscalingPolicyServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "scallop"
expected = "organizations/{organization}".format(organization=organization,)
actual = AutoscalingPolicyServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "abalone",
}
path = AutoscalingPolicyServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = AutoscalingPolicyServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "squid"
expected = "projects/{project}".format(project=project,)
actual = AutoscalingPolicyServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "clam",
}
path = AutoscalingPolicyServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = AutoscalingPolicyServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "whelk"
location = "octopus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = AutoscalingPolicyServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "oyster",
"location": "nudibranch",
}
path = AutoscalingPolicyServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = AutoscalingPolicyServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.AutoscalingPolicyServiceTransport, "_prep_wrapped_messages"
) as prep:
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.AutoscalingPolicyServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = AutoscalingPolicyServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = AutoscalingPolicyServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = AutoscalingPolicyServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(
AutoscalingPolicyServiceClient,
transports.AutoscalingPolicyServiceGrpcTransport,
),
(
AutoscalingPolicyServiceAsyncClient,
transports.AutoscalingPolicyServiceGrpcAsyncIOTransport,
),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-dataproc
|
tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py
|
Python
|
apache-2.0
| 101,910
|
[
"Octopus"
] |
79ce229ee9d8674682d7cff23cb8c71b0eff8a76da647ff58d9c8bdde63a25c5
|
'''
Created on Oct 2, 2013
@author: olehlong
'''
import numpy as np
import hashlib
import json
class TreeParityMachine:
'''
Tree parity machine class
'''
def __init__(self, iK, iN, iL):
'''
Constructor
'''
self.K = iK # hidden layer size
self.N = iN # number of input neurons for each hidden neuron
self.L = iL # distribution width
self.W = [0] * (self.K * self.N) # input layer
self.H = [0] * self.K # hidden layer
self.output = None # output
def compute_result(self, X):
'''
compute output and hidden layer
'''
self.output = 1
for i in range(self.K):
summ = 0
for j in range(self.N):
summ += self.W[i * self.N + j] * X[i * self.N + j]
            self.H[i] = self.signum(summ)
            self.output *= self.H[i]
def update_weights(self, X, outputB):
'''
X - input vector
'''
for i in range(self.K):
for j in range(self.N):
nW = self.W[i * self.N + j] + X[i * self.N + j] * self.equal(self.output, self.H[i]) * self.equal(self.output, outputB)
if nW > self.L:
nW = self.L
if nW < -self.L:
nW = -self.L
self.W[i * self.N + j] = nW
def update_weights_solo(self, X):
'''
X - input vector
'''
for i in range(self.K):
for j in range(self.N):
nW = self.W[i * self.N + j] + X[i * self.N + j] * self.equal(self.output, self.H[i])
if nW > self.L:
nW = self.L
if nW < -self.L:
nW = -self.L
self.W[i * self.N + j] = nW
def randomize_weights(self):
'''
fill self.W with random weights
'''
for i in range(len(self.W)):
            # C equivalent: L - (rand() % (2 * L + 1)); randint's upper bound
            # is exclusive, so L + 1 is needed to include +L
            self.W[i] = np.random.randint(-self.L, self.L + 1)
def equal(self, a, b):
return 1 if a == b else 0
def signum(self, a):
return 1 if a > 0 else -1
def rand_bit():
return 1 if np.random.randint(2) == 1 else -1
def create_vector(k, n):
res = []
for i in range(k*n):
res.append(rand_bit())
return res
class TPMManager:
    '''
    TPM manager
    You must set recvr (the message receiver) and a transport object that has the method
    tpm_send(recvr, rvec=None, oout=None, out=None, w=None, eqout=None, status=None, it=None)
'''
def __init__(self):
'''
init tpm manager
'''
self.k = 0
self.n = 0
self.l = 0
self.dic = "01234567890_abcdefghijklmnopqrstuvwxyz"
self.tpm = None
self.max_iter = 0
self.curr_iter = 0
self.fail_count = 0
self.max_fail = 50
self.recvr = None
self.transport = None
self.prev_vec = None
self.is_success = False
self.__key = None
def init(self, k, n, l):
'''
set up tpm
'''
self.k = k
self.n = n
self.l = l
self.max_iter = l**3*n*k
# self.max_iter = 10
self.tpm = TreeParityMachine(k, n, l)
self.tpm.randomize_weights()
def fill(self, k, n, l, w):
self.init(k, n, l)
if len(w) == k*n:
self.tpm.W = w
return True
return False
def clear(self):
self.k = 0
self.n = 0
self.l = 0
self.max_iter = 0
self.tpm = None
def start_iter(self):
'''
begin sync
'''
self.__key = None
rvec = self.vect()
self.tpm.compute_result(rvec)
self.prev_vec = rvec
self.transport.tpm_send(self.recvr, rvec, None, self.tpm.output, status="start", it=0)
def vect(self):
'''
get vector for tpm settings
'''
return create_vector(self.k, self.n)
def w_sum(self):
'''
get hash-sum of weights for comparison
'''
        return hashlib.md5(json.dumps(self.tpm.W).encode()).hexdigest()
def recv(self, rvec, oout=None, out=None, w=None, eqout=None, status=None, it=None):
'''
sync iteration
        rvec - random vector for this iteration
        oout - output for the previous vector, echoed back for the weight update
        out - remote tpm output for rvec
        w - hash-sum of the remote tpm weights, for comparison
        eqout - whether the previous outputs were equal
status - process status:
start
stage_1
success
fail
it - iteration
'''
s_rvec = self.vect()
self.curr_iter = it
if self.curr_iter == self.max_iter:
self.transport.tpm_send(self.recvr, status="fail")
return True
if status == "start":
# stage_1
print "status: start"
self.tpm.compute_result(rvec)
self.prev_vec = s_rvec
if self.tpm.output == out:
print "out equals"
self.tpm.update_weights(rvec, out)
# prepare data for next iteration
self.tpm.compute_result(s_rvec)
self.transport.tpm_send(self.recvr, s_rvec, out, self.tpm.output, self.w_sum(), True, "stage_1", it+1)
else:
self.fail_count += 1
if self.fail_count == self.max_fail:
self.transport.tpm_send(self.recvr, status="fail")
else:
# prepare data for next iteration
self.tpm.compute_result(s_rvec)
self.transport.tpm_send(self.recvr, s_rvec, None, self.tpm.output, self.w_sum(), False, "stage_1", it+1)
return True
elif status == "stage_1":
print "status: stage_1"
if eqout:
print "eqout"
self.fail_count = 0
self.tpm.update_weights(self.prev_vec, oout) # need old rvec
m_w = self.w_sum()
if m_w == w:
print "success from manager"
self.is_success = True
self.transport.tpm_send(self.recvr, status="success")
return True
else:
                    print(m_w, " != ", w)
self.tpm.compute_result(rvec)
self.prev_vec = s_rvec
if self.tpm.output == out:
self.tpm.update_weights(rvec, out)
# prepare data for next iteration
self.tpm.compute_result(s_rvec)
self.transport.tpm_send(self.recvr, s_rvec, out, self.tpm.output, self.w_sum(), True, "stage_1", it+1)
else:
self.fail_count += 1
if self.fail_count == self.max_fail:
self.transport.tpm_send(self.recvr, status="fail")
else:
# prepare data for next iteration
self.tpm.compute_result(s_rvec)
self.transport.tpm_send(self.recvr, s_rvec, None, self.tpm.output, self.w_sum(), False, "stage_1", it+1)
return True
else:
print "Something went wrong"
def get_key(self):
        if self.__key is not None:
return self.__key
key = ""
        key_size = 37 // (self.tpm.L * 2 + 1)
        key_length = self.tpm.K * self.tpm.N // key_size
        for i in range(1, key_length + 1):
            k = 1
for j in range((i-1)*key_size, i*key_size):
k += self.tpm.W[j] + self.tpm.L
key += self.dic[k]
self.__key = key
return key
def get_data(self):
return {'w': self.tpm.W, 'k': self.tpm.K, 'n': self.tpm.N, 'l': self.tpm.L}
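# Minimal local demonstration, not part of the original module: two TPMs fed
# identical random inputs and updated only when their outputs agree converge
# to the same weights (the random-walk rule implemented in update_weights).
# The parameter values below are illustrative.
if __name__ == '__main__':
    tpm_a = TreeParityMachine(3, 4, 3)
    tpm_b = TreeParityMachine(3, 4, 3)
    tpm_a.randomize_weights()
    tpm_b.randomize_weights()
    steps = 0
    while tpm_a.W != tpm_b.W:
        X = create_vector(3, 4)
        tpm_a.compute_result(X)
        tpm_b.compute_result(X)
        # weights move only when the two outputs agree (random-walk rule)
        if tpm_a.output == tpm_b.output:
            tpm_a.update_weights(X, tpm_b.output)
            tpm_b.update_weights(X, tpm_a.output)
        steps += 1
    print("synchronized after", steps, "iterations")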
|
olehlong/xmpp-neural-cryptography
|
nc/TreeParityMachine.py
|
Python
|
mit
| 8,572
|
[
"NEURON"
] |
7de79540b21450a28009bcd8afefd2559028fc8e5a9b12a31fda4e68c9de5a88
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
# Mark Wronkiewicz <wronk.mark@gmail.com>
#
# License: Simplified BSD
import os.path as op
from pathlib import Path
import sys
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
import pytest
import matplotlib.pyplot as plt
from matplotlib.colors import Colormap
from mne import (make_field_map, pick_channels_evoked, read_evokeds,
read_trans, read_dipole, SourceEstimate,
make_sphere_model, use_coil_def,
setup_volume_source_space, read_forward_solution,
convert_forward_solution, MixedSourceEstimate)
from mne.source_estimate import _BaseVolSourceEstimate
from mne.minimum_norm.inverse import apply_inverse
from mne.io import (read_raw_ctf, read_raw_bti, read_raw_kit, read_info,
read_raw_nirx)
from mne.io._digitization import write_dig
from mne.io.pick import pick_info
from mne.io.constants import FIFF
from mne.viz import (plot_sparse_source_estimates, plot_source_estimates,
snapshot_brain_montage, plot_head_positions,
plot_alignment, plot_sensors_connectivity,
plot_brain_colorbar, link_brains, mne_analyze_colormap)
from mne.viz._3d import _process_clim, _linearize_map, _get_map_ticks
from mne.viz.utils import _fake_click
from mne.utils import (requires_pysurfer, requires_nibabel, traits_test,
catch_logging, run_subprocess, modified_env)
from mne.datasets import testing
from mne.source_space import read_source_spaces
from mne.bem import read_bem_solution, read_bem_surfaces
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
trans_fname = op.join(data_dir, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
src_fname = op.join(data_dir, 'subjects', 'sample', 'bem',
'sample-oct-6-src.fif')
dip_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_trunc_set1.dip')
ctf_fname = op.join(data_dir, 'CTF', 'testdata_ctf.ds')
nirx_fname = op.join(data_dir, 'NIRx', 'nirscout',
'nirx_15_2_recording_w_short')
io_dir = op.join(op.abspath(op.dirname(__file__)), '..', '..', 'io')
base_dir = op.join(io_dir, 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
fwd_fname = op.join(data_dir, 'MEG', 'sample',
'sample_audvis_trunc-meg-vol-7-fwd.fif')
fwd_fname2 = op.join(data_dir, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
base_dir = op.join(io_dir, 'bti', 'tests', 'data')
pdf_fname = op.join(base_dir, 'test_pdf_linux')
config_fname = op.join(base_dir, 'test_config_linux')
hs_fname = op.join(base_dir, 'test_hs_linux')
sqd_fname = op.join(io_dir, 'kit', 'tests', 'data', 'test.sqd')
coil_3d = """# custom cube coil def
1 9999 1 8 3e-03 0.000e+00 "QuSpin ZFOPM 3mm cube"
0.1250 -0.750e-03 -0.750e-03 -0.750e-03 0.000 0.000 1.000
0.1250 -0.750e-03 0.750e-03 -0.750e-03 0.000 0.000 1.000
0.1250 0.750e-03 -0.750e-03 -0.750e-03 0.000 0.000 1.000
0.1250 0.750e-03 0.750e-03 -0.750e-03 0.000 0.000 1.000
0.1250 -0.750e-03 -0.750e-03 0.750e-03 0.000 0.000 1.000
0.1250 -0.750e-03 0.750e-03 0.750e-03 0.000 0.000 1.000
0.1250 0.750e-03 -0.750e-03 0.750e-03 0.000 0.000 1.000
0.1250 0.750e-03 0.750e-03 0.750e-03 0.000 0.000 1.000
"""
def test_plot_head_positions():
"""Test plotting of head positions."""
info = read_info(evoked_fname)
pos = np.random.RandomState(0).randn(4, 10)
pos[:, 0] = np.arange(len(pos))
destination = (0., 0., 0.04)
with pytest.warns(None): # old MPL will cause a warning
plot_head_positions(pos)
plot_head_positions(pos, mode='field', info=info,
destination=destination)
plot_head_positions([pos, pos]) # list support
pytest.raises(ValueError, plot_head_positions, ['pos'])
pytest.raises(ValueError, plot_head_positions, pos[:, :9])
pytest.raises(ValueError, plot_head_positions, pos, 'foo')
with pytest.raises(ValueError, match='shape'):
plot_head_positions(pos, axes=1.)
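# Illustrative, not part of the suite: in real use the positions come from
# continuous HPI fitting rather than random data, e.g. (file name is a
# placeholder):
#
#   from mne.chpi import read_head_pos
#   pos = read_head_pos('sample_audvis_raw.pos')
#   plot_head_positions(pos, mode='traces')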
@testing.requires_testing_data
@requires_pysurfer
@traits_test
@pytest.mark.slowtest
def test_plot_sparse_source_estimates(renderer_interactive, brain_gc):
"""Test plotting of (sparse) source estimates."""
sample_src = read_source_spaces(src_fname)
# dense version
vertices = [s['vertno'] for s in sample_src]
n_time = 5
n_verts = sum(len(v) for v in vertices)
stc_data = np.zeros((n_verts * n_time))
stc_size = stc_data.size
stc_data[(np.random.rand(stc_size // 20) * stc_size).astype(int)] = \
np.random.RandomState(0).rand(stc_data.size // 20)
stc_data.shape = (n_verts, n_time)
stc = SourceEstimate(stc_data, vertices, 1, 1)
colormap = 'mne_analyze'
brain = plot_source_estimates(
stc, 'sample', colormap=colormap, background=(1, 1, 0),
subjects_dir=subjects_dir, colorbar=True, clim='auto')
brain.close()
pytest.raises(TypeError, plot_source_estimates, stc, 'sample',
figure='foo', hemi='both', clim='auto',
subjects_dir=subjects_dir)
# now do sparse version
vertices = sample_src[0]['vertno']
inds = [111, 333]
stc_data = np.zeros((len(inds), n_time))
stc_data[0, 1] = 1.
stc_data[1, 4] = 2.
vertices = [vertices[inds], np.empty(0, dtype=np.int64)]
stc = SourceEstimate(stc_data, vertices, 1, 1)
surf = plot_sparse_source_estimates(sample_src, stc, bgcolor=(1, 1, 1),
opacity=0.5, high_resolution=False)
if renderer_interactive._get_3d_backend() == 'mayavi':
import mayavi # noqa: F401 analysis:ignore
assert isinstance(surf, mayavi.modules.surface.Surface)
@testing.requires_testing_data
@traits_test
@pytest.mark.slowtest
def test_plot_evoked_field(renderer):
"""Test plotting evoked field."""
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
baseline=(-0.2, 0.0))
evoked = pick_channels_evoked(evoked, evoked.ch_names[::10]) # speed
for t in ['meg', None]:
with pytest.warns(RuntimeWarning, match='projection'):
maps = make_field_map(evoked, trans_fname, subject='sample',
subjects_dir=subjects_dir, n_jobs=1,
ch_type=t)
fig = evoked.plot_field(maps, time=0.1)
if renderer._get_3d_backend() == 'mayavi':
import mayavi # noqa: F401 analysis:ignore
assert isinstance(fig, mayavi.core.scene.Scene)
@pytest.mark.slowtest # can be slow on OSX
@testing.requires_testing_data
@traits_test
def test_plot_alignment(tmpdir, renderer):
"""Test plotting of -trans.fif files and MEG sensor layouts."""
# generate fiducials file for testing
tempdir = str(tmpdir)
fiducials_path = op.join(tempdir, 'fiducials.fif')
fid = [{'coord_frame': 5, 'ident': 1, 'kind': 1,
'r': [-0.08061612, -0.02908875, -0.04131077]},
{'coord_frame': 5, 'ident': 2, 'kind': 1,
'r': [0.00146763, 0.08506715, -0.03483611]},
{'coord_frame': 5, 'ident': 3, 'kind': 1,
'r': [0.08436285, -0.02850276, -0.04127743]}]
write_dig(fiducials_path, fid, 5)
renderer.backend._close_all()
evoked = read_evokeds(evoked_fname)[0]
sample_src = read_source_spaces(src_fname)
bti = read_raw_bti(pdf_fname, config_fname, hs_fname, convert=True,
preload=False).info
infos = dict(
Neuromag=evoked.info,
CTF=read_raw_ctf(ctf_fname).info,
BTi=bti,
KIT=read_raw_kit(sqd_fname).info,
)
for system, info in infos.items():
meg = ['helmet', 'sensors']
if system == 'KIT':
meg.append('ref')
fig = plot_alignment(info, read_trans(trans_fname), subject='sample',
subjects_dir=subjects_dir, meg=meg)
rend = renderer.backend._Renderer(fig=fig)
rend.close()
# KIT ref sensor coil def is defined
renderer.backend._close_all()
info = infos['Neuromag']
pytest.raises(TypeError, plot_alignment, 'foo', trans_fname,
subject='sample', subjects_dir=subjects_dir)
pytest.raises(OSError, plot_alignment, info, trans_fname,
subject='sample', subjects_dir=subjects_dir, src='foo')
pytest.raises(ValueError, plot_alignment, info, trans_fname,
subject='fsaverage', subjects_dir=subjects_dir,
src=sample_src)
sample_src.plot(subjects_dir=subjects_dir, head=True, skull=True,
brain='white')
renderer.backend._close_all()
# no-head version
renderer.backend._close_all()
# all coord frames
plot_alignment(info) # works: surfaces='auto' default
for coord_frame in ('meg', 'head', 'mri'):
fig = plot_alignment(info, meg=['helmet', 'sensors'], dig=True,
coord_frame=coord_frame, trans=Path(trans_fname),
subject='sample', mri_fiducials=fiducials_path,
subjects_dir=subjects_dir, src=src_fname)
renderer.backend._close_all()
# EEG only with strange options
evoked_eeg_ecog_seeg = evoked.copy().pick_types(meg=False, eeg=True)
evoked_eeg_ecog_seeg.info['projs'] = [] # "remove" avg proj
evoked_eeg_ecog_seeg.set_channel_types({'EEG 001': 'ecog',
'EEG 002': 'seeg'})
with pytest.warns(RuntimeWarning, match='Cannot plot MEG'):
with catch_logging() as log:
plot_alignment(evoked_eeg_ecog_seeg.info, subject='sample',
trans=trans_fname, subjects_dir=subjects_dir,
surfaces=['white', 'outer_skin', 'outer_skull'],
meg=['helmet', 'sensors'],
eeg=['original', 'projected'], ecog=True, seeg=True,
verbose=True)
log = log.getvalue()
assert '1 ECoG location' in log
assert '1 sEEG location' in log
renderer.backend._close_all()
sphere = make_sphere_model(info=evoked.info, r0='auto', head_radius='auto')
bem_sol = read_bem_solution(op.join(subjects_dir, 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif'))
bem_surfs = read_bem_surfaces(op.join(subjects_dir, 'sample', 'bem',
'sample-1280-1280-1280-bem.fif'))
sample_src[0]['coord_frame'] = 4 # hack for coverage
plot_alignment(info, subject='sample', eeg='projected',
meg='helmet', bem=sphere, dig=True,
surfaces=['brain', 'inner_skull', 'outer_skull',
'outer_skin'])
plot_alignment(info, trans_fname, subject='sample', meg='helmet',
subjects_dir=subjects_dir, eeg='projected', bem=sphere,
surfaces=['head', 'brain'], src=sample_src)
assert all(surf['coord_frame'] == FIFF.FIFFV_COORD_MRI
for surf in bem_sol['surfs'])
plot_alignment(info, trans_fname, subject='sample', meg=[],
subjects_dir=subjects_dir, bem=bem_sol, eeg=True,
surfaces=['head', 'inflated', 'outer_skull', 'inner_skull'])
assert all(surf['coord_frame'] == FIFF.FIFFV_COORD_MRI
for surf in bem_sol['surfs'])
plot_alignment(info, trans_fname, subject='sample',
meg=True, subjects_dir=subjects_dir,
surfaces=['head', 'inner_skull'], bem=bem_surfs)
# single-layer BEM can still plot head surface
assert bem_surfs[-1]['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN
bem_sol_homog = read_bem_solution(op.join(subjects_dir, 'sample', 'bem',
'sample-1280-bem-sol.fif'))
for use_bem in (bem_surfs[-1:], bem_sol_homog):
with catch_logging() as log:
plot_alignment(info, trans_fname, subject='sample',
meg=True, subjects_dir=subjects_dir,
surfaces=['head', 'inner_skull'], bem=use_bem,
verbose=True)
log = log.getvalue()
assert 'not find the surface for head in the provided BEM model' in log
# sphere model
sphere = make_sphere_model('auto', 'auto', evoked.info)
src = setup_volume_source_space(sphere=sphere)
plot_alignment(info, eeg='projected', meg='helmet', bem=sphere,
src=src, dig=True, surfaces=['brain', 'inner_skull',
'outer_skull', 'outer_skin'])
sphere = make_sphere_model('auto', None, evoked.info) # one layer
# calling plot_alignment without an info object is permitted
fig = plot_alignment(trans=trans_fname, subject='sample', meg=False,
coord_frame='mri', subjects_dir=subjects_dir,
surfaces=['brain'], bem=sphere, show_axes=True)
renderer.backend._close_all()
if renderer._get_3d_backend() == 'mayavi':
import mayavi # noqa: F401 analysis:ignore
assert isinstance(fig, mayavi.core.scene.Scene)
# 3D coil with no defined draw (ConvexHull)
info_cube = pick_info(info, [0])
info['dig'] = None
info_cube['chs'][0]['coil_type'] = 9999
with pytest.raises(RuntimeError, match='coil definition not found'):
plot_alignment(info_cube, meg='sensors', surfaces=())
coil_def_fname = op.join(tempdir, 'temp')
with open(coil_def_fname, 'w') as fid:
fid.write(coil_3d)
with use_coil_def(coil_def_fname):
plot_alignment(info_cube, meg='sensors', surfaces=(), dig=True)
# one layer bem with skull surfaces:
with pytest.raises(ValueError, match='sphere conductor model must have'):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir,
surfaces=['brain', 'head', 'inner_skull'], bem=sphere)
# wrong eeg value:
with pytest.raises(ValueError, match='Invalid value for the .eeg'):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir, eeg='foo')
# wrong meg value:
with pytest.raises(ValueError, match='Invalid value for the .meg'):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir, meg='bar')
# multiple brain surfaces:
with pytest.raises(ValueError, match='Only one brain surface can be plot'):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir,
surfaces=['white', 'pial'])
with pytest.raises(TypeError, match='all entries in surfaces must be'):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir,
surfaces=[1])
with pytest.raises(ValueError, match='Unknown surface type'):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir,
surfaces=['foo'])
with pytest.raises(TypeError, match="must be an instance of "):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir,
surfaces=dict(brain='super clear'))
with pytest.raises(ValueError, match="must be between 0 and 1"):
plot_alignment(info=info, trans=trans_fname,
subject='sample', subjects_dir=subjects_dir,
surfaces=dict(brain=42))
fwd_fname = op.join(data_dir, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fwd = read_forward_solution(fwd_fname)
plot_alignment(subject='sample', subjects_dir=subjects_dir,
trans=trans_fname, fwd=fwd,
surfaces='white', coord_frame='head')
fwd = convert_forward_solution(fwd, force_fixed=True)
plot_alignment(subject='sample', subjects_dir=subjects_dir,
trans=trans_fname, fwd=fwd,
surfaces='white', coord_frame='head')
# surfaces as dict
plot_alignment(subject='sample', coord_frame='head',
subjects_dir=subjects_dir,
surfaces={'white': 0.4, 'outer_skull': 0.6, 'head': None})
# fNIRS (default is pairs)
info = read_raw_nirx(nirx_fname).info
with catch_logging() as log:
plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True)
log = log.getvalue()
assert '26 fNIRS pairs' in log
assert '26 fNIRS locations' not in log
assert '26 fNIRS sources' not in log
assert '26 fNIRS detectors' not in log
with catch_logging() as log:
plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
fnirs=['channels', 'sources', 'detectors'])
log = log.getvalue()
assert '26 fNIRS pairs' not in log
assert '26 fNIRS locations' in log
assert '26 fNIRS sources' in log
assert '26 fNIRS detectors' in log
renderer.backend._close_all()
@pytest.mark.slowtest # can be slow on OSX
@testing.requires_testing_data
@requires_pysurfer
@traits_test
def test_process_clim_plot(renderer_interactive, brain_gc):
"""Test functionality for determining control points with stc.plot."""
sample_src = read_source_spaces(src_fname)
kwargs = dict(subjects_dir=subjects_dir, smoothing_steps=1,
time_viewer=False, show_traces=False)
vertices = [s['vertno'] for s in sample_src]
n_time = 5
n_verts = sum(len(v) for v in vertices)
stc_data = np.random.RandomState(0).rand((n_verts * n_time))
stc_data.shape = (n_verts, n_time)
stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')
# Test for simple use cases
brain = stc.plot(**kwargs)
assert brain.data['center'] is None
brain.close()
brain = stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs)
assert brain.data['center'] == 0.
brain.close()
stc.plot(colormap='hot', clim='auto', **kwargs)
stc.plot(colormap='mne', clim='auto', **kwargs)
stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99, **kwargs)
pytest.raises(TypeError, stc.plot, clim='auto', figure=[0], **kwargs)
# Test for correct clim values
with pytest.raises(ValueError, match='monotonically'):
stc.plot(clim=dict(kind='value', pos_lims=[0, 1, 0]), **kwargs)
with pytest.raises(ValueError, match=r'.*must be \(3,\)'):
stc.plot(colormap='mne', clim=dict(pos_lims=(5, 10, 15, 20)), **kwargs)
with pytest.raises(ValueError, match="'value', 'values', and 'percent'"):
stc.plot(clim=dict(pos_lims=(5, 10, 15), kind='foo'), **kwargs)
with pytest.raises(ValueError, match='must be "auto" or dict'):
stc.plot(colormap='mne', clim='foo', **kwargs)
with pytest.raises(TypeError, match='must be an instance of'):
plot_source_estimates('foo', clim='auto', **kwargs)
with pytest.raises(ValueError, match='hemi'):
stc.plot(hemi='foo', clim='auto', **kwargs)
with pytest.raises(ValueError, match='Exactly one'):
stc.plot(clim=dict(lims=[0, 1, 2], pos_lims=[0, 1, 2], kind='value'),
**kwargs)
# Test handling of degenerate data: thresholded maps
stc._data.fill(0.)
with pytest.warns(RuntimeWarning, match='All data were zero'):
plot_source_estimates(stc, **kwargs)
def _assert_mapdata_equal(a, b):
__tracebackhide__ = True
assert set(a.keys()) == {'clim', 'colormap', 'transparent'}
assert a.keys() == b.keys()
assert a['transparent'] == b['transparent'], 'transparent'
aa, bb = a['clim'], b['clim']
assert aa.keys() == bb.keys(), 'clim keys'
assert aa['kind'] == bb['kind'] == 'value'
key = 'pos_lims' if 'pos_lims' in aa else 'lims'
assert_array_equal(aa[key], bb[key], err_msg=key)
assert isinstance(a['colormap'], Colormap), 'Colormap'
assert isinstance(b['colormap'], Colormap), 'Colormap'
assert a['colormap'].name == b['colormap'].name
def test_process_clim_round_trip():
"""Test basic input-output support."""
# With some negative data
out = _process_clim('auto', 'auto', True, -1.)
want = dict(
colormap=mne_analyze_colormap([0, 0.5, 1], 'matplotlib'),
clim=dict(kind='value', pos_lims=[1, 1, 1]),
transparent=True,)
_assert_mapdata_equal(out, want)
out2 = _process_clim(**out)
_assert_mapdata_equal(out, out2)
_linearize_map(out) # smoke test
ticks = _get_map_ticks(out)
assert_allclose(ticks, [-1, 0, 1])
# With some positive data
out = _process_clim('auto', 'auto', True, 1.)
want = dict(
colormap=plt.get_cmap('hot'),
clim=dict(kind='value', lims=[1, 1, 1]),
transparent=True,)
_assert_mapdata_equal(out, want)
out2 = _process_clim(**out)
_assert_mapdata_equal(out, out2)
_linearize_map(out)
ticks = _get_map_ticks(out)
assert_allclose(ticks, [1])
# With some actual inputs
clim = dict(kind='value', pos_lims=[0, 0.5, 1])
out = _process_clim(clim, 'auto', True)
want = dict(
colormap=mne_analyze_colormap([0, 0.5, 1], 'matplotlib'),
clim=clim, transparent=True)
_assert_mapdata_equal(out, want)
_linearize_map(out)
ticks = _get_map_ticks(out)
assert_allclose(ticks, [-1, -0.5, 0, 0.5, 1])
clim = dict(kind='value', pos_lims=[0.25, 0.5, 1])
out = _process_clim(clim, 'auto', True)
want = dict(
colormap=mne_analyze_colormap([0, 0.5, 1], 'matplotlib'),
clim=clim, transparent=True)
_assert_mapdata_equal(out, want)
_linearize_map(out)
ticks = _get_map_ticks(out)
assert_allclose(ticks, [-1, -0.5, -0.25, 0, 0.25, 0.5, 1])
@testing.requires_testing_data
@requires_nibabel()
def test_stc_mpl():
"""Test plotting source estimates with matplotlib."""
sample_src = read_source_spaces(src_fname)
vertices = [s['vertno'] for s in sample_src]
n_time = 5
n_verts = sum(len(v) for v in vertices)
stc_data = np.ones((n_verts * n_time))
stc_data.shape = (n_verts, n_time)
stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')
with pytest.warns(RuntimeWarning, match='not included'):
stc.plot(subjects_dir=subjects_dir, time_unit='s', views='ven',
hemi='rh', smoothing_steps=2, subject='sample',
backend='matplotlib', spacing='oct1', initial_time=0.001,
colormap='Reds')
fig = stc.plot(subjects_dir=subjects_dir, time_unit='ms', views='dor',
hemi='lh', smoothing_steps=2, subject='sample',
backend='matplotlib', spacing='ico2', time_viewer=True,
colormap='mne')
time_viewer = fig.time_viewer
_fake_click(time_viewer, time_viewer.axes[0], (0.5, 0.5)) # change t
time_viewer.canvas.key_press_event('ctrl+right')
time_viewer.canvas.key_press_event('left')
pytest.raises(ValueError, stc.plot, subjects_dir=subjects_dir,
hemi='both', subject='sample', backend='matplotlib')
pytest.raises(ValueError, stc.plot, subjects_dir=subjects_dir,
time_unit='ss', subject='sample', backend='matplotlib')
@pytest.mark.timeout(60) # can sometimes take > 60 sec
@testing.requires_testing_data
@requires_nibabel()
@pytest.mark.parametrize('coord_frame, idx, show_all, title',
[('head', 'gof', True, 'Test'),
('mri', 'amplitude', False, None)])
def test_plot_dipole_mri_orthoview(coord_frame, idx, show_all, title):
"""Test mpl dipole plotting."""
dipoles = read_dipole(dip_fname)
trans = read_trans(trans_fname)
fig = dipoles.plot_locations(trans=trans, subject='sample',
subjects_dir=subjects_dir,
coord_frame=coord_frame, idx=idx,
show_all=show_all, title=title,
mode='orthoview')
fig.canvas.scroll_event(0.5, 0.5, 1) # scroll up
fig.canvas.scroll_event(0.5, 0.5, -1) # scroll down
fig.canvas.key_press_event('up')
fig.canvas.key_press_event('down')
fig.canvas.key_press_event('a') # some other key
ax = fig.add_subplot(211)
with pytest.raises(TypeError, match='instance of Axes3D'):
dipoles.plot_locations(trans, 'sample', subjects_dir, ax=ax)
@testing.requires_testing_data
def test_plot_dipole_orientations(renderer):
"""Test dipole plotting in 3d."""
dipoles = read_dipole(dip_fname)
trans = read_trans(trans_fname)
for coord_frame, mode in zip(['head', 'mri'],
['arrow', 'sphere']):
dipoles.plot_locations(trans=trans, subject='sample',
subjects_dir=subjects_dir,
mode=mode, coord_frame=coord_frame)
renderer.backend._close_all()
@testing.requires_testing_data
@traits_test
def test_snapshot_brain_montage(renderer):
"""Test snapshot brain montage."""
info = read_info(evoked_fname)
fig = plot_alignment(
info, trans=None, subject='sample', subjects_dir=subjects_dir)
xyz = np.vstack([ich['loc'][:3] for ich in info['chs']])
ch_names = [ich['ch_name'] for ich in info['chs']]
xyz_dict = dict(zip(ch_names, xyz))
xyz_dict[info['chs'][0]['ch_name']] = [1, 2] # Set one ch to only 2 vals
# Make sure wrong types are checked
pytest.raises(TypeError, snapshot_brain_montage, fig, xyz)
# All chs must have 3 position values
pytest.raises(ValueError, snapshot_brain_montage, fig, xyz_dict)
# Make sure we raise error if the figure has no scene
pytest.raises(ValueError, snapshot_brain_montage, None, info)
@pytest.mark.slowtest # can be slow on OSX
@testing.requires_testing_data
@requires_pysurfer
@traits_test
@pytest.mark.parametrize('pick_ori', ('vector', None))
@pytest.mark.parametrize('kind', ('surface', 'volume', 'mixed'))
def test_plot_source_estimates(renderer_interactive, all_src_types_inv_evoked,
pick_ori, kind, brain_gc):
"""Test plotting of scalar and vector source estimates."""
invs, evoked = all_src_types_inv_evoked
inv = invs[kind]
is_pyvista = renderer_interactive._get_3d_backend() == 'pyvista'
with pytest.warns(None): # PCA mag
stc = apply_inverse(evoked, inv, pick_ori=pick_ori)
stc.data[1] *= -1 # make it signed
meth_key = 'plot_3d' if isinstance(stc, _BaseVolSourceEstimate) else 'plot'
stc.subject = 'sample'
meth = getattr(stc, meth_key)
kwargs = dict(subjects_dir=subjects_dir,
time_viewer=False, show_traces=False, # for speed
smoothing_steps=1, verbose='error', src=inv['src'],
volume_options=dict(resolution=None), # for speed
)
if pick_ori != 'vector':
kwargs['surface'] = 'white'
kwargs['backend'] = renderer_interactive._get_3d_backend()
# Mayavi can't handle non-surface
if kind != 'surface' and not is_pyvista:
with pytest.raises(RuntimeError, match='PyVista'):
meth(**kwargs)
return
brain = meth(**kwargs)
brain.close()
del brain
these_kwargs = kwargs.copy()
these_kwargs['show_traces'] = 'foo'
with pytest.raises(ValueError, match='show_traces'):
meth(**these_kwargs)
del these_kwargs
if pick_ori == 'vector':
with pytest.raises(ValueError, match='use "pos_lims"'):
meth(**kwargs, clim=dict(pos_lims=[1, 2, 3]))
if kind in ('volume', 'mixed'):
with pytest.raises(TypeError, match='when stc is a mixed or vol'):
these_kwargs = kwargs.copy()
these_kwargs.pop('src')
meth(**these_kwargs)
with pytest.raises(ValueError, match='cannot be used'):
these_kwargs = kwargs.copy()
these_kwargs.update(show_traces=True, time_viewer=False)
meth(**these_kwargs)
if not is_pyvista:
with pytest.raises(ValueError, match='view_layout must be'):
meth(view_layout='horizontal', **kwargs)
# flatmaps (mostly a lot of error checking)
these_kwargs = kwargs.copy()
these_kwargs.update(surface='flat', views='auto')
if kind == 'surface' and pick_ori != 'vector' and is_pyvista:
with pytest.raises(FileNotFoundError, match='flatmap'):
meth(**these_kwargs) # sample does not have them
fs_stc = stc.copy()
fs_stc.subject = 'fsaverage' # this is wrong, but don't have to care
flat_meth = getattr(fs_stc, meth_key)
these_kwargs.pop('src')
if pick_ori == 'vector':
pass # can't even pass "surface" variable
elif kind != 'surface':
with pytest.raises(TypeError, match='SourceEstimate when a flatmap'):
flat_meth(**these_kwargs)
elif not is_pyvista:
with pytest.raises(RuntimeError, match='PyVista 3D backend.*flatmap'):
flat_meth(**these_kwargs)
else:
brain = flat_meth(**these_kwargs)
brain.close()
these_kwargs.update(surface='inflated', views='flat')
with pytest.raises(ValueError, match='surface="flat".*views="flat"'):
flat_meth(**these_kwargs)
# just test one for speed
if kind != 'mixed':
return
assert is_pyvista
brain = meth(
views=['lat', 'med', 'ven'], hemi='lh',
view_layout='horizontal', **kwargs)
brain.close()
assert brain._subplot_shape == (1, 3)
del brain
these_kwargs = kwargs.copy()
these_kwargs['volume_options'] = dict(blending='foo')
with pytest.raises(ValueError, match='mip'):
meth(**these_kwargs)
these_kwargs['volume_options'] = dict(badkey='foo')
with pytest.raises(ValueError, match='unknown'):
meth(**these_kwargs)
# with resampling (actually downsampling, which is fine here)
these_kwargs['volume_options'] = dict(resolution=20., surface_alpha=0.)
brain = meth(**these_kwargs)
brain.close()
del brain
@pytest.mark.slowtest
@testing.requires_testing_data
def test_plot_sensors_connectivity(renderer):
"""Test plotting of sensors connectivity."""
from mne import io, pick_types
data_path = data_dir
raw_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw.fif')
raw = io.read_raw_fif(raw_fname)
picks = pick_types(raw.info, meg='grad', eeg=False, stim=False,
eog=True, exclude='bads')
n_channels = len(picks)
con = np.random.RandomState(42).randn(n_channels, n_channels)
info = raw.info
with pytest.raises(TypeError):
plot_sensors_connectivity(info='foo', con=con,
picks=picks)
with pytest.raises(ValueError):
plot_sensors_connectivity(info=info, con=con[::2, ::2],
picks=picks)
plot_sensors_connectivity(info=info, con=con, picks=picks)
@pytest.mark.parametrize('orientation', ('horizontal', 'vertical'))
@pytest.mark.parametrize('diverging', (True, False))
@pytest.mark.parametrize('lims', ([0.5, 1, 10], [0, 1, 10]))
def test_brain_colorbar(orientation, diverging, lims):
"""Test brain colorbar plotting."""
_, ax = plt.subplots()
clim = dict(kind='value')
if diverging:
clim['pos_lims'] = lims
else:
clim['lims'] = lims
plot_brain_colorbar(ax, clim, orientation=orientation)
if orientation == 'vertical':
have, empty = ax.get_yticklabels, ax.get_xticklabels
else:
have, empty = ax.get_xticklabels, ax.get_yticklabels
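# For diverging limits the negative ticks mirror pos_lims; when the lower
# limit is 0 the mirrored zero would duplicate, so 0 appears only once.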
if diverging:
if lims[0] == 0:
ticks = list(-np.array(lims[1:][::-1])) + lims
else:
ticks = list(-np.array(lims[::-1])) + [0] + lims
else:
ticks = lims
plt.draw()
assert_array_equal(
[float(h.get_text().replace('−', '-')) for h in have()], ticks)
assert_array_equal(empty(), [])
@pytest.mark.slowtest # slow-ish on Travis OSX
@requires_pysurfer
@testing.requires_testing_data
@traits_test
def test_mixed_sources_plot_surface(renderer_interactive):
"""Test plot_surface() for mixed source space."""
src = read_source_spaces(fwd_fname2)
N = np.sum([s['nuse'] for s in src]) # number of sources
T = 2 # number of time points
S = 3 # number of source spaces
rng = np.random.RandomState(0)
data = rng.randn(N, T)
vertno = S * [np.arange(N // S)]
stc = MixedSourceEstimate(data, vertno, 0, 1)
stc.surface().plot(views='lat', hemi='split',
subject='fsaverage', subjects_dir=subjects_dir,
colorbar=False)
@testing.requires_testing_data
@traits_test
@pytest.mark.slowtest
def test_link_brains(renderer_interactive):
"""Test plotting linked brains."""
sample_src = read_source_spaces(src_fname)
vertices = [s['vertno'] for s in sample_src]
n_time = 5
n_verts = sum(len(v) for v in vertices)
stc_data = np.zeros((n_verts * n_time))
stc_size = stc_data.size
stc_data[(np.random.rand(stc_size // 20) * stc_size).astype(int)] = \
np.random.RandomState(0).rand(stc_data.size // 20)
stc_data.shape = (n_verts, n_time)
stc = SourceEstimate(stc_data, vertices, 1, 1)
colormap = 'mne_analyze'
brain = plot_source_estimates(
stc, 'sample', colormap=colormap,
background=(1, 1, 0),
subjects_dir=subjects_dir, colorbar=True,
clim='auto'
)
if renderer_interactive._get_3d_backend() != 'pyvista':
with pytest.raises(NotImplementedError, match='backend is pyvista'):
link_brains(brain)
else:
with pytest.raises(ValueError, match='is empty'):
link_brains([])
with pytest.raises(TypeError, match='type is Brain'):
link_brains('foo')
link_brains(brain, time=True, camera=True)
def test_renderer(renderer):
"""Test that renderers are available on demand."""
backend = renderer.get_3d_backend()
cmd = [sys.executable, '-uc',
'import mne; mne.viz.create_3d_figure((800, 600)); '
'backend = mne.viz.get_3d_backend(); '
'assert backend == %r, backend' % (backend,)]
with modified_env(MNE_3D_BACKEND=backend):
run_subprocess(cmd)
| olafhauk/mne-python | mne/viz/tests/test_3d.py | Python | bsd-3-clause | 34,878 | ["Mayavi"] | 4c538668e2134356ac43f052cc6628053114714cc2fae5e0387b4f24b713d11d |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Martin Hawlisch, Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
""" Dump Gender Statistics.
Tools/Debug/Dump Gender Statistics
"""
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gui.listmodel import ListModel, INTEGER
from gramps.gui.managedwindow import ManagedWindow
from gramps.gui.plug import tool
_GENDER = [ _('female'), _('male'), _('unknown') ]
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
class DumpGenderStats(tool.Tool, ManagedWindow):
def __init__(self, dbstate, user, options_class, name, callback=None):
uistate = user.uistate
self.label = _("Gender Statistics tool")
tool.Tool.__init__(self, dbstate, options_class, name)
stats_list = []
for name, value in dbstate.db.genderStats.stats.items():
stats_list.append(
(name,)
+ value
+ (_GENDER[dbstate.db.genderStats.guess_gender(name)],)
)
if uistate:
ManagedWindow.__init__(self, uistate, [], self.__class__)
titles = [(_('Name'), 0, 100),
(_('Male'), 1, 70, INTEGER),
(_('Female'), 2, 70, INTEGER),
(_('Unknown'), 3, 90, INTEGER),
(_('Guess'), 4, 70)]
treeview = Gtk.TreeView()
model = ListModel(treeview, titles)
for entry in sorted(stats_list):
model.add(entry, entry[0])
s = Gtk.ScrolledWindow()
s.add(treeview)
dialog = Gtk.Dialog()
dialog.add_button(_('_Close'), Gtk.ResponseType.CLOSE)
dialog.connect('response', self._response)
dialog.vbox.pack_start(s, expand=True, fill=True, padding=0)
self.set_window(dialog, None, self.label)
self.setup_configs('interface.dumpgenderstats', 420, 300)
self.show()
else:
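# No GUI available: fall back to a plain, tab-separated dump on stdout,
# left-padding any name shorter than 16 characters for alignment.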
if len(_('Name')) < 16:
print('%s%s%s' % (_('Name'),
" " * (16 - len(_('Name'))),
_('Male')),
'\t%s'*3 % (_('Female'), _('Unknown'), _('Guess')))
else:
print(_('Name'), '\t%s'*4 % (_('Male'), _('Female'),
_('Unknown'), _('Guess')))
print()
for entry in sorted(stats_list):
if len(entry[0]) < 16:
print('%s%s%s' % (entry[0],
" " * (16 - len(entry[0])),
entry[1]),
'\t%s'*3 % (entry[2:]))
else:
print(entry[0], '\t%s'*4 % (entry[1:]))
def _response(self, obj, response_id):
if response_id == Gtk.ResponseType.CLOSE:
self.close()
def build_menu_names(self, obj):
return (self.label, None)
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class DumpGenderStatsOptions(tool.ToolOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, person_id=None):
tool.ToolOptions.__init__(self, name, person_id)
| SNoiraud/gramps | gramps/plugins/tool/dumpgenderstats.py | Python | gpl-2.0 | 4,674 | ["Brian"] | ad87542c0ce3c2af0fd6cf260edda64d0a8d4f35c813ba322b03111e48e389ed |
#! python3
import argparse
import pandas
import numpy as np
import matplotlib.pyplot as plt
import math
import itertools
def parseArguments(args):
parser = argparse.ArgumentParser(description="Creates convergence plots from gathered stats")
parser.add_argument('-f', '--file', type=argparse.FileType('r'), default="stats.csv", help='The CSV file containing the gathered stats.')
parser.add_argument('--show', action="store_true", help='Shows the plots instead of saving them.')
return parser.parse_args(args)
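# Geometric (log-average) mean; the natural midpoint on log-scaled axes.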
def lavg(l):
return math.exp(sum(map(math.log, l)) / len(l))
def getStyler():
styles = ['solid', 'dashed', 'dashdot']
colors = ['#0173b2', '#de8f05', '#029e73', '#d55e00', '#cc78bc', '#ca9161', '#fbafe4', '#949494', '#ece133', '#56b4e9']
markers = ['o', 'v', '^', 'D', '*']
for style in itertools.product(styles, markers, colors):
yield style
def plot_order(ax, nth, xmin, xmax, ymin, ymax):
x1, y1 = xmax, ymax
def f(x):
return y1 * ((x / x1)**nth)
xl, xu = xmin, xmax
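# Geometric bisection: find the x at which the reference line of slope `nth`
# crosses ymin, so the line and its annotation stay inside the log-log axes.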
for step in range(4):
xt = lavg([xu, xl])
yt = f(xt)
if yt > ymin:
xu = xt
if yt < ymin:
xl = xt
x2, y2 = xu, f(xu)
xs, ys = [x1, x2], [y1, y2]
ax.plot(xs, ys, color="lightgray", linewidth=1.0, zorder=-1)
ax.annotate(
"{} order".format(nth),
xy=(lavg(xs), lavg(ys)),
color="gray",
zorder=-1
)
def main(argv):
args = parseArguments(argv[1:])
df = pandas.read_csv(args.file)
numeric_cols = ['mesh A', 'mesh B', 'count', 'min', 'max', 'median', 'relative-l2', 'weighted-l2', '99th percentile', '95th percentile', '90th percentile', 'peakMemA', 'peakMemB', 'computeMappingTime']
df[numeric_cols] = df[numeric_cols].apply(pandas.to_numeric)
# remove all matching meshes
df = df[df["mesh A"] != df["mesh B"]]
singleB = df[ df["mesh B"] == 0.025 ]
# \item about the best way to go: for one target mesh compare best local-rbf (gaussian), global-rbf (tps), nn, np (For each geometry: 12 series)
# Goal: show errors of best mappings (user perspective)
best = singleB[
singleB["mapping"].apply(lambda n: (n in ["nn", "np", "tps"]) | n.endswith("-separate") )
]
plot(best, "best-mappings", xname="mesh A", yname="relative-l2", show=args.show)
# \item pick one geometry, one target mesh: nn, np, tps, gaussian-nX separate
# Goal: show memory usage of best mappings (user perspective)
plot(singleB, "memory-usage", xname="mesh A", yname="peakMemB", show=args.show, conv=False)
# \item pick one geometry, one target mesh: nn, np, tps, gaussian-nX separate
# Goal: show compute time of best mappings (user perspective)
plot(singleB[singleB["computeMappingTime"] > 0], "compute-time", xname="mesh A", yname="computeMappingTime", show=args.show, conv=False)
# \item pick one geometry, one target mesh, varying rank counts: nn, np, tps, gaussian-nX separate
# Goal: show weak scalability of best mappings (user perspective)
# TODO plot(singleB, show=args.show)
# \item pick one geometry, fixed rank count, varying target meshes: nn, np, tps, gaussian-nX separate
# Goal: show strong scalability of best mappings (user perspective)
plot(singleB, "strong-scaling", xname="mesh A", yname="computeMappingTime", show=args.show, conv=False)
# \item pick one geometry and target mesh: compare gaussian rbf on vs separate and different support radii (8 series)
# Goal: Options for local-rbf you should not choose and why
gaussians = singleB[singleB["mapping"].str.startswith("gaussian")]
plot(gaussians, "rbf-comp", xname="mesh A", yname="relative-l2", show=args.show)
# \item pick one geometry: 3 different target meshes: np vs gaussian-n5-separate (6 series)
# Goal: Above holds for different target meshes
reverse = df.query('mapping == "np" | mapping == "gaussian-n5-separate"')
plot(reverse, "changing-b", xname="mesh A", yname="relative-l2", show=args.show)
return 0
def plot(df, output, xname="mesh A", yname="relative-l2", groupname="mesh B", show=False, conv=True):
fmt = "{} onto {}"
styler = getStyler()
print("Plot x:{} y:{} grouped by {}".format(xname, yname, groupname))
df = df.sort_values(xname)
grouped = df.groupby(["mapping", groupname])
fig, ax = plt.subplots(sharex=True, sharey=True, figsize=(10,5))
for name, group in grouped:
print("\tGroup {} with {} points".format(fmt.format(*name), group.shape[0]))
l, m, c = next(styler)
group.plot(
ax=ax,
loglog=True,
x=xname,
y=yname,
label=fmt.format(*name),
marker=m,
linestyle=l,
color=c
)
ax.set_xlabel("edge length (h) of {}".format(xname))
ax.set_ylabel("{} error mapping to mesh B".format(yname))
if conv:
filtered = df[yname]
plot_order(ax, 1, df[xname].min(), df[xname].max(), filtered.min(), filtered.max())
plot_order(ax, 2, df[xname].min(), df[xname].max(), filtered.min(), filtered.max())
plot_order(ax, 3, df[xname].min(), df[xname].max(), filtered.min(), filtered.max())
plt.gca().invert_xaxis()
plt.grid()
ax.legend(loc="upper left", bbox_to_anchor=(1,1))
plt.subplots_adjust(right=0.7)
if show:
plt.show()
else:
fig.savefig(output + ".pdf", pad_inches=1)
if __name__ == "__main__":
import sys
sys.exit(main(sys.argv))
| precice/aste | contrib/mapping-tester/plots/paperplot.py | Python | gpl-3.0 | 5,620 | ["Gaussian"] | fa18f1c656ed3d9befe826e3448530cab58190238f6be478ae8ad61d41b8df12 |
import socialite.engine.LocalEngine as LocalEngine
import socialite.engine.ClientEngine as ClientEngine
import socialite.engine.Config as Config
import socialite.tables.QueryVisitor as QueryVisitor
import socialite.tables.Tuple as Tuple
import socialite.util.SociaLiteException as SociaLiteException
import socialite.type.Utf8 as Utf8
import sys
import java.util.concurrent.atomic.AtomicBoolean as AtomicBool
import java.lang.InterruptedException as JavaInterruptedException
from threading import Thread, InterruptedException, Condition, Lock
from Queue import Queue
__all__ = ['returns', 'cwd', 'chdir', 'store', 'load', 'tables', 'status', 'engine', 'SociaLiteException', 'double']
__doc__ = """
Useful functions:
tables() : shows declared SociaLite tables
status() : shows runtime status of SociaLite
Use backtick(`) to run SociaLite queries
e.g. `Friend(String i, (String f)).` # declares a table Friend having two columns.
`Friend(a,b) :- a="John Smith", b="Jane Doe".` # inserts a tuple into Friend.
for i, f in `Friend(i, f)`: # iterates over tuples in Friend
print i, f
Type help(socialite.examples) to see more SociaLite query examples.
"""
examples="""
`Edge(int i, (int f)).` # declares Edge table (with nested 2nd column).
`Edge(int i:0..1000, (int f)).` # Values of 1st column of Edge is between 0 and 1000
`Edge(s, t) :- l=$read("edges.txt"), # $read returns lines in edges.txt
(a,b)=$split(l, "\\t"),# splits a string with a delimiter, tab here.
s=$toInt(a), # Casting a,b into primitive int.
t=$toInt(b).`
`Foaf(i, f) :- Friend(i,x), Friend(x,f).` # joins Friend table with itself
# to compute friends-of-friends
# and store the result in Foaf.
for i, f in `Foaf(i, f)`: # iterates over tuples in Foaf
print i, f
`FriendCnt(int i, int cnt) groupby(1). # we will apply $inc to the 'cnt' column,
# which requires groupby with one column (column 'i').
FriendCnt(i, $inc(1)) :- Friend(i,f).` # counting the # of friends for each person.
@returns(int) # annotates function return type
def randInt(s, e): # to access it from SociaLite queries
import random as r
return r.randint(s, e)
# Computes average friend counts for randomly selected samples.
`SampleAvg(int i:0..0, Avg avg).
SampleAvg(0, $avg(cnt)) :- i=$randInt(0,100), FriendCnt(i, cnt).`
"""
# Initialize useful functions (help, quit, ...)
import __builtin__
class _Helper(object):
def __init__(self):
global examples
self.socialite = sys.modules[__name__]
self.socialiteExamples = examples
def __repr__(self):
return "Type help(socialite) for help on SociaLite, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
if args and args[0]==self.socialite:
print self.socialite.__doc__
return
elif args and args[0]==self.socialiteExamples:
print self.socialite.examples
return
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
__builtin__.socialite = sys.modules[__name__]
__builtin__.help = _Helper()
import os
def setquit():
"""Define new built-ins 'quit' and 'exit'.
These are simply strings that display a hint on how to exit.
"""
if os.sep == ':':
eof = 'Cmd-Q'
elif os.sep == '\\':
eof = 'Ctrl-Z plus Return'
else:
eof = 'Ctrl-D (i.e. EOF)'
class Quitter(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Use %s() or %s to exit' % (self.name, eof)
def __call__(self, code=None):
# Shells like IDLE catch the SystemExit, but listen when their
# stdin wrapper is closed.
try:
sys.stdin.close()
except:
pass
raise SystemExit(code)
__builtin__.quit = Quitter('quit')
__builtin__.exit = Quitter('exit')
double = float
def internal(f):
f.internal = True
return f
internal.internal = True
isInteractive = False
isClusterEngine = False
engine = None
@internal
def init(cpu=None, dist=False, interactive=False, verbose=None):
if verbose is None: verbose = True
global engine, isClusterEngine, isInteractive
if engine is None:
if dist:
engine = ClientEngine()
isClusterEngine = True
else:
conf = None
if cpu is None: conf = Config.par()
else: conf = Config.par(cpu)
if verbose: conf.setVerbose()
engine = LocalEngine(conf)
if interactive:
isInteractive = True
engine = AsyncEngine(engine)
cleanupFuncsBefore =[]
cleanupFuncsAfter =[]
cleanupLock = Lock()
@internal
def registerCleanupOnExit(f, before=True):
try:
cleanupLock.acquire()
if before: cleanupFuncsBefore.append(f)
else: cleanupFuncsAfter.append(f)
finally:
cleanupLock.release()
@internal
def unregisterCleanupOnExit(f):
try:
cleanupLock.acquire()
cleanupFuncsBefore.remove(f)
cleanupFuncsAfter.remove(f)
finally:
cleanupLock.release()
cleanupDone = AtomicBool()
import time
@internal
def cleanupOnExit():
if cleanupDone.compareAndSet(False, True):
for f in cleanupFuncsBefore: f()
#time.sleep(0.02)
engine.shutdown()
for f in cleanupFuncsAfter: f()
#time.sleep(0.02)
def install_funcs():
sethelper()
setquit()
import atexit
atexit.register(cleanupOnExit)
install_funcs()
@internal
def cwd(): return engine.cwd()
@internal
def chdir(path): engine.chdir(path)
@internal
def store(): engine.storeWorkspace()
@internal
def load(): engine.loadWorkspace()
@internal
def tables(verbose=0):
status = engine.status(0)
print status.getTableStatus()
@internal
def status(verbose=0):
write = sys.stdout.write
write("** SociaLite Runtime Status **\n")
status = engine.status(verbose)
write("Number of nodes: "+status.getNodeNum()+"\n")
write("Free memory:\n")
memStat = filter(lambda x:x, status.getMemStatus().split('\n'))
memStat = ''.join(map(lambda x:' '+x+'\n', memStat))
memStat = memStat.rstrip(' ')
write(memStat)
write("Recent rules:\n")
progStat = status.getProgress().split('\n')
progStat = ' '+'\n '.join(progStat)
progStat = progStat.rstrip(' ')
write(progStat)
@internal
def clear(name): engine.clearTable(name)
@internal
def indent(msg, width=4, indentFirst=True):
if not msg: return msg
tab1 = ' ' * width if indentFirst else ''
tab = ' ' * width
msg = tab1+msg.replace('\n','\n'+tab)
return msg.rstrip(' ')
@internal
def _removeStackTrace(msg):
if not msg: return msg
magic="at socialite.dist.master.QueryListener."
if msg.find(magic) >= 0:
msg = msg[:msg.find(magic)].strip()
magic="org.apache.hadoop.ipc.RemoteException:"
if msg.find(magic) == 0:
msg = msg[len(magic):].strip()
return msg
class AsyncEngine:
END = None
def __init__(self, engine):
self.engine = engine
self.q = Queue(maxsize=16)
self.reqThreads = []
reqThreadNum = 2
for i in xrange(reqThreadNum):
t=Thread(target=self.asyncRequest, name="Async Request Thread")
t.start()
self.reqThreads.append(t)
registerCleanupOnExit(self.cleanupReqThreads)
def getTableRef(self, name):
return self.engine.getTableRef(name)
def cleanupReqThreads(self):
try:
#for t in self.reqThreads:
# self.q.put(self.END)
for t in self.reqThreads:
t._thread.interrupt()
except:
pass
#print "Exception in cleanupReqThreads"
def asyncRequest(self):
try:
while True:
tup = self.q.get()
if tup == self.END: break
query, visitor, id, checker = tup
try:
if visitor: self.engine.run(query, visitor, id)
else: self.engine.run(query)
except:
type, inst, tb = sys.exc_info()
errhead="Error while running:"
print "\n"+errhead+indent(query, width=len(errhead), indentFirst=False)
print indent(_removeStackTrace(inst.getMessage()))
if visitor:
visitor.raiseError(inst)
checker.done=True
self._notify(checker.cv)
except JavaInterruptedException:
pass
def _notify(self, cv):
cv.acquire()
try: cv.notify()
finally: cv.release()
def _wait(self, cv, timeout=None):
cv.acquire()
try: cv.wait(timeout)
finally: cv.release()
def run(self, program, visitor=None, id=None):
done=[]
class Checker(object): pass
checker = Checker()
checker.cv = Condition()
checker.done=False
self.q.put((program, visitor, id, checker))
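# Wait up to 3 seconds for the worker thread to finish; if the query is
# still running after that, return to the prompt and let it complete in
# the background (progress can be checked with status()).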
self._wait(checker.cv, 3)
if not checker.done and not visitor:
print "... still running the query. Type status() to see the progress."
def cleanupTableIter(self, id):
self.engine.cleanupTableIter(id)
def cwd(self):
self.engine.cwd()
def load(self):
self.engine.load()
def status(self, verbose=0):
return self.engine.status()
def chdir(self, path):
self.engine.chdir(path)
def shutdown(self):
self.engine.shutdown()
def update(self, func):
self.engine.update(func)
@internal
def returns(*types):
def _wrapper(f):
if len(types) == 1:
f.returns = types[0]
else:
f.returns = types
engine.update(f)
return f
return _wrapper
@internal
def passVars(*vars):
tmp=[]
for v in vars:
if type(v) == type(0):
tmp.append(str(v))
elif type(v) == type(0.0):
tmp.append(str(v))
elif type(v) == type(""):
v = v.replace('"', '\\"')
tmp.append('"'+v+'"')
elif type(v) == type(u""):
v = v.replace('"', '\\"')
tmp.append('"'+v+'"')
elif isinstance(v , Utf8):
v = v.toString().replace('"', '\\"')
tmp.append('u"'+v+'"')
else:
raise SociaLiteException("Only numbers and Strings can be passed to SociaLite queries")
return tuple(tmp)
class IdFactory:
def __init__(self):
import java.util.concurrent.atomic.AtomicInteger as AtomicInt
self.nextid = AtomicInt()
def next(self):
nextid = self.nextid.getAndIncrement()
return nextid
class TableIterator(QueryVisitor):
END = None
idFactory = IdFactory()
def __init__(self, engine, query):
self.engine = engine
self.query = query
self.q = Queue(maxsize=1024)
self.finished = False
self.cleanupIterDone = AtomicBool()
self.error = None
self.thread = None
self.id = self.idFactory.next()
def startThread(self):
if self.thread: return
self.thread = t = Thread(target=self.run, name="Table Iterator Thread query="+self.query)
registerCleanupOnExit(self.cleanupIterThread, False)
t.start()
def __del__(self):
unregisterCleanupOnExit(self.cleanupIterThread)
self.cleanupIterThread()
def cleanupIterThread(self):
try:
if not self.cleanupIterDone.compareAndSet(False, True):
return
self.finished = True
self.engine.cleanupTableIter(self.id)
self.thread._thread.interrupt()
except:
pass
#print "Exception in cleanupIterThread"
def visit(self, t):
if self.finished: return False
if isinstance(t, Tuple):
cols = []
for i in xrange(t.size()):
cols.append(t.get(i))
self.q.put(tuple(cols))
else: self.q.put(t)
return True
def finish(self):
if self.finished: return
self.q.put(self.END)
def raiseError(self, error):
self.error = error
self.finish()
def run(self):
try:
self.engine.run(self.query, self, self.id)
except SociaLiteException, e1:
e1.printStackTrace()
self.q.put(self.END)
raise e1
except InterruptedException, e3:
return
except Exception, e2:
e2.printStackTrace()
self.q.put(self.END)
raise e2
def __next__(self):
if not self.thread:
self.startThread()
if self.finished or self.error:
raise StopIteration
v = self.q.get()
if self.error:
self.finished = True
raise self.error
if v == self.END:
self.finished = True
raise StopIteration
return v
def next(self):
n = self.__next__()
return n
def __iter__(self):
self.startThread()
return self
| ofermend/medicare-demo | socialite/src/pysocialite/SociaLite.py | Python | apache-2.0 | 13,436 | ["VisIt"] | e8eefd028e3187949a9d15a360511244c0250649c7ce2f0846ce347a31c8a6e4 |
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Gaussian hidden markov model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from fivo.models import base
tfd = tf.contrib.distributions
class GaussianHMM(object):
"""A hidden markov model with 1-D Gaussian latent space and observations.
This is a hidden markov model where the state and observations are
one-dimensional Gaussians. The mean of each latent state is a linear
function of the previous latent state, and the mean of each observation
is a linear function of the current latent state.
The description that follows is 0-indexed instead of 1-indexed to make
it easier to reason about the parameters passed to the model.
The parameters of the model are:
T: The number of timesteps, latent states, and observations.
vz_t, t=0 to T-1: The variance of the latent state at timestep t.
vx_t, t=0 to T-1: The variance of the observation at timestep t.
wz_t, t=1 to T-1: The weight that defines the latent transition at t.
wx_t, t=0 to T-1: The weight that defines the observation function at t.
There are T vz_t, vx_t, and wx_t but only T-1 wz_t because there are only
T-1 transitions in the model.
Given these parameters, sampling from the model is defined as
z_0 ~ N(0, vz_0)
x_0 | z_0 ~ N(wx_0 * z_0, vx_0)
z_1 | z_0 ~ N(wz_1 * z_0, vz_1)
x_1 | z_1 ~ N(wx_1 * z_1, vx_1)
...
z_{T-1} | z_{T-2} ~ N(wz_{T-1} * z_{T-2}, vz_{T-1})
x_{T-1} | z_{T-1} ~ N(wx_{T-1} * z_{T-1}, vx_{T-1}).
"""
def __init__(self,
num_timesteps,
transition_variances=1.,
emission_variances=1.,
transition_weights=1.,
emission_weights=1.,
dtype=tf.float32):
"""Creates a gaussian hidden markov model.
Args:
num_timesteps: A python int, the number of timesteps in the model.
transition_variances: The variance of p(z_t | z_t-1). Can be a scalar,
setting all variances to be the same, or a Tensor of shape
[num_timesteps].
emission_variances: The variance of p(x_t | z_t). Can be a scalar,
setting all variances to be the same, or a Tensor of shape
[num_timesteps].
transition_weights: The weight that defines the linear function that
produces the mean of z_t given z_{t-1}. Can be a scalar, setting
all weights to be the same, or a Tensor of shape [num_timesteps-1].
emission_weights: The weight that defines the linear function that
produces the mean of x_t given z_t. Can be a scalar, setting
all weights to be the same, or a Tensor of shape [num_timesteps].
dtype: The datatype of the state.
"""
self.num_timesteps = num_timesteps
self.dtype = dtype
def _expand_param(param, size):
param = tf.convert_to_tensor(param, dtype=self.dtype)
if not param.get_shape().as_list():
param = tf.tile(param[tf.newaxis], [size])
return param
def _ta_for_param(param):
size = tf.shape(param)[0]
ta = tf.TensorArray(dtype=param.dtype,
size=size,
dynamic_size=False,
clear_after_read=False).unstack(param)
return ta
self.transition_variances = _ta_for_param(
_expand_param(transition_variances, num_timesteps))
self.transition_weights = _ta_for_param(
_expand_param(transition_weights, num_timesteps-1))
em_var = _expand_param(emission_variances, num_timesteps)
self.emission_variances = _ta_for_param(em_var)
em_w = _expand_param(emission_weights, num_timesteps)
self.emission_weights = _ta_for_param(em_w)
self._compute_covariances(em_w, em_var)
def _compute_covariances(self, emission_weights, emission_variances):
"""Compute all covariance matrices.
Computes the covariance matrix for the latent variables, the observations,
and the covariance between the latents and observations.
Args:
emission_weights: A Tensor of shape [num_timesteps] containing
the emission distribution weights at each timestep.
emission_variances: A Tensor of shape [num_timesteps] containing
the emission distribution variances at each timestep.
"""
# Compute the marginal variance of each latent.
z_variances = [self.transition_variances.read(0)]
for i in range(1, self.num_timesteps):
z_variances.append(
z_variances[i-1] * tf.square(self.transition_weights.read(i-1)) +
self.transition_variances.read(i))
# Compute the latent covariance matrix.
sigma_z = []
for i in range(self.num_timesteps):
sigma_z_row = []
for j in range(self.num_timesteps):
if i == j:
sigma_z_row.append(z_variances[i])
continue
min_ind = min(i, j)
max_ind = max(i, j)
weight = tf.reduce_prod(
self.transition_weights.gather(tf.range(min_ind, max_ind)))
sigma_z_row.append(z_variances[min_ind] * weight)
sigma_z.append(tf.stack(sigma_z_row))
self.sigma_z = tf.stack(sigma_z)
# Compute the observation covariance matrix.
x_weights_outer = tf.einsum("i,j->ij", emission_weights, emission_weights)
self.sigma_x = x_weights_outer * self.sigma_z + tf.diag(emission_variances)
# Compute the latent - observation covariance matrix.
# The first axis will index latents, the second axis will index observations.
self.sigma_zx = emission_weights[tf.newaxis, :] * self.sigma_z
self.obs_dist = tfd.MultivariateNormalFullCovariance(
loc=tf.zeros([self.num_timesteps], dtype=tf.float32),
covariance_matrix=self.sigma_x)
def transition(self, t, z_prev):
"""Compute the transition distribution p(z_t | z_t-1).
Args:
t: The current timestep, a scalar integer Tensor. When t=0 z_prev is
mostly ignored and the distribution p(z_0) is returned. z_prev is
'mostly' ignored because it is still used to derive batch_size.
z_prev: A [batch_size] set of states.
Returns:
p(z_t | z_t-1) as a univariate normal distribution.
"""
batch_size = tf.shape(z_prev)[0]
scale = tf.sqrt(self.transition_variances.read(t))
scale = tf.tile(scale[tf.newaxis], [batch_size])
loc = tf.cond(tf.greater(t, 0),
lambda: self.transition_weights.read(t-1)*z_prev,
lambda: tf.zeros_like(scale))
return tfd.Normal(loc=loc, scale=scale)
def emission(self, t, z):
"""Compute the emission distribution p(x_t | z_t).
Args:
t: The current timestep, a scalar integer Tensor.
z: A [batch_size] set of the current states.
Returns:
p(x_t | z_t) as a univariate normal distribution.
"""
batch_size = tf.shape(z)[0]
scale = tf.sqrt(self.emission_variances.read(t))
scale = tf.tile(scale[tf.newaxis], [batch_size])
loc = self.emission_weights.read(t)*z
return tfd.Normal(loc=loc, scale=scale)
def filtering(self, t, z_prev, x_cur):
"""Computes the filtering distribution p(z_t | z_{t-1}, x_t).
Args:
t: A python int, the index for z_t. When t is 0, z_prev is ignored,
giving p(z_0 | x_0).
z_prev: z_{t-1}, the previous z to condition on. A Tensor of shape
[batch_size].
x_cur: x_t, the current x to condition on. A Tensor of shape [batch_size].
Returns:
p(z_t | z_{t-1}, x_t) as a univariate normal distribution.
"""
z_prev = tf.convert_to_tensor(z_prev)
x_cur = tf.convert_to_tensor(x_cur)
batch_size = tf.shape(z_prev)[0]
z_var = self.transition_variances.read(t)
x_var = self.emission_variances.read(t)
x_weight = self.emission_weights.read(t)
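# Standard 1-D conjugate-Gaussian update: with prior z_t ~ N(w_z*z_prev, z_var)
# and likelihood x_t ~ N(w_x*z_t, x_var), the posterior mean below is a
# precision-weighted blend of the previous state and the current observation.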
prev_state_weight = x_var/(tf.square(x_weight)*z_var + x_var)
prev_state_weight *= tf.cond(tf.greater(t, 0),
lambda: self.transition_weights.read(t-1),
lambda: tf.zeros_like(prev_state_weight))
cur_obs_weight = (x_weight*z_var)/(tf.square(x_weight)*z_var + x_var)
loc = prev_state_weight*z_prev + cur_obs_weight*x_cur
scale = tf.sqrt((z_var*x_var)/(tf.square(x_weight)*z_var + x_var))
scale = tf.tile(scale[tf.newaxis], [batch_size])
return tfd.Normal(loc=loc, scale=scale)
def smoothing(self, t, z_prev, xs):
"""Computes the smoothing distribution p(z_t | z_{t-1}, x_{t:num_timesteps).
Args:
t: A python int, the index for z_t. When t is 0, z_prev is ignored,
giving p(z_0 | x_{0:num_timesteps-1}).
z_prev: z_{t-1}, the previous z to condition on. A Tensor of shape
[batch_size].
xs: x_{t:num_timesteps}, the future xs to condition on. A Tensor of shape
[num_timesteps - t, batch_size].
Returns:
p(z_t | z_{t-1}, x_{t:num_timesteps}) as a univariate normal distribution.
"""
xs = tf.convert_to_tensor(xs)
z_prev = tf.convert_to_tensor(z_prev)
batch_size = tf.shape(xs)[1]
mess_mean, mess_prec = tf.cond(
tf.less(t, self.num_timesteps-1),
lambda: tf.unstack(self._compute_backwards_messages(xs[1:]).read(0)),
lambda: [tf.zeros([batch_size]), tf.zeros([batch_size])])
return self._smoothing_from_message(t, z_prev, xs[0], mess_mean, mess_prec)
def _smoothing_from_message(self, t, z_prev, x_t, mess_mean, mess_prec):
"""Computes the smoothing distribution given message incoming to z_t.
Computes p(z_t | z_{t-1}, x_{t:num_timesteps}) given the message incoming
to the node for z_t.
Args:
t: A python int, the index for z_t. When t is 0, z_prev is ignored.
z_prev: z_{t-1}, the previous z to condition on. A Tensor of shape
[batch_size].
x_t: The observation x at timestep t.
mess_mean: The mean of the message incoming to z_t, in information form.
mess_prec: The precision of the message incoming to z_t.
Returns:
p(z_t | z_{t-1}, x_{t:num_timesteps}) as a univariate normal distribution.
"""
batch_size = tf.shape(x_t)[0]
z_var = self.transition_variances.read(t)
x_var = self.emission_variances.read(t)
w_x = self.emission_weights.read(t)
def transition_term():
return (tf.square(self.transition_weights.read(t))/
self.transition_variances.read(t+1))
prec = 1./z_var + tf.square(w_x)/x_var + mess_prec
prec += tf.cond(tf.less(t, self.num_timesteps-1),
transition_term, lambda: 0.)
mean = x_t*(w_x/x_var) + mess_mean
mean += tf.cond(tf.greater(t, 0),
lambda: z_prev*(self.transition_weights.read(t-1)/z_var),
lambda: 0.)
mean = tf.reshape(mean / prec, [batch_size])
scale = tf.reshape(tf.sqrt(1./prec), [batch_size])
return tfd.Normal(loc=mean, scale=scale)
def _compute_backwards_messages(self, xs):
"""Computes the backwards messages used in smoothing."""
batch_size = tf.shape(xs)[1]
num_xs = tf.shape(xs)[0]
until_t = self.num_timesteps - num_xs
xs = tf.TensorArray(dtype=xs.dtype,
size=num_xs,
dynamic_size=False,
clear_after_read=True).unstack(xs)
messages_ta = tf.TensorArray(dtype=xs.dtype,
size=num_xs,
dynamic_size=False,
clear_after_read=False)
def compute_message(t, prev_mean, prev_prec, messages_ta):
"""Computes one step of the backwards messages."""
z_var = self.transition_variances.read(t)
w_z = self.transition_weights.read(t-1)
x_var = self.emission_variances.read(t)
w_x = self.emission_weights.read(t)
cur_x = xs.read(t - until_t)
# If it isn't the first message, add the terms from the transition.
def transition_term():
return (tf.square(self.transition_weights.read(t))/
self.transition_variances.read(t+1))
unary_prec = 1/z_var + tf.square(w_x)/x_var
unary_prec += tf.cond(tf.less(t, self.num_timesteps-1),
transition_term, lambda: 0.)
unary_mean = (w_x / x_var) * cur_x
pairwise_prec = w_z / z_var
next_prec = -tf.square(pairwise_prec)/(unary_prec + prev_prec)
next_mean = (pairwise_prec * (unary_mean + prev_mean) /
(unary_prec + prev_prec))
next_prec = tf.reshape(next_prec, [batch_size])
next_mean = tf.reshape(next_mean, [batch_size])
messages_ta = messages_ta.write(t - until_t,
tf.stack([next_mean, next_prec]))
return t-1, next_mean, next_prec, messages_ta
def pred(t, *unused_args):
return tf.greater_equal(t, until_t)
init_prec = tf.zeros([batch_size], dtype=xs.dtype)
init_mean = tf.zeros([batch_size], dtype=xs.dtype)
t0 = tf.constant(self.num_timesteps - 1, dtype=tf.int32)
outs = tf.while_loop(pred, compute_message,
(t0, init_mean, init_prec, messages_ta))
messages = outs[-1]
return messages
def lookahead(self, t, z_prev):
"""Compute the 'lookahead' distribution, p(x_{t:T} | z_{t-1}).
Args:
t: A scalar Tensor int, the current timestep. Must be at least 1.
z_prev: The latent state at time t-1. A Tensor of shape [batch_size].
Returns:
p(x_{t:T} | z_{t-1}) as a multivariate normal distribution.
"""
z_prev = tf.convert_to_tensor(z_prev)
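# Gaussian conditioning on the joint over (z_{t-1}, x_{t:T}):
# mean = Cov(x, z) * z_prev / Var(z); cov = Cov(x) - Cov(x, z)Cov(z, x) / Var(z).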
sigma_zx = self.sigma_zx[t-1, t:]
z_var = self.sigma_z[t-1, t-1]
mean = tf.einsum("i,j->ij", z_prev, sigma_zx) / z_var
variance = (self.sigma_x[t:, t:] -
tf.einsum("i,j->ij", sigma_zx, sigma_zx) / z_var)
return tfd.MultivariateNormalFullCovariance(
loc=mean, covariance_matrix=variance)
def likelihood(self, xs):
"""Compute the true marginal likelihood of the data.
Args:
xs: The observations, a [num_timesteps, batch_size] float Tensor.
Returns:
likelihoods: A [batch_size] float Tensor representing the likelihood of
each sequence of observations in the batch.
"""
return self.obs_dist.log_prob(tf.transpose(xs))
class TrainableGaussianHMM(GaussianHMM, base.ELBOTrainableSequenceModel):
"""An interface between importance-sampling training methods and the GHMM."""
def __init__(self,
num_timesteps,
proposal_type,
transition_variances=1.,
emission_variances=1.,
transition_weights=1.,
emission_weights=1.,
random_seed=None,
dtype=tf.float32):
"""Constructs a trainable Gaussian HMM.
Args:
num_timesteps: A python int, the number of timesteps in the model.
proposal_type: The type of proposal to use in the importance sampling
setup. Could be "filtering", "smoothing", "prior", "true-filtering",
or "true-smoothing". If "true-filtering" or "true-smoothing" are
selected, then the true filtering or smoothing distributions are used to
propose new states. If "learned-filtering" is selected then a
distribution with learnable parameters is used. Specifically at each
timestep the proposal is Gaussian with mean that is a learnable linear
function of the previous state and current observation. The log variance
is a per-timestep learnable constant. "learned-smoothing" is similar,
but the mean is a learnable linear function of the previous state and
all future observations. Note that this proposal class includes the true
posterior. If "prior" is selected then states are proposed from the
model's prior.
transition_variances: The variance of p(z_t | z_t-1). Can be a scalar,
setting all variances to be the same, or a Tensor of shape
[num_timesteps].
emission_variances: The variance of p(x_t | z_t). Can be a scalar,
setting all variances to be the same, or a Tensor of shape
[num_timesteps].
transition_weights: The weight that defines the linear function that
produces the mean of z_t given z_{t-1}. Can be a scalar, setting
all weights to be the same, or a Tensor of shape [num_timesteps-1].
emission_weights: The weight that defines the linear function that
produces the mean of x_t given z_t. Can be a scalar, setting
all weights to be the same, or a Tensor of shape [num_timesteps].
random_seed: A seed for the proposal sampling, mainly useful for testing.
dtype: The datatype of the state.
"""
super(TrainableGaussianHMM, self).__init__(
num_timesteps, transition_variances, emission_variances,
transition_weights, emission_weights, dtype=dtype)
self.random_seed = random_seed
assert proposal_type in ["filtering", "smoothing", "prior",
"true-filtering", "true-smoothing"]
if proposal_type == "true-filtering":
self.proposal = self._filtering_proposal
elif proposal_type == "true-smoothing":
self.proposal = self._smoothing_proposal
elif proposal_type == "prior":
self.proposal = self.transition
elif proposal_type == "filtering":
self._learned_proposal_fn = base.NonstationaryLinearDistribution(
num_timesteps, inputs_per_timestep=[1] + [2] * (num_timesteps-1))
self.proposal = self._learned_filtering_proposal
elif proposal_type == "smoothing":
inputs_per_timestep = [num_timesteps] + [num_timesteps - t
for t in range(num_timesteps-1)]
self._learned_proposal_fn = base.NonstationaryLinearDistribution(
num_timesteps, inputs_per_timestep=inputs_per_timestep)
self.proposal = self._learned_smoothing_proposal
def set_observations(self, xs, seq_lengths):
"""Sets the observations and stores the backwards messages."""
# Squeeze out data dimension since everything is 1-d.
xs = tf.squeeze(xs)
self.batch_size = tf.shape(xs)[1]
super(TrainableGaussianHMM, self).set_observations(xs, seq_lengths)
self.messages = self._compute_backwards_messages(xs[1:])
def zero_state(self, batch_size, dtype):
return tf.zeros([batch_size], dtype=dtype)
def propose_and_weight(self, state, t):
"""Computes the next state and log weights for the GHMM."""
state_shape = tf.shape(state)
xt = self.observations[t]
p_zt = self.transition(t, state)
q_zt = self.proposal(t, state)
zt = q_zt.sample(seed=self.random_seed)
zt = tf.reshape(zt, state_shape)
p_xt_given_zt = self.emission(t, zt)
log_p_zt = p_zt.log_prob(zt)
log_q_zt = q_zt.log_prob(zt)
log_p_xt_given_zt = p_xt_given_zt.log_prob(xt)
weight = log_p_zt + log_p_xt_given_zt - log_q_zt
return weight, zt
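# Added commentary (not in the original source): the value returned above is the
# per-timestep log importance weight
#   log w_t = log p(z_t | z_{t-1}) + log p(x_t | z_t) - log q(z_t),
# so the product of the w_t over all timesteps is an unbiased estimator of the
# marginal likelihood p(x_{1:T}). A self-contained NumPy sketch of this identity
# appears at the end of this file.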
def _filtering_proposal(self, t, state):
"""Uses the stored observations to compute the filtering distribution."""
cur_x = self.observations[t]
return self.filtering(t, state, cur_x)
def _smoothing_proposal(self, t, state):
"""Uses the stored messages to compute the smoothing distribution."""
mess_mean, mess_prec = tf.cond(
tf.less(t, self.num_timesteps-1),
lambda: tf.unstack(self.messages.read(t)),
lambda: [tf.zeros([self.batch_size]), tf.zeros([self.batch_size])])
return self._smoothing_from_message(t, state, self.observations[t],
mess_mean, mess_prec)
def _learned_filtering_proposal(self, t, state):
cur_x = self.observations[t]
inputs = tf.cond(tf.greater(t, 0),
lambda: tf.stack([state, cur_x], axis=0),
lambda: cur_x[tf.newaxis, :])
return self._learned_proposal_fn(t, inputs)
def _learned_smoothing_proposal(self, t, state):
xs = self.observations_ta.gather(tf.range(t, self.num_timesteps))
inputs = tf.cond(tf.greater(t, 0),
lambda: tf.concat([state[tf.newaxis, :], xs], axis=0),
lambda: xs)
return self._learned_proposal_fn(t, inputs)
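# A minimal, self-contained sketch (added for illustration; not part of the
# original module) of the importance-sampling identity that propose_and_weight
# relies on: E_q[p(z) p(x | z) / q(z)] = p(x). Plain NumPy is used so the sketch
# does not depend on the TF1 graph machinery; all names and constants below are
# illustrative assumptions, not values taken from the model above.
def _importance_weight_sketch(num_samples=100000, seed=0):
    """Monte Carlo check that the mean importance weight recovers p(x)."""
    import numpy as np
    rng = np.random.RandomState(seed)
    x = 0.7  # a fixed 1-d observation
    # Model: z ~ N(0, 1) and x | z ~ N(z, 1), so the exact marginal is
    # p(x) = N(x; 0, 2). Proposal: q(z) = N(0.5, 1.5**2), deliberately
    # different from the prior.
    def log_normal(v, mean, var):
        return -0.5 * (np.log(2.0 * np.pi * var) + (v - mean) ** 2 / var)
    z = rng.normal(0.5, 1.5, size=num_samples)
    log_w = (log_normal(z, 0.0, 1.0)          # log p(z)
             + log_normal(x, z, 1.0)          # log p(x | z)
             - log_normal(z, 0.5, 1.5 ** 2))  # log q(z)
    estimate = np.exp(log_w).mean()           # approximates p(x)
    exact = np.exp(log_normal(x, 0.0, 2.0))
    return estimate, exact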
|
cshallue/models
|
research/fivo/fivo/models/ghmm.py
|
Python
|
apache-2.0
| 20,795
|
[
"Gaussian"
] |
6a33f18078cfe0dd30fe3f6022bb6ae4d6adbecd8602069d09c62992224c891a
|
# mako/codegen.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides functionality for rendering a parsetree constructing into module source code."""
import time
import re
from mako.pygen import PythonPrinter
from mako import util, ast, parsetree, filters, exceptions
MAGIC_NUMBER = 6
def compile(node,
uri,
filename=None,
default_filters=None,
buffer_filters=None,
imports=None,
source_encoding=None,
generate_magic_comment=True,
disable_unicode=False,
strict_undefined=False):
"""Generate module source code given a parsetree node,
uri, and optional source filename"""
# if on Py2K, push the "source_encoding" string to be
# a bytestring itself, as we will be embedding it into
# the generated source and we don't want to coerce the
# result into a unicode object, in "disable_unicode" mode
if not util.py3k and isinstance(source_encoding, str):
source_encoding = source_encoding.encode(source_encoding)
buf = util.FastEncodingBuffer()
printer = PythonPrinter(buf)
_GenerateRenderMethod(printer,
_CompileContext(uri,
filename,
default_filters,
buffer_filters,
imports,
source_encoding,
generate_magic_comment,
disable_unicode,
strict_undefined),
node)
return buf.getvalue()
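# Hedged usage note (added commentary, not part of the original module):
# compile() is normally reached through mako.template.Template, which lexes the
# template source into a parsetree and passes its root node here, e.g.
#
#   from mako.template import Template
#   t = Template("hello ${name | h}")    # '| h' applies the html-escape filter
#   print(t.render(name="<world>"))      # -> hello &lt;world&gt;
#
# The module source returned by compile() is what Template executes to obtain
# its render_body() callable.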
class _CompileContext(object):
def __init__(self,
uri,
filename,
default_filters,
buffer_filters,
imports,
source_encoding,
generate_magic_comment,
disable_unicode,
strict_undefined):
self.uri = uri
self.filename = filename
self.default_filters = default_filters
self.buffer_filters = buffer_filters
self.imports = imports
self.source_encoding = source_encoding
self.generate_magic_comment = generate_magic_comment
self.disable_unicode = disable_unicode
self.strict_undefined = strict_undefined
class _GenerateRenderMethod(object):
"""A template visitor object which generates the
full module source for a template.
"""
def __init__(self, printer, compiler, node):
self.printer = printer
self.last_source_line = -1
self.compiler = compiler
self.node = node
self.identifier_stack = [None]
self.in_def = isinstance(node, (parsetree.DefTag, parsetree.BlockTag))
if self.in_def:
name = "render_%s" % node.funcname
args = node.get_argument_expressions()
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
defs = None
pagetag = None
if node.is_block and not node.is_anonymous:
args += ['**pageargs']
else:
defs = self.write_toplevel()
pagetag = self.compiler.pagetag
name = "render_body"
if pagetag is not None:
args = pagetag.body_decl.get_argument_expressions()
if not pagetag.body_decl.kwargs:
args += ['**pageargs']
cached = eval(pagetag.attributes.get('cached', 'False'))
else:
args = ['**pageargs']
cached = False
buffered = filtered = False
if args is None:
args = ['context']
else:
args = [a for a in ['context'] + args]
self.write_render_callable(
pagetag or node,
name, args,
buffered, filtered, cached)
if defs is not None:
for node in defs:
_GenerateRenderMethod(printer, compiler, node)
@property
def identifiers(self):
return self.identifier_stack[-1]
def write_toplevel(self):
"""Traverse a template structure for module-level directives and
generate the start of module-level code.
"""
inherit = []
namespaces = {}
module_code = []
encoding = [None]
self.compiler.pagetag = None
class FindTopLevel(object):
def visitInheritTag(s, node):
inherit.append(node)
def visitNamespaceTag(s, node):
namespaces[node.name] = node
def visitPageTag(s, node):
self.compiler.pagetag = node
def visitCode(s, node):
if node.ismodule:
module_code.append(node)
f = FindTopLevel()
for n in self.node.nodes:
n.accept_visitor(f)
self.compiler.namespaces = namespaces
module_ident = set()
for n in module_code:
module_ident = module_ident.union(n.declared_identifiers())
module_identifiers = _Identifiers()
module_identifiers.declared = module_ident
# module-level names, python code
if self.compiler.generate_magic_comment and \
self.compiler.source_encoding:
self.printer.writeline("# -*- encoding:%s -*-" %
self.compiler.source_encoding)
self.printer.writeline("from mako import runtime, filters, cache")
self.printer.writeline("UNDEFINED = runtime.UNDEFINED")
self.printer.writeline("__M_dict_builtin = dict")
self.printer.writeline("__M_locals_builtin = locals")
self.printer.writeline("_magic_number = %r" % MAGIC_NUMBER)
self.printer.writeline("_modified_time = %r" % time.time())
self.printer.writeline(
"_template_filename=%r" % self.compiler.filename)
self.printer.writeline("_template_uri=%r" % self.compiler.uri)
self.printer.writeline(
"_template_cache=cache.Cache(__name__, _modified_time)")
self.printer.writeline(
"_source_encoding=%r" % self.compiler.source_encoding)
if self.compiler.imports:
buf = ''
for imp in self.compiler.imports:
buf += imp + "\n"
self.printer.writeline(imp)
impcode = ast.PythonCode(
buf,
source='', lineno=0,
pos=0,
filename='template defined imports')
else:
impcode = None
main_identifiers = module_identifiers.branch(self.node)
module_identifiers.topleveldefs = \
module_identifiers.topleveldefs.\
union(main_identifiers.topleveldefs)
module_identifiers.declared.add("UNDEFINED")
if impcode:
module_identifiers.declared.update(impcode.declared_identifiers)
self.compiler.identifiers = module_identifiers
self.printer.writeline("_exports = %r" %
[n.name for n in
list(main_identifiers.topleveldefs.values())]
)
self.printer.write("\n\n")
if len(module_code):
self.write_module_code(module_code)
if len(inherit):
self.write_namespaces(namespaces)
self.write_inherit(inherit[-1])
elif len(namespaces):
self.write_namespaces(namespaces)
return list(main_identifiers.topleveldefs.values())
def write_render_callable(self, node, name, args, buffered, filtered, cached):
"""write a top-level render callable.
this could be the main render() method or that of a top-level def."""
if self.in_def:
decorator = node.decorator
if decorator:
self.printer.writeline("@runtime._decorate_toplevel(%s)" % decorator)
self.printer.writelines(
"def %s(%s):" % (name, ','.join(args)),
"context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writeline("context._push_buffer()")
self.identifier_stack.append(self.compiler.identifiers.branch(self.node))
if (not self.in_def or self.node.is_block) and '**pageargs' in args:
self.identifier_stack[-1].argument_declared.add('pageargs')
if not self.in_def and (
len(self.identifiers.locally_assigned) > 0 or
len(self.identifiers.argument_declared) > 0
):
self.printer.writeline("__M_locals = __M_dict_builtin(%s)" %
','.join([
"%s=%s" % (x, x) for x in
self.identifiers.argument_declared
]))
self.write_variable_declares(self.identifiers, toplevel=True)
for n in self.node.nodes:
n.accept_visitor(self)
self.write_def_finish(self.node, buffered, filtered, cached)
self.printer.writeline(None)
self.printer.write("\n\n")
if cached:
self.write_cache_decorator(
node, name,
args, buffered,
self.identifiers, toplevel=True)
def write_module_code(self, module_code):
"""write module-level template code, i.e. that which
is enclosed in <%! %> tags in the template."""
for n in module_code:
self.write_source_comment(n)
self.printer.write_indented_block(n.text)
def write_inherit(self, node):
"""write the module-level inheritance-determination callable."""
self.printer.writelines(
"def _mako_inherit(template, context):",
"_mako_generate_namespaces(context)",
"return runtime._inherit_from(context, %s, _template_uri)" %
(node.parsed_attributes['file']),
None
)
def write_namespaces(self, namespaces):
"""write the module-level namespace-generating callable."""
self.printer.writelines(
"def _mako_get_namespace(context, name):",
"try:",
"return context.namespaces[(__name__, name)]",
"except KeyError:",
"_mako_generate_namespaces(context)",
"return context.namespaces[(__name__, name)]",
None, None
)
self.printer.writeline("def _mako_generate_namespaces(context):")
for node in list(namespaces.values()):
if 'import' in node.attributes:
self.compiler.has_ns_imports = True
self.write_source_comment(node)
if len(node.nodes):
self.printer.writeline("def make_namespace():")
export = []
identifiers = self.compiler.identifiers.branch(node)
self.in_def = True
class NSDefVisitor(object):
def visitDefTag(s, node):
s.visitDefOrBase(node)
def visitBlockTag(s, node):
s.visitDefOrBase(node)
def visitDefOrBase(s, node):
if node.is_anonymous:
raise exceptions.CompileException(
"Can't put anonymous blocks inside <%namespace>",
**node.exception_kwargs
)
self.write_inline_def(node, identifiers, nested=False)
export.append(node.funcname)
vis = NSDefVisitor()
for n in node.nodes:
n.accept_visitor(vis)
self.printer.writeline("return [%s]" % (','.join(export)))
self.printer.writeline(None)
self.in_def = False
callable_name = "make_namespace()"
else:
callable_name = "None"
if 'file' in node.parsed_attributes:
self.printer.writeline(
"ns = runtime.TemplateNamespace(%r, context._clean_inheritance_tokens(),"
" templateuri=%s, callables=%s, calling_uri=_template_uri)" %
(
node.name,
node.parsed_attributes.get('file', 'None'),
callable_name,
)
)
elif 'module' in node.parsed_attributes:
self.printer.writeline(
"ns = runtime.ModuleNamespace(%r, context._clean_inheritance_tokens(),"
" callables=%s, calling_uri=_template_uri, module=%s)" %
(
node.name,
callable_name,
node.parsed_attributes.get('module', 'None')
)
)
else:
self.printer.writeline(
"ns = runtime.Namespace(%r, context._clean_inheritance_tokens(),"
" callables=%s, calling_uri=_template_uri)" %
(
node.name,
callable_name,
)
)
if eval(node.attributes.get('inheritable', "False")):
self.printer.writeline("context['self'].%s = ns" % (node.name))
self.printer.writeline("context.namespaces[(__name__, %s)] = ns" % repr(node.name))
self.printer.write("\n")
if not len(namespaces):
self.printer.writeline("pass")
self.printer.writeline(None)
def write_variable_declares(self, identifiers, toplevel=False, limit=None):
"""write variable declarations at the top of a function.
the variable declarations are in the form of callable
definitions for defs and/or name lookup within the
function's context argument. the names declared are based
on the names that are referenced in the function body,
which don't otherwise have any explicit assignment
operation. names that are assigned within the body are
assumed to be locally-scoped variables and are not
separately declared.
for def callable definitions, if the def is a top-level
callable then a 'stub' callable is generated which wraps
the current Context into a closure. if the def is not
top-level, it is fully rendered as a local closure.
"""
# collection of all defs available to us in this scope
comp_idents = dict([(c.funcname, c) for c in identifiers.defs])
to_write = set()
# write "context.get()" for all variables we are going to
# need that aren't in the namespace yet
to_write = to_write.union(identifiers.undeclared)
# write closure functions for closures that we define
# right here
to_write = to_write.union([c.funcname for c in list(identifiers.closuredefs.values())])
# remove identifiers that are declared in the argument
# signature of the callable
to_write = to_write.difference(identifiers.argument_declared)
# remove identifiers that we are going to assign to.
# in this way we mimic Python's behavior,
# i.e. assignment to a variable within a block
# means that variable is now a "locally declared" var,
# which cannot be referenced beforehand.
to_write = to_write.difference(identifiers.locally_declared)
# if a limiting set was passed, constrain to those items
# (this is used for the caching decorator)
if limit is not None:
to_write = to_write.intersection(limit)
if toplevel and getattr(self.compiler, 'has_ns_imports', False):
self.printer.writeline("_import_ns = {}")
self.compiler.has_imports = True
for ident, ns in self.compiler.namespaces.items():
if 'import' in ns.attributes:
self.printer.writeline(
"_mako_get_namespace(context, %r)._populate(_import_ns, %r)" %
(
ident,
re.split(r'\s*,\s*', ns.attributes['import'])
))
for ident in to_write:
if ident in comp_idents:
comp = comp_idents[ident]
if comp.is_block:
if not comp.is_anonymous:
self.write_def_decl(comp, identifiers)
else:
self.write_inline_def(comp, identifiers, nested=True)
else:
if comp.is_root():
self.write_def_decl(comp, identifiers)
else:
self.write_inline_def(comp, identifiers, nested=True)
elif ident in self.compiler.namespaces:
self.printer.writeline(
"%s = _mako_get_namespace(context, %r)" %
(ident, ident)
)
else:
if getattr(self.compiler, 'has_ns_imports', False):
if self.compiler.strict_undefined:
self.printer.writelines(
"%s = _import_ns.get(%r, UNDEFINED)" %
(ident, ident),
"if %s is UNDEFINED:" % ident,
"try:",
"%s = context[%r]" % (ident, ident),
"except KeyError:",
"raise NameError(\"'%s' is not defined\")" %
ident,
None, None
)
else:
self.printer.writeline(
"%s = _import_ns.get(%r, context.get(%r, UNDEFINED))" %
(ident, ident, ident))
else:
if self.compiler.strict_undefined:
self.printer.writelines(
"try:",
"%s = context[%r]" % (ident, ident),
"except KeyError:",
"raise NameError(\"'%s' is not defined\")" %
ident,
None
)
else:
self.printer.writeline(
"%s = context.get(%r, UNDEFINED)" % (ident, ident)
)
self.printer.writeline("__M_writer = context.writer()")
def write_source_comment(self, node):
"""write a source comment containing the line number of the corresponding template line."""
if self.last_source_line != node.lineno:
self.printer.writeline("# SOURCE LINE %d" % node.lineno)
self.last_source_line = node.lineno
def write_def_decl(self, node, identifiers):
"""write a locally-available callable referencing a top-level def"""
funcname = node.funcname
namedecls = node.get_argument_expressions()
nameargs = node.get_argument_expressions(include_defaults=False)
if not self.in_def and (
len(self.identifiers.locally_assigned) > 0 or
len(self.identifiers.argument_declared) > 0):
nameargs.insert(0, 'context.locals_(__M_locals)')
else:
nameargs.insert(0, 'context')
self.printer.writeline("def %s(%s):" % (funcname, ",".join(namedecls)))
self.printer.writeline("return render_%s(%s)" % (funcname, ",".join(nameargs)))
self.printer.writeline(None)
def write_inline_def(self, node, identifiers, nested):
"""write a locally-available def callable inside an enclosing def."""
namedecls = node.get_argument_expressions()
decorator = node.decorator
if decorator:
self.printer.writeline("@runtime._decorate_inline(context, %s)" % decorator)
self.printer.writeline("def %s(%s):" % (node.funcname, ",".join(namedecls)))
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
self.printer.writelines(
"context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writelines(
"context._push_buffer()",
)
identifiers = identifiers.branch(node, nested=nested)
self.write_variable_declares(identifiers)
self.identifier_stack.append(identifiers)
for n in node.nodes:
n.accept_visitor(self)
self.identifier_stack.pop()
self.write_def_finish(node, buffered, filtered, cached)
self.printer.writeline(None)
if cached:
self.write_cache_decorator(node, node.funcname,
namedecls, False, identifiers,
inline=True, toplevel=False)
def write_def_finish(self, node, buffered, filtered, cached, callstack=True):
"""write the end section of a rendering function, either outermost or inline.
this takes into account if the rendering function was filtered, buffered, etc.
and closes the corresponding try: block if any, and writes code to retrieve
captured content, apply filters, send proper return value."""
if not buffered and not cached and not filtered:
self.printer.writeline("return ''")
if callstack:
self.printer.writelines(
"finally:",
"context.caller_stack._pop_frame()",
None
)
if buffered or filtered or cached:
if buffered or cached:
# in a caching scenario, don't try to get a writer
# from the context after popping; assume the caching
# implementation might be using a context with no
# extra buffers
self.printer.writelines(
"finally:",
"__M_buf = context._pop_buffer()"
)
else:
self.printer.writelines(
"finally:",
"__M_buf, __M_writer = context._pop_buffer_and_writer()"
)
if callstack:
self.printer.writeline("context.caller_stack._pop_frame()")
s = "__M_buf.getvalue()"
if filtered:
s = self.create_filter_callable(node.filter_args.args, s, False)
self.printer.writeline(None)
if buffered and not cached:
s = self.create_filter_callable(self.compiler.buffer_filters, s, False)
if buffered or cached:
self.printer.writeline("return %s" % s)
else:
self.printer.writelines(
"__M_writer(%s)" % s,
"return ''"
)
def write_cache_decorator(self, node_or_pagetag, name,
args, buffered, identifiers,
inline=False, toplevel=False):
"""write a post-function decorator to replace a rendering
callable with a cached version of itself."""
self.printer.writeline("__M_%s = %s" % (name, name))
cachekey = node_or_pagetag.parsed_attributes.get('cache_key', repr(name))
cacheargs = {}
for arg in (
('cache_type', 'type'), ('cache_dir', 'data_dir'),
('cache_timeout', 'expiretime'), ('cache_url', 'url')):
val = node_or_pagetag.parsed_attributes.get(arg[0], None)
if val is not None:
if arg[1] == 'expiretime':
cacheargs[arg[1]] = int(eval(val))
else:
cacheargs[arg[1]] = val
else:
if self.compiler.pagetag is not None:
val = self.compiler.pagetag.parsed_attributes.get(arg[0], None)
if val is not None:
if arg[1] == 'expiretime':
cacheargs[arg[1]] = int(eval(val))
else:
cacheargs[arg[1]] = val
self.printer.writeline("def %s(%s):" % (name, ','.join(args)))
# form "arg1, arg2, arg3=arg3, arg4=arg4", etc.
pass_args = [
'=' in a and "%s=%s" % ((a.split('=')[0],)*2) or a
for a in args
]
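# Illustration (added commentary): for args like ['context', 'x', 'y=None'],
# pass_args becomes ['context', 'x', 'y=y'], so the cached wrapper forwards
# each defaulted argument by keyword.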
self.write_variable_declares(
identifiers,
toplevel=toplevel,
limit=node_or_pagetag.undeclared_identifiers()
)
if buffered:
s = "context.get('local')."\
"get_cached(%s, defname=%r, %screatefunc=lambda:__M_%s(%s))" % \
(cachekey, name,
''.join(["%s=%s, " % (k,v) for k, v in cacheargs.items()]),
name, ','.join(pass_args))
# apply buffer_filters
s = self.create_filter_callable(self.compiler.buffer_filters, s, False)
self.printer.writelines("return " + s,None)
else:
self.printer.writelines(
"__M_writer(context.get('local')."
"get_cached(%s, defname=%r, %screatefunc=lambda:__M_%s(%s)))" %
(cachekey, name,
''.join(["%s=%s, " % (k,v) for k, v in cacheargs.items()]),
name, ','.join(pass_args)),
"return ''",
None
)
def create_filter_callable(self, args, target, is_expression):
"""write a filter-applying expression based on the filters
present in the given filter names, adjusting for the global
'default' filter aliases as needed."""
def locate_encode(name):
if re.match(r'decode\..+', name):
return "filters." + name
elif self.compiler.disable_unicode:
return filters.NON_UNICODE_ESCAPES.get(name, name)
else:
return filters.DEFAULT_ESCAPES.get(name, name)
if 'n' not in args:
if is_expression:
if self.compiler.pagetag:
args = self.compiler.pagetag.filter_args.args + args
if self.compiler.default_filters:
args = self.compiler.default_filters + args
for e in args:
# if filter given as a function, get just the identifier portion
if e == 'n':
continue
m = re.match(r'(.+?)(\(.*\))', e)
if m:
(ident, fargs) = m.group(1,2)
f = locate_encode(ident)
e = f + fargs
else:
e = locate_encode(e)
assert e is not None
target = "%s(%s)" % (e, target)
return target
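# Illustration (added commentary): for an expression like ${x | trim, h}, with
# default_filters=['unicode'], the loop above builds the nested call string
#   filters.html_escape(trim(unicode(x)))
# (defaults innermost, escape aliases resolved via locate_encode), which
# visitExpression() then hands to __M_writer.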
def visitExpression(self, node):
self.write_source_comment(node)
if len(node.escapes) or \
(
self.compiler.pagetag is not None and
len(self.compiler.pagetag.filter_args.args)
) or \
len(self.compiler.default_filters):
s = self.create_filter_callable(node.escapes_code.args, "%s" % node.text, True)
self.printer.writeline("__M_writer(%s)" % s)
else:
self.printer.writeline("__M_writer(%s)" % node.text)
def visitControlLine(self, node):
if node.isend:
if not node.get_children():
self.printer.writeline("pass")
self.printer.writeline(None)
else:
self.write_source_comment(node)
self.printer.writeline(node.text)
def visitText(self, node):
self.write_source_comment(node)
self.printer.writeline("__M_writer(%s)" % repr(node.content))
def visitTextTag(self, node):
filtered = len(node.filter_args.args) > 0
if filtered:
self.printer.writelines(
"__M_writer = context._push_writer()",
"try:",
)
for n in node.nodes:
n.accept_visitor(self)
if filtered:
self.printer.writelines(
"finally:",
"__M_buf, __M_writer = context._pop_buffer_and_writer()",
"__M_writer(%s)" %
self.create_filter_callable(
node.filter_args.args,
"__M_buf.getvalue()",
False),
None
)
def visitCode(self, node):
if not node.ismodule:
self.write_source_comment(node)
self.printer.write_indented_block(node.text)
if not self.in_def and len(self.identifiers.locally_assigned) > 0:
# if we are the "template" def, fudge locally
# declared/modified variables into the "__M_locals" dictionary,
# which is used for def calls within the same template,
# to simulate "enclosing scope"
self.printer.writeline('__M_locals_builtin_stored = __M_locals_builtin()')
self.printer.writeline(
'__M_locals.update(__M_dict_builtin([(__M_key,'
' __M_locals_builtin_stored[__M_key]) for '
'__M_key in [%s] if __M_key in __M_locals_builtin_stored]))' %
','.join([repr(x) for x in node.declared_identifiers()]))
def visitIncludeTag(self, node):
self.write_source_comment(node)
args = node.attributes.get('args')
if args:
self.printer.writeline(
"runtime._include_file(context, %s, _template_uri, %s)" %
(node.parsed_attributes['file'], args))
else:
self.printer.writeline(
"runtime._include_file(context, %s, _template_uri)" %
(node.parsed_attributes['file']))
def visitNamespaceTag(self, node):
pass
def visitDefTag(self, node):
pass
def visitBlockTag(self, node):
if node.is_anonymous:
self.printer.writeline("%s()" % node.funcname)
else:
nameargs = node.get_argument_expressions(include_defaults=False)
nameargs += ['**pageargs']
self.printer.writeline("if 'parent' not in context._data or "
"not hasattr(context._data['parent'], '%s'):"
% node.funcname)
self.printer.writeline("context['self'].%s(%s)" % (node.funcname, ",".join(nameargs)))
self.printer.writeline("\n")
def visitCallNamespaceTag(self, node):
# TODO: we can put namespace-specific checks here, such
# as ensure the given namespace will be imported,
# pre-import the namespace, etc.
self.visitCallTag(node)
def visitCallTag(self, node):
self.printer.writeline("def ccall(caller):")
export = ['body']
callable_identifiers = self.identifiers.branch(node, nested=True)
body_identifiers = callable_identifiers.branch(node, nested=False)
# we want the 'caller' passed to ccall to be used
# for the body() function, but for other non-body()
# <%def>s within <%call> we want the current caller
# off the call stack (if any)
body_identifiers.add_declared('caller')
self.identifier_stack.append(body_identifiers)
class DefVisitor(object):
def visitDefTag(s, node):
s.visitDefOrBase(node)
def visitBlockTag(s, node):
s.visitDefOrBase(node)
def visitDefOrBase(s, node):
self.write_inline_def(node, callable_identifiers, nested=False)
if not node.is_anonymous:
export.append(node.funcname)
# remove defs that are within the <%call> from the "closuredefs" defined
# in the body, so they don't render twice
if node.funcname in body_identifiers.closuredefs:
del body_identifiers.closuredefs[node.funcname]
vis = DefVisitor()
for n in node.nodes:
n.accept_visitor(vis)
self.identifier_stack.pop()
bodyargs = node.body_decl.get_argument_expressions()
self.printer.writeline("def body(%s):" % ','.join(bodyargs))
# TODO: figure out best way to specify
# buffering/nonbuffering (at call time would be better)
buffered = False
if buffered:
self.printer.writelines(
"context._push_buffer()",
"try:"
)
self.write_variable_declares(body_identifiers)
self.identifier_stack.append(body_identifiers)
for n in node.nodes:
n.accept_visitor(self)
self.identifier_stack.pop()
self.write_def_finish(node, buffered, False, False, callstack=False)
self.printer.writelines(
None,
"return [%s]" % (','.join(export)),
None
)
self.printer.writelines(
# get local reference to current caller, if any
"__M_caller = context.caller_stack._get_caller()",
# push on caller for nested call
"context.caller_stack.nextcaller = "
"runtime.Namespace('caller', context, callables=ccall(__M_caller))",
"try:")
self.write_source_comment(node)
self.printer.writelines(
"__M_writer(%s)" % self.create_filter_callable([], node.expression, True),
"finally:",
"context.caller_stack.nextcaller = None",
None
)
class _Identifiers(object):
"""tracks the status of identifier names as template code is rendered."""
def __init__(self, node=None, parent=None, nested=False):
if parent is not None:
# if we are the branch created in write_namespaces(),
# we don't share any context from the main body().
if isinstance(node, parsetree.NamespaceTag):
self.declared = set()
self.topleveldefs = util.SetLikeDict()
else:
# things that have already been declared
# in an enclosing namespace (i.e. names we can just use)
self.declared = set(parent.declared).\
union([c.name for c in list(parent.closuredefs.values())]).\
union(parent.locally_declared).\
union(parent.argument_declared)
# if these identifiers correspond to a "nested"
# scope, it means whatever the parent identifiers
# had as undeclared will have been declared by that parent,
# and therefore we have them in our scope.
if nested:
self.declared = self.declared.union(parent.undeclared)
# top level defs that are available
self.topleveldefs = util.SetLikeDict(**parent.topleveldefs)
else:
self.declared = set()
self.topleveldefs = util.SetLikeDict()
# things within this level that are referenced before they
# are declared (e.g. assigned to)
self.undeclared = set()
# things that are declared locally. some of these things
# could be in the "undeclared" list as well if they are
# referenced before declared
self.locally_declared = set()
# assignments made in explicit python blocks.
# these will be propagated to
# the context of local def calls.
self.locally_assigned = set()
# things that are declared in the argument
# signature of the def callable
self.argument_declared = set()
# closure defs that are defined in this level
self.closuredefs = util.SetLikeDict()
self.node = node
if node is not None:
node.accept_visitor(self)
def branch(self, node, **kwargs):
"""create a new Identifiers for a new Node, with
this Identifiers as the parent."""
return _Identifiers(node, self, **kwargs)
@property
def defs(self):
return set(self.topleveldefs.union(self.closuredefs).values())
def __repr__(self):
return "Identifiers(declared=%r, locally_declared=%r, "\
"undeclared=%r, topleveldefs=%r, closuredefs=%r, argumentdeclared=%r)" %\
(
list(self.declared),
list(self.locally_declared),
list(self.undeclared),
[c.name for c in list(self.topleveldefs.values())],
[c.name for c in list(self.closuredefs.values())],
self.argument_declared)
def check_declared(self, node):
"""update the state of this Identifiers with the undeclared
and declared identifiers of the given node."""
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
for ident in node.declared_identifiers():
self.locally_declared.add(ident)
def add_declared(self, ident):
self.declared.add(ident)
if ident in self.undeclared:
self.undeclared.remove(ident)
def visitExpression(self, node):
self.check_declared(node)
def visitControlLine(self, node):
self.check_declared(node)
def visitCode(self, node):
if not node.ismodule:
self.check_declared(node)
self.locally_assigned = self.locally_assigned.union(node.declared_identifiers())
def visitNamespaceTag(self, node):
# only traverse into the sub-elements of a
# <%namespace> tag if we are the branch created in
# write_namespaces()
if self.node is node:
for n in node.nodes:
n.accept_visitor(self)
def _check_name_exists(self, collection, node):
existing = collection.get(node.funcname)
collection[node.funcname] = node
if existing is not None and \
existing is not node and \
(node.is_block or existing.is_block):
raise exceptions.CompileException(
"%%def or %%block named '%s' already "
"exists in this template." %
node.funcname, **node.exception_kwargs)
def visitDefTag(self, node):
if node.is_root() and not node.is_anonymous:
self._check_name_exists(self.topleveldefs, node)
elif node is not self.node:
self._check_name_exists(self.closuredefs, node)
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
# visit defs only one level deep
if node is self.node:
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
for n in node.nodes:
n.accept_visitor(self)
def visitBlockTag(self, node):
if node is not self.node and \
not node.is_anonymous:
if isinstance(self.node, parsetree.DefTag):
raise exceptions.CompileException(
"Named block '%s' not allowed inside of def '%s'"
% (node.name, self.node.name), **node.exception_kwargs)
elif isinstance(self.node, (parsetree.CallTag, parsetree.CallNamespaceTag)):
raise exceptions.CompileException(
"Named block '%s' not allowed inside of <%%call> tag"
% (node.name, ), **node.exception_kwargs)
if not node.is_anonymous:
self._check_name_exists(self.topleveldefs, node)
self.undeclared.add(node.funcname)
elif node is not self.node:
self._check_name_exists(self.closuredefs, node)
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
for n in node.nodes:
n.accept_visitor(self)
def visitIncludeTag(self, node):
self.check_declared(node)
def visitPageTag(self, node):
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
self.check_declared(node)
def visitCallNamespaceTag(self, node):
self.visitCallTag(node)
def visitCallTag(self, node):
if node is self.node:
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
for ident in node.declared_identifiers():
self.argument_declared.add(ident)
for n in node.nodes:
n.accept_visitor(self)
else:
for ident in node.undeclared_identifiers():
if ident != 'context' and ident not in self.declared.union(self.locally_declared):
self.undeclared.add(ident)
|
sfstpala/Victory-Chat
|
mako/codegen.py
|
Python
|
isc
| 43,410
|
[
"VisIt"
] |
eb0c0b6bdc257761e2c42e38c5a44d9f9bcab2882fae124fe4e0c4a164431577
|
""" Test Encoding function of DIRAC
It contains tests for DISET and JSON.
Some tests pass with both, while others pass with only one.
For example, JSON cannot serialize tuples or integers as dictionary keys.
On the other hand, it can serialize some objects that DISET cannot.
"""
from string import printable
import datetime
import sys
from DIRAC.Core.Utilities.DEncode import encode as disetEncode, decode as disetDecode, g_dEncodeFunctions
from DIRAC.Core.Utilities.JEncode import encode as jsonEncode, decode as jsonDecode, JSerializable
from hypothesis import given
from hypothesis.strategies import integers, lists, recursive, floats, text,\
booleans, none, dictionaries, tuples
from hypothesis.searchstrategy.datetime import DatetimeStrategy
from pytest import mark, approx, raises
parametrize = mark.parametrize
# List of couple (encoding, decoding) functions
# In order to test a new library, import the encode/decode
# function, and add the tuple here
disetTuple = (disetEncode, disetDecode)
jsonTuple = (jsonEncode, jsonDecode)
enc_dec_imp = (disetTuple, jsonTuple)
# We define a custom datetime strategy in order
# to pull dates after 1900 (a limitation of strftime)
# and without microseconds
class myDateTimeSearchStrategy(DatetimeStrategy):
""" Class to draw datetime without microseconds"""
def do_draw(self, *args, **kwargs):
""" Just draw from the parent class and replace microseconds with 0 """
return super(myDateTimeSearchStrategy, self).do_draw(*args, **kwargs).replace(microsecond=0)
def myDatetimes():
""" Convenience 'constructor' like hypothesis datetimes().
Only pulls dates after 1900.
"""
return myDateTimeSearchStrategy(datetime.datetime(
1900, 1, 1, 0, 0), datetime.datetime.max, none())
# These initial strategies are the basic types supported by the original dEncode.
# Unfortunately we cannot put floats in nested structures: floats are not stable
# under encoding, so the result is only approximate and becomes extremely
# difficult to compare.
# Datetimes also start only at 1900 because earlier dates can't be dumped with strftime.
initialStrategies = none() | booleans() | text() | integers() | myDatetimes()
initialJsonStrategies = none() | booleans() | text() | myDatetimes()
# From a strategy (x), make a new strategy
# We basically use that to make nested structures
# see http://hypothesis.readthedocs.io/en/latest/data.html#recursive-data
nestedStrategy = recursive(
initialStrategies,
lambda x: lists(x) | dictionaries(
text(),
x) | tuples(x))
# This strategy does not return tuples
nestedStrategyJson = recursive(
initialJsonStrategies,
lambda x: lists(x) | dictionaries(
text(),
x))
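# Illustration (added commentary, values are made up): nestedStrategy can draw
# arbitrarily nested values such as
#   {u'a': [1, (True, None)], u'b': u'x'}
# while nestedStrategyJson draws the same shapes minus tuples (and floats),
# matching what JSON can round-trip.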
def test_everyBaseTypeIsTested():
""" Make sure that each supported base type in the original
DEncode module is tested here.
We rely on the fact that the test function will be called
"test_BaseType"
"""
current_module = sys.modules[__name__]
for encodeFunc in g_dEncodeFunctions.itervalues():
testFuncName = ('test_BaseType_%s' % encodeFunc.__name__).replace('encode', '')
getattr(current_module, testFuncName)
def agnosticTestFunction(enc_dec, data):
""" Function called by all the other to test that
decode(encode) returns the original data
:param enc_dec: tuple of function (encoding, decoding)
:param data: data to be worked on
"""
encode, decode = enc_dec
encodedData = encode(data)
decodedData, lenData = decode(encodedData)
assert data == decodedData
assert lenData == len(encodedData)
return decodedData
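# Illustration (added commentary): written out for one implementation, the
# round trip asserted above is simply
#   blob = disetEncode({'a': [1, None]})
#   value, consumed = disetDecode(blob)
#   assert value == {'a': [1, None]} and consumed == len(blob)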
@parametrize('enc_dec', enc_dec_imp)
@given(data=booleans())
def test_BaseType_Bool(enc_dec, data):
""" Test for boolean"""
agnosticTestFunction(enc_dec, data)
@parametrize('enc_dec', enc_dec_imp)
@given(data=myDatetimes())
def test_BaseType_DateTime(enc_dec, data):
""" Test for data time"""
agnosticTestFunction(enc_dec, data)
# Json does not serialize keys as integers but as string
@parametrize('enc_dec', [disetTuple])
@given(data=dictionaries(integers(), integers()))
def test_BaseType_Dict(enc_dec, data):
""" Test for basic dict"""
agnosticTestFunction(enc_dec, data)
@parametrize('enc_dec', enc_dec_imp)
@given(data=integers(max_value=sys.maxsize))
def test_BaseType_Int(enc_dec, data):
""" Test for integer"""
agnosticTestFunction(enc_dec, data)
# CAUTION: DEncode is not precise for floats !!
@parametrize('enc_dec', enc_dec_imp)
@given(data=floats(allow_nan=False))
def test_BaseType_Float(enc_dec, data):
""" Test that float is approximatly stable"""
encode, decode = enc_dec
encodedData = encode(data)
decodedData, lenData = decode(encodedData)
assert data == approx(decodedData)
assert lenData == len(encodedData)
@parametrize('enc_dec', enc_dec_imp)
@given(data=lists(integers()))
def test_BaseType_List(enc_dec, data):
""" Test for List """
agnosticTestFunction(enc_dec, data)
@parametrize('enc_dec', enc_dec_imp)
@given(data=integers(min_value=sys.maxsize + 1))
def test_BaseType_Long(enc_dec, data):
""" Test long type"""
agnosticTestFunction(enc_dec, data)
@parametrize('enc_dec', enc_dec_imp)
def test_BaseType_None(enc_dec, ):
""" Test None case """
agnosticTestFunction(enc_dec, None)
@parametrize('enc_dec', enc_dec_imp)
@given(data=text(printable))
def test_BaseType_String(enc_dec, data):
""" Test basic strings"""
# we need to cast to str because text() returns unicode
data = str(data)
agnosticTestFunction(enc_dec, data)
# Tuple are not serialized in JSON
@parametrize('enc_dec', [disetTuple])
@given(data=tuples(integers()))
def test_BaseType_Tuple(enc_dec, data):
""" Test basic tuple """
agnosticTestFunction(enc_dec, data)
@parametrize('enc_dec', enc_dec_imp)
@given(data=text())
def test_BaseType_Unicode(enc_dec, data):
""" Test unicode data """
agnosticTestFunction(enc_dec, data)
# Json will not pass this because of tuples and integers as dict keys
@parametrize('enc_dec', [disetTuple])
@given(data=nestedStrategy)
def test_nestedStructure(enc_dec, data):
""" Test nested structure """
agnosticTestFunction(enc_dec, data)
# DEncode raises KeyError.....
# Others raise TypeError
@parametrize('enc_dec', enc_dec_imp)
def test_NonSerializable(enc_dec):
""" Test that a class that does not inherit from the serializable class
raises TypeError
"""
class NonSerializable(object):
""" Dummy class not serializable"""
pass
data = NonSerializable()
with raises((TypeError, KeyError)):
agnosticTestFunction(enc_dec, data)
class Serializable(JSerializable):
""" Dummy class inheriting from JSerializable"""
_attrToSerialize = ['instAttr']
def __init__(self, instAttr=None):
self.instAttr = instAttr
def __eq__(self, other):
return all([getattr(self, attr) == getattr(other, attr) for attr in self._attrToSerialize])
@given(data=nestedStrategyJson)
def test_Serializable(data):
""" Test if a simple serializable class with one random argument
can be serialized
"""
objData = Serializable(instAttr=data)
agnosticTestFunction(jsonTuple, objData)
def test_nonDeclaredAttr():
""" Tests that an argument not in the list of arguments to serialized
is not serialized
"""
objData = Serializable()
objData.notToBeSerialized = 1
encodedData = jsonEncode(objData)
decodedData, _lenData = jsonDecode(encodedData)
assert not hasattr(decodedData, 'notToBeSerialized')
class BadSerializable(JSerializable):
""" Missing _attrToSerialize attribute """
pass
def test_missingAttrToSerialize():
""" Tests that an argument not in the list of arguments to serialized
is not serialized
"""
objData = BadSerializable()
with raises(TypeError):
agnosticTestFunction(jsonTuple, objData)
@given(data=nestedStrategyJson)
def test_nestedSerializable(data):
""" Test that a serializable containing a serializable class
can be serialized
"""
subObj = Serializable(instAttr=data)
objData = Serializable(instAttr=subObj)
agnosticTestFunction(jsonTuple, objData)
|
arrabito/DIRAC
|
Core/Utilities/test/Test_Encode.py
|
Python
|
gpl-3.0
| 8,118
|
[
"DIRAC"
] |
b82ec2e27a9af2546405e6894e3d5e47fc5b0e6c7b7855204de4a7e009f2a3c5
|
""" :mod: DMSRequestOperationsBase
==============================
Just a collector of common functions
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id $"
from DIRAC import S_OK, S_ERROR
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.RequestManagementSystem.private.OperationHandlerBase import OperationHandlerBase
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
class DMSRequestOperationsBase(OperationHandlerBase):
def __init__(self, operation=None, csPath=None):
OperationHandlerBase.__init__(self, operation, csPath)
self.registrationProtocols = DMSHelpers().getRegistrationProtocols()
def checkSEsRSS(self, checkSEs=None, access='WriteAccess', failIfBanned=True):
""" check SEs.
By default, we check the SEs for WriteAccess, but it is configurable
"""
if not checkSEs:
checkSEs = self.operation.targetSEList
elif isinstance(checkSEs, str):
checkSEs = [checkSEs]
if access == 'ReadAccess':
seType = 'sourceSE'
else:
seType = 'targetSE'
bannedSEs = []
for checkSE in checkSEs:
seStatus = self.rssSEStatus(checkSE, access, retries=5)
if not seStatus["OK"]:
self.log.error('Failed to get SE status', seStatus["Message"])
error = "unknown %s: %s" % (seType, checkSE)
for opFile in self.operation:
opFile.Error = error
self.operation.Error = error
return S_ERROR(error)
if not seStatus["Value"]:
self.log.info("%s %s is banned for %s right now" % (seType.capitalize(), checkSE, access))
bannedSEs.append(checkSE)
self.operation.Error = "banned %s: %s;" % (seType, checkSE)
if bannedSEs:
alwaysBannedSEs = []
for seName in bannedSEs:
res = self.rssClient().isStorageElementAlwaysBanned(seName, access)
if not res['OK']:
continue
# The SE will always be banned
if res['Value']:
alwaysBannedSEs.append(seName)
# If Some SE are always banned, we fail the request
if alwaysBannedSEs:
self.operation.Error = "%s always banned" % alwaysBannedSEs
if failIfBanned:
self.log.info("Some storages are always banned, failing the request", alwaysBannedSEs)
for opFile in self.operation:
opFile.Error = "%s always banned" % alwaysBannedSEs
opFile.Status = "Failed"
# If it is temporary, we wait an hour
else:
self.log.info("Banning is temporary, next attempt in an hour")
self.operation.Error = "%s currently banned" % bannedSEs
self.request.delayNextExecution(60)
return S_OK(bannedSEs)
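# Hedged usage sketch (added commentary, not from the original module): a
# typical handler calls this before acting on its files, e.g.
#   bannedTargets = self.checkSEsRSS()
#   if not bannedTargets['OK']:
#       return bannedTargets            # unknown SE: fail the operation
#   if bannedTargets['Value']:
#       return S_OK()                   # temporarily banned: retry later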
def getRegisterOperation(self, opFile, targetSE, type='RegisterFile', catalog=None):
""" add RegisterReplica operation for file
:param ~DIRAC.RequestManagementSystem.Client.File.File opFile: operation file
:param str targetSE: target SE
"""
# # add RegisterReplica operation
registerOperation = Operation()
registerOperation.Type = type
registerOperation.TargetSE = targetSE
if catalog:
registerOperation.Catalog = catalog
registerFile = File()
registerFile.LFN = opFile.LFN
registerFile.PFN = StorageElement(targetSE).getURL(
opFile.LFN,
protocol=self.registrationProtocols).get(
'Value',
{}).get(
'Successful',
{}).get(
opFile.LFN)
registerFile.GUID = opFile.GUID
registerFile.Checksum = opFile.Checksum
registerFile.ChecksumType = opFile.ChecksumType
registerFile.Size = opFile.Size
registerOperation.addFile(registerFile)
return registerOperation
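# Hedged usage sketch (added commentary, not part of the original module): a
# handler that has just copied opFile to a target SE might queue the catalog
# registration right after its own operation. 'SOME-TARGET-SE' is a placeholder
# and Request.insertAfter is assumed from the RequestManagementSystem client:
#
#   registerOperation = self.getRegisterOperation(opFile, 'SOME-TARGET-SE',
#                                                 type='RegisterReplica')
#   self.request.insertAfter(registerOperation, self.operation)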
|
yujikato/DIRAC
|
src/DIRAC/DataManagementSystem/Agent/RequestOperations/DMSRequestOperationsBase.py
|
Python
|
gpl-3.0
| 3,888
|
[
"DIRAC"
] |
57ae4a8e38aba21bc38eb561bdcd54d37287d9db81f165022592b0d7e806ec84
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Functional tests using WebTest."""
import datetime as dt
import httplib as http
import logging
import unittest
import markupsafe
import mock
from nose.tools import * # flake8: noqa (PEP8 asserts)
import re
from addons.wiki.utils import to_mongo_key
from framework.auth import exceptions as auth_exc
from framework.auth.core import Auth
from tests.base import OsfTestCase
from tests.base import fake
from osf_tests.factories import (
fake_email,
AuthUserFactory,
NodeFactory,
PreprintFactory,
PreprintProviderFactory,
PrivateLinkFactory,
ProjectFactory,
RegistrationFactory,
SubjectFactory,
UserFactory,
UnconfirmedUserFactory,
UnregUserFactory,
)
from addons.wiki.tests.factories import NodeWikiFactory
from website import settings, language
from addons.osfstorage.models import OsfStorageFile
from website.util import web_url_for, api_url_for
from api_tests import utils as test_utils
logging.getLogger('website.project.model').setLevel(logging.ERROR)
def assert_in_html(member, container, **kwargs):
"""Looks for the specified member in markupsafe-escaped HTML output"""
member = markupsafe.escape(member)
return assert_in(member, container, **kwargs)
def assert_not_in_html(member, container, **kwargs):
"""Looks for the specified member in markupsafe-escaped HTML output"""
member = markupsafe.escape(member)
return assert_not_in(member, container, **kwargs)
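# Illustration (added commentary): these helpers exist because response bodies
# contain escaped HTML, e.g. markupsafe.escape("tess' test") returns
# Markup(u'tess&#39; test'), so a raw assert_in on the unescaped string would
# miss it.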
class TestDisabledUser(OsfTestCase):
def setUp(self):
super(TestDisabledUser, self).setUp()
self.user = UserFactory()
self.user.set_password('Korben Dallas')
self.user.is_disabled = True
self.user.save()
def test_profile_disabled_returns_410(self):
res = self.app.get(self.user.url, expect_errors=True)
assert_equal(res.status_code, 410)
class TestAnUnregisteredUser(OsfTestCase):
def test_cant_see_profile_if_not_logged_in(self):
url = web_url_for('profile_view')
res = self.app.get(url)
res = res.follow()
assert_equal(res.status_code, 301)
assert_in('/login/', res.headers['Location'])
class TestAUser(OsfTestCase):
def setUp(self):
super(TestAUser, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
def test_can_see_profile_url(self):
res = self.app.get(self.user.url).maybe_follow()
assert_in(self.user.url, res)
def test_can_see_homepage(self):
# Goes to homepage
res = self.app.get('/').maybe_follow() # Redirects
assert_equal(res.status_code, 200)
# `GET /login/` without parameters is redirected to `/dashboard/` page which has `@must_be_logged_in` decorator
# if user is not logged in, she/he is further redirected to CAS login page
def test_is_redirected_to_cas_if_not_logged_in_at_login_page(self):
res = self.app.get('/login/').follow()
assert_equal(res.status_code, 302)
location = res.headers.get('Location')
assert_in('login?service=', location)
def test_is_redirected_to_dashboard_if_already_logged_in_at_login_page(self):
res = self.app.get('/login/', auth=self.user.auth)
assert_equal(res.status_code, 302)
res = res.follow(auth=self.user.auth)
assert_equal(res.request.path, '/dashboard/')
def test_register_page(self):
res = self.app.get('/register/')
assert_equal(res.status_code, 200)
def test_is_redirected_to_dashboard_if_already_logged_in_at_register_page(self):
res = self.app.get('/register/', auth=self.user.auth)
assert_equal(res.status_code, 302)
res = res.follow(auth=self.user.auth)
assert_equal(res.request.path, '/dashboard/')
def test_sees_projects_in_her_dashboard(self):
# the user already has a project
project = ProjectFactory(creator=self.user)
project.add_contributor(self.user)
project.save()
res = self.app.get('/myprojects/', auth=self.user.auth)
assert_in('Projects', res) # Projects heading
def test_logged_in_index_route_renders_home_template(self):
res = self.app.get('/', auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_in('My Projects', res) # Will change once home page populated
def test_logged_out_index_route_renders_landing_page(self):
res = self.app.get('/')
assert_in('Simplified Scholarly Collaboration', res)
def test_does_not_see_osffiles_in_user_addon_settings(self):
res = self.app.get('/settings/addons/', auth=self.auth, auto_follow=True)
assert_not_in('OSF Storage', res)
def test_sees_osffiles_in_project_addon_settings(self):
project = ProjectFactory(creator=self.user)
project.add_contributor(
self.user,
permissions=['read', 'write', 'admin'],
save=True)
res = self.app.get('/{0}/addons/'.format(project._primary_key), auth=self.auth, auto_follow=True)
assert_in('OSF Storage', res)
def test_sees_correct_title_home_page(self):
# User goes to homepage
res = self.app.get('/', auto_follow=True)
title = res.html.title.string
# page title is correct
assert_equal('OSF | Home', title)
def test_sees_correct_title_on_dashboard(self):
# User goes to dashboard
res = self.app.get('/myprojects/', auth=self.auth, auto_follow=True)
title = res.html.title.string
assert_equal('OSF | My Projects', title)
def test_can_see_make_public_button_if_admin(self):
# User is a contributor on a project
project = ProjectFactory()
project.add_contributor(
self.user,
permissions=['read', 'write', 'admin'],
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_in('Make Public', res)
def test_cant_see_make_public_button_if_not_admin(self):
# User is a contributor on a project
project = ProjectFactory()
project.add_contributor(
self.user,
permissions=['read', 'write'],
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_not_in('Make Public', res)
def test_can_see_make_private_button_if_admin(self):
# User is a contributor on a project
project = ProjectFactory(is_public=True)
project.add_contributor(
self.user,
permissions=['read', 'write', 'admin'],
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_in('Make Private', res)
def test_cant_see_make_private_button_if_not_admin(self):
# User is a contributor on a project
project = ProjectFactory(is_public=True)
project.add_contributor(
self.user,
permissions=['read', 'write'],
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_not_in('Make Private', res)
def test_sees_logs_on_a_project(self):
project = ProjectFactory(is_public=True)
# User goes to the project's page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
# Can see log event
assert_in('created', res)
def test_no_wiki_content_message(self):
project = ProjectFactory(creator=self.user)
# Goes to project's wiki, where there is no content
res = self.app.get('/{0}/wiki/home/'.format(project._primary_key), auth=self.auth)
# Sees a message indicating no content
assert_in('Add important information, links, or images here to describe your project.', res)
# Sees that edit panel is open by default when home wiki has no content
assert_in('panelsUsed: ["view", "menu", "edit"]', res)
def test_wiki_content(self):
project = ProjectFactory(creator=self.user)
wiki_page = 'home'
wiki_content = 'Kittens'
NodeWikiFactory(user=self.user, node=project, content=wiki_content, page_name=wiki_page)
res = self.app.get('/{0}/wiki/{1}/'.format(
project._primary_key,
wiki_page,
), auth=self.auth)
assert_not_in('Add important information, links, or images here to describe your project.', res)
assert_in(wiki_content, res)
assert_in('panelsUsed: ["view", "menu"]', res)
def test_wiki_page_name_non_ascii(self):
project = ProjectFactory(creator=self.user)
non_ascii = to_mongo_key('WöRlÐé')
self.app.get('/{0}/wiki/{1}/'.format(
project._primary_key,
non_ascii
), auth=self.auth, expect_errors=True)
project.update_node_wiki(non_ascii, 'new content', Auth(self.user))
assert_in(non_ascii, project.wiki_pages_current)
def test_noncontributor_cannot_see_wiki_if_no_content(self):
user2 = UserFactory()
# user2 creates a public project and adds no wiki content
project = ProjectFactory(creator=user2, is_public=True)
# self navigates to project
res = self.app.get(project.url).maybe_follow()
# Should not see wiki widget (since non-contributor and no content)
assert_not_in('Add important information, links, or images here to describe your project.', res)
def test_wiki_does_not_exist(self):
project = ProjectFactory(creator=self.user)
res = self.app.get('/{0}/wiki/{1}/'.format(
project._primary_key,
'not a real page yet',
), auth=self.auth, expect_errors=True)
assert_in('Add important information, links, or images here to describe your project.', res)
def test_sees_own_profile(self):
res = self.app.get('/profile/', auth=self.auth)
td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile'))
td2 = td1.find_next_sibling('td')
assert_equal(td2.text, self.user.display_absolute_url)
def test_sees_another_profile(self):
user2 = UserFactory()
res = self.app.get(user2.url, auth=self.auth)
td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile'))
td2 = td1.find_next_sibling('td')
assert_equal(td2.text, user2.display_absolute_url)
class TestComponents(OsfTestCase):
def setUp(self):
super(TestComponents, self).setUp()
self.user = AuthUserFactory()
self.consolidate_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
self.project.add_contributor(contributor=self.user, auth=self.consolidate_auth)
# A non-project component
self.component = NodeFactory(
category='hypothesis',
creator=self.user,
parent=self.project,
)
self.component.save()
self.component.set_privacy('public', self.consolidate_auth)
self.component.set_privacy('private', self.consolidate_auth)
self.project.save()
self.project_url = self.project.web_url_for('view_project')
def test_sees_parent(self):
res = self.app.get(self.component.url, auth=self.user.auth).maybe_follow()
parent_title = res.html.find_all('h2', class_='node-parent-title')
assert_equal(len(parent_title), 1)
assert_in(self.project.title, parent_title[0].text) # Bs4 will handle unescaping HTML here
def test_delete_project(self):
res = self.app.get(
self.component.url + 'settings/',
auth=self.user.auth
).maybe_follow()
assert_in(
'Delete {0}'.format(self.component.project_or_component),
res
)
def test_cant_delete_project_if_not_admin(self):
non_admin = AuthUserFactory()
self.component.add_contributor(
non_admin,
permissions=['read', 'write'],
auth=self.consolidate_auth,
save=True,
)
res = self.app.get(
self.component.url + 'settings/',
auth=non_admin.auth
).maybe_follow()
assert_not_in(
'Delete {0}'.format(self.component.project_or_component),
res
)
def test_can_configure_comments_if_admin(self):
res = self.app.get(
self.component.url + 'settings/',
auth=self.user.auth,
).maybe_follow()
assert_in('Commenting', res)
def test_cant_configure_comments_if_not_admin(self):
non_admin = AuthUserFactory()
self.component.add_contributor(
non_admin,
permissions=['read', 'write'],
auth=self.consolidate_auth,
save=True,
)
res = self.app.get(
self.component.url + 'settings/',
auth=non_admin.auth
).maybe_follow()
assert_not_in('Commenting', res)
def test_components_should_have_component_list(self):
res = self.app.get(self.component.url, auth=self.user.auth)
assert_in('Components', res)
class TestPrivateLinkView(OsfTestCase):
def setUp(self):
super(TestPrivateLinkView, self).setUp()
self.user = AuthUserFactory() # Is NOT a contributor
self.project = ProjectFactory(is_public=False)
self.link = PrivateLinkFactory(anonymous=True)
self.link.nodes.add(self.project)
self.link.save()
self.project_url = self.project.web_url_for('view_project')
def test_anonymous_link_hide_contributor(self):
res = self.app.get(self.project_url, {'view_only': self.link.key})
assert_in("Anonymous Contributors", res.body)
assert_not_in(self.user.fullname, res)
def test_anonymous_link_hides_citations(self):
res = self.app.get(self.project_url, {'view_only': self.link.key})
assert_not_in('Citation:', res)
def test_no_warning_for_read_only_user_with_valid_link(self):
link2 = PrivateLinkFactory(anonymous=False)
link2.nodes.add(self.project)
link2.save()
self.project.add_contributor(
self.user,
permissions=['read'],
save=True,
)
res = self.app.get(self.project_url, {'view_only': link2.key},
auth=self.user.auth)
assert_not_in(
"is being viewed through a private, view-only link. "
"Anyone with the link can view this project. Keep "
"the link safe.",
res.body
)
def test_no_warning_for_read_only_user_with_invalid_link(self):
self.project.add_contributor(
self.user,
permissions=['read'],
save=True,
)
res = self.app.get(self.project_url, {'view_only': "not_valid"},
auth=self.user.auth)
assert_not_in(
"is being viewed through a private, view-only link. "
"Anyone with the link can view this project. Keep "
"the link safe.",
res.body
)
class TestMergingAccounts(OsfTestCase):
def setUp(self):
super(TestMergingAccounts, self).setUp()
self.user = UserFactory.build()
self.user.fullname = "tess' test string"
self.user.set_password('science')
self.user.save()
self.dupe = UserFactory.build()
self.dupe.set_password('example')
self.dupe.save()
def test_merged_user_is_not_shown_as_a_contributor(self):
project = ProjectFactory(is_public=True)
# Both the master and dupe are contributors
project.add_contributor(self.dupe, log=False)
project.add_contributor(self.user, log=False)
project.save()
# At the project page, both are listed as contributors
res = self.app.get(project.url).maybe_follow()
assert_in_html(self.user.fullname, res)
assert_in_html(self.dupe.fullname, res)
# The accounts are merged
self.user.merge_user(self.dupe)
self.user.save()
# Now only the master user is shown at the project page
res = self.app.get(project.url).maybe_follow()
assert_in_html(self.user.fullname, res)
assert_true(self.dupe.is_merged)
assert_not_in(self.dupe.fullname, res)
def test_merged_user_has_alert_message_on_profile(self):
# Master merges dupe
self.user.merge_user(self.dupe)
self.user.save()
# At the dupe user's profile there is an alert message at the top
# indicating that the user is merged
res = self.app.get('/profile/{0}/'.format(self.dupe._primary_key)).maybe_follow()
assert_in('This account has been merged', res)
# FIXME: These tests affect search in the development environment, so Solr may
# need to be migrated after running them. Remove this side effect.
@unittest.skipIf(not settings.SEARCH_ENGINE, 'Skipping because search is disabled')
class TestSearching(OsfTestCase):
'''Test searching using the search bar. NOTE: These may affect the
Solr database. May need to migrate after running these.
'''
def setUp(self):
super(TestSearching, self).setUp()
import website.search.search as search
search.delete_all()
self.user = AuthUserFactory()
self.auth = self.user.auth
@unittest.skip(reason=r'¯\_(ツ)_/¯ knockout.')
def test_a_user_from_home_page(self):
user = UserFactory()
# Goes to home page
res = self.app.get('/').maybe_follow()
# Fills search form
form = res.forms['searchBar']
form['q'] = user.fullname
res = form.submit().maybe_follow()
# The username shows as a search result
assert_in(user.fullname, res)
@unittest.skip(reason=r'¯\_(ツ)_/¯ knockout.')
def test_a_public_project_from_home_page(self):
project = ProjectFactory(title='Foobar Project', is_public=True)
# Searches a part of the name
res = self.app.get('/').maybe_follow()
project.reload()
form = res.forms['searchBar']
form['q'] = 'Foobar'
res = form.submit().maybe_follow()
# A link to the project is shown as a result
assert_in('Foobar Project', res)
@unittest.skip(reason=r'¯\_(ツ)_/¯ knockout.')
def test_a_public_component_from_home_page(self):
component = NodeFactory(title='Foobar Component', is_public=True)
# Searches a part of the name
res = self.app.get('/').maybe_follow()
component.reload()
form = res.forms['searchBar']
form['q'] = 'Foobar'
res = form.submit().maybe_follow()
# A link to the component is shown as a result
assert_in('Foobar Component', res)
class TestShortUrls(OsfTestCase):
def setUp(self):
super(TestShortUrls, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
self.consolidate_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
# A non-project component
self.component = NodeFactory(parent=self.project, category='hypothesis', creator=self.user)
# Hack: Add some logs to component; should be unnecessary pending
# improvements to factories from @rliebz
self.component.set_privacy('public', auth=self.consolidate_auth)
self.component.set_privacy('private', auth=self.consolidate_auth)
self.wiki = NodeWikiFactory(user=self.user, node=self.component)
def _url_to_body(self, url):
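"""Return the normalized response body at ``url``, following redirects."""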
return self.app.get(
url,
auth=self.auth
).maybe_follow(
auth=self.auth,
).normal_body
def test_project_url(self):
assert_equal(
self._url_to_body(self.project.deep_url),
self._url_to_body(self.project.url),
)
def test_component_url(self):
assert_equal(
self._url_to_body(self.component.deep_url),
self._url_to_body(self.component.url),
)
def test_wiki_url(self):
assert_equal(
self._url_to_body(self.wiki.deep_url),
self._url_to_body(self.wiki.url),
)
class TestClaiming(OsfTestCase):
def setUp(self):
super(TestClaiming, self).setUp()
self.referrer = AuthUserFactory()
self.project = ProjectFactory(creator=self.referrer, is_public=True)
def test_correct_name_shows_in_contributor_list(self):
name1, email = fake.name(), fake_email()
UnregUserFactory(fullname=name1, email=email)
name2, email = fake.name(), fake_email()
# Added with different name
self.project.add_unregistered_contributor(fullname=name2,
email=email, auth=Auth(self.referrer))
self.project.save()
res = self.app.get(self.project.url, auth=self.referrer.auth)
# Correct name is shown
assert_in_html(name2, res)
assert_not_in(name1, res)
def test_user_can_set_password_on_claim_page(self):
name, email = fake.name(), fake_email()
new_user = self.project.add_unregistered_contributor(
email=email,
fullname=name,
auth=Auth(self.referrer)
)
self.project.save()
claim_url = new_user.get_claim_url(self.project._primary_key)
res = self.app.get(claim_url)
self.project.reload()
assert_in('Set Password', res)
form = res.forms['setPasswordForm']
#form['username'] = new_user.username #Removed as long as E-mail can't be updated.
form['password'] = 'killerqueen'
form['password2'] = 'killerqueen'
res = form.submit().follow()
new_user.reload()
assert_true(new_user.check_password('killerqueen'))
def test_is_redirected_if_user_already_logged_in(self):
name, email = fake.name(), fake_email()
new_user = self.project.add_unregistered_contributor(
email=email,
fullname=name,
auth=Auth(self.referrer)
)
self.project.save()
existing = AuthUserFactory()
claim_url = new_user.get_claim_url(self.project._primary_key)
# a user is already logged in
res = self.app.get(claim_url, auth=existing.auth, expect_errors=True)
assert_equal(res.status_code, 302)
def test_unregistered_users_names_are_project_specific(self):
name1, name2, email = fake.name(), fake.name(), fake_email()
project2 = ProjectFactory(creator=self.referrer)
# different projects use different names for the same unreg contributor
self.project.add_unregistered_contributor(
email=email,
fullname=name1,
auth=Auth(self.referrer)
)
self.project.save()
project2.add_unregistered_contributor(
email=email,
fullname=name2,
auth=Auth(self.referrer)
)
project2.save()
self.app.authenticate(*self.referrer.auth)
# Each project displays a different name in the contributor list
res = self.app.get(self.project.url)
assert_in_html(name1, res)
res2 = self.app.get(project2.url)
assert_in_html(name2, res2)
@unittest.skip("as long as E-mails cannot be changed")
def test_cannot_set_email_to_a_user_that_already_exists(self):
reg_user = UserFactory()
name, email = fake.name(), fake_email()
new_user = self.project.add_unregistered_contributor(
email=email,
fullname=name,
auth=Auth(self.referrer)
)
self.project.save()
# Goes to claim url and successfully claims account
claim_url = new_user.get_claim_url(self.project._primary_key)
res = self.app.get(claim_url)
self.project.reload()
assert_in('Set Password', res)
form = res.forms['setPasswordForm']
# Fills out an email that is the username of another user
form['username'] = reg_user.username
form['password'] = 'killerqueen'
form['password2'] = 'killerqueen'
res = form.submit().maybe_follow(expect_errors=True)
assert_in(
language.ALREADY_REGISTERED.format(email=reg_user.username),
res
)
def test_correct_display_name_is_shown_at_claim_page(self):
original_name = fake.name()
unreg = UnregUserFactory(fullname=original_name)
different_name = fake.name()
new_user = self.project.add_unregistered_contributor(
email=unreg.username,
fullname=different_name,
auth=Auth(self.referrer),
)
self.project.save()
claim_url = new_user.get_claim_url(self.project._primary_key)
res = self.app.get(claim_url)
# Correct name (different_name) should be on page
assert_in_html(different_name, res)
class TestConfirmingEmail(OsfTestCase):
def setUp(self):
super(TestConfirmingEmail, self).setUp()
self.user = UnconfirmedUserFactory()
self.confirmation_url = self.user.get_confirmation_url(
self.user.username,
external=False,
)
self.confirmation_token = self.user.get_confirmation_token(
self.user.username
)
def test_cannot_remove_another_user_email(self):
user1 = AuthUserFactory()
user2 = AuthUserFactory()
url = api_url_for('update_user')
header = {'id': user1.username, 'emails': [{'address': user1.username}]}
res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_cannot_make_primary_email_for_another_user(self):
user1 = AuthUserFactory()
user2 = AuthUserFactory()
email = 'test@cos.io'
user1.emails.create(address=email)
user1.save()
url = api_url_for('update_user')
header = {'id': user1.username,
'emails': [{'address': user1.username, 'primary': False, 'confirmed': True},
{'address': email, 'primary': True, 'confirmed': True}
]}
res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_cannot_add_email_for_another_user(self):
user1 = AuthUserFactory()
user2 = AuthUserFactory()
email = 'test@cos.io'
url = api_url_for('update_user')
header = {'id': user1.username,
'emails': [{'address': user1.username, 'primary': True, 'confirmed': True},
{'address': email, 'primary': False, 'confirmed': False}
]}
res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_error_page_if_confirm_link_is_used(self):
self.user.confirm_email(self.confirmation_token)
self.user.save()
res = self.app.get(self.confirmation_url, expect_errors=True)
assert_in(auth_exc.InvalidTokenError.message_short, res)
assert_equal(res.status_code, http.BAD_REQUEST)
class TestClaimingAsARegisteredUser(OsfTestCase):
def setUp(self):
super(TestClaimingAsARegisteredUser, self).setUp()
self.referrer = AuthUserFactory()
self.project = ProjectFactory(creator=self.referrer, is_public=True)
name, email = fake.name(), fake_email()
self.user = self.project.add_unregistered_contributor(
fullname=name,
email=email,
auth=Auth(user=self.referrer)
)
self.project.save()
def test_claim_user_registered_with_correct_password(self):
reg_user = AuthUserFactory() # NOTE: AuthUserFactory sets password as 'queenfan86'
url = self.user.get_claim_url(self.project._primary_key)
# Follow to password re-enter page
res = self.app.get(url, auth=reg_user.auth).follow(auth=reg_user.auth)
# verify that the "Claim Account" form is returned
assert_in('Claim Contributor', res.body)
form = res.forms['claimContributorForm']
form['password'] = 'queenfan86'
res = form.submit(auth=reg_user.auth)
res = res.follow(auth=reg_user.auth)
self.project.reload()
self.user.reload()
# user is now a contributor to the project
assert_in(reg_user, self.project.contributors)
# the unregistered user (self.user) is removed as a contributor
assert_not_in(self.user, self.project.contributors)
# and their unclaimed record for the project has been deleted
assert_not_in(self.project, self.user.unclaimed_records)
class TestExplorePublicActivity(OsfTestCase):
def setUp(self):
super(TestExplorePublicActivity, self).setUp()
self.project = ProjectFactory(is_public=True)
self.registration = RegistrationFactory(project=self.project)
self.private_project = ProjectFactory(title="Test private project")
self.popular_project = ProjectFactory(is_public=True)
self.popular_registration = RegistrationFactory(project=self.project, is_public=True)
# Add project to new and noteworthy projects
self.new_and_noteworthy_links_node = ProjectFactory(is_public=True)
self.new_and_noteworthy_links_node._id = settings.NEW_AND_NOTEWORTHY_LINKS_NODE
self.new_and_noteworthy_links_node.add_pointer(self.project, auth=Auth(self.new_and_noteworthy_links_node.creator), save=True)
# Set up popular projects and registrations
self.popular_links_node = ProjectFactory(is_public=True)
settings.POPULAR_LINKS_NODE = self.popular_links_node._id
self.popular_links_node.add_pointer(self.popular_project, auth=Auth(self.popular_links_node.creator), save=True)
self.popular_links_registrations = ProjectFactory(is_public=True)
settings.POPULAR_LINKS_REGISTRATIONS = self.popular_links_registrations._id
self.popular_links_registrations.add_pointer(self.popular_registration, auth=Auth(self.popular_links_registrations.creator), save=True)
def test_explore_page_loads_when_settings_not_configured(self):
old_settings_values = settings.POPULAR_LINKS_NODE, settings.NEW_AND_NOTEWORTHY_LINKS_NODE, settings.POPULAR_LINKS_REGISTRATIONS
settings.POPULAR_LINKS_NODE = 'notanode'
settings.NEW_AND_NOTEWORTHY_LINKS_NODE = 'alsototallywrong'
settings.POPULAR_LINKS_REGISTRATIONS = 'nopenope'
url = self.project.web_url_for('activity')
res = self.app.get(url)
assert_equal(res.status_code, 200)
settings.POPULAR_LINKS_NODE, settings.NEW_AND_NOTEWORTHY_LINKS_NODE, settings.POPULAR_LINKS_REGISTRATIONS = old_settings_values
def test_new_and_noteworthy_and_popular_nodes_show_in_explore_activity(self):
url = self.project.web_url_for('activity')
res = self.app.get(url)
assert_equal(res.status_code, 200)
# New and Noteworthy
assert_in(str(self.project.title), res)
assert_in(str(self.project.created.date()), res)
assert_in(str(self.registration.title), res)
assert_in(str(self.registration.registered_date.date()), res)
assert_not_in(str(self.private_project.title), res)
# Popular Projects and Registrations
assert_in(str(self.popular_project.title), res)
assert_in(str(self.popular_project.created.date()), res)
assert_in(str(self.popular_registration.title), res)
assert_in(str(self.popular_registration.registered_date.date()), res)
class TestResendConfirmation(OsfTestCase):
def setUp(self):
super(TestResendConfirmation, self).setUp()
self.unconfirmed_user = UnconfirmedUserFactory()
self.confirmed_user = UserFactory()
self.get_url = web_url_for('resend_confirmation_get')
self.post_url = web_url_for('resend_confirmation_post')
# test that resend confirmation page is loaded correctly
def test_resend_confirmation_get(self):
res = self.app.get(self.get_url)
assert_equal(res.status_code, 200)
assert_in('Resend Confirmation', res.body)
assert_in('resendForm', res.forms)
# test that unconfirmed user can receive resend confirmation email
@mock.patch('framework.auth.views.mails.send_mail')
def test_can_receive_resend_confirmation_email(self, mock_send_mail):
# load resend confirmation page and submit email
res = self.app.get(self.get_url)
form = res.forms['resendForm']
form['email'] = self.unconfirmed_user.unconfirmed_emails[0]
res = form.submit()
# check email, request and response
assert_true(mock_send_mail.called)
assert_equal(res.status_code, 200)
assert_equal(res.request.path, self.post_url)
assert_in_html('If there is an OSF account', res)
# test that confirmed user cannot receive resend confirmation email
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_receive_resend_confirmation_email_1(self, mock_send_mail):
# load resend confirmation page and submit email
res = self.app.get(self.get_url)
form = res.forms['resendForm']
form['email'] = self.confirmed_user.emails.first().address
res = form.submit()
# check email, request and response
assert_false(mock_send_mail.called)
assert_equal(res.status_code, 200)
assert_equal(res.request.path, self.post_url)
assert_in_html('has already been confirmed', res)
# test that non-existing user cannot receive resend confirmation email
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_receive_resend_confirmation_email_2(self, mock_send_mail):
# load resend confirmation page and submit email
res = self.app.get(self.get_url)
form = res.forms['resendForm']
form['email'] = 'random@random.com'
res = form.submit()
# check email, request and response
assert_false(mock_send_mail.called)
assert_equal(res.status_code, 200)
assert_equal(res.request.path, self.post_url)
assert_in_html('If there is an OSF account', res)
# test that user cannot submit resend confirmation request too quickly
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_resend_confirmation_twice_quickly(self, mock_send_mail):
# load resend confirmation page and submit email
res = self.app.get(self.get_url)
form = res.forms['resendForm']
form['email'] = self.unconfirmed_user.email
res = form.submit()
res = form.submit()
# check request and response
assert_equal(res.status_code, 200)
assert_in_html('Please wait', res)
class TestForgotPassword(OsfTestCase):
def setUp(self):
super(TestForgotPassword, self).setUp()
self.user = UserFactory()
self.auth_user = AuthUserFactory()
self.get_url = web_url_for('forgot_password_get')
self.post_url = web_url_for('forgot_password_post')
self.user.verification_key_v2 = {}
self.user.save()
# log users out before they land on forgot password page
def test_forgot_password_logs_out_user(self):
# visit forgot password link while another user is logged in
res = self.app.get(self.get_url, auth=self.auth_user.auth)
# check redirection to CAS logout
assert_equal(res.status_code, 302)
location = res.headers.get('Location')
assert_not_in('reauth', location)
assert_in('logout?service=', location)
assert_in('forgotpassword', location)
# test that forgot password page is loaded correctly
def test_get_forgot_password(self):
res = self.app.get(self.get_url)
assert_equal(res.status_code, 200)
assert_in('Forgot Password', res.body)
assert_in('forgotPasswordForm', res.forms)
# test that existing user can receive reset password email
@mock.patch('framework.auth.views.mails.send_mail')
def test_can_receive_reset_password_email(self, mock_send_mail):
# load forgot password page and submit email
res = self.app.get(self.get_url)
form = res.forms['forgotPasswordForm']
form['forgot_password-email'] = self.user.username
res = form.submit()
# check mail was sent
assert_true(mock_send_mail.called)
# check http 200 response
assert_equal(res.status_code, 200)
# check request URL is /forgotpassword
assert_equal(res.request.path, self.post_url)
# check push notification
assert_in_html('If there is an OSF account', res)
assert_not_in_html('Please wait', res)
# check verification_key_v2 is set
self.user.reload()
assert_not_equal(self.user.verification_key_v2, {})
# test that non-existing user cannot receive reset password email
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_receive_reset_password_email(self, mock_send_mail):
# load forgot password page and submit email
res = self.app.get(self.get_url)
form = res.forms['forgotPasswordForm']
form['forgot_password-email'] = 'fake' + self.user.username
res = form.submit()
# check mail was not sent
assert_false(mock_send_mail.called)
# check http 200 response
assert_equal(res.status_code, 200)
# check request URL is /forgotpassword
assert_equal(res.request.path, self.post_url)
# check push notification
assert_in_html('If there is an OSF account', res)
assert_not_in_html('Please wait', res)
# check verification_key_v2 is not set
self.user.reload()
assert_equal(self.user.verification_key_v2, {})
# test that a deactivated user cannot receive reset password email
@mock.patch('framework.auth.views.mails.send_mail')
def test_not_active_user_no_reset_password_email(self, mock_send_mail):
self.user.disable_account()
self.user.save()
# load forgot password page and submit email
res = self.app.get(self.get_url)
form = res.forms['forgotPasswordForm']
form['forgot_password-email'] = self.user.username
res = form.submit()
# check mail was not sent
assert_false(mock_send_mail.called)
# check http 200 response
assert_equal(res.status_code, 200)
# check request URL is /forgotpassword
assert_equal(res.request.path, self.post_url)
# check push notification
assert_in_html('If there is an OSF account', res)
assert_not_in_html('Please wait', res)
# check verification_key_v2 is not set
self.user.reload()
assert_equal(self.user.verification_key_v2, {})
# test that user cannot submit forgot password request too quickly
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_reset_password_twice_quickly(self, mock_send_mail):
# load forgot password page and submit email
res = self.app.get(self.get_url)
form = res.forms['forgotPasswordForm']
form['forgot_password-email'] = self.user.username
res = form.submit()
res = form.submit()
# check http 200 response
assert_equal(res.status_code, 200)
# check push notification
assert_in_html('Please wait', res)
assert_not_in_html('If there is an OSF account', res)
@unittest.skip('Public projects/components are dynamically loaded now.')
class TestAUserProfile(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = AuthUserFactory()
self.me = AuthUserFactory()
self.project = ProjectFactory(creator=self.me, is_public=True, title=fake.bs())
self.component = NodeFactory(creator=self.me, parent=self.project, is_public=True, title=fake.bs())
# regression test for https://github.com/CenterForOpenScience/osf.io/issues/2623
def test_has_public_projects_and_components(self):
# I go to my own profile
url = web_url_for('profile_view_id', uid=self.me._primary_key)
# I see the title of both my project and component
res = self.app.get(url, auth=self.me.auth)
assert_in_html(self.component.title, res)
assert_in_html(self.project.title, res)
# Another user can also see my public project and component
url = web_url_for('profile_view_id', uid=self.me._primary_key)
# I see the title of both my project and component
res = self.app.get(url, auth=self.user.auth)
assert_in_html(self.component.title, res)
assert_in_html(self.project.title, res)
def test_shows_projects_with_many_contributors(self):
# My project has many contributors
for _ in range(5):
user = UserFactory()
self.project.add_contributor(user, auth=Auth(self.project.creator), save=True)
# I go to my own profile
url = web_url_for('profile_view_id', uid=self.me._primary_key)
res = self.app.get(url, auth=self.me.auth)
# I see '3 more' as a link
assert_in('3 more', res)
res = res.click('3 more')
assert_equal(res.request.path, self.project.url)
def test_has_no_public_projects_or_components_on_own_profile(self):
# User goes to their profile
url = web_url_for('profile_view_id', uid=self.user._id)
res = self.app.get(url, auth=self.user.auth)
# user has no public components/projects
assert_in('You have no public projects', res)
assert_in('You have no public components', res)
def test_user_no_public_projects_or_components(self):
# I go to other user's profile
url = web_url_for('profile_view_id', uid=self.user._id)
# User has no public components/projects
res = self.app.get(url, auth=self.me.auth)
assert_in('This user has no public projects', res)
assert_in('This user has no public components', res)
# regression test
def test_does_not_show_registrations(self):
project = ProjectFactory(creator=self.user)
component = NodeFactory(parent=project, creator=self.user, is_public=False)
# User has a registration with public components
reg = RegistrationFactory(project=component.parent_node, creator=self.user, is_public=True)
for each in reg.nodes:
each.is_public = True
each.save()
# I go to other user's profile
url = web_url_for('profile_view_id', uid=self.user._id)
# Registration does not appear on profile
res = self.app.get(url, auth=self.me.auth)
assert_in('This user has no public components', res)
assert_not_in(reg.title, res)
assert_not_in(reg.nodes[0].title, res)
class TestPreprintBannerView(OsfTestCase):
def setUp(self):
super(TestPreprintBannerView, self).setUp()
self.admin = AuthUserFactory()
self.provider_one = PreprintProviderFactory()
self.provider_two = PreprintProviderFactory()
self.project_one = ProjectFactory(creator=self.admin, is_public=True)
self.project_two = ProjectFactory(creator=self.admin, is_public=True)
self.project_three = ProjectFactory(creator=self.admin, is_public=True)
self.subject_one = SubjectFactory()
self.subject_two = SubjectFactory()
self.file_one = test_utils.create_test_file(self.project_one, self.admin, 'mgla.pdf')
self.file_two = test_utils.create_test_file(self.project_two, self.admin, 'saor.pdf')
self.published_preprint = PreprintFactory(creator=self.admin, filename='mgla.pdf', provider=self.provider_one, subjects=[[self.subject_one._id]], project=self.project_one, is_published=True)
self.unpublished_preprint = PreprintFactory(creator=self.admin, filename='saor.pdf', provider=self.provider_two, subjects=[[self.subject_two._id]], project=self.project_two, is_published=False)
def test_public_project_published_preprint(self):
url = self.project_one.web_url_for('view_project')
res = self.app.get(url, auth=self.admin.auth)
assert_not_in('has a preprint, but has been made Private. Make your preprint discoverable by making this', res.body)
def test_private_project_published_preprint(self):
self.project_one.is_public = False
self.project_one.save()
url = self.project_one.web_url_for('view_project')
res = self.app.get(url, auth=self.admin.auth)
assert_in('has a preprint, but has been made Private. Make your preprint discoverable by making this', res.body)
def test_public_project_unpublished_preprint(self):
url = self.project_two.web_url_for('view_project')
res = self.app.get(url, auth=self.admin.auth)
assert_not_in('has a preprint, but has been made Private. Make your preprint discoverable by making this', res.body)
def test_private_project_unpublished_preprint(self):
# Do not show banner on unpublished preprints
self.project_two.is_public = False
self.project_two.save()
url = self.project_two.web_url_for('view_project')
res = self.app.get(url, auth=self.admin.auth)
assert_not_in('has a preprint, but has been made Private. Make your preprint discoverable by making this', res.body)
def test_public_project_no_preprint(self):
url = self.project_three.web_url_for('view_project')
res = self.app.get(url, auth=self.admin.auth)
assert_not_in('has a preprint, but has been made Private. Make your preprint discoverable by making this', res.body)
def test_private_project_no_preprint(self):
self.project_three.is_public = False
self.project_three.save()
url = self.project_three.web_url_for('view_project')
res = self.app.get(url, auth=self.admin.auth)
assert_not_in('has a preprint, but has been made Private. Make your preprint discoverable by making this', res.body)
if __name__ == '__main__':
unittest.main()
|
chennan47/osf.io
|
tests/test_webtests.py
|
Python
|
apache-2.0
| 46,576
|
[
"VisIt"
] |
7d6db6b8c4256c5c2291a67180dbf653ef8c3b636bdf846eca4ff32ed1b6a107
|
"""Mayavi/traits GUI for converting data from KIT systems."""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
from collections import Counter
import os
import queue
import sys
import numpy as np
from scipy.linalg import inv
from threading import Thread
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.tools.mlab_scene_model import MlabSceneModel
from pyface.api import (confirm, error, FileDialog, OK, YES, information,
ProgressDialog, warning)
from traits.api import (HasTraits, HasPrivateTraits, cached_property, Instance,
Property, Bool, Button, Enum, File, Float, Int, List,
Str, Array, DelegatesTo, on_trait_change)
from traits.trait_base import ETSConfig
from traitsui.api import (View, Item, HGroup, VGroup, spring, TextEditor,
CheckListEditor, EnumEditor, Handler)
from traitsui.menu import NoButtons
from tvtk.pyface.scene_editor import SceneEditor
from ..io.constants import FIFF
from ..io._digitization import _make_dig_points
from ..io.kit.coreg import _read_dig_kit
from ..io.kit.kit import (RawKIT, KIT, _make_stim_channel, _default_stim_chs,
UnsupportedKITFormat)
from ..transforms import (apply_trans, als_ras_trans,
get_ras_to_neuromag_trans, Transform)
from ..coreg import _decimate_points, fit_matched_points
from ..utils import get_config, set_config, logger, warn
from ._backend import _get_pyface_backend
from ..event import _find_events
from ._marker_gui import CombineMarkersPanel, CombineMarkersModel
from ._help import read_tooltips
from ._viewer import HeadViewController, PointObject
use_editor = CheckListEditor(cols=5, values=[(i, str(i)) for i in range(5)])
if _get_pyface_backend() == 'wx':
# wx backend allows labels for wildcards
hsp_wildcard = ['Head Shape Points (*.hsp;*.txt)|*.hsp;*.txt']
elp_wildcard = ['Head Shape Fiducials (*.elp;*.txt)|*.elp;*.txt']
kit_con_wildcard = ['Continuous KIT Files (*.sqd;*.con)|*.sqd;*.con']
if sys.platform in ('win32', 'linux2'):
# on Windows and Ubuntu, multiple wildcards do not seem to work
hsp_wildcard = ['*.hsp', '*.txt']
elp_wildcard = ['*.elp', '*.txt']
kit_con_wildcard = ['*.sqd', '*.con']
else:
hsp_wildcard = ['*.hsp;*.txt']
elp_wildcard = ['*.elp;*.txt']
kit_con_wildcard = ['*.sqd;*.con']
tooltips = read_tooltips('kit2fiff')
class Kit2FiffModel(HasPrivateTraits):
"""Data Model for Kit2Fiff conversion.
- Markers are transformed into RAS coordinate system (as are the sensor
coordinates).
- Head shape digitizer data is transformed into neuromag-like space.
"""
# Input Traits
markers = Instance(CombineMarkersModel, ())
sqd_file = File(exists=True, filter=kit_con_wildcard)
allow_unknown_format = Bool(False)
hsp_file = File(exists=True, filter=hsp_wildcard)
fid_file = File(exists=True, filter=elp_wildcard)
stim_coding = Enum(">", "<", "channel")
stim_chs = Str("")
stim_chs_array = Property(depends_on=['raw', 'stim_chs', 'stim_coding'])
stim_chs_ok = Property(depends_on='stim_chs_array')
stim_chs_comment = Property(depends_on='stim_chs_array')
stim_slope = Enum("-", "+")
stim_threshold = Float(1.)
# Marker Points
use_mrk = List(list(range(5)), desc="Which marker points to use for the "
"device head coregistration.")
# Derived Traits
mrk = Property(depends_on='markers.mrk3.points')
# Polhemus Fiducials
elp_raw = Property(depends_on=['fid_file'])
hsp_raw = Property(depends_on=['hsp_file'])
polhemus_neuromag_trans = Property(depends_on=['elp_raw'])
# Polhemus data (in neuromag space)
elp = Property(depends_on=['elp_raw', 'polhemus_neuromag_trans'])
fid = Property(depends_on=['elp_raw', 'polhemus_neuromag_trans'])
hsp = Property(depends_on=['hsp_raw', 'polhemus_neuromag_trans'])
# trans
dev_head_trans = Property(depends_on=['elp', 'mrk', 'use_mrk'])
head_dev_trans = Property(depends_on=['dev_head_trans'])
# event preview
raw = Property(depends_on='sqd_file')
misc_chs = Property(List, depends_on='raw')
misc_chs_desc = Property(Str, depends_on='misc_chs')
misc_data = Property(Array, depends_on='raw')
can_test_stim = Property(Bool, depends_on='raw')
# info
sqd_fname = Property(Str, depends_on='sqd_file')
hsp_fname = Property(Str, depends_on='hsp_file')
fid_fname = Property(Str, depends_on='fid_file')
can_save = Property(Bool, depends_on=['stim_chs_ok', 'fid',
'elp', 'hsp', 'dev_head_trans'])
# Show GUI feedback (like error messages and progress bar)
show_gui = Bool(False)
@cached_property
def _get_can_save(self):
"""Only allow saving when all or no head shape elements are set."""
if not self.stim_chs_ok:
return False
has_all_hsp = (np.any(self.dev_head_trans) and np.any(self.hsp) and
np.any(self.elp) and np.any(self.fid))
if has_all_hsp:
return True
has_any_hsp = self.hsp_file or self.fid_file or np.any(self.mrk)
return not has_any_hsp
@cached_property
def _get_can_test_stim(self):
return self.raw is not None
@cached_property
def _get_dev_head_trans(self):
if (self.mrk is None) or not np.any(self.fid):
return np.eye(4)
src_pts = self.mrk
dst_pts = self.elp
n_use = len(self.use_mrk)
if n_use < 3:
if self.show_gui:
error(None, "Estimating the device head transform requires at "
"least 3 marker points. Please adjust the markers used.",
"Not Enough Marker Points")
return
elif n_use < 5:
src_pts = src_pts[self.use_mrk]
dst_pts = dst_pts[self.use_mrk]
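# least-squares fit of the device marker points to the digitized ELP
# points; out='trans' yields a 4x4 device-to-head transform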
trans = fit_matched_points(src_pts, dst_pts, out='trans')
return trans
@cached_property
def _get_elp(self):
if self.elp_raw is None:
return np.empty((0, 3))
pts = self.elp_raw[3:8]
pts = apply_trans(self.polhemus_neuromag_trans, pts)
return pts
@cached_property
def _get_elp_raw(self):
if not self.fid_file:
return
try:
pts = _read_dig_kit(self.fid_file)
if len(pts) < 8:
raise ValueError("File contains %i points, need 8" % len(pts))
except Exception as err:
if self.show_gui:
error(None, str(err), "Error Reading Fiducials")
self.reset_traits(['fid_file'])
raise
else:
return pts
@cached_property
def _get_fid(self):
if self.elp_raw is None:
return np.empty((0, 3))
pts = self.elp_raw[:3]
pts = apply_trans(self.polhemus_neuromag_trans, pts)
return pts
@cached_property
def _get_fid_fname(self):
if self.fid_file:
return os.path.basename(self.fid_file)
else:
return '-'
@cached_property
def _get_head_dev_trans(self):
return inv(self.dev_head_trans)
@cached_property
def _get_hsp(self):
if (self.hsp_raw is None) or not np.any(self.polhemus_neuromag_trans):
return np.empty((0, 3))
else:
pts = apply_trans(self.polhemus_neuromag_trans, self.hsp_raw)
return pts
@cached_property
def _get_hsp_fname(self):
if self.hsp_file:
return os.path.basename(self.hsp_file)
else:
return '-'
@cached_property
def _get_hsp_raw(self):
fname = self.hsp_file
if not fname:
return
try:
pts = _read_dig_kit(fname)
n_pts = len(pts)
if n_pts > KIT.DIG_POINTS:
msg = ("The selected head shape contains {n_in} points, "
"which is more than the recommended maximum ({n_rec}). "
"The file will be automatically downsampled, which "
"might take a while. A better way to downsample is "
"using FastScan.".
format(n_in=n_pts, n_rec=KIT.DIG_POINTS))
if self.show_gui:
information(None, msg, "Too Many Head Shape Points")
pts = _decimate_points(pts, 5)
except Exception as err:
if self.show_gui:
error(None, str(err), "Error Reading Head Shape")
self.reset_traits(['hsp_file'])
raise
else:
return pts
@cached_property
def _get_misc_chs(self):
if not self.raw:
return
return [i for i, ch in enumerate(self.raw.info['chs']) if
ch['kind'] == FIFF.FIFFV_MISC_CH]
@cached_property
def _get_misc_chs_desc(self):
if self.misc_chs is None:
return "No SQD file selected..."
elif np.all(np.diff(self.misc_chs) == 1):
return "%i:%i" % (self.misc_chs[0], self.misc_chs[-1] + 1)
else:
return "%i... (discontinuous)" % self.misc_chs[0]
@cached_property
def _get_misc_data(self):
if not self.raw:
return
if self.show_gui:
# progress dialog with indefinite progress bar
prog = ProgressDialog(title="Loading SQD data...",
message="Loading stim channel data from SQD "
"file ...")
prog.open()
prog.update(0)
else:
prog = None
try:
data, times = self.raw[self.misc_chs]
except Exception as err:
if self.show_gui:
error(None, "Error reading SQD data file: %s (Check the "
"terminal output for details)" % str(err),
"Error Reading SQD File")
raise
finally:
if self.show_gui:
prog.close()
return data
@cached_property
def _get_mrk(self):
return apply_trans(als_ras_trans, self.markers.mrk3.points)
@cached_property
def _get_polhemus_neuromag_trans(self):
if self.elp_raw is None:
return
nasion, lpa, rpa = apply_trans(als_ras_trans, self.elp_raw[:3])
trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
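# compose: Polhemus ALS -> RAS, then RAS -> Neuromag head coordinates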
return np.dot(trans, als_ras_trans)
@cached_property
def _get_raw(self):
if not self.sqd_file:
return
try:
return RawKIT(self.sqd_file, stim=None,
allow_unknown_format=self.allow_unknown_format)
except UnsupportedKITFormat as exception:
warning(
None,
"The selected SQD file is written in an old file format (%s) "
"that is not officially supported. Confirm that the results "
"are as expected. This warning is displayed only once per "
"session." % (exception.sqd_version,),
"Unsupported SQD File Format")
self.allow_unknown_format = True
return self._get_raw()
except Exception as err:
self.reset_traits(['sqd_file'])
if self.show_gui:
error(None, "Error reading SQD data file: %s (Check the "
"terminal output for details)" % str(err),
"Error Reading SQD File")
raise
@cached_property
def _get_sqd_fname(self):
if self.sqd_file:
return os.path.basename(self.sqd_file)
else:
return '-'
@cached_property
def _get_stim_chs_array(self):
if self.raw is None:
return
elif not self.stim_chs.strip():
picks = _default_stim_chs(self.raw.info)
else:
try:
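# e.g. stim_chs "1:9, 12" evaluates np.r_[1:9, 12] -> array([1, ..., 8, 12])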
picks = eval("r_[%s]" % self.stim_chs, vars(np))
if picks.dtype.kind != 'i':
raise TypeError("Need array of int")
except Exception:
return None
if self.stim_coding == '<': # Big-endian
return picks[::-1]
else:
return picks
@cached_property
def _get_stim_chs_comment(self):
if self.raw is None:
return ""
elif not self.stim_chs_ok:
return "Invalid!"
elif not self.stim_chs.strip():
return "Default: The first 8 MISC channels"
else:
return "Ok: %i channels" % len(self.stim_chs_array)
@cached_property
def _get_stim_chs_ok(self):
return self.stim_chs_array is not None
def clear_all(self):
"""Clear all specified input parameters."""
self.markers.clear = True
self.reset_traits(['sqd_file', 'hsp_file', 'fid_file', 'use_mrk'])
def get_event_info(self):
"""Count events with current stim channel settings.
Returns
-------
event_count : Counter
Counter mapping event ID to number of occurrences.
"""
if self.misc_data is None:
return
idx = [self.misc_chs.index(ch) for ch in self.stim_chs_array]
data = self.misc_data[idx]
if self.stim_coding == 'channel':
coding = 'channel'
else:
coding = 'binary'
stim_ch = _make_stim_channel(data, self.stim_slope,
self.stim_threshold, coding,
self.stim_chs_array)
events = _find_events(stim_ch, self.raw.first_samp, consecutive=True,
min_samples=3)
return Counter(events[:, 2])
def get_raw(self, preload=False):
"""Create a raw object based on the current model settings."""
if not self.can_save:
raise ValueError("Not all necessary parameters are set")
# stim channels and coding
if self.stim_coding == 'channel':
stim_code = 'channel'
elif self.stim_coding in '<>':
stim_code = 'binary'
else:
raise RuntimeError("stim_coding=%r" % self.stim_coding)
logger.info("Creating raw with stim=%r, slope=%r, stim_code=%r, "
"stimthresh=%r", self.stim_chs_array, self.stim_slope,
stim_code, self.stim_threshold)
raw = RawKIT(self.sqd_file, preload=preload, stim=self.stim_chs_array,
slope=self.stim_slope, stim_code=stim_code,
stimthresh=self.stim_threshold,
allow_unknown_format=self.allow_unknown_format)
if np.any(self.fid):
raw.info['dig'] = _make_dig_points(self.fid[0], self.fid[1],
self.fid[2], self.elp,
self.hsp)
raw.info['dev_head_t'] = Transform('meg', 'head',
self.dev_head_trans)
return raw
class Kit2FiffFrameHandler(Handler):
"""Check for unfinished processes before closing its window."""
def close(self, info, is_ok): # noqa: D102
if info.object.kit2fiff_panel.queue.unfinished_tasks:
msg = ("Can not close the window while saving is still in "
"progress. Please wait until all files are processed.")
title = "Saving Still in Progress"
information(None, msg, title)
return False
else:
# store configuration, but don't prevent from closing on error
try:
info.object.save_config()
except Exception as exc:
warn("Error saving GUI configuration:\n%s" % (exc,))
return True
class Kit2FiffPanel(HasPrivateTraits):
"""Control panel for kit2fiff conversion."""
model = Instance(Kit2FiffModel)
# model copies for view
use_mrk = DelegatesTo('model')
sqd_file = DelegatesTo('model')
hsp_file = DelegatesTo('model')
fid_file = DelegatesTo('model')
stim_coding = DelegatesTo('model')
stim_chs = DelegatesTo('model')
stim_chs_ok = DelegatesTo('model')
stim_chs_comment = DelegatesTo('model')
stim_slope = DelegatesTo('model')
stim_threshold = DelegatesTo('model')
# info
can_save = DelegatesTo('model')
sqd_fname = DelegatesTo('model')
hsp_fname = DelegatesTo('model')
fid_fname = DelegatesTo('model')
misc_chs_desc = DelegatesTo('model')
can_test_stim = DelegatesTo('model')
test_stim = Button(label="Find Events")
plot_raw = Button(label="Plot Raw")
# Source Files
reset_dig = Button
# Visualization
scene = Instance(MlabSceneModel)
fid_obj = Instance(PointObject)
elp_obj = Instance(PointObject)
hsp_obj = Instance(PointObject)
# Output
save_as = Button(label='Save FIFF...')
clear_all = Button(label='Clear All')
queue = Instance(queue.Queue, ())
queue_feedback = Str('')
queue_current = Str('')
queue_len = Int(0)
queue_len_str = Property(Str, depends_on=['queue_len'])
error = Str('')
view = View(
VGroup(VGroup(Item('sqd_file', label="Data",
tooltip=tooltips['sqd_file']),
Item('sqd_fname', show_label=False, style='readonly'),
Item('hsp_file', label='Digitizer\nHead Shape',
tooltip=tooltips['hsp_file']),
Item('hsp_fname', show_label=False, style='readonly'),
Item('fid_file', label='Digitizer\nFiducials',
tooltip=tooltips['fid_file']),
Item('fid_fname', show_label=False, style='readonly'),
Item('reset_dig', label='Clear Digitizer Files',
show_label=False),
Item('use_mrk', editor=use_editor, style='custom',
tooltip=tooltips['use_mrk']),
label="Sources", show_border=True),
VGroup(Item('misc_chs_desc', label='MISC Channels',
style='readonly'),
Item('stim_slope', label="Event Onset", style='custom',
tooltip=tooltips['stim_slope'],
editor=EnumEditor(
values={'+': '2:Peak (0 to 5 V)',
'-': '1:Trough (5 to 0 V)'},
cols=2)),
Item('stim_coding', label="Value Coding", style='custom',
editor=EnumEditor(values={'>': '1:little-endian',
'<': '2:big-endian',
'channel': '3:Channel#'},
cols=3),
tooltip=tooltips["stim_coding"]),
Item('stim_chs', label='Channels', style='custom',
tooltip=tooltips["stim_chs"],
editor=TextEditor(evaluate_name='stim_chs_ok',
auto_set=True)),
Item('stim_chs_comment', label='Evaluation',
style='readonly', show_label=False),
Item('stim_threshold', label='Threshold',
tooltip=tooltips['stim_threshold']),
HGroup(Item('test_stim', enabled_when='can_test_stim',
show_label=False),
Item('plot_raw', enabled_when='can_test_stim',
show_label=False),
show_labels=False),
label='Events', show_border=True),
HGroup(Item('save_as', enabled_when='can_save'), spring,
'clear_all', show_labels=False),
Item('queue_feedback', show_label=False, style='readonly'),
Item('queue_current', show_label=False, style='readonly'),
Item('queue_len_str', show_label=False, style='readonly')
)
)
def __init__(self, *args, **kwargs): # noqa: D102
super(Kit2FiffPanel, self).__init__(*args, **kwargs)
# setup save worker
def worker(): # noqa: D102
while True:
raw, fname = self.queue.get()
basename = os.path.basename(fname)
self.queue_len -= 1
self.queue_current = 'Processing: %s' % basename
# task
try:
raw.save(fname, overwrite=True)
except Exception as err:
self.error = str(err)
res = "Error saving: %s"
else:
res = "Saved: %s"
# finalize
self.queue_current = ''
self.queue_feedback = res % basename
self.queue.task_done()
t = Thread(target=worker)
t.daemon = True
t.start()
# setup mayavi visualization
self.fid_obj = PointObject(scene=self.scene, color=(0.1, 1., 0.1),
point_scale=5e-3, name='Fiducials')
self._update_fid()
self.elp_obj = PointObject(scene=self.scene,
color=(0.196, 0.196, 0.863),
point_scale=1e-2, opacity=.2, name='ELP')
self._update_elp()
self.hsp_obj = PointObject(scene=self.scene, color=(0.784,) * 3,
point_scale=2e-3, name='HSP')
self._update_hsp()
self.scene.camera.parallel_scale = 0.15
self.scene.mlab.view(0, 0, .15)
@on_trait_change('model:fid,model:head_dev_trans')
def _update_fid(self):
if self.fid_obj is not None:
self.fid_obj.points = apply_trans(self.model.head_dev_trans,
self.model.fid)
@on_trait_change('model:hsp,model:head_dev_trans')
def _update_hsp(self):
if self.hsp_obj is not None:
self.hsp_obj.points = apply_trans(self.model.head_dev_trans,
self.model.hsp)
@on_trait_change('model:elp,model:head_dev_trans')
def _update_elp(self):
if self.elp_obj is not None:
self.elp_obj.points = apply_trans(self.model.head_dev_trans,
self.model.elp)
def _clear_all_fired(self):
self.model.clear_all()
@cached_property
def _get_queue_len_str(self):
if self.queue_len:
return "Queue length: %i" % self.queue_len
else:
return ''
def _plot_raw_fired(self):
self.model.raw.plot()
def _reset_dig_fired(self):
self.reset_traits(['hsp_file', 'fid_file'])
def _save_as_fired(self):
# create raw
try:
raw = self.model.get_raw()
except Exception as err:
error(None, str(err), "Error Creating KIT Raw")
raise
# find default path
stem, _ = os.path.splitext(self.sqd_file)
if not stem.endswith('raw'):
stem += '-raw'
default_path = stem + '.fif'
# save as dialog
dlg = FileDialog(action="save as",
wildcard="fiff raw file (*.fif)|*.fif",
default_path=default_path)
dlg.open()
if dlg.return_code != OK:
return
fname = dlg.path
if not fname.endswith('.fif'):
fname += '.fif'
if os.path.exists(fname):
answer = confirm(None, "The file %r already exists. Should it "
"be replaced?", "Overwrite File?")
if answer != YES:
return
self.queue.put((raw, fname))
self.queue_len += 1
def _test_stim_fired(self):
try:
events = self.model.get_event_info()
except Exception as err:
error(None, "Error reading events from SQD data file: %s (Check "
"the terminal output for details)" % str(err),
"Error Reading events from SQD file")
raise
if len(events) == 0:
information(None, "No events were found with the current "
"settings.", "No Events Found")
else:
lines = ["Events found (ID: n events):"]
for id_ in sorted(events):
lines.append("%3i: \t%i" % (id_, events[id_]))
information(None, '\n'.join(lines), "Events in SQD File")
class Kit2FiffFrame(HasTraits):
"""GUI for interpolating between two KIT marker files."""
model = Instance(Kit2FiffModel)
scene = Instance(MlabSceneModel, ())
headview = Instance(HeadViewController)
marker_panel = Instance(CombineMarkersPanel)
kit2fiff_panel = Instance(Kit2FiffPanel)
view = View(HGroup(VGroup(Item('marker_panel', style='custom'),
show_labels=False),
VGroup(Item('scene',
editor=SceneEditor(scene_class=MayaviScene),
dock='vertical', show_label=False),
VGroup(Item('headview', style='custom'),
show_labels=False),
),
VGroup(Item('kit2fiff_panel', style='custom'),
show_labels=False),
show_labels=False,
),
handler=Kit2FiffFrameHandler(),
height=700, resizable=True, buttons=NoButtons)
def __init__(self, *args, **kwargs): # noqa: D102
logger.debug(
"Initializing Kit2fiff-GUI with %s backend", ETSConfig.toolkit)
HasTraits.__init__(self, *args, **kwargs)
# can't be static method due to Traits
def _model_default(self):
# load configuration values and make sure they're valid
config = get_config(home_dir=os.environ.get('_MNE_FAKE_HOME_DIR'))
stim_threshold = 1.
if 'MNE_KIT2FIFF_STIM_CHANNEL_THRESHOLD' in config:
try:
stim_threshold = float(
config['MNE_KIT2FIFF_STIM_CHANNEL_THRESHOLD'])
except ValueError:
warn("Ignoring invalid configuration value for "
"MNE_KIT2FIFF_STIM_CHANNEL_THRESHOLD: %r (expected "
"float)" %
(config['MNE_KIT2FIFF_STIM_CHANNEL_THRESHOLD'],))
stim_slope = config.get('MNE_KIT2FIFF_STIM_CHANNEL_SLOPE', '-')
if stim_slope not in '+-':
warn("Ignoring invalid configuration value for "
"MNE_KIT2FIFF_STIM_CHANNEL_THRESHOLD: %s (expected + or -)" %
stim_slope)
stim_slope = '-'
stim_coding = config.get('MNE_KIT2FIFF_STIM_CHANNEL_CODING', '>')
if stim_coding not in ('<', '>', 'channel'):
warn("Ignoring invalid configuration value for "
"MNE_KIT2FIFF_STIM_CHANNEL_CODING: %s (expected <, > or "
"channel)" % stim_coding)
stim_coding = '>'
return Kit2FiffModel(
stim_chs=config.get('MNE_KIT2FIFF_STIM_CHANNELS', ''),
stim_coding=stim_coding,
stim_slope=stim_slope,
stim_threshold=stim_threshold,
show_gui=True)
def _headview_default(self):
return HeadViewController(scene=self.scene, scale=160, system='RAS')
def _kit2fiff_panel_default(self):
return Kit2FiffPanel(scene=self.scene, model=self.model)
def _marker_panel_default(self):
return CombineMarkersPanel(scene=self.scene, model=self.model.markers,
trans=als_ras_trans)
def save_config(self, home_dir=None):
"""Write configuration values."""
set_config('MNE_KIT2FIFF_STIM_CHANNELS', self.model.stim_chs, home_dir,
set_env=False)
set_config('MNE_KIT2FIFF_STIM_CHANNEL_CODING', self.model.stim_coding,
home_dir, set_env=False)
set_config('MNE_KIT2FIFF_STIM_CHANNEL_SLOPE', self.model.stim_slope,
home_dir, set_env=False)
set_config('MNE_KIT2FIFF_STIM_CHANNEL_THRESHOLD',
str(self.model.stim_threshold), home_dir, set_env=False)
|
olafhauk/mne-python
|
mne/gui/_kit2fiff_gui.py
|
Python
|
bsd-3-clause
| 28,812
|
[
"Mayavi"
] |
ef763681321d70bfc693025f82de315e59030ede6b9c2b52d5cd0116bf3c6f6a
|
from __future__ import print_function
__author__ = """Alex "O." Holcombe, Charles Ludowici, """ ## double-quotes will be silently removed, single quotes will be left, eg, O'Connor
import time, sys, platform, os
from math import atan, atan2, pi, cos, sin, sqrt, ceil, radians, degrees
import numpy as np
import psychopy, psychopy.info
import copy
from psychopy import visual, sound, monitors, logging, gui, event, core, data
try:
from helpersAOH import accelerateComputer, openMyStimWindow
except Exception as e:
print(e); print('Problem loading helpersAOH. Check that the file helpersAOH.py is in the same directory as this file')
print('Current directory is ',os.getcwd())
eyeTracking = False
if eyeTracking:
try:
import eyelinkEyetrackerForPsychopySUPA3
except Exception as e:
print(e)
print('Problem loading eyelinkEyetrackerForPsychopySUPA3. Check that the file eyelinkEyetrackerForPsychopySUPA3.py is in the same directory as this file')
print('While a different version of pylink might make your eyetracking code work, your code appears to generally be out of date. Rewrite your eyetracker code based on the SR website examples')
#Psychopy v1.83.01 broke this: the pylink version prevents EyelinkEyetrackerForPsychopySUPA3 from importing. What really needs to be done is to update the eyetracking code to more modern calls, as indicated on the SR site
eyeTracking = False
expname= "dot-jump"
demo = False; exportImages = False
autopilot = False
subject='test'
###############################
### Setup the screen parameters ##############################################################################################
##
allowGUI = False
units='deg' #'cm'
fullscrn=False
waitBlank=False
if True: #just so I can indent all the below
refreshRate= 85 *1.0; #160 #set to the framerate of the monitor
fullscrn=True; #show in small window (0) or full screen (1)
scrn=1 #which screen to display the stimuli. 0 is home screen, 1 is second screen
# create a dialog from dictionary
infoFirst = { 'Autopilot':autopilot, 'Check refresh etc':True, 'Use second screen':scrn, 'Fullscreen (timing errors if not)': fullscrn, 'Screen refresh rate': refreshRate }
OK = gui.DlgFromDict(dictionary=infoFirst,
title='MOT',
order=['Autopilot','Check refresh etc', 'Use second screen', 'Screen refresh rate', 'Fullscreen (timing errors if not)'],
tip={'Check refresh etc': 'To confirm refresh rate and that the computer can keep up, at least when drawing a grating',
'Use second screen': ''},
)
if not OK.OK:
print('User cancelled from dialog box'); logging.info('User cancelled from dialog box'); core.quit()
autopilot = infoFirst['Autopilot']
checkRefreshEtc = infoFirst['Check refresh etc']
scrn = infoFirst['Use second screen']
print('scrn = ',scrn, ' from dialog box')
fullscrn = infoFirst['Fullscreen (timing errors if not)']
refreshRate = infoFirst['Screen refresh rate']
#monitor parameters
widthPix = 1280 #1440 #monitor width in pixels
heightPix =1024 #900 #monitor height in pixels
monitorwidth = 40.5 #28.5 #monitor width in centimeters
viewdist = 55.; #cm
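#approximate pixels per degree of visual angle (treats the full width as subtending atan(w/d) degrees)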
pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)
bgColor = [-1,-1,-1] #black background
monitorname = 'testMonitor' # 'mitsubishi' #in psychopy Monitors Center
mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#fetch the most recent calib for this monitor
mon.setSizePix( (widthPix,heightPix) )
myWin = openMyStimWindow(mon,widthPix,heightPix,bgColor,allowGUI,units,fullscrn,scrn,waitBlank)
myWin.setRecordFrameIntervals(False)
trialsPerCondition = 2 #default value
refreshMsg2 = ''
if not checkRefreshEtc:
refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
refreshRateWrong = False
else: #checkRefreshEtc
runInfo = psychopy.info.RunTimeInfo(
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=True, ## True means report on everything
userProcsDetailed=True ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
)
print('Finished runInfo, which assesses the refresh rate and processes of this computer')
refreshMsg1 = 'Median frames per second ='+ str( np.round(1000./runInfo["windowRefreshTimeMedian_ms"],1) )
refreshRateTolerancePct = 3
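#flag the refresh rate as wrong if the measured rate deviates from the assumed rate by more than this percentage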
pctOff = abs( (1000./runInfo["windowRefreshTimeMedian_ms"]-refreshRate) / refreshRate)
refreshRateWrong = pctOff > (refreshRateTolerancePct/100.)
if refreshRateWrong:
refreshMsg1 += ' BUT'
refreshMsg1 += ' program assumes ' + str(refreshRate)
refreshMsg2 = 'which is off by more than ' + str(round(refreshRateTolerancePct,0)) + '%!!'
else:
refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
myWinRes = myWin.size
myWin.allowGUI =True
myWin.close() #have to close window to show dialog box
##
### END Setup of the screen parameters ##############################################################################################
####################################
askUserAndConfirmExpParams = True
if autopilot:
subject = 'autoTest'
###############################
### Ask user exp params ##############################################################################################
## askUserAndConfirmExpParams
if askUserAndConfirmExpParams:
dlgLabelsOrdered = list() #new dialog box
myDlg = gui.Dlg(title=expname, pos=(200,400))
if not autopilot:
myDlg.addField('Subject code :', subject)
dlgLabelsOrdered.append('subject')
else:
myDlg.addField('Subject code :', subject)
dlgLabelsOrdered.append('subject')
myDlg.addField('autoPilotTime:', 0, tip='Auto response time relative to cue')
myDlg.addField('randomTime:',False, tip = 'Add (rounded) gaussian N(0,2) error to time offset?')
myDlg.addField('autoPilotSpace:',0, tip='Auto response position relative to cue')
myDlg.addField('randomSpace:',False, tip = 'Add (rounded) gaussian N(0,2) error to space offset?')
dlgLabelsOrdered.append('autoPilotTime')
dlgLabelsOrdered.append('randomTime')
dlgLabelsOrdered.append('autoPilotSpace')
dlgLabelsOrdered.append('randomSpace')
myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))
dlgLabelsOrdered.append('trialsPerCondition')
pctCompletedBreak = 50
myDlg.addText(refreshMsg1, color='Black')
if refreshRateWrong:
myDlg.addText(refreshMsg2, color='Red')
msgWrongResolution = ''
if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
msgWrongResolution = 'Instead of desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels, screen apparently '+ str(myWinRes[0])+ 'x'+ str(myWinRes[1])
myDlg.addText(msgWrongResolution, color='Red')
print(msgWrongResolution); logging.info(msgWrongResolution)
myDlg.addText('Note: to abort press ESC at response time', color='DimGrey') #works in PsychoPy1.84
#myDlg.addText('Note: to abort press ESC at a trials response screen', color=[-1.,1.,-1.]) #color names not working for some pre-1.84 versions
myDlg.show()
if myDlg.OK: #unpack information from dialogue box
thisInfo = myDlg.data #this will be a list of data returned from each field added in order
name=thisInfo[dlgLabelsOrdered.index('subject')]
if len(name) > 0: #if entered something
subject = name #change subject default name to what user entered
trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer
print('trialsPerCondition=',trialsPerCondition)
logging.info('trialsPerCondition ='+str(trialsPerCondition))
if autopilot:
autoSpace = thisInfo[dlgLabelsOrdered.index('autoPilotSpace')]
autoTime = thisInfo[dlgLabelsOrdered.index('autoPilotTime')]
randomTime = thisInfo[dlgLabelsOrdered.index('randomTime')]
randomSpace = thisInfo[dlgLabelsOrdered.index('randomSpace')]
else:
print('User cancelled from dialog box.'); logging.info('User cancelled from dialog box')
logging.flush()
core.quit()
### Ask user exp params
## END askUserAndConfirmExpParams ###############################
##############################################################################################
if os.path.isdir('.'+os.sep+'dataRaw'):
dataDir='dataRaw'
else:
msg= 'dataRaw directory does not exist, so saving data in present working directory'
print(msg); logging.info(msg)
dataDir='.'
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
fileNameWithPath = dataDir+os.sep+subject+ '_' + expname+timeAndDateStr
if not demo and not exportImages:
saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileNameWithPath + '.py'
os.system(saveCodeCmd) #save a copy of the code as it was when that subject was run
logF = logging.LogFile(fileNameWithPath+'.log',
filemode='w',#if you set this to 'a' it will append instead of overwriting
level=logging.INFO)#info, data, warnings, and errors will be sent to this logfile
if demo or exportImages:
logging.console.setLevel(logging.ERROR) #only show this level's and higher messages
logging.console.setLevel(logging.WARNING) #DEBUG means the console receives nearly all messages; the levels in increasing severity are DEBUG, INFO, EXP, DATA, WARNING and ERROR
if refreshRateWrong:
logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
longerThanRefreshTolerance = 0.27
longFrameLimit = round(1000./refreshRate*(1.0+longerThanRefreshTolerance),3) # round(1000/refreshRate*1.5,2)
msg = 'longFrameLimit=' + str(longFrameLimit) + '. Recording trials where one or more interframe intervals exceeded this limit'
logging.info(msg); print(msg)
if msgWrongResolution != '':
logging.error(msgWrongResolution)
myWin = openMyStimWindow(mon,widthPix,heightPix,bgColor,allowGUI,units,fullscrn,scrn,waitBlank)
runInfo = psychopy.info.RunTimeInfo(
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=True, ## True means report on everything
userProcsDetailed=True ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
)
msg = 'second window opening runInfo mean ms='+ str( runInfo["windowRefreshTimeAvg_ms"] )
logging.info(msg); print(msg)
logging.info(runInfo)
logging.info('gammaGrid='+str(mon.getGammaGrid()))
logging.info('linearizeMethod='+str(mon.getLinearizeMethod()))
####Functions. Save time by automating processes like stimulus creation and ordering
############################################################################
def oneFrameOfStim(n, itemFrames, SOAFrames, cueFrames, cuePos, trialObjects):
cueFrame = cuePos * SOAFrames
cueMax = cueFrame + cueFrames
showIdx = int(np.floor(n/SOAFrames))
#objectIdxs = [i for i in range(len(trialObjects))]
#objectIdxs.append(len(trialObjects)-1) #AWFUL hack
#print(objectIdxs[showIdx])
#floored quotient
obj = trialObjects[showIdx]
drawObject = n%SOAFrames < itemFrames
if drawObject:
myWin.color = bgColor
if n >= cueFrame and n < cueMax:
#print('cueFrames! n is', n,'. cueFrame is ,', cueFrame, 'cueFrame + cueFrames is ', (cueFrame + cueFrames))
#if n%2 == 0: #This should make it flash, but it might be too fast
#print('cue flash')
#myWin.color = (0,0,0)
obj.draw()
cue.draw()
else:
obj.draw()
return True
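#Editor's note (not in the original): within each SOA window of SOAFrames frames,
#the current item is drawn only for the first itemFrames frames (the remainder is the ISI),
#and the cue circle is drawn in addition on frames n in [cuePos*SOAFrames, cuePos*SOAFrames + cueFrames).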
#Template notes for trial functions like this one:
#objects: stimuli to display
#cue: cue stimulus or stimuli
#timing parameters: could be item duration, SOA and ISI, e.g. if (SOA+duration) % n == 0: stimulus.setColor(stimulusColor)
#bgColor and stimulusColor: for showing and hiding stimuli, e.g. for RSVP
#movementVector: direction and distance of movement, for moving stimuli
def oneTrial(stimuli):
dotOrder = np.arange(len(stimuli))
np.random.shuffle(dotOrder)
print(dotOrder)
shuffledStimuli = [stimuli[i] for i in dotOrder]
ts = []
myWin.flip(); myWin.flip() #Make sure raster at top of screen (unless not in blocking mode), and give CPU a chance to finish other tasks
t0 = trialClock.getTime()
for n in range(trialFrames):
fixation.draw()
#print(n//SOAFrames)
oneFrameOfStim(n, itemFrames, SOAFrames, cueFrames, cuePos, shuffledStimuli)
myWin.flip()
ts.append(trialClock.getTime() - t0)
return True, shuffledStimuli, dotOrder, ts
def getResponse(trialStimuli):
if autopilot:
spacing = 360./nDots
autoResponseIdx = cuePos + autoTime #The serial position of the response in the stream
if randomTime:
autoResponseIdx += int(round( np.random.normal(0,2) ))
itemAtTemporalSelection = trialStimuli[autoResponseIdx]
unshuffledPositions = [dot.pos.tolist() for dot in stimuli]
itemSpatial = unshuffledPositions.index(itemAtTemporalSelection.pos.tolist())
itemSpatial = itemSpatial + autoSpace
if randomSpace:
itemSpatial += int(round( np.random.normal(0,2) ))
itemSpatial = itemSpatial % nDots #wrap around the circle of dots; valid spatial indices are 0..nDots-1 (also handles negative offsets)
#Once we have temporal pos of selected item relative to start of the trial
#Need to get the serial spatial pos of this item, so that we can select items around it based on the autoSpace offset
#print('itemSpatial is: ', itemSpatial)
selectionTemporal = trialStimuli.index(stimuli[itemSpatial]) #This seems redundant, but it tests that the item we've selected in space is the cued item in time. if the temporal and spatial offsets are 0, it should be the same as cuePos.
accuracy = cuePos == selectionTemporal
mousePos = (stimuli[itemSpatial].pos[0],stimuli[itemSpatial].pos[1])
expStop = False
item = stimuli[itemSpatial]
return accuracy, item, expStop, mousePos
elif not autopilot:
myMouse = event.Mouse(visible = False,win=myWin)
responded = False
expStop = False
event.clearEvents()
mousePos = (1e6,1e6)
escape = event.getKeys()
myMouse.setPos((0,0))
myMouse.setVisible(True)
while not responded:
for item in trialStimuli:
item.draw()
myWin.flip()
button = myMouse.getPressed()
mousePos = myMouse.getPos()
escapeKey = event.getKeys()
if button[0]:
print('click detected')
responded = True
print('getResponse mousePos:',mousePos)
elif len(escapeKey)>0:
if escapeKey[0] == 'space' or escapeKey[0] == 'escape': #PsychoPy reports key names in lowercase
expStop = True
responded = True
return False, np.random.choice(trialStimuli), expStop, (0,0)
clickDistances = []
for item in trialStimuli:
x = mousePos[0] - item.pos[0]
y = mousePos[1] - item.pos[1]
distance = sqrt(x**2 + y**2)
clickDistances.append(distance)
if not expStop:
minDistanceIdx = clickDistances.index(min(clickDistances))
accuracy = minDistanceIdx == cuePos
item = trialStimuli[minDistanceIdx]
myMouse.setVisible(False)
return accuracy, item, expStop, mousePos
def drawStimuli(nDots, radius, center, stimulusObject, sameEachTime = True):
if len(center) != 2:
print('Center coords must be list of length 2')
return None
if not sameEachTime and not isinstance(stimulusObject, (list, tuple)):
print('You want different objects in each position, but your stimulus object is not a list or tuple')
return None
if not sameEachTime and isinstance(stimulusObject, (list, tuple)) and len(stimulusObject)!=nDots:
print('You want different objects in each position, but the number of positions does not equal the number of items')
return None
spacing = 360./nDots
stimuli = []
for dot in range(nDots): #specify positions explicitly for multiples of 90 deg, because floating point can't store pi exactly, so cos(pi/2) evaluates to 6.123e-17 rather than 0
angle = dot*spacing
if angle == 0:
xpos = radius
ypos = 0
elif angle == 90:
xpos = 0
ypos = radius
elif angle == 180:
xpos = -radius
ypos = 0
elif angle == 270:
xpos = 0
ypos = -radius
else: #angle is not a multiple of 90 deg
xpos = radius*cos(radians(angle))
ypos = radius*sin(radians(angle))
if sameEachTime:
stim = copy.copy(stimulusObject)
elif not sameEachTime:
stim = stimulusObject[dot]
stim.pos = (xpos,ypos)
stimuli.append(stim)
return stimuli
def checkTiming(ts):
interframeIntervals = np.diff(ts) * 1000
#print(interframeIntervals)
frameTimeTolerance=.3 #proportion longer than refreshRate that will not count as a miss
longFrameLimit = np.round(1000/refreshRate*(1.0+frameTimeTolerance),2)
idxsInterframeLong = np.where( interframeIntervals > longFrameLimit ) [0] #frames that exceeded 130% of the expected duration
numCasesInterframeLong = len( idxsInterframeLong )
if numCasesInterframeLong > 0:
print(numCasesInterframeLong,'frames of', trialFrames,'were longer than',str(1000/refreshRate*(1.0+frameTimeTolerance)))
return numCasesInterframeLong
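#Editor's worked example (assuming a 100 Hz display): the expected frame duration is
#1000/100 = 10 ms, so with frameTimeTolerance = .3 the cutoff is np.round(10*1.3, 2) = 13.0 ms;
#any interframe interval longer than 13 ms is counted as a long frame.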
##Set up stimuli
stimulus = visual.Circle(myWin, radius = .2, fillColor = (1,1,1) )
nDots = 24
radius = 4
center = (0,0)
sameEachTime = True
#(nDots, radius, center, stimulusObject, sameEachTime = True)
stimuli = drawStimuli(nDots, radius, center, stimulus, sameEachTime)
#print(stimuli)
#print('length of stimuli object', len(stimuli))
######Create visual objects, noise masks, response prompts etc. ###########
######Draw your stimuli here if they don't change across trials but other parameters do (like timing or distance)
######If you want to automate your stimuli, do it in a function below to save clutter.
######For instance, maybe you want random pairs of letters: write a function!
###########################################################################
fixSize = .1
fixation= visual.Circle(myWin, radius = fixSize , fillColor = (1,1,1), units=units)
cue = visual.Circle(myWin, radius = radius + 2, fillColor = None, lineColor = (1,1,1), units = units)
###Trial timing parameters
SOAMS = 12
itemMS = 12
ISIMS = SOAMS - itemMS
trialMS = SOAMS * nDots
cueMS = itemMS
SOAFrames = int(np.floor(SOAMS/(1000./refreshRate)))
itemFrames = int(np.floor(itemMS/(1000./refreshRate)))
ISIFrames = int(np.floor(ISIMS/(1000./refreshRate)))
trialFrames = int(nDots*SOAFrames)
cueFrames = int(np.floor(cueMS/(1000./refreshRate)))
print('cueFrames=',cueFrames)
print('itemFrames=',itemFrames)
print('refreshRate =', refreshRate)
print('cueMS from frames =', cueFrames*(1000./refreshRate))
print('num of SOAs in the trial:', trialFrames/SOAFrames)
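#Editor's note: these frame counts assume a fast display. With SOAMS = 12 ms, any
#refresh rate at or below ~83 Hz makes int(np.floor(12/(1000./refreshRate))) equal 0,
#so SOAFrames would be 0 and the print above (trialFrames/SOAFrames) would divide by zero.
#At 120 Hz (8.33 ms frames) SOAFrames = 1 and trialFrames = nDots = 24.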
##Factorial design
numResponsesPerTrial = 1 #default. Used to create headers for dataFile
stimList = []
#cuePositions = [dot for dot in range(nDots) if dot not in [0,nDots-1]]
cuePositions = [10]
print('cuePositions: ',cuePositions)
#cuePositions = cuePositions[2:(nDots-3)] #drop the first and final two dots
#Set up the factorial design (list of all conditions)
for cuePos in cuePositions:
stimList.append({'cuePos':cuePos})
trials = data.TrialHandler(stimList, nReps = trialsPerCondition)
#print(trials)
####Create output file###
#########################################################################
dataFile = open(fileNameWithPath + '.txt', 'w')
numResponsesPerTrial = 1
#Headers for one-off datafile columns; they don't get repeated. These appear in the file in the order listed here.
oneOffHeaders = [
'subject',
'task',
'staircase',
'trialNum'
]
for header in oneOffHeaders:
print(header, '\t', end='', file=dataFile)
#Headers for duplicated datafile rows. These are repeated using numResponsesPerTrial. For instance, we might have two responses in a trial.
duplicatedHeaders = [
'responseSpatialPos',
'responseX',
'responseY',
'correctX',
'correctY',
'clickX',
'clickY',
'accuracy',
'responsePosInStream',
'correctPosInStream'
]
if numResponsesPerTrial == 1:
for header in duplicatedHeaders:
print(header, '\t', end='', file=dataFile)
elif numResponsesPerTrial > 1:
for response in range(numResponsesPerTrial):
for header in duplicatedHeaders:
print(header+str(response), '\t', end='', file=dataFile)
for pos in range(nDots):
print('position'+str(pos),'\t',end='',file=dataFile)
#Headers done. Do a new line
print('longFrames',file=dataFile)
trialNum=0; numTrialsCorrect=0; expStop=False; framesSaved=0
print('Starting experiment of',trials.nTotal,'trials. Current trial is trial ',trialNum)
#NextRemindCountText.setText( str(trialNum) + ' of ' + str(trials.nTotal) )
#NextRemindCountText.draw()
myWin.flip()
#end of header
trialClock = core.Clock()
stimClock = core.Clock()
if eyeTracking:
if getEyeTrackingFileFromEyetrackingMachineAtEndOfExperiment:
eyeMoveFile=('EyeTrack_'+subject+'_'+timeAndDateStr+'.EDF')
tracker=Tracker_EyeLink(myWin,trialClock,subject,1, 'HV5',(255,255,255),(0,0,0),False,(widthPix,heightPix))
while trialNum < trials.nTotal and expStop==False:
fixation.draw()
myWin.flip()
if not autopilot:
core.wait(1)
trial = trials.next()
# print('trial idx is',trials.thisIndex)
cuePos = trial.cuePos
# print(cuePos)
print("Doing trialNum",trialNum)
trialDone, trialStimuli, trialStimuliOrder, ts = oneTrial(stimuli)
#Shift positions so that the list starts at 1, which is positioned at (0,radius), and increases clockwise. This is what the MM code expects
MMPositions = list() #Mixture modelling positions
for dotPos in trialStimuliOrder:
if dotPos < (nDots/4): #Because python indexes start at 0, 5 is the 6th pos.
MMPositions.append(dotPos + 20)
elif dotPos >= (nDots/4):
MMPositions.append(dotPos -4)
nBlips = checkTiming(ts)
# print(trialStimuliOrder)
if trialDone:
accuracy, response, expStop, clickPos = getResponse(trialStimuli)
responseCoord = response.pos.tolist()
spatialRelativeToXAxis = [item.pos.tolist() for item in stimuli]
try:
responseSpatialRelativeToXAxis = spatialRelativeToXAxis.index(responseCoord)
except ValueError:
print('coord not in list') #caution: if this fires, responseSpatialRelativeToXAxis is never bound and the comparison below raises NameError
if responseSpatialRelativeToXAxis < (nDots/4):
responseSpatial = responseSpatialRelativeToXAxis + 20
elif responseSpatialRelativeToXAxis >= (nDots/4):
responseSpatial = responseSpatialRelativeToXAxis - 4
trialPositions = [item.pos.tolist() for item in trialStimuli]
responseTemporal = trialPositions.index(responseCoord)
# print('trial positions in sequence:',trialPositions)
# print('position of item nearest to click:',responseSpatial)
# print('Position in sequence of item nearest to click:',responseTemporal)
correctSpatial = trialStimuli[cuePos].pos
correctTemporal = cuePos
print(subject,'\t',
'dot-jump','\t',
'False','\t',
trialNum,'\t',
responseSpatial,'\t',
responseCoord[0],'\t',
responseCoord[1],'\t',
correctSpatial[0],'\t',
correctSpatial[1],'\t',
clickPos[0],'\t',
clickPos[1],'\t',
accuracy,'\t',
responseTemporal,'\t',
correctTemporal,'\t',
end='',
file = dataFile
)
for dot in range(nDots):
print(MMPositions[dot], '\t',end='', file=dataFile)
print(nBlips, file=dataFile)
trialNum += 1
dataFile.flush()
if expStop:
print('Participant cancelled experiment on trial', trialNum)
dataFile.flush()
|
alexholcombe/dot-jump
|
dataRaw/Fixed Cue/test_dot-jump25Oct2016_11-18.py
|
Python
|
gpl-3.0
| 25,110
|
[
"Gaussian"
] |
bf1976f0faca87a05999bf287fe09c7154972287a7f54603aa88b81d087c0f65
|
"""
weatherBot keys
Copyright 2015-2019 Brian Mitchell under the MIT license
See the GitHub repository: https://github.com/BrianMitchL/weatherBot
"""
import os
KEYS = {
'consumer_key': 'xxx',
'consumer_secret': 'xxx',
'access_token': 'xxx',
'access_token_secret': 'xxx',
'darksky_key': 'xxx'
}
def set_twitter_env_vars():
"""
If any of the Twitter environmental variables are not set, set them based on the keys dict
"""
if os.getenv('WEATHERBOT_CONSUMER_KEY') is None or os.getenv('WEATHERBOT_CONSUMER_SECRET') is None \
or os.getenv('WEATHERBOT_ACCESS_TOKEN') is None or os.getenv('WEATHERBOT_ACCESS_TOKEN_SECRET') is None:
os.environ['WEATHERBOT_CONSUMER_KEY'] = KEYS['consumer_key']
os.environ['WEATHERBOT_CONSUMER_SECRET'] = KEYS['consumer_secret']
os.environ['WEATHERBOT_ACCESS_TOKEN'] = KEYS['access_token']
os.environ['WEATHERBOT_ACCESS_TOKEN_SECRET'] = KEYS['access_token_secret']
def set_darksky_env_vars():
"""
If no Dark Sky environmental variable is set, set it based on the keys dict
"""
if os.getenv('WEATHERBOT_DARKSKY_KEY') is None:
os.environ['WEATHERBOT_DARKSKY_KEY'] = KEYS['darksky_key']
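# Editor's sketch (not part of the original module): a minimal, hypothetical
# startup snippet showing how these helpers might be used; after calling them,
# downstream code can rely on the WEATHERBOT_* environment variables.
set_twitter_env_vars()
set_darksky_env_vars()
print(os.getenv('WEATHERBOT_DARKSKY_KEY')) # 'xxx' unless already set in the environment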
|
bman4789/weatherBot
|
keys.py
|
Python
|
mit
| 1,221
|
[
"Brian"
] |
31f748995cfa77d8a4f5981dcac60ea5ab547fc68926aab9cbf09b8e9248529b
|
import simtk.openmm.app.element as elem
class Element(elem.Element):
"""An Element represents a chemical element.
The simtk.openmm.app.element module contains objects for all the standard chemical elements,
such as element.hydrogen or element.carbon. You can also call the static method Element.getBySymbol() to
look up the Element with a particular chemical symbol.
Element objects should be considered immutable
"""
def __init__(self, number, name, symbol, mass):
"""Create a new element
Parameters
----------
number : int
The atomic number of the element
name : string
The name of the element
symbol : string
The chemical symbol of the element
mass : float
The atomic mass of the element
"""
## The atomic number of the element
self._atomic_number = number
## The name of the element
self._name = name
## The chemical symbol of the element
self._symbol = symbol
## The atomic mass of the element
self._mass = mass
# Index this element in a global table
s = symbol.strip().upper()
## If we add a new element, we need to re-hash elements by mass
Element._elements_by_mass = None
if s in Element._elements_by_symbol:
raise ValueError('Duplicate element symbol %s' % s)
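# Editor's sketch (not in the original file): the class docstring above points to
# the lookup API inherited from simtk.openmm.app.element, e.g.
# carbon = Element.getBySymbol('C')
# Instantiating a new Element with a symbol that is already registered would
# raise the ValueError above.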
|
ctk3b/foyer
|
foyer/element.py
|
Python
|
mit
| 1,431
|
[
"OpenMM"
] |
3e2bfaa2dd02d0f9dc255373a13c3e15c64a2a59afe7bcb745d1f7a15859018e
|
from django.db import connection
from django.template import RequestContext, loader
from django.utils.html import mark_safe
from django.shortcuts import render_to_response
from django.core import urlresolvers
from django.http import HttpResponseNotFound
from zerver.decorator import has_request_variables, REQ, zulip_internal
from zerver.models import get_realm, UserActivity, UserActivityInterval, Realm
from zerver.lib.timestamp import timestamp_to_datetime
from collections import defaultdict
from datetime import datetime, timedelta
import itertools
import time
import re
import pytz
eastern_tz = pytz.timezone('US/Eastern')
def make_table(title, cols, rows, has_row_class=False):
if not has_row_class:
def fix_row(row):
return dict(cells=row, row_class=None)
rows = map(fix_row, rows)
data = dict(title=title, cols=cols, rows=rows)
content = loader.render_to_string(
'analytics/ad_hoc_query.html',
dict(data=data)
)
return content
def dictfetchall(cursor):
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
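# Editor's illustration (not in the original): with cursor.description naming
# columns ('id', 'name') and a fetched row (1, 'alice'), dictfetchall returns
# [{'id': 1, 'name': 'alice'}].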
def get_realm_day_counts():
query = '''
select
r.domain,
(now()::date - pub_date::date) age,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
join zerver_client c on c.id = m.sending_client_id
where
(not up.is_bot)
and
pub_date > now()::date - interval '8 day'
and
c.name not in ('zephyr_mirror', 'ZulipMonitoring')
group by
r.domain,
age
order by
r.domain,
age
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
counts = defaultdict(dict)
for row in rows:
counts[row['domain']][row['age']] = row['cnt']
result = {}
for domain in counts:
cnts = [counts[domain].get(age, 0) for age in range(8)]
min_cnt = min(cnts)
max_cnt = max(cnts)
def format_count(cnt):
if cnt == min_cnt:
good_bad = 'bad'
elif cnt == max_cnt:
good_bad = 'good'
else:
good_bad = 'neutral'
return '<td class="number %s">%s</td>' % (good_bad, cnt)
cnts = ''.join(map(format_count, cnts))
result[domain] = dict(cnts=cnts)
return result
def realm_summary_table(realm_minutes):
query = '''
SELECT
realm.domain,
coalesce(user_counts.active_user_count, 0) active_user_count,
coalesce(at_risk_counts.at_risk_count, 0) at_risk_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND not is_bot
) user_profile_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND is_bot
) bot_count
FROM zerver_realm realm
LEFT OUTER JOIN
(
SELECT
up.realm_id realm_id,
count(distinct(ua.user_profile_id)) active_user_count
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer'
)
AND
last_visit > now() - interval '1 day'
AND
not is_bot
GROUP BY realm_id
) user_counts
ON user_counts.realm_id = realm.id
LEFT OUTER JOIN
(
SELECT
realm_id,
count(*) at_risk_count
FROM (
SELECT
realm.id as realm_id,
up.email
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
JOIN zerver_realm realm
ON realm.id = up.realm_id
WHERE up.is_active
AND (not up.is_bot)
AND
ua.query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer'
)
GROUP by realm.id, up.email
HAVING max(last_visit) between
now() - interval '7 day' and
now() - interval '1 day'
) as at_risk_users
GROUP BY realm_id
) at_risk_counts
ON at_risk_counts.realm_id = realm.id
WHERE EXISTS (
SELECT *
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'/api/v1/send_message',
'send_message_backend',
'/json/update_pointer'
)
AND
up.realm_id = realm.id
AND
last_visit > now() - interval '2 week'
)
ORDER BY active_user_count DESC, domain ASC
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
# get messages sent per day
counts = get_realm_day_counts()
for row in rows:
try:
row['history'] = counts[row['domain']]['cnts']
except KeyError: #domain may be absent from the day-counts dict
row['history'] = ''
# augment data with realm_minutes
total_hours = 0
for row in rows:
domain = row['domain']
minutes = realm_minutes.get(domain, 0)
hours = minutes / 60.0
total_hours += hours
row['hours'] = str(int(hours))
try:
row['hours_per_user'] = '%.1f' % (hours / row['active_user_count'],)
except ZeroDivisionError: #active_user_count may be 0
pass
# formatting
for row in rows:
row['domain'] = realm_activity_link(row['domain'])
# Count active sites
def meets_goal(row):
return row['active_user_count'] >= 5
num_active_sites = len(filter(meets_goal, rows))
# create totals
total_active_user_count = 0
total_user_profile_count = 0
total_bot_count = 0
for row in rows:
total_active_user_count += int(row['active_user_count'])
total_user_profile_count += int(row['user_profile_count'])
total_bot_count += int(row['bot_count'])
rows.append(dict(
domain='Total',
active_user_count=total_active_user_count,
user_profile_count=total_user_profile_count,
bot_count=total_bot_count,
hours=int(total_hours)
))
content = loader.render_to_string(
'analytics/realm_summary_table.html',
dict(rows=rows, num_active_sites=num_active_sites)
)
return content
def user_activity_intervals():
day_end = timestamp_to_datetime(time.time())
day_start = day_end - timedelta(hours=24)
output = "Per-user online duration for the last 24 hours:\n"
total_duration = timedelta(0)
all_intervals = UserActivityInterval.objects.filter(
end__gte=day_start,
start__lte=day_end
).select_related(
'user_profile',
'user_profile__realm'
).only(
'start',
'end',
'user_profile__email',
'user_profile__realm__domain'
).order_by(
'user_profile__realm__domain',
'user_profile__email'
)
by_domain = lambda row: row.user_profile.realm.domain
by_email = lambda row: row.user_profile.email
realm_minutes = {}
for domain, realm_intervals in itertools.groupby(all_intervals, by_domain):
realm_duration = timedelta(0)
output += '<hr>%s\n' % (domain,)
for email, intervals in itertools.groupby(realm_intervals, by_email):
duration = timedelta(0)
for interval in intervals:
start = max(day_start, interval.start)
end = min(day_end, interval.end)
duration += end - start
total_duration += duration
realm_duration += duration
output += " %-*s%s\n" % (37, email, duration, )
realm_minutes[domain] = realm_duration.total_seconds() / 60
output += "\nTotal Duration: %s\n" % (total_duration,)
output += "\nTotal Duration in minutes: %s\n" % (total_duration.total_seconds() / 60.,)
output += "Total Duration amortized to a month: %s" % (total_duration.total_seconds() * 30. / 60.,)
content = mark_safe('<pre>' + output + '</pre>')
return content, realm_minutes
def sent_messages_report(realm):
title = 'Recently sent messages for ' + realm
cols = [
'Date',
'Humans',
'Bots'
]
query = '''
select
series.day::date,
humans.cnt,
bots.cnt
from (
select generate_series(
(now()::date - interval '2 week'),
now()::date,
interval '1 day'
) as day
) as series
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.domain = %s
and
(not up.is_bot)
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) humans on
series.day = humans.pub_date
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.domain = %s
and
up.is_bot
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) bots on
series.day = bots.pub_date
'''
cursor = connection.cursor()
cursor.execute(query, [realm, realm])
rows = cursor.fetchall()
cursor.close()
return make_table(title, cols, rows)
def ad_hoc_queries():
def get_page(query, cols, title):
cursor = connection.cursor()
cursor.execute(query)
rows = cursor.fetchall()
rows = map(list, rows)
cursor.close()
def fix_rows(i, fixup_func):
for row in rows:
row[i] = fixup_func(row[i])
for i, col in enumerate(cols):
if col == 'Domain':
fix_rows(i, realm_activity_link)
elif col in ['Last time', 'Last visit']:
fix_rows(i, format_date_for_activity_reports)
content = make_table(title, cols, rows)
return dict(
content=content,
title=title
)
pages = []
###
for mobile_type in ['Android', 'ZulipiOS']:
title = '%s usage' % (mobile_type,)
query = '''
select
realm.domain,
up.id user_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like '%s'
group by domain, up.id, client.name
having max(last_visit) > now() - interval '2 week'
order by domain, up.id, client.name
''' % (mobile_type,)
cols = [
'Domain',
'User id',
'Name',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Desktop users'
query = '''
select
realm.domain,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like 'desktop%%'
group by domain, client.name
having max(last_visit) > now() - interval '2 week'
order by domain, client.name
'''
cols = [
'Domain',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by domain'
query = '''
select
realm.domain,
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by domain, client_name
having max(last_visit) > now() - interval '2 week'
order by domain, client_name
'''
cols = [
'Domain',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by client'
query = '''
select
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
realm.domain,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by client_name, domain
having max(last_visit) > now() - interval '2 week'
order by client_name, domain
'''
cols = [
'Client',
'Domain',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
return pages
@zulip_internal
@has_request_variables
def get_activity(request):
duration_content, realm_minutes = user_activity_intervals()
counts_content = realm_summary_table(realm_minutes)
data = [
('Counts', counts_content),
('Durations', duration_content),
]
for page in ad_hoc_queries():
data.append((page['title'], page['content']))
title = 'Activity'
return render_to_response(
'analytics/activity.html',
dict(data=data, title=title, is_home=True),
context_instance=RequestContext(request)
)
def get_user_activity_records_for_realm(realm, is_bot):
fields = [
'user_profile__full_name',
'user_profile__email',
'query',
'client__name',
'count',
'last_visit',
]
records = UserActivity.objects.filter(
user_profile__realm__domain=realm,
user_profile__is_active=True,
user_profile__is_bot=is_bot
)
records = records.order_by("user_profile__email", "-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def get_user_activity_records_for_email(email):
fields = [
'user_profile__full_name',
'query',
'client__name',
'count',
'last_visit'
]
records = UserActivity.objects.filter(
user_profile__email=email
)
records = records.order_by("-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def raw_user_activity_table(records):
cols = [
'query',
'client',
'count',
'last_visit'
]
def row(record):
return [
record.query,
record.client.name,
record.count,
format_date_for_activity_reports(record.last_visit)
]
rows = map(row, records)
title = 'Raw Data'
return make_table(title, cols, rows)
def get_user_activity_summary(records):
summary = {}
def update(action, record):
if action not in summary:
summary[action] = dict(
count=record.count,
last_visit=record.last_visit
)
else:
summary[action]['count'] += record.count
summary[action]['last_visit'] = max(
summary[action]['last_visit'],
record.last_visit
)
if records:
summary['name'] = records[0].user_profile.full_name
for record in records:
client = record.client.name
query = record.query
update('use', record)
if client == 'API':
m = re.match('/api/.*/external/(.*)', query)
if m:
client = m.group(1)
update(client, record)
if client.startswith('desktop'):
update('desktop', record)
if client == 'website':
update('website', record)
if ('send_message' in query) or re.search('/api/.*/external/.*', query):
update('send', record)
if query in ['/json/update_pointer', '/api/v1/update_pointer']:
update('pointer', record)
update(client, record)
return summary
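# Editor's illustration (not in the original): the returned summary maps each
# action/client key to a dict such as {'count': 42, 'last_visit': <datetime>},
# plus a 'name' entry holding the user's full name, e.g.
# {'name': 'Jane Doe', 'use': {'count': 42, 'last_visit': ...}, 'send': {...}}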
def format_date_for_activity_reports(date):
if date:
return date.astimezone(eastern_tz).strftime('%Y-%m-%d %H:%M')
else:
return ''
def user_activity_link(email):
url_name = 'analytics.views.get_user_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(email=email))
email_link = '<a href="%s">%s</a>' % (url, email)
return mark_safe(email_link)
def realm_activity_link(realm):
url_name = 'analytics.views.get_realm_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(realm=realm))
realm_link = '<a href="%s">%s</a>' % (url, realm)
return mark_safe(realm_link)
def realm_client_table(user_summaries):
exclude_keys = [
'internal',
'name',
'use',
'send',
'pointer',
'website',
'desktop',
]
rows = []
for email, user_summary in user_summaries.items():
email_link = user_activity_link(email)
name = user_summary['name']
for k, v in user_summary.items():
if k in exclude_keys:
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
name,
email_link,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'Last visit',
'Client',
'Name',
'Email',
'Count',
]
title = 'Clients'
return make_table(title, cols, rows)
def user_activity_summary_table(user_summary):
rows = []
for k, v in user_summary.items():
if k == 'name':
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'last_visit',
'client',
'count',
]
title = 'User Activity'
return make_table(title, cols, rows)
def realm_user_summary_table(all_records, admin_emails):
user_records = {}
def by_email(record):
return record.user_profile.email
for email, records in itertools.groupby(all_records, by_email):
user_records[email] = get_user_activity_summary(list(records))
def get_last_visit(user_summary, k):
if k in user_summary:
return user_summary[k]['last_visit']
else:
return None
def get_count(user_summary, k):
if k in user_summary:
return user_summary[k]['count']
else:
return ''
def is_recent(val):
age = datetime.now(val.tzinfo) - val
return age.total_seconds() < 5 * 60
rows = []
for email, user_summary in user_records.items():
email_link = user_activity_link(email)
sent_count = get_count(user_summary, 'send')
cells = [user_summary['name'], email_link, sent_count]
row_class = ''
for field in ['use', 'send', 'pointer', 'desktop', 'ZulipiOS', 'Android']:
val = get_last_visit(user_summary, field)
if field == 'use':
if val and is_recent(val):
row_class += ' recently_active'
if email in admin_emails:
row_class += ' admin'
val = format_date_for_activity_reports(val)
cells.append(val)
row = dict(cells=cells, row_class=row_class)
rows.append(row)
def by_used_time(row):
return row['cells'][3]
rows = sorted(rows, key=by_used_time, reverse=True)
cols = [
'Name',
'Email',
'Total sent',
'Heard from',
'Message sent',
'Pointer motion',
'Desktop',
'ZulipiOS',
'Android'
]
title = 'Summary'
content = make_table(title, cols, rows, has_row_class=True)
return user_records, content
@zulip_internal
def get_realm_activity(request, realm):
data = []
all_records = {}
all_user_records = {}
try:
admins = Realm.objects.get(domain=realm).get_admin_users()
except Realm.DoesNotExist:
return HttpResponseNotFound("Realm %s does not exist" % (realm,))
admin_emails = {admin.email for admin in admins}
for is_bot, page_title in [(False, 'Humans'), (True, 'Bots')]:
all_records = get_user_activity_records_for_realm(realm, is_bot)
all_records = list(all_records)
user_records, content = realm_user_summary_table(all_records, admin_emails)
all_user_records.update(user_records)
data += [(page_title, content)]
page_title = 'Clients'
content = realm_client_table(all_user_records)
data += [(page_title, content)]
page_title = 'History'
content = sent_messages_report(realm)
data += [(page_title, content)]
fix_name = lambda realm: realm.replace('.', '_')
realm_link = 'https://stats1.zulip.net:444/render/?from=-7days'
realm_link += '&target=stats.gauges.staging.users.active.%s.0_16hr' % (fix_name(realm),)
title = realm
return render_to_response(
'analytics/activity.html',
dict(data=data, realm_link=realm_link, title=title),
context_instance=RequestContext(request)
)
@zulip_internal
def get_user_activity(request, email):
records = get_user_activity_records_for_email(email)
data = []
user_summary = get_user_activity_summary(records)
content = user_activity_summary_table(user_summary)
data += [('Summary', content)]
content = raw_user_activity_table(records)
data += [('Info', content)]
title = email
return render_to_response(
'analytics/activity.html',
dict(data=data, title=title),
context_instance=RequestContext(request)
)
|
JanzTam/zulip
|
analytics/views.py
|
Python
|
apache-2.0
| 25,440
|
[
"VisIt"
] |
de23814dd9fa70dc1ccd6cb468d1d1d5470cda14b2d123c7ad9977e2cbf22b3a
|
from collections import defaultdict
from .utils_test import add, inc # noqa: F401
no_default = "__no_default__"
def ishashable(x):
"""Is x hashable?
Examples
--------
>>> ishashable(1)
True
>>> ishashable([1])
False
"""
try:
hash(x)
return True
except TypeError:
return False
def istask(x):
"""Is x a runnable task?
A task is a tuple with a callable first argument
Examples
--------
>>> inc = lambda x: x + 1
>>> istask((inc, 1))
True
>>> istask(1)
False
"""
return type(x) is tuple and x and callable(x[0])
def has_tasks(dsk, x):
"""Whether ``x`` has anything to compute.
Returns True if:
- ``x`` is a task
- ``x`` is a key in ``dsk``
- ``x`` is a list that contains any tasks or keys
"""
if istask(x):
return True
try:
if x in dsk:
return True
except Exception:
pass
if isinstance(x, list):
for i in x:
if has_tasks(dsk, i):
return True
return False
def preorder_traversal(task):
"""A generator to preorder-traverse a task."""
for item in task:
if istask(item):
yield from preorder_traversal(item)
elif isinstance(item, list):
yield list
yield from preorder_traversal(item)
else:
yield item
def lists_to_tuples(res, keys):
if isinstance(keys, list):
return tuple(lists_to_tuples(r, k) for r, k in zip(res, keys))
return res
def _execute_task(arg, cache, dsk=None):
"""Do the actual work of collecting data and executing a function
Examples
--------
>>> cache = {'x': 1, 'y': 2}
Compute tasks against a cache
>>> _execute_task((add, 'x', 1), cache) # Compute task in naive manner
2
>>> _execute_task((add, (inc, 'x'), 1), cache) # Support nested computation
3
Also grab data from cache
>>> _execute_task('x', cache)
1
Support nested lists
>>> list(_execute_task(['x', 'y'], cache))
[1, 2]
>>> list(map(list, _execute_task([['x', 'y'], ['y', 'x']], cache)))
[[1, 2], [2, 1]]
>>> _execute_task('foo', cache) # Passes through on non-keys
'foo'
"""
if isinstance(arg, list):
return [_execute_task(a, cache) for a in arg]
elif istask(arg):
func, args = arg[0], arg[1:]
# Note: Don't assign the subtask results to a variable. numpy detects
# temporaries by their reference count and can execute certain
# operations in-place.
return func(*(_execute_task(a, cache) for a in args))
elif not ishashable(arg):
return arg
elif arg in cache:
return cache[arg]
else:
return arg
def get(dsk, out, cache=None):
"""Get value from Dask
Examples
--------
>>> inc = lambda x: x + 1
>>> d = {'x': 1, 'y': (inc, 'x')}
>>> get(d, 'x')
1
>>> get(d, 'y')
2
"""
for k in flatten(out) if isinstance(out, list) else [out]:
if k not in dsk:
raise KeyError(f"{k} is not a key in the graph")
if cache is None:
cache = {}
for key in toposort(dsk):
task = dsk[key]
result = _execute_task(task, cache)
cache[key] = result
result = _execute_task(out, cache)
if isinstance(out, list):
result = lists_to_tuples(result, out)
return result
def keys_in_tasks(keys, tasks, as_list=False):
"""Returns the keys in `keys` that are also in `tasks`
Examples
--------
>>> dsk = {'x': 1,
... 'y': (inc, 'x'),
... 'z': (add, 'x', 'y'),
... 'w': (inc, 'z'),
... 'a': (add, (inc, 'x'), 1)}
>>> keys_in_tasks(dsk, ['x', 'y', 'j']) # doctest: +SKIP
{'x', 'y'}
"""
ret = []
while tasks:
work = []
for w in tasks:
typ = type(w)
if typ is tuple and w and callable(w[0]): # istask(w)
work.extend(w[1:])
elif typ is list:
work.extend(w)
elif typ is dict:
work.extend(w.values())
else:
try:
if w in keys:
ret.append(w)
except TypeError: # not hashable
pass
tasks = work
return ret if as_list else set(ret)
def find_all_possible_keys(tasks) -> set:
"""Returns all possible keys in `tasks` including hashable literals.
The definition of a key in a Dask graph is any hashable object
that is not a task. This function returns all such objects in
`tasks` even if the object is in fact a literal.
"""
ret = set()
while tasks:
work = []
for w in tasks:
typ = type(w)
if typ is tuple and w and callable(w[0]): # istask(w)
work.extend(w[1:])
elif typ is list:
work.extend(w)
elif typ is dict:
work.extend(w.values())
else:
try:
ret.add(w)
except TypeError: # not hashable
pass
tasks = work
return ret
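# Editor's illustration (not in the original docstring): literals count as possible
# keys too, because any hashable non-task object qualifies:
# >>> find_all_possible_keys([(add, 'x', 1), ['y', 2]])
# {1, 2, 'x', 'y'}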
def get_dependencies(dsk, key=None, task=no_default, as_list=False):
"""Get the immediate tasks on which this task depends
Examples
--------
>>> dsk = {'x': 1,
... 'y': (inc, 'x'),
... 'z': (add, 'x', 'y'),
... 'w': (inc, 'z'),
... 'a': (add, (inc, 'x'), 1)}
>>> get_dependencies(dsk, 'x')
set()
>>> get_dependencies(dsk, 'y')
{'x'}
>>> get_dependencies(dsk, 'z') # doctest: +SKIP
{'x', 'y'}
>>> get_dependencies(dsk, 'w') # Only direct dependencies
{'z'}
>>> get_dependencies(dsk, 'a') # Ignore non-keys
{'x'}
>>> get_dependencies(dsk, task=(inc, 'x')) # provide tasks directly
{'x'}
"""
if key is not None:
arg = dsk[key]
elif task is not no_default:
arg = task
else:
raise ValueError("Provide either key or task")
return keys_in_tasks(dsk, [arg], as_list=as_list)
def get_deps(dsk):
"""Get dependencies and dependents from dask dask graph
>>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}
>>> dependencies, dependents = get_deps(dsk)
>>> dependencies
{'a': set(), 'b': {'a'}, 'c': {'b'}}
>>> dependents # doctest: +SKIP
{'a': {'b'}, 'b': {'c'}, 'c': set()}
"""
dependencies = {k: get_dependencies(dsk, task=v) for k, v in dsk.items()}
dependents = reverse_dict(dependencies)
return dependencies, dependents
def flatten(seq, container=list):
"""
>>> list(flatten([1]))
[1]
>>> list(flatten([[1, 2], [1, 2]]))
[1, 2, 1, 2]
>>> list(flatten([[[1], [2]], [[1], [2]]]))
[1, 2, 1, 2]
>>> list(flatten(((1, 2), (1, 2)))) # Don't flatten tuples
[(1, 2), (1, 2)]
>>> list(flatten((1, 2, [3, 4]))) # support heterogeneous
[1, 2, 3, 4]
"""
if isinstance(seq, str):
yield seq
else:
for item in seq:
if isinstance(item, container):
yield from flatten(item, container=container)
else:
yield item
def reverse_dict(d):
"""
>>> a, b, c = 'abc'
>>> d = {a: [b, c], b: [c]}
>>> reverse_dict(d) # doctest: +SKIP
{'a': set([]), 'b': set(['a']), 'c': set(['a', 'b'])}
"""
result = defaultdict(set)
_add = set.add
for k, vals in d.items():
result[k]
for val in vals:
_add(result[val], k)
result.default_factory = None
return result
def subs(task, key, val):
"""Perform a substitution on a task
Examples
--------
>>> subs((inc, 'x'), 'x', 1) # doctest: +ELLIPSIS
(<function inc at ...>, 1)
"""
type_task = type(task)
if not (type_task is tuple and task and callable(task[0])): # istask(task):
try:
if type_task is type(key) and task == key:
return val
except Exception:
pass
if type_task is list:
return [subs(x, key, val) for x in task]
return task
newargs = []
hash_key = {key}
for arg in task[1:]:
type_arg = type(arg)
if type_arg is tuple and arg and callable(arg[0]): # istask(arg)
arg = subs(arg, key, val)
elif type_arg is list:
arg = [subs(x, key, val) for x in arg]
else:
try:
if arg in hash_key: # Hash and equality match
arg = val
except TypeError: # not hashable
pass
newargs.append(arg)
return task[:1] + tuple(newargs)
def _toposort(dsk, keys=None, returncycle=False, dependencies=None):
# Stack-based depth-first search traversal. This is based on Tarjan's
# method for topological sorting (see wikipedia for pseudocode)
if keys is None:
keys = dsk
elif not isinstance(keys, list):
keys = [keys]
if not returncycle:
ordered = []
# Nodes whose descendents have been completely explored.
# These nodes are guaranteed to not be part of a cycle.
completed = set()
# All nodes that have been visited in the current traversal. Because
# we are doing depth-first search, going "deeper" should never result
# in visiting a node that has already been seen. The `seen` and
# `completed` sets are mutually exclusive; it is okay to visit a node
# that has already been added to `completed`.
seen = set()
if dependencies is None:
dependencies = {k: get_dependencies(dsk, k) for k in dsk}
for key in keys:
if key in completed:
continue
nodes = [key]
while nodes:
# Keep current node on the stack until all descendants are visited
cur = nodes[-1]
if cur in completed:
# Already fully traversed descendants of cur
nodes.pop()
continue
seen.add(cur)
# Add direct descendants of cur to nodes stack
next_nodes = []
for nxt in dependencies[cur]:
if nxt not in completed:
if nxt in seen:
# Cycle detected!
cycle = [nxt]
while nodes[-1] != nxt:
cycle.append(nodes.pop())
cycle.append(nodes.pop())
cycle.reverse()
if returncycle:
return cycle
else:
cycle = "->".join(str(x) for x in cycle)
raise RuntimeError("Cycle detected in Dask: %s" % cycle)
next_nodes.append(nxt)
if next_nodes:
nodes.extend(next_nodes)
else:
# cur has no more descendants to explore, so we're done with it
if not returncycle:
ordered.append(cur)
completed.add(cur)
seen.remove(cur)
nodes.pop()
if returncycle:
return []
return ordered
def toposort(dsk, dependencies=None):
"""Return a list of keys of dask sorted in topological order."""
return _toposort(dsk, dependencies=dependencies)
def getcycle(d, keys):
"""Return a list of nodes that form a cycle if Dask is not a DAG.
Returns an empty list if no cycle is found.
``keys`` may be a single key or list of keys.
Examples
--------
>>> d = {'x': (inc, 'z'), 'y': (inc, 'x'), 'z': (inc, 'y')}
>>> getcycle(d, 'x')
['x', 'z', 'y', 'x']
See Also
--------
isdag
"""
return _toposort(d, keys=keys, returncycle=True)
def isdag(d, keys):
"""Does Dask form a directed acyclic graph when calculating keys?
``keys`` may be a single key or list of keys.
Examples
--------
>>> inc = lambda x: x + 1
>>> isdag({'x': 0, 'y': (inc, 'x')}, 'y')
True
>>> isdag({'x': (inc, 'y'), 'y': (inc, 'x')}, 'y')
False
See Also
--------
getcycle
"""
return not getcycle(d, keys)
class literal:
"""A small serializable object to wrap literal values without copying"""
__slots__ = ("data",)
def __init__(self, data):
self.data = data
def __repr__(self):
return "literal<type=%s>" % type(self.data).__name__
def __reduce__(self):
return (literal, (self.data,))
def __call__(self):
return self.data
def quote(x):
"""Ensure that this value remains this value in a dask graph
Some values in dask graph take on special meaning. Sometimes we want to
ensure that our data is not interpreted but remains literal.
>>> quote((add, 1, 2))
(literal<type=tuple>,)
"""
if istask(x) or type(x) is list or type(x) is dict:
return (literal(x),)
return x
|
jakirkham/dask
|
dask/core.py
|
Python
|
bsd-3-clause
| 13,069
|
[
"VisIt"
] |
218fa8d4808a450e931041228152031519f29ba26a44f30e59ae37448b9d66c5
|
# -*- coding: utf-8 -*-
r"""Testing of the resolution library - TAS
"""
from copy import deepcopy
import numpy as np
import pytest
from matplotlib import use
from mock import patch
from neutronpy import Sample, instrument
from neutronpy.instrument.exceptions import *
use('Agg')
def angle2(x, y, z, h, k, l, lattice):
r"""Function necessary for Prefactor functions
"""
latticestar = instrument.tools._star(lattice)[-1]
return np.arccos(
2 * np.pi * (h * x + k * y + l * z) / instrument.tools._modvec([x, y, z], lattice) / instrument.tools._modvec(
[h, k, l], latticestar))
def SqwDemo(H, K, L, W, p):
r"""Example Scattering function for convolution tests
"""
del K, L
Deltax = p[0]
Deltay = p[1]
Deltaz = p[2]
cc = p[3]
Gamma = p[4]
omegax = np.sqrt(cc ** 2 * (np.sin(2 * np.pi * H)) ** 2 + Deltax ** 2)
omegay = np.sqrt(cc ** 2 * (np.sin(2 * np.pi * H)) ** 2 + Deltay ** 2)
omegaz = np.sqrt(cc ** 2 * (np.sin(2 * np.pi * H)) ** 2 + Deltaz ** 2)
lorx = 1 / np.pi * Gamma / ((W - omegax) ** 2 + Gamma ** 2)
lory = 1 / np.pi * Gamma / ((W - omegay) ** 2 + Gamma ** 2)
lorz = 1 / np.pi * Gamma / ((W - omegaz) ** 2 + Gamma ** 2)
sqw0 = lorx * (1 - np.cos(np.pi * H)) / omegax / 2
sqw1 = lory * (1 - np.cos(np.pi * H)) / omegay / 2
sqw2 = lorz * (1 - np.cos(np.pi * H)) / omegaz / 2
sqw = np.vstack((sqw0, sqw1, sqw2))
return sqw
def SMADemo(H, K, L, p):
r"""Example Scattering function for convolution tests
"""
del K, L
Deltax = p[0]
Deltay = p[1]
Deltaz = p[2]
cc = p[3]
Gamma = p[4]
omegax = np.sqrt(cc ** 2 * (np.sin(2. * np.pi * H.flatten())) ** 2 + Deltax ** 2)
omegay = np.sqrt(cc ** 2 * (np.sin(2. * np.pi * H.flatten())) ** 2 + Deltay ** 2)
omegaz = np.sqrt(cc ** 2 * (np.sin(2. * np.pi * H.flatten())) ** 2 + Deltaz ** 2)
w0 = np.vstack((omegax, omegay, omegaz))
S = np.vstack(((1. - np.cos(np.pi * H.flatten())) / omegax / 2.,
(1. - np.cos(np.pi * H.flatten())) / omegay / 2.,
(1. - np.cos(np.pi * H.flatten())) / omegaz / 2.))
HWHM = np.ones(S.shape) * Gamma
return [w0, S, HWHM]
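# Editor's note (not in the original): SqwDemo and SMADemo share the same toy
# dispersion, one branch per polarization i in {x, y, z}:
#     omega_i(H) = sqrt(cc**2 * sin(2*pi*H)**2 + Delta_i**2)
# SqwDemo returns the full S(q,w) built from Lorentzians of half-width Gamma,
# while SMADemo returns the (dispersion, intensity, HWHM) triple expected by
# the single-mode-approximation convolution (resolution_convolution_SMA).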
def PrefDemo(H, K, L, W, EXP, p):
r"""Prefactor example for convolution tests
"""
[sample, rsample] = EXP.get_lattice()
q2 = instrument.tools._modvec([H, K, L], rsample) ** 2
sd = q2 / (16 * np.pi ** 2)
ff = 0.0163 * np.exp(-35.883 * sd) + 0.3916 * np.exp(-13.223 * sd) + 0.6052 * np.exp(-4.339 * sd) - 0.0133
alphax = angle2(1, 0, 0, H, K, L, sample)
alphay = angle2(0, 1, 0, H, K, L, sample)
alphaz = angle2(0, 0, 1, H, K, L, sample)
polx = np.sin(alphax) ** 2
poly = np.sin(alphay) ** 2
polz = np.sin(alphaz) ** 2
prefactor = np.zeros((3, len(H)))
prefactor[0, :] = ff ** 2.0 * polx * p[5]
prefactor[1, :] = ff ** 2.0 * poly * p[5]
prefactor[2, :] = ff ** 2.0 * polz * p[5]
bgr = np.ones(H.shape) * p[6]
return [prefactor, bgr]
def PrefDemo2(H, K, L, W, EXP, p):
r"""Prefactor example for convolution tests
No background
"""
[sample, rsample] = EXP.get_lattice()
q2 = instrument.tools._modvec([H, K, L], rsample) ** 2
sd = q2 / (16 * np.pi ** 2)
ff = 0.0163 * np.exp(-35.883 * sd) + 0.3916 * np.exp(-13.223 * sd) + 0.6052 * np.exp(-4.339 * sd) - 0.0133
alphax = angle2(1, 0, 0, H, K, L, sample)
alphay = angle2(0, 1, 0, H, K, L, sample)
alphaz = angle2(0, 0, 1, H, K, L, sample)
polx = np.sin(alphax) ** 2
poly = np.sin(alphay) ** 2
polz = np.sin(alphaz) ** 2
prefactor = np.zeros((3, len(H)))
prefactor[0, :] = ff ** 2.0 * polx * p[5]
prefactor[1, :] = ff ** 2.0 * poly * p[5]
prefactor[2, :] = ff ** 2.0 * polz * p[5]
return prefactor
def PrefDemo3(H, K, L, W, EXP, p):
r"""Prefactor example for convolution tests
No prefactor
"""
return
sumIavg = 1646.8109875866667
sumIstd = 0.67288676280070814 * 2
instr = instrument.Instrument(test=1)
instr.method = 0
instr.mono.tau = 'PG(002)'
instr.mono.mosaic = 25
instr.ana.tau = 'PG(002)'
instr.ana.mosaic = 25
instr.sample.a = 6
instr.sample.b = 7
instr.sample.c = 8
instr.sample.alpha = 90
instr.sample.beta = 90
instr.sample.gamma = 90
instr.hcol = [40, 40, 40, 40]
instr.vcol = [120, 120, 120, 120]
instr.efixed = 14.7
instr.orient1 = np.array([1, 0, 0])
instr.orient2 = np.array([0, 1, 0])
EXP_coopernathans = deepcopy(instr)
instr.method = 1
EXP_popovici = deepcopy(instr)
def test_cooper_nathans():
"""Test Cooper Nathans method
"""
R0 = 2117.45739160280
RMS = np.array([[9154.39386475516, 7.32203491574463e-11, 0, 7.11894676107400e-12],
[2.68712790277282e-10, 340628.383580632, 0, -32536.7077302429],
[0, 0, 634.724632931705, 0],
[2.58004722905037e-11, -32536.7077302429, 0, 3114.58144514260]])
ResVol0 = (2 * np.pi) ** 2 / np.sqrt(np.linalg.det(RMS)) * 2
angles0 = np.array([-20.58848852, -41.17697704, -78.6627354, 22.67452921, -20.58848852, -41.17697704])
BraggWidths0 = np.array(
[0.0492235489748347, 0.00806951257792662, 0.186936902874783, 1.82137589975272, 0.0843893950600324])
EXP = EXP_coopernathans
hkle = [1., 0., 0., 0.]
EXP.calc_resolution(hkle)
NP = EXP.RMS
R = EXP.R0
BraggWidths = instrument.tools.get_bragg_widths(NP)
angles = EXP_coopernathans.get_angles_and_Q(hkle)[0]
ResVol = (2 * np.pi) ** 2 / np.sqrt(np.linalg.det(NP)) * 2
assert (np.all(np.abs((RMS - NP)) < 100))
assert (abs(R - R0) < 1e-3)
assert (abs(ResVol - ResVol0) < 1e-5)
assert (np.all(np.abs((BraggWidths - BraggWidths0)) < 0.1))
assert (np.all(np.abs((angles0 - angles)) < 0.1))
def test_popovici():
"""Test Popovici method
"""
R0 = 2117.46377630698
RMS = np.array([[9154.44276618996, 4.78869185251432e-08, 0, 4.57431754676102e-09],
[8.53192164855333e-08, 340633.245599205, 0, -32537.1653207760],
[0, 0, 634.821032587120, 0],
[8.14983128960581e-09, -32537.1653207760, 0, 3114.62458263531]])
ResVol0 = (2 * np.pi) ** 2 / np.sqrt(np.linalg.det(RMS)) * 2
angles0 = np.array([-20.58848852, -41.17697704, -78.6627354, 22.67452921, -20.58848852, -41.17697704])
BraggWidths0 = np.array(
[0.0492234175028573, 0.00806945498774637, 0.186922708845071, 1.82136489553849, 0.0843888106622307])
EXP = EXP_popovici
hkle = [1, 0, 0, 0]
EXP.calc_resolution(hkle)
NP = EXP_popovici.RMS
R = EXP_popovici.R0
BraggWidths = instrument.tools.get_bragg_widths(NP)
angles = EXP_popovici.get_angles_and_Q(hkle)[0]
ResVol = (2 * np.pi) ** 2 / np.sqrt(np.linalg.det(NP)) * 2
assert (np.all(np.abs((RMS - NP) / 1e4) < 0.1))
assert (abs(R - R0) < 1e-3)
assert (abs(ResVol - ResVol0) < 1e-5)
assert (np.all(np.abs((BraggWidths - BraggWidths0)) < 0.1))
assert (np.all(np.abs((angles0 - angles)) < 1e-3))
def test_4d_conv():
"""Test 4d convolution
"""
sample = Sample(6, 7, 8, 90, 90, 90)
sample.u = [1, 0, 0]
sample.v = [0, 0, 1]
EXP = instrument.Instrument(14.7, sample, hcol=[80, 40, 40, 80], vcol=[120, 120, 120, 120], mono='pg(002)',
ana='pg(002)')
EXP.moncor = 0
p = np.array([3, 3, 3, 30, 0.4, 6e4, 40])
H1, K1, L1, W1 = 1.5, 0, 0.35, np.arange(20, -0.5, -0.5)
I11 = EXP.resolution_convolution(SqwDemo, PrefDemo, 2, (H1, K1, L1, W1), 'fix', [5, 0], p)
I12 = EXP.resolution_convolution(SqwDemo, PrefDemo, 2, (H1, K1, L1, W1), 'fix', [15, 0], p)
I13 = EXP.resolution_convolution(SqwDemo, PrefDemo, 2, (H1, K1, L1, W1), 'mc', None, p, 13)
sumI11, sumI12, sumI13 = np.sum(I11), np.sum(I12), np.sum(I13)
assert (np.abs(sumIavg - sumI11) < sumIstd)
assert (np.abs(sumIavg - sumI12) < sumIstd)
assert (np.abs(sumIavg - sumI13) < sumIstd)
EXP.resolution_convolution(SqwDemo, PrefDemo2, 1, (H1, K1, L1, W1), 'fix', None, p)
with pytest.raises(ValueError):
EXP.resolution_convolution(SqwDemo, PrefDemo3, 0, (H1, K1, L1, W1), 'fix', [5, 0], p)
def test_sma_conv():
"""Test SMA convolution
"""
sample = Sample(6, 7, 8, 90, 90, 90)
sample.u = [1, 0, 0]
sample.v = [0, 0, 1]
EXP = instrument.Instrument(14.7, sample, hcol=[80, 40, 40, 80], vcol=[120, 120, 120, 120], mono='pg(002)',
ana='pg(002)')
EXP.moncor = 0
p = np.array([3, 3, 3, 30, 0.4, 6e4, 40])
H1, K1, L1, W1 = 1.5, 0, 0.35, np.arange(20, -0.5, -0.5)
I14 = EXP.resolution_convolution_SMA(SMADemo, PrefDemo, 2, (H1, K1, L1, W1), 'fix', [15, 0], p)
I15 = EXP.resolution_convolution_SMA(SMADemo, PrefDemo, 2, (H1, K1, L1, W1), 'mc', [1], p, 13)
sumI14, sumI15 = np.sum(I14), np.sum(I15)
assert (np.abs(sumIavg - sumI14) < sumIstd)
assert (np.abs(sumIavg - sumI15) < sumIstd)
EXP.resolution_convolution_SMA(SMADemo, PrefDemo2, 1, (H1, K1, L1, W1), 'fix', None, p)
with pytest.raises(ValueError):
EXP.resolution_convolution_SMA(SMADemo, PrefDemo3, 0, (H1, K1, L1, W1), 'fix', None, p)
@patch("matplotlib.pyplot.show")
def test_plotting(mock_show):
"""Test Plotting methods
"""
EXP = instrument.Instrument()
EXP.plot_instrument([1, 0, 0, 0])
EXP.plot_projections([1, 0, 0, 0])
EXP.calc_projections([[1, 2], 0, 0, 0])
EXP.plot_projections([[1, 2], 0, 0, 0])
EXP.guide.width = 1
EXP.guide.height = 1
EXP.mono.width = 1
EXP.mono.height = 1
EXP.sample.width = 1
EXP.sample.height = 1
EXP.sample.depth = 1
EXP.ana.width = 1
EXP.ana.height = 1
EXP.detector.width = 1
EXP.detector.height = 1
EXP.arms = [10, 10, 10, 10]
EXP.plot_instrument([1, 0, 0, 0])
def test_sample():
"""Test Sample class
"""
sample = Sample(1, 1, 1, 90, 90, 90, mosaic=60, direct=-1, u=[1, 0, 0], v=[0, 1, 0])
assert (isinstance(sample.u, np.ndarray))
assert (isinstance(sample.v, np.ndarray))
def test_GetTau():
"""Test monochromator crystal tau value finder
"""
assert (instrument.tools.GetTau(1.87325, getlabel=True) == 'pg(002)')
assert (instrument.tools.GetTau(1.8, getlabel=True) == '')
assert (instrument.tools.GetTau(10) == 10)
with pytest.raises((AnalyzerError, MonochromatorError, KeyError)):
instrument.tools.GetTau('blah')
def test_CleanArgs_err():
"""Test exception capture in CleanArgs
"""
pass
def test_fproject():
"""Test projection function
"""
x = np.ones((4, 4, 1))
instrument.tools.fproject(x, 0)
instrument.tools.fproject(x, 1)
instrument.tools.fproject(x, 2)
def test_constants():
"""Test constants
"""
EXP_popovici.moncor = 0
assert (EXP_popovici.moncor == 0)
def test_errors():
"""Test exception handling
"""
EXP = instrument.Instrument()
EXP.sample.u = [1, 0, 0]
EXP.sample.v = [2, 0, 0]
with pytest.raises(ScatteringTriangleError):
EXP.calc_resolution([1, 1, 0, 0])
def test_calc_res_cases():
"""Test different resolution cases
"""
EXP = instrument.Instrument()
EXP.sample.shape = np.eye(3)
EXP.calc_resolution([1, 0, 0, 0])
EXP.sample.shape = np.eye(3)[np.newaxis].reshape((1, 3, 3))
EXP.calc_resolution([1, 0, 0, 0])
EXP.horifoc = 1
EXP.calc_resolution([1, 0, 0, 0])
EXP.moncor = 1
EXP.calc_resolution([1, 0, 0, 0])
EXP.method = 1
EXP.calc_resolution([1, 0, 0, 0])
EXP.ana.thickness = 1
EXP.ana.Q = 1.5
EXP.calc_resolution([1, 0, 0, 0])
EXP.Smooth = instrument.tools._Dummy('Smooth')
EXP.Smooth.X = 1
EXP.Smooth.Y = 1
EXP.Smooth.Z = 1
EXP.Smooth.E = 1
EXP.calc_resolution([1, 0, 0, 0])
def test_projection_calc():
"""Test different cases of resolution ellipse slices/projections
"""
EXP = instrument.Instrument()
EXP.calc_resolution([1, 0, 0, 0])
EXP.calc_projections([0, 1, 0, 0])
EXP.get_resolution_params([0, 1, 0, 0], 'QxQy', 'slice')
with pytest.raises(InstrumentError):
EXP.get_resolution_params([1, 1, 0, 0], 'QxQy', 'slice')
EXP = instrument.Instrument()
EXP.get_resolution_params([1, 0, 0, 0], 'QxQy', 'slice')
EXP.get_resolution_params([1, 0, 0, 0], 'QxQy', 'project')
EXP.get_resolution_params([1, 0, 0, 0], 'QxW', 'slice')
EXP.get_resolution_params([1, 0, 0, 0], 'QxW', 'project')
EXP.get_resolution_params([1, 0, 0, 0], 'QyW', 'slice')
EXP.get_resolution_params([1, 0, 0, 0], 'QyW', 'project')
if __name__ == '__main__':
pytest.main()
|
granrothge/neutronpy
|
tests/test_resolution_tas.py
|
Python
|
mit
| 12,645
|
[
"CRYSTAL"
] |
ee2891225512f760f6dbe49754a7247a74f5d283d196cd36f341596e0c02f2a3
|
import typing
from itertools import chain
from typing import *
from typing import Callable, ForwardRef, Union, _GenericAlias
import astroid
import astroid.inference
from astroid import nodes
from astroid.transforms import TransformVisitor
from ..typecheck.base import (
Environment,
NoType,
TypeConstraints,
TypeFail,
TypeFailAnnotationInvalid,
TypeFailFunction,
TypeFailLookup,
TypeFailReturn,
TypeFailStarred,
TypeInfo,
TypeResult,
_ann_node_to_type,
_gorg,
_node_to_type,
accept_failable,
create_Callable_TypeResult,
failable_collect,
is_callable,
wrap_container,
)
from ..typecheck.errors import (
BINOP_TO_METHOD,
BINOP_TO_REV_METHOD,
INPLACE_TO_BINOP,
UNARY_TO_METHOD,
binop_error_message,
subscript_error_message,
unaryop_error_message,
)
from ..typecheck.type_store import TypeStore
class TypeInferer:
"""The class responsible for inferring types given an astroid AST."""
type_constraints = TypeConstraints()
type_store = TypeStore(type_constraints)
type_constraints.type_store = type_store
def __init__(self) -> None:
self.type_constraints.reset()
def reset(self) -> None:
self.type_constraints.reset()
self.type_store = TypeStore(self.type_constraints)
self.type_constraints.type_store = self.type_store
###########################################################################
# Setting up the environment
###########################################################################
def environment_transformer(self) -> TransformVisitor:
"""Return a TransformVisitor that sets an environment for every node."""
visitor = TransformVisitor()
visitor.register_transform(nodes.FunctionDef, self._set_function_def_environment)
visitor.register_transform(nodes.AsyncFunctionDef, self._set_function_def_environment)
visitor.register_transform(nodes.ClassDef, self._set_classdef_environment)
visitor.register_transform(nodes.Module, self._set_module_environment)
visitor.register_transform(nodes.ListComp, self._set_comprehension_environment)
visitor.register_transform(nodes.DictComp, self._set_comprehension_environment)
visitor.register_transform(nodes.SetComp, self._set_comprehension_environment)
visitor.register_transform(nodes.GeneratorExp, self._set_comprehension_environment)
visitor.register_transform(nodes.Lambda, self._set_comprehension_environment)
return visitor
def _set_module_environment(self, node: nodes.Module) -> None:
"""Method to set environment of a Module node."""
node.type_environment = Environment()
for name in node.globals:
if not any(
isinstance(elt, (nodes.ImportFrom, nodes.Import)) for elt in node.globals[name]
):
new_tvar = self.type_constraints.fresh_tvar(node.globals[name][0])
if any(isinstance(elt, nodes.ClassDef) for elt in node.globals[name]):
self.type_constraints.unify(new_tvar, Type[ForwardRef(name)], node)
node.type_environment.globals[name] = new_tvar
self._populate_local_env(node)
def _set_classdef_environment(self, node: nodes.ClassDef) -> None:
"""Method to set environment of a ClassDef node."""
node.type_environment = Environment()
for name in node.instance_attrs:
node.type_environment.locals[name] = self.type_constraints.fresh_tvar(
node.instance_attrs[name][0]
)
self.type_store.classes[node.name][name] = [
(node.type_environment.locals[name], "attribute")
]
for name in node.locals:
if name in ["__module__", "__qualname__"]:
node.type_environment.locals[name] = str
else:
node.type_environment.locals[name] = self.type_constraints.fresh_tvar(
node.locals[name][0]
)
self.type_store.classes[node.name]["__bases"] = [_node_to_type(base) for base in node.bases]
try:
self.type_store.classes[node.name]["__mro"] = [cls.name for cls in node.mro()]
except astroid.exceptions.DuplicateBasesError:
self.type_store.classes[node.name]["__mro"] = [node.name]
def _set_function_def_environment(self, node: nodes.FunctionDef) -> None:
"""Method to set environment of a FunctionDef node."""
node.type_environment = Environment()
# self is a special case
if (
node.args.args
and node.args.args[0].name == "self"
and isinstance(node.parent, nodes.ClassDef)
):
node.type_environment.locals["self"] = ForwardRef(node.parent.name)
self._populate_local_env(node)
self._populate_local_env_attrs(node)
node.type_environment.locals["return"] = self.type_constraints.fresh_tvar(node)
def _set_comprehension_environment(self, node: nodes.Comprehension) -> None:
"""Set the environment of a comprehension expression.
Covers ListComp, SetComp, DictComp, and GeneratorExp."""
node.type_environment = Environment()
for name in node.locals:
node.type_environment.locals[name] = self.type_constraints.fresh_tvar(node)
def _populate_local_env(self, node: nodes.NodeNG) -> None:
"""Helper to populate locals attributes in type environment of given node."""
for var_name in node.locals:
try:
var_value = node.type_environment.lookup_in_env(var_name)
except KeyError:
if any(
isinstance(elt, (nodes.ImportFrom, nodes.Import))
for elt in node.locals[var_name]
):
var_value = Any
else:
var_value = self.type_constraints.fresh_tvar(node.locals[var_name][0])
node.type_environment.locals[var_name] = var_value
def _populate_local_env_attrs(self, node: nodes.NodeNG) -> None:
"""Store in TypeStore the attributes of any unresolved class names"""
for attr_node in chain(
node.nodes_of_class(nodes.Attribute), node.nodes_of_class(nodes.AssignAttr)
):
if (
isinstance(attr_node.expr, nodes.Name)
and attr_node.expr.name in node.type_environment.locals
):
class_type = node.type_environment.lookup_in_env(attr_node.expr.name)
if isinstance(class_type, TypeVar):
self.type_store.classes[class_type.__name__]["__mro"] = [class_type.__name__]
                if attr_node.attrname not in self.type_store.classes[class_type.__name__]:
self.type_store.classes[class_type.__name__][attr_node.attrname] = [
(self.type_constraints.fresh_tvar(attr_node), "attribute")
]
###########################################################################
# Type inference methods
###########################################################################
def type_inference_transformer(self) -> TransformVisitor:
"""Instantiate a visitor to perform type inference on an AST."""
type_visitor = TransformVisitor()
for klass in nodes.ALL_NODE_CLASSES:
if hasattr(self, f"visit_{klass.__name__.lower()}"):
type_visitor.register_transform(
klass, getattr(self, f"visit_{klass.__name__.lower()}")
)
else:
type_visitor.register_transform(klass, self.visit_default)
return type_visitor
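    # Example (informal): a nodes.BinOp is dispatched to visit_binop below,
    # while node classes without a matching visit_* method fall back to
    # visit_default, which simply assigns NoType().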
def visit_default(self, node: nodes.NodeNG) -> None:
node.inf_type = NoType()
##############################################################################
# Literals
##############################################################################
def visit_const(self, node: nodes.Const) -> None:
node.inf_type = TypeInfo(type(node.value))
def visit_list(self, node: nodes.List) -> None:
if node.ctx == nodes.Store:
# List is the target of an assignment; do not give it a type.
node.inf_type = NoType()
elif not node.elts:
node.inf_type = TypeInfo(List[self.type_constraints.fresh_tvar(node)])
else:
elt_inf_type = self._unify_elements(node.elts, node)
node.inf_type = wrap_container(List, elt_inf_type)
def visit_set(self, node: nodes.Set) -> None:
if not node.elts:
node.inf_type = TypeInfo(Set[self.type_constraints.fresh_tvar(node)])
else:
elt_inf_type = self._unify_elements(node.elts, node)
node.inf_type = wrap_container(Set, elt_inf_type)
def visit_dict(self, node: nodes.Dict) -> None:
if not node.items:
node.inf_type = TypeInfo(
Dict[self.type_constraints.fresh_tvar(node), self.type_constraints.fresh_tvar(node)]
)
else:
key_list, val_list = zip(*node.items)
key_inf_type = self._unify_elements(key_list, node)
val_inf_type = self._unify_elements(val_list, node)
node.inf_type = wrap_container(Dict, key_inf_type, val_inf_type)
def visit_tuple(self, node: nodes.Tuple) -> None:
if node.ctx == nodes.Store:
# Tuple is the target of an assignment; do not give it a type.
node.inf_type = NoType()
else:
node.inf_type = wrap_container(Tuple, *(e.inf_type for e in node.elts))
def _unify_elements(self, lst: List[nodes.NodeNG], node: nodes.NodeNG) -> TypeResult:
lst = list(lst)
elt_inf_type = lst[0].inf_type
for cur_elt in lst[1:]:
elt_inf_type = self.type_constraints.unify(elt_inf_type, cur_elt.inf_type, node)
if isinstance(elt_inf_type, TypeFail):
return TypeInfo(Any)
return elt_inf_type
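    # Example (informal): for a literal like [1, 'a'], unifying int with str
    # fails, so _unify_elements falls back to Any and visit_list infers the
    # overall type List[Any].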
##############################################################################
# Expression types
##############################################################################
def visit_ifexp(self, node: nodes.IfExp) -> None:
node.inf_type = self.type_constraints.unify(node.body.inf_type, node.orelse.inf_type, node)
def visit_expr(self, node: nodes.Expr) -> None:
"""Expr nodes take the type of their child."""
node.inf_type = node.value.inf_type
##############################################################################
# Name lookup and assignment
##############################################################################
def visit_name(self, node: nodes.Name) -> None:
node.inf_type = self.lookup_inf_type(node, node.name)
def visit_assign(self, node: nodes.Assign) -> None:
"""Update the enclosing scope's type environment for the assignment's binding(s)."""
# the type of the expression being assigned
if isinstance(node.value, nodes.Name):
expr_inf_type = self.lookup_typevar(node, node.value.name)
else:
expr_inf_type = node.value.inf_type
node.inf_type = NoType()
for target in node.targets:
type_result = self._assign_type(target, expr_inf_type, node)
if isinstance(type_result, TypeFail):
node.inf_type = type_result
break
def visit_annassign(self, node: nodes.AnnAssign) -> None:
if isinstance(node.target, nodes.AssignAttr):
var_inf_type = self.lookup_typevar(node.target, node.target.attrname)
else:
var_inf_type = self.lookup_typevar(node.target, node.target.name)
ann_type = _ann_node_to_type(node.annotation)
self.type_constraints.unify(var_inf_type, ann_type, node)
if node.value:
node.targets = [node.target]
self.visit_assign(node)
elif isinstance(ann_type, TypeFail):
node.inf_type = ann_type
else:
node.inf_type = NoType()
def visit_augassign(self, node: nodes.AugAssign) -> None:
node.inf_type = NoType()
# lookup method for augmented arithmetic assignment
method_name = BINOP_TO_METHOD[node.op]
if isinstance(node.target, nodes.Subscript):
target_type = node.target.value.inf_type
binop_result = self._handle_call(
node.target,
"__setitem__",
target_type,
node.target.slice.inf_type,
node.value.inf_type,
)
else:
if isinstance(node.target, nodes.AssignName):
target_type = self.lookup_typevar(node.target, node.target.name)
elif isinstance(node.target, nodes.AssignAttr):
target_type = self._lookup_attribute_type(
node.target, node.target.expr.inf_type, node.target.attrname
)
binop_result = self._handle_call(node, method_name, target_type, node.value.inf_type)
if isinstance(binop_result, TypeFail):
# on failure, fallback to method corresponding to standard operator
boolop = INPLACE_TO_BINOP[node.op]
method_name = BINOP_TO_METHOD[boolop]
arithm_type = self._arithm_convert(node, method_name, target_type, node.value.inf_type)
if arithm_type:
binop_result = arithm_type
else:
binop_result = self._handle_call(
node, method_name, target_type, node.value.inf_type
)
type_result = self._assign_type(node.target, binop_result, node)
if isinstance(type_result, TypeFail):
node.inf_type = type_result
@accept_failable
def _assign_type(self, target: nodes.NodeNG, expr_type: type, node: nodes.Assign) -> TypeResult:
"""Update the type environment so that the target is bound to the given type."""
if isinstance(target, nodes.AssignName):
# A single identifier, e.g. x = ...
target_type_var = self.lookup_typevar(target, target.name)
return self.type_constraints.unify(target_type_var, expr_type, node)
elif isinstance(target, nodes.AssignAttr):
# Attribute mutation, e.g. x.y = ...
attr_type = self._lookup_attribute_type(target, target.expr.inf_type, target.attrname)
return self.type_constraints.unify(attr_type, expr_type, node)
elif isinstance(target, nodes.Tuple):
# Unpacking assignment, e.g. x, y = ...
if getattr(expr_type, "__origin__", None) is tuple:
assign_result = self._assign_tuple(target, expr_type, node)
else:
assign_result = self._handle_call(target, "__iter__", expr_type)
target_tvars = self._get_tuple_targets(target)
starred_target_found = False
for tvar, elt in zip(target_tvars, target.elts):
if isinstance(elt, nodes.Starred) and not starred_target_found:
starred_target_found = True
unif_result = assign_result >> (
lambda t: self.type_constraints.unify(tvar, List[t.__args__[0]], node)
)
elif isinstance(elt, nodes.Starred) and starred_target_found:
unif_result = TypeFailStarred(node)
else:
unif_result = assign_result >> (
lambda t: self.type_constraints.unify(tvar, t.__args__[0], node)
)
if isinstance(unif_result, TypeFail):
return unif_result
return assign_result
elif isinstance(target, nodes.Subscript):
# TODO: previous case must recursively handle this one
return self._handle_call(
target, "__setitem__", target.value.inf_type, target.slice.inf_type, expr_type
)
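    # Example (informal): for `a, *b, c = (1, 2, 3, 4)`, the right-hand Tuple
    # type is dispatched to _assign_tuple below, which binds b to a List of
    # the middle elements; a second starred target would yield
    # TypeFailStarred.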
def _assign_tuple(self, target: nodes.Tuple, value: Any, node: nodes.Assign) -> TypeResult:
"""Unify tuple of type variables and tuple of types, within context of Assign statement."""
starred_index = None
for i in range(len(target.elts)):
if isinstance(target.elts[i], nodes.Starred):
if starred_index is None:
starred_index = i
else:
return TypeFailStarred(node)
target_tvars = self._get_tuple_targets(target)
if starred_index is not None:
starred_length = len(value.__args__) - len(target.elts) + 1
starred_subvalues = node.value.elts[starred_index : starred_index + starred_length]
starred_value = wrap_container(List, self._unify_elements(starred_subvalues, node))
starred_target_tvar = target_tvars[starred_index]
unif_result = self.type_constraints.unify(starred_target_tvar, starred_value, node)
if isinstance(unif_result, TypeFail):
return unif_result
nonstarred_values = Tuple[
value.__args__[:starred_index] + value.__args__[starred_index + starred_length :]
]
nonstarred_targets = target_tvars
nonstarred_targets.remove(nonstarred_targets[starred_index])
else:
nonstarred_values = value
nonstarred_targets = target_tvars
nonstarred_target_tuple = wrap_container(Tuple, *nonstarred_targets)
unif_result = self.type_constraints.unify(nonstarred_target_tuple, nonstarred_values, node)
if isinstance(unif_result, TypeFail):
return unif_result
assign_result = TypeInfo(value)
return assign_result
def _get_tuple_targets(self, t: nodes.Tuple) -> List[type]:
target_tvars = []
for subtarget in t.elts:
if isinstance(subtarget, nodes.AssignAttr):
target_tvars.append(
self._lookup_attribute_type(
subtarget, subtarget.expr.inf_type, subtarget.attrname
)
)
elif isinstance(subtarget, nodes.Starred):
if isinstance(subtarget.value, nodes.AssignAttr):
target_tvars.append(
self.lookup_typevar(subtarget.value, subtarget.value.attrname)
)
else:
target_tvars.append(self.lookup_typevar(subtarget.value, subtarget.value.name))
elif isinstance(subtarget, nodes.Subscript):
target_tvars.append(
self._handle_call(
subtarget, "__getitem__", subtarget.value.inf_type, subtarget.slice.inf_type
)
)
else:
target_tvars.append(self.lookup_typevar(subtarget, subtarget.name))
return target_tvars
@accept_failable
def _lookup_attribute_type(
self, node: nodes.NodeNG, class_type: type, attribute_name: str
) -> TypeResult:
"""Given the node, class and attribute name, return the type of the attribute."""
class_type = self.type_constraints.resolve(class_type)
class_name, _, _ = self.get_attribute_class(class_type)
if (
class_name in self.type_store.classes
and attribute_name in self.type_store.classes[class_name]
):
return self.type_constraints.resolve(
self.type_store.classes[class_name][attribute_name][0][0]
)
closest_frame = node.scope().lookup(class_name)[0]
try:
class_env = closest_frame.locals[class_name][0].type_environment
result = self.type_constraints.resolve(class_env.lookup_in_env(attribute_name))
except (KeyError, AttributeError):
result = TypeFailLookup(self.type_constraints.get_tnode(class_type), node, node.parent)
return result
def lookup_typevar(self, node: nodes.NodeNG, name: str) -> TypeResult:
"""Given a variable name, return the equivalent TypeVar in the closest scope relative to given node."""
cur_node = node
while cur_node is not None:
# Get first parent node with scope
cur_scope = cur_node.scope()
try:
# Attempt to look up variable in type environment
return TypeInfo(cur_scope.type_environment.lookup_in_env(name))
except KeyError:
# Variable not found in scope of current node, search parent node
cur_node = cur_scope.parent
# If root of astroid tree is reached with no variable found,
# search builtins and TypeStore for variable type
if name in self.type_store.classes:
result = TypeInfo(Type[__builtins__[name]])
elif name.lower() in self.type_store.classes:
result = TypeInfo(Type[__builtins__[name.lower()]])
elif name in self.type_store.functions:
result = TypeInfo(
Union[tuple([func_type for func_type, _ in self.type_store.functions[name]])]
)
else:
result = TypeFail("Unbound identifier")
return result
def lookup_inf_type(self, node: nodes.NodeNG, name: str) -> TypeResult:
"""Given a variable name, return a TypeResult object containing the type in the closest scope relative to given node."""
tvar = self.lookup_typevar(node, name)
return self.type_constraints.resolve(tvar)
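    # Example (informal): a name used inside a nested function is searched in
    # each enclosing type environment in turn; only when no scope binds it
    # does lookup_typevar fall back to builtins and the TypeStore.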
##############################################################################
# Operation nodes
##############################################################################
@accept_failable
def get_call_signature(self, c: type, node: nodes.NodeNG) -> TypeResult:
"""Check for and return initializer function signature when using class name as Callable.
Return Callable unmodified otherwise.
:param c: Class, ForwardRef to a class, or Callable
:param node: nodes.Call node where function call is occurring
"""
# Any is interpreted as a function that can take any arguments.
if c is Any:
return TypeInfo(Callable[..., Any])
# Callable type; e.g., 'Callable[[int], int]'
elif is_callable(c):
return TypeInfo(c)
# Union of Callables
elif getattr(c, "__origin__", None) is Union and all(
is_callable(elt) for elt in c.__args__
):
return TypeInfo(c)
# Class types; e.g., 'Type[ForwardRef('A')]'
elif getattr(c, "__origin__", None) is type:
class_type = c.__args__[0]
if isinstance(class_type, ForwardRef):
                class_name = class_type.__forward_arg__
else:
class_name = class_type.__name__
if "__init__" in self.type_store.classes[class_name]:
matching_init_funcs = []
for func_type, _ in self.type_store.classes[class_name]["__init__"]:
new_func_type = Callable[list(func_type.__args__[1:-1]), func_type.__args__[0]]
matching_init_funcs.append(new_func_type)
init_func = Union[tuple(matching_init_funcs)]
else:
# Classes declared without initializer
init_func = Callable[[], class_type]
return TypeInfo(init_func)
# Class instances; e.g., 'ForwardRef('A')'
elif isinstance(c, ForwardRef):
class_type = c
class_name = c.__forward_arg__
if "__call__" in self.type_store.classes[class_name]:
call_args = list(self.type_store.classes[class_name]["__call__"][0][0].__args__)
call_func = Callable[call_args[1:-1], call_args[-1]]
return TypeInfo(call_func)
else:
class_tnode = self.type_constraints.get_tnode(class_type)
return TypeFailLookup(class_tnode, node, node.parent)
else:
return TypeFailFunction((c,), None, node)
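    # Example (informal, assuming class A registers an initializer
    # Callable[[A, int], None] in the TypeStore): calling A(...) resolves to
    # Type[ForwardRef('A')], and get_call_signature rewrites the initializer
    # as Callable[[int], A] so unify_call can treat construction like an
    # ordinary function call.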
def visit_call(self, node: nodes.Call) -> None:
f = self.type_constraints.resolve(node.func.inf_type)
func_inf_type = self.get_call_signature(f, node.func)
arg_inf_types = [arg.inf_type for arg in node.args]
node.inf_type = self.type_constraints.unify_call(func_inf_type, *arg_inf_types, node=node)
def visit_binop(self, node: nodes.BinOp) -> None:
left_inf, right_inf = node.left.inf_type, node.right.inf_type
method_name = BINOP_TO_METHOD[node.op]
# attempt to obtain a common arithmetic type
arithm_type = self._arithm_convert(node, method_name, left_inf, right_inf)
if arithm_type:
node.inf_type = arithm_type
else:
rev_method_name = BINOP_TO_REV_METHOD[node.op]
l_type = self._handle_call(node, method_name, left_inf, right_inf)
r_type = self._handle_call(node, rev_method_name, right_inf, left_inf)
if self.type_store.is_descendant(right_inf.getValue(), left_inf.getValue()):
if isinstance(r_type, TypeFail) and isinstance(l_type, TypeInfo):
node.inf_type = l_type
else:
node.inf_type = r_type
else:
if isinstance(l_type, TypeFail) and isinstance(r_type, TypeInfo):
node.inf_type = r_type
else:
node.inf_type = l_type
@accept_failable
def _arithm_convert(
self, node: nodes.NodeNG, method: str, t1: type, t2: type
) -> Optional[TypeInfo]:
if t1 is complex and t2 is complex:
common_type = complex
elif (t1 is complex and issubclass(t2, typing.SupportsFloat)) or (
t2 is complex and issubclass(t1, typing.SupportsFloat)
):
# TODO: handle complex better. Looks like int, float don't
# support typing.SupportsComplex.
common_type = complex
elif (t1 is float and issubclass(t2, typing.SupportsFloat)) or (
t2 is float and issubclass(t1, typing.SupportsFloat)
):
common_type = float
else:
common_type = None
if common_type:
return self._handle_call(node, method, common_type, common_type)
else:
return None
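    # Example (informal): for `1 + 2.5`, t1 is int and t2 is float; int
    # supports typing.SupportsFloat, so common_type is float and the binop is
    # checked as float.__add__(float, float) rather than via int.__add__.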
def visit_unaryop(self, node: nodes.UnaryOp) -> None:
        # 'not' is not a function, so this is handled as a separate case.
if node.op == "not":
node.inf_type = TypeInfo(bool)
else:
method_name = UNARY_TO_METHOD[node.op]
node.inf_type = self._handle_call(node, method_name, node.operand.inf_type)
def visit_boolop(self, node: nodes.BoolOp) -> None:
node.inf_type = self._unify_elements(node.values, node)
if isinstance(node.inf_type, TypeFail):
node.inf_type = TypeInfo(Any)
def _handle_compare(
self, node: nodes.NodeNG, comparator: str, left: nodes.NodeNG, right: nodes.NodeNG
) -> TypeResult:
"""Helper function to lookup a comparator, find the equivalent function call,
and unify call with given arguments.
"""
if comparator == "is" or comparator == "is not":
return TypeInfo(bool)
elif comparator == "in" or comparator == "not in":
return self._handle_call(
node, BINOP_TO_METHOD[comparator], right.inf_type, left.inf_type
)
else:
return self._handle_call(
node, BINOP_TO_METHOD[comparator], left.inf_type, right.inf_type
)
def visit_compare(self, node: nodes.Compare) -> None:
left = node.left
compare_type = self._handle_compare(node, node.ops[0][0], left, node.ops[0][1])
for comparator, right in node.ops[1:]:
resolved_type = self._handle_compare(node, comparator, left, right)
compare_type = self.type_constraints.unify(compare_type, resolved_type, node)
node.inf_type = compare_type
##############################################################################
# Subscripting
##############################################################################
def visit_index(self, node: nodes.Index) -> None:
node.inf_type = node.value.inf_type
def visit_slice(self, node: nodes.Slice) -> None:
lower_type = node.lower.inf_type if node.lower else type(None)
upper_type = node.upper.inf_type if node.upper else type(None)
step_type = node.step.inf_type if node.step else type(None)
node.inf_type = self._handle_call(
node, "__init__", slice, lower_type, upper_type, step_type
)
node.inf_type = node.inf_type >> (
lambda t: TypeInfo(slice) if t == type(None) else TypeInfo(t)
)
def visit_extslice(self, node: nodes.ExtSlice):
unif_res = failable_collect(dim.inf_type for dim in node.dims)
node.inf_type = unif_res >> (lambda lst: wrap_container(Tuple, *lst))
def visit_subscript(self, node: nodes.Subscript) -> None:
if isinstance(node.slice.inf_type, TypeFail):
node.inf_type = node.slice.inf_type
elif node.ctx == nodes.Load:
try:
val_inf_type = self.type_constraints.resolve(node.value.inf_type)
value_gorg = val_inf_type >> _gorg
except AttributeError:
value_gorg = None
if value_gorg is type and isinstance(node.slice, nodes.Index):
if isinstance(node.slice.value, nodes.Tuple):
node.inf_type = wrap_container(
_node_to_type(node.value), *_node_to_type(node.slice.value)
)
else:
node.inf_type = wrap_container(
_node_to_type(node.value), _node_to_type(node.slice.value)
)
else:
node.inf_type = self._handle_call(
node, "__getitem__", node.value.inf_type, node.slice.inf_type
)
elif node.ctx == nodes.Store:
node.inf_type = NoType()
elif node.ctx == nodes.Del:
node.inf_type = self._handle_call(
node, "__delitem__", node.value.inf_type, node.slice.inf_type
)
##############################################################################
# Loops
##############################################################################
def visit_for(self, node: Union[nodes.For, nodes.Comprehension]) -> None:
iter_type_result = self._handle_call(node, "__iter__", node.iter.inf_type)
if isinstance(node.target, nodes.AssignName):
target_inf_type = self.lookup_inf_type(node.target, node.target.name)
elif isinstance(node.target, nodes.AssignAttr):
target_inf_type = self._lookup_attribute_type(
node.target, node.target.expr.inf_type, node.target.attrname
)
elif isinstance(node.target, nodes.Subscript):
target_inf_type = iter_type_result >> (
lambda t: self._handle_call(
node.target,
"__setitem__",
node.target.value.inf_type,
node.target.slice.inf_type,
t.__args__[0],
)
)
elif isinstance(node.target, nodes.Tuple):
target_inf_type = wrap_container(
Tuple,
*[
self.lookup_inf_type(subtarget, subtarget.name)
for subtarget in node.target.elts
],
)
iter_type_result >> (
lambda t: self.type_constraints.unify(t.__args__[0], target_inf_type, node)
)
node.inf_type = iter_type_result if isinstance(iter_type_result, TypeFail) else NoType()
##############################################################################
# Comprehensions
##############################################################################
def visit_comprehension(self, node: nodes.Comprehension) -> None:
self.visit_for(node)
def visit_dictcomp(self, node: nodes.DictComp) -> None:
key_inf_type = self.type_constraints.resolve(node.key.inf_type)
val_inf_type = self.type_constraints.resolve(node.value.inf_type)
node.inf_type = wrap_container(Dict, key_inf_type, val_inf_type)
def visit_generatorexp(self, node: nodes.GeneratorExp) -> None:
elt_inf_type = self.type_constraints.resolve(node.elt.inf_type)
node.inf_type = wrap_container(Generator, elt_inf_type, None, None)
def visit_listcomp(self, node: nodes.ListComp) -> None:
val_inf_type = self.type_constraints.resolve(node.elt.inf_type)
node.inf_type = wrap_container(List, val_inf_type)
def visit_setcomp(self, node: nodes.SetComp) -> None:
elt_inf_type = self.type_constraints.resolve(node.elt.inf_type)
node.inf_type = wrap_container(Set, elt_inf_type)
@accept_failable
def _handle_call(self, node: nodes.NodeNG, function_name: str, *arg_types: type) -> TypeResult:
"""Helper to lookup a function and unify it with given arguments.
Return the return type of unified function call.
"""
arg_inf_types = [self.type_constraints.resolve(arg) for arg in arg_types]
func_type = self.type_store.lookup_method(function_name, *arg_inf_types, node=node)
return self.type_constraints.unify_call(func_type, *arg_types, node=node)
##############################################################################
# Definitions
##############################################################################
def visit_functiondef(self, node: nodes.FunctionDef) -> None:
node.inf_type = NoType()
# Get the inferred type of the function arguments
inferred_args = [self.lookup_inf_type(node, arg) for arg in node.argnames()]
if isinstance(node.parent, nodes.ClassDef) and inferred_args:
# first argument is special in these cases
if node.type == "method":
self.type_constraints.unify(inferred_args[0], ForwardRef(node.parent.name), node)
elif node.type == "classmethod":
self.type_constraints.unify(
inferred_args[0], Type[ForwardRef(node.parent.name)], node
)
# Get inferred return type
if any(node.nodes_of_class(nodes.Return)):
return_node = list(node.nodes_of_class(nodes.Return))[-1]
if isinstance(return_node.inf_type, TypeFail):
inferred_return = return_node.inf_type
else:
inferred_return = self.lookup_inf_type(node, "return")
elif node.name == "__init__" and inferred_args:
inferred_return = inferred_args[0]
else:
inferred_return = TypeInfo(type(None))
# Update the environment storing the function's type.
polymorphic_tvars = set()
for arg in inferred_args + [inferred_return]:
arg >> (lambda a: polymorphic_tvars.add(a.__name__) if isinstance(a, TypeVar) else None)
# Create function signature
func_type = create_Callable_TypeResult(
failable_collect(inferred_args), inferred_return, polymorphic_tvars
)
# Check for optional arguments, create a Union of function signatures if necessary
num_defaults = len(node.args.defaults)
if num_defaults > 0 and not isinstance(func_type, TypeFail):
for i in range(num_defaults):
opt_args = inferred_args[: -1 - i]
opt_func_type = create_Callable_TypeResult(
failable_collect(opt_args), inferred_return, polymorphic_tvars
)
func_type = func_type >> (
lambda f: opt_func_type >> (lambda opt_f: TypeInfo(Union[f, opt_f]))
)
# Final type signature unify
func_name = self.lookup_inf_type(node.parent, node.name)
result = self.type_constraints.unify(func_name, func_type, node)
if isinstance(result, TypeFail):
node.inf_type = result
def visit_asyncfunctiondef(self, node: nodes.AsyncFunctionDef) -> None:
self.visit_functiondef(node)
def visit_lambda(self, node: nodes.Lambda) -> None:
inferred_args = [self.lookup_inf_type(node, arg) for arg in node.argnames()]
inferred_return = node.body.inf_type
polymorphic_tvars = set()
for arg in inferred_args + [inferred_return]:
arg >> (lambda a: polymorphic_tvars.add(a.__name__) if isinstance(a, TypeVar) else None)
node.inf_type = create_Callable_TypeResult(
failable_collect(inferred_args), inferred_return, polymorphic_tvars
)
def visit_arguments(self, node: nodes.Arguments) -> None:
node.inf_type = NoType()
if any(annotation is not None for annotation in node.annotations):
for i in range(len(node.annotations)):
arg_tvar = self.lookup_typevar(node, node.args[i].name)
if node.annotations[i] is not None:
ann_type = _ann_node_to_type(node.annotations[i])
result = self.type_constraints.unify(arg_tvar, ann_type, node)
if isinstance(result, TypeFail):
node.inf_type = result
else:
self.type_constraints.unify(arg_tvar, Any, node)
def visit_return(self, node: nodes.Return) -> None:
return_tvar = self.lookup_typevar(node, "return")
        # TODO: Replace with isinstance() once a proper TypeFail subclass is created for unbound identifiers
if return_tvar == TypeFail("Unbound identifier"):
return_target = TypeFailReturn(node)
else:
return_target = return_tvar
if node.value is not None and getattr(node.scope(), "returns", None) is not None:
return_annotation = _ann_node_to_type(node.scope().returns)
return_value = self.type_constraints.unify(node.value.inf_type, return_annotation, node)
elif node.value is not None:
return_value = node.value.inf_type
else:
return_value = TypeInfo(None)
val_inf_type = self.type_constraints.unify(return_value, return_target, node)
node.inf_type = val_inf_type if isinstance(val_inf_type, TypeFail) else NoType()
def visit_classdef(self, node: nodes.ClassDef) -> None:
node.inf_type = NoType()
# Update type_store for this class.
# TODO: include node.instance_attrs as well?
for attr in node.locals:
attr_inf_type = self.type_constraints.resolve(node.type_environment.lookup_in_env(attr))
attr_inf_type >> (
lambda a: self.type_store.methods[attr].append((a, node.locals[attr][0].type))
if is_callable(a)
else None
)
attr_inf_type >> (
lambda a: self.type_store.classes[node.name][attr].append(
(a, node.locals[attr][0].type if is_callable(a) else "attribute")
)
)
@accept_failable
    def get_attribute_class(self, t: type) -> Tuple[str, type, Optional[bool]]:
"""Check for and return name and type of class represented by type t."""
is_inst_expr = True
# TypeVar; e.g., 'TypeVar('_T1')' corresponding to a function argument
if isinstance(t, TypeVar):
return t.__name__, t, None
# Class type: e.g., 'Type[ForwardRef('A')]'
if getattr(t, "__origin__", None) is type:
class_type = t.__args__[0]
is_inst_expr = False
# Instance of class or builtin type; e.g., 'ForwardRef('A')' or 'int'
else:
class_type = t
if isinstance(class_type, ForwardRef):
class_name = class_type.__forward_arg__
elif isinstance(class_type, _GenericAlias):
class_name = class_type._name
else:
class_name = getattr(t, "__name__", None)
# TODO: the condition below is too general
if class_name is not None and class_name not in self.type_store.classes:
class_name = class_name.lower()
return class_name, class_type, is_inst_expr
def visit_attribute(self, node: nodes.Attribute) -> None:
expr_inf_type = self.type_constraints.resolve(node.expr.inf_type)
result = self.get_attribute_class(expr_inf_type)
if not isinstance(result, TypeFail):
class_name, class_type, inst_expr = result
if class_type == Any:
node.inf_type = TypeInfo(Any)
elif class_name in self.type_store.classes:
attribute_type = None
for par_class_type in self.type_store.classes[class_name]["__mro"]:
attribute_type = self.type_store.classes[par_class_type].get(node.attrname)
if attribute_type:
break
if attribute_type is None:
class_tnode = self.type_constraints.get_tnode(class_type)
node.inf_type = TypeFailLookup(class_tnode, node, node.parent)
else:
func_type, method_type = attribute_type[0]
                    if (
                        is_callable(func_type)
                        and method_type == "method"
                        and inst_expr
                    ) or method_type == "classmethod":
# Replace polymorphic type variables with fresh type variables
fresh_func_type = self.type_constraints.fresh_callable(func_type, node)
self.type_constraints.unify(fresh_func_type.__args__[0], class_type)
# Create new Callable to avoid modifying elements of type store
new_func_type = create_Callable_TypeResult(
fresh_func_type.__args__[1:-1], fresh_func_type.__args__[-1]
)
else:
new_func_type = TypeInfo(func_type)
node.inf_type = new_func_type
else:
class_tnode = self.type_constraints.get_tnode(class_type)
node.inf_type = TypeFailLookup(class_tnode, node, node.parent)
else:
node.inf_type = result
def visit_module(self, node: nodes.Module) -> None:
node.inf_type = NoType()
# Main function (useful for quick debugging)
def main(source: str) -> Tuple[nodes.Module, TypeInferer]:
"""Parse a string representing source text, and perform a typecheck.
Return the astroid Module node (with the type_constraints attribute set
on all nodes in the tree) and TypeInferer object.
"""
module = astroid.parse(source)
type_inferer = TypeInferer()
type_inferer.environment_transformer().visit(module)
type_inferer.type_inference_transformer().visit(module)
return module, type_inferer
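# Usage sketch (informal; output shape depends on the TypeStore contents):
#
#     module, inferer = main('x = 1\ny = x + 2.0\n')
#     for assign in module.nodes_of_class(nodes.Assign):
#         print(inferer.type_constraints.resolve(assign.value.inf_type))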
|
pyta-uoft/pyta
|
python_ta/transforms/type_inference_visitor.py
|
Python
|
gpl-3.0
| 43,708
|
[
"VisIt"
] |
4c7afdc78c36b180e5a620ed003782491ac7b76a08fe67be3b1e20e840a2c598
|
import os
from os.path import join
import numpy as n
def writeScript(rootName, plate):
	f = open(rootName + ".sh", 'w')
f.write("#!/bin/bash \n")
f.write("#PBS -l walltime=20:00:00 \n")
f.write("#PBS -o "+plate+".o.$PBS_JOBID \n")
f.write("#PBS -e "+plate+".e$PBS_JOBID \n")
f.write("#PBS -M johan.comparat@gmail.com \n")
f.write("module load apps/anaconda/2.4.1 \n")
f.write("module load apps/python/2.7.8/gcc-4.4.7 \n")
f.write("export PYTHONPATH=$PYTHONPATH:/users/comparat/pySU/galaxy/python/ \n")
f.write("export PYTHONPATH=$PYTHONPATH:/users/comparat/pySU/simulations/python/ \n")
f.write("export PYTHONPATH=$PYTHONPATH:/users/comparat/pySU/multidark/python/ \n")
f.write("export PYTHONPATH=$PYTHONPATH:/users/comparat/pySU/spm/python/ \n")
f.write("export PYTHONPATH=$PYTHONPATH:/users/comparat/pySU/targetselection/python/ \n")
f.write(" \n")
f.write("cd /users/comparat/pySU/galaxy/bin_SDSS \n")
f.write("python create_master_table_kr "+plate+" \n")
f.write(" \n")
f.close()
plateList = n.loadtxt("plateList", unpack=True)
for plate in plateList:
rootName = join(os.environ['HOME'], "batchscripts_firefly_kroupa_table", str(int(plate)))
writeScript(rootName, str(int(plate)))
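# Note (assumption): "plateList" is a plain-text file of numeric plate ids,
# one per line, e.g.
#   7338
#   7339
# n.loadtxt reads them as floats, hence the int() casts above.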
|
JohanComparat/pySU
|
galaxy/bin_SDSS/write_run_scripts_master_table_kr.py
|
Python
|
cc0-1.0
| 1,201
|
[
"Galaxy"
] |
7a0d1af1088e2235f751ac81f38e4f810af3b98866f52c35287c090c7cfb76a0
|
from eularian_magnification.base import eulerian_magnification, show_frequencies
import sys
# What's the frequency, Kenneth?
# fix the output /usr/local/lib/python3.5/dist-packages/eularian_magnification
def main(filename):
show_frequencies(filename)
eulerian_magnification(filename, image_processing='gaussian',
pyramid_levels=3, freq_min=50.0 / 60.0,
freq_max=1.0, amplification=50)
eulerian_magnification(filename, image_processing='laplacian',
pyramid_levels=5, freq_min=0.45,
freq_max=1, amplification=50)
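# Note (informal): freq_min = 50/60 Hz and freq_max = 1.0 Hz bracket typical
# resting heart rates (roughly 50-60 beats per minute), the usual target band
# in Eulerian video magnification demos.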
if __name__ == '__main__':
if len(sys.argv) < 2:
print ("Usage %s <videofile>" % sys.argv[0])
sys.exit(1)
main(sys.argv[1])
|
squeakus/motiontracker
|
eulerian.py
|
Python
|
bsd-2-clause
| 798
|
[
"Gaussian"
] |
da8e9306cadfb60a96ddd7465145ddfb6c532acee6c8a95717a72cae369451e4
|
# -*- encoding:ascii -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 6
_modified_time = 1417441813.18923
_template_filename='templates/webapps/galaxy/workflow/editor_tool_form.mako'
_template_uri='workflow/editor_tool_form.mako'
_template_cache=cache.Cache(__name__, _modified_time)
_source_encoding='ascii'
_exports = ['do_inputs', 'row_for_param']
def render_body(context,**pageargs):
context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
errors = context.get('errors', UNDEFINED)
h = context.get('h', UNDEFINED)
tool = context.get('tool', UNDEFINED)
def do_inputs(inputs,values,errors,prefix,ctx=None):
return render_do_inputs(context.locals_(__M_locals),inputs,values,errors,prefix,ctx)
values = context.get('values', UNDEFINED)
enumerate = context.get('enumerate', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 1
from galaxy.tools.parameters import DataToolParameter, RuntimeValue
from galaxy.tools.parameters import DataCollectionToolParameter
from galaxy.util.expressions import ExpressionContext
__M_locals_builtin_stored = __M_locals_builtin()
__M_locals.update(__M_dict_builtin([(__M_key, __M_locals_builtin_stored[__M_key]) for __M_key in ['DataCollectionToolParameter','RuntimeValue','ExpressionContext','DataToolParameter'] if __M_key in __M_locals_builtin_stored]))
# SOURCE LINE 5
__M_writer(u'\n\n')
# SOURCE LINE 49
__M_writer(u'\n\n')
# SOURCE LINE 106
__M_writer(u'\n\n<form method="post" action="')
# SOURCE LINE 108
__M_writer(unicode(h.url_for(controller='workflow', action='editor_form_post' )))
__M_writer(u'">\n\n <div class="toolForm">\n <div class="toolFormTitle">Tool: ')
# SOURCE LINE 111
__M_writer(unicode(tool.name))
__M_writer(u'</div>\n')
# SOURCE LINE 112
if tool.version:
# SOURCE LINE 113
__M_writer(u' <div class="form-row"><div class=\'titleRow\'>Version: ')
__M_writer(unicode(tool.version))
__M_writer(u'</div></div>\n')
pass
# SOURCE LINE 115
__M_writer(u' <div class="toolFormBody">\n <input type="hidden" name="tool_id" value="')
# SOURCE LINE 116
__M_writer(unicode(tool.id))
__M_writer(u'" />\n')
# SOURCE LINE 117
for i, inputs in enumerate( tool.inputs_by_page ):
# SOURCE LINE 118
if tool.has_multiple_pages:
# SOURCE LINE 119
__M_writer(u" <div class='titleRow'>Page ")
__M_writer(unicode(i+1))
__M_writer(u'</div>\n')
pass
# SOURCE LINE 121
__M_writer(u' ')
__M_writer(unicode(do_inputs( inputs, values, errors, "" )))
__M_writer(u'\n')
pass
# SOURCE LINE 123
__M_writer(u' </div>\n </div>\n \n\n</form>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_do_inputs(context,inputs,values,errors,prefix,ctx=None):
context.caller_stack._push_frame()
try:
def row_for_param(param,value,error_dict,prefix,ctx,allow_runtime=True):
return render_row_for_param(context,param,value,error_dict,prefix,ctx,allow_runtime)
def do_inputs(inputs,values,errors,prefix,ctx=None):
return render_do_inputs(context,inputs,values,errors,prefix,ctx)
len = context.get('len', UNDEFINED)
range = context.get('range', UNDEFINED)
dict = context.get('dict', UNDEFINED)
str = context.get('str', UNDEFINED)
enumerate = context.get('enumerate', UNDEFINED)
trans = context.get('trans', UNDEFINED)
ExpressionContext = context.get('ExpressionContext', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 7
__M_writer(u'\n ')
# SOURCE LINE 8
ctx = ExpressionContext( values, ctx )
__M_writer(u'\n')
# SOURCE LINE 9
for input_index, input in enumerate( inputs.itervalues() ):
# SOURCE LINE 10
if input.type == "repeat":
# SOURCE LINE 11
__M_writer(u' <div class="repeat-group form-row">\n <label>')
# SOURCE LINE 12
__M_writer(unicode(input.title_plural))
__M_writer(u':</label>\n ')
# SOURCE LINE 13
repeat_values = values[input.name]
__M_writer(u'\n')
# SOURCE LINE 14
for i in range( len( repeat_values ) ):
# SOURCE LINE 15
__M_writer(u' ')
if input.name in errors:
rep_errors = errors[input.name][i]
else:
rep_errors = dict()
index = repeat_values[i]['__index__']
# SOURCE LINE 21
__M_writer(u'\n <div class="repeat-group-item">\n <div class="form-title-row"><label>')
# SOURCE LINE 23
__M_writer(unicode(input.title))
__M_writer(u' ')
__M_writer(unicode(i + 1))
__M_writer(u'</label></div>\n ')
# SOURCE LINE 24
__M_writer(unicode(do_inputs( input.inputs, repeat_values[ i ], rep_errors, prefix + input.name + "_" + str(index) + "|", ctx )))
__M_writer(u'\n <div class="form-row"><input type="submit" name="')
# SOURCE LINE 25
__M_writer(unicode(prefix))
__M_writer(unicode(input.name))
__M_writer(u'_')
__M_writer(unicode(index))
__M_writer(u'_remove" value="Remove ')
__M_writer(unicode(input.title))
__M_writer(u' ')
__M_writer(unicode(i+1))
__M_writer(u'"></div>\n </div>\n')
pass
# SOURCE LINE 28
__M_writer(u' <div class="form-row"><input type="submit" name="')
__M_writer(unicode(prefix))
__M_writer(unicode(input.name))
__M_writer(u'_add" value="Add new ')
__M_writer(unicode(input.title))
__M_writer(u'"></div>\n </div>\n')
# SOURCE LINE 30
elif input.type == "conditional":
# SOURCE LINE 31
if input.is_job_resource_conditional:
# SOURCE LINE 32
__M_writer(u' ')
continue
__M_writer(u'\n')
pass
# SOURCE LINE 34
__M_writer(u' ')
group_values = values[input.name]
__M_writer(u'\n ')
# SOURCE LINE 35
current_case = group_values['__current_case__']
__M_writer(u'\n ')
# SOURCE LINE 36
group_prefix = prefix + input.name + "|"
__M_writer(u'\n ')
# SOURCE LINE 37
group_errors = errors.get( input.name, {} )
__M_writer(u'\n ')
# SOURCE LINE 38
__M_writer(unicode(row_for_param( input.test_param, group_values[ input.test_param.name ], group_errors, group_prefix, ctx, allow_runtime=False )))
__M_writer(u'\n ')
# SOURCE LINE 39
__M_writer(unicode(do_inputs( input.cases[ current_case ].inputs, group_values, group_errors, group_prefix, ctx )))
__M_writer(u'\n')
# SOURCE LINE 40
else:
# SOURCE LINE 41
if input.name in values:
# SOURCE LINE 42
__M_writer(u' ')
__M_writer(unicode(row_for_param( input, values[ input.name ], errors, prefix, ctx )))
__M_writer(u'\n')
# SOURCE LINE 43
else:
# SOURCE LINE 44
__M_writer(u' ')
errors[ input.name ] = 'Value not stored, displaying default'
__M_writer(u'\n ')
# SOURCE LINE 45
__M_writer(unicode(row_for_param( input, input.get_initial_value( trans, values ), errors, prefix, ctx )))
__M_writer(u'\n')
pass
pass
pass
return ''
finally:
context.caller_stack._pop_frame()
def render_row_for_param(context,param,value,error_dict,prefix,ctx,allow_runtime=True):
context.caller_stack._push_frame()
try:
trans = context.get('trans', UNDEFINED)
DataToolParameter = context.get('DataToolParameter', UNDEFINED)
h = context.get('h', UNDEFINED)
RuntimeValue = context.get('RuntimeValue', UNDEFINED)
DataCollectionToolParameter = context.get('DataCollectionToolParameter', UNDEFINED)
isinstance = context.get('isinstance', UNDEFINED)
type = context.get('type', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 51
__M_writer(u'\n')
# SOURCE LINE 52
if error_dict.has_key( param.name ):
# SOURCE LINE 53
__M_writer(u' ')
cls = "form-row form-row-error"
__M_writer(u'\n')
# SOURCE LINE 54
else:
# SOURCE LINE 55
__M_writer(u' ')
cls = "form-row"
__M_writer(u'\n')
pass
# SOURCE LINE 57
__M_writer(u' <div class="')
__M_writer(unicode(cls))
__M_writer(u'" id="row-')
__M_writer(unicode(prefix))
__M_writer(unicode(param.name))
__M_writer(u'">\n')
# SOURCE LINE 60
if type( param ) is DataToolParameter:
# SOURCE LINE 61
__M_writer(u' <label>\n ')
# SOURCE LINE 62
__M_writer(unicode(param.get_label()))
__M_writer(u"\n </label>\n <div>\n Data input '")
# SOURCE LINE 65
__M_writer(unicode(param.name))
__M_writer(u"' (")
__M_writer(unicode(" or ".join( param.extensions )))
__M_writer(u')\n </div>\n')
# SOURCE LINE 67
elif type( param ) is DataCollectionToolParameter:
# SOURCE LINE 68
__M_writer(u' <label>\n ')
# SOURCE LINE 69
__M_writer(unicode(param.get_label()))
__M_writer(u"\n </label>\n <div>\n Data collection input '")
# SOURCE LINE 72
__M_writer(unicode(param.name))
__M_writer(u"'\n </div>\n")
# SOURCE LINE 74
else:
# SOURCE LINE 75
if isinstance( value, RuntimeValue ):
# SOURCE LINE 76
__M_writer(u' <label>\n ')
# SOURCE LINE 77
__M_writer(unicode(param.get_label()))
__M_writer(u':\n <span class="popupmenu">\n <button type="submit" name="make_buildtime" value="')
# SOURCE LINE 79
__M_writer(unicode(prefix))
__M_writer(unicode(param.name))
__M_writer(u'">Set in advance</button>\n </span>\n </label>\n <div>\n <i>To be set at runtime</i>\n </div>\n')
# SOURCE LINE 85
else:
# SOURCE LINE 86
__M_writer(u' <label>\n ')
# SOURCE LINE 87
__M_writer(unicode(param.get_label()))
__M_writer(u':\n')
# SOURCE LINE 88
if allow_runtime:
# SOURCE LINE 89
__M_writer(u' <span class="popupmenu">\n <button type="submit" name="make_runtime" value="')
# SOURCE LINE 90
__M_writer(unicode(prefix))
__M_writer(unicode(param.name))
__M_writer(u'">Set at runtime</button>\n </span>\n')
pass
# SOURCE LINE 93
__M_writer(u' </label>\n <div>\n ')
# SOURCE LINE 95
__M_writer(unicode(param.get_html_field( trans, value, ctx ).get_html( prefix )))
__M_writer(u' \n </div>\n')
pass
# SOURCE LINE 98
if error_dict.has_key( param.name ):
# SOURCE LINE 99
__M_writer(u' <div style="color: red; font-weight: bold; padding-top: 1px; padding-bottom: 3px;">\n <div style="width: 300px;"><img style="vertical-align: middle;" src="')
# SOURCE LINE 100
__M_writer(unicode(h.url_for('/static/style/error_small.png')))
__M_writer(u'"> <span style="vertical-align: middle;">')
__M_writer(unicode(error_dict[param.name]))
__M_writer(u'</span></div>\n </div>\n')
pass
pass
# SOURCE LINE 104
__M_writer(u' <div style="clear: both"></div> \n </div>\n')
return ''
finally:
context.caller_stack._pop_frame()
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/database/compiled_templates/workflow/editor_tool_form.mako.py
|
Python
|
gpl-3.0
| 14,508
|
[
"Galaxy"
] |
d6f8128780d8aa1e2dfad34685fc1208fd343cea855586f9a28ec901d03438be
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Restricted open-shell Kohn-Sham for periodic systems with k-point sampling
'''
import numpy as np
from pyscf import lib
from pyscf.pbc.scf import krohf
from pyscf.pbc.dft import rks
from pyscf.pbc.dft import kuks
from pyscf.pbc.dft.kuks import energy_elec
@lib.with_doc(kuks.get_veff.__doc__)
def get_veff(ks, cell=None, dm=None, dm_last=0, vhf_last=0, hermi=1,
kpts=None, kpts_band=None):
if getattr(dm, 'mo_coeff', None) is not None:
mo_coeff = dm.mo_coeff
mo_occ_a = [(x > 0).astype(np.double) for x in dm.mo_occ]
        mo_occ_b = [(x == 2).astype(np.double) for x in dm.mo_occ]
dm = lib.tag_array(dm, mo_coeff=(mo_coeff,mo_coeff),
mo_occ=(mo_occ_a,mo_occ_b))
return kuks.get_veff(ks, cell, dm, dm_last, vhf_last, hermi, kpts, kpts_band)
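# Note (informal): an ROHF-style density matrix carries one set of mo_coeff
# with occupations in {0, 1, 2}; the masks above split it into alpha
# (occ > 0) and beta (occ == 2) components so the unrestricted KUKS
# machinery can be reused unchanged.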
class KROKS(rks.KohnShamDFT, krohf.KROHF):
'''RKS class adapted for PBCs with k-point sampling.
'''
def __init__(self, cell, kpts=np.zeros((1,3)), xc='LDA,VWN'):
krohf.KROHF.__init__(self, cell, kpts)
rks.KohnShamDFT.__init__(self, xc)
def dump_flags(self, verbose=None):
krohf.KROHF.dump_flags(self, verbose)
rks.KohnShamDFT.dump_flags(self, verbose)
return self
get_veff = get_veff
energy_elec = energy_elec
get_rho = kuks.get_rho
density_fit = rks._patch_df_beckegrids(krohf.KROHF.density_fit)
mix_density_fit = rks._patch_df_beckegrids(krohf.KROHF.mix_density_fit)
if __name__ == '__main__':
from pyscf.pbc import gto
cell = gto.Cell()
cell.unit = 'A'
cell.atom = 'C 0., 0., 0.; C 0.8917, 0.8917, 0.8917'
cell.a = '''0. 1.7834 1.7834
1.7834 0. 1.7834
1.7834 1.7834 0. '''
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade'
cell.verbose = 7
cell.output = '/dev/null'
cell.build()
mf = KROKS(cell, cell.make_kpts([2,1,1]))
print(mf.kernel())
|
gkc1000/pyscf
|
pyscf/pbc/dft/kroks.py
|
Python
|
apache-2.0
| 2,630
|
[
"PySCF"
] |
47da4dbc6b3ef2ff5a66b3be3deb893a012cd12bc1d010ea63f570ca0e289557
|
from __future__ import print_function
from os.path import exists, join, dirname
try:
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
except ImportError as err:
print("Failed to import OpenMM packages:", err.message)
print("Make sure OpenMM is installed and the library path is set correctly.")
exit()
#*****************************************************************************#
# Customize these lines to change the OpenMM simulation setup #
# See the documentation at https://simtk.org/api_docs/openmm/api5_2/python/ #
# for details on the available options #
#*****************************************************************************#
input_pdb = join(dirname(__file__), 'input.pdb')
pdb = PDBFile(input_pdb)
forcefield = ForceField('amber99sb.xml', 'tip3p.xml')
system = forcefield.createSystem(pdb.topology, nonbondedMethod=PME,
nonbondedCutoff=1*nanometer, constraints=HBonds)
integrator = LangevinIntegrator(300*kelvin, 1.0/picosecond, 2.0*femtosecond)
# Now that the system is setup, write out all of the files to disk for tungsten
with open('system.xml', 'w') as f:
f.write(XmlSerializer.serialize(system))
print('saved system.xml')
with open('integrator.xml', 'w') as f:
f.write(XmlSerializer.serialize(integrator))
print('saved integrator.xml')
context = Context(system, integrator)
context.setPositions(pdb.positions)
context.setVelocitiesToTemperature(300*kelvin)
state = context.getState(getPositions=True, getVelocities=True)
with open('state.xml', 'w') as f:
f.write(XmlSerializer.serialize(state))
print('saved state.xml')
with open('AtomIndices.dat', 'w') as f:
for atom in pdb.topology.atoms():
if atom.name == 'CA':
f.write('%d\n' % atom.index)
print('saved AtomIndices.dat')
print("All done.")
|
rmcgibbo/tungsten
|
examples/buildTungstenXML.py
|
Python
|
lgpl-2.1
| 1,929
|
[
"OpenMM"
] |
a611b3f8d872c141e6ea8a91afdf5fbf15977674c0f39eee687526007d799e8b
|
from utils import any
import warnings
import traceback
from typehandlers.base import Parameter, ReturnValue, \
join_ctype_and_name, CodeGenerationError, \
param_type_matcher, return_type_matcher, CodegenErrorBase, \
DeclarationsScope, CodeBlock
from typehandlers.codesink import NullCodeSink, MemoryCodeSink
from cppattribute import CppInstanceAttributeGetter, CppInstanceAttributeSetter, \
CppStaticAttributeGetter, CppStaticAttributeSetter, \
PyGetSetDef, PyMetaclass
from pytypeobject import PyTypeObject, PyNumberMethods, PySequenceMethods
import settings
import utils
from cppclass_container import CppClassContainerTraits
try:
set
except NameError:
from sets import Set as set
def _type_no_ref(value_type):
if value_type.type_traits.type_is_reference:
return str(value_type.type_traits.target)
else:
return str(value_type.type_traits.ctype_no_modifiers)
def get_python_to_c_converter(value, root_module, code_sink):
if isinstance(value, CppClass):
val_converter = root_module.generate_python_to_c_type_converter(value.ThisClassReturn(value.full_name), code_sink)
val_name = value.full_name
elif isinstance(value, ReturnValue):
val_converter = root_module.generate_python_to_c_type_converter(value, code_sink)
val_name = _type_no_ref(value)
elif isinstance(value, Parameter):
val_return_type = ReturnValue.new(value.ctype)
val_converter = root_module.generate_python_to_c_type_converter(val_return_type, code_sink)
val_name = _type_no_ref(value)
else:
raise ValueError, "Don't know how to convert %r" % (value,)
return val_converter, val_name
def get_c_to_python_converter(value, root_module, code_sink):
if isinstance(value, CppClass):
val_converter = root_module.generate_c_to_python_type_converter(value.ThisClassReturn(value.full_name), code_sink)
val_name = value.full_name
elif isinstance(value, ReturnValue):
val_converter = root_module.generate_c_to_python_type_converter(value, code_sink)
val_name = _type_no_ref(value)
elif isinstance(value, Parameter):
val_return_type = ReturnValue.new(value.ctype)
val_converter = root_module.generate_c_to_python_type_converter(val_return_type, code_sink)
val_name = _type_no_ref(value)
else:
raise ValueError, "Don't know how to convert %s" % str(value)
return val_converter, val_name
class MemoryPolicy(object):
"""memory management policy for a C++ class or C/C++ struct"""
def __init__(self):
if type(self) is MemoryPolicy:
raise NotImplementedError("class is abstract")
def get_free_code(self, object_expression):
"""
Return a code statement to free an underlying C/C++ object.
"""
raise NotImplementedError
class ReferenceCountingPolicy(MemoryPolicy):
def write_incref(self, code_block, obj_expr):
"""
Write code to increase the reference code of an object of this
class (the real C++ class, not the wrapper). Should only be
called if the class supports reference counting, as reported
by the attribute `CppClass.has_reference_counting`.
"""
raise NotImplementedError
def write_decref(self, code_block, obj_expr):
"""
Write code to decrease the reference code of an object of this
class (the real C++ class, not the wrapper). Should only be
called if the class supports reference counting, as reported
by the attribute `CppClass.has_reference_counting`.
"""
raise NotImplementedError
class ReferenceCountingMethodsPolicy(ReferenceCountingPolicy):
def __init__(self, incref_method, decref_method, peekref_method=None):
super(ReferenceCountingMethodsPolicy, self).__init__()
self.incref_method = incref_method
self.decref_method = decref_method
self.peekref_method = peekref_method
def write_incref(self, code_block, obj_expr):
code_block.write_code('%s->%s();' % (obj_expr, self.incref_method))
def write_decref(self, code_block, obj_expr):
code_block.write_code('%s->%s();' % (obj_expr, self.decref_method))
def get_free_code(self, obj_expr):
return ('%s->%s();' % (obj_expr, self.decref_method))
def __repr__(self):
return 'cppclass.ReferenceCountingMethodsPolicy(incref_method=%r, decref_method=%r, peekref_method=%r)' \
% (self.incref_method, self.decref_method, self.peekref_method)
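# Example (informal, hypothetical method names): a C++ class exposing
# Ref()/Unref() members could use
# ReferenceCountingMethodsPolicy('Ref', 'Unref'), which makes the generated
# wrappers emit 'obj->Ref();' and 'obj->Unref();' at ownership transfers.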
class ReferenceCountingFunctionsPolicy(ReferenceCountingPolicy):
def __init__(self, incref_function, decref_function, peekref_function=None):
super(ReferenceCountingFunctionsPolicy, self).__init__()
self.incref_function = incref_function
self.decref_function = decref_function
self.peekref_function = peekref_function
def write_incref(self, code_block, obj_expr):
code_block.write_code('%s(%s);' % (self.incref_function, obj_expr))
def write_decref(self, code_block, obj_expr):
code_block.write_code('%s(%s);' % (self.decref_function, obj_expr))
def get_free_code(self, obj_expr):
return ('%s(%s);' % (self.decref_function, obj_expr))
def __repr__(self):
return 'cppclass.ReferenceCountingFunctionsPolicy(incref_function=%r, decref_function=%r, peekref_function=%r)' \
% (self.incref_function, self.decref_function, self.peekref_function)
class FreeFunctionPolicy(MemoryPolicy):
def __init__(self, free_function):
super(FreeFunctionPolicy, self).__init__()
self.free_function = free_function
def get_free_code(self, obj_expr):
return ('%s(%s);' % (self.free_function, obj_expr))
def __repr__(self):
return 'cppclass.FreeFunctionPolicy(%r)' % self.free_function
def default_instance_creation_function(cpp_class, code_block, lvalue,
parameters, construct_type_name):
"""
Default "instance creation function"; it is called whenever a new
C++ class instance needs to be created; this default
implementation uses a standard C++ new allocator.
:param cpp_class: the CppClass object whose instance is to be created
:param code_block: CodeBlock object on which the instance creation code should be generated
:param lvalue: lvalue expression that should hold the result in the end
:param parameters: stringified list of parameters
:param construct_type_name: actual name of type to be constructed (it is
not always the class name, sometimes it's
the python helper class)
"""
assert lvalue
assert not lvalue.startswith('None')
if cpp_class.incomplete_type:
raise CodeGenerationError("%s cannot be constructed (incomplete type)"
% cpp_class.full_name)
code_block.write_code(
"%s = new %s(%s);" % (lvalue, construct_type_name, parameters))
class CppHelperClass(object):
"""
Generates code for a C++ proxy subclass that takes care of
forwarding virtual methods from C++ to Python.
"""
def __init__(self, class_):
"""
:param class_: original CppClass wrapper object
"""
self.class_ = class_
self.name = class_.pystruct + "__PythonHelper"
self.virtual_parent_callers = {}
self.virtual_proxies = []
self.cannot_be_constructed = False
self.custom_methods = []
self.post_generation_code = []
self.virtual_methods = []
def add_virtual_method(self, method):
assert method.is_virtual
assert method.class_ is not None
for existing in self.virtual_methods:
if method.matches_signature(existing):
return # don't re-add already existing method
if isinstance(method, CppDummyMethod):
if method.is_pure_virtual:
self.cannot_be_constructed = True
else:
self.virtual_methods.append(method)
if not method.is_pure_virtual:
if settings._get_deprecated_virtuals():
vis = ['public', 'protected']
else:
vis = ['protected']
if method.visibility in vis:
parent_caller = CppVirtualMethodParentCaller(method)
#parent_caller.class_ = method.class_
parent_caller.helper_class = self
parent_caller.main_wrapper = method # XXX: need to explain this
self.add_virtual_parent_caller(parent_caller)
proxy = CppVirtualMethodProxy(method)
proxy.main_wrapper = method # XXX: need to explain this
self.add_virtual_proxy(proxy)
def add_virtual_parent_caller(self, parent_caller):
"""Add a new CppVirtualMethodParentCaller object to this helper class"""
assert isinstance(parent_caller, CppVirtualMethodParentCaller)
name = parent_caller.method_name
try:
overload = self.virtual_parent_callers[name]
except KeyError:
overload = CppOverloadedMethod(name)
## implicit conversions + virtual methods disabled
## temporarily until I can figure out how to fix the unit
## tests.
overload.enable_implicit_conversions = False
#overload.static_decl = False
overload.pystruct = self.class_.pystruct
self.virtual_parent_callers[name] = overload
assert self.class_ is not None
for existing in overload.wrappers:
if parent_caller.matches_signature(existing):
break # don't re-add already existing method
else:
overload.add(parent_caller)
def add_custom_method(self, declaration, body=None):
"""
Add a custom method to the helper class, given by a
declaration line and a body. The body can be None, in case
the whole method definition is included in the declaration
itself.
"""
self.custom_methods.append((declaration, body))
def add_post_generation_code(self, code):
"""
Add custom code to be included right after the helper class is generated.
"""
self.post_generation_code.append(code)
def add_virtual_proxy(self, virtual_proxy):
"""Add a new CppVirtualMethodProxy object to this class"""
assert isinstance(virtual_proxy, CppVirtualMethodProxy)
self.virtual_proxies.append(virtual_proxy)
def generate_forward_declarations(self, code_sink_param):
"""
Generate the proxy class (declaration only) to a given code sink
"""
code_sink = MemoryCodeSink()
if self._generate_forward_declarations(code_sink):
code_sink.flush_to(code_sink_param)
else:
self.cannot_be_constructed = True
def _generate_forward_declarations(self, code_sink):
"""
Generate the proxy class (declaration only) to a given code sink.
Returns True if all is well, False if a pure virtual method
was found that could not be generated.
"""
code_sink.writeln("class %s : public %s\n{\npublic:" %
(self.name, self.class_.full_name))
code_sink.indent()
code_sink.writeln("PyObject *m_pyself;")
if not self.class_.import_from_module:
## replicate the parent constructors in the helper class
implemented_constructor_signatures = []
for cons in self.class_.constructors:
## filter out duplicated constructors
signature = [param.ctype for param in cons.parameters]
if signature in implemented_constructor_signatures:
continue
implemented_constructor_signatures.append(signature)
params = [join_ctype_and_name(param.ctype, param.name)
for param in cons.parameters]
code_sink.writeln("%s(%s)" % (self.name, ', '.join(params)))
code_sink.indent()
code_sink.writeln(": %s(%s), m_pyself(NULL)\n{}" %
(self.class_.full_name,
', '.join([param.name for param in cons.parameters])))
code_sink.unindent()
code_sink.writeln()
## add the set_pyobj method
code_sink.writeln("""
void set_pyobj(PyObject *pyobj)
{
Py_XDECREF(m_pyself);
Py_INCREF(pyobj);
m_pyself = pyobj;
}
""")
## write a destructor
code_sink.writeln("virtual ~%s()\n{" % self.name)
code_sink.indent()
code_sink.writeln("Py_CLEAR(m_pyself);")
code_sink.unindent()
code_sink.writeln("}\n")
if not self.class_.import_from_module:
## write the parent callers (_name)
for parent_caller in self.virtual_parent_callers.itervalues():
#parent_caller.class_ = self.class_
parent_caller.helper_class = self
parent_caller.reset_code_generation_state()
## test code generation
try:
try:
utils.call_with_error_handling(parent_caller.generate,
(NullCodeSink(),), {}, parent_caller)
except utils.SkipWrapper:
continue
finally:
parent_caller.reset_code_generation_state()
code_sink.writeln()
parent_caller.generate_class_declaration(code_sink)
for parent_caller_wrapper in parent_caller.wrappers:
parent_caller_wrapper.generate_parent_caller_method(code_sink)
## write the virtual proxies
for virtual_proxy in self.virtual_proxies:
#virtual_proxy.class_ = self.class_
virtual_proxy.helper_class = self
## test code generation
virtual_proxy.reset_code_generation_state()
try:
try:
utils.call_with_error_handling(virtual_proxy.generate,
(NullCodeSink(),), {}, virtual_proxy)
except utils.SkipWrapper:
if virtual_proxy.method.is_pure_virtual:
return False
continue
finally:
virtual_proxy.reset_code_generation_state()
code_sink.writeln()
virtual_proxy.generate_declaration(code_sink)
for custom_declaration, dummy in self.custom_methods:
code_sink.writeln(custom_declaration)
code_sink.unindent()
code_sink.writeln("};\n")
if not self.class_.import_from_module:
for code in self.post_generation_code:
code_sink.writeln(code)
code_sink.writeln()
return True
def generate(self, code_sink):
"""
Generate the proxy class (virtual method bodies only) to a given code sink.
Returns a list of PyMethodDef entries for the parent callers.
"""
if self.class_.import_from_module:
return
## write the parent callers (_name)
method_defs = []
for name, parent_caller in self.virtual_parent_callers.iteritems():
#parent_caller.class_ = self.class_
parent_caller.helper_class = self
code_sink.writeln()
## parent_caller.generate(code_sink)
try:
utils.call_with_error_handling(parent_caller.generate,
(code_sink,), {}, parent_caller)
except utils.SkipWrapper:
continue
if settings._get_deprecated_virtuals():
parent_caller_name = '_'+name
else:
parent_caller_name = name
method_defs.append(parent_caller.get_py_method_def(parent_caller_name))
## write the virtual proxies
for virtual_proxy in self.virtual_proxies:
#virtual_proxy.class_ = self.class_
virtual_proxy.helper_class = self
code_sink.writeln()
## virtual_proxy.generate(code_sink)
try:
utils.call_with_error_handling(virtual_proxy.generate,
(code_sink,), {}, virtual_proxy)
except utils.SkipWrapper:
assert not virtual_proxy.method.is_pure_virtual
continue
for dummy, custom_body in self.custom_methods:
if custom_body:
code_sink.writeln(custom_body)
return method_defs
class CppClass(object):
"""
A CppClass object takes care of generating the code for wrapping a C++ class
"""
def __init__(self, name, parent=None, incref_method=None, decref_method=None,
automatic_type_narrowing=None, allow_subclassing=None,
is_singleton=False, outer_class=None,
peekref_method=None,
template_parameters=(), custom_template_class_name=None,
incomplete_type=False, free_function=None,
incref_function=None, decref_function=None,
python_name=None, memory_policy=None,
foreign_cpp_namespace=None,
docstring=None,
custom_name=None,
import_from_module=None,
destructor_visibility='public'
):
"""
:param name: class name
:param parent: optional parent class wrapper, or list of
parents. Valid values are None, a CppClass
instance, or a list of CppClass instances.
:param incref_method: (deprecated in favour of memory_policy) if the class supports reference counting, the
name of the method that increments the
reference count (may be inherited from parent
if not given)
:param decref_method: (deprecated in favour of memory_policy) if the class supports reference counting, the
name of the method that decrements the
reference count (may be inherited from parent
if not given)
:param automatic_type_narrowing: if True, automatic return type
narrowing will be done on objects
of this class and its descendants
when returned by pointer from a
function or method.
:param allow_subclassing: if True, generated class wrappers will
allow subclassing in Python.
:param is_singleton: if True, the class is considered a singleton,
and so the python wrapper will never call the
C++ class destructor to free the value.
:param peekref_method: (deprecated in favour of memory_policy) if the class supports reference counting, the
name of the method that returns the current reference count.
:param free_function: (deprecated in favour of memory_policy) name of C function used to deallocate class instances
:param incref_function: (deprecated in favour of memory_policy) same as incref_method, but as a function instead of method
:param decref_function: (deprecated in favour of memory_policy) same as decref_method, but as a function instead of method
:param python_name: name of the class as it will appear from
Python side. This parameter is DEPRECATED in favour of
custom_name.
:param memory_policy: memory management policy; if None, it
inherits from the parent class. Only root classes can have a
memory policy defined.
:type memory_policy: L{MemoryPolicy}
:param foreign_cpp_namespace: if set, the class is assumed to
belong to the given C++ namespace, regardless of the C++
namespace of the python module it will be added to. For
instance, this can be useful to wrap std classes, like
std::ofstream, without having to create an extra python
submodule.
:param docstring: None or a string containing the docstring
that will be generated for the class
:param custom_name: an alternative name to give to this class
at python-side; if omitted, the name of the class in the
python module will be the same name as the class in C++
(minus namespace).
:param import_from_module: if not None, the type is imported
from a foreign Python module with the given name.
"""
assert outer_class is None or isinstance(outer_class, CppClass)
self.incomplete_type = incomplete_type
self.outer_class = outer_class
self._module = None
self.name = name
self.docstring = docstring
self.mangled_name = None
self.mangled_full_name = None
self.template_parameters = template_parameters
self.container_traits = None
self.import_from_module = import_from_module
assert destructor_visibility in ['public', 'private', 'protected']
self.destructor_visibility = destructor_visibility
self.custom_name = custom_name
if custom_template_class_name:
warnings.warn("Use the custom_name parameter.",
DeprecationWarning, stacklevel=2)
self.custom_name = custom_template_class_name
if python_name:
warnings.warn("Use the custom_name parameter.",
DeprecationWarning, stacklevel=2)
self.custom_name = python_name
self.is_singleton = is_singleton
self.foreign_cpp_namespace = foreign_cpp_namespace
self.full_name = None # full name with C++ namespaces attached and template parameters
self.methods = {} # name => OverloadedMethod
self._dummy_methods = [] # methods that have parameter/retval binding problems
self.nonpublic_methods = []
self.constructors = [] # (name, wrapper) pairs
self.pytype = PyTypeObject()
self.slots = self.pytype.slots
self.helper_class = None
self.instance_creation_function = None
self.post_instance_creation_function = None
## set to True when we become aware generating the helper
## class is not going to be possible
self.helper_class_disabled = False
self.cannot_be_constructed = '' # reason
self.has_trivial_constructor = False
self.has_copy_constructor = False
self.has_output_stream_operator = False
self._have_pure_virtual_methods = None
self._wrapper_registry = None
self.binary_comparison_operators = set()
self.binary_numeric_operators = dict()
self.inplace_numeric_operators = dict()
self.unary_numeric_operators = dict()
self.valid_sequence_methods = ("__len__", "__getitem__", "__setitem__")
## list of CppClasses from which a value of this class can be
## implicitly generated; corresponds to a
## operator ThisClass(); in the other class.
self.implicitly_converts_from = []
## list of hook functions to call just prior to helper class
## code generation.
self.helper_class_hooks = []
self._pystruct = None #"***GIVE ME A NAME***"
self.metaclass_name = "***GIVE ME A NAME***"
self.pytypestruct = "***GIVE ME A NAME***"
self.instance_attributes = PyGetSetDef("%s__getsets" % self._pystruct)
self.static_attributes = PyGetSetDef("%s__getsets" % self.metaclass_name)
if isinstance(parent, list):
self.bases = list(parent)
self.parent = self.bases[0]
elif isinstance(parent, CppClass):
self.parent = parent
self.bases = [parent]
elif parent is None:
self.parent = None
self.bases = []
else:
raise TypeError("'parent' must be None, CppClass instance, or a list of CppClass instances")
if free_function:
warnings.warn("Use FreeFunctionPolicy and memory_policy parameter.", DeprecationWarning)
assert memory_policy is None
memory_policy = FreeFunctionPolicy(free_function)
elif incref_method:
warnings.warn("Use ReferenceCountingMethodsPolicy and memory_policy parameter.", DeprecationWarning)
assert memory_policy is None
memory_policy = ReferenceCountingMethodsPolicy(incref_method, decref_method, peekref_method)
elif incref_function:
warnings.warn("Use ReferenceCountingFunctionsPolicy and memory_policy parameter.", DeprecationWarning)
assert memory_policy is None
memory_policy = ReferenceCountingFunctionsPolicy(incref_function, decref_function)
if not self.bases:
assert memory_policy is None or isinstance(memory_policy, MemoryPolicy)
self.memory_policy = memory_policy
else:
for base in self.bases:
if base.memory_policy is not None:
self.memory_policy = base.memory_policy
assert memory_policy is None, \
"changing memory policy from parent (%s) to child (%s) class not permitted" \
% (base.name, self.name)
break
else:
self.memory_policy = memory_policy
if automatic_type_narrowing is None:
if not self.bases:
self.automatic_type_narrowing = settings.automatic_type_narrowing
else:
self.automatic_type_narrowing = self.parent.automatic_type_narrowing
else:
self.automatic_type_narrowing = automatic_type_narrowing
if allow_subclassing is None:
if self.parent is None:
self.allow_subclassing = settings.allow_subclassing
else:
self.allow_subclassing = self.parent.allow_subclassing
else:
if any([p.allow_subclassing for p in self.bases]) and not allow_subclassing:
raise ValueError("Cannot disable subclassing if a parent class allows it")
else:
self.allow_subclassing = allow_subclassing
if self.destructor_visibility not in ['public', 'protected']:
self.allow_subclassing = False
self.typeid_map_name = None
if name != 'dummy':
## register type handlers
class ThisClassParameter(CppClassParameter):
"""Register this C++ class as pass-by-value parameter"""
CTYPES = []
cpp_class = self
self.ThisClassParameter = ThisClassParameter
try:
param_type_matcher.register(name, self.ThisClassParameter)
except ValueError:
pass
class ThisClassRefParameter(CppClassRefParameter):
"""Register this C++ class as pass-by-reference parameter"""
CTYPES = []
cpp_class = self
self.ThisClassRefParameter = ThisClassRefParameter
try:
param_type_matcher.register(name+'&', self.ThisClassRefParameter)
except ValueError:
pass
class ThisClassReturn(CppClassReturnValue):
"""Register this C++ class as value return"""
CTYPES = []
cpp_class = self
self.ThisClassReturn = ThisClassReturn
self.ThisClassRefReturn = ThisClassReturn
try:
return_type_matcher.register(name, self.ThisClassReturn)
return_type_matcher.register(name, self.ThisClassRefReturn)
except ValueError:
pass
class ThisClassPtrParameter(CppClassPtrParameter):
"""Register this C++ class as pass-by-pointer parameter"""
CTYPES = []
cpp_class = self
self.ThisClassPtrParameter = ThisClassPtrParameter
try:
param_type_matcher.register(name+'*', self.ThisClassPtrParameter)
except ValueError:
pass
class ThisClassPtrReturn(CppClassPtrReturnValue):
"""Register this C++ class as pointer return"""
CTYPES = []
cpp_class = self
self.ThisClassPtrReturn = ThisClassPtrReturn
try:
return_type_matcher.register(name+'*', self.ThisClassPtrReturn)
except ValueError:
pass
class ThisClassRefReturn(CppClassRefReturnValue):
"""Register this C++ class as reference return"""
CTYPES = []
cpp_class = self
self.ThisClassRefReturn = ThisClassRefReturn
try:
return_type_matcher.register(name+'&', self.ThisClassRefReturn)
except ValueError:
pass
def __repr__(self):
return "<pybindgen.CppClass %r>" % self.full_name
def add_container_traits(self, *args, **kwargs):
assert self.container_traits is None
self.container_traits = CppClassContainerTraits(self, *args, **kwargs)
def add_binary_comparison_operator(self, operator):
"""
Add support for a C++ binary comparison operator, such as == or <.
The binary operator is assumed to operate with both operands
of the type of the class, either by reference or by value.
:param operator: string indicating the name of the operator to
support, e.g. '=='
"""
operator = utils.ascii(operator)
if not isinstance(operator, str):
raise TypeError("expected operator name as string")
if operator not in ['==', '!=', '<', '<=', '>', '>=']:
raise ValueError("The operator %r is invalid or not yet supported by PyBindGen" % (operator,))
self.binary_comparison_operators.add(operator)
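## Illustrative sketch (hypothetical class): enabling Python's rich
## comparison for a wrapped C++ type that defines operator== and
## operator<:
##
##   vec3 = CppClass('Vec3')
##   vec3.add_binary_comparison_operator('==')
##   vec3.add_binary_comparison_operator('<')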
def add_binary_numeric_operator(self, operator, result_cppclass=None,
left_cppclass=None, right=None):
"""
Add support for a C++ binary numeric operator, such as +, -, \\*, or /.
:param operator: string indicating the name of the operator to
support, e.g. '+'
:param result_cppclass: the CppClass object of the result type, assumed to be this class if omitted
:param left_cppclass: the CppClass object of the left operand type, assumed to be this class if omitted
:param right: the type of the right parameter. Can be a
CppClass, Parameter, or param spec. Assumed to be this class
if omitted
"""
operator = utils.ascii(operator)
if not isinstance(operator, str):
raise TypeError("expected operator name as string")
if operator not in ['+', '-', '*', '/']:
raise ValueError("The operator %r is invalid or not yet supported by PyBindGen" % (operator,))
try:
l = self.binary_numeric_operators[operator]
except KeyError:
l = []
self.binary_numeric_operators[operator] = l
if result_cppclass is None:
result_cppclass = self
if left_cppclass is None:
left_cppclass = self
if right is None:
right = self
elif isinstance(right, CppClass):
pass
else:
if isinstance(right, str):
right = utils.param(right, 'right')
try:
right = utils.eval_param(right, None)
except utils.SkipWrapper:
return
op = (result_cppclass, left_cppclass, right)
if op not in l:
l.append(op)
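## Illustrative sketch (hypothetical class): wrapping C++ operator+ so
## that `vec_a + vec_b' works from Python; result, left, and right types
## all default to the class itself. The second call shows the right
## operand given as a param spec string, as the docstring allows.
##
##   vec3 = CppClass('Vec3')
##   vec3.add_binary_numeric_operator('+')
##   vec3.add_binary_numeric_operator('*', right='double')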
def add_inplace_numeric_operator(self, operator, right=None):
"""
Add support for a C++ inplace numeric operator, such as +=, -=, \\*=, or /=.
:param operator: string indicating the name of the operator to
support, e.g. '+='
:param right: the type of the right parameter. Can be a
CppClass, Parameter, or param spec. Assumed to be this class
if omitted
"""
operator = utils.ascii(operator)
if not isinstance(operator, str):
raise TypeError("expected operator name as string")
if operator not in ['+=', '-=', '*=', '/=']:
raise ValueError("The operator %r is invalid or not yet supported by PyBindGen" % (operator,))
try:
l = self.inplace_numeric_operators[operator]
except KeyError:
l = []
self.inplace_numeric_operators[operator] = l
if right is None:
right = self
else:
if isinstance(right, str):
right = utils.param(right, 'right')
try:
right = utils.eval_param(right, None)
except utils.SkipWrapper:
return
op = (self, self, right)
if op not in l:
l.append(op)
def add_unary_numeric_operator(self, operator, result_cppclass=None, left_cppclass=None):
"""
Add support for a C++ unary numeric operator; currently only '-' is supported.
:param operator: string indicating the name of the operator to
support, e.g. '-'
:param result_cppclass: the CppClass object of the result type, assumed to be this class if omitted
:param left_cppclass: the CppClass object of the left operand type, assumed to be this class if omitted
"""
operator = utils.ascii(operator)
if not isinstance(operator, str):
raise TypeError("expected operator name as string")
if operator not in ['-']:
raise ValueError("The operator %r is invalid or not yet supported by PyBindGen" % (operator,))
try:
l = self.unary_numeric_operators[operator]
except KeyError:
l = []
self.unary_numeric_operators[operator] = l
if result_cppclass is None:
result_cppclass = self
if left_cppclass is None:
left_cppclass = self
op = (result_cppclass, left_cppclass)
if op not in l:
l.append(op)
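## Illustrative sketch (hypothetical class): in-place and unary
## operators follow the same pattern as the binary ones above:
##
##   vec3 = CppClass('Vec3')
##   vec3.add_inplace_numeric_operator('+=')   # Python: v += other
##   vec3.add_unary_numeric_operator('-')      # Python: -v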
def add_class(self, *args, **kwargs):
"""
Add a nested class. See L{CppClass} for information about accepted parameters.
"""
assert 'outer_class' not in kwargs
kwargs['outer_class'] = self
return self.module.add_class(*args, **kwargs)
def add_enum(self, *args, **kwargs):
"""
Add a nested enum. See L{Enum} for information about accepted parameters.
"""
assert 'outer_class' not in kwargs
kwargs['outer_class'] = self
return self.module.add_enum(*args, **kwargs)
def get_mro(self):
"""
Get the method resolution order (MRO) of this class.
:return: an iterator that gives CppClass objects, from leaf to root class
"""
to_visit = [self]
visited = set()
while to_visit:
cls = to_visit.pop(0)
if cls in visited:
continue  # a base may be queued twice under diamond inheritance
visited.add(cls)
yield cls
for base in cls.bases:
if base not in visited:
to_visit.append(base)
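## Illustrative sketch (hypothetical classes): get_mro() yields the
## class itself first, then its bases breadth-first:
##
##   base = CppClass('Base')
##   derived = CppClass('Derived', parent=base)
##   [cls.name for cls in derived.get_mro()]   # -> ['Derived', 'Base']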
def get_all_methods(self):
"""Returns an iterator to iterate over all methods of the class"""
for overload in self.methods.itervalues():
for method in overload.wrappers:
yield method
for method in self.nonpublic_methods:
yield method
def get_have_pure_virtual_methods(self):
"""
Returns True if the class has pure virtual methods with no
implementation (which would mean the type is not instantiable
directly, only through a helper class).
"""
if self._have_pure_virtual_methods is not None:
return self._have_pure_virtual_methods
mro = list(self.get_mro())
mro_reversed = list(mro)
mro_reversed.reverse()
self._have_pure_virtual_methods = False
for pos, cls in enumerate(mro_reversed):
for method in list(cls.get_all_methods()) + cls._dummy_methods:
if not isinstance(method, CppMethod):
continue
if method.is_pure_virtual:
## found a pure virtual method; now go see in the
## child classes, check if any of them implements
## this pure virtual method.
implemented = False
for child_cls in mro_reversed[pos+1:]:
for child_method in list(child_cls.get_all_methods()) + child_cls._dummy_methods:
if not isinstance(child_method, CppMethod):
continue
if not child_method.is_virtual:
continue
if not child_method.matches_signature(method):
continue
if not child_method.is_pure_virtual:
implemented = True
break
if implemented:
break
if not implemented:
self._have_pure_virtual_methods = True
return self._have_pure_virtual_methods
have_pure_virtual_methods = property(get_have_pure_virtual_methods)
def is_subclass(self, other):
"""Return True if this CppClass instance represents a class that is a
subclass of another class represented by the CppClass object \\`other\\'."""
if not isinstance(other, CppClass):
raise TypeError
return other in self.get_mro()
def add_helper_class_hook(self, hook):
"""
Add a hook function to be called just prior to a helper class
being generated. The hook function applies to this class and
all subclasses. The hook function is called like this::
hook_function(helper_class)
"""
if not callable(hook):
raise TypeError("hook function must be callable")
self.helper_class_hooks.append(hook)
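## Illustrative sketch: a helper-class hook receives the CppHelperClass
## object just before code generation; it may, for example, inject a
## custom method via add_custom_method (defined on CppHelperClass
## above). The class name and declaration below are hypothetical.
##
##   cls = CppClass('MyClass')
##   def my_hook(helper_class):
##       helper_class.add_custom_method('void my_extra_method () {}')
##   cls.add_helper_class_hook(my_hook)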
def _get_all_helper_class_hooks(self):
"""
Returns a list of all helper class hook functions, including
the ones registered with parent classes. Parent hooks will
appear first in the list.
"""
l = []
for cls in self.get_mro():
l = cls.helper_class_hooks + l
return l
def set_instance_creation_function(self, instance_creation_function):
"""Set a custom function to be called to create instances of this
class and its subclasses.
:param instance_creation_function: instance creation function; see
default_instance_creation_function()
for signature and example.
"""
self.instance_creation_function = instance_creation_function
def set_post_instance_creation_function(self, post_instance_creation_function):
"""Set a custom function to be called to add code after an
instance is created (usually by the "instance creation
function") and registered with the Python runtime.
:param post_instance_creation_function: post instance creation function
"""
self.post_instance_creation_function = post_instance_creation_function
def get_instance_creation_function(self):
for cls in self.get_mro():
if cls.instance_creation_function is not None:
return cls.instance_creation_function
return default_instance_creation_function
def get_post_instance_creation_function(self):
for cls in self.get_mro():
if cls.post_instance_creation_function is not None:
return cls.post_instance_creation_function
return None
def write_create_instance(self, code_block, lvalue, parameters, construct_type_name=None):
instance_creation_func = self.get_instance_creation_function()
if construct_type_name is None:
construct_type_name = self.get_construct_name()
instance_creation_func(self, code_block, lvalue, parameters, construct_type_name)
def write_post_instance_creation_code(self, code_block, lvalue, parameters, construct_type_name=None):
post_instance_creation_func = self.get_post_instance_creation_function()
if post_instance_creation_func is None:
return
if construct_type_name is None:
construct_type_name = self.get_construct_name()
post_instance_creation_func(self, code_block, lvalue, parameters, construct_type_name)
def get_pystruct(self):
if self._pystruct is None:
raise ValueError
return self._pystruct
pystruct = property(get_pystruct)
def get_construct_name(self):
"""Get a name usable for new %s construction, or raise
CodeGenerationError if none found"""
if self.cannot_be_constructed:
raise CodeGenerationError("%s cannot be constructed (%s)" % (self.full_name, self.cannot_be_constructed))
if self.have_pure_virtual_methods:
raise CodeGenerationError("%s cannot be constructed (class has pure virtual methods)" % self.full_name)
else:
return self.full_name
def implicitly_converts_to(self, other):
"""
Declares that values of this class can be implicitly converted
to another class; corresponds to a operator AnotherClass();
special method.
"""
assert isinstance(other, CppClass)
other.implicitly_converts_from.append(self)
def get_all_implicit_conversions(self):
"""
Gets a new list of all other classes whose value can be implicitly
converted to a value of this class.
>>> Foo = CppClass("Foo")
>>> Bar = CppClass("Bar")
>>> Zbr = CppClass("Zbr")
>>> Bar.implicitly_converts_to(Foo)
>>> Zbr.implicitly_converts_to(Bar)
>>> l = Foo.get_all_implicit_conversions()
>>> l.sort(key=lambda cls: cls.name)
>>> [cls.name for cls in l]
['Bar']
"""
return list(self.implicitly_converts_from)
# classes = []
# to_visit = list(self.implicitly_converts_from)
# while to_visit:
# source = to_visit.pop(0)
# if source in classes or source is self:
# continue
# classes.append(source)
# to_visit.extend(source.implicitly_converts_from)
# return classes
def _update_names(self):
prefix = settings.name_prefix.capitalize()
if self.outer_class is None:
if self.foreign_cpp_namespace:
self.full_name = self.foreign_cpp_namespace + '::' + self.name
else:
if self._module.cpp_namespace_prefix:
if self._module.cpp_namespace_prefix == '::':
self.full_name = '::' + self.name
else:
self.full_name = self._module.cpp_namespace_prefix + '::' + self.name
else:
self.full_name = self.name
else:
assert not self.foreign_cpp_namespace
self.full_name = '::'.join([self.outer_class.full_name, self.name])
def make_upper(s):
if s and s[0].islower():
return s[0].upper()+s[1:]
else:
return s
def mangle(s):
"make a name Like<This,and,That> look Like__lt__This_and_That__gt__"
s = s.replace('<', '__lt__').replace('>', '__gt__').replace(',', '_')
s = s.replace(' ', '_').replace('&', '__amp__').replace('*', '__star__')
return s
def flatten(name):
"make a name like::This look LikeThis"
return ''.join([make_upper(mangle(s)) for s in name.split('::')])
self.mangled_name = flatten(self.name)
self.mangled_full_name = flatten(self.full_name)
if self.template_parameters:
self.full_name += "< %s >" % (', '.join(self.template_parameters))
mangled_template_params = '__' + '_'.join([flatten(s) for s in self.template_parameters])
self.mangled_name += mangled_template_params
self.mangled_full_name += mangled_template_params
self._pystruct = "Py%s%s" % (prefix, self.mangled_full_name)
self.metaclass_name = "%sMeta" % self.mangled_full_name
self.pytypestruct = "Py%s%s_Type" % (prefix, self.mangled_full_name)
self.instance_attributes.cname = "%s__getsets" % self._pystruct
self.static_attributes.cname = "%s__getsets" % self.metaclass_name
## re-register the class type handlers, now with class full name
self.register_alias(self.full_name)
if self.get_type_narrowing_root() is self:
self.typeid_map_name = "%s__typeid_map" % self.pystruct
else:
self.typeid_map_name = None
def register_alias(self, alias):
"""Re-register the class with another base name, in addition to any
registrations that might have already been done."""
self.module.register_type(None, alias, self)
self.ThisClassParameter.CTYPES.append(alias)
try:
param_type_matcher.register(alias, self.ThisClassParameter)
except ValueError: pass
self.ThisClassRefParameter.CTYPES.append(alias+'&')
try:
param_type_matcher.register(alias+'&', self.ThisClassRefParameter)
except ValueError: pass
self.ThisClassReturn.CTYPES.append(alias)
try:
return_type_matcher.register(alias, self.ThisClassReturn)
except ValueError: pass
self.ThisClassPtrParameter.CTYPES.append(alias+'*')
try:
param_type_matcher.register(alias+'*', self.ThisClassPtrParameter)
except ValueError: pass
self.ThisClassPtrReturn.CTYPES.append(alias+'*')
try:
return_type_matcher.register(alias+'*', self.ThisClassPtrReturn)
except ValueError: pass
self.ThisClassRefReturn.CTYPES.append(alias+'&')
try:
return_type_matcher.register(alias+'&', self.ThisClassRefReturn)
except ValueError: pass
def get_module(self):
"""Get the Module object this class belongs to"""
return self._module
def set_module(self, module):
"""Set the Module object this class belongs to"""
self._module = module
self._update_names()
module = property(get_module, set_module)
def inherit_default_constructors(self):
"""inherit the default constructors from the parentclass according to C++
language rules"""
for base in self.bases:
for cons in base.constructors:
if len(cons.parameters) == 0:
self.add_constructor([], visibility=cons.visibility)
elif (len(cons.parameters) == 1
and isinstance(cons.parameters[0], self.parent.ThisClassRefParameter)):
self.add_constructor([self.ThisClassRefParameter()], visibility=cons.visibility)
def get_helper_class(self):
"""gets the "helper class" for this class wrapper, creating it if necessary"""
for cls in self.get_mro():
if cls.helper_class_disabled:
return None
if not self.allow_subclassing:
return None
if self.helper_class is None:
if not self.is_singleton:
self.helper_class = CppHelperClass(self)
self.module.add_include('<typeinfo>')
return self.helper_class
def get_type_narrowing_root(self):
"""Find the root CppClass along the subtree of all parent classes that
have automatic_type_narrowing=True Note: multiple inheritance
not implemented"""
if not self.automatic_type_narrowing:
return None
root = self
while (root.parent is not None
and root.parent.automatic_type_narrowing):
root = root.parent
return root
def _register_typeid(self, module):
"""register this class with the typeid map root class"""
root = self.get_type_narrowing_root()
module.after_init.write_code("%s.register_wrapper(typeid(%s), &%s);"
% (root.typeid_map_name, self.full_name, self.pytypestruct))
def _generate_typeid_map(self, code_sink, module):
"""generate the typeid map and fill it with values"""
try:
module.declare_one_time_definition("TypeIDMap")
except KeyError:
pass
else:
code_sink.writeln('''
#include <map>
#include <string>
#include <typeinfo>
#if defined(__GNUC__) && __GNUC__ >= 3
# include <cxxabi.h>
#endif
#define PBG_TYPEMAP_DEBUG 0
namespace pybindgen {
class TypeMap
{
std::map<std::string, PyTypeObject *> m_map;
public:
TypeMap() {}
void register_wrapper(const std::type_info &cpp_type_info, PyTypeObject *python_wrapper)
{
#if PBG_TYPEMAP_DEBUG
std::cerr << "register_wrapper(this=" << this << ", type_name=" << cpp_type_info.name()
<< ", python_wrapper=" << python_wrapper->tp_name << ")" << std::endl;
#endif
m_map[std::string(cpp_type_info.name())] = python_wrapper;
}
''')
if settings.gcc_rtti_abi_complete:
code_sink.writeln('''
PyTypeObject * lookup_wrapper(const std::type_info &cpp_type_info, PyTypeObject *fallback_wrapper)
{
#if PBG_TYPEMAP_DEBUG
std::cerr << "lookup_wrapper(this=" << this << ", type_name=" << cpp_type_info.name() << ")" << std::endl;
#endif
PyTypeObject *python_wrapper = m_map[cpp_type_info.name()];
if (python_wrapper)
return python_wrapper;
else {
#if defined(__GNUC__) && __GNUC__ >= 3
// Get closest (in the single inheritance tree provided by cxxabi.h)
// registered python wrapper.
const abi::__si_class_type_info *_typeinfo =
dynamic_cast<const abi::__si_class_type_info*> (&cpp_type_info);
#if PBG_TYPEMAP_DEBUG
std::cerr << " -> looking at C++ type " << _typeinfo->name() << std::endl;
#endif
while (_typeinfo && (python_wrapper = m_map[std::string(_typeinfo->name())]) == 0) {
_typeinfo = dynamic_cast<const abi::__si_class_type_info*> (_typeinfo->__base_type);
#if PBG_TYPEMAP_DEBUG
std::cerr << " -> looking at C++ type " << _typeinfo->name() << std::endl;
#endif
}
#if PBG_TYPEMAP_DEBUG
if (python_wrapper) {
std::cerr << " -> found match " << std::endl;
} else {
std::cerr << " -> return fallback wrapper" << std::endl;
}
#endif
return python_wrapper? python_wrapper : fallback_wrapper;
#else // non gcc 3+ compilers can only match against explicitly registered classes, not hidden subclasses
return fallback_wrapper;
#endif
}
}
};
}
''')
else:
code_sink.writeln('''
PyTypeObject * lookup_wrapper(const std::type_info &cpp_type_info, PyTypeObject *fallback_wrapper)
{
#if PBG_TYPEMAP_DEBUG
std::cerr << "lookup_wrapper(this=" << this << ", type_name=" << cpp_type_info.name() << ")" << std::endl;
#endif
PyTypeObject *python_wrapper = m_map[cpp_type_info.name()];
return python_wrapper? python_wrapper : fallback_wrapper;
}
};
}
''')
if self.import_from_module:
code_sink.writeln("\nextern pybindgen::TypeMap *_%s;\n" % self.typeid_map_name)
code_sink.writeln("#define %s (*_%s)\n" % (self.typeid_map_name, self.typeid_map_name))
else:
code_sink.writeln("\nextern pybindgen::TypeMap %s;\n" % self.typeid_map_name)
def _add_method_obj(self, method):
"""
Add a method object to the class. For internal use.
:param method: a L{CppMethod} or L{Function} instance that can generate the method wrapper
"""
if isinstance(method, CppMethod):
name = method.mangled_name
elif isinstance(method, function.Function):
name = method.custom_name
assert isinstance(method.parameters[0], CppClassParameterBase)
assert method.parameters[0].cpp_class is self, \
"expected first parameter to be of class %s, but it is of class %s" % \
(self.full_name, method.parameters[0].cpp_class.full_name)
method.parameters[0].take_value_from_python_self = True
method.module = self.module
method.is_virtual = False
method.is_pure_virtual = False
method.self_parameter_pystruct = self.pystruct
method.visibility = 'public'
method.force_parse = method.PARSE_TUPLE_AND_KEYWORDS
else:
raise TypeError
method.class_ = self
if method.visibility == 'protected' and not method.is_virtual:
helper_class = self.get_helper_class()
if helper_class is not None:
parent_caller = CppVirtualMethodParentCaller(method)
parent_caller.helper_class = helper_class
parent_caller.main_wrapper = method
helper_class.add_virtual_parent_caller(parent_caller)
elif method.visibility == 'public':
if name == '__call__': # needs special handling
method.force_parse = method.PARSE_TUPLE_AND_KEYWORDS
try:
overload = self.methods[name]
except KeyError:
overload = CppOverloadedMethod(name)
overload.pystruct = self.pystruct
self.methods[name] = overload
## add it....
try:
utils.call_with_error_handling(overload.add, (method,), {}, method)
except utils.SkipWrapper:
return
# Grr! I hate C++. Overloading + inheritance = disaster!
# So I ended up coding something which C++ does not in
# fact support, but I feel bad to just throw away my good
# code due to a C++ fault, so I am leaving here the code
# disabled. Maybe some future C++ version will come along
# and fix this problem, who knows :P
if 0:
# due to a limitation of the pybindgen overloading
# strategy, we need to re-wrap for this class all
# methods with the same name and different signature
# from parent classes.
overload._compute_all_wrappers()
if isinstance(method, CppMethod):
mro = self.get_mro()
mro.next() # skip 'self'
for cls in mro:
try:
parent_overload = cls.methods[name]
except KeyError:
continue
parent_overload._compute_all_wrappers()
for parent_method in parent_overload.all_wrappers:
already_exists = False
for existing_method in overload.all_wrappers:
if existing_method.matches_signature(parent_method):
already_exists = True
break
if not already_exists:
new_method = parent_method.clone()
new_method.class_ = self
overload.add(new_method)
else:
self.nonpublic_methods.append(method)
if method.is_virtual:
self._have_pure_virtual_methods = None
helper_class = self.get_helper_class()
if helper_class is not None:
helper_class.add_virtual_method(method)
def add_method(self, *args, **kwargs):
"""
Add a method to the class. See the documentation for
L{CppMethod.__init__} for information on accepted parameters.
"""
## <compat>
if len(args) >= 1 and isinstance(args[0], CppMethod):
meth = args[0]
warnings.warn("add_method has changed API; see the API documentation",
DeprecationWarning, stacklevel=2)
if len(args) == 2:
meth.custom_name = args[1]
elif 'name' in kwargs:
assert len(args) == 1
meth.custom_name = kwargs['name']
else:
assert len(args) == 1
assert len(kwargs) == 0
elif len(args) >= 1 and isinstance(args[0], function.Function):
meth = args[0]
warnings.warn("add_method has changed API; see the API documentation",
DeprecationWarning, stacklevel=2)
if len(args) == 2:
meth.custom_name = args[1]
elif 'name' in kwargs:
assert len(args) == 1
meth.custom_name = kwargs['name']
else:
assert len(args) == 1
assert len(kwargs) == 0
## </compat>
else:
try:
meth = CppMethod(*args, **kwargs)
except utils.SkipWrapper:
if kwargs.get('is_virtual', False):
## if the method was supposed to be virtual, this
## is a very important fact that needs to be
## recorded in the class, even if the method is
## not wrapped.
method = CppDummyMethod(*args, **kwargs)
method.class_ = self
self._dummy_methods.append(method)
self._have_pure_virtual_methods = None
helper_class = self.get_helper_class()
if helper_class is not None:
helper_class.add_virtual_method(method)
if helper_class.cannot_be_constructed:
self.helper_class = None
self.helper_class_disabled = True
return None
self._add_method_obj(meth)
return meth
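## Illustrative sketch (hypothetical class and methods): typical usage,
## with arguments as accepted by CppMethod.__init__ (name, return
## value, parameter list, plus keyword options); None as the return
## value is assumed here to denote a void method.
##
##   cls = CppClass('Counter')
##   cls.add_method('GetCount', 'int', [], is_const=True)
##   cls.add_method('Increment', None, [], is_virtual=True)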
def add_function_as_method(self, *args, **kwargs):
"""
Add a function as a method of the class. See the documentation for
L{Function.__init__} for information on accepted parameters.
The first parameter of the function must be of this class type; it
implicitly receives the Python `self' object (see _add_method_obj).
"""
try:
meth = function.Function(*args, **kwargs)
except utils.SkipWrapper:
return None
self._add_method_obj(meth)
return meth
def add_custom_method_wrapper(self, *args, **kwargs):
"""
Adds a custom method wrapper. See L{CustomCppMethodWrapper} for more information.
"""
try:
meth = CustomCppMethodWrapper(*args, **kwargs)
except utils.SkipWrapper:
return None
self._add_method_obj(meth)
return meth
def set_helper_class_disabled(self, flag=True):
self.helper_class_disabled = flag
if flag:
self.helper_class = None
def set_cannot_be_constructed(self, reason):
assert isinstance(reason, basestring)
self.cannot_be_constructed = reason
def _add_constructor_obj(self, wrapper):
"""
Add a constructor to the class.
:param wrapper: a CppConstructor instance
"""
assert isinstance(wrapper, CppConstructor)
wrapper.set_class(self)
self.constructors.append(wrapper)
if not wrapper.parameters:
self.has_trivial_constructor = True # FIXME: I don't remember what this is used for anymore; maybe remove
if len(wrapper.parameters) == 1 and isinstance(wrapper.parameters[0], (CppClassRefParameter, CppClassParameter)) \
and wrapper.parameters[0].cpp_class is self and wrapper.visibility == 'public':
self.has_copy_constructor = True
def add_output_stream_operator(self):
"""
Add str() support based on the C++ output stream operator.
Calling this method enables wrapping of an operator function that
is assumed to be defined::
std::ostream & operator << (std::ostream &, MyClass const &);
The wrapper will be registered as the Python str() operator, and
will call the C++ operator function to convert the value to a
string.
"""
self.has_output_stream_operator = True
self.module.add_include("<ostream>")
self.module.add_include("<sstream>")
def add_constructor(self, *args, **kwargs):
"""
Add a constructor to the class. See the documentation for
L{CppConstructor.__init__} for information on accepted parameters.
"""
## <compat>
if len(args) == 1 and isinstance(args[0], CppConstructor):
warnings.warn("add_constructor has changed API; see the API documentation",
DeprecationWarning, stacklevel=2)
constructor = args[0]
elif len(args) == 1 and isinstance(args[0], function.Function):
warnings.warn("add_constructor has changed API; see the API documentation",
DeprecationWarning, stacklevel=2)
func = args[0]
constructor = CppFunctionAsConstructor(func.function_name, func.parameters)
constructor.module = self.module
## </compat>
else:
try:
constructor = CppConstructor(*args, **kwargs)
except utils.SkipWrapper:
return None
self._add_constructor_obj(constructor)
return constructor
def add_copy_constructor(self):
"""
Utility method to add a 'copy constructor' method to this class.
"""
try:
constructor = CppConstructor([self.ThisClassRefParameter("const %s &" % self.full_name,
'ctor_arg')])
except utils.SkipWrapper:
return None
self._add_constructor_obj(constructor)
return constructor
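## Illustrative sketch (hypothetical class): registering constructors;
## parameter specs follow L{CppConstructor.__init__}:
##
##   cls = CppClass('Counter')
##   cls.add_constructor([])        # wraps Counter::Counter()
##   cls.add_copy_constructor()     # wraps Counter(Counter const &)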
def add_function_as_constructor(self, *args, **kwargs):
"""
Wrap a function that behaves as a constructor for the class. See the documentation for
L{CppFunctionAsConstructor.__init__} for information on accepted parameters.
"""
try:
constructor = CppFunctionAsConstructor(*args, **kwargs)
except utils.SkipWrapper:
return None
self._add_constructor_obj(constructor)
return constructor
def add_static_attribute(self, name, value_type, is_const=False):
"""
:param value_type: a ReturnValue object
:param name: attribute name (i.e. the name of the class member variable)
:param is_const: True if the attribute is const, i.e. cannot be modified
"""
## backward compatibility check
if isinstance(value_type, str) and isinstance(name, ReturnValue):
warnings.warn("add_static_attribute has changed API; see the API documentation (but trying to correct...)",
DeprecationWarning, stacklevel=2)
value_type, name = name, value_type
try:
value_type = utils.eval_retval(value_type, None)
except utils.SkipWrapper:
return
assert isinstance(value_type, ReturnValue)
getter = CppStaticAttributeGetter(value_type, self, name)
getter.stack_where_defined = traceback.extract_stack()
if is_const:
setter = None
else:
setter = CppStaticAttributeSetter(value_type, self, name)
setter.stack_where_defined = traceback.extract_stack()
self.static_attributes.add_attribute(name, getter, setter)
def add_instance_attribute(self, name, value_type, is_const=False,
getter=None, setter=None):
"""
:param value_type: a ReturnValue object
:param name: attribute name (i.e. the name of the class member variable)
:param is_const: True if the attribute is const, i.e. cannot be modified
:param getter: None, or name of a method of this class used to get the value
:param setter: None, or name of a method of this class used to set the value
"""
## backward compatibility check
if isinstance(value_type, str) and isinstance(name, ReturnValue):
warnings.warn("add_static_attribute has changed API; see the API documentation (but trying to correct...)",
DeprecationWarning, stacklevel=2)
value_type, name = name, value_type
try:
value_type = utils.eval_retval(value_type, None)
except utils.SkipWrapper:
return
assert isinstance(value_type, ReturnValue)
getter_wrapper = CppInstanceAttributeGetter(value_type, self, name, getter=getter)
getter_wrapper.stack_where_defined = traceback.extract_stack()
if is_const:
setter_wrapper = None
assert setter is None
else:
setter_wrapper = CppInstanceAttributeSetter(value_type, self, name, setter=setter)
setter_wrapper.stack_where_defined = traceback.extract_stack()
self.instance_attributes.add_attribute(name, getter_wrapper, setter_wrapper)
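## Illustrative sketch (hypothetical class and members): attribute
## specs may be given as ReturnValue objects or as plain type strings,
## since they go through utils.eval_retval above:
##
##   vec3 = CppClass('Vec3')
##   vec3.add_instance_attribute('x', 'double')
##   vec3.add_static_attribute('dimension', 'int', is_const=True)
##   # accessor-based attribute, mapped to getter/setter methods:
##   vec3.add_instance_attribute('length', 'double',
##                               getter='GetLength', setter='SetLength')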
def _inherit_helper_class_parent_virtuals(self):
"""
Given a class containing a helper class, add all virtual
methods from all parent classes of this class.
"""
mro = self.get_mro()
mro.next() # skip 'self'
for cls in mro:
for method in cls.get_all_methods():
if not method.is_virtual:
continue
method = method.clone()
self.helper_class.add_virtual_method(method)
def _get_wrapper_registry(self):
# there is one wrapper registry object per root class only,
# which is used for all subclasses.
if self.parent is None:
if self._wrapper_registry is None:
self._wrapper_registry = settings.wrapper_registry(self.pystruct)
return self._wrapper_registry
else:
return self.parent._get_wrapper_registry()
wrapper_registry = property(_get_wrapper_registry)
def generate_forward_declarations(self, code_sink, module):
"""
Generates forward declarations for the instance and type
structures.
"""
if self.allow_subclassing:
code_sink.writeln('''
typedef struct {
PyObject_HEAD
%s *obj;
PyObject *inst_dict;
PyBindGenWrapperFlags flags:8;
} %s;
''' % (self.full_name, self.pystruct))
else:
code_sink.writeln('''
typedef struct {
PyObject_HEAD
%s *obj;
PyBindGenWrapperFlags flags:8;
} %s;
''' % (self.full_name, self.pystruct))
code_sink.writeln()
if self.import_from_module:
code_sink.writeln('extern PyTypeObject *_%s;' % (self.pytypestruct,))
code_sink.writeln('#define %s (*_%s)' % (self.pytypestruct, self.pytypestruct))
else:
code_sink.writeln('extern PyTypeObject %s;' % (self.pytypestruct,))
if not self.static_attributes.empty():
code_sink.writeln('extern PyTypeObject Py%s_Type;' % (self.metaclass_name,))
code_sink.writeln()
if self.helper_class is not None:
self._inherit_helper_class_parent_virtuals()
for hook in self._get_all_helper_class_hooks():
hook(self.helper_class)
self.helper_class.generate_forward_declarations(code_sink)
if self.helper_class.cannot_be_constructed:
self.helper_class = None
self.helper_class_disabled = True
if self.have_pure_virtual_methods and self.helper_class is None:
self.cannot_be_constructed = "have pure virtual methods but no helper class"
if self.typeid_map_name is not None:
self._generate_typeid_map(code_sink, module)
if self.container_traits is not None:
self.container_traits.generate_forward_declarations(code_sink, module)
if self.parent is None:
self.wrapper_registry.generate_forward_declarations(code_sink, module, self.import_from_module)
def get_python_name(self):
if self.template_parameters:
if self.custom_name is None:
class_python_name = self.mangled_name
else:
class_python_name = self.custom_name
else:
if self.custom_name is None:
class_python_name = self.name
else:
class_python_name = self.custom_name
return class_python_name
def _generate_import_from_module(self, code_sink, module):
if module.parent is None:
error_retcode = ""
else:
error_retcode = "NULL"
# TODO: skip this step if the requested typestructure is never used
if ' named ' in self.import_from_module:
module_name, type_name = self.import_from_module.split(" named ")
else:
module_name, type_name = self.import_from_module, self.name
code_sink.writeln("PyTypeObject *_%s;" % self.pytypestruct)
module.after_init.write_code("/* Import the %r class from module %r */" % (self.full_name, self.import_from_module))
module.after_init.write_code("{"); module.after_init.indent()
module.after_init.write_code("PyObject *module = PyImport_ImportModule(\"%s\");" % module_name)
module.after_init.write_code(
"if (module == NULL) {\n"
" return %s;\n"
"}" % (error_retcode,))
module.after_init.write_code("_%s = (PyTypeObject*) PyObject_GetAttrString(module, \"%s\");\n"
% (self.pytypestruct, self.get_python_name()))
module.after_init.write_code("if (PyErr_Occurred()) PyErr_Clear();")
if self.typeid_map_name is not None:
code_sink.writeln("pybindgen::TypeMap *_%s;" % self.typeid_map_name)
module.after_init.write_code("/* Import the %r class type map from module %r */" % (self.full_name, self.import_from_module))
module.after_init.write_code("PyObject *_cobj = PyObject_GetAttrString(module, \"_%s\");"
% (self.typeid_map_name))
module.after_init.write_code("if (_cobj == NULL) {\n"
" _%s = new pybindgen::TypeMap;\n"
" PyErr_Clear();\n"
"} else {\n"
" _%s = reinterpret_cast<pybindgen::TypeMap*> (PyCObject_AsVoidPtr (_cobj));\n"
" Py_DECREF(_cobj);\n"
"}"
% (self.typeid_map_name, self.typeid_map_name))
if self.parent is None:
self.wrapper_registry.generate_import(code_sink, module.after_init, "module")
module.after_init.unindent(); module.after_init.write_code("}")
if self.helper_class is not None:
self.helper_class.generate(code_sink)
def generate(self, code_sink, module):
"""Generates the class to a code sink"""
if self.import_from_module:
self._generate_import_from_module(code_sink, module)
return # .......................... RETURN
if self.typeid_map_name is not None:
code_sink.writeln("\npybindgen::TypeMap %s;\n" % self.typeid_map_name)
module.after_init.write_code("PyModule_AddObject(m, (char *) \"_%s\", PyCObject_FromVoidPtr(&%s, NULL));"
% (self.typeid_map_name, self.typeid_map_name))
if self.automatic_type_narrowing:
self._register_typeid(module)
if self.parent is None:
self.wrapper_registry.generate(code_sink, module)
if self.helper_class is not None:
parent_caller_methods = self.helper_class.generate(code_sink)
else:
parent_caller_methods = []
## generate getsets
instance_getsets = self.instance_attributes.generate(code_sink)
self.slots.setdefault("tp_getset", instance_getsets)
static_getsets = self.static_attributes.generate(code_sink)
## --- register the class type in the module ---
module.after_init.write_code("/* Register the '%s' class */" % self.full_name)
## generate a metaclass if needed
if static_getsets == '0':
metaclass = None
else:
if self.parent is None:
parent_typestruct = 'PyBaseObject_Type'
else:
parent_typestruct = self.parent.pytypestruct
metaclass = PyMetaclass(self.metaclass_name,
"%s.ob_type" % parent_typestruct,
self.static_attributes)
metaclass.generate(code_sink, module)
if self.parent is not None:
assert isinstance(self.parent, CppClass)
module.after_init.write_code('%s.tp_base = &%s;' %
(self.pytypestruct, self.parent.pytypestruct))
if len(self.bases) > 1:
module.after_init.write_code('%s.tp_bases = PyTuple_New(%i);' % (self.pytypestruct, len(self.bases),))
for basenum, base in enumerate(self.bases):
module.after_init.write_code(' Py_INCREF((PyObject *) &%s);' % (base.pytypestruct,))
module.after_init.write_code(' PyTuple_SET_ITEM(%s.tp_bases, %i, (PyObject *) &%s);'
% (self.pytypestruct, basenum, base.pytypestruct))
if metaclass is not None:
module.after_init.write_code('%s.ob_type = &%s;' %
(self.pytypestruct, metaclass.pytypestruct))
module.after_init.write_error_check('PyType_Ready(&%s)'
% (self.pytypestruct,))
class_python_name = self.get_python_name()
if self.outer_class is None:
module.after_init.write_code(
'PyModule_AddObject(m, (char *) \"%s\", (PyObject *) &%s);' % (
class_python_name, self.pytypestruct))
else:
module.after_init.write_code(
'PyDict_SetItemString((PyObject*) %s.tp_dict, (char *) \"%s\", (PyObject *) &%s);' % (
self.outer_class.pytypestruct, class_python_name, self.pytypestruct))
have_constructor = self._generate_constructor(code_sink)
self._generate_methods(code_sink, parent_caller_methods)
if self.allow_subclassing:
self._generate_gc_methods(code_sink)
self._generate_destructor(code_sink, have_constructor)
if self.has_output_stream_operator:
self._generate_str(code_sink)
#self._generate_tp_hash(code_sink)
#self._generate_tp_compare(code_sink)
if self.slots.get("tp_richcompare", "NULL") == "NULL":
self.slots["tp_richcompare"] = self._generate_tp_richcompare(code_sink)
if self.binary_numeric_operators or self.inplace_numeric_operators:
self.slots["tp_as_number"] = self._generate_number_methods(code_sink)
if self.have_sequence_methods():
self.slots["tp_as_sequence"] = self._generate_sequence_methods(code_sink)
if self.container_traits is not None:
self.container_traits.generate(code_sink, module)
self._generate_type_structure(code_sink, self.docstring)
def _generate_number_methods(self, code_sink):
number_methods_var_name = "%s__py_number_methods" % (self.mangled_full_name,)
pynumbermethods = PyNumberMethods()
pynumbermethods.slots['variable'] = number_methods_var_name
# iterate over all types and request generation of the
# conversion functions for each type (so that those functions
# are not generated in the middle of one of the wrappers we
# are about to generate)
root_module = self.module.get_root()
for dummy_op_symbol, op_types in self.binary_numeric_operators.iteritems():
for (retval, left, right) in op_types:
get_c_to_python_converter(retval, root_module, code_sink)
get_python_to_c_converter(left, root_module, code_sink)
get_python_to_c_converter(right, root_module, code_sink)
for dummy_op_symbol, op_types in self.inplace_numeric_operators.iteritems():
for (retval, left, right) in op_types:
get_python_to_c_converter(left, root_module, code_sink)
get_python_to_c_converter(right, root_module, code_sink)
get_c_to_python_converter(retval, root_module, code_sink)
for dummy_op_symbol, op_types in self.unary_numeric_operators.iteritems():
for (retval, left) in op_types:
get_c_to_python_converter(retval, root_module, code_sink)
get_python_to_c_converter(left, root_module, code_sink)
def try_wrap_operator(op_symbol, slot_name):
if op_symbol in self.binary_numeric_operators:
op_types = self.binary_numeric_operators[op_symbol]
elif op_symbol in self.inplace_numeric_operators:
op_types = self.inplace_numeric_operators[op_symbol]
else:
return
wrapper_name = "%s__%s" % (self.mangled_full_name, slot_name)
pynumbermethods.slots[slot_name] = wrapper_name
code_sink.writeln(("static PyObject*\n"
"%s (PyObject *py_left, PyObject *py_right)\n"
"{") % wrapper_name)
code_sink.indent()
for (retval, left, right) in op_types:
retval_converter, retval_name = get_c_to_python_converter(retval, root_module, code_sink)
left_converter, left_name = get_python_to_c_converter(left, root_module, code_sink)
right_converter, right_name = get_python_to_c_converter(right, root_module, code_sink)
code_sink.writeln("{")
code_sink.indent()
code_sink.writeln("%s left;" % left_name)
code_sink.writeln("%s right;" % right_name)
code_sink.writeln("if (%s(py_left, &left) && %s(py_right, &right)) {" % (left_converter, right_converter))
code_sink.indent()
code_sink.writeln("%s result = (left %s right);" % (retval_name, op_symbol))
code_sink.writeln("return %s(&result);" % retval_converter)
code_sink.unindent()
code_sink.writeln("}")
code_sink.writeln("PyErr_Clear();")
code_sink.unindent()
code_sink.writeln("}")
code_sink.writeln("Py_INCREF(Py_NotImplemented);")
code_sink.writeln("return Py_NotImplemented;")
code_sink.unindent()
code_sink.writeln("}")
def try_wrap_unary_operator(op_symbol, slot_name):
if op_symbol in self.unary_numeric_operators:
op_types = self.unary_numeric_operators[op_symbol]
else:
return
wrapper_name = "%s__%s" % (self.mangled_full_name, slot_name)
pynumbermethods.slots[slot_name] = wrapper_name
code_sink.writeln(("static PyObject*\n"
"%s (PyObject *py_self)\n"
"{") % wrapper_name)
code_sink.indent()
for (retval, left) in op_types:
retval_converter, retval_name = get_c_to_python_converter(retval, root_module, code_sink)
left_converter, left_name = get_python_to_c_converter(left, root_module, code_sink)
code_sink.writeln("{")
code_sink.indent()
code_sink.writeln("%s self;" % left_name)
code_sink.writeln("if (%s(py_self, &self)) {" % (left_converter))
code_sink.indent()
code_sink.writeln("%s result = %s(self);" % (retval_name, op_symbol))
code_sink.writeln("return %s(&result);" % retval_converter)
code_sink.unindent()
code_sink.writeln("}")
code_sink.writeln("PyErr_Clear();")
code_sink.unindent()
code_sink.writeln("}")
code_sink.writeln("Py_INCREF(Py_NotImplemented);")
code_sink.writeln("return Py_NotImplemented;")
code_sink.unindent()
code_sink.writeln("}")
try_wrap_operator('+', 'nb_add')
try_wrap_operator('-', 'nb_subtract')
try_wrap_operator('*', 'nb_multiply')
try_wrap_operator('/', 'nb_divide')
try_wrap_operator('+=', 'nb_inplace_add')
try_wrap_operator('-=', 'nb_inplace_subtract')
try_wrap_operator('*=', 'nb_inplace_multiply')
try_wrap_operator('/=', 'nb_inplace_divide')
try_wrap_unary_operator('-', 'nb_negative')
pynumbermethods.generate(code_sink)
return '&' + number_methods_var_name
def _generate_sequence_methods(self, code_sink):
sequence_methods_var_name = "%s__py_sequence_methods" % (self.mangled_full_name,)
pysequencemethods = PySequenceMethods()
pysequencemethods.slots['variable'] = sequence_methods_var_name
root_module = self.module.get_root()
self_converter = root_module.generate_python_to_c_type_converter(self.ThisClassReturn(self.full_name), code_sink)
def try_wrap_sequence_method(py_name, slot_name):
if py_name in self.methods:
numwraps = len(self.methods[py_name].wrappers)
                some_wrapper_is_function = any(isinstance(x, function.Function) for x in self.methods[py_name].wrappers)
meth_wrapper_actual_name = self.methods[py_name].wrapper_actual_name
wrapper_name = "%s__%s" % (self.mangled_full_name, slot_name)
pysequencemethods.slots[slot_name] = wrapper_name
if py_name == "__len__" and (numwraps > 1 or some_wrapper_is_function):
template = pysequencemethods.FUNCTION_TEMPLATES[slot_name + "_ARGS"]
else:
template = pysequencemethods.FUNCTION_TEMPLATES[slot_name]
code_sink.writeln(template % {'wrapper_name' : wrapper_name,
'py_struct' : self._pystruct,
'method_name' : meth_wrapper_actual_name})
return
for (py_name, slot_name) in [("__len__", "sq_length"),
("__getitem__", "sq_item"),
("__setitem__", "sq_ass_item")]:
try_wrap_sequence_method(py_name, slot_name)
pysequencemethods.generate(code_sink)
return '&' + sequence_methods_var_name
def have_sequence_methods(self):
"""Determine if this object has sequence methods registered."""
for x in self.valid_sequence_methods:
if x in self.methods:
return True
return False
def _generate_type_structure(self, code_sink, docstring):
"""generate the type structure"""
self.slots.setdefault("tp_basicsize",
"sizeof(%s)" % (self.pystruct,))
tp_flags = set(['Py_TPFLAGS_DEFAULT'])
if self.allow_subclassing:
tp_flags.add("Py_TPFLAGS_HAVE_GC")
tp_flags.add("Py_TPFLAGS_BASETYPE")
self.slots.setdefault("tp_dictoffset",
"offsetof(%s, inst_dict)" % self.pystruct)
else:
self.slots.setdefault("tp_dictoffset", "0")
if self.binary_numeric_operators:
tp_flags.add("Py_TPFLAGS_CHECKTYPES")
self.slots.setdefault("tp_flags", '|'.join(tp_flags))
self.slots.setdefault("tp_doc", (docstring is None and 'NULL'
or "\"%s\"" % (docstring,)))
dict_ = self.slots
dict_.setdefault("typestruct", self.pytypestruct)
if self.outer_class is None:
mod_path = self._module.get_module_path()
mod_path.append(self.mangled_name)
dict_.setdefault("tp_name", '.'.join(mod_path))
else:
dict_.setdefault("tp_name", '%s.%s' % (self.outer_class.slots['tp_name'], self.name))
## tp_call support
try:
call_method = self.methods['__call__']
except KeyError:
pass
else:
dict_.setdefault("tp_call", call_method.wrapper_actual_name)
self.pytype.generate(code_sink)
def _generate_constructor(self, code_sink):
"""generate the constructor, if any"""
have_constructor = True
        if self.constructors and ((not self.cannot_be_constructed)
                                  or (self.helper_class is not None
                                      and not self.helper_class.cannot_be_constructed)):
code_sink.writeln()
overload = CppOverloadedConstructor(None)
self.constructors_overload = overload
overload.pystruct = self.pystruct
for constructor in self.constructors:
try:
overload.add(constructor)
except CodegenErrorBase:
continue
if overload.wrappers:
try:
overload.generate(code_sink)
except utils.SkipWrapper:
constructor = None
have_constructor = False
else:
constructor = overload.wrapper_actual_name
code_sink.writeln()
else:
constructor = None
have_constructor = False
else:
## In C++, and unlike Python, constructors with
            ## parameters are not automatically inherited by
## subclasses. We must generate a 'no constructor'
## tp_init to prevent this type from inheriting a
## tp_init that will allocate an instance of the
## parent class instead of this class.
code_sink.writeln()
wrapper = CppNoConstructor(self.cannot_be_constructed)
wrapper.generate(code_sink, self)
constructor = wrapper.wrapper_actual_name
have_constructor = False
code_sink.writeln()
self.slots.setdefault("tp_init", (constructor is None and "NULL"
or constructor))
return have_constructor
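    # For contrast with C++: in Python a subclass *does* inherit __init__, so
    # a minimal sketch of the situation the generated "no constructor"
    # tp_init guards against (illustrative only, not generated code) is:
    #
    #     class A(object):
    #         def __init__(self, x):
    #             self.x = x
    #     class B(A):
    #         pass
    #     B(1)  # fine in Python; the C++ analogue must be rejected at runtime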
def _generate_copy_method(self, code_sink):
construct_name = self.get_construct_name()
copy_wrapper_name = '_wrap_%s__copy__' % self.pystruct
code_sink.writeln('''
static PyObject*\n%s(%s *self)
{
''' % (copy_wrapper_name, self.pystruct))
code_sink.indent()
declarations = DeclarationsScope()
code_block = CodeBlock("return NULL;", declarations)
if self.allow_subclassing:
new_func = 'PyObject_GC_New'
else:
new_func = 'PyObject_New'
py_copy = declarations.declare_variable("%s*" % self.pystruct, "py_copy")
code_block.write_code("%s = %s(%s, %s);" %
(py_copy, new_func, self.pystruct, '&'+self.pytypestruct))
code_block.write_code("%s->obj = new %s(*self->obj);" % (py_copy, construct_name))
if self.allow_subclassing:
code_block.write_code("%s->inst_dict = NULL;" % py_copy)
code_block.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % py_copy)
self.wrapper_registry.write_register_new_wrapper(code_block, py_copy, "%s->obj" % py_copy)
code_block.write_code("return (PyObject*) %s;" % py_copy)
declarations.get_code_sink().flush_to(code_sink)
code_block.write_cleanup()
code_block.sink.flush_to(code_sink)
code_sink.unindent()
code_sink.writeln("}")
code_sink.writeln()
return copy_wrapper_name
def _generate_MI_parent_methods(self, code_sink):
methods = {}
mro = self.get_mro()
mro.next()
for base in mro:
for method_name, parent_overload in base.methods.iteritems():
# skip methods registered via special type slots, not method table
if method_name in ['__call__', "__len__", "__getitem__", "__setitem__"]:
continue
try:
overload = methods[method_name]
except KeyError:
overload = CppOverloadedMethod(method_name)
overload.pystruct = self.pystruct
methods[method_name] = overload
for parent_wrapper in parent_overload.wrappers:
if parent_wrapper.visibility != 'public':
continue
# the method may have been re-defined as private in our class
private = False
for leaf_wrapper in self.nonpublic_methods:
if leaf_wrapper.matches_signature(parent_wrapper):
private = True
break
if private:
continue
# the method may have already been wrapped in our class
already_wrapped = False
try:
overload = self.methods[method_name]
except KeyError:
pass
else:
for leaf_wrapper in overload.wrappers:
if leaf_wrapper.matches_signature(parent_wrapper):
already_wrapped = True
break
if already_wrapped:
continue
wrapper = parent_wrapper.clone()
wrapper.original_class = base
wrapper.class_ = self
overload.add(wrapper)
method_defs = []
for method_name, overload in methods.iteritems():
if not overload.wrappers:
continue
classes = []
for wrapper in overload.wrappers:
if wrapper.original_class not in classes:
classes.append(wrapper.original_class)
if len(classes) > 1:
continue # overloading with multiple base classes is just too confusing
try:
utils.call_with_error_handling(overload.generate, (code_sink,), {}, overload)
except utils.SkipWrapper:
continue
code_sink.writeln()
method_defs.append(overload.get_py_method_def(method_name))
return method_defs
def _generate_methods(self, code_sink, parent_caller_methods):
"""generate the method wrappers"""
method_defs = []
for meth_name, overload in self.methods.iteritems():
code_sink.writeln()
#overload.generate(code_sink)
try:
utils.call_with_error_handling(overload.generate, (code_sink,), {}, overload)
except utils.SkipWrapper:
continue
# skip methods registered via special type slots, not method table
if meth_name not in ['__call__', "__len__",
"__getitem__", "__setitem__"]:
method_defs.append(overload.get_py_method_def(meth_name))
code_sink.writeln()
method_defs.extend(parent_caller_methods)
if len(self.bases) > 1: # https://bugs.launchpad.net/pybindgen/+bug/563786
method_defs.extend(self._generate_MI_parent_methods(code_sink))
if self.has_copy_constructor:
try:
copy_wrapper_name = utils.call_with_error_handling(self._generate_copy_method, (code_sink,), {}, self)
except utils.SkipWrapper:
pass
else:
method_defs.append('{(char *) "__copy__", (PyCFunction) %s, METH_NOARGS, NULL},' % copy_wrapper_name)
## generate the method table
code_sink.writeln("static PyMethodDef %s_methods[] = {" % (self.pystruct,))
code_sink.indent()
for methdef in method_defs:
code_sink.writeln(methdef)
code_sink.writeln("{NULL, NULL, 0, NULL}")
code_sink.unindent()
code_sink.writeln("};")
self.slots.setdefault("tp_methods", "%s_methods" % (self.pystruct,))
def _get_delete_code(self):
if self.is_singleton:
delete_code = ''
else:
if self.memory_policy is not None:
delete_code = ("if (self->obj) {\n"
" %s *tmp = self->obj;\n"
" self->obj = NULL;\n"
" %s\n"
"}"
% (self.full_name, self.memory_policy.get_free_code('tmp')))
else:
if self.incomplete_type:
raise CodeGenerationError("Cannot finish generating class %s: "
"type is incomplete, but no free/unref_function defined"
% self.full_name)
if self.destructor_visibility == 'public':
delete_code = (" %s *tmp = self->obj;\n"
" self->obj = NULL;\n"
" if (!(self->flags&PYBINDGEN_WRAPPER_FLAG_OBJECT_NOT_OWNED)) {\n"
" delete tmp;\n"
" }" % (self.full_name,))
else:
delete_code = (" self->obj = NULL;\n")
return delete_code
def _generate_gc_methods(self, code_sink):
"""Generate tp_clear and tp_traverse"""
## --- tp_clear ---
tp_clear_function_name = "%s__tp_clear" % (self.pystruct,)
self.slots.setdefault("tp_clear", tp_clear_function_name )
delete_code = self._get_delete_code()
code_sink.writeln(r'''
static void
%s(%s *self)
{
Py_CLEAR(self->inst_dict);
%s
}
''' % (tp_clear_function_name, self.pystruct, delete_code))
## --- tp_traverse ---
tp_traverse_function_name = "%s__tp_traverse" % (self.pystruct,)
self.slots.setdefault("tp_traverse", tp_traverse_function_name )
if self.helper_class is None:
visit_self = ''
else:
if not isinstance(self.memory_policy, ReferenceCountingMethodsPolicy) or self.memory_policy.peekref_method is None:
peekref_code = ''
else:
peekref_code = " && self->obj->%s() == 1" % self.memory_policy.peekref_method
visit_self = '''
if (self->obj && typeid(*self->obj).name() == typeid(%s).name() %s)
Py_VISIT((PyObject *) self);
''' % (self.helper_class.name, peekref_code)
code_sink.writeln(r'''
static int
%s(%s *self, visitproc visit, void *arg)
{
Py_VISIT(self->inst_dict);
%s
return 0;
}
''' % (tp_traverse_function_name, self.pystruct, visit_self))
def _generate_str(self, code_sink):
"""Generate a tp_str function and register it in the type"""
tp_str_function_name = "_wrap_%s__tp_str" % (self.pystruct,)
self.slots.setdefault("tp_str", tp_str_function_name )
code_sink.writeln('''
static PyObject *
%s(%s *self)
{
std::ostringstream oss;
oss << *self->obj;
return PyString_FromString(oss.str ().c_str ());
}
''' % (tp_str_function_name, self.pystruct))
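    # Note: the generated tp_str streams the wrapped object with
    # "oss << *self->obj", so it only compiles for C++ types that provide a
    # std::ostream operator<< overload; presumably this slot should only be
    # enabled for such classes.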
def _generate_tp_hash(self, code_sink):
"""generates a tp_hash function, which returns a hash of the self->obj pointer"""
tp_hash_function_name = "_wrap_%s__tp_hash" % (self.pystruct,)
self.slots.setdefault("tp_hash", tp_hash_function_name )
code_sink.writeln('''
static long
%s(%s *self)
{
return (long) self->obj;
}
''' % (tp_hash_function_name, self.pystruct))
def _generate_tp_compare(self, code_sink):
"""generates a tp_compare function, which compares the ->obj pointers"""
tp_compare_function_name = "_wrap_%s__tp_compare" % (self.pystruct,)
self.slots.setdefault("tp_compare", tp_compare_function_name )
code_sink.writeln('''
static int
%s(%s *self, %s *other)
{
if (self->obj == other->obj) return 0;
if (self->obj > other->obj) return -1;
return 1;
}
''' % (tp_compare_function_name, self.pystruct, self.pystruct))
def _generate_destructor(self, code_sink, have_constructor):
"""Generate a tp_dealloc function and register it in the type"""
## don't generate destructor if overridden by user
if "tp_dealloc" in self.slots:
return
tp_dealloc_function_name = "_wrap_%s__tp_dealloc" % (self.pystruct,)
code_sink.writeln(r'''
static void
%s(%s *self)
{''' % (tp_dealloc_function_name, self.pystruct))
code_sink.indent()
code_block = CodeBlock("PyErr_Print(); return;", DeclarationsScope())
self.wrapper_registry.write_unregister_wrapper(code_block, 'self', 'self->obj')
if self.allow_subclassing:
code_block.write_code("%s(self);" % self.slots["tp_clear"])
else:
code_block.write_code(self._get_delete_code())
code_block.write_code('self->ob_type->tp_free((PyObject*)self);')
code_block.write_cleanup()
code_block.declarations.get_code_sink().flush_to(code_sink)
code_block.sink.flush_to(code_sink)
code_sink.unindent()
code_sink.writeln('}\n')
self.slots.setdefault("tp_dealloc", tp_dealloc_function_name )
def _generate_tp_richcompare(self, code_sink):
tp_richcompare_function_name = "_wrap_%s__tp_richcompare" % (self.pystruct,)
code_sink.writeln("static PyObject*\n%s (%s *PYBINDGEN_UNUSED(self), %s *other, int opid)"
% (tp_richcompare_function_name, self.pystruct, self.pystruct))
code_sink.writeln("{")
code_sink.indent()
code_sink.writeln("""
if (!PyObject_IsInstance((PyObject*) other, (PyObject*) &%s)) {
Py_INCREF(Py_NotImplemented);
return Py_NotImplemented;
}""" % self.pytypestruct)
code_sink.writeln("switch (opid)\n{")
def wrap_operator(name, opid_code):
code_sink.writeln("case %s:" % opid_code)
code_sink.indent()
if name in self.binary_comparison_operators:
code_sink.writeln("if (*self->obj %(OP)s *other->obj) {\n"
" Py_INCREF(Py_True);\n"
" return Py_True;\n"
"} else {\n"
" Py_INCREF(Py_False);\n"
" return Py_False;\n"
"}" % dict(OP=name))
else:
code_sink.writeln("Py_INCREF(Py_NotImplemented);\n"
"return Py_NotImplemented;")
code_sink.unindent()
wrap_operator('<', 'Py_LT')
wrap_operator('<=', 'Py_LE')
wrap_operator('==', 'Py_EQ')
wrap_operator('!=', 'Py_NE')
wrap_operator('>=', 'Py_GE')
wrap_operator('>', 'Py_GT')
code_sink.writeln("} /* closes switch (opid) */")
code_sink.writeln("Py_INCREF(Py_NotImplemented);\n"
"return Py_NotImplemented;")
code_sink.unindent()
code_sink.writeln("}\n")
return tp_richcompare_function_name
def generate_typedef(self, module, alias):
"""
Generates the appropriate Module code to register the class
with a new name in that module (typedef alias).
"""
module.after_init.write_code(
'PyModule_AddObject(m, (char *) \"%s\", (PyObject *) &%s);' % (
alias, self.pytypestruct))
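    # A minimal usage sketch (hypothetical names; the exact registration
    # entry point in client code may differ):
    #
    #     from pybindgen import Module
    #     mod = Module('foo')
    #     klass = mod.add_class('Bar')
    #     klass.generate_typedef(mod, 'BarAlias')  # foo.BarAlias is foo.Bar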
from cppclass_typehandlers import CppClassParameter, CppClassRefParameter, \
CppClassReturnValue, CppClassRefReturnValue, CppClassPtrParameter, CppClassPtrReturnValue, CppClassParameterBase
import function
from cppmethod import CppMethod, CppConstructor, CppNoConstructor, CppFunctionAsConstructor, \
CppOverloadedMethod, CppOverloadedConstructor, \
CppVirtualMethodParentCaller, CppVirtualMethodProxy, CustomCppMethodWrapper, \
CppDummyMethod
| diedthreetimes/VCrash | pybindgen-0.15.0.795/pybindgen/cppclass.py | Python | gpl-2.0 | 102925 | ["VisIt"] | 1a334837eb1aed57e849cdf2965320ff5f4ea4c21415ce38807b8d796208ba51 |
#!/usr/bin/env python
#
#
# FreeType 2 glyph name builder
#
# Copyright 1996-2000, 2003, 2005, 2007, 2008, 2011 by
# David Turner, Robert Wilhelm, and Werner Lemberg.
#
# This file is part of the FreeType project, and may only be used, modified,
# and distributed under the terms of the FreeType project license,
# LICENSE.TXT. By continuing to use, modify, or distribute this file you
# indicate that you have read the license and understand and accept it
# fully.
"""\
usage: %s <output-file>
This Python script generates the glyph name tables defined in the
`psnames' module.
Its single argument is the name of the header file to be created.
"""
import sys, string, struct, re, os.path
# This table lists the glyphs according to the Macintosh specification.
# It is used by the TrueType Postscript names table.
#
# See
#
# http://fonts.apple.com/TTRefMan/RM06/Chap6post.html
#
# for the official list.
#
mac_standard_names = \
[
# 0
".notdef", ".null", "nonmarkingreturn", "space", "exclam",
"quotedbl", "numbersign", "dollar", "percent", "ampersand",
# 10
"quotesingle", "parenleft", "parenright", "asterisk", "plus",
"comma", "hyphen", "period", "slash", "zero",
# 20
"one", "two", "three", "four", "five",
"six", "seven", "eight", "nine", "colon",
# 30
"semicolon", "less", "equal", "greater", "question",
"at", "A", "B", "C", "D",
# 40
"E", "F", "G", "H", "I",
"J", "K", "L", "M", "N",
# 50
"O", "P", "Q", "R", "S",
"T", "U", "V", "W", "X",
# 60
"Y", "Z", "bracketleft", "backslash", "bracketright",
"asciicircum", "underscore", "grave", "a", "b",
# 70
"c", "d", "e", "f", "g",
"h", "i", "j", "k", "l",
# 80
"m", "n", "o", "p", "q",
"r", "s", "t", "u", "v",
# 90
"w", "x", "y", "z", "braceleft",
"bar", "braceright", "asciitilde", "Adieresis", "Aring",
# 100
"Ccedilla", "Eacute", "Ntilde", "Odieresis", "Udieresis",
"aacute", "agrave", "acircumflex", "adieresis", "atilde",
# 110
"aring", "ccedilla", "eacute", "egrave", "ecircumflex",
"edieresis", "iacute", "igrave", "icircumflex", "idieresis",
# 120
"ntilde", "oacute", "ograve", "ocircumflex", "odieresis",
"otilde", "uacute", "ugrave", "ucircumflex", "udieresis",
# 130
"dagger", "degree", "cent", "sterling", "section",
"bullet", "paragraph", "germandbls", "registered", "copyright",
# 140
"trademark", "acute", "dieresis", "notequal", "AE",
"Oslash", "infinity", "plusminus", "lessequal", "greaterequal",
# 150
"yen", "mu", "partialdiff", "summation", "product",
"pi", "integral", "ordfeminine", "ordmasculine", "Omega",
# 160
"ae", "oslash", "questiondown", "exclamdown", "logicalnot",
"radical", "florin", "approxequal", "Delta", "guillemotleft",
# 170
"guillemotright", "ellipsis", "nonbreakingspace", "Agrave", "Atilde",
"Otilde", "OE", "oe", "endash", "emdash",
# 180
"quotedblleft", "quotedblright", "quoteleft", "quoteright", "divide",
"lozenge", "ydieresis", "Ydieresis", "fraction", "currency",
# 190
"guilsinglleft", "guilsinglright", "fi", "fl", "daggerdbl",
"periodcentered", "quotesinglbase", "quotedblbase", "perthousand",
"Acircumflex",
# 200
"Ecircumflex", "Aacute", "Edieresis", "Egrave", "Iacute",
"Icircumflex", "Idieresis", "Igrave", "Oacute", "Ocircumflex",
# 210
"apple", "Ograve", "Uacute", "Ucircumflex", "Ugrave",
"dotlessi", "circumflex", "tilde", "macron", "breve",
# 220
"dotaccent", "ring", "cedilla", "hungarumlaut", "ogonek",
"caron", "Lslash", "lslash", "Scaron", "scaron",
# 230
"Zcaron", "zcaron", "brokenbar", "Eth", "eth",
"Yacute", "yacute", "Thorn", "thorn", "minus",
# 240
"multiply", "onesuperior", "twosuperior", "threesuperior", "onehalf",
"onequarter", "threequarters", "franc", "Gbreve", "gbreve",
# 250
"Idotaccent", "Scedilla", "scedilla", "Cacute", "cacute",
"Ccaron", "ccaron", "dcroat"
]
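# A small lookup sketch (added for illustration, not part of the original
# generator): TrueType 'post' format 2.0 glyph name indices below 258 refer
# directly into the Macintosh table above.
def mac_name_for_index(index):
    # Return the Macintosh standard glyph name, or None outside the table.
    if 0 <= index < len(mac_standard_names):
        return mac_standard_names[index]
    return None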
# The list of standard `SID' glyph names. For the official list,
# see Annex A of document at
#
# http://partners.adobe.com/public/developer/en/font/5176.CFF.pdf .
#
sid_standard_names = \
[
# 0
".notdef", "space", "exclam", "quotedbl", "numbersign",
"dollar", "percent", "ampersand", "quoteright", "parenleft",
# 10
"parenright", "asterisk", "plus", "comma", "hyphen",
"period", "slash", "zero", "one", "two",
# 20
"three", "four", "five", "six", "seven",
"eight", "nine", "colon", "semicolon", "less",
# 30
"equal", "greater", "question", "at", "A",
"B", "C", "D", "E", "F",
# 40
"G", "H", "I", "J", "K",
"L", "M", "N", "O", "P",
# 50
"Q", "R", "S", "T", "U",
"V", "W", "X", "Y", "Z",
# 60
"bracketleft", "backslash", "bracketright", "asciicircum", "underscore",
"quoteleft", "a", "b", "c", "d",
# 70
"e", "f", "g", "h", "i",
"j", "k", "l", "m", "n",
# 80
"o", "p", "q", "r", "s",
"t", "u", "v", "w", "x",
# 90
"y", "z", "braceleft", "bar", "braceright",
"asciitilde", "exclamdown", "cent", "sterling", "fraction",
# 100
"yen", "florin", "section", "currency", "quotesingle",
"quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi",
# 110
"fl", "endash", "dagger", "daggerdbl", "periodcentered",
"paragraph", "bullet", "quotesinglbase", "quotedblbase", "quotedblright",
# 120
"guillemotright", "ellipsis", "perthousand", "questiondown", "grave",
"acute", "circumflex", "tilde", "macron", "breve",
# 130
"dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut",
"ogonek", "caron", "emdash", "AE", "ordfeminine",
# 140
"Lslash", "Oslash", "OE", "ordmasculine", "ae",
"dotlessi", "lslash", "oslash", "oe", "germandbls",
# 150
"onesuperior", "logicalnot", "mu", "trademark", "Eth",
"onehalf", "plusminus", "Thorn", "onequarter", "divide",
# 160
"brokenbar", "degree", "thorn", "threequarters", "twosuperior",
"registered", "minus", "eth", "multiply", "threesuperior",
# 170
"copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave",
"Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex",
# 180
"Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis",
"Igrave", "Ntilde", "Oacute", "Ocircumflex", "Odieresis",
# 190
"Ograve", "Otilde", "Scaron", "Uacute", "Ucircumflex",
"Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron",
# 200
"aacute", "acircumflex", "adieresis", "agrave", "aring",
"atilde", "ccedilla", "eacute", "ecircumflex", "edieresis",
# 210
"egrave", "iacute", "icircumflex", "idieresis", "igrave",
"ntilde", "oacute", "ocircumflex", "odieresis", "ograve",
# 220
"otilde", "scaron", "uacute", "ucircumflex", "udieresis",
"ugrave", "yacute", "ydieresis", "zcaron", "exclamsmall",
# 230
"Hungarumlautsmall", "dollaroldstyle", "dollarsuperior", "ampersandsmall",
"Acutesmall",
"parenleftsuperior", "parenrightsuperior", "twodotenleader",
"onedotenleader", "zerooldstyle",
# 240
"oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle",
"fiveoldstyle",
"sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle",
"commasuperior",
# 250
"threequartersemdash", "periodsuperior", "questionsmall", "asuperior",
"bsuperior",
"centsuperior", "dsuperior", "esuperior", "isuperior", "lsuperior",
# 260
"msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
"tsuperior", "ff", "ffi", "ffl", "parenleftinferior",
# 270
"parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall",
"Asmall",
"Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall",
# 280
"Gsmall", "Hsmall", "Ismall", "Jsmall", "Ksmall",
"Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall",
# 290
"Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall",
"Vsmall", "Wsmall", "Xsmall", "Ysmall", "Zsmall",
# 300
"colonmonetary", "onefitted", "rupiah", "Tildesmall", "exclamdownsmall",
"centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall",
"Dieresissmall",
# 310
"Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", "figuredash",
"hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall",
"questiondownsmall",
# 320
"oneeighth", "threeeighths", "fiveeighths", "seveneighths", "onethird",
"twothirds", "zerosuperior", "foursuperior", "fivesuperior",
"sixsuperior",
# 330
"sevensuperior", "eightsuperior", "ninesuperior", "zeroinferior",
"oneinferior",
"twoinferior", "threeinferior", "fourinferior", "fiveinferior",
"sixinferior",
# 340
"seveninferior", "eightinferior", "nineinferior", "centinferior",
"dollarinferior",
"periodinferior", "commainferior", "Agravesmall", "Aacutesmall",
"Acircumflexsmall",
# 350
"Atildesmall", "Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall",
"Egravesmall", "Eacutesmall", "Ecircumflexsmall", "Edieresissmall",
"Igravesmall",
# 360
"Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall",
"Ntildesmall",
"Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall",
"Odieresissmall",
# 370
"OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall",
"Ucircumflexsmall",
"Udieresissmall", "Yacutesmall", "Thornsmall", "Ydieresissmall",
"001.000",
# 380
"001.001", "001.002", "001.003", "Black", "Bold",
"Book", "Light", "Medium", "Regular", "Roman",
# 390
"Semibold"
]
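# A small lookup sketch (added for illustration, not part of the original
# generator): SIDs 0-390 name the standard glyphs above; larger SIDs index a
# font's own String INDEX in CFF.
def sid_to_name(sid, custom_strings=()):
    # Return the glyph name for a SID, falling back to the font's strings.
    if 0 <= sid < len(sid_standard_names):
        return sid_standard_names[sid]
    return custom_strings[sid - len(sid_standard_names)]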
# This table maps character codes of the Adobe Standard Type 1
# encoding to glyph indices in the sid_standard_names table.
#
t1_standard_encoding = \
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
89, 90, 91, 92, 93, 94, 95, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 96, 97, 98, 99, 100, 101, 102, 103, 104,
105, 106, 107, 108, 109, 110, 0, 111, 112, 113,
114, 0, 115, 116, 117, 118, 119, 120, 121, 122,
0, 123, 0, 124, 125, 126, 127, 128, 129, 130,
131, 0, 132, 133, 0, 134, 135, 136, 137, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 138, 0, 139, 0, 0,
0, 0, 140, 141, 142, 143, 0, 0, 0, 0,
0, 144, 0, 0, 0, 145, 0, 0, 146, 147,
148, 149, 0, 0, 0, 0
]
# This table maps character codes of the Adobe Expert Type 1
# encoding to glyph indices in the sid_standard_names table.
#
t1_expert_encoding = \
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 229, 230, 0, 231, 232, 233, 234,
235, 236, 237, 238, 13, 14, 15, 99, 239, 240,
241, 242, 243, 244, 245, 246, 247, 248, 27, 28,
249, 250, 251, 252, 0, 253, 254, 255, 256, 257,
0, 0, 0, 258, 0, 0, 259, 260, 261, 262,
0, 0, 263, 264, 265, 0, 266, 109, 110, 267,
268, 269, 0, 270, 271, 272, 273, 274, 275, 276,
277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
297, 298, 299, 300, 301, 302, 303, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 304, 305, 306, 0, 0, 307, 308, 309, 310,
311, 0, 312, 0, 0, 313, 0, 0, 314, 315,
0, 0, 316, 317, 318, 0, 0, 0, 158, 155,
163, 319, 320, 321, 322, 323, 324, 325, 0, 0,
326, 150, 164, 169, 327, 328, 329, 330, 331, 332,
333, 334, 335, 336, 337, 338, 339, 340, 341, 342,
343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
353, 354, 355, 356, 357, 358, 359, 360, 361, 362,
363, 364, 365, 366, 367, 368, 369, 370, 371, 372,
373, 374, 375, 376, 377, 378
]
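# A decoding sketch (added for illustration): both encoding tables above map
# a character code to an index into sid_standard_names, so one helper covers
# the Standard and Expert encodings; unmapped codes decode to ".notdef".
def encoded_glyph_name(code, encoding=t1_standard_encoding):
    if 0 <= code < len(encoding):
        return sid_standard_names[encoding[code]]
    return ".notdef"
# For example, encoded_glyph_name(ord("A")) == "A" and
# encoded_glyph_name(0x20, t1_expert_encoding) == "space".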
# This data has been taken literally from the file `glyphlist.txt',
# version 2.0, 22 Sept 2002. It is available from
#
# http://sourceforge.net/adobe/aglfn/
#
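# A parsing sketch (added for illustration): each non-empty line of the data
# below has the form "glyphname;XXXX[ XXXX ...]" with hexadecimal Unicode
# values, so a small helper can build a name -> code-points mapping.
def parse_glyph_list(data):
    mapping = {}
    for line in data.splitlines():
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        name, codes = line.split(";")
        mapping[name] = [int(c, 16) for c in codes.split()]
    return mapping
# e.g. parse_glyph_list(adobe_glyph_list)["AE"] == [0x00C6], once the
# assignment below has executed.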
adobe_glyph_list = """\
A;0041
AE;00C6
AEacute;01FC
AEmacron;01E2
AEsmall;F7E6
Aacute;00C1
Aacutesmall;F7E1
Abreve;0102
Abreveacute;1EAE
Abrevecyrillic;04D0
Abrevedotbelow;1EB6
Abrevegrave;1EB0
Abrevehookabove;1EB2
Abrevetilde;1EB4
Acaron;01CD
Acircle;24B6
Acircumflex;00C2
Acircumflexacute;1EA4
Acircumflexdotbelow;1EAC
Acircumflexgrave;1EA6
Acircumflexhookabove;1EA8
Acircumflexsmall;F7E2
Acircumflextilde;1EAA
Acute;F6C9
Acutesmall;F7B4
Acyrillic;0410
Adblgrave;0200
Adieresis;00C4
Adieresiscyrillic;04D2
Adieresismacron;01DE
Adieresissmall;F7E4
Adotbelow;1EA0
Adotmacron;01E0
Agrave;00C0
Agravesmall;F7E0
Ahookabove;1EA2
Aiecyrillic;04D4
Ainvertedbreve;0202
Alpha;0391
Alphatonos;0386
Amacron;0100
Amonospace;FF21
Aogonek;0104
Aring;00C5
Aringacute;01FA
Aringbelow;1E00
Aringsmall;F7E5
Asmall;F761
Atilde;00C3
Atildesmall;F7E3
Aybarmenian;0531
B;0042
Bcircle;24B7
Bdotaccent;1E02
Bdotbelow;1E04
Becyrillic;0411
Benarmenian;0532
Beta;0392
Bhook;0181
Blinebelow;1E06
Bmonospace;FF22
Brevesmall;F6F4
Bsmall;F762
Btopbar;0182
C;0043
Caarmenian;053E
Cacute;0106
Caron;F6CA
Caronsmall;F6F5
Ccaron;010C
Ccedilla;00C7
Ccedillaacute;1E08
Ccedillasmall;F7E7
Ccircle;24B8
Ccircumflex;0108
Cdot;010A
Cdotaccent;010A
Cedillasmall;F7B8
Chaarmenian;0549
Cheabkhasiancyrillic;04BC
Checyrillic;0427
Chedescenderabkhasiancyrillic;04BE
Chedescendercyrillic;04B6
Chedieresiscyrillic;04F4
Cheharmenian;0543
Chekhakassiancyrillic;04CB
Cheverticalstrokecyrillic;04B8
Chi;03A7
Chook;0187
Circumflexsmall;F6F6
Cmonospace;FF23
Coarmenian;0551
Csmall;F763
D;0044
DZ;01F1
DZcaron;01C4
Daarmenian;0534
Dafrican;0189
Dcaron;010E
Dcedilla;1E10
Dcircle;24B9
Dcircumflexbelow;1E12
Dcroat;0110
Ddotaccent;1E0A
Ddotbelow;1E0C
Decyrillic;0414
Deicoptic;03EE
Delta;2206
Deltagreek;0394
Dhook;018A
Dieresis;F6CB
DieresisAcute;F6CC
DieresisGrave;F6CD
Dieresissmall;F7A8
Digammagreek;03DC
Djecyrillic;0402
Dlinebelow;1E0E
Dmonospace;FF24
Dotaccentsmall;F6F7
Dslash;0110
Dsmall;F764
Dtopbar;018B
Dz;01F2
Dzcaron;01C5
Dzeabkhasiancyrillic;04E0
Dzecyrillic;0405
Dzhecyrillic;040F
E;0045
Eacute;00C9
Eacutesmall;F7E9
Ebreve;0114
Ecaron;011A
Ecedillabreve;1E1C
Echarmenian;0535
Ecircle;24BA
Ecircumflex;00CA
Ecircumflexacute;1EBE
Ecircumflexbelow;1E18
Ecircumflexdotbelow;1EC6
Ecircumflexgrave;1EC0
Ecircumflexhookabove;1EC2
Ecircumflexsmall;F7EA
Ecircumflextilde;1EC4
Ecyrillic;0404
Edblgrave;0204
Edieresis;00CB
Edieresissmall;F7EB
Edot;0116
Edotaccent;0116
Edotbelow;1EB8
Efcyrillic;0424
Egrave;00C8
Egravesmall;F7E8
Eharmenian;0537
Ehookabove;1EBA
Eightroman;2167
Einvertedbreve;0206
Eiotifiedcyrillic;0464
Elcyrillic;041B
Elevenroman;216A
Emacron;0112
Emacronacute;1E16
Emacrongrave;1E14
Emcyrillic;041C
Emonospace;FF25
Encyrillic;041D
Endescendercyrillic;04A2
Eng;014A
Enghecyrillic;04A4
Enhookcyrillic;04C7
Eogonek;0118
Eopen;0190
Epsilon;0395
Epsilontonos;0388
Ercyrillic;0420
Ereversed;018E
Ereversedcyrillic;042D
Escyrillic;0421
Esdescendercyrillic;04AA
Esh;01A9
Esmall;F765
Eta;0397
Etarmenian;0538
Etatonos;0389
Eth;00D0
Ethsmall;F7F0
Etilde;1EBC
Etildebelow;1E1A
Euro;20AC
Ezh;01B7
Ezhcaron;01EE
Ezhreversed;01B8
F;0046
Fcircle;24BB
Fdotaccent;1E1E
Feharmenian;0556
Feicoptic;03E4
Fhook;0191
Fitacyrillic;0472
Fiveroman;2164
Fmonospace;FF26
Fourroman;2163
Fsmall;F766
G;0047
GBsquare;3387
Gacute;01F4
Gamma;0393
Gammaafrican;0194
Gangiacoptic;03EA
Gbreve;011E
Gcaron;01E6
Gcedilla;0122
Gcircle;24BC
Gcircumflex;011C
Gcommaaccent;0122
Gdot;0120
Gdotaccent;0120
Gecyrillic;0413
Ghadarmenian;0542
Ghemiddlehookcyrillic;0494
Ghestrokecyrillic;0492
Gheupturncyrillic;0490
Ghook;0193
Gimarmenian;0533
Gjecyrillic;0403
Gmacron;1E20
Gmonospace;FF27
Grave;F6CE
Gravesmall;F760
Gsmall;F767
Gsmallhook;029B
Gstroke;01E4
H;0048
H18533;25CF
H18543;25AA
H18551;25AB
H22073;25A1
HPsquare;33CB
Haabkhasiancyrillic;04A8
Hadescendercyrillic;04B2
Hardsigncyrillic;042A
Hbar;0126
Hbrevebelow;1E2A
Hcedilla;1E28
Hcircle;24BD
Hcircumflex;0124
Hdieresis;1E26
Hdotaccent;1E22
Hdotbelow;1E24
Hmonospace;FF28
Hoarmenian;0540
Horicoptic;03E8
Hsmall;F768
Hungarumlaut;F6CF
Hungarumlautsmall;F6F8
Hzsquare;3390
I;0049
IAcyrillic;042F
IJ;0132
IUcyrillic;042E
Iacute;00CD
Iacutesmall;F7ED
Ibreve;012C
Icaron;01CF
Icircle;24BE
Icircumflex;00CE
Icircumflexsmall;F7EE
Icyrillic;0406
Idblgrave;0208
Idieresis;00CF
Idieresisacute;1E2E
Idieresiscyrillic;04E4
Idieresissmall;F7EF
Idot;0130
Idotaccent;0130
Idotbelow;1ECA
Iebrevecyrillic;04D6
Iecyrillic;0415
Ifraktur;2111
Igrave;00CC
Igravesmall;F7EC
Ihookabove;1EC8
Iicyrillic;0418
Iinvertedbreve;020A
Iishortcyrillic;0419
Imacron;012A
Imacroncyrillic;04E2
Imonospace;FF29
Iniarmenian;053B
Iocyrillic;0401
Iogonek;012E
Iota;0399
Iotaafrican;0196
Iotadieresis;03AA
Iotatonos;038A
Ismall;F769
Istroke;0197
Itilde;0128
Itildebelow;1E2C
Izhitsacyrillic;0474
Izhitsadblgravecyrillic;0476
J;004A
Jaarmenian;0541
Jcircle;24BF
Jcircumflex;0134
Jecyrillic;0408
Jheharmenian;054B
Jmonospace;FF2A
Jsmall;F76A
K;004B
KBsquare;3385
KKsquare;33CD
Kabashkircyrillic;04A0
Kacute;1E30
Kacyrillic;041A
Kadescendercyrillic;049A
Kahookcyrillic;04C3
Kappa;039A
Kastrokecyrillic;049E
Kaverticalstrokecyrillic;049C
Kcaron;01E8
Kcedilla;0136
Kcircle;24C0
Kcommaaccent;0136
Kdotbelow;1E32
Keharmenian;0554
Kenarmenian;053F
Khacyrillic;0425
Kheicoptic;03E6
Khook;0198
Kjecyrillic;040C
Klinebelow;1E34
Kmonospace;FF2B
Koppacyrillic;0480
Koppagreek;03DE
Ksicyrillic;046E
Ksmall;F76B
L;004C
LJ;01C7
LL;F6BF
Lacute;0139
Lambda;039B
Lcaron;013D
Lcedilla;013B
Lcircle;24C1
Lcircumflexbelow;1E3C
Lcommaaccent;013B
Ldot;013F
Ldotaccent;013F
Ldotbelow;1E36
Ldotbelowmacron;1E38
Liwnarmenian;053C
Lj;01C8
Ljecyrillic;0409
Llinebelow;1E3A
Lmonospace;FF2C
Lslash;0141
Lslashsmall;F6F9
Lsmall;F76C
M;004D
MBsquare;3386
Macron;F6D0
Macronsmall;F7AF
Macute;1E3E
Mcircle;24C2
Mdotaccent;1E40
Mdotbelow;1E42
Menarmenian;0544
Mmonospace;FF2D
Msmall;F76D
Mturned;019C
Mu;039C
N;004E
NJ;01CA
Nacute;0143
Ncaron;0147
Ncedilla;0145
Ncircle;24C3
Ncircumflexbelow;1E4A
Ncommaaccent;0145
Ndotaccent;1E44
Ndotbelow;1E46
Nhookleft;019D
Nineroman;2168
Nj;01CB
Njecyrillic;040A
Nlinebelow;1E48
Nmonospace;FF2E
Nowarmenian;0546
Nsmall;F76E
Ntilde;00D1
Ntildesmall;F7F1
Nu;039D
O;004F
OE;0152
OEsmall;F6FA
Oacute;00D3
Oacutesmall;F7F3
Obarredcyrillic;04E8
Obarreddieresiscyrillic;04EA
Obreve;014E
Ocaron;01D1
Ocenteredtilde;019F
Ocircle;24C4
Ocircumflex;00D4
Ocircumflexacute;1ED0
Ocircumflexdotbelow;1ED8
Ocircumflexgrave;1ED2
Ocircumflexhookabove;1ED4
Ocircumflexsmall;F7F4
Ocircumflextilde;1ED6
Ocyrillic;041E
Odblacute;0150
Odblgrave;020C
Odieresis;00D6
Odieresiscyrillic;04E6
Odieresissmall;F7F6
Odotbelow;1ECC
Ogoneksmall;F6FB
Ograve;00D2
Ogravesmall;F7F2
Oharmenian;0555
Ohm;2126
Ohookabove;1ECE
Ohorn;01A0
Ohornacute;1EDA
Ohorndotbelow;1EE2
Ohorngrave;1EDC
Ohornhookabove;1EDE
Ohorntilde;1EE0
Ohungarumlaut;0150
Oi;01A2
Oinvertedbreve;020E
Omacron;014C
Omacronacute;1E52
Omacrongrave;1E50
Omega;2126
Omegacyrillic;0460
Omegagreek;03A9
Omegaroundcyrillic;047A
Omegatitlocyrillic;047C
Omegatonos;038F
Omicron;039F
Omicrontonos;038C
Omonospace;FF2F
Oneroman;2160
Oogonek;01EA
Oogonekmacron;01EC
Oopen;0186
Oslash;00D8
Oslashacute;01FE
Oslashsmall;F7F8
Osmall;F76F
Ostrokeacute;01FE
Otcyrillic;047E
Otilde;00D5
Otildeacute;1E4C
Otildedieresis;1E4E
Otildesmall;F7F5
P;0050
Pacute;1E54
Pcircle;24C5
Pdotaccent;1E56
Pecyrillic;041F
Peharmenian;054A
Pemiddlehookcyrillic;04A6
Phi;03A6
Phook;01A4
Pi;03A0
Piwrarmenian;0553
Pmonospace;FF30
Psi;03A8
Psicyrillic;0470
Psmall;F770
Q;0051
Qcircle;24C6
Qmonospace;FF31
Qsmall;F771
R;0052
Raarmenian;054C
Racute;0154
Rcaron;0158
Rcedilla;0156
Rcircle;24C7
Rcommaaccent;0156
Rdblgrave;0210
Rdotaccent;1E58
Rdotbelow;1E5A
Rdotbelowmacron;1E5C
Reharmenian;0550
Rfraktur;211C
Rho;03A1
Ringsmall;F6FC
Rinvertedbreve;0212
Rlinebelow;1E5E
Rmonospace;FF32
Rsmall;F772
Rsmallinverted;0281
Rsmallinvertedsuperior;02B6
S;0053
SF010000;250C
SF020000;2514
SF030000;2510
SF040000;2518
SF050000;253C
SF060000;252C
SF070000;2534
SF080000;251C
SF090000;2524
SF100000;2500
SF110000;2502
SF190000;2561
SF200000;2562
SF210000;2556
SF220000;2555
SF230000;2563
SF240000;2551
SF250000;2557
SF260000;255D
SF270000;255C
SF280000;255B
SF360000;255E
SF370000;255F
SF380000;255A
SF390000;2554
SF400000;2569
SF410000;2566
SF420000;2560
SF430000;2550
SF440000;256C
SF450000;2567
SF460000;2568
SF470000;2564
SF480000;2565
SF490000;2559
SF500000;2558
SF510000;2552
SF520000;2553
SF530000;256B
SF540000;256A
Sacute;015A
Sacutedotaccent;1E64
Sampigreek;03E0
Scaron;0160
Scarondotaccent;1E66
Scaronsmall;F6FD
Scedilla;015E
Schwa;018F
Schwacyrillic;04D8
Schwadieresiscyrillic;04DA
Scircle;24C8
Scircumflex;015C
Scommaaccent;0218
Sdotaccent;1E60
Sdotbelow;1E62
Sdotbelowdotaccent;1E68
Seharmenian;054D
Sevenroman;2166
Shaarmenian;0547
Shacyrillic;0428
Shchacyrillic;0429
Sheicoptic;03E2
Shhacyrillic;04BA
Shimacoptic;03EC
Sigma;03A3
Sixroman;2165
Smonospace;FF33
Softsigncyrillic;042C
Ssmall;F773
Stigmagreek;03DA
T;0054
Tau;03A4
Tbar;0166
Tcaron;0164
Tcedilla;0162
Tcircle;24C9
Tcircumflexbelow;1E70
Tcommaaccent;0162
Tdotaccent;1E6A
Tdotbelow;1E6C
Tecyrillic;0422
Tedescendercyrillic;04AC
Tenroman;2169
Tetsecyrillic;04B4
Theta;0398
Thook;01AC
Thorn;00DE
Thornsmall;F7FE
Threeroman;2162
Tildesmall;F6FE
Tiwnarmenian;054F
Tlinebelow;1E6E
Tmonospace;FF34
Toarmenian;0539
Tonefive;01BC
Tonesix;0184
Tonetwo;01A7
Tretroflexhook;01AE
Tsecyrillic;0426
Tshecyrillic;040B
Tsmall;F774
Twelveroman;216B
Tworoman;2161
U;0055
Uacute;00DA
Uacutesmall;F7FA
Ubreve;016C
Ucaron;01D3
Ucircle;24CA
Ucircumflex;00DB
Ucircumflexbelow;1E76
Ucircumflexsmall;F7FB
Ucyrillic;0423
Udblacute;0170
Udblgrave;0214
Udieresis;00DC
Udieresisacute;01D7
Udieresisbelow;1E72
Udieresiscaron;01D9
Udieresiscyrillic;04F0
Udieresisgrave;01DB
Udieresismacron;01D5
Udieresissmall;F7FC
Udotbelow;1EE4
Ugrave;00D9
Ugravesmall;F7F9
Uhookabove;1EE6
Uhorn;01AF
Uhornacute;1EE8
Uhorndotbelow;1EF0
Uhorngrave;1EEA
Uhornhookabove;1EEC
Uhorntilde;1EEE
Uhungarumlaut;0170
Uhungarumlautcyrillic;04F2
Uinvertedbreve;0216
Ukcyrillic;0478
Umacron;016A
Umacroncyrillic;04EE
Umacrondieresis;1E7A
Umonospace;FF35
Uogonek;0172
Upsilon;03A5
Upsilon1;03D2
Upsilonacutehooksymbolgreek;03D3
Upsilonafrican;01B1
Upsilondieresis;03AB
Upsilondieresishooksymbolgreek;03D4
Upsilonhooksymbol;03D2
Upsilontonos;038E
Uring;016E
Ushortcyrillic;040E
Usmall;F775
Ustraightcyrillic;04AE
Ustraightstrokecyrillic;04B0
Utilde;0168
Utildeacute;1E78
Utildebelow;1E74
V;0056
Vcircle;24CB
Vdotbelow;1E7E
Vecyrillic;0412
Vewarmenian;054E
Vhook;01B2
Vmonospace;FF36
Voarmenian;0548
Vsmall;F776
Vtilde;1E7C
W;0057
Wacute;1E82
Wcircle;24CC
Wcircumflex;0174
Wdieresis;1E84
Wdotaccent;1E86
Wdotbelow;1E88
Wgrave;1E80
Wmonospace;FF37
Wsmall;F777
X;0058
Xcircle;24CD
Xdieresis;1E8C
Xdotaccent;1E8A
Xeharmenian;053D
Xi;039E
Xmonospace;FF38
Xsmall;F778
Y;0059
Yacute;00DD
Yacutesmall;F7FD
Yatcyrillic;0462
Ycircle;24CE
Ycircumflex;0176
Ydieresis;0178
Ydieresissmall;F7FF
Ydotaccent;1E8E
Ydotbelow;1EF4
Yericyrillic;042B
Yerudieresiscyrillic;04F8
Ygrave;1EF2
Yhook;01B3
Yhookabove;1EF6
Yiarmenian;0545
Yicyrillic;0407
Yiwnarmenian;0552
Ymonospace;FF39
Ysmall;F779
Ytilde;1EF8
Yusbigcyrillic;046A
Yusbigiotifiedcyrillic;046C
Yuslittlecyrillic;0466
Yuslittleiotifiedcyrillic;0468
Z;005A
Zaarmenian;0536
Zacute;0179
Zcaron;017D
Zcaronsmall;F6FF
Zcircle;24CF
Zcircumflex;1E90
Zdot;017B
Zdotaccent;017B
Zdotbelow;1E92
Zecyrillic;0417
Zedescendercyrillic;0498
Zedieresiscyrillic;04DE
Zeta;0396
Zhearmenian;053A
Zhebrevecyrillic;04C1
Zhecyrillic;0416
Zhedescendercyrillic;0496
Zhedieresiscyrillic;04DC
Zlinebelow;1E94
Zmonospace;FF3A
Zsmall;F77A
Zstroke;01B5
a;0061
aabengali;0986
aacute;00E1
aadeva;0906
aagujarati;0A86
aagurmukhi;0A06
aamatragurmukhi;0A3E
aarusquare;3303
aavowelsignbengali;09BE
aavowelsigndeva;093E
aavowelsigngujarati;0ABE
abbreviationmarkarmenian;055F
abbreviationsigndeva;0970
abengali;0985
abopomofo;311A
abreve;0103
abreveacute;1EAF
abrevecyrillic;04D1
abrevedotbelow;1EB7
abrevegrave;1EB1
abrevehookabove;1EB3
abrevetilde;1EB5
acaron;01CE
acircle;24D0
acircumflex;00E2
acircumflexacute;1EA5
acircumflexdotbelow;1EAD
acircumflexgrave;1EA7
acircumflexhookabove;1EA9
acircumflextilde;1EAB
acute;00B4
acutebelowcmb;0317
acutecmb;0301
acutecomb;0301
acutedeva;0954
acutelowmod;02CF
acutetonecmb;0341
acyrillic;0430
adblgrave;0201
addakgurmukhi;0A71
adeva;0905
adieresis;00E4
adieresiscyrillic;04D3
adieresismacron;01DF
adotbelow;1EA1
adotmacron;01E1
ae;00E6
aeacute;01FD
aekorean;3150
aemacron;01E3
afii00208;2015
afii08941;20A4
afii10017;0410
afii10018;0411
afii10019;0412
afii10020;0413
afii10021;0414
afii10022;0415
afii10023;0401
afii10024;0416
afii10025;0417
afii10026;0418
afii10027;0419
afii10028;041A
afii10029;041B
afii10030;041C
afii10031;041D
afii10032;041E
afii10033;041F
afii10034;0420
afii10035;0421
afii10036;0422
afii10037;0423
afii10038;0424
afii10039;0425
afii10040;0426
afii10041;0427
afii10042;0428
afii10043;0429
afii10044;042A
afii10045;042B
afii10046;042C
afii10047;042D
afii10048;042E
afii10049;042F
afii10050;0490
afii10051;0402
afii10052;0403
afii10053;0404
afii10054;0405
afii10055;0406
afii10056;0407
afii10057;0408
afii10058;0409
afii10059;040A
afii10060;040B
afii10061;040C
afii10062;040E
afii10063;F6C4
afii10064;F6C5
afii10065;0430
afii10066;0431
afii10067;0432
afii10068;0433
afii10069;0434
afii10070;0435
afii10071;0451
afii10072;0436
afii10073;0437
afii10074;0438
afii10075;0439
afii10076;043A
afii10077;043B
afii10078;043C
afii10079;043D
afii10080;043E
afii10081;043F
afii10082;0440
afii10083;0441
afii10084;0442
afii10085;0443
afii10086;0444
afii10087;0445
afii10088;0446
afii10089;0447
afii10090;0448
afii10091;0449
afii10092;044A
afii10093;044B
afii10094;044C
afii10095;044D
afii10096;044E
afii10097;044F
afii10098;0491
afii10099;0452
afii10100;0453
afii10101;0454
afii10102;0455
afii10103;0456
afii10104;0457
afii10105;0458
afii10106;0459
afii10107;045A
afii10108;045B
afii10109;045C
afii10110;045E
afii10145;040F
afii10146;0462
afii10147;0472
afii10148;0474
afii10192;F6C6
afii10193;045F
afii10194;0463
afii10195;0473
afii10196;0475
afii10831;F6C7
afii10832;F6C8
afii10846;04D9
afii299;200E
afii300;200F
afii301;200D
afii57381;066A
afii57388;060C
afii57392;0660
afii57393;0661
afii57394;0662
afii57395;0663
afii57396;0664
afii57397;0665
afii57398;0666
afii57399;0667
afii57400;0668
afii57401;0669
afii57403;061B
afii57407;061F
afii57409;0621
afii57410;0622
afii57411;0623
afii57412;0624
afii57413;0625
afii57414;0626
afii57415;0627
afii57416;0628
afii57417;0629
afii57418;062A
afii57419;062B
afii57420;062C
afii57421;062D
afii57422;062E
afii57423;062F
afii57424;0630
afii57425;0631
afii57426;0632
afii57427;0633
afii57428;0634
afii57429;0635
afii57430;0636
afii57431;0637
afii57432;0638
afii57433;0639
afii57434;063A
afii57440;0640
afii57441;0641
afii57442;0642
afii57443;0643
afii57444;0644
afii57445;0645
afii57446;0646
afii57448;0648
afii57449;0649
afii57450;064A
afii57451;064B
afii57452;064C
afii57453;064D
afii57454;064E
afii57455;064F
afii57456;0650
afii57457;0651
afii57458;0652
afii57470;0647
afii57505;06A4
afii57506;067E
afii57507;0686
afii57508;0698
afii57509;06AF
afii57511;0679
afii57512;0688
afii57513;0691
afii57514;06BA
afii57519;06D2
afii57534;06D5
afii57636;20AA
afii57645;05BE
afii57658;05C3
afii57664;05D0
afii57665;05D1
afii57666;05D2
afii57667;05D3
afii57668;05D4
afii57669;05D5
afii57670;05D6
afii57671;05D7
afii57672;05D8
afii57673;05D9
afii57674;05DA
afii57675;05DB
afii57676;05DC
afii57677;05DD
afii57678;05DE
afii57679;05DF
afii57680;05E0
afii57681;05E1
afii57682;05E2
afii57683;05E3
afii57684;05E4
afii57685;05E5
afii57686;05E6
afii57687;05E7
afii57688;05E8
afii57689;05E9
afii57690;05EA
afii57694;FB2A
afii57695;FB2B
afii57700;FB4B
afii57705;FB1F
afii57716;05F0
afii57717;05F1
afii57718;05F2
afii57723;FB35
afii57793;05B4
afii57794;05B5
afii57795;05B6
afii57796;05BB
afii57797;05B8
afii57798;05B7
afii57799;05B0
afii57800;05B2
afii57801;05B1
afii57802;05B3
afii57803;05C2
afii57804;05C1
afii57806;05B9
afii57807;05BC
afii57839;05BD
afii57841;05BF
afii57842;05C0
afii57929;02BC
afii61248;2105
afii61289;2113
afii61352;2116
afii61573;202C
afii61574;202D
afii61575;202E
afii61664;200C
afii63167;066D
afii64937;02BD
agrave;00E0
agujarati;0A85
agurmukhi;0A05
ahiragana;3042
ahookabove;1EA3
aibengali;0990
aibopomofo;311E
aideva;0910
aiecyrillic;04D5
aigujarati;0A90
aigurmukhi;0A10
aimatragurmukhi;0A48
ainarabic;0639
ainfinalarabic;FECA
aininitialarabic;FECB
ainmedialarabic;FECC
ainvertedbreve;0203
aivowelsignbengali;09C8
aivowelsigndeva;0948
aivowelsigngujarati;0AC8
akatakana;30A2
akatakanahalfwidth;FF71
akorean;314F
alef;05D0
alefarabic;0627
alefdageshhebrew;FB30
aleffinalarabic;FE8E
alefhamzaabovearabic;0623
alefhamzaabovefinalarabic;FE84
alefhamzabelowarabic;0625
alefhamzabelowfinalarabic;FE88
alefhebrew;05D0
aleflamedhebrew;FB4F
alefmaddaabovearabic;0622
alefmaddaabovefinalarabic;FE82
alefmaksuraarabic;0649
alefmaksurafinalarabic;FEF0
alefmaksurainitialarabic;FEF3
alefmaksuramedialarabic;FEF4
alefpatahhebrew;FB2E
alefqamatshebrew;FB2F
aleph;2135
allequal;224C
alpha;03B1
alphatonos;03AC
amacron;0101
amonospace;FF41
ampersand;0026
ampersandmonospace;FF06
ampersandsmall;F726
amsquare;33C2
anbopomofo;3122
angbopomofo;3124
angkhankhuthai;0E5A
angle;2220
anglebracketleft;3008
anglebracketleftvertical;FE3F
anglebracketright;3009
anglebracketrightvertical;FE40
angleleft;2329
angleright;232A
angstrom;212B
anoteleia;0387
anudattadeva;0952
anusvarabengali;0982
anusvaradeva;0902
anusvaragujarati;0A82
aogonek;0105
apaatosquare;3300
aparen;249C
apostrophearmenian;055A
apostrophemod;02BC
apple;F8FF
approaches;2250
approxequal;2248
approxequalorimage;2252
approximatelyequal;2245
araeaekorean;318E
araeakorean;318D
arc;2312
arighthalfring;1E9A
aring;00E5
aringacute;01FB
aringbelow;1E01
arrowboth;2194
arrowdashdown;21E3
arrowdashleft;21E0
arrowdashright;21E2
arrowdashup;21E1
arrowdblboth;21D4
arrowdbldown;21D3
arrowdblleft;21D0
arrowdblright;21D2
arrowdblup;21D1
arrowdown;2193
arrowdownleft;2199
arrowdownright;2198
arrowdownwhite;21E9
arrowheaddownmod;02C5
arrowheadleftmod;02C2
arrowheadrightmod;02C3
arrowheadupmod;02C4
arrowhorizex;F8E7
arrowleft;2190
arrowleftdbl;21D0
arrowleftdblstroke;21CD
arrowleftoverright;21C6
arrowleftwhite;21E6
arrowright;2192
arrowrightdblstroke;21CF
arrowrightheavy;279E
arrowrightoverleft;21C4
arrowrightwhite;21E8
arrowtableft;21E4
arrowtabright;21E5
arrowup;2191
arrowupdn;2195
arrowupdnbse;21A8
arrowupdownbase;21A8
arrowupleft;2196
arrowupleftofdown;21C5
arrowupright;2197
arrowupwhite;21E7
arrowvertex;F8E6
asciicircum;005E
asciicircummonospace;FF3E
asciitilde;007E
asciitildemonospace;FF5E
ascript;0251
ascriptturned;0252
asmallhiragana;3041
asmallkatakana;30A1
asmallkatakanahalfwidth;FF67
asterisk;002A
asteriskaltonearabic;066D
asteriskarabic;066D
asteriskmath;2217
asteriskmonospace;FF0A
asterisksmall;FE61
asterism;2042
asuperior;F6E9
asymptoticallyequal;2243
at;0040
atilde;00E3
atmonospace;FF20
atsmall;FE6B
aturned;0250
aubengali;0994
aubopomofo;3120
audeva;0914
augujarati;0A94
augurmukhi;0A14
aulengthmarkbengali;09D7
aumatragurmukhi;0A4C
auvowelsignbengali;09CC
auvowelsigndeva;094C
auvowelsigngujarati;0ACC
avagrahadeva;093D
aybarmenian;0561
ayin;05E2
ayinaltonehebrew;FB20
ayinhebrew;05E2
b;0062
babengali;09AC
backslash;005C
backslashmonospace;FF3C
badeva;092C
bagujarati;0AAC
bagurmukhi;0A2C
bahiragana;3070
bahtthai;0E3F
bakatakana;30D0
bar;007C
barmonospace;FF5C
bbopomofo;3105
bcircle;24D1
bdotaccent;1E03
bdotbelow;1E05
beamedsixteenthnotes;266C
because;2235
becyrillic;0431
beharabic;0628
behfinalarabic;FE90
behinitialarabic;FE91
behiragana;3079
behmedialarabic;FE92
behmeeminitialarabic;FC9F
behmeemisolatedarabic;FC08
behnoonfinalarabic;FC6D
bekatakana;30D9
benarmenian;0562
bet;05D1
beta;03B2
betasymbolgreek;03D0
betdagesh;FB31
betdageshhebrew;FB31
bethebrew;05D1
betrafehebrew;FB4C
bhabengali;09AD
bhadeva;092D
bhagujarati;0AAD
bhagurmukhi;0A2D
bhook;0253
bihiragana;3073
bikatakana;30D3
bilabialclick;0298
bindigurmukhi;0A02
birusquare;3331
blackcircle;25CF
blackdiamond;25C6
blackdownpointingtriangle;25BC
blackleftpointingpointer;25C4
blackleftpointingtriangle;25C0
blacklenticularbracketleft;3010
blacklenticularbracketleftvertical;FE3B
blacklenticularbracketright;3011
blacklenticularbracketrightvertical;FE3C
blacklowerlefttriangle;25E3
blacklowerrighttriangle;25E2
blackrectangle;25AC
blackrightpointingpointer;25BA
blackrightpointingtriangle;25B6
blacksmallsquare;25AA
blacksmilingface;263B
blacksquare;25A0
blackstar;2605
blackupperlefttriangle;25E4
blackupperrighttriangle;25E5
blackuppointingsmalltriangle;25B4
blackuppointingtriangle;25B2
blank;2423
blinebelow;1E07
block;2588
bmonospace;FF42
bobaimaithai;0E1A
bohiragana;307C
bokatakana;30DC
bparen;249D
bqsquare;33C3
braceex;F8F4
braceleft;007B
braceleftbt;F8F3
braceleftmid;F8F2
braceleftmonospace;FF5B
braceleftsmall;FE5B
bracelefttp;F8F1
braceleftvertical;FE37
braceright;007D
bracerightbt;F8FE
bracerightmid;F8FD
bracerightmonospace;FF5D
bracerightsmall;FE5C
bracerighttp;F8FC
bracerightvertical;FE38
bracketleft;005B
bracketleftbt;F8F0
bracketleftex;F8EF
bracketleftmonospace;FF3B
bracketlefttp;F8EE
bracketright;005D
bracketrightbt;F8FB
bracketrightex;F8FA
bracketrightmonospace;FF3D
bracketrighttp;F8F9
breve;02D8
brevebelowcmb;032E
brevecmb;0306
breveinvertedbelowcmb;032F
breveinvertedcmb;0311
breveinverteddoublecmb;0361
bridgebelowcmb;032A
bridgeinvertedbelowcmb;033A
brokenbar;00A6
bstroke;0180
bsuperior;F6EA
btopbar;0183
buhiragana;3076
bukatakana;30D6
bullet;2022
bulletinverse;25D8
bulletoperator;2219
bullseye;25CE
c;0063
caarmenian;056E
cabengali;099A
cacute;0107
cadeva;091A
cagujarati;0A9A
cagurmukhi;0A1A
calsquare;3388
candrabindubengali;0981
candrabinducmb;0310
candrabindudeva;0901
candrabindugujarati;0A81
capslock;21EA
careof;2105
caron;02C7
caronbelowcmb;032C
caroncmb;030C
carriagereturn;21B5
cbopomofo;3118
ccaron;010D
ccedilla;00E7
ccedillaacute;1E09
ccircle;24D2
ccircumflex;0109
ccurl;0255
cdot;010B
cdotaccent;010B
cdsquare;33C5
cedilla;00B8
cedillacmb;0327
cent;00A2
centigrade;2103
centinferior;F6DF
centmonospace;FFE0
centoldstyle;F7A2
centsuperior;F6E0
chaarmenian;0579
chabengali;099B
chadeva;091B
chagujarati;0A9B
chagurmukhi;0A1B
chbopomofo;3114
cheabkhasiancyrillic;04BD
checkmark;2713
checyrillic;0447
chedescenderabkhasiancyrillic;04BF
chedescendercyrillic;04B7
chedieresiscyrillic;04F5
cheharmenian;0573
chekhakassiancyrillic;04CC
cheverticalstrokecyrillic;04B9
chi;03C7
chieuchacirclekorean;3277
chieuchaparenkorean;3217
chieuchcirclekorean;3269
chieuchkorean;314A
chieuchparenkorean;3209
chochangthai;0E0A
chochanthai;0E08
chochingthai;0E09
chochoethai;0E0C
chook;0188
cieucacirclekorean;3276
cieucaparenkorean;3216
cieuccirclekorean;3268
cieuckorean;3148
cieucparenkorean;3208
cieucuparenkorean;321C
circle;25CB
circlemultiply;2297
circleot;2299
circleplus;2295
circlepostalmark;3036
circlewithlefthalfblack;25D0
circlewithrighthalfblack;25D1
circumflex;02C6
circumflexbelowcmb;032D
circumflexcmb;0302
clear;2327
clickalveolar;01C2
clickdental;01C0
clicklateral;01C1
clickretroflex;01C3
club;2663
clubsuitblack;2663
clubsuitwhite;2667
cmcubedsquare;33A4
cmonospace;FF43
cmsquaredsquare;33A0
coarmenian;0581
colon;003A
colonmonetary;20A1
colonmonospace;FF1A
colonsign;20A1
colonsmall;FE55
colontriangularhalfmod;02D1
colontriangularmod;02D0
comma;002C
commaabovecmb;0313
commaaboverightcmb;0315
commaaccent;F6C3
commaarabic;060C
commaarmenian;055D
commainferior;F6E1
commamonospace;FF0C
commareversedabovecmb;0314
commareversedmod;02BD
commasmall;FE50
commasuperior;F6E2
commaturnedabovecmb;0312
commaturnedmod;02BB
compass;263C
congruent;2245
contourintegral;222E
control;2303
controlACK;0006
controlBEL;0007
controlBS;0008
controlCAN;0018
controlCR;000D
controlDC1;0011
controlDC2;0012
controlDC3;0013
controlDC4;0014
controlDEL;007F
controlDLE;0010
controlEM;0019
controlENQ;0005
controlEOT;0004
controlESC;001B
controlETB;0017
controlETX;0003
controlFF;000C
controlFS;001C
controlGS;001D
controlHT;0009
controlLF;000A
controlNAK;0015
controlRS;001E
controlSI;000F
controlSO;000E
controlSOT;0002
controlSTX;0001
controlSUB;001A
controlSYN;0016
controlUS;001F
controlVT;000B
copyright;00A9
copyrightsans;F8E9
copyrightserif;F6D9
cornerbracketleft;300C
cornerbracketlefthalfwidth;FF62
cornerbracketleftvertical;FE41
cornerbracketright;300D
cornerbracketrighthalfwidth;FF63
cornerbracketrightvertical;FE42
corporationsquare;337F
cosquare;33C7
coverkgsquare;33C6
cparen;249E
cruzeiro;20A2
cstretched;0297
curlyand;22CF
curlyor;22CE
currency;00A4
cyrBreve;F6D1
cyrFlex;F6D2
cyrbreve;F6D4
cyrflex;F6D5
d;0064
daarmenian;0564
dabengali;09A6
dadarabic;0636
dadeva;0926
dadfinalarabic;FEBE
dadinitialarabic;FEBF
dadmedialarabic;FEC0
dagesh;05BC
dageshhebrew;05BC
dagger;2020
daggerdbl;2021
dagujarati;0AA6
dagurmukhi;0A26
dahiragana;3060
dakatakana;30C0
dalarabic;062F
dalet;05D3
daletdagesh;FB33
daletdageshhebrew;FB33
dalethatafpatah;05D3 05B2
dalethatafpatahhebrew;05D3 05B2
dalethatafsegol;05D3 05B1
dalethatafsegolhebrew;05D3 05B1
dalethebrew;05D3
dalethiriq;05D3 05B4
dalethiriqhebrew;05D3 05B4
daletholam;05D3 05B9
daletholamhebrew;05D3 05B9
daletpatah;05D3 05B7
daletpatahhebrew;05D3 05B7
daletqamats;05D3 05B8
daletqamatshebrew;05D3 05B8
daletqubuts;05D3 05BB
daletqubutshebrew;05D3 05BB
daletsegol;05D3 05B6
daletsegolhebrew;05D3 05B6
daletsheva;05D3 05B0
daletshevahebrew;05D3 05B0
dalettsere;05D3 05B5
dalettserehebrew;05D3 05B5
dalfinalarabic;FEAA
dammaarabic;064F
dammalowarabic;064F
dammatanaltonearabic;064C
dammatanarabic;064C
danda;0964
dargahebrew;05A7
dargalefthebrew;05A7
dasiapneumatacyrilliccmb;0485
dblGrave;F6D3
dblanglebracketleft;300A
dblanglebracketleftvertical;FE3D
dblanglebracketright;300B
dblanglebracketrightvertical;FE3E
dblarchinvertedbelowcmb;032B
dblarrowleft;21D4
dblarrowright;21D2
dbldanda;0965
dblgrave;F6D6
dblgravecmb;030F
dblintegral;222C
dbllowline;2017
dbllowlinecmb;0333
dbloverlinecmb;033F
dblprimemod;02BA
dblverticalbar;2016
dblverticallineabovecmb;030E
dbopomofo;3109
dbsquare;33C8
dcaron;010F
dcedilla;1E11
dcircle;24D3
dcircumflexbelow;1E13
dcroat;0111
ddabengali;09A1
ddadeva;0921
ddagujarati;0AA1
ddagurmukhi;0A21
ddalarabic;0688
ddalfinalarabic;FB89
dddhadeva;095C
ddhabengali;09A2
ddhadeva;0922
ddhagujarati;0AA2
ddhagurmukhi;0A22
ddotaccent;1E0B
ddotbelow;1E0D
decimalseparatorarabic;066B
decimalseparatorpersian;066B
decyrillic;0434
degree;00B0
dehihebrew;05AD
dehiragana;3067
deicoptic;03EF
dekatakana;30C7
deleteleft;232B
deleteright;2326
delta;03B4
deltaturned;018D
denominatorminusonenumeratorbengali;09F8
dezh;02A4
dhabengali;09A7
dhadeva;0927
dhagujarati;0AA7
dhagurmukhi;0A27
dhook;0257
dialytikatonos;0385
dialytikatonoscmb;0344
diamond;2666
diamondsuitwhite;2662
dieresis;00A8
dieresisacute;F6D7
dieresisbelowcmb;0324
dieresiscmb;0308
dieresisgrave;F6D8
dieresistonos;0385
dihiragana;3062
dikatakana;30C2
dittomark;3003
divide;00F7
divides;2223
divisionslash;2215
djecyrillic;0452
dkshade;2593
dlinebelow;1E0F
dlsquare;3397
dmacron;0111
dmonospace;FF44
dnblock;2584
dochadathai;0E0E
dodekthai;0E14
dohiragana;3069
dokatakana;30C9
dollar;0024
dollarinferior;F6E3
dollarmonospace;FF04
dollaroldstyle;F724
dollarsmall;FE69
dollarsuperior;F6E4
dong;20AB
dorusquare;3326
dotaccent;02D9
dotaccentcmb;0307
dotbelowcmb;0323
dotbelowcomb;0323
dotkatakana;30FB
dotlessi;0131
dotlessj;F6BE
dotlessjstrokehook;0284
dotmath;22C5
dottedcircle;25CC
doubleyodpatah;FB1F
doubleyodpatahhebrew;FB1F
downtackbelowcmb;031E
downtackmod;02D5
dparen;249F
dsuperior;F6EB
dtail;0256
dtopbar;018C
duhiragana;3065
dukatakana;30C5
dz;01F3
dzaltone;02A3
dzcaron;01C6
dzcurl;02A5
dzeabkhasiancyrillic;04E1
dzecyrillic;0455
dzhecyrillic;045F
e;0065
eacute;00E9
earth;2641
ebengali;098F
ebopomofo;311C
ebreve;0115
ecandradeva;090D
ecandragujarati;0A8D
ecandravowelsigndeva;0945
ecandravowelsigngujarati;0AC5
ecaron;011B
ecedillabreve;1E1D
echarmenian;0565
echyiwnarmenian;0587
ecircle;24D4
ecircumflex;00EA
ecircumflexacute;1EBF
ecircumflexbelow;1E19
ecircumflexdotbelow;1EC7
ecircumflexgrave;1EC1
ecircumflexhookabove;1EC3
ecircumflextilde;1EC5
ecyrillic;0454
edblgrave;0205
edeva;090F
edieresis;00EB
edot;0117
edotaccent;0117
edotbelow;1EB9
eegurmukhi;0A0F
eematragurmukhi;0A47
efcyrillic;0444
egrave;00E8
egujarati;0A8F
eharmenian;0567
ehbopomofo;311D
ehiragana;3048
ehookabove;1EBB
eibopomofo;311F
eight;0038
eightarabic;0668
eightbengali;09EE
eightcircle;2467
eightcircleinversesansserif;2791
eightdeva;096E
eighteencircle;2471
eighteenparen;2485
eighteenperiod;2499
eightgujarati;0AEE
eightgurmukhi;0A6E
eighthackarabic;0668
eighthangzhou;3028
eighthnotebeamed;266B
eightideographicparen;3227
eightinferior;2088
eightmonospace;FF18
eightoldstyle;F738
eightparen;247B
eightperiod;248F
eightpersian;06F8
eightroman;2177
eightsuperior;2078
eightthai;0E58
einvertedbreve;0207
eiotifiedcyrillic;0465
ekatakana;30A8
ekatakanahalfwidth;FF74
ekonkargurmukhi;0A74
ekorean;3154
elcyrillic;043B
element;2208
elevencircle;246A
elevenparen;247E
elevenperiod;2492
elevenroman;217A
ellipsis;2026
ellipsisvertical;22EE
emacron;0113
emacronacute;1E17
emacrongrave;1E15
emcyrillic;043C
emdash;2014
emdashvertical;FE31
emonospace;FF45
emphasismarkarmenian;055B
emptyset;2205
enbopomofo;3123
encyrillic;043D
endash;2013
endashvertical;FE32
endescendercyrillic;04A3
eng;014B
engbopomofo;3125
enghecyrillic;04A5
enhookcyrillic;04C8
enspace;2002
eogonek;0119
eokorean;3153
eopen;025B
eopenclosed;029A
eopenreversed;025C
eopenreversedclosed;025E
eopenreversedhook;025D
eparen;24A0
epsilon;03B5
epsilontonos;03AD
equal;003D
equalmonospace;FF1D
equalsmall;FE66
equalsuperior;207C
equivalence;2261
erbopomofo;3126
ercyrillic;0440
ereversed;0258
ereversedcyrillic;044D
escyrillic;0441
esdescendercyrillic;04AB
esh;0283
eshcurl;0286
eshortdeva;090E
eshortvowelsigndeva;0946
eshreversedloop;01AA
eshsquatreversed;0285
esmallhiragana;3047
esmallkatakana;30A7
esmallkatakanahalfwidth;FF6A
estimated;212E
esuperior;F6EC
eta;03B7
etarmenian;0568
etatonos;03AE
eth;00F0
etilde;1EBD
etildebelow;1E1B
etnahtafoukhhebrew;0591
etnahtafoukhlefthebrew;0591
etnahtahebrew;0591
etnahtalefthebrew;0591
eturned;01DD
eukorean;3161
euro;20AC
evowelsignbengali;09C7
evowelsigndeva;0947
evowelsigngujarati;0AC7
exclam;0021
exclamarmenian;055C
exclamdbl;203C
exclamdown;00A1
exclamdownsmall;F7A1
exclammonospace;FF01
exclamsmall;F721
existential;2203
ezh;0292
ezhcaron;01EF
ezhcurl;0293
ezhreversed;01B9
ezhtail;01BA
f;0066
fadeva;095E
fagurmukhi;0A5E
fahrenheit;2109
fathaarabic;064E
fathalowarabic;064E
fathatanarabic;064B
fbopomofo;3108
fcircle;24D5
fdotaccent;1E1F
feharabic;0641
feharmenian;0586
fehfinalarabic;FED2
fehinitialarabic;FED3
fehmedialarabic;FED4
feicoptic;03E5
female;2640
ff;FB00
ffi;FB03
ffl;FB04
fi;FB01
fifteencircle;246E
fifteenparen;2482
fifteenperiod;2496
figuredash;2012
filledbox;25A0
filledrect;25AC
finalkaf;05DA
finalkafdagesh;FB3A
finalkafdageshhebrew;FB3A
finalkafhebrew;05DA
finalkafqamats;05DA 05B8
finalkafqamatshebrew;05DA 05B8
finalkafsheva;05DA 05B0
finalkafshevahebrew;05DA 05B0
finalmem;05DD
finalmemhebrew;05DD
finalnun;05DF
finalnunhebrew;05DF
finalpe;05E3
finalpehebrew;05E3
finaltsadi;05E5
finaltsadihebrew;05E5
firsttonechinese;02C9
fisheye;25C9
fitacyrillic;0473
five;0035
fivearabic;0665
fivebengali;09EB
fivecircle;2464
fivecircleinversesansserif;278E
fivedeva;096B
fiveeighths;215D
fivegujarati;0AEB
fivegurmukhi;0A6B
fivehackarabic;0665
fivehangzhou;3025
fiveideographicparen;3224
fiveinferior;2085
fivemonospace;FF15
fiveoldstyle;F735
fiveparen;2478
fiveperiod;248C
fivepersian;06F5
fiveroman;2174
fivesuperior;2075
fivethai;0E55
fl;FB02
florin;0192
fmonospace;FF46
fmsquare;3399
fofanthai;0E1F
fofathai;0E1D
fongmanthai;0E4F
forall;2200
four;0034
fourarabic;0664
fourbengali;09EA
fourcircle;2463
fourcircleinversesansserif;278D
fourdeva;096A
fourgujarati;0AEA
fourgurmukhi;0A6A
fourhackarabic;0664
fourhangzhou;3024
fourideographicparen;3223
fourinferior;2084
fourmonospace;FF14
fournumeratorbengali;09F7
fouroldstyle;F734
fourparen;2477
fourperiod;248B
fourpersian;06F4
fourroman;2173
foursuperior;2074
fourteencircle;246D
fourteenparen;2481
fourteenperiod;2495
fourthai;0E54
fourthtonechinese;02CB
fparen;24A1
fraction;2044
franc;20A3
g;0067
gabengali;0997
gacute;01F5
gadeva;0917
gafarabic;06AF
gaffinalarabic;FB93
gafinitialarabic;FB94
gafmedialarabic;FB95
gagujarati;0A97
gagurmukhi;0A17
gahiragana;304C
gakatakana;30AC
gamma;03B3
gammalatinsmall;0263
gammasuperior;02E0
gangiacoptic;03EB
gbopomofo;310D
gbreve;011F
gcaron;01E7
gcedilla;0123
gcircle;24D6
gcircumflex;011D
gcommaaccent;0123
gdot;0121
gdotaccent;0121
gecyrillic;0433
gehiragana;3052
gekatakana;30B2
geometricallyequal;2251
gereshaccenthebrew;059C
gereshhebrew;05F3
gereshmuqdamhebrew;059D
germandbls;00DF
gershayimaccenthebrew;059E
gershayimhebrew;05F4
getamark;3013
ghabengali;0998
ghadarmenian;0572
ghadeva;0918
ghagujarati;0A98
ghagurmukhi;0A18
ghainarabic;063A
ghainfinalarabic;FECE
ghaininitialarabic;FECF
ghainmedialarabic;FED0
ghemiddlehookcyrillic;0495
ghestrokecyrillic;0493
gheupturncyrillic;0491
ghhadeva;095A
ghhagurmukhi;0A5A
ghook;0260
ghzsquare;3393
gihiragana;304E
gikatakana;30AE
gimarmenian;0563
gimel;05D2
gimeldagesh;FB32
gimeldageshhebrew;FB32
gimelhebrew;05D2
gjecyrillic;0453
glottalinvertedstroke;01BE
glottalstop;0294
glottalstopinverted;0296
glottalstopmod;02C0
glottalstopreversed;0295
glottalstopreversedmod;02C1
glottalstopreversedsuperior;02E4
glottalstopstroke;02A1
glottalstopstrokereversed;02A2
gmacron;1E21
gmonospace;FF47
gohiragana;3054
gokatakana;30B4
gparen;24A2
gpasquare;33AC
gradient;2207
grave;0060
gravebelowcmb;0316
gravecmb;0300
gravecomb;0300
gravedeva;0953
gravelowmod;02CE
gravemonospace;FF40
gravetonecmb;0340
greater;003E
greaterequal;2265
greaterequalorless;22DB
greatermonospace;FF1E
greaterorequivalent;2273
greaterorless;2277
greateroverequal;2267
greatersmall;FE65
gscript;0261
gstroke;01E5
guhiragana;3050
guillemotleft;00AB
guillemotright;00BB
guilsinglleft;2039
guilsinglright;203A
gukatakana;30B0
guramusquare;3318
gysquare;33C9
h;0068
haabkhasiancyrillic;04A9
haaltonearabic;06C1
habengali;09B9
hadescendercyrillic;04B3
hadeva;0939
hagujarati;0AB9
hagurmukhi;0A39
haharabic;062D
hahfinalarabic;FEA2
hahinitialarabic;FEA3
hahiragana;306F
hahmedialarabic;FEA4
haitusquare;332A
hakatakana;30CF
hakatakanahalfwidth;FF8A
halantgurmukhi;0A4D
hamzaarabic;0621
hamzadammaarabic;0621 064F
hamzadammatanarabic;0621 064C
hamzafathaarabic;0621 064E
hamzafathatanarabic;0621 064B
hamzalowarabic;0621
hamzalowkasraarabic;0621 0650
hamzalowkasratanarabic;0621 064D
hamzasukunarabic;0621 0652
hangulfiller;3164
hardsigncyrillic;044A
harpoonleftbarbup;21BC
harpoonrightbarbup;21C0
hasquare;33CA
hatafpatah;05B2
hatafpatah16;05B2
hatafpatah23;05B2
hatafpatah2f;05B2
hatafpatahhebrew;05B2
hatafpatahnarrowhebrew;05B2
hatafpatahquarterhebrew;05B2
hatafpatahwidehebrew;05B2
hatafqamats;05B3
hatafqamats1b;05B3
hatafqamats28;05B3
hatafqamats34;05B3
hatafqamatshebrew;05B3
hatafqamatsnarrowhebrew;05B3
hatafqamatsquarterhebrew;05B3
hatafqamatswidehebrew;05B3
hatafsegol;05B1
hatafsegol17;05B1
hatafsegol24;05B1
hatafsegol30;05B1
hatafsegolhebrew;05B1
hatafsegolnarrowhebrew;05B1
hatafsegolquarterhebrew;05B1
hatafsegolwidehebrew;05B1
hbar;0127
hbopomofo;310F
hbrevebelow;1E2B
hcedilla;1E29
hcircle;24D7
hcircumflex;0125
hdieresis;1E27
hdotaccent;1E23
hdotbelow;1E25
he;05D4
heart;2665
heartsuitblack;2665
heartsuitwhite;2661
hedagesh;FB34
hedageshhebrew;FB34
hehaltonearabic;06C1
heharabic;0647
hehebrew;05D4
hehfinalaltonearabic;FBA7
hehfinalalttwoarabic;FEEA
hehfinalarabic;FEEA
hehhamzaabovefinalarabic;FBA5
hehhamzaaboveisolatedarabic;FBA4
hehinitialaltonearabic;FBA8
hehinitialarabic;FEEB
hehiragana;3078
hehmedialaltonearabic;FBA9
hehmedialarabic;FEEC
heiseierasquare;337B
hekatakana;30D8
hekatakanahalfwidth;FF8D
hekutaarusquare;3336
henghook;0267
herutusquare;3339
het;05D7
hethebrew;05D7
hhook;0266
hhooksuperior;02B1
hieuhacirclekorean;327B
hieuhaparenkorean;321B
hieuhcirclekorean;326D
hieuhkorean;314E
hieuhparenkorean;320D
hihiragana;3072
hikatakana;30D2
hikatakanahalfwidth;FF8B
hiriq;05B4
hiriq14;05B4
hiriq21;05B4
hiriq2d;05B4
hiriqhebrew;05B4
hiriqnarrowhebrew;05B4
hiriqquarterhebrew;05B4
hiriqwidehebrew;05B4
hlinebelow;1E96
hmonospace;FF48
hoarmenian;0570
hohipthai;0E2B
hohiragana;307B
hokatakana;30DB
hokatakanahalfwidth;FF8E
holam;05B9
holam19;05B9
holam26;05B9
holam32;05B9
holamhebrew;05B9
holamnarrowhebrew;05B9
holamquarterhebrew;05B9
holamwidehebrew;05B9
honokhukthai;0E2E
hookabovecomb;0309
hookcmb;0309
hookpalatalizedbelowcmb;0321
hookretroflexbelowcmb;0322
hoonsquare;3342
horicoptic;03E9
horizontalbar;2015
horncmb;031B
hotsprings;2668
house;2302
hparen;24A3
hsuperior;02B0
hturned;0265
huhiragana;3075
huiitosquare;3333
hukatakana;30D5
hukatakanahalfwidth;FF8C
hungarumlaut;02DD
hungarumlautcmb;030B
hv;0195
hyphen;002D
hypheninferior;F6E5
hyphenmonospace;FF0D
hyphensmall;FE63
hyphensuperior;F6E6
hyphentwo;2010
i;0069
iacute;00ED
iacyrillic;044F
ibengali;0987
ibopomofo;3127
ibreve;012D
icaron;01D0
icircle;24D8
icircumflex;00EE
icyrillic;0456
idblgrave;0209
ideographearthcircle;328F
ideographfirecircle;328B
ideographicallianceparen;323F
ideographiccallparen;323A
ideographiccentrecircle;32A5
ideographicclose;3006
ideographiccomma;3001
ideographiccommaleft;FF64
ideographiccongratulationparen;3237
ideographiccorrectcircle;32A3
ideographicearthparen;322F
ideographicenterpriseparen;323D
ideographicexcellentcircle;329D
ideographicfestivalparen;3240
ideographicfinancialcircle;3296
ideographicfinancialparen;3236
ideographicfireparen;322B
ideographichaveparen;3232
ideographichighcircle;32A4
ideographiciterationmark;3005
ideographiclaborcircle;3298
ideographiclaborparen;3238
ideographicleftcircle;32A7
ideographiclowcircle;32A6
ideographicmedicinecircle;32A9
ideographicmetalparen;322E
ideographicmoonparen;322A
ideographicnameparen;3234
ideographicperiod;3002
ideographicprintcircle;329E
ideographicreachparen;3243
ideographicrepresentparen;3239
ideographicresourceparen;323E
ideographicrightcircle;32A8
ideographicsecretcircle;3299
ideographicselfparen;3242
ideographicsocietyparen;3233
ideographicspace;3000
ideographicspecialparen;3235
ideographicstockparen;3231
ideographicstudyparen;323B
ideographicsunparen;3230
ideographicsuperviseparen;323C
ideographicwaterparen;322C
ideographicwoodparen;322D
ideographiczero;3007
ideographmetalcircle;328E
ideographmooncircle;328A
ideographnamecircle;3294
ideographsuncircle;3290
ideographwatercircle;328C
ideographwoodcircle;328D
ideva;0907
idieresis;00EF
idieresisacute;1E2F
idieresiscyrillic;04E5
idotbelow;1ECB
iebrevecyrillic;04D7
iecyrillic;0435
ieungacirclekorean;3275
ieungaparenkorean;3215
ieungcirclekorean;3267
ieungkorean;3147
ieungparenkorean;3207
igrave;00EC
igujarati;0A87
igurmukhi;0A07
ihiragana;3044
ihookabove;1EC9
iibengali;0988
iicyrillic;0438
iideva;0908
iigujarati;0A88
iigurmukhi;0A08
iimatragurmukhi;0A40
iinvertedbreve;020B
iishortcyrillic;0439
iivowelsignbengali;09C0
iivowelsigndeva;0940
iivowelsigngujarati;0AC0
ij;0133
ikatakana;30A4
ikatakanahalfwidth;FF72
ikorean;3163
ilde;02DC
iluyhebrew;05AC
imacron;012B
imacroncyrillic;04E3
imageorapproximatelyequal;2253
imatragurmukhi;0A3F
imonospace;FF49
increment;2206
infinity;221E
iniarmenian;056B
integral;222B
integralbottom;2321
integralbt;2321
integralex;F8F5
integraltop;2320
integraltp;2320
intersection;2229
intisquare;3305
invbullet;25D8
invcircle;25D9
invsmileface;263B
iocyrillic;0451
iogonek;012F
iota;03B9
iotadieresis;03CA
iotadieresistonos;0390
iotalatin;0269
iotatonos;03AF
iparen;24A4
irigurmukhi;0A72
ismallhiragana;3043
ismallkatakana;30A3
ismallkatakanahalfwidth;FF68
issharbengali;09FA
istroke;0268
isuperior;F6ED
iterationhiragana;309D
iterationkatakana;30FD
itilde;0129
itildebelow;1E2D
iubopomofo;3129
iucyrillic;044E
ivowelsignbengali;09BF
ivowelsigndeva;093F
ivowelsigngujarati;0ABF
izhitsacyrillic;0475
izhitsadblgravecyrillic;0477
j;006A
jaarmenian;0571
jabengali;099C
jadeva;091C
jagujarati;0A9C
jagurmukhi;0A1C
jbopomofo;3110
jcaron;01F0
jcircle;24D9
jcircumflex;0135
jcrossedtail;029D
jdotlessstroke;025F
jecyrillic;0458
jeemarabic;062C
jeemfinalarabic;FE9E
jeeminitialarabic;FE9F
jeemmedialarabic;FEA0
jeharabic;0698
jehfinalarabic;FB8B
jhabengali;099D
jhadeva;091D
jhagujarati;0A9D
jhagurmukhi;0A1D
jheharmenian;057B
jis;3004
jmonospace;FF4A
jparen;24A5
jsuperior;02B2
k;006B
kabashkircyrillic;04A1
kabengali;0995
kacute;1E31
kacyrillic;043A
kadescendercyrillic;049B
kadeva;0915
kaf;05DB
kafarabic;0643
kafdagesh;FB3B
kafdageshhebrew;FB3B
kaffinalarabic;FEDA
kafhebrew;05DB
kafinitialarabic;FEDB
kafmedialarabic;FEDC
kafrafehebrew;FB4D
kagujarati;0A95
kagurmukhi;0A15
kahiragana;304B
kahookcyrillic;04C4
kakatakana;30AB
kakatakanahalfwidth;FF76
kappa;03BA
kappasymbolgreek;03F0
kapyeounmieumkorean;3171
kapyeounphieuphkorean;3184
kapyeounpieupkorean;3178
kapyeounssangpieupkorean;3179
karoriisquare;330D
kashidaautoarabic;0640
kashidaautonosidebearingarabic;0640
kasmallkatakana;30F5
kasquare;3384
kasraarabic;0650
kasratanarabic;064D
kastrokecyrillic;049F
katahiraprolongmarkhalfwidth;FF70
kaverticalstrokecyrillic;049D
kbopomofo;310E
kcalsquare;3389
kcaron;01E9
kcedilla;0137
kcircle;24DA
kcommaaccent;0137
kdotbelow;1E33
keharmenian;0584
kehiragana;3051
kekatakana;30B1
kekatakanahalfwidth;FF79
kenarmenian;056F
kesmallkatakana;30F6
kgreenlandic;0138
khabengali;0996
khacyrillic;0445
khadeva;0916
khagujarati;0A96
khagurmukhi;0A16
khaharabic;062E
khahfinalarabic;FEA6
khahinitialarabic;FEA7
khahmedialarabic;FEA8
kheicoptic;03E7
khhadeva;0959
khhagurmukhi;0A59
khieukhacirclekorean;3278
khieukhaparenkorean;3218
khieukhcirclekorean;326A
khieukhkorean;314B
khieukhparenkorean;320A
khokhaithai;0E02
khokhonthai;0E05
khokhuatthai;0E03
khokhwaithai;0E04
khomutthai;0E5B
khook;0199
khorakhangthai;0E06
khzsquare;3391
kihiragana;304D
kikatakana;30AD
kikatakanahalfwidth;FF77
kiroguramusquare;3315
kiromeetorusquare;3316
kirosquare;3314
kiyeokacirclekorean;326E
kiyeokaparenkorean;320E
kiyeokcirclekorean;3260
kiyeokkorean;3131
kiyeokparenkorean;3200
kiyeoksioskorean;3133
kjecyrillic;045C
klinebelow;1E35
klsquare;3398
kmcubedsquare;33A6
kmonospace;FF4B
kmsquaredsquare;33A2
kohiragana;3053
kohmsquare;33C0
kokaithai;0E01
kokatakana;30B3
kokatakanahalfwidth;FF7A
kooposquare;331E
koppacyrillic;0481
koreanstandardsymbol;327F
koroniscmb;0343
kparen;24A6
kpasquare;33AA
ksicyrillic;046F
ktsquare;33CF
kturned;029E
kuhiragana;304F
kukatakana;30AF
kukatakanahalfwidth;FF78
kvsquare;33B8
kwsquare;33BE
l;006C
labengali;09B2
lacute;013A
ladeva;0932
lagujarati;0AB2
lagurmukhi;0A32
lakkhangyaothai;0E45
lamaleffinalarabic;FEFC
lamalefhamzaabovefinalarabic;FEF8
lamalefhamzaaboveisolatedarabic;FEF7
lamalefhamzabelowfinalarabic;FEFA
lamalefhamzabelowisolatedarabic;FEF9
lamalefisolatedarabic;FEFB
lamalefmaddaabovefinalarabic;FEF6
lamalefmaddaaboveisolatedarabic;FEF5
lamarabic;0644
lambda;03BB
lambdastroke;019B
lamed;05DC
lameddagesh;FB3C
lameddageshhebrew;FB3C
lamedhebrew;05DC
lamedholam;05DC 05B9
lamedholamdagesh;05DC 05B9 05BC
lamedholamdageshhebrew;05DC 05B9 05BC
lamedholamhebrew;05DC 05B9
lamfinalarabic;FEDE
lamhahinitialarabic;FCCA
laminitialarabic;FEDF
lamjeeminitialarabic;FCC9
lamkhahinitialarabic;FCCB
lamlamhehisolatedarabic;FDF2
lammedialarabic;FEE0
lammeemhahinitialarabic;FD88
lammeeminitialarabic;FCCC
lammeemjeeminitialarabic;FEDF FEE4 FEA0
lammeemkhahinitialarabic;FEDF FEE4 FEA8
largecircle;25EF
lbar;019A
lbelt;026C
lbopomofo;310C
lcaron;013E
lcedilla;013C
lcircle;24DB
lcircumflexbelow;1E3D
lcommaaccent;013C
ldot;0140
ldotaccent;0140
ldotbelow;1E37
ldotbelowmacron;1E39
leftangleabovecmb;031A
lefttackbelowcmb;0318
less;003C
lessequal;2264
lessequalorgreater;22DA
lessmonospace;FF1C
lessorequivalent;2272
lessorgreater;2276
lessoverequal;2266
lesssmall;FE64
lezh;026E
lfblock;258C
lhookretroflex;026D
lira;20A4
liwnarmenian;056C
lj;01C9
ljecyrillic;0459
ll;F6C0
lladeva;0933
llagujarati;0AB3
llinebelow;1E3B
llladeva;0934
llvocalicbengali;09E1
llvocalicdeva;0961
llvocalicvowelsignbengali;09E3
llvocalicvowelsigndeva;0963
lmiddletilde;026B
lmonospace;FF4C
lmsquare;33D0
lochulathai;0E2C
logicaland;2227
logicalnot;00AC
logicalnotreversed;2310
logicalor;2228
lolingthai;0E25
longs;017F
lowlinecenterline;FE4E
lowlinecmb;0332
lowlinedashed;FE4D
lozenge;25CA
lparen;24A7
lslash;0142
lsquare;2113
lsuperior;F6EE
ltshade;2591
luthai;0E26
lvocalicbengali;098C
lvocalicdeva;090C
lvocalicvowelsignbengali;09E2
lvocalicvowelsigndeva;0962
lxsquare;33D3
m;006D
mabengali;09AE
macron;00AF
macronbelowcmb;0331
macroncmb;0304
macronlowmod;02CD
macronmonospace;FFE3
macute;1E3F
madeva;092E
magujarati;0AAE
magurmukhi;0A2E
mahapakhhebrew;05A4
mahapakhlefthebrew;05A4
mahiragana;307E
maichattawalowleftthai;F895
maichattawalowrightthai;F894
maichattawathai;0E4B
maichattawaupperleftthai;F893
maieklowleftthai;F88C
maieklowrightthai;F88B
maiekthai;0E48
maiekupperleftthai;F88A
maihanakatleftthai;F884
maihanakatthai;0E31
maitaikhuleftthai;F889
maitaikhuthai;0E47
maitholowleftthai;F88F
maitholowrightthai;F88E
maithothai;0E49
maithoupperleftthai;F88D
maitrilowleftthai;F892
maitrilowrightthai;F891
maitrithai;0E4A
maitriupperleftthai;F890
maiyamokthai;0E46
makatakana;30DE
makatakanahalfwidth;FF8F
male;2642
mansyonsquare;3347
maqafhebrew;05BE
mars;2642
masoracirclehebrew;05AF
masquare;3383
mbopomofo;3107
mbsquare;33D4
mcircle;24DC
mcubedsquare;33A5
mdotaccent;1E41
mdotbelow;1E43
meemarabic;0645
meemfinalarabic;FEE2
meeminitialarabic;FEE3
meemmedialarabic;FEE4
meemmeeminitialarabic;FCD1
meemmeemisolatedarabic;FC48
meetorusquare;334D
mehiragana;3081
meizierasquare;337E
mekatakana;30E1
mekatakanahalfwidth;FF92
mem;05DE
memdagesh;FB3E
memdageshhebrew;FB3E
memhebrew;05DE
menarmenian;0574
merkhahebrew;05A5
merkhakefulahebrew;05A6
merkhakefulalefthebrew;05A6
merkhalefthebrew;05A5
mhook;0271
mhzsquare;3392
middledotkatakanahalfwidth;FF65
middot;00B7
mieumacirclekorean;3272
mieumaparenkorean;3212
mieumcirclekorean;3264
mieumkorean;3141
mieumpansioskorean;3170
mieumparenkorean;3204
mieumpieupkorean;316E
mieumsioskorean;316F
mihiragana;307F
mikatakana;30DF
mikatakanahalfwidth;FF90
minus;2212
minusbelowcmb;0320
minuscircle;2296
minusmod;02D7
minusplus;2213
minute;2032
miribaarusquare;334A
mirisquare;3349
mlonglegturned;0270
mlsquare;3396
mmcubedsquare;33A3
mmonospace;FF4D
mmsquaredsquare;339F
mohiragana;3082
mohmsquare;33C1
mokatakana;30E2
mokatakanahalfwidth;FF93
molsquare;33D6
momathai;0E21
moverssquare;33A7
moverssquaredsquare;33A8
mparen;24A8
mpasquare;33AB
mssquare;33B3
msuperior;F6EF
mturned;026F
mu;00B5
mu1;00B5
muasquare;3382
muchgreater;226B
muchless;226A
mufsquare;338C
mugreek;03BC
mugsquare;338D
muhiragana;3080
mukatakana;30E0
mukatakanahalfwidth;FF91
mulsquare;3395
multiply;00D7
mumsquare;339B
munahhebrew;05A3
munahlefthebrew;05A3
musicalnote;266A
musicalnotedbl;266B
musicflatsign;266D
musicsharpsign;266F
mussquare;33B2
muvsquare;33B6
muwsquare;33BC
mvmegasquare;33B9
mvsquare;33B7
mwmegasquare;33BF
mwsquare;33BD
n;006E
nabengali;09A8
nabla;2207
nacute;0144
nadeva;0928
nagujarati;0AA8
nagurmukhi;0A28
nahiragana;306A
nakatakana;30CA
nakatakanahalfwidth;FF85
napostrophe;0149
nasquare;3381
nbopomofo;310B
nbspace;00A0
ncaron;0148
ncedilla;0146
ncircle;24DD
ncircumflexbelow;1E4B
ncommaaccent;0146
ndotaccent;1E45
ndotbelow;1E47
nehiragana;306D
nekatakana;30CD
nekatakanahalfwidth;FF88
newsheqelsign;20AA
nfsquare;338B
ngabengali;0999
ngadeva;0919
ngagujarati;0A99
ngagurmukhi;0A19
ngonguthai;0E07
nhiragana;3093
nhookleft;0272
nhookretroflex;0273
nieunacirclekorean;326F
nieunaparenkorean;320F
nieuncieuckorean;3135
nieuncirclekorean;3261
nieunhieuhkorean;3136
nieunkorean;3134
nieunpansioskorean;3168
nieunparenkorean;3201
nieunsioskorean;3167
nieuntikeutkorean;3166
nihiragana;306B
nikatakana;30CB
nikatakanahalfwidth;FF86
nikhahitleftthai;F899
nikhahitthai;0E4D
nine;0039
ninearabic;0669
ninebengali;09EF
ninecircle;2468
ninecircleinversesansserif;2792
ninedeva;096F
ninegujarati;0AEF
ninegurmukhi;0A6F
ninehackarabic;0669
ninehangzhou;3029
nineideographicparen;3228
nineinferior;2089
ninemonospace;FF19
nineoldstyle;F739
nineparen;247C
nineperiod;2490
ninepersian;06F9
nineroman;2178
ninesuperior;2079
nineteencircle;2472
nineteenparen;2486
nineteenperiod;249A
ninethai;0E59
nj;01CC
njecyrillic;045A
nkatakana;30F3
nkatakanahalfwidth;FF9D
nlegrightlong;019E
nlinebelow;1E49
nmonospace;FF4E
nmsquare;339A
nnabengali;09A3
nnadeva;0923
nnagujarati;0AA3
nnagurmukhi;0A23
nnnadeva;0929
nohiragana;306E
nokatakana;30CE
nokatakanahalfwidth;FF89
nonbreakingspace;00A0
nonenthai;0E13
nonuthai;0E19
noonarabic;0646
noonfinalarabic;FEE6
noonghunnaarabic;06BA
noonghunnafinalarabic;FB9F
noonhehinitialarabic;FEE7 FEEC
nooninitialarabic;FEE7
noonjeeminitialarabic;FCD2
noonjeemisolatedarabic;FC4B
noonmedialarabic;FEE8
noonmeeminitialarabic;FCD5
noonmeemisolatedarabic;FC4E
noonnoonfinalarabic;FC8D
notcontains;220C
notelement;2209
notelementof;2209
notequal;2260
notgreater;226F
notgreaternorequal;2271
notgreaternorless;2279
notidentical;2262
notless;226E
notlessnorequal;2270
notparallel;2226
notprecedes;2280
notsubset;2284
notsucceeds;2281
notsuperset;2285
nowarmenian;0576
nparen;24A9
nssquare;33B1
nsuperior;207F
ntilde;00F1
nu;03BD
nuhiragana;306C
nukatakana;30CC
nukatakanahalfwidth;FF87
nuktabengali;09BC
nuktadeva;093C
nuktagujarati;0ABC
nuktagurmukhi;0A3C
numbersign;0023
numbersignmonospace;FF03
numbersignsmall;FE5F
numeralsigngreek;0374
numeralsignlowergreek;0375
numero;2116
nun;05E0
nundagesh;FB40
nundageshhebrew;FB40
nunhebrew;05E0
nvsquare;33B5
nwsquare;33BB
nyabengali;099E
nyadeva;091E
nyagujarati;0A9E
nyagurmukhi;0A1E
o;006F
oacute;00F3
oangthai;0E2D
obarred;0275
obarredcyrillic;04E9
obarreddieresiscyrillic;04EB
obengali;0993
obopomofo;311B
obreve;014F
ocandradeva;0911
ocandragujarati;0A91
ocandravowelsigndeva;0949
ocandravowelsigngujarati;0AC9
ocaron;01D2
ocircle;24DE
ocircumflex;00F4
ocircumflexacute;1ED1
ocircumflexdotbelow;1ED9
ocircumflexgrave;1ED3
ocircumflexhookabove;1ED5
ocircumflextilde;1ED7
ocyrillic;043E
odblacute;0151
odblgrave;020D
odeva;0913
odieresis;00F6
odieresiscyrillic;04E7
odotbelow;1ECD
oe;0153
oekorean;315A
ogonek;02DB
ogonekcmb;0328
ograve;00F2
ogujarati;0A93
oharmenian;0585
ohiragana;304A
ohookabove;1ECF
ohorn;01A1
ohornacute;1EDB
ohorndotbelow;1EE3
ohorngrave;1EDD
ohornhookabove;1EDF
ohorntilde;1EE1
ohungarumlaut;0151
oi;01A3
oinvertedbreve;020F
okatakana;30AA
okatakanahalfwidth;FF75
okorean;3157
olehebrew;05AB
omacron;014D
omacronacute;1E53
omacrongrave;1E51
omdeva;0950
omega;03C9
omega1;03D6
omegacyrillic;0461
omegalatinclosed;0277
omegaroundcyrillic;047B
omegatitlocyrillic;047D
omegatonos;03CE
omgujarati;0AD0
omicron;03BF
omicrontonos;03CC
omonospace;FF4F
one;0031
onearabic;0661
onebengali;09E7
onecircle;2460
onecircleinversesansserif;278A
onedeva;0967
onedotenleader;2024
oneeighth;215B
onefitted;F6DC
onegujarati;0AE7
onegurmukhi;0A67
onehackarabic;0661
onehalf;00BD
onehangzhou;3021
oneideographicparen;3220
oneinferior;2081
onemonospace;FF11
onenumeratorbengali;09F4
oneoldstyle;F731
oneparen;2474
oneperiod;2488
onepersian;06F1
onequarter;00BC
oneroman;2170
onesuperior;00B9
onethai;0E51
onethird;2153
oogonek;01EB
oogonekmacron;01ED
oogurmukhi;0A13
oomatragurmukhi;0A4B
oopen;0254
oparen;24AA
openbullet;25E6
option;2325
ordfeminine;00AA
ordmasculine;00BA
orthogonal;221F
oshortdeva;0912
oshortvowelsigndeva;094A
oslash;00F8
oslashacute;01FF
osmallhiragana;3049
osmallkatakana;30A9
osmallkatakanahalfwidth;FF6B
ostrokeacute;01FF
osuperior;F6F0
otcyrillic;047F
otilde;00F5
otildeacute;1E4D
otildedieresis;1E4F
oubopomofo;3121
overline;203E
overlinecenterline;FE4A
overlinecmb;0305
overlinedashed;FE49
overlinedblwavy;FE4C
overlinewavy;FE4B
overscore;00AF
ovowelsignbengali;09CB
ovowelsigndeva;094B
ovowelsigngujarati;0ACB
p;0070
paampssquare;3380
paasentosquare;332B
pabengali;09AA
pacute;1E55
padeva;092A
pagedown;21DF
pageup;21DE
pagujarati;0AAA
pagurmukhi;0A2A
pahiragana;3071
paiyannoithai;0E2F
pakatakana;30D1
palatalizationcyrilliccmb;0484
palochkacyrillic;04C0
pansioskorean;317F
paragraph;00B6
parallel;2225
parenleft;0028
parenleftaltonearabic;FD3E
parenleftbt;F8ED
parenleftex;F8EC
parenleftinferior;208D
parenleftmonospace;FF08
parenleftsmall;FE59
parenleftsuperior;207D
parenlefttp;F8EB
parenleftvertical;FE35
parenright;0029
parenrightaltonearabic;FD3F
parenrightbt;F8F8
parenrightex;F8F7
parenrightinferior;208E
parenrightmonospace;FF09
parenrightsmall;FE5A
parenrightsuperior;207E
parenrighttp;F8F6
parenrightvertical;FE36
partialdiff;2202
paseqhebrew;05C0
pashtahebrew;0599
pasquare;33A9
patah;05B7
patah11;05B7
patah1d;05B7
patah2a;05B7
patahhebrew;05B7
patahnarrowhebrew;05B7
patahquarterhebrew;05B7
patahwidehebrew;05B7
pazerhebrew;05A1
pbopomofo;3106
pcircle;24DF
pdotaccent;1E57
pe;05E4
pecyrillic;043F
pedagesh;FB44
pedageshhebrew;FB44
peezisquare;333B
pefinaldageshhebrew;FB43
peharabic;067E
peharmenian;057A
pehebrew;05E4
pehfinalarabic;FB57
pehinitialarabic;FB58
pehiragana;307A
pehmedialarabic;FB59
pekatakana;30DA
pemiddlehookcyrillic;04A7
perafehebrew;FB4E
percent;0025
percentarabic;066A
percentmonospace;FF05
percentsmall;FE6A
period;002E
periodarmenian;0589
periodcentered;00B7
periodhalfwidth;FF61
periodinferior;F6E7
periodmonospace;FF0E
periodsmall;FE52
periodsuperior;F6E8
perispomenigreekcmb;0342
perpendicular;22A5
perthousand;2030
peseta;20A7
pfsquare;338A
phabengali;09AB
phadeva;092B
phagujarati;0AAB
phagurmukhi;0A2B
phi;03C6
phi1;03D5
phieuphacirclekorean;327A
phieuphaparenkorean;321A
phieuphcirclekorean;326C
phieuphkorean;314D
phieuphparenkorean;320C
philatin;0278
phinthuthai;0E3A
phisymbolgreek;03D5
phook;01A5
phophanthai;0E1E
phophungthai;0E1C
phosamphaothai;0E20
pi;03C0
pieupacirclekorean;3273
pieupaparenkorean;3213
pieupcieuckorean;3176
pieupcirclekorean;3265
pieupkiyeokkorean;3172
pieupkorean;3142
pieupparenkorean;3205
pieupsioskiyeokkorean;3174
pieupsioskorean;3144
pieupsiostikeutkorean;3175
pieupthieuthkorean;3177
pieuptikeutkorean;3173
pihiragana;3074
pikatakana;30D4
pisymbolgreek;03D6
piwrarmenian;0583
plus;002B
plusbelowcmb;031F
pluscircle;2295
plusminus;00B1
plusmod;02D6
plusmonospace;FF0B
plussmall;FE62
plussuperior;207A
pmonospace;FF50
pmsquare;33D8
pohiragana;307D
pointingindexdownwhite;261F
pointingindexleftwhite;261C
pointingindexrightwhite;261E
pointingindexupwhite;261D
pokatakana;30DD
poplathai;0E1B
postalmark;3012
postalmarkface;3020
pparen;24AB
precedes;227A
prescription;211E
primemod;02B9
primereversed;2035
product;220F
projective;2305
prolongedkana;30FC
propellor;2318
propersubset;2282
propersuperset;2283
proportion;2237
proportional;221D
psi;03C8
psicyrillic;0471
psilipneumatacyrilliccmb;0486
pssquare;33B0
puhiragana;3077
pukatakana;30D7
pvsquare;33B4
pwsquare;33BA
q;0071
qadeva;0958
qadmahebrew;05A8
qafarabic;0642
qaffinalarabic;FED6
qafinitialarabic;FED7
qafmedialarabic;FED8
qamats;05B8
qamats10;05B8
qamats1a;05B8
qamats1c;05B8
qamats27;05B8
qamats29;05B8
qamats33;05B8
qamatsde;05B8
qamatshebrew;05B8
qamatsnarrowhebrew;05B8
qamatsqatanhebrew;05B8
qamatsqatannarrowhebrew;05B8
qamatsqatanquarterhebrew;05B8
qamatsqatanwidehebrew;05B8
qamatsquarterhebrew;05B8
qamatswidehebrew;05B8
qarneyparahebrew;059F
qbopomofo;3111
qcircle;24E0
qhook;02A0
qmonospace;FF51
qof;05E7
qofdagesh;FB47
qofdageshhebrew;FB47
qofhatafpatah;05E7 05B2
qofhatafpatahhebrew;05E7 05B2
qofhatafsegol;05E7 05B1
qofhatafsegolhebrew;05E7 05B1
qofhebrew;05E7
qofhiriq;05E7 05B4
qofhiriqhebrew;05E7 05B4
qofholam;05E7 05B9
qofholamhebrew;05E7 05B9
qofpatah;05E7 05B7
qofpatahhebrew;05E7 05B7
qofqamats;05E7 05B8
qofqamatshebrew;05E7 05B8
qofqubuts;05E7 05BB
qofqubutshebrew;05E7 05BB
qofsegol;05E7 05B6
qofsegolhebrew;05E7 05B6
qofsheva;05E7 05B0
qofshevahebrew;05E7 05B0
qoftsere;05E7 05B5
qoftserehebrew;05E7 05B5
qparen;24AC
quarternote;2669
qubuts;05BB
qubuts18;05BB
qubuts25;05BB
qubuts31;05BB
qubutshebrew;05BB
qubutsnarrowhebrew;05BB
qubutsquarterhebrew;05BB
qubutswidehebrew;05BB
question;003F
questionarabic;061F
questionarmenian;055E
questiondown;00BF
questiondownsmall;F7BF
questiongreek;037E
questionmonospace;FF1F
questionsmall;F73F
quotedbl;0022
quotedblbase;201E
quotedblleft;201C
quotedblmonospace;FF02
quotedblprime;301E
quotedblprimereversed;301D
quotedblright;201D
quoteleft;2018
quoteleftreversed;201B
quotereversed;201B
quoteright;2019
quoterightn;0149
quotesinglbase;201A
quotesingle;0027
quotesinglemonospace;FF07
r;0072
raarmenian;057C
rabengali;09B0
racute;0155
radeva;0930
radical;221A
radicalex;F8E5
radoverssquare;33AE
radoverssquaredsquare;33AF
radsquare;33AD
rafe;05BF
rafehebrew;05BF
ragujarati;0AB0
ragurmukhi;0A30
rahiragana;3089
rakatakana;30E9
rakatakanahalfwidth;FF97
ralowerdiagonalbengali;09F1
ramiddlediagonalbengali;09F0
ramshorn;0264
ratio;2236
rbopomofo;3116
rcaron;0159
rcedilla;0157
rcircle;24E1
rcommaaccent;0157
rdblgrave;0211
rdotaccent;1E59
rdotbelow;1E5B
rdotbelowmacron;1E5D
referencemark;203B
reflexsubset;2286
reflexsuperset;2287
registered;00AE
registersans;F8E8
registerserif;F6DA
reharabic;0631
reharmenian;0580
rehfinalarabic;FEAE
rehiragana;308C
rehyehaleflamarabic;0631 FEF3 FE8E 0644
rekatakana;30EC
rekatakanahalfwidth;FF9A
resh;05E8
reshdageshhebrew;FB48
reshhatafpatah;05E8 05B2
reshhatafpatahhebrew;05E8 05B2
reshhatafsegol;05E8 05B1
reshhatafsegolhebrew;05E8 05B1
reshhebrew;05E8
reshhiriq;05E8 05B4
reshhiriqhebrew;05E8 05B4
reshholam;05E8 05B9
reshholamhebrew;05E8 05B9
reshpatah;05E8 05B7
reshpatahhebrew;05E8 05B7
reshqamats;05E8 05B8
reshqamatshebrew;05E8 05B8
reshqubuts;05E8 05BB
reshqubutshebrew;05E8 05BB
reshsegol;05E8 05B6
reshsegolhebrew;05E8 05B6
reshsheva;05E8 05B0
reshshevahebrew;05E8 05B0
reshtsere;05E8 05B5
reshtserehebrew;05E8 05B5
reversedtilde;223D
reviahebrew;0597
reviamugrashhebrew;0597
revlogicalnot;2310
rfishhook;027E
rfishhookreversed;027F
rhabengali;09DD
rhadeva;095D
rho;03C1
rhook;027D
rhookturned;027B
rhookturnedsuperior;02B5
rhosymbolgreek;03F1
rhotichookmod;02DE
rieulacirclekorean;3271
rieulaparenkorean;3211
rieulcirclekorean;3263
rieulhieuhkorean;3140
rieulkiyeokkorean;313A
rieulkiyeoksioskorean;3169
rieulkorean;3139
rieulmieumkorean;313B
rieulpansioskorean;316C
rieulparenkorean;3203
rieulphieuphkorean;313F
rieulpieupkorean;313C
rieulpieupsioskorean;316B
rieulsioskorean;313D
rieulthieuthkorean;313E
rieultikeutkorean;316A
rieulyeorinhieuhkorean;316D
rightangle;221F
righttackbelowcmb;0319
righttriangle;22BF
rihiragana;308A
rikatakana;30EA
rikatakanahalfwidth;FF98
ring;02DA
ringbelowcmb;0325
ringcmb;030A
ringhalfleft;02BF
ringhalfleftarmenian;0559
ringhalfleftbelowcmb;031C
ringhalfleftcentered;02D3
ringhalfright;02BE
ringhalfrightbelowcmb;0339
ringhalfrightcentered;02D2
rinvertedbreve;0213
rittorusquare;3351
rlinebelow;1E5F
rlongleg;027C
rlonglegturned;027A
rmonospace;FF52
rohiragana;308D
rokatakana;30ED
rokatakanahalfwidth;FF9B
roruathai;0E23
rparen;24AD
rrabengali;09DC
rradeva;0931
rragurmukhi;0A5C
rreharabic;0691
rrehfinalarabic;FB8D
rrvocalicbengali;09E0
rrvocalicdeva;0960
rrvocalicgujarati;0AE0
rrvocalicvowelsignbengali;09C4
rrvocalicvowelsigndeva;0944
rrvocalicvowelsigngujarati;0AC4
rsuperior;F6F1
rtblock;2590
rturned;0279
rturnedsuperior;02B4
ruhiragana;308B
rukatakana;30EB
rukatakanahalfwidth;FF99
rupeemarkbengali;09F2
rupeesignbengali;09F3
rupiah;F6DD
ruthai;0E24
rvocalicbengali;098B
rvocalicdeva;090B
rvocalicgujarati;0A8B
rvocalicvowelsignbengali;09C3
rvocalicvowelsigndeva;0943
rvocalicvowelsigngujarati;0AC3
s;0073
sabengali;09B8
sacute;015B
sacutedotaccent;1E65
sadarabic;0635
sadeva;0938
sadfinalarabic;FEBA
sadinitialarabic;FEBB
sadmedialarabic;FEBC
sagujarati;0AB8
sagurmukhi;0A38
sahiragana;3055
sakatakana;30B5
sakatakanahalfwidth;FF7B
sallallahoualayhewasallamarabic;FDFA
samekh;05E1
samekhdagesh;FB41
samekhdageshhebrew;FB41
samekhhebrew;05E1
saraaathai;0E32
saraaethai;0E41
saraaimaimalaithai;0E44
saraaimaimuanthai;0E43
saraamthai;0E33
saraathai;0E30
saraethai;0E40
saraiileftthai;F886
saraiithai;0E35
saraileftthai;F885
saraithai;0E34
saraothai;0E42
saraueeleftthai;F888
saraueethai;0E37
saraueleftthai;F887
sarauethai;0E36
sarauthai;0E38
sarauuthai;0E39
sbopomofo;3119
scaron;0161
scarondotaccent;1E67
scedilla;015F
schwa;0259
schwacyrillic;04D9
schwadieresiscyrillic;04DB
schwahook;025A
scircle;24E2
scircumflex;015D
scommaaccent;0219
sdotaccent;1E61
sdotbelow;1E63
sdotbelowdotaccent;1E69
seagullbelowcmb;033C
second;2033
secondtonechinese;02CA
section;00A7
seenarabic;0633
seenfinalarabic;FEB2
seeninitialarabic;FEB3
seenmedialarabic;FEB4
segol;05B6
segol13;05B6
segol1f;05B6
segol2c;05B6
segolhebrew;05B6
segolnarrowhebrew;05B6
segolquarterhebrew;05B6
segoltahebrew;0592
segolwidehebrew;05B6
seharmenian;057D
sehiragana;305B
sekatakana;30BB
sekatakanahalfwidth;FF7E
semicolon;003B
semicolonarabic;061B
semicolonmonospace;FF1B
semicolonsmall;FE54
semivoicedmarkkana;309C
semivoicedmarkkanahalfwidth;FF9F
sentisquare;3322
sentosquare;3323
seven;0037
sevenarabic;0667
sevenbengali;09ED
sevencircle;2466
sevencircleinversesansserif;2790
sevendeva;096D
seveneighths;215E
sevengujarati;0AED
sevengurmukhi;0A6D
sevenhackarabic;0667
sevenhangzhou;3027
sevenideographicparen;3226
seveninferior;2087
sevenmonospace;FF17
sevenoldstyle;F737
sevenparen;247A
sevenperiod;248E
sevenpersian;06F7
sevenroman;2176
sevensuperior;2077
seventeencircle;2470
seventeenparen;2484
seventeenperiod;2498
seventhai;0E57
sfthyphen;00AD
shaarmenian;0577
shabengali;09B6
shacyrillic;0448
shaddaarabic;0651
shaddadammaarabic;FC61
shaddadammatanarabic;FC5E
shaddafathaarabic;FC60
shaddafathatanarabic;0651 064B
shaddakasraarabic;FC62
shaddakasratanarabic;FC5F
shade;2592
shadedark;2593
shadelight;2591
shademedium;2592
shadeva;0936
shagujarati;0AB6
shagurmukhi;0A36
shalshelethebrew;0593
shbopomofo;3115
shchacyrillic;0449
sheenarabic;0634
sheenfinalarabic;FEB6
sheeninitialarabic;FEB7
sheenmedialarabic;FEB8
sheicoptic;03E3
sheqel;20AA
sheqelhebrew;20AA
sheva;05B0
sheva115;05B0
sheva15;05B0
sheva22;05B0
sheva2e;05B0
shevahebrew;05B0
shevanarrowhebrew;05B0
shevaquarterhebrew;05B0
shevawidehebrew;05B0
shhacyrillic;04BB
shimacoptic;03ED
shin;05E9
shindagesh;FB49
shindageshhebrew;FB49
shindageshshindot;FB2C
shindageshshindothebrew;FB2C
shindageshsindot;FB2D
shindageshsindothebrew;FB2D
shindothebrew;05C1
shinhebrew;05E9
shinshindot;FB2A
shinshindothebrew;FB2A
shinsindot;FB2B
shinsindothebrew;FB2B
shook;0282
sigma;03C3
sigma1;03C2
sigmafinal;03C2
sigmalunatesymbolgreek;03F2
sihiragana;3057
sikatakana;30B7
sikatakanahalfwidth;FF7C
siluqhebrew;05BD
siluqlefthebrew;05BD
similar;223C
sindothebrew;05C2
siosacirclekorean;3274
siosaparenkorean;3214
sioscieuckorean;317E
sioscirclekorean;3266
sioskiyeokkorean;317A
sioskorean;3145
siosnieunkorean;317B
siosparenkorean;3206
siospieupkorean;317D
siostikeutkorean;317C
six;0036
sixarabic;0666
sixbengali;09EC
sixcircle;2465
sixcircleinversesansserif;278F
sixdeva;096C
sixgujarati;0AEC
sixgurmukhi;0A6C
sixhackarabic;0666
sixhangzhou;3026
sixideographicparen;3225
sixinferior;2086
sixmonospace;FF16
sixoldstyle;F736
sixparen;2479
sixperiod;248D
sixpersian;06F6
sixroman;2175
sixsuperior;2076
sixteencircle;246F
sixteencurrencydenominatorbengali;09F9
sixteenparen;2483
sixteenperiod;2497
sixthai;0E56
slash;002F
slashmonospace;FF0F
slong;017F
slongdotaccent;1E9B
smileface;263A
smonospace;FF53
sofpasuqhebrew;05C3
softhyphen;00AD
softsigncyrillic;044C
sohiragana;305D
sokatakana;30BD
sokatakanahalfwidth;FF7F
soliduslongoverlaycmb;0338
solidusshortoverlaycmb;0337
sorusithai;0E29
sosalathai;0E28
sosothai;0E0B
sosuathai;0E2A
space;0020
spacehackarabic;0020
spade;2660
spadesuitblack;2660
spadesuitwhite;2664
sparen;24AE
squarebelowcmb;033B
squarecc;33C4
squarecm;339D
squarediagonalcrosshatchfill;25A9
squarehorizontalfill;25A4
squarekg;338F
squarekm;339E
squarekmcapital;33CE
squareln;33D1
squarelog;33D2
squaremg;338E
squaremil;33D5
squaremm;339C
squaremsquared;33A1
squareorthogonalcrosshatchfill;25A6
squareupperlefttolowerrightfill;25A7
squareupperrighttolowerleftfill;25A8
squareverticalfill;25A5
squarewhitewithsmallblack;25A3
srsquare;33DB
ssabengali;09B7
ssadeva;0937
ssagujarati;0AB7
ssangcieuckorean;3149
ssanghieuhkorean;3185
ssangieungkorean;3180
ssangkiyeokkorean;3132
ssangnieunkorean;3165
ssangpieupkorean;3143
ssangsioskorean;3146
ssangtikeutkorean;3138
ssuperior;F6F2
sterling;00A3
sterlingmonospace;FFE1
strokelongoverlaycmb;0336
strokeshortoverlaycmb;0335
subset;2282
subsetnotequal;228A
subsetorequal;2286
succeeds;227B
suchthat;220B
suhiragana;3059
sukatakana;30B9
sukatakanahalfwidth;FF7D
sukunarabic;0652
summation;2211
sun;263C
superset;2283
supersetnotequal;228B
supersetorequal;2287
svsquare;33DC
syouwaerasquare;337C
t;0074
tabengali;09A4
tackdown;22A4
tackleft;22A3
tadeva;0924
tagujarati;0AA4
tagurmukhi;0A24
taharabic;0637
tahfinalarabic;FEC2
tahinitialarabic;FEC3
tahiragana;305F
tahmedialarabic;FEC4
taisyouerasquare;337D
takatakana;30BF
takatakanahalfwidth;FF80
tatweelarabic;0640
tau;03C4
tav;05EA
tavdages;FB4A
tavdagesh;FB4A
tavdageshhebrew;FB4A
tavhebrew;05EA
tbar;0167
tbopomofo;310A
tcaron;0165
tccurl;02A8
tcedilla;0163
tcheharabic;0686
tchehfinalarabic;FB7B
tchehinitialarabic;FB7C
tchehmedialarabic;FB7D
tchehmeeminitialarabic;FB7C FEE4
tcircle;24E3
tcircumflexbelow;1E71
tcommaaccent;0163
tdieresis;1E97
tdotaccent;1E6B
tdotbelow;1E6D
tecyrillic;0442
tedescendercyrillic;04AD
teharabic;062A
tehfinalarabic;FE96
tehhahinitialarabic;FCA2
tehhahisolatedarabic;FC0C
tehinitialarabic;FE97
tehiragana;3066
tehjeeminitialarabic;FCA1
tehjeemisolatedarabic;FC0B
tehmarbutaarabic;0629
tehmarbutafinalarabic;FE94
tehmedialarabic;FE98
tehmeeminitialarabic;FCA4
tehmeemisolatedarabic;FC0E
tehnoonfinalarabic;FC73
tekatakana;30C6
tekatakanahalfwidth;FF83
telephone;2121
telephoneblack;260E
telishagedolahebrew;05A0
telishaqetanahebrew;05A9
tencircle;2469
tenideographicparen;3229
tenparen;247D
tenperiod;2491
tenroman;2179
tesh;02A7
tet;05D8
tetdagesh;FB38
tetdageshhebrew;FB38
tethebrew;05D8
tetsecyrillic;04B5
tevirhebrew;059B
tevirlefthebrew;059B
thabengali;09A5
thadeva;0925
thagujarati;0AA5
thagurmukhi;0A25
thalarabic;0630
thalfinalarabic;FEAC
thanthakhatlowleftthai;F898
thanthakhatlowrightthai;F897
thanthakhatthai;0E4C
thanthakhatupperleftthai;F896
theharabic;062B
thehfinalarabic;FE9A
thehinitialarabic;FE9B
thehmedialarabic;FE9C
thereexists;2203
therefore;2234
theta;03B8
theta1;03D1
thetasymbolgreek;03D1
thieuthacirclekorean;3279
thieuthaparenkorean;3219
thieuthcirclekorean;326B
thieuthkorean;314C
thieuthparenkorean;320B
thirteencircle;246C
thirteenparen;2480
thirteenperiod;2494
thonangmonthothai;0E11
thook;01AD
thophuthaothai;0E12
thorn;00FE
thothahanthai;0E17
thothanthai;0E10
thothongthai;0E18
thothungthai;0E16
thousandcyrillic;0482
thousandsseparatorarabic;066C
thousandsseparatorpersian;066C
three;0033
threearabic;0663
threebengali;09E9
threecircle;2462
threecircleinversesansserif;278C
threedeva;0969
threeeighths;215C
threegujarati;0AE9
threegurmukhi;0A69
threehackarabic;0663
threehangzhou;3023
threeideographicparen;3222
threeinferior;2083
threemonospace;FF13
threenumeratorbengali;09F6
threeoldstyle;F733
threeparen;2476
threeperiod;248A
threepersian;06F3
threequarters;00BE
threequartersemdash;F6DE
threeroman;2172
threesuperior;00B3
threethai;0E53
thzsquare;3394
tihiragana;3061
tikatakana;30C1
tikatakanahalfwidth;FF81
tikeutacirclekorean;3270
tikeutaparenkorean;3210
tikeutcirclekorean;3262
tikeutkorean;3137
tikeutparenkorean;3202
tilde;02DC
tildebelowcmb;0330
tildecmb;0303
tildecomb;0303
tildedoublecmb;0360
tildeoperator;223C
tildeoverlaycmb;0334
tildeverticalcmb;033E
timescircle;2297
tipehahebrew;0596
tipehalefthebrew;0596
tippigurmukhi;0A70
titlocyrilliccmb;0483
tiwnarmenian;057F
tlinebelow;1E6F
tmonospace;FF54
toarmenian;0569
tohiragana;3068
tokatakana;30C8
tokatakanahalfwidth;FF84
tonebarextrahighmod;02E5
tonebarextralowmod;02E9
tonebarhighmod;02E6
tonebarlowmod;02E8
tonebarmidmod;02E7
tonefive;01BD
tonesix;0185
tonetwo;01A8
tonos;0384
tonsquare;3327
topatakthai;0E0F
tortoiseshellbracketleft;3014
tortoiseshellbracketleftsmall;FE5D
tortoiseshellbracketleftvertical;FE39
tortoiseshellbracketright;3015
tortoiseshellbracketrightsmall;FE5E
tortoiseshellbracketrightvertical;FE3A
totaothai;0E15
tpalatalhook;01AB
tparen;24AF
trademark;2122
trademarksans;F8EA
trademarkserif;F6DB
tretroflexhook;0288
triagdn;25BC
triaglf;25C4
triagrt;25BA
triagup;25B2
ts;02A6
tsadi;05E6
tsadidagesh;FB46
tsadidageshhebrew;FB46
tsadihebrew;05E6
tsecyrillic;0446
tsere;05B5
tsere12;05B5
tsere1e;05B5
tsere2b;05B5
tserehebrew;05B5
tserenarrowhebrew;05B5
tserequarterhebrew;05B5
tserewidehebrew;05B5
tshecyrillic;045B
tsuperior;F6F3
ttabengali;099F
ttadeva;091F
ttagujarati;0A9F
ttagurmukhi;0A1F
tteharabic;0679
ttehfinalarabic;FB67
ttehinitialarabic;FB68
ttehmedialarabic;FB69
tthabengali;09A0
tthadeva;0920
tthagujarati;0AA0
tthagurmukhi;0A20
tturned;0287
tuhiragana;3064
tukatakana;30C4
tukatakanahalfwidth;FF82
tusmallhiragana;3063
tusmallkatakana;30C3
tusmallkatakanahalfwidth;FF6F
twelvecircle;246B
twelveparen;247F
twelveperiod;2493
twelveroman;217B
twentycircle;2473
twentyhangzhou;5344
twentyparen;2487
twentyperiod;249B
two;0032
twoarabic;0662
twobengali;09E8
twocircle;2461
twocircleinversesansserif;278B
twodeva;0968
twodotenleader;2025
twodotleader;2025
twodotleadervertical;FE30
twogujarati;0AE8
twogurmukhi;0A68
twohackarabic;0662
twohangzhou;3022
twoideographicparen;3221
twoinferior;2082
twomonospace;FF12
twonumeratorbengali;09F5
twooldstyle;F732
twoparen;2475
twoperiod;2489
twopersian;06F2
tworoman;2171
twostroke;01BB
twosuperior;00B2
twothai;0E52
twothirds;2154
u;0075
uacute;00FA
ubar;0289
ubengali;0989
ubopomofo;3128
ubreve;016D
ucaron;01D4
ucircle;24E4
ucircumflex;00FB
ucircumflexbelow;1E77
ucyrillic;0443
udattadeva;0951
udblacute;0171
udblgrave;0215
udeva;0909
udieresis;00FC
udieresisacute;01D8
udieresisbelow;1E73
udieresiscaron;01DA
udieresiscyrillic;04F1
udieresisgrave;01DC
udieresismacron;01D6
udotbelow;1EE5
ugrave;00F9
ugujarati;0A89
ugurmukhi;0A09
uhiragana;3046
uhookabove;1EE7
uhorn;01B0
uhornacute;1EE9
uhorndotbelow;1EF1
uhorngrave;1EEB
uhornhookabove;1EED
uhorntilde;1EEF
uhungarumlaut;0171
uhungarumlautcyrillic;04F3
uinvertedbreve;0217
ukatakana;30A6
ukatakanahalfwidth;FF73
ukcyrillic;0479
ukorean;315C
umacron;016B
umacroncyrillic;04EF
umacrondieresis;1E7B
umatragurmukhi;0A41
umonospace;FF55
underscore;005F
underscoredbl;2017
underscoremonospace;FF3F
underscorevertical;FE33
underscorewavy;FE4F
union;222A
universal;2200
uogonek;0173
uparen;24B0
upblock;2580
upperdothebrew;05C4
upsilon;03C5
upsilondieresis;03CB
upsilondieresistonos;03B0
upsilonlatin;028A
upsilontonos;03CD
uptackbelowcmb;031D
uptackmod;02D4
uragurmukhi;0A73
uring;016F
ushortcyrillic;045E
usmallhiragana;3045
usmallkatakana;30A5
usmallkatakanahalfwidth;FF69
ustraightcyrillic;04AF
ustraightstrokecyrillic;04B1
utilde;0169
utildeacute;1E79
utildebelow;1E75
uubengali;098A
uudeva;090A
uugujarati;0A8A
uugurmukhi;0A0A
uumatragurmukhi;0A42
uuvowelsignbengali;09C2
uuvowelsigndeva;0942
uuvowelsigngujarati;0AC2
uvowelsignbengali;09C1
uvowelsigndeva;0941
uvowelsigngujarati;0AC1
v;0076
vadeva;0935
vagujarati;0AB5
vagurmukhi;0A35
vakatakana;30F7
vav;05D5
vavdagesh;FB35
vavdagesh65;FB35
vavdageshhebrew;FB35
vavhebrew;05D5
vavholam;FB4B
vavholamhebrew;FB4B
vavvavhebrew;05F0
vavyodhebrew;05F1
vcircle;24E5
vdotbelow;1E7F
vecyrillic;0432
veharabic;06A4
vehfinalarabic;FB6B
vehinitialarabic;FB6C
vehmedialarabic;FB6D
vekatakana;30F9
venus;2640
verticalbar;007C
verticallineabovecmb;030D
verticallinebelowcmb;0329
verticallinelowmod;02CC
verticallinemod;02C8
vewarmenian;057E
vhook;028B
vikatakana;30F8
viramabengali;09CD
viramadeva;094D
viramagujarati;0ACD
visargabengali;0983
visargadeva;0903
visargagujarati;0A83
vmonospace;FF56
voarmenian;0578
voicediterationhiragana;309E
voicediterationkatakana;30FE
voicedmarkkana;309B
voicedmarkkanahalfwidth;FF9E
vokatakana;30FA
vparen;24B1
vtilde;1E7D
vturned;028C
vuhiragana;3094
vukatakana;30F4
w;0077
wacute;1E83
waekorean;3159
wahiragana;308F
wakatakana;30EF
wakatakanahalfwidth;FF9C
wakorean;3158
wasmallhiragana;308E
wasmallkatakana;30EE
wattosquare;3357
wavedash;301C
wavyunderscorevertical;FE34
wawarabic;0648
wawfinalarabic;FEEE
wawhamzaabovearabic;0624
wawhamzaabovefinalarabic;FE86
wbsquare;33DD
wcircle;24E6
wcircumflex;0175
wdieresis;1E85
wdotaccent;1E87
wdotbelow;1E89
wehiragana;3091
weierstrass;2118
wekatakana;30F1
wekorean;315E
weokorean;315D
wgrave;1E81
whitebullet;25E6
whitecircle;25CB
whitecircleinverse;25D9
whitecornerbracketleft;300E
whitecornerbracketleftvertical;FE43
whitecornerbracketright;300F
whitecornerbracketrightvertical;FE44
whitediamond;25C7
whitediamondcontainingblacksmalldiamond;25C8
whitedownpointingsmalltriangle;25BF
whitedownpointingtriangle;25BD
whiteleftpointingsmalltriangle;25C3
whiteleftpointingtriangle;25C1
whitelenticularbracketleft;3016
whitelenticularbracketright;3017
whiterightpointingsmalltriangle;25B9
whiterightpointingtriangle;25B7
whitesmallsquare;25AB
whitesmilingface;263A
whitesquare;25A1
whitestar;2606
whitetelephone;260F
whitetortoiseshellbracketleft;3018
whitetortoiseshellbracketright;3019
whiteuppointingsmalltriangle;25B5
whiteuppointingtriangle;25B3
wihiragana;3090
wikatakana;30F0
wikorean;315F
wmonospace;FF57
wohiragana;3092
wokatakana;30F2
wokatakanahalfwidth;FF66
won;20A9
wonmonospace;FFE6
wowaenthai;0E27
wparen;24B2
wring;1E98
wsuperior;02B7
wturned;028D
wynn;01BF
x;0078
xabovecmb;033D
xbopomofo;3112
xcircle;24E7
xdieresis;1E8D
xdotaccent;1E8B
xeharmenian;056D
xi;03BE
xmonospace;FF58
xparen;24B3
xsuperior;02E3
y;0079
yaadosquare;334E
yabengali;09AF
yacute;00FD
yadeva;092F
yaekorean;3152
yagujarati;0AAF
yagurmukhi;0A2F
yahiragana;3084
yakatakana;30E4
yakatakanahalfwidth;FF94
yakorean;3151
yamakkanthai;0E4E
yasmallhiragana;3083
yasmallkatakana;30E3
yasmallkatakanahalfwidth;FF6C
yatcyrillic;0463
ycircle;24E8
ycircumflex;0177
ydieresis;00FF
ydotaccent;1E8F
ydotbelow;1EF5
yeharabic;064A
yehbarreearabic;06D2
yehbarreefinalarabic;FBAF
yehfinalarabic;FEF2
yehhamzaabovearabic;0626
yehhamzaabovefinalarabic;FE8A
yehhamzaaboveinitialarabic;FE8B
yehhamzaabovemedialarabic;FE8C
yehinitialarabic;FEF3
yehmedialarabic;FEF4
yehmeeminitialarabic;FCDD
yehmeemisolatedarabic;FC58
yehnoonfinalarabic;FC94
yehthreedotsbelowarabic;06D1
yekorean;3156
yen;00A5
yenmonospace;FFE5
yeokorean;3155
yeorinhieuhkorean;3186
yerahbenyomohebrew;05AA
yerahbenyomolefthebrew;05AA
yericyrillic;044B
yerudieresiscyrillic;04F9
yesieungkorean;3181
yesieungpansioskorean;3183
yesieungsioskorean;3182
yetivhebrew;059A
ygrave;1EF3
yhook;01B4
yhookabove;1EF7
yiarmenian;0575
yicyrillic;0457
yikorean;3162
yinyang;262F
yiwnarmenian;0582
ymonospace;FF59
yod;05D9
yoddagesh;FB39
yoddageshhebrew;FB39
yodhebrew;05D9
yodyodhebrew;05F2
yodyodpatahhebrew;FB1F
yohiragana;3088
yoikorean;3189
yokatakana;30E8
yokatakanahalfwidth;FF96
yokorean;315B
yosmallhiragana;3087
yosmallkatakana;30E7
yosmallkatakanahalfwidth;FF6E
yotgreek;03F3
yoyaekorean;3188
yoyakorean;3187
yoyakthai;0E22
yoyingthai;0E0D
yparen;24B4
ypogegrammeni;037A
ypogegrammenigreekcmb;0345
yr;01A6
yring;1E99
ysuperior;02B8
ytilde;1EF9
yturned;028E
yuhiragana;3086
yuikorean;318C
yukatakana;30E6
yukatakanahalfwidth;FF95
yukorean;3160
yusbigcyrillic;046B
yusbigiotifiedcyrillic;046D
yuslittlecyrillic;0467
yuslittleiotifiedcyrillic;0469
yusmallhiragana;3085
yusmallkatakana;30E5
yusmallkatakanahalfwidth;FF6D
yuyekorean;318B
yuyeokorean;318A
yyabengali;09DF
yyadeva;095F
z;007A
zaarmenian;0566
zacute;017A
zadeva;095B
zagurmukhi;0A5B
zaharabic;0638
zahfinalarabic;FEC6
zahinitialarabic;FEC7
zahiragana;3056
zahmedialarabic;FEC8
zainarabic;0632
zainfinalarabic;FEB0
zakatakana;30B6
zaqefgadolhebrew;0595
zaqefqatanhebrew;0594
zarqahebrew;0598
zayin;05D6
zayindagesh;FB36
zayindageshhebrew;FB36
zayinhebrew;05D6
zbopomofo;3117
zcaron;017E
zcircle;24E9
zcircumflex;1E91
zcurl;0291
zdot;017C
zdotaccent;017C
zdotbelow;1E93
zecyrillic;0437
zedescendercyrillic;0499
zedieresiscyrillic;04DF
zehiragana;305C
zekatakana;30BC
zero;0030
zeroarabic;0660
zerobengali;09E6
zerodeva;0966
zerogujarati;0AE6
zerogurmukhi;0A66
zerohackarabic;0660
zeroinferior;2080
zeromonospace;FF10
zerooldstyle;F730
zeropersian;06F0
zerosuperior;2070
zerothai;0E50
zerowidthjoiner;FEFF
zerowidthnonjoiner;200C
zerowidthspace;200B
zeta;03B6
zhbopomofo;3113
zhearmenian;056A
zhebrevecyrillic;04C2
zhecyrillic;0436
zhedescendercyrillic;0497
zhedieresiscyrillic;04DD
zihiragana;3058
zikatakana;30B8
zinorhebrew;05AE
zlinebelow;1E95
zmonospace;FF5A
zohiragana;305E
zokatakana;30BE
zparen;24B5
zretroflexhook;0290
zstroke;01B6
zuhiragana;305A
zukatakana;30BA
"""
# string table management
#
class StringTable:
    def __init__( self, name_list, master_table_name ):
        self.names        = name_list
        self.master_table = master_table_name
        self.indices      = {}

        index = 0
        for name in name_list:
            self.indices[name] = index
            index += len( name ) + 1

        self.total = index

    def dump( self, file ):
        write = file.write

        write( "  static const char  " + self.master_table +
               "[" + repr( self.total ) + "] =\n" )
        write( "  {\n" )

        line = ""
        for name in self.names:
            line += "    '"
            line += string.join( ( re.findall( ".", name ) ), "','" )
            line += "', 0,\n"

        write( line + "  };\n\n\n" )

    def dump_sublist( self, file, table_name, macro_name, sublist ):
        write = file.write

        write( "#define " + macro_name + "  " + repr( len( sublist ) ) + "\n\n" )

        write( "  /* Values are offsets into the `" +
               self.master_table + "' table */\n\n" )
        write( "  static const short  " + table_name +
               "[" + macro_name + "] =\n" )
        write( "  {\n" )

        line  = "    "
        comma = ""
        col   = 0

        for name in sublist:
            line += comma
            line += "%4d" % self.indices[name]
            col  += 1
            comma = ","
            if col == 14:
                col   = 0
                comma = ",\n    "

        write( line + "\n  };\n\n\n" )
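# Illustrative example (the table and name list here are hypothetical, not
# part of the generator's real output): for the name list [ "A", "Ab" ] and
# master table name `ft_example_names', dump() would emit C code along
# these lines --
#
#   static const char  ft_example_names[5] =
#   {
#     'A', 0,
#     'A','b', 0,
#   };
#
# and dump_sublist() would then emit the offsets 0 and 2 into that table.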
# We now store the Adobe Glyph List in compressed form.  The list is put
# into a data structure called a `trie' (because it has a tree-like
# appearance).  Consider, for example, that you want to store the
# following name mapping:
#
#   A        => 1
#   Aacute   => 6
#   Abalon   => 2
#   Abstract => 4
#
# It is possible to store the entries as follows.
#
#   A => 1
#   |
#   +-acute => 6
#   |
#   +-b
#     |
#     +-alon => 2
#     |
#     +-stract => 4
#
# We see that each node in the trie has:
#
#   - one or more `letters'
#   - an optional value
#   - zero or more child nodes
#
# The first step is to call
#
#   root = StringNode( "", 0 )
#   for word in map.keys():
#       root.add( word, map[word] )
#
# which creates a large trie where each node holds a single letter.
#
# Executing
#
#   root = root.optimize()
#
# optimizes the trie by merging the letters of successive nodes whenever
# possible.
#
# Each node of the trie is stored as follows.
#
# - First the node's letters, one byte each, according to the following
#   scheme.  We use the fact that in the AGL no name contains character
#   codes > 127.
#
#     name      bitsize  description
#     ----------------------------------------------------------------
#     notlast         1  Set to 1 if this is not the last letter
#                        in the word.
#     ascii           7  The letter's ASCII value.
#
# - The letters are followed by a children count and the value of the
#   current key (if any).  Again we can do some optimization because all
#   AGL entries are from the BMP; this means that 16 bits are sufficient
#   to store their Unicode values.  Additionally, no node has more than
#   127 children.
#
#     name          bitsize  description
#     -----------------------------------------
#     hasvalue            1  Set to 1 if a 16-bit Unicode value follows.
#     num_children        7  Number of children.  Can be 0 only if
#                            `hasvalue' is set to 1.
#     value              16  Optional Unicode value.
#
# - A node is finished by a list of 16-bit absolute offsets to the
#   children, which must be sorted in increasing order of their first
#   letter.
#
# For simplicity, all 16-bit quantities are stored in big-endian order.
#
# The root node has first letter = 0, and no value.
#
class StringNode:
    def __init__( self, letter, value ):
        self.letter   = letter
        self.value    = value
        self.children = {}

    def __cmp__( self, other ):
        return ord( self.letter[0] ) - ord( other.letter[0] )

    def add( self, word, value ):
        if len( word ) == 0:
            self.value = value
            return

        letter = word[0]
        word   = word[1:]

        if letter in self.children:
            child = self.children[letter]
        else:
            child = StringNode( letter, 0 )
            self.children[letter] = child

        child.add( word, value )

    def optimize( self ):
        # optimize all children first
        children      = self.children.values()
        self.children = {}

        for child in children:
            self.children[child.letter[0]] = child.optimize()

        # don't optimize if there's a value,
        # if we don't have any child or if we
        # have more than one child
        if ( self.value != 0 ) or ( not children ) or len( children ) > 1:
            return self

        # merge this node with its single child
        child = children[0]

        self.letter  += child.letter
        self.value    = child.value
        self.children = child.children

        return self

    def dump_debug( self, write, margin ):
        # this is used during debugging
        line = margin + "+-"
        if len( self.letter ) == 0:
            line += "<NOLETTER>"
        else:
            line += self.letter

        if self.value:
            line += " => " + repr( self.value )

        write( line + "\n" )

        if self.children:
            margin += "| "
            for child in self.children.values():
                child.dump_debug( write, margin )

    def locate( self, index ):
        self.index = index

        if len( self.letter ) > 0:
            index += len( self.letter ) + 1
        else:
            index += 2

        if self.value != 0:
            index += 2

        children = self.children.values()
        children.sort()

        index += 2 * len( children )
        for child in children:
            index = child.locate( index )

        return index

    def store( self, storage ):
        # write the letters
        l = len( self.letter )
        if l == 0:
            storage += struct.pack( "B", 0 )
        else:
            for n in range( l ):
                val = ord( self.letter[n] )
                if n < l - 1:
                    val += 128      # set the `notlast' bit
                storage += struct.pack( "B", val )

        # write the count and the optional 16-bit value
        children = self.children.values()
        children.sort()

        count = len( children )
        if self.value != 0:
            storage += struct.pack( "!BH", count + 128, self.value )
        else:
            storage += struct.pack( "B", count )

        # write the children's offsets, then the children themselves
        for child in children:
            storage += struct.pack( "!H", child.index )

        for child in children:
            storage = child.store( storage )

        return storage
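# Worked example (illustrative): a trie holding the single mapping
# "A" => 0x0001 serializes to eight bytes.  locate( 0 ) places the root
# at offset 0 and the `A' node at offset 4, and store( "" ) then yields
#
#   00 01 00 04   root: letter 0, no value, one child at offset 4
#   41 80 00 01   letter `A' (0x41), `hasvalue' set, value 0x0001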
def adobe_glyph_values():
    """return the list of glyph names and their unicode values"""

    lines  = string.split( adobe_glyph_list, '\n' )
    glyphs = []
    values = []

    for line in lines:
        if line:
            fields = string.split( line, ';' )
            # print fields[1] + ' - ' + fields[0]
            subfields = string.split( fields[1], ' ' )
            if len( subfields ) == 1:
                glyphs.append( fields[0] )
                values.append( fields[1] )

    return glyphs, values
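# For example, the AGL line `zero;0030' above contributes "zero" to
# `glyphs' and "0030" to `values', while multi-codepoint entries such as
# `finalkafqamats;05DA 05B8' are skipped because their value field holds
# more than one subfield.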
def filter_glyph_names( alist, filter ):
    """filter `alist' by taking _out_ all glyph names that are in `filter'"""

    extras = []
    for name in alist:
        if name not in filter:
            extras.append( name )

    return extras
def dump_encoding( file, encoding_name, encoding_list ):
    """dump a given encoding"""

    write = file.write
    write( "  /* the following are indices into the SID name table */\n" )
    write( "  static const unsigned short  " + encoding_name +
           "[" + repr( len( encoding_list ) ) + "] =\n" )
    write( "  {\n" )

    line  = "    "
    comma = ""
    col   = 0

    for value in encoding_list:
        line += comma
        line += "%3d" % value
        comma = ","
        col  += 1
        if col == 16:
            col   = 0
            comma = ",\n    "

    write( line + "\n  };\n\n\n" )
def dump_array( the_array, write, array_name ):
    """dump a byte string as a C array of unsigned char"""

    write( "  static const unsigned char  " + array_name +
           "[" + repr( len( the_array ) ) + "L] =\n" )
    write( "  {\n" )

    line  = ""
    comma = "    "
    col   = 0

    for value in the_array:
        line += comma
        line += "%3d" % ord( value )
        comma = ","
        col  += 1
        if col == 16:
            col   = 0
            comma = ",\n    "
        if len( line ) > 1024:
            write( line )
            line = ""

    write( line + "\n  };\n\n\n" )
def main():
    """main program body"""

    if len( sys.argv ) != 2:
        print __doc__ % sys.argv[0]
        sys.exit( 1 )

    file  = open( sys.argv[1], "w" )
    write = file.write

    count_sid = len( sid_standard_names )

    # `mac_extras' contains the list of glyph names in the Macintosh standard
    # encoding which are not in the SID Standard Names.
    #
    mac_extras = filter_glyph_names( mac_standard_names, sid_standard_names )

    # `base_list' contains the names of our final glyph names table.
    # It consists of the `mac_extras' glyph names, followed by the SID
    # standard names.
    #
    mac_extras_count = len( mac_extras )
    base_list        = mac_extras + sid_standard_names

    write( "/***************************************************************************/\n" )
    write( "/*                                                                         */\n" )
    write( "/*  %-71s*/\n" % os.path.basename( sys.argv[1] ) )
    write( "/*                                                                         */\n" )
    write( "/*    PostScript glyph names.                                              */\n" )
    write( "/*                                                                         */\n" )
    write( "/*  Copyright 2005, 2008 by                                                */\n" )
    write( "/*  David Turner, Robert Wilhelm, and Werner Lemberg.                      */\n" )
    write( "/*                                                                         */\n" )
    write( "/*  This file is part of the FreeType project, and may only be used,       */\n" )
    write( "/*  modified, and distributed under the terms of the FreeType project      */\n" )
    write( "/*  license, LICENSE.TXT.  By continuing to use, modify, or distribute     */\n" )
    write( "/*  this file you indicate that you have read the license and              */\n" )
    write( "/*  understand and accept it fully.                                        */\n" )
    write( "/*                                                                         */\n" )
    write( "/***************************************************************************/\n" )
    write( "\n" )
    write( "\n" )
    write( "  /* This file has been generated automatically -- do not edit! */\n" )
    write( "\n" )
    write( "\n" )

    # dump final glyph list (mac extras + sid standard names)
    #
    st = StringTable( base_list, "ft_standard_glyph_names" )

    st.dump( file )
    st.dump_sublist( file, "ft_mac_names",
                     "FT_NUM_MAC_NAMES", mac_standard_names )
    st.dump_sublist( file, "ft_sid_names",
                     "FT_NUM_SID_NAMES", sid_standard_names )

    dump_encoding( file, "t1_standard_encoding", t1_standard_encoding )
    dump_encoding( file, "t1_expert_encoding",   t1_expert_encoding )

    # dump the AGL in its compressed form
    #
    agl_glyphs, agl_values = adobe_glyph_values()
    trie = StringNode( "", 0 )

    for g in range( len( agl_glyphs ) ):
        trie.add( agl_glyphs[g], int( agl_values[g], 16 ) )

    trie = trie.optimize()
    trie_len   = trie.locate( 0 )   # assigns node offsets as a side effect
    trie_array = trie.store( "" )
write( """\
/*
* This table is a compressed version of the Adobe Glyph List (AGL),
* optimized for efficient searching. It has been generated by the
* `glnames.py' python script located in the `src/tools' directory.
*
* The lookup function to get the Unicode value for a given string
* is defined below the table.
*/
#ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST
""" )
dump_array( dict_array, write, "ft_adobe_glyph_list" )
# write the lookup routine now
#
write( """\
/*
* This function searches the compressed table efficiently.
*/
static unsigned long
ft_get_adobe_glyph_index( const char* name,
const char* limit )
{
int c = 0;
int count, min, max;
const unsigned char* p = ft_adobe_glyph_list;
if ( name == 0 || name >= limit )
goto NotFound;
c = *name++;
count = p[1];
p += 2;
min = 0;
max = count;
while ( min < max )
{
int mid = ( min + max ) >> 1;
const unsigned char* q = p + mid * 2;
int c2;
q = ft_adobe_glyph_list + ( ( (int)q[0] << 8 ) | q[1] );
c2 = q[0] & 127;
if ( c2 == c )
{
p = q;
goto Found;
}
if ( c2 < c )
min = mid + 1;
else
max = mid;
}
goto NotFound;
Found:
for (;;)
{
/* assert (*p & 127) == c */
if ( name >= limit )
{
if ( (p[0] & 128) == 0 &&
(p[1] & 128) != 0 )
return (unsigned long)( ( (int)p[2] << 8 ) | p[3] );
goto NotFound;
}
c = *name++;
if ( p[0] & 128 )
{
p++;
if ( c != (p[0] & 127) )
goto NotFound;
continue;
}
p++;
count = p[0] & 127;
if ( p[0] & 128 )
p += 2;
p++;
for ( ; count > 0; count--, p += 2 )
{
int offset = ( (int)p[0] << 8 ) | p[1];
const unsigned char* q = ft_adobe_glyph_list + offset;
if ( c == ( q[0] & 127 ) )
{
p = q;
goto NextIter;
}
}
goto NotFound;
NextIter:
;
}
NotFound:
return 0;
}
#endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */
""" )
    if 0:  # generate unit test, or don't
        #
        # now write the unit test to check that everything works OK
        #
        write( "#ifdef TEST\n\n" )
        write( "static const char* const  the_names[] = {\n" )
        for name in agl_glyphs:
            write( '  "' + name + '",\n' )
        write( "  0\n};\n" )

        write( "static const unsigned long  the_values[] = {\n" )
        for val in agl_values:
            write( '  0x' + val + ',\n' )
        write( "  0\n};\n" )

        write( """
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

  int
  main( void )
  {
    int                   result = 0;
    const char* const*    names  = the_names;
    const unsigned long*  values = the_values;


    for ( ; *names; names++, values++ )
    {
      const char*    name      = *names;
      unsigned long  reference = *values;
      unsigned long  value;


      value = ft_get_adobe_glyph_index( name, name + strlen( name ) );
      if ( value != reference )
      {
        result = 1;
        fprintf( stderr, "name '%s' => %04lx instead of %04lx\\n",
                 name, value, reference );
      }
    }

    return result;
  }
""" )
        write( "#endif /* TEST */\n" )

    write( "\n/* END */\n" )
# Now run the main routine
#
main()
# END
| jiayaoqijia/apv | pdfview/jni/freetype/src/tools/glnames.py | Python | gpl-3.0 | 103316 | ["FEFF"] | 2aff82b00959211e4ef7debff224c261364d5414208a57422643010a3b4217de |
# -*- coding: utf-8 -*-
try:
import configparser
except ImportError:
# Due to PY27 compatibility
import ConfigParser as configparser
import os
from warnings import warn
from chemopt.utilities._decorators import Substitution
values = {}
values['hamiltonian'] = {'SCF', 'MP2', 'B3LYP', 'CCSD', 'CCSD(T)',
'RASSCF', 'CASPT2'}
values['backend'] = {'molpro', 'molcas'}
fixed_defaults = {}
fixed_defaults['charge'] = 0
fixed_defaults['multiplicity'] = 1
fixed_defaults['forces'] = False
fixed_defaults['wfn_symmetry'] = 1
fixed_defaults['title'] = 'Chemopt optimisation'
fixed_defaults['etol'] = 1e-6
fixed_defaults['gtol'] = 6e-4
fixed_defaults['max_iter'] = 100
fixed_defaults['coord_fmt'] = '.4f'
def _give_default_file_path():
HOME = os.path.expanduser('~')
filepath = os.path.join(HOME, '.chemoptrc')
return filepath
def provide_defaults():
settings = {}
settings['defaults'] = {}
settings['defaults']['backend'] = 'molcas'
settings['defaults']['num_procs'] = 1
settings['defaults']['num_threads'] = 1
settings['defaults']['mem_per_proc'] = '150MB'
settings['defaults']['molpro_exe'] = 'molpro'
settings['defaults']['molcas_exe'] = 'molcas'
return settings
def write_configuration_file(filepath=_give_default_file_path(),
overwrite=False):
"""Create a configuration file.
Writes the current state of defaults into a configuration file.
.. note:: Since a file is permanently written, this function
is, strictly speaking, not side-effect free.
Args:
filepath (str): Where to write the file.
The default is under both UNIX and Windows ``~/.chemoptrc``.
overwrite (bool): If True, overwrite an existing file. The default is False.
Returns:
None:
"""
config = configparser.ConfigParser()
config.read_dict(settings)
if os.path.isfile(filepath) and not overwrite:
try:
raise FileExistsError
except NameError: # because of python2
warn('File exists already and overwrite is False (default).')
else:
with open(filepath, 'w') as configfile:
config.write(configfile)
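# Illustrative sketch (not part of the original module): with the defaults
# above, write_configuration_file() produces a ~/.chemoptrc along the lines of
#
# [defaults]
# backend = molcas
# num_procs = 1
# num_threads = 1
# mem_per_proc = 150MB
# molpro_exe = molpro
# molcas_exe = molcas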
def read_configuration_file(settings, filepath=_give_default_file_path()):
"""Read the configuration file.
.. note:: This function changes ``settings`` in place and is
therefore not side-effect free.
Args:
settings (dict): The settings dictionary to update in place.
filepath (str): Where to read the file.
The default is under both UNIX and Windows ``~/.chemoptrc``.
Returns:
None:
"""
config = configparser.ConfigParser()
config.read(filepath)
def get_correct_type(section, key, config):
"""Gives e.g. the boolean True for the string 'True'"""
def getstring(section, key, config):
return config[section][key]
def getinteger(section, key, config): # pylint:disable=unused-variable
return config[section].getint(key)
def getboolean(section, key, config):
return config[section].getboolean(key)
def getfloat(section, key, config): # pylint:disable=unused-variable
return config[section].getfloat(key)
special_actions = {} # Something different than a string is expected
special_actions['defaults'] = {}
special_actions['defaults']['num_procs'] = getinteger
special_actions['defaults']['num_threads'] = getinteger
try:
return special_actions[section][key](section, key, config)
except KeyError:
return getstring(section, key, config)
for section in config.sections():
for k in config[section]:
settings[section][k] = get_correct_type(section, k, config)
return settings
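# The module-level calls below run at import time: the hard-coded defaults
# are created first, then any values found in ~/.chemoptrc override them.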
settings = provide_defaults()
read_configuration_file(settings)
conf_defaults = settings['defaults']
def get_docstr(key, defaults):
return "The default is '{}'. The allowed values are {}".format(
defaults[key], values[key])
docstring = {}
docstring['hamiltonian'] = "The hamiltonian to use for calculating the \
electronic energy. The allowed values are {}.\n".format(values['hamiltonian'])
docstring['basis'] = "The basis set to use for calculating \
the electronic energy."
docstring['multiplicity'] = "The spin multiplicity. \
The default is {}.\n".format(fixed_defaults['multiplicity'])
docstring['charge'] = "The overall charge of the molecule. \
The default is {}.\n".format(fixed_defaults['charge'])
docstring['forces'] = "Specify if energy gradients should be calculated. \
The default is {}.".format(fixed_defaults['forces'])
docstring['el_calc_input'] = "Specify the input filename for \
electronic calculations. \
If it is None, the filename of the calling python script is used \
(With the suffix ``.inp`` instead of ``.py``). \
The output will be ``os.path.splitext(inputfile)[0] + '.inp'``.\n"
docstring['md_out'] = "Specify the output filename for \
chemopt output files. \
If it is None, the filename of the calling python script is used \
(With the suffix ``.md`` instead of ``.py``). \
The output will be ``os.path.splitext(inputfile)[0] + '.md'``.\n"
docstring['molden_out'] = "Specify the output filename for \
the molden file from a geometry optimisation. \
If it is None, the filename of the calling python script is used \
(With the suffix ``.molden`` instead of ``.py``). \
The output will be ``os.path.splitext(inputfile)[0] + '.molden'``.\n"
docstring['backend'] = "Specify which QM program suite shoud be used. \
Allowed values are {}, \
the default is '{}'.\n".format(values['backend'], conf_defaults['backend'])
docstring['molpro_exe'] = "Specify the command to invoke molpro. \
The default is '{}'.\n".format(conf_defaults['molpro_exe'])
docstring['molcas_exe'] = "Specify the command to invoke molcas. \
The default is '{}'.\n".format(conf_defaults['molcas_exe'])
docstring['title'] = "The title to be printed in input and output.\n"
docstring['start_orb'] = "Path to an orbital file, \
if starting orbitals should be used."
docstring['wfn_symmetry'] = "The symmetry of the wavefunction specified \
with the molpro \
`notation <https://www.molpro.net/info/2015.1/doc/manual/node36.html>`_.\n"
docstring['etol'] = "Convergence criterium for the energy."
docstring['gtol'] = "Convergence criterium for the gradient."
docstring['max_iter'] = "Maximum number of iterations. The default is \
'{}'.".format(fixed_defaults['max_iter'])
docstring['num_procs'] = "The number of processes to spawn."
docstring['num_threads'] = "Currently not Implemented"
docstring['mem_per_proc'] = "Memory per process. \
This is a string with a number and a unit like '800 MB'. \
SI and binary prefixes are supported. \
Uses the `datasize library <https://pypi.python.org/pypi/datasize>`_ \
for parsing."
docstring['coord_fmt'] = "A string as float formatter for the coordinates \
in the output file of chemopt. \
The default is '{}'".format(fixed_defaults['coord_fmt'])
substitute_docstr = Substitution(**docstring)
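# `substitute_docstr` is presumably applied as a decorator elsewhere in
# chemopt to interpolate the docstring fragments above into function
# docstrings.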
|
mcocdawc/chemopt
|
src/chemopt/configuration.py
|
Python
|
lgpl-3.0
| 7,007
|
[
"MOLCAS",
"Molpro"
] |
1c1c404b785325ecb07a17e4d7be7e1e8fa8822f80190e2c63af5895533e94ff
|
"""
simple, elegant templating
(part of web.py)
Template design:
Template string is split into tokens and the tokens are combined into nodes.
Parse tree is a nodelist. TextNode and ExpressionNode are simple nodes, while
for-loops, if-blocks etc. are block nodes, which contain multiple child nodes.
Each node can emit some Python string. The Python string emitted by the
root node is validated for safe evaluation and executed using Python in the given environment.
Enough care is taken to make sure the generated code and the template have a line-to-line match,
so that error messages can point to the exact line number in the template. (It still doesn't work in some cases.)
Grammar:
template -> defwith sections
defwith -> '$def with (' arguments ')' | ''
sections -> section*
section -> block | assignment | line
assignment -> '$ ' <assignment expression>
line -> (text|expr)*
text -> <any characters other than $>
expr -> '$' pyexpr | '$(' pyexpr ')' | '${' pyexpr '}'
pyexpr -> <python expression>
"""
import ast
import glob
import os
import sys
import tokenize
from io import open
import builtins
from .net import websafe
from .utils import re_compile, safestr, safeunicode, storage
from .webapi import config
__all__ = [
"Template",
"Render",
"render",
"frender",
"ParseError",
"SecurityError",
"test",
]
from collections.abc import MutableMapping
def splitline(text):
r"""
Splits the given text at newline.
>>> splitline('foo\nbar')
('foo\n', 'bar')
>>> splitline('foo')
('foo', '')
>>> splitline('')
('', '')
"""
index = text.find("\n") + 1
if index:
return text[:index], text[index:]
else:
return text, ""
class Parser:
"""Parser Base."""
def __init__(self):
self.statement_nodes = STATEMENT_NODES
self.keywords = KEYWORDS
def parse(self, text, name="<template>"):
self.text = text
self.name = name
defwith, text = self.read_defwith(text)
suite = self.read_suite(text)
return DefwithNode(defwith, suite)
def read_defwith(self, text):
if text.startswith("$def with"):
defwith, text = splitline(text)
defwith = defwith[1:].strip() # strip $ and spaces
return defwith, text
else:
return "", text
def read_section(self, text):
r"""Reads one section from the given text.
section -> block | assignment | line
>>> read_section = Parser().read_section
>>> read_section('foo\nbar\n')
(<line: [t'foo\n']>, 'bar\n')
>>> read_section('$ a = b + 1\nfoo\n')
(<assignment: 'a = b + 1'>, 'foo\n')
read_section('$for i in range(10):\n    hello $i\nfoo')
"""
if text.lstrip(" ").startswith("$"):
index = text.index("$")
begin_indent, text2 = text[:index], text[index + 1 :]
ahead = self.python_lookahead(text2)
if ahead == "var":
return self.read_var(text2)
elif ahead in self.statement_nodes:
return self.read_block_section(text2, begin_indent)
elif ahead in self.keywords:
return self.read_keyword(text2)
elif ahead.strip() == "":
# assignment starts with a space after $
# ex: $ a = b + 2
return self.read_assignment(text2)
return self.readline(text)
def read_var(self, text):
r"""Reads a var statement.
>>> read_var = Parser().read_var
>>> read_var('var x=10\nfoo')
(<var: x = 10>, 'foo')
>>> read_var('var x: hello $name\nfoo')
(<var: x = join_(u'hello ', escape_(name, True))>, 'foo')
"""
line, text = splitline(text)
tokens = self.python_tokens(line)
if len(tokens) < 4:
raise SyntaxError("Invalid var statement")
name = tokens[1]
sep = tokens[2]
value = line.split(sep, 1)[1].strip()
if sep == "=":
pass # no need to process value
elif sep == ":":
# @@ Hack for backward-compatibility
if tokens[3] == "\n": # multi-line var statement
block, text = self.read_indented_block(text, "    ")
lines = [self.readline(x)[0] for x in block.splitlines()]
nodes = []
for x in lines:
nodes.extend(x.nodes)
nodes.append(TextNode("\n"))
else: # single-line var statement
linenode, _ = self.readline(value)
nodes = linenode.nodes
parts = [node.emit("") for node in nodes]
value = "join_(%s)" % ", ".join(parts)
else:
raise SyntaxError("Invalid var statement")
return VarNode(name, value), text
def read_suite(self, text):
r"""Reads section by section till end of text.
>>> read_suite = Parser().read_suite
>>> read_suite('hello $name\nfoo\n')
[<line: [t'hello ', $name, t'\n']>, <line: [t'foo\n']>]
"""
sections = []
while text:
section, text = self.read_section(text)
sections.append(section)
return SuiteNode(sections)
def readline(self, text):
r"""Reads one line from the text. Newline is suppressed if the line ends with \.
>>> readline = Parser().readline
>>> readline('hello $name!\nbye!')
(<line: [t'hello ', $name, t'!\n']>, 'bye!')
>>> readline('hello $name!\\\nbye!')
(<line: [t'hello ', $name, t'!']>, 'bye!')
>>> readline('$f()\n\n')
(<line: [$f(), t'\n']>, '\n')
"""
line, text = splitline(text)
# suppress new line if line ends with \
if line.endswith("\\\n"):
line = line[:-2]
nodes = []
while line:
node, line = self.read_node(line)
nodes.append(node)
return LineNode(nodes), text
def read_node(self, text):
r"""Reads a node from the given text and returns the node and remaining text.
>>> read_node = Parser().read_node
>>> read_node('hello $name')
(t'hello ', '$name')
>>> read_node('$name')
($name, '')
"""
if text.startswith("$$"):
return TextNode("$"), text[2:]
elif text.startswith("$#"): # comment
line, text = splitline(text)
return TextNode("\n"), text
elif text.startswith("$"):
text = text[1:] # strip $
if text.startswith(":"):
escape = False
text = text[1:] # strip :
else:
escape = True
return self.read_expr(text, escape=escape)
else:
return self.read_text(text)
def read_text(self, text):
r"""Reads a text node from the given text.
>>> read_text = Parser().read_text
>>> read_text('hello $name')
(t'hello ', '$name')
"""
index = text.find("$")
if index < 0:
return TextNode(text), ""
else:
return TextNode(text[:index]), text[index:]
def read_keyword(self, text):
line, text = splitline(text)
return StatementNode(line.strip() + "\n"), text
def read_expr(self, text, escape=True):
"""Reads a python expression from the text and returns the expression and remaining text.
expr -> simple_expr | paren_expr
simple_expr -> id extended_expr
extended_expr -> attr_access | paren_expr extended_expr | ''
attr_access -> dot id extended_expr
paren_expr -> [ tokens ] | ( tokens ) | { tokens }
>>> read_expr = Parser().read_expr
>>> read_expr("name")
($name, '')
>>> read_expr("a.b and c")
($a.b, ' and c')
>>> read_expr("a. b")
($a, '. b')
>>> read_expr("name</h1>")
($name, '</h1>')
>>> read_expr("(limit)ing")
($(limit), 'ing')
>>> read_expr('a[1, 2][:3].f(1+2, "weird string[).", 3 + 4) done.')
($a[1, 2][:3].f(1+2, "weird string[).", 3 + 4), ' done.')
"""
def simple_expr():
identifier()
extended_expr()
def identifier():
next(tokens)
def extended_expr():
lookahead = tokens.lookahead()
if lookahead is None:
return
elif lookahead.value == ".":
attr_access()
elif lookahead.value in parens:
paren_expr()
extended_expr()
else:
return
def attr_access():
from token import NAME # python token constants
if tokens.lookahead2().type == NAME:
next(tokens) # consume dot
identifier()
extended_expr()
def paren_expr():
begin = next(tokens).value
end = parens[begin]
while True:
if tokens.lookahead().value in parens:
paren_expr()
else:
t = next(tokens)
if t.value == end:
break
return
parens = {"(": ")", "[": "]", "{": "}"}
def get_tokens(text):
"""tokenize text using python tokenizer.
Python tokenizer ignores spaces, but they might be important in some cases.
This function introduces dummy space tokens when it identifies any ignored space.
Each token is a storage object containing type, value, begin and end.
"""
i = iter([text])
readline = lambda: next(i)
end = None
for t in tokenize.generate_tokens(readline):
t = storage(type=t[0], value=t[1], begin=t[2], end=t[3])
if end is not None and end != t.begin:
_, x1 = end
_, x2 = t.begin
yield storage(type=-1, value=text[x1:x2], begin=end, end=t.begin)
end = t.end
yield t
class BetterIter:
"""Iterator like object with 2 support for 2 look aheads."""
def __init__(self, items):
self.iteritems = iter(items)
self.items = []
self.position = 0
self.current_item = None
def lookahead(self):
if len(self.items) <= self.position:
self.items.append(self._next())
return self.items[self.position]
def _next(self):
try:
return next(self.iteritems)
except StopIteration:
return None
def lookahead2(self):
if len(self.items) <= self.position + 1:
self.items.append(self._next())
return self.items[self.position + 1]
def __next__(self):
self.current_item = self.lookahead()
self.position += 1
return self.current_item
tokens = BetterIter(get_tokens(text))
if tokens.lookahead().value in parens:
paren_expr()
else:
simple_expr()
row, col = tokens.current_item.end
return ExpressionNode(text[:col], escape=escape), text[col:]
def read_assignment(self, text):
r"""Reads assignment statement from text.
>>> read_assignment = Parser().read_assignment
>>> read_assignment('a = b + 1\nfoo')
(<assignment: 'a = b + 1'>, 'foo')
"""
line, text = splitline(text)
return AssignmentNode(line.strip()), text
def python_lookahead(self, text):
"""Returns the first python token from the given text.
>>> python_lookahead = Parser().python_lookahead
>>> python_lookahead('for i in range(10):')
'for'
>>> python_lookahead('else:')
'else'
>>> python_lookahead(' x = 1')
' '
"""
i = iter([text])
readline = lambda: next(i)
tokens = tokenize.generate_tokens(readline)
return next(tokens)[1]
def python_tokens(self, text):
i = iter([text])
readline = lambda: next(i)
tokens = tokenize.generate_tokens(readline)
return [t[1] for t in tokens]
def read_indented_block(self, text, indent):
r"""Read a block of text. A block is what typically follows a for or it statement.
It can be in the same line as that of the statement or an indented block.
>>> read_indented_block = Parser().read_indented_block
>>> read_indented_block(' a\n b\nc', ' ')
('a\nb\n', 'c')
>>> read_indented_block(' a\n b\n c\nd', ' ')
('a\n b\nc\n', 'd')
>>> read_indented_block(' a\n\n b\nc', ' ')
('a\n\n b\n', 'c')
"""
if indent == "":
return "", text
block = ""
while text:
line, text2 = splitline(text)
if line.strip() == "":
block += "\n"
elif line.startswith(indent):
block += line[len(indent) :]
else:
break
text = text2
return block, text
def read_statement(self, text):
r"""Reads a python statement.
>>> read_statement = Parser().read_statement
>>> read_statement('for i in range(10): hello $name')
('for i in range(10):', ' hello $name')
"""
tok = PythonTokenizer(text)
tok.consume_till(":")
return text[: tok.index], text[tok.index :]
def read_block_section(self, text, begin_indent=""):
r"""
>>> read_block_section = Parser().read_block_section
>>> read_block_section('for i in range(10): hello $i\nfoo')
(<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, 'foo')
>>> read_block_section('for i in range(10):\n hello $i\n foo', begin_indent=' ')
(<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, ' foo')
>>> read_block_section('for i in range(10):\n hello $i\nfoo')
(<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, 'foo')
With inline comment:
>>> read_block_section('for i in range(10): $# inline comment\n hello $i\nfoo')
(<block: 'for i in range(10):', []>, ' hello $i\nfoo')
"""
line, text = splitline(text)
stmt, line = self.read_statement(line)
keyword = self.python_lookahead(stmt)
# if there is something left in the line
if line.strip() and not line.lstrip().startswith("$#"):
block = line.lstrip()
else:
def find_indent(text):
rx = re_compile(" +")
match = rx.match(text)
first_indent = match and match.group(0)
return first_indent or ""
# find the indentation of the block by looking at the first line
first_indent = find_indent(text)[len(begin_indent) :]
# TODO: fix this special case
if keyword == "code":
indent = begin_indent + first_indent
else:
indent = begin_indent + min(first_indent, INDENT)
block, text = self.read_indented_block(text, indent)
return self.create_block_node(keyword, stmt, block, begin_indent), text
def create_block_node(self, keyword, stmt, block, begin_indent):
if keyword in self.statement_nodes:
return self.statement_nodes[keyword](stmt, block, begin_indent)
else:
raise ParseError("Unknown statement: %s" % repr(keyword))
class PythonTokenizer:
"""Utility wrapper over python tokenizer."""
def __init__(self, text):
self.text = text
i = iter([text])
readline = lambda: next(i)
self.tokens = tokenize.generate_tokens(readline)
self.index = 0
def consume_till(self, delim):
"""Consumes tokens till colon.
>>> tok = PythonTokenizer('for i in range(10): hello $i')
>>> tok.consume_till(':')
>>> tok.text[:tok.index]
'for i in range(10):'
>>> tok.text[tok.index:]
' hello $i'
"""
try:
while True:
t = next(self)
if t.value == delim:
break
elif t.value == "(":
self.consume_till(")")
elif t.value == "[":
self.consume_till("]")
elif t.value == "{":
self.consume_till("}")
# if end of line is found, it is an exception.
# Since there is no easy way to report the line number,
# leave the error reporting to the python parser later
# @@ This should be fixed.
if t.value == "\n":
break
except:
# raise ParseError, "Expected %s, found end of line." % repr(delim)
# raising ParseError doesn't show the line number.
# if this error is ignored, then it will be caught when compiling the python code.
return
def __next__(self):
type, t, begin, end, line = next(self.tokens)
row, col = end
self.index = col
return storage(type=type, value=t, begin=begin, end=end)
class DefwithNode:
def __init__(self, defwith, suite):
if defwith:
self.defwith = defwith.replace("with", "__template__") + ":"
# offset 4 lines. for encoding, __lineoffset__, loop and self.
self.defwith += "\n __lineoffset__ = -4"
else:
self.defwith = "def __template__():"
# offset 4 lines for encoding, __template__, __lineoffset__, loop and self.
self.defwith += "\n __lineoffset__ = -5"
self.defwith += "\n loop = ForLoop()"
self.defwith += "\n self = TemplateResult(); extend_ = self.extend"
self.suite = suite
self.end = "\n return self"
def emit(self, indent):
encoding = "# coding: utf-8\n"
return encoding + self.defwith + self.suite.emit(indent + INDENT) + self.end
def __repr__(self):
return "<defwith: %s, %s>" % (self.defwith, self.suite)
class TextNode:
def __init__(self, value):
self.value = value
def emit(self, indent, begin_indent=""):
return repr(safeunicode(self.value))
def __repr__(self):
return "t" + repr(self.value)
class ExpressionNode:
def __init__(self, value, escape=True):
self.value = value.strip()
# convert ${...} to $(...)
if value.startswith("{") and value.endswith("}"):
self.value = "(" + self.value[1:-1] + ")"
self.escape = escape
def emit(self, indent, begin_indent=""):
return "escape_(%s, %s)" % (self.value, bool(self.escape))
def __repr__(self):
if self.escape:
escape = ""
else:
escape = ":"
return "$%s%s" % (escape, self.value)
class AssignmentNode:
def __init__(self, code):
self.code = code
def emit(self, indent, begin_indent=""):
return indent + self.code + "\n"
def __repr__(self):
return "<assignment: %s>" % repr(self.code)
class LineNode:
def __init__(self, nodes):
self.nodes = nodes
def emit(self, indent, text_indent="", name=""):
text = [node.emit("") for node in self.nodes]
if text_indent:
text = [repr(text_indent)] + text
return indent + "extend_([%s])\n" % ", ".join(text)
def __repr__(self):
return "<line: %s>" % repr(self.nodes)
INDENT = " " # 4 spaces
class BlockNode:
def __init__(self, stmt, block, begin_indent=""):
self.stmt = stmt
self.suite = Parser().read_suite(block)
self.begin_indent = begin_indent
def emit(self, indent, text_indent=""):
text_indent = self.begin_indent + text_indent
out = indent + self.stmt + self.suite.emit(indent + INDENT, text_indent)
return out
def __repr__(self):
return "<block: %s, %s>" % (repr(self.stmt), repr(self.suite))
class ForNode(BlockNode):
def __init__(self, stmt, block, begin_indent=""):
self.original_stmt = stmt
tok = PythonTokenizer(stmt)
tok.consume_till("in")
a = stmt[: tok.index] # for i in
b = stmt[tok.index : -1] # rest of for stmt excluding :
stmt = a + " loop.setup(" + b.strip() + "):"
BlockNode.__init__(self, stmt, block, begin_indent)
def __repr__(self):
return "<block: %s, %s>" % (repr(self.original_stmt), repr(self.suite))
class CodeNode:
def __init__(self, stmt, block, begin_indent=""):
# compensate one line for $code:
self.code = "\n" + block
def emit(self, indent, text_indent=""):
import re
rx = re.compile("^", re.M)
return rx.sub(indent, self.code).rstrip(" ")
def __repr__(self):
return "<code: %s>" % repr(self.code)
class StatementNode:
def __init__(self, stmt):
self.stmt = stmt
def emit(self, indent, begin_indent=""):
return indent + self.stmt
def __repr__(self):
return "<stmt: %s>" % repr(self.stmt)
class IfNode(BlockNode):
pass
class ElseNode(BlockNode):
pass
class ElifNode(BlockNode):
pass
class DefNode(BlockNode):
def __init__(self, *a, **kw):
BlockNode.__init__(self, *a, **kw)
code = CodeNode("", "")
code.code = "self = TemplateResult(); extend_ = self.extend\n"
self.suite.sections.insert(0, code)
code = CodeNode("", "")
code.code = "return self\n"
self.suite.sections.append(code)
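# The "__lineoffset__ -= 3" emitted below appears to compensate for the
# helper sections injected into the suite above, keeping generated line
# numbers aligned with the template source.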
def emit(self, indent, text_indent=""):
text_indent = self.begin_indent + text_indent
out = indent + self.stmt + self.suite.emit(indent + INDENT, text_indent)
return indent + "__lineoffset__ -= 3\n" + out
class VarNode:
def __init__(self, name, value):
self.name = name
self.value = value
def emit(self, indent, text_indent):
return indent + "self[%s] = %s\n" % (repr(self.name), self.value)
def __repr__(self):
return "<var: %s = %s>" % (self.name, self.value)
class SuiteNode:
"""Suite is a list of sections."""
def __init__(self, sections):
self.sections = sections
def emit(self, indent, text_indent=""):
return "\n" + "".join([s.emit(indent, text_indent) for s in self.sections])
def __repr__(self):
return repr(self.sections)
STATEMENT_NODES = {
"for": ForNode,
"while": BlockNode,
"if": IfNode,
"elif": ElifNode,
"else": ElseNode,
"def": DefNode,
"code": CodeNode,
}
KEYWORDS = ["pass", "break", "continue", "return"]
TEMPLATE_BUILTIN_NAMES = [
"dict",
"enumerate",
"float",
"int",
"bool",
"list",
"long",
"reversed",
"set",
"slice",
"tuple",
"xrange",
"abs",
"all",
"any",
"callable",
"chr",
"cmp",
"divmod",
"filter",
"hex",
"id",
"isinstance",
"iter",
"len",
"max",
"min",
"oct",
"ord",
"pow",
"range",
"round",
"True",
"False",
"None",
"__import__", # some c-libraries like datetime requires __import__ to present in the namespace
]
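# Names that no longer exist in Python 3 builtins (e.g. long, xrange, cmp)
# are filtered out by the `if name in builtins.__dict__` guard below.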
TEMPLATE_BUILTINS = dict(
[
(name, getattr(builtins, name))
for name in TEMPLATE_BUILTIN_NAMES
if name in builtins.__dict__
]
)
class ForLoop:
"""
Wrapper for expression in for statement to support loop.xxx helpers.
>>> loop = ForLoop()
>>> for x in loop.setup(['a', 'b', 'c']):
... print(loop.index, loop.revindex, loop.parity, x)
...
1 3 odd a
2 2 even b
3 1 odd c
>>> loop.index
Traceback (most recent call last):
...
AttributeError: index
"""
def __init__(self):
self._ctx = None
def __getattr__(self, name):
if self._ctx is None:
raise AttributeError(name)
else:
return getattr(self._ctx, name)
def setup(self, seq):
self._push()
return self._ctx.setup(seq)
def _push(self):
self._ctx = ForLoopContext(self, self._ctx)
def _pop(self):
self._ctx = self._ctx.parent
class ForLoopContext:
"""Stackable context for ForLoop to support nested for loops."""
def __init__(self, forloop, parent):
self._forloop = forloop
self.parent = parent
def setup(self, seq):
try:
self.length = len(seq)
except:
self.length = 0
self.index = 0
for a in seq:
self.index += 1
yield a
self._forloop._pop()
index0 = property(lambda self: self.index - 1)
first = property(lambda self: self.index == 1)
last = property(lambda self: self.index == self.length)
odd = property(lambda self: self.index % 2 == 1)
even = property(lambda self: self.index % 2 == 0)
parity = property(lambda self: ["odd", "even"][self.even])
revindex0 = property(lambda self: self.length - self.index)
revindex = property(lambda self: self.length - self.index + 1)
class BaseTemplate:
def __init__(self, code, filename, filter, globals, builtins):
self.filename = filename
self.filter = filter
self._globals = globals
self._builtins = builtins
if code:
self.t = self._compile(code)
else:
self.t = lambda: ""
def _compile(self, code):
env = self.make_env(self._globals or {}, self._builtins)
exec(code, env)
# __template__ is a global function declared when executing "code"
return env["__template__"]
def __call__(self, *a, **kw):
__hidetraceback__ = True # noqa: F841
return self.t(*a, **kw)
def make_env(self, globals, builtins):
return dict(
globals,
__builtins__=builtins,
ForLoop=ForLoop,
TemplateResult=TemplateResult,
escape_=self._escape,
join_=self._join,
)
def _join(self, *items):
return u"".join(items)
def _escape(self, value, escape=False):
if value is None:
value = ""
value = safeunicode(value)
if escape and self.filter:
value = self.filter(value)
return value
class Template(BaseTemplate):
CONTENT_TYPES = {
".html": "text/html; charset=utf-8",
".xhtml": "application/xhtml+xml; charset=utf-8",
".txt": "text/plain",
}
FILTERS = {".html": websafe, ".xhtml": websafe, ".xml": websafe}
globals = {}
def __init__(
self,
text,
filename="<template>",
filter=None,
globals=None,
builtins=None,
extensions=None,
):
self.extensions = extensions or []
text = Template.normalize_text(text)
code = self.compile_template(text, filename)
_, ext = os.path.splitext(filename)
filter = filter or self.FILTERS.get(ext, None)
self.content_type = self.CONTENT_TYPES.get(ext, None)
if globals is None:
globals = self.globals
if builtins is None:
builtins = TEMPLATE_BUILTINS
BaseTemplate.__init__(
self,
code=code,
filename=filename,
filter=filter,
globals=globals,
builtins=builtins,
)
def __repr__(self):
"""
>>> Template(text='Template text', filename='burndown_chart.html')
<Template burndown_chart.html>
"""
return "<{} {}>".format(self.__class__.__name__, self.filename)
def normalize_text(text):
"""Normalizes template text by correcting \r\n, tabs and BOM chars."""
text = text.replace("\r\n", "\n").replace("\r", "\n").expandtabs()
if not text.endswith("\n"):
text += "\n"
# ignore BOM chars at the beginning of template
BOM = "\xef\xbb\xbf"
if isinstance(text, str) and text.startswith(BOM):
text = text[len(BOM) :]
# support \$ for backward-compatibility
text = text.replace(r"\$", "$$")
return text
normalize_text = staticmethod(normalize_text)
def __call__(self, *a, **kw):
__hidetraceback__ = True # noqa: F841
from . import webapi as web
if "headers" in web.ctx and self.content_type:
web.header("Content-Type", self.content_type, unique=True)
return BaseTemplate.__call__(self, *a, **kw)
def generate_code(text, filename, parser=None):
# parse the text
parser = parser or Parser()
rootnode = parser.parse(text, filename)
# generate python code from the parse tree
code = rootnode.emit(indent="").strip()
return safestr(code)
generate_code = staticmethod(generate_code)
def create_parser(self):
p = Parser()
for ext in self.extensions:
p = ext(p)
return p
def compile_template(self, template_string, filename):
code = Template.generate_code(
template_string, filename, parser=self.create_parser()
)
def get_source_line(filename, lineno):
try:
lines = open(filename, encoding="utf-8").read().splitlines()
return lines[lineno]
except:
return None
try:
# compile the code first to report the errors, if any, with the filename
compiled_code = compile(code, filename, "exec")
except SyntaxError as err:
# display template line that caused the error along with the traceback.
err.msg += "\n\nTemplate traceback:\n File %s, line %s\n %s" % (
repr(err.filename),
err.lineno,
get_source_line(err.filename, err.lineno - 1),
)
raise
# make sure code is safe
ast_node = ast.parse(code, filename)
SafeVisitor().walk(ast_node, filename)
return compiled_code
class CompiledTemplate(Template):
def __init__(self, f, filename):
Template.__init__(self, "", filename)
self.t = f
def compile_template(self, *a):
return None
def _compile(self, *a):
return None
class Render:
"""The most preferred way of using templates.
render = web.template.render('templates')
print render.foo()
Optional parameter can be `base` can be used to pass output of
every template through the base template.
render = web.template.render('templates', base='layout')
"""
def __init__(self, loc="templates", cache=None, base=None, **keywords):
self._loc = loc
self._keywords = keywords
if cache is None:
cache = not config.get("debug", False)
if cache:
self._cache = {}
else:
self._cache = None
if base and not hasattr(base, "__call__"):
# make base a function, so that it can be passed to sub-renders
self._base = lambda page: self._template(base)(page)
else:
self._base = base
def _add_global(self, obj, name=None):
"""Add a global to this rendering instance."""
if "globals" not in self._keywords:
self._keywords["globals"] = {}
if not name:
name = obj.__name__
self._keywords["globals"][name] = obj
def _lookup(self, name):
path = os.path.join(self._loc, name)
if os.path.isdir(path):
return "dir", path
else:
path = self._findfile(path)
if path:
return "file", path
else:
return "none", None
def _load_template(self, name):
kind, path = self._lookup(name)
if kind == "dir":
return Render(
path, cache=self._cache is not None, base=self._base, **self._keywords
)
elif kind == "file":
with open(path, encoding="utf-8") as tmpl_file:
return Template(tmpl_file.read(), filename=path, **self._keywords)
else:
raise AttributeError("No template named " + name)
def _findfile(self, path_prefix):
p = [
f for f in glob.glob(path_prefix + ".*") if not f.endswith("~")
] # skip backup files
p.sort() # sort the matches for deterministic order
# support templates without extension (#364)
# When no templates are found and a file is found with the exact name, use it.
if not p and os.path.exists(path_prefix):
p = [path_prefix]
return p and p[0]
def _template(self, name):
if self._cache is not None:
if name not in self._cache:
self._cache[name] = self._load_template(name)
return self._cache[name]
else:
return self._load_template(name)
def __getattr__(self, name):
t = self._template(name)
if self._base and isinstance(t, Template):
def template(*a, **kw):
return self._base(t(*a, **kw))
return template
else:
return self._template(name)
class GAE_Render(Render):
# Render gets over-written. make a copy here.
super = Render
def __init__(self, loc, *a, **kw):
GAE_Render.super.__init__(self, loc, *a, **kw)
import types
if isinstance(loc, types.ModuleType):
self.mod = loc
else:
name = loc.rstrip("/").replace("/", ".")
self.mod = __import__(name, None, None, ["x"])
self.mod.__dict__.update(kw.get("builtins", TEMPLATE_BUILTINS))
self.mod.__dict__.update(Template.globals)
self.mod.__dict__.update(kw.get("globals", {}))
def _load_template(self, name):
t = getattr(self.mod, name)
import types
if isinstance(t, types.ModuleType):
return GAE_Render(
t, cache=self._cache is not None, base=self._base, **self._keywords
)
else:
return t
render = Render
# setup render for Google App Engine.
try:
from google import appengine # noqa: F401
render = Render = GAE_Render
except ImportError:
pass
def frender(path, **keywords):
"""Creates a template from the given file path."""
return Template(open(path, encoding="utf-8").read(), filename=path, **keywords)
def compile_templates(root):
"""Compiles templates to python code."""
for dirpath, dirnames, filenames in os.walk(root):
filenames = [
f
for f in filenames
if not f.startswith(".")
and not f.endswith("~")
and not f.startswith("__init__.py")
]
for d in dirnames[:]:
if d.startswith("."):
dirnames.remove(d) # don't visit this dir
out = open(os.path.join(dirpath, "__init__.py"), "w", encoding="utf-8")
out.write(
"from web.template import CompiledTemplate, ForLoop, TemplateResult\n\n"
)
if dirnames:
out.write("import " + ", ".join(dirnames))
out.write("\n")
for f in filenames:
path = os.path.join(dirpath, f)
if "." in f:
name, _ = f.split(".", 1)
else:
name = f
text = open(path, encoding="utf-8").read()
text = Template.normalize_text(text)
code = Template.generate_code(text, path)
code = code.replace("__template__", name, 1)
out.write(code)
out.write("\n\n")
out.write("%s = CompiledTemplate(%s, %s)\n" % (name, name, repr(path)))
out.write("join_ = %s._join; escape_ = %s._escape\n\n" % (name, name))
# create template to make sure it compiles
Template(open(path, encoding="utf-8").read(), path)
out.close()
class ParseError(Exception):
pass
class SecurityError(Exception):
"""The template seems to be trying to do something naughty."""
pass
ALLOWED_AST_NODES = [
"Add",
"And",
"Assign",
"Attribute",
"AugAssign",
"AugLoad",
"AugStore",
"BinOp",
"BitAnd",
"BitOr",
"BitXor",
"BoolOp",
"Break",
"Call",
"ClassDef",
"Compare",
"Constant",
"Continue",
"Del",
"Delete",
"Dict",
"DictComp",
"Div",
"Ellipsis",
"Eq",
"ExceptHandler",
"Expr",
"Expression",
"ExtSlice",
"FloorDiv",
"For",
"FunctionDef",
"GeneratorExp",
"Gt",
"GtE",
"If",
"IfExp",
"In",
"Index",
"Interactive",
"Invert",
"Is",
"IsNot",
"JoinedStr",
"LShift",
"Lambda",
"List",
"ListComp",
"Load",
"Lt",
"LtE",
"Mod",
"Module",
"Mult",
"Name",
"NameConstant",
"Not",
"NotEq",
"NotIn",
"Num",
"Or",
"Param",
"Pass",
"Pow",
"RShift",
"Return",
"Set",
"SetComp",
"Slice",
"Store",
"Str",
"Sub",
"Subscript",
"Suite",
"Tuple",
"UAdd",
"USub",
"UnaryOp",
"While",
"With",
"Yield",
"alias",
"arg",
"arguments",
"comprehension",
"keyword",
]
# Assert Exec Global Import ImportFrom Print Raise Repr TryExcept TryFinally
class SafeVisitor(ast.NodeVisitor):
"""
Make sure code is safe by walking through the AST.
Code is considered unsafe if:
* it has disallowed AST nodes (only nodes defined in ALLOWED_AST_NODES are allowed)
* it is trying to assign to attributes
* it is trying to access restricted attributes
Adopted from http://www.zafar.se/bkz/uploads/safe.txt (public domain, Babar K. Zafar)
* Using ast rather than compiler tree, for jython and Py3 support since Py2.6
* Simplified with ast.NodeVisitor class
"""
def __init__(self, *args, **kwargs):
"Initialize visitor by generating callbacks for all AST node types."
super(SafeVisitor, self).__init__(*args, **kwargs)
self.errors = []
def walk(self, tree, filename):
"Validate each node in AST and raise SecurityError if the code is not safe."
self.filename = filename
self.visit(tree)
if self.errors:
raise SecurityError("\n".join([str(err) for err in self.errors]))
def generic_visit(self, node):
nodename = type(node).__name__
if nodename not in ALLOWED_AST_NODES:
self.fail_name(node, nodename)
super(SafeVisitor, self).generic_visit(node)
def visit_Attribute(self, node):
attrname = self.get_node_attr(node)
if self.is_unallowed_attr(attrname):
self.fail_attribute(node, attrname)
super(SafeVisitor, self).generic_visit(node)
def visit_Assign(self, node):
self.check_assign_targets(node)
def visit_AugAssign(self, node):
self.check_assign_target(node)
def check_assign_targets(self, node):
for target in node.targets:
self.check_assign_target(target)
super(SafeVisitor, self).generic_visit(node)
def check_assign_target(self, targetnode):
targetname = type(targetnode).__name__
if targetname == "Attribute":
attrname = self.get_node_attr(targetnode)
self.fail_attribute(targetnode, attrname)
# failure modes
def fail_name(self, node, nodename):
lineno = self.get_node_lineno(node)
e = SecurityError(
"%s:%d - execution of '%s' statements is denied"
% (self.filename, lineno, nodename)
)
self.errors.append(e)
def fail_attribute(self, node, attrname):
lineno = self.get_node_lineno(node)
e = SecurityError(
"%s:%d - access to attribute '%s' is denied"
% (self.filename, lineno, attrname)
)
self.errors.append(e)
# helpers
def is_unallowed_attr(self, name):
return (
name.startswith("_") or name.startswith("func_") or name.startswith("im_")
)
def get_node_attr(self, node):
return "attr" in node._fields and node.attr or None
def get_node_lineno(self, node):
return (node.lineno) and node.lineno or 0
class TemplateResult(MutableMapping):
"""Dictionary like object for storing template output.
The result of a template execution is usually a string, but sometimes it
contains attributes set using $var. This class provides a simple
dictionary like interface for storing the output of the template and the
attributes. The output is stored with a special key __body__. Converting
the TemplateResult to string or unicode returns the value of __body__.
When the template is in execution, the output is generated part by part
and those parts are combined at the end. Parts are added to the
TemplateResult by calling the `extend` method and the parts are combined
seamlessly when __body__ is accessed.
>>> d = TemplateResult(__body__='hello, world', x='foo')
>>> print(d)
hello, world
>>> d.x
'foo'
>>> d = TemplateResult()
>>> d.extend([u'hello', u'world'])
>>> d
<TemplateResult: {'__body__': u'helloworld'}>
"""
def __init__(self, *a, **kw):
self.__dict__["_d"] = dict(*a, **kw)
self._d.setdefault("__body__", u"")
self.__dict__["_parts"] = []
self.__dict__["extend"] = self._parts.extend
self._d.setdefault("__body__", None)
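# Note that this second setdefault is a no-op: "__body__" was already
# initialized to u"" above.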
def keys(self):
return self._d.keys()
def _prepare_body(self):
"""Prepare value of __body__ by joining parts."""
if self._parts:
value = u"".join(self._parts)
self._parts[:] = []
body = self._d.get("__body__")
if body:
self._d["__body__"] = body + value
else:
self._d["__body__"] = value
def __getitem__(self, name):
if name == "__body__":
self._prepare_body()
return self._d[name]
def __setitem__(self, name, value):
if name == "__body__":
self._prepare_body()
return self._d.__setitem__(name, value)
def __delitem__(self, name):
if name == "__body__":
self._prepare_body()
return self._d.__delitem__(name)
def __getattr__(self, key):
try:
return self[key]
except KeyError as k:
raise AttributeError(k)
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
try:
del self[key]
except KeyError as k:
raise AttributeError(k)
def __unicode__(self):
self._prepare_body()
return self["__body__"]
def __str__(self):
self._prepare_body()
return self["__body__"]
def __repr__(self):
self._prepare_body()
return "<TemplateResult: %s>" % self._d
def __len__(self):
return self._d.__len__()
def __iter__(self):
for i in self._d.__iter__():
if i == "__body__":
self._prepare_body()
yield i
def test():
r"""Doctest for testing template module.
Define a utility function to run template test.
>>> class TestResult:
... def __init__(self, t): self.t = t
... def __getattr__(self, name): return getattr(self.t, name)
... def __repr__(self): return str(self.t)
...
>>> def t(code, **keywords):
... tmpl = Template(code, **keywords)
... return lambda *a, **kw: TestResult(tmpl(*a, **kw))
...
Simple tests.
>>> t('1')()
u'1\n'
>>> t('$def with ()\n1')()
u'1\n'
>>> t('$def with (a)\n$a')(1)
u'1\n'
>>> t('$def with (a=0)\n$a')(1)
u'1\n'
>>> t('$def with (a=0)\n$a')(a=1)
u'1\n'
Test complicated expressions.
>>> t('$def with (x)\n$x.upper()')('hello')
u'HELLO\n'
>>> t('$(2 * 3 + 4 * 5)')()
u'26\n'
>>> t('${2 * 3 + 4 * 5}')()
u'26\n'
>>> t('$def with (limit)\nkeep $(limit)ing.')('go')
u'keep going.\n'
>>> t('$def with (a)\n$a.b[0]')(storage(b=[1]))
u'1\n'
Test html escaping.
>>> t('$def with (x)\n$x', filename='a.html')('<html>')
u'<html>\n'
>>> t('$def with (x)\n$x', filename='a.txt')('<html>')
u'<html>\n'
Test if, for and while.
>>> t('$if 1: 1')()
u'1\n'
>>> t('$if 1:\n 1')()
u'1\n'
>>> t('$if 1:\n 1\\')()
u'1'
>>> t('$if 0: 0\n$elif 1: 1')()
u'1\n'
>>> t('$if 0: 0\n$elif None: 0\n$else: 1')()
u'1\n'
>>> t('$if 0 < 1 and 1 < 2: 1')()
u'1\n'
>>> t('$for x in [1, 2, 3]: $x')()
u'1\n2\n3\n'
>>> t('$def with (d)\n$for k, v in d.items(): $k')({1: 1})
u'1\n'
>>> t('$for x in [1, 2, 3]:\n\t$x')()
u' 1\n 2\n 3\n'
>>> t('$def with (a)\n$while a and a.pop():1')([1, 2, 3])
u'1\n1\n1\n'
The space after : must be ignored.
>>> t('$if True: foo')()
u'foo\n'
Test loop.xxx.
>>> t("$for i in range(5):$loop.index, $loop.parity")()
u'1, odd\n2, even\n3, odd\n4, even\n5, odd\n'
>>> t("$for i in range(2):\n $for j in range(2):$loop.parent.parity $loop.parity")()
u'odd odd\nodd even\neven odd\neven even\n'
Test assignment.
>>> t('$ a = 1\n$a')()
u'1\n'
>>> t('$ a = [1]\n$a[0]')()
u'1\n'
>>> t('$ a = {1: 1}\n$list(a.keys())[0]')()
u'1\n'
>>> t('$ a = []\n$if not a: 1')()
u'1\n'
>>> t('$ a = {}\n$if not a: 1')()
u'1\n'
>>> t('$ a = -1\n$a')()
u'-1\n'
>>> t('$ a = "1"\n$a')()
u'1\n'
Test comments.
>>> t('$# 0')()
u'\n'
>>> t('hello$#comment1\nhello$#comment2')()
u'hello\nhello\n'
>>> t('$#comment0\nhello$#comment1\nhello$#comment2')()
u'\nhello\nhello\n'
Test unicode.
>>> t('$def with (a)\n$a')(u'\u203d')
u'\u203d\n'
>>> t(u'$def with (a)\n$a $:a')(u'\u203d')
u'\u203d \u203d\n'
>>> t(u'$def with ()\nfoo')()
u'foo\n'
>>> def f(x): return x
...
>>> t(u'$def with (f)\n$:f("x")')(f)
u'x\n'
>>> t('$def with (f)\n$:f("x")')(f)
u'x\n'
Test dollar escaping.
>>> t("Stop, $$money isn't evaluated.")()
u"Stop, $money isn't evaluated.\n"
>>> t("Stop, \$money isn't evaluated.")()
u"Stop, $money isn't evaluated.\n"
Test space sensitivity.
>>> t('$def with (x)\n$x')(1)
u'1\n'
>>> t('$def with(x ,y)\n$x')(1, 1)
u'1\n'
>>> t('$(1 + 2*3 + 4)')()
u'11\n'
Make sure globals are working.
>>> t('$x')()
Traceback (most recent call last):
...
NameError: global name 'x' is not defined
>>> t('$x', globals={'x': 1})()
u'1\n'
Can't change globals.
>>> t('$ x = 2\n$x', globals={'x': 1})()
u'2\n'
>>> t('$ x = x + 1\n$x', globals={'x': 1})()
Traceback (most recent call last):
...
UnboundLocalError: local variable 'x' referenced before assignment
Make sure builtins are customizable.
>>> t('$min(1, 2)')()
u'1\n'
>>> t('$min(1, 2)', builtins={})()
Traceback (most recent call last):
...
NameError: global name 'min' is not defined
Test vars.
>>> x = t('$var x: 1')()
>>> x.x
u'1'
>>> x = t('$var x = 1')()
>>> x.x
1
>>> x = t('$var x: \n foo\n bar')()
>>> x.x
u'foo\nbar\n'
Test BOM chars.
>>> t('\xef\xbb\xbf$def with(x)\n$x')('foo')
u'foo\n'
Test for with weird cases.
>>> t('$for i in range(10)[1:5]:\n $i')()
u'1\n2\n3\n4\n'
>>> t("$for k, v in sorted({'a': 1, 'b': 2}.items()):\n $k $v", globals={'sorted':sorted})()
u'a 1\nb 2\n'
Test for syntax error.
>>> try:
... t("$for k, v in ({'a': 1, 'b': 2}.items():\n $k $v")()
... except SyntaxError:
... print("OK")
... else:
... print("Expected SyntaxError")
...
OK
Test datetime.
>>> import datetime
>>> t("$def with (date)\n$date.strftime('%m %Y')")(datetime.datetime(2009, 1, 1))
u'01 2009\n'
"""
pass
if __name__ == "__main__":
if "--compile" in sys.argv:
compile_templates(sys.argv[2])
else:
import doctest
doctest.testmod()
|
bobintetley/asm3
|
src/web062/template.py
|
Python
|
gpl-3.0
| 49,781
|
[
"VisIt"
] |
a43b4279a394d5b4579dbf043b09120d9dd97ba7d26dd01487a8802ae438f2fb
|
##
# Copyright 2009-2021 Ghent University
# Copyright 2015-2021 Stanford University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for VMD, implemented as an easyblock
@author: Stephane Thiell (Stanford University)
@author: Kenneth Hoste (HPC-UGent)
"""
import os
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.easyblocks.generic.pythonpackage import det_pylibdir
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import change_dir, copy_file, extract_file
from easybuild.tools.run import run_cmd
from easybuild.tools.modules import get_software_root, get_software_version
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
class EB_VMD(ConfigureMake):
"""Easyblock for building and installing VMD"""
def __init__(self, *args, **kwargs):
"""Initialize VMD-specific variables."""
super(EB_VMD, self).__init__(*args, **kwargs)
# source tarball contains a 'plugins' and a 'vmd-<version>' directory
self.vmddir = os.path.join(self.builddir, '%s-%s' % (self.name.lower(), self.version))
self.surf_dir = os.path.join(self.vmddir, 'lib', 'surf')
self.stride_dir = os.path.join(self.vmddir, 'lib', 'stride')
def extract_step(self):
"""Custom extract step for VMD."""
super(EB_VMD, self).extract_step()
if LooseVersion(self.version) >= LooseVersion("1.9.3"):
change_dir(self.surf_dir)
srcdir = extract_file('surf.tar.Z', os.getcwd(), change_into_dir=False)
change_dir(srcdir)
def configure_step(self):
"""
Configure VMD for building.
"""
# make sure required dependencies are available
deps = {}
for dep in ['FLTK', 'Mesa', 'netCDF', 'Python', 'Tcl', 'Tk']:
deps[dep] = get_software_root(dep)
if deps[dep] is None:
raise EasyBuildError("Required dependency %s is missing", dep)
# optional dependencies
for dep in ['ACTC', 'CUDA', 'OptiX']:
deps[dep] = get_software_root(dep)
# specify Tcl/Tk locations & libraries
tclinc = os.path.join(deps['Tcl'], 'include')
tcllib = os.path.join(deps['Tcl'], 'lib')
env.setvar('TCL_INCLUDE_DIR', tclinc)
env.setvar('TCL_LIBRARY_DIR', tcllib)
env.setvar('TK_INCLUDE_DIR', os.path.join(deps['Tk'], 'include'))
env.setvar('TK_LIBRARY_DIR', os.path.join(deps['Tk'], 'lib'))
tclshortver = '.'.join(get_software_version('Tcl').split('.')[:2])
self.cfg.update('buildopts', 'TCLLDFLAGS="-ltcl%s"' % tclshortver)
# Netcdf locations
netcdfinc = os.path.join(deps['netCDF'], 'include')
netcdflib = os.path.join(deps['netCDF'], 'lib')
# Python locations
pyver = get_software_version('Python')
pymajver = pyver.split('.')[0]
out, ec = run_cmd("python -c 'import sysconfig; print(sysconfig.get_path(\"include\"))'", simple=False)
if ec:
raise EasyBuildError("Failed to determine Python include path: %s", out)
else:
env.setvar('PYTHON_INCLUDE_DIR', out.strip())
pylibdir = det_pylibdir()
python_libdir = os.path.join(deps['Python'], os.path.dirname(pylibdir))
env.setvar('PYTHON_LIBRARY_DIR', python_libdir)
if LooseVersion(pyver) >= LooseVersion('3.8'):
out, ec = run_cmd("python%s-config --libs --embed" % pymajver, simple=False)
else:
out, ec = run_cmd("python%s-config --libs" % pymajver, simple=False)
if ec:
raise EasyBuildError("Failed to determine Python library name: %s", out)
else:
env.setvar('PYTHON_LIBRARIES', out.strip())
# numpy include location, easiest way to determine it is via numpy.get_include()
out, ec = run_cmd("python -c 'import numpy; print(numpy.get_include())'", simple=False)
if ec:
raise EasyBuildError("Failed to determine numpy include directory: %s", out)
else:
env.setvar('NUMPY_INCLUDE_DIR', out.strip())
# compiler commands
self.cfg.update('buildopts', 'CC="%s"' % os.getenv('CC'))
self.cfg.update('buildopts', 'CCPP="%s"' % os.getenv('CXX'))
# plugins need to be built first (see http://www.ks.uiuc.edu/Research/vmd/doxygen/compiling.html)
change_dir(os.path.join(self.builddir, 'plugins'))
cmd = ' '.join([
'make',
'LINUXAMD64',
"TCLINC='-I%s'" % tclinc,
"TCLLIB='-L%s'" % tcllib,
"TCLLDFLAGS='-ltcl%s'" % tclshortver,
"NETCDFINC='-I%s'" % netcdfinc,
"NETCDFLIB='-L%s'" % netcdflib,
self.cfg['buildopts'],
])
run_cmd(cmd, log_all=True, simple=False)
# create plugins distribution
plugindir = os.path.join(self.vmddir, 'plugins')
env.setvar('PLUGINDIR', plugindir)
self.log.info("Generating VMD plugins in %s", plugindir)
run_cmd("make distrib %s" % self.cfg['buildopts'], log_all=True, simple=False)
# explicitly mention whether or not we're building with CUDA/OptiX support
if deps['CUDA']:
self.log.info("Building with CUDA %s support", get_software_version('CUDA'))
if deps['OptiX']:
self.log.info("Building with Nvidia OptiX %s support", get_software_version('OptiX'))
else:
self.log.warn("Not building with Nvidia OptiX support!")
else:
self.log.warn("Not building with CUDA nor OptiX support!")
# see http://www.ks.uiuc.edu/Research/vmd/doxygen/configure.html
# LINUXAMD64: Linux 64-bit
# LP64: build VMD as 64-bit binary
# IMD: enable support for Interactive Molecular Dynamics (e.g. to connect to NAMD for remote simulations)
# PTHREADS: enable support for POSIX threads
# COLVARS: enable support for collective variables (related to NAMD/LAMMPS)
# NOSILENT: verbose build command
# FLTK: enable the standard FLTK GUI
# TK: enable TK to support extension GUI elements
# OPENGL: enable OpenGL
self.cfg.update(
'configopts', "LINUXAMD64 LP64 IMD PTHREADS COLVARS NOSILENT FLTK TK OPENGL", allow_duplicate=False)
# add additional configopts based on available dependencies
for key in deps:
if deps[key]:
if key == 'Mesa':
self.cfg.update('configopts', "OPENGL MESA", allow_duplicate=False)
elif key == 'OptiX':
self.cfg.update('configopts', "LIBOPTIX", allow_duplicate=False)
elif key == 'Python':
self.cfg.update('configopts', "PYTHON NUMPY", allow_duplicate=False)
else:
self.cfg.update('configopts', key.upper(), allow_duplicate=False)
# configure for building with Intel compilers specifically
if self.toolchain.comp_family() == toolchain.INTELCOMP:
self.cfg.update('configopts', 'ICC', allow_duplicate=False)
# specify install location using environment variables
env.setvar('VMDINSTALLBINDIR', os.path.join(self.installdir, 'bin'))
env.setvar('VMDINSTALLLIBRARYDIR', os.path.join(self.installdir, 'lib'))
# configure in vmd-<version> directory
change_dir(self.vmddir)
run_cmd("%s ./configure %s" % (self.cfg['preconfigopts'], self.cfg['configopts']))
# change to 'src' subdirectory, ready for building
change_dir(os.path.join(self.vmddir, 'src'))
def build_step(self):
"""Custom build step for VMD."""
super(EB_VMD, self).build_step()
self.have_stride = False
# Build Surf, which is part of VMD as of VMD version 1.9.3
if LooseVersion(self.version) >= LooseVersion("1.9.3"):
change_dir(self.surf_dir)
surf_build_cmd = 'make CC="%s" OPT="%s"' % (os.environ['CC'], os.environ['CFLAGS'])
run_cmd(surf_build_cmd)
# Build Stride if it was downloaded
if os.path.exists(os.path.join(self.stride_dir, 'Makefile')):
change_dir(self.stride_dir)
self.have_stride = True
stride_build_cmd = 'make CC="%s" CFLAGS="%s"' % (os.environ['CC'], os.environ['CFLAGS'])
run_cmd(stride_build_cmd)
else:
self.log.info("Stride has not been downloaded and/or unpacked.")
def install_step(self):
"""Custom build step for VMD."""
# Install must also be done in 'src' subdir
change_dir(os.path.join(self.vmddir, 'src'))
super(EB_VMD, self).install_step()
if LooseVersion(self.version) >= LooseVersion("1.9.3"):
surf_bin = os.path.join(self.surf_dir, 'surf')
copy_file(surf_bin, os.path.join(self.installdir, 'lib', 'surf_LINUXAMD64'))
if self.have_stride:
stride_bin = os.path.join(self.stride_dir, 'stride')
copy_file(stride_bin, os.path.join(self.installdir, 'lib', 'stride_LINUXAMD64'))
def sanity_check_step(self):
"""Custom sanity check for VMD."""
custom_paths = {
'files': ['bin/vmd'],
'dirs': ['lib'],
}
super(EB_VMD, self).sanity_check_step(custom_paths=custom_paths)
|
akesandgren/easybuild-easyblocks
|
easybuild/easyblocks/v/vmd.py
|
Python
|
gpl-2.0
| 10,493
|
[
"LAMMPS",
"NAMD",
"NetCDF",
"VMD"
] |
3b9f390127f20a61ce85057ac5f4674d8d0d82e5eedd44b3a2a656fc15ca0397
|
"""
==========================================
Statistical functions (:mod:`scipy.stats`)
==========================================
.. module:: scipy.stats
This module contains a large number of probability distributions as
well as a growing library of statistical functions.
Each univariate distribution is an instance of a subclass of `rv_continuous`
(`rv_discrete` for discrete distributions):
.. autosummary::
:toctree: generated/
rv_continuous
rv_discrete
rv_histogram
Continuous distributions
========================
.. autosummary::
:toctree: generated/
alpha -- Alpha
anglit -- Anglit
arcsine -- Arcsine
argus -- Argus
beta -- Beta
betaprime -- Beta Prime
bradford -- Bradford
burr -- Burr (Type III)
burr12 -- Burr (Type XII)
cauchy -- Cauchy
chi -- Chi
chi2 -- Chi-squared
cosine -- Cosine
dgamma -- Double Gamma
dweibull -- Double Weibull
erlang -- Erlang
expon -- Exponential
exponnorm -- Exponentially Modified Normal
exponweib -- Exponentiated Weibull
exponpow -- Exponential Power
f -- F (Snedecor F)
fatiguelife -- Fatigue Life (Birnbaum-Saunders)
fisk -- Fisk
foldcauchy -- Folded Cauchy
foldnorm -- Folded Normal
frechet_r -- Frechet Right Sided, Extreme Value Type II (Extreme LB) or weibull_min
frechet_l -- Frechet Left Sided, Weibull_max
genlogistic -- Generalized Logistic
gennorm -- Generalized normal
genpareto -- Generalized Pareto
genexpon -- Generalized Exponential
genextreme -- Generalized Extreme Value
gausshyper -- Gauss Hypergeometric
gamma -- Gamma
gengamma -- Generalized gamma
genhalflogistic -- Generalized Half Logistic
gilbrat -- Gilbrat
gompertz -- Gompertz (Truncated Gumbel)
gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I
gumbel_l -- Left Sided Gumbel, etc.
halfcauchy -- Half Cauchy
halflogistic -- Half Logistic
halfnorm -- Half Normal
halfgennorm -- Generalized Half Normal
hypsecant -- Hyperbolic Secant
invgamma -- Inverse Gamma
invgauss -- Inverse Gaussian
invweibull -- Inverse Weibull
johnsonsb -- Johnson SB
johnsonsu -- Johnson SU
kappa4 -- Kappa 4 parameter
kappa3 -- Kappa 3 parameter
ksone -- Kolmogorov-Smirnov one-sided (no stats)
kstwobign -- Kolmogorov-Smirnov two-sided test for Large N (no stats)
laplace -- Laplace
levy -- Levy
levy_l
levy_stable
logistic -- Logistic
loggamma -- Log-Gamma
loglaplace -- Log-Laplace (Log Double Exponential)
lognorm -- Log-Normal
lomax -- Lomax (Pareto of the second kind)
maxwell -- Maxwell
mielke -- Mielke's Beta-Kappa
nakagami -- Nakagami
ncx2 -- Non-central chi-squared
ncf -- Non-central F
nct -- Non-central Student's T
norm -- Normal (Gaussian)
pareto -- Pareto
pearson3 -- Pearson type III
powerlaw -- Power-function
powerlognorm -- Power log normal
powernorm -- Power normal
rdist -- R-distribution
reciprocal -- Reciprocal
rayleigh -- Rayleigh
rice -- Rice
recipinvgauss -- Reciprocal Inverse Gaussian
semicircular -- Semicircular
skewnorm -- Skew normal
t -- Student's T
trapz -- Trapezoidal
triang -- Triangular
truncexpon -- Truncated Exponential
truncnorm -- Truncated Normal
tukeylambda -- Tukey-Lambda
uniform -- Uniform
vonmises -- Von-Mises (Circular)
vonmises_line -- Von-Mises (Line)
wald -- Wald
weibull_min -- Minimum Weibull (see Frechet)
weibull_max -- Maximum Weibull (see Frechet)
wrapcauchy -- Wrapped Cauchy
Multivariate distributions
==========================
.. autosummary::
:toctree: generated/
multivariate_normal -- Multivariate normal distribution
matrix_normal -- Matrix normal distribution
dirichlet -- Dirichlet
wishart -- Wishart
invwishart -- Inverse Wishart
multinomial -- Multinomial distribution
special_ortho_group -- SO(N) group
ortho_group -- O(N) group
unitary_group -- U(N) group
random_correlation -- random correlation matrices
Discrete distributions
======================
.. autosummary::
:toctree: generated/
bernoulli -- Bernoulli
binom -- Binomial
boltzmann -- Boltzmann (Truncated Discrete Exponential)
dlaplace -- Discrete Laplacian
geom -- Geometric
hypergeom -- Hypergeometric
logser -- Logarithmic (Log-Series, Series)
nbinom -- Negative Binomial
planck -- Planck (Discrete Exponential)
poisson -- Poisson
randint -- Discrete Uniform
skellam -- Skellam
zipf -- Zipf
Statistical functions
=====================
Several of these functions have similar versions in scipy.stats.mstats
which work for masked arrays.
.. autosummary::
:toctree: generated/
describe -- Descriptive statistics
gmean -- Geometric mean
hmean -- Harmonic mean
kurtosis -- Fisher or Pearson kurtosis
kurtosistest -- Test whether a dataset has normal kurtosis
mode -- Modal value
moment -- Central moment
normaltest -- Test whether a sample differs from a normal distribution
skew -- Skewness
skewtest -- Test whether the skewness differs from a normal distribution
kstat -- nth k-statistic
kstatvar -- Variance of the k-statistic
tmean -- Truncated arithmetic mean
tvar -- Truncated variance
tmin -- Truncated minimum
tmax -- Truncated maximum
tstd -- Truncated standard deviation
tsem -- Truncated standard error of the mean
variation -- Coefficient of variation
find_repeats
trim_mean
.. autosummary::
:toctree: generated/
cumfreq
histogram2
histogram
itemfreq
percentileofscore
scoreatpercentile
relfreq
.. autosummary::
:toctree: generated/
binned_statistic -- Compute a binned statistic for a set of data.
binned_statistic_2d -- Compute a 2-D binned statistic for a set of data.
binned_statistic_dd -- Compute a d-D binned statistic for a set of data.
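For example, per-bin means can be computed in one call (a minimal sketch;
output omitted here):
>>> from scipy import stats
>>> stats.binned_statistic([1, 2, 3, 4], [10, 20, 30, 40],
... statistic='mean', bins=2)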
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
bayes_mvs
mvsdist
sem
zmap
zscore
iqr
.. autosummary::
:toctree: generated/
sigmaclip
threshold
trimboth
trim1
.. autosummary::
:toctree: generated/
f_oneway
pearsonr
spearmanr
pointbiserialr
kendalltau
weightedtau
linregress
theilslopes
f_value
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
kstest
chisquare
power_divergence
ks_2samp
mannwhitneyu
tiecorrect
rankdata
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
ss
square_of_sums
jarque_bera
.. autosummary::
:toctree: generated/
ansari
bartlett
levene
shapiro
anderson
anderson_ksamp
binom_test
fligner
median_test
mood
.. autosummary::
:toctree: generated/
boxcox
boxcox_normmax
boxcox_llf
entropy
.. autosummary::
:toctree: generated/
chisqprob
betai
Circular statistical functions
==============================
.. autosummary::
:toctree: generated/
circmean
circvar
circstd
Contingency table functions
===========================
.. autosummary::
:toctree: generated/
chi2_contingency
contingency.expected_freq
contingency.margins
fisher_exact
Plot-tests
==========
.. autosummary::
:toctree: generated/
ppcc_max
ppcc_plot
probplot
boxcox_normplot
Masked statistics functions
===========================
.. toctree::
stats.mstats
Univariate and multivariate kernel density estimation (:mod:`scipy.stats.kde`)
==============================================================================
.. autosummary::
:toctree: generated/
gaussian_kde
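A minimal sketch of its use, assuming ``data`` holds a 1-D sample (output
omitted here):
>>> from scipy import stats
>>> kde = stats.gaussian_kde(data)
>>> kde.evaluate([0.0, 1.0])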
For many more statistics-related functions, install the software R and the
interface package rpy.
"""
from __future__ import division, print_function, absolute_import
from .stats import *
from .distributions import *
from .morestats import *
from ._binned_statistic import *
from .kde import gaussian_kde
from . import mstats
from .contingency import chi2_contingency
from ._multivariate import *
__all__ = [s for s in dir() if not s.startswith("_")] # Remove names with a leading underscore.
from numpy.testing import Tester
test = Tester().test
|
jakevdp/scipy
|
scipy/stats/__init__.py
|
Python
|
bsd-3-clause
| 9,236
|
[
"Gaussian"
] |
4d69eabed7f103fcea32f00b7e7c3c79c90e5dc990425e32ada0fb0ce4be7d12
|
"""
Layer.
Copyright 2014 Stanford University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
class Layer(object):
"""
Layer containing weights and biases for neurons of the same type.
Parameters
----------
size : int
Layer size.
scale : float, optional (default 0.01)
Scale of distribution used to sample initial weights.
weights : array_like, optional
Weight matrix.
biases : array_like, optional
Biases. Defaults to 0 for each neuron.
"""
def __init__(self, size, scale=0.01, weights=None, biases=None):
self.size = size
self.scale = scale
self.weights = weights
self.biases = biases
def transform(self, a):
"""
Transform input.
Parameters
----------
a : array_like
Input activations, with examples as columns.
"""
return self.weights * a + self.biases
def activate(self, z):
"""
Compute activation on transformed input.
Parameters
----------
z : float
Transformed input.
"""
raise NotImplementedError
def gradient(self, z):
"""
Compute gradient.
Parameters
----------
z : float
Weighted and biased input value.
"""
raise NotImplementedError
def get_activations_and_gradient(self, z):
"""
Compute activations and gradient.
Parameters
----------
z : float
Weighted and biased input value.
"""
return self.activate(z), self.gradient(z)
def update_weights(self, update):
"""
Update weights.
Parameters
----------
update : array_like
Update for weights.
"""
self.weights += update
def update_biases(self, update):
"""
Update biases.
Parameters
----------
update : array_like
Update for biases.
"""
self.biases += update
class InputLayer(Layer):
"""
Input layer.
Parameters
----------
size : int
Layer size.
weights : array_like
Weight matrix.
biases : array_like, optional
Biases. Defaults to 0 for each neuron.
"""
def activate(self, z):
"""
Compute activation.
Parameters
----------
z : array_like
Transformed input.
"""
return z
def gradient(self, z):
"""
Compute gradient.
Parameters
----------
z : array_like
Transformed input.
"""
return np.asmatrix(np.ones_like(z))
class SigmoidLayer(Layer):
"""
Sigmoid layer.
Parameters
----------
size : int
Layer size.
weights : array_like
Weight matrix.
biases : array_like, optional
Biases. Defaults to 0 for each neuron.
"""
def activate(self, z):
"""
Compute activation.
Parameters
----------
z : array_like
Transformed input.
"""
return 1 / (1 + np.exp(-z))
def gradient(self, z):
"""
Compute gradient.
Parameters
----------
z : array_like
Transformed input.
"""
a = self.activate(z)
return np.multiply(a, 1 - a)
def get_activations_and_gradient(self, z):
"""
Compute activations and gradient.
Parameters
----------
z : array_like
Transformed input.
"""
a = self.activate(z)
return a, np.multiply(a, 1 - a)
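# A minimal usage sketch (illustrative, not part of the original module):
# build a SigmoidLayer with an explicit weight matrix and zero biases, then
# run a forward pass on a batch of two 3-dimensional examples (as columns).
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    weights = np.asmatrix(rng.randn(2, 3) * 0.01)  # 2 neurons, 3 inputs
    biases = np.asmatrix(np.zeros((2, 1)))         # one bias per neuron
    layer = SigmoidLayer(size=2, weights=weights, biases=biases)
    z = layer.transform(np.asmatrix(rng.randn(3, 2)))  # shape (2, 2)
    activations, gradients = layer.get_activations_and_gradient(z)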
|
skearnes/neural-network
|
neural_network/layer.py
|
Python
|
apache-2.0
| 4,272
|
[
"NEURON"
] |
f997bf80c7d3fcf50cef0562e8c356e687427672e35d29388aba92b816e999d7
|
#!/usr/bin/env python
# Copyright (c) 2015 Chris Olstrom <chris@olstrom.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from subprocess import call
def install_with_pip(packages):
""" Installs packages with pip """
for package in packages:
call('pip install -U ' + package, shell=True)
def detect(setting):
""" Detects a setting in tags, falls back to environment variables """
import os
if setting in resource_tags():
return resource_tags()[setting]
else:
return os.getenv(shell_style(setting))
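# Illustration (hypothetical values): if the instance carries a 'ForgeBucket'
# resource tag, detect('ForgeBucket') returns the tag value; otherwise it
# falls back to the FORGE_BUCKET environment variable via shell_style().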
def shell_style(name):
""" Translates reasonable names into names you would expect for environment
variables. Example: 'ForgeRegion' becomes 'FORGE_REGION' """
import re
return re.sub('(?!^)([A-Z]+)', r'_\1', name).upper()
def download_from_s3(source, destination):
""" Downloads a file from an S3 bucket """
call("aws s3 cp --region {region} s3://{bucket}/{file} {save_to}".format(
region=detect('ForgeRegion'),
bucket=detect('ForgeBucket'),
file=source,
save_to=destination
), shell=True)
def instance_metadata(item):
""" Returns information about the current instance from EC2 Instace API """
import httplib
api = httplib.HTTPConnection('169.254.169.254')
api.request('GET', '/latest/meta-data/' + item)
metadata = api.getresponse().read()
api.close()
return metadata
def instance_id():
""" Returns the ID of the current instance """
return instance_metadata('instance-id')
def region():
""" Returns the region the current instance is located in """
return instance_metadata('placement/availability-zone')[:-1]
def resource_tags():
""" Returns a dictionary of all resource tags for the current instance """
import boto.ec2
api = boto.ec2.connect_to_region(region())
tags = api.get_all_tags(filters={'resource-id': instance_id()})
return {tag.name: tag.value for tag in tags}
def security_groups():
""" Returns a list of sercurity groups for the current instance """
return instance_metadata('security-groups').split('\n')
def infer_tags(security_group):
""" Attempts to infer tags from a security group name """
import re
matches = re.search(r'(?P<Project>[\w-]+)-(?P<Role>\w+)$', security_group)
return matches.groupdict()
def implicit_tags():
""" Returns a list of tags inferred from security groups """
return [infer_tags(name) for name in security_groups()]
def discover(trait):
""" Tries to find a trait in tags, makes a reasonable guess if it fails """
if trait in resource_tags():
return [resource_tags()[trait]]
    else:
        return [tags[trait] for tags in implicit_tags()]
def project_path():
""" Returns the forge path for the discovered project """
return discover('Project')[0] + '/'
def role_paths():
""" Returns a list of forge paths for all discovered roles """
return [project_path() + role + '/' for role in discover('Role')]
def unique(enumerable):
""" Returns a list without duplicate items """
return list(set(enumerable))
def applicable_playbooks():
""" Returns a list of playbooks that should be applied to this system """
playbooks = [''] # Base Playbook
playbooks.append(project_path()) # Project Playbook
playbooks.extend(role_paths()) # System Roles
return sorted(unique(playbooks), key=len)
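# Illustration (hypothetical tags): with Project 'myapp' and Role 'web',
# applicable_playbooks() returns ['', 'myapp/', 'myapp/web/'] -- the base
# playbook first, then the project playbook, then each role playbook.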
def flat_path(path):
""" Flattens a path by substituting dashes for slashes """
import re
return re.sub('/', '-', path)
def get_dependencies(playbook):
""" Downloads and installs all roles required for a playbook to run """
path = '/tmp/' + flat_path(playbook)
download_from_s3(playbook + 'dependencies.yml', path + 'dependencies.yml')
call('ansible-galaxy install -ifr' + path + 'dependencies.yml', shell=True)
def get_vault(playbook):
""" Downloads a vault file, and puts it where Ansible can find it. """
vault_name = flat_path(playbook)[:-1]
if len(vault_name) == 0:
vault_name = 'all'
vault_file = '/etc/ansible/group_vars/' + vault_name + '.yml'
download_from_s3(playbook + 'vault.yml', vault_file)
with open('/etc/ansible/hosts', 'a') as stream:
stream.writelines(["\n[" + vault_name + "]\n", 'localhost\n'])
def configure_environment():
""" Exposes information from Resource Tags in Ansible vars """
get_vault('')
with open('/etc/ansible/group_vars/local.yml', 'w+') as stream:
stream.write("\nproject: " + resource_tags()['Project'])
stream.write("\nenvironment_tier: " + resource_tags()['Environment'])
stream.write("\nsystem_role: " + resource_tags()['Role'])
def record_exit(playbook, exit_status):
""" Saves exit status of playbook for notfication purposes"""
playbook_name = '/tmp/' + flat_path(playbook + 'playbook' + '.status')
with open(playbook_name, 'w+') as stream:
stream.write(str(exit_status))
def execute(playbook):
""" Downloads and executes a playbook. """
path = '/tmp/' + flat_path(playbook)
for hook in ['pre-', '', 'post-']:
filename = hook + 'playbook.yml'
download_from_s3(playbook + filename, path + filename)
exit_status = call('ansible-playbook ' + path + filename, shell=True)
record_exit(playbook, exit_status)
def ssh_keyscan(host):
""" Get the SSH host key from a remote server by connecting to it """
from paramiko import transport
with transport.Transport(host) as ssh:
ssh.start_client()
return ssh.get_remote_server_key()
def ssh_host_key(host, port=22):
""" Get SSH host key, return string formatted for known_hosts """
if port != 22:
host = "{host}:{port}".format(host=host, port=port)
key = ssh_keyscan(host)
return "{host} {key_name} {key}".format(
host=host,
key_name=key.get_name(),
key=key.get_base64())
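# Example of a returned known_hosts entry (key material abbreviated and
# hypothetical): 'github.com ssh-rsa AAAAB3NzaC1yc2E...'; with a custom
# port the function returns 'example.com:2222 ssh-ed25519 AAAAC3NzaC1l...'.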
def in_known_hosts(host_key):
""" Checks if a key is in known_hosts """
from os import path
if not path.isfile('/etc/ssh/ssh_known_hosts'):
return False
with open('/etc/ssh/ssh_known_hosts', 'r') as known_hosts:
for entry in known_hosts:
if host_key in entry:
return True
return False
def add_to_known_hosts(host_key):
""" Appends line to a file """
if in_known_hosts(host_key):
return
with open('/etc/ssh/ssh_known_hosts', 'a') as known_hosts:
known_hosts.write(host_key + "\n")
def configure_ansible():
""" Fetches ansible configurations from ForgeBucket """
download_from_s3('ansible.hosts', '/etc/ansible/hosts')
download_from_s3('ansible.cfg', '/etc/ansible/ansible.cfg')
download_from_s3('vault.key', '/etc/ansible/vault.key')
files = ['/etc/ansible/ansible.cfg', '/etc/ansible/vault.key']
set_permissions(files, 0400)
add_to_known_hosts(ssh_host_key('github.com'))
add_to_known_hosts(ssh_host_key('bitbucket.org'))
def set_permissions(files, mode):
""" Sets permissions on a list of files """
from os import chmod
for filename in files:
try:
chmod(filename, mode)
except OSError:
pass
def get_credentials():
""" Fetches credentials needed for private repositories """
download_from_s3('ssh.ed25519', '/root/.ssh/id_ed25519')
download_from_s3('ssh.rsa', '/root/.ssh/id_rsa')
set_permissions(['/root/.ssh/id_ed25519', '/root/.ssh/id_rsa'], 0400)
def preconfigure():
""" Configure everything needed to configure everything else. """
install_with_pip(['"ansible<2"', 'awscli', 'boto'])
configure_ansible()
configure_environment()
get_credentials()
download_from_s3('bin/reforge', '/usr/local/sbin/reforge')
set_permissions(['/usr/local/sbin/reforge'], 0500)
def self_provision():
""" Bring it all together and follow your dreams, little server! """
preconfigure()
for playbook in applicable_playbooks():
get_dependencies(playbook)
get_vault(playbook)
execute(playbook)
self_provision()
|
killerwails/forge
|
bootstrap.py
|
Python
|
mit
| 9,110
|
[
"Galaxy"
] |
ff5b6880d31a44cc4833bc8d847c31eaf6ebf8fa3d2ed9616cfbc0c73cd10859
|
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', 'email@domain.com'), ('Full Name', 'anotheremail@domain.com'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ()
# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities).
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box. The language name
# should be the utf-8 encoded local name for the language.
LANGUAGES = (
('ar', gettext_noop('Arabic')),
('bn', gettext_noop('Bengali')),
('bg', gettext_noop('Bulgarian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('es', gettext_noop('Spanish')),
('et', gettext_noop('Estonian')),
('es-ar', gettext_noop('Argentinean Spanish')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('hu', gettext_noop('Hungarian')),
('he', gettext_noop('Hebrew')),
('hr', gettext_noop('Croatian')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('ko', gettext_noop('Korean')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('lv', gettext_noop('Latvian')),
('lt', gettext_noop('Lithuanian')),
('mk', gettext_noop('Macedonian')),
('nl', gettext_noop('Dutch')),
('no', gettext_noop('Norwegian')),
('pl', gettext_noop('Polish')),
    ('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sr', gettext_noop('Serbian')),
('sv', gettext_noop('Swedish')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('tr', gettext_noop('Turkish')),
('uk', gettext_noop('Ukrainian')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar", "fa")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = ()
LANGUAGE_COOKIE_NAME = 'django_language'
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various e-mails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# E-mail address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Whether to send broken-link e-mails.
SEND_BROKEN_LINK_EMAILS = False
# Database connection info.
DATABASE_ENGINE = '' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
DATABASE_OPTIONS = {} # Set to empty dictionary for default.
# Host for sending e-mail.
EMAIL_HOST = 'localhost'
# Port for sending e-mail.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
# List of strings representing installed apps.
INSTALLED_APPS = ()
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
# 'django.core.context_processors.request',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Default e-mail address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = (
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# )
DISALLOWED_USER_AGENTS = ()
ABSOLUTE_URL_OVERRIDES = {}
# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()
# If this is an admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()
# 404s that may be ignored.
IGNORABLE_404_STARTS = ('/cgi-bin/', '/_vti_bin', '/_vti_inf')
IGNORABLE_404_ENDS = ('mail.pl', 'mailform.pl', 'mail.cgi', 'mailform.cgi', 'favicon.ico', '.php')
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Path to the "jing" executable -- needed to validate XMLFields
JING_PATH = "/usr/bin/jing"
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# Default formatting for date objects. See all available format strings here:
# http://www.djangoproject.com/documentation/templates/#now
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://www.djangoproject.com/documentation/templates/#now
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://www.djangoproject.com/documentation/templates/#now
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://www.djangoproject.com/documentation/templates/#now
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://www.djangoproject.com/documentation/templates/#now
MONTH_DAY_FORMAT = 'F j'
# Do you want to manage transactions manually?
# Hint: you really don't!
TRANSACTIONS_MANAGED = False
# The User-Agent string to use when checking for URL validity through the
# isExistingURL validator.
from django import get_version
URL_VALIDATOR_USER_AGENT = "Django/%s (http://www.djangoproject.com)" % get_version()
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.middleware.http.ConditionalGetMiddleware',
# 'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
)
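# For example, with the classes above a request passes through
# SessionMiddleware, then AuthenticationMiddleware, then CommonMiddleware,
# and the response is processed by the same classes in reverse order.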
############
# SESSIONS #
############
SESSION_COOKIE_NAME = 'sessionid' # Cookie name. This can be whatever you want.
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_DOMAIN = None # A string like ".lawrence.com", or None for standard domain cookie.
SESSION_COOKIE_SECURE = False # Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_PATH = '/' # The path of the session cookie.
SESSION_SAVE_EVERY_REQUEST = False # Whether to save the session data on every request.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # Whether a user's session cookie expires when the Web browser is closed.
SESSION_ENGINE = 'django.contrib.sessions.backends.db' # The module to store session data
SESSION_FILE_PATH = None # Directory to store session files if using the file session module. If None, the backend will use a sensible default.
#########
# CACHE #
#########
# The cache backend to use. See the docstring in django.core.cache for the
# possible values.
CACHE_BACKEND = 'locmem://'
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
####################
# COMMENTS #
####################
COMMENTS_ALLOW_PROFANITIES = False
# The profanities that will trigger a validation error in the
# 'hasNoProfanities' validator. All of these should be in lowercase.
PROFANITIES_LIST = ('asshat', 'asshead', 'asshole', 'cunt', 'fuck', 'gook', 'nigger', 'shit')
# The group ID that designates which users are banned.
# Set to None if you're not using it.
COMMENTS_BANNED_USERS_GROUP = None
# The group ID that designates which users can moderate comments.
# Set to None if you're not using it.
COMMENTS_MODERATORS_GROUP = None
# The group ID that designates the users whose comments should be e-mailed to MANAGERS.
# Set to None if you're not using it.
COMMENTS_SKETCHY_USERS_GROUP = None
# The system will e-mail MANAGERS the first COMMENTS_FIRST_FEW comments by each
# user. Set this to 0 if you want to disable it.
COMMENTS_FIRST_FEW = 0
# A tuple of IP addresses that have been banned from participating in various
# Django-powered features.
BANNED_IPS = ()
##################
# AUTHENTICATION #
##################
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
###########
# TESTING #
###########
# The name of the method to use to invoke the test suite
TEST_RUNNER = 'django.test.simple.run_tests'
# The name of the database to use for testing purposes.
# If None, a name of 'test_' + DATABASE_NAME will be assumed
TEST_DATABASE_NAME = None
# Strings used to set the character set and collation order for the test
# database. These values are passed literally to the server, so they are
# backend-dependent. If None, no special settings are sent (system defaults are
# used).
TEST_DATABASE_CHARSET = None
TEST_DATABASE_COLLATION = None
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = ()
|
Shrews/PyGerrit
|
webapp/django/conf/global_settings.py
|
Python
|
apache-2.0
| 14,357
|
[
"VisIt"
] |
2f3cac9f8d58abec13c89481dae77703b83954982ad78a65ebfa4fbb782c81c1
|
"""
Read/write functions for Gaussian.
Written by:
Glen R. Jenness
University of Wisconsin - Madison
See accompanying license files for details.
"""
import numpy as np
import ase.units
from ase.atoms import Atoms
from ase.atom import Atom
from ase.calculators.singlepoint import SinglePointCalculator
from ase.io.gaussian_reader import GaussianReader as GR
from ase.calculators.gaussian import Gaussian
# http://www.gaussian.com/g_tech/g_ur/k_dft.htm
allowed_dft_functionals = ['lsda', # = 'svwn'
'svwn',
'svwn5', # != 'svwn'
'blyp',
'b3lyp',
'bp86',
'pbepbe',
'pbe1pbe', # pbe0
'm06',
'm06hf',
'm062x',
'tpssh',
'tpsstpss',
'wb97xd',
]
def read_gaussian_out(filename, index=-1, quantity='atoms'):
""""Interface to GaussianReader and returns various quantities"""
energy = 0.0
data = GR(filename)[index]
formula = data['Chemical_formula']
positions = np.array(data['Positions'])
method = data['Method']
version = data['Version']
    if method.lower()[1:] in allowed_dft_functionals:
        # Gaussian archive sections report the SCF energy as 'HF' even for DFT runs.
        method = 'HF'
atoms = Atoms(formula, positions=positions)
for key, value in data.items():
if (key in method):
energy = value
try:
# Re-read in the log file
f = open(filename, 'r')
lines = f.readlines()
f.close()
forces = list()
for n, line in enumerate(lines):
if ('Forces (Hartrees/Bohr)' in line):
for j in range(len(atoms)):
forces += [[float(lines[n + j + 3].split()[2]),
float(lines[n + j + 3].split()[3]),
float(lines[n + j + 3].split()[4])]]
convert = ase.units.Hartree / ase.units.Bohr
forces = np.array(forces) * convert
    except Exception:  # could not parse forces; fall back to None
forces = None
energy *= ase.units.Hartree # Convert the energy from a.u. to eV
calc = SinglePointCalculator(energy, forces, None, None, atoms)
atoms.set_calculator(calc)
if (quantity == 'energy'):
return energy
elif (quantity == 'forces'):
return forces
elif (quantity == 'dipole'):
return data['Dipole']
elif (quantity == 'atoms'):
return atoms
elif (quantity == 'version'):
return version
def read_gaussian(filename):
"""Reads a Gaussian input file"""
f = open(filename, 'r')
lines = f.readlines()
f.close()
atoms = Atoms()
for n, line in enumerate(lines):
if ('#' in line):
i = 0
while (lines[n + i + 5] != '\n'):
info = lines[n + i + 5].split()
symbol = info[0]
position = [float(info[1]), float(info[2]), float(info[3])]
atoms += Atom(symbol, position=position)
i += 1
return atoms
def write_gaussian(filename, atoms):
"""Writes a basic Gaussian input file"""
    # Since Gaussian prints the geometry directly into the input file, we'll
    # just use the write_input method from the Gaussian calculator with the
    # default settings.
calc = Gaussian()
calc.initialize(atoms)
calc.write_input(filename, atoms)
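# Minimal usage sketch (file names are hypothetical): read the final
# geometry and energy from a Gaussian log file, then write a fresh input
# file for that geometry with default calculator settings.
#
#   atoms = read_gaussian_out('water.log', quantity='atoms')
#   energy = read_gaussian_out('water.log', quantity='energy')
#   write_gaussian('water.com', atoms)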
|
alexei-matveev/ase-local
|
ase/io/gaussian.py
|
Python
|
gpl-2.0
| 3,514
|
[
"ASE",
"Gaussian"
] |
38e470d851beba09218a0ed3c13f2ea324b3a1ca86069421121debe07686b356
|
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import scipy.io
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
# Returns mu, sigma for 10 hidden states from a feature-vector matrix (rows are time samples, columns are trials)
def feature_to_mu_sigma(fvec):
index = 0
m,n = np.shape(fvec)
#print m,n
mu = np.matrix(np.zeros((10,1)))
sigma = np.matrix(np.zeros((10,1)))
DIVS = m/10
while (index < 10):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),0:]
#if index == 1:
#print temp_fvec
mu[index] = scp.mean(temp_fvec)
sigma[index] = scp.std(temp_fvec)
index = index+1
return mu,sigma
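# Illustration (hypothetical input): for a (120, 1) column of force samples,
# DIVS = 12, so each of the 10 states gets the mean/std of one 12-row block:
#   mu, sigma = feature_to_mu_sigma(np.matrix(np.random.rand(120, 1)))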
# Returns sequence given raw data
def create_seq(fvec):
m,n = np.shape(fvec)
#print m,n
seq = np.matrix(np.zeros((10,n)))
DIVS = m/10
for i in range(n):
index = 0
while (index < 10):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),i]
#if index == 1:
#print temp_fvec
seq[index,i] = scp.mean(temp_fvec)
index = index+1
return seq
if __name__ == '__main__':
### Simulation Data from Object Variation
tSamples = 121
data_rf_training = scipy.io.loadmat('rigid_fixed_object_training.mat')
data_sf_training = scipy.io.loadmat('soft_fixed_object_training.mat')
data_rm_training = scipy.io.loadmat('rigid_movable_object_training.mat')
data_sm_training = scipy.io.loadmat('soft_movable_object_training.mat')
simulmotion_training = np.zeros((tSamples,400))
datatime = np.arange(0,1.21,0.01)
datamotion_rf_training = np.transpose(data_rf_training['robot_pos_rf'])
datamotion_sf_training = np.transpose(data_sf_training['robot_pos_sf'])
datamotion_rm_training = np.transpose(data_rm_training['robot_pos_rm'])
datamotion_sm_training = np.transpose(data_sm_training['robot_pos_sm'])
simulmotion_training = np.concatenate((datamotion_rf_training, datamotion_rm_training, datamotion_sf_training, datamotion_sm_training), axis = 1)
Fmat_training = np.matrix(simulmotion_training)
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat_training)
#print " "
#print 'Total_Matrix_Shape:',m_tot,n_tot
mu_rf,sigma_rf = feature_to_mu_sigma(Fmat_training[0:121,0:100])
mu_rm,sigma_rm = feature_to_mu_sigma(Fmat_training[0:121,100:200])
mu_sf,sigma_sf = feature_to_mu_sigma(Fmat_training[0:121,200:300])
mu_sm,sigma_sm = feature_to_mu_sigma(Fmat_training[0:121,300:400])
#print [mu_rf, sigma_rf]
# HMM - Implementation:
# 10 Hidden States
# Max. Force(For now), Contact Area(Not now), and Contact Motion(Not Now) as Continuous Gaussian Observations from each hidden state
# Four HMM-Models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable
# Transition probabilities given as an upper-triangular matrix (to be trained using Baum-Welch)
# A new object is classified according to the model it matches most closely (highest Viterbi log-likelihood)
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.1, 0.25, 0.25, 0.1, 0.1, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.20, 0.20, 0.1, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf = np.zeros((10,2))
B_rm = np.zeros((10,2))
B_sf = np.zeros((10,2))
B_sm = np.zeros((10,2))
for num_states in range(10):
B_rf[num_states,0] = mu_rf[num_states]
B_rf[num_states,1] = sigma_rf[num_states]
B_rm[num_states,0] = mu_rm[num_states]
B_rm[num_states,1] = sigma_rm[num_states]
B_sf[num_states,0] = mu_sf[num_states]
B_sf[num_states,1] = sigma_sf[num_states]
B_sm[num_states,0] = mu_sm[num_states]
B_sm[num_states,1] = sigma_sm[num_states]
B_rf = B_rf.tolist()
B_rm = B_rm.tolist()
B_sf = B_sf.tolist()
B_sm = B_sm.tolist()
# pi - initial probabilities per state
pi = [0.1] * 10
# generate RF, RM, SF, SM models from parameters
model_rf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf, pi) # Will be Trained
model_rm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm, pi) # Will be Trained
model_sf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf, pi) # Will be Trained
model_sm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm, pi) # Will be Trained
# For Training
total_seq = Fmat_training[0:121,:]
m_total, n_total = np.shape(total_seq)
#print 'Total_Sequence_Shape:', m_total, n_total
total_seq_rf = total_seq[:,0:100]
total_seq_rm = total_seq[:,100:200]
total_seq_sf = total_seq[:,200:300]
total_seq_sm = total_seq[:,300:400]
train_seq_rf = (np.array(total_seq_rf).T).tolist()
train_seq_rm = (np.array(total_seq_rm).T).tolist()
train_seq_sf = (np.array(total_seq_sf).T).tolist()
train_seq_sm = (np.array(total_seq_sm).T).tolist()
#print train_seq_rf
final_ts_rf = ghmm.SequenceSet(F,train_seq_rf)
final_ts_rm = ghmm.SequenceSet(F,train_seq_rm)
final_ts_sf = ghmm.SequenceSet(F,train_seq_sf)
final_ts_sm = ghmm.SequenceSet(F,train_seq_sm)
model_rf.baumWelch(final_ts_rf)
model_rm.baumWelch(final_ts_rm)
model_sf.baumWelch(final_ts_sf)
model_sm.baumWelch(final_ts_sm)
# For Testing
### Simulation Data from All Variation
data_rf = scipy.io.loadmat('rigid_fixed.mat')
data_sf = scipy.io.loadmat('soft_fixed.mat')
data_rm = scipy.io.loadmat('rigid_movable.mat')
data_sm = scipy.io.loadmat('soft_movable.mat')
simulmotion = np.zeros((tSamples,8000))
datamotion_rf = np.transpose(data_rf['robot_pos_rf'])
datamotion_sf = np.transpose(data_sf['robot_pos_sf'])
datamotion_rm = np.transpose(data_rm['robot_pos_rm'])
datamotion_sm = np.transpose(data_sm['robot_pos_sm'])
simulmotion = np.concatenate((datamotion_rf, datamotion_rm, datamotion_sf, datamotion_sm), axis = 1)
Fmat = np.matrix(simulmotion)
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
#print " "
#print 'Total_Matrix_Shape:',m_tot,n_tot
total_seq = Fmat[0:121,:]
m_total, n_total = np.shape(total_seq)
#print 'Total_Sequence_Shape:', m_total, n_total
rf_final = np.matrix(np.zeros((8000,1)))
rm_final = np.matrix(np.zeros((8000,1)))
sf_final = np.matrix(np.zeros((8000,1)))
sm_final = np.matrix(np.zeros((8000,1)))
total_seq_rf = total_seq[:,0:2000]
total_seq_rm = total_seq[:,2000:4000]
total_seq_sf = total_seq[:,4000:6000]
total_seq_sm = total_seq[:,6000:8000]
total_seq_obj = np.matrix(np.column_stack((total_seq_rf,total_seq_rm,total_seq_sf,total_seq_sm)))
#print np.shape(total_seq_obj)
rf = np.matrix(np.zeros(np.size(total_seq_obj,1)))
rm = np.matrix(np.zeros(np.size(total_seq_obj,1)))
sf = np.matrix(np.zeros(np.size(total_seq_obj,1)))
sm = np.matrix(np.zeros(np.size(total_seq_obj,1)))
#print np.shape(rf)
#print np.size(total_seq_obj,1)
k = 0
while (k < np.size(total_seq_obj,1)):
test_seq_obj = (np.array(total_seq_obj[:,k]).T).tolist()
new_test_seq_obj = np.array(sum(test_seq_obj,[]))
ts_obj = new_test_seq_obj
final_ts_obj = ghmm.EmissionSequence(F,ts_obj.tolist())
# Find Viterbi Path
path_rf_obj = model_rf.viterbi(final_ts_obj)
print "Rigid_Fixed_Model_Path"
print path_rf_obj
#print np.shape(path_rf_obj[0])
path_rm_obj = model_rm.viterbi(final_ts_obj)
print "Rigid_Movable_Model_Path"
print path_rm_obj
#print np.shape(path_rm_obj[0])
path_sf_obj = model_sf.viterbi(final_ts_obj)
print "Soft_Fixed_Model_Path"
print path_sf_obj
#print np.shape(path_sf_obj[0])
path_sm_obj = model_sm.viterbi(final_ts_obj)
print "Soft_Movable_Model_Path"
print path_sm_obj
#print np.shape(path_sm_obj[0])
obj = max(path_rf_obj[1],path_rm_obj[1],path_sf_obj[1],path_sm_obj[1])
#print obj
if obj == path_rf_obj[1]:
rf[0,k] = 1
elif obj == path_rm_obj[1]:
rm[0,k] = 1
elif obj == path_sf_obj[1]:
sf[0,k] = 1
else:
sm[0,k] = 1
k = k+1
#print rf.T
rf_final = rf_final + rf.T
rm_final = rm_final + rm.T
sf_final = sf_final + sf.T
sm_final = sm_final + sm.T
#print rf_final
#print rm_final
#print sf_final
#print sm_final
# Confusion Matrix
cmat = np.zeros((4,4))
arrsum_rf = np.zeros((4,1))
arrsum_rm = np.zeros((4,1))
arrsum_sf = np.zeros((4,1))
arrsum_sm = np.zeros((4,1))
k = 2000
i = 0
while (k < 8001):
arrsum_rf[i] = np.sum(rf_final[k-2000:k,0])
arrsum_rm[i] = np.sum(rm_final[k-2000:k,0])
arrsum_sf[i] = np.sum(sf_final[k-2000:k,0])
arrsum_sm[i] = np.sum(sm_final[k-2000:k,0])
i = i+1
k = k+2000
i=0
while (i < 4):
j=0
while (j < 4):
if (i == 0):
cmat[i][j] = arrsum_rf[j]
elif (i == 1):
cmat[i][j] = arrsum_rm[j]
elif (i == 2):
cmat[i][j] = arrsum_sf[j]
else:
cmat[i][j] = arrsum_sm[j]
j = j+1
i = i+1
#print cmat
# Plot Confusion Matrix
Nlabels = 4
fig = pp.figure()
ax = fig.add_subplot(111)
figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels])
ax.set_title('Performance of HMM Models')
pp.xlabel("Targets")
pp.ylabel("Predictions")
ax.set_xticks([0.5,1.5,2.5,3.5])
ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
ax.set_yticks([3.5,2.5,1.5,0.5])
ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
figbar = fig.colorbar(figplot)
i = 0
while (i < 4):
j = 0
while (j < 4):
pp.text(j+0.5,3.5-i,cmat[i][j])
j = j+1
i = i+1
pp.show()
|
tapomayukh/projects_in_python
|
classification/Classification_with_HMM/Single_Contact_Classification/simulation_results/Combined/object_training/hmm_crossvalidation_motion_10_states_object_training_all_testing.py
|
Python
|
mit
| 11,241
|
[
"Gaussian",
"Mayavi"
] |
1ec5fe69573c562174d6e2ed15a3e29c5723652aac9283e44e9ab264d7298b5f
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace for operators used in Gluon dispatched by F=symbol."""
import numpy as np
from ...context import current_context
from ...util import is_np_default_dtype
from . import _internal as _npi
__all__ = ['randint', 'uniform', 'normal', 'multivariate_normal',
'logistic', 'gumbel', 'rayleigh', 'f',
'rand', 'shuffle', 'gamma', 'beta', 'chisquare', 'exponential', 'lognormal',
'weibull', 'pareto', 'power', 'laplace']
def randint(low, high=None, size=None, dtype=None, ctx=None, out=None):
r"""Return random integers from `low` (inclusive) to `high` (exclusive).
Return random integers from the "discrete uniform" distribution of
the specified dtype in the "half-open" interval [`low`, `high`). If
`high` is None (the default), then results are from [0, `low`).
Parameters
----------
low : int
Lowest (signed) integer to be drawn from the distribution (unless
``high=None``, in which case this parameter is one above the
*highest* such integer).
high : int, optional
If provided, one above the largest (signed) integer to be drawn
from the distribution (see above for behavior if ``high=None``).
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
dtype : dtype, optional
Desired dtype of the result. All dtypes are determined by their
        name, i.e., 'int64', 'int', etc., so byteorder is not available
and a specific precision may have different C types depending
on the platform. The default value is 'np.int'.
ctx : Context, optional
Device context of output. Default is current context.
out : _Symbol, optional
The output symbol (default is `None`).
Returns
-------
out : _Symbol
`size`-shaped array of random integers from the appropriate
distribution, or a single such random int if `size` not provided.
Examples
--------
>>> np.random.randint(2, size=10)
array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0])
>>> np.random.randint(1, size=10)
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
Generate a 2 x 4 array of ints between 0 and 4, inclusive:
>>> np.random.randint(5, size=(2, 4))
array([[4, 0, 2, 1],
[3, 2, 2, 0]])
"""
if dtype is None:
dtype = 'int'
if ctx is None:
ctx = current_context()
if size is None:
size = ()
if high is None:
high = low
low = 0
return _npi.random_randint(low, high, shape=size, dtype=dtype, ctx=ctx, out=out)
def rand(*size, **kwargs):
r"""Random values in a given shape.
Create an array of the given shape and populate it with random
samples from a uniform distribution over [0, 1).
Parameters
----------
d0, d1, ..., dn : int, optional
The dimensions of the returned array, should be all positive.
If no argument is given a single Python float is returned.
Returns
-------
out : _Symbol
Random values.
Examples
--------
>>> np.random.rand(3,2)
array([[ 0.14022471, 0.96360618], #random
[ 0.37601032, 0.25528411], #random
[ 0.49313049, 0.94909878]]) #random
"""
output_shape = ()
for s in size:
output_shape += (s,)
return uniform(0, 1, size=output_shape, **kwargs)
def uniform(low=0.0, high=1.0, size=None, dtype=None, ctx=None, out=None):
r"""Draw samples from a uniform distribution.
Samples are uniformly distributed over the half-open interval
``[low, high)`` (includes low, but excludes high). In other words,
any value within the given interval is equally likely to be drawn
by `uniform`.
Parameters
----------
low : float, _Symbol, optional
Lower boundary of the output interval. All values generated will be
greater than or equal to low. The default value is 0.
high : float, _Symbol, optional
Upper boundary of the output interval. All values generated will be
less than high. The default value is 1.0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a scalar tensor containing a single value is returned if
``low`` and ``high`` are both scalars.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
ctx : Context, optional
Device context of output. Default is current context.
Returns
-------
out : _Symbol
Drawn samples from the parameterized uniform distribution.
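    Examples
    --------
    Values shown are illustrative only, since draws depend on the random seed:
    >>> np.random.uniform(0, 1, size=(2,))
    array([ 0.22032449, 0.87008727]) #random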
"""
from ._symbol import _Symbol as np_symbol
input_type = (isinstance(low, np_symbol), isinstance(high, np_symbol))
if ctx is None:
ctx = current_context()
if out is not None:
size = out.shape
if size == ():
size = None
if input_type == (True, True):
return _npi.uniform(low, high, low=None, high=None, size=size,
ctx=ctx, dtype=dtype, out=out)
elif input_type == (False, True):
return _npi.uniform(high, low=low, high=None, size=size,
ctx=ctx, dtype=dtype, out=out)
elif input_type == (True, False):
return _npi.uniform(low, low=None, high=high, size=size,
ctx=ctx, dtype=dtype, out=out)
else:
return _npi.uniform(low=low, high=high, size=size,
ctx=ctx, dtype=dtype, out=out)
def normal(loc=0.0, scale=1.0, size=None, dtype=None, ctx=None, out=None):
r"""Draw random samples from a normal (Gaussian) distribution.
Samples are distributed according to a normal distribution parametrized
by *loc* (mean) and *scale* (standard deviation).
Parameters
----------
loc : float, optional
Mean (centre) of the distribution.
scale : float, optional
Standard deviation (spread or "width") of the distribution.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., `(m, n, k)`, then `m * n * k`
samples are drawn. If size is `None` (default), a scalar tensor containing
a single value is returned if loc and scale are both scalars.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
ctx : Context, optional
Device context of output. Default is current context.
Returns
-------
out : _Symbol (symbol representing `mxnet.numpy.ndarray` in computational graphs)
Drawn samples from the parameterized normal distribution.
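    Examples
    --------
    Values shown are illustrative only, since draws depend on the random seed:
    >>> np.random.normal(0, 1, size=(3,))
    array([ 1.76405235, 0.40015721, 0.97873798]) #random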
"""
from ._symbol import _Symbol as np_symbol
input_type = (isinstance(loc, np_symbol), isinstance(scale, np_symbol))
if ctx is None:
ctx = current_context()
if size == ():
size = None
if input_type == (True, True):
return _npi.normal(loc, scale, loc=None, scale=None, size=size,
ctx=ctx, dtype=dtype, out=out)
elif input_type == (False, True):
return _npi.normal(scale, loc=loc, scale=None, size=size,
ctx=ctx, dtype=dtype, out=out)
elif input_type == (True, False):
return _npi.normal(loc, loc=None, scale=scale, size=size,
ctx=ctx, dtype=dtype, out=out)
else:
return _npi.normal(loc=loc, scale=scale, size=size,
ctx=ctx, dtype=dtype, out=out)
def lognormal(mean=0.0, sigma=1.0, size=None, dtype=None, ctx=None, out=None):
r"""Draw samples from a log-normal distribution.
Draw samples from a log-normal distribution with specified mean,
standard deviation, and array shape. Note that the mean and standard
deviation are not the values for the distribution itself, but of the
underlying normal distribution it is derived from.
Parameters
----------
mean : float, optional
Mean value of the underlying normal distribution. Default is 0.
sigma : float, optional
Standard deviation of the underlying normal distribution. Must be
non-negative. Default is 1.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``mean`` and ``sigma`` are both scalars.
Otherwise, ``np.broadcast(mean, sigma).size`` samples are drawn.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context.
Returns
-------
out : _Symbol (symbol representing `mxnet.numpy.ndarray` in computational graphs)
Drawn samples from the parameterized lognormal distribution.
"""
from . import _symbol as _mx_np_symbol
return _mx_np_symbol.exp(normal(loc=mean, scale=sigma, size=size, dtype=dtype, ctx=ctx, out=out))
def logistic(loc=0.0, scale=1.0, size=None, ctx=None, out=None):
r"""Draw samples from a logistic distribution.
Samples are drawn from a logistic distribution with specified
parameters, loc (location or mean, also median), and scale (>0).
Parameters
----------
loc : float, optional
Parameter of the distribution. Default is 0.
scale : float, optional
Parameter of the distribution. Must be non-negative.
Default is 1.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``loc`` and ``scale`` are both scalars.
Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
ctx : Context, optional
Device context of output. Default is current context.
Returns
-------
out : _Symbol (symbol representing `mxnet.numpy.ndarray` in computational graphs)
Drawn samples from the parameterized logistic distribution.
"""
from ._symbol import _Symbol as np_symbol
input_type = (isinstance(loc, np_symbol), isinstance(scale, np_symbol))
if ctx is None:
ctx = current_context()
if size == ():
size = None
if input_type == (True, True):
return _npi.logistic(loc, scale, loc=None, scale=None, size=size,
ctx=ctx, out=out)
elif input_type == (False, True):
return _npi.logistic(scale, loc=loc, scale=None, size=size,
ctx=ctx, out=out)
elif input_type == (True, False):
return _npi.logistic(loc, loc=None, scale=scale, size=size,
ctx=ctx, out=out)
else:
return _npi.logistic(loc=loc, scale=scale, size=size,
ctx=ctx, out=out)
def gumbel(loc=0.0, scale=1.0, size=None, ctx=None, out=None):
r"""Draw samples from a Gumbel distribution.
Parameters
----------
loc : float or array_like of floats, optional
The location of the mode of the distribution. Default is 0.
scale : float or array_like of floats, optional
The scale parameter of the distribution. Default is 1. Must be non-
negative.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``loc`` and ``scale`` are both scalars.
Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
ctx : Context, optional
Device context of output. Default is current context.
Returns
-------
out : _Symbol (symbol representing `mxnet.numpy.ndarray` in computational graphs)
Drawn samples from the parameterized gumbel distribution.
"""
from ._symbol import _Symbol as np_symbol
input_type = (isinstance(loc, np_symbol), isinstance(scale, np_symbol))
if ctx is None:
ctx = current_context()
if size == ():
size = None
if input_type == (True, True):
return _npi.gumbel(loc, scale, loc=None, scale=None, size=size,
ctx=ctx, out=out)
elif input_type == (False, True):
return _npi.gumbel(scale, loc=loc, scale=None, size=size,
ctx=ctx, out=out)
elif input_type == (True, False):
return _npi.gumbel(loc, loc=None, scale=scale, size=size,
ctx=ctx, out=out)
else:
return _npi.gumbel(loc=loc, scale=scale, size=size,
ctx=ctx, out=out)
def choice(a, size=None, replace=True, p=None, ctx=None, out=None):
r"""Generates a random sample from a given 1-D array
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a were np.arange(a)
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
replace : boolean, optional
Whether the sample is with or without replacement
p : 1-D array-like, optional
The probabilities associated with each entry in a.
If not given the sample assumes a uniform distribution over all
entries in a.
ctx : Context, optional
Device context of output. Default is current context.
Returns
--------
samples : _Symbol
The generated random samples
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3)
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0])
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False)
array([3,1,0])
>>> #This is equivalent to np.random.permutation(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
array([2, 3, 0])
"""
from ._symbol import _Symbol as np_symbol
if ctx is None:
ctx = current_context()
if size == ():
size = None
if isinstance(a, np_symbol):
ctx = None
if p is None:
indices = _npi.choice(a, a=None, size=size,
replace=replace, ctx=ctx, weighted=False)
return _npi.take(a, indices)
else:
indices = _npi.choice(a, p, a=None, size=size,
replace=replace, ctx=ctx, weighted=True)
return _npi.take(a, indices)
else:
if p is None:
return _npi.choice(a=a, size=size, replace=replace, ctx=ctx, weighted=False, out=out)
else:
return _npi.choice(p, a=a, size=size, replace=replace, ctx=ctx, weighted=True, out=out)
def laplace(loc=0.0, scale=1.0, size=None, dtype=None, ctx=None, out=None):
r"""Draw random samples from a Laplace distribution.
Samples are distributed according to a Laplace distribution parametrized
by *loc* (mean) and *scale* (the exponential decay).
Parameters
----------
    loc : float, optional
        The position of the distribution peak. Default is 0.
    scale : float, optional
        The exponential decay. Default is 1.
    size : int or tuple of ints, optional
        Output shape. If the given shape is, e.g., (m, n, k), then m * n * k
        samples are drawn. Default is None, in which case a single value is returned.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context.
out : ``ndarray``, optional
Store output to an existing ``ndarray``.
Returns
-------
out : _Symbol (symbol representing `mxnet.numpy.ndarray` in computational graphs)
Drawn samples from the parameterized Laplace distribution.
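    Examples
    --------
    Drawn values vary from run to run; the shapes below are deterministic:
    >>> np.random.laplace(0.0, 1.0, size=(2, 3)).shape
    (2, 3)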
"""
from ._symbol import _Symbol as np_symbol
input_type = (isinstance(loc, np_symbol), isinstance(scale, np_symbol))
if dtype is None:
dtype = 'float32'
if ctx is None:
ctx = current_context()
if size == ():
size = None
if input_type == (True, True):
return _npi.laplace(loc, scale, loc=None, scale=None, size=size,
ctx=ctx, dtype=dtype, out=out)
elif input_type == (False, True):
return _npi.laplace(scale, loc=loc, scale=None, size=size,
ctx=ctx, dtype=dtype, out=out)
elif input_type == (True, False):
return _npi.laplace(loc, loc=None, scale=scale, size=size,
ctx=ctx, dtype=dtype, out=out)
else:
return _npi.laplace(loc=loc, scale=scale, size=size,
ctx=ctx, dtype=dtype, out=out)
def gamma(shape, scale=1.0, size=None, dtype=None, ctx=None, out=None):
"""Draw samples from a Gamma distribution.
Samples are drawn from a Gamma distribution with specified parameters,
`shape` (sometimes designated "k") and `scale` (sometimes designated
"theta"), where both parameters are > 0.
Parameters
----------
shape : float or array_like of floats
The shape of the gamma distribution. Should be greater than zero.
scale : float or array_like of floats, optional
The scale of the gamma distribution. Should be greater than zero.
Default is equal to 1.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``shape`` and ``scale`` are both scalars.
Otherwise, ``np.broadcast(shape, scale).size`` samples are drawn.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
ctx : Context, optional
Device context of output. Default is current context.
Returns
-------
out : _Symbol
Drawn samples from the parameterized gamma distribution.
The Gamma distribution is often used to model the times to failure of
electronic components, and arises naturally in processes for which the
waiting times between Poisson distributed events are relevant.
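    Examples
    --------
    Drawn values vary from run to run; the shapes below are deterministic:
    >>> np.random.gamma(2.0, 2.0, size=(3,)).shape
    (3,)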
"""
from ._symbol import _Symbol as np_symbol
input_type = (isinstance(shape, np_symbol), isinstance(scale, np_symbol))
if ctx is None:
ctx = current_context()
if out is not None:
size = out.shape
if size == ():
size = None
if input_type == (True, True):
return _npi.gamma(shape, scale, shape=None, scale=None, size=size,
ctx=ctx, dtype=dtype, out=out)
elif input_type == (False, True):
return _npi.gamma(scale, shape=shape, scale=None, size=size,
ctx=ctx, dtype=dtype, out=out)
elif input_type == (True, False):
return _npi.gamma(shape, shape=None, scale=scale, size=size,
ctx=ctx, dtype=dtype, out=out)
else:
return _npi.gamma(shape=shape, scale=scale, size=size,
ctx=ctx, dtype=dtype, out=out)
raise ValueError("Distribution parameters must be either _Symbol or numbers")
def rayleigh(scale=1.0, size=None, ctx=None, out=None):
r"""Draw samples from a Rayleigh distribution.
The :math:`\chi` and Weibull distributions are generalizations of the
Rayleigh.
Parameters
----------
scale : float or _Symbol
Scale, also equals the mode. Must be non-negative. Default is 1.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``scale`` is a scalar. Otherwise,
``np.array(scale).size`` samples are drawn.
ctx : Context, optional
Device context of output. Default is current context.
Returns
-------
out : _Symbol
Drawn samples from the parameterized Rayleigh distribution.
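    Examples
    --------
    Drawn values vary from run to run; the shapes below are deterministic:
    >>> np.random.rayleigh(scale=2.0, size=(2, 2)).shape
    (2, 2)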
"""
from ..numpy import _Symbol as np_symbol
tensor_type_name = np_symbol
if ctx is None:
ctx = current_context()
if size == ():
size = None
is_tensor = isinstance(scale, tensor_type_name)
if is_tensor:
return _npi.rayleigh(scale, scale=None, size=size, ctx=ctx, out=out)
else:
return _npi.rayleigh(scale=scale, size=size, ctx=ctx, out=out)
def beta(a, b, size=None, dtype=None, ctx=None):
r"""Draw samples from a Beta distribution.
The Beta distribution is a special case of the Dirichlet distribution,
and is related to the Gamma distribution. It has the probability
distribution function
.. math:: f(x; a,b) = \frac{1}{B(\alpha, \beta)} x^{\alpha - 1}
(1 - x)^{\beta - 1},
where the normalisation, B, is the beta function,
.. math:: B(\alpha, \beta) = \int_0^1 t^{\alpha - 1}
(1 - t)^{\beta - 1} dt.
It is often seen in Bayesian inference and order statistics.
Parameters
----------
a : float or _Symbol of floats
Alpha, positive (>0).
b : float or _Symbol of floats
Beta, positive (>0).
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``a`` and ``b`` are both scalars.
Otherwise, ``np.broadcast(a, b).size`` samples are drawn.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Dtype 'float32' or 'float64' is strongly recommended,
since lower precision might lead to out of range issue.
ctx : Context, optional
Device context of output. Default is current context.
Notes
-------
To use this operator with scalars as input, please run ``npx.set_np()`` first.
Returns
-------
out : _Symbol
Drawn samples from the parameterized beta distribution.
"""
if dtype is None:
dtype = np.float64 if is_np_default_dtype() else np.float32
if ctx is None:
ctx = current_context()
if size == ():
size = None
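    # A Beta(a, b) variate can be built from two independent Gamma draws:
    # if X ~ Gamma(a, 1) and Y ~ Gamma(b, 1), then X / (X + Y) ~ Beta(a, b).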
# use fp64 to prevent precision loss
X = gamma(a, 1, size=size, dtype='float64', ctx=ctx)
Y = gamma(b, 1, size=size, dtype='float64', ctx=ctx)
out = X/(X + Y)
return out.astype(dtype)
def f(dfnum, dfden, size=None, ctx=None):
r"""Draw samples from an F distribution.
Samples are drawn from an F distribution with specified parameters,
`dfnum` (degrees of freedom in numerator) and `dfden` (degrees of
freedom in denominator), where both parameters must be greater than
zero.
    The F distribution (also known as the Fisher distribution) is a
    continuous probability distribution that arises in ANOVA tests; its
    random variate is the ratio of two scaled chi-square variates.
Parameters
----------
dfnum : float or _Symbol of floats
Degrees of freedom in numerator, must be > 0.
dfden : float or _Symbol of float
Degrees of freedom in denominator, must be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``dfnum`` and ``dfden`` are both scalars.
Otherwise, ``np.broadcast(dfnum, dfden).size`` samples are drawn.
ctx : Context, optional
Device context of output. Default is current context.
Returns
-------
out : _Symbol
Drawn samples from the parameterized Fisher distribution.
"""
X = chisquare(df=dfnum, size=size, ctx=ctx)
Y = chisquare(df=dfden, size=size, ctx=ctx)
return (X * dfden) / (Y * dfnum)
def chisquare(df, size=None, dtype=None, ctx=None):
r"""
chisquare(df, size=None, dtype=None, ctx=None)
Draw samples from a chi-square distribution.
When `df` independent random variables, each with standard normal
distributions (mean 0, variance 1), are squared and summed, the
resulting distribution is chi-square (see Notes). This distribution
is often used in hypothesis testing.
Parameters
----------
df : float or _Symbol of floats
Number of degrees of freedom, must be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``df`` is a scalar. Otherwise,
``np.array(df).size`` samples are drawn.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
ctx : Context, optional
Device context of output. Default is current context.
Returns
-------
out : _Symbol
Drawn samples from the parameterized chi-square distribution.
Raises
------
ValueError
When `df` <= 0 or when an inappropriate `size`
is given.
Notes
-----
The variable obtained by summing the squares of `df` independent,
standard normally distributed random variables:
    .. math:: Q = \sum_{i=1}^{\mathtt{df}} X^2_i
is chi-square distributed, denoted
.. math:: Q \sim \chi^2_k.
The probability density function of the chi-squared distribution is
.. math:: p(x) = \frac{(1/2)^{k/2}}{\Gamma(k/2)}
x^{k/2 - 1} e^{-x/2},
where :math:`\Gamma` is the gamma function,
    .. math:: \Gamma(x) = \int_0^{\infty} t^{x - 1} e^{-t} dt.
References
----------
.. [1] NIST "Engineering Statistics Handbook"
https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
"""
if dtype is None:
dtype = np.float64 if is_np_default_dtype() else np.float32
if ctx is None:
ctx = current_context()
if size == ():
size = None
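    # A chi-square variate with k degrees of freedom is a Gamma variate with
    # shape k/2 and scale 2.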
return gamma(df/2, 2, size=size, dtype=dtype, ctx=ctx)
def exponential(scale=1.0, size=None, ctx=None, out=None):
r"""Draw samples from an exponential distribution.
Parameters
----------
scale : float or array_like of floats
The scale parameter, :math:`\beta = 1/\lambda`. Must be
non-negative.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``scale`` is a scalar. Otherwise,
``np.array(scale).size`` samples are drawn.
ctx : Context, optional
Device context of output. Default is current context.
Returns
-------
out : _Symbol (symbol representing `mxnet.numpy.ndarray` in computational graphs)
Drawn samples from the parameterized exponential distribution.
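    Examples
    --------
    Drawn values vary from run to run; the shapes below are deterministic:
    >>> np.random.exponential(scale=2.0, size=(2, 2)).shape
    (2, 2)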
"""
from ..numpy import _Symbol as np_symbol
tensor_type_name = np_symbol
if ctx is None:
ctx = current_context()
if size == ():
size = None
is_tensor = isinstance(scale, tensor_type_name)
if is_tensor:
return _npi.exponential(scale, scale=None, size=size,
ctx=ctx, out=out)
else:
return _npi.exponential(scale=scale, size=size, ctx=ctx, out=out)
def weibull(a, size=None, ctx=None, out=None):
r"""Draw samples from a 1-parameter Weibull distribution with given parameter a
via inversion.
Parameters
----------
a : float or array_like of floats
Shape of the distribution. Must be non-negative.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``a`` is a scalar. Otherwise,
``np.array(a).size`` samples are drawn.
Returns
-------
out : _Symbol
Drawn samples from the 1-parameter Weibull distribution.
Examples
--------
>>> np.random.weibull(a=5)
array(0.9553641)
>>> np.random.weibull(a=5, size=[2,3])
array([[1.0466299 , 1.1320982 , 0.98415005],
[1.1430776 , 0.9532727 , 1.1344457 ]])
    >>> np.random.weibull(a=np.array([2,3]))
array([0.98843634, 1.0125613 ])
The Weibull distribution is one of a class of Generalized Extreme
Value (GEV) distributions. This class includes the Gumbel and Frechet
distributions.
    The probability density for the Weibull distribution is
    .. math:: f(x) = \frac{a}{\lambda}\left(\frac{x}{\lambda}\right)^{a-1}e^{-(x/\lambda)^a},
    where :math:`a` is the shape and :math:`\lambda` the scale. The generated
    1-parameter Weibull sample has the scale parameter :math:`\lambda = 1`.
The Weibull distribution is commonly used in reliability engineering to
model time to failure, in modeling particle sizes, in information retrieval
to model dwell time on pages, in quantitative finance to model risk etc.
"""
from ..numpy import _Symbol as np_symbol
tensor_type_name = np_symbol
if ctx is None:
ctx = current_context()
if size == ():
size = None
is_tensor = isinstance(a, tensor_type_name)
if is_tensor:
return _npi.weibull(a, a=None, size=size, ctx=ctx, out=out)
else:
return _npi.weibull(a=a, size=size, ctx=ctx, out=out)
def pareto(a, size=None, ctx=None, out=None):
r"""Draw samples from a Pareto II or Lomax distribution with specified shape a.
Parameters
----------
a : float or array_like of floats
Shape of the distribution. Must be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``a`` is a scalar. Otherwise,
``np.array(a).size`` samples are drawn.
Returns
-------
out : _Symbol
Drawn samples from the Pareto distribution.
Examples
--------
>>> np.random.pareto(a=5)
array(0.12749612)
    >>> np.random.pareto(a=5, size=[2,3])
array([[0.06933999, 0.0344373 , 0.10654891],
[0.0311172 , 0.12911797, 0.03370714]])
    >>> np.random.pareto(a=np.array([2,3]))
array([0.26636696, 0.15685666])
    The probability density for the Pareto distribution is
    .. math:: f(x) = \frac{am^a}{x^{a+1}},
    where :math:`a` is the shape and :math:`m` the scale; here :math:`m` is assumed to be 1.
    The Pareto distribution is a power-law distribution; Pareto originally used
    it to describe the distribution of wealth in an economy.
"""
from ..numpy import _Symbol as np_symbol
tensor_type_name = np_symbol
if ctx is None:
ctx = current_context()
if size == ():
size = None
is_tensor = isinstance(a, tensor_type_name)
if is_tensor:
return _npi.pareto(a, a=None, size=size, ctx=ctx, out=out)
else:
return _npi.pareto(a=a, size=size, ctx=ctx, out=out)
def power(a, size=None, ctx=None, out=None):
r"""Draw samples in [0, 1] from a power distribution with given parameter a.
Parameters
----------
a : float or array_like of floats
Shape of the distribution. Must be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``a`` is a scalar. Otherwise,
``np.array(a).size`` samples are drawn.
Returns
-------
out : _Symbol
Drawn samples from the power distribution.
Examples
--------
>>> np.random.power(a=5)
array(0.8602478)
>>> np.random.power(a=5, size=[2,3])
array([[0.988391 , 0.5153122 , 0.9383134 ],
[0.9078098 , 0.87819266, 0.730635]])
    >>> np.random.power(a=np.array([2,3]))
array([0.7499419 , 0.88894516])
    The probability density function is
    .. math:: f(x; a) = ax^{a-1}, \quad 0 \le x \le 1, \quad a > 0.
    The power distribution is the distribution of the inverse of a
    Pareto-distributed variate, and is a special case of the Beta distribution.
"""
from ..numpy import _Symbol as np_symbol
tensor_type_name = np_symbol
if ctx is None:
ctx = current_context()
if size == ():
size = None
is_tensor = isinstance(a, tensor_type_name)
if is_tensor:
return _npi.powerd(a, a=None, size=size, ctx=ctx, out=out)
else:
return _npi.powerd(a=a, size=size, ctx=ctx, out=out)
def multivariate_normal(mean, cov, size=None, check_valid=None, tol=None):
"""
multivariate_normal(mean, cov, size=None, check_valid=None, tol=None)
Draw random samples from a multivariate normal distribution.
The multivariate normal, multinormal or Gaussian distribution is a
generalization of the one-dimensional normal distribution to higher
dimensions. Such a distribution is specified by its mean and
covariance matrix. These parameters are analogous to the mean
(average or "center") and variance (standard deviation, or "width,"
squared) of the one-dimensional normal distribution.
This operator is a little different from the one in official NumPy.
The official NumPy operator only accepts 1-D ndarray as mean and 2-D ndarray as cov,
whereas the operator in MXNet np supports batch operation and auto-broadcasting.
    Both `mean` and `cov` may have any number of leading dimensions, which correspond
    to a batch shape. They are not required to have the same batch shape, only ones
    that can be broadcast against each other.
Parameters
----------
mean : K-D _Symbol, of shape (..., N)
Mean of the N-dimensional distribution.
cov : (K+1)-D _Symbol, of shape (..., N, N)
Covariance matrix of the distribution. The last two dimensions must be symmetric and
positive-semidefinite for proper sampling.
    size : int or tuple of ints, optional
        Given a shape of, for example, ``(m,n,k)``, ``m*n*k`` identically
        distributed batches of samples are generated, and packed in an
        `m`-by-`n`-by-`k` arrangement. If no shape is specified, a single
        (`N`-D) sample is returned.
check_valid : { 'warn', 'raise', 'ignore' }, optional
Behavior when the covariance matrix is not positive semidefinite.
(Not supported)
tol : float, optional
Tolerance when checking the singular values in covariance matrix.
cov is cast to double before the check.
(Not supported)
Returns
-------
out : _Symbol
The input shape of `mean` and `cov` should satisfy the requirements of broadcasting.
If the parameter `size` is not provided,
the output shape is ``np.broadcast(mean.shape, cov.shape[:-1])``.
Otherwise, the output shape is ``size + np.broadcast(mean.shape, cov.shape[:-1])``
Examples
--------
>>> mean = np.array([1, 2])
>>> cov = np.array([[1, 0], [0, 1]])
>>> x = np.random.multivariate_normal(mean, cov, (3, 3))
>>> x.shape
(3, 3, 2)
    With an identity covariance matrix, each component has standard deviation 1,
    so deviations from the mean are typically of order one:
>>> list((x[0,0,:] - mean) < 0.6)
[True, True] # random
# Performs autobroadcasting when the batch shape of
# `mean` and `cov` is different but compatible.
>>> mean = np.zeros((3,2)) # shape (3, 2)
>>> cov = np.array([[1, 0], [0, 100]]) # shape (2, 2)
>>> x = np.random.multivariate_normal(mean, cov)
>>> x
array([[-1.6115597 , -8.726251 ],
[ 2.2425299 , 2.8104177 ],
[ 0.36229908, -8.386591 ]])
"""
if check_valid is not None:
raise NotImplementedError('Parameter `check_valid` is not supported')
if tol is not None:
raise NotImplementedError('Parameter `tol` is not supported')
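    # Delegate to the fallback op, which broadcasts the batch shapes of `mean`
    # and `cov` and prepends `size` as leading sample dimensions.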
return _npi.mvn_fallback(mean, cov, size=size)
def shuffle(x):
"""
Modify a sequence in-place by shuffling its contents.
This function only shuffles the array along the first axis of a
multi-dimensional array. The order of sub-arrays is changed but
their contents remain the same.
Parameters
----------
x: _Symbol
The array or list to be shuffled.
Returns
-------
None
Examples
--------
>>> arr = np.arange(10)
>>> np.random.shuffle(arr)
>>> arr
array([5., 1., 0., 6., 7., 3., 9., 8., 4., 2.]) # random
Multi-dimensional arrays are only shuffled along the first axis:
>>> arr = np.arange(9).reshape((3, 3))
>>> np.random.shuffle(arr)
>>> arr
array([[6., 7., 8.], # random
[3., 4., 5.],
[0., 1., 2.]])
"""
_npi.shuffle(x, out=x)
| szha/mxnet | python/mxnet/symbol/numpy/random.py | Python | apache-2.0 | 39,064 | ["Gaussian"] | f40fce0bb84be32e588f5aac98187ea060c1e6d03a581edba3feb2be8e3d0cb8 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import paddle
from ...fluid.framework import in_dygraph_mode, default_main_program, _varbase_creator
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.layers.tensor import fill_constant
from ...tensor import concat
from ...tensor.creation import zeros
from paddle.static import Variable
from ...fluid import core, layers, dygraph_utils
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
# TODO: define the common functions to build a neural network
from ...fluid.layers import unfold  # noqa: F401
from ...tensor.manipulation import squeeze
from ...tensor.manipulation import unsqueeze
from ...tensor import clip
from ...tensor import sum
from ...tensor import sqrt
from paddle import _C_ops
__all__ = []
def interpolate(x,
size=None,
scale_factor=None,
mode='nearest',
align_corners=False,
align_mode=0,
data_format='NCHW',
name=None):
"""
    This op resizes a batch of images.
    The input must be a 3-D Tensor of the shape (num_batches, channels, in_w),
    a 4-D Tensor of the shape (num_batches, channels, in_h, in_w), or a 5-D
    Tensor of the shape (num_batches, channels, in_d, in_h, in_w) or
    (num_batches, in_d, in_h, in_w, channels), where in_w is the width, in_h
    the height, and in_d the depth of the input tensor. The resizing only
    applies to the last three dimensions (depth, height and width).
Supporting resample methods:
'linear' : Linear interpolation
'bilinear' : Bilinear interpolation
'trilinear' : Trilinear interpolation
'nearest' : Nearest neighbor interpolation
'bicubic' : Bicubic interpolation
'area': Area interpolation
Linear interpolation is the method of using a line connecting two known quantities
to determine the value of an unknown quantity between the two known quantities.
Nearest neighbor interpolation is to perform nearest neighbor interpolation
in both the 3rd dimension(in height direction) and the 4th dimension(in width
direction) on input tensor.
Bilinear interpolation is an extension of linear interpolation for
interpolating functions of two variables (e.g. H-direction and
W-direction in this op) on a rectilinear 2D grid. The key idea is
to perform linear interpolation first in one direction, and then
again in the other direction.
Trilinear interpolation is an extension of linear interpolation for
interpolating functions of three variables (e.g. D-direction,
H-direction and W-direction in this op) on a rectilinear 3D grid.
The linear interpolation is performed on three directions.
    align_corners and align_mode are optional parameters; the calculation
    method of the interpolation can be selected through them.
Bicubic interpolation is an extension of cubic interpolation for interpolating
data points on a two-dimensional regular grid. The interpolated surface is
smoother than corresponding surfaces obtained by bilinear interpolation or
nearest-neighbor interpolation.
    Area interpolation performs area interpolation in the 3rd dimension
    (height direction), the 4th dimension (width direction) and the 5th
    dimension (depth direction) of the input tensor. Setting the mode to
    'area' will directly call `paddle.nn.functional.adaptive_avg_pool1d`,
    `paddle.nn.functional.adaptive_avg_pool2d` or
    `paddle.nn.functional.adaptive_avg_pool3d`.
Example:
.. code-block:: text
For scale_factor:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Linear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,W_in)
output: (N,C,W_out) where:
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,W_in)
output: (N,C,W_out) where:
W_out = W_{in} * scale_{factor}
Nearest neighbor interpolation:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = floor (H_{in} * scale_{factor})
W_out = floor (W_{in} * scale_{factor})
Bilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Bicubic interpolation:
if:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Trilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = (D_{in}+0.5) * scale_{factor} - 0.5
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = D_{in} * scale_{factor}
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
For details of linear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Linear_interpolation.
For details of nearest neighbor interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.
For details of bilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bilinear_interpolation.
For details of trilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Trilinear_interpolation.
For details of bicubic interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bicubic_interpolation
Parameters:
x (Tensor): 3-D, 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
size (list|tuple|Tensor|None): Output shape of image resize
layer, the shape is (out_w, ) when input is a 3-D Tensor, the shape is (out_h, out_w)
when input is a 4-D Tensor and is (out_d, out_h, out_w) when input is a 5-D Tensor.
            Default: None. If a list/tuple, each element can be an integer or a Tensor of shape [1].
            If a Tensor, it should be 1-D.
        scale_factor (float|Tensor|list|tuple|None): The multiplier for the input height or width. At
            least one of :attr:`size` or :attr:`scale_factor` must be set.
            And :attr:`size` has a higher priority than :attr:`scale_factor`. It has to match the
            input size if it is a list, a tuple or a Tensor.
            Default: None.
mode (str): The resample method. It supports 'linear', 'area', 'nearest', 'bilinear',
'bicubic' and 'trilinear' currently. Default: 'nearest'
        align_corners(bool) : An optional bool. If True, the centers of the 4 corner pixels of the
            input and output tensors are aligned, preserving the values at the
            corner pixels. This only has an effect for 'linear', 'bilinear', 'bicubic' or 'trilinear'.
            Default: False
align_mode(int) : An optional for linear/bilinear/trilinear interpolation. Refer to the formula in the example above,
it can be \'0\' for src_idx = scale_factor*(dst_indx+0.5)-0.5 , can be \'1\' for
src_idx = scale_factor*dst_index.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCW"`, `"NWC"`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
            `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
            in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
A 3-D Tensor of the shape (num_batches, channels, out_w) or (num_batches, out_w, channels),
A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
or 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
Raises:
TypeError: size should be a list or tuple or Tensor.
ValueError: The 'mode' of image_resize can only be 'linear', 'bilinear',
'trilinear', 'bicubic', 'area' or 'nearest' currently.
ValueError: 'linear' only support 3-D tensor.
ValueError: 'bilinear' and 'bicubic' only support 4-D tensor.
ValueError: 'nearest' only support 4-D or 5-D tensor.
ValueError: 'trilinear' only support 5-D tensor.
ValueError: One of size and scale_factor must not be None.
ValueError: size length should be 1 for input 3-D tensor.
ValueError: size length should be 2 for input 4-D tensor.
ValueError: size length should be 3 for input 5-D tensor.
ValueError: scale_factor should be greater than zero.
TypeError: align_corners should be a bool value
ValueError: align_mode can only be '0' or '1'
ValueError: data_format can only be 'NCW', 'NWC', 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.
Examples:
.. code-block:: python
import paddle
import numpy as np
import paddle.nn.functional as F
# given out size
input_data = np.random.rand(2,3,6,10).astype("float32")
x = paddle.to_tensor(input_data)
output_1 = F.interpolate(x=x, size=[12,12])
print(output_1.shape)
# [2L, 3L, 12L, 12L]
# given scale
output_2 = F.interpolate(x=x, scale_factor=[2,1])
print(output_2.shape)
# [2L, 3L, 12L, 10L]
# bilinear interp
output_3 = F.interpolate(x=x, scale_factor=[2,1], mode="bilinear")
            print(output_3.shape)
# [2L, 3L, 12L, 10L]
"""
data_format = data_format.upper()
resample = mode.upper()
resample_type = mode.lower()
resample_methods = [
'LINEAR',
'BILINEAR',
'TRILINEAR',
'NEAREST',
'BICUBIC',
'AREA',
]
if resample not in resample_methods:
raise ValueError(
"The 'resample' of image_resize can only be 'area', 'linear', 'bilinear', 'trilinear', "
" 'bicubic' or 'nearest' currently.")
if resample in ['LINEAR'] and len(x.shape) != 3:
raise ValueError("'linear' only support 3-D tensor.")
if resample in ['NEAREST'] and len(x.shape) != 4 and len(x.shape) != 5:
raise ValueError("'NEAREST' only support 4-D or 5-D tensor.")
if resample in ['BILINEAR', 'BICUBIC'] and len(x.shape) != 4:
raise ValueError("'bilinear' and 'bicubic' only support 4-D tensor.")
    if resample == 'TRILINEAR' and len(x.shape) != 5:
        raise ValueError("'trilinear' only support 5-D tensor.")
if size is None and scale_factor is None:
raise ValueError("One of size and scale_factor must not be None.")
if not isinstance(align_corners, bool):
raise TypeError("Attr align_corners should be a bool value")
if align_mode != 0 and align_mode != 1:
raise ValueError("align_mode can only be 0 or 1")
if align_corners != 0 and resample == 'NEAREST':
raise ValueError(
"align_corners option can only be set with the interpolating modes: linear | bilinear | bicubic | trilinear"
)
if resample == 'AREA':
if isinstance(size, list) or isinstance(size, tuple) or isinstance(
size, Variable):
if len(size) == 0:
raise ValueError("output size can not be empty")
if len(x.shape) == 3:
return paddle.nn.functional.adaptive_avg_pool1d(x, size)
elif len(x.shape) == 4:
return paddle.nn.functional.adaptive_avg_pool2d(x, size)
elif len(x.shape) == 5:
return paddle.nn.functional.adaptive_avg_pool3d(x, size)
helper = LayerHelper('{}_interp_v2'.format(resample_type), **locals())
dtype = helper.input_dtype(input_param_name='x')
if len(x.shape) == 3 and data_format not in ['NCW', 'NWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCW` or `NWC` supported for 3-D input.")
elif len(x.shape) == 4 and data_format not in ['NCHW', 'NHWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCHW` or `NHWC` supported for 4-D input.")
elif len(x.shape) == 5 and data_format not in ['NCDHW', 'NDHWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCDHW` or `NDHWC` supported for 5-D input.")
    def _is_list_or_tuple_(data):
        return isinstance(data, (list, tuple))
if data_format == 'NCHW' or data_format == 'NCDHW' or data_format == 'NCW':
data_layout = 'NCHW'
if data_format == 'NHWC' or data_format == 'NDHWC' or data_format == 'NWC':
data_layout = 'NHWC'
if resample == 'NEAREST':
align_corners = False
inputs = {"X": x}
attrs = {
"out_d": -1,
"out_h": -1,
"out_w": -1,
"interp_method": resample_type,
"align_corners": align_corners,
"align_mode": align_mode,
"data_layout": data_layout
}
out_shape = size
scale = scale_factor
if out_shape is not None and scale is not None:
raise ValueError("Only one of size or scale_factor should be defined.")
if out_shape is not None:
if isinstance(out_shape, Variable) and not in_dygraph_mode():
out_shape.stop_gradient = True
inputs['OutSize'] = out_shape
else:
if in_dygraph_mode():
if isinstance(out_shape, Variable):
out_shape = list(out_shape.numpy())
for i, dim in enumerate(out_shape):
if isinstance(dim, Variable):
out_shape[i] = dim.numpy()[0]
            if not _is_list_or_tuple_(out_shape):
raise TypeError("size should be a list or tuple or Variable.")
# Validate the shape
contain_var = False
for dim_idx, dim_size in enumerate(out_shape):
if isinstance(dim_size, Variable):
contain_var = True
continue
assert dim_size > 0, (
"Each dimension size given in out_shape must be greater than 0."
)
if contain_var:
new_size_tensor = []
size_list = []
for dim in out_shape:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_size_tensor.append(dim)
size_list.append(-1)
else:
assert (isinstance(dim, int))
temp_out = helper.create_variable_for_type_inference(
'int32')
fill_constant(
[1], 'int32', dim, force_cpu=True, out=temp_out)
new_size_tensor.append(temp_out)
size_list.append(dim)
inputs['SizeTensor'] = new_size_tensor
if len(x.shape) == 3:
                if len(out_shape) != 1:
                    raise ValueError(
                        "size length should be 1 for input 3-D tensor.")
if contain_var:
attrs['out_w'] = size_list[0]
else:
out_shape = list(map(int, out_shape))
attrs['out_w'] = out_shape[0]
if len(x.shape) == 4:
if len(out_shape) != 2:
raise ValueError("size length should be 2 for "
"input 4-D tensor.")
if contain_var:
attrs['out_h'] = size_list[0]
attrs['out_w'] = size_list[1]
else:
out_shape = list(map(int, out_shape))
attrs['out_h'] = out_shape[0]
attrs['out_w'] = out_shape[1]
if len(x.shape) == 5:
if len(out_shape) != 3:
raise ValueError("size length should be 3 for "
"input 5-D tensor.")
if contain_var:
attrs['out_d'] = size_list[0]
attrs['out_h'] = size_list[1]
attrs['out_w'] = size_list[2]
else:
out_shape = list(map(int, out_shape))
attrs['out_d'] = out_shape[0]
attrs['out_h'] = out_shape[1]
attrs['out_w'] = out_shape[2]
else:
if in_dygraph_mode() and isinstance(scale, Variable):
scale = list(scale.numpy())
if isinstance(scale, Variable):
scale.stop_gradient = True
inputs["Scale"] = scale
elif isinstance(scale, float) or isinstance(scale, int):
if scale <= 0:
raise ValueError("Attr(scale) should be greater than zero.")
scale_list = []
for i in range(len(x.shape) - 2):
scale_list.append(scale)
attrs['scale'] = list(map(float, scale_list))
elif isinstance(scale, list) or isinstance(scale, tuple):
if len(scale) != len(x.shape) - 2:
raise ValueError("scale_shape length should be {} for "
"input {}-D tensor.".format(
len(x.shape) - 2, len(x.shape)))
for value in scale:
if value <= 0:
raise ValueError("Attr(scale) should be greater than zero.")
attrs['scale'] = list(map(float, scale))
else:
raise TypeError(
"Attr(scale)'s type should be float, int, list, tuple, or Tensor."
)
if in_dygraph_mode():
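        # Flatten the attrs dict into an interleaved (key1, value1, key2, value2, ...)
        # tuple, the calling convention expected by the imperative _C_ops API.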
attr_list = []
for k, v in attrs.items():
attr_list.append(k)
attr_list.append(v)
dy_attr = tuple(attr_list)
if resample_type == "linear":
out = _C_ops.linear_interp_v2(x, *dy_attr)
elif resample_type == "bilinear":
out = _C_ops.bilinear_interp_v2(x, *dy_attr)
elif resample_type == "trilinear":
out = _C_ops.trilinear_interp_v2(x, *dy_attr)
elif resample_type == "nearest":
out = _C_ops.nearest_interp_v2(x, *dy_attr)
elif resample_type == "bicubic":
out = _C_ops.bicubic_interp_v2(x, *dy_attr)
return out
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='{}_interp_v2'.format(resample_type),
inputs=inputs,
outputs={"Out": out},
attrs=attrs)
return out
def upsample(x,
size=None,
scale_factor=None,
mode='nearest',
align_corners=False,
align_mode=0,
data_format='NCHW',
name=None):
"""
    This op resizes a batch of images.
    The input must be a 3-D Tensor of the shape (num_batches, channels, in_w),
    a 4-D Tensor of the shape (num_batches, channels, in_h, in_w), or a 5-D
    Tensor of the shape (num_batches, channels, in_d, in_h, in_w) or
    (num_batches, in_d, in_h, in_w, channels), where in_w is the width, in_h
    the height, and in_d the depth of the input tensor. The resizing only
    applies to the last three dimensions (depth, height and width).
Supporting resample methods:
'linear' : Linear interpolation
'bilinear' : Bilinear interpolation
'trilinear' : Trilinear interpolation
'nearest' : Nearest neighbor interpolation
'bicubic' : Bicubic interpolation
Linear interpolation is the method of using a line connecting two known quantities
to determine the value of an unknown quantity between the two known quantities.
Nearest neighbor interpolation is to perform nearest neighbor interpolation
in both the 3rd dimension(in height direction) and the 4th dimension(in width
direction) on input tensor.
Bilinear interpolation is an extension of linear interpolation for
interpolating functions of two variables (e.g. H-direction and
W-direction in this op) on a rectilinear 2D grid. The key idea is
to perform linear interpolation first in one direction, and then
again in the other direction.
Bicubic interpolation is an extension of cubic interpolation for interpolating
data points on a two-dimensional regular grid. The interpolated surface is
smoother than corresponding surfaces obtained by bilinear interpolation or
nearest-neighbor interpolation.
Trilinear interpolation is an extension of linear interpolation for
interpolating functions of three variables (e.g. D-direction,
H-direction and W-direction in this op) on a rectilinear 3D grid.
The linear interpolation is performed on three directions.
    align_corners and align_mode are optional parameters; the calculation
    method of the interpolation can be selected through them.
    Area interpolation performs area interpolation in the 3rd dimension
    (height direction), the 4th dimension (width direction) and the 5th
    dimension (depth direction) of the input tensor. Setting the mode to
    'area' will directly call `paddle.nn.functional.adaptive_avg_pool1d`,
    `paddle.nn.functional.adaptive_avg_pool2d` or
    `paddle.nn.functional.adaptive_avg_pool3d`.
Example:
.. code-block:: text
For scale_factor:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Linear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,W_in)
output: (N,C,W_out) where:
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,W_in)
output: (N,C,W_out) where:
W_out = W_{in} * scale_{factor}
Nearest neighbor interpolation:
if:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = floor (H_{in} * scale_{factor})
W_out = floor (W_{in} * scale_{factor})
else:
align_corners = True
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = round(H_{in} * scale_{factor})
W_out = round(W_{in} * scale_{factor})
Bilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Bicubic interpolation:
if:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Trilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = (D_{in}+0.5) * scale_{factor} - 0.5
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = D_{in} * scale_{factor}
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
    For details of linear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Linear_interpolation.
    For details of nearest neighbor interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.
For details of bilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bilinear_interpolation.
For details of bicubic interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bicubic_interpolation
For details of trilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Trilinear_interpolation.
Parameters:
x (Tensor): 3-D, 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
size (list|tuple|Tensor|None): Output shape of image resize
layer, the shape is (out_w, ) when input is a 3-D Tensor, the shape is (out_h, out_w)
when input is a 4-D Tensor and is (out_d, out_h, out_w) when input is a 5-D Tensor.
            Default: None. If a list/tuple, each element can be an integer or a Tensor of shape [1].
            If a Tensor, it should be 1-D.
        scale_factor (float|Tensor|list|tuple|None): The multiplier for the input height or width. At
            least one of :attr:`size` or :attr:`scale_factor` must be set.
            And :attr:`size` has a higher priority than :attr:`scale_factor`. It has to match the
            input size if it is a list, a tuple or a Tensor.
            Default: None.
mode (str): The resample method. It supports 'linear', 'nearest', 'bilinear',
'bicubic' and 'trilinear' currently. Default: 'nearest'
align_corners(bool) : An optional bool, If True, the centers of the 4 corner pixels of the
input and output tensors are aligned, preserving the values at the
corner pixels.
Default: False
align_mode(int) : An optional for linear/bilinear/trilinear interpolation. Refer to the formula in the example above,
it can be \'0\' for src_idx = scale_factor*(dst_indx+0.5)-0.5 , can be \'1\' for
src_idx = scale_factor*dst_index.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from:`NCW`, `NWC`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
`"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`. When it is `"NCHW"`, the data is stored
in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
A 3-D Tensor of the shape (num_batches, channels, out_w) or (num_batches, out_w, channels),
A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
or 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
Raises:
TypeError: size should be a list or tuple or Tensor.
ValueError: The 'mode' of image_resize can only be 'linear', 'bilinear',
'trilinear', 'bicubic', or 'nearest' currently.
ValueError: 'linear' only support 3-D tensor.
ValueError: 'bilinear', 'bicubic' and 'nearest' only support 4-D tensor.
ValueError: 'trilinear' only support 5-D tensor.
ValueError: One of size and scale_factor must not be None.
ValueError: size length should be 1 for input 3-D tensor.
ValueError: size length should be 2 for input 4-D tensor.
ValueError: size length should be 3 for input 5-D tensor.
ValueError: scale_factor should be greater than zero.
TypeError: align_corners should be a bool value
ValueError: align_mode can only be '0' or '1'
ValueError: data_format can only be 'NCW', 'NWC', 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.
Examples:
.. code-block:: python
import paddle
import numpy as np
import paddle.nn.functional as F
input_data = np.random.rand(2,3,6,10).astype("float32")
input = paddle.to_tensor(input_data)
output = F.upsample(x=input, size=[12,12])
print(output.shape)
# [2L, 3L, 12L, 12L]
"""
return interpolate(x, size, scale_factor, mode, align_corners, align_mode,
data_format)
def bilinear(x1, x2, weight, bias=None, name=None):
"""
    This layer applies a bilinear transformation to two inputs.
See :ref:`api_nn_Bilinear` for details and output shape.
Parameters:
x1 (Tensor): the first input tensor, it's data type should be float32, float64.
x2 (Tensor): the second input tensor, it's data type should be float32, float64.
weight (Parameter): The learnable weights of this layer, shape is [out_features, in1_features, in2_features].
bias (Parameter, optional): The learnable bias(Bias) of this layer, shape is [1, out_features]. If it is set to None, no bias will be added to the output units. The default value is None.
name (str, optional): The default value is None. Normally there is no need for user
to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.
Returns:
Tensor: A 2-D Tensor of shape [batch_size, out_features].
Examples:
.. code-block:: python
import paddle
import numpy
import paddle.nn.functional as F
x1 = numpy.random.random((5, 5)).astype('float32')
x2 = numpy.random.random((5, 4)).astype('float32')
w = numpy.random.random((1000, 5, 4)).astype('float32')
b = numpy.random.random((1, 1000)).astype('float32')
result = F.bilinear(paddle.to_tensor(x1), paddle.to_tensor(x2), paddle.to_tensor(w), paddle.to_tensor(b)) # result shape [5, 1000]
"""
if in_dygraph_mode():
return _C_ops.bilinear_tensor_product(x1, x2, weight, bias)
check_variable_and_dtype(x1, 'x1', ['float32', 'float64'], 'bilinear')
check_variable_and_dtype(x2, 'x2', ['float32', 'float64'], 'bilinear')
inputs = {"X": x1, "Y": x2, "Weight": weight}
if bias is not None:
inputs["Bias"] = bias
helper = LayerHelper("bilinear", **locals())
out = helper.create_variable_for_type_inference(dtype=x1.dtype)
helper.append_op(
type="bilinear_tensor_product", inputs=inputs, outputs={"Out": out})
return out
def dropout(x,
p=0.5,
axis=None,
training=True,
mode="upscale_in_train",
name=None):
"""
    Dropout is a regularization technique for reducing overfitting by preventing
    neuron co-adaptation during training. The dropout operator randomly sets the
    outputs of some units to zero, while upscaling others according to the given
    dropout probability.
Args:
x (Tensor): The input tensor. The data type is float32 or float64.
p (float|int): Probability of setting units to zero. Default 0.5.
axis (int|list|tuple): The axis along which the dropout is performed. Default None.
        training (bool): A flag indicating whether it is in train phase or not. Default True.
mode(str): ['upscale_in_train'(default) | 'downscale_in_infer'].
1. upscale_in_train(default), upscale the output at training time
- train: out = input * mask / ( 1.0 - dropout_prob )
- inference: out = input
2. downscale_in_infer, downscale the output at inference
- train: out = input * mask
- inference: out = input * (1.0 - dropout_prob)
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor representing the dropout, has same shape and data type as `x` .
Examples:
We use ``p=0.5`` in the following description for simplicity.
1. When ``axis=None`` , this is commonly used dropout, which dropout each element of x randomly.
.. code-block:: text
Let's see a simple case when x is a 2d tensor with shape 2*3:
[[1 2 3]
[4 5 6]]
we generate mask with the same shape as x, which is 2*3. The value of mask is
sampled from a Bernoulli distribution randomly. For example, we may get such mask:
[[0 1 0]
[1 0 1]]
So the output is obtained from elementwise multiply of x and mask:
[[0 2 0]
[4 0 6]]
Using default setting, i.e. ``mode='upscale_in_train'`` ,
if in training phase, the final upscale output is:
[[0 4 0 ]
[8 0 12]]
if in test phase, the output is the same as input:
[[1 2 3]
[4 5 6]]
we can also set ``mode='downscale_in_infer'`` , then
if in training phase, the final output is:
[[0 2 0]
[4 0 6]]
            if in test phase, the scaled output is:
[[0.5 1. 1.5]
[2. 2.5 3. ]]
2. When ``axis!=None`` , this is useful for dropping whole channels from an image or sequence.
.. code-block:: text
Let's see the simple case when x is a 2d tensor with shape 2*3 again:
[[1 2 3]
[4 5 6]]
(1) If ``axis=0`` , this means the dropout is only performed in axis `0` .
we generate mask with the shape 2*1. Only in axis `0` the value is randomly selected.
For example, we may get such mask:
[[1]
[0]]
The output is obtained from elementwise multiply of x and mask. Doing that the mask will be
broadcast from 2*1 to 2*3:
[[1 1 1]
[0 0 0]]
and the result after elementwise multiply is:
[[1 2 3]
[0 0 0]]
then we can do upscale or downscale according to the setting of other arguments.
(2) If ``axis=1`` , this means the dropout is only performed in axis `1` .
we generate mask with the shape 1*3. Only in axis `1` the value is randomly selected.
For example, we may get such mask:
[[1 0 1]]
Doing elementwise multiply the mask will be broadcast from 1*3 to 2*3:
[[1 0 1]
[1 0 1]]
and the result after elementwise multiply is:
[[1 0 3]
[4 0 6]]
(3) What about ``axis=[0, 1]`` ? This means the dropout is performed in all axes of x,
which is the same case as default setting ``axis=None`` .
            (4) You may note that logically `axis=None` means the dropout is performed in no axis of x.
            We generate mask with the shape 1*1. Whole input is randomly selected or dropped.
For example, we may get such mask:
[[0]]
Doing elementwise multiply the mask will be broadcast from 1*1 to 2*3:
[[0 0 0]
[0 0 0]]
and the result after elementwise multiply is:
[[0 0 0]
[0 0 0]]
            Actually this is not what we want because all elements may be set to zero.
When x is a 4d tensor with shape `NCHW`, we can set ``axis=[0,1]`` and the dropout will be performed in channel `N` and `C`, `H` and `W` is tied, i.e. paddle.nn.dropout(x, p, axis=[0,1]) . Please refer to ``paddle.nn.functional.dropout2d`` for more details.
Similarly, when x is a 5d tensor with shape `NCDHW`, we can set ``axis=[0,1]`` to perform dropout3d. Please refer to ``paddle.nn.functional.dropout3d`` for more details.
.. code-block:: python
import paddle
import numpy as np
x = np.array([[1,2,3], [4,5,6]]).astype('float32')
x = paddle.to_tensor(x)
y_train = paddle.nn.functional.dropout(x, 0.5)
y_test = paddle.nn.functional.dropout(x, 0.5, training=False)
y_0 = paddle.nn.functional.dropout(x, axis=0)
y_1 = paddle.nn.functional.dropout(x, axis=1)
y_01 = paddle.nn.functional.dropout(x, axis=[0,1])
print(x)
print(y_train)
print(y_test)
print(y_0)
print(y_1)
print(y_01)
"""
# fast return for p == 0
if p == 0:
return x
if not isinstance(p, (float, int)):
raise TypeError("p argument should be a number")
    if p < 0 or p > 1:
        raise ValueError("p argument should be between 0 and 1")
if mode not in ('downscale_in_infer', 'upscale_in_train'):
raise ValueError(
"mode argument should be 'downscale_in_infer' or 'upscale_in_train'")
if axis and not isinstance(axis, (int, list, tuple)):
raise TypeError("datatype of axis argument should be int or list")
    if axis is None:  # commonly used dropout
        seed = None
        mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode  # semantic transfer
if in_dygraph_mode():
if default_main_program().random_seed != 0:
seed = default_main_program().random_seed
out, mask = _C_ops.dropout(
x, 'dropout_prob', p, 'is_test', not training, 'fix_seed',
seed is not None, 'seed', seed
if seed is not None else 0, 'dropout_implementation', mode)
return out
helper = LayerHelper('dropout', **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'dropout')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
mask = helper.create_variable_for_type_inference(
dtype=core.VarDesc.VarType.UINT8, stop_gradient=True)
def get_attrs(prog, dropout_prob, is_test, seed):
if (seed is None or seed == 0) and prog.random_seed != 0:
seed = prog.random_seed
attrs = {
'dropout_prob': dropout_prob,
'is_test': is_test,
'fix_seed': seed is not None,
'seed': seed if seed is not None else 0,
'dropout_implementation': mode,
}
return attrs
attrs = get_attrs(helper.main_program, p, not training, seed)
helper.append_op(
type='dropout',
inputs={'X': [x]},
outputs={'Out': [out],
'Mask': [mask]},
attrs=attrs)
return out
    else:  # sometimes called dropout_nd; TODO: optimize with C++
if not in_dygraph_mode():
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'dropout')
dtype = x.dtype
keep_prob = 1 - p
if training:
if p == 1.:
return paddle.scale(x, scale=0.)
scale_input = paddle.scale(
x, scale=1 / keep_prob) if mode == 'upscale_in_train' else x
#get mask shape
input_shape = x.shape
if not in_dygraph_mode():
input_shape_tensor = paddle.shape(x)
drop_axes = [axis] if isinstance(axis, int) else list(axis)
if min(drop_axes) < 0 or max(drop_axes) > len(input_shape) - 1:
raise ValueError("axis value should be greater than or equal to 0 and less than dimensions of x:{}, but get axis value:{} " \
.format(len(input_shape), max(drop_axes)))
if len(drop_axes) > len(input_shape):
raise ValueError(
"length of axis should not be greater than dimensions of x:{}, but get length of axis: {}".
format(len(input_shape), len(drop_axes)))
mask_shape = [1] * len(input_shape)
if not in_dygraph_mode():
for i in drop_axes:
mask_shape[i] = input_shape_tensor[i]
else:
for i in drop_axes:
mask_shape[i] = input_shape[i]
#get mask
random_tensor = paddle.uniform(
mask_shape, dtype='float32', min=0., max=1.0)
p = layers.fill_constant(shape=[1], dtype='float32', value=p)
keep_mask = paddle.greater_equal(random_tensor, p)
scale_input = paddle.cast(scale_input, dtype)
keep_mask = paddle.cast(keep_mask, dtype)
ret = paddle.multiply(scale_input, keep_mask, name=name)
return ret
else: # test
ret = paddle.scale(
x, scale=keep_prob) if mode == 'downscale_in_infer' else x
return ret
def dropout2d(x, p=0.5, training=True, data_format='NCHW', name=None):
"""
Randomly zero out entire channels (in the batched input 4d tensor with the shape `NCHW` ,
a channel is a 2D feature map with the shape `HW` ). Each channel will be zeroed out independently
on every forward call with probability `p` using samples from a Bernoulli distribution.
See ``paddle.nn.functional.dropout`` for more details.
Args:
x (Tensor): The input is 4-D Tensor with shape [N, C, H, W] or [N, H, W, C].
The data type is float32 or float64.
p (float): Probability of setting units to zero. Default 0.5.
        training (bool): A flag indicating whether it is in train phase or not. Default True.
data_format (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from `NCHW` or `NHWC` . The default is `NCHW` . When it is `NCHW` , the data is stored in the order of: [batch_size, input_channels, input_height, input_width].
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor representing the dropout2d, has same shape and data type as `x` .
Examples:
.. code-block:: python
import paddle
import numpy as np
x = np.random.random(size=(2, 3, 4, 5)).astype('float32')
x = paddle.to_tensor(x)
y_train = paddle.nn.functional.dropout2d(x) #train
y_test = paddle.nn.functional.dropout2d(x, training=False) #test
for i in range(2):
for j in range(3):
print(x.numpy()[i,j,:,:])
                    print(y_train.numpy()[i,j,:,:]) # may be all 0
print(y_test.numpy()[i,j,:,:])
"""
input_shape = x.shape
if len(input_shape) != 4:
raise ValueError("dimensions of x should be 4, but received {} != 4"\
.format(len(input_shape)))
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
"Attr(data_format): %s." % str(data_format))
return dropout(
x,
p=p,
axis=[0, 1] if data_format == 'NCHW' else [0, 3],
training=training,
mode="upscale_in_train",
name=name)
def dropout3d(x, p=0.5, training=True, data_format='NCDHW', name=None):
"""
Randomly zero out entire channels (in the batched input 5d tensor with the shape `NCDHW` ,
a channel is a 3D feature map with the shape `DHW` ). Each channel will be zeroed out independently
on every forward call with probability `p` using samples from a Bernoulli distribution.
See ``paddle.nn.functional.dropout`` for more details.
Args:
x (Tensor): The input is 5-D Tensor with shape [N, C, D, H, W] or [N, D, H, W, C].
The data type is float32 or float64.
p (float): Probability of setting units to zero. Default 0.5.
        training (bool): A flag indicating whether it is in the training phase or not. Default True.
data_format (str, optional): Specify the data format of the input, and the data format of the output will be consistent with that of the input. An optional string from ``NCDHW`` or ``NDHWC``. The default is ``NCDHW`` . When it is ``NCDHW`` , the data is stored in the order of: [batch_size, input_channels, input_depth, input_height, input_width].
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
        A Tensor representing the output of dropout3d, with the same shape and data type as `x` .
Examples:
.. code-block:: python
import paddle
import numpy as np
x = np.random.random(size=(2, 3, 4, 5, 6)).astype('float32')
x = paddle.to_tensor(x)
y_train = paddle.nn.functional.dropout3d(x) #train
y_test = paddle.nn.functional.dropout3d(x, training=False) #test
print(x.numpy()[0,0,:,:,:])
print(y_train.numpy()[0,0,:,:,:]) # may all 0
print(y_test.numpy()[0,0,:,:,:])
"""
input_shape = x.shape
if len(input_shape) != 5:
raise ValueError("dimensions of x should be 5, but received {} != 5" \
.format(len(input_shape)))
if data_format not in ["NCDHW", "NDHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
"Attr(data_format): %s." % str(data_format))
return dropout(
x,
p=p,
axis=[0, 1] if data_format == 'NCDHW' else [0, 4],
training=training,
mode="upscale_in_train",
name=name)
def alpha_dropout(x, p=0.5, training=True, name=None):
"""
Alpha Dropout is a type of Dropout that maintains the self-normalizing property.
For an input with zero mean and unit standard deviation, the output of Alpha Dropout
maintains the original mean and standard deviation of the input.
    Alpha Dropout works well with the SELU activation function, randomly setting activations to the negative saturation value.
Args:
x (Tensor): The input tensor. The data type is float32 or float64.
p (float | int): Probability of setting units to zero. Default 0.5.
        training (bool): A flag indicating whether it is in the training phase or not. Default True.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
        A Tensor representing the output of alpha dropout, with the same shape and data type as `x`.
Examples:
.. code-block:: python
import paddle
import numpy as np
x = np.array([[-1, 1], [-1, 1]]).astype('float32')
x = paddle.to_tensor(x)
y_train = paddle.nn.functional.alpha_dropout(x, 0.5)
y_test = paddle.nn.functional.alpha_dropout(x, 0.5, training=False)
print(x)
print(y_train)
# [[-0.10721093, 1.6655989 ], [-0.7791938, -0.7791938]] (randomly)
print(y_test)
"""
if not isinstance(p, (float, int)):
raise TypeError("p argument should be a float or int")
if p < 0 or p > 1:
raise ValueError("p argument should between 0 and 1")
if not in_dygraph_mode():
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'alpha_dropout')
if training:
if p == 1:
return paddle.scale(x, scale=0.)
#get transformation params
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
alpha_p = -alpha * scale
a = ((1 - p) * (1 + p * alpha_p**2))**-0.5
b = -a * alpha_p * p
dtype = x.dtype
input_shape = x.shape
#get mask
random_tensor = paddle.uniform(
input_shape, dtype='float32', min=0., max=1.0)
p = layers.fill_constant(shape=[1], dtype='float32', value=p)
keep_mask = paddle.greater_equal(random_tensor, p)
keep_mask = paddle.cast(keep_mask, dtype)
drop_mask = paddle.subtract(
layers.fill_constant(
shape=input_shape, dtype=dtype, value=1.),
keep_mask)
#apply mask
b = layers.fill_constant(shape=[1], dtype=dtype, value=b)
y = paddle.add(paddle.multiply(x, keep_mask),
paddle.scale(
drop_mask, scale=alpha_p))
res = paddle.add(paddle.scale(y, scale=a), b, name=name)
return res
else: # test
return x
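# Worked numbers (sketch, derived from the constants above) for p = 0.5:
#   alpha_p = -alpha * scale ~= -1.7581
#   a = ((1 - 0.5) * (1 + 0.5 * alpha_p**2))**-0.5 ~= 0.8864
#   b = -a * alpha_p * 0.5 ~= 0.7792
# A kept unit x becomes a * x + b (x = -1 -> ~-0.1072, x = 1 -> ~1.6656) and a
# dropped unit becomes a * alpha_p + b ~= -0.7792, matching the "-0.7791938"
# entries in the docstring example.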
def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
"""
Pad tensor according to 'pad' and 'mode'.
    If mode is 'constant' and the length of pad is twice the number of dimensions of x,
    then the padding starts from the first dimension and moves backward over x
    according to 'pad' and 'value'.
    If mode is 'reflect', pad[0] and pad[1] must be no greater
    than width-1. The height and depth dimensions have the same constraint.
Parameters:
        x (Tensor): The input tensor with data type float32/float64/int32/int64.
pad (Tensor | List[int] | Tuple[int]): The padding size with data type int.
            If mode is 'constant' and the length of pad is twice the number of dimensions of x, then x will
            be padded from the first dimension to the last dimension.
Else: 1. If input dimension is 3, then the pad has the form (pad_left,
pad_right). 2. If the input dimension is 4, then the pad has the form (pad_left, pad_right,
pad_top, pad_bottom). 3. If the input dimension is 5, then the pad has the form
(pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back).
mode (str): Four modes: 'constant' (default), 'reflect', 'replicate', 'circular'.
When in 'constant' mode, this op uses a constant value to pad the input tensor.
When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
When in 'replicate' mode, uses input boundaries to pad the input tensor.
When in 'circular' mode, uses circular input to pad the input tensor.
Default is 'constant'
        value (float32): The value to fill the padded areas in 'constant' mode. Default is 0.0.
        data_format (str): A string from: "NCL", "NLC", "NHWC", "NCHW", "NCDHW", "NDHWC". Specify the data format of
            the input data.
            Default is "NCHW".
        name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
    Returns: a Tensor padded according to pad and mode, with the same data type as the input.
Return Type: Tensor
Examples:
.. code-block:: text
x = [[[[[1., 2., 3.],
[4., 5., 6.]]]]]
Case 0:
pad = [0, 0, 0, 0, 0, 0, 1, 1, 0, 0],
mode = 'constant'
value = 0
Out = [[[[[0., 0., 0.],
[1., 2., 3.],
[4., 5., 6.],
[0., 0., 0.]]]]]
Case 1:
pad = [2, 2, 1, 1, 0, 0],
mode = 'constant'
value = 0
Out = [[[[[0. 0. 0. 0. 0. 0. 0.]
[0. 0. 1. 2. 3. 0. 0.]
[0. 0. 4. 5. 6. 0. 0.]
[0. 0. 0. 0. 0. 0. 0.]]]]]
Case 2:
pad = [2, 2, 1, 1, 0, 0],
mode = 'reflect'
Out = [[[[[6. 5. 4. 5. 6. 5. 4.]
[3. 2. 1. 2. 3. 2. 1.]
[6. 5. 4. 5. 6. 5. 4.]
[3. 2. 1. 2. 3. 2. 1.]]]]]
Case 3:
pad = [2, 2, 1, 1, 0, 0],
mode = 'replicate'
Out = [[[[[1. 1. 1. 2. 3. 3. 3.]
[1. 1. 1. 2. 3. 3. 3.]
[4. 4. 4. 5. 6. 6. 6.]
[4. 4. 4. 5. 6. 6. 6.]]]]]
Case 4:
pad = [2, 2, 1, 1, 0, 0],
mode = 'circular'
Out = [[[[[5. 6. 4. 5. 6. 4. 5.]
[2. 3. 1. 2. 3. 1. 2.]
[5. 6. 4. 5. 6. 4. 5.]
[2. 3. 1. 2. 3. 1. 2.]]]]]
Code Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn.functional as F
# example 1
x_shape = (1, 1, 3)
x = paddle.arange(np.prod(x_shape), dtype="float32").reshape(x_shape) + 1
y = F.pad(x, [0, 0, 0, 0, 2, 3], value=1, mode='constant', data_format="NCL")
print(y)
# [[[1. 1. 1. 2. 3. 1. 1. 1.]]]
# example 2
x_shape = (1, 1, 3)
x = paddle.arange(np.prod(x_shape), dtype="float32").reshape(x_shape) + 1
y = F.pad(x, [2, 3], value=1, mode='constant', data_format="NCL")
print(y)
# [[[1. 1. 1. 2. 3. 1. 1. 1.]]]
# example 3
x_shape = (1, 1, 2, 3)
x = paddle.arange(np.prod(x_shape), dtype="float32").reshape(x_shape) + 1
y = F.pad(x, [1, 2, 1, 1], value=1, mode='circular')
print(y)
# [[[[6. 4. 5. 6. 4. 5.]
# [3. 1. 2. 3. 1. 2.]
# [6. 4. 5. 6. 4. 5.]
# [3. 1. 2. 3. 1. 2.]]]]
"""
assert mode in ['reflect', 'replicate', 'constant', 'circular'], \
"mode should be one of constant, reflect, replicate, circular, but got {}.".format(mode)
data_format = data_format.upper()
assert data_format in ["NCL", "NCHW", "NCDHW", "NLC", "NHWC", "NDHWC"], \
"data_format should be in one of [NCL, NCHW, NCDHW, NLC, NHWC, NDHWC], " \
"but got {}".format(data_format)
x_dim = len(x.shape)
if mode == "constant" and isinstance(pad, (
list, tuple)) and len(pad) == x_dim * 2:
return layers.pad(x, pad, pad_value=value)
assert x_dim in [
3, 4, 5
], "input tesor dimension must be in [3, 4, 5] but got {}".format(x_dim)
supported_format_map = {
3: ["NCL", "NLC"],
4: ["NCHW", "NHWC"],
5: ["NCDHW", "NDHWC"],
}
assert data_format in supported_format_map[x_dim], \
"input tensor dimension is {}, it's data format should be in {} but got {}".format(
x_dim, supported_format_map[x_dim], data_format)
unsqueezed_dim = []
if isinstance(pad, Variable):
if data_format in ["NCL", "NCHW", "NCDHW"]:
data_format = "NCDHW"
if x_dim == 3:
pad = concat([zeros((4, ), dtype="int32"), pad], axis=0)
unsqueezed_dim = [3, 4]
x = unsqueeze(x, axis=unsqueezed_dim)
elif x_dim == 4:
pad = concat([pad, zeros((2, ), dtype="int32")], axis=0)
unsqueezed_dim = [2]
x = unsqueeze(x, axis=unsqueezed_dim)
elif data_format in ["NLC", "NHWC", "NDHWC"]:
data_format = "NDHWC"
if x_dim == 3:
pad = concat([zeros((4, ), dtype="int32"), pad], axis=0)
unsqueezed_dim = [2, 3]
x = unsqueeze(x, axis=unsqueezed_dim)
elif x_dim == 4:
pad = concat([pad, zeros((2, ), dtype="int32")], axis=0)
unsqueezed_dim = [1]
x = unsqueeze(x, axis=unsqueezed_dim)
else:
pad = list(pad)
if data_format in ["NCL", "NCHW", "NCDHW"]:
data_format = "NCDHW"
if x_dim == 3:
pad = [0, 0, 0, 0] + pad
unsqueezed_dim = [3, 4]
x = unsqueeze(x, axis=unsqueezed_dim)
elif x_dim == 4:
pad = pad + [0, 0]
unsqueezed_dim = [2]
x = unsqueeze(x, axis=unsqueezed_dim)
elif data_format in ["NLC", "NHWC", "NDHWC"]:
data_format = "NDHWC"
if x_dim == 3:
pad = [0, 0, 0, 0] + pad
unsqueezed_dim = [2, 3]
x = unsqueeze(x, axis=unsqueezed_dim)
elif x_dim == 4:
pad = pad + [0, 0]
unsqueezed_dim = [1]
x = unsqueeze(x, axis=unsqueezed_dim)
if in_dygraph_mode():
if isinstance(pad, Variable):
pad = pad.numpy()
out = _C_ops.pad3d(x, "paddings", pad, "mode", mode, "value", value,
"data_format", data_format, "name", name)
else:
attrs = {'mode': mode, 'value': value, 'data_format': data_format}
inputs = {'X': [x]}
if isinstance(pad, Variable):
inputs['Paddings'] = [pad]
attrs['paddings'] = []
else:
attrs['paddings'] = pad
helper = LayerHelper('pad3d', **locals())
dtype = helper.input_dtype(input_param_name='input')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='pad3d', inputs=inputs, outputs={"Out": out}, attrs=attrs)
if len(unsqueezed_dim) != 0:
out = squeeze(out, axis=unsqueezed_dim)
return out
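# Dispatch sketch for pad() above (illustrative values, not from the original
# source): a 3-D "NCL" input with pad=[2, 3], when the constant-mode shortcut
# does not apply, is routed through the pad3d op as
#   pad -> [0, 0, 0, 0, 2, 3]  (zeros fill the four fake H/W entries;
#                               [2, 3] pads the depth dim that now holds L)
#   x   -> unsqueezed at dims [3, 4] into a 5-D "NCDHW" tensor
# and the result is squeezed back at the same dims afterwards.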
def zeropad2d(x, padding, data_format="NCHW", name=None):
"""
    Pads the input tensor boundaries with zero according to 'padding'.
Args:
x(Tensor): The input tensor with data type float16/float32/float64/int32/int64.
padding(int | Tensor | List[int] | Tuple[int]): The padding size with data type int.
            The input dimension should be 4 and padding has the form (pad_left, pad_right,
pad_top, pad_bottom).
        data_format(str): A string from: "NHWC", "NCHW". Specify the data format of
the input data. Default: "NCHW".
name(str, optional): The default value is None. Normally there is no need for user
to set this property.
    Returns: Tensor, padded with 0 according to padding, with the same data type as the input.
Examples:
.. code-block:: python
import paddle
import numpy as np
import paddle.nn.functional as F
x_shape = (1, 1, 2, 3)
x = paddle.arange(np.prod(x_shape), dtype="float32").reshape(x_shape) + 1
y = F.zeropad2d(x, [1, 2, 1, 1])
# [[[[0. 0. 0. 0. 0. 0.]
# [0. 1. 2. 3. 0. 0.]
# [0. 4. 5. 6. 0. 0.]
# [0. 0. 0. 0. 0. 0.]]]]
"""
return pad(x,
pad=padding,
mode='constant',
value=0,
data_format=data_format,
name=name)
def cosine_similarity(x1, x2, axis=1, eps=1e-8):
"""
Compute cosine similarity between x1 and x2 along axis.
Parameters:
x1 (Tensor): First input. float32/double.
x2 (Tensor): Second input. float32/double.
axis (int): Dimension of vectors to compute cosine similarity. Default is 1.
eps(float): Small value to avoid division by zero. Default is 1e-8.
Returns: a Tensor representing cosine similarity between x1 and x2 along axis.
Return Type: Tensor
Examples:
.. code-block:: text
Case 0:
x1 = [[0.8024077 0.9927354 0.27238318 0.8344984 ]
[0.48949873 0.5797396 0.65444374 0.66510963]
[0.1031398 0.9614342 0.08365563 0.6796464 ]
[0.10760343 0.7461209 0.7726148 0.5801006 ]]
x2 = [[0.62913156 0.1536727 0.9847992 0.04591406]
[0.9098952 0.15715368 0.8671125 0.3156102 ]
[0.4427798 0.54136837 0.5276275 0.32394758]
[0.3769419 0.8535014 0.48041078 0.9256797 ]]
axis = 1
eps = 1e-8
Out: [0.5275037 0.8368967 0.75037485 0.9245899]
Code Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
import numpy as np
np.random.seed(0)
x1 = np.random.rand(2,3)
x2 = np.random.rand(2,3)
x1 = paddle.to_tensor(x1)
x2 = paddle.to_tensor(x2)
result = paddle.nn.functional.cosine_similarity(x1, x2, axis=0)
print(result)
# [0.99806249 0.9817672 0.94987036]
"""
w12 = sum(paddle.multiply(x1, x2), axis=axis)
w1 = sum(paddle.multiply(x1, x1), axis=axis)
w2 = sum(paddle.multiply(x2, x2), axis=axis)
n12 = sqrt(clip(w1 * w2, min=eps * eps))
cos_sim = w12 / n12
return cos_sim
def linear(x, weight, bias=None, name=None):
r"""
Fully-connected linear transformation operator. For each input :math:`X` ,
the equation is:
.. math::
Out = XW + b
where :math:`W` is the weight and :math:`b` is the bias.
If the weight is a 2-D tensor of shape :math:`[in\_features, out\_features]` ,
input should be a multi-dimensional tensor of shape
:math:`[batch\_size, *, in\_features]` , where :math:`*` means any number of
additional dimensions. The linear operator multiplies input tensor with
    weight and produces an output tensor of shape :math:`[batch\_size, *, out\_features]` .
If :math:`bias` is not None, the bias should be a 1-D tensor of shape
:math:`[out\_features]` and will be added to the output.
Parameters:
x (Tensor): Input tensor. The data type should be float16, float32 or float64.
weight (Tensor): Weight tensor. The data type should be float16, float32 or float64.
bias (Tensor, optional): Bias tensor. The data type should be float16, float32 or float64.
If it is set to None, no bias will be added to the output units.
name (str, optional): Normally there is no need for user to set this parameter.
For detailed information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor, the shape is :math:`[batch\_size, *, out\_features]` and the
data type is the same with input :math:`x` .
Examples:
.. code-block:: python
import paddle
x = paddle.randn((3, 2), dtype="float32")
# x: [[-0.32342386 -1.200079 ]
# [ 0.7979031 -0.90978354]
# [ 0.40597573 1.8095392 ]]
weight = paddle.full(shape=[2, 4], fill_value="0.5", dtype="float32", name="weight")
# weight: [[0.5 0.5 0.5 0.5]
# [0.5 0.5 0.5 0.5]]
bias = paddle.ones(shape=[4], dtype="float32", name="bias")
# bias: [1. 1. 1. 1.]
y = paddle.nn.functional.linear(x, weight, bias)
# y: [[0.23824859 0.23824859 0.23824859 0.23824859]
# [0.9440598 0.9440598 0.9440598 0.9440598 ]
# [2.1077576 2.1077576 2.1077576 2.1077576 ]]
"""
if in_dygraph_mode():
pre_bias = _C_ops.matmul_v2(x, weight, 'trans_x', False, 'trans_y',
False)
if bias is None:
return pre_bias
return _C_ops.elementwise_add(pre_bias, bias)
else:
helper = LayerHelper('linear', **locals())
dtype = x.dtype
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'linear')
check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'], 'linear')
inputs = {'X': [x], 'Y': [weight]}
attrs = {'trans_x': False, 'trans_y': False}
tmp = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='matmul_v2', inputs=inputs, outputs={'Out': tmp}, attrs=attrs)
if bias is not None:
res = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='elementwise_add',
inputs={'X': [tmp],
'Y': [bias]},
outputs={'Out': [res]},
attrs={'axis': len(x.shape) - 1})
else:
res = tmp
return res
def label_smooth(label, prior_dist=None, epsilon=0.1, name=None):
r"""
Label smoothing is a mechanism to regularize the classifier layer and is called
label-smoothing regularization (LSR).
Label smoothing is proposed to encourage the model to be less confident,
since optimizing the log-likelihood of the correct label directly may
cause overfitting and reduce the ability of the model to adapt. Label
smoothing replaces the ground-truth label :math:`y` with the weighted sum
of itself and some fixed distribution :math:`\mu`. For class :math:`k`,
i.e.
.. math::
        \tilde{y}_k = (1 - \epsilon) * y_k + \epsilon * \mu_k,
where :math:`1 - \epsilon` and :math:`\epsilon` are the weights
    respectively, and :math:`\tilde{y}_k` is the smoothed label. Usually a
uniform distribution is used for :math:`\mu`.
See more details about label smoothing in https://arxiv.org/abs/1512.00567.
Parameters:
label(Tensor): The input variable containing the label data. The
label data should use one-hot representation. It's
a multidimensional tensor with a shape of
:math:`[N_1, ..., Depth]`, where Depth is class number. The dtype can be "float32" and "float64".
prior_dist(Tensor, optional): The prior distribution to be used to smooth
                                      labels. If not provided, a uniform distribution
is used. It's a multidimensional tensor with a shape of
:math:`[1, class\_num]` . The default value is None.
epsilon(float, optional): The weight used to mix up the original ground-truth
distribution and the fixed distribution. The default value is
0.1.
name(str, optional): The default value is None. Normally there is no need for user
to set this property. For more information, please refer to
:ref:`api_guide_Name`.
Returns:
Tensor: The tensor containing the smoothed labels.
Examples:
.. code-block:: python
import paddle
import numpy as np
x_data = np.array([[[0, 1, 0],
[ 1, 0, 1]]]).astype("float32")
print(x_data.shape)
paddle.disable_static()
x = paddle.to_tensor(x_data, stop_gradient=False)
output = paddle.nn.functional.label_smooth(x)
print(output)
#[[[0.03333334 0.93333334 0.03333334]
# [0.93333334 0.03333334 0.93333334]]]
"""
if epsilon > 1. or epsilon < 0.:
raise ValueError("The value of epsilon must be between 0 and 1.")
if in_dygraph_mode():
return _C_ops.label_smooth(label, prior_dist, 'epsilon', float(epsilon))
check_variable_and_dtype(label, 'label', ['float32', 'float64'],
'label_smooth')
helper = LayerHelper("label_smooth", **locals())
label.stop_gradient = True
smooth_label = helper.create_variable_for_type_inference(label.dtype)
helper.append_op(
type="label_smooth",
inputs={"X": label,
"PriorDist": prior_dist} if prior_dist else {"X": label},
outputs={"Out": smooth_label},
attrs={"epsilon": float(epsilon)})
return smooth_label
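# Worked numbers (sketch) for the docstring example above: epsilon = 0.1,
# 3 classes, no prior_dist, so mu_k = 1/3 and
#   y = 1 -> 0.9 * 1 + 0.1 / 3 ~= 0.93333334
#   y = 0 -> 0.9 * 0 + 0.1 / 3 ~= 0.03333334
# matching the printed output.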
def class_center_sample(label, num_classes, num_samples, group=None):
"""
    The class center sampling method is proposed in the paper Partial FC and samples only a subset of the class centers.
    The process of sampling the subset of class centers is straightforward:
1. First select the positive class centers;
2. Then randomly sample negative class centers.
Specifically, given a label tensor, shape [batch_size], select all the positive class centers and randomly
sample negative class centers, then remap the input label tensor using the sampled class centers.
    For more information, see Partial FC: Training 10 Million Identities on a Single Machine,
arxiv: https://arxiv.org/abs/2010.05222
.. hint::
If the number of the positive class centers is greater than the input num_samples, it keeps all the positive
class centers and the shape of sampled_class_center will be [num_positive_class_centers].
The API supports CPU, single GPU and multi GPU.
Args:
label (Tensor): 1-D tensor with shape [N], each label in [0, num_classes)
num_classes (int): A positive integer to specify the number of classes at local rank.
Note that num_classes of each GPU can be different.
num_samples (int): A positive integer to specify the number of class center to sample.
group (Group, optional): The abstract representation of group.
See paddle.distributed.collective.Group. Default is ``None``.
Returns:
Tuple of two ``Tensor`` : (remapped_label, sampled_class_center), remapped label using sampled class center,
sampled class center from [0, num_classes).
Examples:
.. code-block:: python
:name: code-example1
# CPU or single GPU
import paddle
num_classes = 20
batch_size = 10
num_samples = 6
label = paddle.randint(low=0, high=num_classes, shape=[batch_size], dtype='int64')
remapped_label, sampled_class_index = paddle.nn.functional.class_center_sample(label, num_classes, num_samples)
print(label)
print(remapped_label)
print(sampled_class_index)
# the output is
#Tensor(shape=[10], dtype=int64, place=CPUPlace, stop_gradient=True,
# [11, 5 , 1 , 3 , 12, 2 , 15, 19, 18, 19])
#Tensor(shape=[10], dtype=int64, place=CPUPlace, stop_gradient=True,
# [4, 3, 0, 2, 5, 1, 6, 8, 7, 8])
#Tensor(shape=[9], dtype=int64, place=CPUPlace, stop_gradient=True,
# [1 , 2 , 3 , 5 , 11, 12, 15, 18, 19])
.. code-block:: python
:name: code-example2
# required: distributed
# Multi GPU, test_class_center_sample.py
import paddle
import paddle.distributed as dist
strategy = dist.fleet.DistributedStrategy()
dist.fleet.init(is_collective=True, strategy=strategy)
batch_size = 10
num_samples = 6
rank_id = dist.get_rank()
# num_classes of each GPU can be different, e.g num_classes_list = [10, 8]
num_classes_list = [10, 10]
num_classes = paddle.sum(paddle.to_tensor(num_classes_list))
label = paddle.randint(low=0, high=num_classes.item(), shape=[batch_size], dtype='int64')
label_list = []
dist.all_gather(label_list, label)
label = paddle.concat(label_list, axis=0)
remapped_label, sampled_class_index = paddle.nn.functional.class_center_sample(label, num_classes_list[rank_id], num_samples)
print(label)
print(remapped_label)
print(sampled_class_index)
#python -m paddle.distributed.launch --gpus=0,1 test_class_center_sample.py
# rank 0 output:
#Tensor(shape=[20], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
# [10, 17, 15, 11, 9 , 12, 18, 18, 17, 18, 19, 2 , 8 , 13, 11, 13, 9 , 10, 0 , 4 ])
#Tensor(shape=[20], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
# [6 , 11, 10, 7 , 4 , 8 , 12, 12, 11, 12, 13, 1 , 3 , 9 , 7 , 9 , 4 , 6 , 0 , 2 ])
#Tensor(shape=[6], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
# [0, 2, 4, 8, 9, 3])
# rank 1 output:
#Tensor(shape=[20], dtype=int64, place=CUDAPlace(1), stop_gradient=True,
# [10, 17, 15, 11, 9 , 12, 18, 18, 17, 18, 19, 2 , 8 , 13, 11, 13, 9 , 10, 0 , 4 ])
#Tensor(shape=[20], dtype=int64, place=CUDAPlace(1), stop_gradient=True,
# [6 , 11, 10, 7 , 4 , 8 , 12, 12, 11, 12, 13, 1 , 3 , 9 , 7 , 9 , 4 , 6 , 0 , 2 ])
#Tensor(shape=[7], dtype=int64, place=CUDAPlace(1), stop_gradient=True,
# [0, 1, 2, 3, 5, 7, 8])
"""
if group is not None and not group.is_member():
return
ring_id = 0 if group is None else group.id
rank = 0
nranks = 1
if core.is_compiled_with_dist():
parallel_env = paddle.distributed.ParallelEnv()
global_rank = parallel_env.rank
rank = global_rank if group is None else group.get_group_rank(
global_rank)
nranks = parallel_env.world_size if group is None else group.nranks
if num_samples > num_classes:
raise ValueError(
'Expected num_samples less than or equal to {}, got num_samples {}'.
format(num_classes, num_samples))
label_size = 1
for dim in list(label.shape):
label_size *= dim
if label_size != -1 and label_size < 1:
raise ValueError('Expected label_size > 0 \
(got label_size: {})'.format(label_size))
label_dims = len(list(label.shape))
if label_dims != 1:
raise ValueError('Expected label_dims == 1 \
(got label_dims: {})'.format(label_dims))
seed = None
if (seed is None or seed == 0) and default_main_program().random_seed != 0:
seed = default_main_program().random_seed
if in_dygraph_mode():
remapped_label, sampled_class_center = _C_ops.class_center_sample(
label, 'num_classes', num_classes, 'num_samples', num_samples,
'ring_id', ring_id, 'nranks', nranks, 'rank', rank, 'fix_seed',
seed is not None, 'seed', seed if seed is not None else 0)
return remapped_label, sampled_class_center
check_variable_and_dtype(label, 'label', ['int64', 'int32'],
'class_center_sample')
op_type = 'class_center_sample'
helper = LayerHelper(op_type, **locals())
remapped_label = helper.create_variable_for_type_inference(
dtype=label.dtype)
sampled_class_center = helper.create_variable_for_type_inference(
dtype=label.dtype)
helper.append_op(
type=op_type,
inputs={'Label': label},
outputs={
'RemappedLabel': remapped_label,
'SampledLocalClassCenter': sampled_class_center
},
attrs={
'num_classes': num_classes,
'num_samples': num_samples,
'ring_id': ring_id,
'nranks': nranks,
'rank': rank,
'fix_seed': seed is not None,
'seed': seed if seed is not None else 0
})
return remapped_label, sampled_class_center
def fold(x,
output_sizes,
kernel_sizes,
strides=1,
paddings=0,
dilations=1,
name=None):
r"""
    This op combines an array of sliding local blocks into a large containing
    tensor, also known as col2im when operated on a batched 2D image tensor. Fold calculates each
    combined value in the resulting large tensor by summing all values from all containing blocks.
    For each input :math:`x` with shape [N, C_in, L], the output shape [N, C_out, H_out, W_out]
    can be calculated as follows.
    .. math::
        H_{out} &= output\_sizes[0] \\
        W_{out} &= output\_sizes[1] \\
        C_{out} &= C_{in} / kernel\_sizes[0] / kernel\_sizes[1]
Parameters:
x(Tensor): 3-D Tensor, input tensor of format [N, C, L],
data type can be float32 or float64
        output_sizes(list): The output size, should be [output_size_h, output_size_w]
            or an integer o treated as [o, o].
kernel_sizes(int|list): The size of convolution kernel, should be [k_h, k_w]
or an integer k treated as [k, k].
strides(int|list): The strides, should be [stride_h, stride_w]
            or an integer stride treated as [stride, stride].
            By default, strides is [1, 1].
paddings(int|list): The paddings of each dimension, should be
[padding_top, padding_left, padding_bottom, padding_right]
or [padding_h, padding_w] or an integer padding.
            If [padding_h, padding_w] was given, it will be expanded to
            [padding_h, padding_w, padding_h, padding_w]. If an integer
            padding was given, [padding, padding, padding, padding] will
            be used. By default, paddings is [0, 0, 0, 0].
dilations(int|list): the dilations of convolution kernel, should be
[dilation_h, dilation_w], or an integer dilation treated as
            [dilation, dilation]. By default, it is [1, 1].
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
        The tensor formed by combining a group of sliding local blocks.
        The output shape is [N, C_out, H_out, W_out] as described above.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.randn([2,12,9])
y = F.fold(x, output_sizes=(4, 4), kernel_sizes=2)
# y.shape = [2,3,4,4]
"""
helper = LayerHelper("fold", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fold')
assert len(x.shape) == 3, \
"input should be the format of [N, C, L]"
if isinstance(output_sizes, int):
output_sizes = [output_sizes, output_sizes]
else:
assert isinstance(output_sizes, list) and (len(output_sizes) == 2), \
"output_sizes should either be an integer or a list of two integers"
if isinstance(kernel_sizes, int):
kernel_sizes = [kernel_sizes, kernel_sizes]
else:
assert isinstance(kernel_sizes, list) and (len(kernel_sizes) == 2), \
"kernel_sizes should either be an integer or a list of two integers"
if isinstance(strides, int):
strides = [strides, strides]
else:
assert isinstance(strides, list) and (len(strides) == 2), \
"strides should either be an integer or a list of two integers"
if isinstance(dilations, int):
dilations = [dilations, dilations]
else:
assert isinstance(dilations, list) and (len(dilations) == 2), \
"dilations should either be an integer or a list of two integers"
if isinstance(paddings, int):
paddings = [paddings] * 4
elif isinstance(paddings, list):
if len(paddings) == 2:
paddings = paddings * 2
elif len(paddings) == 4:
pass
else:
raise ValueError(
"paddings should either be an integer or a list of 2 or 4 integers"
)
else:
raise ValueError(
"Unexpected type of paddings, it should be either an integer or a list"
"of 2 or 4 integers")
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="fold",
inputs={"X": x},
outputs={"Y": out},
attrs={
"output_sizes": output_sizes,
"kernel_sizes": kernel_sizes,
"strides": strides,
"paddings": paddings,
"dilations": dilations
})
return out
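# Shape check (sketch) for the docstring example above: x is [2, 12, 9],
# kernel_sizes=2, output_sizes=(4, 4), default stride 1 and no padding, so
#   C_out = 12 / (2 * 2) = 3
#   blocks per spatial dim = (4 - 2) / 1 + 1 = 3, hence L = 3 * 3 = 9
# which is consistent with y.shape == [2, 3, 4, 4].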
|
luotao1/Paddle
|
python/paddle/nn/functional/common.py
|
Python
|
apache-2.0
| 83,880
|
[
"NEURON"
] |
d895c6ab3164f60ddb2314296bb3a09cb13772498a402e4b36c3602d54555975
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkOutlineFilter(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkOutlineFilter(), 'Processing.',
('vtkDataSet',), ('vtkPolyData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
chrisidefix/devide
|
modules/vtk_basic/vtkOutlineFilter.py
|
Python
|
bsd-3-clause
| 486
|
[
"VTK"
] |
4121a7e5c811ba0ad2311e867bc0192cfc2be401f239ac81e317635ecfc89b5e
|
#!/usr/bin/env python
#
# Tools for working with bowtie2-build
#
# http://bowtie-bio.sourceforge.net/bowtie2/
#
# (c) The James Hutton Institute 2016
# Author: Leighton Pritchard and Peter Thorpe
import subprocess
from collections import namedtuple
from .tools import is_exe, NotExecutableError
# factory class for the values returned by the Bowtie2_Build class
# command - the command line executed (built in __build_cmd)
# index - the stem of the generated index files (self._outfname)
# stdout/stderr - captured process output (None on a dry run)
Results = namedtuple("Results", "command index stdout stderr")
class Bowtie2_BuildError(Exception):
"""Exception raised when bowtie2-build fails"""
def __init__(self, message):
self.message = message
class Bowtie2_Build(object):
"""Class for working with bowtie2-build"""
def __init__(self, exe_path):
"""Instantiate with location of executable"""
if not is_exe(exe_path):
msg = "{0} is not an executable".format(exe_path)
raise NotExecutableError(msg)
self._exe_path = exe_path
def run(self, infname, outstem, dry_run=False):
"""Construct and execute a bowtie2-build command-line"""
self.__build_cmd(infname, outstem)
if dry_run:
results = Results(self._cmd, self._outfname, None, None)
else:
pipe = subprocess.run(self._cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True)
results = Results(self._cmd, self._outfname, pipe.stdout,
pipe.stderr)
return results
def __build_cmd(self, infname, outstem):
"""Build a command-line for bowtie2-build"""
self._outfname = outstem
cmd = ["bowtie2-build",
"--quiet",
"-f",
infname,
self._outfname]
self._cmd = ' '.join(cmd)
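# Usage sketch (hypothetical paths; not part of the original module):
#
#   from pycits.bowtie2_build import Bowtie2_Build
#   builder = Bowtie2_Build("/usr/local/bin/bowtie2-build")
#   results = builder.run("genome.fasta", "genome_index")
#   print(results.command)  # the command line that was executed
#   print(results.index)    # the index stem ("genome_index")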
|
widdowquinn/THAPBI-pycits
|
pycits/bowtie2_build.py
|
Python
|
mit
| 2,017
|
[
"Bowtie"
] |
70b1248a575a8d040039cc5614cdbb2adc434a8d9155d3890f3a73182b37d63b
|
'''This example uses a convolutional stack followed by a recurrent stack
and a CTC logloss function to perform optical character recognition
of generated text images. I have no evidence of whether it actually
learns general shapes of text, or just is able to recognize all
the different fonts thrown at it...the purpose is more to demonstrate CTC
inside of Keras. Note that the font list may need to be updated
for the particular OS in use.
This starts off with 4 letter words. For the first 12 epochs, the
difficulty is gradually increased using the TextImageGenerator class
which is both a generator class for test/train data and a Keras
callback class. After 20 epochs, longer sequences are thrown at it
by recompiling the model to handle a wider image and rebuilding
the word list to include two words separated by a space.
The table below shows normalized edit distance values. Theano uses
a slightly different CTC implementation, hence the different results.
          Norm. ED
Epoch |   TF  |   TH
--------------------
  10  | 0.027 | 0.064
  15  | 0.038 | 0.035
  20  | 0.043 | 0.045
  25  | 0.014 | 0.019
This requires cairo and editdistance packages:
pip install cairocffi
pip install editdistance
Created by Mike Henry
https://github.com/mbhenry/
'''
import os
import itertools
import re
import datetime
import cairocffi as cairo
import editdistance
import numpy as np
from scipy import ndimage
import pylab
from keras import backend as K
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers import Input, Dense, Activation
from keras.layers import Reshape, Lambda
from keras.layers.merge import add, concatenate
from keras.models import Model
from keras.layers.recurrent import GRU
from keras.optimizers import SGD
from keras.utils.data_utils import get_file
from keras.preprocessing import image
import keras.callbacks
OUTPUT_DIR = 'image_ocr'
np.random.seed(55)
# this creates larger "blotches" of noise which look
# more realistic than just adding gaussian noise
# assumes greyscale with pixels ranging from 0 to 1
def speckle(img):
severity = np.random.uniform(0, 0.6)
blur = ndimage.gaussian_filter(np.random.randn(*img.shape) * severity, 1)
img_speck = (img + blur)
img_speck[img_speck > 1] = 1
img_speck[img_speck <= 0] = 0
return img_speck
# paints the string at a random location within the bounding box
# also uses a random font, a slight random rotation,
# and a random amount of speckle noise
def paint_text(text, w, h, rotate=False, ud=False, multi_fonts=False):
surface = cairo.ImageSurface(cairo.FORMAT_RGB24, w, h)
with cairo.Context(surface) as context:
context.set_source_rgb(1, 1, 1) # White
context.paint()
        # this font list works in CentOS 7
if multi_fonts:
fonts = ['Century Schoolbook', 'Courier', 'STIX', 'URW Chancery L', 'FreeMono']
context.select_font_face(np.random.choice(fonts), cairo.FONT_SLANT_NORMAL,
np.random.choice([cairo.FONT_WEIGHT_BOLD, cairo.FONT_WEIGHT_NORMAL]))
else:
context.select_font_face('Courier', cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)
context.set_font_size(25)
box = context.text_extents(text)
border_w_h = (4, 4)
if box[2] > (w - 2 * border_w_h[1]) or box[3] > (h - 2 * border_w_h[0]):
raise IOError('Could not fit string into image. Max char count is too large for given image width.')
# teach the RNN translational invariance by
# fitting text box randomly on canvas, with some room to rotate
max_shift_x = w - box[2] - border_w_h[0]
max_shift_y = h - box[3] - border_w_h[1]
top_left_x = np.random.randint(0, int(max_shift_x))
if ud:
top_left_y = np.random.randint(0, int(max_shift_y))
else:
top_left_y = h // 2
context.move_to(top_left_x - int(box[0]), top_left_y - int(box[1]))
context.set_source_rgb(0, 0, 0)
context.show_text(text)
buf = surface.get_data()
a = np.frombuffer(buf, np.uint8)
a.shape = (h, w, 4)
a = a[:, :, 0] # grab single channel
a = a.astype(np.float32) / 255
a = np.expand_dims(a, 0)
if rotate:
a = image.random_rotation(a, 3 * (w - top_left_x) / w + 1)
a = speckle(a)
return a
def shuffle_mats_or_lists(matrix_list, stop_ind=None):
ret = []
assert all([len(i) == len(matrix_list[0]) for i in matrix_list])
len_val = len(matrix_list[0])
if stop_ind is None:
stop_ind = len_val
assert stop_ind <= len_val
a = list(range(stop_ind))
np.random.shuffle(a)
a += list(range(stop_ind, len_val))
for mat in matrix_list:
if isinstance(mat, np.ndarray):
ret.append(mat[a])
elif isinstance(mat, list):
ret.append([mat[i] for i in a])
else:
raise TypeError('`shuffle_mats_or_lists` only supports '
'numpy.array and list objects.')
return ret
def text_to_labels(text, num_classes):
ret = []
for char in text:
if char >= 'a' and char <= 'z':
ret.append(ord(char) - ord('a'))
elif char == ' ':
ret.append(26)
return ret
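# e.g. text_to_labels('ab c', 28) -> [0, 1, 26, 2]
# ('a' -> 0 ... 'z' -> 25, space -> 26; index 27 is reserved for the CTC blank)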
# only a-z and space; probably not too difficult
# to expand to uppercase and symbols
def is_valid_str(in_str):
search = re.compile(r'[^a-z\ ]').search
return not bool(search(in_str))
# Uses generator functions to supply train/test with
# data. Image renderings of text are created on the fly
# each time with random perturbations
class TextImageGenerator(keras.callbacks.Callback):
def __init__(self, monogram_file, bigram_file, minibatch_size,
img_w, img_h, downsample_factor, val_split,
absolute_max_string_len=16):
self.minibatch_size = minibatch_size
self.img_w = img_w
self.img_h = img_h
self.monogram_file = monogram_file
self.bigram_file = bigram_file
self.downsample_factor = downsample_factor
self.val_split = val_split
self.blank_label = self.get_output_size() - 1
self.absolute_max_string_len = absolute_max_string_len
def get_output_size(self):
return 28
# num_words can be independent of the epoch size due to the use of generators
# as max_string_len grows, num_words can grow
def build_word_list(self, num_words, max_string_len=None, mono_fraction=0.5):
assert max_string_len <= self.absolute_max_string_len
assert num_words % self.minibatch_size == 0
assert (self.val_split * num_words) % self.minibatch_size == 0
self.num_words = num_words
self.string_list = [''] * self.num_words
tmp_string_list = []
self.max_string_len = max_string_len
self.Y_data = np.ones([self.num_words, self.absolute_max_string_len]) * -1
self.X_text = []
self.Y_len = [0] * self.num_words
        # monogram file is sorted by frequency in English speech
with open(self.monogram_file, 'rt') as f:
for line in f:
if len(tmp_string_list) == int(self.num_words * mono_fraction):
break
word = line.rstrip()
if max_string_len == -1 or max_string_len is None or len(word) <= max_string_len:
tmp_string_list.append(word)
        # bigram file contains common word pairings in English speech
with open(self.bigram_file, 'rt') as f:
lines = f.readlines()
for line in lines:
if len(tmp_string_list) == self.num_words:
break
columns = line.lower().split()
word = columns[0] + ' ' + columns[1]
if is_valid_str(word) and \
(max_string_len == -1 or max_string_len is None or len(word) <= max_string_len):
tmp_string_list.append(word)
if len(tmp_string_list) != self.num_words:
raise IOError('Could not pull enough words from supplied monogram and bigram files. ')
# interlace to mix up the easy and hard words
self.string_list[::2] = tmp_string_list[:self.num_words // 2]
self.string_list[1::2] = tmp_string_list[self.num_words // 2:]
for i, word in enumerate(self.string_list):
self.Y_len[i] = len(word)
self.Y_data[i, 0:len(word)] = text_to_labels(word, self.get_output_size())
self.X_text.append(word)
self.Y_len = np.expand_dims(np.array(self.Y_len), 1)
self.cur_val_index = self.val_split
self.cur_train_index = 0
# each time an image is requested from train/val/test, a new random
# painting of the text is performed
def get_batch(self, index, size, train):
# width and height are backwards from typical Keras convention
# because width is the time dimension when it gets fed into the RNN
if K.image_data_format() == 'channels_first':
X_data = np.ones([size, 1, self.img_w, self.img_h])
else:
X_data = np.ones([size, self.img_w, self.img_h, 1])
labels = np.ones([size, self.absolute_max_string_len])
input_length = np.zeros([size, 1])
label_length = np.zeros([size, 1])
source_str = []
for i in range(0, size):
# Mix in some blank inputs. This seems to be important for
# achieving translational invariance
if train and i > size - 4:
if K.image_data_format() == 'channels_first':
X_data[i, 0, 0:self.img_w, :] = self.paint_func('')[0, :, :].T
else:
X_data[i, 0:self.img_w, :, 0] = self.paint_func('',)[0, :, :].T
labels[i, 0] = self.blank_label
input_length[i] = self.img_w // self.downsample_factor - 2
label_length[i] = 1
source_str.append('')
else:
if K.image_data_format() == 'channels_first':
X_data[i, 0, 0:self.img_w, :] = self.paint_func(self.X_text[index + i])[0, :, :].T
else:
X_data[i, 0:self.img_w, :, 0] = self.paint_func(self.X_text[index + i])[0, :, :].T
labels[i, :] = self.Y_data[index + i]
input_length[i] = self.img_w // self.downsample_factor - 2
label_length[i] = self.Y_len[index + i]
source_str.append(self.X_text[index + i])
inputs = {'the_input': X_data,
'the_labels': labels,
'input_length': input_length,
'label_length': label_length,
'source_str': source_str # used for visualization only
}
outputs = {'ctc': np.zeros([size])} # dummy data for dummy loss function
return (inputs, outputs)
def next_train(self):
while 1:
ret = self.get_batch(self.cur_train_index, self.minibatch_size, train=True)
self.cur_train_index += self.minibatch_size
if self.cur_train_index >= self.val_split:
self.cur_train_index = self.cur_train_index % 32
(self.X_text, self.Y_data, self.Y_len) = shuffle_mats_or_lists(
[self.X_text, self.Y_data, self.Y_len], self.val_split)
yield ret
def next_val(self):
while 1:
ret = self.get_batch(self.cur_val_index, self.minibatch_size, train=False)
self.cur_val_index += self.minibatch_size
if self.cur_val_index >= self.num_words:
self.cur_val_index = self.val_split + self.cur_val_index % 32
yield ret
def on_train_begin(self, logs={}):
self.build_word_list(16000, 4, 1)
self.paint_func = lambda text: paint_text(text, self.img_w, self.img_h,
rotate=False, ud=False, multi_fonts=False)
def on_epoch_begin(self, epoch, logs={}):
# rebind the paint function to implement curriculum learning
if epoch >= 3 and epoch < 6:
self.paint_func = lambda text: paint_text(text, self.img_w, self.img_h,
rotate=False, ud=True, multi_fonts=False)
elif epoch >= 6 and epoch < 9:
self.paint_func = lambda text: paint_text(text, self.img_w, self.img_h,
rotate=False, ud=True, multi_fonts=True)
elif epoch >= 9:
self.paint_func = lambda text: paint_text(text, self.img_w, self.img_h,
rotate=True, ud=True, multi_fonts=True)
if epoch >= 21 and self.max_string_len < 12:
self.build_word_list(32000, 12, 0.5)
# the actual loss calc occurs here despite it not being
# an internal Keras loss function
def ctc_lambda_func(args):
y_pred, labels, input_length, label_length = args
# the 2 is critical here since the first couple outputs of the RNN
# tend to be garbage:
y_pred = y_pred[:, 2:, :]
return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
# For a real OCR application, this should be beam search with a dictionary
# and language model. For this example, best path is sufficient.
def decode_batch(test_func, word_batch):
out = test_func([word_batch])[0]
ret = []
for j in range(out.shape[0]):
out_best = list(np.argmax(out[j, 2:], 1))
out_best = [k for k, g in itertools.groupby(out_best)]
# 26 is space, 27 is CTC blank char
outstr = ''
for c in out_best:
if c >= 0 and c < 26:
outstr += chr(c + ord('a'))
elif c == 26:
outstr += ' '
ret.append(outstr)
return ret
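# Best-path sketch (illustrative): if the per-timestep argmax sequence were
# [0, 0, 27, 1, 1, 26, 2], groupby collapses repeats to [0, 27, 1, 26, 2],
# the blank label 27 is skipped, and the rest decodes to 'ab c'.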
class VizCallback(keras.callbacks.Callback):
def __init__(self, run_name, test_func, text_img_gen, num_display_words=6):
self.test_func = test_func
self.output_dir = os.path.join(
OUTPUT_DIR, run_name)
self.text_img_gen = text_img_gen
self.num_display_words = num_display_words
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
def show_edit_distance(self, num):
num_left = num
mean_norm_ed = 0.0
mean_ed = 0.0
while num_left > 0:
word_batch = next(self.text_img_gen)[0]
num_proc = min(word_batch['the_input'].shape[0], num_left)
decoded_res = decode_batch(self.test_func, word_batch['the_input'][0:num_proc])
for j in range(0, num_proc):
edit_dist = editdistance.eval(decoded_res[j], word_batch['source_str'][j])
mean_ed += float(edit_dist)
mean_norm_ed += float(edit_dist) / len(word_batch['source_str'][j])
num_left -= num_proc
mean_norm_ed = mean_norm_ed / num
mean_ed = mean_ed / num
print('\nOut of %d samples: Mean edit distance: %.3f Mean normalized edit distance: %0.3f'
% (num, mean_ed, mean_norm_ed))
def on_epoch_end(self, epoch, logs={}):
self.model.save_weights(os.path.join(self.output_dir, 'weights%02d.h5' % (epoch)))
self.show_edit_distance(256)
word_batch = next(self.text_img_gen)[0]
res = decode_batch(self.test_func, word_batch['the_input'][0:self.num_display_words])
if word_batch['the_input'][0].shape[0] < 256:
cols = 2
else:
cols = 1
for i in range(self.num_display_words):
pylab.subplot(self.num_display_words // cols, cols, i + 1)
if K.image_data_format() == 'channels_first':
the_input = word_batch['the_input'][i, 0, :, :]
else:
the_input = word_batch['the_input'][i, :, :, 0]
pylab.imshow(the_input.T, cmap='Greys_r')
pylab.xlabel('Truth = \'%s\'\nDecoded = \'%s\'' % (word_batch['source_str'][i], res[i]))
fig = pylab.gcf()
fig.set_size_inches(10, 13)
pylab.savefig(os.path.join(self.output_dir, 'e%02d.png' % (epoch)))
pylab.close()
def train(run_name, start_epoch, stop_epoch, img_w):
# Input Parameters
img_h = 64
words_per_epoch = 16000
val_split = 0.2
val_words = int(words_per_epoch * (val_split))
# Network parameters
conv_filters = 16
kernel_size = (3, 3)
pool_size = 2
time_dense_size = 32
rnn_size = 512
if K.image_data_format() == 'channels_first':
input_shape = (1, img_w, img_h)
else:
input_shape = (img_w, img_h, 1)
fdir = os.path.dirname(get_file('wordlists.tgz',
origin='http://www.mythic-ai.com/datasets/wordlists.tgz', untar=True))
img_gen = TextImageGenerator(monogram_file=os.path.join(fdir, 'wordlist_mono_clean.txt'),
bigram_file=os.path.join(fdir, 'wordlist_bi_clean.txt'),
minibatch_size=32,
img_w=img_w,
img_h=img_h,
downsample_factor=(pool_size ** 2),
val_split=words_per_epoch - val_words
)
act = 'relu'
input_data = Input(name='the_input', shape=input_shape, dtype='float32')
inner = Conv2D(conv_filters, kernel_size, padding='same',
activation=act, kernel_initializer='he_normal',
name='conv1')(input_data)
inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max1')(inner)
inner = Conv2D(conv_filters, kernel_size, padding='same',
activation=act, kernel_initializer='he_normal',
name='conv2')(inner)
inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max2')(inner)
conv_to_rnn_dims = (img_w // (pool_size ** 2), (img_h // (pool_size ** 2)) * conv_filters)
inner = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(inner)
# cuts down input size going into RNN:
inner = Dense(time_dense_size, activation=act, name='dense1')(inner)
    # Two layers of bidirectional GRUs
# GRU seems to work as well, if not better than LSTM:
gru_1 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru1')(inner)
gru_1b = GRU(rnn_size, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru1_b')(inner)
gru1_merged = add([gru_1, gru_1b])
gru_2 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru2')(gru1_merged)
gru_2b = GRU(rnn_size, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru2_b')(gru1_merged)
# transforms RNN output to character activations:
inner = Dense(img_gen.get_output_size(), kernel_initializer='he_normal',
name='dense2')(concatenate([gru_2, gru_2b]))
y_pred = Activation('softmax', name='softmax')(inner)
Model(inputs=input_data, outputs=y_pred).summary()
labels = Input(name='the_labels', shape=[img_gen.absolute_max_string_len], dtype='float32')
input_length = Input(name='input_length', shape=[1], dtype='int64')
label_length = Input(name='label_length', shape=[1], dtype='int64')
# Keras doesn't currently support loss funcs with extra parameters
# so CTC loss is implemented in a lambda layer
loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([y_pred, labels, input_length, label_length])
    # clipnorm seems to speed up convergence
sgd = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)
model = Model(inputs=[input_data, labels, input_length, label_length], outputs=loss_out)
# the loss calc occurs elsewhere, so use a dummy lambda func for the loss
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd)
# save as JSON
json_string = model.to_json()
with open("image_ocr.json", "w") as of:
of.write(json_string)
if start_epoch > 0:
weight_file = os.path.join(OUTPUT_DIR, os.path.join(run_name, 'weights%02d.h5' % (start_epoch - 1)))
model.load_weights(weight_file)
# captures output of softmax so we can decode the output during visualization
test_func = K.function([input_data], [y_pred])
viz_cb = VizCallback(run_name, test_func, img_gen.next_val())
model.fit_generator(generator=img_gen.next_train(), steps_per_epoch=(words_per_epoch - val_words),
epochs=stop_epoch, validation_data=img_gen.next_val(), validation_steps=val_words,
callbacks=[viz_cb, img_gen], initial_epoch=start_epoch)
model.save_weights('image_ocr.h5')
if __name__ == '__main__':
run_name = datetime.datetime.now().strftime('%Y:%m:%d:%H:%M:%S')
train(run_name, 0, 20, 128)
# increase to wider images and start at epoch 20. The learned weights are reloaded
train(run_name, 20, 25, 512)
|
kitstar/DNNConvert
|
example/keras/image_ocr.py
|
Python
|
apache-2.0
| 21,106
|
[
"Gaussian"
] |
704ed9fd3b1ae9cfe265c11eee5ec764c299ab047df281450870d5377835b6ae
|
"""Oral Argument Audio Scraper for Eighth Circuit Court of Appeals
CourtID: ca8
Court Short Name: 8th Cir.
Author: Brian W. Carver
Date created: 2014-06-21
History:
- 2014-07-22: download_url fixed by mlr
"""
from datetime import datetime
from juriscraper.OralArgumentSite import OralArgumentSite
class Site(OralArgumentSite):
def __init__(self):
super(Site, self).__init__()
self.court_id = self.__module__
self.url = 'http://8cc-www.ca8.uscourts.gov/circ8rss.xml'
def _download(self, request_dict={}):
"""Go through the items and filter out ones that aren't complete.
"""
self.items = []
html_tree = super(Site, self)._download(request_dict=request_dict)
for item in html_tree.xpath('//item'):
case_name = item.xpath('./title/text()')[0].split(":", 1)[1]
if case_name.strip():
self.items.append(item)
# Set self.html to None so it can't be misused.
return None
    def _get_download_urls(self):
        # Use a relative XPath here: '//enclosure/@url' would search the whole
        # document and return the first enclosure for every item.
        return [item.xpath('./enclosure/@url')[0] for item in self.items]
def _get_case_names(self):
case_names = []
for txt in [item.xpath('./title/text()')[0] for item in self.items]:
case_name = txt.split(': ', 1)[1]
case_names.append(case_name)
return case_names
def _get_case_dates(self):
case_dates = []
for txt in [item.xpath('./description/text()')[0] for item in self.items]:
# I can't see it, but there's apparently whitespace or a newline
# at the end of these dates that has to be removed or we error out.
case_date = txt.split('about ', 1)[1].strip()
case_dates.append(datetime.strptime(case_date, '%m-%d-%Y').date())
return case_dates
def _get_docket_numbers(self):
docket_numbers = []
for txt in [item.xpath('./title/text()')[0] for item in self.items]:
docket_number = txt.split(': ', 1)[0]
docket_numbers.append(docket_number)
return docket_numbers
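# Parsing sketch with a hypothetical RSS title "14-1234: Smith v. Jones":
# _get_docket_numbers yields "14-1234" and _get_case_names yields
# "Smith v. Jones"; both split the title on the first ': '.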
|
brianwc/juriscraper
|
oral_args/united_states/federal_appellate/ca8.py
|
Python
|
bsd-2-clause
| 2,081
|
[
"Brian"
] |
7fe67df0671bdda66f587f86fb09de7e6ae8e75ff2c7da81e56c6929e17f6905
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter for slice operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.autograph.core import converter
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import templates
class SliceTransformer(converter.Base):
"""Converts slicing operations to their TF counterpart.
Currently, relying on the default slice operator that Tensor uses is
insufficient, because TensorArray and tensor lists use dedicated index read
and write functions.
"""
def _process_single_assignment(self, target, value):
if not isinstance(target, gast.Subscript):
return None
template = """
target = ag__.set_item(target, key, item)
"""
return templates.replace(
template, target=target.value, key=target.slice, item=value)
def visit_Assign(self, node):
node = self.generic_visit(node)
# TODO(mdan): Support unpackings and multiple assignments.
if len(node.targets) != 1:
raise NotImplementedError('multiple assignment')
replacement = self._process_single_assignment(node.targets[0], node.value)
if replacement is not None:
return replacement
return node
def visit_Subscript(self, node):
node = self.generic_visit(node)
if not isinstance(node.slice, gast.Index):
# TODO(mdan): It might make more sense to wave them through.
raise NotImplementedError('non-index slice')
if not isinstance(node.ctx, gast.Load):
# Index writes are handled at a higher level, one at which the rvalue is
# also available.
return node
dtype = anno.getanno(
node.value,
'element_type',
default=templates.replace_as_expression('None'))
template = """
ag__.get_item(
target,
key,
opts=ag__.GetItemOpts(element_dtype=dtype))
"""
return templates.replace_as_expression(
template, target=node.value, key=node.slice, dtype=dtype)
def transform(node, ctx):
return SliceTransformer(ctx).visit(node)
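# Rewrite sketch, derived from the two templates above (names illustrative):
#
#   x[i] = v   ->   x = ag__.set_item(x, i, v)
#   y = x[i]   ->   y = ag__.get_item(x, i, opts=ag__.GetItemOpts(element_dtype=dtype))
#
# where dtype comes from the 'element_type' annotation when present, else None.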
|
drpngx/tensorflow
|
tensorflow/contrib/autograph/converters/slices.py
|
Python
|
apache-2.0
| 2,807
|
[
"VisIt"
] |
a52aa732ea8ddae0ee1224c0064f574435442324cf7d2fc532fd6c596ff989ab
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -*- coding: iso-8859-1 -*-
"""This Python module initializes particles on the sites
of a simple cubic lattice. By setting perfect=False
the particle positions will be given random displacements
with a magnitude of one-tenth the lattice spacing."""
def createCubic(N, rho, perfect=True, RNG=None):
    if RNG is None:
import random
cubes = []
for i in range(100):
cubes.append(i**3)
    if cubes.count(N) != 1:
        print('\nWARNING: num_particles is not a perfect cube. Initial')
        print('         configuration may be inhomogeneous.\n')
L = (N / rho)**(1.0/3.0)
a = int(N**(1.0/3.0))
if(a**3 < N):
a = a + 1
lattice_spacing = L / a
def rnd(magn_):
        if RNG is None:
            rand = random.random()
        else:
rand = RNG()
return magn_ * (2.0 * rand - 1.0)
# magnitude of random displacements
magn = 0.0 if perfect else lattice_spacing / 10.0
ct = 0
x = []
y = []
z = []
for i in range(a):
for j in range(a):
for k in range(a):
if(ct < N):
x.append(0.5 * lattice_spacing + i * lattice_spacing + rnd(magn))
y.append(0.5 * lattice_spacing + j * lattice_spacing + rnd(magn))
z.append(0.5 * lattice_spacing + k * lattice_spacing + rnd(magn))
ct += 1
return x, y, z, L, L, L
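# Usage sketch (illustrative numbers): 1000 particles at reduced density
# 0.8442 on a 10 x 10 x 10 simple cubic lattice:
#
#   x, y, z, Lx, Ly, Lz = createCubic(1000, 0.8442)
#   # len(x) == 1000 and Lx == (1000 / 0.8442)**(1.0 / 3.0)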
# TODO implement checking for a wrong number of particles, slightly nonideal lattice, etc.
def createDiamond(N, rho, perfect=True, RNG=None):
from espresso import Real3D
#L = (N / 8.0 / rho)**(1.0/3.0)
L = (N / rho)**(1.0/3.0)
num_per_edge = int( (N/8.0)**(1.0/3.0) )
if(8.0*num_per_edge**3 < N):
num_per_edge = num_per_edge + 1
#print 'num_per_site= ', num_per_edge
a = L / num_per_edge
#print 'a= ', a
#print 'a1= ', (1.0 / rho)**(1.0/3.0)
pos = []
# in general structure is shifted relative to (0,0,0)
R0 = Real3D(0.125 * a, 0.125 * a, 0.125 * a)
R1 = Real3D(0.25 * a, 0.25 * a, 0.25 * a)
a11 = a * Real3D(1,0,0)
a22 = a * Real3D(0,1,0)
a33 = a * Real3D(0,0,1)
a1 = 0.5 * a * Real3D(0,1,1)
a2 = 0.5 * a * Real3D(1,0,1)
a3 = 0.5 * a * Real3D(1,1,0)
for i in range(num_per_edge):
for j in range(num_per_edge):
for k in range(num_per_edge):
Rijk = R0 + i*a11 + j*a22 + k*a33
pos.append(Rijk)
pos.append(Rijk+a1)
pos.append(Rijk+a2)
pos.append(Rijk+a3)
pos.append(Rijk+R1)
pos.append(Rijk+a1+R1)
pos.append(Rijk+a2+R1)
pos.append(Rijk+a3+R1)
'''
L1 = L-0.01
pos.append( Real3D(0.01, 0.01, 0.01) )
pos.append( Real3D(L1, 0.01, 0.01) )
pos.append( Real3D(0.01, L1, 0.01) )
pos.append( Real3D(0.01, 0.01, L1) )
pos.append( Real3D(0.01, L1, L1) )
pos.append( Real3D(L1, L1, 0.01) )
pos.append( Real3D(L1, 0.01, L1) )
pos.append( Real3D(L1, L1, L1) )
'''
return pos, L, L, L
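# Usage sketch (illustrative): for a perfect diamond lattice N should equal
# 8 * k**3 for an integer k; positions are returned as espresso Real3D objects.
#   pos, Lx, Ly, Lz = createDiamond(64, 0.8)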
|
BackupTheBerlios/espressopp
|
src/tools/init_cfg/lattice.py
|
Python
|
gpl-3.0
| 3,712
|
[
"ESPResSo"
] |
0a0ab7585d02de9b216d3f413119f6f51d2d321a1c87ccdf07aed48dd0b9b857
|
# Orca
#
# Copyright 2005-2008 Sun Microsystems Inc.
# Copyright 2011 Igalia, S.L.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Provides support for handling input events."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2008 Sun Microsystems Inc." \
"Copyright (c) 2011 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
import time
import unicodedata
from . import debug
from . import keybindings
from . import keynames
from . import messages
from . import orca_state
from . import settings
KEYBOARD_EVENT = "keyboard"
BRAILLE_EVENT = "braille"
MOUSE_BUTTON_EVENT = "mouse:button"
class InputEvent:
_clickCount = 0
def __init__(self, eventType):
"""Creates a new input event of the given type.
Arguments:
- eventType: one of KEYBOARD_EVENT, BRAILLE_EVENT, MOUSE_BUTTON_EVENT
"""
self.type = eventType
def getClickCount(self):
"""Return the count of the number of clicks a user has made."""
# TODO - JD: I relocated this out of script.py, because it seems
# to belong there even less than here. Need to revisit how this
# functionality is used and where.
return InputEvent._clickCount
    def setClickCount(self):
        """Sets the count of the number of clicks a user has made to one
        of the non-modifier keys on the keyboard. Note that this looks at
        the event_string (keysym) instead of hw_code (keycode) because
        the Java platform gives us completely different keycodes for keys.
        """
# TODO - JD: This setter for the getter I found in script.py was
# in orca.py. :-/ Again, this needs sorting out. But for now it
# is less out of place here.
lastInputEvent = orca_state.lastNonModifierKeyEvent
if self.type == pyatspi.KEY_RELEASED_EVENT:
return
if not isinstance(self, KeyboardEvent):
InputEvent._clickCount = 0
return
if not isinstance(lastInputEvent, KeyboardEvent):
InputEvent._clickCount = 1
return
if self.time - lastInputEvent.time < settings.doubleClickTimeout \
and lastInputEvent.event_string == self.event_string:
# Cap the possible number of clicks at 3.
if InputEvent._clickCount < 3:
InputEvent._clickCount += 1
return
InputEvent._clickCount = 1
class KeyboardEvent(InputEvent):
TYPE_UNKNOWN = "unknown"
TYPE_PRINTABLE = "printable"
TYPE_MODIFIER = "modifier"
TYPE_LOCKING = "locking"
TYPE_FUNCTION = "function"
TYPE_ACTION = "action"
TYPE_NAVIGATION = "navigation"
TYPE_DIACRITICAL = "diacritical"
def __init__(self, event):
"""Creates a new InputEvent of type KEYBOARD_EVENT.
Arguments:
- event: the AT-SPI keyboard event
"""
InputEvent.__init__(self, KEYBOARD_EVENT)
self.id = event.id
self.type = event.type
self.hw_code = event.hw_code
self.modifiers = event.modifiers
self.event_string = event.event_string
self.is_text = event.is_text
self.time = time.time()
self.timestamp = event.timestamp
# Add an empty field for the keyval_name because there are a number
# of places we might want to know this information, and we don't
# want to have to keep calculating it. The default calculation will
# take place in script.checkKeyboardEventData.
#
self.keyval_name = ""
# Call the specific toolkit method, to ensure that all fields
# are filled.
#
script = orca_state.activeScript
if script:
script.checkKeyboardEventData(self)
# Control characters come through as control characters, so we
# just turn them into their ASCII equivalent. NOTE that the
# upper case ASCII characters will be used (e.g., ctrl+a will
# be turned into the string "A"). All these checks here are
# to just do some sanity checking before doing the
# conversion. [[[WDW - this is making assumptions about
# mapping ASCII control characters to UTF-8.]]]
#
if (self.modifiers & keybindings.CTRL_MODIFIER_MASK) \
and (not self.is_text) and (len(self.event_string) == 1):
value = ord(self.event_string[0])
if value < 32:
self.event_string = chr(value + 0x40)
self.keyType = None
if self.isNavigationKey():
self.keyType = KeyboardEvent.TYPE_NAVIGATION
self.shouldEcho = settings.enableNavigationKeys
elif self.isActionKey():
self.keyType = KeyboardEvent.TYPE_ACTION
self.shouldEcho = settings.enableActionKeys
elif self.isModifierKey():
self.keyType = KeyboardEvent.TYPE_MODIFIER
self.shouldEcho = settings.enableModifierKeys
elif self.isFunctionKey():
self.keyType = KeyboardEvent.TYPE_FUNCTION
self.shouldEcho = settings.enableFunctionKeys
elif self.isDiacriticalKey():
self.keyType = KeyboardEvent.TYPE_DIACRITICAL
self.shouldEcho = settings.enableDiacriticalKeys
elif self.isLockingKey():
self.keyType = KeyboardEvent.TYPE_LOCKING
self.shouldEcho = settings.presentLockingKeys
            if self.shouldEcho is None:
self.shouldEcho = not settings.onlySpeakDisplayedText
elif self.isPrintableKey():
self.keyType = KeyboardEvent.TYPE_PRINTABLE
self.shouldEcho = \
settings.enablePrintableKeys or settings.enableEchoByCharacter
else:
self.keyType = KeyboardEvent.TYPE_UNKNOWN
self.shouldEcho = False
if not self.isLockingKey():
self.shouldEcho = self.shouldEcho and settings.enableKeyEcho
def __eq__(self, other):
if not other:
return False
if self.type == other.type \
and self.hw_code == other.hw_code \
and self.timestamp == other.timestamp:
return True
return False
def toString(self):
return ("KEYBOARDEVENT: type=%d\n" % self.type) \
+ (" id=%d\n" % self.id) \
+ (" hw_code=%d\n" % self.hw_code) \
+ (" modifiers=%d\n" % self.modifiers) \
+ (" event_string=(%s)\n" % self.event_string) \
+ (" keyval_name=(%s)\n" % self.keyval_name) \
+ (" is_text=%s\n" % self.is_text) \
+ (" timestamp=%d\n" % self.timestamp) \
+ (" time=%f\n" % time.time()) \
+ (" keyType=%s\n" % self.keyType) \
+ (" shouldEcho=%s\n" % self.shouldEcho)
def isNavigationKey(self):
"""Return True if this is a navigation key."""
if self.keyType:
return self.keyType == KeyboardEvent.TYPE_NAVIGATION
return self.event_string in \
["Left", "Right", "Up", "Down", "Home", "End"]
def isActionKey(self):
"""Return True if this is an action key."""
if self.keyType:
return self.keyType == KeyboardEvent.TYPE_ACTION
return self.event_string in \
["Return", "Escape", "Tab", "BackSpace", "Delete",
"Page_Up", "Page_Down"]
def isDiacriticalKey(self):
"""Return True if this is a non-spacing diacritical key."""
if self.keyType:
return self.keyType == KeyboardEvent.TYPE_DIACRITICAL
return self.event_string.startswith("dead_")
def isFunctionKey(self):
"""Return True if this is a function key."""
if self.keyType:
return self.keyType == KeyboardEvent.TYPE_FUNCTION
return self.event_string in \
["F1", "F2", "F3", "F4", "F5", "F6",
"F7", "F8", "F9", "F10", "F11", "F12"]
def isLockingKey(self):
"""Return True if this is a locking key."""
if self.keyType:
            return self.keyType == KeyboardEvent.TYPE_LOCKING
lockingKeys = ["Caps_Lock", "Num_Lock", "Scroll_Lock"]
        if self.event_string not in lockingKeys:
            return False
        if not orca_state.bypassNextCommand:
            return self.event_string not in settings.orcaModifierKeys
return True
def isModifierKey(self):
"""Return True if this is a modifier key."""
if self.keyType:
return self.keyType == KeyboardEvent.TYPE_MODIFIER
if self.isOrcaModifier():
return True
return self.event_string in \
['Alt_L', 'Alt_R', 'Control_L', 'Control_R',
'Shift_L', 'Shift_R', 'Meta_L', 'Meta_R',
'ISO_Level3_Shift']
def isOrcaModifier(self):
"""Return True if this is the Orca modifier key."""
if orca_state.bypassNextCommand:
return False
if self.event_string in settings.orcaModifierKeys:
return True
if self.keyval_name == "KP_0" \
and "KP_Insert" in settings.orcaModifierKeys \
and self.modifiers & keybindings.SHIFT_MODIFIER_MASK:
return True
return False
def isOrcaModified(self):
"""Return True if this key is Orca modified."""
if orca_state.bypassNextCommand:
return False
return self.modifiers & keybindings.ORCA_MODIFIER_MASK
def isPrintableKey(self):
"""Return True if this is a printable key."""
if self.keyType:
return self.keyType == KeyboardEvent.TYPE_PRINTABLE
if self.event_string in ["space", " "]:
return True
if not len(self.event_string) == 1:
return False
if self.event_string.isalnum() or self.event_string.isspace():
return True
return unicodedata.category(self.event_string)[0] in ('P', 'S')
def isPressedKey(self):
"""Returns True if the key is pressed"""
return self.type == pyatspi.KEY_PRESSED_EVENT
def isCharacterEchoable(self):
"""Returns True if the script will echo this event as part of
character echo. We do this to not double-echo a given printable
character."""
if not self.isPrintableKey():
return False
if orca_state.learnModeEnabled:
return False
script = orca_state.activeScript
return script and script.utilities.willEchoCharacter(self)
def getLockingState(self):
"""Returns True if the event locked a locking key, False if the
event unlocked a locking key, and None if we do not know or this
is not a locking key."""
if not self.isLockingKey():
return None
if self.event_string == "Caps_Lock":
mod = pyatspi.MODIFIER_SHIFTLOCK
elif self.event_string == "Num_Lock":
mod = pyatspi.MODIFIER_NUMLOCK
else:
return None
return not self.modifiers & (1 << mod)
def getLockingStateString(self):
"""Returns the string which reflects the locking state we wish to
include when presenting a locking key."""
locked = self.getLockingState()
        if locked is None:
return ''
if not locked:
return messages.LOCKING_KEY_STATE_OFF
return messages.LOCKING_KEY_STATE_ON
def getKeyName(self):
"""Returns the string to be used for presenting the key to the user."""
return keynames.getKeyName(self.event_string)
class BrailleEvent(InputEvent):
def __init__(self, event):
"""Creates a new InputEvent of type BRAILLE_EVENT.
Arguments:
- event: the integer BrlTTY command for this event.
"""
InputEvent.__init__(self, BRAILLE_EVENT)
self.event = event
class MouseButtonEvent(InputEvent):
def __init__(self, event):
"""Creates a new InputEvent of type MOUSE_BUTTON_EVENT.
"""
InputEvent.__init__(self, MOUSE_BUTTON_EVENT)
self.x = event.detail1
self.y = event.detail2
self.pressed = event.type.endswith('p')
self.button = event.type[len("mouse:button:"):-1]
self.time = time.time()
class InputEventHandler:
def __init__(self, function, description, learnModeEnabled=True):
"""Creates a new InputEventHandler instance. All bindings
(e.g., key bindings and braille bindings) will be handled
by an instance of an InputEventHandler.
Arguments:
- function: the function to call with an InputEvent instance as its
sole argument. The function is expected to return True
if it consumes the event; otherwise it should return
False
- description: a localized string describing what this InputEvent
does
- learnModeEnabled: if True, the description will be spoken and
brailled if learn mode is enabled. If False,
the function will be called no matter what.
"""
self.function = function
self.description = description
self._learnModeEnabled = learnModeEnabled
def __eq__(self, other):
"""Compares one input handler to another."""
if not other:
return False
return (self.function == other.function)
def processInputEvent(self, script, inputEvent):
"""Processes an input event. If learnModeEnabled is True,
this will merely present the description of the input event via
If learnModeEnabled is False, this will call the function bound
to this InputEventHandler instance, passing the inputEvent as
the sole argument to the function.
This function is expected to return True if it consumes the
event; otherwise it is expected to return False.
Arguments:
- script: the script (if any) associated with this event
- inputEvent: the input event to pass to the function bound
to this InputEventHandler instance.
"""
consumed = False
if orca_state.learnModeEnabled and self._learnModeEnabled:
if self.description:
script.presentMessage(self.description)
consumed = True
else:
try:
consumed = self.function(script, inputEvent)
except:
debug.printException(debug.LEVEL_SEVERE)
return consumed
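# Usage sketch (illustrative; 'sayHello' is a hypothetical handler function,
# and 'script'/'event' stand in for a real script and InputEvent):
#   def sayHello(script, inputEvent):
#       script.presentMessage("hello")
#       return True
#   handler = InputEventHandler(sayHello, "Says hello.")
#   consumed = handler.processInputEvent(script, event)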
|
pvagner/orca
|
src/orca/input_event.py
|
Python
|
lgpl-2.1
| 15,640
|
[
"ORCA"
] |
6fcfa6af1d08b814fa09ec84bdc1b6fac1b684f484b749a5cf0080e8384b4d6b
|
#
# tsne.py
#
# Implementation of t-SNE in Python. The implementation was tested on Python 2.5.1, and it requires a working
# installation of NumPy. The implementation comes with an example on the MNIST dataset. In order to plot the
# results of this example, a working installation of matplotlib is required.
# The example can be run by executing: ipython tsne.py -pylab
#
#
# Created by Laurens van der Maaten on 20-12-08.
# Copyright (c) 2008 Tilburg University. All rights reserved.
import numpy as np
from pylab import scatter
def Hbeta(D = np.array([]), beta = 1.0):
"""Compute the perplexity and the P-row for a specific value of the precision of a Gaussian distribution."""
# Compute P-row and corresponding perplexity
P = np.exp(-D.copy() * beta);
sumP = sum(P);
H = np.log(sumP) + beta * np.sum(D * P) / sumP;
P = P / sumP;
return H, P;
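# Illustrative check (not part of the original): np.exp(H) is the perplexity,
# i.e. the effective number of neighbors implied by the Gaussian precision.
#   H, P = Hbeta(np.array([0.0, 1.0, 4.0]), beta=1.0)
#   effective_neighbors = np.exp(H)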
def x2p(X = np.array([]), tol = 1e-5, perplexity = 30.0):
"""Performs a binary search to get P-values in such a way that each conditional Gaussian has the same perplexity."""
# Initialize some variables
print("Computing pairwise distances...")
(n, d) = X.shape;
sum_X = np.sum(np.square(X), 1);
D = np.add(np.add(-2 * np.dot(X, X.T), sum_X).T, sum_X);
P = np.zeros((n, n));
beta = np.ones((n, 1));
logU = np.log(perplexity);
# Loop over all datapoints
for i in range(n):
# Print progress
if i % 500 == 0:
print("Computing P-values for point ", i, " of ", n, "...")
# Compute the Gaussian kernel and entropy for the current precision
betamin = -np.inf;
betamax = np.inf;
Di = D[i, np.concatenate((np.r_[0:i], np.r_[i+1:n]))];
(H, thisP) = Hbeta(Di, beta[i]);
# Evaluate whether the perplexity is within tolerance
Hdiff = H - logU;
tries = 0;
while np.abs(Hdiff) > tol and tries < 50:
# If not, increase or decrease precision
if Hdiff > 0:
betamin = beta[i];
if betamax == np.inf or betamax == -np.inf:
beta[i] = beta[i] * 2;
else:
beta[i] = (beta[i] + betamax) / 2;
else:
betamax = beta[i];
if betamin == np.inf or betamin == -np.inf:
beta[i] = beta[i] / 2;
else:
beta[i] = (beta[i] + betamin) / 2;
# Recompute the values
(H, thisP) = Hbeta(Di, beta[i]);
Hdiff = H - logU;
tries = tries + 1;
# Set the final row of P
P[i, np.concatenate((np.r_[0:i], np.r_[i+1:n]))] = thisP;
# Return final P-matrix
print("Mean value of sigma: ", np.mean(np.sqrt(1 / beta)))
return P;
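# Quick sanity check (illustrative): each row of P returned by x2p should be
# a conditional distribution whose perplexity is close to the target.
#   Xs = np.random.randn(100, 5)
#   P = x2p(Xs, tol=1e-5, perplexity=10.0)
#   assert np.allclose(P.sum(axis=1), 1.0, atol=1e-3)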
def pca(X = np.array([]), no_dims = 50):
"""Runs PCA on the NxD array X in order to reduce its dimensionality to no_dims dimensions."""
print("Preprocessing the data using PCA...")
(n, d) = X.shape;
X = X - np.tile(np.mean(X, 0), (n, 1));
(l, M) = np.linalg.eig(np.dot(X.T, X));
Y = np.dot(X, M[:, 0:no_dims]);
return Y;
def run_tsne(X = np.array([]), no_dims = 2, initial_dims = 50, perplexity = 30.0):
"""Runs t-SNE on the dataset in the NxD array X to reduce its dimensionality to no_dims dimensions.
    The syntax of the function is Y = run_tsne(X, no_dims, perplexity), where X is an NxD NumPy array."""
# Check inputs
if X.dtype != "float64":
print("Error: array X should have type float64.");
return -1;
#if no_dims.__class__ != "<type 'int'>": # doesn't work yet!
# print "Error: number of dimensions should be an integer.";
# return -1;
# Initialize variables
X = pca(X, initial_dims);
(n, d) = X.shape;
max_iter = 1000;
initial_momentum = 0.5;
final_momentum = 0.8;
eta = 500;
min_gain = 0.01;
Y = np.random.randn(n, no_dims);
dY = np.zeros((n, no_dims));
iY = np.zeros((n, no_dims));
gains = np.ones((n, no_dims));
# Compute P-values
P = x2p(X, 1e-5, perplexity);
P = P + np.transpose(P);
P = P / np.sum(P);
P = P * 4; # early exaggeration
P = np.maximum(P, 1e-12);
# Run iterations
for iter in range(max_iter):
# Compute pairwise affinities
sum_Y = np.sum(np.square(Y), 1);
num = 1 / (1 + np.add(np.add(-2 * np.dot(Y, Y.T), sum_Y).T, sum_Y));
num[list(range(n)), list(range(n))] = 0;
Q = num / np.sum(num);
Q = np.maximum(Q, 1e-12);
# Compute gradient
PQ = P - Q;
for i in range(n):
dY[i,:] = np.sum(np.tile(PQ[:, i] * num[:, i], (no_dims, 1)).T * (Y[i,:] - Y), 0);
# Perform the update
if iter < 20:
momentum = initial_momentum
else:
momentum = final_momentum
gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * ((dY > 0) == (iY > 0));
gains[gains < min_gain] = min_gain;
iY = momentum * iY - eta * (gains * dY);
Y = Y + iY;
Y = Y - np.tile(np.mean(Y, 0), (n, 1));
# Compute current value of cost function
if (iter + 1) % 10 == 0:
C = np.sum(P * np.log(P / Q));
print("Iteration ", (iter + 1), ": error is ", C)
# Stop lying about P-values
if iter == 100:
P = P / 4;
# Return solution
return Y;
if __name__ == "__main__":
print("Run Y = tsne.tsne(X, no_dims, perplexity) to perform t-SNE on your dataset.")
print("Running example on 2,500 MNIST digits...")
X = np.loadtxt("mnist2500_X.txt");
labels = np.loadtxt("mnist2500_labels.txt");
    Y = run_tsne(X, 2, 50, 20.0);
scatter(Y[:, 0], Y[:, 1], 20, labels);
|
agartland/utils
|
pytsne.py
|
Python
|
mit
| 5,162
|
[
"Gaussian"
] |
4fa097dc6b75e35ea97ac428f2b8f9ec290bdef7edcb94254dfebd378d14332c
|
""" JobLoggingDB class is a front-end to the Job Logging Database.
The following methods are provided
addLoggingRecord()
getJobLoggingInfo()
getWMSTimeStamps()
"""
import time
from types import StringTypes, IntType, LongType
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities import Time
from DIRAC.Core.Base.DB import DB
__RCSID__ = "$Id$"
# Here for debugging purposes; should be initialized by the containing component
gLogger.initialize( 'WMS', '/Databases/JobLoggingDB/Test' )
MAGIC_EPOC_NUMBER = 1270000000
#############################################################################
class JobLoggingDB( DB ):
def __init__( self ):
""" Standard Constructor
"""
DB.__init__( self, 'JobLoggingDB', 'WorkloadManagement/JobLoggingDB' )
self.gLogger = gLogger
#############################################################################
def addLoggingRecord( self,
jobID,
status = 'idem',
minor = 'idem',
application = 'idem',
date = '',
source = 'Unknown' ):
""" Add a new entry to the JobLoggingDB table. One, two or all the three status
    components can be specified. Optionally the time stamp of the status can
be provided in a form of a string in a format '%Y-%m-%d %H:%M:%S' or
as datetime.datetime object. If the time stamp is not provided the current
UTC time is used.
"""
event = 'status/minor/app=%s/%s/%s' % ( status, minor, application )
self.gLogger.info( "Adding record for job " + str( jobID ) + ": '" + event + "' from " + source )
if not date:
# Make the UTC datetime string and float
_date = Time.dateTime()
epoc = time.mktime( _date.timetuple() ) + _date.microsecond / 1000000. - MAGIC_EPOC_NUMBER
time_order = round( epoc, 3 )
else:
try:
if type( date ) in StringTypes:
# The date is provided as a string in UTC
_date = Time.fromString( date )
epoc = time.mktime( _date.timetuple() ) + _date.microsecond / 1000000. - MAGIC_EPOC_NUMBER
time_order = round( epoc, 3 )
elif type( date ) == Time._dateTimeType:
_date = date
epoc = time.mktime( _date.timetuple() ) + _date.microsecond / 1000000. - MAGIC_EPOC_NUMBER
time_order = round( epoc, 3 )
else:
self.gLogger.error( 'Incorrect date for the logging record' )
_date = Time.dateTime()
epoc = time.mktime( _date.timetuple() ) - MAGIC_EPOC_NUMBER
time_order = round( epoc, 3 )
except:
self.gLogger.exception( 'Exception while date evaluation' )
_date = Time.dateTime()
epoc = time.mktime( _date.timetuple() ) - MAGIC_EPOC_NUMBER
time_order = round( epoc, 3 )
cmd = "INSERT INTO LoggingInfo (JobId, Status, MinorStatus, ApplicationStatus, " + \
"StatusTime, StatusTimeOrder, StatusSource) VALUES (%d,'%s','%s','%s','%s',%f,'%s')" % \
( int( jobID ), status, minor, application, str( _date ), time_order, source )
return self._update( cmd )
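  # Usage sketch (illustrative; assumes a configured DIRAC installation and
  # an existing LoggingInfo table):
  #   db = JobLoggingDB()
  #   db.addLoggingRecord( 12345, status = 'Running', minor = 'Application',
  #                        source = 'JobWrapper' )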
#############################################################################
def getJobLoggingInfo( self, jobID ):
""" Returns a Status,MinorStatus,ApplicationStatus,StatusTime,StatusSource tuple
for each record found for job specified by its jobID in historical order
"""
cmd = 'SELECT Status,MinorStatus,ApplicationStatus,StatusTime,StatusSource FROM' \
' LoggingInfo WHERE JobId=%d ORDER BY StatusTimeOrder,StatusTime' % int( jobID )
result = self._query( cmd )
if not result['OK']:
return result
if result['OK'] and not result['Value']:
return S_ERROR( 'No Logging information for job %d' % int( jobID ) )
return_value = []
status, minor, app = result['Value'][0][:3]
if app == "idem":
app = "Unknown"
for row in result['Value']:
if row[0] != "idem":
status = row[0];
if row[1] != "idem":
minor = row[1];
if row[2] != "idem":
app = row[2];
return_value.append( ( status, minor, app, str( row[3] ), row[4] ) )
return S_OK( return_value )
#############################################################################
def deleteJob( self, jobID ):
""" Delete logging records for given jobs
"""
# Make sure that we have a list of jobs
if type( jobID ) in [ IntType, LongType ]:
jobList = [ str( jobID ) ]
elif type( jobID ) in StringTypes:
jobList = [ jobID ]
else:
jobList = list( jobID )
jobString = ','.join( jobList )
req = "DELETE FROM LoggingInfo WHERE JobID IN (%s)" % jobString
result = self._update( req )
return result
#############################################################################
def getWMSTimeStamps( self, jobID ):
""" Get TimeStamps for job MajorState transitions
return a {State:timestamp} dictionary
"""
self.gLogger.debug( 'getWMSTimeStamps: Retrieving Timestamps for Job %d' % int( jobID ) )
result = {}
cmd = 'SELECT Status,StatusTimeOrder FROM LoggingInfo WHERE JobID=%d' % int( jobID )
resCmd = self._query( cmd )
if not resCmd['OK']:
return resCmd
if not resCmd['Value']:
return S_ERROR( 'No Logging Info for job %d' % int( jobID ) )
for event, etime in resCmd['Value']:
result[event] = str( etime + MAGIC_EPOC_NUMBER )
# Get last date and time
cmd = 'SELECT MAX(StatusTime) FROM LoggingInfo WHERE JobID=%d' % int( jobID )
resCmd = self._query( cmd )
if not resCmd['OK']:
return resCmd
if len( resCmd['Value'] ) > 0:
result['LastTime'] = str( resCmd['Value'][0][0] )
else:
result['LastTime'] = "Unknown"
return S_OK( result )
|
vmendez/DIRAC
|
WorkloadManagementSystem/DB/JobLoggingDB.py
|
Python
|
gpl-3.0
| 5,906
|
[
"DIRAC"
] |
d87ff246335be4da140374494106dd7b43eff534ac3853dd59d2ca6d8075d0c4
|
"""
tint.tracks
===========
Cell_tracks class.
"""
import copy
import datetime
import numpy as np
import pandas as pd
from .grid_utils import get_grid_size, get_radar_info, extract_grid_data
from .helpers import Record, Counter
from .phase_correlation import get_global_shift
from .matching import get_pairs
from .objects import init_current_objects, update_current_objects
from .objects import get_object_prop, write_tracks
# Tracking Parameter Defaults
FIELD_THRESH = 32
ISO_THRESH = 8
ISO_SMOOTH = 3
MIN_SIZE = 8
SEARCH_MARGIN = 4000
FLOW_MARGIN = 10000
MAX_DISPARITY = 999
MAX_FLOW_MAG = 50
MAX_SHIFT_DISP = 15
GS_ALT = 1500
"""
Tracking Parameter Guide
------------------------
FIELD_THRESH : units of 'field' attribute
    The threshold used for object detection. Detected objects are connected
pixels above this threshold.
ISO_THRESH : units of 'field' attribute
Used in isolated cell classification. Isolated cells must not be connected
to any other cell by contiguous pixels above this threshold.
ISO_SMOOTH : pixels
Gaussian smoothing parameter in peak detection preprocessing. See
single_max in tint.objects.
MIN_SIZE : square kilometers
    The minimum size threshold for an object to be detected.
SEARCH_MARGIN : meters
The radius of the search box around the predicted object center.
FLOW_MARGIN : meters
The margin size around the object extent on which to perform phase
correlation.
MAX_DISPARITY : float
Maximum allowable disparity value. Larger disparity values are sent to
LARGE_NUM.
MAX_FLOW_MAG : meters per second
Maximum allowable global shift magnitude. See get_global_shift in
tint.phase_correlation.
MAX_SHIFT_DISP : meters per second
Maximum magnitude of difference in meters per second for two shifts to be
considered in agreement. See correct_shift in tint.matching.
GS_ALT : meters
Altitude in meters at which to perform phase correlation for global shift
calculation. See correct_shift in tint.matching.
"""
class Cell_tracks(object):
"""
This is the main class in the module. It allows tracks
objects to be built using lists of pyart grid objects.
Attributes
----------
params : dict
Parameters for the tracking algorithm.
field : str
String specifying pyart grid field to be used for tracking. Default is
'reflectivity'.
grid_size : array
Array containing z, y, and x mesh size in meters respectively.
last_grid : Grid
Contains the most recent grid object tracked. This is used for dynamic
updates.
counter : Counter
See Counter class.
record : Record
See Record class.
current_objects : dict
Contains information about objects in the current scan.
    tracks : DataFrame
        DataFrame containing the tracking results, updated scan by scan.
__saved_record : Record
Deep copy of Record at the penultimate scan in the sequence. This and
following 2 attributes used for link-up in dynamic updates.
__saved_counter : Counter
Deep copy of Counter.
__saved_objects : dict
Deep copy of current_objects.
"""
def __init__(self, field='reflectivity'):
self.params = {'FIELD_THRESH': FIELD_THRESH,
'MIN_SIZE': MIN_SIZE,
'SEARCH_MARGIN': SEARCH_MARGIN,
'FLOW_MARGIN': FLOW_MARGIN,
'MAX_FLOW_MAG': MAX_FLOW_MAG,
'MAX_DISPARITY': MAX_DISPARITY,
'MAX_SHIFT_DISP': MAX_SHIFT_DISP,
'ISO_THRESH': ISO_THRESH,
'ISO_SMOOTH': ISO_SMOOTH,
'GS_ALT': GS_ALT}
self.field = field
self.grid_size = None
self.radar_info = None
self.last_grid = None
self.counter = None
self.record = None
self.current_objects = None
self.tracks = pd.DataFrame()
self.__saved_record = None
self.__saved_counter = None
self.__saved_objects = None
def __save(self):
""" Saves deep copies of record, counter, and current_objects. """
self.__saved_record = copy.deepcopy(self.record)
self.__saved_counter = copy.deepcopy(self.counter)
self.__saved_objects = copy.deepcopy(self.current_objects)
def __load(self):
""" Loads saved copies of record, counter, and current_objects. If new
tracks are appended to existing tracks via the get_tracks method, the
most recent scan prior to the addition must be overwritten to link up
with the new scans. Because of this, record, counter and
current_objects must be reverted to their state in the penultimate
iteration of the loop in get_tracks. See get_tracks for details. """
self.record = self.__saved_record
self.counter = self.__saved_counter
self.current_objects = self.__saved_objects
def get_tracks(self, grids):
""" Obtains tracks given a list of pyart grid objects. This is the
primary method of the tracks class. This method makes use of all of the
functions and helper classes defined above. """
start_time = datetime.datetime.now()
if self.record is None:
# tracks object being initialized
grid_obj2 = next(grids)
self.grid_size = get_grid_size(grid_obj2)
self.radar_info = get_radar_info(grid_obj2)
self.counter = Counter()
self.record = Record(grid_obj2)
else:
# tracks object being updated
grid_obj2 = self.last_grid
            self.tracks = self.tracks.drop(self.record.scan + 1)  # last scan is overwritten
if self.current_objects is None:
newRain = True
else:
newRain = False
raw2, frame2 = extract_grid_data(grid_obj2, self.field, self.grid_size,
self.params)
while grid_obj2 is not None:
grid_obj1 = grid_obj2
raw1 = raw2
frame1 = frame2
try:
grid_obj2 = next(grids)
except StopIteration:
grid_obj2 = None
if grid_obj2 is not None:
self.record.update_scan_and_time(grid_obj1, grid_obj2)
raw2, frame2 = extract_grid_data(grid_obj2,
self.field,
self.grid_size,
self.params)
else:
# setup to write final scan
self.__save()
self.last_grid = grid_obj1
self.record.update_scan_and_time(grid_obj1)
raw2 = None
frame2 = np.zeros_like(frame1)
if np.max(frame1) == 0:
newRain = True
print('No cells found in scan', self.record.scan)
self.current_objects = None
continue
global_shift = get_global_shift(raw1, raw2, self.params)
pairs = get_pairs(frame1,
frame2,
global_shift,
self.current_objects,
self.record,
self.params)
if newRain:
# first nonempty scan after a period of empty scans
self.current_objects, self.counter = init_current_objects(
frame1,
frame2,
pairs,
self.counter
)
newRain = False
else:
self.current_objects, self.counter = update_current_objects(
frame1,
frame2,
pairs,
self.current_objects,
self.counter
)
obj_props = get_object_prop(frame1, grid_obj1, self.field,
self.record, self.params)
self.record.add_uids(self.current_objects)
self.tracks = write_tracks(self.tracks, self.record,
self.current_objects, obj_props)
del grid_obj1, raw1, frame1, global_shift, pairs, obj_props
# scan loop end
self.__load()
time_elapsed = datetime.datetime.now() - start_time
print('\n')
print('time elapsed', np.round(time_elapsed.seconds/60, 1), 'minutes')
return
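# Usage sketch (illustrative; 'grids_day1'/'grids_day2' are hypothetical
# iterators of Py-ART Grid objects):
#   tracks_obj = Cell_tracks(field='reflectivity')
#   tracks_obj.get_tracks(grids_day1)
#   tracks_obj.get_tracks(grids_day2)  # appended scans are linked up via the
#                                      # saved record/counter state
#   print(tracks_obj.tracks.head())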
|
openradar/TINT
|
tint/tracks.py
|
Python
|
bsd-2-clause
| 8,579
|
[
"Gaussian"
] |
99f8237cfecb3b704f4fc66693eeda3e0fd3797ad0aff30d17c774bdd569dcc1
|
# Copyright Yair Benita Y.Benita@pharm.uu.nl
# Biopython (http://biopython.org) license applies
"""Simple protein analysis.
Example::
X = ProteinAnalysis("MAEGEITTFTALTEKFNLPPGNYKKPKLLYCSNGGHFLRILPDGTVDGTRDRSDQHIQLQLSAESVGEVYIKSTETGQYLAMDTSGLLYGSQTPSEECLFLERLEENHYNTYTSKKHAEKNWFVGLKKNGSCKRGPRTHYGQKAILFLPLPV")
print(X.count_amino_acids())
print(X.get_amino_acids_percent())
print(X.molecular_weight())
print(X.aromaticity())
print(X.instability_index())
print(X.flexibility())
print(X.isoelectric_point())
print(X.secondary_structure_fraction())
print(X.protein_scale(ProtParamData.kd, 9, 0.4))
"""
from __future__ import print_function
import sys
# Add path to Bio
sys.path.append('../../..')
from Bio.SeqUtils import ProtParamData # Local
from Bio.SeqUtils import IsoelectricPoint # Local
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
from Bio.Data import IUPACData
from Bio.SeqUtils import molecular_weight
__docformat__ = "restructuredtext en"
class ProteinAnalysis(object):
"""Class containing methods for protein analysis.
The constructor takes two arguments.
The first is the protein sequence as a string, which is then converted to a
sequence object using the Bio.Seq module. This is done just to make sure
the sequence is a protein sequence and not anything else.
The second argument is optional. If set to True, the weight of the amino
acids will be calculated using their monoisotopic mass (the weight of the
most abundant isotopes for each element), instead of the average molecular
mass (the averaged weight of all stable isotopes for each element).
If set to false (the default value) or left out, the IUPAC average
molecular mass will be used for the calculation.
"""
def __init__(self, prot_sequence, monoisotopic=False):
if prot_sequence.islower():
self.sequence = Seq(prot_sequence.upper(), IUPAC.protein)
else:
self.sequence = Seq(prot_sequence, IUPAC.protein)
self.amino_acids_content = None
self.amino_acids_percent = None
self.length = len(self.sequence)
self.monoisotopic = monoisotopic
def count_amino_acids(self):
"""Count standard amino acids, returns a dict.
Counts the number times each amino acid is in the protein
sequence. Returns a dictionary {AminoAcid:Number}.
The return value is cached in self.amino_acids_content.
It is not recalculated upon subsequent calls.
"""
if self.amino_acids_content is None:
prot_dic = dict((k, 0) for k in IUPACData.protein_letters)
for aa in prot_dic:
prot_dic[aa] = self.sequence.count(aa)
self.amino_acids_content = prot_dic
return self.amino_acids_content
def get_amino_acids_percent(self):
"""Calculate the amino acid content in percentages.
The same as count_amino_acids only returns the Number in percentage of
entire sequence. Returns a dictionary of {AminoAcid:percentage}.
The return value is cached in self.amino_acids_percent.
input is the dictionary self.amino_acids_content.
output is a dictionary with amino acids as keys.
"""
if self.amino_acids_percent is None:
aa_counts = self.count_amino_acids()
percentages = {}
for aa in aa_counts:
percentages[aa] = aa_counts[aa] / float(self.length)
self.amino_acids_percent = percentages
return self.amino_acids_percent
def molecular_weight(self):
"""Calculate MW from Protein sequence"""
return molecular_weight(self.sequence, monoisotopic=self.monoisotopic)
def aromaticity(self):
"""Calculate the aromaticity according to Lobry, 1994.
Calculates the aromaticity value of a protein according to Lobry, 1994.
It is simply the relative frequency of Phe+Trp+Tyr.
"""
aromatic_aas = 'YWF'
aa_percentages = self.get_amino_acids_percent()
aromaticity = sum(aa_percentages[aa] for aa in aromatic_aas)
return aromaticity
def instability_index(self):
"""Calculate the instability index according to Guruprasad et al 1990.
Implementation of the method of Guruprasad et al. 1990 to test a
protein for stability. Any value above 40 means the protein is unstable
(has a short half life).
See: Guruprasad K., Reddy B.V.B., Pandit M.W.
Protein Engineering 4:155-161(1990).
"""
index = ProtParamData.DIWV
score = 0.0
for i in range(self.length - 1):
this, next = self.sequence[i:i + 2]
dipeptide_value = index[this][next]
score += dipeptide_value
return (10.0 / self.length) * score
def flexibility(self):
"""Calculate the flexibility according to Vihinen, 1994.
No argument to change window size because parameters are specific for a
window=9. The parameters used are optimized for determining the flexibility.
"""
flexibilities = ProtParamData.Flex
window_size = 9
weights = [0.25, 0.4375, 0.625, 0.8125, 1]
scores = []
for i in range(self.length - window_size):
subsequence = self.sequence[i:i + window_size]
score = 0.0
for j in range(window_size // 2):
front = subsequence[j]
back = subsequence[window_size - j - 1]
score += (flexibilities[front] + flexibilities[back]) * weights[j]
            middle = subsequence[window_size // 2]
score += flexibilities[middle]
scores.append(score / 5.25)
return scores
def gravy(self):
"""Calculate the gravy according to Kyte and Doolittle."""
total_gravy = sum(ProtParamData.kd[aa] for aa in self.sequence)
return total_gravy / self.length
def _weight_list(self, window, edge):
"""Makes a list of relative weight of the
window edges compared to the window center. The weights are linear.
it actually generates half a list. For a window of size 9 and edge 0.4
you get a list of [0.4, 0.55, 0.7, 0.85].
"""
unit = 2 * (1.0 - edge) / (window - 1)
weights = [0.0] * (window // 2)
for i in range(window // 2):
weights[i] = edge + unit * i
return weights
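    # Illustrative check (not part of the original):
    #   pa = ProteinAnalysis("MAEGEITTFT")
    #   half = pa._weight_list(9, 0.4)
    #   assert [round(w, 2) for w in half] == [0.4, 0.55, 0.7, 0.85]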
def protein_scale(self, param_dict, window, edge=1.0):
"""Compute a profile by any amino acid scale.
An amino acid scale is defined by a numerical value assigned to each type of
amino acid. The most frequently used scales are the hydrophobicity or
hydrophilicity scales and the secondary structure conformational parameters
scales, but many other scales exist which are based on different chemical and
physical properties of the amino acids. You can set several parameters that
control the computation of a scale profile, such as the window size and the
window edge relative weight value.
WindowSize: The window size is the length
of the interval to use for the profile computation. For a window size n, we
        use the (n-1)/2 neighboring residues on each side of residue i to compute
the score for residue i. The score for residue i is the sum of the scaled values
for these amino acids, optionally weighted according to their position in the
window.
Edge: The central amino acid of the window always has a weight of 1.
By default, the amino acids at the remaining window positions have the same
weight, but you can make the residue at the center of the window have a
larger weight than the others by setting the edge value for the residues at
the beginning and end of the interval to a value between 0 and 1. For
instance, for Edge=0.4 and a window size of 5 the weights will be: 0.4, 0.7,
1.0, 0.7, 0.4.
The method returns a list of values which can be plotted to
view the change along a protein sequence. Many scales exist. Just add your
favorites to the ProtParamData modules.
Similar to expasy's ProtScale: http://www.expasy.org/cgi-bin/protscale.pl
"""
# generate the weights
# _weight_list returns only one tail. If the list should be [0.4,0.7,1.0,0.7,0.4]
# what you actually get from _weights_list is [0.4,0.7]. The correct calculation is done
# in the loop.
weights = self._weight_list(window, edge)
scores = []
# the score in each Window is divided by the sum of weights
# (* 2 + 1) since the weight list is one sided:
sum_of_weights = sum(weights) * 2 + 1
for i in range(self.length - window + 1):
subsequence = self.sequence[i:i + window]
score = 0.0
for j in range(window // 2):
# walk from the outside of the Window towards the middle.
# Iddo: try/except clauses added to avoid raising an exception on a non-standard amino acid
try:
front = param_dict[subsequence[j]]
back = param_dict[subsequence[window - j - 1]]
score += weights[j] * front + weights[j] * back
except KeyError:
sys.stderr.write('warning: %s or %s is not a standard amino acid.\n' %
(subsequence[j], subsequence[window - j - 1]))
# Now add the middle value, which always has a weight of 1.
middle = subsequence[window // 2]
if middle in param_dict:
score += param_dict[middle]
else:
sys.stderr.write('warning: %s is not a standard amino acid.\n' % (middle))
scores.append(score / sum_of_weights)
return scores
def isoelectric_point(self):
"""Calculate the isoelectric point.
Uses the module IsoelectricPoint to calculate the pI of a protein.
"""
aa_content = self.count_amino_acids()
ie_point = IsoelectricPoint.IsoelectricPoint(self.sequence, aa_content)
return ie_point.pi()
def secondary_structure_fraction(self):
"""Calculate fraction of helix, turn and sheet.
Returns a list of the fraction of amino acids which tend
to be in Helix, Turn or Sheet.
Amino acids in helix: V, I, Y, F, W, L.
Amino acids in Turn: N, P, G, S.
Amino acids in sheet: E, M, A, L.
        Returns a tuple of three floats (Helix, Turn, Sheet).
"""
aa_percentages = self.get_amino_acids_percent()
helix = sum(aa_percentages[r] for r in 'VIYFWL')
turn = sum(aa_percentages[r] for r in 'NPGS')
sheet = sum(aa_percentages[r] for r in 'EMAL')
return helix, turn, sheet
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/SeqUtils/ProtParam.py
|
Python
|
gpl-2.0
| 11,022
|
[
"Biopython"
] |
85640ac63a7b3686959c9c55841257c9bad72f9880f11bcc9dc2515aed72ca88
|
import Bio.SubsMat.MatrixInfo
from Bio import pairwise2
import KmerUtil
from Bio.pairwise2 import format_alignment
DEF_MATCH = 1
DEF_MISMATCH = -1
DEF_GAP = -1
# idt evidentally uses -2 for opening a gap or mismatching
IDT_DEF_GAP_MIS = -2
# Parameters from European Bioinformatics Institute,
# http://www.ebi.ac.uk/Tools/psa/emboss_needle/nucleotide.html
# 2016-8-9:
EBI_GAP_OPEN = -10
EBI_GAP_EXTEND = -0.5
# see : http://osdir.com/ml/science.biology.emboss/2005-12/msg00028.html
EBI_MISMATCH = -4
EBI_MATCH = 5
class AlignmentInfo:
def __init__(self,s1,s2,score,startIdx,endIdx):
"""
        Wrapper class for Biopython; just wraps the sequences we need.
        All args come directly from the pairwise2 alignment functions.
        Unit tested by TestUtil.TestAlignments.TestReverseComplementAlignments
        Args:
            s1: alignment of s1
            s2: alignment of s2
            score: score of the alignment
            startIdx: start index, 4th arg returned by pairwise2
            endIdx: end index, 5th arg returned by pairwise2
"""
self.s1 = s1
self.s2 = s2
self.score = score
self.startIdx = startIdx
self.endIdx = endIdx
def AlignmentTuple(self):
"""
        Return the tuple which the pairwise2.align functions return. Useful for
(e.g.) pretty printing
"""
return (self.s1,self.s2,self.score,self.startIdx,self.endIdx)
def __str__(self):
return format_alignment(*self.AlignmentTuple())
def __repr__(self):
return str(self)
def Sanitize(Seq):
"""
Sanitizes (strips out trailing/starting whitespace, lowercase)
a given sequence
Args:
Seq: The sequence to sanitize
Returns:
The sanitized sequence
"""
return Seq.strip().upper()
def GetIdtAlignments(Seq1,Seq2,MismatchScore=IDT_DEF_GAP_MIS,
GapOpen=IDT_DEF_GAP_MIS,
GapExtend=0,**kwargs):
"""
Gets the alignment scores for two sequences,using (by default) IDT's params,
Args:
Seq1,Seq2: align Seq1 to Seq2. *both should be DNA
Other args: see GetBestSelfDimerAlignmentScore
Returns:
maximum over all alignment scores
"""
alignments = AlignmentScores(Seq1,Seq2,MismatchScore=MismatchScore,
GapOpen=GapOpen,GapExtend=GapExtend,**kwargs)
return alignments
def GetEbiAlignments(Seq1,Seq2,**kwargs):
"""
Gets the EBI (European Bioinformatics Institute) local alignment on DNA,
using defaults listed on
ebi.ac.uk/Tools/psa/emboss_needle/help/index-nucleotide.html
Args:
See AlignmentScores: both are DNA
**kwargs: passed to AlignmentScores
"""
return AlignmentScores(Seq1,Seq2,
MatchScore=EBI_MATCH,
MismatchScore=EBI_MISMATCH,
GapOpen=EBI_GAP_OPEN,
GapExtend=EBI_GAP_EXTEND)
def GetBestSelfDimerAlignmentScore(Seq,MismatchScore=IDT_DEF_GAP_MIS,
GapOpen=IDT_DEF_GAP_MIS,
GapExtend=0,**kwargs):
"""
Gets the best (highest) self-dimer alignment for the given sequence with
its reverse complement (this states 'how likely is the sequence to
    bind to itself').
By default, similar to what the Homo-Dimer Analysis of Idt
(http://www.idtdna.com/calc/analyzer , look for "Self-Dimer") does. It
    penalizes any gaps by two,
    e.g. TAGGACCACTCG -> at most 2, according to IDT
Unit tested by TestUtil.TestAlignments.TestReverseComplementAlignments
Args:
Seq: Sequence to align with its reverse
Others: see AlignmentScores. Note default has no penalties
Returns:
score from alignment. If using default arguments, this is number
of base-pair matches, less 2 for the start of any gap
"""
alignment = AlignSelfWithReverseComplement(Seq,
MismatchScore=MismatchScore,
GapOpen=GapOpen,
GapExtend=GapExtend,
one_alignment_only=True,
**kwargs)
return alignment[0].score
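# Example (illustrative): score how strongly a primer binds to itself.
#   score = GetBestSelfDimerAlignmentScore("TAGGACCACTCG")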
def AlignSelfWithReverseComplement(Seq,MismatchScore=IDT_DEF_GAP_MIS,
GapOpen=IDT_DEF_GAP_MIS,GapExtend=0,
**kwargs):
"""
Gets an alignment score for the sequence with itself reversed, complemented.
Unit tested implicitly by
TestUtil.TestAlignments.TestReverseComplementAlignment
Args:
Seq: Sequence to align with itself
Others: See AlignmentScores
Returns:
List of possible alignments as AlignmentInfo objects
"""
ReverseComp = KmerUtil.ReverseComplement(Seq)
return AlignmentScores(Seq,ReverseComp,MismatchScore=MismatchScore,
GapOpen=GapOpen,GapExtend=GapExtend,**kwargs)
def AlignmentScores(Seq1,Seq2,MatchScore=DEF_MATCH,MismatchScore=DEF_MISMATCH,
GapOpen=DEF_GAP,GapExtend=DEF_GAP,SanitizeSeqs=True,
**kwargs):
"""
Align two sequences locally.
Unit tested implicitly by
TestUtil.TestAlignments.TestReverseComplementAlignment
Args:
Seq1: First Sequence
Seq2: Second Sequence
MatchScore: Amount to add per match
MismatchScore: Amount to add per mismatch. Usually <0
GapOpen : Amount to add per gap Open. Usually <0
GapExtend: Amount to add per gap extension (given open). Usually <0
SanitizeSeqs: If true, calls the sanitize function on input strings
**kwargs: passed to localms
Returns:
List of possible Alignments as AlignmentInfo objects
"""
# see http://biopython.org/DIST/docs/api/Bio.pairwise2-module.html
# look for 'globalms'
if (SanitizeSeqs):
Seq1 = Sanitize(Seq1)
Seq2 = Sanitize(Seq2)
alignments = pairwise2.align.localms(Seq1,Seq2,MatchScore,MismatchScore,
GapOpen,GapExtend,**kwargs)
if (len(alignments) == 0):
# no alignment possible
return [AlignmentInfo(Seq1,Seq2,None,None,None)]
else:
return [AlignmentInfo(*a) for a in alignments]
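# Usage sketch (illustrative):
#   infos = AlignmentScores("ACGTACGT", "ACGTTCGT")
#   best = max(infos, key=lambda a: a.score if a.score is not None else -1)
#   print(best)  # pretty-printed via format_alignment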
|
prheenan/Research
|
Perkins/Projects/Primers/Util/AlignUtil.py
|
Python
|
gpl-3.0
| 6,532
|
[
"Biopython"
] |
a5a6f67fabb4ae7d2b162ddc8fccaabb2354738a5a1b39bec899955bd1dd09d6
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import copy
import uuid
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from nova.compute import flavors
from nova.compute import resource_tracker
from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova import rpc
from nova import test
from nova.tests.unit.compute.monitors import test_monitors
from nova.tests.unit.pci import fakes as pci_fakes
from nova.virt import driver
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
FAKE_VIRT_NUMA_TOPOLOGY = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([3, 4]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([]))])
FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = objects.NUMATopologyLimits(
cpu_allocation_ratio=2, ram_allocation_ratio=2)
ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
class FakeVirtDriver(driver.ComputeDriver):
def __init__(self, pci_support=False, stats=None,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
super(FakeVirtDriver, self).__init__(None)
self.memory_mb = FAKE_VIRT_MEMORY_MB
self.local_gb = FAKE_VIRT_LOCAL_GB
self.vcpus = FAKE_VIRT_VCPUS
self.numa_topology = numa_topology
self.memory_mb_used = 0
self.local_gb_used = 0
self.pci_support = pci_support
self.pci_devices = [
{
'label': 'label_8086_0443',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.1',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0443',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.2',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0443',
'dev_type': 'type-PF',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0123',
'dev_type': 'type-PCI',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0123',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_7891',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '7891',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': None
},
] if self.pci_support else []
self.pci_stats = [
{
'count': 2,
'vendor_id': '8086',
'product_id': '0443',
'numa_node': 1
},
{
'count': 1,
'vendor_id': '8086',
'product_id': '7891',
'numa_node': None
},
] if self.pci_support else []
if stats is not None:
self.stats = stats
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': 0,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
'numa_topology': (
self.numa_topology._to_json() if self.numa_topology else None),
}
if self.pci_support:
d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
if hasattr(self, 'stats'):
d['stats'] = self.stats
return d
def estimate_instance_overhead(self, instance_info):
instance_info['memory_mb'] # make sure memory value is present
overhead = {
'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
}
return overhead # just return a constant value for testing
class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self.context = context.get_admin_context()
self.flags(pci_passthrough_whitelist=[
'{"vendor_id": "8086", "product_id": "0443"}',
'{"vendor_id": "8086", "product_id": "7891"}'])
self.flags(use_local=True, group='conductor')
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self._instances = {}
self._numa_topologies = {}
self._instance_types = {}
self.stubs.Set(self.conductor.db,
'instance_get_all_by_host_and_node',
self._fake_instance_get_all_by_host_and_node)
self.stubs.Set(db, 'instance_extra_get_by_instance_uuid',
self._fake_instance_extra_get_by_instance_uuid)
self.stubs.Set(self.conductor.db,
'flavor_get', self._fake_flavor_get)
self.host = 'fakehost'
self.compute = self._create_compute_node()
self.updated = False
self.deleted = False
self.update_call_count = 0
def _create_compute_node(self, values=None):
compute = {
"id": 1,
"service_id": 1,
"host": "fakehost",
"vcpus": 1,
"memory_mb": 1,
"local_gb": 1,
"vcpus_used": 1,
"memory_mb_used": 1,
"local_gb_used": 1,
"free_ram_mb": 1,
"free_disk_gb": 1,
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
"numa_topology": None,
"stats": '{"num_instances": "1"}',
"hypervisor_hostname": "fakenode",
'hypervisor_version': 1,
'hypervisor_type': 'fake-hyp',
'disk_available_least': None,
'host_ip': None,
'metrics': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
}
if values:
compute.update(values)
return compute
def _create_service(self, host="fakehost", compute=None):
if compute:
compute = [compute]
service = {
"id": 1,
"host": host,
"binary": "nova-compute",
"topic": "compute",
"compute_node": compute,
"report_count": 0,
'disabled': False,
'disabled_reason': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
}
return service
def _fake_instance_system_metadata(self, instance_type, prefix=''):
sys_meta = []
for key in flavors.system_metadata_flavor_props.keys():
sys_meta.append({'key': '%sinstance_type_%s' % (prefix, key),
'value': instance_type[key]})
return sys_meta
def _fake_instance(self, stash=True, flavor=None, **kwargs):
# NOTE(danms): Remove this when all the compute_node stuff is
# converted to objects
# Default to an instance ready to resize to or from the same
# instance_type
flavor = flavor or self._fake_flavor_create()
sys_meta = self._fake_instance_system_metadata(flavor)
if stash:
# stash instance types in system metadata.
sys_meta = (sys_meta +
self._fake_instance_system_metadata(flavor, 'new_') +
self._fake_instance_system_metadata(flavor, 'old_'))
instance_uuid = str(uuid.uuid1())
instance = {
'uuid': instance_uuid,
'vm_state': vm_states.RESIZED,
'task_state': None,
'ephemeral_key_uuid': None,
'os_type': 'Linux',
'project_id': '123456',
'host': None,
'node': None,
'instance_type_id': flavor['id'],
'memory_mb': flavor['memory_mb'],
'vcpus': flavor['vcpus'],
'root_gb': flavor['root_gb'],
'ephemeral_gb': flavor['ephemeral_gb'],
'launched_on': None,
'system_metadata': sys_meta,
'availability_zone': None,
'vm_mode': None,
'reservation_id': None,
'display_name': None,
'default_swap_device': None,
'power_state': None,
'scheduled_at': None,
'access_ip_v6': None,
'access_ip_v4': None,
'key_name': None,
'updated_at': None,
'cell_name': None,
'locked': None,
'locked_by': None,
'launch_index': None,
'architecture': None,
'auto_disk_config': None,
'terminated_at': None,
'ramdisk_id': None,
'user_data': None,
'cleaned': None,
'deleted_at': None,
'id': 333,
'disable_terminate': None,
'hostname': None,
'display_description': None,
'key_data': None,
'deleted': None,
'default_ephemeral_device': None,
'progress': None,
'launched_at': None,
'config_drive': None,
'kernel_id': None,
'user_id': None,
'shutdown_terminate': None,
'created_at': None,
'image_ref': None,
'root_device_name': None,
}
extra = {
'id': 1, 'created_at': None, 'updated_at': None,
'deleted_at': None, 'deleted': None,
'instance_uuid': instance['uuid'],
'numa_topology': None,
'pci_requests': None,
}
numa_topology = kwargs.pop('numa_topology', None)
if numa_topology:
extra['numa_topology'] = numa_topology._to_json()
instance.update(kwargs)
instance['extra'] = extra
self._instances[instance_uuid] = instance
self._numa_topologies[instance_uuid] = extra
return instance
def _fake_instance_obj(self, stash=True, flavor=None, **kwargs):
# Default to an instance ready to resize to or from the same
# instance_type
flavor = flavor or self._fake_flavor_create()
if not isinstance(flavor, objects.Flavor):
flavor = objects.Flavor(**flavor)
instance_uuid = str(uuid.uuid1())
instance = objects.Instance(context=self.context, uuid=instance_uuid,
flavor=flavor)
instance.update({
'vm_state': vm_states.RESIZED,
'task_state': None,
'ephemeral_key_uuid': None,
'os_type': 'Linux',
'project_id': '123456',
'host': None,
'node': None,
'instance_type_id': flavor['id'],
'memory_mb': flavor['memory_mb'],
'vcpus': flavor['vcpus'],
'root_gb': flavor['root_gb'],
'ephemeral_gb': flavor['ephemeral_gb'],
'launched_on': None,
'system_metadata': {},
'availability_zone': None,
'vm_mode': None,
'reservation_id': None,
'display_name': None,
'default_swap_device': None,
'power_state': None,
'scheduled_at': None,
'access_ip_v6': None,
'access_ip_v4': None,
'key_name': None,
'updated_at': None,
'cell_name': None,
'locked': None,
'locked_by': None,
'launch_index': None,
'architecture': None,
'auto_disk_config': None,
'terminated_at': None,
'ramdisk_id': None,
'user_data': None,
'cleaned': None,
'deleted_at': None,
'id': 333,
'disable_terminate': None,
'hostname': None,
'display_description': None,
'key_data': None,
'deleted': None,
'default_ephemeral_device': None,
'progress': None,
'launched_at': None,
'config_drive': None,
'kernel_id': None,
'user_id': None,
'shutdown_terminate': None,
'created_at': None,
'image_ref': None,
'root_device_name': None,
})
if stash:
instance.old_flavor = flavor
instance.new_flavor = flavor
instance.numa_topology = kwargs.pop('numa_topology', None)
instance.update(kwargs)
self._instances[instance_uuid] = instance
return instance
def _fake_flavor_create(self, **kwargs):
instance_type = {
'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'disabled': False,
'is_public': True,
'name': 'fakeitype',
'memory_mb': FAKE_VIRT_MEMORY_MB,
'vcpus': FAKE_VIRT_VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'flavorid': 'fakeflavor',
'extra_specs': {},
}
instance_type.update(**kwargs)
id_ = instance_type['id']
self._instance_types[id_] = instance_type
return instance_type
def _fake_instance_get_all_by_host_and_node(self, context, host, nodename,
columns_to_join=None):
return [i for i in self._instances.values() if i['host'] == host]
def _fake_instance_extra_get_by_instance_uuid(self, context,
instance_uuid, columns=None):
return self._numa_topologies.get(instance_uuid)
def _fake_flavor_get(self, ctxt, id_):
return self._instance_types[id_]
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _driver(self):
return FakeVirtDriver()
def _tracker(self, host=None):
if host is None:
host = self.host
node = "fakenode"
driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
tracker.compute_node = self._create_compute_node()
tracker.ext_resources_handler = \
resources.ResourceHandler(RESOURCE_NAMES, True)
return tracker
class UnsupportedDriverTestCase(BaseTestCase):
"""Resource tracking should be disabled when the virt driver doesn't
support it.
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
self.tracker = self._tracker()
# seed tracker with data:
self.tracker.update_available_resource(self.context)
def _driver(self):
return UnsupportedVirtDriver()
def test_disabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
self.assertIsNone(self.tracker.compute_node)
def test_disabled_claim(self):
# basic claim:
instance = self._fake_instance_obj()
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_claim(self):
# instance variation:
instance = self._fake_instance_obj()
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
@mock.patch('nova.objects.Instance.save')
def test_disabled_instance_context_claim(self, mock_save):
# instance context manager variation:
instance = self._fake_instance_obj()
self.tracker.instance_claim(self.context, instance)
with self.tracker.instance_claim(self.context, instance) as claim:
self.assertEqual(0, claim.memory_mb)
def test_disabled_updated_usage(self):
instance = self._fake_instance(host='fakehost', memory_mb=5,
root_gb=10)
self.tracker.update_usage(self.context, instance)
def test_disabled_resize_claim(self):
instance = self._fake_instance()
instance_type = self._fake_flavor_create()
claim = self.tracker.resize_claim(self.context, instance,
instance_type)
self.assertEqual(0, claim.memory_mb)
self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
self.assertEqual(instance_type['id'],
claim.migration['new_instance_type_id'])
def test_disabled_resize_context_claim(self):
instance = self._fake_instance()
instance_type = self._fake_flavor_create()
with self.tracker.resize_claim(self.context, instance, instance_type) \
as claim:
self.assertEqual(0, claim.memory_mb)
class MissingServiceTestCase(BaseTestCase):
def setUp(self):
super(MissingServiceTestCase, self).setUp()
self.context = context.get_admin_context()
self.tracker = self._tracker()
def test_missing_service(self):
self.tracker.compute_node = None
self.tracker._get_service = mock.Mock(return_value=None)
self.tracker.update_available_resource(self.context)
self.assertTrue(self.tracker.disabled)
class MissingComputeNodeTestCase(BaseTestCase):
def setUp(self):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stubs.Set(db, 'compute_node_create',
self._fake_create_compute_node)
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def _fake_create_compute_node(self, context, values):
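        # record that the tracker asked us to create a compute node,
        # so tests can assert on self.created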
self.created = True
return self._create_compute_node(values)
def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
return service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
# return no compute node
raise exception.ComputeHostNotFound(host=host)
def test_create_compute_node(self):
self.tracker.compute_node = None
self.tracker.update_available_resource(self.context)
self.assertTrue(self.created)
def test_enabled(self):
self.tracker.update_available_resource(self.context)
self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):
def setUp(self):
# setup plumbing for a working resource tracker with required
# database models and a compatible compute driver:
super(BaseTrackerTestCase, self).setUp()
self.tracker = self._tracker()
self._migrations = {}
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
self.stubs.Set(db, 'compute_node_delete',
self._fake_compute_node_delete)
self.stubs.Set(db, 'migration_update',
self._fake_migration_update)
self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
# Note that this must be called before the call to _init_tracker()
patcher = pci_fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self.stubs.Set(self.tracker.scheduler_client, 'update_resource_stats',
self._fake_compute_node_update)
self._init_tracker()
self.limits = self._limits()
def _fake_service_get_by_compute_host(self, ctx, host):
self.service = self._create_service(host, compute=self.compute)
return self.service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
self.compute = self._create_compute_node()
return self.compute
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _fake_compute_node_delete(self, ctx, compute_node_id):
self.deleted = True
self.compute.update({'deleted': 1})
return self.compute
def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
node):
status = ['confirmed', 'reverted', 'error']
migrations = []
for migration in self._migrations.values():
migration = obj_base.obj_to_primitive(migration)
if migration['status'] in status:
continue
uuid = migration['instance_uuid']
migration['instance'] = self._instances[uuid]
migrations.append(migration)
return migrations
def _fake_migration_update(self, ctxt, migration_id, values):
# cheat and assume there's only 1 migration present
        migration = list(self._migrations.values())[0]
migration.update(values)
return migration
def _init_tracker(self):
self.tracker.update_available_resource(self.context)
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
disk_gb=FAKE_VIRT_LOCAL_GB,
vcpus=FAKE_VIRT_VCPUS,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD):
"""Create limits dictionary used for oversubscribing resources."""
return {
'memory_mb': memory_mb,
'disk_gb': disk_gb,
'vcpu': vcpus,
'numa_topology': numa_topology,
}
def assertEqualNUMAHostTopology(self, expected, got):
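        """Compare two host NUMA topologies cell by cell on the attributes
        relevant to resource tracking, treating None as a legitimate
        value on either side."""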
attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
if None in (expected, got):
if expected != got:
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
else:
return
if len(expected) != len(got):
raise AssertionError("Topologies don't match due to different "
"number of cells. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
for exp_cell, got_cell in zip(expected.cells, got.cells):
for attr in attrs:
if getattr(exp_cell, attr) != getattr(got_cell, attr):
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
def _assert(self, value, field, tracker=None):
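        """Assert that the tracker's compute node reports `value` for
        `field`, using the NUMA-aware comparison for topologies."""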
if tracker is None:
tracker = self.tracker
if field not in tracker.compute_node:
raise test.TestingException(
"'%(field)s' not in compute node." % {'field': field})
x = tracker.compute_node[field]
if field == 'numa_topology':
self.assertEqualNUMAHostTopology(
value, objects.NUMATopology.obj_from_db_obj(x))
else:
self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb'])
def test_free_disk_resource_value(self):
driver = FakeVirtDriver()
        disk_free = driver.local_gb - driver.local_gb_used
        self.assertEqual(disk_free, self.tracker.compute_node['free_disk_gb'])
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
self.assertEqual(driver.pci_stats,
self.tracker.compute_node['pci_device_pools'])
class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
def setUp(self):
super(SchedulerClientTrackerTestCase, self).setUp()
self.tracker.scheduler_client.update_resource_stats = mock.Mock(
side_effect=self._fake_compute_node_update)
def test_update_resource(self):
# change a compute node value to simulate a change
self.tracker.compute_node['local_gb_used'] += 1
expected = copy.deepcopy(self.tracker.compute_node)
self.tracker._update(self.context)
self.tracker.scheduler_client.update_resource_stats.\
assert_called_once_with(self.context,
("fakehost", "fakenode"),
expected)
def test_no_update_resource(self):
self.tracker._update(self.context)
update = self.tracker.scheduler_client.update_resource_stats
self.assertFalse(update.called, "update_resource_stats should not be "
"called when there is no change")
class TrackerPciStatsTestCase(BaseTrackerTestCase):
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
# NOTE(danms): PciDeviceStats only supports iteration, so we have to
# listify it before we can examine the contents by index.
pools = list(self.tracker.compute_node['pci_device_pools'])
self.assertEqual(driver.pci_stats[0]['product_id'],
pools[0]['product_id'])
def _driver(self):
return FakeVirtDriver(pci_support=True)
class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
def setUp(self):
super(TrackerExtraResourcesTestCase, self).setUp()
self.driver = self._driver()
def _driver(self):
return FakeVirtDriver()
def test_set_empty_ext_resources(self):
resources = self.driver.get_available_resource(self.tracker.nodename)
self.assertNotIn('stats', resources)
self.tracker._write_ext_resources(resources)
self.assertIn('stats', resources)
def test_set_extra_resources(self):
def fake_write_resources(resources):
resources['stats']['resA'] = '123'
resources['stats']['resB'] = 12
self.stubs.Set(self.tracker.ext_resources_handler,
'write_resources',
fake_write_resources)
resources = self.driver.get_available_resource(self.tracker.nodename)
self.tracker._write_ext_resources(resources)
expected = {"resA": "123", "resB": 12}
self.assertEqual(sorted(expected),
sorted(resources['stats']))
class InstanceClaimTestCase(BaseTrackerTestCase):
def _instance_topology(self, mem):
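        """Build a two-cell instance NUMA topology with `mem` (scaled by
        1024) of memory per cell."""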
mem = mem * 1024
return objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=0, cpuset=set([1]), memory=mem),
objects.InstanceNUMACell(
id=1, cpuset=set([3]), memory=mem)])
def _claim_topology(self, mem, cpus=1):
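        """Build the host NUMA topology expected after a claim of `mem`
        (scaled by 1024) and `cpus` CPUs per cell; returns None when the
        driver reports no NUMA topology."""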
if self.tracker.driver.numa_topology is None:
return None
mem = mem * 1024
return objects.NUMATopology(
cells=[objects.NUMACell(
id=0, cpuset=set([1, 2]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([])),
objects.NUMACell(
id=1, cpuset=set([3, 4]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([]))])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_usage_only_for_tracked(self, mock_get):
flavor = self._fake_flavor_create()
claim_mem = flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD
claim_gb = flavor['root_gb'] + flavor['ephemeral_gb']
claim_topology = self._claim_topology(claim_mem / 2)
instance_topology = self._instance_topology(claim_mem / 2)
instance = self._fake_instance_obj(
flavor=flavor, task_state=None,
numa_topology=instance_topology)
self.tracker.update_usage(self.context, instance)
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'current_workload')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertNotEqual(0, claim.memory_mb)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(claim_topology, 'numa_topology')
# now update should actually take effect
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(claim_topology, 'numa_topology')
self._assert(1, 'current_workload')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_and_abort(self, mock_get):
claim_mem = 3
claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
claim_disk = 2
claim_topology = self._claim_topology(claim_mem_total / 2)
instance_topology = self._instance_topology(claim_mem_total / 2)
instance = self._fake_instance_obj(memory_mb=claim_mem,
root_gb=claim_disk, ephemeral_gb=0,
numa_topology=instance_topology)
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertIsNotNone(claim)
self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
claim_topology, objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute["free_disk_gb"])
claim.abort()
self.assertEqual(0, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(0, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["free_disk_gb"])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_claim_with_oversubscription(self, mock_get):
memory_mb = FAKE_VIRT_MEMORY_MB * 2
root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
vcpus = FAKE_VIRT_VCPUS * 2
claim_topology = self._claim_topology(3)
instance_topology = self._instance_topology(3)
limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
'disk_gb': root_gb * 2,
'vcpu': vcpus,
'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD}
instance = self._fake_instance_obj(memory_mb=memory_mb,
root_gb=root_gb, ephemeral_gb=ephemeral_gb,
numa_topology=instance_topology)
with mock.patch.object(instance, 'save'):
self.tracker.instance_claim(self.context, instance, limits)
self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(root_gb * 2,
self.tracker.compute_node['local_gb_used'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_additive_claims(self, mock_save, mock_get):
self.limits['vcpu'] = 2
claim_topology = self._claim_topology(2, cpus=2)
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=1, ephemeral_gb=0)
instance_topology = self._instance_topology(1)
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
self.tracker.compute_node['local_gb_used'])
self.assertEqual(2 * flavor['vcpus'],
self.tracker.compute_node['vcpus_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_context_claim_with_exception(self, mock_save, mock_get):
instance = self._fake_instance_obj(memory_mb=1, root_gb=1,
ephemeral_gb=1)
try:
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
raise test.TestingException()
except test.TestingException:
pass
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_instance_context_claim(self, mock_get_all, mock_save, mock_get):
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=2, ephemeral_gb=3)
claim_topology = self._claim_topology(1)
instance_topology = self._instance_topology(1)
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node['local_gb_used'])
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
        # after exiting the claim context, the build is marked as finished;
        # usage totals should be the same:
mock_get_all.return_value = [instance]
self.tracker.update_available_resource(self.context)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node['local_gb_used'])
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_load_stats_for_instance(self, mock_get):
instance = self._fake_instance_obj(task_state=task_states.SCHEDULING)
with mock.patch.object(instance, 'save'):
with self.tracker.instance_claim(self.context, instance):
pass
self.assertEqual(1, self.tracker.compute_node['current_workload'])
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
instance['host'] = 'fakehost'
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_cpu_stats(self, mock_save, mock_get):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
vcpus = 1
instance = self._fake_instance_obj(vcpus=vcpus)
# should not do anything until a claim is made:
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
# instance state can change without modifying vcpus in use:
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
add_vcpus = 10
vcpus += add_vcpus
instance = self._fake_instance_obj(vcpus=add_vcpus)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
vcpus -= add_vcpus
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
def test_skip_deleted_instances(self):
        # ensure that the audit process skips instances whose vm_state is
        # DELETED but whose DB records are not yet deleted.
self._fake_instance(vm_state=vm_states.DELETED, host=self.host)
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_deleted_instances_with_migrations(self, mock_migration_list):
migration = objects.Migration(context=self.context,
instance_uuid='invalid')
mock_migration_list.return_value = [migration]
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
@mock.patch('nova.compute.claims.Claim')
@mock.patch('nova.objects.Instance.save')
def test_claim_saves_numa_topology(self, mock_save, mock_claim):
def fake_save():
self.assertEqual(set(['numa_topology', 'host', 'node',
'launched_on']),
inst.obj_what_changed())
mock_save.side_effect = fake_save
inst = objects.Instance(host=None, node=None, memory_mb=1024)
inst.obj_reset_changes()
numa = objects.InstanceNUMATopology()
claim = mock.MagicMock()
claim.claimed_numa_topology = numa
mock_claim.return_value = claim
with mock.patch.object(self.tracker, '_update_usage_from_instance'):
self.tracker.instance_claim(self.context, inst)
mock_save.assert_called_once_with()
def test_set_instance_host_and_node(self):
inst = objects.Instance()
with mock.patch.object(inst, 'save') as mock_save:
self.tracker._set_instance_host_and_node(self.context, inst)
mock_save.assert_called_once_with()
self.assertEqual(self.tracker.host, inst.host)
self.assertEqual(self.tracker.nodename, inst.node)
self.assertEqual(self.tracker.host, inst.launched_on)
class ResizeClaimTestCase(BaseTrackerTestCase):
def setUp(self):
super(ResizeClaimTestCase, self).setUp()
self.instance = self._fake_instance()
self.instance_type = self._fake_flavor_create()
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_abort(self, mock_get):
try:
with self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits):
raise test.TestingException("abort")
except test.TestingException:
pass
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self.assertEqual(0, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get):
limits = self._limits(
2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
2 * FAKE_VIRT_LOCAL_GB,
2 * FAKE_VIRT_VCPUS)
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, limits)
instance2 = self._fake_instance()
self.tracker.resize_claim(self.context, instance2, self.instance_type,
limits)
self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_revert(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, {}, self.limits)
self.tracker.drop_resize_claim(self.context, self.instance)
self.assertEqual(0, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
def test_resize_filter(self):
instance = self._fake_instance(vm_state=vm_states.ACTIVE,
task_state=task_states.SUSPENDING)
self.assertFalse(self.tracker._instance_in_resize_state(instance))
instance = self._fake_instance(vm_state=vm_states.RESIZED,
task_state=task_states.SUSPENDING)
self.assertTrue(self.tracker._instance_in_resize_state(instance))
states = [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING,
task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH]
for vm_state in [vm_states.ACTIVE, vm_states.STOPPED]:
for task_state in states:
instance = self._fake_instance(vm_state=vm_state,
task_state=task_state)
result = self.tracker._instance_in_resize_state(instance)
self.assertTrue(result)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_set_instance_host_and_node(self, mock_get):
instance = self._fake_instance_obj()
self.assertIsNone(instance['host'])
self.assertIsNone(instance['launched_on'])
self.assertIsNone(instance['node'])
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertNotEqual(0, claim.memory_mb)
self.assertEqual('fakehost', instance['host'])
self.assertEqual('fakehost', instance['launched_on'])
self.assertEqual('fakenode', instance['node'])
class NoInstanceTypesInSysMetadata(ResizeClaimTestCase):
"""Make sure we handle the case where the following are true:
#) Compute node C gets upgraded to code that looks for instance types in
system metadata. AND
#) C already has instances in the process of migrating that do not have
stashed instance types.
bug 1164110
"""
def setUp(self):
super(NoInstanceTypesInSysMetadata, self).setUp()
self.instance = self._fake_instance(stash=False)
def test_get_instance_type_stash_false(self):
with (mock.patch.object(objects.Flavor, 'get_by_id',
return_value=self.instance_type)):
flavor = self.tracker._get_instance_type(self.context,
self.instance, "new_")
self.assertEqual(self.instance_type, flavor)
class OrphanTestCase(BaseTrackerTestCase):
def _driver(self):
class OrphanVirtDriver(FakeVirtDriver):
def get_per_instance_usage(self):
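            """Report usage for two instances with no matching DB records,
            i.e. orphans from the tracker's point of view."""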
return {
'1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '1-2-3-4-5'},
'2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '2-3-4-5-6'},
}
return OrphanVirtDriver()
def test_usage(self):
self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
def test_find(self):
# create one legit instance and verify the 2 orphans remain
self._fake_instance()
orphans = self.tracker._find_orphaned_instances()
self.assertEqual(2, len(orphans))
class ComputeMonitorTestCase(BaseTestCase):
def setUp(self):
super(ComputeMonitorTestCase, self).setUp()
fake_monitors = [
'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass1',
'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass2']
self.flags(compute_available_monitors=fake_monitors)
self.tracker = self._tracker()
self.node_name = 'nodename'
self.user_id = 'fake'
self.project_id = 'fake'
self.info = {}
self.context = context.RequestContext(self.user_id,
self.project_id)
def test_get_host_metrics_none(self):
        self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
self.tracker.monitors = []
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertEqual(len(metrics), 0)
def test_get_host_metrics_one_failed(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
class4 = test_monitors.FakeMonitorClass4(self.tracker)
self.tracker.monitors = [class1, class4]
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertTrue(len(metrics) > 0)
@mock.patch.object(resource_tracker.LOG, 'warning')
def test_get_host_metrics_exception(self, mock_LOG_warning):
        self.flags(compute_monitors=['FakeMonitorClass1'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
self.tracker.monitors = [class1]
with mock.patch.object(class1, 'get_metrics',
side_effect=test.TestingException()):
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_LOG_warning.assert_called_once_with(
u'Cannot get the metrics from %s.', class1)
self.assertEqual(0, len(metrics))
def test_get_host_metrics(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass2'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
class2 = test_monitors.FakeMonitorClass2(self.tracker)
self.tracker.monitors = [class1, class2]
mock_notifier = mock.Mock()
with mock.patch.object(rpc, 'get_notifier',
return_value=mock_notifier) as mock_get:
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_get.assert_called_once_with(service='compute',
host=self.node_name)
expected_metrics = [{
'timestamp': 1232,
'name': 'key1',
'value': 2600,
'source': 'libvirt'
}, {
'name': 'key2',
'source': 'libvirt',
'timestamp': 123,
'value': 1600
}]
payload = {
'metrics': expected_metrics,
'host': self.tracker.host,
'host_ip': CONF.my_ip,
'nodename': self.node_name
}
mock_notifier.info.assert_called_once_with(
self.context, 'compute.metrics.update', payload)
self.assertEqual(metrics, expected_metrics)
class TrackerPeriodicTestCase(BaseTrackerTestCase):
def test_periodic_status_update(self):
# verify update called on instantiation
self.assertEqual(1, self.update_call_count)
# verify update not called if no change to resources
self.tracker.update_available_resource(self.context)
self.assertEqual(1, self.update_call_count)
# verify update is called when resources change
driver = self.tracker.driver
driver.memory_mb += 1
self.tracker.update_available_resource(self.context)
self.assertEqual(2, self.update_call_count)
def test_update_available_resource_calls_locked_inner(self):
@mock.patch.object(self.tracker, 'driver')
@mock.patch.object(self.tracker,
'_update_available_resource')
@mock.patch.object(self.tracker, '_verify_resources')
@mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
resources = {'there is someone in my head': 'but it\'s not me'}
mock_driver.get_available_resource.return_value = resources
self.tracker.update_available_resource(self.context)
mock_uar.assert_called_once_with(self.context, resources)
_test()
class StatsDictTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a dictionary.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS)
def _get_stats(self):
return jsonutils.loads(self.tracker.compute_node['stats'])
def test_virt_stats(self):
# start with virt driver stats
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
# adding an instance should keep virt driver stats
self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
expected_stats = {}
expected_stats.update(FAKE_VIRT_STATS)
expected_stats.update(self.tracker.stats)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
class StatsJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a json string.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON)
def _get_stats(self):
return jsonutils.loads(self.tracker.compute_node['stats'])
def test_virt_stats(self):
# start with virt driver stats
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
# adding an instance should keep virt driver stats
# and add rt stats
self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
expected_stats = {}
expected_stats.update(FAKE_VIRT_STATS)
expected_stats.update(self.tracker.stats)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
class StatsInvalidJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats='this is not json')
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for string that does not parse as json
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
class StatsInvalidTypeTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats=10)
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for incorrect stats value type
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
| thomasem/nova | nova/tests/unit/compute/test_resource_tracker.py | Python | apache-2.0 | 61,844 | ["exciting"] | fe1c87fa27df8cceea4838d6116d0abfa3f56761a1a848e88c9f979cb8901463 |
##
# Copyright 2013-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing Bowtie, implemented as an easyblock
@author: Cedric Laczny (Uni.Lu)
@author: Fotis Georgatos (Uni.Lu)
@author: Kenneth Hoste (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
from distutils.version import LooseVersion
import glob
import os
import shutil
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import mkdir
class EB_Bowtie(ConfigureMake):
"""
    Support for building bowtie (fast and sensitive read alignment)
"""
def configure_step(self):
"""
Set compilers in buildopts, there is no configure script.
"""
comp_opts = 'CC="%(cc)s" CXX="%(cxx)s" CPP="%(cxx)s"' % {'cc': os.getenv('CC'), 'cxx': os.getenv('CXX')}
self.cfg.update('buildopts', comp_opts)
# make sure install target is specified for recent Bowtie versions that support 'make install'
if LooseVersion(self.version) >= LooseVersion('1.1.2'):
self.cfg.update('installopts', "prefix=%s" % self.installdir)
def install_step(self):
"""
Install by copying files to install dir
"""
if LooseVersion(self.version) >= LooseVersion('1.1.2'):
# 'make install' is supported since Bowtie 1.1.2
super(EB_Bowtie, self).install_step()
else:
destdir = os.path.join(self.installdir, 'bin')
mkdir(destdir)
try:
glob_pat = os.path.join(self.cfg['start_dir'], 'bowtie*')
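                # keep only extensionless files (the built executables);
                # os.path.splitext() leaves such names unchanged, so files
                # with a suffix are filtered out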
binaries = [x for x in glob.glob(glob_pat) if os.path.splitext(x)[0] == x]
self.log.debug("Copying binaries to %s: %s", destdir, binaries)
for binary in binaries:
shutil.copy2(binary, destdir)
except (IOError, OSError) as err:
raise EasyBuildError("Copying binaries to installation dir %s failed: %s", destdir, err)
def sanity_check_step(self):
"""Custom sanity check for Bowtie."""
binaries = ['bowtie', 'bowtie-build', 'bowtie-inspect']
if LooseVersion(self.version) > LooseVersion('1.1.0'):
binaries.extend(['bowtie-align-l', 'bowtie-align-s', 'bowtie-build-l', 'bowtie-build-s',
'bowtie-inspect-l', 'bowtie-inspect-s'])
custom_paths = {
'files': [os.path.join('bin', x) for x in binaries],
'dirs': []
}
super(EB_Bowtie, self).sanity_check_step(custom_paths=custom_paths)
| akesandgren/easybuild-easyblocks | easybuild/easyblocks/b/bowtie.py | Python | gpl-2.0 | 3,637 | ["Bowtie"] | ee2bfae2a1050aab8ed9943224b21565ef9e54eaf54ad913e67dea306f26f6f7 |
# -*- coding: utf-8 -*-
import datetime
from email.utils import parseaddr
import re
import django_otp
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.http import HttpResponse, HttpRequest
from django.test import TestCase, override_settings
from django.utils.timezone import now as timezone_now
from django.core.exceptions import ValidationError
from two_factor.utils import default_device
from mock import patch, MagicMock
from zerver.lib.test_helpers import MockLDAP, get_test_image_file, avatar_disk_path
from confirmation.models import Confirmation, create_confirmation_link, MultiuseInvite, \
generate_key, confirmation_url, get_object_from_key, ConfirmationKeyException, \
one_click_unsubscribe_link
from confirmation import settings as confirmation_settings
from zerver.forms import HomepageForm, WRONG_SUBDOMAIN_ERROR, check_subdomain_available
from zerver.lib.actions import do_change_password
from zerver.lib.exceptions import CannotDeactivateLastUserError
from zerver.decorator import do_two_factor_login
from zerver.views.auth import login_or_register_remote_user, \
redirect_and_log_into_subdomain, start_two_factor_auth
from zerver.views.invite import get_invitee_emails_set
from zerver.views.registration import confirmation_key, \
send_confirm_registration_email
from zerver.models import (
get_realm, get_user, get_stream_recipient,
PreregistrationUser, Realm, RealmDomain, Recipient, Message,
ScheduledEmail, UserProfile, UserMessage,
Stream, Subscription, flush_per_request_caches
)
from zerver.lib.actions import (
set_default_streams,
do_change_is_admin,
get_stream,
do_create_realm,
do_create_default_stream_group,
do_add_default_stream,
)
from zerver.lib.send_email import send_email, send_future_email, FromAddress
from zerver.lib.initial_password import initial_password
from zerver.lib.actions import (
do_deactivate_realm,
do_deactivate_user,
do_set_realm_property,
add_new_user_history,
)
from zerver.lib.avatar import avatar_url
from zerver.lib.mobile_auth_otp import xor_hex_strings, ascii_to_hex, \
otp_encrypt_api_key, is_valid_otp, hex_to_ascii, otp_decrypt_api_key
from zerver.lib.notifications import enqueue_welcome_emails, \
followup_day2_email_delay
from zerver.lib.subdomains import is_root_domain_available
from zerver.lib.test_helpers import find_key_by_email, queries_captured, \
HostRequestMock, load_subdomain_token
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.test_runner import slow
from zerver.lib.sessions import get_session_dict_user
from zerver.lib.name_restrictions import is_disposable_domain
from zerver.context_processors import common_context
from collections import defaultdict
import smtplib
import ujson
from typing import Any, Dict, List, Optional, Set
import urllib
import os
import pytz
class RedirectAndLogIntoSubdomainTestCase(ZulipTestCase):
def test_cookie_data(self) -> None:
realm = Realm.objects.all().first()
name = 'Hamlet'
email = self.example_email("hamlet")
response = redirect_and_log_into_subdomain(realm, name, email)
data = load_subdomain_token(response)
self.assertDictEqual(data, {'name': name, 'next': '',
'email': email,
'subdomain': realm.subdomain,
'is_signup': False})
response = redirect_and_log_into_subdomain(realm, name, email,
is_signup=True)
data = load_subdomain_token(response)
self.assertDictEqual(data, {'name': name, 'next': '',
'email': email,
'subdomain': realm.subdomain,
'is_signup': True})
class DeactivationNoticeTestCase(ZulipTestCase):
def test_redirection_for_deactivated_realm(self) -> None:
realm = get_realm("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
for url in ('/register/', '/login/'):
result = self.client_get(url)
self.assertEqual(result.status_code, 302)
self.assertIn('deactivated', result.url)
def test_redirection_for_active_realm(self) -> None:
for url in ('/register/', '/login/'):
result = self.client_get(url)
self.assertEqual(result.status_code, 200)
def test_deactivation_notice_when_realm_is_active(self) -> None:
result = self.client_get('/accounts/deactivated/')
self.assertEqual(result.status_code, 302)
self.assertIn('login', result.url)
def test_deactivation_notice_when_deactivated(self) -> None:
realm = get_realm("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.client_get('/accounts/deactivated/')
self.assertIn("Zulip Dev, has been deactivated.", result.content.decode())
class AddNewUserHistoryTest(ZulipTestCase):
def test_add_new_user_history_race(self) -> None:
"""Sends a message during user creation"""
# Create a user who hasn't had historical messages added
stream_dict = {
"Denmark": {"description": "A Scandinavian country", "invite_only": False},
"Verona": {"description": "A city in Italy", "invite_only": False}
} # type: Dict[str, Dict[str, Any]]
realm = get_realm('zulip')
set_default_streams(realm, stream_dict)
with patch("zerver.lib.actions.add_new_user_history"):
self.register(self.nonreg_email('test'), "test")
user_profile = self.nonreg_user('test')
subs = Subscription.objects.select_related("recipient").filter(
user_profile=user_profile, recipient__type=Recipient.STREAM)
streams = Stream.objects.filter(id__in=[sub.recipient.type_id for sub in subs])
self.send_stream_message(self.example_email('hamlet'), streams[0].name, "test")
add_new_user_history(user_profile, streams)
class InitialPasswordTest(ZulipTestCase):
def test_none_initial_password_salt(self) -> None:
with self.settings(INITIAL_PASSWORD_SALT=None):
self.assertIsNone(initial_password('test@test.com'))
class PasswordResetTest(ZulipTestCase):
"""
Log in, reset password, log out, log in with new password.
"""
def test_password_reset(self) -> None:
email = self.example_email("hamlet")
old_password = initial_password(email)
self.login(email)
# test password reset template
result = self.client_get('/accounts/password/reset/')
self.assert_in_response('Reset your password', result)
# start the password reset process by supplying an email address
result = self.client_post('/accounts/password/reset/', {'email': email})
# check the redirect link telling you to check mail for password reset link
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/password/reset/done/"))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email in a few minutes to finish the process.", result)
# Check that the password reset email is from a noreply address.
from django.core.mail import outbox
from_email = outbox[0].from_email
self.assertIn("Zulip Account Security", from_email)
tokenized_no_reply_email = parseaddr(from_email)[1]
self.assertTrue(re.search(self.TOKENIZED_NOREPLY_REGEX, tokenized_no_reply_email))
self.assertIn("Psst. Word on the street is that you", outbox[0].body)
# Visit the password reset link.
password_reset_url = self.get_confirmation_url_from_outbox(
email, url_pattern=settings.EXTERNAL_HOST + r"(\S+)")
result = self.client_get(password_reset_url)
self.assertEqual(result.status_code, 200)
# Reset your password
result = self.client_post(password_reset_url,
{'new_password1': 'new_password',
'new_password2': 'new_password'})
# password reset succeeded
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith("/password/done/"))
# log back in with new password
self.login(email, password='new_password')
user_profile = self.example_user('hamlet')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
# make sure old password no longer works
self.login(email, password=old_password, fails=True)
def test_password_reset_for_non_existent_user(self) -> None:
email = 'nonexisting@mars.com'
# start the password reset process by supplying an email address
result = self.client_post('/accounts/password/reset/', {'email': email})
# check the redirect link telling you to check mail for password reset link
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/password/reset/done/"))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email in a few minutes to finish the process.", result)
# Check that the password reset email is from a noreply address.
from django.core.mail import outbox
from_email = outbox[0].from_email
self.assertIn("Zulip Account Security", from_email)
tokenized_no_reply_email = parseaddr(from_email)[1]
self.assertTrue(re.search(self.TOKENIZED_NOREPLY_REGEX, tokenized_no_reply_email))
self.assertIn('Someone (possibly you) requested a password',
outbox[0].body)
self.assertNotIn('does have an active account in the zulip.testserver',
outbox[0].body)
def test_password_reset_for_deactivated_user(self) -> None:
user_profile = self.example_user("hamlet")
email = user_profile.email
do_deactivate_user(user_profile)
# start the password reset process by supplying an email address
result = self.client_post('/accounts/password/reset/', {'email': email})
# check the redirect link telling you to check mail for password reset link
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/password/reset/done/"))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email in a few minutes to finish the process.", result)
# Check that the password reset email is from a noreply address.
from django.core.mail import outbox
from_email = outbox[0].from_email
self.assertIn("Zulip Account Security", from_email)
tokenized_no_reply_email = parseaddr(from_email)[1]
self.assertTrue(re.search(self.TOKENIZED_NOREPLY_REGEX, tokenized_no_reply_email))
self.assertIn('Someone (possibly you) requested a password',
outbox[0].body)
self.assertNotIn('does have an active account in the zulip.testserver',
outbox[0].body)
self.assertIn('but your account has been deactivated',
outbox[0].body)
def test_password_reset_with_deactivated_realm(self) -> None:
user_profile = self.example_user("hamlet")
email = user_profile.email
do_deactivate_realm(user_profile.realm)
# start the password reset process by supplying an email address
with patch('logging.info') as mock_logging:
result = self.client_post('/accounts/password/reset/', {'email': email})
mock_logging.assert_called_once()
# check the redirect link telling you to check mail for password reset link
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/password/reset/done/"))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email in a few minutes to finish the process.", result)
# Check that the password reset email is from a noreply address.
from django.core.mail import outbox
self.assertEqual(len(outbox), 0)
def test_wrong_subdomain(self) -> None:
email = self.example_email("hamlet")
# start the password reset process by supplying an email address
result = self.client_post(
'/accounts/password/reset/', {'email': email},
subdomain="zephyr")
# check the redirect link telling you to check mail for password reset link
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/password/reset/done/"))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email in a few minutes to finish the process.", result)
from django.core.mail import outbox
self.assertEqual(len(outbox), 1)
message = outbox.pop()
tokenized_no_reply_email = parseaddr(message.from_email)[1]
self.assertTrue(re.search(self.TOKENIZED_NOREPLY_REGEX, tokenized_no_reply_email))
self.assertIn('Someone (possibly you) requested a password reset email for',
message.body)
self.assertIn("but you do not have an account in that organization",
message.body)
self.assertIn("You do have active accounts in the following organization(s).\nhttp://zulip.testserver",
message.body)
def test_invalid_subdomain(self) -> None:
email = self.example_email("hamlet")
# start the password reset process by supplying an email address
result = self.client_post(
'/accounts/password/reset/', {'email': email},
subdomain="invalid")
# check the redirect link telling you to check mail for password reset link
self.assertEqual(result.status_code, 200)
self.assert_in_success_response(["There is no Zulip organization hosted at this subdomain."],
result)
from django.core.mail import outbox
self.assertEqual(len(outbox), 0)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
'zproject.backends.ZulipDummyBackend'))
def test_ldap_auth_only(self) -> None:
"""If the email auth backend is not enabled, password reset should do nothing"""
email = self.example_email("hamlet")
with patch('logging.info') as mock_logging:
result = self.client_post('/accounts/password/reset/', {'email': email})
mock_logging.assert_called_once()
# check the redirect link telling you to check mail for password reset link
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/password/reset/done/"))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email in a few minutes to finish the process.", result)
from django.core.mail import outbox
self.assertEqual(len(outbox), 0)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
'zproject.backends.EmailAuthBackend',
'zproject.backends.ZulipDummyBackend'))
def test_ldap_and_email_auth(self) -> None:
"""If both email and ldap auth backends are enabled, limit password
reset to users outside the LDAP domain"""
# If the domain matches, we don't generate an email
with self.settings(LDAP_APPEND_DOMAIN="zulip.com"):
email = self.example_email("hamlet")
with patch('logging.info') as mock_logging:
result = self.client_post('/accounts/password/reset/', {'email': email})
mock_logging.assert_called_once_with("Password reset not allowed for user in LDAP domain")
from django.core.mail import outbox
self.assertEqual(len(outbox), 0)
# If the domain doesn't match, we do generate an email
with self.settings(LDAP_APPEND_DOMAIN="example.com"):
email = self.example_email("hamlet")
with patch('logging.info') as mock_logging:
result = self.client_post('/accounts/password/reset/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/password/reset/done/"))
result = self.client_get(result["Location"])
self.assertEqual(len(outbox), 1)
message = outbox.pop()
tokenized_no_reply_email = parseaddr(message.from_email)[1]
self.assertTrue(re.search(self.TOKENIZED_NOREPLY_REGEX, tokenized_no_reply_email))
self.assertIn('Psst. Word on the street is that you need a new password',
message.body)
def test_redirect_endpoints(self) -> None:
'''
These tests are mostly designed to give us 100% URL coverage
in our URL coverage reports. Our mechanism for finding URL
coverage doesn't handle redirects, so we just have a few quick
tests here.
'''
result = self.client_get('/accounts/password/reset/done/')
self.assert_in_success_response(["Check your email"], result)
result = self.client_get('/accounts/password/done/')
self.assert_in_success_response(["We've reset your password!"], result)
result = self.client_get('/accounts/send_confirm/alice@example.com')
self.assert_in_success_response(["/accounts/home/"], result)
result = self.client_get('/accounts/new/send_confirm/alice@example.com')
self.assert_in_success_response(["/new/"], result)
class LoginTest(ZulipTestCase):
"""
Logging in, registration, and logging out.
"""
def test_login(self) -> None:
self.login(self.example_email("hamlet"))
user_profile = self.example_user('hamlet')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_login_deactivated_user(self) -> None:
user_profile = self.example_user('hamlet')
do_deactivate_user(user_profile)
result = self.login_with_return(self.example_email("hamlet"), "xxx")
self.assertEqual(result.status_code, 200)
self.assert_in_response("Your account is no longer active.", result)
self.assertIsNone(get_session_dict_user(self.client.session))
def test_login_bad_password(self) -> None:
email = self.example_email("hamlet")
result = self.login_with_return(email, password="wrongpassword")
self.assert_in_success_response([email], result)
self.assertIsNone(get_session_dict_user(self.client.session))
def test_login_nonexist_user(self) -> None:
result = self.login_with_return("xxx@zulip.com", "xxx")
self.assertEqual(result.status_code, 200)
self.assert_in_response("Please enter a correct email and password", result)
self.assertIsNone(get_session_dict_user(self.client.session))
def test_login_wrong_subdomain(self) -> None:
with patch("logging.warning") as mock_warning:
result = self.login_with_return(self.mit_email("sipbtest"), "xxx")
mock_warning.assert_called_once()
self.assertEqual(result.status_code, 200)
self.assert_in_response("Your Zulip account is not a member of the "
"organization associated with this subdomain.", result)
self.assertIsNone(get_session_dict_user(self.client.session))
def test_login_invalid_subdomain(self) -> None:
result = self.login_with_return(self.example_email("hamlet"), "xxx",
subdomain="invalid")
self.assertEqual(result.status_code, 200)
self.assert_in_response("There is no Zulip organization hosted at this subdomain.", result)
self.assertIsNone(get_session_dict_user(self.client.session))
def test_register(self) -> None:
realm = get_realm("zulip")
stream_dict = {"stream_"+str(i): {"description": "stream_%s_description" % i, "invite_only": False}
for i in range(40)} # type: Dict[str, Dict[str, Any]]
for stream_name in stream_dict.keys():
self.make_stream(stream_name, realm=realm)
set_default_streams(realm, stream_dict)
# Clear all the caches.
flush_per_request_caches()
ContentType.objects.clear_cache()
Site.objects.clear_cache()
with queries_captured() as queries:
self.register(self.nonreg_email('test'), "test")
# Ensure the number of queries we make is not O(streams)
self.assert_length(queries, 79)
user_profile = self.nonreg_user('test')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
self.assertFalse(user_profile.enable_stream_desktop_notifications)
def test_register_deactivated(self) -> None:
"""
If you try to register for a deactivated realm, you get a clear error
page.
"""
realm = get_realm("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.client_post('/accounts/home/', {'email': self.nonreg_email('test')},
subdomain="zulip")
self.assertEqual(result.status_code, 302)
self.assertEqual('/accounts/deactivated/', result.url)
with self.assertRaises(UserProfile.DoesNotExist):
self.nonreg_user('test')
def test_register_deactivated_partway_through(self) -> None:
"""
If the realm is deactivated partway through the registration flow,
you still get a clear error page at the final step.
"""
email = self.nonreg_email('test')
result = self.client_post('/accounts/home/', {'email': email},
subdomain="zulip")
self.assertEqual(result.status_code, 302)
self.assertNotIn('deactivated', result.url)
realm = get_realm("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.submit_reg_form_for_user(email, "abcd1234", subdomain="zulip")
self.assertEqual(result.status_code, 302)
self.assertEqual('/accounts/deactivated/', result.url)
with self.assertRaises(UserProfile.DoesNotExist):
self.nonreg_user('test')
def test_login_deactivated_realm(self) -> None:
"""
If you try to log in to a deactivated realm, you get a clear error page.
"""
realm = get_realm("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.login_with_return(self.example_email("hamlet"), subdomain="zulip")
self.assertEqual(result.status_code, 302)
self.assertEqual('/accounts/deactivated/', result.url)
def test_logout(self) -> None:
self.login(self.example_email("hamlet"))
# We use the logout API, not self.logout, to make sure we test
# the actual logout code path.
self.client_post('/accounts/logout/')
self.assertIsNone(get_session_dict_user(self.client.session))
def test_non_ascii_login(self) -> None:
"""
You can log in even if your password contains non-ASCII characters.
"""
email = self.nonreg_email('test')
password = u"hümbüǵ"
# Registering succeeds.
self.register(email, password)
user_profile = self.nonreg_user('test')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
self.logout()
self.assertIsNone(get_session_dict_user(self.client.session))
# Logging in succeeds.
self.logout()
self.login(email, password)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
@override_settings(TWO_FACTOR_AUTHENTICATION_ENABLED=False)
def test_login_page_redirects_logged_in_user(self) -> None:
"""You will be redirected to the app's main page if you land on the
login page when already logged in.
"""
self.login(self.example_email("cordelia"))
response = self.client_get("/login/")
self.assertEqual(response["Location"], "http://zulip.testserver")
def test_options_request_to_login_page(self) -> None:
response = self.client_options('/login/')
self.assertEqual(response.status_code, 200)
@override_settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True)
def test_login_page_redirects_logged_in_user_under_2fa(self) -> None:
"""You will be redirected to the app's main page if you land on the
login page when already logged in.
"""
user_profile = self.example_user("cordelia")
self.create_default_device(user_profile)
self.login(self.example_email("cordelia"))
self.login_2fa(user_profile)
response = self.client_get("/login/")
self.assertEqual(response["Location"], "http://zulip.testserver")
def test_start_two_factor_auth(self) -> None:
request = MagicMock(POST=dict())
with patch('zerver.views.auth.TwoFactorLoginView') as mock_view:
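# TwoFactorLoginView.as_view() normally returns a view callable; stubbing
# it with a lambda that returns a bare HttpResponse lets us exercise
# start_two_factor_auth's dispatch without running a real 2FA flow.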
mock_view.as_view.return_value = lambda *a, **k: HttpResponse()
response = start_two_factor_auth(request)
self.assertTrue(isinstance(response, HttpResponse))
def test_do_two_factor_login(self) -> None:
user_profile = self.example_user('hamlet')
self.create_default_device(user_profile)
request = MagicMock()
with patch('zerver.decorator.django_otp.login') as mock_login:
do_two_factor_login(request, user_profile)
mock_login.assert_called_once()
class InviteUserBase(ZulipTestCase):
def check_sent_emails(self, correct_recipients: List[str],
custom_from_name: Optional[str]=None) -> None:
from django.core.mail import outbox
self.assertEqual(len(outbox), len(correct_recipients))
email_recipients = [email.recipients()[0] for email in outbox]
self.assertEqual(sorted(email_recipients), sorted(correct_recipients))
if len(outbox) == 0:
return
if custom_from_name is not None:
self.assertIn(custom_from_name, outbox[0].from_email)
tokenized_no_reply_email = parseaddr(outbox[0].from_email)[1]
self.assertTrue(re.search(self.TOKENIZED_NOREPLY_REGEX, tokenized_no_reply_email))
def invite(self, users: str, streams: List[str], body: str='',
invite_as_admin: str="false") -> HttpResponse:
"""
Invites the specified users to Zulip with the specified streams.
users should be a string containing the users to invite, comma or
newline separated.
streams should be a list of strings.
"""
return self.client_post("/json/invites",
{"invitee_emails": users,
"stream": streams,
"invite_as_admin": invite_as_admin})
class InviteUserTest(InviteUserBase):
def test_successful_invite_user(self) -> None:
"""
A call to /json/invites with valid parameters causes an invitation
email to be sent.
"""
self.login(self.example_email("hamlet"))
invitee = "alice-test@zulip.com"
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee], custom_from_name="Hamlet")
def test_newbie_restrictions(self) -> None:
user_profile = self.example_user('hamlet')
invitee = "alice-test@zulip.com"
stream_name = 'Denmark'
self.login(user_profile.email)
result = self.invite(invitee, [stream_name])
self.assert_json_success(result)
user_profile.date_joined = timezone_now() - datetime.timedelta(days=10)
user_profile.save()
with self.settings(INVITES_MIN_USER_AGE_DAYS=5):
result = self.invite(invitee, [stream_name])
self.assert_json_success(result)
with self.settings(INVITES_MIN_USER_AGE_DAYS=15):
result = self.invite(invitee, [stream_name])
self.assert_json_error_contains(result, "Your account is too new")
def test_invite_limits(self) -> None:
user_profile = self.example_user('hamlet')
realm = user_profile.realm
stream_name = 'Denmark'
# These constants only need to be in descending order
# for this test to trigger an InvitationError based
# on max daily counts.
site_max = 50
realm_max = 40
num_invitees = 30
max_daily_count = 20
daily_counts = [(1, max_daily_count)]
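# Note: each daily_counts entry is assumed to be a (realm_age_in_days,
# max_invites_allowed) pair, consumed via INVITES_NEW_REALM_LIMIT_DAYS below.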
invite_emails = [
'foo-%02d@zulip.com' % (i,)
for i in range(num_invitees)
]
invitees = ','.join(invite_emails)
self.login(user_profile.email)
realm.max_invites = realm_max
realm.date_created = timezone_now()
realm.save()
def try_invite() -> HttpResponse:
with self.settings(OPEN_REALM_CREATION=True,
INVITES_DEFAULT_REALM_DAILY_MAX=site_max,
INVITES_NEW_REALM_LIMIT_DAYS=daily_counts):
result = self.invite(invitees, [stream_name])
return result
result = try_invite()
self.assert_json_error_contains(result, 'enough remaining invites')
# Next show that aggregate limits expire once the realm is old
# enough.
realm.date_created = timezone_now() - datetime.timedelta(days=8)
realm.save()
result = try_invite()
self.assert_json_success(result)
# Next get line coverage on bumping a realm's max_invites.
realm.date_created = timezone_now()
realm.max_invites = site_max + 10
realm.save()
result = try_invite()
self.assert_json_success(result)
# Finally get coverage on the case that OPEN_REALM_CREATION is False.
with self.settings(OPEN_REALM_CREATION=False):
result = self.invite(invitees, [stream_name])
self.assert_json_success(result)
def test_successful_invite_user_as_admin_from_admin_account(self) -> None:
"""
Test that a new user invited as an administrator by a realm
administrator becomes a realm administrator upon registering.
"""
self.login(self.example_email('iago'))
invitee = self.nonreg_email('alice')
self.assert_json_success(self.invite(invitee, ["Denmark"], invite_as_admin="true"))
self.assertTrue(find_key_by_email(invitee))
self.submit_reg_form_for_user(invitee, "password")
invitee_profile = self.nonreg_user('alice')
self.assertTrue(invitee_profile.is_realm_admin)
def test_invite_user_as_admin_from_normal_account(self) -> None:
"""
Test that a non-admin cannot invite new users as organization
administrators.
"""
self.login(self.example_email('hamlet'))
invitee = self.nonreg_email('alice')
response = self.invite(invitee, ["Denmark"], invite_as_admin="true")
self.assert_json_error(response, "Must be an organization administrator")
def test_successful_invite_user_with_name(self) -> None:
"""
A call to /json/invites with valid parameters causes an invitation
email to be sent.
"""
self.login(self.example_email("hamlet"))
email = "alice-test@zulip.com"
invitee = "Alice Test <{}>".format(email)
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.check_sent_emails([email], custom_from_name="Hamlet")
def test_successful_invite_user_with_name_and_normal_one(self) -> None:
"""
A call to /json/invites with valid parameters causes an invitation
email to be sent.
"""
self.login(self.example_email("hamlet"))
email = "alice-test@zulip.com"
email2 = "bob-test@zulip.com"
invitee = "Alice Test <{}>, {}".format(email, email2)
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.assertTrue(find_key_by_email(email2))
self.check_sent_emails([email, email2], custom_from_name="Hamlet")
def test_require_realm_admin(self) -> None:
"""
The invite_by_admins_only realm setting works properly.
"""
realm = get_realm('zulip')
realm.invite_by_admins_only = True
realm.save()
self.login("hamlet@zulip.com")
email = "alice-test@zulip.com"
email2 = "bob-test@zulip.com"
invitee = "Alice Test <{}>, {}".format(email, email2)
self.assert_json_error(self.invite(invitee, ["Denmark"]),
"Must be an organization administrator")
# Now verify an administrator can do it
self.login("iago@zulip.com")
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.assertTrue(find_key_by_email(email2))
self.check_sent_emails([email, email2])
def test_successful_invite_user_with_notifications_stream(self) -> None:
"""
A call to /json/invites with valid parameters unconditionally
subscribes the invitee to the notifications stream if it exists and is
public.
"""
realm = get_realm('zulip')
notifications_stream = get_stream('Verona', realm)
realm.notifications_stream_id = notifications_stream.id
realm.save()
self.login(self.example_email("hamlet"))
invitee = 'alice-test@zulip.com'
self.assert_json_success(self.invite(invitee, ['Denmark']))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee])
prereg_user = PreregistrationUser.objects.get(email=invitee)
stream_ids = [stream.id for stream in prereg_user.streams.all()]
self.assertTrue(notifications_stream.id in stream_ids)
def test_invite_user_signup_initial_history(self) -> None:
"""
Test that a new user invited to a stream receives some initial
history but only from public streams.
"""
self.login(self.example_email('hamlet'))
user_profile = self.example_user('hamlet')
private_stream_name = "Secret"
self.make_stream(private_stream_name, invite_only=True)
self.subscribe(user_profile, private_stream_name)
public_msg_id = self.send_stream_message(
self.example_email("hamlet"),
"Denmark",
topic_name="Public topic",
content="Public message",
)
secret_msg_id = self.send_stream_message(
self.example_email("hamlet"),
private_stream_name,
topic_name="Secret topic",
content="Secret message",
)
invitee = self.nonreg_email('alice')
self.assert_json_success(self.invite(invitee, [private_stream_name, "Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.submit_reg_form_for_user(invitee, "password")
invitee_profile = self.nonreg_user('alice')
invitee_msg_ids = [um.message_id for um in
UserMessage.objects.filter(user_profile=invitee_profile)]
self.assertTrue(public_msg_id in invitee_msg_ids)
self.assertFalse(secret_msg_id in invitee_msg_ids)
self.assertFalse(invitee_profile.is_realm_admin)
# Test that exactly 2 new Zulip messages were sent, both notifications.
last_3_messages = list(reversed(list(Message.objects.all().order_by("-id")[0:3])))
first_msg = last_3_messages[0]
self.assertEqual(first_msg.id, secret_msg_id)
# The first, from notification-bot to the user who invited the new user.
second_msg = last_3_messages[1]
self.assertEqual(second_msg.sender.email, "notification-bot@zulip.com")
self.assertTrue(second_msg.content.startswith("alice_zulip.com <`alice@zulip.com`> accepted your"))
# The second, from welcome-bot to the user who was invited.
third_msg = last_3_messages[2]
self.assertEqual(third_msg.sender.email, "welcome-bot@zulip.com")
self.assertTrue(third_msg.content.startswith("Hello, and welcome to Zulip!"))
def test_multi_user_invite(self) -> None:
"""
Invites multiple users with a variety of delimiters.
"""
self.login(self.example_email("hamlet"))
# Intentionally use a weird string.
self.assert_json_success(self.invite(
"""bob-test@zulip.com, carol-test@zulip.com,
dave-test@zulip.com
earl-test@zulip.com""", ["Denmark"]))
for user in ("bob", "carol", "dave", "earl"):
self.assertTrue(find_key_by_email("%s-test@zulip.com" % (user,)))
self.check_sent_emails(["bob-test@zulip.com", "carol-test@zulip.com",
"dave-test@zulip.com", "earl-test@zulip.com"])
def test_max_invites_model(self) -> None:
realm = get_realm("zulip")
self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
realm.max_invites = 3
realm.save()
self.assertEqual(get_realm("zulip").max_invites, 3)
realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX
realm.save()
def test_invite_too_many_users(self) -> None:
# Only a light test of this pathway; e.g. doesn't test that
# the limit gets reset after 24 hours
self.login(self.example_email("iago"))
self.client_post("/json/invites",
{"invitee_emails": "1@zulip.com, 2@zulip.com",
"stream": ["Denmark"]}),
self.assert_json_error(
self.client_post("/json/invites",
{"invitee_emails": ", ".join(
[str(i) for i in range(get_realm("zulip").max_invites - 1)]),
"stream": ["Denmark"]}),
"You do not have enough remaining invites. "
"Please contact zulip-admin@example.com to have your limit raised. "
"No invitations were sent.")
def test_missing_or_invalid_params(self) -> None:
"""
Tests inviting with various missing or invalid parameters.
"""
self.login(self.example_email("hamlet"))
self.assert_json_error(
self.client_post("/json/invites",
{"invitee_emails": "foo@zulip.com"}),
"You must specify at least one stream for invitees to join.")
for address in ("noatsign.com", "outsideyourdomain@example.net"):
self.assert_json_error(
self.invite(address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
self.check_sent_emails([])
self.assert_json_error(
self.invite("", ["Denmark"]),
"You must specify at least one email address.")
self.check_sent_emails([])
def test_guest_user_invitation(self) -> None:
"""
Guest users can't invite new users.
"""
self.login(self.example_email("polonius"))
invitee = "alice-test@zulip.com"
self.assert_json_error(self.invite(invitee, ["Denmark"]), "Not allowed for guest users")
self.assertEqual(find_key_by_email(invitee), None)
self.check_sent_emails([])
def test_invalid_stream(self) -> None:
"""
Tests inviting to a non-existent stream.
"""
self.login(self.example_email("hamlet"))
self.assert_json_error(self.invite("iago-test@zulip.com", ["NotARealStream"]),
"Stream does not exist: NotARealStream. No invites were sent.")
self.check_sent_emails([])
def test_invite_existing_user(self) -> None:
"""
If you invite an address already using Zulip, no invitation is sent.
"""
self.login(self.example_email("hamlet"))
self.assert_json_error(
self.client_post("/json/invites",
{"invitee_emails": self.example_email("hamlet"),
"stream": ["Denmark"]}),
"We weren't able to invite anyone.")
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email=self.example_email("hamlet")))
self.check_sent_emails([])
def test_invite_some_existing_some_new(self) -> None:
"""
If you invite a mix of already existing and new users, invitations are
only sent to the new users.
"""
self.login(self.example_email("hamlet"))
existing = [self.example_email("hamlet"), u"othello@zulip.com"]
new = [u"foo-test@zulip.com", u"bar-test@zulip.com"]
result = self.client_post("/json/invites",
{"invitee_emails": "\n".join(existing + new),
"stream": ["Denmark"]})
self.assert_json_error(result,
"Some of those addresses are already using Zulip, \
so we didn't send them an invitation. We did send invitations to everyone else!")
# We only created accounts for the new users.
for email in existing:
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email=email))
for email in new:
self.assertTrue(PreregistrationUser.objects.get(email=email))
# We only sent emails to the new users.
self.check_sent_emails(new)
prereg_user = PreregistrationUser.objects.get(email='foo-test@zulip.com')
self.assertEqual(prereg_user.email, 'foo-test@zulip.com')
def test_invite_outside_domain_in_closed_realm(self) -> None:
"""
In a realm with `emails_restricted_to_domains = True`, you can't invite people
with a different domain from that of the realm or your e-mail address.
"""
zulip_realm = get_realm("zulip")
zulip_realm.emails_restricted_to_domains = True
zulip_realm.save()
self.login(self.example_email("hamlet"))
external_address = "foo@example.com"
self.assert_json_error(
self.invite(external_address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
def test_invite_using_disposable_email(self) -> None:
"""
In a realm with `disallow_disposable_email_addresses = True`, you can't invite
people with a disposable domain.
"""
zulip_realm = get_realm("zulip")
zulip_realm.emails_restricted_to_domains = False
zulip_realm.disallow_disposable_email_addresses = True
zulip_realm.save()
self.login(self.example_email("hamlet"))
external_address = "foo@mailnator.com"
self.assert_json_error(
self.invite(external_address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
def test_invite_outside_domain_in_open_realm(self) -> None:
"""
In a realm with `emails_restricted_to_domains = False`, you can invite people
with a different domain from that of the realm or your e-mail address.
"""
zulip_realm = get_realm("zulip")
zulip_realm.emails_restricted_to_domains = False
zulip_realm.save()
self.login(self.example_email("hamlet"))
external_address = "foo@example.com"
self.assert_json_success(self.invite(external_address, ["Denmark"]))
self.check_sent_emails([external_address])
def test_invite_outside_domain_before_closing(self) -> None:
"""
If you invite someone with a different domain from that of the realm
when `emails_restricted_to_domains = False`, but `emails_restricted_to_domains` later
changes to true, the invitation should succeed but the invitee's signup
attempt should fail.
"""
zulip_realm = get_realm("zulip")
zulip_realm.emails_restricted_to_domains = False
zulip_realm.save()
self.login(self.example_email("hamlet"))
external_address = "foo@example.com"
self.assert_json_success(self.invite(external_address, ["Denmark"]))
self.check_sent_emails([external_address])
zulip_realm.emails_restricted_to_domains = True
zulip_realm.save()
result = self.submit_reg_form_for_user("foo@example.com", "password")
self.assertEqual(result.status_code, 200)
self.assert_in_response("only allows users with email addresses", result)
def test_disposable_emails_before_closing(self) -> None:
"""
If you invite someone with a disposable email when
`disallow_disposable_email_addresses = False`, but the
setting later changes to True, the invitation should succeed
but the invitee's signup attempt should fail.
"""
zulip_realm = get_realm("zulip")
zulip_realm.emails_restricted_to_domains = False
zulip_realm.disallow_disposable_email_addresses = False
zulip_realm.save()
self.login(self.example_email("hamlet"))
external_address = "foo@mailnator.com"
self.assert_json_success(self.invite(external_address, ["Denmark"]))
self.check_sent_emails([external_address])
zulip_realm.disallow_disposable_email_addresses = True
zulip_realm.save()
result = self.submit_reg_form_for_user("foo@mailnator.com", "password")
self.assertEqual(result.status_code, 200)
self.assert_in_response("Please sign up using a real email address.", result)
def test_invite_with_email_containing_plus_before_closing(self) -> None:
"""
If you invite someone with an email containing plus when
`emails_restricted_to_domains = False`, but later change
`emails_restricted_to_domains = True`, the invitation should
succeed but the invitee's signup attempt should fail, as
users are not allowed to sign up with an email containing '+'
when the realm is restricted to a domain.
"""
zulip_realm = get_realm("zulip")
zulip_realm.emails_restricted_to_domains = False
zulip_realm.save()
self.login(self.example_email("hamlet"))
external_address = "foo+label@zulip.com"
self.assert_json_success(self.invite(external_address, ["Denmark"]))
self.check_sent_emails([external_address])
zulip_realm.emails_restricted_to_domains = True
zulip_realm.save()
result = self.submit_reg_form_for_user(external_address, "password")
self.assertEqual(result.status_code, 200)
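# The asserted text below (including its odd grammar) is assumed to match
# the signup error template verbatim, so it is left uncorrected here.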
self.assert_in_response("Zulip Dev, does not allow signups using emails\n that contains +", result)
def test_invalid_email_check_after_confirming_email(self) -> None:
self.login(self.example_email("hamlet"))
email = "test@zulip.com"
self.assert_json_success(self.invite(email, ["Denmark"]))
obj = Confirmation.objects.get(confirmation_key=find_key_by_email(email))
prereg_user = obj.content_object
prereg_user.email = "invalid.email"
prereg_user.save()
result = self.submit_reg_form_for_user(email, "password")
self.assertEqual(result.status_code, 200)
self.assert_in_response("The email address you are trying to sign up with is not valid", result)
def test_invite_with_non_ascii_streams(self) -> None:
"""
Inviting someone to streams with non-ASCII characters succeeds.
"""
self.login(self.example_email("hamlet"))
invitee = "alice-test@zulip.com"
stream_name = u"hümbüǵ"
# Make sure we're subscribed before inviting someone.
self.subscribe(self.example_user("hamlet"), stream_name)
self.assert_json_success(self.invite(invitee, [stream_name]))
def test_invitation_reminder_email(self) -> None:
from django.core.mail import outbox
# All users belong to zulip realm
referrer_user = 'hamlet'
current_user_email = self.example_email(referrer_user)
self.login(current_user_email)
invitee_email = self.nonreg_email('alice')
self.assert_json_success(self.invite(invitee_email, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee_email))
self.check_sent_emails([invitee_email])
data = {"email": invitee_email, "referrer_email": current_user_email}
invitee = PreregistrationUser.objects.get(email=data["email"])
referrer = self.example_user(referrer_user)
link = create_confirmation_link(invitee, referrer.realm.host, Confirmation.INVITATION)
context = common_context(referrer)
context.update({
'activate_url': link,
'referrer_name': referrer.full_name,
'referrer_email': referrer.email,
'referrer_realm_name': referrer.realm.name,
})
with self.settings(EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend'):
email = data["email"]
send_future_email(
"zerver/emails/invitation_reminder", referrer.realm, to_emails=[email],
from_address=FromAddress.NOREPLY, context=context)
email_jobs_to_deliver = ScheduledEmail.objects.filter(
scheduled_timestamp__lte=timezone_now())
self.assertEqual(len(email_jobs_to_deliver), 1)
email_count = len(outbox)
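# ScheduledEmail rows store the send_email() keyword arguments as JSON in
# job.data, so delivering a job is just unpacking that payload: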
for job in email_jobs_to_deliver:
send_email(**ujson.loads(job.data))
self.assertEqual(len(outbox), email_count + 1)
self.assertIn(FromAddress.NOREPLY, outbox[-1].from_email)
# Now verify that signing up clears invite_reminder emails
email_jobs_to_deliver = ScheduledEmail.objects.filter(
scheduled_timestamp__lte=timezone_now(), type=ScheduledEmail.INVITATION_REMINDER)
self.assertEqual(len(email_jobs_to_deliver), 1)
self.register(invitee_email, "test")
email_jobs_to_deliver = ScheduledEmail.objects.filter(
scheduled_timestamp__lte=timezone_now(), type=ScheduledEmail.INVITATION_REMINDER)
self.assertEqual(len(email_jobs_to_deliver), 0)
# make sure users can't take a valid confirmation key from another
# pathway and use it with the invitation url route
def test_confirmation_key_of_wrong_type(self) -> None:
user = self.example_user('hamlet')
url = create_confirmation_link(user, 'host', Confirmation.USER_REGISTRATION)
registration_key = url.split('/')[-1]
# Mainly a test of get_object_from_key, rather than of the invitation pathway
with self.assertRaises(ConfirmationKeyException) as cm:
get_object_from_key(registration_key, Confirmation.INVITATION)
self.assertEqual(cm.exception.error_type, ConfirmationKeyException.DOES_NOT_EXIST)
# Verify that using the wrong type doesn't work in the main confirm code path
email_change_url = create_confirmation_link(user, 'host', Confirmation.EMAIL_CHANGE)
email_change_key = email_change_url.split('/')[-1]
url = '/accounts/do_confirm/' + email_change_key
result = self.client_get(url)
self.assert_in_success_response(["Whoops. We couldn't find your "
"confirmation link in the system."], result)
def test_confirmation_expired(self) -> None:
user = self.example_user('hamlet')
url = create_confirmation_link(user, 'host', Confirmation.USER_REGISTRATION)
registration_key = url.split('/')[-1]
conf = Confirmation.objects.filter(confirmation_key=registration_key).first()
conf.date_sent -= datetime.timedelta(weeks=3)
conf.save()
target_url = '/' + url.split('/', 3)[3]
result = self.client_get(target_url)
self.assert_in_success_response(["Whoops. The confirmation link has expired "
"or been deactivated."], result)
class InvitationsTestCase(InviteUserBase):
def test_successful_get_open_invitations(self) -> None:
"""
A GET call to /json/invites returns all unexpired invitations.
"""
days_to_activate = getattr(settings, 'ACCOUNT_ACTIVATION_DAYS', "Wrong")
active_value = getattr(confirmation_settings, 'STATUS_ACTIVE', "Wrong")
self.assertNotEqual(days_to_activate, "Wrong")
self.assertNotEqual(active_value, "Wrong")
self.login(self.example_email("iago"))
user_profile = self.example_user("iago")
prereg_user_one = PreregistrationUser(email="TestOne@zulip.com", referred_by=user_profile)
prereg_user_one.save()
expired_datetime = timezone_now() - datetime.timedelta(days=(days_to_activate+1))
prereg_user_two = PreregistrationUser(email="TestTwo@zulip.com", referred_by=user_profile)
prereg_user_two.save()
PreregistrationUser.objects.filter(id=prereg_user_two.id).update(invited_at=expired_datetime)
prereg_user_three = PreregistrationUser(email="TestThree@zulip.com",
referred_by=user_profile, status=active_value)
prereg_user_three.save()
result = self.client_get("/json/invites")
self.assertEqual(result.status_code, 200)
self.assert_in_success_response(["TestOne@zulip.com"], result)
self.assert_not_in_success_response(["TestTwo@zulip.com", "TestThree@zulip.com"], result)
def test_successful_delete_invitation(self) -> None:
"""
A DELETE call to /json/invites/<ID> should delete the invite and
any scheduled invitation reminder emails.
"""
self.login(self.example_email("iago"))
invitee = "DeleteMe@zulip.com"
self.assert_json_success(self.invite(invitee, ['Denmark']))
prereg_user = PreregistrationUser.objects.get(email=invitee)
# Verify that the scheduled email exists.
ScheduledEmail.objects.get(address__iexact=invitee,
type=ScheduledEmail.INVITATION_REMINDER)
result = self.client_delete('/json/invites/' + str(prereg_user.id))
self.assertEqual(result.status_code, 200)
error_result = self.client_delete('/json/invites/' + str(prereg_user.id))
self.assert_json_error(error_result, "No such invitation")
self.assertRaises(ScheduledEmail.DoesNotExist,
lambda: ScheduledEmail.objects.get(address__iexact=invitee,
type=ScheduledEmail.INVITATION_REMINDER))
def test_successful_resend_invitation(self) -> None:
"""
A POST call to /json/invites/<ID>/resend should send an invitation reminder email
and delete any scheduled invitation reminder email.
"""
self.login(self.example_email("iago"))
invitee = "resend_me@zulip.com"
self.assert_json_success(self.invite(invitee, ['Denmark']))
prereg_user = PreregistrationUser.objects.get(email=invitee)
# Verify and then clear from the outbox the original invite email
self.check_sent_emails([invitee], custom_from_name="Zulip")
from django.core.mail import outbox
outbox.pop()
# Verify that the scheduled email exists.
scheduledemail_filter = ScheduledEmail.objects.filter(
address=invitee, type=ScheduledEmail.INVITATION_REMINDER)
self.assertEqual(scheduledemail_filter.count(), 1)
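# values_list() returns a lazy QuerySet; materialize it now so the
# comparison after the resend sees the original timestamp rather than
# re-querying (and so assertNotEqual compares values, not objects).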
original_timestamp = list(scheduledemail_filter.values_list('scheduled_timestamp', flat=True))
# Resend invite
result = self.client_post('/json/invites/' + str(prereg_user.id) + '/resend')
self.assertEqual(ScheduledEmail.objects.filter(
address=invitee, type=ScheduledEmail.INVITATION_REMINDER).count(), 1)
# Check that we have exactly one scheduled email, and that it is different
self.assertEqual(scheduledemail_filter.count(), 1)
self.assertNotEqual(original_timestamp,
list(scheduledemail_filter.values_list('scheduled_timestamp', flat=True)))
self.assertEqual(result.status_code, 200)
error_result = self.client_post('/json/invites/' + str(9999) + '/resend')
self.assert_json_error(error_result, "No such invitation")
self.check_sent_emails([invitee], custom_from_name="Zulip")
def test_accessing_invites_in_another_realm(self) -> None:
inviter = UserProfile.objects.exclude(realm=get_realm('zulip')).first()
prereg_user = PreregistrationUser.objects.create(
email='email', referred_by=inviter, realm=inviter.realm)
self.login(self.example_email("iago"))
error_result = self.client_post('/json/invites/' + str(prereg_user.id) + '/resend')
self.assert_json_error(error_result, "No such invitation")
error_result = self.client_delete('/json/invites/' + str(prereg_user.id))
self.assert_json_error(error_result, "No such invitation")
class InviteeEmailsParserTests(TestCase):
def setUp(self) -> None:
self.email1 = "email1@zulip.com"
self.email2 = "email2@zulip.com"
self.email3 = "email3@zulip.com"
def test_if_emails_separated_by_commas_are_parsed_and_stripped_correctly(self) -> None:
emails_raw = "{} ,{}, {}".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_separated_by_newlines_are_parsed_and_stripped_correctly(self) -> None:
emails_raw = "{}\n {}\n {} ".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_from_email_client_separated_by_newlines_are_parsed_correctly(self) -> None:
emails_raw = "Email One <{}>\nEmailTwo<{}>\nEmail Three<{}>".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_in_mixed_style_are_parsed_correctly(self) -> None:
emails_raw = "Email One <{}>,EmailTwo<{}>\n{}".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
class MultiuseInviteTest(ZulipTestCase):
def setUp(self) -> None:
self.realm = get_realm('zulip')
self.realm.invite_required = True
self.realm.save()
def generate_multiuse_invite_link(self, streams: Optional[List[Stream]]=None,
date_sent: Optional[datetime.datetime]=None) -> str:
invite = MultiuseInvite(realm=self.realm, referred_by=self.example_user("iago"))
invite.save()
if streams is not None:
invite.streams.set(streams)
if date_sent is None:
date_sent = timezone_now()
key = generate_key()
Confirmation.objects.create(content_object=invite, date_sent=date_sent,
confirmation_key=key, type=Confirmation.MULTIUSE_INVITE)
return confirmation_url(key, self.realm.host, Confirmation.MULTIUSE_INVITE)
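# Sketch of the mechanics assumed above: a MultiuseInvite row is wrapped in
# a Confirmation of type MULTIUSE_INVITE, and confirmation_url() turns the
# generated key into a shareable /join/<key>/ style link for this realm.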
def check_user_able_to_register(self, email: str, invite_link: str) -> None:
password = "password"
result = self.client_post(invite_link, {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email, password)
self.assertEqual(result.status_code, 302)
from django.core.mail import outbox
outbox.pop()
def test_valid_multiuse_link(self) -> None:
email1 = self.nonreg_email("test")
email2 = self.nonreg_email("test1")
email3 = self.nonreg_email("alice")
date_sent = timezone_now() - datetime.timedelta(days=settings.INVITATION_LINK_VALIDITY_DAYS - 1)
invite_link = self.generate_multiuse_invite_link(date_sent=date_sent)
self.check_user_able_to_register(email1, invite_link)
self.check_user_able_to_register(email2, invite_link)
self.check_user_able_to_register(email3, invite_link)
def test_expired_multiuse_link(self) -> None:
email = self.nonreg_email('newuser')
date_sent = timezone_now() - datetime.timedelta(days=settings.INVITATION_LINK_VALIDITY_DAYS)
invite_link = self.generate_multiuse_invite_link(date_sent=date_sent)
result = self.client_post(invite_link, {'email': email})
self.assertEqual(result.status_code, 200)
self.assert_in_response("The confirmation link has expired or been deactivated.", result)
def test_invalid_multiuse_link(self) -> None:
email = self.nonreg_email('newuser')
invite_link = "/join/invalid_key/"
result = self.client_post(invite_link, {'email': email})
self.assertEqual(result.status_code, 200)
self.assert_in_response("Whoops. The confirmation link is malformed.", result)
def test_invalid_multiuse_link_in_open_realm(self) -> None:
self.realm.invite_required = False
self.realm.save()
email = self.nonreg_email('newuser')
invite_link = "/join/invalid_key/"
with patch('zerver.views.registration.get_realm_from_request', return_value=self.realm):
with patch('zerver.views.registration.get_realm', return_value=self.realm):
self.check_user_able_to_register(email, invite_link)
def test_multiuse_link_with_specified_streams(self) -> None:
name1 = "newuser"
name2 = "bob"
email1 = self.nonreg_email(name1)
email2 = self.nonreg_email(name2)
stream_names = ["Rome", "Scotland", "Venice"]
streams = [get_stream(stream_name, self.realm) for stream_name in stream_names]
invite_link = self.generate_multiuse_invite_link(streams=streams)
self.check_user_able_to_register(email1, invite_link)
self.check_user_subscribed_only_to_streams(name1, streams)
stream_names = ["Rome", "Verona"]
streams = [get_stream(stream_name, self.realm) for stream_name in stream_names]
invite_link = self.generate_multiuse_invite_link(streams=streams)
self.check_user_able_to_register(email2, invite_link)
self.check_user_subscribed_only_to_streams(name2, streams)
def test_create_multiuse_link_api_call(self) -> None:
self.login(self.example_email('iago'))
result = self.client_post('/json/invites/multiuse')
self.assert_json_success(result)
invite_link = result.json()["invite_link"]
self.check_user_able_to_register(self.nonreg_email("test"), invite_link)
def test_create_multiuse_link_with_specified_streams_api_call(self) -> None:
self.login(self.example_email('iago'))
stream_names = ["Rome", "Scotland", "Venice"]
streams = [get_stream(stream_name, self.realm) for stream_name in stream_names]
stream_ids = [stream.id for stream in streams]
result = self.client_post('/json/invites/multiuse',
{"stream_ids": ujson.dumps(stream_ids)})
self.assert_json_success(result)
invite_link = result.json()["invite_link"]
self.check_user_able_to_register(self.nonreg_email("test"), invite_link)
self.check_user_subscribed_only_to_streams("test", streams)
def test_only_admin_can_create_multiuse_link_api_call(self) -> None:
self.login(self.example_email('iago'))
# Only admins should be able to create multiuse invites even if
# invite_by_admins_only is set to False.
self.realm.invite_by_admins_only = False
self.realm.save()
result = self.client_post('/json/invites/multiuse')
self.assert_json_success(result)
invite_link = result.json()["invite_link"]
self.check_user_able_to_register(self.nonreg_email("test"), invite_link)
self.login(self.example_email('hamlet'))
result = self.client_post('/json/invites/multiuse')
self.assert_json_error(result, "Must be an organization administrator")
def test_create_multiuse_link_invalid_stream_api_call(self) -> None:
self.login(self.example_email('iago'))
result = self.client_post('/json/invites/multiuse',
{"stream_ids": ujson.dumps([54321])})
self.assert_json_error(result, "Invalid stream id 54321. No invites were sent.")
class EmailUnsubscribeTests(ZulipTestCase):
def test_error_unsubscribe(self) -> None:
# An invalid unsubscribe token "test123" produces an error.
result = self.client_get('/accounts/unsubscribe/missed_messages/test123')
self.assert_in_response('Unknown email unsubscribe request', result)
# An unknown message type "fake" produces an error.
user_profile = self.example_user('hamlet')
unsubscribe_link = one_click_unsubscribe_link(user_profile, "fake")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
self.assert_in_response('Unknown email unsubscribe request', result)
def test_missedmessage_unsubscribe(self) -> None:
"""
We provide one-click unsubscribe links in missed message
e-mails that you can click even when logged out to update your
email notification settings.
"""
user_profile = self.example_user('hamlet')
user_profile.enable_offline_email_notifications = True
user_profile.save()
unsubscribe_link = one_click_unsubscribe_link(user_profile,
"missed_messages")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
self.assertEqual(result.status_code, 200)
user_profile.refresh_from_db()
self.assertFalse(user_profile.enable_offline_email_notifications)
def test_welcome_unsubscribe(self) -> None:
"""
We provide one-click unsubscribe links in welcome e-mails that you can
click even when logged out to stop receiving them.
"""
user_profile = self.example_user('hamlet')
# Simulate a new user signing up, which enqueues 2 welcome e-mails.
enqueue_welcome_emails(user_profile)
self.assertEqual(2, ScheduledEmail.objects.filter(user=user_profile).count())
# Simulate unsubscribing from the welcome e-mails.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
# The welcome email jobs are no longer scheduled.
self.assertEqual(result.status_code, 200)
self.assertEqual(0, ScheduledEmail.objects.filter(user=user_profile).count())
def test_digest_unsubscribe(self) -> None:
"""
We provide one-click unsubscribe links in digest e-mails that you can
click even when logged out to stop receiving them.
Unsubscribing from these emails also dequeues any digest email jobs that
have been queued.
"""
user_profile = self.example_user('hamlet')
self.assertTrue(user_profile.enable_digest_emails)
# Enqueue a fake digest email.
context = {'name': '', 'realm_uri': '', 'unread_pms': [], 'hot_conversations': [],
'new_users': [], 'new_streams': {'plain': []}, 'unsubscribe_link': ''}
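# This context is a stand-in with empty values for the fields the digest
# template expects; only the act of scheduling matters for this test.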
send_future_email('zerver/emails/digest', user_profile.realm,
to_user_ids=[user_profile.id], context=context)
self.assertEqual(1, ScheduledEmail.objects.filter(user=user_profile).count())
# Simulate unsubscribing from digest e-mails.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "digest")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
# The setting is toggled off, and scheduled jobs have been removed.
self.assertEqual(result.status_code, 200)
# Circumvent user_profile caching.
user_profile.refresh_from_db()
self.assertFalse(user_profile.enable_digest_emails)
self.assertEqual(0, ScheduledEmail.objects.filter(user=user_profile).count())
def test_login_unsubscribe(self) -> None:
"""
We provide one-click unsubscribe links in login
e-mails that you can click even when logged out to update your
email notification settings.
"""
user_profile = self.example_user('hamlet')
user_profile.enable_login_emails = True
user_profile.save()
unsubscribe_link = one_click_unsubscribe_link(user_profile, "login")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
self.assertEqual(result.status_code, 200)
user_profile.refresh_from_db()
self.assertFalse(user_profile.enable_login_emails)
class RealmCreationTest(ZulipTestCase):
@override_settings(OPEN_REALM_CREATION=True)
def check_able_to_create_realm(self, email: str) -> None:
password = "test"
string_id = "zuliptest"
realm = get_realm(string_id)
# Make sure the realm does not exist
self.assertIsNone(realm)
# Create new realm with the email
result = self.client_post('/new/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/new/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email, password, realm_subdomain=string_id)
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].startswith('http://zuliptest.testserver/accounts/login/subdomain/'))
# Make sure the realm is created
realm = get_realm(string_id)
self.assertIsNotNone(realm)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user(email, realm).realm, realm)
# Check defaults
self.assertEqual(realm.org_type, Realm.CORPORATE)
self.assertEqual(realm.emails_restricted_to_domains, False)
self.assertEqual(realm.invite_required, True)
# Check welcome messages
for stream_name, text, message_count in [
('announce', 'This is', 1),
(Realm.INITIAL_PRIVATE_STREAM_NAME, 'This is', 1),
('general', 'Welcome to', 1),
('new members', 'stream is', 1),
('zulip', 'Here is', 3)]:
stream = get_stream(stream_name, realm)
recipient = get_stream_recipient(stream.id)
messages = Message.objects.filter(recipient=recipient).order_by('pub_date')
self.assertEqual(len(messages), message_count)
self.assertIn(text, messages[0].content)
def test_create_realm_non_existing_email(self) -> None:
self.check_able_to_create_realm("user1@test.com")
def test_create_realm_existing_email(self) -> None:
self.check_able_to_create_realm("hamlet@zulip.com")
def test_create_realm_as_system_bot(self) -> None:
result = self.client_post('/new/', {'email': 'notification-bot@zulip.com'})
self.assertEqual(result.status_code, 200)
self.assert_in_response('notification-bot@zulip.com is an email address reserved', result)
def test_create_realm_no_creation_key(self) -> None:
"""
Trying to create a realm without a creation_key should fail when
OPEN_REALM_CREATION is false.
"""
email = "user1@test.com"
with self.settings(OPEN_REALM_CREATION=False):
# Create new realm with the email, but no creation key.
result = self.client_post('/new/', {'email': email})
self.assertEqual(result.status_code, 200)
self.assert_in_response('New organization creation disabled', result)
@override_settings(OPEN_REALM_CREATION=True)
def test_create_realm_with_subdomain(self) -> None:
password = "test"
string_id = "zuliptest"
email = "user1@test.com"
realm_name = "Test"
# Make sure the realm does not exist
self.assertIsNone(get_realm(string_id))
# Create new realm with the email
result = self.client_post('/new/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/new/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = string_id,
realm_name=realm_name,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=string_id + ".testserver")
self.assertEqual(result.status_code, 302)
# Make sure the realm is created
realm = get_realm(string_id)
self.assertIsNotNone(realm)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user(email, realm).realm, realm)
self.assertEqual(realm.name, realm_name)
self.assertEqual(realm.subdomain, string_id)
@override_settings(OPEN_REALM_CREATION=True)
def test_mailinator_signup(self) -> None:
result = self.client_post('/new/', {'email': "hi@mailinator.com"})
self.assert_in_response('Please use your real email address.', result)
@override_settings(OPEN_REALM_CREATION=True)
def test_subdomain_restrictions(self) -> None:
password = "test"
email = "user1@test.com"
realm_name = "Test"
result = self.client_post('/new/', {'email': email})
self.client_get(result["Location"])
confirmation_url = self.get_confirmation_url_from_outbox(email)
self.client_get(confirmation_url)
errors = {'id': "length 3 or greater",
'-id': "cannot start or end with a",
'string-ID': "lowercase letters",
'string_id': "lowercase letters",
'stream': "unavailable",
'streams': "unavailable",
'about': "unavailable",
'abouts': "unavailable",
'zephyr': "unavailable"}
for string_id, error_msg in errors.items():
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = string_id,
realm_name = realm_name)
self.assert_in_response(error_msg, result)
# test valid subdomain
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = 'a-0',
realm_name = realm_name)
self.assertEqual(result.status_code, 302)
self.assertTrue(result.url.startswith('http://a-0.testserver/accounts/login/subdomain/'))
@override_settings(OPEN_REALM_CREATION=True)
def test_subdomain_restrictions_root_domain(self) -> None:
password = "test"
email = "user1@test.com"
realm_name = "Test"
result = self.client_post('/new/', {'email': email})
self.client_get(result["Location"])
confirmation_url = self.get_confirmation_url_from_outbox(email)
self.client_get(confirmation_url)
# test root domain will fail with ROOT_DOMAIN_LANDING_PAGE
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = '',
realm_name = realm_name)
self.assert_in_response('unavailable', result)
# test valid use of root domain
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = '',
realm_name = realm_name)
self.assertEqual(result.status_code, 302)
self.assertTrue(result.url.startswith('http://testserver/accounts/login/subdomain/'))
@override_settings(OPEN_REALM_CREATION=True)
def test_subdomain_restrictions_root_domain_option(self) -> None:
password = "test"
email = "user1@test.com"
realm_name = "Test"
result = self.client_post('/new/', {'email': email})
self.client_get(result["Location"])
confirmation_url = self.get_confirmation_url_from_outbox(email)
self.client_get(confirmation_url)
# test root domain will fail with ROOT_DOMAIN_LANDING_PAGE
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = 'abcdef',
realm_in_root_domain = 'true',
realm_name = realm_name)
self.assert_in_response('unavailable', result)
# test valid use of root domain
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = 'abcdef',
realm_in_root_domain = 'true',
realm_name = realm_name)
self.assertEqual(result.status_code, 302)
self.assertTrue(result.url.startswith('http://testserver/accounts/login/subdomain/'))
def test_is_root_domain_available(self) -> None:
self.assertTrue(is_root_domain_available())
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
self.assertFalse(is_root_domain_available())
realm = get_realm("zulip")
realm.string_id = Realm.SUBDOMAIN_FOR_ROOT_DOMAIN
realm.save()
self.assertFalse(is_root_domain_available())
def test_subdomain_check_api(self) -> None:
result = self.client_get("/json/realm/subdomain/zulip")
self.assert_in_success_response(["Subdomain unavailable. Please choose a different one."], result)
result = self.client_get("/json/realm/subdomain/zu_lip")
self.assert_in_success_response(["Subdomain can only have lowercase letters, numbers, and \'-\'s."], result)
result = self.client_get("/json/realm/subdomain/hufflepuff")
self.assert_in_success_response(["available"], result)
self.assert_not_in_success_response(["unavailable"], result)
def test_subdomain_check_management_command(self) -> None:
# Short names should work
check_subdomain_available('aa', from_management_command=True)
# So should reserved ones
check_subdomain_available('zulip', from_management_command=True)
# Malformed names should still be rejected.
with self.assertRaises(ValidationError):
check_subdomain_available('-ba_d-', from_management_command=True)
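# As exercised above, from_management_command=True appears to relax the
# length and reserved-name checks while still rejecting malformed names.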
class UserSignUpTest(ZulipTestCase):
def _assert_redirected_to(self, result: HttpResponse, url: str) -> None:
self.assertEqual(result.status_code, 302)
self.assertEqual(result['Location'], url)
def test_bad_email_configuration_for_accounts_home(self) -> None:
"""
Make sure we redirect for SMTP errors.
"""
email = self.nonreg_email('newguy')
smtp_mock = patch(
'zerver.views.registration.send_confirm_registration_email',
side_effect=smtplib.SMTPException('uh oh')
)
error_mock = patch('logging.error')
with smtp_mock, error_mock as err:
result = self.client_post('/accounts/home/', {'email': email})
self._assert_redirected_to(result, '/config-error/smtp')
self.assertEqual(
err.call_args_list[0][0][0],
'Error in accounts_home: uh oh'
)
def test_bad_email_configuration_for_create_realm(self) -> None:
"""
Make sure we redirect for SMTP errors.
"""
email = self.nonreg_email('newguy')
smtp_mock = patch(
'zerver.views.registration.send_confirm_registration_email',
side_effect=smtplib.SMTPException('uh oh')
)
error_mock = patch('logging.error')
with smtp_mock, error_mock as err:
result = self.client_post('/new/', {'email': email})
self._assert_redirected_to(result, '/config-error/smtp')
self.assertEqual(
err.call_args_list[0][0][0],
'Error in create_realm: uh oh'
)
def test_user_default_language_and_timezone(self) -> None:
"""
Check that a new user's default language matches the realm's default
language, and that the timezone submitted at registration is saved.
"""
email = self.nonreg_email('newguy')
password = "newpassword"
timezone = "US/Mountain"
realm = get_realm('zulip')
do_set_realm_property(realm, 'default_language', u"de")
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
# Pick a password and agree to the ToS.
result = self.submit_reg_form_for_user(email, password, timezone=timezone)
self.assertEqual(result.status_code, 302)
user_profile = self.nonreg_user('newguy')
self.assertEqual(user_profile.default_language, realm.default_language)
self.assertEqual(user_profile.timezone, timezone)
from django.core.mail import outbox
outbox.pop()
def test_default_twenty_four_hour_time(self) -> None:
"""
Check that a new user's twenty_four_hour_time setting matches
the realm's default_twenty_four_hour_time.
"""
email = self.nonreg_email('newguy')
password = "newpassword"
realm = get_realm('zulip')
do_set_realm_property(realm, 'default_twenty_four_hour_time', True)
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email, password)
self.assertEqual(result.status_code, 302)
user_profile = self.nonreg_user('newguy')
self.assertEqual(user_profile.twenty_four_hour_time, realm.default_twenty_four_hour_time)
def test_signup_already_active(self) -> None:
"""
Check if signing up with an active email redirects to a login page.
"""
email = self.example_email("hamlet")
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertIn('login', result['Location'])
result = self.client_get(result.url)
self.assert_in_response("You've already registered", result)
def test_signup_system_bot(self) -> None:
email = "notification-bot@zulip.com"
result = self.client_post('/accounts/home/', {'email': email}, subdomain="lear")
self.assertEqual(result.status_code, 302)
self.assertIn('login', result['Location'])
result = self.client_get(result.url)
# This is not really the right error message, but at least it's an error.
self.assert_in_response("You've already registered", result)
def test_signup_existing_email(self) -> None:
"""
Check if signing up with an email used in another realm succeeds.
"""
email = self.example_email('hamlet')
password = "newpassword"
realm = get_realm('lear')
result = self.client_post('/accounts/home/', {'email': email}, subdomain="lear")
self.assertEqual(result.status_code, 302)
result = self.client_get(result["Location"], subdomain="lear")
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url, subdomain="lear")
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email, password, subdomain="lear")
self.assertEqual(result.status_code, 302)
get_user(email, realm)
self.assertEqual(UserProfile.objects.filter(email=email).count(), 2)
def test_signup_invalid_name(self) -> None:
"""
Check if an invalid name during signup is handled properly.
"""
email = "newguy@zulip.com"
password = "newpassword"
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
# Pick a password and agree to the ToS.
result = self.submit_reg_form_for_user(email, password, full_name="<invalid>")
self.assert_in_success_response(["Invalid characters in name!"], result)
# Verify that the user is asked for name and password
self.assert_in_success_response(['id_password', 'id_full_name'], result)
def test_signup_without_password(self) -> None:
"""
Check if signing up without a password works properly when
password_auth_enabled is False.
"""
email = self.nonreg_email('newuser')
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
with patch('zerver.views.registration.password_auth_enabled', return_value=False):
result = self.client_post(
'/accounts/register/',
{'full_name': 'New User',
'key': find_key_by_email(email),
'terms': True})
# User should now be logged in.
self.assertEqual(result.status_code, 302)
user_profile = self.nonreg_user('newuser')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_signup_without_full_name(self) -> None:
"""
Check if signing up without a full name redirects to a registration
form.
"""
email = "newguy@zulip.com"
password = "newpassword"
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.client_post(
'/accounts/register/',
{'password': password,
'key': find_key_by_email(email),
'terms': True,
'from_confirmation': '1'})
self.assert_in_success_response(["You're almost there."], result)
# Verify that the user is asked for name and password
self.assert_in_success_response(['id_password', 'id_full_name'], result)
def test_signup_with_full_name(self) -> None:
"""
Check if signing up with a full name works properly.
"""
email = "newguy@zulip.com"
password = "newpassword"
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.client_post(
'/accounts/register/',
{'password': password,
'key': find_key_by_email(email),
'terms': True,
'full_name': "New Guy",
'from_confirmation': '1'})
self.assert_in_success_response(["You're almost there."], result)
def test_signup_with_default_stream_group(self) -> None:
# Check if user is subscribed to the streams of default
# stream group as well as default streams.
email = self.nonreg_email('newguy')
password = "newpassword"
realm = get_realm("zulip")
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
result = self.client_get(result["Location"])
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
default_streams = []
for stream_name in ["venice", "verona"]:
stream = get_stream(stream_name, realm)
do_add_default_stream(stream)
default_streams.append(stream)
group1_streams = []
for stream_name in ["scotland", "denmark"]:
stream = get_stream(stream_name, realm)
group1_streams.append(stream)
do_create_default_stream_group(realm, "group 1", "group 1 description", group1_streams)
result = self.submit_reg_form_for_user(email, password, default_stream_groups=["group 1"])
self.check_user_subscribed_only_to_streams("newguy", default_streams + group1_streams)
def test_signup_with_multiple_default_stream_groups(self) -> None:
# Check if user is subscribed to the streams of default
# stream groups as well as default streams.
email = self.nonreg_email('newguy')
password = "newpassword"
realm = get_realm("zulip")
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
result = self.client_get(result["Location"])
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
default_streams = []
for stream_name in ["venice", "verona"]:
stream = get_stream(stream_name, realm)
do_add_default_stream(stream)
default_streams.append(stream)
group1_streams = []
for stream_name in ["scotland", "denmark"]:
stream = get_stream(stream_name, realm)
group1_streams.append(stream)
do_create_default_stream_group(realm, "group 1", "group 1 description", group1_streams)
group2_streams = []
for stream_name in ["scotland", "rome"]:
stream = get_stream(stream_name, realm)
group2_streams.append(stream)
do_create_default_stream_group(realm, "group 2", "group 2 description", group2_streams)
result = self.submit_reg_form_for_user(email, password,
default_stream_groups=["group 1", "group 2"])
self.check_user_subscribed_only_to_streams(
"newguy", list(set(default_streams + group1_streams + group2_streams)))
def test_signup_without_user_settings_from_another_realm(self) -> None:
email = self.example_email('hamlet')
password = "newpassword"
subdomain = "lear"
realm = get_realm("lear")
# Make an account in the Zulip realm, but we're not copying from there.
hamlet_in_zulip = get_user(self.example_email("hamlet"), get_realm("zulip"))
hamlet_in_zulip.left_side_userlist = True
hamlet_in_zulip.default_language = "de"
hamlet_in_zulip.emojiset = "twitter"
hamlet_in_zulip.high_contrast_mode = True
hamlet_in_zulip.enter_sends = True
hamlet_in_zulip.tutorial_status = UserProfile.TUTORIAL_FINISHED
hamlet_in_zulip.save()
result = self.client_post('/accounts/home/', {'email': email}, subdomain=subdomain)
self.assertEqual(result.status_code, 302)
result = self.client_get(result["Location"], subdomain=subdomain)
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url, subdomain=subdomain)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email, password, source_realm="on",
HTTP_HOST=subdomain + ".testserver")
hamlet = get_user(self.example_email("hamlet"), realm)
self.assertEqual(hamlet.left_side_userlist, False)
self.assertEqual(hamlet.default_language, "en")
self.assertEqual(hamlet.emojiset, "google-blob")
self.assertEqual(hamlet.high_contrast_mode, False)
self.assertEqual(hamlet.enable_stream_sounds, False)
self.assertEqual(hamlet.enter_sends, False)
self.assertEqual(hamlet.tutorial_status, UserProfile.TUTORIAL_WAITING)
def test_signup_with_user_settings_from_another_realm(self) -> None:
email = self.example_email('hamlet')
password = "newpassword"
subdomain = "lear"
lear_realm = get_realm("lear")
zulip_realm = get_realm("zulip")
self.login(self.example_email("hamlet"))
with get_test_image_file('img.png') as image_file:
self.client_post("/json/users/me/avatar", {'file': image_file})
hamlet_in_zulip = get_user(self.example_email("hamlet"), zulip_realm)
hamlet_in_zulip.left_side_userlist = True
hamlet_in_zulip.default_language = "de"
hamlet_in_zulip.emojiset = "twitter"
hamlet_in_zulip.high_contrast_mode = True
hamlet_in_zulip.enter_sends = True
hamlet_in_zulip.tutorial_status = UserProfile.TUTORIAL_FINISHED
hamlet_in_zulip.save()
result = self.client_post('/accounts/home/', {'email': email}, subdomain=subdomain)
self.assertEqual(result.status_code, 302)
result = self.client_get(result["Location"], subdomain=subdomain)
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url, subdomain=subdomain)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email, password, source_realm="zulip",
HTTP_HOST=subdomain + ".testserver")
hamlet_in_lear = get_user(self.example_email("hamlet"), lear_realm)
self.assertEqual(hamlet_in_lear.left_side_userlist, True)
self.assertEqual(hamlet_in_lear.default_language, "de")
self.assertEqual(hamlet_in_lear.emojiset, "twitter")
self.assertEqual(hamlet_in_lear.high_contrast_mode, True)
self.assertEqual(hamlet_in_lear.enter_sends, True)
self.assertEqual(hamlet_in_lear.enable_stream_sounds, False)
self.assertEqual(hamlet_in_lear.tutorial_status, UserProfile.TUTORIAL_FINISHED)
zulip_path_id = avatar_disk_path(hamlet_in_zulip)
hamlet_path_id = avatar_disk_path(hamlet_in_lear)
self.assertEqual(open(zulip_path_id, "rb").read(), open(hamlet_path_id, "rb").read())
def test_signup_invalid_subdomain(self) -> None:
"""
Check if attempting to authenticate to the wrong subdomain logs an
error and redirects.
"""
email = "newuser@zulip.com"
password = "newpassword"
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
def invalid_subdomain(**kwargs: Any) -> Any:
return_data = kwargs.get('return_data', {})
return_data['invalid_subdomain'] = True
with patch('zerver.views.registration.authenticate', side_effect=invalid_subdomain):
with patch('logging.error') as mock_error:
result = self.client_post(
'/accounts/register/',
{'password': password,
'full_name': 'New User',
'key': find_key_by_email(email),
'terms': True})
mock_error.assert_called_once()
self.assertEqual(result.status_code, 302)
def test_replace_subdomain_in_confirmation_link(self) -> None:
"""
Check that manually changing the subdomain in a registration
confirmation link doesn't allow you to register to a different realm.
"""
email = "newuser@zulip.com"
self.client_post('/accounts/home/', {'email': email})
result = self.client_post(
'/accounts/register/',
{'password': "password",
'key': find_key_by_email(email),
'terms': True,
'full_name': "New User",
'from_confirmation': '1'}, subdomain="zephyr")
self.assert_in_success_response(["We couldn't find your confirmation link"], result)
def test_failed_signup_due_to_restricted_domain(self) -> None:
realm = get_realm('zulip')
realm.invite_required = False
realm.save()
request = HostRequestMock(host=realm.host)
request.session = {} # type: ignore
email = 'user@acme.com'
form = HomepageForm({'email': email}, realm=realm)
self.assertIn("Your email address, {}, is not in one of the domains".format(email),
form.errors['email'][0])
def test_failed_signup_due_to_disposable_email(self) -> None:
realm = get_realm('zulip')
realm.emails_restricted_to_domains = False
realm.disallow_disposable_email_addresses = True
realm.save()
request = HostRequestMock(host=realm.host)
request.session = {} # type: ignore
email = 'abc@mailnator.com'
form = HomepageForm({'email': email}, realm=realm)
self.assertIn("Please use your real email address", form.errors['email'][0])
def test_failed_signup_due_to_email_containing_plus(self) -> None:
realm = get_realm('zulip')
realm.emails_restricted_to_domains = True
realm.save()
request = HostRequestMock(host=realm.host)
request.session = {} # type: ignore
email = 'iago+label@zulip.com'
form = HomepageForm({'email': email}, realm=realm)
self.assertIn("Email addresses containing + are not allowed in this organization.", form.errors['email'][0])
def test_failed_signup_due_to_invite_required(self) -> None:
realm = get_realm('zulip')
realm.invite_required = True
realm.save()
request = HostRequestMock(host=realm.host)
request.session = {} # type: ignore
email = 'user@zulip.com'
form = HomepageForm({'email': email}, realm=realm)
self.assertIn("Please request an invite for {} from".format(email),
form.errors['email'][0])
def test_failed_signup_due_to_nonexistent_realm(self) -> None:
request = HostRequestMock(host='acme.' + settings.EXTERNAL_HOST)
request.session = {} # type: ignore
email = 'user@acme.com'
form = HomepageForm({'email': email}, realm=None)
self.assertIn("organization you are trying to join using {} does "
"not exist".format(email), form.errors['email'][0])
def test_access_signup_page_in_root_domain_without_realm(self) -> None:
result = self.client_get('/register', subdomain="", follow=True)
self.assert_in_success_response(["Find your Zulip accounts"], result)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
'zproject.backends.ZulipDummyBackend'))
def test_ldap_registration_from_confirmation(self) -> None:
password = "testing"
email = "newuser@zulip.com"
subdomain = "zulip"
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': ['New LDAP fullname']
}
}
with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + r"(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise AssertionError("Couldn't find a confirmation email.")
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
# Full name should be set from LDAP
result = self.submit_reg_form_for_user(email,
password,
full_name="Ignore",
from_confirmation="1",
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there.",
"New LDAP fullname",
"newuser@zulip.com"],
result)
# Verify that the user is asked for name
self.assert_in_success_response(['id_full_name'], result)
# TODO: Ideally, we wouldn't ask for a password if LDAP is
# enabled, in which case this assert should be inverted.
self.assert_in_success_response(['id_password'], result)
# Test the TypeError exception handler
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': None # This will raise TypeError
}
}
result = self.submit_reg_form_for_user(email,
password,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there.",
"newuser@zulip.com"],
result)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
'zproject.backends.ZulipDummyBackend'))
def test_ldap_registration_end_to_end(self) -> None:
password = "testing"
email = "newuser@zulip.com"
subdomain = "zulip"
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
full_name = 'New LDAP fullname'
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': [full_name],
'sn': ['shortname'],
}
}
with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
# Click confirmation link
result = self.submit_reg_form_for_user(email,
password,
full_name="Ignore",
from_confirmation="1",
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
# Full name should be set from LDAP
self.assert_in_success_response(["You're almost there.",
full_name,
"newuser@zulip.com"],
result)
# Submit the final form with the wrong password.
result = self.submit_reg_form_for_user(email,
'wrongpassword',
full_name=full_name,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
# Didn't create an account
with self.assertRaises(UserProfile.DoesNotExist):
user_profile = UserProfile.objects.get(email=email)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/accounts/login/?email=newuser%40zulip.com")
# Submit the final form with the correct password.
result = self.submit_reg_form_for_user(email,
password,
full_name=full_name,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
user_profile = UserProfile.objects.get(email=email)
# Name comes from form which was set by LDAP.
self.assertEqual(user_profile.full_name, full_name)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
'zproject.backends.ZulipDummyBackend'))
def test_ldap_auto_registration_on_login(self) -> None:
"""The most common way for LDAP authentication to be used is with a
server that doesn't have a terms-of-service required, in which
case we offer a complete single-sign-on experience (where the
user just enters their LDAP username and password, and their
account is created if it doesn't already exist).
This test verifies that flow.
"""
password = "testing"
email = "newuser@zulip.com"
subdomain = "zulip"
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
full_name = 'New LDAP fullname'
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': [full_name],
'sn': ['shortname'],
}
}
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
self.login_with_return(email, password,
HTTP_HOST=subdomain + ".testserver")
user_profile = UserProfile.objects.get(email=email)
# Name comes from form which was set by LDAP.
self.assertEqual(user_profile.full_name, full_name)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
'zproject.backends.ZulipDummyBackend'))
def test_ldap_registration_when_names_changes_are_disabled(self) -> None:
password = "testing"
email = "newuser@zulip.com"
subdomain = "zulip"
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': ['New LDAP fullname'],
'sn': ['New LDAP shortname'],
}
}
with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
# Click the confirmation link. This sets the 'authenticated_full_name'
# session variable, which is then used to set the user's full name.
result = self.submit_reg_form_for_user(email,
password,
full_name="Ignore",
from_confirmation="1",
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
with patch('zerver.views.registration.name_changes_disabled', return_value=True):
result = self.submit_reg_form_for_user(email,
password,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
user_profile = UserProfile.objects.get(email=email)
# Name comes from LDAP session.
self.assertEqual(user_profile.full_name, 'New LDAP fullname')
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
'zproject.backends.EmailAuthBackend',
'zproject.backends.ZulipDummyBackend'))
def test_signup_with_ldap_and_email_enabled_using_email(self) -> None:
password = "mynewpassword"
email = "newuser@zulip.com"
subdomain = "zulip"
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': ['New LDAP fullname'],
'sn': ['New LDAP shortname'],
}
}
with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# If the user's email is inside the LDAP domain and we just
# have a wrong password, then we refuse to create an account
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
# Important: This matches the new user
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
result = self.submit_reg_form_for_user(
email,
password,
from_confirmation="1",
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email,
password,
full_name="Non-LDAP Full Name",
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 302)
# We get redirected back to the login page because password was wrong
self.assertEqual(result.url, "/accounts/login/?email=newuser%40zulip.com")
self.assertFalse(UserProfile.objects.filter(email=email).exists())
# If the user's email is outside the LDAP domain, though, we
# successfully create an account with a password in the Zulip
# database.
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
# Important: This doesn't match the new user
LDAP_APPEND_DOMAIN='example.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
with patch('zerver.views.registration.logging.warning') as mock_warning:
result = self.submit_reg_form_for_user(
email,
password,
from_confirmation="1",
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 200)
mock_warning.assert_called_once_with("New account email newuser@zulip.com could not be found in LDAP")
result = self.submit_reg_form_for_user(email,
password,
full_name="Non-LDAP Full Name",
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "http://zulip.testserver/")
user_profile = UserProfile.objects.get(email=email)
# Name comes from the POST request, not LDAP
self.assertEqual(user_profile.full_name, 'Non-LDAP Full Name')
def test_registration_when_name_changes_are_disabled(self) -> None:
"""
Test `name_changes_disabled` when we are not running under LDAP.
"""
password = "testing"
email = "newuser@zulip.com"
subdomain = "zulip"
with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
with patch('zerver.views.registration.name_changes_disabled', return_value=True):
result = self.submit_reg_form_for_user(email,
password,
full_name="New Name",
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
user_profile = UserProfile.objects.get(email=email)
# 'New Name' comes from POST data; not from LDAP session.
self.assertEqual(user_profile.full_name, 'New Name')
def test_realm_creation_through_ldap(self) -> None:
password = "testing"
email = "newuser@zulip.com"
subdomain = "zulip"
realm_name = "Zulip"
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': ['New User Name']
}
}
with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + r"(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise AssertionError("Couldn't find a confirmation email.")
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com',
TERMS_OF_SERVICE=False,
):
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
key = find_key_by_email(email)
confirmation = Confirmation.objects.get(confirmation_key=key)
prereg_user = confirmation.content_object
prereg_user.realm_creation = True
prereg_user.save()
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there.",
"newuser@zulip.com"],
result)
mock_ldap.reset()
mock_initialize.stop()
@patch('DNS.dnslookup', return_value=[['sipbtest:*:20922:101:Fred Sipb,,,:/mit/sipbtest:/bin/athena/tcsh']])
def test_registration_of_mirror_dummy_user(self, ignored: Any) -> None:
password = "test"
subdomain = "zephyr"
user_profile = self.mit_user("sipbtest")
email = user_profile.email
user_profile.is_mirror_dummy = True
user_profile.is_active = False
user_profile.save()
result = self.client_post('/register/', {'email': email}, subdomain="zephyr")
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"], subdomain="zephyr")
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + r"(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise AssertionError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url, subdomain="zephyr")
self.assertEqual(result.status_code, 200)
# If the mirror dummy user is already active, attempting to
# submit the registration form should raise an AssertionError
# (this is an invalid state, so it's a bug we got here):
user_profile.is_active = True
user_profile.save()
with self.assertRaisesRegex(AssertionError, "Mirror dummy user is already active!"):
result = self.submit_reg_form_for_user(
email,
password,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
user_profile.is_active = False
user_profile.save()
result = self.submit_reg_form_for_user(email,
password,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email,
password,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 302)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_registration_of_active_mirror_dummy_user(self) -> None:
"""
Trying to activate an already-active mirror dummy user should
raise an AssertionError.
"""
user_profile = self.mit_user("sipbtest")
email = user_profile.email
user_profile.is_mirror_dummy = True
user_profile.is_active = True
user_profile.save()
with self.assertRaisesRegex(AssertionError, "Mirror dummy user is already active!"):
self.client_post('/register/', {'email': email}, subdomain="zephyr")
class DeactivateUserTest(ZulipTestCase):
def test_deactivate_user(self) -> None:
email = self.example_email("hamlet")
self.login(email)
user = self.example_user('hamlet')
self.assertTrue(user.is_active)
result = self.client_delete('/json/users/me')
self.assert_json_success(result)
user = self.example_user('hamlet')
self.assertFalse(user.is_active)
self.login(email, fails=True)
def test_do_not_deactivate_final_admin(self) -> None:
email = self.example_email("iago")
self.login(email)
user = self.example_user('iago')
self.assertTrue(user.is_active)
result = self.client_delete('/json/users/me')
self.assert_json_error(result, "Cannot deactivate the only organization administrator.")
user = self.example_user('iago')
self.assertTrue(user.is_active)
self.assertTrue(user.is_realm_admin)
email = self.example_email("hamlet")
user_2 = self.example_user('hamlet')
do_change_is_admin(user_2, True)
self.assertTrue(user_2.is_realm_admin)
result = self.client_delete('/json/users/me')
self.assert_json_success(result)
do_change_is_admin(user, True)
def test_do_not_deactivate_final_user(self) -> None:
realm = get_realm('zulip')
UserProfile.objects.filter(realm=realm, is_realm_admin=False).update(is_active=False)
email = self.example_email("iago")
self.login(email)
result = self.client_delete('/json/users/me')
self.assert_json_error(result, "Cannot deactivate the only user.")
class TestLoginPage(ZulipTestCase):
def test_login_page_wrong_subdomain_error(self) -> None:
result = self.client_get("/login/?subdomain=1")
self.assertIn(WRONG_SUBDOMAIN_ERROR, result.content.decode('utf8'))
@patch('django.http.HttpRequest.get_host')
def test_login_page_redirects_for_root_alias(self, mock_get_host: MagicMock) -> None:
mock_get_host.return_value = 'www.testserver'
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
result = self.client_get("/en/login/")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/accounts/go/')
result = self.client_get("/en/login/?next=/upgrade/")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/accounts/go/?next=%2Fupgrade%2F')
@patch('django.http.HttpRequest.get_host')
def test_login_page_redirects_for_root_domain(self, mock_get_host: MagicMock) -> None:
mock_get_host.return_value = 'testserver'
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
result = self.client_get("/en/login/")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/accounts/go/')
result = self.client_get("/en/login/?next=/upgrade/")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/accounts/go/?next=%2Fupgrade%2F')
mock_get_host.return_value = 'www.testserver.com'
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True,
EXTERNAL_HOST='www.testserver.com',
ROOT_SUBDOMAIN_ALIASES=['test']):
result = self.client_get("/en/login/")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/accounts/go/')
result = self.client_get("/en/login/?next=/upgrade/")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/accounts/go/?next=%2Fupgrade%2F')
@patch('django.http.HttpRequest.get_host')
def test_login_page_works_without_subdomains(self, mock_get_host: MagicMock) -> None:
mock_get_host.return_value = 'www.testserver'
with self.settings(ROOT_SUBDOMAIN_ALIASES=['www']):
result = self.client_get("/en/login/")
self.assertEqual(result.status_code, 200)
mock_get_host.return_value = 'testserver'
with self.settings(ROOT_SUBDOMAIN_ALIASES=['www']):
result = self.client_get("/en/login/")
self.assertEqual(result.status_code, 200)
class TestFindMyTeam(ZulipTestCase):
def test_template(self) -> None:
result = self.client_get('/accounts/find/')
self.assertIn("Find your Zulip accounts", result.content.decode('utf8'))
def test_result(self) -> None:
result = self.client_post('/accounts/find/',
dict(emails="iago@zulip.com,cordelia@zulip.com"))
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/accounts/find/?emails=iago%40zulip.com%2Ccordelia%40zulip.com")
result = self.client_get(result.url)
content = result.content.decode('utf8')
self.assertIn("Emails sent! You will only receive emails", content)
self.assertIn(self.example_email("iago"), content)
self.assertIn(self.example_email("cordelia"), content)
from django.core.mail import outbox
# 3 = 1 + 2 -- Cordelia gets an email each for the "zulip" and "lear" realms.
self.assertEqual(len(outbox), 3)
def test_find_team_ignore_invalid_email(self) -> None:
result = self.client_post('/accounts/find/',
dict(emails="iago@zulip.com,invalid_email@zulip.com"))
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/accounts/find/?emails=iago%40zulip.com%2Cinvalid_email%40zulip.com")
result = self.client_get(result.url)
content = result.content.decode('utf8')
self.assertIn("Emails sent! You will only receive emails", content)
self.assertIn(self.example_email("iago"), content)
self.assertIn("invalid_email@", content)
from django.core.mail import outbox
self.assertEqual(len(outbox), 1)
def test_find_team_reject_invalid_email(self) -> None:
result = self.client_post('/accounts/find/',
dict(emails="invalid_string"))
self.assertEqual(result.status_code, 200)
self.assertIn(b"Enter a valid email", result.content)
from django.core.mail import outbox
self.assertEqual(len(outbox), 0)
# Just for coverage on perhaps-unnecessary validation code.
result = self.client_get('/accounts/find/?emails=invalid')
self.assertEqual(result.status_code, 200)
def test_find_team_zero_emails(self) -> None:
data = {'emails': ''}
result = self.client_post('/accounts/find/', data)
self.assertIn('This field is required', result.content.decode('utf8'))
self.assertEqual(result.status_code, 200)
from django.core.mail import outbox
self.assertEqual(len(outbox), 0)
def test_find_team_one_email(self) -> None:
data = {'emails': self.example_email("hamlet")}
result = self.client_post('/accounts/find/', data)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/accounts/find/?emails=hamlet%40zulip.com')
from django.core.mail import outbox
self.assertEqual(len(outbox), 1)
def test_find_team_deactivated_user(self) -> None:
do_deactivate_user(self.example_user("hamlet"))
data = {'emails': self.example_email("hamlet")}
result = self.client_post('/accounts/find/', data)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/accounts/find/?emails=hamlet%40zulip.com')
from django.core.mail import outbox
self.assertEqual(len(outbox), 0)
def test_find_team_deactivated_realm(self) -> None:
do_deactivate_realm(get_realm("zulip"))
data = {'emails': self.example_email("hamlet")}
result = self.client_post('/accounts/find/', data)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/accounts/find/?emails=hamlet%40zulip.com')
from django.core.mail import outbox
self.assertEqual(len(outbox), 0)
def test_find_team_bot_email(self) -> None:
data = {'emails': self.example_email("webhook_bot")}
result = self.client_post('/accounts/find/', data)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/accounts/find/?emails=webhook-bot%40zulip.com')
from django.core.mail import outbox
self.assertEqual(len(outbox), 0)
def test_find_team_more_than_ten_emails(self) -> None:
data = {'emails': ','.join(['hamlet-{}@zulip.com'.format(i) for i in range(11)])}
result = self.client_post('/accounts/find/', data)
self.assertEqual(result.status_code, 200)
self.assertIn("Please enter at most 10", result.content.decode('utf8'))
from django.core.mail import outbox
self.assertEqual(len(outbox), 0)
class ConfirmationKeyTest(ZulipTestCase):
def test_confirmation_key(self) -> None:
request = MagicMock()
request.session = {
'confirmation_key': {'confirmation_key': 'xyzzy'}
}
result = confirmation_key(request)
self.assert_json_success(result)
self.assert_in_response('xyzzy', result)
class MobileAuthOTPTest(ZulipTestCase):
def test_xor_hex_strings(self) -> None:
self.assertEqual(xor_hex_strings('1237c81ab', '18989fd12'), '0aaf57cb9')
with self.assertRaises(AssertionError):
xor_hex_strings('1', '31')
def test_is_valid_otp(self) -> None:
self.assertEqual(is_valid_otp('1234'), False)
self.assertEqual(is_valid_otp('1234abcd' * 8), True)
self.assertEqual(is_valid_otp('1234abcZ' * 8), False)
def test_ascii_to_hex(self) -> None:
self.assertEqual(ascii_to_hex('ZcdR1234'), '5a63645231323334')
self.assertEqual(hex_to_ascii('5a63645231323334'), 'ZcdR1234')
def test_otp_encrypt_api_key(self) -> None:
api_key = '12ac' * 8
otp = '7be38894' * 8
result = otp_encrypt_api_key(api_key, otp)
self.assertEqual(result, '4ad1e9f7' * 8)
decrypted = otp_decrypt_api_key(result, otp)
self.assertEqual(decrypted, api_key)
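# Editor's note: a minimal standalone sketch of the scheme the assertions
# above imply; the real helpers live in Zulip's mobile-auth module, and the
# name below is hypothetical. The API key is hex-encoded and then XORed
# digit-wise with an equal-length one-time pad, so applying the same pad
# twice round-trips the key (encryption and decryption are the same XOR).
def _demo_otp_encrypt_api_key(api_key: str, otp: str) -> str:
    key_hex = api_key.encode('utf-8').hex()  # e.g. '12ac' -> '31326163'
    assert len(key_hex) == len(otp)
    return ''.join(format(int(x, 16) ^ int(y, 16), 'x')
                   for x, y in zip(key_hex, otp))
# Consistent with the expected values in the test above:
# _demo_otp_encrypt_api_key('12ac' * 8, '7be38894' * 8) == '4ad1e9f7' * 8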
class FollowupEmailTest(ZulipTestCase):
def test_followup_day2_email(self) -> None:
user_profile = self.example_user('hamlet')
# Test date_joined == Sunday
user_profile.date_joined = datetime.datetime(2018, 1, 7, 1, 0, 0, 0, pytz.UTC)
self.assertEqual(followup_day2_email_delay(user_profile), datetime.timedelta(days=2, hours=-1))
# Test date_joined == Tuesday
user_profile.date_joined = datetime.datetime(2018, 1, 2, 1, 0, 0, 0, pytz.UTC)
self.assertEqual(followup_day2_email_delay(user_profile), datetime.timedelta(days=2, hours=-1))
# Test date_joined == Thursday
user_profile.date_joined = datetime.datetime(2018, 1, 4, 1, 0, 0, 0, pytz.UTC)
self.assertEqual(followup_day2_email_delay(user_profile), datetime.timedelta(days=1, hours=-1))
# Test date_joined == Friday
user_profile.date_joined = datetime.datetime(2018, 1, 5, 1, 0, 0, 0, pytz.UTC)
self.assertEqual(followup_day2_email_delay(user_profile), datetime.timedelta(days=3, hours=-1))
# Time offset of America/Phoenix is -07:00
user_profile.timezone = 'America/Phoenix'
# Test date_joined == Friday in UTC, but Thursday in the user's timezone
user_profile.date_joined = datetime.datetime(2018, 1, 5, 1, 0, 0, 0, pytz.UTC)
self.assertEqual(followup_day2_email_delay(user_profile), datetime.timedelta(days=1, hours=-1))
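# Editor's note: a sketch of the delay rule these assertions encode (an
# assumed implementation of followup_day2_email_delay, for illustration
# only). The day-2 email goes out two days after signup, an hour early,
# shifted so it never lands on a weekend in the user's timezone.
def _demo_followup_day2_email_delay(date_joined: datetime.datetime,
                                    timezone: str = '') -> datetime.timedelta:
    tz = pytz.timezone(timezone or 'UTC')
    weekday = date_joined.astimezone(tz).isoweekday()  # Mon=1 .. Sun=7
    days = 2
    if weekday == 4:    # Thursday: deliver Friday rather than Saturday
        days = 1
    elif weekday == 5:  # Friday: deliver Monday rather than Sunday
        days = 3
    return datetime.timedelta(days=days, hours=-1)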
class NoReplyEmailTest(ZulipTestCase):
def test_noreply_email_address(self) -> None:
self.assertTrue(re.search(self.TOKENIZED_NOREPLY_REGEX, FromAddress.tokenized_no_reply_address()))
with self.settings(ADD_TOKENS_TO_NOREPLY_ADDRESS=False):
self.assertEqual(FromAddress.tokenized_no_reply_address(), "noreply@testserver")
class TwoFactorAuthTest(ZulipTestCase):
@patch('two_factor.models.totp')
def test_two_factor_login(self, mock_totp):
# type: (MagicMock) -> None
token = 123456
email = self.example_email('hamlet')
password = 'testing'
user_profile = self.example_user('hamlet')
user_profile.set_password(password)
user_profile.save()
self.create_default_device(user_profile)
def totp(*args, **kwargs):
# type: (*Any, **Any) -> int
return token
mock_totp.side_effect = totp
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',),
TWO_FACTOR_CALL_GATEWAY='two_factor.gateways.fake.Fake',
TWO_FACTOR_SMS_GATEWAY='two_factor.gateways.fake.Fake',
TWO_FACTOR_AUTHENTICATION_ENABLED=True):
first_step_data = {"username": email,
"password": password,
"two_factor_login_view-current_step": "auth"}
result = self.client_post("/accounts/login/", first_step_data)
self.assertEqual(result.status_code, 200)
second_step_data = {"token-otp_token": str(token),
"two_factor_login_view-current_step": "token"}
result = self.client_post("/accounts/login/", second_step_data)
self.assertEqual(result.status_code, 302)
self.assertEqual(result["Location"], "http://zulip.testserver")
# Going to login page should redirect to '/' if user is already
# logged in.
result = self.client_get('/accounts/login/')
self.assertEqual(result["Location"], "http://zulip.testserver")
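# Editor's note: the test above stubs out token generation entirely, so
# nothing in it depends on real TOTP math. For context, a minimal RFC 6238
# sketch using only the standard library (unrelated to the mocked
# two_factor.models.totp signature):
import hmac
import struct
import time

def _demo_totp(secret: bytes, step: int = 30, digits: int = 6) -> int:
    counter = int(time.time()) // step            # 30-second time window
    digest = hmac.new(secret, struct.pack('>Q', counter), 'sha1').digest()
    offset = digest[-1] & 0x0F                    # dynamic truncation
    code = struct.unpack('>I', digest[offset:offset + 4])[0] & 0x7FFFFFFF
    return code % (10 ** digits)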
class NameRestrictionsTest(ZulipTestCase):
def test_whitelisted_disposable_domains(self) -> None:
self.assertFalse(is_disposable_domain('OPayQ.com'))
class RealmRedirectTest(ZulipTestCase):
def test_realm_redirect_without_next_param(self) -> None:
result = self.client_get("/accounts/go/")
self.assert_in_success_response(["Enter your organization's Zulip URL"], result)
result = self.client_post("/accounts/go/", {"subdomain": "zephyr"})
self.assertEqual(result.status_code, 302)
self.assertEqual(result["Location"], "http://zephyr.testserver")
result = self.client_post("/accounts/go/", {"subdomain": "invalid"})
self.assert_in_success_response(["We couldn't find that Zulip organization."], result)
def test_realm_redirect_with_next_param(self) -> None:
result = self.client_get("/accounts/go/?next=billing")
self.assert_in_success_response(["Enter your organization's Zulip URL", 'action="/accounts/go/?next=billing"'], result)
result = self.client_post("/accounts/go/?next=billing", {"subdomain": "lear"})
self.assertEqual(result.status_code, 302)
self.assertEqual(result["Location"], "http://lear.testserver/billing")
|
jackrzhang/zulip
|
zerver/tests/test_signup.py
|
Python
|
apache-2.0
| 145,030
|
[
"VisIt"
] |
d3e59bfd555fb0af11531a2113d077a0a03966e9734d9251a81a0db20c0962c8
|
############################################################
# $HeadURL$
############################################################
"""
DIRAC.WorkloadManagementSystem.private package
"""
__RCSID__ = "$Id$"
|
avedaee/DIRAC
|
WorkloadManagementSystem/private/__init__.py
|
Python
|
gpl-3.0
| 212
|
[
"DIRAC"
] |
f1ceb5a4ca23071f09ad53d540f697208a752c69bf7566e8629787722705ec4b
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import copy
import uuid
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from nova.compute import flavors
from nova.compute import resource_tracker
from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova import rpc
from nova import test
from nova.tests.unit.compute.monitors import test_monitors
from nova.tests.unit.pci import fakes as pci_fakes
from nova.virt import driver
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
FAKE_VIRT_NUMA_TOPOLOGY = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([3, 4]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([]))])
FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = objects.NUMATopologyLimits(
cpu_allocation_ratio=2, ram_allocation_ratio=2)
ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
class FakeVirtDriver(driver.ComputeDriver):
def __init__(self, pci_support=False, stats=None,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
super(FakeVirtDriver, self).__init__(None)
self.memory_mb = FAKE_VIRT_MEMORY_MB
self.local_gb = FAKE_VIRT_LOCAL_GB
self.vcpus = FAKE_VIRT_VCPUS
self.numa_topology = numa_topology
self.memory_mb_used = 0
self.local_gb_used = 0
self.pci_support = pci_support
self.pci_devices = [
{
'label': 'label_8086_0443',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.1',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0443',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.2',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0443',
'dev_type': 'type-PF',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0123',
'dev_type': 'type-PCI',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0123',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_7891',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '7891',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': None
},
] if self.pci_support else []
self.pci_stats = [
{
'count': 2,
'vendor_id': '8086',
'product_id': '0443',
'numa_node': 1
},
{
'count': 1,
'vendor_id': '8086',
'product_id': '7891',
'numa_node': None
},
] if self.pci_support else []
if stats is not None:
self.stats = stats
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': 0,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
'numa_topology': (
self.numa_topology._to_json() if self.numa_topology else None),
}
if self.pci_support:
d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
if hasattr(self, 'stats'):
d['stats'] = self.stats
return d
def estimate_instance_overhead(self, instance_info):
instance_info['memory_mb'] # make sure memory value is present
overhead = {
'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
}
return overhead # just return a constant value for testing
class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self.context = context.get_admin_context()
self.flags(pci_passthrough_whitelist=[
'{"vendor_id": "8086", "product_id": "0443"}',
'{"vendor_id": "8086", "product_id": "7891"}'])
self.flags(use_local=True, group='conductor')
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self._instances = {}
self._numa_topologies = {}
self._instance_types = {}
self.stubs.Set(self.conductor.db,
'instance_get_all_by_host_and_node',
self._fake_instance_get_all_by_host_and_node)
self.stubs.Set(db, 'instance_extra_get_by_instance_uuid',
self._fake_instance_extra_get_by_instance_uuid)
self.stubs.Set(self.conductor.db,
'instance_update_and_get_original',
self._fake_instance_update_and_get_original)
self.stubs.Set(self.conductor.db,
'flavor_get', self._fake_flavor_get)
self.host = 'fakehost'
self.compute = self._create_compute_node()
self.updated = False
self.deleted = False
self.update_call_count = 0
def _create_compute_node(self, values=None):
compute = {
"id": 1,
"service_id": 1,
"host": "fakehost",
"vcpus": 1,
"memory_mb": 1,
"local_gb": 1,
"vcpus_used": 1,
"memory_mb_used": 1,
"local_gb_used": 1,
"free_ram_mb": 1,
"free_disk_gb": 1,
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
"numa_topology": None,
"stats": '{"num_instances": "1"}',
"hypervisor_hostname": "fakenode",
'hypervisor_version': 1,
'hypervisor_type': 'fake-hyp',
'disk_available_least': None,
'host_ip': None,
'metrics': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
}
if values:
compute.update(values)
return compute
def _create_service(self, host="fakehost", compute=None):
if compute:
compute = [compute]
service = {
"id": 1,
"host": host,
"binary": "nova-compute",
"topic": "compute",
"compute_node": compute,
"report_count": 0,
'disabled': False,
'disabled_reason': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
}
return service
def _fake_instance_system_metadata(self, instance_type, prefix=''):
sys_meta = []
for key in flavors.system_metadata_flavor_props.keys():
sys_meta.append({'key': '%sinstance_type_%s' % (prefix, key),
'value': instance_type[key]})
return sys_meta
def _fake_instance(self, stash=True, flavor=None, **kwargs):
# Default to an instance ready to resize to or from the same
# instance_type
flavor = flavor or self._fake_flavor_create()
sys_meta = self._fake_instance_system_metadata(flavor)
if stash:
# stash instance types in system metadata.
sys_meta = (sys_meta +
self._fake_instance_system_metadata(flavor, 'new_') +
self._fake_instance_system_metadata(flavor, 'old_'))
instance_uuid = str(uuid.uuid1())
instance = {
'uuid': instance_uuid,
'vm_state': vm_states.RESIZED,
'task_state': None,
'ephemeral_key_uuid': None,
'os_type': 'Linux',
'project_id': '123456',
'host': None,
'node': None,
'instance_type_id': flavor['id'],
'memory_mb': flavor['memory_mb'],
'vcpus': flavor['vcpus'],
'root_gb': flavor['root_gb'],
'ephemeral_gb': flavor['ephemeral_gb'],
'launched_on': None,
'system_metadata': sys_meta,
'availability_zone': None,
'vm_mode': None,
'reservation_id': None,
'display_name': None,
'default_swap_device': None,
'power_state': None,
'scheduled_at': None,
'access_ip_v6': None,
'access_ip_v4': None,
'key_name': None,
'updated_at': None,
'cell_name': None,
'locked': None,
'locked_by': None,
'launch_index': None,
'architecture': None,
'auto_disk_config': None,
'terminated_at': None,
'ramdisk_id': None,
'user_data': None,
'cleaned': None,
'deleted_at': None,
'id': 333,
'disable_terminate': None,
'hostname': None,
'display_description': None,
'key_data': None,
'deleted': None,
'default_ephemeral_device': None,
'progress': None,
'launched_at': None,
'config_drive': None,
'kernel_id': None,
'user_id': None,
'shutdown_terminate': None,
'created_at': None,
'image_ref': None,
'root_device_name': None,
}
extra = {
'id': 1, 'created_at': None, 'updated_at': None,
'deleted_at': None, 'deleted': None,
'instance_uuid': instance['uuid'],
'numa_topology': None,
'pci_requests': None,
}
numa_topology = kwargs.pop('numa_topology', None)
if numa_topology:
extra['numa_topology'] = numa_topology._to_json()
instance.update(kwargs)
instance['extra'] = extra
self._instances[instance_uuid] = instance
self._numa_topologies[instance_uuid] = extra
return instance
def _fake_flavor_create(self, **kwargs):
instance_type = {
'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'disabled': False,
'is_public': True,
'name': 'fakeitype',
'memory_mb': FAKE_VIRT_MEMORY_MB,
'vcpus': FAKE_VIRT_VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'flavorid': 'fakeflavor',
'extra_specs': {},
}
instance_type.update(**kwargs)
id_ = instance_type['id']
self._instance_types[id_] = instance_type
return instance_type
def _fake_instance_get_all_by_host_and_node(self, context, host, nodename,
columns_to_join=None):
return [i for i in self._instances.values() if i['host'] == host]
def _fake_instance_extra_get_by_instance_uuid(self, context,
instance_uuid, columns=None):
return self._numa_topologies.get(instance_uuid)
def _fake_flavor_get(self, ctxt, id_):
return self._instance_types[id_]
def _fake_instance_update_and_get_original(self, context, instance_uuid,
values, columns_to_join=None):
instance = self._instances[instance_uuid]
instance.update(values)
# the test doesn't care what the original instance values are, it's
# only used in the subsequent notification:
return (instance, instance)
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _driver(self):
return FakeVirtDriver()
def _tracker(self, host=None):
if host is None:
host = self.host
node = "fakenode"
driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
tracker.compute_node = self._create_compute_node()
tracker.ext_resources_handler = \
resources.ResourceHandler(RESOURCE_NAMES, True)
return tracker
class UnsupportedDriverTestCase(BaseTestCase):
"""Resource tracking should be disabled when the virt driver doesn't
support it.
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
self.tracker = self._tracker()
# seed tracker with data:
self.tracker.update_available_resource(self.context)
def _driver(self):
return UnsupportedVirtDriver()
def test_disabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
self.assertIsNone(self.tracker.compute_node)
def test_disabled_claim(self):
# basic claim:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_claim(self):
# instance variation:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_context_claim(self):
# instance context manager variation:
instance = self._fake_instance()
self.tracker.instance_claim(self.context, instance)
with self.tracker.instance_claim(self.context, instance) as claim:
self.assertEqual(0, claim.memory_mb)
def test_disabled_updated_usage(self):
instance = self._fake_instance(host='fakehost', memory_mb=5,
root_gb=10)
self.tracker.update_usage(self.context, instance)
def test_disabled_resize_claim(self):
instance = self._fake_instance()
instance_type = self._fake_flavor_create()
claim = self.tracker.resize_claim(self.context, instance,
instance_type)
self.assertEqual(0, claim.memory_mb)
self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
self.assertEqual(instance_type['id'],
claim.migration['new_instance_type_id'])
def test_disabled_resize_context_claim(self):
instance = self._fake_instance()
instance_type = self._fake_flavor_create()
with self.tracker.resize_claim(self.context, instance, instance_type) \
as claim:
self.assertEqual(0, claim.memory_mb)
class MissingServiceTestCase(BaseTestCase):
def setUp(self):
super(MissingServiceTestCase, self).setUp()
self.context = context.get_admin_context()
self.tracker = self._tracker()
def test_missing_service(self):
self.tracker.compute_node = None
self.tracker._get_service = mock.Mock(return_value=None)
self.tracker.update_available_resource(self.context)
self.assertTrue(self.tracker.disabled)
class MissingComputeNodeTestCase(BaseTestCase):
def setUp(self):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stubs.Set(db, 'compute_node_create',
self._fake_create_compute_node)
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def _fake_create_compute_node(self, context, values):
self.created = True
return self._create_compute_node(values)
def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
return service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
# return no compute node
raise exception.ComputeHostNotFound(host=host)
def test_create_compute_node(self):
self.tracker.compute_node = None
self.tracker.update_available_resource(self.context)
self.assertTrue(self.created)
def test_enabled(self):
self.tracker.update_available_resource(self.context)
self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):
def setUp(self):
# setup plumbing for a working resource tracker with required
# database models and a compatible compute driver:
super(BaseTrackerTestCase, self).setUp()
self.tracker = self._tracker()
self._migrations = {}
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
self.stubs.Set(db, 'compute_node_delete',
self._fake_compute_node_delete)
self.stubs.Set(db, 'migration_update',
self._fake_migration_update)
self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
# Note that this must be called before the call to _init_tracker()
patcher = pci_fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self.stubs.Set(self.tracker.scheduler_client, 'update_resource_stats',
self._fake_compute_node_update)
self._init_tracker()
self.limits = self._limits()
def _fake_service_get_by_compute_host(self, ctx, host):
self.service = self._create_service(host, compute=self.compute)
return self.service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
self.compute = self._create_compute_node()
return self.compute
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _fake_compute_node_delete(self, ctx, compute_node_id):
self.deleted = True
self.compute.update({'deleted': 1})
return self.compute
def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
node):
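        # Migrations whose status is in the list below are already finished,
        # so they are filtered out and only in-progress ones are returned.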
status = ['confirmed', 'reverted', 'error']
migrations = []
for migration in self._migrations.values():
migration = obj_base.obj_to_primitive(migration)
if migration['status'] in status:
continue
uuid = migration['instance_uuid']
migration['instance'] = self._instances[uuid]
migrations.append(migration)
return migrations
def _fake_migration_update(self, ctxt, migration_id, values):
# cheat and assume there's only 1 migration present
        migration = list(self._migrations.values())[0]
migration.update(values)
return migration
def _init_tracker(self):
self.tracker.update_available_resource(self.context)
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
disk_gb=FAKE_VIRT_LOCAL_GB,
vcpus=FAKE_VIRT_VCPUS,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD):
"""Create limits dictionary used for oversubscribing resources."""
return {
'memory_mb': memory_mb,
'disk_gb': disk_gb,
'vcpu': vcpus,
'numa_topology': numa_topology,
}
def assertEqualNUMAHostTopology(self, expected, got):
attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
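        # Comparison is structural: two distinct topology objects are equal
        # when their cell counts match and every cell pair agrees on the
        # attrs above; object identity is irrelevant.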
if None in (expected, got):
if expected != got:
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
else:
return
if len(expected) != len(got):
raise AssertionError("Topologies don't match due to different "
"number of cells. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
for exp_cell, got_cell in zip(expected.cells, got.cells):
for attr in attrs:
if getattr(exp_cell, attr) != getattr(got_cell, attr):
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
def _assert(self, value, field, tracker=None):
if tracker is None:
tracker = self.tracker
if field not in tracker.compute_node:
raise test.TestingException(
"'%(field)s' not in compute node." % {'field': field})
x = tracker.compute_node[field]
if field == 'numa_topology':
self.assertEqualNUMAHostTopology(
value, objects.NUMATopology.obj_from_db_obj(x))
else:
self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb'])
def test_free_disk_resource_value(self):
driver = FakeVirtDriver()
        disk_free = driver.local_gb - driver.local_gb_used
        self.assertEqual(disk_free, self.tracker.compute_node['free_disk_gb'])
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
self.assertEqual(driver.pci_stats,
self.tracker.compute_node['pci_device_pools'])
class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
def setUp(self):
super(SchedulerClientTrackerTestCase, self).setUp()
self.tracker.scheduler_client.update_resource_stats = mock.Mock(
side_effect=self._fake_compute_node_update)
def test_update_resource(self):
# change a compute node value to simulate a change
self.tracker.compute_node['local_gb_used'] += 1
expected = copy.deepcopy(self.tracker.compute_node)
self.tracker._update(self.context)
self.tracker.scheduler_client.update_resource_stats.\
assert_called_once_with(self.context,
("fakehost", "fakenode"),
expected)
def test_no_update_resource(self):
self.tracker._update(self.context)
update = self.tracker.scheduler_client.update_resource_stats
self.assertFalse(update.called, "update_resource_stats should not be "
"called when there is no change")
class TrackerPciStatsTestCase(BaseTrackerTestCase):
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
# NOTE(danms): PciDeviceStats only supports iteration, so we have to
# listify it before we can examine the contents by index.
pools = list(self.tracker.compute_node['pci_device_pools'])
self.assertEqual(driver.pci_stats[0]['product_id'],
pools[0]['product_id'])
def _driver(self):
return FakeVirtDriver(pci_support=True)
class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
def setUp(self):
super(TrackerExtraResourcesTestCase, self).setUp()
self.driver = self._driver()
def _driver(self):
return FakeVirtDriver()
def test_set_empty_ext_resources(self):
resources = self.driver.get_available_resource(self.tracker.nodename)
self.assertNotIn('stats', resources)
self.tracker._write_ext_resources(resources)
self.assertIn('stats', resources)
def test_set_extra_resources(self):
def fake_write_resources(resources):
resources['stats']['resA'] = '123'
resources['stats']['resB'] = 12
self.stubs.Set(self.tracker.ext_resources_handler,
'write_resources',
fake_write_resources)
resources = self.driver.get_available_resource(self.tracker.nodename)
self.tracker._write_ext_resources(resources)
expected = {"resA": "123", "resB": 12}
self.assertEqual(sorted(expected),
sorted(resources['stats']))
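        # NOTE: sorted() over a dict iterates its keys, so the assertion
        # above verifies that the stats key sets match; the values are not
        # compared here.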
class InstanceClaimTestCase(BaseTrackerTestCase):
def _instance_topology(self, mem):
mem = mem * 1024
return objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=0, cpuset=set([1]), memory=mem),
objects.InstanceNUMACell(
id=1, cpuset=set([3]), memory=mem)])
def _claim_topology(self, mem, cpus=1):
if self.tracker.driver.numa_topology is None:
return None
mem = mem * 1024
return objects.NUMATopology(
cells=[objects.NUMACell(
id=0, cpuset=set([1, 2]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([])),
objects.NUMACell(
id=1, cpuset=set([3, 4]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([]))])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_usage_only_for_tracked(self, mock_get):
flavor = self._fake_flavor_create()
claim_mem = flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD
claim_gb = flavor['root_gb'] + flavor['ephemeral_gb']
claim_topology = self._claim_topology(claim_mem / 2)
instance_topology = self._instance_topology(claim_mem / 2)
instance = self._fake_instance(
flavor=flavor, task_state=None,
numa_topology=instance_topology)
self.tracker.update_usage(self.context, instance)
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'current_workload')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertNotEqual(0, claim.memory_mb)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(claim_topology, 'numa_topology')
# now update should actually take effect
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(claim_topology, 'numa_topology')
self._assert(1, 'current_workload')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_and_abort(self, mock_get):
claim_mem = 3
claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
claim_disk = 2
claim_topology = self._claim_topology(claim_mem_total / 2)
instance_topology = self._instance_topology(claim_mem_total / 2)
instance = self._fake_instance(memory_mb=claim_mem,
root_gb=claim_disk, ephemeral_gb=0,
numa_topology=instance_topology)
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertIsNotNone(claim)
self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
claim_topology, objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute["free_disk_gb"])
claim.abort()
self.assertEqual(0, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(0, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["free_disk_gb"])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_claim_with_oversubscription(self, mock_get):
memory_mb = FAKE_VIRT_MEMORY_MB * 2
root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
vcpus = FAKE_VIRT_VCPUS * 2
claim_topology = self._claim_topology(3)
instance_topology = self._instance_topology(3)
limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
'disk_gb': root_gb * 2,
'vcpu': vcpus,
'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD}
instance = self._fake_instance(memory_mb=memory_mb,
root_gb=root_gb, ephemeral_gb=ephemeral_gb,
numa_topology=instance_topology)
self.tracker.instance_claim(self.context, instance, limits)
self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(root_gb * 2,
self.tracker.compute_node['local_gb_used'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get):
self.limits['vcpu'] = 2
claim_topology = self._claim_topology(2, cpus=2)
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=1, ephemeral_gb=0)
instance_topology = self._instance_topology(1)
instance = self._fake_instance(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
instance = self._fake_instance(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
self.tracker.compute_node['local_gb_used'])
self.assertEqual(2 * flavor['vcpus'],
self.tracker.compute_node['vcpus_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_context_claim_with_exception(self, mock_get):
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1)
try:
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
raise test.TestingException()
except test.TestingException:
pass
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_context_claim(self, mock_get):
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=2, ephemeral_gb=3)
claim_topology = self._claim_topology(1)
instance_topology = self._instance_topology(1)
instance = self._fake_instance(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node['local_gb_used'])
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
        # after exiting the claim context, the build is marked as finished.
        # usage totals should remain the same:
self.tracker.update_available_resource(self.context)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node['local_gb_used'])
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_load_stats_for_instance(self, mock_get):
instance = self._fake_instance(task_state=task_states.SCHEDULING)
with self.tracker.instance_claim(self.context, instance):
pass
self.assertEqual(1, self.tracker.compute_node['current_workload'])
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
instance['host'] = 'fakehost'
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_cpu_stats(self, mock_get):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
vcpus = 1
instance = self._fake_instance(vcpus=vcpus)
# should not do anything until a claim is made:
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
# instance state can change without modifying vcpus in use:
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
add_vcpus = 10
vcpus += add_vcpus
instance = self._fake_instance(vcpus=add_vcpus)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
vcpus -= add_vcpus
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
def test_skip_deleted_instances(self):
# ensure that the audit process skips instances that have vm_state
# DELETED, but the DB record is not yet deleted.
self._fake_instance(vm_state=vm_states.DELETED, host=self.host)
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_deleted_instances_with_migrations(self, mock_migration_list):
migration = objects.Migration(context=self.context,
instance_uuid='invalid')
mock_migration_list.return_value = [migration]
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
class ResizeClaimTestCase(BaseTrackerTestCase):
def setUp(self):
super(ResizeClaimTestCase, self).setUp()
self.instance = self._fake_instance()
self.instance_type = self._fake_flavor_create()
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_abort(self, mock_get):
try:
with self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits):
raise test.TestingException("abort")
except test.TestingException:
pass
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self.assertEqual(0, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get):
limits = self._limits(
2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
2 * FAKE_VIRT_LOCAL_GB,
2 * FAKE_VIRT_VCPUS)
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, limits)
instance2 = self._fake_instance()
self.tracker.resize_claim(self.context, instance2, self.instance_type,
limits)
self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_revert(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, {}, self.limits)
self.tracker.drop_resize_claim(self.context, self.instance)
self.assertEqual(0, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
def test_resize_filter(self):
instance = self._fake_instance(vm_state=vm_states.ACTIVE,
task_state=task_states.SUSPENDING)
self.assertFalse(self.tracker._instance_in_resize_state(instance))
instance = self._fake_instance(vm_state=vm_states.RESIZED,
task_state=task_states.SUSPENDING)
self.assertTrue(self.tracker._instance_in_resize_state(instance))
states = [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING,
task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH]
for vm_state in [vm_states.ACTIVE, vm_states.STOPPED]:
for task_state in states:
instance = self._fake_instance(vm_state=vm_state,
task_state=task_state)
result = self.tracker._instance_in_resize_state(instance)
self.assertTrue(result)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_set_instance_host_and_node(self, mock_get):
instance = self._fake_instance()
self.assertIsNone(instance['host'])
self.assertIsNone(instance['launched_on'])
self.assertIsNone(instance['node'])
claim = self.tracker.instance_claim(self.context, instance)
self.assertNotEqual(0, claim.memory_mb)
self.assertEqual('fakehost', instance['host'])
self.assertEqual('fakehost', instance['launched_on'])
self.assertEqual('fakenode', instance['node'])
class NoInstanceTypesInSysMetadata(ResizeClaimTestCase):
"""Make sure we handle the case where the following are true:
    1) Compute node C gets upgraded to code that looks for instance types in
       system metadata. AND
    2) C already has instances in the process of migrating that do not have
       stashed instance types.
bug 1164110
"""
def setUp(self):
super(NoInstanceTypesInSysMetadata, self).setUp()
self.instance = self._fake_instance(stash=False)
def test_get_instance_type_stash_false(self):
with (mock.patch.object(objects.Flavor, 'get_by_id',
return_value=self.instance_type)):
flavor = self.tracker._get_instance_type(self.context,
self.instance, "new_")
self.assertEqual(self.instance_type, flavor)
class OrphanTestCase(BaseTrackerTestCase):
def _driver(self):
class OrphanVirtDriver(FakeVirtDriver):
def get_per_instance_usage(self):
return {
'1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '1-2-3-4-5'},
'2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '2-3-4-5-6'},
}
return OrphanVirtDriver()
def test_usage(self):
self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
def test_find(self):
# create one legit instance and verify the 2 orphans remain
self._fake_instance()
orphans = self.tracker._find_orphaned_instances()
self.assertEqual(2, len(orphans))
class ComputeMonitorTestCase(BaseTestCase):
def setUp(self):
super(ComputeMonitorTestCase, self).setUp()
fake_monitors = [
'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass1',
'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass2']
self.flags(compute_available_monitors=fake_monitors)
self.tracker = self._tracker()
self.node_name = 'nodename'
self.user_id = 'fake'
self.project_id = 'fake'
self.info = {}
self.context = context.RequestContext(self.user_id,
self.project_id)
def test_get_host_metrics_none(self):
        self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
self.tracker.monitors = []
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertEqual(len(metrics), 0)
def test_get_host_metrics_one_failed(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
class4 = test_monitors.FakeMonitorClass4(self.tracker)
self.tracker.monitors = [class1, class4]
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
        self.assertGreater(len(metrics), 0)
@mock.patch.object(resource_tracker.LOG, 'warning')
def test_get_host_metrics_exception(self, mock_LOG_warning):
        self.flags(compute_monitors=['FakeMonitorClass1'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
self.tracker.monitors = [class1]
with mock.patch.object(class1, 'get_metrics',
side_effect=test.TestingException()):
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_LOG_warning.assert_called_once_with(
u'Cannot get the metrics from %s.', class1)
self.assertEqual(0, len(metrics))
def test_get_host_metrics(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass2'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
class2 = test_monitors.FakeMonitorClass2(self.tracker)
self.tracker.monitors = [class1, class2]
mock_notifier = mock.Mock()
with mock.patch.object(rpc, 'get_notifier',
return_value=mock_notifier) as mock_get:
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_get.assert_called_once_with(service='compute',
host=self.node_name)
expected_metrics = [{
'timestamp': 1232,
'name': 'key1',
'value': 2600,
'source': 'libvirt'
}, {
'name': 'key2',
'source': 'libvirt',
'timestamp': 123,
'value': 1600
}]
payload = {
'metrics': expected_metrics,
'host': self.tracker.host,
'host_ip': CONF.my_ip,
'nodename': self.node_name
}
mock_notifier.info.assert_called_once_with(
self.context, 'compute.metrics.update', payload)
self.assertEqual(metrics, expected_metrics)
class TrackerPeriodicTestCase(BaseTrackerTestCase):
def test_periodic_status_update(self):
# verify update called on instantiation
self.assertEqual(1, self.update_call_count)
# verify update not called if no change to resources
self.tracker.update_available_resource(self.context)
self.assertEqual(1, self.update_call_count)
# verify update is called when resources change
driver = self.tracker.driver
driver.memory_mb += 1
self.tracker.update_available_resource(self.context)
self.assertEqual(2, self.update_call_count)
def test_update_available_resource_calls_locked_inner(self):
@mock.patch.object(self.tracker, 'driver')
@mock.patch.object(self.tracker,
'_update_available_resource')
@mock.patch.object(self.tracker, '_verify_resources')
@mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
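            # Stacked mock.patch decorators apply bottom-up: the innermost
            # decorator (_report_hypervisor_resource_view) maps to the first
            # argument (mock_rhrv) and the outermost (driver) to the last.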
resources = {'there is someone in my head': 'but it\'s not me'}
mock_driver.get_available_resource.return_value = resources
self.tracker.update_available_resource(self.context)
mock_uar.assert_called_once_with(self.context, resources)
_test()
class StatsDictTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a dictionary.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS)
def _get_stats(self):
return jsonutils.loads(self.tracker.compute_node['stats'])
def test_virt_stats(self):
# start with virt driver stats
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
# adding an instance should keep virt driver stats
self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
expected_stats = {}
expected_stats.update(FAKE_VIRT_STATS)
expected_stats.update(self.tracker.stats)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
class StatsJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a json string.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON)
def _get_stats(self):
return jsonutils.loads(self.tracker.compute_node['stats'])
def test_virt_stats(self):
# start with virt driver stats
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
# adding an instance should keep virt driver stats
# and add rt stats
self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
expected_stats = {}
expected_stats.update(FAKE_VIRT_STATS)
expected_stats.update(self.tracker.stats)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
class StatsInvalidJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
    stats as an invalid JSON string.
"""
def _driver(self):
return FakeVirtDriver(stats='this is not json')
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for string that does not parse as json
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
class StatsInvalidTypeTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats=10)
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for incorrect stats value type
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
|
bgxavier/nova
|
nova/tests/unit/compute/test_resource_tracker.py
|
Python
|
apache-2.0
| 57,477
|
[
"exciting"
] |
282cfe5ff93d918306143e0467dc73a61c1ec1b7369e79ed5e538128e823816e
|
'''
compile_test.py - check pyximport
=================================
test script for checking if compilation against
pysam and tabix works.
'''
# clean up previous compilation
import os
try:
os.unlink('_compile_test.c')
os.unlink('_compile_test.pyxbldc')
except OSError:
pass
import pyximport
pyximport.install(build_in_temp=False)
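# pyximport compiles _compile_test.pyx on first import; with
# build_in_temp=False the generated C file is written next to the source,
# which is why the stale _compile_test.c artifact is removed above.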
import _compile_test
import unittest
import pysam
class BAMTest(unittest.TestCase):
input_filename = "pysam_data/ex1.bam"
def testCount(self):
nread = _compile_test.testCountBAM(
pysam.Samfile(self.input_filename))
self.assertEqual(nread, 3270)
class GTFTest(unittest.TestCase):
input_filename = "tabix_data/example.gtf.gz"
def testCount(self):
nread = _compile_test.testCountGTF(
pysam.Tabixfile(self.input_filename))
self.assertEqual(nread, 237)
if __name__ == "__main__":
unittest.main()
|
daler/pysam
|
tests/compile_test.py
|
Python
|
mit
| 928
|
[
"pysam"
] |
6c4520ae597c34e6b4a78cbd0fc33a73cca2bc7daab43faef16e7242657864a7
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflowcx_v3beta1.services.security_settings_service import (
SecuritySettingsServiceAsyncClient,
)
from google.cloud.dialogflowcx_v3beta1.services.security_settings_service import (
SecuritySettingsServiceClient,
)
from google.cloud.dialogflowcx_v3beta1.services.security_settings_service import pagers
from google.cloud.dialogflowcx_v3beta1.services.security_settings_service import (
transports,
)
from google.cloud.dialogflowcx_v3beta1.types import security_settings
from google.cloud.dialogflowcx_v3beta1.types import (
security_settings as gcdc_security_settings,
)
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
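# For example, a client whose DEFAULT_ENDPOINT is "localhost:7469" (a
# hypothetical test endpoint) is rewritten to "foo.googleapis.com", while a
# production endpoint such as "dialogflow.googleapis.com" passes through
# unchanged.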
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert SecuritySettingsServiceClient._get_default_mtls_endpoint(None) is None
assert (
SecuritySettingsServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
SecuritySettingsServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
SecuritySettingsServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
SecuritySettingsServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
SecuritySettingsServiceClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
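# Per the assertions above, _get_default_mtls_endpoint inserts "mtls." after
# the leading label of *.googleapis.com hosts (including sandbox variants),
# leaves already-mtls and non-googleapis hosts untouched, and maps None to
# None.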
@pytest.mark.parametrize(
"client_class", [SecuritySettingsServiceClient, SecuritySettingsServiceAsyncClient,]
)
def test_security_settings_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.SecuritySettingsServiceGrpcTransport, "grpc"),
(transports.SecuritySettingsServiceGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_security_settings_service_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class", [SecuritySettingsServiceClient, SecuritySettingsServiceAsyncClient,]
)
def test_security_settings_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_security_settings_service_client_get_transport_class():
transport = SecuritySettingsServiceClient.get_transport_class()
available_transports = [
transports.SecuritySettingsServiceGrpcTransport,
]
assert transport in available_transports
transport = SecuritySettingsServiceClient.get_transport_class("grpc")
assert transport == transports.SecuritySettingsServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
SecuritySettingsServiceClient,
transports.SecuritySettingsServiceGrpcTransport,
"grpc",
),
(
SecuritySettingsServiceAsyncClient,
transports.SecuritySettingsServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
SecuritySettingsServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SecuritySettingsServiceClient),
)
@mock.patch.object(
SecuritySettingsServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SecuritySettingsServiceAsyncClient),
)
def test_security_settings_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(SecuritySettingsServiceClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(SecuritySettingsServiceClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
SecuritySettingsServiceClient,
transports.SecuritySettingsServiceGrpcTransport,
"grpc",
"true",
),
(
SecuritySettingsServiceAsyncClient,
transports.SecuritySettingsServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
SecuritySettingsServiceClient,
transports.SecuritySettingsServiceGrpcTransport,
"grpc",
"false",
),
(
SecuritySettingsServiceAsyncClient,
transports.SecuritySettingsServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
SecuritySettingsServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SecuritySettingsServiceClient),
)
@mock.patch.object(
SecuritySettingsServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SecuritySettingsServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_security_settings_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
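    # In short, per the branches below: with a cert source available, env
    # "true" selects the mtls endpoint and that cert source, while env
    # "false" selects the default endpoint with no cert.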
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class", [SecuritySettingsServiceClient, SecuritySettingsServiceAsyncClient]
)
@mock.patch.object(
SecuritySettingsServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SecuritySettingsServiceClient),
)
@mock.patch.object(
SecuritySettingsServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SecuritySettingsServiceAsyncClient),
)
def test_security_settings_service_client_get_mtls_endpoint_and_cert_source(
client_class,
):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
SecuritySettingsServiceClient,
transports.SecuritySettingsServiceGrpcTransport,
"grpc",
),
(
SecuritySettingsServiceAsyncClient,
transports.SecuritySettingsServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_security_settings_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
SecuritySettingsServiceClient,
transports.SecuritySettingsServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
SecuritySettingsServiceAsyncClient,
transports.SecuritySettingsServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_security_settings_service_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_security_settings_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.dialogflowcx_v3beta1.services.security_settings_service.transports.SecuritySettingsServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = SecuritySettingsServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
SecuritySettingsServiceClient,
transports.SecuritySettingsServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
SecuritySettingsServiceAsyncClient,
transports.SecuritySettingsServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_security_settings_service_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=None,
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"request_type", [gcdc_security_settings.CreateSecuritySettingsRequest, dict,]
)
def test_create_security_settings(request_type, transport: str = "grpc"):
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_security_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_security_settings.SecuritySettings(
name="name_value",
display_name="display_name_value",
redaction_strategy=gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE,
redaction_scope=gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE,
inspect_template="inspect_template_value",
deidentify_template="deidentify_template_value",
purge_data_types=[
gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY
],
retention_window_days=2271,
)
response = client.create_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcdc_security_settings.CreateSecuritySettingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcdc_security_settings.SecuritySettings)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert (
response.redaction_strategy
== gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE
)
assert (
response.redaction_scope
== gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE
)
assert response.inspect_template == "inspect_template_value"
assert response.deidentify_template == "deidentify_template_value"
assert response.purge_data_types == [
gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY
]
def test_create_security_settings_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_security_settings), "__call__"
) as call:
client.create_security_settings()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcdc_security_settings.CreateSecuritySettingsRequest()
@pytest.mark.asyncio
async def test_create_security_settings_async(
transport: str = "grpc_asyncio",
request_type=gcdc_security_settings.CreateSecuritySettingsRequest,
):
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_security_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_security_settings.SecuritySettings(
name="name_value",
display_name="display_name_value",
redaction_strategy=gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE,
redaction_scope=gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE,
inspect_template="inspect_template_value",
deidentify_template="deidentify_template_value",
purge_data_types=[
gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY
],
)
)
response = await client.create_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcdc_security_settings.CreateSecuritySettingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcdc_security_settings.SecuritySettings)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert (
response.redaction_strategy
== gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE
)
assert (
response.redaction_scope
== gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE
)
assert response.inspect_template == "inspect_template_value"
assert response.deidentify_template == "deidentify_template_value"
assert response.purge_data_types == [
gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY
]
@pytest.mark.asyncio
async def test_create_security_settings_async_from_dict():
await test_create_security_settings_async(request_type=dict)
def test_create_security_settings_field_headers():
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_security_settings.CreateSecuritySettingsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_security_settings), "__call__"
) as call:
call.return_value = gcdc_security_settings.SecuritySettings()
client.create_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_security_settings_field_headers_async():
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_security_settings.CreateSecuritySettingsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_security_settings), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_security_settings.SecuritySettings()
)
await client.create_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_security_settings_flattened():
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_security_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_security_settings.SecuritySettings()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_security_settings(
parent="parent_value",
security_settings=gcdc_security_settings.SecuritySettings(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].security_settings
mock_val = gcdc_security_settings.SecuritySettings(name="name_value")
assert arg == mock_val
def test_create_security_settings_flattened_error():
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_security_settings(
gcdc_security_settings.CreateSecuritySettingsRequest(),
parent="parent_value",
security_settings=gcdc_security_settings.SecuritySettings(
name="name_value"
),
)
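# Flattened keyword arguments and an explicit request object are mutually
# exclusive by design: the client cannot know which of the two should win,
# so it raises ValueError instead of silently merging them.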
@pytest.mark.asyncio
async def test_create_security_settings_flattened_async():
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_security_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcdc_security_settings.SecuritySettings()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_security_settings(
parent="parent_value",
security_settings=gcdc_security_settings.SecuritySettings(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].security_settings
mock_val = gcdc_security_settings.SecuritySettings(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_security_settings_flattened_error_async():
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_security_settings(
gcdc_security_settings.CreateSecuritySettingsRequest(),
parent="parent_value",
security_settings=gcdc_security_settings.SecuritySettings(
name="name_value"
),
)
@pytest.mark.parametrize(
"request_type", [security_settings.GetSecuritySettingsRequest, dict,]
)
def test_get_security_settings(request_type, transport: str = "grpc"):
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_security_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = security_settings.SecuritySettings(
name="name_value",
display_name="display_name_value",
redaction_strategy=security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE,
redaction_scope=security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE,
inspect_template="inspect_template_value",
deidentify_template="deidentify_template_value",
purge_data_types=[
security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY
],
retention_window_days=2271,
)
response = client.get_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == security_settings.GetSecuritySettingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, security_settings.SecuritySettings)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert (
response.redaction_strategy
== security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE
)
assert (
response.redaction_scope
== security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE
)
assert response.inspect_template == "inspect_template_value"
assert response.deidentify_template == "deidentify_template_value"
assert response.purge_data_types == [
security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY
]
def test_get_security_settings_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_security_settings), "__call__"
) as call:
client.get_security_settings()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == security_settings.GetSecuritySettingsRequest()
@pytest.mark.asyncio
async def test_get_security_settings_async(
transport: str = "grpc_asyncio",
request_type=security_settings.GetSecuritySettingsRequest,
):
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_security_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
security_settings.SecuritySettings(
name="name_value",
display_name="display_name_value",
redaction_strategy=security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE,
redaction_scope=security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE,
inspect_template="inspect_template_value",
deidentify_template="deidentify_template_value",
purge_data_types=[
security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY
],
)
)
response = await client.get_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == security_settings.GetSecuritySettingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, security_settings.SecuritySettings)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert (
response.redaction_strategy
== security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE
)
assert (
response.redaction_scope
== security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE
)
assert response.inspect_template == "inspect_template_value"
assert response.deidentify_template == "deidentify_template_value"
assert response.purge_data_types == [
security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY
]
@pytest.mark.asyncio
async def test_get_security_settings_async_from_dict():
await test_get_security_settings_async(request_type=dict)
def test_get_security_settings_field_headers():
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = security_settings.GetSecuritySettingsRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_security_settings), "__call__"
) as call:
call.return_value = security_settings.SecuritySettings()
client.get_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_security_settings_field_headers_async():
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = security_settings.GetSecuritySettingsRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_security_settings), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
security_settings.SecuritySettings()
)
await client.get_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_security_settings_flattened():
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_security_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = security_settings.SecuritySettings()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_security_settings(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_security_settings_flattened_error():
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_security_settings(
security_settings.GetSecuritySettingsRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_security_settings_flattened_async():
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_security_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            security_settings.SecuritySettings()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_security_settings(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_security_settings_flattened_error_async():
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_security_settings(
security_settings.GetSecuritySettingsRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [gcdc_security_settings.UpdateSecuritySettingsRequest, dict,]
)
def test_update_security_settings(request_type, transport: str = "grpc"):
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_security_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_security_settings.SecuritySettings(
name="name_value",
display_name="display_name_value",
redaction_strategy=gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE,
redaction_scope=gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE,
inspect_template="inspect_template_value",
deidentify_template="deidentify_template_value",
purge_data_types=[
gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY
],
retention_window_days=2271,
)
response = client.update_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcdc_security_settings.UpdateSecuritySettingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcdc_security_settings.SecuritySettings)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert (
response.redaction_strategy
== gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE
)
assert (
response.redaction_scope
== gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE
)
assert response.inspect_template == "inspect_template_value"
assert response.deidentify_template == "deidentify_template_value"
assert response.purge_data_types == [
gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY
]
def test_update_security_settings_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_security_settings), "__call__"
) as call:
client.update_security_settings()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcdc_security_settings.UpdateSecuritySettingsRequest()
@pytest.mark.asyncio
async def test_update_security_settings_async(
transport: str = "grpc_asyncio",
request_type=gcdc_security_settings.UpdateSecuritySettingsRequest,
):
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_security_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_security_settings.SecuritySettings(
name="name_value",
display_name="display_name_value",
redaction_strategy=gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE,
redaction_scope=gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE,
inspect_template="inspect_template_value",
deidentify_template="deidentify_template_value",
purge_data_types=[
gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY
],
)
)
response = await client.update_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcdc_security_settings.UpdateSecuritySettingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcdc_security_settings.SecuritySettings)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert (
response.redaction_strategy
== gcdc_security_settings.SecuritySettings.RedactionStrategy.REDACT_WITH_SERVICE
)
assert (
response.redaction_scope
== gcdc_security_settings.SecuritySettings.RedactionScope.REDACT_DISK_STORAGE
)
assert response.inspect_template == "inspect_template_value"
assert response.deidentify_template == "deidentify_template_value"
assert response.purge_data_types == [
gcdc_security_settings.SecuritySettings.PurgeDataType.DIALOGFLOW_HISTORY
]
@pytest.mark.asyncio
async def test_update_security_settings_async_from_dict():
await test_update_security_settings_async(request_type=dict)
def test_update_security_settings_field_headers():
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_security_settings.UpdateSecuritySettingsRequest()
request.security_settings.name = "security_settings.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_security_settings), "__call__"
) as call:
call.return_value = gcdc_security_settings.SecuritySettings()
client.update_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"security_settings.name=security_settings.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_security_settings_field_headers_async():
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_security_settings.UpdateSecuritySettingsRequest()
request.security_settings.name = "security_settings.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_security_settings), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_security_settings.SecuritySettings()
)
await client.update_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"security_settings.name=security_settings.name/value",
) in kw["metadata"]
def test_update_security_settings_flattened():
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_security_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_security_settings.SecuritySettings()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_security_settings(
security_settings=gcdc_security_settings.SecuritySettings(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].security_settings
mock_val = gcdc_security_settings.SecuritySettings(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_security_settings_flattened_error():
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_security_settings(
gcdc_security_settings.UpdateSecuritySettingsRequest(),
security_settings=gcdc_security_settings.SecuritySettings(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_security_settings_flattened_async():
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_security_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcdc_security_settings.SecuritySettings()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_security_settings(
security_settings=gcdc_security_settings.SecuritySettings(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].security_settings
mock_val = gcdc_security_settings.SecuritySettings(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_security_settings_flattened_error_async():
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_security_settings(
gcdc_security_settings.UpdateSecuritySettingsRequest(),
security_settings=gcdc_security_settings.SecuritySettings(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize(
"request_type", [security_settings.ListSecuritySettingsRequest, dict,]
)
def test_list_security_settings(request_type, transport: str = "grpc"):
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_security_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = security_settings.ListSecuritySettingsResponse(
next_page_token="next_page_token_value",
)
response = client.list_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == security_settings.ListSecuritySettingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListSecuritySettingsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_security_settings_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_security_settings), "__call__"
) as call:
client.list_security_settings()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == security_settings.ListSecuritySettingsRequest()
@pytest.mark.asyncio
async def test_list_security_settings_async(
transport: str = "grpc_asyncio",
request_type=security_settings.ListSecuritySettingsRequest,
):
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_security_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
security_settings.ListSecuritySettingsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == security_settings.ListSecuritySettingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListSecuritySettingsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_security_settings_async_from_dict():
await test_list_security_settings_async(request_type=dict)
def test_list_security_settings_field_headers():
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = security_settings.ListSecuritySettingsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_security_settings), "__call__"
) as call:
call.return_value = security_settings.ListSecuritySettingsResponse()
client.list_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_security_settings_field_headers_async():
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = security_settings.ListSecuritySettingsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_security_settings), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
security_settings.ListSecuritySettingsResponse()
)
await client.list_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_security_settings_flattened():
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_security_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = security_settings.ListSecuritySettingsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_security_settings(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_security_settings_flattened_error():
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_security_settings(
security_settings.ListSecuritySettingsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_security_settings_flattened_async():
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_security_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            security_settings.ListSecuritySettingsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_security_settings(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_security_settings_flattened_error_async():
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_security_settings(
security_settings.ListSecuritySettingsRequest(), parent="parent_value",
)
def test_list_security_settings_pager(transport_name: str = "grpc"):
client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_security_settings), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
security_settings.ListSecuritySettingsResponse(
security_settings=[
security_settings.SecuritySettings(),
security_settings.SecuritySettings(),
security_settings.SecuritySettings(),
],
next_page_token="abc",
),
security_settings.ListSecuritySettingsResponse(
security_settings=[], next_page_token="def",
),
security_settings.ListSecuritySettingsResponse(
security_settings=[security_settings.SecuritySettings(),],
next_page_token="ghi",
),
security_settings.ListSecuritySettingsResponse(
security_settings=[
security_settings.SecuritySettings(),
security_settings.SecuritySettings(),
],
),
RuntimeError,
)
        metadata = (gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),)
pager = client.list_security_settings(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(isinstance(i, security_settings.SecuritySettings) for i in results)
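# How the pager drains pages: the ListSecuritySettingsPager re-invokes the
# mocked transport whenever the current page is exhausted and a
# next_page_token is present. With the four side_effect responses above,
# iteration yields 3 + 0 + 1 + 2 = 6 SecuritySettings; the trailing
# RuntimeError would only surface on an unexpected fifth call.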
def test_list_security_settings_pages(transport_name: str = "grpc"):
client = SecuritySettingsServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_security_settings), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
security_settings.ListSecuritySettingsResponse(
security_settings=[
security_settings.SecuritySettings(),
security_settings.SecuritySettings(),
security_settings.SecuritySettings(),
],
next_page_token="abc",
),
security_settings.ListSecuritySettingsResponse(
security_settings=[], next_page_token="def",
),
security_settings.ListSecuritySettingsResponse(
security_settings=[security_settings.SecuritySettings(),],
next_page_token="ghi",
),
security_settings.ListSecuritySettingsResponse(
security_settings=[
security_settings.SecuritySettings(),
security_settings.SecuritySettings(),
],
),
RuntimeError,
)
pages = list(client.list_security_settings(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
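# .pages exposes the raw per-RPC responses (including next_page_token),
# whereas iterating the pager itself flattens the individual resources.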
@pytest.mark.asyncio
async def test_list_security_settings_async_pager():
client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_security_settings),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
security_settings.ListSecuritySettingsResponse(
security_settings=[
security_settings.SecuritySettings(),
security_settings.SecuritySettings(),
security_settings.SecuritySettings(),
],
next_page_token="abc",
),
security_settings.ListSecuritySettingsResponse(
security_settings=[], next_page_token="def",
),
security_settings.ListSecuritySettingsResponse(
security_settings=[security_settings.SecuritySettings(),],
next_page_token="ghi",
),
security_settings.ListSecuritySettingsResponse(
security_settings=[
security_settings.SecuritySettings(),
security_settings.SecuritySettings(),
],
),
RuntimeError,
)
async_pager = await client.list_security_settings(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, security_settings.SecuritySettings) for i in responses)
@pytest.mark.asyncio
async def test_list_security_settings_async_pages():
client = SecuritySettingsServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_security_settings),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
security_settings.ListSecuritySettingsResponse(
security_settings=[
security_settings.SecuritySettings(),
security_settings.SecuritySettings(),
security_settings.SecuritySettings(),
],
next_page_token="abc",
),
security_settings.ListSecuritySettingsResponse(
security_settings=[], next_page_token="def",
),
security_settings.ListSecuritySettingsResponse(
security_settings=[security_settings.SecuritySettings(),],
next_page_token="ghi",
),
security_settings.ListSecuritySettingsResponse(
security_settings=[
security_settings.SecuritySettings(),
security_settings.SecuritySettings(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_security_settings(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [security_settings.DeleteSecuritySettingsRequest, dict,]
)
def test_delete_security_settings(request_type, transport: str = "grpc"):
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_security_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == security_settings.DeleteSecuritySettingsRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_security_settings_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_security_settings), "__call__"
) as call:
client.delete_security_settings()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == security_settings.DeleteSecuritySettingsRequest()
@pytest.mark.asyncio
async def test_delete_security_settings_async(
transport: str = "grpc_asyncio",
request_type=security_settings.DeleteSecuritySettingsRequest,
):
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_security_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == security_settings.DeleteSecuritySettingsRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_security_settings_async_from_dict():
await test_delete_security_settings_async(request_type=dict)
def test_delete_security_settings_field_headers():
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = security_settings.DeleteSecuritySettingsRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_security_settings), "__call__"
) as call:
call.return_value = None
client.delete_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_security_settings_field_headers_async():
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = security_settings.DeleteSecuritySettingsRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_security_settings), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_security_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_security_settings_flattened():
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_security_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_security_settings(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_security_settings_flattened_error():
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_security_settings(
security_settings.DeleteSecuritySettingsRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_security_settings_flattened_async():
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_security_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_security_settings(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_security_settings_flattened_error_async():
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_security_settings(
security_settings.DeleteSecuritySettingsRequest(), name="name_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.SecuritySettingsServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.SecuritySettingsServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SecuritySettingsServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.SecuritySettingsServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = SecuritySettingsServiceClient(
client_options=options, transport=transport,
)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = SecuritySettingsServiceClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.SecuritySettingsServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SecuritySettingsServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
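# All of the cases above fail for the same underlying reason: a ready-made
# transport instance already owns its credentials and scopes, so pairing it
# with credentials, a credentials file, an api_key, or scopes is ambiguous
# and rejected; likewise an api_key cannot be combined with explicit
# credentials.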
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.SecuritySettingsServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = SecuritySettingsServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.SecuritySettingsServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.SecuritySettingsServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.SecuritySettingsServiceGrpcTransport,
transports.SecuritySettingsServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
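# A hedged sketch of what the mock above intercepts: when no credentials are
# supplied, the transport falls back to Application Default Credentials via
# google.auth.default(), which returns a (credentials, project_id) pair, e.g.
#
#   credentials, project_id = google.auth.default(scopes=None)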
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport, transports.SecuritySettingsServiceGrpcTransport,
)
def test_security_settings_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.SecuritySettingsServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_security_settings_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.dialogflowcx_v3beta1.services.security_settings_service.transports.SecuritySettingsServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.SecuritySettingsServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"create_security_settings",
"get_security_settings",
"update_security_settings",
"list_security_settings",
"delete_security_settings",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_security_settings_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dialogflowcx_v3beta1.services.security_settings_service.transports.SecuritySettingsServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SecuritySettingsServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
def test_security_settings_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.dialogflowcx_v3beta1.services.security_settings_service.transports.SecuritySettingsServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SecuritySettingsServiceTransport()
adc.assert_called_once()
def test_security_settings_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
SecuritySettingsServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.SecuritySettingsServiceGrpcTransport,
transports.SecuritySettingsServiceGrpcAsyncIOTransport,
],
)
def test_security_settings_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.SecuritySettingsServiceGrpcTransport, grpc_helpers),
(transports.SecuritySettingsServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_security_settings_service_transport_create_channel(
transport_class, grpc_helpers
):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=["1", "2"],
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
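# The two grpc options asserted above set the channel's message-size caps to
# -1 (i.e. no limit), so large requests and responses are not rejected at
# the channel level.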
@pytest.mark.parametrize(
"transport_class",
[
transports.SecuritySettingsServiceGrpcTransport,
transports.SecuritySettingsServiceGrpcAsyncIOTransport,
],
)
def test_security_settings_service_grpc_transport_client_cert_source_for_mtls(
transport_class,
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
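# client_cert_source_callback is the module-level helper these mTLS tests
# share; it stands in for a user-supplied callable returning a
# (certificate_chain, private_key) byte pair, which
# grpc.ssl_channel_credentials then consumes.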
def test_security_settings_service_host_no_port():
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com"
),
)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_security_settings_service_host_with_port():
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com:8000"
),
)
assert client.transport._host == "dialogflow.googleapis.com:8000"
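# When api_endpoint carries no port, the transport appends gRPC's
# conventional 443; an explicit port (8000 above) is preserved verbatim.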
def test_security_settings_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.SecuritySettingsServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_security_settings_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.SecuritySettingsServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.SecuritySettingsServiceGrpcTransport,
transports.SecuritySettingsServiceGrpcAsyncIOTransport,
],
)
def test_security_settings_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.SecuritySettingsServiceGrpcTransport,
transports.SecuritySettingsServiceGrpcAsyncIOTransport,
],
)
def test_security_settings_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_deidentify_template_path():
organization = "squid"
location = "clam"
deidentify_template = "whelk"
expected = "organizations/{organization}/locations/{location}/deidentifyTemplates/{deidentify_template}".format(
organization=organization,
location=location,
deidentify_template=deidentify_template,
)
actual = SecuritySettingsServiceClient.deidentify_template_path(
organization, location, deidentify_template
)
assert expected == actual
def test_parse_deidentify_template_path():
expected = {
"organization": "octopus",
"location": "oyster",
"deidentify_template": "nudibranch",
}
path = SecuritySettingsServiceClient.deidentify_template_path(**expected)
# Check that the path construction is reversible.
actual = SecuritySettingsServiceClient.parse_deidentify_template_path(path)
assert expected == actual
def test_inspect_template_path():
organization = "cuttlefish"
location = "mussel"
inspect_template = "winkle"
expected = "organizations/{organization}/locations/{location}/inspectTemplates/{inspect_template}".format(
organization=organization, location=location, inspect_template=inspect_template,
)
actual = SecuritySettingsServiceClient.inspect_template_path(
organization, location, inspect_template
)
assert expected == actual
def test_parse_inspect_template_path():
expected = {
"organization": "nautilus",
"location": "scallop",
"inspect_template": "abalone",
}
path = SecuritySettingsServiceClient.inspect_template_path(**expected)
# Check that the path construction is reversible.
actual = SecuritySettingsServiceClient.parse_inspect_template_path(path)
assert expected == actual
def test_security_settings_path():
project = "squid"
location = "clam"
security_settings = "whelk"
expected = "projects/{project}/locations/{location}/securitySettings/{security_settings}".format(
project=project, location=location, security_settings=security_settings,
)
actual = SecuritySettingsServiceClient.security_settings_path(
project, location, security_settings
)
assert expected == actual
def test_parse_security_settings_path():
expected = {
"project": "octopus",
"location": "oyster",
"security_settings": "nudibranch",
}
path = SecuritySettingsServiceClient.security_settings_path(**expected)
# Check that the path construction is reversible.
actual = SecuritySettingsServiceClient.parse_security_settings_path(path)
assert expected == actual
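# Illustrative note (not an assertion from the suite): with the values used in
# test_security_settings_path above, the helper returns
# "projects/squid/locations/clam/securitySettings/whelk", and
# parse_security_settings_path() inverts that string back into its components.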
def test_common_billing_account_path():
billing_account = "cuttlefish"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = SecuritySettingsServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "mussel",
}
path = SecuritySettingsServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = SecuritySettingsServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "winkle"
expected = "folders/{folder}".format(folder=folder,)
actual = SecuritySettingsServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nautilus",
}
path = SecuritySettingsServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = SecuritySettingsServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "scallop"
expected = "organizations/{organization}".format(organization=organization,)
actual = SecuritySettingsServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "abalone",
}
path = SecuritySettingsServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = SecuritySettingsServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "squid"
expected = "projects/{project}".format(project=project,)
actual = SecuritySettingsServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "clam",
}
path = SecuritySettingsServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = SecuritySettingsServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "whelk"
location = "octopus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = SecuritySettingsServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "oyster",
"location": "nudibranch",
}
path = SecuritySettingsServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = SecuritySettingsServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.SecuritySettingsServiceTransport, "_prep_wrapped_messages"
) as prep:
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.SecuritySettingsServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = SecuritySettingsServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = SecuritySettingsServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = SecuritySettingsServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(
SecuritySettingsServiceClient,
transports.SecuritySettingsServiceGrpcTransport,
),
(
SecuritySettingsServiceAsyncClient,
transports.SecuritySettingsServiceGrpcAsyncIOTransport,
),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-dialogflow-cx
|
tests/unit/gapic/dialogflowcx_v3beta1/test_security_settings_service.py
|
Python
|
apache-2.0
| 110,511
|
[
"Octopus"
] |
4ff647ab626a33e181ec4f0329a07049d5468a009e966172415aa3307274a926
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: kubevirt_vm
short_description: Manage KubeVirt virtual machine
description:
    - Use the OpenShift Python SDK to manage the state of KubeVirt virtual machines.
version_added: "2.8"
author: KubeVirt Team (@kubevirt)
options:
state:
description:
- Set the virtual machine to either I(present), I(absent), I(running) or I(stopped).
- "I(present) - Create or update a virtual machine. (And run it if it's ephemeral.)"
- "I(absent) - Remove a virtual machine."
- "I(running) - Create or update a virtual machine and run it."
- "I(stopped) - Stop a virtual machine. (This deletes ephemeral VMs.)"
default: "present"
choices:
- present
- absent
- running
- stopped
type: str
name:
description:
- Name of the virtual machine.
required: true
type: str
namespace:
description:
- Namespace where the virtual machine exists.
required: true
type: str
ephemeral:
description:
            - If I(true), an ephemeral virtual machine will be created. When destroyed it won't be accessible again.
- Works only with C(state) I(present) and I(absent).
type: bool
default: false
datavolumes:
description:
- "DataVolumes are a way to automate importing virtual machine disks onto pvcs during the virtual machine's
launch flow. Without using a DataVolume, users have to prepare a pvc with a disk image before assigning
               it to a VM or VMI manifest. With a DataVolume, both the pvc creation and import are automated on behalf of the user."
type: list
template:
description:
- "Name of Template to be used in creation of a virtual machine."
type: str
template_parameters:
description:
- "New values of parameters from Template."
type: dict
extends_documentation_fragment:
- k8s_auth_options
- kubevirt_vm_options
- kubevirt_common_options
requirements:
- python >= 2.7
- openshift >= 0.8.2
'''
EXAMPLES = '''
- name: Start virtual machine 'myvm'
kubevirt_vm:
state: running
name: myvm
namespace: vms
- name: Create virtual machine 'myvm' and start it
kubevirt_vm:
state: running
name: myvm
namespace: vms
memory: 64Mi
cpu_cores: 1
bootloader: efi
smbios_uuid: 5d307ca9-b3ef-428c-8861-06e72d69f223
cpu_model: Conroe
headless: true
hugepage_size: 2Mi
tablets:
- bus: virtio
name: tablet1
cpu_limit: 3
cpu_shares: 2
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
path: /custom-disk/cirros.img
disk:
bus: virtio
- name: Create virtual machine 'myvm' with multus network interface
kubevirt_vm:
name: myvm
namespace: vms
memory: 512M
interfaces:
- name: default
bridge: {}
network:
pod: {}
- name: mynet
bridge: {}
network:
multus:
networkName: mynetconf
- name: Combine inline definition with Ansible parameters
kubevirt_vm:
# Kubernetes specification:
definition:
metadata:
labels:
app: galaxy
service: web
origin: vmware
# Ansible parameters:
state: running
name: myvm
namespace: vms
memory: 64M
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
path: /custom-disk/cirros.img
disk:
bus: virtio
- name: Start ephemeral virtual machine 'myvm' and wait to be running
kubevirt_vm:
ephemeral: true
state: running
wait: true
wait_timeout: 180
name: myvm
namespace: vms
memory: 64M
labels:
kubevirt.io/vm: myvm
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
path: /custom-disk/cirros.img
disk:
bus: virtio
- name: Start fedora vm with cloud init
kubevirt_vm:
state: running
wait: true
name: myvm
namespace: vms
memory: 1024M
cloud_init_nocloud:
userData: |-
#cloud-config
password: fedora
chpasswd: { expire: False }
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/fedora-cloud-container-disk-demo:latest
path: /disk/fedora.qcow2
disk:
bus: virtio
- name: Create virtual machine with datavolume
kubevirt_vm:
name: myvm
namespace: default
memory: 1024Mi
datavolumes:
- name: mydv
source:
http:
url: https://url/disk.qcow2
pvc:
accessModes:
- ReadWriteOnce
storage: 5Gi
- name: Remove virtual machine 'myvm'
kubevirt_vm:
state: absent
name: myvm
namespace: vms
'''
RETURN = '''
kubevirt_vm:
description:
- The virtual machine dictionary specification returned by the API.
- "This dictionary contains all values returned by the KubeVirt API all options
are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachine)"
returned: success
type: complex
contains: {}
'''
import copy
import traceback
from ansible.module_utils.k8s.common import AUTH_ARG_SPEC
from ansible.module_utils.kubevirt import (
virtdict,
KubeVirtRawModule,
VM_COMMON_ARG_SPEC,
VM_SPEC_DEF_ARG_SPEC
)
VM_ARG_SPEC = {
'ephemeral': {'type': 'bool', 'default': False},
'state': {
'type': 'str',
'choices': [
'present', 'absent', 'running', 'stopped'
],
'default': 'present'
},
'datavolumes': {'type': 'list'},
'template': {'type': 'str'},
'template_parameters': {'type': 'dict'},
}
# Which params (can) modify 'spec:' contents of a VM:
VM_SPEC_PARAMS = list(VM_SPEC_DEF_ARG_SPEC.keys()) + ['datavolumes', 'template', 'template_parameters']
class KubeVirtVM(KubeVirtRawModule):
@property
def argspec(self):
""" argspec property builder """
argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
argument_spec.update(VM_COMMON_ARG_SPEC)
argument_spec.update(VM_ARG_SPEC)
return argument_spec
@staticmethod
def fix_serialization(obj):
if obj and hasattr(obj, 'to_dict'):
return obj.to_dict()
return obj
def _wait_for_vmi_running(self):
for event in self._kind_resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
entity = event['object']
if entity.metadata.name != self.name:
continue
status = entity.get('status', {})
phase = status.get('phase', None)
if phase == 'Running':
return entity
self.fail("Timeout occurred while waiting for virtual machine to start. Maybe try a higher wait_timeout value?")
def _wait_for_vm_state(self, new_state):
if new_state == 'running':
want_created = want_ready = True
else:
want_created = want_ready = False
for event in self._kind_resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
entity = event['object']
if entity.metadata.name != self.name:
continue
status = entity.get('status', {})
created = status.get('created', False)
ready = status.get('ready', False)
if (created, ready) == (want_created, want_ready):
return entity
self.fail("Timeout occurred while waiting for virtual machine to achieve '{0}' state. "
"Maybe try a higher wait_timeout value?".format(new_state))
def manage_vm_state(self, new_state, already_changed):
        new_running = new_state == 'running'
changed = False
k8s_obj = {}
if not already_changed:
k8s_obj = self.get_resource(self._kind_resource)
if not k8s_obj:
self.fail("VirtualMachine object disappeared during module operation, aborting.")
if k8s_obj.spec.get('running', False) == new_running:
return False, k8s_obj
newdef = dict(metadata=dict(name=self.name, namespace=self.namespace), spec=dict(running=new_running))
k8s_obj, err = self.patch_resource(self._kind_resource, newdef, k8s_obj,
self.name, self.namespace, merge_type='merge')
if err:
self.fail_json(**err)
else:
changed = True
if self.params.get('wait'):
k8s_obj = self._wait_for_vm_state(new_state)
return changed, k8s_obj
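    # Illustrative sketch (not executed by the module): the merge patch built in
    # manage_vm_state above carries only the 'running' flag, e.g.
    #   {'metadata': {'name': 'myvm', 'namespace': 'vms'},
    #    'spec': {'running': True}}
    # so flipping the VM state never touches the rest of the spec.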
def _process_template_defaults(self, proccess_template, processedtemplate, defaults):
def set_template_default(default_name, default_name_index, definition_spec):
default_value = proccess_template['metadata']['annotations'][default_name]
if default_value:
values = definition_spec[default_name_index]
default_values = [d for d in values if d.get('name') == default_value]
defaults[default_name_index] = default_values
if definition_spec[default_name_index] is None:
definition_spec[default_name_index] = []
definition_spec[default_name_index].extend([d for d in values if d.get('name') != default_value])
devices = processedtemplate['spec']['template']['spec']['domain']['devices']
spec = processedtemplate['spec']['template']['spec']
set_template_default('defaults.template.cnv.io/disk', 'disks', devices)
set_template_default('defaults.template.cnv.io/volume', 'volumes', spec)
set_template_default('defaults.template.cnv.io/nic', 'interfaces', devices)
set_template_default('defaults.template.cnv.io/network', 'networks', spec)
def construct_definition(self, kind, our_state, ephemeral):
definition = virtdict()
processedtemplate = {}
# Construct the API object definition:
defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}
vm_template = self.params.get('template')
if vm_template:
# Find the template the VM should be created from:
template_resource = self.client.resources.get(api_version='template.openshift.io/v1', kind='Template', name='templates')
proccess_template = template_resource.get(name=vm_template, namespace=self.params.get('namespace'))
# Set proper template values taken from module option 'template_parameters':
for k, v in self.params.get('template_parameters', {}).items():
for parameter in proccess_template.parameters:
if parameter.name == k:
parameter.value = v
            # Process the template:
processedtemplates_res = self.client.resources.get(api_version='template.openshift.io/v1', kind='Template', name='processedtemplates')
processedtemplate = processedtemplates_res.create(proccess_template.to_dict()).to_dict()['objects'][0]
# Process defaults of the template:
self._process_template_defaults(proccess_template, processedtemplate, defaults)
if not ephemeral:
definition['spec']['running'] = our_state == 'running'
template = definition if ephemeral else definition['spec']['template']
template['metadata']['labels']['vm.cnv.io/name'] = self.params.get('name')
dummy, definition = self.construct_vm_definition(kind, definition, template, defaults)
return self.merge_dicts(definition, processedtemplate)
def execute_module(self):
# Parse parameters specific to this module:
ephemeral = self.params.get('ephemeral')
k8s_state = our_state = self.params.get('state')
kind = 'VirtualMachineInstance' if ephemeral else 'VirtualMachine'
_used_params = [name for name in self.params if self.params[name] is not None]
# Is 'spec:' getting changed?
        vm_spec_change = bool(set(VM_SPEC_PARAMS).intersection(_used_params))
changed = False
crud_executed = False
method = ''
# Underlying module_utils/k8s/* code knows only of state == present/absent; let's make sure not to confuse it
if ephemeral:
# Ephemerals don't actually support running/stopped; we treat those as aliases for present/absent instead
if our_state == 'running':
self.params['state'] = k8s_state = 'present'
elif our_state == 'stopped':
self.params['state'] = k8s_state = 'absent'
else:
if our_state != 'absent':
self.params['state'] = k8s_state = 'present'
# Start with fetching the current object to make sure it exists
# If it does, but we end up not performing any operations on it, at least we'll be able to return
# its current contents as part of the final json
self.client = self.get_api_client()
self._kind_resource = self.find_supported_resource(kind)
k8s_obj = self.get_resource(self._kind_resource)
if not self.check_mode and not vm_spec_change and k8s_state != 'absent' and not k8s_obj:
self.fail("It's impossible to create an empty VM or change state of a non-existent VM.")
# If there are (potential) changes to `spec:` or we want to delete the object, that warrants a full CRUD
# Also check_mode always warrants a CRUD, as that'll produce a sane result
if vm_spec_change or k8s_state == 'absent' or self.check_mode:
definition = self.construct_definition(kind, our_state, ephemeral)
result = self.execute_crud(kind, definition)
changed = result['changed']
k8s_obj = result['result']
method = result['method']
crud_executed = True
if ephemeral and self.params.get('wait') and k8s_state == 'present' and not self.check_mode:
# Waiting for k8s_state==absent is handled inside execute_crud()
k8s_obj = self._wait_for_vmi_running()
if not ephemeral and our_state in ['running', 'stopped'] and not self.check_mode:
# State==present/absent doesn't involve any additional VMI state management and is fully
# handled inside execute_crud() (including wait logic)
patched, k8s_obj = self.manage_vm_state(our_state, crud_executed)
changed = changed or patched
if changed:
method = method or 'patch'
# Return from the module:
self.exit_json(**{
'changed': changed,
'kubevirt_vm': self.fix_serialization(k8s_obj),
'method': method
})
def main():
module = KubeVirtVM()
try:
module.execute_module()
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
|
rosmo/ansible
|
lib/ansible/modules/cloud/kubevirt/kubevirt_vm.py
|
Python
|
gpl-3.0
| 16,102
|
[
"Galaxy"
] |
2d97f54e1c9d9a7aa6fc71a049c57d3a2d975b697ad3f88956410400060d1691
|
"""
TornadoServer create a web server and load services.
It may work better with TornadoClient but as it accepts HTTPS you can create your own client
"""
import time
import datetime
import os
import asyncio
import M2Crypto
import tornado.iostream
tornado.iostream.SSLIOStream.configure(
"tornado_m2crypto.m2iostream.M2IOStream"
) # pylint: disable=wrong-import-position
import tornado.ioloop
from tornado.httpserver import HTTPServer
from tornado.web import Application, RequestHandler
import DIRAC
from DIRAC import gConfig, gLogger, S_OK
from DIRAC.Core.Security import Locations
from DIRAC.Core.Utilities import MemStat
from DIRAC.Core.Tornado.Server.HandlerManager import HandlerManager
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.FrameworkSystem.Client.MonitoringClient import MonitoringClient
sLog = gLogger.getSubLogger(__name__)
DEBUG_M2CRYPTO = os.getenv("DIRAC_DEBUG_M2CRYPTO", "No").lower() in ("yes", "true")
class NotFoundHandler(RequestHandler):
"""Handle 404 errors"""
def prepare(self):
self.set_status(404)
from DIRAC.FrameworkSystem.private.authorization.utils.Utilities import getHTML
self.finish(getHTML("Not found.", state=404, info="Nothing matches the given URI."))
class TornadoServer(object):
"""
Tornado webserver
Initialize and run an HTTPS Server for DIRAC services.
    By default it loads all HTTPS services defined in the CS,
    but you can also give an explicit list.
The listening port is either:
* Given as parameter
* Loaded from the CS ``/Systems/Tornado/<instance>/Port``
* Default to 8443
Example 1: Easy way to start tornado::
# Initialize server and load services
serverToLaunch = TornadoServer()
# Start listening when ready
serverToLaunch.startTornado()
    Example 2: We want to debug service1 and service2 only, and use another port for that::
services = ['component/service1:port1', 'component/service2']
endpoints = ['component/endpoint1', 'component/endpoint2']
serverToLaunch = TornadoServer(services=services, endpoints=endpoints, port=1234)
serverToLaunch.startTornado()
"""
def __init__(self, services=True, endpoints=False, port=None):
"""C'r
:param list services: (default True) List of service handlers to load.
If ``True``, loads all described in the CS
If ``False``, do not load services
:param list endpoints: (default False) List of endpoint handlers to load.
If ``True``, loads all described in the CS
If ``False``, do not load endpoints
:param int port: Port to listen to.
If ``None``, the port is resolved following the logic described in the class documentation
"""
# Application metadata, routes and settings mapping on the ports
self.__appsSettings = {}
        # Default port, if another is not discovered
if port is None:
port = gConfig.getValue("/Systems/Tornado/%s/Port" % PathFinder.getSystemInstance("Tornado"), 8443)
self.port = port
# Handler manager initialization with default settings
self.handlerManager = HandlerManager(services, endpoints)
# Monitoring attributes
self._monitor = MonitoringClient()
# temp value for computation, used by the monitoring
self.__report = None
# Last update time stamp
self.__monitorLastStatsUpdate = None
self.__monitoringLoopDelay = 60 # In secs
        # If services are defined, load only those (useful for debugging or for specific services)
retVal = self.handlerManager.loadServicesHandlers()
if not retVal["OK"]:
sLog.error(retVal["Message"])
raise ImportError("Some services can't be loaded, check the service names and configuration.")
retVal = self.handlerManager.loadEndpointsHandlers()
if not retVal["OK"]:
sLog.error(retVal["Message"])
raise ImportError("Some endpoints can't be loaded, check the endpoint names and configuration.")
def __calculateAppSettings(self):
"""Calculate application information mapping on the ports"""
# if no service list is given, load services from configuration
handlerDict = self.handlerManager.getHandlersDict()
for data in handlerDict.values():
port = data.get("Port") or self.port
for hURL in data["URLs"]:
if port not in self.__appsSettings:
self.__appsSettings[port] = {"routes": [], "settings": {}}
if hURL not in self.__appsSettings[port]["routes"]:
self.__appsSettings[port]["routes"].append(hURL)
return bool(self.__appsSettings)
def loadServices(self, services):
"""Load a services
:param services: List of service handlers to load. Default value set at initialization
If ``True``, loads all services from CS
:type services: bool or list
:return: S_OK()/S_ERROR()
"""
return self.handlerManager.loadServicesHandlers(services)
def loadEndpoints(self, endpoints):
"""Load a endpoints
:param endpoints: List of service handlers to load. Default value set at initialization
If ``True``, loads all endpoints from CS
:type endpoints: bool or list
:return: S_OK()/S_ERROR()
"""
return self.handlerManager.loadEndpointsHandlers(endpoints)
def addHandlers(self, routes, settings=None, port=None):
"""Add new routes
:param list routes: routes
:param dict settings: application settings
:param int port: port
"""
port = port or self.port
if port not in self.__appsSettings:
self.__appsSettings[port] = {"routes": [], "settings": {}}
if settings:
self.__appsSettings[port]["settings"].update(settings)
for route in routes:
if route not in self.__appsSettings[port]["routes"]:
self.__appsSettings[port]["routes"].append(route)
return S_OK()
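    # Usage sketch (illustrative; MyHandler is a hypothetical tornado
    # RequestHandler defined elsewhere):
    #   serverToLaunch = TornadoServer(services=False)
    #   serverToLaunch.addHandlers([(r"/custom", MyHandler)], port=9443)
    #   serverToLaunch.startTornado()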
def startTornado(self):
"""
Starts the tornado server when ready.
This method never returns.
"""
        # If no services are loaded:
        if not self.__calculateAppSettings():
            raise Exception("No services are loaded, please check your configuration")
sLog.debug("Starting Tornado")
# Prepare SSL settings
certs = Locations.getHostCertificateAndKeyLocation()
if certs is False:
sLog.fatal("Host certificates not found ! Can't start the Server")
raise ImportError("Unable to load certificates")
ca = Locations.getCAsLocation()
ssl_options = {
"certfile": certs[0],
"keyfile": certs[1],
"cert_reqs": M2Crypto.SSL.verify_peer,
"ca_certs": ca,
"sslDebug": DEBUG_M2CRYPTO, # Set to true if you want to see the TLS debug messages
}
# Init monitoring
self._initMonitoring()
self.__monitorLastStatsUpdate = time.time()
self.__report = self.__startReportToMonitoringLoop()
# Starting monitoring, IOLoop waiting time in ms, __monitoringLoopDelay is defined in seconds
tornado.ioloop.PeriodicCallback(self.__reportToMonitoring, self.__monitoringLoopDelay * 1000).start()
# If we are running with python3, Tornado will use asyncio,
# and we have to convince it to let us run in a different thread
# Doing this ensures a consistent behavior between py2 and py3
asyncio.set_event_loop_policy(tornado.platform.asyncio.AnyThreadEventLoopPolicy())
for port, app in self.__appsSettings.items():
sLog.debug(" - %s" % "\n - ".join(["%s = %s" % (k, ssl_options[k]) for k in ssl_options]))
# Default server configuration
settings = dict(compress_response=True, cookie_secret="secret")
            # Merge application settings
settings.update(app["settings"])
# Start server
router = Application(app["routes"], default_handler_class=NotFoundHandler, **settings)
server = HTTPServer(router, ssl_options=ssl_options, decompress_request=True)
try:
server.listen(int(port))
except Exception as e: # pylint: disable=broad-except
sLog.exception("Exception starting HTTPServer", e)
raise
sLog.always("Listening on port %s" % port)
tornado.ioloop.IOLoop.current().start()
def _initMonitoring(self):
"""
Initialize the monitoring
"""
self._monitor.setComponentType(MonitoringClient.COMPONENT_TORNADO)
self._monitor.initialize()
self._monitor.setComponentName("Tornado")
self._monitor.registerActivity("CPU", "CPU Usage", "Framework", "CPU,%", MonitoringClient.OP_MEAN, 600)
self._monitor.registerActivity("MEM", "Memory Usage", "Framework", "Memory,MB", MonitoringClient.OP_MEAN, 600)
self._monitor.setComponentExtraParam("DIRACVersion", DIRAC.version)
self._monitor.setComponentExtraParam("platform", DIRAC.getPlatform())
self._monitor.setComponentExtraParam("startTime", datetime.datetime.utcnow())
def __reportToMonitoring(self):
"""
Periodically report to the monitoring of the CPU and MEM
"""
# Calculate CPU usage by comparing realtime and cpu time since last report
self.__endReportToMonitoringLoop(*self.__report)
# Save memory usage and save realtime/CPU time for next call
self.__report = self.__startReportToMonitoringLoop()
def __startReportToMonitoringLoop(self):
"""
Snapshot of resources to be taken at the beginning
of a monitoring cycle.
Also sends memory snapshot to the monitoring.
This is basically copy/paste of Service.py
        :returns: tuple (time.time(), cpuTime)
"""
        now = time.time()  # Used to calculate a delta
stats = os.times()
cpuTime = stats[0] + stats[2]
if now - self.__monitorLastStatsUpdate < 0:
return (now, cpuTime)
# Send CPU consumption mark
self.__monitorLastStatsUpdate = now
# Send Memory consumption mark
membytes = MemStat.VmB("VmRSS:")
if membytes:
mem = membytes / (1024.0 * 1024.0)
self._monitor.addMark("MEM", mem)
return (now, cpuTime)
def __endReportToMonitoringLoop(self, initialWallTime, initialCPUTime):
"""
Snapshot of resources to be taken at the end
of a monitoring cycle.
This is basically copy/paste of Service.py
Determines CPU usage by comparing walltime and cputime and send it to monitor
"""
wallTime = time.time() - initialWallTime
stats = os.times()
cpuTime = stats[0] + stats[2] - initialCPUTime
percentage = cpuTime / wallTime * 100.0
if percentage > 0:
self._monitor.addMark("CPU", percentage)
|
DIRACGrid/DIRAC
|
src/DIRAC/Core/Tornado/Server/TornadoServer.py
|
Python
|
gpl-3.0
| 11,231
|
[
"DIRAC"
] |
7d6697015c9a384105fc17c1f1a628c8111cc4626b4d4ebf534f98cab43c472e
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import absolute_import
from __future__ import print_function
import collections
import shelve
import copy
import sys
import inspect
import os
from psi4.driver.constants import *
from psi4.driver.p4util import *
from psi4 import core
from . import findif_response_utils
def run_roa(name, **kwargs):
"""
Main driver for managing Raman Optical activity computations with
CC response theory.
    Uses a distributed finite differences approach:
1. Sets up a database to keep track of running/finished/waiting
computations.
2. Generates separate input files for displaced geometries.
3. When all displacements are run, collects the necessary information
from each displaced computation, and computes final result.
"""
# Get list of omega values -> Make sure we only have one wavelength
# Catch this now before any real work gets done
omega = core.get_option('CCRESPONSE', 'OMEGA')
    if len(omega) > 2:
        raise Exception('ROA scattering can only be performed for one wavelength.')
core.print_out(
'Running ROA computation. Subdirectories for each '
'required displaced geometry have been created.\n\n')
dbno = 0
# Initialize database
db = shelve.open('database', writeback=True)
# Check if final result is in here
# ->if we have already computed roa, back up the dict
# ->copy it setting this flag to false and continue
if ('roa_computed' in db) and ( db['roa_computed'] ):
db2 = shelve.open('.database.bak{}'.format(dbno), writeback=True)
dbno += 1
for key,value in db.items():
db2[key]=value
db2.close()
db['roa_computed'] = False
else:
db['roa_computed'] = False
if 'inputs_generated' not in db:
findif_response_utils.initialize_database(db,name,"roa", ["roa_tensor"])
# Generate input files
if not db['inputs_generated']:
findif_response_utils.generate_inputs(db,name)
        # db['inputs_generated'] = True is set by the helper
# Check job status
if db['inputs_generated'] and not db['jobs_complete']:
print('Checking status')
findif_response_utils.stat(db)
for job, status in db['job_status'].items():
print("{} --> {}".format(job, status))
# Compute ROA Scattering
if db['jobs_complete']:
mygauge = core.get_option('CCRESPONSE', 'GAUGE')
consider_gauge = {
'LENGTH': ['Length Gauge'],
'VELOCITY': ['Modified Velocity Gauge'],
'BOTH': ['Length Gauge', 'Modified Velocity Gauge']
}
gauge_list = ["{} Results".format(x) for x in consider_gauge[mygauge]]
# Gather data
dip_polar_list = findif_response_utils.collect_displaced_matrix_data(
db, 'Dipole Polarizability', 3)
opt_rot_list = [
x for x in (
findif_response_utils.collect_displaced_matrix_data(
db,
"Optical Rotation Tensor ({})".format(gauge),
3
)
for gauge in consider_gauge[mygauge]
)
]
dip_quad_polar_list = findif_response_utils.collect_displaced_matrix_data(
db, "Electric-Dipole/Quadrupole Polarizability", 9)
# Compute Scattering
# Run new function (src/bin/ccresponse/scatter.cc)
core.print_out('Running scatter function')
step = core.get_local_option('FINDIF', 'DISP_SIZE')
for g_idx, gauge in enumerate(opt_rot_list):
print('\n\n----------------------------------------------------------------------')
print('\t%%%%%%%%%% {} %%%%%%%%%%'.format(gauge_list[g_idx]))
print('----------------------------------------------------------------------\n\n')
core.print_out('\n\n----------------------------------------------------------------------\n')
core.print_out('\t%%%%%%%%%% {} %%%%%%%%%%\n'.format(gauge_list[g_idx]))
core.print_out('----------------------------------------------------------------------\n\n')
print('roa.py:85 I am not being passed a molecule, grabbing from global :(')
core.scatter(core.get_active_molecule(), step, dip_polar_list, gauge, dip_quad_polar_list)
db['roa_computed'] = True
db.close()
# SAVE this for when multiple wavelengths works
# # Get list of omega values
# omega = core.get_option('CCRESPONSE','OMEGA')
# if len(omega) > 1:
# units = copy.copy(omega[-1])
# omega.pop()
# else:
# units = 'atomic'
# wavelength = copy.copy(omega[0])
# # Set up units for scatter.cc
# if units == 'NM':
# wavelength = (constants.c * constants.h * 1*(10**-9))/(wavelength * constants.hartree2J)
# if units == 'HZ':
# wavelength = wavelength * constants.h / constants.hartree2J
# if units == 'EV':
# wavelength = wavelength / constants.hartree2ev
# if units == 'atomic':
# pass
# ################################
# ### ###
# ### DATABASE STRUCTURE ###
# ### ###
# ################################
# Dict of dicts
# inputs_generated (boolean)
# job_status: (ordered Dict)
# key-> {atom}_{cord}_{p/m}
# val-> (not_started,running,finished)
# job_list: (string)
# status (string)
# jobs_complete (boolean)
# roa_computed (boolean)
# prop (string) = roa
#
# ?
# data: dipole_polarizability
# : optical_rotation
# : dipole_quadrupole_polarizability
# ?
# results:
|
jH0ward/psi4
|
psi4/driver/procrouting/roa.py
|
Python
|
lgpl-3.0
| 6,539
|
[
"Psi4"
] |
d8a5d4626986adcb1631e1ae2ae4d854dc43510e187f8b8ea815be069e6d304d
|
#
# Restriction Analysis Libraries.
# Copyright (C) 2004. Frederic Sohm.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
import os
###############################################################################
# Configuration of the console.
#
# Mainly used by PrintFormat.PrintFormat
#
# ConsoleWidth : width of the console used default to 80.
# should never be less than 60.
# NameWidth : space attributed to the name in PrintList method.
# Indent : Indent of the second line.
# MaxSize : Maximal size of the sequence (default=6:
# -> 99 999 bp + 1 trailing ','
# people are unlikely to ask for a restriction map of sequences
# bigger than 100,000 bp. This is needed to determine the
# space to be reserved for site locations.
#
# MaxSize = 5 => 9.999 bp
# MaxSize = 6 => 99.999 bp
# MaxSize = 7 => 999.999 bp
# example:
#
# <------------ ConsoleWidth --------------->
# <- NameWidth ->
# EcoRI : 1, 45, 50, 300, 400, 650,
# 700, 1200, 2500.
# <-->
# Indent
#
ConsoleWidth = 80
NameWidth = 10
Indent = 4
MaxSize = 6
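# Illustrative check (not used by the library): with MaxSize = 6 the widest
# location printed is 99999 plus its trailing ',', i.e. 6 characters:
# >>> len("%d," % 99999)
# 6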
###############################################################################
# Proxies
#
# Enter here the address of your proxy if any.
# If you don't use proxy use an empty string
# i.e.
# ftp_proxy = ''
# -> no proxy
#
# ftp_proxy = 'http://www.somewhere.something:one_number'
# -> www.somewhere.something is the address of the proxy.
# one_number is the port number.
#
ftp_proxy = ''
###############################################################################
# Rebase ftp location
#
# Do not modify the addresses.
#
ftp_Rebase = 'ftp://ftp.neb.com/'
ftp_emb_e = ftp_Rebase+'pub/rebase/emboss_e.###'
ftp_emb_s = ftp_Rebase+'pub/rebase/emboss_s.###'
ftp_emb_r = ftp_Rebase+'pub/rebase/emboss_r.###'
###############################################################################
# ftp rebase account.
#
# In order to update the rebase files, Rana needs to connect to the
# corresponding ftp server.
#
# the general procedure for accessing an ftp server is to connect as an
# anonymous user (Rebase_name) and provide your e-mail address
# as the password.
#
# Therefore, you need to enter your e-mail address in Rebase_password.
# The address will not be sent to anyone but is necessary to log in to the
# rebase ftp server when connecting as an anonymous user.
#
# Do not forget to enclose the address between "'".
#
Rebase_name = 'anonymous'
Rebase_password = ''
#Rebase_password = 'your_address@somewhere.something'
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/Restriction/RanaConfig.py
|
Python
|
gpl-2.0
| 3,234
|
[
"Biopython"
] |
206efce6c7882f515b41afcf09d136db9e08069423216b5df31a72aaa64e6fa6
|
# Copyright (C) 2012 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Convenience functions to genenerate gravitational wave templates and
waveforms.
"""
import lal, lalsimulation, numpy, copy
from pycbc.types import TimeSeries, FrequencySeries, zeros, Array
from pycbc.types import real_same_precision_as, complex_same_precision_as
import pycbc.scheme as _scheme
import inspect
from pycbc.fft import fft
from pycbc import pnutils
from pycbc.waveform import utils as wfutils
from pycbc.waveform import parameters
from pycbc.filter import interpolate_complex_frequency, resample_to_delta_t
import pycbc
from spa_tmplt import spa_tmplt, spa_tmplt_norm, spa_tmplt_end, \
spa_tmplt_precondition, spa_amplitude_factor, \
spa_length_in_time
class NoWaveformError(Exception):
"""This should be raised if generating a waveform would just result in all
zeros being returned, e.g., if a requested `f_final` is <= `f_lower`.
"""
pass
# If this is set to True, waveform generation codes will try to regenerate
# waveforms with known failure conditions to try to avoid the failure. For
# example SEOBNRv3 waveforms would be regenerated with double the sample rate.
# If this is set to False waveform failures will always raise exceptions
fail_tolerant_waveform_generation = True
default_args = (parameters.fd_waveform_params.default_dict() + \
parameters.td_waveform_params).default_dict()
default_sgburst_args = {'eccentricity':0, 'polarization':0}
td_required_args = parameters.td_waveform_params.nodefaults.aslist
fd_required_args = parameters.fd_waveform_params.nodefaults.aslist
sgburst_required_args = ['q','frequency','hrss']
# td, fd, filter waveforms generated on the CPU
_lalsim_td_approximants = {}
_lalsim_fd_approximants = {}
_lalsim_enum = {}
_lalsim_sgburst_approximants = {}
def _check_lal_pars(p):
""" Create a laldict object from the dictionary of waveform parameters
Parameters
----------
p: dictionary
        The dictionary of lalsimulation parameters
Returns
-------
laldict: LalDict
The lal type dictionary to pass to the lalsimulation waveform functions.
"""
lal_pars = lal.CreateDict()
#nonGRparams can be straightforwardly added if needed, however they have to
# be invoked one by one
if p['phase_order']!=-1:
lalsimulation.SimInspiralWaveformParamsInsertPNPhaseOrder(lal_pars,int(p['phase_order']))
if p['amplitude_order']!=-1:
lalsimulation.SimInspiralWaveformParamsInsertPNAmplitudeOrder(lal_pars,int(p['amplitude_order']))
if p['spin_order']!=-1:
lalsimulation.SimInspiralWaveformParamsInsertPNSpinOrder(lal_pars,int(p['spin_order']))
if p['tidal_order']!=-1:
lalsimulation.SimInspiralWaveformParamsInsertPNTidalOrder(lal_pars, p['tidal_order'])
if p['eccentricity_order']!=-1:
lalsimulation.SimInspiralWaveformParamsInsertPNEccentricityOrder(lal_pars, p['eccentricity_order'])
if p['lambda1']:
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda1(lal_pars, p['lambda1'])
if p['lambda2']:
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda2(lal_pars, p['lambda2'])
if p['dquad_mon1']:
lalsimulation.SimInspiralWaveformParamsInsertdQuadMon1(lal_pars, p['dquad_mon1'])
if p['dquad_mon2']:
lalsimulation.SimInspiralWaveformParamsInsertdQuadMon2(lal_pars, p['dquad_mon2'])
if p['numrel_data']:
lalsimulation.SimInspiralWaveformParamsInsertNumRelData(lal_pars, str(p['numrel_data']))
if p['modes_choice']:
lalsimulation.SimInspiralWaveformParamsInsertModesChoice(lal_pars, p['modes_choice'])
if p['frame_axis']:
lalsimulation.SimInspiralWaveformParamsInsertFrameAxis(lal_pars, p['frame_axis'])
if p['side_bands']:
lalsimulation.SimInspiralWaveformParamsInsertSideband(lal_pars, p['side_bands'])
return lal_pars
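# Illustrative note: _check_lal_pars only inserts options that deviate from
# their "unset" sentinels, e.g. with p['phase_order'] == -1 and
# p['lambda1'] == 0, neither the PN phase order nor lambda1 is written into
# the returned LALDict.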
def _lalsim_td_waveform(**p):
lal_pars = _check_lal_pars(p)
#nonGRparams can be straightforwardly added if needed, however they have to
# be invoked one by one
try:
hp1, hc1 = lalsimulation.SimInspiralChooseTDWaveform(
float(pnutils.solar_mass_to_kg(p['mass1'])),
float(pnutils.solar_mass_to_kg(p['mass2'])),
float(p['spin1x']), float(p['spin1y']), float(p['spin1z']),
float(p['spin2x']), float(p['spin2y']), float(p['spin2z']),
pnutils.megaparsecs_to_meters(float(p['distance'])),
float(p['inclination']), float(p['coa_phase']),
float(p['long_asc_nodes']), float(p['eccentricity']), float(p['mean_per_ano']),
float(p['delta_t']), float(p['f_lower']), float(p['f_ref']),
lal_pars,
_lalsim_enum[p['approximant']])
except RuntimeError:
if not fail_tolerant_waveform_generation:
raise
# For some cases failure modes can occur. Here we add waveform-specific
# instructions to try to work with waveforms that are known to fail.
if p['approximant'] == 'SEOBNRv3':
# In this case we'll try doubling the sample time and trying again
# Don't want to get stuck in a loop though!
if 'delta_t_orig' not in p:
p['delta_t_orig'] = p['delta_t']
p['delta_t'] = p['delta_t'] / 2.
if p['delta_t_orig'] / p['delta_t'] > 9:
raise
hp, hc = _lalsim_td_waveform(**p)
p['delta_t'] = p['delta_t_orig']
hp = resample_to_delta_t(hp, hp.delta_t*2)
hc = resample_to_delta_t(hc, hc.delta_t*2)
return hp, hc
raise
#lal.DestroyDict(lal_pars)
hp = TimeSeries(hp1.data.data[:], delta_t=hp1.deltaT, epoch=hp1.epoch)
hc = TimeSeries(hc1.data.data[:], delta_t=hc1.deltaT, epoch=hc1.epoch)
return hp, hc
def _spintaylor_aligned_prec_swapper(**p):
"""
    SpinTaylorF2 is single-spin only, and it also struggles with anti-aligned
    spin waveforms. This construct chooses between the aligned two-spin TaylorF2
    model and the precessing single-spin SpinTaylorF2 model. If aligned spins
    are given, use TaylorF2; if nonaligned spins are given, use SpinTaylorF2. In
the case of nonaligned doublespin systems the code will fail at the
waveform generator level.
"""
orig_approximant = p['approximant']
if p['spin2x'] == 0 and p['spin2y'] == 0 and p['spin1x'] == 0 and \
p['spin1y'] == 0:
p['approximant'] = 'TaylorF2'
else:
p['approximant'] = 'SpinTaylorF2'
hp, hc = _lalsim_fd_waveform(**p)
p['approximant'] = orig_approximant
return hp, hc
def _lalsim_fd_waveform(**p):
lal_pars = _check_lal_pars(p)
hp1, hc1 = lalsimulation.SimInspiralChooseFDWaveform(
float(pnutils.solar_mass_to_kg(p['mass1'])),
float(pnutils.solar_mass_to_kg(p['mass2'])),
float(p['spin1x']), float(p['spin1y']), float(p['spin1z']),
float(p['spin2x']), float(p['spin2y']), float(p['spin2z']),
pnutils.megaparsecs_to_meters(float(p['distance'])),
float(p['inclination']), float(p['coa_phase']),
float(p['long_asc_nodes']), float(p['eccentricity']), float(p['mean_per_ano']),
p['delta_f'], float(p['f_lower']), float(p['f_final']), float(p['f_ref']),
lal_pars,
_lalsim_enum[p['approximant']])
hp = FrequencySeries(hp1.data.data[:], delta_f=hp1.deltaF,
epoch=hp1.epoch)
hc = FrequencySeries(hc1.data.data[:], delta_f=hc1.deltaF,
epoch=hc1.epoch)
#lal.DestroyDict(lal_pars)
return hp, hc
def _lalsim_sgburst_waveform(**p):
hp, hc = lalsimulation.SimBurstSineGaussian(float(p['q']),
float(p['frequency']),
float(p['hrss']),
float(p['eccentricity']),
float(p['polarization']),
float(p['delta_t']))
hp = TimeSeries(hp.data.data[:], delta_t=hp.deltaT, epoch=hp.epoch)
hc = TimeSeries(hc.data.data[:], delta_t=hc.deltaT, epoch=hc.epoch)
return hp, hc
for approx_enum in xrange(0, lalsimulation.NumApproximants):
if lalsimulation.SimInspiralImplementedTDApproximants(approx_enum):
approx_name = lalsimulation.GetStringFromApproximant(approx_enum)
_lalsim_enum[approx_name] = approx_enum
_lalsim_td_approximants[approx_name] = _lalsim_td_waveform
for approx_enum in xrange(0, lalsimulation.NumApproximants):
if lalsimulation.SimInspiralImplementedFDApproximants(approx_enum):
approx_name = lalsimulation.GetStringFromApproximant(approx_enum)
_lalsim_enum[approx_name] = approx_enum
_lalsim_fd_approximants[approx_name] = _lalsim_fd_waveform
# sine-Gaussian burst
for approx_enum in xrange(0, lalsimulation.NumApproximants):
if lalsimulation.SimInspiralImplementedFDApproximants(approx_enum):
approx_name = lalsimulation.GetStringFromApproximant(approx_enum)
_lalsim_enum[approx_name] = approx_enum
_lalsim_sgburst_approximants[approx_name] = _lalsim_sgburst_waveform
cpu_sgburst = _lalsim_sgburst_approximants
cpu_td = dict(_lalsim_td_approximants.items())
cpu_fd = _lalsim_fd_approximants
# Waveforms written in CUDA
_cuda_td_approximants = {}
_cuda_fd_approximants = {}
if pycbc.HAVE_CUDA:
from pycbc.waveform.TaylorF2 import taylorf2 as cuda_taylorf2
from pycbc.waveform.pycbc_phenomC_tmplt import imrphenomc_tmplt
from pycbc.waveform.SpinTaylorF2 import spintaylorf2 as cuda_spintaylorf2
_cuda_fd_approximants["IMRPhenomC"] = imrphenomc_tmplt
_cuda_fd_approximants["SpinTaylorF2"] = cuda_spintaylorf2
cuda_td = dict(_lalsim_td_approximants.items() + _cuda_td_approximants.items())
cuda_fd = dict(_lalsim_fd_approximants.items() + _cuda_fd_approximants.items())
# List the various available approximants ####################################
def print_td_approximants():
    print("LalSimulation Approximants")
    for approx in _lalsim_td_approximants.keys():
        print(" " + approx)
    print("CUDA Approximants")
    for approx in _cuda_td_approximants.keys():
        print(" " + approx)
def print_fd_approximants():
    print("LalSimulation Approximants")
    for approx in _lalsim_fd_approximants.keys():
        print(" " + approx)
    print("CUDA Approximants")
    for approx in _cuda_fd_approximants.keys():
        print(" " + approx)
def print_sgburst_approximants():
    print("LalSimulation Approximants")
    for approx in _lalsim_sgburst_approximants.keys():
        print(" " + approx)
def td_approximants(scheme=_scheme.mgr.state):
"""Return a list containing the available time domain approximants for
the given processing scheme.
"""
return td_wav[type(scheme)].keys()
def fd_approximants(scheme=_scheme.mgr.state):
"""Return a list containing the available fourier domain approximants for
the given processing scheme.
"""
return fd_wav[type(scheme)].keys()
def sgburst_approximants(scheme=_scheme.mgr.state):
"""Return a list containing the available time domain sgbursts for
the given processing scheme.
"""
return sgburst_wav[type(scheme)].keys()
def filter_approximants(scheme=_scheme.mgr.state):
"""Return a list of fourier domain approximants including those
written specifically as templates.
"""
return filter_wav[type(scheme)].keys()
# Input parameter handling ###################################################
def get_obj_attrs(obj):
""" Return a dictionary built from the attributes of the given object.
"""
pr = {}
if obj is not None:
if isinstance(obj, numpy.core.records.record):
for name in obj.dtype.names:
pr[name] = getattr(obj, name)
elif hasattr(obj, '__dict__'):
pr = obj.__dict__
elif hasattr(obj, '__slots__'):
for slot in obj.__slots__:
if hasattr(obj, slot):
pr[slot] = getattr(obj, slot)
else:
for name in dir(obj):
try:
value = getattr(obj, name)
if not name.startswith('__') and not inspect.ismethod(value):
pr[name] = value
except:
continue
return pr
def props(obj, **kwargs):
""" Return a dictionary built from the combination of defaults, kwargs,
and the attributes of the given object.
"""
pr = get_obj_attrs(obj)
# Get the parameters to generate the waveform
# Note that keyword arguments override values in the template object
input_params = default_args.copy()
input_params.update(pr)
input_params.update(kwargs)
return input_params
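# Precedence sketch for props() (illustrative; `row` is any object carrying
# waveform attributes, e.g. a row in an xml table):
#   p = props(row, mass2=10.0)
#   # p['mass1'] comes from the row, p['mass2'] == 10.0 (kwargs win),
#   # and anything not set falls back to default_args.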
# Input parameter handling for bursts ########################################
def props_sgburst(obj, **kwargs):
pr = {}
if obj is not None:
for name in dir(obj):
try:
value = getattr(obj, name)
if not name.startswith('__') and not inspect.ismethod(value):
pr[name] = value
except:
continue
# Get the parameters to generate the waveform
# Note that keyword arguments override values in the template object
input_params = default_sgburst_args.copy()
input_params.update(pr)
input_params.update(kwargs)
return input_params
# Waveform generation ########################################################
def get_fd_waveform_sequence(template=None, **kwds):
"""Return values of the waveform evaluated at the sequence of frequency
points.
Parameters
----------
template: object
An object that has attached properties. This can be used to substitute
for keyword arguments. A common example would be a row in an xml table.
{params}
Returns
-------
hplustilde: Array
The plus phase of the waveform in frequency domain evaluated at the
frequency points.
hcrosstilde: Array
The cross phase of the waveform in frequency domain evaluated at the
frequency points.
"""
kwds['delta_f'] = -1
kwds['f_lower'] = -1
p = props(template, **kwds)
lal_pars = _check_lal_pars(p)
flags = lalsimulation.SimInspiralCreateWaveformFlags()
lalsimulation.SimInspiralSetSpinOrder(flags, p['spin_order'])
lalsimulation.SimInspiralSetTidalOrder(flags, p['tidal_order'])
hp, hc = lalsimulation.SimInspiralChooseFDWaveformSequence(float(p['coa_phase']),
float(pnutils.solar_mass_to_kg(p['mass1'])),
float(pnutils.solar_mass_to_kg(p['mass2'])),
float(p['spin1x']), float(p['spin1y']), float(p['spin1z']),
float(p['spin2x']), float(p['spin2y']), float(p['spin2z']),
float(p['f_ref']),
pnutils.megaparsecs_to_meters(float(p['distance'])),
float(p['inclination']),
lal_pars,
_lalsim_enum[p['approximant']],
p['sample_points'].lal())
return Array(hp.data.data), Array(hc.data.data)
get_fd_waveform_sequence.__doc__ = get_fd_waveform_sequence.__doc__.format(
params=parameters.fd_waveform_sequence_params.docstr(prefix=" ",
include_label=False).lstrip(' '))
def get_td_waveform(template=None, **kwargs):
"""Return the plus and cross polarizations of a time domain waveform.
Parameters
----------
template: object
        An object that has attached properties. This can be used to substitute
for keyword arguments. A common example would be a row in an xml table.
{params}
Returns
-------
hplus: TimeSeries
The plus polarization of the waveform.
hcross: TimeSeries
The cross polarization of the waveform.
"""
input_params = props(template,**kwargs)
wav_gen = td_wav[type(_scheme.mgr.state)]
if 'approximant' not in input_params or input_params['approximant'] is None:
raise ValueError("Please provide an approximant name")
elif input_params['approximant'] not in wav_gen:
raise ValueError("Approximant %s not available" %
(input_params['approximant']))
for arg in td_required_args:
if arg not in input_params:
raise ValueError("Please provide " + str(arg) )
return wav_gen[input_params['approximant']](**input_params)
get_td_waveform.__doc__ = get_td_waveform.__doc__.format(
params=parameters.td_waveform_params.docstr(prefix=" ",
include_label=False).lstrip(' '))
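# Minimal usage sketch (parameter values are illustrative):
#   hp, hc = get_td_waveform(approximant="TaylorT4", mass1=10, mass2=10,
#                            f_lower=30, delta_t=1.0/4096)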
def get_fd_waveform(template=None, **kwargs):
"""Return a frequency domain gravitational waveform.
Parameters
----------
template: object
An object that has attached properties. This can be used to substitute
for keyword arguments. A common example would be a row in an xml table.
{params}
Returns
-------
hplustilde: FrequencySeries
The plus phase of the waveform in frequency domain.
hcrosstilde: FrequencySeries
The cross phase of the waveform in frequency domain.
"""
input_params = props(template,**kwargs)
wav_gen = fd_wav[type(_scheme.mgr.state)]
if 'approximant' not in input_params:
raise ValueError("Please provide an approximant name")
elif input_params['approximant'] not in wav_gen:
raise ValueError("Approximant %s not available" %
(input_params['approximant']))
for arg in fd_required_args:
if arg not in input_params:
raise ValueError("Please provide " + str(arg) )
try:
ffunc = input_params.pop('f_final_func')
if ffunc != '':
# convert the frequency function to a value
input_params['f_final'] = pnutils.named_frequency_cutoffs[ffunc](
input_params)
        # if f_final is not above f_lower (+ delta_f), raise a NoWaveformError
if 'f_final' in input_params and (
input_params['f_lower']+input_params['delta_f']
>= input_params['f_final']):
raise NoWaveformError("cannot generate waveform: f_lower >= f_final")
except KeyError:
pass
return wav_gen[input_params['approximant']](**input_params)
get_fd_waveform.__doc__ = get_fd_waveform.__doc__.format(
params=parameters.fd_waveform_params.docstr(prefix=" ",
include_label=False).lstrip(' '))
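# A minimal usage sketch (hedged; illustrative values):
#
#     hptilde, hctilde = get_fd_waveform(approximant='IMRPhenomD', mass1=10.0,
#                                        mass2=10.0, delta_f=0.25, f_lower=30.0)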
def get_interpolated_fd_waveform(dtype=numpy.complex64, return_hc=True,
**params):
""" Return a fourier domain waveform approximant, using interpolation
"""
def rulog2(val):
return 2.0 ** numpy.ceil(numpy.log2(float(val)))
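    # rulog2 rounds up to the next power of two, e.g. rulog2(5.0) == 8.0,
    # so the minimum delta_f chosen below corresponds to a power-of-two
    # duration for the coarse waveform that is interpolated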
orig_approx = params['approximant']
params['approximant'] = params['approximant'].replace('_INTERP', '')
df = params['delta_f']
if 'duration' not in params:
duration = get_waveform_filter_length_in_time(**params)
elif params['duration'] > 0:
duration = params['duration']
else:
err_msg = "Waveform duration must be greater than 0."
raise ValueError(err_msg)
#FIXME We should try to get this length directly somehow
# I think this number should be conservative
ringdown_padding = 0.5
df_min = 1.0 / rulog2(duration + ringdown_padding)
# FIXME: I don't understand this, but waveforms with df_min < 0.5 will chop
# off the inspiral when using ringdown_padding - 0.5.
# Also, if ringdown_padding is set to a very small
# value we can see cases where the ringdown is chopped.
if df_min > 0.5:
df_min = 0.5
params['delta_f'] = df_min
hp, hc = get_fd_waveform(**params)
hp = hp.astype(dtype)
if return_hc:
hc = hc.astype(dtype)
else:
hc = None
f_end = get_waveform_end_frequency(**params)
if f_end is None:
f_end = (len(hp) - 1) * hp.delta_f
if 'f_final' in params and params['f_final'] > 0:
f_end_params = params['f_final']
if f_end is not None:
f_end = min(f_end_params, f_end)
n_min = int(rulog2(f_end / df_min)) + 1
if n_min < len(hp):
hp = hp[:n_min]
if hc is not None:
hc = hc[:n_min]
offset = int(ringdown_padding * (len(hp)-1)*2 * hp.delta_f)
hp = interpolate_complex_frequency(hp, df, zeros_offset=offset, side='left')
if hc is not None:
hc = interpolate_complex_frequency(hc, df, zeros_offset=offset,
side='left')
params['approximant'] = orig_approx
return hp, hc
def get_sgburst_waveform(template=None, **kwargs):
"""Return the plus and cross polarizations of a time domain
sine-Gaussian burst waveform.
Parameters
----------
template: object
        An object that has attached properties. This can be used to substitute
for keyword arguments. A common example would be a row in an xml table.
approximant : string
A string that indicates the chosen approximant. See `td_approximants`
for available options.
q : float
The quality factor of a sine-Gaussian burst
frequency : float
The centre-frequency of a sine-Gaussian burst
delta_t : float
The time step used to generate the waveform
hrss : float
        The root-sum-squared (rss) strain amplitude
amplitude: float
The strain amplitude
Returns
-------
hplus: TimeSeries
The plus polarization of the waveform.
hcross: TimeSeries
The cross polarization of the waveform.
"""
input_params = props_sgburst(template,**kwargs)
for arg in sgburst_required_args:
if arg not in input_params:
raise ValueError("Please provide " + str(arg))
return _lalsim_sgburst_waveform(**input_params)
# Waveform filter routines ###################################################
# Organize Filter Generators
_inspiral_fd_filters = {}
_cuda_fd_filters = {}
_cuda_fd_filters['SPAtmplt'] = spa_tmplt
_inspiral_fd_filters['SPAtmplt'] = spa_tmplt
filter_wav = _scheme.ChooseBySchemeDict()
filter_wav.update( {_scheme.CPUScheme:_inspiral_fd_filters,
_scheme.CUDAScheme:_cuda_fd_filters,
} )
# Organize functions for function conditioning/precalculated values
_filter_norms = {}
_filter_ends = {}
_filter_preconditions = {}
_template_amplitude_norms = {}
_filter_time_lengths = {}
def seobnrv2_final_frequency(**kwds):
return pnutils.get_final_freq("SEOBNRv2", kwds['mass1'], kwds['mass2'],
kwds['spin1z'], kwds['spin2z'])
def get_imr_length(approx, **kwds):
"""Call through to pnutils to obtain IMR waveform durations
"""
m1 = float(kwds['mass1'])
m2 = float(kwds['mass2'])
s1z = float(kwds['spin1z'])
s2z = float(kwds['spin2z'])
f_low = float(kwds['f_lower'])
# 10% margin of error is incorporated in the pnutils function
return pnutils.get_imr_duration(m1, m2, s1z, s2z, f_low, approximant=approx)
def seobnrv2_length_in_time(**kwds):
"""Stub for holding the calculation of SEOBNRv2* waveform duration.
"""
return get_imr_length("SEOBNRv2", **kwds)
def seobnrv4_length_in_time(**kwds):
"""Stub for holding the calculation of SEOBNRv4* waveform duration.
"""
return get_imr_length("SEOBNRv4", **kwds)
def imrphenomd_length_in_time(**kwds):
"""Stub for holding the calculation of IMRPhenomD waveform duration.
"""
return get_imr_length("IMRPhenomD", **kwds)
_filter_norms["SPAtmplt"] = spa_tmplt_norm
_filter_preconditions["SPAtmplt"] = spa_tmplt_precondition
_filter_ends["SPAtmplt"] = spa_tmplt_end
_filter_ends["TaylorF2"] = spa_tmplt_end
#_filter_ends["SEOBNRv1_ROM_EffectiveSpin"] = seobnrv2_final_frequency
#_filter_ends["SEOBNRv1_ROM_DoubleSpin"] = seobnrv2_final_frequency
#_filter_ends["SEOBNRv2_ROM_EffectiveSpin"] = seobnrv2_final_frequency
#_filter_ends["SEOBNRv2_ROM_DoubleSpin"] = seobnrv2_final_frequency
#_filter_ends["SEOBNRv2_ROM_DoubleSpin_HI"] = seobnrv2_final_frequency
# PhenomD returns higher frequencies than this, so commenting this out for now
#_filter_ends["IMRPhenomC"] = seobnrv2_final_frequency
#_filter_ends["IMRPhenomD"] = seobnrv2_final_frequency
_template_amplitude_norms["SPAtmplt"] = spa_amplitude_factor
_filter_time_lengths["SPAtmplt"] = spa_length_in_time
_filter_time_lengths["TaylorF2"] = spa_length_in_time
_filter_time_lengths["SEOBNRv1_ROM_EffectiveSpin"] = seobnrv2_length_in_time
_filter_time_lengths["SEOBNRv1_ROM_DoubleSpin"] = seobnrv2_length_in_time
_filter_time_lengths["SEOBNRv2_ROM_EffectiveSpin"] = seobnrv2_length_in_time
_filter_time_lengths["SEOBNRv2_ROM_DoubleSpin"] = seobnrv2_length_in_time
_filter_time_lengths["SEOBNRv2_ROM_DoubleSpin_HI"] = seobnrv2_length_in_time
_filter_time_lengths["SEOBNRv4_ROM"] = seobnrv4_length_in_time
_filter_time_lengths["IMRPhenomC"] = imrphenomd_length_in_time
_filter_time_lengths["IMRPhenomD"] = imrphenomd_length_in_time
_filter_time_lengths["IMRPhenomPv2"] = imrphenomd_length_in_time
_filter_time_lengths["SpinTaylorF2"] = spa_length_in_time
# Also add generators for switching between approximants
apx_name = "SpinTaylorF2_SWAPPER"
cpu_fd[apx_name] = _spintaylor_aligned_prec_swapper
_filter_time_lengths[apx_name] = _filter_time_lengths["SpinTaylorF2"]
# We can do interpolation for waveforms that have a time length
for apx in copy.copy(_filter_time_lengths):
if apx in cpu_fd:
apx_int = apx + '_INTERP'
cpu_fd[apx_int] = get_interpolated_fd_waveform
_filter_time_lengths[apx_int] = _filter_time_lengths[apx]
td_wav = _scheme.ChooseBySchemeDict()
fd_wav = _scheme.ChooseBySchemeDict()
td_wav.update({_scheme.CPUScheme:cpu_td,_scheme.CUDAScheme:cuda_td})
fd_wav.update({_scheme.CPUScheme:cpu_fd,_scheme.CUDAScheme:cuda_fd})
sgburst_wav = {_scheme.CPUScheme:cpu_sgburst}
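# e.g. when the active scheme is a CPUScheme, fd_wav[type(_scheme.mgr.state)]
# resolves to cpu_fd, so fd_wav[...]['IMRPhenomD'] is the CPU
# frequency-domain generator for that approximant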
def get_waveform_filter(out, template=None, **kwargs):
"""Return a frequency domain waveform filter for the specified approximant
"""
n = len(out)
input_params = props(template, **kwargs)
if input_params['approximant'] in filter_approximants(_scheme.mgr.state):
wav_gen = filter_wav[type(_scheme.mgr.state)]
htilde = wav_gen[input_params['approximant']](out=out, **input_params)
htilde.resize(n)
htilde.chirp_length = get_waveform_filter_length_in_time(**input_params)
htilde.length_in_time = htilde.chirp_length
return htilde
if input_params['approximant'] in fd_approximants(_scheme.mgr.state):
wav_gen = fd_wav[type(_scheme.mgr.state)]
duration = get_waveform_filter_length_in_time(**input_params)
hp, hc = wav_gen[input_params['approximant']](duration=duration,
return_hc=False, **input_params)
hp.resize(n)
out[0:len(hp)] = hp[:]
hp = FrequencySeries(out, delta_f=hp.delta_f, copy=False)
hp.length_in_time = hp.chirp_length = duration
return hp
elif input_params['approximant'] in td_approximants(_scheme.mgr.state):
# N: number of time samples required
N = (n-1)*2
delta_f = 1.0 / (N * input_params['delta_t'])
wav_gen = td_wav[type(_scheme.mgr.state)]
hp, hc = wav_gen[input_params['approximant']](**input_params)
# taper the time series hp if required
if ('taper' in input_params.keys() and \
input_params['taper'] is not None):
hp = wfutils.taper_timeseries(hp, input_params['taper'],
return_lal=False)
return td_waveform_to_fd_waveform(hp, out=out)
else:
raise ValueError("Approximant %s not available" %
(input_params['approximant']))
def td_waveform_to_fd_waveform(waveform, out=None, length=None,
buffer_length=100):
""" Convert a time domain into a frequency domain waveform by FFT.
As a waveform is assumed to "wrap" in the time domain one must be
careful to ensure the waveform goes to 0 at both "boundaries". To
ensure this is done correctly the waveform must have the epoch set such
the merger time is at t=0 and the length of the waveform should be
shorter than the desired length of the FrequencySeries (times 2 - 1)
so that zeroes can be suitably pre- and post-pended before FFTing.
If given, out is a memory array to be used as the output of the FFT.
If not given memory is allocated internally.
If present the length of the returned FrequencySeries is determined
from the length out. If out is not given the length can be provided
expicitly, or it will be chosen as the nearest power of 2. If choosing
length explicitly the waveform length + buffer_length is used when
choosing the nearest binary number so that some zero padding is always
added.
"""
# Figure out lengths and set out if needed
if out is None:
if length is None:
N = pnutils.nearest_larger_binary_number(len(waveform) + \
buffer_length)
n = int(N//2) + 1
else:
n = length
N = (n-1)*2
out = zeros(n, dtype=complex_same_precision_as(waveform))
else:
n = len(out)
N = (n-1)*2
delta_f = 1. / (N * waveform.delta_t)
# total duration of the waveform
tmplt_length = len(waveform) * waveform.delta_t
# for IMR templates the zero of time is at max amplitude (merger)
# thus the start time is minus the duration of the template from
# lower frequency cutoff to merger, i.e. minus the 'chirp time'
tChirp = - float( waveform.start_time ) # conversion from LIGOTimeGPS
waveform.resize(N)
k_zero = int(waveform.start_time / waveform.delta_t)
waveform.roll(k_zero)
htilde = FrequencySeries(out, delta_f=delta_f, copy=False)
fft(waveform.astype(real_same_precision_as(htilde)), htilde)
htilde.length_in_time = tmplt_length
htilde.chirp_length = tChirp
return htilde
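# A usage sketch (hedged): given a time-domain template `hp` whose merger
# sits at t=0 and a desired FrequencySeries length `n`:
#
#     htilde = td_waveform_to_fd_waveform(hp, length=n)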
def get_two_pol_waveform_filter(outplus, outcross, template, **kwargs):
"""Return a frequency domain waveform filter for the specified approximant.
Unlike get_waveform_filter this function returns both h_plus and h_cross
components of the waveform, which are needed for searches where h_plus
and h_cross are not related by a simple phase shift.
"""
n = len(outplus)
# If we don't have an inclination column alpha3 might be used
    if not hasattr(template, 'inclination')\
            and 'inclination' not in kwargs:
if hasattr(template, 'alpha3'):
kwargs['inclination'] = template.alpha3
input_params = props(template, **kwargs)
if input_params['approximant'] in fd_approximants(_scheme.mgr.state):
wav_gen = fd_wav[type(_scheme.mgr.state)]
hp, hc = wav_gen[input_params['approximant']](**input_params)
hp.resize(n)
hc.resize(n)
outplus[0:len(hp)] = hp[:]
hp = FrequencySeries(outplus, delta_f=hp.delta_f, copy=False)
outcross[0:len(hc)] = hc[:]
hc = FrequencySeries(outcross, delta_f=hc.delta_f, copy=False)
hp.chirp_length = get_waveform_filter_length_in_time(**input_params)
hp.length_in_time = hp.chirp_length
hc.chirp_length = hp.chirp_length
hc.length_in_time = hp.length_in_time
return hp, hc
elif input_params['approximant'] in td_approximants(_scheme.mgr.state):
# N: number of time samples required
N = (n-1)*2
delta_f = 1.0 / (N * input_params['delta_t'])
wav_gen = td_wav[type(_scheme.mgr.state)]
hp, hc = wav_gen[input_params['approximant']](**input_params)
# taper the time series hp if required
if ('taper' in input_params.keys() and \
input_params['taper'] is not None):
hp = wfutils.taper_timeseries(hp, input_params['taper'],
return_lal=False)
hc = wfutils.taper_timeseries(hc, input_params['taper'],
return_lal=False)
# total duration of the waveform
tmplt_length = len(hp) * hp.delta_t
# for IMR templates the zero of time is at max amplitude (merger)
# thus the start time is minus the duration of the template from
# lower frequency cutoff to merger, i.e. minus the 'chirp time'
tChirp = - float( hp.start_time ) # conversion from LIGOTimeGPS
hp.resize(N)
hc.resize(N)
k_zero = int(hp.start_time / hp.delta_t)
hp.roll(k_zero)
hc.roll(k_zero)
hp_tilde = FrequencySeries(outplus, delta_f=delta_f, copy=False)
hc_tilde = FrequencySeries(outcross, delta_f=delta_f, copy=False)
fft(hp.astype(real_same_precision_as(hp_tilde)), hp_tilde)
fft(hc.astype(real_same_precision_as(hc_tilde)), hc_tilde)
hp_tilde.length_in_time = tmplt_length
hp_tilde.chirp_length = tChirp
hc_tilde.length_in_time = tmplt_length
hc_tilde.chirp_length = tChirp
return hp_tilde, hc_tilde
else:
raise ValueError("Approximant %s not available" %
(input_params['approximant']))
def waveform_norm_exists(approximant):
if approximant in _filter_norms:
return True
else:
return False
def get_template_amplitude_norm(template=None, **kwargs):
""" Return additional constant template normalization. This only affects
the effective distance calculation. Returns None for all templates with a
physically meaningful amplitude.
"""
    input_params = props(template, **kwargs)
    approximant = input_params['approximant']
if approximant in _template_amplitude_norms:
return _template_amplitude_norms[approximant](**input_params)
else:
return None
def get_waveform_filter_precondition(approximant, length, delta_f):
"""Return the data preconditioning factor for this approximant.
"""
if approximant in _filter_preconditions:
return _filter_preconditions[approximant](length, delta_f)
else:
return None
def get_waveform_filter_norm(approximant, psd, length, delta_f, f_lower):
""" Return the normalization vector for the approximant
"""
if approximant in _filter_norms:
return _filter_norms[approximant](psd, length, delta_f, f_lower)
else:
return None
def get_waveform_end_frequency(template=None, **kwargs):
"""Return the stop frequency of a template
"""
    input_params = props(template, **kwargs)
    approximant = input_params['approximant']
if approximant in _filter_ends:
return _filter_ends[approximant](**input_params)
else:
return None
def get_waveform_filter_length_in_time(approximant, template=None, **kwargs):
"""For filter templates, return the length in time of the template.
"""
kwargs = props(template, **kwargs)
if approximant in _filter_time_lengths:
return _filter_time_lengths[approximant](**kwargs)
else:
return None
__all__ = ["get_td_waveform", "get_fd_waveform", "get_fd_waveform_sequence",
"print_td_approximants", "print_fd_approximants",
"td_approximants", "fd_approximants",
"get_waveform_filter", "filter_approximants",
"get_waveform_filter_norm", "get_waveform_end_frequency",
"waveform_norm_exists", "get_template_amplitude_norm",
"get_waveform_filter_length_in_time", "get_sgburst_waveform",
"print_sgburst_approximants", "sgburst_approximants",
"td_waveform_to_fd_waveform", "get_two_pol_waveform_filter",
"NoWaveformError"]
| bema-ligo/pycbc | pycbc/waveform/waveform.py | Python | gpl-3.0 | 36,965 | ["Gaussian"] | b5fe14d11ac272d52c12399a8f3ab980280bb58f73d9f8db6789e4cdb6984140 |
"""
Parameterizes molecules for molecular dynamics simulations
"""
__version__ = '2.5.2'
__author__ = 'Robin Betz'
from Dabble.param.moleculematcher import MoleculeMatcher
from Dabble.param.charmmmatcher import CharmmMatcher
from Dabble.param.ambermatcher import AmberMatcher
from Dabble.param.charmm import CharmmWriter
from Dabble.param.amber import AmberWriter
| drorlab/dabble | Dabble/param/__init__.py | Python | gpl-2.0 | 367 | ["Amber", "CHARMM"] | b994dd3c817f38471b028aacfda3690fe6853d6db78078041d7e37430c2a5c58 |
# This module implements classes that represent atoms, molecules, and
# complexes. They are made as copies from blueprints in the database.
#
# Written by Konrad Hinsen
# last revision: 2002-6-3
#
import Bonds, Collection, ConfigIO, Database, Units, Utility, Visualization
from Scientific.Geometry import Vector, Tensor
from Scientific.Geometry.Transformation import Rotation, Translation
from Scientific.DictWithDefault import DictWithDefault
from Scientific.Geometry import Objects3D
import copy, Numeric, operator, string, types
#
# The base class for all chemical objects.
#
class ChemicalObject(Collection.GroupOfAtoms, Visualization.Viewable):
"""General chemical object
A Glossary:Subclass of Class:MMTK.Collection.GroupOfAtoms
and Class:MMTK.Visualization.Viewable.
This is an Glossary:abstract-base-class that implements methods which
are applicable to any chemical object (atom, molecule, etc.).
"""
def __init__(self, blueprint, memo):
if type(blueprint) == types.StringType:
blueprint = self.blueprintclass(blueprint)
self.type = blueprint.type
if hasattr(blueprint, 'name'):
self.name = blueprint.name
if memo is None: memo = {}
memo[id(blueprint)] = self
for attr in blueprint.instance:
setattr(self, attr,
Database.instantiate(getattr(blueprint, attr), memo))
is_chemical_object = 1
is_incomplete = 0
is_modified = 0
def __getinitargs__(self):
return (None,)
__safe_for_unpickling__ = 1
def __getattr__(self, attr):
if attr[:1] == '_' or attr[:3] == 'is_':
raise AttributeError
else:
return getattr(self.type, attr)
def isSubsetModel(self):
return 0
def addProperties(self, properties):
if properties:
for item in properties.items():
if hasattr(self, item[0]) and item[0] != 'name':
raise TypeError, 'attribute '+item[0]+' already defined'
setattr(self, item[0], item[1])
def binaryProperty(self, properties, name, default):
value = default
try:
value = properties[name]
del properties[name]
except KeyError:
pass
return value
def topLevelChemicalObject(self):
"""Returns the highest-level chemical object of which
the current object is a part."""
if self.parent is None or not isChemicalObject(self.parent):
return self
else:
return self.parent.topLevelChemicalObject()
def universe(self):
"Returns the universe to which the object belongs."
if self.parent is None:
return None
else:
return self.parent.universe()
def bondedUnits(self):
"""Returns a list containing the subobjects which can
contain bonds. There are no bonds between any of the
subobjects in the list."""
return [self]
def fullName(self):
"""Returns the full name of the object. The full name
consists of the proper name of the object preceded by
the full name of its parent separated by a dot."""
if self.parent is None or not isChemicalObject(self.parent):
return self.name
else:
return self.parent.fullName() + '.' + self.name
def degreesOfFreedom(self):
return Collection.GroupOfAtoms.degreesOfFreedom(self) \
- self.numberOfDistanceConstraints()
def distanceConstraintList(self):
"Returns the list of distance constraints."
return []
def _distanceConstraintList(self):
return []
def traverseBondTree(self, function = None):
return []
def numberOfDistanceConstraints(self):
"Returns the number of distance constraints."
return 0
def setBondConstraints(self, universe=None):
"Sets distance constraints for all bonds."
pass
def removeDistanceConstraints(self, universe=None):
"Removes all distance constraints."
pass
def setRigidBodyConstraints(self, universe = None):
"Sets distance constraints that make the object fully rigid."
if universe is None:
universe = self.universe()
if universe is None:
import Universe
universe = Universe.InfiniteUniverse()
atoms = self.atomList()
if len(atoms) > 1:
self.addDistanceConstraint(atoms[0], atoms[1],
universe.distance(atoms[0], atoms[1]))
if len(atoms) > 2:
self.addDistanceConstraint(atoms[0], atoms[2],
universe.distance(atoms[0], atoms[2]))
self.addDistanceConstraint(atoms[1], atoms[2],
universe.distance(atoms[1], atoms[2]))
if len(atoms) > 3:
for a in atoms[3:]:
self.addDistanceConstraint(atoms[0], a,
universe.distance(atoms[0], a))
self.addDistanceConstraint(atoms[1], a,
universe.distance(atoms[1], a))
self.addDistanceConstraint(atoms[2], a,
universe.distance(atoms[2], a))
def getAtomProperty(self, atom, property):
"""Returns the value of the specified |property| for the
given |atom| from the chemical database.
Note: the property is first looked up in the database entry
for the object on which the method is called. If the lookup
fails, the complete hierarchy from the atom to the top-level
object is constructed and traversed starting from the top-level
object until the property is found. This permits database entries
for higher-level objects to override property definitions in
its constituents.
At the atom level, the property is retrieved from an attribute
with the same name. This means that properties at the atom
level can be defined both in the chemical database and for
each atom individually by assignment to the attribute."""
def description(self, index_map = None):
tag = Utility.uniqueAttribute()
s = self._description(tag, index_map, 1)
for a in self.atomList():
delattr(a, tag)
return s
def __repr__(self):
return self.__class__.__name__ + ' ' + self.fullName()
__str__ = __repr__
def __copy__(self):
if self.is_incomplete:
raise TypeError, "Can't copy incomplete object"
return copy.deepcopy(self, {id(self.parent): None})
# Type check
def isChemicalObject(object):
"Returns 1 if |object| is a chemical object."
return hasattr(object, 'is_chemical_object')
#
# The second base class for all composite chemical objects.
#
class CompositeChemicalObject:
"""Chemical object with subobjects
This is an Glossary:abstract-base-class that implements methods
which can be used with any composite chemical object,
i.e. any chemical object that is not an atom.
"""
def __init__(self, properties):
if properties.has_key('configuration'):
conf = properties['configuration']
self.configurations[conf].applyTo(self)
del properties['configuration']
elif hasattr(self, 'configurations') and \
self.configurations.has_key('default'):
self.configurations['default'].applyTo(self)
if properties.has_key('position'):
self.translateTo(properties['position'])
del properties['position']
self.addProperties(properties)
def atomList(self):
"Returns a list containing all atoms in the object."
return self.atoms
def setPosition(self, atom, position):
if atom.__class__ is Database.AtomReference:
atom = self.atoms[atom.number]
atom.setPosition(position)
def setIndex(self, atom, index):
if atom.__class__ is Database.AtomReference:
atom = self.atoms[atom.number]
atom.setIndex(index)
def getAtom(self, atom):
if atom.__class__ is Database.AtomReference:
atom = self.atoms[atom.number]
return atom
def getReference(self, atom):
if atom.__class__ is Database.AtomReference:
return atom
return Database.AtomReference(self.atoms.index(atom))
def getAtomProperty(self, atom, property, levels = None):
try:
return getattr(self, property)[self.getReference(atom)]
except (AttributeError, KeyError):
if levels is None:
object = atom
levels = []
while object != self:
levels.append(object)
object = object.parent
if not levels:
raise KeyError, 'Property ' + property + \
' not defined for ', `atom`
return levels[-1].getAtomProperty(atom, property, levels[:-1])
def deleteUndefinedAtoms(self):
delete = []
for a in self.atoms:
if a.position() is None:
delete.append(a)
for a in delete:
a.delete()
def _deleteAtom(self, atom):
self.atoms.remove(atom)
self.is_modified = 1
self.type = None
if self.parent is not None:
self.parent._deleteAtom(atom)
def distanceConstraintList(self):
dc = self._distanceConstraintList()
for o in self._subunits():
dc = dc + o._distanceConstraintList()
return dc
def _distanceConstraintList(self):
try:
return self.distance_constraints
except AttributeError:
return []
def numberOfDistanceConstraints(self):
n = len(self._distanceConstraintList())
for o in self._subunits():
n = n + len(o._distanceConstraintList())
return n
def setBondConstraints(self, universe=None):
if universe is None:
universe = self.universe()
bond_database = universe.bondLengthDatabase()
for o in self.bondedUnits():
o._setBondConstraints(universe, bond_database)
def _setBondConstraints(self, universe, bond_database):
self.distance_constraints = []
for bond in self.bonds:
d = bond_database.bondLength(bond)
if d is None:
d = universe.distance(bond.a1, bond.a2)
self.distance_constraints.append((bond.a1, bond.a2, d))
def addDistanceConstraint(self, atom1, atom2, distance):
try:
self.distance_constraints.append((atom1, atom2, distance))
except AttributeError:
self.distance_constraints = [(atom1, atom2, distance)]
def removeDistanceConstraints(self, universe=None):
try:
del self.distance_constraints
except AttributeError:
pass
for o in self._subunits():
o.removeDistanceConstraints()
def traverseBondTree(self, function = None):
self.setBondAttributes()
todo = [self.atoms[0]]
done = {todo[0]: 1}
bonds = []
while todo:
next_todo = []
for atom in todo:
bonded = atom.bondedTo()
for other in bonded:
if not done.get(other, 0):
if function is None:
bonds.append((atom, other))
else:
bonds.append((function(atom), function(other)))
next_todo.append(other)
done[other] = 1
todo = next_todo
self.clearBondAttributes()
return bonds
def _description(self, tag, index_map, toplevel):
letter, kwargs = self._descriptionSpec()
s = letter + '(' + `self.name` + ',['
for o in self._subunits():
s = s + o._description(tag, index_map, 0) + ','
for a in self.atoms:
if not hasattr(a, tag):
s = s + a._description(tag, index_map, 0) + ','
s = s + ']'
if toplevel:
s = s + ',' + `self._typeName()`
if kwargs is not None:
s = s + ',' + kwargs
constraints = self._distanceConstraintList()
if constraints:
s = s + ',dc=['
if index_map is None:
for c in constraints:
s = s + '(%d,%d,%f),' % (c[0].index, c[1].index, c[2])
else:
for c in constraints:
s = s + '(%d,%d,%f),' % (index_map[c[0].index],
index_map[c[1].index], c[2])
s = s + ']'
return s + ')'
def _typeName(self):
return self.type.name
def _graphics(self, conf, distance_fn, model, module, options):
lists = []
for bu in self.bondedUnits():
for a in bu.atomList():
lists.append(a._graphics(conf, distance_fn, model,
module, options))
if hasattr(bu, 'bonds'):
for b in bu.bonds:
lists.append(b._graphics(conf, distance_fn, model,
module, options))
return reduce(operator.add, lists)
#
# The classes for atoms, groups, molecules, and complexes.
#
class Atom(ChemicalObject):
"""Atom
A Glossary:Subclass of Class:MMTK.ChemicalObjects.ChemicalObject.
Constructor: Atom(|element|, **|properties|)
Arguments:
|element| -- a string (not case sensitive) specifying the chemical element
|properties| -- optional keyword properties:
* position: the atom position (a vector)
* name: the atom name (a string)
"""
def __init__(self, blueprint, _memo = None, **properties):
Utility.uniqueID.registerObject(self)
if blueprint is not None:
ChemicalObject.__init__(self, blueprint, _memo)
self._mass = self.type.average_mass
self.array = None
self.index = None
if properties.has_key('position'):
self.setPosition(properties['position'])
del properties['position']
self.addProperties(properties)
blueprintclass = Database.BlueprintAtom
def __getstate__(self):
state = copy.copy(self.__dict__)
if self.array is not None:
state['array'] = None
state['pos'] = Vector(self.array[self.index,:])
return state
def atomList(self):
return [self]
def setPosition(self, position):
"Changes the position to |position|."
if position is None:
if self.array is None:
try: del self.pos
except AttributeError: pass
else:
self.array[self.index,0] = Utility.undefined
self.array[self.index,1] = Utility.undefined
self.array[self.index,2] = Utility.undefined
else:
if self.array is None:
self.pos = position
else:
self.array[self.index,0] = position[0]
self.array[self.index,1] = position[1]
self.array[self.index,2] = position[2]
translateTo = setPosition
def position(self, conf = None):
"""Returns the position in configuration |conf|. If |conf| is
'None', use the current configuration. If the atom has not been
assigned a position, the return value is 'None'."""
if conf is None:
if self.array is None:
try:
return self.pos
except AttributeError:
return None
else:
if Numeric.logical_or.reduce(
Numeric.greater(self.array[self.index,:],
Utility.undefined_limit)):
return None
else:
return Vector(self.array[self.index,:])
else:
return conf[self]
centerOfMass = position
def setMass(self, mass):
"Set the atom mass to |mass|."
self._mass = mass
def getAtom(self, atom):
return self
def translateBy(self, vector):
if self.array is None:
self.pos = self.pos + vector
else:
self.array[self.index,0] = self.array[self.index,0] + vector[0]
self.array[self.index,1] = self.array[self.index,1] + vector[1]
self.array[self.index,2] = self.array[self.index,2] + vector[2]
def numberOfPoints(self):
return 1
numberOfCartesianCoordinates = numberOfPoints
def setIndex(self, index):
if self.index is not None and self.index != index:
raise ValueError, 'Wrong atom index'
self.index = index
def setArray(self, array, indices):
if len(indices) == 1:
index = indices[0]
else:
if self.index is None or self.index not in indices:
return 0
index = self.index
indices.remove(index)
if array is None:
self.index = index
self.array = None
return 1
if self.array is None:
try:
array[index,0] = self.pos[0]
array[index,1] = self.pos[1]
array[index,2] = self.pos[2]
except AttributeError:
array[index,0] = Utility.undefined
array[index,1] = Utility.undefined
array[index,2] = Utility.undefined
else:
array[index,0] = self.array[self.index,0]
array[index,1] = self.array[self.index,1]
array[index,2] = self.array[self.index,2]
self.array = array
self.index = index
try:
del self.pos
except AttributeError: pass
return 1
def getArray(self):
return self.array
def setBondAttribute(self, atom):
try:
self.bonded_to__.append(atom)
except AttributeError:
self.bonded_to__ = [atom]
def clearBondAttribute(self):
try:
del self.bonded_to__
except AttributeError:
pass
def bondedTo(self):
"Returns a list of all atoms to which a chemical bond exists."
try:
return self.bonded_to__
except AttributeError:
if self.parent is None or not isChemicalObject(self.parent):
return []
else:
return self.parent.bondedTo(self)
def delete(self):
if self.parent is not None:
self.parent._deleteAtom(self)
def getAtomProperty(self, atom, property, levels = None):
if self != atom:
raise ValueError, "Wrong atom"
return getattr(self, property)
def _description(self, tag, index_map, toplevel):
setattr(self, tag, None)
if index_map is None:
index = self.index
else:
index = index_map[self.index]
if toplevel:
return 'A(' + `self.name` + ',' + `index` + ',' + \
`self.symbol` + ')'
else:
return 'A(' + `self.name` + ',' + `index` + ')'
def _graphics(self, conf, distance_fn, model, module, options):
if model != 'ball_and_stick':
return []
color = self._atomColor(self, options)
material = module.DiffuseMaterial(color)
radius = options.get('ball_radius', 0.03)
return [module.Sphere(self.position(), radius, material=material)]
class Group(CompositeChemicalObject, ChemicalObject):
"""Group of bonded atoms
A Glossary:Subclass of Class:MMTK.ChemicalObjects.ChemicalObject.
Groups can contain atoms and other groups, and link them by chemical
bonds. They are used to represent functional groups or any other
part of a molecule that has a well-defined identity.
Groups cannot be created in application programs, but only in
database definitions for molecules.
Constructor: Group(|species|, **|properties|)
Arguments:
|species| -- a string (not case sensitive) that specifies the group
name in the chemical database
|properties| -- optional keyword properties:
* position: the center-of-mass position (a vector)
    * name: the group name (a string)
"""
def __init__(self, blueprint, _memo = None, **properties):
if blueprint is not None:
ChemicalObject.__init__(self, blueprint, _memo)
self.addProperties(properties)
blueprintclass = Database.BlueprintGroup
is_incomplete = 1
def bondedTo(self, atom):
if self.parent is None or not isChemicalObject(self.parent):
return []
else:
return self.parent.bondedTo(atom)
def setBondAttributes(self):
pass
def clearBondAttributes(self):
pass
def _subunits(self):
return self.groups
def _descriptionSpec(self):
return "G", None
class Molecule(CompositeChemicalObject, ChemicalObject):
"""Molecule
A Glossary:Subclass of Class:MMTK.ChemicalObjects.ChemicalObject.
Molecules consist of atoms and groups linked by bonds.
Constructor: Molecule(|species|, **|properties|)
Arguments:
|species| -- a string (not case sensitive) that specifies the molecule
name in the chemical database
|properties| -- optional keyword properties:
* position: the center-of-mass position (a vector)
* configuration: the name of a configuration listed in the database
definition of the molecule, which is used to
initialize the atom positions. If no configuration
is specified, the configuration named "default" will
be used, if it exists. Otherwise the atom positions
are undefined.
    * name: the molecule name (a string)
"""
def __init__(self, blueprint, _memo = None, **properties):
if blueprint is not None:
ChemicalObject.__init__(self, blueprint, _memo)
properties = copy.copy(properties)
CompositeChemicalObject.__init__(self, properties)
self.bonds = Bonds.BondList(self.bonds)
blueprintclass = Database.BlueprintMolecule
def bondedTo(self, atom):
return self.bonds.bondedTo(atom)
def setBondAttributes(self):
self.bonds.setBondAttributes()
def clearBondAttributes(self):
for a in self.atoms:
a.clearBondAttribute()
def _subunits(self):
return self.groups
def _descriptionSpec(self):
return "M", None
def addGroup(self, group, bond_atom_pairs):
for a1, a2 in bond_atom_pairs:
o1 = a1.topLevelChemicalObject()
o2 = a2.topLevelChemicalObject()
if not (o1 == self and o2 == group) \
and not(o2 == self and o1 == group):
raise ValueError, "bond %s-%s outside object" % \
(str(a1), str(a2))
self.groups.append(group)
self.atoms = self.atoms + group.atoms
group.parent = self
self.clearBondAttributes()
for a1, a2 in bond_atom_pairs:
self.bonds.append(Bonds.Bond((a1, a2)))
for b in group.bonds:
self.bonds.append(b)
# construct positions of missing hydrogens
def findHydrogenPositions(self):
"""Find reasonable positions for hydrogen atoms that have no
position assigned.
This method uses a heuristic approach based on standard geometry
data. It was developed for proteins and DNA and may not give
good results for other molecules. It raises an exception
if presented with a topology it cannot handle."""
self.setBondAttributes()
try:
unknown = DictWithDefault([])
for a in self.atoms:
if a.position() is None:
if a.symbol != 'H':
raise ValueError, 'position of ' + a.fullName() + \
' is undefined'
bonded = a.bondedTo()[0]
unknown[bonded].append(a)
for a, list in unknown.items():
bonded = a.bondedTo()
n = len(bonded)
known = []
for b in bonded:
if b.position() is not None:
known.append(b)
nb = len(list)
if a.symbol == 'C':
if n == 4:
if nb == 1:
self._C4oneH(a, known, list)
elif nb == 2:
self._C4twoH(a, known, list)
elif nb == 3:
self._C4threeH(a, known, list)
elif n == 3:
if nb == 1:
self._C3oneH(a, known, list)
else:
self._C3twoH(a, known, list)
else:
print a
raise ValueError, "Can't handle C with "+`n`+" bonds"
elif a.symbol == 'N':
if n == 4:
if nb == 3:
self._N4threeH(a, known, list)
elif nb == 2:
self._N4twoH(a, known, list)
elif n == 3:
if nb == 1:
self._N3oneH(a, known, list)
elif nb == 2:
self._N3twoH(a, known, list)
else:
print a
raise ValueError, "Can't handle N with "+`n`+" bonds"
elif a.symbol == 'O' and n == 2:
self._O2(a, known, list)
elif a.symbol == 'S' and n == 2:
self._S2(a, known, list)
else:
print a
raise ValueError, "Can't handle this yet: " + \
a.symbol + ' with ' + `n` + ' bonds (' + \
a.fullName() + ').'
finally:
self.clearBondAttributes()
# default C-H bond length and X-C-H angle
_ch_bond = 1.09*Units.Ang
_hch_angle = Numeric.arccos(-1./3.)*Units.rad
_nh_bond = 1.03*Units.Ang
_hnh_angle = 120.*Units.deg
_oh_bond = 0.95*Units.Ang
_coh_angle = 114.9*Units.deg
_sh_bond = 1.007*Units.Ang
_csh_angle = 96.5*Units.deg
def _C4oneH(self, atom, known, unknown):
r = atom.position()
n0 = (known[0].position()-r).normal()
n1 = (known[1].position()-r).normal()
n2 = (known[2].position()-r).normal()
n3 = (n0 + n1 + n2).normal()
unknown[0].setPosition(r-self._ch_bond*n3)
def _C4twoH(self, atom, known, unknown):
r = atom.position()
r1 = known[0].position()
r2 = known[1].position()
plane = Objects3D.Plane(r, r1, r2)
axis = -((r1-r)+(r2-r)).normal()
plane = plane.rotate(Objects3D.Line(r, axis), 90.*Units.deg)
cone = Objects3D.Cone(r, axis, 0.5*self._hch_angle)
sphere = Objects3D.Sphere(r, self._ch_bond)
circle = sphere.intersectWith(cone)
points = circle.intersectWith(plane)
unknown[0].setPosition(points[0])
unknown[1].setPosition(points[1])
def _C4threeH(self, atom, known, unknown):
self._tetrahedralH(atom, known, unknown, self._ch_bond)
def _C3oneH(self, atom, known, unknown):
r = atom.position()
n1 = (known[0].position()-r).normal()
n2 = (known[1].position()-r).normal()
n3 = -(n1 + n2).normal()
unknown[0].setPosition(r+self._ch_bond*n3)
def _C3twoH(self, atom, known, unknown):
r = atom.position()
r1 = known[0].position()
others = filter(lambda a: a.symbol != 'H', known[0].bondedTo())
r2 = others[0].position()
plane = Objects3D.Plane(r, r1, r2)
axis = (r-r1).normal()
cone = Objects3D.Cone(r, axis, 0.5*self._hch_angle)
sphere = Objects3D.Sphere(r, self._ch_bond)
circle = sphere.intersectWith(cone)
points = circle.intersectWith(plane)
unknown[0].setPosition(points[0])
unknown[1].setPosition(points[1])
def _N3oneH(self, atom, known, unknown):
r = atom.position()
n1 = (known[0].position()-r).normal()
n2 = (known[1].position()-r).normal()
n3 = -(n1 + n2).normal()
unknown[0].setPosition(r+self._nh_bond*n3)
def _N3twoH(self, atom, known, unknown):
r = atom.position()
r1 = known[0].position()
others = filter(lambda a: a.symbol != 'H', known[0].bondedTo())
r2 = others[0].position()
plane = Objects3D.Plane(r, r1, r2)
axis = (r-r1).normal()
cone = Objects3D.Cone(r, axis, 0.5*self._hnh_angle)
sphere = Objects3D.Sphere(r, self._nh_bond)
circle = sphere.intersectWith(cone)
points = circle.intersectWith(plane)
unknown[0].setPosition(points[0])
unknown[1].setPosition(points[1])
def _N4threeH(self, atom, known, unknown):
self._tetrahedralH(atom, known, unknown, self._nh_bond)
def _N4twoH(self, atom, known, unknown):
r = atom.position()
r1 = known[0].position()
r2 = known[1].position()
plane = Objects3D.Plane(r, r1, r2)
axis = -((r1-r)+(r2-r)).normal()
plane = plane.rotate(Objects3D.Line(r, axis), 90.*Units.deg)
cone = Objects3D.Cone(r, axis, 0.5*self._hnh_angle)
sphere = Objects3D.Sphere(r, self._nh_bond)
circle = sphere.intersectWith(cone)
points = circle.intersectWith(plane)
unknown[0].setPosition(points[0])
unknown[1].setPosition(points[1])
def _O2(self, atom, known, unknown):
others = known[0].bondedTo()
for a in others:
r = a.position()
if a != atom and r is not None: break
dihedral = 180.*Units.deg
self._findPosition(unknown[0], atom.position(), known[0].position(), r,
self._oh_bond, self._coh_angle, dihedral)
def _S2(self, atom, known, unknown):
c2 = filter(lambda a: a.symbol == 'C', known[0].bondedTo())[0]
self._findPosition(unknown[0], atom.position(), known[0].position(),
c2.position(),
self._sh_bond, self._csh_angle,
180.*Units.deg)
def _tetrahedralH(self, atom, known, unknown, bond):
r = atom.position()
n = (known[0].position()-r).normal()
cone = Objects3D.Cone(r, n, Numeric.arccos(-1./3.))
sphere = Objects3D.Sphere(r, bond)
circle = sphere.intersectWith(cone)
others = filter(lambda a: a.symbol != 'H', known[0].bondedTo())
others.remove(atom)
other = others[0]
ref = (Objects3D.Plane(circle.center, circle.normal) \
.projectionOf(other.position())-circle.center).normal()
p0 = circle.center + circle.radius*ref
p0 = Objects3D.rotatePoint(p0,
Objects3D.Line(circle.center, circle.normal),
60.*Units.deg)
p1 = Objects3D.rotatePoint(p0,
Objects3D.Line(circle.center, circle.normal),
120.*Units.deg)
p2 = Objects3D.rotatePoint(p1,
Objects3D.Line(circle.center, circle.normal),
120.*Units.deg)
unknown[0].setPosition(p0)
unknown[1].setPosition(p1)
unknown[2].setPosition(p2)
def _findPosition(self, unknown, a1, a2, a3, bond, angle, dihedral):
sphere = Objects3D.Sphere(a1, bond)
cone = Objects3D.Cone(a1, a2-a1, angle)
plane = Objects3D.Plane(a3, a2, a1)
plane = plane.rotate(Objects3D.Line(a1, a2-a1), dihedral)
points = sphere.intersectWith(cone).intersectWith(plane)
for p in points:
if (a1-a2).cross(p-a1)*(plane.normal) > 0:
unknown.setPosition(p)
break
class Crystal(CompositeChemicalObject, ChemicalObject):
def __init__(self, blueprint, _memo = None, **properties):
if blueprint is not None:
ChemicalObject.__init__(self, blueprint, _memo)
properties = copy.copy(properties)
CompositeChemicalObject.__init__(self, properties)
self.bonds = Bonds.BondList(self.bonds)
blueprintclass = Database.BlueprintCrystal
def _subunits(self):
return self.groups
def _descriptionSpec(self):
return "X", None
class Complex(CompositeChemicalObject, ChemicalObject):
"""Complex
A Glossary:Subclass of Class:MMTK.ChemicalObjects.ChemicalObject.
A complex is an assembly of molecules that are not connected by
chemical bonds.
Constructor: Complex(|species|, **|properties|)
Arguments:
|species| -- a string (not case sensitive) that specifies the complex
name in the chemical database
|properties| -- optional keyword properties:
* position: the center-of-mass position (a vector)
* configuration: the name of a configuration listed in the database
definition of the complex
    * name: the complex name (a string)
"""
def __init__(self, blueprint, _memo = None, **properties):
if blueprint is not None:
ChemicalObject.__init__(self, blueprint, _memo)
properties = copy.copy(properties)
CompositeChemicalObject.__init__(self, properties)
blueprintclass = Database.BlueprintComplex
def bondedUnits(self):
return self.molecules
def _subunits(self):
return self.molecules
def _descriptionSpec(self):
return "C", None
Database.registerInstanceClass(Atom.blueprintclass, Atom)
Database.registerInstanceClass(Group.blueprintclass, Group)
Database.registerInstanceClass(Molecule.blueprintclass, Molecule)
Database.registerInstanceClass(Crystal.blueprintclass, Crystal)
Database.registerInstanceClass(Complex.blueprintclass, Complex)
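# Illustrative construction from database blueprints (the species names are
# examples and must exist in the MMTK chemical database):
#
#     h = Atom('hydrogen', position=Vector(0., 0., 0.))
#     w = Molecule('water', position=Vector(0., 0., 0.3))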
class AtomCluster(CompositeChemicalObject, ChemicalObject):
"""An agglomeration of atoms
A Glossary:Subclass of Class:MMTK.ChemicalObjects.ChemicalObject.
An atom cluster acts like a molecule without any bonds or atom
properties. It can be used to represent a group of atoms that
are known to form a chemical unit but whose chemical properties
are not sufficiently known to define a molecule.
Constructor: AtomCluster(|atoms|, **|properties|)
Arguments:
|atoms| -- a list of atom objects
|properties| -- optional keyword properties:
* position: the center-of-mass position (a vector)
    * name: the cluster name (a string)
"""
def __init__(self, atoms = None, **properties):
if atoms is not None:
self.atoms = list(atoms)
self.parent = None
self.type = None
for a in self.atoms:
if a.parent is not None:
raise ValueError, repr(a)+' is part of ' + repr(a.parent)
a.parent = self
if a.name != '':
setattr(self, a.name, a)
properties = copy.copy(properties)
CompositeChemicalObject.__init__(self, properties)
self.bonds = Bonds.BondList([])
def bondedTo(self, atom):
return []
def setBondAttributes(self):
pass
def clearBondAttributes(self):
pass
def _subunits(self):
return []
def _description(self, tag, index_map, toplevel):
s = 'AC(' + `self.name` + ',['
for a in self.atoms:
s = s + a._description(tag, index_map, 1) + ','
return s + '])'
| fxia22/ASM_xf | PythonD/site_python/MMTK/ChemicalObjects.py | Python | gpl-2.0 | 32,290 | ["CRYSTAL"] | dc8ca632734b31245e7396201a1ef4ace0c87625e330e24547a417bf38c7989b |
#!/usr/bin/python
#
# check-aacraid.py
#
# Grabs the output from "/usr/StorMan/arcconf GETCONFIG 1 LD" then
# determines the health of the Logical Devices.
#
# Grabs the output from "/usr/StorMan/arcconf GETCONFIG 1 AL" then
# determines the health of various status indicators from the card
# and drives.
#
# After the checks are run, it deletes the file "UcliEvt.log" from
# the current working directory.
#
# Add this to your "/etc/sudoers" file:
# "nagios ALL=(root) NOPASSWD: /usr/StorMan/arcconf GETCONFIG 1 *"
# Alternately, run this script as a user who can sudo.
#
# v0.1 - only checks card information so far, not drives yet
# v0.2 - checks logical volume status & wipes log
# v0.3 - strips trailing "," & tells you the logical volume with
# the failure
# v0.4 - fixed for modern Python compatibility (subprocess vs popen4)
# v0.5 - do not alert on the BBU "Charging" state
#
# LICENSE/COPYRIGHT
#
# Anchor System - http://www.anchor.com.au
#
# Oliver Hookins
# Paul De Audney
# Barney Desmond
# Mark Smith <mark@bu.mp>
#
# This script has no known license. I found it on Nagios Exchange and made
# some modifications, so I'm publishing it here.
#
import sys, os, re, string, subprocess
c_status_re = re.compile('^\s*Controller Status\s*:\s*(.*)$')
l_status_re = re.compile('^\s*Status of logical device\s*:\s*(.*)$')
l_device_re = re.compile('^Logical device number ([0-9]+).*$')
c_defunct_re = re.compile('^\s*Defunct disk drive count\s:\s*([0-9]+).*$')
c_degraded_re = re.compile('^\s*Logical devices/Failed/Degraded\s*:\s*([0-9]+)/([0-9]+)/([0-9]+).*$')
b_status_re = re.compile('^\s*Status\s*:\s*(.*)$')
b_temp_re = re.compile('^\s*Over temperature\s*:\s*(.*)$')
b_capacity_re = re.compile('\s*Capacity remaining\s*:\s*([0-9]+)\s*percent.*$')
b_time_re = re.compile('\s*Time remaining \(at current draw\)\s*:\s*([0-9]+) days, ([0-9]+) hours, ([0-9]+) minutes.*$')
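# Illustrative: a GETCONFIG output line such as
#   "   Controller Status : Optimal"
# matches c_status_re with group(1) == "Optimal".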
def main(argv):
cstatus = lstatus = ldevice = cdefunct = cdegraded = bstatus = btemp = bcapacity = btime = ""
lnum = result = ""
check_status = 0
for line in exec_and_read("/usr/bin/sudo /usr/StorMan/arcconf GETCONFIG 1 LD"):
# Match the regexs
ldevice = l_device_re.match(line)
if ldevice:
lnum = ldevice.group(1)
continue
lstatus = l_status_re.match(line)
if lstatus:
if lstatus.group(1) != "Optimal":
check_status = 2
result += "Logical Device " + lnum + " " + lstatus.group(1) + ","
for line in exec_and_read("/usr/bin/sudo /usr/StorMan/arcconf GETCONFIG 1 AD"):
# Match the regexs
cstatus = c_status_re.match(line)
if cstatus:
if cstatus.group(1) != "Optimal":
check_status = 2
result += "Controller " + cstatus.group(1) + ","
continue
cdefunct = c_defunct_re.match(line)
if cdefunct:
if int(cdefunct.group(1)) > 0:
check_status = 2
result += "Defunct drives " + cdefunct.group(1) + ","
continue
cdegraded = c_degraded_re.match(line)
if cdegraded:
if int(cdegraded.group(2)) > 0:
check_status = 2
result += "Failed drives " + cdegraded.group(2) + ","
if int(cdegraded.group(3)) > 0:
check_status = 2
result += "Degraded drives " + cdegraded.group(3) + ","
continue
bstatus = b_status_re.match(line)
if bstatus:
if bstatus.group(1) == "Not Installed":
continue
if bstatus.group(1) == "Charging":
# this sets WARNING if the status is charging, but we seem to get
# that pretty frequently, so don't do that. maybe need this?
#if check_status < 2:
# check_status = 1
pass
elif "Optimal" not in bstatus.group(1):
check_status = 2
result += "Battery Status " + bstatus.group(1) + ","
continue
btemp = b_temp_re.match(line)
if btemp:
if btemp.group(1) != "No":
check_status = 2
result += "Battery Overtemp " + btemp.group(1) + ","
continue
bcapacity = b_capacity_re.match(line)
if bcapacity:
result += "Battery Capacity " + bcapacity.group(1) + "%,"
if int(bcapacity.group(1)) < 50:
if check_status < 2:
check_status = 1
if int(bcapacity.group(1)) < 25:
check_status = 2
continue
btime = b_time_re.match(line)
if btime:
timemins = int(btime.group(1)) * 1440 + int(btime.group(2)) * 60 + int(btime.group(3))
if timemins < 1440:
if check_status < 2:
check_status = 1
if timemins < 720:
check_status = 2
result += "Battery Time "
if timemins < 60:
result += str(timemins) + "mins,"
else:
result += str(timemins/60) + "hours,"
if result == "":
result = "No output from arcconf!"
check_status = 3
# strip the trailing "," from the result string.
result = result.rstrip(",")
print result
# we often have a log file sitting around... kill it
try:
os.unlink(os.path.join(os.getcwd(),'UcliEvt.log'))
except:
pass
sys.exit(check_status)
def exec_and_read(cmd):
proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
if proc.returncode == 0:
return stdout.split("\n")
else:
print "Unable to execute arcconf."
sys.exit(3)
if __name__ == '__main__':
main(sys.argv[1:])
| zorkian/nagios-plugins | check_aacraid.py | Python | bsd-3-clause | 5,847 | ["Desmond"] | 5563571260cb81e5830f8b9c8b6cc346d26727a55af1b5c405e712e61b5747da |
import pyspeckit
import numpy as np
from pyspeckit.spectrum.models import inherited_voigtfitter
# This example uses scipy
try:
import scipy
except ImportError:
    raise SystemExit("this example requires scipy")
# technically, the voigt fitter works as a singlefitter (i.e., you can fit the
# background level and the peak simultaneously)
# in practice, however, you need to fit the background independently except for
# gaussians. I don't know why this is.
xarr = pyspeckit.spectrum.units.SpectroscopicAxis(np.linspace(-100, 100, 500),
unit='km/s',
refX=1e9,
refX_unit='Hz')
VF = inherited_voigtfitter.voigt_fitter()
sp1 = pyspeckit.Spectrum(xarr=xarr,
data=(VF.n_modelfunc((1, 0, 2.5, 2.5))(xarr) +
np.random.randn(xarr.shape[0])/20.),
error=np.ones(xarr.shape[0])/20.)
sp1.plotter()
sp1.specfit(fittype='gaussian', composite_fit_color='b', clear=False,
annotate=False, guesses='moments')
sp1.specfit(fittype='lorentzian', composite_fit_color='g', clear=False,
annotate=False, guesses='moments')
sp1.specfit(fittype='voigt', composite_fit_color='r', clear=False,
annotate=True, guesses='moments')
sp2 = pyspeckit.Spectrum(xarr=xarr, data=VF.n_modelfunc((1,0,2.5,5.0))(xarr) +
np.random.randn(xarr.shape[0])/20.,
error=np.ones(xarr.shape[0])/20.)
sp2.plotter()
sp2.specfit(fittype='gaussian', composite_fit_color='b', clear=False,
annotate=False, guesses='moments')
sp2.specfit(fittype='lorentzian', composite_fit_color='g', clear=False,
annotate=False, guesses='moments')
sp2.specfit(fittype='voigt', composite_fit_color='r', clear=False,
annotate=True, guesses='moments')
sp3 = pyspeckit.Spectrum(xarr=xarr, data=VF.n_modelfunc((1,0,2.5,5.0))(xarr) +
np.random.randn(xarr.shape[0])/50.,
error=np.ones(xarr.shape[0])/50.)
sp3.plotter()
sp3.specfit(fittype='gaussian', composite_fit_color='b', clear=False,
annotate=False, guesses='moments')
sp3.specfit(fittype='lorentzian', composite_fit_color='g', clear=False,
annotate=False, guesses='moments')
sp3.specfit(fittype='voigt', composite_fit_color='r', clear=False,
annotate=True, guesses='moments')
| vlas-sokolov/pyspeckit | examples/voigt.py | Python | mit | 2,458 | ["Gaussian"] | 409c71dc24529dc829c0ffe1f6acd40d11b21da3e34e258e1c76557decbdf8da |
"""Functionality for representing data on disk of individual models."""
import logging
import numpy as np
import xarray as xr
from ._constants import RADIUS_EARTH
from . import internal_names
from . import utils
def _get_grid_attr(grid_objs, attr_name):
"""Get attribute from the grid_objs file(s)."""
for xds in grid_objs:
try:
return getattr(xds, attr_name)
except AttributeError:
pass
def _rename_coords(ds, attrs):
"""Rename coordinates to aospy's internal names."""
for name_int, names_ext in attrs.items():
# Check if coord is in dataset already.
ds_coord_name = set(names_ext).intersection(set(ds.coords))
if ds_coord_name:
# Rename to the aospy internal name.
try:
ds = ds.rename({list(ds_coord_name)[0]: name_int})
logging.debug("Rename coord from `{0}` to `{1}` for "
"Dataset `{2}`".format(ds_coord_name,
name_int, ds))
# xarray throws a ValueError if the name already exists
except ValueError:
ds = ds
return ds
def _bounds_from_array(arr, dim_name, bounds_name):
"""Get the bounds of an array given its center values.
E.g. if lat-lon grid center lat/lon values are known, but not the
bounds of each grid box. The algorithm assumes that the bounds
are simply halfway between each pair of center values.
"""
# TODO: don't assume needed dimension is in axis=0
# TODO: refactor to get rid of repetitive code
spacing = arr.diff(dim_name).values
lower = xr.DataArray(np.empty_like(arr), dims=arr.dims,
coords=arr.coords)
lower.values[:-1] = arr.values[:-1] - 0.5*spacing
lower.values[-1] = arr.values[-1] - 0.5*spacing[-1]
upper = xr.DataArray(np.empty_like(arr), dims=arr.dims,
coords=arr.coords)
upper.values[:-1] = arr.values[:-1] + 0.5*spacing
upper.values[-1] = arr.values[-1] + 0.5*spacing[-1]
bounds = xr.concat([lower, upper], dim='bounds')
return bounds.T
def _diff_bounds(bounds, coord):
"""Get grid spacing by subtracting upper and lower bounds."""
try:
return bounds[:, 1] - bounds[:, 0]
except IndexError:
diff = np.diff(bounds, axis=0)
return xr.DataArray(diff, dims=coord.dims, coords=coord.coords)
def _grid_sfc_area(lon, lat, lon_bounds=None, lat_bounds=None):
"""Calculate surface area of each grid cell in a lon-lat grid."""
# Compute the bounds if not given.
if lon_bounds is None:
lon_bounds = _bounds_from_array(
lon, internal_names.LON_STR, internal_names.LON_BOUNDS_STR)
if lat_bounds is None:
lat_bounds = _bounds_from_array(
lat, internal_names.LAT_STR, internal_names.LAT_BOUNDS_STR)
# Compute the surface area.
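    # Each cell's area is the exact spherical result
    #   A = R**2 * (lon_hi - lon_lo) * (sin(lat_hi) - sin(lat_lo)),
    # computed below as dlon * dsinlat * RADIUS_EARTH**2 with longitudes
    # converted to radians.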
dlon = _diff_bounds(utils.vertcoord.to_radians(lon_bounds, is_delta=True),
lon)
sinlat_bounds = np.sin(utils.vertcoord.to_radians(lat_bounds,
is_delta=True))
dsinlat = np.abs(_diff_bounds(sinlat_bounds, lat))
sfc_area = dlon*dsinlat*(RADIUS_EARTH**2)
# Rename the coordinates such that they match the actual lat / lon.
try:
sfc_area = sfc_area.rename(
{internal_names.LAT_BOUNDS_STR: internal_names.LAT_STR,
internal_names.LON_BOUNDS_STR: internal_names.LON_STR})
except ValueError:
pass
# Clean up: correct names and dimension order.
sfc_area = sfc_area.rename(internal_names.SFC_AREA_STR)
sfc_area[internal_names.LAT_STR] = lat
sfc_area[internal_names.LON_STR] = lon
return sfc_area.transpose()
class Model(object):
"""An object that describes a single climate or weather model.
Each `Model` object is associated with a parent `Proj` object and also with
one or more child `Run` objects.
    If aospy is being used to work with data that is not from a climate or
    weather model, the `Model` object can be used e.g. to represent a gridded
    observational product, with its child `Run` objects representing different
    released versions of that dataset.
Attributes
----------
name : str
The model's name
description : str
A description of the model
proj : {None, aospy.Proj}
The model's parent aospy.Proj object
runs : list
A list of this model's child Run objects
default_runs : list
The default subset of child run objects on which to perform
calculations via `aospy.Calc` with this model if not otherwise
specified
grid_file_paths : list
The paths to netCDF files stored on disk from which the model's
coordinate data can be taken.
default_start_date, default_end_date : datetime.datetime
The default start and end dates of any calculations using this Model
"""
def __init__(self, name=None, description=None, proj=None,
grid_file_paths=None, default_start_date=None,
default_end_date=None, runs=None, default_runs=None,
load_grid_data=False, grid_attrs=None):
"""
Parameters
----------
name : str
The model's name. This must be unique from that of any other
`Model` objects being used by the parent `Proj`.
description : str, optional
A description of the model. This is not used internally by
aospy; it is solely for the user's information.
proj : {None, aospy.Proj}, optional
The parent Proj object. When the parent `Proj` object is
instantiated with this Model included in its `models` attribute,
this will be over-written with that `Proj` object.
grid_file_paths : {None, sequence of strings}, optional
The paths to netCDF files stored on disk from which the model's
coordinate data can be taken.
default_start_date : {None, `datetime.datetime`}, optional
Default start date of calculations to be performed using
this Model.
default_end_date : {None, `datetime.datetime`}, optional
Default end date of calculations to be performed using
this Model.
runs : {None, sequence of aospy.Run objects}, optional
The child run objects of this Model
default_runs : {None, sequence of aospy.Run objects}, optional
The subset of this Model's runs over which to perform calculations
by default.
load_grid_data : bool, optional (default False)
Whether or not to load the grid data specified by 'grid_file_paths'
            upon initialization
grid_attrs : dict, optional (default None)
Dictionary mapping aospy internal names of grid attributes to their
corresponding names used in a particular model.
E.g. ``{TIME_STR: 'T'}``. While aospy checks for a number of
alternative names for grid attributes used by various models,
it is not possible to anticipate all possible names. This option
allows the user to explicitly tell aospy which variables correspond
            to which internal names (aospy will attempt to find internal names
            not provided in this dictionary in the usual way). For a
list of built-in alternative names see
:ref:`the table here <built-in-alternative-names>`.
See Also
--------
aospy.DataLoader, aospy.Proj, aospy.Run
Notes
-----
A side-effect of instantiating a Model object is that the `parent`
attribute of all of the model's `Run` objects is set to that model.
"""
if isinstance(name, str) and name:
self.name = name
else:
raise ValueError("Non-empty string value of `name` is required")
self.description = '' if description is None else description
self.proj = proj
grid_file_paths = [] if grid_file_paths is None else grid_file_paths
self.grid_file_paths = grid_file_paths
self.default_start_date = default_start_date
self.default_end_date = default_end_date
        self.runs = [] if runs is None else runs
        # Set this Model as the parent of each of its child Run objects.
        for run in self.runs:
            run.parent = self
if default_runs is None:
self.default_runs = []
else:
self.default_runs = default_runs
self.grid_attrs = grid_attrs
self._grid_data_is_set = False
if load_grid_data:
self.set_grid_data()
self._grid_data_is_set = True
def __str__(self):
return 'Model instance "' + self.name + '"'
__repr__ = __str__
def _get_grid_files(self):
"""Get the files holding grid data for an aospy object."""
grid_file_paths = self.grid_file_paths
datasets = []
if isinstance(grid_file_paths, str):
grid_file_paths = [grid_file_paths]
for path in grid_file_paths:
try:
ds = xr.open_dataset(path, decode_times=False)
except (TypeError, AttributeError):
ds = xr.open_mfdataset(path, decode_times=False,
combine='by_coords').load()
except (RuntimeError, OSError) as e:
msg = str(e) + ': {}'.format(path)
raise RuntimeError(msg)
datasets.append(ds)
return tuple(datasets)
def _set_mult_grid_attr(self):
"""
Set multiple attrs from grid file given their names in the grid file.
"""
grid_objs = self._get_grid_files()
if self.grid_attrs is None:
self.grid_attrs = {}
# Override GRID_ATTRS with entries in grid_attrs
attrs = internal_names.GRID_ATTRS.copy()
for k, v in self.grid_attrs.items():
if k not in attrs:
raise ValueError(
'Unrecognized internal name, {!r}, specified for a '
'custom grid attribute name. See the full list of '
'valid internal names below:\n\n{}'.format(
k, list(internal_names.GRID_ATTRS.keys())))
attrs[k] = (v, )
for name_int, names_ext in attrs.items():
for name in names_ext:
grid_attr = _get_grid_attr(grid_objs, name)
if grid_attr is not None:
TIME_STR = internal_names.TIME_STR
renamed_attr = _rename_coords(grid_attr, attrs)
if ((TIME_STR not in renamed_attr.dims) and
(TIME_STR in renamed_attr.coords)):
renamed_attr = renamed_attr.drop_vars(TIME_STR)
setattr(self, name_int, renamed_attr)
break
def set_grid_data(self):
"""Populate the attrs that hold grid data."""
if self._grid_data_is_set:
return
self._set_mult_grid_attr()
if not np.any(getattr(self, 'sfc_area', None)):
try:
sfc_area = _grid_sfc_area(self.lon, self.lat, self.lon_bounds,
self.lat_bounds)
except AttributeError:
sfc_area = _grid_sfc_area(self.lon, self.lat)
self.sfc_area = sfc_area
try:
self.levs_thick = utils.vertcoord.level_thickness(self.level)
except AttributeError:
self.level = None
self.levs_thick = None
self._grid_data_is_set = True
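# A minimal usage sketch (not part of aospy itself; the path and the Run
# object here are hypothetical):
#
#   from aospy import Model, Run
#   control = Run(name='control', description='', data_loader=my_loader)
#   obs = Model(name='my_product',
#               grid_file_paths=['/path/to/grid_data.nc'],
#               runs=[control],
#               load_grid_data=True)
#   obs.sfc_area  # grid-cell surface areas, computed during set_grid_data()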
|
spencerahill/aospy
|
aospy/model.py
|
Python
|
apache-2.0
| 11,711
|
[
"NetCDF"
] |
e57f4e5ec9013b0de570caff049821d17872719c9097d355cc89bd716f88c43d
|
# -*- coding: utf-8 -*-
#
# libmemcached documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 6 12:05:53 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
#extensions = ['sphinxcontrib.googleanalytics']
# Google
#googleanalytics_id = 'UA-15307604-2'
#googleanalytics_enabled = 'True'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'libmemcached'
copyright = u'2011-2013, Brian Aker DataDifferential, http://datadifferential.com/'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.18'
# The full version, including alpha/beta/rc tags.
release = '1.0.18'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'libmemcacheddoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'libmemcached.tex', u'libmemcached Documentation',
u'Brian Aker', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('hashkit_create', 'hashkit_clone', u'libhashkit Documentation', [u'Brian Aker'], 3),
('hashkit_create', 'hashkit_create', u'libhashkit Documentation', [u'Brian Aker'], 3),
('hashkit_create', 'hashkit_free', u'libhashkit Documentation', [u'Brian Aker'], 3),
('hashkit_create', 'hashkit_is_allocated', u'libhashkit Documentation', [u'Brian Aker'], 3),
('hashkit_functions', 'hashkit_crc32', u'libhashkit Documentation', [u'Brian Aker'], 3),
('hashkit_functions', 'hashkit_fnv1_32', u'libhashkit Documentation', [u'Brian Aker'], 3),
('hashkit_functions', 'hashkit_fnv1_64', u'libhashkit Documentation', [u'Brian Aker'], 3),
('hashkit_functions', 'hashkit_fnv1a_32', u'libhashkit Documentation', [u'Brian Aker'], 3),
('hashkit_functions', 'hashkit_fnv1a_64', u'libhashkit Documentation', [u'Brian Aker'], 3),
('hashkit_functions', 'hashkit_functions', u'libhashkit Documentation', [u'Brian Aker'], 3),
('hashkit_functions', 'hashkit_hsieh', u'libhashkit Documentation', [u'Brian Aker'], 3),
('hashkit_functions', 'hashkit_jenkins', u'libhashkit Documentation', [u'Brian Aker'], 3),
('hashkit_functions', 'hashkit_md5', u'libhashkit Documentation', [u'Brian Aker'], 3),
('hashkit_functions', 'hashkit_murmur', u'libhashkit Documentation', [u'Brian Aker'], 3),
('hashkit_value', 'hashkit_value', u'libhashkit Documentation', [u'Brian Aker'], 3),
('libhashkit', 'libhashkit', u'libhashkit Documentation', [u'Brian Aker'], 3),
('libmemcached', 'libmemcached', u'Introducing the C Client Library for memcached', [u'Brian Aker'], 3),
('libmemcached_configuration', 'libmemcached_check_configuration', u'libmemcached Documentation', [u'Brian Aker'], 3),
('libmemcached_configuration', 'libmemcached_configuration', u'libmemcached Documentation', [u'Brian Aker'], 3),
('libmemcached_configuration', 'memcached', u'libmemcached Documentation', [u'Brian Aker'], 3),
('libmemcached_examples', 'libmemcached_examples', u'libmemcached Documentation', [u'Brian Aker'], 3),
('libmemcachedutil', 'libmemcachedutil', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_analyze', 'memcached_analyze', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_append', 'memcached_append', u'Appending to or Prepending to data on the server', [u'Brian Aker'], 3),
('memcached_append', 'memcached_append_by_key', u'Appending to or Prepending to data on the server', [u'Brian Aker'], 3),
('memcached_append', 'memcached_prepend', u'Appending to or Prepending to data on the server', [u'Brian Aker'], 3),
('memcached_append', 'memcached_prepend_by_key', u'Appending to or Prepending to data on the server', [u'Brian Aker'], 3),
('memcached_auto', 'memcached_auto', u'Incrementing and Decrementing Values', [u'Brian Aker'], 3),
('memcached_auto', 'memcached_decrement', u'Incrementing and Decrementing Values', [u'Brian Aker'], 3),
('memcached_auto', 'memcached_decrement_with_initial', u'Incrementing and Decrementing Values', [u'Brian Aker'], 3),
('memcached_auto', 'memcached_increment', u'Incrementing and Decrementing Values', [u'Brian Aker'], 3),
('memcached_auto', 'memcached_increment_with_initial', u'Incrementing and Decrementing Values', [u'Brian Aker'], 3),
('memcached_behavior', 'memcached_behavior', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_behavior', 'memcached_behavior_get', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_behavior', 'memcached_behavior_set', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_callback', 'memcached_callback', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_callback', 'memcached_callback_get', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_callback', 'memcached_callback_set', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_cas', 'memcached_cas', u'Working with data on the server in an atomic fashion', [u'Brian Aker'], 3),
('memcached_cas', 'memcached_cas_by_key', u'Storing and Replacing Data', [u'Brian Aker'], 3),
('memcached_create', 'memcached_clone', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_create', 'memcached_create', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_create', 'memcached_free', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_create', 'memcached_servers_reset', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_delete', 'memcached_delete', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_delete', 'memcached_delete_by_key', u'libmemcached Documentation', [u'Brian Aker'], 3),
('libmemcached-1.0/memcached_touch', 'memcached_touch', u'libmemcached Documentation', [u'Brian Aker'], 3),
('libmemcached-1.0/memcached_touch', 'memcached_touch_by_key', u'libmemcached Documentation', [u'Brian Aker'], 3),
('libmemcached/memcached_exist', 'memcached_exist', u'libmemcached Documentation', [u'Brian Aker'], 3),
('libmemcached/memcached_exist', 'memcached_exist_by_key', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_dump', 'memcached_dump', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_flush', 'memcached_flush', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_flush_buffers', 'memcached_flush_buffers', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_generate_hash_value', 'memcached_generate_hash', u'Generating hash values directly', [u'Brian Aker'], 3),
('memcached_generate_hash_value', 'memcached_generate_hash_value', u'Generating hash values directly', [u'Brian Aker'], 3),
('libmemcached/memcached_fetch', 'memcached_fetch', u'Retrieving data from the server', [u'Brian Aker'], 3),
('memcached_get', 'memcached_fetch_execute', u'Retrieving data from the server', [u'Brian Aker'], 3),
('memcached_get', 'memcached_fetch_result', u'Retrieving data from the server', [u'Brian Aker'], 3),
('memcached_get', 'memcached_get', u'Retrieving data from the server', [u'Brian Aker'], 3),
('memcached_get', 'memcached_get_by_key', u'Retrieving data from the server', [u'Brian Aker'], 3),
('libmemcached/memcached_return_t', 'memcached_return_t', u'Return type values ', [u'Brian Aker'], 3),
('memcached_get', 'memcached_mget', u'Retrieving data from the server', [u'Brian Aker'], 3),
('memcached_get', 'memcached_mget_by_key', u'Retrieving data from the server', [u'Brian Aker'], 3),
('memcached_get', 'memcached_mget_execute', u'Retrieving data from the server', [u'Brian Aker'], 3),
('memcached_get', 'memcached_mget_execute_by_key', u'Retrieving data from the server', [u'Brian Aker'], 3),
('libmemcached/memcached_last_error_message', 'memcached_last_error_message', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_memory_allocators', 'memcached_get_memory_allocators', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_memory_allocators', 'memcached_memory_allocators', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_memory_allocators', 'memcached_set_memory_allocators', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_memory_allocators', 'memcached_set_memory_allocators_context', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_pool', 'memcached_pool', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_pool', 'memcached_pool_behavior_get', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_pool', 'memcached_pool_behavior_set', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_pool', 'memcached_pool_create', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_pool', 'memcached_pool_destroy', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_pool', 'memcached_pool_fetch', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_pool', 'memcached_pool_pop', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_pool', 'memcached_pool_push', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_pool', 'memcached_pool_release', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_pool', 'memcached_pool_st', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_quit', 'memcached_quit', u'libmemcached Documentation', [u'Brian Aker'], 3),
('libmemcached-1.0/memcached_set_encoding_key', 'memcached_set_encoding_key', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_result_st', 'memcached_result_cas', u'Working with result sets', [u'Brian Aker'], 3),
('memcached_result_st', 'memcached_result_create', u'Working with result sets', [u'Brian Aker'], 3),
('memcached_result_st', 'memcached_result_flags', u'Working with result sets', [u'Brian Aker'], 3),
('memcached_result_st', 'memcached_result_free', u'Working with result sets', [u'Brian Aker'], 3),
('memcached_result_st', 'memcached_result_key_length', u'Working with result sets', [u'Brian Aker'], 3),
('memcached_result_st', 'memcached_result_key_value', u'Working with result sets', [u'Brian Aker'], 3),
('memcached_result_st', 'memcached_result_length', u'Working with result sets', [u'Brian Aker'], 3),
('memcached_result_st', 'memcached_result_st', u'Working with result sets', [u'Brian Aker'], 3),
('memcached_result_st', 'memcached_result_value', u'Working with result sets', [u'Brian Aker'], 3),
('memcached_sasl', 'memcached_destroy_sasl_auth_data', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_sasl', 'memcached_get_sasl_callbacks', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_sasl', 'memcached_sasl', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_sasl', 'memcached_sasl_set_auth_data', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_sasl', 'memcached_set_sasl_callbacks', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_server_st', 'memcached_server_list_append', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_server_st', 'memcached_server_list_count', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_server_st', 'memcached_server_list_free', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_server_st', 'memcached_servers_parse', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_servers', 'memcached_server_add', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_servers', 'memcached_server_add_unix_socket', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_servers', 'memcached_server_count', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_servers', 'memcached_server_cursor', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_servers', 'memcached_server_list', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_servers', 'memcached_server_push', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_servers', 'memcached_server_st', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_servers', 'memcached_servers', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_set', 'memcached_add', u'Storing and Replacing Data', [u'Brian Aker'], 3),
('memcached_set', 'memcached_add_by_key', u'Storing and Replacing Data', [u'Brian Aker'], 3),
('memcached_set', 'memcached_replace', u'Storing and Replacing Data', [u'Brian Aker'], 3),
('memcached_set', 'memcached_replace_by_key', u'Storing and Replacing Data', [u'Brian Aker'], 3),
('memcached_set', 'memcached_set', u'Storing and Replacing Data', [u'Brian Aker'], 3),
('memcached_set', 'memcached_set_by_key', u'Storing and Replacing Data', [u'Brian Aker'], 3),
('memcached_stats', 'memcached_stat', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_stats', 'memcached_stat_execute', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_stats', 'memcached_stat_get_keys', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_stats', 'memcached_stat_get_value', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_stats', 'memcached_stat_servername', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_stats', 'memcached_stats', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_strerror', 'memcached_strerror', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_user_data', 'memcached_get_user_data', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_user_data', 'memcached_set_user_data', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_user_data', 'memcached_user_data', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_verbosity', 'memcached_verbosity', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_version', 'memcached_lib_version', u'libmemcached Documentation', [u'Brian Aker'], 3),
('memcached_version', 'memcached_version', u'libmemcached Documentation', [u'Brian Aker'], 3),
('bin/memcapable', 'memcapable', u'libmemcached Documentation', [u'Brian Aker'], 1),
('bin/memcat', 'memcat', u'libmemcached Documentation', [u'Brian Aker'], 1),
('bin/memcp', 'memcp', u'libmemcached Documentation', [u'Brian Aker'], 1),
('bin/memdump', 'memdump', u'libmemcached Documentation', [u'Brian Aker'], 1),
('bin/memerror', 'memerror', u'libmemcached Documentation', [u'Brian Aker'], 1),
('bin/memflush', 'memflush', u'libmemcached Documentation', [u'Brian Aker'], 1),
('bin/memrm', 'memrm', u'libmemcached Documentation', [u'Brian Aker'], 1),
('bin/memaslap', 'memaslap', u'libmemcached Documentation', [u'Brian Aker'], 1),
('bin/memslap', 'memslap', u'libmemcached Documentation', [u'Brian Aker'], 1),
('bin/memstat', 'memstat', u'libmemcached Documentation', [u'Brian Aker'], 1),
('bin/memexist', 'memexist', u'libmemcached Documentation', [u'Brian Aker'], 1),
('bin/memparse', 'memparse', u'libmemcached Documentation', [u'Brian Aker'], 1),
('bin/memping', 'memping', u'libmemcached Documentation', [u'Brian Aker'], 1),
('bin/memtouch', 'memtouch', u'libmemcached Documentation', [u'Brian Aker'], 1),
]
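# To render these manual pages, invoke the Sphinx man builder from this
# directory (a usage sketch; the output path is arbitrary):
#
#   sphinx-build -b man . _build/man
#
# Each tuple above maps a source .rst file to one generated page, e.g. the
# ('memcached_set', 'memcached_add', ...) entry yields memcached_add.3.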
|
crazyhuaer/netbeanscode
|
C/example/libmemcached-1.0.18/docs/conf.py
|
Python
|
cc0-1.0
| 21,258
|
[
"Brian"
] |
548f2d2713bcc3df78c640b9455d097b075e1a02de3c1e79c0b2d39baf0eaa61
|
"""Serve pre-compressed static content from GridFS with aiohttp.
Requires Python 3.5 or later and aiohttp 3.0 or later.
Start a MongoDB server on its default port, run this script, and visit:
http://localhost:8080/fs/my_file
"""
# -- include-start --
import asyncio
import gzip
import tempfile
import aiohttp.web
from motor.aiohttp import AIOHTTPGridFS
from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorGridFSBucket
client = AsyncIOMotorClient()
# Use Motor to put compressed data in GridFS, with filename "my_file".
async def put_gridfile():
with tempfile.TemporaryFile() as tmp:
with gzip.GzipFile(mode='wb', fileobj=tmp) as gzfile:
for _ in range(10):
gzfile.write(b'Nonesuch nonsense\n')
gfs = AsyncIOMotorGridFSBucket(client.my_database)
tmp.seek(0)
await gfs.upload_from_stream(filename='my_file',
source=tmp,
metadata={'contentType': 'text',
'compressed': True})
asyncio.get_event_loop().run_until_complete(put_gridfile())
# Add "Content-Encoding: gzip" header for compressed data.
def gzip_header(response, gridout):
if gridout.metadata.get('compressed'):
response.headers['Content-Encoding'] = 'gzip'
gridfs_handler = AIOHTTPGridFS(client.my_database,
set_extra_headers=gzip_header)
app = aiohttp.web.Application()
# The GridFS URL pattern must have a "{filename}" variable.
resource = app.router.add_resource('/fs/{filename}')
resource.add_route('GET', gridfs_handler)
resource.add_route('HEAD', gridfs_handler)
aiohttp.web.run_app(app)
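# A quick way to exercise the handler once the app is running (a sketch, not
# part of the original example): the stored bytes are already gzipped, and
# gzip_header() above adds "Content-Encoding: gzip", so clients that send
# "Accept-Encoding: gzip" decompress the body transparently.
#
#   curl --compressed -i http://localhost:8080/fs/my_file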
|
wujuguang/motor
|
doc/examples/aiohttp_gridfs_example.py
|
Python
|
apache-2.0
| 1,705
|
[
"VisIt"
] |
672f492ffb677484814e34088fc6f69e92d9a4e0071a804db1a6ca5254c3d644
|
""" ModuleBase - contains the base class for workflow modules. Defines several common utility methods.
The modules defined within this package are developed in a way to be executed by a DIRAC.Core.Worfklow.Worfklow.
In particular, a DIRAC.Core.Workflow.Worfklow object will only call the "execute" function, that is defined here.
These modules, inspired by the LHCb experience, give the possibility to define simple user and production jobs.
Many VOs might want to extend this package. And actually, for some cases, it will be necessary. For example,
defining the LFN output at runtime (within the "UploadOutputs" module is a VO specific operation.
The DIRAC APIs are used to create Jobs that make use of these modules.
"""
import os, copy
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.WorkloadManagementSystem.Client.JobReport import JobReport
from DIRAC.TransformationSystem.Client.FileReport import FileReport
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.private.RequestValidator import gRequestValidator
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
class ModuleBase( object ):
""" Base class for Modules - works only within DIRAC workflows
This module, inheriting by "object", can use cooperative methods, very useful here.
"""
#############################################################################
def __init__( self, loggerIn = None ):
""" Initialization of module base.
loggerIn is a logger object that can be passed so that the logging will be more clear.
"""
if not loggerIn:
self.log = gLogger.getSubLogger( 'ModuleBase' )
else:
self.log = loggerIn
# These 2 are used in many places, so it's good to have them available here.
self.opsH = Operations()
self.dm = DataManager()
# Some job parameters
self.production_id = 0
self.prod_job_id = 0
self.jobID = 0
self.step_number = 0
self.step_id = 0
self.jobType = ''
self.executable = ''
self.command = None
self.workflowStatus = None
self.stepStatus = None
self.workflow_commons = None
self.step_commons = None
# These are useful objects (see the getFileReporter(), getJobReporter() and getRequestContainer() functions)
self.fileReport = None
self.jobReport = None
self.request = None
#############################################################################
def execute( self ):
""" Function called by all super classes. This is the only function that Workflow will call automatically.
The design adopted here is that all the modules are inheriting from this class,
and will NOT override this function. Instead, the inherited modules will override the following functions:
_resolveInputVariables()
_initialize()
_setCommand()
_executeCommand()
_execute()
that are called here exactly in this order.
Each implementation of these functions, in the subclasses, should never return S_OK, S_ERROR.
This choice has been made for convenience of coding, and for the high level of inheritance implemented here.
Instead, they should return:
- None when no issues arise
- a RuntimeError exception when there are issues
- a GracefulTermination exception (defined also here) when the module should be terminated gracefully
The various parameters in input to this method are used almost only for testing purposes.
"""
if not self.production_id:
# self.PRODUCTION_ID is always set by the workflow
self.production_id = int( self.PRODUCTION_ID )
if not self.prod_job_id:
      # self.JOB_ID is set by the workflow; it is not the WMS job ID but the transformation (production) task ID
self.prod_job_id = int( self.JOB_ID )
if not self.jobID:
# this is the real wms job ID
if os.environ.has_key( 'JOBID' ):
self.jobID = int( os.environ['JOBID'] )
if not self.step_number:
# self.STEP_NUMBER is always set by the workflow
self.step_number = int( self.STEP_NUMBER )
if not self.step_id:
self.step_id = '%d_%d_%d' % ( self.production_id, self.prod_job_id, self.step_number )
try:
# This is what has to be extended in the modules
self._resolveInputVariables()
self._initialize()
self._setCommand()
self._executeCommand()
self._execute()
self._finalize()
# If everything is OK
except GracefulTermination, status:
self.setApplicationStatus( status )
self.log.info( status )
return S_OK( status )
    # This catches everything that is voluntarily thrown within the modules, i.e. a known error
except RuntimeError, e:
self.log.error( e )
self.setApplicationStatus( e )
return S_ERROR( e )
    # This catches everything that is not voluntarily thrown (here we really log the full exception)
except Exception, e:
self.log.exception( e )
self.setApplicationStatus( e )
return S_ERROR( e )
finally:
self.finalize()
def _resolveInputVariables( self ):
""" By convention the module input parameters are resolved here.
fileReport, jobReport, and request objects are instantiated/recorded here.
This will also call the resolution of the input workflow.
The resolution of the input step should instead be done on a step basis.
NB: Never forget to call this base method when extending it.
"""
self.log.verbose( "workflow_commons = ", self.workflow_commons )
self.log.verbose( "step_commons = ", self.step_commons )
if not self.fileReport:
self.fileReport = self._getFileReporter()
if not self.jobReport:
self.jobReport = self._getJobReporter()
if not self.request:
self.request = self._getRequestContainer()
self._resolveInputWorkflow()
def _initialize( self ):
""" TBE
For initializing the module, whatever operation this can be
"""
pass
def _setCommand( self ):
""" TBE
For "executors" modules, set the command to be used in the self.command variable.
"""
pass
def _executeCommand( self ):
""" TBE
For "executors" modules, executes self.command as set in the _setCommand() method
"""
pass
def _execute( self ):
""" TBE
Executes, whatever this means for the module implementing it
"""
pass
def _finalize( self, status = '' ):
""" TBE
By default, the module finalizes correctly
"""
if not status:
status = '%s correctly finalized' % str( self.__class__ )
raise GracefulTermination, status
#############################################################################
def finalize( self ):
""" Just finalizing the module execution by flushing the logs. This will be done always.
"""
self.log.flushAllMessages( 0 )
self.log.info( '===== Terminating ' + str( self.__class__ ) + ' ===== ' )
#############################################################################
def _getJobReporter( self ):
""" just return the job reporter (object, always defined by dirac-jobexec)
"""
if self.workflow_commons.has_key( 'JobReport' ):
return self.workflow_commons['JobReport']
else:
jobReport = JobReport( self.jobID )
self.workflow_commons['JobReport'] = jobReport
return jobReport
#############################################################################
def _getFileReporter( self ):
""" just return the file reporter (object)
"""
if self.workflow_commons.has_key( 'FileReport' ):
return self.workflow_commons['FileReport']
else:
fileReport = FileReport()
self.workflow_commons['FileReport'] = fileReport
return fileReport
#############################################################################
def _getRequestContainer( self ):
""" just return the RequestContainer reporter (object)
"""
if self.workflow_commons.has_key( 'Request' ):
return self.workflow_commons['Request']
else:
request = Request()
self.workflow_commons['Request'] = request
return request
#############################################################################
def _resolveInputWorkflow( self ):
""" Resolve the input variables that are in the workflow_commons
"""
if self.workflow_commons.has_key( 'JobType' ):
self.jobType = self.workflow_commons['JobType']
self.InputData = ''
if self.workflow_commons.has_key( 'InputData' ):
if self.workflow_commons['InputData']:
self.InputData = self.workflow_commons['InputData']
if self.workflow_commons.has_key( 'ParametricInputData' ):
pID = copy.deepcopy( self.workflow_commons['ParametricInputData'] )
if pID:
if type( pID ) == type( [] ):
pID = ';'.join( pID )
# self.InputData += ';' + pID
self.InputData = pID
self.InputData = self.InputData.rstrip( ';' )
if self.InputData == ';':
self.InputData = ''
self.inputDataList = [lfn.strip( 'LFN:' ) for lfn in self.InputData.split( ';' ) if lfn]
if self.workflow_commons.has_key( 'appSteps' ):
self.appSteps = self.workflow_commons['appSteps']
if self.workflow_commons.has_key( 'outputDataFileMask' ):
self.outputDataFileMask = self.workflow_commons['outputDataFileMask']
if not type( self.outputDataFileMask ) == type( [] ):
self.outputDataFileMask = [i.lower().strip() for i in self.outputDataFileMask.split( ';' )]
#############################################################################
def _resolveInputStep( self ):
""" Resolve the input variables for an application step
"""
self.stepName = self.step_commons['STEP_INSTANCE_NAME']
if self.step_commons.has_key( 'executable' ) and self.step_commons['executable']:
self.executable = self.step_commons['executable']
else:
self.executable = 'Unknown'
if self.step_commons.has_key( 'applicationName' ) and self.step_commons['applicationName']:
self.applicationName = self.step_commons['applicationName']
else:
self.applicationName = 'Unknown'
if self.step_commons.has_key( 'applicationVersion' ) and self.step_commons['applicationVersion']:
self.applicationVersion = self.step_commons['applicationVersion']
else:
self.applicationVersion = 'Unknown'
if self.step_commons.has_key( 'applicationLog' ):
self.applicationLog = self.step_commons['applicationLog']
else:
self.applicationLog = 'applicationLog.txt'
stepInputData = []
if self.step_commons.has_key( 'inputData' ):
if self.step_commons['inputData']:
stepInputData = self.step_commons['inputData']
elif self.InputData:
stepInputData = copy.deepcopy( self.InputData )
if stepInputData:
stepInputData = self._determineStepInputData( stepInputData, )
self.stepInputData = [sid.strip( 'LFN:' ) for sid in stepInputData]
#############################################################################
def _determineStepInputData( self, inputData ):
""" determine the input data for the step
"""
if inputData == 'previousStep':
stepIndex = self.appSteps.index( self.stepName )
previousStep = self.appSteps[stepIndex - 1]
stepInputData = []
for outputF in self.workflow_commons['outputList']:
try:
if outputF['stepName'] == previousStep and outputF['outputDataType'].lower() == self.inputDataType.lower():
stepInputData.append( outputF['outputDataName'] )
except KeyError:
raise RuntimeError, 'Can\'t find output of step %s' % previousStep
return stepInputData
else:
return [x.strip( 'LFN:' ) for x in inputData.split( ';' )]
#############################################################################
def setApplicationStatus( self, status, sendFlag = True ):
"""Wraps around setJobApplicationStatus of state update client
"""
if not self._WMSJob():
return 0 # e.g. running locally prior to submission
if self._checkWFAndStepStatus( noPrint = True ):
# The application status won't be updated in case the workflow or the step is failed already
if not type( status ) == type( '' ):
status = str( status )
self.log.verbose( 'setJobApplicationStatus(%d, %s)' % ( self.jobID, status ) )
jobStatus = self.jobReport.setApplicationStatus( status, sendFlag )
if not jobStatus['OK']:
self.log.warn( jobStatus['Message'] )
#############################################################################
def _WMSJob( self ):
""" Check if this job is running via WMS
"""
return True if self.jobID else False
#############################################################################
def _enableModule( self ):
""" Enable module if it's running via WMS
"""
if not self._WMSJob():
self.log.info( 'No WMS JobID found, disabling module via control flag' )
return False
else:
self.log.verbose( 'Found WMS JobID = %d' % self.jobID )
return True
#############################################################################
def _checkWFAndStepStatus( self, noPrint = False ):
""" Check the WF and Step status
"""
if not self.workflowStatus['OK'] or not self.stepStatus['OK']:
if not noPrint:
self.log.info( 'Skip this module, failure detected in a previous step :' )
self.log.info( 'Workflow status : %s' % ( self.workflowStatus ) )
self.log.info( 'Step Status : %s' % ( self.stepStatus ) )
return False
else:
return True
#############################################################################
def setJobParameter( self, name, value, sendFlag = True ):
"""Wraps around setJobParameter of state update client
"""
if not self._WMSJob():
return 0 # e.g. running locally prior to submission
self.log.verbose( 'setJobParameter(%d,%s,%s)' % ( self.jobID, name, value ) )
jobParam = self.jobReport.setJobParameter( str( name ), str( value ), sendFlag )
if not jobParam['OK']:
self.log.warn( jobParam['Message'] )
#############################################################################
def getCandidateFiles( self, outputList, outputLFNs, fileMask, stepMask = '' ):
""" Returns list of candidate files to upload, check if some outputs are missing.
outputList has the following structure:
[ {'outputDataType':'','outputDataSE':'','outputDataName':''} , {...} ]
outputLFNs is the list of output LFNs for the job
        fileMask is the list of output file extensions to which the outputs are restricted;
        returns a dictionary containing type, SE and LFN for the files selected by the mask
"""
fileInfo = {}
for outputFile in outputList:
if outputFile.has_key( 'outputDataType' ) \
and outputFile.has_key( 'outputDataSE' ) \
and outputFile.has_key( 'outputDataName' ):
fname = outputFile['outputDataName']
fileSE = outputFile['outputDataSE']
fileType = outputFile['outputDataType']
fileInfo[fname] = {'type':fileType, 'workflowSE':fileSE}
else:
self.log.error( 'Ignoring malformed output data specification', str( outputFile ) )
for lfn in outputLFNs:
if os.path.basename( lfn ) in fileInfo.keys():
fileInfo[os.path.basename( lfn )]['lfn'] = lfn
self.log.verbose( 'Found LFN %s for file %s' % ( lfn, os.path.basename( lfn ) ) )
    # check local existence
self._checkLocalExistance( fileInfo.keys() )
# Select which files have to be uploaded: in principle all
candidateFiles = self._applyMask( fileInfo, fileMask, stepMask )
# Sanity check all final candidate metadata keys are present (return S_ERROR if not)
self._checkSanity( candidateFiles )
return candidateFiles
#############################################################################
def _applyMask( self, candidateFilesIn, fileMask, stepMask ):
""" Select which files have to be uploaded: in principle all
"""
candidateFiles = copy.deepcopy( candidateFilesIn )
if fileMask and type( fileMask ) != type( [] ):
fileMask = [fileMask]
if type( stepMask ) == type( 1 ):
stepMask = str( stepMask )
if stepMask and type( stepMask ) != type( [] ):
stepMask = [stepMask]
if fileMask and fileMask != ['']:
for fileName, metadata in candidateFiles.items():
if ( ( metadata['type'].lower() not in fileMask ) ): # and ( fileName.split( '.' )[-1] not in fileMask ) ):
del( candidateFiles[fileName] )
self.log.info( 'Output file %s was produced but will not be treated (fileMask is %s)' % ( fileName,
', '.join( fileMask ) ) )
else:
      self.log.info( 'No outputDataFileMask provided, so files with any extension will be considered' )
if stepMask and stepMask != ['']:
# FIXME: This supposes that the LFN contains the step ID
for fileName, metadata in candidateFiles.items():
if fileName.split( '_' )[-1].split( '.' )[0] not in stepMask:
del( candidateFiles[fileName] )
self.log.info( 'Output file %s was produced but will not be treated (stepMask is %s)' % ( fileName,
', '.join( stepMask ) ) )
else:
      self.log.info( 'No outputDataStep provided, so the output files of all the steps will be considered' )
return candidateFiles
#############################################################################
def _checkSanity( self, candidateFiles ):
""" Sanity check all final candidate metadata keys are present
"""
notPresentKeys = []
mandatoryKeys = ['type', 'workflowSE', 'lfn'] # filedict is used for requests
for fileName, metadata in candidateFiles.items():
for key in mandatoryKeys:
if not metadata.has_key( key ):
notPresentKeys.append( ( fileName, key ) )
if notPresentKeys:
for fileName_keys in notPresentKeys:
self.log.error( 'File %s has missing %s' % ( fileName_keys[0], fileName_keys[1] ) )
raise ValueError
#############################################################################
def _checkLocalExistance( self, fileList ):
""" Check that the list of output files are present locally
"""
notPresentFiles = []
for fileName in fileList:
if not os.path.exists( fileName ):
notPresentFiles.append( fileName )
if notPresentFiles:
      self.log.error( 'Output data files %s do not exist locally' % notPresentFiles )
raise os.error
#############################################################################
def generateFailoverFile( self ):
""" Retrieve the accumulated reporting request, and produce a JSON file that is consumed by the JobWrapper
"""
reportRequest = None
result = self.jobReport.generateForwardDISET()
if not result['OK']:
self.log.warn( "Could not generate Operation for job report with result:\n%s" % ( result ) )
else:
reportRequest = result['Value']
if reportRequest:
self.log.info( "Populating request with job report information" )
self.request.addOperation( reportRequest )
accountingReport = None
if self.workflow_commons.has_key( 'AccountingReport' ):
accountingReport = self.workflow_commons['AccountingReport']
if accountingReport:
result = accountingReport.commit()
if not result['OK']:
self.log.error( "!!! Both accounting and RequestDB are down? !!!" )
return result
if len( self.request ):
isValid = gRequestValidator.validate( self.request )
if not isValid['OK']:
raise RuntimeError, "Failover request is not valid: %s" % isValid['Message']
else:
requestJSON = self.request.toJSON()
if requestJSON['OK']:
self.log.info( "Creating failover request for deferred operations for job %d" % self.jobID )
request_string = str( requestJSON['Value'] )
self.log.debug( request_string )
# Write out the request string
fname = '%d_%d_request.json' % ( self.production_id, self.prod_job_id )
jsonFile = open( fname, 'w' )
jsonFile.write( request_string )
jsonFile.close()
self.log.info( "Created file containing failover request %s" % fname )
result = self.request.getDigest()
if result['OK']:
self.log.info( "Digest of the request: %s" % result['Value'] )
else:
self.log.error( "No digest? That's not sooo important, anyway: %s" % result['Message'] )
else:
raise RuntimeError, requestJSON['Message']
#############################################################################
#############################################################################
class GracefulTermination( Exception ):
pass
#############################################################################
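#############################################################################
# A minimal sketch (not part of DIRAC) of how a concrete workflow module is
# meant to extend ModuleBase: override only the hook methods, signal failures
# by raising RuntimeError, and let the inherited execute() drive the sequence.
# The class name and the command below are hypothetical.
class EchoModule( ModuleBase ):
  """ Toy module that simply runs an echo command. """
  def _setCommand( self ):
    self.command = 'echo "step %s of production %s"' % ( self.step_number, self.production_id )
  def _executeCommand( self ):
    if os.system( self.command ) != 0:
      # Voluntarily thrown errors are turned into S_ERROR by ModuleBase.execute()
      raise RuntimeError, 'echo command failed'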
|
sposs/DIRAC
|
Workflow/Modules/ModuleBase.py
|
Python
|
gpl-3.0
| 21,525
|
[
"DIRAC"
] |
e6e58b9a38cac051314163f828e30a789846222284f589ffc6d7129317340c2f
|
# -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
# Copyright 2003-2006 Dirk Meyer <dischi@freevo.org>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
import string
import re
import struct
__all__ = ['resolve']
def resolve(code):
"""
Transform a twocc or fourcc code into a name. Returns a 2-tuple of (cc,
codec) where both are strings and cc is a string in the form '0xXX' if it's
a twocc, or 'ABCD' if it's a fourcc. If the given code is not a known
twocc or fourcc, the return value will be (None, 'Unknown'), unless the
code is otherwise a printable string in which case it will be returned as
the codec.
"""
if isinstance(code, basestring):
codec = u'Unknown'
# Check for twocc
if re.match(r'^0x[\da-f]{1,4}$', code, re.I):
# Twocc in hex form
return code, TWOCC.get(int(code, 16), codec)
elif code.isdigit() and 0 <= int(code) <= 0xff:
# Twocc in decimal form
return hex(int(code)), TWOCC.get(int(code), codec)
elif len(code) == 2:
code = struct.unpack('H', code)[0]
return hex(code), TWOCC.get(code, codec)
        elif len(code) != 4 and len([x for x in code if x not in string.printable]) == 0:
            # Not a fourcc, but still a printable string: fall back to it as the codec name.
            codec = unicode(code)
        if code[:2] == 'MS' and code[2:].upper() in FOURCC:
            code = code[2:]
        if code.upper() in FOURCC:
            return code.upper(), unicode(FOURCC[code.upper()])
        return None, codec
elif isinstance(code, (int, long)):
return hex(code), TWOCC.get(code, u'Unknown')
return None, u'Unknown'
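# Expected behaviour of resolve(), as a doctest-style sketch (values come from
# the TWOCC and FOURCC tables below):
#
#   >>> resolve('0x55')
#   ('0x55', 'MPEG Layer 3')
#   >>> resolve('DIV3')
#   ('DIV3', u'DivX v3 MPEG-4 Low-Motion')
#   >>> resolve(0x2000)
#   ('0x2000', 'AC3')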
TWOCC = {
0x0000: 'Unknown Wave Format',
0x0001: 'PCM',
0x0002: 'Microsoft ADPCM',
0x0003: 'IEEE Float',
0x0004: 'Compaq Computer VSELP',
0x0005: 'IBM CVSD',
0x0006: 'A-Law',
0x0007: 'mu-Law',
0x0008: 'Microsoft DTS',
0x0009: 'Microsoft DRM',
0x0010: 'OKI ADPCM',
0x0011: 'Intel DVI/IMA ADPCM',
0x0012: 'Videologic MediaSpace ADPCM',
0x0013: 'Sierra Semiconductor ADPCM',
0x0014: 'Antex Electronics G.723 ADPCM',
0x0015: 'DSP Solutions DigiSTD',
0x0016: 'DSP Solutions DigiFIX',
0x0017: 'Dialogic OKI ADPCM',
0x0018: 'MediaVision ADPCM',
0x0019: 'Hewlett-Packard CU',
0x0020: 'Yamaha ADPCM',
0x0021: 'Speech Compression Sonarc',
0x0022: 'DSP Group TrueSpeech',
0x0023: 'Echo Speech EchoSC1',
0x0024: 'Audiofile AF36',
0x0025: 'Audio Processing Technology APTX',
0x0026: 'AudioFile AF10',
0x0027: 'Prosody 1612',
0x0028: 'LRC',
0x0030: 'Dolby AC2',
0x0031: 'Microsoft GSM 6.10',
0x0032: 'MSNAudio',
0x0033: 'Antex Electronics ADPCME',
0x0034: 'Control Resources VQLPC',
0x0035: 'DSP Solutions DigiREAL',
0x0036: 'DSP Solutions DigiADPCM',
0x0037: 'Control Resources CR10',
0x0038: 'Natural MicroSystems VBXADPCM',
0x0039: 'Crystal Semiconductor IMA ADPCM',
0x003A: 'EchoSC3',
0x003B: 'Rockwell ADPCM',
0x003C: 'Rockwell Digit LK',
0x003D: 'Xebec',
0x0040: 'Antex Electronics G.721 ADPCM',
0x0041: 'G.728 CELP',
0x0042: 'MSG723',
0x0043: 'IBM AVC ADPCM',
0x0045: 'ITU-T G.726 ADPCM',
0x0050: 'MPEG 1, Layer 1,2',
0x0052: 'RT24',
0x0053: 'PAC',
0x0055: 'MPEG Layer 3',
0x0059: 'Lucent G.723',
0x0060: 'Cirrus',
0x0061: 'ESPCM',
0x0062: 'Voxware',
0x0063: 'Canopus Atrac',
0x0064: 'G.726 ADPCM',
0x0065: 'G.722 ADPCM',
0x0066: 'DSAT',
0x0067: 'DSAT Display',
0x0069: 'Voxware Byte Aligned',
0x0070: 'Voxware AC8',
0x0071: 'Voxware AC10',
0x0072: 'Voxware AC16',
0x0073: 'Voxware AC20',
0x0074: 'Voxware MetaVoice',
0x0075: 'Voxware MetaSound',
0x0076: 'Voxware RT29HW',
0x0077: 'Voxware VR12',
0x0078: 'Voxware VR18',
0x0079: 'Voxware TQ40',
0x0080: 'Softsound',
0x0081: 'Voxware TQ60',
0x0082: 'MSRT24',
0x0083: 'G.729A',
0x0084: 'MVI MV12',
0x0085: 'DF G.726',
0x0086: 'DF GSM610',
0x0088: 'ISIAudio',
0x0089: 'Onlive',
0x0091: 'SBC24',
0x0092: 'Dolby AC3 SPDIF',
0x0093: 'MediaSonic G.723',
0x0094: 'Aculab PLC Prosody 8KBPS',
0x0097: 'ZyXEL ADPCM',
0x0098: 'Philips LPCBB',
0x0099: 'Packed',
0x00A0: 'Malden Electronics PHONYTALK',
0x00FF: 'AAC',
0x0100: 'Rhetorex ADPCM',
0x0101: 'IBM mu-law',
0x0102: 'IBM A-law',
0x0103: 'IBM AVC Adaptive Differential Pulse Code Modulation',
0x0111: 'Vivo G.723',
0x0112: 'Vivo Siren',
0x0123: 'Digital G.723',
0x0125: 'Sanyo LD ADPCM',
0x0130: 'Sipro Lab Telecom ACELP.net',
0x0131: 'Sipro Lab Telecom ACELP.4800',
0x0132: 'Sipro Lab Telecom ACELP.8V3',
0x0133: 'Sipro Lab Telecom ACELP.G.729',
0x0134: 'Sipro Lab Telecom ACELP.G.729A',
0x0135: 'Sipro Lab Telecom ACELP.KELVIN',
0x0140: 'Windows Media Video V8',
0x0150: 'Qualcomm PureVoice',
0x0151: 'Qualcomm HalfRate',
0x0155: 'Ring Zero Systems TUB GSM',
0x0160: 'Windows Media Audio V1 / DivX audio (WMA)',
0x0161: 'Windows Media Audio V7 / V8 / V9',
0x0162: 'Windows Media Audio Professional V9',
0x0163: 'Windows Media Audio Lossless V9',
0x0170: 'UNISYS NAP ADPCM',
0x0171: 'UNISYS NAP ULAW',
0x0172: 'UNISYS NAP ALAW',
0x0173: 'UNISYS NAP 16K',
0x0200: 'Creative Labs ADPCM',
0x0202: 'Creative Labs Fastspeech8',
0x0203: 'Creative Labs Fastspeech10',
0x0210: 'UHER Informatic ADPCM',
0x0215: 'Ulead DV ACM',
0x0216: 'Ulead DV ACM',
0x0220: 'Quarterdeck',
0x0230: 'I-link Worldwide ILINK VC',
0x0240: 'Aureal Semiconductor RAW SPORT',
0x0241: 'ESST AC3',
0x0250: 'Interactive Products HSX',
0x0251: 'Interactive Products RPELP',
0x0260: 'Consistent Software CS2',
0x0270: 'Sony ATRAC3 (SCX, same as MiniDisk LP2)',
0x0300: 'Fujitsu FM Towns Snd',
0x0400: 'BTV Digital',
0x0401: 'Intel Music Coder (IMC)',
0x0402: 'Ligos Indeo Audio',
0x0450: 'QDesign Music',
0x0680: 'VME VMPCM',
0x0681: 'AT&T Labs TPC',
0x0700: 'YMPEG Alpha',
0x08AE: 'ClearJump LiteWave',
0x1000: 'Olivetti GSM',
0x1001: 'Olivetti ADPCM',
0x1002: 'Olivetti CELP',
0x1003: 'Olivetti SBC',
0x1004: 'Olivetti OPR',
0x1100: 'Lernout & Hauspie LH Codec',
0x1101: 'Lernout & Hauspie CELP codec',
0x1102: 'Lernout & Hauspie SBC codec',
0x1103: 'Lernout & Hauspie SBC codec',
0x1104: 'Lernout & Hauspie SBC codec',
0x1400: 'Norris',
0x1401: 'AT&T ISIAudio',
0x1500: 'Soundspace Music Compression',
0x181C: 'VoxWare RT24 speech codec',
0x181E: 'Lucent elemedia AX24000P Music codec',
0x1C07: 'Lucent SX8300P speech codec',
0x1C0C: 'Lucent SX5363S G.723 compliant codec',
0x1F03: 'CUseeMe DigiTalk (ex-Rocwell)',
0x1FC4: 'NCT Soft ALF2CD ACM',
0x2000: 'AC3',
0x2001: 'Dolby DTS (Digital Theater System)',
0x2002: 'RealAudio 1 / 2 14.4',
0x2003: 'RealAudio 1 / 2 28.8',
0x2004: 'RealAudio G2 / 8 Cook (low bitrate)',
0x2005: 'RealAudio 3 / 4 / 5 Music (DNET)',
0x2006: 'RealAudio 10 AAC (RAAC)',
0x2007: 'RealAudio 10 AAC+ (RACP)',
0x3313: 'makeAVIS',
0x4143: 'Divio MPEG-4 AAC audio',
0x434C: 'LEAD Speech',
0x564C: 'LEAD Vorbis',
0x674F: 'Ogg Vorbis (mode 1)',
0x6750: 'Ogg Vorbis (mode 2)',
0x6751: 'Ogg Vorbis (mode 3)',
0x676F: 'Ogg Vorbis (mode 1+)',
0x6770: 'Ogg Vorbis (mode 2+)',
0x6771: 'Ogg Vorbis (mode 3+)',
0x7A21: 'GSM-AMR (CBR, no SID)',
0x7A22: 'GSM-AMR (VBR, including SID)',
0xDFAC: 'DebugMode SonicFoundry Vegas FrameServer ACM Codec',
0xF1AC: 'Free Lossless Audio Codec FLAC',
0xFFFE: 'Extensible wave format',
0xFFFF: 'development'
}
FOURCC = {
'1978': 'A.M.Paredes predictor (LossLess)',
'2VUY': 'Optibase VideoPump 8-bit 4:2:2 Component YCbCr',
'3IV0': 'MPEG4-based codec 3ivx',
'3IV1': '3ivx v1',
'3IV2': '3ivx v2',
'3IVD': 'FFmpeg DivX ;-) (MS MPEG-4 v3)',
'3IVX': 'MPEG4-based codec 3ivx',
'8BPS': 'Apple QuickTime Planar RGB with Alpha-channel',
'AAS4': 'Autodesk Animator codec (RLE)',
'AASC': 'Autodesk Animator',
'ABYR': 'Kensington ABYR',
'ACTL': 'Streambox ACT-L2',
'ADV1': 'Loronix WaveCodec',
'ADVJ': 'Avid M-JPEG Avid Technology Also known as AVRn',
'AEIK': 'Intel Indeo Video 3.2',
'AEMI': 'Array VideoONE MPEG1-I Capture',
'AFLC': 'Autodesk Animator FLC',
'AFLI': 'Autodesk Animator FLI',
'AHDV': 'CineForm 10-bit Visually Perfect HD',
'AJPG': '22fps JPEG-based codec for digital cameras',
'AMPG': 'Array VideoONE MPEG',
'ANIM': 'Intel RDX (ANIM)',
'AP41': 'AngelPotion Definitive',
'AP42': 'AngelPotion Definitive',
'ASLC': 'AlparySoft Lossless Codec',
'ASV1': 'Asus Video v1',
'ASV2': 'Asus Video v2',
'ASVX': 'Asus Video 2.0 (audio)',
'ATM4': 'Ahead Nero Digital MPEG-4 Codec',
'AUR2': 'Aura 2 Codec - YUV 4:2:2',
'AURA': 'Aura 1 Codec - YUV 4:1:1',
'AV1X': 'Avid 1:1x (Quick Time)',
'AVC1': 'H.264 AVC',
'AVD1': 'Avid DV (Quick Time)',
'AVDJ': 'Avid Meridien JFIF with Alpha-channel',
'AVDN': 'Avid DNxHD (Quick Time)',
'AVDV': 'Avid DV',
'AVI1': 'MainConcept Motion JPEG Codec',
'AVI2': 'MainConcept Motion JPEG Codec',
'AVID': 'Avid Motion JPEG',
'AVIS': 'Wrapper for AviSynth',
'AVMP': 'Avid IMX (Quick Time)',
'AVR ': 'Avid ABVB/NuVista MJPEG with Alpha-channel',
'AVRN': 'Avid Motion JPEG',
'AVUI': 'Avid Meridien Uncompressed with Alpha-channel',
'AVUP': 'Avid 10bit Packed (Quick Time)',
'AYUV': '4:4:4 YUV (AYUV)',
'AZPR': 'Quicktime Apple Video',
'AZRP': 'Quicktime Apple Video',
'BGR ': 'Uncompressed BGR32 8:8:8:8',
'BGR(15)': 'Uncompressed BGR15 5:5:5',
'BGR(16)': 'Uncompressed BGR16 5:6:5',
'BGR(24)': 'Uncompressed BGR24 8:8:8',
'BHIV': 'BeHere iVideo',
'BINK': 'RAD Game Tools Bink Video',
'BIT ': 'BI_BITFIELDS (Raw RGB)',
'BITM': 'Microsoft H.261',
'BLOX': 'Jan Jezabek BLOX MPEG Codec',
'BLZ0': 'DivX for Blizzard Decoder Filter',
'BT20': 'Conexant Prosumer Video',
'BTCV': 'Conexant Composite Video Codec',
'BTVC': 'Conexant Composite Video',
'BW00': 'BergWave (Wavelet)',
'BW10': 'Data Translation Broadway MPEG Capture',
'BXBG': 'BOXX BGR',
'BXRG': 'BOXX RGB',
'BXY2': 'BOXX 10-bit YUV',
'BXYV': 'BOXX YUV',
'CC12': 'Intel YUV12',
'CDV5': 'Canopus SD50/DVHD',
'CDVC': 'Canopus DV',
'CDVH': 'Canopus SD50/DVHD',
'CFCC': 'Digital Processing Systems DPS Perception',
'CFHD': 'CineForm 10-bit Visually Perfect HD',
'CGDI': 'Microsoft Office 97 Camcorder Video',
'CHAM': 'Winnov Caviara Champagne',
'CJPG': 'Creative WebCam JPEG',
'CLJR': 'Cirrus Logic YUV 4 pixels',
'CLLC': 'Canopus LossLess',
'CLPL': 'YV12',
'CMYK': 'Common Data Format in Printing',
'COL0': 'FFmpeg DivX ;-) (MS MPEG-4 v3)',
'COL1': 'FFmpeg DivX ;-) (MS MPEG-4 v3)',
'CPLA': 'Weitek 4:2:0 YUV Planar',
'CRAM': 'Microsoft Video 1 (CRAM)',
'CSCD': 'RenderSoft CamStudio lossless Codec',
'CTRX': 'Citrix Scalable Video Codec',
'CUVC': 'Canopus HQ',
'CVID': 'Radius Cinepak',
'CWLT': 'Microsoft Color WLT DIB',
'CYUV': 'Creative Labs YUV',
'CYUY': 'ATI YUV',
'D261': 'H.261',
'D263': 'H.263',
'DAVC': 'Dicas MPEGable H.264/MPEG-4 AVC base profile codec',
'DC25': 'MainConcept ProDV Codec',
'DCAP': 'Pinnacle DV25 Codec',
'DCL1': 'Data Connection Conferencing Codec',
'DCT0': 'WniWni Codec',
'DFSC': 'DebugMode FrameServer VFW Codec',
'DIB ': 'Full Frames (Uncompressed)',
'DIV1': 'FFmpeg-4 V1 (hacked MS MPEG-4 V1)',
'DIV2': 'MS MPEG-4 V2',
'DIV3': 'DivX v3 MPEG-4 Low-Motion',
'DIV4': 'DivX v3 MPEG-4 Fast-Motion',
'DIV5': 'DIV5',
'DIV6': 'DivX MPEG-4',
'DIVX': 'DivX',
'DM4V': 'Dicas MPEGable MPEG-4',
'DMB1': 'Matrox Rainbow Runner hardware MJPEG',
'DMB2': 'Paradigm MJPEG',
'DMK2': 'ViewSonic V36 PDA Video',
'DP02': 'DynaPel MPEG-4',
'DPS0': 'DPS Reality Motion JPEG',
'DPSC': 'DPS PAR Motion JPEG',
'DRWX': 'Pinnacle DV25 Codec',
'DSVD': 'DSVD',
'DTMT': 'Media-100 Codec',
'DTNT': 'Media-100 Codec',
'DUCK': 'Duck True Motion 1.0',
'DV10': 'BlueFish444 (lossless RGBA, YUV 10-bit)',
'DV25': 'Matrox DVCPRO codec',
'DV50': 'Matrox DVCPRO50 codec',
'DVAN': 'DVAN',
'DVC ': 'Apple QuickTime DV (DVCPRO NTSC)',
'DVCP': 'Apple QuickTime DV (DVCPRO PAL)',
'DVCS': 'MainConcept DV Codec',
'DVE2': 'InSoft DVE-2 Videoconferencing',
'DVH1': 'Pinnacle DVHD100',
'DVHD': 'DV 1125 lines at 30.00 Hz or 1250 lines at 25.00 Hz',
'DVIS': 'VSYNC DualMoon Iris DV codec',
'DVL ': 'Radius SoftDV 16:9 NTSC',
'DVLP': 'Radius SoftDV 16:9 PAL',
'DVMA': 'Darim Vision DVMPEG',
'DVOR': 'BlueFish444 (lossless RGBA, YUV 10-bit)',
'DVPN': 'Apple QuickTime DV (DV NTSC)',
'DVPP': 'Apple QuickTime DV (DV PAL)',
'DVR1': 'TARGA2000 Codec',
'DVRS': 'VSYNC DualMoon Iris DV codec',
'DVSD': 'DV',
'DVSL': 'DV compressed in SD (SDL)',
'DVX1': 'DVX1000SP Video Decoder',
'DVX2': 'DVX2000S Video Decoder',
'DVX3': 'DVX3000S Video Decoder',
'DX50': 'DivX v5',
'DXGM': 'Electronic Arts Game Video codec',
'DXSB': 'DivX Subtitles Codec',
'DXT1': 'Microsoft DirectX Compressed Texture (DXT1)',
'DXT2': 'Microsoft DirectX Compressed Texture (DXT2)',
'DXT3': 'Microsoft DirectX Compressed Texture (DXT3)',
'DXT4': 'Microsoft DirectX Compressed Texture (DXT4)',
'DXT5': 'Microsoft DirectX Compressed Texture (DXT5)',
'DXTC': 'Microsoft DirectX Compressed Texture (DXTC)',
'DXTN': 'Microsoft DirectX Compressed Texture (DXTn)',
'EKQ0': 'Elsa EKQ0',
'ELK0': 'Elsa ELK0',
'EM2V': 'Etymonix MPEG-2 I-frame',
'EQK0': 'Elsa graphics card quick codec',
'ESCP': 'Eidos Escape',
'ETV1': 'eTreppid Video ETV1',
'ETV2': 'eTreppid Video ETV2',
'ETVC': 'eTreppid Video ETVC',
'FFDS': 'FFDShow supported',
'FFV1': 'FFDShow supported',
'FFVH': 'FFVH codec',
'FLIC': 'Autodesk FLI/FLC Animation',
'FLJP': 'D-Vision Field Encoded Motion JPEG',
'FLV1': 'FLV1 codec',
'FMJP': 'D-Vision fieldbased ISO MJPEG',
'FRLE': 'SoftLab-NSK Y16 + Alpha RLE',
'FRWA': 'SoftLab-Nsk Forward Motion JPEG w/ alpha channel',
'FRWD': 'SoftLab-Nsk Forward Motion JPEG',
'FRWT': 'SoftLab-NSK Vision Forward Motion JPEG with Alpha-channel',
'FRWU': 'SoftLab-NSK Vision Forward Uncompressed',
'FVF1': 'Iterated Systems Fractal Video Frame',
'FVFW': 'ff MPEG-4 based on XviD codec',
'GEPJ': 'White Pine (ex Paradigm Matrix) Motion JPEG Codec',
'GJPG': 'Grand Tech GT891x Codec',
'GLCC': 'GigaLink AV Capture codec',
'GLZW': 'Motion LZW',
'GPEG': 'Motion JPEG',
'GPJM': 'Pinnacle ReelTime MJPEG Codec',
'GREY': 'Apparently a duplicate of Y800',
'GWLT': 'Microsoft Greyscale WLT DIB',
'H260': 'H.260',
'H261': 'H.261',
'H262': 'H.262',
'H263': 'H.263',
'H264': 'H.264 AVC',
'H265': 'H.265 HEVC',
'H266': 'H.266',
'H267': 'H.267',
'H268': 'H.268',
'H269': 'H.269',
'HD10': 'BlueFish444 (lossless RGBA, YUV 10-bit)',
'HDX4': 'Jomigo HDX4',
'HEVC': 'H.265 HEVC',
'HFYU': 'Huffman Lossless Codec',
'HMCR': 'Rendition Motion Compensation Format (HMCR)',
'HMRR': 'Rendition Motion Compensation Format (HMRR)',
'I263': 'Intel ITU H.263 Videoconferencing (i263)',
'I420': 'Intel Indeo 4',
'IAN ': 'Intel RDX',
'ICLB': 'InSoft CellB Videoconferencing',
'IDM0': 'IDM Motion Wavelets 2.0',
'IF09': 'Microsoft H.261',
'IGOR': 'Power DVD',
'IJPG': 'Intergraph JPEG',
'ILVC': 'Intel Layered Video',
'ILVR': 'ITU-T H.263+',
'IMC1': 'IMC1',
'IMC2': 'IMC2',
'IMC3': 'IMC3',
'IMC4': 'IMC4',
'IMJG': 'Accom SphereOUS MJPEG with Alpha-channel',
'IPDV': 'I-O Data Device Giga AVI DV Codec',
'IPJ2': 'Image Power JPEG2000',
'IR21': 'Intel Indeo 2.1',
'IRAW': 'Intel YUV Uncompressed',
'IUYV': 'Interlaced version of UYVY (line order 0,2,4 then 1,3,5 etc)',
'IV30': 'Ligos Indeo 3.0',
'IV31': 'Ligos Indeo 3.1',
'IV32': 'Ligos Indeo 3.2',
'IV33': 'Ligos Indeo 3.3',
'IV34': 'Ligos Indeo 3.4',
'IV35': 'Ligos Indeo 3.5',
'IV36': 'Ligos Indeo 3.6',
'IV37': 'Ligos Indeo 3.7',
'IV38': 'Ligos Indeo 3.8',
'IV39': 'Ligos Indeo 3.9',
'IV40': 'Ligos Indeo Interactive 4.0',
'IV41': 'Ligos Indeo Interactive 4.1',
'IV42': 'Ligos Indeo Interactive 4.2',
'IV43': 'Ligos Indeo Interactive 4.3',
'IV44': 'Ligos Indeo Interactive 4.4',
'IV45': 'Ligos Indeo Interactive 4.5',
'IV46': 'Ligos Indeo Interactive 4.6',
'IV47': 'Ligos Indeo Interactive 4.7',
'IV48': 'Ligos Indeo Interactive 4.8',
'IV49': 'Ligos Indeo Interactive 4.9',
'IV50': 'Ligos Indeo Interactive 5.0',
'IY41': 'Interlaced version of Y41P (line order 0,2,4,...,1,3,5...)',
'IYU1': '12 bit format used in mode 2 of the IEEE 1394 Digital Camera 1.04 spec',
'IYU2': '24 bit format used in mode 2 of the IEEE 1394 Digital Camera 1.04 spec',
'IYUV': 'Intel Indeo iYUV 4:2:0',
'JBYR': 'Kensington JBYR',
'JFIF': 'Motion JPEG (FFmpeg)',
'JPEG': 'Still Image JPEG DIB',
'JPG ': 'JPEG compressed',
'JPGL': 'Webcam JPEG Light',
'KMVC': 'Karl Morton\'s Video Codec',
'KPCD': 'Kodak Photo CD',
'L261': 'Lead Technologies H.261',
'L263': 'Lead Technologies H.263',
'LAGS': 'Lagarith LossLess',
'LBYR': 'Creative WebCam codec',
'LCMW': 'Lead Technologies Motion CMW Codec',
'LCW2': 'LEADTools MCMW (Motion Wavelet)',
'LEAD': 'LEAD Video Codec',
'LGRY': 'Lead Technologies Grayscale Image',
'LJ2K': 'LEADTools JPEG2000',
'LJPG': 'LEAD MJPEG Codec',
'LMP2': 'LEADTools MPEG2',
'LOCO': 'LOCO Lossless Codec',
'LSCR': 'LEAD Screen Capture',
'LSVM': 'Vianet Lighting Strike Vmail (Streaming)',
'LZO1': 'LZO compressed (lossless codec)',
'M261': 'Microsoft H.261',
'M263': 'Microsoft H.263',
'M4CC': 'ESS MPEG4 Divio codec',
'M4S2': 'Microsoft MPEG-4 (M4S2)',
'MC12': 'ATI Motion Compensation Format (MC12)',
'MC24': 'MainConcept Motion JPEG Codec',
'MCAM': 'ATI Motion Compensation Format (MCAM)',
'MCZM': 'Theory MicroCosm Lossless 64bit RGB with Alpha-channel',
'MDVD': 'Alex MicroDVD Video (hacked MS MPEG-4)',
'MDVF': 'Pinnacle DV/DV50/DVHD100',
'MHFY': 'A.M.Paredes mhuffyYUV (LossLess)',
'MJ2C': 'Morgan Multimedia Motion JPEG2000',
'MJPA': 'Pinnacle ReelTime MJPG hardware codec',
'MJPB': 'Motion JPEG codec',
'MJPG': 'Motion JPEG DIB',
'MJPX': 'Pegasus PICVideo Motion JPEG',
'MMES': 'Matrox MPEG-2 I-frame',
'MNVD': 'MindBend MindVid LossLess',
'MP2A': 'MPEG-2 Audio',
'MP2T': 'MPEG-2 Transport Stream',
'MP2V': 'MPEG-2 Video',
'MP41': 'Microsoft MPEG-4 V1 (enhanced H.263)',
'MP42': 'Microsoft MPEG-4 (low-motion)',
'MP43': 'Microsoft MPEG-4 (fast-motion)',
'MP4A': 'MPEG-4 Audio',
'MP4S': 'Microsoft MPEG-4 (MP4S)',
'MP4T': 'MPEG-4 Transport Stream',
'MP4V': 'Apple QuickTime MPEG-4 native',
'MPEG': 'MPEG-1',
'MPG1': 'FFmpeg-1',
'MPG2': 'FFmpeg-1',
'MPG3': 'Same as Low motion DivX MPEG-4',
'MPG4': 'Microsoft MPEG-4 Video High Speed Compressor',
'MPGI': 'Sigma Designs MPEG',
'MPNG': 'Motion PNG codec',
'MRCA': 'Martin Regen Codec',
'MRLE': 'Run Length Encoding',
'MSS1': 'Windows Screen Video',
'MSS2': 'Windows Media 9',
'MSUC': 'MSU LossLess',
'MSVC': 'Microsoft Video 1',
'MSZH': 'Lossless codec (ZIP compression)',
'MTGA': 'Motion TGA images (24, 32 bpp)',
'MTX1': 'Matrox MTX1',
'MTX2': 'Matrox MTX2',
'MTX3': 'Matrox MTX3',
'MTX4': 'Matrox MTX4',
'MTX5': 'Matrox MTX5',
'MTX6': 'Matrox MTX6',
'MTX7': 'Matrox MTX7',
'MTX8': 'Matrox MTX8',
'MTX9': 'Matrox MTX9',
'MV12': 'MV12',
'MVI1': 'Motion Pixels MVI',
'MVI2': 'Motion Pixels MVI',
'MWV1': 'Aware Motion Wavelets',
'MYUV': 'Media-100 844/X Uncompressed',
'NAVI': 'nAVI',
'NDIG': 'Ahead Nero Digital MPEG-4 Codec',
'NHVU': 'NVidia Texture Format (GEForce 3)',
'NO16': 'Theory None16 64bit uncompressed RAW',
'NT00': 'NewTek LightWave HDTV YUV with Alpha-channel',
'NTN1': 'Nogatech Video Compression 1',
'NTN2': 'Nogatech Video Compression 2 (GrabBee hardware coder)',
'NUV1': 'NuppelVideo',
'NV12': '8-bit Y plane followed by an interleaved U/V plane with 2x2 subsampling',
'NV21': 'As NV12 with U and V reversed in the interleaved plane',
'NVDS': 'nVidia Texture Format',
'NVHS': 'NVidia Texture Format (GEForce 3)',
'NVS0': 'nVidia GeForce Texture',
'NVS1': 'nVidia GeForce Texture',
'NVS2': 'nVidia GeForce Texture',
'NVS3': 'nVidia GeForce Texture',
'NVS4': 'nVidia GeForce Texture',
'NVS5': 'nVidia GeForce Texture',
'NVT0': 'nVidia GeForce Texture',
'NVT1': 'nVidia GeForce Texture',
'NVT2': 'nVidia GeForce Texture',
'NVT3': 'nVidia GeForce Texture',
'NVT4': 'nVidia GeForce Texture',
'NVT5': 'nVidia GeForce Texture',
'PDVC': 'I-O Data Device Digital Video Capture DV codec',
'PGVV': 'Radius Video Vision',
'PHMO': 'IBM Photomotion',
'PIM1': 'Pegasus Imaging',
'PIM2': 'Pegasus Imaging',
'PIMJ': 'Pegasus Imaging Lossless JPEG',
'PIXL': 'MiroVideo XL (Motion JPEG)',
'PNG ': 'Apple PNG',
'PNG1': 'Corecodec.org CorePNG Codec',
'PVEZ': 'Horizons Technology PowerEZ',
'PVMM': 'PacketVideo Corporation MPEG-4',
'PVW2': 'Pegasus Imaging Wavelet Compression',
'PVWV': 'Pegasus Imaging Wavelet 2000',
'PXLT': 'Apple Pixlet (Wavelet)',
'Q1.0': 'Q-Team QPEG 1.0 (www.q-team.de)',
'Q1.1': 'Q-Team QPEG 1.1 (www.q-team.de)',
'QDGX': 'Apple QuickDraw GX',
'QPEG': 'Q-Team QPEG 1.0',
'QPEQ': 'Q-Team QPEG 1.1',
'R210': 'BlackMagic YUV (Quick Time)',
'R411': 'Radius DV NTSC YUV',
'R420': 'Radius DV PAL YUV',
'RAVI': 'GroupTRON ReferenceAVI codec (dummy for MPEG compressor)',
'RAV_': 'GroupTRON ReferenceAVI codec (dummy for MPEG compressor)',
'RAW ': 'Full Frames (Uncompressed)',
'RGB ': 'Full Frames (Uncompressed)',
'RGB(15)': 'Uncompressed RGB15 5:5:5',
'RGB(16)': 'Uncompressed RGB16 5:6:5',
'RGB(24)': 'Uncompressed RGB24 8:8:8',
'RGB1': 'Uncompressed RGB332 3:3:2',
'RGBA': 'Raw RGB with alpha',
'RGBO': 'Uncompressed RGB555 5:5:5',
'RGBP': 'Uncompressed RGB565 5:6:5',
'RGBQ': 'Uncompressed RGB555X 5:5:5 BE',
'RGBR': 'Uncompressed RGB565X 5:6:5 BE',
'RGBT': 'Computer Concepts 32-bit support',
'RL4 ': 'RLE 4bpp RGB',
'RL8 ': 'RLE 8bpp RGB',
'RLE ': 'Microsoft Run Length Encoder',
'RLE4': 'Run Length Encoded 4',
'RLE8': 'Run Length Encoded 8',
'RMP4': 'REALmagic MPEG-4 Video Codec',
'ROQV': 'Id RoQ File Video Decoder',
'RPZA': 'Apple Video 16 bit "road pizza"',
'RT21': 'Intel Real Time Video 2.1',
'RTV0': 'NewTek VideoToaster',
'RUD0': 'Rududu video codec',
'RV10': 'RealVideo codec',
'RV13': 'RealVideo codec',
'RV20': 'RealVideo G2',
'RV30': 'RealVideo 8',
'RV40': 'RealVideo 9',
'RVX ': 'Intel RDX (RVX )',
'S263': 'Sorenson Vision H.263',
'S422': 'Tekram VideoCap C210 YUV 4:2:2',
'SAMR': 'Adaptive Multi-Rate (AMR) audio codec',
'SAN3': 'MPEG-4 codec (direct copy of DivX 3.11a)',
'SDCC': 'Sun Communication Digital Camera Codec',
'SEDG': 'Samsung MPEG-4 codec',
'SFMC': 'CrystalNet Surface Fitting Method',
'SHR0': 'BitJazz SheerVideo',
'SHR1': 'BitJazz SheerVideo',
'SHR2': 'BitJazz SheerVideo',
'SHR3': 'BitJazz SheerVideo',
'SHR4': 'BitJazz SheerVideo',
'SHR5': 'BitJazz SheerVideo',
'SHR6': 'BitJazz SheerVideo',
'SHR7': 'BitJazz SheerVideo',
'SJPG': 'CUseeMe Networks Codec',
'SL25': 'SoftLab-NSK DVCPRO',
'SL50': 'SoftLab-NSK DVCPRO50',
'SLDV': 'SoftLab-NSK Forward DV Draw codec',
'SLIF': 'SoftLab-NSK MPEG2 I-frames',
'SLMJ': 'SoftLab-NSK Forward MJPEG',
'SMC ': 'Apple Graphics (SMC) codec (256 color)',
'SMSC': 'Radius SMSC',
'SMSD': 'Radius SMSD',
'SMSV': 'WorldConnect Wavelet Video',
'SNOW': 'SNOW codec',
'SP40': 'SunPlus YUV',
'SP44': 'SunPlus Aiptek MegaCam Codec',
'SP53': 'SunPlus Aiptek MegaCam Codec',
'SP54': 'SunPlus Aiptek MegaCam Codec',
'SP55': 'SunPlus Aiptek MegaCam Codec',
'SP56': 'SunPlus Aiptek MegaCam Codec',
'SP57': 'SunPlus Aiptek MegaCam Codec',
'SP58': 'SunPlus Aiptek MegaCam Codec',
'SPIG': 'Radius Spigot',
'SPLC': 'Splash Studios ACM Audio Codec',
'SPRK': 'Sorenson Spark',
'SQZ2': 'Microsoft VXTreme Video Codec V2',
'STVA': 'ST CMOS Imager Data (Bayer)',
'STVB': 'ST CMOS Imager Data (Nudged Bayer)',
'STVC': 'ST CMOS Imager Data (Bunched)',
'STVX': 'ST CMOS Imager Data (Extended CODEC Data Format)',
'STVY': 'ST CMOS Imager Data (Extended CODEC Data Format with Correction Data)',
'SV10': 'Sorenson Video R1',
'SVQ1': 'Sorenson Video R3',
'SVQ3': 'Sorenson Video 3 (Apple Quicktime 5)',
'SWC1': 'MainConcept Motion JPEG Codec',
'T420': 'Toshiba YUV 4:2:0',
'TGA ': 'Apple TGA (with Alpha-channel)',
'THEO': 'FFVFW Supported Codec',
'TIFF': 'Apple TIFF (with Alpha-channel)',
'TIM2': 'Pinnacle RAL DVI',
'TLMS': 'TeraLogic Motion Intraframe Codec (TLMS)',
'TLST': 'TeraLogic Motion Intraframe Codec (TLST)',
'TM20': 'Duck TrueMotion 2.0',
'TM2A': 'Duck TrueMotion Archiver 2.0',
'TM2X': 'Duck TrueMotion 2X',
'TMIC': 'TeraLogic Motion Intraframe Codec (TMIC)',
'TMOT': 'Horizons Technology TrueMotion S',
'TR20': 'Duck TrueMotion RealTime 2.0',
'TRLE': 'Akula Alpha Pro Custom AVI (LossLess)',
'TSCC': 'TechSmith Screen Capture Codec',
'TV10': 'Tecomac Low-Bit Rate Codec',
'TVJP': 'TrueVision Field Encoded Motion JPEG',
'TVMJ': 'Truevision TARGA MJPEG Hardware Codec',
'TY0N': 'Trident TY0N',
'TY2C': 'Trident TY2C',
'TY2N': 'Trident TY2N',
'U263': 'UB Video StreamForce H.263',
'U<Y ': 'Discreet UC YUV 4:2:2:4 10 bit',
'U<YA': 'Discreet UC YUV 4:2:2:4 10 bit (with Alpha-channel)',
'UCOD': 'eMajix.com ClearVideo',
'ULTI': 'IBM Ultimotion',
'UMP4': 'UB Video MPEG 4',
'UYNV': 'UYVY',
'UYVP': 'YCbCr 4:2:2',
'UYVU': 'SoftLab-NSK Forward YUV codec',
'UYVY': 'UYVY 4:2:2 byte ordering',
'V210': 'Optibase VideoPump 10-bit 4:2:2 Component YCbCr',
'V261': 'Lucent VX2000S',
'V422': '24 bit YUV 4:2:2 Format',
'V655': '16 bit YUV 4:2:2 Format',
'VBLE': 'MarcFD VBLE Lossless Codec',
'VCR1': 'ATI VCR 1.0',
'VCR2': 'ATI VCR 2.0',
'VCR3': 'ATI VCR 3.0',
'VCR4': 'ATI VCR 4.0',
'VCR5': 'ATI VCR 5.0',
'VCR6': 'ATI VCR 6.0',
'VCR7': 'ATI VCR 7.0',
'VCR8': 'ATI VCR 8.0',
'VCR9': 'ATI VCR 9.0',
'VDCT': 'Video Maker Pro DIB',
'VDOM': 'VDOnet VDOWave',
'VDOW': 'VDOnet VDOLive (H.263)',
'VDST': 'VirtualDub remote frameclient ICM driver',
'VDTZ': 'Darim Vision VideoTizer YUV',
'VGPX': 'VGPixel Codec',
'VIDM': 'DivX 5.0 Pro Supported Codec',
'VIDS': 'YUV 4:2:2 CCIR 601 for V422',
'VIFP': 'VIFP',
'VIV1': 'Vivo H.263',
'VIV2': 'Vivo H.263',
'VIVO': 'Vivo H.263 v2.00',
'VIXL': 'Miro Video XL',
'VLV1': 'Videologic VLCAP.DRV',
'VP30': 'On2 VP3.0',
'VP31': 'On2 VP3.1',
'VP40': 'On2 TrueCast VP4',
'VP50': 'On2 TrueCast VP5',
'VP60': 'On2 TrueCast VP6',
'VP61': 'On2 TrueCast VP6.1',
'VP62': 'On2 TrueCast VP6.2',
'VP70': 'On2 TrueMotion VP7',
'VQC1': 'Vector-quantised codec 1',
'VQC2': 'Vector-quantised codec 2',
'VR21': 'BlackMagic YUV (Quick Time)',
'VSSH': 'Vanguard VSS H.264',
'VSSV': 'Vanguard Software Solutions Video Codec',
'VSSW': 'Vanguard VSS H.264',
'VTLP': 'Alaris VideoGramPixel Codec',
'VX1K': 'VX1000S Video Codec',
'VX2K': 'VX2000S Video Codec',
'VXSP': 'VX1000SP Video Codec',
'VYU9': 'ATI Technologies YUV',
'VYUY': 'ATI Packed YUV Data',
'WBVC': 'Winbond W9960',
'WHAM': 'Microsoft Video 1 (WHAM)',
'WINX': 'Winnov Software Compression',
'WJPG': 'AverMedia Winbond JPEG',
'WMV1': 'Windows Media Video V7',
'WMV2': 'Windows Media Video V8',
'WMV3': 'Windows Media Video V9',
'WMVA': 'WMVA codec',
'WMVP': 'Windows Media Video V9',
'WNIX': 'WniWni Codec',
'WNV1': 'Winnov Hardware Compression',
'WNVA': 'Winnov hw compress',
'WRLE': 'Apple QuickTime BMP Codec',
'WRPR': 'VideoTools VideoServer Client Codec',
'WV1F': 'WV1F codec',
'WVLT': 'IllusionHope Wavelet 9/7',
'WVP2': 'WVP2 codec',
'X263': 'Xirlink H.263',
'X264': 'XiWave GNU GPL x264 MPEG-4 Codec',
'X265': 'H.265 HEVC',
'XLV0': 'NetXL Video Decoder',
'XMPG': 'Xing MPEG (I-Frame only)',
'XVID': 'XviD MPEG-4',
'XVIX': 'Based on XviD MPEG-4 codec',
'XWV0': 'XiWave Video Codec',
'XWV1': 'XiWave Video Codec',
'XWV2': 'XiWave Video Codec',
'XWV3': 'XiWave Video Codec (Xi-3 Video)',
'XWV4': 'XiWave Video Codec',
'XWV5': 'XiWave Video Codec',
'XWV6': 'XiWave Video Codec',
'XWV7': 'XiWave Video Codec',
'XWV8': 'XiWave Video Codec',
'XWV9': 'XiWave Video Codec',
'XXAN': 'XXAN',
'XYZP': 'Extended PAL format XYZ palette',
'Y211': 'YUV 2:1:1 Packed',
'Y216': 'Pinnacle TARGA CineWave YUV (Quick Time)',
'Y411': 'YUV 4:1:1 Packed',
'Y41B': 'YUV 4:1:1 Planar',
'Y41P': 'PC1 4:1:1',
'Y41T': 'PC1 4:1:1 with transparency',
'Y422': 'Y422',
'Y42B': 'YUV 4:2:2 Planar',
'Y42T': 'PCI 4:2:2 with transparency',
'Y444': 'IYU2',
'Y8 ': 'Grayscale video',
'Y800': 'Simple grayscale video',
'YC12': 'Intel YUV12 Codec',
'YMPG': 'YMPEG Alpha',
'YU12': 'ATI YV12 4:2:0 Planar',
'YU92': 'Intel - YUV',
'YUNV': 'YUNV',
'YUV2': 'Apple Component Video (YUV 4:2:2)',
'YUV8': 'Winnov Caviar YUV8',
'YUV9': 'Intel YUV9',
'YUVP': 'YCbCr 4:2:2',
'YUY2': 'Uncompressed YUV 4:2:2',
'YUYV': 'Canopus YUV',
'YV12': 'YVU12 Planar',
'YV16': 'Elecard YUV 4:2:2 Planar',
'YV92': 'Intel Smart Video Recorder YVU9',
'YVU9': 'Intel YVU9 Planar',
'YVYU': 'YVYU 4:2:2 byte ordering',
'ZLIB': 'ZLIB',
'ZPEG': 'Metheus Video Zipper',
'ZYGO': 'ZyGo Video Codec'
}
# make it fool proof
for code, value in list(FOURCC.items()):  # copy: the dict is mutated below
if not code.upper() in FOURCC:
FOURCC[code.upper()] = value
if code.endswith(' '):
FOURCC[code.strip().upper()] = value
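# A minimal lookup sketch (illustrative only; describe_fourcc is not part of
# the original module). Thanks to the normalization above, codes can be
# resolved case-insensitively and with trailing spaces stripped:
#
#     def describe_fourcc(code):
#         return FOURCC.get(code.upper()) or FOURCC.get(code.strip().upper())
#
#     describe_fourcc('divx')  # -> 'DivX'
#     describe_fourcc('avr')   # -> 'Avid ABVB/NuVista MJPEG with Alpha-channel'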
|
seppi91/CouchPotatoServer
|
libs/enzyme/fourcc.py
|
Python
|
gpl-3.0
| 31,592
|
[
"CRYSTAL"
] |
707d0207034a608eae0f5c09dfd202b7352f3008327f86fb676b197c31e091ff
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import warnings
import numpy as np
from pymatgen.core.structure import Specie, Structure
from pymatgen.electronic_structure.core import Magmom
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.transformations.standard_transformations import AutoOxiStateDecorationTransformation
from pymatgen.analysis.bond_valence import BVAnalyzer
from monty.serialization import loadfn
from enum import Enum, unique
import itertools
import os
"""
This module provides some useful functions for dealing with magnetic Structures
(e.g. Structures with associated magmom tags).
"""
__author__ = "Matthew Horton"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matthew Horton"
__email__ = "mkhorton@lbl.gov"
__status__ = "Development"
__date__ = "Feb 2017"
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
DEFAULT_MAGMOMS = loadfn(os.path.join(MODULE_DIR, "default_magmoms.yaml"))
@unique
class Ordering(Enum):
FM = 'FM' # Ferromagnetic
AFM = 'AFM' # Antiferromagnetic
FiM = 'FiM' # Ferrimagnetic
NM = 'NM' # Non-magnetic
Unknown = 'Unknown'
class CollinearMagneticStructureAnalyzer:
def __init__(self, structure,
overwrite_magmom_mode="none",
round_magmoms=False,
detect_valences=False,
make_primitive=True,
default_magmoms=None,
threshold=0.1):
"""
A class which provides a few helpful methods to analyze
collinear magnetic structures.
If magnetic moments are not defined, moments will be
taken either from default_magmoms.yaml (similar to the
default magmoms in MPRelaxSet, with a few extra definitions)
or from a specie:magmom dict provided by the default_magmoms
kwarg.
Input magmoms can be replaced using the 'overwrite_magmom_mode'
kwarg. This can be "none" to do nothing, "respect_sign" which will
overwrite existing magmoms with those from default_magmoms but will
keep positive magmoms positive, negative magmoms negative and zero
magmoms zero, "respect_zeros", which will give a ferromagnetic structure
(all positive magmoms from default_magmoms) but still keep zero magmoms
as zero, or "replace_all" which will try to guess initial magmoms for
all species in the structure irrespective of input structure.
This is most suitable for an initial DFT calculation.
:param structure: Structure object
:param overwrite_magmom_mode (str): default "none"
:param round_magmoms (int): will round input magmoms to
specified number of decimal places, suggest value of 1 or False
for typical DFT calculations depending on application
:param detect_valences (bool): if True, will attempt to assign valences
to input structure
:param make_primitive (bool): if True, will transform to primitive
magnetic cell
:param default_magmoms (dict): (optional) dict specifying default magmoms
:param threshold (float): number (in Bohr magnetons) below which magmoms
will be rounded to zero, default of 0.1 can probably be increased for many
magnetic systems, depending on your application
"""
if default_magmoms:
self.default_magmoms = default_magmoms
else:
self.default_magmoms = DEFAULT_MAGMOMS
structure = structure.copy()
# check for disorder
if not structure.is_ordered:
raise NotImplementedError("Not implemented for disordered structures, "
"make ordered approximation first.")
if detect_valences:
trans = AutoOxiStateDecorationTransformation()
bva = BVAnalyzer()
try:
structure = trans.apply_transformation(structure)
except ValueError:
warnings.warn("Could not assign valences "
"for {}".format(structure.composition.reduced_formula))
# check to see if structure has magnetic moments
# on site properties or species spin properties,
# prioritize site properties
has_magmoms = bool(structure.site_properties.get('magmom', False))
has_spin = False
for comp in structure.species_and_occu:
for sp, occu in comp.items():
if getattr(sp, 'spin', False):
has_spin = True
# perform input sanitation ...
# rest of class will assume magnetic moments
# are stored on site properties:
# this is somewhat arbitrary, arguments can
# be made for both approaches
if has_magmoms and has_spin:
raise ValueError("Structure contains magnetic moments on both "
"magmom site properties and spin species "
"properties. This is ambiguous. Remove one or "
"the other.")
elif has_magmoms:
if None in structure.site_properties['magmom']:
warnings.warn("Be careful with mixing types in your magmom "
"site properties. Any 'None' magmoms have been "
"replaced with zero.")
magmoms = [m if m else 0 for m in structure.site_properties['magmom']]
elif has_spin:
magmoms = [getattr(sp, 'spin', 0) for sp
in structure.species]
structure.remove_spin()
else:
# no magmoms present, add zero magmoms for now
magmoms = [0]*len(structure)
# and overwrite magmoms with default magmoms later unless otherwise stated
if overwrite_magmom_mode == "none":
overwrite_magmom_mode = "replace_all"
# test to see if input structure has collinear magmoms
self.is_collinear = Magmom.are_collinear(magmoms)
if not self.is_collinear:
warnings.warn("This class is not designed to be used with "
"non-collinear structures. If your structure is "
"only slightly non-collinear (e.g. canted) may still "
"give useful results, but use with caution.")
# this is for collinear structures only, make sure magmoms
# are all floats
magmoms = list(map(float, magmoms))
# set properties that should be done /before/ we process input magmoms
self.total_magmoms = sum(magmoms)
self.magnetization = sum(magmoms)/structure.volume
# round magmoms below threshold to zero
magmoms = [m if abs(m) > threshold else 0 for m in magmoms]
# overwrite existing magmoms with default_magmoms
if overwrite_magmom_mode not in ("none", "respect_sign",
"respect_zeros", "replace_all"):
raise ValueError("Unsupported mode.")
for idx, site in enumerate(structure):
if site.species_string in self.default_magmoms:
# look for species first, e.g. Fe2+
default_magmom = self.default_magmoms[site.species_string]
elif isinstance(site.specie, Specie) and \
str(site.specie.element) in self.default_magmoms:
# look for element, e.g. Fe
default_magmom = self.default_magmoms[str(site.specie.element)]
else:
default_magmom = 0
# overwrite_magmom_mode = "respect_sign" will change magnitude of
# existing moments only, and keep zero magmoms as
# zero: it will keep the magnetic ordering intact
if overwrite_magmom_mode == "respect_sign":
if magmoms[idx] > 0:
magmoms[idx] = default_magmom
elif magmoms[idx] < 0:
magmoms[idx] = -default_magmom
# overwrite_magmom_mode = "respect_zeros" will give a ferromagnetic
# structure but will keep zero magmoms as zero
elif overwrite_magmom_mode == "respect_zeros":
if magmoms[idx] != 0:
magmoms[idx] = default_magmom
# overwrite_magmom_mode = "replace_all" will ignore input magmoms
# and give a ferromagnetic structure with magnetic
# moments on *all* atoms it thinks could be magnetic
elif overwrite_magmom_mode == "replace_all":
magmoms[idx] = default_magmom
# round magmoms to specified number of
# decimal places, used to smooth out
# computational data
# TODO: be a bit smarter about rounding magmoms!
        if round_magmoms:
            # round the processed magmoms, not the raw site properties,
            # so the overwrite logic above is preserved
            magmoms = np.around(magmoms, decimals=round_magmoms)
        structure.add_site_property('magmom', magmoms)
if make_primitive:
structure = structure.get_primitive_structure(use_site_props=True)
self.structure = structure
def get_structure_with_spin(self):
"""
Returns a Structure with species decorated with spin values instead
of using magmom site properties.
:return: Structure
"""
structure = self.structure.copy()
structure.add_spin_by_site(structure.site_properties['magmom'])
structure.remove_site_property('magmom')
return structure
def get_structure_with_only_magnetic_atoms(self, make_primitive=True):
"""
Returns a Structure with only magnetic atoms present.
:return: Structure
"""
sites = [site for site in self.structure
if abs(site.properties['magmom']) > 0]
structure = Structure.from_sites(sites)
if make_primitive:
structure = structure.get_primitive_structure(use_site_props=True)
return structure
def get_nonmagnetic_structure(self, make_primitive=True):
"""
Returns a Structure without magnetic moments defined.
:param make_primitive (bool): Return a primitive
structure, defaults to True.
:return: Structure
"""
structure = self.structure.copy()
structure.remove_site_property('magmom')
if make_primitive:
structure = structure.get_primitive_structure()
return structure
def get_ferromagnetic_structure(self, make_primitive=True):
"""
Returns a Structure with all magnetic moments positive
or zero.
:param make_primitive (bool): Return a primitive
structure, defaults to True.
:return: Structure
"""
structure = self.structure.copy()
structure.add_site_property('magmom',
[abs(m) for m in self.magmoms])
if make_primitive:
structure = structure.get_primitive_structure(use_site_props=True)
return structure
@property
def magmoms(self):
"""
Convenience property, returns magmoms as a numpy array.
:return: np.array
"""
return np.array(self.structure.site_properties['magmom'])
@property
def types_of_magnetic_specie(self):
"""
Equivalent to Structure.types_of_specie but only returns
magnetic species.
:return: types of Specie
"""
structure = self.get_structure_with_only_magnetic_atoms()
return structure.types_of_specie
@property
def magnetic_species_and_magmoms(self):
"""
Returns a dict of magnetic species and the magnitude of
their associated magmoms. Implicitly assumes the magnetic
moment is the same magnitude for a given species.
:return: dict of magnetic species and magmoms
"""
# TODO: improve detection when magnitude of magmoms varies
structure = self.get_ferromagnetic_structure()
magtypes = {str(site.specie): site.properties['magmom'] for site in structure
if site.properties['magmom'] > 0}
return magtypes
@property
def number_of_magnetic_sites(self):
"""
:return (int): Number of magnetic sites present in structure.
"""
return np.sum([abs(m) > 0 for m in self.magmoms])
def number_of_unique_magnetic_sites(self, symprec=1e-3, angle_tolerance=5):
"""
:param symprec (float): same as in SpacegroupAnalyzer
:param angle_tolerance (float): same as in SpacegroupAnalyzer
:return (int): Number of symmetrically-distinct magnetic sites present
in structure.
"""
structure = self.get_nonmagnetic_structure()
sga = SpacegroupAnalyzer(structure, symprec=symprec,
angle_tolerance=angle_tolerance)
symm_structure = sga.get_symmetrized_structure()
num_unique_mag_sites = 0
for group_of_sites in symm_structure.equivalent_sites:
if group_of_sites[0].specie in self.types_of_magnetic_specie:
num_unique_mag_sites += 1
return num_unique_mag_sites
@property
def ordering(self):
"""
Applies heuristics to return a magnetic ordering for a collinear
magnetic structure. The result is not guaranteed to be correct.
:return: Ordering Enum ('FiM' is used as the abbreviation for
ferrimagnetic)
"""
if not self.is_collinear:
warnings.warn('Detecting ordering in non-collinear structures not yet implemented.')
return Ordering.Unknown
magmoms = self.magmoms
max_magmom = max(magmoms)
total_magnetization = abs(sum(magmoms))
is_potentially_ferromagnetic = np.all(magmoms >= 0) or np.all(magmoms <= 0)
if total_magnetization > 0 and is_potentially_ferromagnetic:
return Ordering.FM
elif total_magnetization > 0:
return Ordering.FiM
elif max_magmom > 0:
return Ordering.AFM
else:
return Ordering.NM
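        # Worked examples of the heuristic above (illustrative):
        #   magmoms [5, 5, 5]  -> FM   (single sign, nonzero net moment)
        #   magmoms [5, -5]    -> AFM  (zero net moment, nonzero moments)
        #   magmoms [5, -3]    -> FiM  (mixed signs, nonzero net moment)
        #   magmoms [0, 0, 0]  -> NM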
def get_exchange_group_info(self, symprec=1e-2, angle_tolerance=5.0):
"""
Returns the information on the symmetry of the Hamiltonian
describing the exchange energy of the system, taking into
account relative direction of magnetic moments but not their
absolute direction.
This is not strictly accurate (e.g. some/many atoms will
have zero magnetic moments), but defining symmetry this
way is a useful way of keeping track of distinct magnetic
orderings within pymatgen.
:param symprec: same as SpacegroupAnalyzer
:param angle_tolerance: same as SpacegroupAnalyzer
:return: spacegroup_symbol, international_number
"""
structure = self.get_structure_with_spin()
return structure.get_space_group_info(symprec=symprec,
angle_tolerance=angle_tolerance)
def matches_ordering(self, other):
"""
Compares the magnetic orderings of one structure with another.
:param other: Structure
:return (bool):
"""
a = CollinearMagneticStructureAnalyzer(self.structure,
overwrite_magmom_mode="respect_sign")\
.get_structure_with_spin()
b = CollinearMagneticStructureAnalyzer(other,
overwrite_magmom_mode="respect_sign") \
.get_structure_with_spin()
        # Structure.matches sometimes returns None instead of False
        # (upstream quirk), so coerce the result to a plain bool
        return bool(a.matches(b))
@property
def propagation_vector(self):
        raise NotImplementedError
def __str__(self):
"""
Sorts a Structure (by fractional co-ordinate), and
prints sites with magnetic information. This is
useful over Structure.__str__ because sites are in
a consistent order, which makes visual comparison between
two identical Structures with different magnetic orderings
easier.
:return:
"""
frac_coords = self.structure.frac_coords
sorted_indices = np.lexsort((frac_coords[:, 2],
frac_coords[:, 1],
frac_coords[:, 0]))
s = Structure.from_sites([self.structure[idx] for idx in sorted_indices])
# adapted from Structure.__repr__
outs = ["Structure Summary", repr(s.lattice)]
outs.append("Magmoms Sites")
for site in s:
if site.properties['magmom'] != 0:
prefix = "{:+.2f} ".format(site.properties['magmom'])
else:
prefix = " "
outs.append(prefix+repr(site))
return "\n".join(outs)
|
Bismarrck/pymatgen
|
pymatgen/analysis/magnetism/analyzer.py
|
Python
|
mit
| 17,193
|
[
"pymatgen"
] |
4f4bf31bec0e27100d0fcbab5830a1bf4d187befe44cde789c811297a6b8b99b
|
import pytest
from django.contrib.auth.models import User
from graphapi.tests.utils import populate_db
from openstates.data.models import Person
from profiles.models import Subscription
COMPLEX_STR = (
"Bills matching 'topic' from AK, upper chamber, "
"classified as bill, including subjects 'MOOSE, WILDLIFE', "
"status includes 'passed_lower', sponsored by Amanda Adams"
)
@pytest.mark.django_db
def setup():
populate_db()
def _one_of_each():
user = User.objects.create(username="testuser")
bs = Subscription(user=user, bill_id="ocd-bill/1")
qs = Subscription(user=user, query="topic", state="ak")
ss = Subscription(user=user, sponsor=Person.objects.get(name="Amanda Adams"))
return bs, qs, ss
@pytest.mark.django_db
def test_subscription_type():
bs, qs, ss = _one_of_each()
assert bs.subscription_type == "bill"
assert qs.subscription_type == "query"
assert ss.subscription_type == "sponsor"
@pytest.mark.django_db
def test_subscription_pretty():
bs, qs, ss = _one_of_each()
assert bs.pretty == "Updates on HB 1 in Alaska 2018"
assert qs.pretty == "Bills matching 'topic' from AK"
assert ss.pretty == "Bills sponsored by Amanda Adams"
@pytest.mark.django_db
def test_complex_pretty():
cs = Subscription(
query="topic",
state="ak",
chamber="upper",
classification="bill",
subjects=["MOOSE", "WILDLIFE"],
status=["passed_lower"],
sponsor=Person.objects.get(name="Amanda Adams"),
)
assert cs.pretty == COMPLEX_STR
@pytest.mark.django_db
def test_subscription_site_url():
bs, qs, ss = _one_of_each()
assert bs.site_url == "/ak/bills/2018/HB1/"
assert qs.site_url == "/ak/bills/?query=topic"
assert ss.site_url.startswith("/person/amanda-adams")
qs.state = None
assert qs.site_url == "/search/?query=topic"
|
openstates/openstates.org
|
profiles/tests/test_models.py
|
Python
|
mit
| 1,878
|
[
"MOOSE"
] |
78c312201877001bbc803141170fd821520d93dbc9f78d5db7889e0d3146ac45
|
# Copyright (C) 2017, Jaguar Land Rover
#
# This program is licensed under the terms and conditions of the
# Mozilla Public License, version 2.0. The full text of the
# Mozilla Public License is at https://www.mozilla.org/MPL/2.0/
#
import inspect
import importlib
class LoaderError(Exception):
"""Base exception for all plugin loader errors."""
pass
def _load_module(modulename):
try:
return importlib.import_module(modulename)
except ImportError:
raise LoaderError("error loading module: {}".format(modulename))
def _method_exists(module, method):
return hasattr(module, method) and inspect.isroutine(getattr(module, method))
def load_plugin(modulename):
"""
This method will load plugin 'modulename'.
It will override the send and receive functions in the caller module.
"""
# Load plugin module.
module = _load_module(modulename)
# Inspect caller module.
caller_info = inspect.stack()[1]
caller_module = inspect.getmodule(caller_info[0])
# Override receive/send functions.
if _method_exists(caller_module, 'receive') and \
_method_exists(module, 'receive'):
caller_module.receive = module.receive
else:
raise LoaderError("error: missing 'receive' method")
if _method_exists(caller_module, 'send') and \
_method_exists(module, 'send'):
caller_module.send = module.send
else:
raise LoaderError("error: missing 'send' method")
if _method_exists(module, 'connect'):
module.connect()
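# A minimal usage sketch (illustrative; 'my_transport' is a hypothetical plugin
# module exposing send() and receive(); the calling module must already define
# both names itself, otherwise load_plugin raises LoaderError):
#
#     import loader
#     loader.load_plugin('my_transport')
#     # this module's send/receive now delegate to the plugin's versions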
|
shanefagan/vehicle_signal_manager
|
ipc/loader.py
|
Python
|
mpl-2.0
| 1,553
|
[
"Jaguar"
] |
9cbe420d762edd5292e33ac4982fda090990e249ccbd32604d11a531d9699e9c
|
# FILE COPIED FROM conf.orig.py; DO NOT CHANGE
# -*- coding: utf-8 -*-
# Copyright 2010, 2014-2015 VPAC
#
# Karaage documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 16 14:28:57 2014.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration ----------------------------------------------------
exec(open("../conf.py", "rb").read())
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Karaage'
copyright = '2014, VPAC'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Karaage-admin'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
('index', 'Karaage.tex', 'Karaage Admin Documentation',
'Brian May', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'karaage',
'Karaage Admin Documentation', [u'Brian May'], 8),
('ref/cmd/kg-manage', 'kg-manage',
'Management for Karaage', [u'Brian May'], 8),
('ref/cmd/kg-set-secret-key', 'kg_set_secret_key',
'Set secret key for Karaage', [u'Brian May'], 8),
('ref/cmd/kg-migrate-south', 'kg-migrate-south',
'Run South migrations for Karaage', [u'Brian May'], 8),
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Karaage', 'Karaage Admin Documentation',
'Brian May', 'Karaage',
'Karaage is a cluster account management tool.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# -- Options for Epub output --------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'Karaage Admin Documentation'
epub_author = 'Brian May'
epub_publisher = 'Brian May'
epub_copyright = '2014, Brian May'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
Karaage-Cluster/karaage-debian
|
docs/admin/conf.py
|
Python
|
gpl-3.0
| 9,316
|
[
"Brian"
] |
613d252646b29d0b542fafc3ab11d8c1478a3be8b76c319ee2c49622fa3aa7ee
|
"""Guess the MIME type of a file.
This module defines two useful functions:
guess_type(url) -- guess the MIME type and encoding of a URL.
guess_extension(type) -- guess the extension for a given MIME type.
It also contains the following, for tuning the behavior:
Data:
knownfiles -- list of files to parse
inited -- flag set when init() has been called
suffixes_map -- dictionary mapping suffixes to suffixes
encodings_map -- dictionary mapping suffixes to encodings
types_map -- dictionary mapping suffixes to types
Functions:
init([files]) -- parse a list of files, default knownfiles
read_mime_types(file) -- parse one file, return a dictionary or None
"""
import posixpath
import urllib
__all__ = ["guess_type","guess_extension","read_mime_types","init"]
knownfiles = [
"/usr/local/etc/httpd/conf/mime.types",
"/usr/local/lib/netscape/mime.types",
"/usr/local/etc/httpd/conf/mime.types", # Apache 1.2
"/usr/local/etc/mime.types", # Apache 1.3
]
inited = 0
def guess_type(url):
"""Guess the type of a file based on its URL.
Return value is a tuple (type, encoding) where type is None if the
type can't be guessed (no or unknown suffix) or a string of the
form type/subtype, usable for a MIME Content-type header; and
encoding is None for no encoding or the name of the program used
to encode (e.g. compress or gzip). The mappings are table
driven. Encoding suffixes are case sensitive; type suffixes are
first tried case sensitive, then case insensitive.
The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped
to ".tar.gz". (This is table-driven too, using the dictionary
suffix_map).
"""
if not inited:
init()
scheme, url = urllib.splittype(url)
if scheme == 'data':
# syntax of data URLs:
# dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
# mediatype := [ type "/" subtype ] *( ";" parameter )
# data := *urlchar
# parameter := attribute "=" value
# type/subtype defaults to "text/plain"
comma = url.find(',')
if comma < 0:
# bad data URL
return None, None
semi = url.find(';', 0, comma)
if semi >= 0:
type = url[:semi]
else:
type = url[:comma]
if '=' in type or '/' not in type:
type = 'text/plain'
return type, None # never compressed, so encoding is None
base, ext = posixpath.splitext(url)
while suffix_map.has_key(ext):
base, ext = posixpath.splitext(base + suffix_map[ext])
if encodings_map.has_key(ext):
encoding = encodings_map[ext]
base, ext = posixpath.splitext(base)
else:
encoding = None
if types_map.has_key(ext):
return types_map[ext], encoding
elif types_map.has_key(ext.lower()):
return types_map[ext.lower()], encoding
else:
return None, encoding
def guess_extension(type):
"""Guess the extension for a file based on its MIME type.
Return value is a string giving a filename extension, including the
leading dot ('.'). The extension is not guaranteed to have been
associated with any particular data stream, but would be mapped to the
MIME type `type' by guess_type(). If no extension can be guessed for
`type', None is returned.
"""
global inited
if not inited:
init()
type = type.lower()
for ext, stype in types_map.items():
if type == stype:
return ext
return None
def init(files=None):
global inited
for file in files or knownfiles:
s = read_mime_types(file)
if s:
for key, value in s.items():
types_map[key] = value
inited = 1
def read_mime_types(file):
try:
f = open(file)
except IOError:
return None
map = {}
while 1:
line = f.readline()
if not line: break
words = line.split()
for i in range(len(words)):
if words[i][0] == '#':
del words[i:]
break
if not words: continue
type, suffixes = words[0], words[1:]
for suff in suffixes:
map['.'+suff] = type
f.close()
return map
suffix_map = {
'.tgz': '.tar.gz',
'.taz': '.tar.gz',
'.tz': '.tar.gz',
}
encodings_map = {
'.gz': 'gzip',
'.Z': 'compress',
}
types_map = {
'.a': 'application/octet-stream',
'.ai': 'application/postscript',
'.aif': 'audio/x-aiff',
'.aifc': 'audio/x-aiff',
'.aiff': 'audio/x-aiff',
'.au': 'audio/basic',
'.avi': 'video/x-msvideo',
'.bcpio': 'application/x-bcpio',
'.bin': 'application/octet-stream',
'.cdf': 'application/x-netcdf',
'.cpio': 'application/x-cpio',
'.csh': 'application/x-csh',
'.dll': 'application/octet-stream',
'.dvi': 'application/x-dvi',
'.exe': 'application/octet-stream',
'.eps': 'application/postscript',
'.etx': 'text/x-setext',
'.gif': 'image/gif',
'.gtar': 'application/x-gtar',
'.hdf': 'application/x-hdf',
'.htm': 'text/html',
'.html': 'text/html',
'.ief': 'image/ief',
'.jpe': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.jpg': 'image/jpeg',
'.js': 'application/x-javascript',
'.latex': 'application/x-latex',
'.man': 'application/x-troff-man',
'.me': 'application/x-troff-me',
'.mif': 'application/x-mif',
'.mov': 'video/quicktime',
'.movie': 'video/x-sgi-movie',
'.mpe': 'video/mpeg',
'.mpeg': 'video/mpeg',
'.mpg': 'video/mpeg',
'.ms': 'application/x-troff-ms',
'.nc': 'application/x-netcdf',
'.o': 'application/octet-stream',
'.obj': 'application/octet-stream',
'.oda': 'application/oda',
'.pbm': 'image/x-portable-bitmap',
'.pdf': 'application/pdf',
'.pgm': 'image/x-portable-graymap',
'.pnm': 'image/x-portable-anymap',
'.png': 'image/png',
'.ppm': 'image/x-portable-pixmap',
'.py': 'text/x-python',
'.pyc': 'application/x-python-code',
'.ps': 'application/postscript',
'.qt': 'video/quicktime',
'.ras': 'image/x-cmu-raster',
'.rgb': 'image/x-rgb',
'.rdf': 'application/xml',
'.roff': 'application/x-troff',
'.rtf': 'application/rtf',
'.rtx': 'text/richtext',
'.sgm': 'text/x-sgml',
'.sgml': 'text/x-sgml',
'.sh': 'application/x-sh',
'.shar': 'application/x-shar',
'.snd': 'audio/basic',
'.so': 'application/octet-stream',
'.src': 'application/x-wais-source',
'.sv4cpio': 'application/x-sv4cpio',
'.sv4crc': 'application/x-sv4crc',
'.t': 'application/x-troff',
'.tar': 'application/x-tar',
'.tcl': 'application/x-tcl',
'.tex': 'application/x-tex',
'.texi': 'application/x-texinfo',
'.texinfo': 'application/x-texinfo',
'.tif': 'image/tiff',
'.tiff': 'image/tiff',
'.tr': 'application/x-troff',
'.tsv': 'text/tab-separated-values',
'.txt': 'text/plain',
'.ustar': 'application/x-ustar',
'.wav': 'audio/x-wav',
'.xbm': 'image/x-xbitmap',
'.xml': 'text/xml',
'.xsl': 'application/xml',
'.xpm': 'image/x-xpixmap',
'.xwd': 'image/x-xwindowdump',
'.zip': 'application/zip',
}
if __name__ == '__main__':
import sys
print guess_type(sys.argv[1])
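# Interactive sketch (illustrative; results depend on which mime.types files
# exist on the system and may add entries beyond the built-in types_map):
#
#     >>> guess_type('report.tgz')
#     ('application/x-tar', 'gzip')
#     >>> guess_extension('application/pdf')
#     '.pdf'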
|
ai-ku/langvis
|
jython-2.1/Lib/mimetypes.py
|
Python
|
mit
| 7,593
|
[
"NetCDF"
] |
de25ba44837f167a88917f98e15e3c73dbb26dba90fd5ac0226e5905d085d1c2
|
#build list of available data
import sys
builds= []
try:
#read db names from file, this file is also used in galaxy/util.py
for line in open("static/ucsc/builds.txt"):
if line[0:1] == "#": continue
try:
fields = line.replace("\r","").replace("\n","").split("\t")
builds.append((fields[1], fields[0], False))
except: continue
except Exception, exc:
print >>sys.stdout, 'upload_code.py initialization error -> %s' % exc
#return available builds
def get_available_builds():
try:
available_options = builds[0:]
except:
available_options = []
if len(available_options) < 1:
available_options.append(('unspecified','?',True))
return available_options
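# A minimal usage sketch (illustrative): tool wrappers call
# get_available_builds() to populate a build select list; each entry appears
# to be a (label, db_key, selected) tuple, falling back to
# ('unspecified', '?', True) when builds.txt is missing or empty.
#
#     for label, db_key, selected in get_available_builds():
#         print label, db_key, selected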
|
jmchilton/galaxy-central
|
tools/data_source/upload_code.py
|
Python
|
mit
| 772
|
[
"Galaxy"
] |
36ca2c697622f9c8ca94c98f773da6d1b62c0bb791b79f4d6b5702bd2ffe8aa0
|
import tkSimpleDialog
import tkMessageBox
from pymol.wizard import Wizard
from pymol import cmd, preset
import os,sys
cwd = os.getcwd() #ensure pymol can find libraries
sys.path.append(cwd)
import selector
import p3d.protein
import p3d.geo
cmd.set_wizard(selector.selector('{name}','{chain}','{resid}','{resid}'))
cmd.load('{prot}_all.pdb')
#initial view
preset.publication('all')
cmd.show("lines",'resid {resid}')
cmd.hide("sticks")
cmd.zoom('resid {resid}')
#tkMessageBox.showerror('Testing','Testing')
#new = tkSimpleDialog.askstring('Testing:','TT')
#model = p3d.protein.Protein('2jg4a.pdb')
#print model.output()
print "Yes"
#print new
|
tmorrell/SamStruct
|
inputs/pymol_view.py
|
Python
|
gpl-2.0
| 650
|
[
"PyMOL"
] |
22594e0485746121bab77cdd311286f3a91e90ea6a78c2fe66bde54a4bdc4c52
|
import numpy as np
import tensorflow as tf
import argparse
import time
import os
import cPickle
from utils import DataLoader
from model import Model
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--rnn_size', type=int, default=256,
help='size of RNN hidden state')
parser.add_argument('--num_layers', type=int, default=2,
help='number of layers in the RNN')
parser.add_argument('--model', type=str, default='lstm',
help='rnn, gru, or lstm')
parser.add_argument('--batch_size', type=int, default=50,
help='minibatch size')
parser.add_argument('--seq_length', type=int, default=300,
help='RNN sequence length')
parser.add_argument('--num_epochs', type=int, default=30,
help='number of epochs')
parser.add_argument('--save_every', type=int, default=500,
help='save frequency')
parser.add_argument('--grad_clip', type=float, default=10.,
help='clip gradients at this value')
parser.add_argument('--learning_rate', type=float, default=0.005,
help='learning rate')
parser.add_argument('--decay_rate', type=float, default=0.95,
help='decay rate for rmsprop')
parser.add_argument('--num_mixture', type=int, default=20,
help='number of gaussian mixtures')
parser.add_argument('--data_scale', type=float, default=20,
help='factor to scale raw data down by')
parser.add_argument('--keep_prob', type=float, default=0.8,
help='dropout keep probability')
args = parser.parse_args()
train(args)
def train(args):
data_loader = DataLoader(args.batch_size, args.seq_length, args.data_scale)
with open(os.path.join('save', 'config.pkl'), 'w') as f:
cPickle.dump(args, f)
model = Model(args)
with tf.Session() as sess:
tf.initialize_all_variables().run()
saver = tf.train.Saver(tf.all_variables())
for e in xrange(args.num_epochs):
sess.run(tf.assign(model.lr, args.learning_rate * (args.decay_rate ** e)))
data_loader.reset_batch_pointer()
state = model.initial_state.eval()
for b in xrange(data_loader.num_batches):
start = time.time()
x, y = data_loader.next_batch()
feed = {model.input_data: x, model.target_data: y, model.initial_state: state}
train_loss, state, _ = sess.run([model.cost, model.final_state, model.train_op], feed)
end = time.time()
print "{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}" \
.format(e * data_loader.num_batches + b,
args.num_epochs * data_loader.num_batches,
e, train_loss, end - start)
if (e * data_loader.num_batches + b) % args.save_every == 0 and ((e * data_loader.num_batches + b) > 0):
checkpoint_path = os.path.join('save', 'model.ckpt')
saver.save(sess, checkpoint_path, global_step = e * data_loader.num_batches + b)
print "model saved to {}".format(checkpoint_path)
if __name__ == '__main__':
main()
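# A typical invocation (illustrative; all flags map to the argparse options
# defined in main() above):
#
#     python train.py --rnn_size 256 --num_layers 2 --model lstm \
#                     --batch_size 50 --num_epochs 30
#
# Checkpoints land in save/model.ckpt-<global_step> every --save_every batches,
# alongside save/config.pkl; note the 'save' directory must already exist.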
|
ruohoruotsi/Wavelet-Tree-Synth
|
nnet/write-rnn-tensorflow-master/train.py
|
Python
|
gpl-2.0
| 3,320
|
[
"Gaussian"
] |
8a9fd8c1ba1cea6016b260be140739c1bbea03b907cd14f5c44cf68578ae432a
|
# -*- coding: utf-8 -*-
#
# crud_fusion documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'crud_fusion'
copyright = u"2015, Brian Criswell"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'crud_fusiondoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
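# As an illustration (not this project's setting), selecting A4 paper and a
# larger base font would be:
# latex_elements = {'papersize': 'a4paper', 'pointsize': '11pt'}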
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'crud_fusion.tex',
u'crud_fusion Documentation',
u"Brian Criswell", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'crud_fusion', u'crud_fusion Documentation',
[u"Brian Criswell"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'crud_fusion', u'crud_fusion Documentation',
u"Brian Criswell", 'crud_fusion',
'A demo Django application with very basic CRUD functionality.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
BCriswell/crud-fusion
|
docs/conf.py
|
Python
|
bsd-3-clause
| 7,863
|
[
"Brian"
] |
8e1cc737b3955d0b40644bdfc5986f2ae6d16db5cbce72bb09f85125c43dfb1d
|
import warnings
import mdtraj as md
import numpy as np
from msmbuilder.featurizer import LigandContactFeaturizer
from msmbuilder.featurizer import BinaryLigandContactFeaturizer
from msmbuilder.featurizer import LigandRMSDFeaturizer
def _random_trajs():
top = md.Topology()
c = top.add_chain()
r = top.add_residue('HET', c)
r2 = top.add_residue('HET', c)
r3 = top.add_residue('HET', c)
cx = top.add_chain()
rx = top.add_residue('HET', cx)
for _ in range(10):
top.add_atom('CA', md.element.carbon, r)
top.add_atom('CA', md.element.carbon, r2)
top.add_atom('CA', md.element.carbon, r3)
for _ in range(10):
top.add_atom('CA', md.element.carbon, rx)
traj = md.Trajectory(xyz=np.random.uniform(size=(100, 40, 3)),
topology=top,
time=np.arange(100))
ref = md.Trajectory(xyz=np.random.uniform(size=(1, 40, 3)),
topology=top,
time=np.arange(1))
return traj, ref
def test_chain_guessing():
traj, ref = _random_trajs()
feat = LigandContactFeaturizer(reference_frame=ref)
contacts = feat.transform(traj)
assert feat.protein_chain == 0
assert feat.ligand_chain == 1
assert len(contacts) == 100
assert contacts[0].shape[1] == 3
def test_binding_pocket():
traj, ref = _random_trajs()
feat = LigandContactFeaturizer(reference_frame=ref)
pocket_ref = feat.transform([ref])
limit = (max(pocket_ref[0][0]) + min(pocket_ref[0][0]))/2.0
number_included = sum(pocket_ref[0][0] < limit)
pocket_feat = LigandContactFeaturizer(reference_frame=ref,
binding_pocket=limit)
pocket_contacts = pocket_feat.transform(traj)
assert len(pocket_contacts[0][0]) == number_included
def test_binaries():
traj, ref = _random_trajs()
feat = BinaryLigandContactFeaturizer(reference_frame=ref, cutoff=0.1)
binaries = feat.transform(traj)
assert np.sum(binaries[:]) <= len(binaries)*binaries[0].shape[1]
def test_binaries_binding_pocket():
traj, ref = _random_trajs()
feat = LigandContactFeaturizer(reference_frame=ref)
pocket_ref = feat.transform([ref])
limit = (max(pocket_ref[0][0]) + min(pocket_ref[0][0]))/2.0
cutoff = limit*0.8
number_included = sum(pocket_ref[0][0] < limit)
pocket_feat = BinaryLigandContactFeaturizer(reference_frame=ref,
cutoff=cutoff,
binding_pocket=limit)
pocket_binaries = pocket_feat.transform(traj)
assert len(pocket_binaries[0][0]) == number_included
assert (np.sum(pocket_binaries[:]) <=
len(pocket_binaries)*pocket_binaries[0].shape[1])
def test_single_index_rmsd():
traj, ref = _random_trajs()
feat = LigandRMSDFeaturizer(reference_frame=ref,
calculate_indices=[ref.n_atoms-1])
single_cindex = feat.transform([traj])
assert np.unique(single_cindex).shape[0] > 1
# this actually won't pass for standard mdtraj rmsd
# with len(atom_indices)=1, I think because of the superposition
# built into the calculation
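# A minimal sketch of the failing comparison described above (illustrative,
# not collected by the test runner; reuses the _random_trajs helper). With a
# single atom index, md.rmsd superposes on that one atom first, so the
# resulting distances collapse to ~0, unlike the featurizer's output.
def _single_atom_mdtraj_rmsd_sketch():
    traj, ref = _random_trajs()
    # superposition on one atom makes every frame's RMSD (nearly) zero
    return md.rmsd(traj, ref, frame=0, atom_indices=[ref.n_atoms - 1])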
def test_mdtraj_equivalence():
traj, ref = _random_trajs()
feat = LigandRMSDFeaturizer(reference_frame=ref, align_by='custom',
calculate_for='custom', align_indices=range(ref.n_atoms),
calculate_indices=range(ref.n_atoms))
multi_chain = feat.transform([traj])
md_traj = md.rmsd(traj,ref,frame=0)
np.testing.assert_almost_equal(multi_chain[0][:, 0], md_traj, decimal=4)
|
mpharrigan/mixtape
|
msmbuilder/tests/test_ligandfeaturizers.py
|
Python
|
lgpl-2.1
| 3,659
|
[
"MDTraj"
] |
f7c55832c929604f8d1e1bce93d728df5b8b42345684d5c14a1c6ab9504855ab
|
# Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
# Copyright 2010 Joanmarie Diggs, Mesar Hameed
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Holds state that is shared among many modules.
"""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc. " \
                "Copyright (c) 2010 Joanmarie Diggs, Mesar Hameed."
__license__ = "LGPL"
# NOTE: resist the temptation to do any imports here. They can
# easily cause circular imports.
#
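# A sketch of the pattern this note implies (illustrative, not Orca code):
# consumers import this module lazily, inside a function body, so that
# loading orca_state never pulls in the rest of Orca at import time.
#
#     def _describe_focus():
#         from orca import orca_state  # deferred import avoids cycles
#         return str(orca_state.locusOfFocus)
#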
# The Accessible that has visual focus.
#
locusOfFocus = None
# The currently active window.
#
activeWindow = None
# The currently active script.
#
activeScript = None
# The currently active mode (focus, say all, flat review, etc.)
activeMode = None
# Used to capture keys to redefine key bindings by the user.
#
capturingKeys = False
# The last non-modifier key event received.
#
lastNonModifierKeyEvent = None
# The InputEvent instance representing the last input event. This is
# set each time a mouse, keyboard or braille event is received.
#
lastInputEvent = None
# Used to determine if the user wishes Orca to pass the next command
# along to the current application rather than consuming it.
#
bypassNextCommand = False
# The last searchQuery
#
searchQuery = None
# Assists with learn mode (what you enter when you press Insert+F1
# and exit when you press Escape).
#
learnModeEnabled = False
# Handle to the Orca Preferences Glade GUI object.
#
orcaOS = None
listNotificationsModeEnabled = False
# Set to True if the last key opened the preferences dialog
#
openingDialog = False
# The AT-SPI device (needed for key grabs). Will be set to None if AT-SPI
# is too old to support the new device API.
#
device = None
|
GNOME/orca
|
src/orca/orca_state.py
|
Python
|
lgpl-2.1
| 2,456
|
[
"ORCA"
] |
4281e87108050eea5f560bbcbecccb8552173dfd2be31efffaf581d27f641500
|
from neuron import Neuron
from inputprocessor import InputProcessor
import numpy as np
import pickle
class NeuralNetwork:
def __init__(self,num_dim,num_hidden,num_out):
self.num_hidden = num_hidden
self.num_out = num_out
self.hidden_layer = [Neuron(num_dim) for i in xrange(num_hidden)]
self.output_layer = [Neuron(num_hidden) for i in xrange(num_out)]
def feed_forward(self,x):
hidden_layer_out = []
total_error = 0
for neuron in self.hidden_layer:
hidden_layer_out.append(neuron.out_value(x))
output_layer_out = []
hidden_layer_out.append(1)
for neuron in self.output_layer:
h = neuron.out_value(hidden_layer_out)
output_layer_out.append(h)
#print output_layer_out
def train(self,input_val,output_val):
i = 0
iter = 0
while iter < 100:
iter += 1
#print iter
for k in xrange(len(input_val)):
hidden_layer_out = []
total_error = 0
for neuron in self.hidden_layer:
#print 'Hidden'+str(iter)
hidden_layer_out.append(neuron.out_value(input_val[k]))
output_layer_out = []
hidden_layer_out.append(1)
for neuron in self.output_layer:
h = neuron.out_value(hidden_layer_out)
output_layer_out.append(h)
total_error += neuron.calculate_error(output_val[k])
#print total_error
delta_k = []
for n in xrange(len(self.output_layer)):
delta_k.append(self.output_layer[n].update_weight_hidden(hidden_layer_out,output_val[k][n]))
for n in xrange(len(self.hidden_layer)):
self.hidden_layer[n].update_weight_input(input_val[k],self.hidden_layer,n,self.output_layer,delta_k)
def run_validation(self,input,output):
e = 0
for i in xrange(len(input)):
self.feed_forward(input[i])
for j in xrange(len(self.output_layer)):
# print self.output_layer
# print output
e += self.output_layer[j].calculate_error(output[i][j])
#print e
print e/len(input)
if __name__ == '__main__':
ip = InputProcessor('data/optdigits-orig.tra')
dataset = ip.read_input()
cv = InputProcessor('data/optdigits-orig.cv')
cvset = cv.read_input()
#dataset = ip.read_processed_input()
#print dataset
#print dataset['input'].shape[1]
#print np.unique(dataset['output']).shape[0]
#Credit for this heuristic for the number of hidden-layer neurons:
#http://stats.stackexchange.com/questions/181/how-to-choose-the-number-of-hidden-layers-and-nodes-in-a-feedforward-neural-netw
#alpha = 2
#nh = dataset['input'].shape[0]/(alpha*(dataset['input'].shape[1]+np.unique(dataset['output']).shape[0]))
#print (alpha*(dataset['input'].shape[1]+np.unique(dataset['output']).shape[0]))
#print dataset['input'].shape[0]
#print dataset['input'].shape[1]
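#A sketch of that heuristic with alpha = 2 (illustrative; shape names assume
#the dict returned by ip.read_input()):
#  nh = dataset['input'].shape[0] / (2 * (dataset['input'].shape[1] + np.unique(dataset['output']).shape[0]))
#e.g. 1000 samples, 64 features, 10 classes -> 1000 / (2 * 74) ~= 6 hidden units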
for nh in xrange(2,8):
print nh
n = NeuralNetwork(dataset['input'].shape[1]-1, nh ,np.unique(dataset['output']).shape[0])
n.run_validation(cvset['input'],cvset['output'])
# input_val = np.array([[-5],[-1],[1],[6]])
# output_val = np.array([0,1,1,0])
#input_val = np.array([[2, 7,1], [8, 1,1], [7, 5,1], [6, 3,1],[7, 8,1],[5, 9,1],[4, 5,1],[4, 2,1],[-1, -1,1],[1, 3,1], [3, -2,1], [5, 3.25,1], [2, 4,1],[7, 1,1]])
#output_val = np.array([1,1,0,0,1,1,1,1,0,1,1,1,0,1])
#input_val = np.array([[7,1], [1, 1], [-5,1], [-3,1],[3,1],[-8,1],[5,1],[2,1],[-1,1],[3,1], [-9,1], [3.25,1], [-4,1],[0,1]])
#input_val = np.array([[1,1,1],[1,0,1],[0,1,1],[0,0,1]])
#output_val = np.array([0,1,1,0])
n.train(dataset['input'],dataset['output'])
# print 'HIDDEN LAYER WEIGHTS'
# for h in n.hidden_layer:
# print h.w
# print '==========='
# print 'OUT LAYER WEIGHTS'
# for h in n.output_layer:
# print h.w
n.run_validation(cvset['input'],cvset['output'])
pickle.dumps(n)
# n.feed_forward(cvset['input'][0])
# n.feed_forward(cvset['input'][1])
# n.feed_forward(cvset['input'][2])
#n.feed_forward([3,1])
#n.feed_forward([-2,1])
#n.feed_forward([7,1])
#n.feed_forward([5,1])
#n.feed_forward([-1,1])
#n.feed_forward([-6,1])
#print output_layer_out
#print hidden_layer_out
|
SahilC/NeuralNetworks
|
neuralnetwork.py
|
Python
|
mit
| 4,595
|
[
"NEURON"
] |
8e1a5b9726ffe4e47922a7732e17bfc9760d55bc0a398727d0562fe450a73c2b
|
#!/usr/bin/env python
#
# This script reads a JSON file and outputs a CSV file.
# Note: does not decompose lower-level (nested) objects.
# Brian McKean
#
import fileinput
import json
import csv
import sys
lines = []
for line in fileinput.input():
lines.append(line)
myjson = json.loads(''.join(lines))
keys = {}
for i in myjson:
for k in i.keys():
keys[k] = 1
mycsv = csv.DictWriter(sys.stdout, fieldnames=keys.keys(),
quoting=csv.QUOTE_MINIMAL)
mycsv.writeheader()
for row in myjson:
mycsv.writerow(row)
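# Usage sketch (file names are assumptions, not from the repo):
#   python json2csv1.py input.json > output.csv
# A top-level JSON array such as [{"a": 1, "b": 2}, {"a": 3}] yields a CSV
# with header a,b; rows missing a key are padded with DictWriter's default
# empty-string restval.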
|
co-bri/book
|
hackathons/fcq/json2csv1.py
|
Python
|
mit
| 540
|
[
"Brian"
] |
4ede9ac15cfafa9590ef2115c197de21c9c0ce740c30a1d2f7c1910d02ab1fc8
|
'''
@file : testFile6.py
@author (A) : Madhu Kumar Dadi.
@project : Social List
@function :
test6(postags) : checks for presence of nouns in the hashtag
@postags : a list containing pos tags for a hashtag
return : count of nouns
@Licence :
This work is licensed under the
Creative Commons Attribution-NonCommercial-ShareAlike 4.0
International License. To view a copy of this license,
visit http://creativecommons.org/licenses/by-nc-sa/4.0/.
'''
from collections import Counter
def test6(postags):
postagsent = "".join(postags)
count = Counter(postagsent)
if count['N']+count['^'] == 0:
return "0"
return count['N']+count['^']
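# Worked example (illustrative): for postags ['N', '^', 'V'] the joined string
# is "N^V", so count['N'] + count['^'] == 2 and test6 returns 2; with no nouns
# at all (e.g. ['V', 'P']) it returns the string "0".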
|
SummerProject16/project
|
CMUTweetTagger/testFile6.py
|
Python
|
cc0-1.0
| 650
|
[
"VisIt"
] |
f6602d4230d1c80fc3ae451d6c9001bee8364dec87200c0353f3ca45d67ebaa6
|
#!/usr/bin/python2.6
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for fake_filesystem module."""
import errno
import os
import re
import stat
import sys
import time
import unittest
import fake_filesystem
def _GetDummyTime(start_time, increment):
def _DummyTime():
_DummyTime._curr_time += increment
return _DummyTime._curr_time
_DummyTime._curr_time = start_time - increment # pylint: disable-msg=W0612
return _DummyTime
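# Worked example (illustrative): after time.time = _GetDummyTime(10, 1), the
# first call returns 10, the next 11, and so on, giving the deterministic
# st_ctime/st_mtime values asserted in the tests below.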
class TestCase(unittest.TestCase):
def assertModeEqual(self, expected, actual):
return self.assertEqual(stat.S_IMODE(expected), stat.S_IMODE(actual))
class FakeDirectoryUnitTest(unittest.TestCase):
def setUp(self):
self.orig_time = time.time
time.time = _GetDummyTime(10, 1)
self.fake_file = fake_filesystem.FakeFile('foobar', contents='dummy_file')
self.fake_dir = fake_filesystem.FakeDirectory('somedir')
def tearDown(self):
time.time = self.orig_time
def testNewFileAndDirectory(self):
self.assertTrue(stat.S_IFREG & self.fake_file.st_mode)
self.assertTrue(stat.S_IFDIR & self.fake_dir.st_mode)
self.assertEqual({}, self.fake_dir.contents)
self.assertEqual(10, self.fake_file.st_ctime)
def testAddEntry(self):
self.fake_dir.AddEntry(self.fake_file)
self.assertEqual({'foobar': self.fake_file}, self.fake_dir.contents)
def testGetEntry(self):
self.fake_dir.AddEntry(self.fake_file)
self.assertEqual(self.fake_file, self.fake_dir.GetEntry('foobar'))
def testRemoveEntry(self):
self.fake_dir.AddEntry(self.fake_file)
self.assertEqual(self.fake_file, self.fake_dir.GetEntry('foobar'))
self.fake_dir.RemoveEntry('foobar')
self.assertRaises(KeyError, self.fake_dir.GetEntry, 'foobar')
def testShouldThrowIfSetSizeIsNotInteger(self):
self.assertRaises(IOError, self.fake_file.SetSize, 0.1)
def testShouldThrowIfSetSizeIsNegative(self):
self.assertRaises(IOError, self.fake_file.SetSize, -1)
def testProduceEmptyFileIfSetSizeIsZero(self):
self.fake_file.SetSize(0)
self.assertEqual('', self.fake_file.contents)
def testSetsContentEmptyIfSetSizeIsZero(self):
self.fake_file.SetSize(0)
self.assertEqual('', self.fake_file.contents)
def testTruncateFileIfSizeIsSmallerThanCurrentSize(self):
self.fake_file.SetSize(6)
self.assertEqual('dummy_', self.fake_file.contents)
def testLeaveFileUnchangedIfSizeIsEqualToCurrentSize(self):
self.fake_file.SetSize(10)
self.assertEqual('dummy_file', self.fake_file.contents)
def testPadsFileContentWithNullBytesIfSizeIsGreaterThanCurrentSize(self):
self.fake_file.SetSize(13)
self.assertEqual('dummy_file\0\0\0', self.fake_file.contents)
def testSetMTime(self):
self.assertEqual(10, self.fake_file.st_mtime)
self.fake_file.SetMTime(13)
self.assertEqual(13, self.fake_file.st_mtime)
self.fake_file.SetMTime(131)
self.assertEqual(131, self.fake_file.st_mtime)
def testFileInode(self):
filesystem = fake_filesystem.FakeFilesystem()
fake_os = fake_filesystem.FakeOsModule(filesystem)
file_path = 'some_file1'
filesystem.CreateFile(file_path, contents='contents here1', inode=42)
self.assertEqual(42, fake_os.stat(file_path)[stat.ST_INO])
file_obj = filesystem.GetObject(file_path)
file_obj.SetIno(43)
self.assertEqual(43, fake_os.stat(file_path)[stat.ST_INO])
def testDirectoryInode(self):
filesystem = fake_filesystem.FakeFilesystem()
fake_os = fake_filesystem.FakeOsModule(filesystem)
dirpath = 'testdir'
filesystem.CreateDirectory(dirpath, inode=42)
self.assertEqual(42, fake_os.stat(dirpath)[stat.ST_INO])
dir_obj = filesystem.GetObject(dirpath)
dir_obj.SetIno(43)
self.assertEqual(43, fake_os.stat(dirpath)[stat.ST_INO])
class SetLargeFileSizeTest(FakeDirectoryUnitTest):
def testShouldThrowIfSizeIsNotInteger(self):
self.assertRaises(IOError, self.fake_file.SetLargeFileSize, 0.1)
def testShouldThrowIfSizeIsNegative(self):
self.assertRaises(IOError, self.fake_file.SetLargeFileSize, -1)
def testSetsContentNoneIfSizeIsNonNegativeInteger(self):
self.fake_file.SetLargeFileSize(1000000000)
self.assertEqual(None, self.fake_file.contents)
self.assertEqual(1000000000, self.fake_file.st_size)
class NormalizePathTest(unittest.TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem()
self.root_name = os.path.sep
def testEmptyPathShouldGetNormalizedToRootPath(self):
self.assertEqual(self.root_name, self.filesystem.NormalizePath(''))
def testRootPathRemainsUnchanged(self):
self.assertEqual(self.root_name,
self.filesystem.NormalizePath(self.root_name))
def testRelativePathForcedToCwd(self):
path = 'bar'
self.filesystem.cwd = '/foo'
self.assertEqual('/foo/bar', self.filesystem.NormalizePath(path))
def testAbsolutePathRemainsUnchanged(self):
path = '/foo/bar'
self.assertEqual(path, self.filesystem.NormalizePath(path))
def testDottedPathIsNormalized(self):
path = '/foo/..'
self.assertEqual('/', self.filesystem.NormalizePath(path))
def testDotPathIsNormalized(self):
path = '.'
self.assertEqual('/', self.filesystem.NormalizePath(path))
class GetPathComponentsTest(unittest.TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem()
self.root_name = os.path.sep
def testRootPathShouldReturnEmptyList(self):
self.assertEqual([], self.filesystem.GetPathComponents(self.root_name))
def testEmptyPathShouldReturnEmptyList(self):
self.assertEqual([], self.filesystem.GetPathComponents(''))
def testRelativePathWithOneComponentShouldReturnComponent(self):
self.assertEqual(['foo'], self.filesystem.GetPathComponents('foo'))
def testAbsolutePathWithOneComponentShouldReturnComponent(self):
self.assertEqual(['foo'], self.filesystem.GetPathComponents('/foo'))
def testTwoLevelRelativePathShouldReturnComponents(self):
self.assertEqual(['foo', 'bar'],
self.filesystem.GetPathComponents('foo/bar'))
def testTwoLevelAbsolutePathShouldReturnComponents(self):
self.assertEqual(['foo', 'bar'],
self.filesystem.GetPathComponents('/foo/bar'))
class FakeFilesystemUnitTest(unittest.TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem()
self.root_name = os.path.sep
self.fake_file = fake_filesystem.FakeFile('foobar')
self.fake_child = fake_filesystem.FakeDirectory('foobaz')
self.fake_grandchild = fake_filesystem.FakeDirectory('quux')
def testNewFilesystem(self):
self.assertEqual(os.path.sep, self.filesystem.path_separator)
self.assertTrue(stat.S_IFDIR & self.filesystem.root.st_mode)
self.assertEqual(self.root_name, self.filesystem.root.name)
self.assertEqual({}, self.filesystem.root.contents)
def testNoneRaisesTypeError(self):
self.assertRaises(TypeError, self.filesystem.Exists, None)
def testEmptyStringDoesNotExist(self):
self.assertFalse(self.filesystem.Exists(''))
def testExistsRoot(self):
self.assertTrue(self.filesystem.Exists(self.root_name))
def testExistsUnaddedFile(self):
self.assertFalse(self.filesystem.Exists(self.fake_file.name))
def testGetRootObject(self):
self.assertEqual(self.filesystem.root,
self.filesystem.GetObject(self.root_name))
def testAddObjectToRoot(self):
self.filesystem.AddObject(self.root_name, self.fake_file)
self.assertEqual({'foobar': self.fake_file}, self.filesystem.root.contents)
def testExistsAddedFile(self):
self.filesystem.AddObject(self.root_name, self.fake_file)
self.assertTrue(self.filesystem.Exists(self.fake_file.name))
def testExistsRelativePath(self):
self.filesystem.CreateFile('/a/b/file_one')
self.filesystem.CreateFile('/a/c/file_two')
self.assertTrue(self.filesystem.Exists('a/b/../c/file_two'))
self.assertTrue(self.filesystem.Exists('/a/c/../b/file_one'))
self.assertTrue(self.filesystem.Exists('/a/c/../../a/b/file_one'))
self.assertFalse(self.filesystem.Exists('a/b/../z/d'))
self.assertFalse(self.filesystem.Exists('a/b/../z/../c/file_two'))
self.filesystem.cwd = '/a/c'
self.assertTrue(self.filesystem.Exists('../b/file_one'))
self.assertTrue(self.filesystem.Exists('../../a/b/file_one'))
self.assertTrue(self.filesystem.Exists('../../a/b/../../a/c/file_two'))
self.assertFalse(self.filesystem.Exists('../z/file_one'))
self.assertFalse(self.filesystem.Exists('../z/../c/file_two'))
def testGetObjectFromRoot(self):
self.filesystem.AddObject(self.root_name, self.fake_file)
self.assertEqual(self.fake_file, self.filesystem.GetObject('foobar'))
def testGetNonexistentObjectFromRootError(self):
self.filesystem.AddObject(self.root_name, self.fake_file)
self.assertEqual(self.fake_file, self.filesystem.GetObject('foobar'))
self.assertRaises(IOError, self.filesystem.GetObject,
'some_bogus_filename')
def testRemoveObjectFromRoot(self):
self.filesystem.AddObject(self.root_name, self.fake_file)
self.filesystem.RemoveObject(self.fake_file.name)
self.assertRaises(IOError, self.filesystem.GetObject, self.fake_file.name)
def testRemoveNonexistentObjectFromRootError(self):
self.assertRaises(IOError, self.filesystem.RemoveObject,
'some_bogus_filename')
def testExistsRemovedFile(self):
self.filesystem.AddObject(self.root_name, self.fake_file)
self.filesystem.RemoveObject(self.fake_file.name)
self.assertFalse(self.filesystem.Exists(self.fake_file.name))
def testAddObjectToChild(self):
self.filesystem.AddObject(self.root_name, self.fake_child)
self.filesystem.AddObject(self.fake_child.name, self.fake_file)
self.assertEqual(
{self.fake_file.name: self.fake_file},
self.filesystem.root.GetEntry(self.fake_child.name).contents)
def testAddObjectToRegularFileError(self):
self.filesystem.AddObject(self.root_name, self.fake_file)
self.assertRaises(IOError, self.filesystem.AddObject,
self.fake_file.name, self.fake_file)
def testExistsFileAddedToChild(self):
self.filesystem.AddObject(self.root_name, self.fake_child)
self.filesystem.AddObject(self.fake_child.name, self.fake_file)
path = os.path.join(self.fake_child.name, self.fake_file.name)
self.assertTrue(self.filesystem.Exists(path))
def testGetObjectFromChild(self):
self.filesystem.AddObject(self.root_name, self.fake_child)
self.filesystem.AddObject(self.fake_child.name, self.fake_file)
self.assertEqual(self.fake_file,
self.filesystem.GetObject(
os.path.join(self.fake_child.name,
self.fake_file.name)))
def testGetNonexistentObjectFromChildError(self):
self.filesystem.AddObject(self.root_name, self.fake_child)
self.filesystem.AddObject(self.fake_child.name, self.fake_file)
self.assertRaises(IOError, self.filesystem.GetObject,
os.path.join(self.fake_child.name,
'some_bogus_filename'))
def testRemoveObjectFromChild(self):
self.filesystem.AddObject(self.root_name, self.fake_child)
self.filesystem.AddObject(self.fake_child.name, self.fake_file)
target_path = os.path.join(self.fake_child.name, self.fake_file.name)
self.filesystem.RemoveObject(target_path)
self.assertRaises(IOError, self.filesystem.GetObject, target_path)
def testRemoveObjectFromChildError(self):
self.filesystem.AddObject(self.root_name, self.fake_child)
self.assertRaises(IOError, self.filesystem.RemoveObject,
os.path.join(self.fake_child.name,
'some_bogus_filename'))
def testRemoveObjectFromNonDirectoryError(self):
self.filesystem.AddObject(self.root_name, self.fake_file)
self.assertRaises(
IOError, self.filesystem.RemoveObject,
os.path.join('%s' % self.fake_file.name,
'file_does_not_matter_since_parent_not_a_directory'))
def testExistsFileRemovedFromChild(self):
self.filesystem.AddObject(self.root_name, self.fake_child)
self.filesystem.AddObject(self.fake_child.name, self.fake_file)
path = os.path.join(self.fake_child.name, self.fake_file.name)
self.filesystem.RemoveObject(path)
self.assertFalse(self.filesystem.Exists(path))
def testOperateOnGrandchildDirectory(self):
self.filesystem.AddObject(self.root_name, self.fake_child)
self.filesystem.AddObject(self.fake_child.name, self.fake_grandchild)
grandchild_directory = os.path.join(self.fake_child.name,
self.fake_grandchild.name)
grandchild_file = os.path.join(grandchild_directory, self.fake_file.name)
self.assertRaises(IOError, self.filesystem.GetObject, grandchild_file)
self.filesystem.AddObject(grandchild_directory, self.fake_file)
self.assertEqual(self.fake_file,
self.filesystem.GetObject(grandchild_file))
self.assertTrue(self.filesystem.Exists(grandchild_file))
self.filesystem.RemoveObject(grandchild_file)
self.assertRaises(IOError, self.filesystem.GetObject, grandchild_file)
self.assertFalse(self.filesystem.Exists(grandchild_file))
def testCreateDirectoryInRootDirectory(self):
path = 'foo'
self.filesystem.CreateDirectory(path)
new_dir = self.filesystem.GetObject(path)
self.assertEqual(os.path.basename(path), new_dir.name)
self.assertTrue(stat.S_IFDIR & new_dir.st_mode)
def testCreateDirectoryInRootDirectoryAlreadyExistsError(self):
path = 'foo'
self.filesystem.CreateDirectory(path)
self.assertRaises(OSError, self.filesystem.CreateDirectory, path)
def testCreateDirectory(self):
path = 'foo/bar/baz'
self.filesystem.CreateDirectory(path)
new_dir = self.filesystem.GetObject(path)
self.assertEqual(os.path.basename(path), new_dir.name)
self.assertTrue(stat.S_IFDIR & new_dir.st_mode)
# Create second directory to make sure first is OK.
path = '%s/quux' % path
self.filesystem.CreateDirectory(path)
new_dir = self.filesystem.GetObject(path)
self.assertEqual(os.path.basename(path), new_dir.name)
self.assertTrue(stat.S_IFDIR & new_dir.st_mode)
def testCreateDirectoryAlreadyExistsError(self):
path = 'foo/bar/baz'
self.filesystem.CreateDirectory(path)
self.assertRaises(OSError, self.filesystem.CreateDirectory, path)
def testCreateFileInCurrentDirectory(self):
path = 'foo'
contents = 'dummy data'
self.filesystem.CreateFile(path, contents=contents)
self.assertTrue(self.filesystem.Exists(path))
self.assertFalse(self.filesystem.Exists(os.path.dirname(path)))
path = './%s' % path
self.assertTrue(self.filesystem.Exists(os.path.dirname(path)))
def testCreateFileInRootDirectory(self):
path = '/foo'
contents = 'dummy data'
self.filesystem.CreateFile(path, contents=contents)
new_file = self.filesystem.GetObject(path)
self.assertTrue(self.filesystem.Exists(path))
self.assertTrue(self.filesystem.Exists(os.path.dirname(path)))
self.assertEqual(os.path.basename(path), new_file.name)
self.assertTrue(stat.S_IFREG & new_file.st_mode)
self.assertEqual(contents, new_file.contents)
def testCreateFileWithSizeButNoContentCreatesLargeFile(self):
path = 'large_foo_bar'
self.filesystem.CreateFile(path, st_size=100000000)
new_file = self.filesystem.GetObject(path)
self.assertEqual(None, new_file.contents)
self.assertEqual(100000000, new_file.st_size)
def testCreateFileInRootDirectoryAlreadyExistsError(self):
path = 'foo'
self.filesystem.CreateFile(path)
self.assertRaises(IOError, self.filesystem.CreateFile, path)
def testCreateFile(self):
path = 'foo/bar/baz'
retval = self.filesystem.CreateFile(path, contents='dummy_data')
self.assertTrue(self.filesystem.Exists(path))
self.assertTrue(self.filesystem.Exists(os.path.dirname(path)))
new_file = self.filesystem.GetObject(path)
self.assertEqual(os.path.basename(path), new_file.name)
self.assertTrue(stat.S_IFREG & new_file.st_mode)
self.assertEqual(new_file, retval)
def testCreateFileAlreadyExistsError(self):
path = 'foo/bar/baz'
self.filesystem.CreateFile(path, contents='dummy_data')
self.assertRaises(IOError, self.filesystem.CreateFile, path)
def testCreateLink(self):
path = 'foo/bar/baz'
target_path = 'foo/bar/quux'
new_file = self.filesystem.CreateLink(path, 'quux')
# Neither the path nor the final target exists before we actually write to
# one of them, even though the link appears in the file system.
self.assertFalse(self.filesystem.Exists(path))
self.assertFalse(self.filesystem.Exists(target_path))
self.assertTrue(stat.S_IFLNK & new_file.st_mode)
# But once we write the linked-to file, both will exist.
self.filesystem.CreateFile(target_path)
self.assertTrue(self.filesystem.Exists(path))
self.assertTrue(self.filesystem.Exists(target_path))
def testResolveObject(self):
target_path = 'dir/target'
target_contents = '0123456789ABCDEF'
link_name = 'x'
self.filesystem.CreateDirectory('dir')
self.filesystem.CreateFile('dir/target', contents=target_contents)
self.filesystem.CreateLink(link_name, target_path)
obj = self.filesystem.ResolveObject(link_name)
self.assertEqual('target', obj.name)
self.assertEqual(target_contents, obj.contents)
def testLresolveObject(self):
target_path = 'dir/target'
target_contents = '0123456789ABCDEF'
link_name = 'x'
self.filesystem.CreateDirectory('dir')
self.filesystem.CreateFile('dir/target', contents=target_contents)
self.filesystem.CreateLink(link_name, target_path)
obj = self.filesystem.LResolveObject(link_name)
self.assertEqual(link_name, obj.name)
self.assertEqual(target_path, obj.contents)
def testDirectoryAccessOnFile(self):
self.filesystem.CreateFile('not_a_dir')
self.assertRaises(IOError, self.filesystem.ResolveObject, 'not_a_dir/foo')
self.assertRaises(IOError, self.filesystem.ResolveObject,
'not_a_dir/foo/bar')
self.assertRaises(IOError, self.filesystem.LResolveObject, 'not_a_dir/foo')
self.assertRaises(IOError, self.filesystem.LResolveObject,
'not_a_dir/foo/bar')
class FakeOsModuleTest(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem()
self.os = fake_filesystem.FakeOsModule(self.filesystem)
self.rwx = self.os.R_OK | self.os.W_OK | self.os.X_OK
self.rw = self.os.R_OK | self.os.W_OK
self.orig_time = time.time
time.time = _GetDummyTime(200, 20)
def tearDown(self):
time.time = self.orig_time
def assertRaisesWithRegexpMatch(self, expected_exception, expected_regexp,
callable_obj, *args, **kwargs):
"""Asserts that the message in a raised exception matches the given regexp.
Args:
expected_exception: Exception class expected to be raised.
expected_regexp: Regexp (re pattern object or string) expected to be
found in error message.
callable_obj: Function to be called.
*args: Extra args.
**kwargs: Extra kwargs.
"""
try:
callable_obj(*args, **kwargs)
except expected_exception as err:
if isinstance(expected_regexp, str):
expected_regexp = re.compile(expected_regexp)
self.assertTrue(
expected_regexp.search(str(err)),
'"%s" does not match "%s"' % (expected_regexp.pattern, str(err)))
else:
self.fail(expected_exception.__name__ + ' not raised')
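# Usage sketch (illustrative; the exact message text is an assumption):
#   self.assertRaisesWithRegexpMatch(
#       OSError, 'No such file or directory',
#       self.os.stat, '/non/existent/file')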
def testChdir(self):
"""chdir should work on a directory."""
directory = '/foo'
self.filesystem.CreateDirectory(directory)
self.os.chdir(directory)
def testChdirFailsNonExist(self):
"""chdir should raise OSError if the target does not exist."""
directory = '/no/such/directory'
self.assertRaises(OSError, self.os.chdir, directory)
def testChdirFailsNonDirectory(self):
"""chdir should raies OSError if the target is not a directory."""
filename = '/foo/bar'
self.filesystem.CreateFile(filename)
self.assertRaises(OSError, self.os.chdir, filename)
def testConsecutiveChdir(self):
"""Consecutive relative chdir calls should work."""
dir1 = 'foo'
dir2 = 'bar'
full_dirname = self.os.path.join(dir1, dir2)
self.filesystem.CreateDirectory(full_dirname)
self.os.chdir(dir1)
self.os.chdir(dir2)
self.assertEqual(self.os.getcwd(), self.os.path.sep + full_dirname)
def testBackwardsChdir(self):
"""chdir into '..' should behave appropriately."""
rootdir = self.os.getcwd()
dirname = 'foo'
abs_dirname = self.os.path.abspath(dirname)
self.filesystem.CreateDirectory(dirname)
self.os.chdir(dirname)
self.assertEqual(abs_dirname, self.os.getcwd())
self.os.chdir('..')
self.assertEqual(rootdir, self.os.getcwd())
self.os.chdir(self.os.path.join(dirname, '..'))
self.assertEqual(rootdir, self.os.getcwd())
def testGetCwd(self):
dirname = '/foo/bar'
self.filesystem.CreateDirectory(dirname)
self.assertEqual(self.os.getcwd(), self.os.path.sep)
self.os.chdir(dirname)
self.assertEqual(self.os.getcwd(), dirname)
def testListdir(self):
directory = 'xyzzy/plugh'
files = ['foo', 'bar', 'baz']
for f in files:
self.filesystem.CreateFile('%s/%s' % (directory, f))
files.sort()
self.assertEqual(files, self.os.listdir(directory))
def testListdirOnSymlink(self):
directory = 'xyzzy'
files = ['foo', 'bar', 'baz']
for f in files:
self.filesystem.CreateFile('%s/%s' % (directory, f))
self.filesystem.CreateLink('symlink', 'xyzzy')
files.sort()
self.assertEqual(files, self.os.listdir('symlink'))
def testListdirError(self):
file_path = 'foo/bar/baz'
self.filesystem.CreateFile(file_path)
self.assertRaises(OSError, self.os.listdir, file_path)
def testExistsCurrentDir(self):
self.assertTrue(self.filesystem.Exists('.'))
def testListdirCurrent(self):
files = ['foo', 'bar', 'baz']
for f in files:
self.filesystem.CreateFile('%s' % f)
files.sort()
self.assertEqual(files, self.os.listdir('.'))
def testFdopen(self):
fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
file_path1 = 'some_file1'
self.filesystem.CreateFile(file_path1, contents='contents here1')
fake_file1 = fake_open(file_path1, 'r')
self.assertEqual(0, fake_file1.fileno())
self.assertFalse(self.os.fdopen(0) is fake_file1)
self.assertRaises(TypeError, self.os.fdopen, None)
self.assertRaises(TypeError, self.os.fdopen, 'a string')
def testOutOfRangeFdopen(self):
# We haven't created any files, so even 0 is out of range.
self.assertRaises(OSError, self.os.fdopen, 0)
def testClosedFileDescriptor(self):
fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
first_path = 'some_file1'
second_path = 'some_file2'
third_path = 'some_file3'
self.filesystem.CreateFile(first_path, contents='contents here1')
self.filesystem.CreateFile(second_path, contents='contents here2')
self.filesystem.CreateFile(third_path, contents='contents here3')
fake_file1 = fake_open(first_path, 'r')
fake_file2 = fake_open(second_path, 'r')
fake_file3 = fake_open(third_path, 'r')
self.assertEqual(0, fake_file1.fileno())
self.assertEqual(1, fake_file2.fileno())
self.assertEqual(2, fake_file3.fileno())
fileno2 = fake_file2.fileno()
self.os.close(fileno2)
self.assertRaises(OSError, self.os.close, fileno2)
self.assertEqual(0, fake_file1.fileno())
self.assertEqual(2, fake_file3.fileno())
self.assertFalse(self.os.fdopen(0) is fake_file1)
self.assertFalse(self.os.fdopen(2) is fake_file3)
self.assertRaises(OSError, self.os.fdopen, 1)
def testFdopenMode(self):
fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
file_path1 = 'some_file1'
self.filesystem.CreateFile(file_path1, contents='contents here1',
st_mode=((stat.S_IFREG | 0o666) ^ stat.S_IWRITE))
fake_file1 = fake_open(file_path1, 'r')
self.assertEqual(0, fake_file1.fileno())
self.os.fdopen(0)
self.os.fdopen(0, mode='r')
exception = OSError if sys.version_info < (3, 0) else IOError
self.assertRaises(exception, self.os.fdopen, 0, 'w')
def testLowLevelOpenCreate(self):
file_path = 'file1'
# this is the low-level open, not FakeFileOpen
fileno = self.os.open(file_path, self.os.O_CREAT)
self.assertEqual(0, fileno)
self.assertTrue(self.os.path.exists(file_path))
def testLowLevelOpenCreateMode(self):
file_path = 'file1'
fileno = self.os.open(file_path, self.os.O_CREAT, 0o700)
self.assertEqual(0, fileno)
self.assertTrue(self.os.path.exists(file_path))
self.assertModeEqual(0o700, self.os.stat(file_path).st_mode)
def testLowLevelOpenCreateModeUnsupported(self):
file_path = 'file1'
fake_flag = 0b100000000000000000000000
self.assertRaises(NotImplementedError, self.os.open, file_path, fake_flag)
def testLowLevelWriteRead(self):
file_path = 'file1'
self.filesystem.CreateFile(file_path, contents='orig contents')
new_contents = '1234567890abcdef'
fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
fh = fake_open(file_path, 'w')
fileno = fh.fileno()
self.assertEqual(len(new_contents), self.os.write(fileno, new_contents))
self.assertEqual(new_contents,
self.filesystem.GetObject(file_path).contents)
self.os.close(fileno)
fh = fake_open(file_path, 'r')
fileno = fh.fileno()
self.assertEqual('', self.os.read(fileno, 0))
self.assertEqual(new_contents[0:2], self.os.read(fileno, 2))
self.assertEqual(new_contents[2:10], self.os.read(fileno, 8))
self.assertEqual(new_contents[10:], self.os.read(fileno, 100))
self.assertEqual('', self.os.read(fileno, 10))
self.os.close(fileno)
self.assertRaises(OSError, self.os.write, fileno, new_contents)
self.assertRaises(OSError, self.os.read, fileno, 10)
def testFstat(self):
directory = 'xyzzy'
file_path = '%s/plugh' % directory
self.filesystem.CreateFile(file_path, contents='ABCDE')
fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
file_obj = fake_open(file_path)
fileno = file_obj.fileno()
self.assertTrue(stat.S_IFREG & self.os.fstat(fileno)[stat.ST_MODE])
self.assertTrue(stat.S_IFREG & self.os.fstat(fileno).st_mode)
self.assertEqual(5, self.os.fstat(fileno)[stat.ST_SIZE])
def testStat(self):
directory = 'xyzzy'
file_path = '%s/plugh' % directory
self.filesystem.CreateFile(file_path, contents='ABCDE')
self.assertTrue(stat.S_IFDIR & self.os.stat(directory)[stat.ST_MODE])
self.assertTrue(stat.S_IFREG & self.os.stat(file_path)[stat.ST_MODE])
self.assertTrue(stat.S_IFREG & self.os.stat(file_path).st_mode)
self.assertEqual(5, self.os.stat(file_path)[stat.ST_SIZE])
def testLstat(self):
directory = 'xyzzy'
base_name = 'plugh'
file_contents = 'frobozz'
# Just make sure we didn't accidentally make our test data meaningless.
self.assertNotEqual(len(base_name), len(file_contents))
file_path = '%s/%s' % (directory, base_name)
link_path = '%s/link' % directory
self.filesystem.CreateFile(file_path, contents=file_contents)
self.filesystem.CreateLink(link_path, base_name)
self.assertEqual(len(file_contents), self.os.lstat(file_path)[stat.ST_SIZE])
self.assertEqual(len(base_name), self.os.lstat(link_path)[stat.ST_SIZE])
def testStatNonExistentFile(self):
# set up
file_path = '/non/existent/file'
self.assertFalse(self.filesystem.Exists(file_path))
# actual tests
try:
# Use try-catch to check exception attributes.
self.os.stat(file_path)
self.fail('Exception is expected.') # COV_NF_LINE
except OSError as os_error:
self.assertEqual(errno.ENOENT, os_error.errno)
self.assertEqual(file_path, os_error.filename)
def testReadlink(self):
link_path = 'foo/bar/baz'
target = 'tarJAY'
self.filesystem.CreateLink(link_path, target)
self.assertEqual(self.os.readlink(link_path), target)
def testReadlinkRaisesIfPathIsNotALink(self):
file_path = 'foo/bar/eleventyone'
self.filesystem.CreateFile(file_path)
self.assertRaises(OSError, self.os.readlink, file_path)
def testReadlinkRaisesIfPathDoesNotExist(self):
self.assertRaises(OSError, self.os.readlink, '/this/path/does/not/exist')
def testReadlinkRaisesIfPathIsNone(self):
self.assertRaises(TypeError, self.os.readlink, None)
def testReadlinkWithLinksInPath(self):
self.filesystem.CreateLink('/meyer/lemon/pie', 'yum')
self.filesystem.CreateLink('/geo/metro', '/meyer')
self.assertEqual('yum', self.os.readlink('/geo/metro/lemon/pie'))
def testReadlinkWithChainedLinksInPath(self):
self.filesystem.CreateLink('/eastern/european/wolfhounds/chase', 'cats')
self.filesystem.CreateLink('/russian', '/eastern/european')
self.filesystem.CreateLink('/dogs', '/russian/wolfhounds')
self.assertEqual('cats', self.os.readlink('/dogs/chase'))
def testRemoveDir(self):
directory = 'xyzzy'
dir_path = '/%s/plugh' % directory
self.filesystem.CreateDirectory(dir_path)
self.assertTrue(self.filesystem.Exists(dir_path))
self.assertRaises(OSError, self.os.remove, dir_path)
self.assertTrue(self.filesystem.Exists(dir_path))
self.os.chdir(directory)
self.assertRaises(OSError, self.os.remove, 'plugh')
self.assertTrue(self.filesystem.Exists(dir_path))
self.assertRaises(OSError, self.os.remove, '/plugh')
def testRemoveFile(self):
directory = 'zzy'
file_path = '%s/plugh' % directory
self.filesystem.CreateFile(file_path)
self.assertTrue(self.filesystem.Exists(file_path))
self.os.remove(file_path)
self.assertFalse(self.filesystem.Exists(file_path))
def testRemoveDirRaisesError(self):
directory = 'zzy'
self.filesystem.CreateDirectory(directory)
self.assertRaises(OSError,
self.os.remove,
directory)
def testRemoveSymlinkToDir(self):
directory = 'zzy'
link = 'link_to_dir'
self.filesystem.CreateDirectory(directory)
self.os.symlink(directory, link)
self.assertTrue(self.filesystem.Exists(directory))
self.assertTrue(self.filesystem.Exists(link))
self.os.remove(link)
self.assertTrue(self.filesystem.Exists(directory))
self.assertFalse(self.filesystem.Exists(link))
def testUnlink(self):
self.assertTrue(self.os.unlink == self.os.remove)
def testUnlinkRaisesIfNotExist(self):
file_path = '/file/does/not/exist'
self.assertFalse(self.filesystem.Exists(file_path))
self.assertRaises(OSError, self.os.unlink, file_path)
def testRenameToNonexistentFile(self):
"""Can rename a file to an unused name."""
directory = 'xyzzy'
old_file_path = '%s/plugh_old' % directory
new_file_path = '%s/plugh_new' % directory
self.filesystem.CreateFile(old_file_path, contents='test contents')
self.assertTrue(self.filesystem.Exists(old_file_path))
self.assertFalse(self.filesystem.Exists(new_file_path))
self.os.rename(old_file_path, new_file_path)
self.assertFalse(self.filesystem.Exists(old_file_path))
self.assertTrue(self.filesystem.Exists(new_file_path))
self.assertEqual('test contents',
self.filesystem.GetObject(new_file_path).contents)
def testRenameDirectory(self):
"""Can rename a directory to an unused name."""
for old_path, new_path in [('wxyyw', 'xyzzy'), ('/abccb', 'cdeed')]:
self.filesystem.CreateFile('%s/plugh' % old_path, contents='test')
self.assertTrue(self.filesystem.Exists(old_path))
self.assertFalse(self.filesystem.Exists(new_path))
self.os.rename(old_path, new_path)
self.assertFalse(self.filesystem.Exists(old_path))
self.assertTrue(self.filesystem.Exists(new_path))
self.assertEqual(
'test', self.filesystem.GetObject('%s/plugh' % new_path).contents)
def testRenameToExistentFile(self):
"""Can rename a file to a used name."""
directory = 'xyzzy'
old_file_path = '%s/plugh_old' % directory
new_file_path = '%s/plugh_new' % directory
self.filesystem.CreateFile(old_file_path, contents='test contents 1')
self.filesystem.CreateFile(new_file_path, contents='test contents 2')
self.assertTrue(self.filesystem.Exists(old_file_path))
self.assertTrue(self.filesystem.Exists(new_file_path))
self.os.rename(old_file_path, new_file_path)
self.assertFalse(self.filesystem.Exists(old_file_path))
self.assertTrue(self.filesystem.Exists(new_file_path))
self.assertEqual('test contents 1',
self.filesystem.GetObject(new_file_path).contents)
def testRenameToNonexistentDir(self):
"""Can rename a file to a name in a nonexistent dir."""
directory = 'xyzzy'
old_file_path = '%s/plugh_old' % directory
new_file_path = '%s/no_such_path/plugh_new' % directory
self.filesystem.CreateFile(old_file_path, contents='test contents')
self.assertTrue(self.filesystem.Exists(old_file_path))
self.assertFalse(self.filesystem.Exists(new_file_path))
self.assertRaises(IOError, self.os.rename, old_file_path, new_file_path)
self.assertTrue(self.filesystem.Exists(old_file_path))
self.assertFalse(self.filesystem.Exists(new_file_path))
self.assertEqual('test contents',
self.filesystem.GetObject(old_file_path).contents)
def testRenameNonexistentFileShouldRaiseError(self):
"""Can't rename a file that doesn't exist."""
self.assertRaises(OSError,
self.os.rename,
'nonexistent-foo',
'doesn\'t-matter-bar')
def testRenameEmptyDir(self):
"""Test a rename of an empty directory."""
directory = 'xyzzy'
before_dir = '%s/empty' % directory
after_dir = '%s/unused' % directory
self.filesystem.CreateDirectory(before_dir)
self.assertTrue(self.filesystem.Exists('%s/.' % before_dir))
self.assertFalse(self.filesystem.Exists(after_dir))
self.os.rename(before_dir, after_dir)
self.assertFalse(self.filesystem.Exists(before_dir))
self.assertTrue(self.filesystem.Exists('%s/.' % after_dir))
def testRenameDir(self):
"""Test a rename of a directory."""
directory = 'xyzzy'
before_dir = '%s/before' % directory
before_file = '%s/before/file' % directory
after_dir = '%s/after' % directory
after_file = '%s/after/file' % directory
self.filesystem.CreateDirectory(before_dir)
self.filesystem.CreateFile(before_file, contents='payload')
self.assertTrue(self.filesystem.Exists(before_dir))
self.assertTrue(self.filesystem.Exists(before_file))
self.assertFalse(self.filesystem.Exists(after_dir))
self.assertFalse(self.filesystem.Exists(after_file))
self.os.rename(before_dir, after_dir)
self.assertFalse(self.filesystem.Exists(before_dir))
self.assertFalse(self.filesystem.Exists(before_file))
self.assertTrue(self.filesystem.Exists(after_dir))
self.assertTrue(self.filesystem.Exists(after_file))
self.assertEqual('payload',
self.filesystem.GetObject(after_file).contents)
def testRenamePreservesStat(self):
"""Test if rename preserves mtime."""
directory = 'xyzzy'
old_file_path = '%s/plugh_old' % directory
new_file_path = '%s/plugh_new' % directory
old_file = self.filesystem.CreateFile(old_file_path)
old_file.SetMTime(old_file.st_mtime - 3600)
self.os.chown(old_file_path, 200, 200)
self.os.chmod(old_file_path, 0o222)
new_file = self.filesystem.CreateFile(new_file_path)
self.assertNotEqual(new_file.st_mtime, old_file.st_mtime)
self.os.rename(old_file_path, new_file_path)
new_file = self.filesystem.GetObject(new_file_path)
self.assertEqual(new_file.st_mtime, old_file.st_mtime)
self.assertEqual(new_file.st_mode, old_file.st_mode)
self.assertEqual(new_file.st_uid, old_file.st_uid)
self.assertEqual(new_file.st_gid, old_file.st_gid)
def testRmdir(self):
"""Can remove a directory."""
directory = 'xyzzy'
sub_dir = '/xyzzy/abccd'
other_dir = '/xyzzy/cdeed'
self.filesystem.CreateDirectory(directory)
self.assertTrue(self.filesystem.Exists(directory))
self.os.rmdir(directory)
self.assertFalse(self.filesystem.Exists(directory))
self.filesystem.CreateDirectory(sub_dir)
self.filesystem.CreateDirectory(other_dir)
self.os.chdir(sub_dir)
self.os.rmdir('../cdeed')
self.assertFalse(self.filesystem.Exists(other_dir))
self.os.chdir('..')
self.os.rmdir('abccd')
self.assertFalse(self.filesystem.Exists(sub_dir))
def testRmdirRaisesIfNotEmpty(self):
"""Raises an exception if the target directory is not empty."""
directory = 'xyzzy'
file_path = '%s/plugh' % directory
self.filesystem.CreateFile(file_path)
self.assertTrue(self.filesystem.Exists(file_path))
self.assertRaises(OSError, self.os.rmdir, directory)
def testRmdirRaisesIfNotDirectory(self):
"""Raises an exception if the target is not a directory."""
directory = 'xyzzy'
file_path = '%s/plugh' % directory
self.filesystem.CreateFile(file_path)
self.assertTrue(self.filesystem.Exists(file_path))
self.assertRaises(OSError, self.os.rmdir, file_path)
self.assertRaises(OSError, self.os.rmdir, '.')
def testRmdirRaisesIfNotExist(self):
"""Raises an exception if the target does not exist."""
directory = 'xyzzy'
self.assertFalse(self.filesystem.Exists(directory))
self.assertRaises(OSError, self.os.rmdir, directory)
def RemovedirsCheck(self, directory):
self.assertTrue(self.filesystem.Exists(directory))
self.os.removedirs(directory)
return not self.filesystem.Exists(directory)
def testRemovedirs(self):
data = ['test1', 'test1/test2', 'test1/extra', 'test1/test2/test3']
for directory in data:
self.filesystem.CreateDirectory(directory)
self.assertTrue(self.filesystem.Exists(directory))
self.assertRaises(OSError, self.RemovedirsCheck, data[0])
self.assertRaises(OSError, self.RemovedirsCheck, data[1])
self.assertTrue(self.RemovedirsCheck(data[3]))
self.assertTrue(self.filesystem.Exists(data[0]))
self.assertFalse(self.filesystem.Exists(data[1]))
self.assertTrue(self.filesystem.Exists(data[2]))
# Should raise because '/test1/extra' is all that is left, and
# removedirs('/test1/extra') will eventually try to rmdir('/').
self.assertRaises(OSError, self.RemovedirsCheck, data[2])
# However, it will still delete '/test1' in the process.
self.assertFalse(self.filesystem.Exists(data[0]))
self.filesystem.CreateDirectory('test1/test2')
# Add this to the root directory to avoid raising an exception.
self.filesystem.CreateDirectory('test3')
self.assertTrue(self.RemovedirsCheck('test1/test2'))
self.assertFalse(self.filesystem.Exists('test1/test2'))
self.assertFalse(self.filesystem.Exists('test1'))
def testRemovedirsRaisesIfRemovingRoot(self):
"""Raises exception if asked to remove '/'."""
directory = '/'
self.assertTrue(self.filesystem.Exists(directory))
self.assertRaises(OSError, self.os.removedirs, directory)
def testRemovedirsRaisesIfCascadeRemovingRoot(self):
"""Raises exception if asked to remove '/' as part of a larger operation.
All other directories should still be removed, though.
"""
directory = '/foo/bar/'
self.filesystem.CreateDirectory(directory)
self.assertTrue(self.filesystem.Exists(directory))
self.assertRaises(OSError, self.os.removedirs, directory)
head, unused_tail = self.os.path.split(directory)
while head != '/':
self.assertFalse(self.filesystem.Exists(directory))
head, unused_tail = self.os.path.split(head)
def testRemovedirsWithTrailingSlash(self):
"""removedirs works on directory names with trailing slashes."""
# separate this case from the removing-root-directory case
self.filesystem.CreateDirectory('/baz')
directory = '/foo/bar/'
self.filesystem.CreateDirectory(directory)
self.assertTrue(self.filesystem.Exists(directory))
self.os.removedirs(directory)
self.assertFalse(self.filesystem.Exists(directory))
def testMkdir(self):
"""mkdir can create a relative directory."""
directory = 'xyzzy'
self.assertFalse(self.filesystem.Exists(directory))
self.os.mkdir(directory)
self.assertTrue(self.filesystem.Exists('/%s' % directory))
self.os.chdir(directory)
self.os.mkdir(directory)
self.assertTrue(self.filesystem.Exists('/%s/%s' % (directory, directory)))
self.os.chdir(directory)
self.os.mkdir('../abccb')
self.assertTrue(self.filesystem.Exists('/%s/abccb' % directory))
def testMkdirWithTrailingSlash(self):
"""mkdir can create a directory named with a trailing slash."""
directory = '/foo/'
self.assertFalse(self.filesystem.Exists(directory))
self.os.mkdir(directory)
self.assertTrue(self.filesystem.Exists(directory))
self.assertTrue(self.filesystem.Exists('/foo'))
def testMkdirRaisesIfEmptyDirectoryName(self):
"""mkdir raises exeption if creating directory named ''."""
directory = ''
self.assertRaises(OSError, self.os.mkdir, directory)
def testMkdirRaisesIfNoParent(self):
"""mkdir raises exception if parent directory does not exist."""
parent = 'xyzzy'
directory = '%s/foo' % (parent,)
self.assertFalse(self.filesystem.Exists(parent))
self.assertRaises(Exception, self.os.mkdir, directory)
def testMkdirRaisesIfDirectoryExists(self):
"""mkdir raises exception if directory already exists."""
directory = 'xyzzy'
self.filesystem.CreateDirectory(directory)
self.assertTrue(self.filesystem.Exists(directory))
self.assertRaises(Exception, self.os.mkdir, directory)
def testMkdirRaisesIfFileExists(self):
"""mkdir raises exception if name already exists as a file."""
directory = 'xyzzy'
file_path = '%s/plugh' % directory
self.filesystem.CreateFile(file_path)
self.assertTrue(self.filesystem.Exists(file_path))
self.assertRaises(Exception, self.os.mkdir, file_path)
def testMkdirRaisesWithSlashDot(self):
"""mkdir raises exception if mkdir foo/. (trailing /.)."""
self.assertRaises(Exception, self.os.mkdir, '/.')
directory = '/xyzzy/.'
self.assertRaises(Exception, self.os.mkdir, directory)
self.filesystem.CreateDirectory('/xyzzy')
self.assertRaises(Exception, self.os.mkdir, directory)
def testMkdirRaisesWithDoubleDots(self):
"""mkdir raises exception if mkdir foo/foo2/../foo3."""
self.assertRaises(Exception, self.os.mkdir, '/..')
directory = '/xyzzy/dir1/dir2/../../dir3'
self.assertRaises(Exception, self.os.mkdir, directory)
self.filesystem.CreateDirectory('/xyzzy')
self.assertRaises(Exception, self.os.mkdir, directory)
self.filesystem.CreateDirectory('/xyzzy/dir1')
self.assertRaises(Exception, self.os.mkdir, directory)
self.filesystem.CreateDirectory('/xyzzy/dir1/dir2')
self.os.mkdir(directory)
self.assertTrue(self.filesystem.Exists(directory))
directory = '/xyzzy/dir1/..'
self.assertRaises(Exception, self.os.mkdir, directory)
def testMkdirRaisesIfParentIsReadOnly(self):
"""mkdir raises exception if parent is read only."""
directory = '/a'
self.os.mkdir(directory)
# Change directory permissions to be read only.
self.os.chmod(directory, 0o400)
directory = '/a/b'
self.assertRaises(Exception, self.os.mkdir, directory)
def testMakedirs(self):
"""makedirs can create a directory even in parent does not exist."""
parent = 'xyzzy'
directory = '%s/foo' % (parent,)
self.assertFalse(self.filesystem.Exists(parent))
self.os.makedirs(directory)
self.assertTrue(self.filesystem.Exists(parent))
def testMakedirsRaisesIfParentIsFile(self):
"""makedirs raises exception if a parent component exists as a file."""
file_path = 'xyzzy'
directory = '%s/plugh' % file_path
self.filesystem.CreateFile(file_path)
self.assertTrue(self.filesystem.Exists(file_path))
self.assertRaises(Exception, self.os.makedirs, directory)
def testMakedirsRaisesIfAccessDenied(self):
"""makedirs raises exception if access denied."""
directory = '/a'
self.os.mkdir(directory)
# Change directory permissions to be read only.
self.os.chmod(directory, 0o400)
directory = '/a/b'
self.assertRaises(Exception, self.os.makedirs, directory)
def _CreateTestFile(self, path):
self.filesystem.CreateFile(path)
self.assertTrue(self.filesystem.Exists(path))
st = self.os.stat(path)
self.assertEqual(0o666, stat.S_IMODE(st.st_mode))
self.assertTrue(st.st_mode & stat.S_IFREG)
self.assertFalse(st.st_mode & stat.S_IFDIR)
def _CreateTestDirectory(self, path):
self.filesystem.CreateDirectory(path)
self.assertTrue(self.filesystem.Exists(path))
st = self.os.stat(path)
self.assertEqual(0o777, stat.S_IMODE(st.st_mode))
self.assertFalse(st.st_mode & stat.S_IFREG)
self.assertTrue(st.st_mode & stat.S_IFDIR)
def testAccess700(self):
# set up
path = '/some_file'
self._CreateTestFile(path)
self.os.chmod(path, 0o700)
self.assertModeEqual(0o700, self.os.stat(path).st_mode)
# actual tests
self.assertTrue(self.os.access(path, self.os.F_OK))
self.assertTrue(self.os.access(path, self.os.R_OK))
self.assertTrue(self.os.access(path, self.os.W_OK))
self.assertTrue(self.os.access(path, self.os.X_OK))
self.assertTrue(self.os.access(path, self.rwx))
def testAccess600(self):
# set up
path = '/some_file'
self._CreateTestFile(path)
self.os.chmod(path, 0o600)
self.assertModeEqual(0o600, self.os.stat(path).st_mode)
# actual tests
self.assertTrue(self.os.access(path, self.os.F_OK))
self.assertTrue(self.os.access(path, self.os.R_OK))
self.assertTrue(self.os.access(path, self.os.W_OK))
self.assertFalse(self.os.access(path, self.os.X_OK))
self.assertFalse(self.os.access(path, self.rwx))
self.assertTrue(self.os.access(path, self.rw))
def testAccess400(self):
# set up
path = '/some_file'
self._CreateTestFile(path)
self.os.chmod(path, 0o400)
self.assertModeEqual(0o400, self.os.stat(path).st_mode)
# actual tests
self.assertTrue(self.os.access(path, self.os.F_OK))
self.assertTrue(self.os.access(path, self.os.R_OK))
self.assertFalse(self.os.access(path, self.os.W_OK))
self.assertFalse(self.os.access(path, self.os.X_OK))
self.assertFalse(self.os.access(path, self.rwx))
self.assertFalse(self.os.access(path, self.rw))
def testAccessNonExistentFile(self):
# set up
path = '/non/existent/file'
self.assertFalse(self.filesystem.Exists(path))
# actual tests
self.assertFalse(self.os.access(path, self.os.F_OK))
self.assertFalse(self.os.access(path, self.os.R_OK))
self.assertFalse(self.os.access(path, self.os.W_OK))
self.assertFalse(self.os.access(path, self.os.X_OK))
self.assertFalse(self.os.access(path, self.rwx))
self.assertFalse(self.os.access(path, self.rw))
def testChmod(self):
# set up
path = '/some_file'
self._CreateTestFile(path)
# actual tests
self.os.chmod(path, 0o6543)
st = self.os.stat(path)
self.assertModeEqual(0o6543, st.st_mode)
self.assertTrue(st.st_mode & stat.S_IFREG)
self.assertFalse(st.st_mode & stat.S_IFDIR)
def testChmodDir(self):
# set up
path = '/some_dir'
self._CreateTestDirectory(path)
# actual tests
self.os.chmod(path, 0o1234)
st = self.os.stat(path)
self.assertModeEqual(0o1234, st.st_mode)
self.assertFalse(st.st_mode & stat.S_IFREG)
self.assertTrue(st.st_mode & stat.S_IFDIR)
def testChmodNonExistent(self):
# set up
path = '/non/existent/file'
self.assertFalse(self.filesystem.Exists(path))
# actual tests
try:
# Use try-catch to check exception attributes.
self.os.chmod(path, 0o777)
self.fail('Exception is expected.') # COV_NF_LINE
except OSError as os_error:
self.assertEqual(errno.ENOENT, os_error.errno)
self.assertEqual(path, os_error.filename)
def testChmodStCtime(self):
# set up
file_path = 'some_file'
self.filesystem.CreateFile(file_path)
self.assertTrue(self.filesystem.Exists(file_path))
st = self.os.stat(file_path)
self.assertEqual(200, st.st_ctime)
# tests
self.os.chmod(file_path, 0o765)
st = self.os.stat(file_path)
self.assertEqual(220, st.st_ctime)
def testUtimeSetsCurrentTimeIfArgsIsNone(self):
# set up
path = '/some_file'
self._CreateTestFile(path)
st = self.os.stat(path)
# 200 is the current time established in setUp().
self.assertEqual(200, st.st_atime)
self.assertEqual(200, st.st_mtime)
# actual tests
self.os.utime(path, None)
st = self.os.stat(path)
self.assertEqual(220, st.st_atime)
self.assertEqual(240, st.st_mtime)
def testUtimeSetsCurrentTimeIfArgsIsNoneWithFloats(self):
# set up
# time.time can return floats, but they should be converted to ints
# since atime/ctime/mtime are all defined as seconds since the epoch.
time.time = _GetDummyTime(200.0123, 20)
path = '/some_file'
self._CreateTestFile(path)
st = self.os.stat(path)
# 200 is the current time established above (if converted to int).
self.assertEqual(200, st.st_atime)
self.assertEqual(200, st.st_mtime)
# actual tests
self.os.utime(path, None)
st = self.os.stat(path)
self.assertEqual(220, st.st_atime)
self.assertEqual(240, st.st_mtime)
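# Note: _GetDummyTime is defined earlier in this file. Judging from its use
# here and in setUp(), the behavior being assumed is roughly (a sketch, not
# the actual implementation): each time.time() call returns the current
# value and then advances it by the increment.
#
#   def _GetDummyTime(curr_time, increment):
#     data = [curr_time]
#     def _DummyTime():
#       value = data[0]
#       data[0] += increment
#       return value
#     return _DummyTime
#
# That is why the assertions above expect 200, then 220, then 240.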
def testUtimeSetsSpecifiedTime(self):
# set up
path = '/some_file'
self._CreateTestFile(path)
st = self.os.stat(path)
# actual tests
self.os.utime(path, (1, 2))
st = self.os.stat(path)
self.assertEqual(1, st.st_atime)
self.assertEqual(2, st.st_mtime)
def testUtimeDir(self):
# set up
path = '/some_dir'
self._CreateTestDirectory(path)
# actual tests
self.os.utime(path, (1.0, 2.0))
st = self.os.stat(path)
self.assertEqual(1.0, st.st_atime)
self.assertEqual(2.0, st.st_mtime)
def testUtimeNonExistent(self):
# set up
path = '/non/existent/file'
self.assertFalse(self.filesystem.Exists(path))
# actual tests
try:
# Use try-catch to check exception attributes.
self.os.utime(path, (1, 2))
self.fail('Exception is expected.') # COV_NF_LINE
except OSError as os_error:
self.assertEqual(errno.ENOENT, os_error.errno)
self.assertEqual(path, os_error.filename)
def testUtimeTupleArgIsOfIncorrectLength(self):
# set up
path = '/some_dir'
self._CreateTestDirectory(path)
# actual tests
self.assertRaisesWithRegexpMatch(
TypeError, r'utime\(\) arg 2 must be a tuple \(atime, mtime\)',
self.os.utime, path, (1, 2, 3))
def testUtimeTupleArgContainsIncorrectType(self):
# set up
path = '/some_dir'
self._CreateTestDirectory(path)
# actual tests
self.assertRaisesWithRegexpMatch(
TypeError, 'an integer is required',
self.os.utime, path, (1, 'str'))
def testChownExistingFile(self):
# set up
file_path = 'some_file'
self.filesystem.CreateFile(file_path)
# first set it and make sure it's set
self.os.chown(file_path, 100, 100)
st = self.os.stat(file_path)
self.assertEqual(st[stat.ST_UID], 100)
self.assertEqual(st[stat.ST_GID], 100)
# we can make sure it changed
self.os.chown(file_path, 200, 200)
st = self.os.stat(file_path)
self.assertEqual(st[stat.ST_UID], 200)
self.assertEqual(st[stat.ST_GID], 200)
# setting a value to -1 leaves it unchanged
self.os.chown(file_path, -1, -1)
st = self.os.stat(file_path)
self.assertEqual(st[stat.ST_UID], 200)
self.assertEqual(st[stat.ST_GID], 200)
def testChownNonexistingFileShouldRaiseOsError(self):
file_path = 'some_file'
self.assertFalse(self.filesystem.Exists(file_path))
self.assertRaises(OSError, self.os.chown, file_path, 100, 100)
def testClassifyDirectoryContents(self):
"""Directory classification should work correctly."""
root_directory = '/foo'
test_directories = ['bar1', 'baz2']
test_files = ['baz1', 'bar2', 'baz3']
self.filesystem.CreateDirectory(root_directory)
for directory in test_directories:
directory = self.os.path.join(root_directory, directory)
self.filesystem.CreateDirectory(directory)
for test_file in test_files:
test_file = self.os.path.join(root_directory, test_file)
self.filesystem.CreateFile(test_file)
test_directories.sort()
test_files.sort()
generator = self.os.walk(root_directory)
root, dirs, files = next(generator)
dirs.sort()
files.sort()
self.assertEqual(root_directory, root)
self.assertEqual(test_directories, dirs)
self.assertEqual(test_files, files)
def testClassifyDoesNotHideExceptions(self):
"""_ClassifyDirectoryContents should not hide exceptions."""
directory = '/foo'
self.assertEqual(False, self.filesystem.Exists(directory))
self.assertRaises(OSError, self.os._ClassifyDirectoryContents, directory)
def testWalkTopDown(self):
"""Walk down ordering is correct."""
self.filesystem.CreateFile('foo/1.txt')
self.filesystem.CreateFile('foo/bar1/2.txt')
self.filesystem.CreateFile('foo/bar1/baz/3.txt')
self.filesystem.CreateFile('foo/bar2/4.txt')
expected = [
('foo', ['bar1', 'bar2'], ['1.txt']),
('foo/bar1', ['baz'], ['2.txt']),
('foo/bar1/baz', [], ['3.txt']),
('foo/bar2', [], ['4.txt']),
]
self.assertEqual(expected, [step for step in self.os.walk('foo')])
def testWalkBottomUp(self):
"""Walk up ordering is correct."""
self.filesystem.CreateFile('foo/bar1/baz/1.txt')
self.filesystem.CreateFile('foo/bar1/2.txt')
self.filesystem.CreateFile('foo/bar2/3.txt')
self.filesystem.CreateFile('foo/4.txt')
expected = [
('foo/bar1/baz', [], ['1.txt']),
('foo/bar1', ['baz'], ['2.txt']),
('foo/bar2', [], ['3.txt']),
('foo', ['bar1', 'bar2'], ['4.txt']),
]
self.assertEqual(expected,
[step for step in self.os.walk('foo', topdown=False)])
def testWalkRaisesIfNonExistent(self):
"""Raises an exception when attempting to walk non-existent directory."""
directory = '/foo/bar'
self.assertEqual(False, self.filesystem.Exists(directory))
generator = self.os.walk(directory)
self.assertRaises(StopIteration, next, generator)
def testWalkRaisesIfNotDirectory(self):
"""Raises an exception when attempting to walk a non-directory."""
filename = '/foo/bar'
self.filesystem.CreateFile(filename)
generator = self.os.walk(filename)
self.assertRaises(StopIteration, next, generator)
def testMkNodeCanCreateAFile(self):
filename = 'foo'
self.assertFalse(self.filesystem.Exists(filename))
self.os.mknod(filename)
self.assertTrue(self.filesystem.Exists(filename))
def testMkNodeRaisesIfEmptyFileName(self):
filename = ''
self.assertRaises(OSError, self.os.mknod, filename)
def testMkNodeRaisesIfParentDirDoesntExist(self):
parent = 'xyzzy'
filename = '%s/foo' % (parent,)
self.assertFalse(self.filesystem.Exists(parent))
self.assertRaises(OSError, self.os.mknod, filename)
def testMkNodeRaisesIfFileExists(self):
filename = '/tmp/foo'
self.filesystem.CreateFile(filename)
self.assertTrue(self.filesystem.Exists(filename))
self.assertRaises(OSError, self.os.mknod, filename)
def testMkNodeRaisesIfFilenameIsDot(self):
filename = '/tmp/.'
self.assertRaises(OSError, self.os.mknod, filename)
def testMkNodeRaisesIfFilenameIsDoubleDot(self):
filename = '/tmp/..'
self.assertRaises(OSError, self.os.mknod, filename)
def testMknodEmptyTailForExistingFileRaises(self):
filename = '/tmp/foo'
self.filesystem.CreateFile(filename)
self.assertTrue(self.filesystem.Exists(filename))
self.assertRaises(OSError, self.os.mknod, filename)
def testMknodEmptyTailForNonexistentFileRaises(self):
filename = '/tmp/foo'
self.assertRaises(OSError, self.os.mknod, filename)
def testMknodRaisesIfFilenameIsEmptyString(self):
filename = ''
self.assertRaises(OSError, self.os.mknod, filename)
def testMknodeRaisesIfUnsupportedOptions(self):
filename = 'abcde'
self.assertRaises(OSError, self.os.mknod, filename,
mode=stat.S_IFCHR)
def testMknodeRaisesIfParentIsNotADirectory(self):
filename1 = '/tmp/foo'
self.filesystem.CreateFile(filename1)
self.assertTrue(self.filesystem.Exists(filename1))
filename2 = '/tmp/foo/bar'
self.assertRaises(OSError, self.os.mknod, filename2)
def ResetErrno(self):
"""Reset the last seen errno."""
self.last_errno = False
def StoreErrno(self, os_error):
"""Store the last errno we saw."""
self.last_errno = os_error.errno
def GetErrno(self):
"""Return the last errno we saw."""
return self.last_errno
def testWalkCallsOnErrorIfNonExistent(self):
"""Calls onerror with correct errno when walking non-existent directory."""
self.ResetErrno()
directory = '/foo/bar'
self.assertEqual(False, self.filesystem.Exists(directory))
# Calling os.walk on a non-existent directory should trigger a call to the
# onerror method. We do not actually care what, if anything, is returned.
for unused_entry in self.os.walk(directory, onerror=self.StoreErrno):
pass
self.assertEqual(errno.ENOENT, self.GetErrno())
def testWalkCallsOnErrorIfNotDirectory(self):
"""Calls onerror with correct errno when walking non-directory."""
self.ResetErrno()
filename = '/foo/bar'
self.filesystem.CreateFile(filename)
self.assertEqual(True, self.filesystem.Exists(filename))
# Calling os.walk on a file should trigger a call to the onerror method.
# We do not actually care what, if anything, is returned.
for unused_entry in self.os.walk(filename, onerror=self.StoreErrno):
pass
self.assertEqual(errno.ENOTDIR, self.GetErrno())
def testWalkSkipsRemovedDirectories(self):
"""Caller can modify list of directories to visit while walking."""
root = '/foo'
visit = 'visit'
no_visit = 'no_visit'
self.filesystem.CreateFile('%s/bar' % (root,))
self.filesystem.CreateFile('%s/%s/1.txt' % (root, visit))
self.filesystem.CreateFile('%s/%s/2.txt' % (root, visit))
self.filesystem.CreateFile('%s/%s/3.txt' % (root, no_visit))
self.filesystem.CreateFile('%s/%s/4.txt' % (root, no_visit))
generator = self.os.walk('/foo')
root_contents = next(generator)
root_contents[1].remove(no_visit)
visited_visit_directory = False
for root, unused_dirs, unused_files in iter(generator):
self.assertEqual(False, root.endswith('/%s' % (no_visit)))
if root.endswith('/%s' % (visit)):
visited_visit_directory = True
self.assertEqual(True, visited_visit_directory)
def testSymlink(self):
file_path = 'foo/bar/baz'
self.os.symlink('bogus', file_path)
self.assertTrue(self.os.path.lexists(file_path))
self.assertFalse(self.os.path.exists(file_path))
self.filesystem.CreateFile('foo/bar/bogus')
self.assertTrue(self.os.path.lexists(file_path))
self.assertTrue(self.os.path.exists(file_path))
def testUMask(self):
umask = os.umask(0o22)
os.umask(umask)
self.assertEqual(umask, self.os.umask(0o22))
def testMkdirUmaskApplied(self):
"""mkdir creates a directory with umask applied."""
self.os.umask(0o22)
self.os.mkdir('dir1')
self.assertModeEqual(0o755, self.os.stat('dir1').st_mode)
self.os.umask(0o67)
self.os.mkdir('dir2')
self.assertModeEqual(0o710, self.os.stat('dir2').st_mode)
def testMakedirsUmaskApplied(self):
"""makedirs creates a directories with umask applied."""
self.os.umask(0o22)
self.os.makedirs('/p1/dir1')
self.assertModeEqual(0o755, self.os.stat('/p1').st_mode)
self.assertModeEqual(0o755, self.os.stat('/p1/dir1').st_mode)
self.os.umask(0o67)
self.os.makedirs('/p2/dir2')
self.assertModeEqual(0o710, self.os.stat('/p2').st_mode)
self.assertModeEqual(0o710, self.os.stat('/p2/dir2').st_mode)
def testMknodeUmaskApplied(self):
"""mkdir creates a device with umask applied."""
self.os.umask(0o22)
self.os.mknod('nod1')
self.assertModeEqual(0o644, self.os.stat('nod1').st_mode)
self.os.umask(0o27)
self.os.mknod('nod2')
self.assertModeEqual(0o640, self.os.stat('nod2').st_mode)
def testOpenUmaskApplied(self):
"""open creates a file with umask applied."""
fake_open = fake_filesystem.FakeFileOpen(self.filesystem)
self.os.umask(0o22)
fake_open('file1', 'w').close()
self.assertModeEqual(0o644, self.os.stat('file1').st_mode)
self.os.umask(0o27)
fake_open('file2', 'w').close()
self.assertModeEqual(0o640, self.os.stat('file2').st_mode)
class StatPropagationTest(unittest.TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem()
self.os = fake_filesystem.FakeOsModule(self.filesystem)
self.open = fake_filesystem.FakeFileOpen(self.filesystem)
def testFileSizeUpdatedViaClose(self):
"""test that file size gets updated via close()."""
file_dir = 'xyzzy'
file_path = 'xyzzy/close'
content = 'This is a test.'
self.os.mkdir(file_dir)
fh = self.open(file_path, 'w')
self.assertEqual(0, self.os.stat(file_path)[stat.ST_SIZE])
self.assertEqual('', self.filesystem.GetObject(file_path).contents)
fh.write(content)
self.assertEqual(0, self.os.stat(file_path)[stat.ST_SIZE])
self.assertEqual('', self.filesystem.GetObject(file_path).contents)
fh.close()
self.assertEqual(len(content), self.os.stat(file_path)[stat.ST_SIZE])
self.assertEqual(content, self.filesystem.GetObject(file_path).contents)
def testFileSizeNotResetAfterClose(self):
file_dir = 'xyzzy'
file_path = 'xyzzy/close'
self.os.mkdir(file_dir)
size = 1234
# The file has size, but no content. When the file is opened for reading,
# its size should be preserved.
self.filesystem.CreateFile(file_path, st_size=size)
fh = self.open(file_path, 'r')
fh.close()
self.assertEqual(size, self.open(file_path, 'r').Size())
def testFileSizeAfterWrite(self):
file_path = 'test_file'
original_content = 'abcdef'
original_size = len(original_content)
self.filesystem.CreateFile(file_path, contents=original_content)
added_content = 'foo bar'
expected_size = original_size + len(added_content)
fh = self.open(file_path, 'a')
fh.write(added_content)
self.assertEqual(expected_size, fh.Size())
fh.close()
self.assertEqual(expected_size, self.open(file_path, 'r').Size())
def testLargeFileSizeAfterWrite(self):
file_path = 'test_file'
original_content = 'abcdef'
original_size = len(original_content)
self.filesystem.CreateFile(file_path, st_size=original_size)
added_content = 'foo bar'
fh = self.open(file_path, 'a')
# We can't use assertRaises, because the exception is thrown
# in __getattr__, so just saying 'fh.write' causes the exception.
try:
fh.write(added_content)
except fake_filesystem.FakeLargeFileIoException:
return
self.fail('Writing to a large file should not be allowed')
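# Note: since the exception comes from __getattr__, deferring the attribute
# lookup into a callable would also work with assertRaises, e.g.:
#   self.assertRaises(fake_filesystem.FakeLargeFileIoException,
#                     lambda: fh.write(added_content))
# The explicit try/except above is kept for clarity.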
def testFileSizeUpdatedViaFlush(self):
"""test that file size gets updated via flush()."""
file_dir = 'xyzzy'
file_name = 'flush'
file_path = self.os.path.join(file_dir, file_name)
content = 'This might be a test.'
self.os.mkdir(file_dir)
fh = self.open(file_path, 'w')
self.assertEqual(0, self.os.stat(file_path)[stat.ST_SIZE])
self.assertEqual('', self.filesystem.GetObject(file_path).contents)
fh.write(content)
self.assertEqual(0, self.os.stat(file_path)[stat.ST_SIZE])
self.assertEqual('', self.filesystem.GetObject(file_path).contents)
fh.flush()
self.assertEqual(len(content), self.os.stat(file_path)[stat.ST_SIZE])
self.assertEqual(content, self.filesystem.GetObject(file_path).contents)
fh.close()
self.assertEqual(len(content), self.os.stat(file_path)[stat.ST_SIZE])
self.assertEqual(content, self.filesystem.GetObject(file_path).contents)
def testFileSizeTruncation(self):
"""test that file size gets updated via open()."""
file_dir = 'xyzzy'
file_path = 'xyzzy/truncation'
content = 'AAA content.'
# pre-create file with content
self.os.mkdir(file_dir)
fh = self.open(file_path, 'w')
fh.write(content)
fh.close()
self.assertEqual(len(content), self.os.stat(file_path)[stat.ST_SIZE])
self.assertEqual(content, self.filesystem.GetObject(file_path).contents)
# test file truncation
fh = self.open(file_path, 'w')
self.assertEqual(0, self.os.stat(file_path)[stat.ST_SIZE])
self.assertEqual('', self.filesystem.GetObject(file_path).contents)
fh.close()
class OsPathInjectionRegressionTest(unittest.TestCase):
"""Test faking os.path before calling os.walk.
Found when investigating a problem with
gws/tools/labrat/rat_utils_unittest, which was faking out os.path
before calling os.walk.
"""
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem()
self.os_path = os.path
# The bug was that when os.path gets faked, the FakePathModule doesn't get
# called in self.os.walk(). FakePathModule now insists that it is created
# as part of FakeOsModule.
self.os = fake_filesystem.FakeOsModule(self.filesystem)
def tearDown(self):
os.path = self.os_path
def testCreateTopLevelDirectory(self):
top_level_dir = '/x'
self.assertFalse(self.filesystem.Exists(top_level_dir))
self.filesystem.CreateDirectory(top_level_dir)
self.assertTrue(self.filesystem.Exists('/'))
self.assertTrue(self.filesystem.Exists(top_level_dir))
self.filesystem.CreateDirectory('%s/po' % top_level_dir)
self.filesystem.CreateFile('%s/po/control' % top_level_dir)
self.filesystem.CreateFile('%s/po/experiment' % top_level_dir)
self.filesystem.CreateDirectory('%s/gv' % top_level_dir)
self.filesystem.CreateFile('%s/gv/control' % top_level_dir)
expected = [
('/', ['x'], []),
('/x', ['gv', 'po'], []),
('/x/gv', [], ['control']),
('/x/po', [], ['control', 'experiment']),
]
self.assertEqual(expected, [step for step in self.os.walk('/')])
class FakePathModuleTest(unittest.TestCase):
def setUp(self):
self.orig_time = time.time
time.time = _GetDummyTime(10, 1)
self.filesystem = fake_filesystem.FakeFilesystem()
self.os = fake_filesystem.FakeOsModule(self.filesystem)
self.path = self.os.path
def tearDown(self):
time.time = self.orig_time
def testAbspath(self):
"""abspath should return a consistent representation of a file."""
filename = 'foo'
abspath = '/%s' % filename
self.filesystem.CreateFile(abspath)
self.assertEqual(abspath, self.path.abspath(abspath))
self.assertEqual(abspath, self.path.abspath(filename))
self.assertEqual(abspath, self.path.abspath('../%s' % filename))
def testAbspathDealsWithRelativeNonRootPath(self):
"""abspath should correctly handle relative paths from a non-/ directory.
This test is distinct from the basic functionality test because
fake_filesystem has historically been based in /.
"""
filename = '/foo/bar/baz'
file_components = filename.split(self.path.sep)
basedir = '/%s' % (file_components[0],)
self.filesystem.CreateFile(filename)
self.os.chdir(basedir)
self.assertEqual(basedir, self.path.abspath(self.path.curdir))
self.assertEqual('/', self.path.abspath('..'))
self.assertEqual(self.path.join(basedir, file_components[1]),
self.path.abspath(file_components[1]))
def testRelpath(self):
path_foo = '/path/to/foo'
path_bar = '/path/to/bar'
path_other = '/some/where/else'
self.assertRaises(ValueError, self.path.relpath, None)
self.assertRaises(ValueError, self.path.relpath, '')
self.assertEqual(path_foo[1:],
self.path.relpath(path_foo))
self.assertEqual('../foo',
self.path.relpath(path_foo, path_bar))
self.assertEqual('../../..%s' % path_other,
self.path.relpath(path_other, path_bar))
self.assertEqual('.',
self.path.relpath(path_bar, path_bar))
def testRealpathVsAbspath(self):
self.filesystem.CreateFile('/george/washington/bridge')
self.filesystem.CreateLink('/first/president', '/george/washington')
self.assertEqual('/first/president/bridge',
self.os.path.abspath('/first/president/bridge'))
self.assertEqual('/george/washington/bridge',
self.os.path.realpath('/first/president/bridge'))
self.os.chdir('/first/president')
self.assertEqual('/george/washington/bridge',
self.os.path.realpath('bridge'))
def testExists(self):
file_path = 'foo/bar/baz'
self.filesystem.CreateFile(file_path)
self.assertTrue(self.path.exists(file_path))
self.assertFalse(self.path.exists('/some/other/bogus/path'))
def testLexists(self):
file_path = 'foo/bar/baz'
self.filesystem.CreateDirectory('foo/bar')
self.filesystem.CreateLink(file_path, 'bogus')
self.assertTrue(self.path.lexists(file_path))
self.assertFalse(self.path.exists(file_path))
self.filesystem.CreateFile('foo/bar/bogus')
self.assertTrue(self.path.exists(file_path))
def testDirname(self):
dirname = 'foo/bar'
self.assertEqual(dirname, self.path.dirname('%s/baz' % dirname))
def testJoin(self):
components = ['foo', 'bar', 'baz']
self.assertEqual(os.path.join(*components), self.path.join(*components))
def testExpandUser(self):
self.assertEqual(self.path.expanduser('~'), self.os.environ['HOME'])
self.assertEqual('/root', self.path.expanduser('~root'))
def testGetsizePathNonexistent(self):
file_path = 'foo/bar/baz'
self.assertRaises(IOError, self.path.getsize, file_path)
def testGetsizeFileEmpty(self):
file_path = 'foo/bar/baz'
self.filesystem.CreateFile(file_path)
self.assertEqual(0, self.path.getsize(file_path))
def testGetsizeFileNonZeroSize(self):
file_path = 'foo/bar/baz'
self.filesystem.CreateFile(file_path, contents='1234567')
self.assertEqual(7, self.path.getsize(file_path))
def testGetsizeDirEmpty(self):
# For directories, only require that the size is non-negative.
dir_path = 'foo/bar'
self.filesystem.CreateDirectory(dir_path)
size = self.path.getsize(dir_path)
self.assertFalse(int(size) < 0,
'expected non-negative size; actual: %s' % size)
def testGetsizeDirNonZeroSize(self):
# For directories, only require that the size is non-negative.
dir_path = 'foo/bar'
self.filesystem.CreateFile(os.path.join(dir_path, 'baz'))
size = self.path.getsize(dir_path)
self.assertFalse(int(size) < 0,
'expected non-negative size; actual: %s' % size)
def testIsdir(self):
self.filesystem.CreateFile('foo/bar')
self.assertTrue(self.path.isdir('foo'))
self.assertFalse(self.path.isdir('foo/bar'))
self.assertFalse(self.path.isdir('it_dont_exist'))
def testIsdirWithCwdChange(self):
self.filesystem.CreateFile('/foo/bar/baz')
self.assertTrue(self.path.isdir('/foo'))
self.assertTrue(self.path.isdir('/foo/bar'))
self.assertTrue(self.path.isdir('foo'))
self.assertTrue(self.path.isdir('foo/bar'))
self.filesystem.cwd = '/foo'
self.assertTrue(self.path.isdir('/foo'))
self.assertTrue(self.path.isdir('/foo/bar'))
self.assertTrue(self.path.isdir('bar'))
def testIsfile(self):
self.filesystem.CreateFile('foo/bar')
self.assertFalse(self.path.isfile('foo'))
self.assertTrue(self.path.isfile('foo/bar'))
self.assertFalse(self.path.isfile('it_dont_exist'))
def testGetMtime(self):
test_file = self.filesystem.CreateFile('foo/bar1.txt')
# The root directory ('', effectively '/') is created at time 10,
# the parent directory ('foo') at time 11, and the file at time 12.
self.assertEqual(12, test_file.st_mtime)
test_file.SetMTime(24)
self.assertEqual(24, self.path.getmtime('foo/bar1.txt'))
def testGetMtimeRaisesOSError(self):
self.assertFalse(self.path.exists('it_dont_exist'))
self.assertRaises(OSError, self.path.getmtime, 'it_dont_exist')
def testIslink(self):
self.filesystem.CreateDirectory('foo')
self.filesystem.CreateFile('foo/regular_file')
self.filesystem.CreateLink('foo/link_to_file', 'regular_file')
self.assertFalse(self.path.islink('foo'))
# An object can be both a link and a file or directory, according to
# the comments in Python/Lib/posixpath.py.
self.assertTrue(self.path.islink('foo/link_to_file'))
self.assertTrue(self.path.isfile('foo/link_to_file'))
self.assertTrue(self.path.isfile('foo/regular_file'))
self.assertFalse(self.path.islink('foo/regular_file'))
self.assertFalse(self.path.islink('it_dont_exist'))
def testWalk(self):
# os.path.walk was removed in Python 3
if sys.version_info >= (3, 0):
return
self.filesystem.CreateFile('/foo/bar/baz')
self.filesystem.CreateFile('/foo/bar/xyzzy/plugh')
visited_nodes = []
def RecordVisitedNodes(visited, dirname, fnames):
visited.extend(((dirname, fname) for fname in fnames))
self.path.walk('/foo', RecordVisitedNodes, visited_nodes)
expected = [('/foo', 'bar'),
('/foo/bar', 'baz'),
('/foo/bar', 'xyzzy'),
('/foo/bar/xyzzy', 'plugh')]
self.assertEqual(expected, visited_nodes)
def testWalkFromNonexistentTopDoesNotThrow(self):
# os.path.walk was removed in Python 3
if sys.version_info >= (3, 0):
return
visited_nodes = []
def RecordVisitedNodes(visited, dirname, fnames):
visited.extend(((dirname, fname) for fname in fnames))
self.path.walk('/foo', RecordVisitedNodes, visited_nodes)
self.assertEqual([], visited_nodes)
class FakeFileOpenTestBase(TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem()
self.file = fake_filesystem.FakeFileOpen(self.filesystem)
self.open = self.file
self.os = fake_filesystem.FakeOsModule(self.filesystem)
self.orig_time = time.time
time.time = _GetDummyTime(100, 10)
def tearDown(self):
time.time = self.orig_time
class FakeFileOpenTest(FakeFileOpenTestBase):
def testOpenNoParentDir(self):
"""Expect raise when open'ing a file in a missing directory."""
file_path = 'foo/bar.txt'
self.assertRaises(IOError, self.file, file_path, 'w')
def testDeleteOnClose(self):
file_dir = 'boo'
file_path = 'boo/far'
self.os.mkdir(file_dir)
self.file = fake_filesystem.FakeFileOpen(self.filesystem,
delete_on_close=True)
fh = self.file(file_path, 'w')
self.assertTrue(self.filesystem.Exists(file_path))
fh.close()
self.assertFalse(self.filesystem.Exists(file_path))
def testNoDeleteOnCloseByDefault(self):
file_dir = 'boo'
file_path = 'boo/czar'
self.file = fake_filesystem.FakeFileOpen(self.filesystem)
self.os.mkdir(file_dir)
fh = self.file(file_path, 'w')
self.assertTrue(self.filesystem.Exists(file_path))
fh.close()
self.assertTrue(self.filesystem.Exists(file_path))
def testCompatibilityOfWithStatement(self):
self.file = fake_filesystem.FakeFileOpen(self.filesystem,
delete_on_close=True)
file_path = 'foo'
self.assertFalse(self.filesystem.Exists(file_path))
with self.file(file_path, 'w') as _:
self.assertTrue(self.filesystem.Exists(file_path))
# After the 'with' statement, the close() method should have been called.
self.assertFalse(self.filesystem.Exists(file_path))
def testOpenValidFile(self):
contents = [
'I am he as\n',
'you are he as\n',
'you are me and\n',
'we are all together\n'
]
file_path = 'foo/bar.txt'
self.filesystem.CreateFile(file_path, contents=''.join(contents))
self.assertEqual(contents, self.file(file_path).readlines())
def testOpenValidArgs(self):
contents = [
"Bang bang Maxwell's silver hammer\n",
'Came down on her head',
]
file_path = 'abbey_road/maxwell'
self.filesystem.CreateFile(file_path, contents=''.join(contents))
self.assertEqual(
contents, self.open(file_path, mode='r', buffering=1).readlines())
if sys.version_info >= (3, 0):
self.assertEqual(
contents, self.open(file_path, mode='r', buffering=1,
encoding='utf-8', errors='strict', newline='\n',
closefd=False, opener=False).readlines())
def testOpenNewlineArg(self):
if sys.version_info < (3, 0):
return
file_path = 'some_file'
file_contents = 'two\r\nlines'
self.filesystem.CreateFile(file_path, contents=file_contents)
fake_file = self.open(file_path, mode='r', newline=None)
self.assertEqual(['two\n', 'lines'], fake_file.readlines())
fake_file = self.open(file_path, mode='r', newline='')
self.assertEqual(['two\r\n', 'lines'], fake_file.readlines())
fake_file = self.open(file_path, mode='r', newline='\r')
self.assertEqual(['two\r', '\r', 'lines'], fake_file.readlines())
fake_file = self.open(file_path, mode='r', newline='\n')
self.assertEqual(['two\r\n', 'lines'], fake_file.readlines())
fake_file = self.open(file_path, mode='r', newline='\r\n')
self.assertEqual(['two\r\r\n', 'lines'], fake_file.readlines())
def testOpenValidFileWithCwd(self):
contents = [
'I am he as\n',
'you are he as\n',
'you are me and\n',
'we are all together\n'
]
file_path = '/foo/bar.txt'
self.filesystem.CreateFile(file_path, contents=''.join(contents))
self.filesystem.cwd = '/foo'
self.assertEqual(contents, self.file(file_path).readlines())
def testIterateOverFile(self):
contents = [
"Bang bang Maxwell's silver hammer",
'Came down on her head',
]
file_path = 'abbey_road/maxwell'
self.filesystem.CreateFile(file_path, contents='\n'.join(contents))
result = [line.rstrip() for line in self.file(file_path)]
self.assertEqual(contents, result)
def testOpenDirectoryError(self):
directory_path = 'foo/bar'
self.filesystem.CreateDirectory(directory_path)
self.assertRaises(IOError, self.file.__call__, directory_path)
def testCreateFileWithWrite(self):
contents = [
"Here comes the sun, little darlin'",
'Here comes the sun, and I say,',
"It's alright",
]
file_dir = 'abbey_road'
file_path = 'abbey_road/here_comes_the_sun'
self.os.mkdir(file_dir)
fake_file = self.file(file_path, 'w')
for line in contents:
fake_file.write(line + '\n')
fake_file.close()
result = [line.rstrip() for line in self.file(file_path)]
self.assertEqual(contents, result)
def testCreateFileWithAppend(self):
contents = [
"Here comes the sun, little darlin'",
'Here comes the sun, and I say,',
"It's alright",
]
file_dir = 'abbey_road'
file_path = 'abbey_road/here_comes_the_sun'
self.os.mkdir(file_dir)
fake_file = self.file(file_path, 'a')
for line in contents:
fake_file.write(line + '\n')
fake_file.close()
result = [line.rstrip() for line in self.file(file_path)]
self.assertEqual(contents, result)
def testOverwriteExistingFile(self):
file_path = 'overwrite/this/file'
self.filesystem.CreateFile(file_path, contents='To disappear')
new_contents = [
'Only these lines',
'should be in the file.',
]
fake_file = self.file(file_path, 'w')
for line in new_contents:
fake_file.write(line + '\n')
fake_file.close()
result = [line.rstrip() for line in self.file(file_path)]
self.assertEqual(new_contents, result)
def testAppendExistingFile(self):
file_path = 'append/this/file'
contents = [
'Contents of original file'
'Appended contents',
]
self.filesystem.CreateFile(file_path, contents=contents[0])
fake_file = self.file(file_path, 'a')
for line in contents[1:]:
fake_file.write(line + '\n')
fake_file.close()
result = [line.rstrip() for line in self.file(file_path)]
self.assertEqual(contents, result)
def testOpenWithWplus(self):
# set up
file_path = 'wplus_file'
self.filesystem.CreateFile(file_path, contents='old contents')
self.assertTrue(self.filesystem.Exists(file_path))
fake_file = self.file(file_path, 'r')
self.assertEqual('old contents', fake_file.read())
fake_file.close()
# actual tests
fake_file = self.file(file_path, 'w+')
fake_file.write('new contents')
fake_file.seek(0)
self.assertEqual('new contents', fake_file.read())
fake_file.close()
def testOpenWithWplusTruncation(self):
# set up
file_path = 'wplus_file'
self.filesystem.CreateFile(file_path, contents='old contents')
self.assertTrue(self.filesystem.Exists(file_path))
fake_file = self.file(file_path, 'r')
self.assertEqual('old contents', fake_file.read())
fake_file.close()
# actual tests
fake_file = self.file(file_path, 'w+')
fake_file.seek(0)
self.assertEqual('', fake_file.read())
fake_file.close()
def testOpenWithAppendFlag(self):
contents = [
'I am he as\n',
'you are he as\n',
'you are me and\n',
'we are all together\n'
]
additional_contents = [
'These new lines\n',
'like you a lot.\n'
]
file_path = 'append/this/file'
self.filesystem.CreateFile(file_path, contents=''.join(contents))
fake_file = self.file(file_path, 'a')
self.assertRaises(IOError, fake_file.read)
self.assertEqual('', fake_file.read(0))
self.assertEqual('', fake_file.readline(0))
self.assertEqual(len(''.join(contents)), fake_file.tell())
fake_file.seek(0)
self.assertEqual(0, fake_file.tell())
fake_file.writelines(additional_contents)
fake_file.close()
result = self.file(file_path).readlines()
self.assertEqual(contents + additional_contents, result)
def testAppendWithAplus(self):
# set up
file_path = 'aplus_file'
self.filesystem.CreateFile(file_path, contents='old contents')
self.assertTrue(self.filesystem.Exists(file_path))
fake_file = self.file(file_path, 'r')
self.assertEqual('old contents', fake_file.read())
fake_file.close()
# actual tests
fake_file = self.file(file_path, 'a+')
self.assertEqual(0, fake_file.tell())
fake_file.seek(6, 1)
fake_file.write('new contents')
self.assertEqual(24, fake_file.tell())
fake_file.seek(0)
self.assertEqual('old contentsnew contents', fake_file.read())
fake_file.close()
def testAppendWithAplusReadWithLoop(self):
# set up
file_path = 'aplus_file'
self.filesystem.CreateFile(file_path, contents='old contents')
self.assertTrue(self.filesystem.Exists(file_path))
fake_file = self.file(file_path, 'r')
self.assertEqual('old contents', fake_file.read())
fake_file.close()
# actual tests
fake_file = self.file(file_path, 'a+')
fake_file.seek(0)
fake_file.write('new contents')
fake_file.seek(0)
for line in fake_file:
self.assertEqual('old contentsnew contents', line)
fake_file.close()
def testReadEmptyFileWithAplus(self):
file_path = 'aplus_file'
fake_file = self.file(file_path, 'a+')
self.assertEqual('', fake_file.read())
fake_file.close()
def testReadWithRplus(self):
# set up
file_path = 'rplus_file'
self.filesystem.CreateFile(file_path, contents='old contents here')
self.assertTrue(self.filesystem.Exists(file_path))
fake_file = self.file(file_path, 'r')
self.assertEqual('old contents here', fake_file.read())
fake_file.close()
# actual tests
fake_file = self.file(file_path, 'r+')
self.assertEqual('old contents here', fake_file.read())
fake_file.seek(0)
fake_file.write('new contents')
fake_file.seek(0)
self.assertEqual('new contents here', fake_file.read())
fake_file.close()
def testOpenStCtime(self):
# set up
file_path = 'some_file'
self.assertFalse(self.filesystem.Exists(file_path))
# tests
fake_file = self.file(file_path, 'w')
fake_file.close()
st = self.os.stat(file_path)
self.assertEqual(100, st.st_ctime)
fake_file = self.file(file_path, 'w')
fake_file.close()
st = self.os.stat(file_path)
self.assertEqual(110, st.st_ctime)
fake_file = self.file(file_path, 'w+')
fake_file.close()
st = self.os.stat(file_path)
self.assertEqual(120, st.st_ctime)
fake_file = self.file(file_path, 'r')
fake_file.close()
st = self.os.stat(file_path)
self.assertEqual(120, st.st_ctime)
def _CreateWithPermission(self, file_path, perm_bits):
self.filesystem.CreateFile(file_path)
self.os.chmod(file_path, perm_bits)
st = self.os.stat(file_path)
self.assertModeEqual(perm_bits, st.st_mode)
self.assertTrue(st.st_mode & stat.S_IFREG)
self.assertFalse(st.st_mode & stat.S_IFDIR)
def testOpenFlags700(self):
# set up
file_path = 'target_file'
self._CreateWithPermission(file_path, 0o700)
# actual tests
self.file(file_path, 'r').close()
self.file(file_path, 'w').close()
self.file(file_path, 'w+').close()
self.assertRaises(IOError, self.file, file_path, 'INV')
def testOpenFlags400(self):
# set up
file_path = 'target_file'
self._CreateWithPermission(file_path, 0o400)
# actual tests
self.file(file_path, 'r').close()
self.assertRaises(IOError, self.file, file_path, 'w')
self.assertRaises(IOError, self.file, file_path, 'w+')
def testOpenFlags200(self):
# set up
file_path = 'target_file'
self._CreateWithPermission(file_path, 0o200)
# actual tests
self.assertRaises(IOError, self.file, file_path, 'r')
self.file(file_path, 'w').close()
self.assertRaises(IOError, self.file, file_path, 'w+')
def testOpenFlags100(self):
# set up
file_path = 'target_file'
self._CreateWithPermission(file_path, 0o100)
# actual tests
self.assertRaises(IOError, self.file, file_path, 'r')
self.assertRaises(IOError, self.file, file_path, 'w')
self.assertRaises(IOError, self.file, file_path, 'w+')
def testFollowLinkRead(self):
link_path = '/foo/bar/baz'
target = '/tarJAY'
target_contents = 'real baz contents'
self.filesystem.CreateFile(target, contents=target_contents)
self.filesystem.CreateLink(link_path, target)
self.assertEqual(target, self.os.readlink(link_path))
fh = self.open(link_path, 'r')
got_contents = fh.read()
fh.close()
self.assertEqual(target_contents, got_contents)
def testFollowLinkWrite(self):
link_path = '/foo/bar/TBD'
target = '/tarJAY'
target_contents = 'real baz contents'
self.filesystem.CreateLink(link_path, target)
self.assertFalse(self.filesystem.Exists(target))
fh = self.open(link_path, 'w')
fh.write(target_contents)
fh.close()
fh = self.open(target, 'r')
got_contents = fh.read()
fh.close()
self.assertEqual(target_contents, got_contents)
def testFollowIntraPathLinkWrite(self):
# Test a link in the middle of a file path.
link_path = '/foo/build/local_machine/output/1'
target = '/tmp/output/1'
self.filesystem.CreateDirectory('/tmp/output')
self.filesystem.CreateLink('/foo/build/local_machine', '/tmp')
self.assertFalse(self.filesystem.Exists(link_path))
self.assertFalse(self.filesystem.Exists(target))
target_contents = 'real baz contents'
fh = self.open(link_path, 'w')
fh.write(target_contents)
fh.close()
fh = self.open(target, 'r')
got_contents = fh.read()
fh.close()
self.assertEqual(target_contents, got_contents)
def testFileDescriptorsForDifferentFiles(self):
first_path = 'some_file1'
second_path = 'some_file2'
third_path = 'some_file3'
self.filesystem.CreateFile(first_path, contents='contents here1')
self.filesystem.CreateFile(second_path, contents='contents here2')
self.filesystem.CreateFile(third_path, contents='contents here3')
fake_file1 = self.open(first_path, 'r')
fake_file2 = self.open(second_path, 'r')
fake_file3 = self.open(third_path, 'r')
self.assertEqual(0, fake_file1.fileno())
self.assertEqual(1, fake_file2.fileno())
self.assertEqual(2, fake_file3.fileno())
def testFileDescriptorsForTheSameFileAreDifferent(self):
first_path = 'some_file1'
second_path = 'some_file2'
self.filesystem.CreateFile(first_path, contents='contents here1')
self.filesystem.CreateFile(second_path, contents='contents here2')
fake_file1 = self.open(first_path, 'r')
fake_file2 = self.open(second_path, 'r')
fake_file1a = self.open(first_path, 'r')
self.assertEqual(0, fake_file1.fileno())
self.assertEqual(1, fake_file2.fileno())
self.assertEqual(2, fake_file1a.fileno())
def testReusedFileDescriptorsDoNotAffectOthers(self):
first_path = 'some_file1'
second_path = 'some_file2'
third_path = 'some_file3'
self.filesystem.CreateFile(first_path, contents='contents here1')
self.filesystem.CreateFile(second_path, contents='contents here2')
self.filesystem.CreateFile(third_path, contents='contents here3')
fake_file1 = self.open(first_path, 'r')
fake_file2 = self.open(second_path, 'r')
fake_file3 = self.open(third_path, 'r')
fake_file1a = self.open(first_path, 'r')
self.assertEqual(0, fake_file1.fileno())
self.assertEqual(1, fake_file2.fileno())
self.assertEqual(2, fake_file3.fileno())
self.assertEqual(3, fake_file1a.fileno())
fake_file1.close()
fake_file2.close()
fake_file2 = self.open(second_path, 'r')
fake_file1b = self.open(first_path, 'r')
self.assertEqual(0, fake_file2.fileno())
self.assertEqual(1, fake_file1b.fileno())
self.assertEqual(2, fake_file3.fileno())
self.assertEqual(3, fake_file1a.fileno())
def testIntertwinedReadWrite(self):
file_path = 'some_file'
self.filesystem.CreateFile(file_path)
with self.open(file_path, 'a') as writer:
with self.open(file_path, 'r') as reader:
writes = ['hello', 'world\n', 'somewhere\nover', 'the\n', 'rainbow']
reads = []
# when writes are flushed, they are piped to the reader
for write in writes:
writer.write(write)
writer.flush()
reads.append(reader.read())
reader.flush()
self.assertEqual(writes, reads)
writes = ['nothing', 'to\nsee', 'here']
reads = []
# when writes are not flushed, the reader doesn't read anything new
for write in writes:
writer.write(write)
reads.append(reader.read())
self.assertEqual(['' for i in writes], reads)
def testOpenIoErrors(self):
file_path = 'some_file'
self.filesystem.CreateFile(file_path)
with self.open(file_path, 'a') as fh:
self.assertRaises(IOError, fh.read)
self.assertRaises(IOError, fh.readlines)
with self.open(file_path, 'w') as fh:
self.assertRaises(IOError, fh.read)
self.assertRaises(IOError, fh.readlines)
with self.open(file_path, 'r') as fh:
self.assertRaises(IOError, fh.truncate)
self.assertRaises(IOError, fh.write, 'contents')
self.assertRaises(IOError, fh.writelines, ['con', 'tents'])
def _IteratorOpen(file_path, mode):
for _ in self.open(file_path, mode):
pass
self.assertRaises(IOError, _IteratorOpen, file_path, 'w')
self.assertRaises(IOError, _IteratorOpen, file_path, 'a')
class OpenWithFileDescriptorTest(FakeFileOpenTestBase):
def testOpenWithFileDescriptor(self):
if sys.version_info < (3, 0):
return
file_path = 'this/file'
self.filesystem.CreateFile(file_path)
fd = self.os.open(file_path, os.O_CREAT)
self.assertEqual(fd, self.open(fd, 'r').fileno())
def testClosefdWithFileDescriptor(self):
if sys.version_info < (3, 0):
return
file_path = 'this/file'
self.filesystem.CreateFile(file_path)
fd = self.os.open(file_path, os.O_CREAT)
fh = self.open(fd, 'r', closefd=False)
fh.close()
self.assertIsNotNone(self.filesystem.open_files[fd])
fh = self.open(fd, 'r', closefd=True)
fh.close()
self.assertIsNone(self.filesystem.open_files[fd])
class OpenWithIgnoredFlagsTest(unittest.TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem()
self.file = fake_filesystem.FakeFileOpen(self.filesystem)
self.os = fake_filesystem.FakeOsModule(self.filesystem)
self.file_path = 'some_file'
self.read_contents = self.file_contents = 'two\r\nlines'
# For python 3.x, text file newlines are converted to \n
if sys.version_info >= (3, 0):
self.read_contents = 'two\nlines'
self.filesystem.CreateFile(self.file_path, contents=self.file_contents)
# It's reasonable to assume the file exists at this point,
# so a tearDown() shouldn't be needed.
def OpenFakeFile(self, mode):
return self.file(self.file_path, mode=mode)
def testReadBinary(self):
fake_file = self.OpenFakeFile('rb')
self.assertEqual(self.file_contents, fake_file.read())
def testReadText(self):
fake_file = self.OpenFakeFile('rt')
self.assertEqual(self.read_contents, fake_file.read())
def testReadUniversalNewlines(self):
fake_file = self.OpenFakeFile('rU')
self.assertEqual(self.read_contents, fake_file.read())
def testUniversalNewlines(self):
fake_file = self.OpenFakeFile('U')
self.assertEqual(self.read_contents, fake_file.read())
def OpenFileAndSeek(self, mode):
fake_file = self.file(self.file_path, mode=mode)
fake_file.seek(0, 2)
return fake_file
def WriteAndReopenFile(self, fake_file, mode='r'):
fake_file.write(self.file_contents)
fake_file.close()
return self.file(self.file_path, mode=mode)
def testWriteBinary(self):
fake_file = self.OpenFileAndSeek('wb')
self.assertEqual(0, fake_file.tell())
fake_file = self.WriteAndReopenFile(fake_file, mode='rb')
self.assertEqual(self.file_contents, fake_file.read())
def testWriteText(self):
fake_file = self.OpenFileAndSeek('wt')
self.assertEqual(0, fake_file.tell())
fake_file = self.WriteAndReopenFile(fake_file)
self.assertEqual(self.read_contents, fake_file.read())
def testWriteAndReadBinary(self):
fake_file = self.OpenFileAndSeek('w+b')
self.assertEqual(0, fake_file.tell())
fake_file = self.WriteAndReopenFile(fake_file, mode='rb')
self.assertEqual(self.file_contents, fake_file.read())
def testWriteAndReadTextBinary(self):
fake_file = self.OpenFileAndSeek('w+bt')
self.assertEqual(0, fake_file.tell())
fake_file = self.WriteAndReopenFile(fake_file, mode='rb')
self.assertEqual(self.file_contents, fake_file.read())
class OpenWithInvalidFlagsTest(FakeFileOpenTestBase):
def testCapitalR(self):
self.assertRaises(IOError, self.file, 'some_file', 'R')
def testCapitalW(self):
self.assertRaises(IOError, self.file, 'some_file', 'W')
def testCapitalA(self):
self.assertRaises(IOError, self.file, 'some_file', 'A')
def testLowerU(self):
self.assertRaises(IOError, self.file, 'some_file', 'u')
def testLowerRw(self):
self.assertRaises(IOError, self.file, 'some_file', 'rw')
class ResolvePathTest(FakeFileOpenTestBase):
def __WriteToFile(self, file_name):
fh = self.open(file_name, 'w')
fh.write('x')
fh.close()
def testNoneFilepathRaisesTypeError(self):
self.assertRaises(TypeError, self.open, None, 'w')
def testEmptyFilepathRaisesIOError(self):
self.assertRaises(IOError, self.open, '', 'w')
def testNormalPath(self):
self.__WriteToFile('foo')
self.assertTrue(self.filesystem.Exists('foo'))
def testLinkWithinSameDirectory(self):
final_target = '/foo/baz'
self.filesystem.CreateLink('/foo/bar', 'baz')
self.__WriteToFile('/foo/bar')
self.assertTrue(self.filesystem.Exists(final_target))
self.assertEqual(1, self.os.stat(final_target)[stat.ST_SIZE])
def testLinkToSubDirectory(self):
final_target = '/foo/baz/bip'
self.filesystem.CreateDirectory('/foo/baz')
self.filesystem.CreateLink('/foo/bar', 'baz/bip')
self.__WriteToFile('/foo/bar')
self.assertTrue(self.filesystem.Exists(final_target))
self.assertEqual(1, self.os.stat(final_target)[stat.ST_SIZE])
self.assertTrue(self.filesystem.Exists('/foo/baz'))
# Make sure that intermediate directory got created.
new_dir = self.filesystem.GetObject('/foo/baz')
self.assertTrue(stat.S_IFDIR & new_dir.st_mode)
def testLinkToParentDirectory(self):
final_target = '/baz/bip'
self.filesystem.CreateDirectory('/foo')
self.filesystem.CreateDirectory('/baz')
self.filesystem.CreateLink('/foo/bar', '../baz')
self.__WriteToFile('/foo/bar/bip')
self.assertTrue(self.filesystem.Exists(final_target))
self.assertEqual(1, self.os.stat(final_target)[stat.ST_SIZE])
self.assertTrue(self.filesystem.Exists('/foo/bar'))
def testLinkToAbsolutePath(self):
final_target = '/foo/baz/bip'
self.filesystem.CreateDirectory('/foo/baz')
self.filesystem.CreateLink('/foo/bar', final_target)
self.__WriteToFile('/foo/bar')
self.assertTrue(self.filesystem.Exists(final_target))
def testRelativeLinksWorkAfterChdir(self):
final_target = '/foo/baz/bip'
self.filesystem.CreateDirectory('/foo/baz')
self.filesystem.CreateLink('/foo/bar', './baz/bip')
self.assertEqual(final_target,
self.filesystem.ResolvePath('/foo/bar'))
os_module = fake_filesystem.FakeOsModule(self.filesystem)
self.assertTrue(os_module.path.islink('/foo/bar'))
os_module.chdir('/foo')
self.assertEqual('/foo', os_module.getcwd())
self.assertTrue(os_module.path.islink('bar'))
self.assertEqual('/foo/baz/bip',
self.filesystem.ResolvePath('bar'))
self.__WriteToFile('/foo/bar')
self.assertTrue(self.filesystem.Exists(final_target))
def testAbsoluteLinksWorkAfterChdir(self):
final_target = '/foo/baz/bip'
self.filesystem.CreateDirectory('/foo/baz')
self.filesystem.CreateLink('/foo/bar', final_target)
self.assertEqual(final_target,
self.filesystem.ResolvePath('/foo/bar'))
os_module = fake_filesystem.FakeOsModule(self.filesystem)
self.assertTrue(os_module.path.islink('/foo/bar'))
os_module.chdir('/foo')
self.assertEqual('/foo', os_module.getcwd())
self.assertTrue(os_module.path.islink('bar'))
self.assertEqual('/foo/baz/bip',
self.filesystem.ResolvePath('bar'))
self.__WriteToFile('/foo/bar')
self.assertTrue(self.filesystem.Exists(final_target))
def testChdirThroughRelativeLink(self):
self.filesystem.CreateDirectory('/x/foo')
self.filesystem.CreateDirectory('/x/bar')
self.filesystem.CreateLink('/x/foo/bar', '../bar')
self.assertEqual('/x/bar', self.filesystem.ResolvePath('/x/foo/bar'))
os_module = fake_filesystem.FakeOsModule(self.filesystem)
os_module.chdir('/x/foo')
self.assertEqual('/x/foo', os_module.getcwd())
self.assertEqual('/x/bar', self.filesystem.ResolvePath('bar'))
os_module.chdir('bar')
self.assertEqual('/x/bar', os_module.getcwd())
def testReadLinkToLink(self):
# Write into the final link target and read back from a file which will
# point to that.
self.filesystem.CreateLink('/foo/bar', 'link')
self.filesystem.CreateLink('/foo/link', 'baz')
self.__WriteToFile('/foo/baz')
fh = self.open('/foo/bar', 'r')
self.assertEqual('x', fh.read())
def testWriteLinkToLink(self):
final_target = '/foo/baz'
self.filesystem.CreateLink('/foo/bar', 'link')
self.filesystem.CreateLink('/foo/link', 'baz')
self.__WriteToFile('/foo/bar')
self.assertTrue(self.filesystem.Exists(final_target))
def testMultipleLinks(self):
final_target = '/a/link1/c/link2/e'
self.os.makedirs('/a/link1/c/link2')
self.filesystem.CreateLink('/a/b', 'link1')
self.assertEqual('/a/link1', self.filesystem.ResolvePath('/a/b'))
self.assertEqual('/a/link1/c', self.filesystem.ResolvePath('/a/b/c'))
self.filesystem.CreateLink('/a/link1/c/d', 'link2')
self.assertTrue(self.filesystem.Exists('/a/link1/c/d'))
self.assertTrue(self.filesystem.Exists('/a/b/c/d'))
final_target = '/a/link1/c/link2/e'
self.assertFalse(self.filesystem.Exists(final_target))
self.__WriteToFile('/a/b/c/d/e')
self.assertTrue(self.filesystem.Exists(final_target))
def testTooManyLinks(self):
self.filesystem.CreateLink('/a/loop', 'loop')
self.assertFalse(self.filesystem.Exists('/a/loop'))
class PathManipulationTests(unittest.TestCase):
def setUp(self):
self.filesystem = fake_filesystem.FakeFilesystem(path_separator='|')
class CollapsePathPipeSeparatorTest(PathManipulationTests):
"""Tests CollapsePath (mimics os.path.normpath) using | as path separator."""
def testEmptyPathBecomesDotPath(self):
self.assertEqual('.', self.filesystem.CollapsePath(''))
def testDotPathUnchanged(self):
self.assertEqual('.', self.filesystem.CollapsePath('.'))
def testSlashesAreNotCollapsed(self):
"""Tests that '/' is not treated specially if the path separator is '|'.
In particular, multiple slashes should not be collapsed.
"""
self.assertEqual('/', self.filesystem.CollapsePath('/'))
self.assertEqual('/////', self.filesystem.CollapsePath('/////'))
def testRootPath(self):
self.assertEqual('|', self.filesystem.CollapsePath('|'))
def testMultipleSeparatorsCollapsedIntoRootPath(self):
self.assertEqual('|', self.filesystem.CollapsePath('|||||'))
def testAllDotPathsRemovedButOne(self):
self.assertEqual('.', self.filesystem.CollapsePath('.|.|.|.'))
def testAllDotPathsRemovedIfAnotherPathComponentExists(self):
self.assertEqual('|', self.filesystem.CollapsePath('|.|.|.|'))
self.assertEqual('foo|bar', self.filesystem.CollapsePath('foo|.|.|.|bar'))
def testIgnoresUpLevelReferencesStartingFromRoot(self):
self.assertEqual('|', self.filesystem.CollapsePath('|..|..|..|'))
self.assertEqual('|', self.filesystem.CollapsePath('||..|.|..||'))
self.assertEqual(
'|', self.filesystem.CollapsePath('|..|..|foo|bar|..|..|'))
def testConservesUpLevelReferencesStartingFromCurrentDirectory(self):
self.assertEqual(
'..|..', self.filesystem.CollapsePath('..|foo|bar|..|..|..'))
def testCombineDotAndUpLevelReferencesInAbsolutePath(self):
self.assertEqual(
'|yes', self.filesystem.CollapsePath('|||||.|..|||yes|no|..|.|||'))
class SplitPathTest(PathManipulationTests):
"""Tests SplitPath (which mimics os.path.split) using | as path separator."""
def testEmptyPath(self):
self.assertEqual(('', ''), self.filesystem.SplitPath(''))
def testNoSeparators(self):
self.assertEqual(('', 'ab'), self.filesystem.SplitPath('ab'))
def testSlashesDoNotSplit(self):
"""Tests that '/' is not treated specially if the path separator is '|'."""
self.assertEqual(('', 'a/b'), self.filesystem.SplitPath('a/b'))
def testEliminateTrailingSeparatorsFromHead(self):
self.assertEqual(('a', 'b'), self.filesystem.SplitPath('a|b'))
self.assertEqual(('a', 'b'), self.filesystem.SplitPath('a|||b'))
self.assertEqual(('|a', 'b'), self.filesystem.SplitPath('|a||b'))
self.assertEqual(('a|b', 'c'), self.filesystem.SplitPath('a|b|c'))
self.assertEqual(('|a|b', 'c'), self.filesystem.SplitPath('|a|b|c'))
def testRootSeparatorIsNotStripped(self):
self.assertEqual(('|', ''), self.filesystem.SplitPath('|||'))
self.assertEqual(('|', 'a'), self.filesystem.SplitPath('|a'))
self.assertEqual(('|', 'a'), self.filesystem.SplitPath('|||a'))
def testEmptyTailIfPathEndsInSeparator(self):
self.assertEqual(('a|b', ''), self.filesystem.SplitPath('a|b|'))
def testEmptyPathComponentsArePreservedInHead(self):
self.assertEqual(('|a||b', 'c'), self.filesystem.SplitPath('|a||b||c'))
class JoinPathTest(PathManipulationTests):
"""Tests JoinPath (which mimics os.path.join) using | as path separator."""
def testOneEmptyComponent(self):
self.assertEqual('', self.filesystem.JoinPaths(''))
def testMultipleEmptyComponents(self):
self.assertEqual('', self.filesystem.JoinPaths('', '', ''))
def testSeparatorsNotStrippedFromSingleComponent(self):
self.assertEqual('||a||', self.filesystem.JoinPaths('||a||'))
def testOneSeparatorAddedBetweenComponents(self):
self.assertEqual('a|b|c|d', self.filesystem.JoinPaths('a', 'b', 'c', 'd'))
def testNoSeparatorAddedForComponentsEndingInSeparator(self):
self.assertEqual('a|b|c', self.filesystem.JoinPaths('a|', 'b|', 'c'))
self.assertEqual('a|||b|||c',
self.filesystem.JoinPaths('a|||', 'b|||', 'c'))
def testComponentsPrecedingAbsoluteComponentAreIgnored(self):
self.assertEqual('|c|d', self.filesystem.JoinPaths('a', '|b', '|c', 'd'))
def testOneSeparatorAddedForTrailingEmptyComponents(self):
self.assertEqual('a|', self.filesystem.JoinPaths('a', ''))
self.assertEqual('a|', self.filesystem.JoinPaths('a', '', ''))
def testNoSeparatorAddedForLeadingEmptyComponents(self):
self.assertEqual('a', self.filesystem.JoinPaths('', 'a'))
def testInternalEmptyComponentsIgnored(self):
self.assertEqual('a|b', self.filesystem.JoinPaths('a', '', 'b'))
self.assertEqual('a|b|', self.filesystem.JoinPaths('a|', '', 'b|'))
class PathSeparatorTest(unittest.TestCase):
def testOsPathSepMatchesFakeFilesystemSeparator(self):
filesystem = fake_filesystem.FakeFilesystem(path_separator='!')
fake_os = fake_filesystem.FakeOsModule(filesystem)
self.assertEqual('!', fake_os.sep)
self.assertEqual('!', fake_os.path.sep)
if __name__ == '__main__':
unittest.main()
|
rec/echomesh
|
code/python/external/fake/fake_filesystem_test.py
|
Python
|
mit
| 107,596
|
[
"VisIt"
] |
95635fc61138c1f3a36a159f016e978bca4a60bf5641d5436666863293f2af1c
|
import time
import urllib2
import sys
"""
next step: make it create new file each time, run cleanup op
"""
def cc_pull(x):
try:
iteration = x[1] + 1
url = x[0]
callsign = x[2]
filename = callsign + str(iteration).rjust(3,'0') + ".txt"
record = open(filename,"w")
last_song = x[3]
response = urllib2.urlopen(url)
counter = 0
offset = 0
new_last_song = last_song
page = response.read()
while (counter < 20):
offset = page.find('}},{"track":')
song = page[page.find('":"')+3:page.find('","')]
artist = page[page.find('artistName":"')+13:page.find('","amgArtistId"')]
page = page[offset + 3:]
song = song.replace("\/","/")
artist = artist.replace("\/","/")
counter = counter + 1
entry = song + "|" + artist + "|" + callsign + "|" + str(time.time()) + "\n"
if (song == last_song):
break
elif (counter == 1):
new_last_song = song
record.write(entry)
else:
record.write(entry)
y = (x[0],iteration,x[2],new_last_song)
time.sleep(3)
record.close()
return y
except:
time.sleep(3)
return x
def gm_pull(x):
try:
iteration = x[1] + 1
url = x[0]
callsign = x[2]
filename = callsign + str(iteration).rjust(3,'0') + ".txt"
record = open(filename,"w")
last_song = x[3]
response = urllib2.urlopen(url)
counter = 0
first = True
new_last_song = last_song
while (counter < 10000):
line = response.readline()
if '" -' in line:
song = line[line.find('"')+1:line.find(" -")-1]
artist = line[line.find("- ")+1:]
artist = artist.strip()
entry = song + "|" + artist + "|" + callsign + "|" + str(time.time()) + "\n"
record.write(entry)
"""
line = response.readline()
line = response.readline()
line = response.readline()
line = response.readline()
if "Visit iTunes" in line:
itunes_link = line[line.find('href="')+5:line.find('" target="')]
TO DO: CREATE FILE
WRITE ITUNES LINKS TO IT
THEN, LATER, GRAB ALBUM RELEASE YEARS
"""
counter = counter + 1
y = (x[0],iteration,x[2],new_last_song)
record.close()
time.sleep(3)
return y
except:
time.sleep(3)
return x
def cx_pull(x):
try:
iteration = x[1] + 1
url = x[0]
callsign = x[2]
filename = callsign + str(iteration).rjust(3,'0') + ".txt"
record = open(filename,"w")
last_song = x[3]
response = urllib2.urlopen(url)
counter = 0
first = True
new_last_song = last_song
while (counter < 10000):
line = response.readline()
if 'cmPlaylistContent' in line:
song = line[line.find('/">')+3:line.find("</a></strong>")]
artist = line[line.find("alt=")+5:line.find('" class="')]
artist = artist.strip()
                song = song.replace("&#39;","'")
                artist = artist.replace("&#39;","'")
entry = song + "|" + artist + "|" + callsign + "|" + str(time.time()) + "\n"
if (song == last_song):
break
elif first:
new_last_song = song
record.write(entry)
first = False
else:
record.write(entry)
"""
if "Download Song:" in line:
line = response.readline()
line = response.readline()
if "apple" in line:
itunes_link = line[line.find('href="')+5:line.find('">iTu')
TO DO: CREATE FILE
WRITE ITUNES LINKS TO IT
THEN, LATER, GRAB ALBUM RELEASE YEARS
"""
counter = counter + 1
y = (x[0],iteration,x[2],new_last_song)
record.close()
time.sleep(3)
return y
except:
time.sleep(3)
return x
def cb_pull(x):
try:
iteration = x[1] + 1
url = x[0]
callsign = x[2]
filename = callsign + str(iteration).rjust(3,'0') + ".txt"
record = open(filename,"w")
last_song = x[3]
response = urllib2.urlopen(url)
counter = 0
first = True
new_last_song = last_song
while (counter < 10000):
line = response.readline()
if '<div class="track_title"' in line:
song = line[line.find('rel=')+5:line.find('">')]
line = response.readline()
line = response.readline()
artist = line[line.find('rel=')+5:line.find('">')]
line = response.readline()
line = response.readline()
album = line[line.find('rel=')+5:line.find('">')]
                song = song.replace("&#39;","'")
                artist = artist.replace("&#39;","'")
                album = album.replace("&#39;","'")
entry = song + "|" + artist + "|" + callsign + "|" + str(time.time()) + "\n"
record.write(entry)
counter = counter + 1
y = (x[0],iteration,x[2],new_last_song)
time.sleep(3)
record.close()
return y
except:
time.sleep(3)
return x
def tg_pull(x):
try:
iteration = x[1] + 1
url = x[0]
callsign = x[2]
filename = callsign + str(iteration).rjust(3,'0') + ".txt"
record = open(filename,"w")
last_song = x[3]
response = urllib2.urlopen(url)
counter = 0
first = True
new_last_song = last_song
while (counter < 10000):
line = response.readline()
if '<div class="song"><' in line:
counter = counter + 1
elif '<div class="song">' in line:
song = line[line.find('"song">')+7:line.find('</div>')]
                song = song.replace("&#39;","'")
line = response.readline()
artist = line[line.find('<div>')+5:line.find(' <span')]
                song = song.replace("&#39;","'")
                artist = artist.replace("&#39;","'")
                song = song.replace("&amp;","&")
                artist = artist.replace("&amp;","&")
entry = song + "|" + artist + "|" + callsign + "|" + str(time.time()) + "\n"
if (song == last_song):
break
elif first:
new_last_song = song
record.write(entry)
first = False
else:
record.write(entry)
counter = counter + 1
y = (x[0],iteration,x[2],new_last_song)
time.sleep(3)
record.close()
return y
except:
time.sleep(3)
return x
def ll_pull(x):
try:
iteration = x[1] + 1
url = x[0]
callsign = x[2]
filename = callsign + str(iteration).rjust(3,'0') + ".txt"
record = open(filename,"w")
last_song = x[3]
response = urllib2.urlopen(url)
counter = 0
new_last_song = last_song
while (counter < 10000):
line = response.readline()
if 'var songs = ' in line:
tencount = 0
while (tencount < 10):
song = line[line.find('"title":"')+9:line.find('","')]
line = line[line.find('"artist":')+10:]
artist = line[:line.find('"')]
line = line[line.find('},{"timestamp":'):]
entry = song + "|" + artist + "|" + callsign + "|" + str(time.time()) + "\n"
new_last_song = song
record.write(entry)
tencount = tencount + 1
break
counter = counter + 1
y = (x[0],iteration,x[2],new_last_song)
time.sleep(3)
record.close()
return y
except:
time.sleep(3)
return x
def kx_pull(x):
try:
iteration = x[1] + 1
url = x[0]
callsign = x[2]
filename = callsign + str(iteration).rjust(3,'0') + ".txt"
record = open(filename,"w")
last_song = x[3]
response = urllib2.urlopen(url)
counter = 0
first = True
new_last_song = last_song
while (counter < 10000):
line = response.readline()
if 'play-song' in line:
song = line[line.find('>')+1:line.find("</")]
line = response.readline()
artist = line[line.find('by ')+3:line.find('</')]
entry = song + "|" + artist + "|" + callsign + "|" + str(time.time()) + "\n"
if (song == last_song):
break
elif first:
new_last_song = song
record.write(entry)
first = False
else:
record.write(entry)
counter = counter + 1
y = (x[0],iteration,x[2],new_last_song)
time.sleep(3)
record.close()
return y
except:
time.sleep(3)
return x
def ke_pull(x):
try:
iteration = x[1] + 1
url = x[0]
callsign = x[2]
filename = callsign + str(iteration).rjust(3,'0') + ".txt"
record = open(filename,"w")
last_song = x[3]
response = urllib2.urlopen(url)
counter = 0
first = True
new_last_song = last_song
while (counter < 10000):
line = response.readline()
if 'views-field-field-title' in line:
song = line[line.find('field-content">')+15:line.find("</div>")]
line = response.readline()
artist = line[line.find('<span>')+6:line.find('</span>')]
                song = song.replace("&#39;","'")
                artist = artist.replace("&#39;","'")
entry = song + "|" + artist + "|" + callsign + "|" + str(time.time()) + "\n"
if (song == last_song):
break
elif first:
new_last_song = song
record.write(entry)
first = False
else:
record.write(entry)
counter = counter + 1
y = (x[0],iteration,x[2],new_last_song)
record.close()
time.sleep(3)
return y
except:
time.sleep(3)
return x
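# All of the *_pull helpers above repeat the same bookkeeping: open a
# numbered output file, fetch the URL, dedupe against last_song, back
# off for 3 seconds. A hedged sketch of how that boilerplate could be
# factored out; _make_pull and parse_lines are hypothetical names, not
# part of the original script.
def _make_pull(parse_lines):
    def pull(state):
        url, iteration, callsign, last_song = state
        try:
            record = open(callsign + str(iteration + 1).rjust(3, '0') + ".txt", "w")
            try:
                response = urllib2.urlopen(url)
                # parse_lines would hold the per-site scraping and
                # return the newest song title it saw.
                new_last = parse_lines(response, record, callsign, last_song)
            finally:
                record.close()
            return (url, iteration + 1, callsign, new_last)
        except Exception:
            # narrower than a bare except, so Ctrl-C still stops the loop
            return state
        finally:
            time.sleep(3)
    return pull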
cc1 = ("http://www.q1043.com/services/now_playing.html?streamId=1465&limit=25",0,"WAXQ","")
cc2 = ("http://www.lonestar925.com/services/now_playing.html?streamId=3379&limit=25",0,"KZPS","")
cc3 = ("http://www.wbig.com/services/now_playing.html?streamId=2505&limit=25",0,"WBIG","")
cc4 = ("http://www.big1059.com/services/now_playing.html?streamId=557&limit=25",0,"WBGG","")
cc5 = ("http://www.thefox.com/services/now_playing.html?streamId=393&limit=25",0,"KRFX","")
cc6 = ("http://www.dve.com/services/now_playing.html?streamId=2017&limit=25",0,"WDVE","")
cc7 = ("http://www.wrfx.com/services/now_playing.html?streamId=1613&limit=25",0,"WRFX","")
cc8 = ("http://www.kzep.com/services/now_playing.html?streamId=4051&limit=25",0,"KZEP","")
cc9 = ("http://www.101kgb.com/services/now_playing.html?streamId=237&limit=25",0,"KGB","")
gm1 = ("http://www.wcsx.com/recentlyplayed.aspx",0,"WCSX","")
gm2 = ("http://www.wmgk.com/broadcasthistory.aspx",0,"WMGK","")
cx1 = ("http://www.1073theeagle.com/lsp/",0,"WXGL","")
cx2 = ("http://www.houstonseagle.com/lsp/",0,"KGLK","")
cx3 = ("http://www.971theriver.com/lsp/",0,"WSRV","")
cb1 = ("http://wzlx.cbslocal.com/playlist/",0,"WZLX","")
cb2 = ("http://wncx.cbslocal.com/playlist/",0,"WNCX","")
cb3 = ("http://kzok.cbslocal.com/playlist/",0,"KZOK","")
tg1 = ("http://wlup.tunegenie.com/onair/",0,"WLUP","")
tg2 = ("http://wofx.tunegenie.com/onair/",0,"WOFX","")
tg3 = ("http://kgon.tunegenie.com/onair/",0,"KGON","")
tg4 = ("http://kcfx.tunegenie.com/onair/",0,"KCFX","")
tg5 = ("http://klos.tunegenie.com/onair/",0,"KLOS","")
tg6 = ("http://kseg.tunegenie.com/onair/",0,"KSEG","")
tg7 = ("http://kufx.tunegenie.com/onair/",0,"KUFX","")
ll1 = ("http://player.listenlive.co/24751/en/songhistory",0,"KQRS","")
ll2 = ("http://player.listenlive.co/25951/en/songhistory",0,"KSAN","")
ke1 = ("http://www.kshe95.com/broadcasthistory",0,"KSHE","")
kx1 = ("http://kslx.com/playlist",0,"KSLX","")
while True:
now = time.time()
timer = time.localtime(now)
    # runs at minute 58, i.e. two minutes before the top of the hour
if (timer[4] == 58):
cc1 = cc_pull(cc1)
cc2 = cc_pull(cc2)
cc3 = cc_pull(cc3)
cc4 = cc_pull(cc4)
cc5 = cc_pull(cc5)
cc6 = cc_pull(cc6)
cc7 = cc_pull(cc7)
cc8 = cc_pull(cc8)
cc9 = cc_pull(cc9)
cx1 = cx_pull(cx1)
cx2 = cx_pull(cx2)
cx3 = cx_pull(cx3)
tg1 = tg_pull(tg1)
tg2 = tg_pull(tg2)
tg3 = tg_pull(tg3)
tg4 = tg_pull(tg4)
tg5 = tg_pull(tg5)
tg6 = tg_pull(tg6)
tg7 = tg_pull(tg7)
ll1 = ll_pull(ll1)
ll2 = ll_pull(ll2)
ke1 = ke_pull(ke1)
kx1 = kx_pull(kx1)
time.sleep(30)
elif (timer[4] == 28):
cx1 = cx_pull(cx1)
cx2 = cx_pull(cx2)
cx3 = cx_pull(cx3)
ll1 = ll_pull(ll1)
ll2 = ll_pull(ll2)
kx1 = kx_pull(kx1)
time.sleep(30)
elif (timer[4] == 54 and timer[3] == 23):
gm1 = gm_pull(gm1)
gm2 = gm_pull(gm2)
cb1 = cb_pull(cb1)
cb2 = cb_pull(cb2)
cb3 = cb_pull(cb3)
time.sleep(30)
time.sleep(30)
|
shamindrasorg/eda_play
|
data/classic-rock/radio.py
|
Python
|
mit
| 11,402
|
[
"VisIt"
] |
91786eb2f74c422412ebb079922c30c867c590db28db2b40d300d5121921c229
|
from chiplotle.core.visitor import Visitor
class TransformVisitor(Visitor):
'''"Crawler" pattern encapsulation for transformations applied to _Shapes.
Separates the "what it does" (action) from "how it does it" (traversal).'''
def __init__(self, transform):
self.transform = transform
def visit_Group(self, node, *args, **kwargs):
for s in node:
self.visit(s, *args, **kwargs)
def visit_TransformLock(self, node, *args, **kwargs):
if self.transform.func_name in node.lock_transforms:
self._handle_transform_map(node, *args, **kwargs)
else:
for s in node:
self.visit(s, *args, **kwargs)
def visit__Shape(self, node, *args, **kwargs):
node.points = self.transform(node.points, *args, **kwargs)
## private ##
def _handle_transform_map(self, node, *args, **kwargs):
tmp = self.transform
t, p = node.get_transform(self.transform)(node.points, *args, **kwargs)
self.transform = t
for s in node:
self.visit(s, *p)
self.transform = tmp
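# A minimal usage sketch, assuming a Group of shapes and a points-level
# transform function (both hypothetical here, not from this module):
#
#   visitor = TransformVisitor(rotate_points)
#   visitor.visit(group, angle)
#
# visit_Group recurses into containers, visit__Shape applies the
# transform to each leaf's points, and a TransformLock that locks this
# transform substitutes its own mapped version for its whole subtree.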
|
drepetto/chiplotle
|
chiplotle/geometry/transforms/transformvisitor.py
|
Python
|
gpl-3.0
| 1,114
|
[
"VisIt"
] |
cc1a156674a910aa3e2224a405ce080885ccf95019cdd2a6ebb4da03c68f1075
|
## This file is part of PyANTLR. See LICENSE.txt for license
## details..........Copyright (C) Wolfgang Haefelinger, 2004.
## get sys module
import sys
version = sys.version.split()[0]
if version < '2.2.1':
False = 0
if version < '2.3':
True = not False
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### global symbols ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### ANTLR Standard Tokens
SKIP = -1
INVALID_TYPE = 0
EOF_TYPE = 1
EOF = 1
NULL_TREE_LOOKAHEAD = 3
MIN_USER_TYPE = 4
### ANTLR's EOF Symbol
EOF_CHAR = ''
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### general functions ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
def error(fmt,*args):
if fmt:
print "error: ", fmt % tuple(args)
def ifelse(cond,_then,_else):
if cond :
r = _then
else:
r = _else
return r
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### ANTLR Exceptions ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class ANTLRException(Exception):
def __init__(self, *args):
Exception.__init__(self, *args)
class RecognitionException(ANTLRException):
def __init__(self, *args):
ANTLRException.__init__(self, *args)
self.fileName = None
self.line = -1
self.column = -1
if len(args) >= 2:
self.fileName = args[1]
if len(args) >= 3:
self.line = args[2]
if len(args) >= 4:
self.column = args[3]
def __str__(self):
buf = ['']
if self.fileName:
buf.append(self.fileName + ":")
if self.line != -1:
if not self.fileName:
buf.append("line ")
buf.append(str(self.line))
if self.column != -1:
buf.append(":" + str(self.column))
buf.append(":")
buf.append(" ")
return str('').join(buf)
__repr__ = __str__
class NoViableAltException(RecognitionException):
def __init__(self, *args):
RecognitionException.__init__(self, *args)
self.token = None
self.node = None
if isinstance(args[0],AST):
self.node = args[0]
elif isinstance(args[0],Token):
self.token = args[0]
else:
raise TypeError("NoViableAltException requires Token or AST argument")
def __str__(self):
if self.token:
line = self.token.getLine()
col = self.token.getColumn()
text = self.token.getText()
return "unexpected symbol at line %s (column %s): \"%s\"" % (line,col,text)
if self.node == ASTNULL:
return "unexpected end of subtree"
assert self.node
### hackish, we assume that an AST contains method getText
return "unexpected node: %s" % (self.node.getText())
__repr__ = __str__
class NoViableAltForCharException(RecognitionException):
def __init__(self, *args):
self.foundChar = None
if len(args) == 2:
self.foundChar = args[0]
scanner = args[1]
RecognitionException.__init__(self, "NoViableAlt",
scanner.getFilename(),
scanner.getLine(),
scanner.getColumn())
elif len(args) == 4:
self.foundChar = args[0]
fileName = args[1]
line = args[2]
column = args[3]
RecognitionException.__init__(self, "NoViableAlt",
fileName, line, column)
else:
RecognitionException.__init__(self, "NoViableAlt",
'', -1, -1)
def __str__(self):
mesg = "unexpected char: "
if self.foundChar >= ' ' and self.foundChar <= '~':
mesg += "'" + self.foundChar + "'"
elif self.foundChar:
mesg += "0x" + hex(ord(self.foundChar)).upper()[2:]
else:
mesg += "<None>"
return mesg
__repr__ = __str__
class SemanticException(RecognitionException):
def __init__(self, *args):
RecognitionException.__init__(self, *args)
class MismatchedCharException(RecognitionException):
NONE = 0
CHAR = 1
NOT_CHAR = 2
RANGE = 3
NOT_RANGE = 4
SET = 5
NOT_SET = 6
def __init__(self, *args):
self.args = args
if len(args) == 5:
# Expected range / not range
if args[3]:
self.mismatchType = MismatchedCharException.NOT_RANGE
else:
self.mismatchType = MismatchedCharException.RANGE
self.foundChar = args[0]
self.expecting = args[1]
self.upper = args[2]
self.scanner = args[4]
RecognitionException.__init__(self, "Mismatched char range",
self.scanner.getFilename(),
self.scanner.getLine(),
self.scanner.getColumn())
elif len(args) == 4 and isinstance(args[1], str):
# Expected char / not char
if args[2]:
self.mismatchType = MismatchedCharException.NOT_CHAR
else:
self.mismatchType = MismatchedCharException.CHAR
self.foundChar = args[0]
self.expecting = args[1]
self.scanner = args[3]
RecognitionException.__init__(self, "Mismatched char",
self.scanner.getFilename(),
self.scanner.getLine(),
self.scanner.getColumn())
elif len(args) == 4 and isinstance(args[1], BitSet):
# Expected BitSet / not BitSet
if args[2]:
self.mismatchType = MismatchedCharException.NOT_SET
else:
self.mismatchType = MismatchedCharException.SET
self.foundChar = args[0]
self.set = args[1]
self.scanner = args[3]
RecognitionException.__init__(self, "Mismatched char set",
self.scanner.getFilename(),
self.scanner.getLine(),
self.scanner.getColumn())
else:
self.mismatchType = MismatchedCharException.NONE
RecognitionException.__init__(self, "Mismatched char")
## Append a char to the msg buffer. If special,
# then show escaped version
#
def appendCharName(self, sb, c):
if not c or c == 65535:
# 65535 = (char) -1 = EOF
sb.append("'<EOF>'")
elif c == '\n':
sb.append("'\\n'")
elif c == '\r':
sb.append("'\\r'");
elif c == '\t':
sb.append("'\\t'")
else:
sb.append('\'' + c + '\'')
##
# Returns an error message with line number/column information
#
def __str__(self):
sb = ['']
sb.append(RecognitionException.__str__(self))
if self.mismatchType == MismatchedCharException.CHAR:
sb.append("expecting ")
self.appendCharName(sb, self.expecting)
sb.append(", found ")
self.appendCharName(sb, self.foundChar)
elif self.mismatchType == MismatchedCharException.NOT_CHAR:
sb.append("expecting anything but '")
self.appendCharName(sb, self.expecting)
sb.append("'; got it anyway")
elif self.mismatchType in [MismatchedCharException.RANGE, MismatchedCharException.NOT_RANGE]:
sb.append("expecting char ")
if self.mismatchType == MismatchedCharException.NOT_RANGE:
sb.append("NOT ")
sb.append("in range: ")
         self.appendCharName(sb, self.expecting)
         sb.append("..")
         self.appendCharName(sb, self.upper)
         sb.append(", found ")
         self.appendCharName(sb, self.foundChar)
elif self.mismatchType in [MismatchedCharException.SET, MismatchedCharException.NOT_SET]:
sb.append("expecting ")
if self.mismatchType == MismatchedCharException.NOT_SET:
sb.append("NOT ")
sb.append("one of (")
for i in range(len(self.set)):
self.appendCharName(sb, self.set[i])
sb.append("), found ")
self.appendCharName(sb, self.foundChar)
return str().join(sb).strip()
__repr__ = __str__
class MismatchedTokenException(RecognitionException):
NONE = 0
TOKEN = 1
NOT_TOKEN = 2
RANGE = 3
NOT_RANGE = 4
SET = 5
NOT_SET = 6
def __init__(self, *args):
self.args = args
self.tokenNames = []
self.token = None
self.tokenText = ''
self.node = None
if len(args) == 6:
# Expected range / not range
if args[3]:
self.mismatchType = MismatchedTokenException.NOT_RANGE
else:
self.mismatchType = MismatchedTokenException.RANGE
self.tokenNames = args[0]
self.expecting = args[2]
self.upper = args[3]
self.fileName = args[5]
elif len(args) == 4 and isinstance(args[2], int):
# Expected token / not token
if args[3]:
self.mismatchType = MismatchedTokenException.NOT_TOKEN
else:
self.mismatchType = MismatchedTokenException.TOKEN
self.tokenNames = args[0]
self.expecting = args[2]
elif len(args) == 4 and isinstance(args[2], BitSet):
# Expected BitSet / not BitSet
if args[3]:
self.mismatchType = MismatchedTokenException.NOT_SET
else:
self.mismatchType = MismatchedTokenException.SET
self.tokenNames = args[0]
self.set = args[2]
else:
self.mismatchType = MismatchedTokenException.NONE
RecognitionException.__init__(self, "Mismatched Token: expecting any AST node", "<AST>", -1, -1)
if len(args) >= 2:
if isinstance(args[1],Token):
self.token = args[1]
self.tokenText = self.token.getText()
RecognitionException.__init__(self, "Mismatched Token",
self.fileName,
self.token.getLine(),
self.token.getColumn())
elif isinstance(args[1],AST):
self.node = args[1]
self.tokenText = str(self.node)
RecognitionException.__init__(self, "Mismatched Token",
"<AST>",
self.node.getLine(),
self.node.getColumn())
else:
self.tokenText = "<empty tree>"
RecognitionException.__init__(self, "Mismatched Token",
"<AST>", -1, -1)
def appendTokenName(self, sb, tokenType):
if tokenType == INVALID_TYPE:
sb.append("<Set of tokens>")
elif tokenType < 0 or tokenType >= len(self.tokenNames):
sb.append("<" + str(tokenType) + ">")
else:
sb.append(self.tokenNames[tokenType])
##
# Returns an error message with line number/column information
#
def __str__(self):
sb = ['']
sb.append(RecognitionException.__str__(self))
if self.mismatchType == MismatchedTokenException.TOKEN:
sb.append("expecting ")
self.appendTokenName(sb, self.expecting)
sb.append(", found " + self.tokenText)
elif self.mismatchType == MismatchedTokenException.NOT_TOKEN:
sb.append("expecting anything but '")
self.appendTokenName(sb, self.expecting)
sb.append("'; got it anyway")
elif self.mismatchType in [MismatchedTokenException.RANGE, MismatchedTokenException.NOT_RANGE]:
sb.append("expecting token ")
if self.mismatchType == MismatchedTokenException.NOT_RANGE:
sb.append("NOT ")
sb.append("in range: ")
         self.appendTokenName(sb, self.expecting)
         sb.append("..")
         self.appendTokenName(sb, self.upper)
sb.append(", found " + self.tokenText)
elif self.mismatchType in [MismatchedTokenException.SET, MismatchedTokenException.NOT_SET]:
sb.append("expecting ")
if self.mismatchType == MismatchedTokenException.NOT_SET:
sb.append("NOT ")
sb.append("one of (")
for i in range(len(self.set)):
self.appendTokenName(sb, self.set[i])
sb.append("), found " + self.tokenText)
return str().join(sb).strip()
__repr__ = __str__
class TokenStreamException(ANTLRException):
def __init__(self, *args):
ANTLRException.__init__(self, *args)
# Wraps an Exception in a TokenStreamException
class TokenStreamIOException(TokenStreamException):
def __init__(self, *args):
if args and isinstance(args[0], Exception):
io = args[0]
TokenStreamException.__init__(self, str(io))
self.io = io
else:
TokenStreamException.__init__(self, *args)
self.io = self
# Wraps a RecognitionException in a TokenStreamException
class TokenStreamRecognitionException(TokenStreamException):
def __init__(self, *args):
if args and isinstance(args[0], RecognitionException):
recog = args[0]
TokenStreamException.__init__(self, str(recog))
self.recog = recog
else:
raise TypeError("TokenStreamRecognitionException requires RecognitionException argument")
def __str__(self):
return str(self.recog)
__repr__ = __str__
class TokenStreamRetryException(TokenStreamException):
def __init__(self, *args):
TokenStreamException.__init__(self, *args)
class CharStreamException(ANTLRException):
def __init__(self, *args):
ANTLRException.__init__(self, *args)
# Wraps an Exception in a CharStreamException
class CharStreamIOException(CharStreamException):
def __init__(self, *args):
if args and isinstance(args[0], Exception):
io = args[0]
CharStreamException.__init__(self, str(io))
self.io = io
else:
CharStreamException.__init__(self, *args)
self.io = self
class TryAgain(Exception):
pass
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### Token ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class Token(object):
SKIP = -1
INVALID_TYPE = 0
EOF_TYPE = 1
EOF = 1
NULL_TREE_LOOKAHEAD = 3
MIN_USER_TYPE = 4
def __init__(self,**argv):
try:
self.type = argv['type']
except:
self.type = INVALID_TYPE
try:
self.text = argv['text']
except:
self.text = "<no text>"
def isEOF(self):
return (self.type == EOF_TYPE)
def getColumn(self):
return 0
def getLine(self):
return 0
def getFilename(self):
return None
def setFilename(self,name):
return self
def getText(self):
return "<no text>"
def setText(self,text):
if isinstance(text,str):
pass
else:
raise TypeError("Token.setText requires string argument")
return self
def setColumn(self,column):
return self
def setLine(self,line):
return self
def getType(self):
return self.type
def setType(self,type):
if isinstance(type,int):
self.type = type
else:
raise TypeError("Token.setType requires integer argument")
return self
def toString(self):
## not optimal
type_ = self.type
if type_ == 3:
tval = 'NULL_TREE_LOOKAHEAD'
elif type_ == 1:
tval = 'EOF_TYPE'
elif type_ == 0:
tval = 'INVALID_TYPE'
elif type_ == -1:
tval = 'SKIP'
else:
tval = type_
return '["%s",<%s>]' % (self.getText(),tval)
__str__ = toString
__repr__ = toString
### static attribute ..
Token.badToken = Token( type=INVALID_TYPE, text="<no text>")
if __name__ == "__main__":
print "testing .."
T = Token.badToken
print T
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### CommonToken ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class CommonToken(Token):
def __init__(self,**argv):
Token.__init__(self,**argv)
self.line = 0
self.col = 0
try:
self.line = argv['line']
except:
pass
try:
self.col = argv['col']
except:
pass
def getLine(self):
return self.line
def getText(self):
return self.text
def getColumn(self):
return self.col
def setLine(self,line):
self.line = line
return self
def setText(self,text):
self.text = text
return self
def setColumn(self,col):
self.col = col
return self
def toString(self):
## not optimal
type_ = self.type
if type_ == 3:
tval = 'NULL_TREE_LOOKAHEAD'
elif type_ == 1:
tval = 'EOF_TYPE'
elif type_ == 0:
tval = 'INVALID_TYPE'
elif type_ == -1:
tval = 'SKIP'
else:
tval = type_
d = {
'text' : self.text,
'type' : tval,
'line' : self.line,
'colm' : self.col
}
fmt = '["%(text)s",<%(type)s>,line=%(line)s,col=%(colm)s]'
return fmt % d
__str__ = toString
__repr__ = toString
if __name__ == '__main__' :
T = CommonToken()
print T
T = CommonToken(col=15,line=1,text="some text", type=5)
print T
T = CommonToken()
T.setLine(1).setColumn(15).setText("some text").setType(5)
print T
print T.getLine()
print T.getColumn()
print T.getText()
print T.getType()
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### CommonHiddenStreamToken ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class CommonHiddenStreamToken(CommonToken):
def __init__(self,*args):
CommonToken.__init__(self,*args)
self.hiddenBefore = None
self.hiddenAfter = None
def getHiddenAfter(self):
return self.hiddenAfter
def getHiddenBefore(self):
return self.hiddenBefore
def setHiddenAfter(self,t):
self.hiddenAfter = t
def setHiddenBefore(self, t):
self.hiddenBefore = t
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### Queue ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
## Shall be a circular buffer on tokens ..
class Queue(object):
def __init__(self):
self.buffer = [] # empty list
def append(self,item):
self.buffer.append(item)
def elementAt(self,index):
return self.buffer[index]
def reset(self):
self.buffer = []
def removeFirst(self):
self.buffer.pop(0)
def length(self):
return len(self.buffer)
def __str__(self):
return str(self.buffer)
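### Quick self-test sketch, following the __main__ demos used elsewhere
### in this file; despite the circular-buffer note above, Queue behaves
### as a plain FIFO.
if __name__ == "__main__":
   q = Queue()
   q.append('a')
   q.append('b')
   assert q.elementAt(0) == 'a' and q.length() == 2
   q.removeFirst()
   assert q.elementAt(0) == 'b'
   print "Queue ok:", q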
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### InputBuffer ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class InputBuffer(object):
def __init__(self):
self.nMarkers = 0
self.markerOffset = 0
self.numToConsume = 0
self.queue = Queue()
def __str__(self):
return "(%s,%s,%s,%s)" % (
self.nMarkers,
self.markerOffset,
self.numToConsume,
self.queue)
def __repr__(self):
return str(self)
def commit(self):
self.nMarkers -= 1
def consume(self) :
self.numToConsume += 1
## probably better to return a list of items
## because of unicode. Or return a unicode
## string ..
def getLAChars(self) :
i = self.markerOffset
n = self.queue.length()
s = ''
      while i<n:
         s += self.queue.elementAt(i)
         i += 1
      return s
## probably better to return a list of items
## because of unicode chars
def getMarkedChars(self) :
s = ''
i = 0
n = self.markerOffset
      while i<n:
         s += self.queue.elementAt(i)
         i += 1
      return s
def isMarked(self) :
return self.nMarkers != 0
def fill(self,k):
### abstract method
raise NotImplementedError()
def LA(self,k) :
self.fill(k)
return self.queue.elementAt(self.markerOffset + k - 1)
def mark(self) :
self.syncConsume()
self.nMarkers += 1
return self.markerOffset
def rewind(self,mark) :
self.syncConsume()
self.markerOffset = mark
self.nMarkers -= 1
def reset(self) :
self.nMarkers = 0
self.markerOffset = 0
self.numToConsume = 0
self.queue.reset()
def syncConsume(self) :
while self.numToConsume > 0:
if self.nMarkers > 0:
# guess mode -- leave leading characters and bump offset.
self.markerOffset += 1
else:
# normal mode -- remove first character
self.queue.removeFirst()
self.numToConsume -= 1
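### Mark/rewind protocol sketch (a concrete subclass supplies fill()):
###
###   m = buf.mark()     # start a guess; consumed chars are retained
###   c = buf.LA(1)      # arbitrary lookahead while guessing
###   buf.rewind(m)      # backtrack to the mark ...
###   buf.commit()       # ... or accept the guess instead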
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### CharBuffer ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class CharBuffer(InputBuffer):
def __init__(self,reader):
##assert isinstance(reader,file)
super(CharBuffer,self).__init__()
## a reader is supposed to be anything that has
## a method 'read(int)'.
self.input = reader
def __str__(self):
base = super(CharBuffer,self).__str__()
return "CharBuffer{%s,%s" % (base,str(input))
def fill(self,amount):
try:
self.syncConsume()
while self.queue.length() < (amount + self.markerOffset) :
## retrieve just one char - what happend at end
## of input?
c = self.input.read(1)
### python's behaviour is to return the empty string on
### EOF, ie. no exception whatsoever is thrown. An empty
### python string has the nice feature that it is of
### type 'str' and "not ''" would return true. Contrary,
### one can't do this: '' in 'abc'. This should return
### false, but all we get is then a TypeError as an
### empty string is not a character.
### Let's assure then that we have either seen a
### character or an empty string (EOF).
assert len(c) == 0 or len(c) == 1
### And it shall be of type string (ASCII or UNICODE).
assert isinstance(c,str) or isinstance(c,unicode)
### Just append EOF char to buffer. Note that buffer may
### contain then just more than one EOF char ..
### use unicode chars instead of ASCII ..
self.queue.append(c)
except Exception,e:
raise CharStreamIOException(e)
##except: # (mk) Cannot happen ...
##error ("unexpected exception caught ..")
##assert 0
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### LexerSharedInputState ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class LexerSharedInputState(object):
def __init__(self,ibuf):
assert isinstance(ibuf,InputBuffer)
self.input = ibuf
self.column = 1
self.line = 1
self.tokenStartColumn = 1
self.tokenStartLine = 1
self.guessing = 0
self.filename = None
def reset(self):
self.column = 1
self.line = 1
self.tokenStartColumn = 1
self.tokenStartLine = 1
self.guessing = 0
self.filename = None
self.input.reset()
def LA(self,k):
return self.input.LA(k)
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### TokenStream ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class TokenStream(object):
def nextToken(self):
pass
def __iter__(self):
return TokenStreamIterator(self)
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### TokenStreamIterator ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class TokenStreamIterator(object):
def __init__(self,inst):
if isinstance(inst,TokenStream):
self.inst = inst
return
raise TypeError("TokenStreamIterator requires TokenStream object")
def next(self):
assert self.inst
item = self.inst.nextToken()
if not item or item.isEOF():
raise StopIteration()
return item
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### TokenStreamSelector ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class TokenStreamSelector(TokenStream):
def __init__(self):
self._input = None
self._stmap = {}
self._stack = []
def addInputStream(self,stream,key):
self._stmap[key] = stream
def getCurrentStream(self):
return self._input
def getStream(self,sname):
try:
stream = self._stmap[sname]
except:
raise ValueError("TokenStream " + sname + " not found");
return stream;
def nextToken(self):
while 1:
try:
return self._input.nextToken()
except TokenStreamRetryException,r:
### just retry "forever"
pass
def pop(self):
stream = self._stack.pop();
self.select(stream);
return stream;
def push(self,arg):
self._stack.append(self._input);
self.select(arg)
def retry(self):
raise TokenStreamRetryException()
def select(self,arg):
if isinstance(arg,TokenStream):
self._input = arg
return
if isinstance(arg,str):
self._input = self.getStream(arg)
return
raise TypeError("TokenStreamSelector.select requires " +
"TokenStream or string argument")
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### TokenStreamBasicFilter ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class TokenStreamBasicFilter(TokenStream):
def __init__(self,input):
self.input = input;
self.discardMask = BitSet()
def discard(self,arg):
if isinstance(arg,int):
self.discardMask.add(arg)
return
if isinstance(arg,BitSet):
         self.discardMask = arg
return
raise TypeError("TokenStreamBasicFilter.discard requires" +
"integer or BitSet argument")
def nextToken(self):
tok = self.input.nextToken()
while tok and self.discardMask.member(tok.getType()):
tok = self.input.nextToken()
return tok
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### TokenStreamHiddenTokenFilter ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class TokenStreamHiddenTokenFilter(TokenStreamBasicFilter):
def __init__(self,input):
TokenStreamBasicFilter.__init__(self,input)
self.hideMask = BitSet()
self.nextMonitoredToken = None
self.lastHiddenToken = None
self.firstHidden = None
def consume(self):
self.nextMonitoredToken = self.input.nextToken()
def consumeFirst(self):
self.consume()
p = None;
while self.hideMask.member(self.LA(1).getType()) or \
self.discardMask.member(self.LA(1).getType()):
if self.hideMask.member(self.LA(1).getType()):
if not p:
p = self.LA(1)
else:
p.setHiddenAfter(self.LA(1))
self.LA(1).setHiddenBefore(p)
p = self.LA(1)
self.lastHiddenToken = p
if not self.firstHidden:
self.firstHidden = p
self.consume()
def getDiscardMask(self):
return self.discardMask
def getHiddenAfter(self,t):
return t.getHiddenAfter()
def getHiddenBefore(self,t):
return t.getHiddenBefore()
def getHideMask(self):
return self.hideMask
def getInitialHiddenToken(self):
return self.firstHidden
def hide(self,m):
if isinstance(m,int):
self.hideMask.add(m)
return
      if isinstance(m,BitSet):
self.hideMask = m
return
def LA(self,i):
return self.nextMonitoredToken
def nextToken(self):
if not self.LA(1):
self.consumeFirst()
monitored = self.LA(1)
monitored.setHiddenBefore(self.lastHiddenToken)
self.lastHiddenToken = None
self.consume()
p = monitored
while self.hideMask.member(self.LA(1).getType()) or \
self.discardMask.member(self.LA(1).getType()):
if self.hideMask.member(self.LA(1).getType()):
p.setHiddenAfter(self.LA(1))
if p != monitored:
self.LA(1).setHiddenBefore(p)
p = self.lastHiddenToken = self.LA(1)
self.consume()
return monitored
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### StringBuffer ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class StringBuffer:
def __init__(self,string=None):
if string:
self.text = list(string)
else:
self.text = []
def setLength(self,sz):
if not sz :
self.text = []
return
assert sz>0
if sz >= self.length():
return
### just reset to empty buffer
self.text = self.text[0:sz]
def length(self):
return len(self.text)
def append(self,c):
self.text.append(c)
### return buffer as string. Arg 'a' is used as index
## into the buffer and 2nd argument shall be the length.
## If 2nd args is absent, we return chars till end of
## buffer starting with 'a'.
def getString(self,a=None,length=None):
if not a :
a = 0
assert a>=0
if a>= len(self.text) :
return ""
if not length:
## no second argument
L = self.text[a:]
else:
assert (a+length) <= len(self.text)
b = a + length
L = self.text[a:b]
s = ""
for x in L : s += x
return s
toString = getString ## alias
def __str__(self):
return str(self.text)
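### getString() slicing demo, following this file's __main__ convention.
if __name__ == "__main__":
   sb = StringBuffer("antlr")
   assert sb.getString() == "antlr"
   assert sb.getString(1) == "ntlr"    # from index 1 to the end
   assert sb.getString(1,3) == "ntl"   # 3 chars starting at index 1
   print "StringBuffer ok:", sb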
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### Reader ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
## When reading Japanese chars, it happens that a stream returns a
## 'char' of length 2. This looks like a bug in the appropriate
## codecs - but I'm rather unsure about this. Anyway, if this is
## the case, I'm going to split this string into a list of chars
## and put them on hold, ie. on a buffer. Next time when called
## we read from buffer until buffer is empty.
## wh: nov, 25th -> problem does not appear in Python 2.4.0.c1.
class Reader(object):
def __init__(self,stream):
self.cin = stream
self.buf = []
def read(self,num):
assert num==1
if len(self.buf):
return self.buf.pop()
## Read a char - this may return a string.
## Is this a bug in codecs/Python?
c = self.cin.read(1)
if not c or len(c)==1:
return c
L = list(c)
L.reverse()
for x in L:
self.buf.append(x)
## read one char ..
return self.read(1)
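## Behaviour sketch: if the wrapped stream's read(1) hands back a
## multi-char string (the codec quirk described above), the extra chars
## are parked on self.buf and returned one at a time by later read(1)
## calls; EOF still surfaces as the empty string.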
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### CharScanner ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class CharScanner(TokenStream):
## class members
NO_CHAR = 0
EOF_CHAR = '' ### EOF shall be the empty string.
def __init__(self, *argv, **kwargs):
super(CharScanner, self).__init__()
self.saveConsumedInput = True
self.tokenClass = None
self.caseSensitive = True
self.caseSensitiveLiterals = True
self.literals = None
self.tabsize = 8
self._returnToken = None
self.commitToPath = False
self.traceDepth = 0
self.text = StringBuffer()
self.hashString = hash(self)
self.setTokenObjectClass(CommonToken)
self.setInput(*argv)
def __iter__(self):
return CharScannerIterator(self)
def setInput(self,*argv):
## case 1:
## if there's no arg we default to read from
## standard input
if not argv:
import sys
self.setInput(sys.stdin)
return
## get 1st argument
arg1 = argv[0]
## case 2:
## if arg1 is a string, we assume it's a file name
## and open a stream using 2nd argument as open
## mode. If there's no 2nd argument we fall back to
## mode '+rb'.
if isinstance(arg1,str):
f = open(arg1,"rb")
self.setInput(f)
self.setFilename(arg1)
return
## case 3:
## if arg1 is a file we wrap it by a char buffer (
## some additional checks?? No, can't do this in
## general).
if isinstance(arg1,file):
self.setInput(CharBuffer(arg1))
return
## case 4:
## if arg1 is of type SharedLexerInputState we use
## argument as is.
if isinstance(arg1,LexerSharedInputState):
self.inputState = arg1
return
## case 5:
## check whether argument type is of type input
## buffer. If so create a SharedLexerInputState and
## go ahead.
if isinstance(arg1,InputBuffer):
self.setInput(LexerSharedInputState(arg1))
return
## case 6:
## check whether argument type has a method read(int)
## If so create CharBuffer ...
try:
if arg1.read:
rd = Reader(arg1)
cb = CharBuffer(rd)
ss = LexerSharedInputState(cb)
self.inputState = ss
return
except:
pass
## case 7:
## raise wrong argument exception
raise TypeError(argv)
def setTabSize(self,size) :
self.tabsize = size
def getTabSize(self) :
return self.tabsize
def setCaseSensitive(self,t) :
self.caseSensitive = t
def setCommitToPath(self,commit) :
self.commitToPath = commit
def setFilename(self,f) :
self.inputState.filename = f
def setLine(self,line) :
self.inputState.line = line
def setText(self,s) :
self.resetText()
self.text.append(s)
def getCaseSensitive(self) :
return self.caseSensitive
def getCaseSensitiveLiterals(self) :
return self.caseSensitiveLiterals
def getColumn(self) :
return self.inputState.column
def setColumn(self,c) :
self.inputState.column = c
def getCommitToPath(self) :
return self.commitToPath
def getFilename(self) :
return self.inputState.filename
def getInputBuffer(self) :
return self.inputState.input
def getInputState(self) :
return self.inputState
def setInputState(self,state) :
assert isinstance(state,LexerSharedInputState)
self.inputState = state
def getLine(self) :
return self.inputState.line
def getText(self) :
return str(self.text)
def getTokenObject(self) :
return self._returnToken
def LA(self,i) :
c = self.inputState.input.LA(i)
if not self.caseSensitive:
### E0006
c = c.__class__.lower(c)
return c
def makeToken(self,type) :
try:
## dynamically load a class
assert self.tokenClass
tok = self.tokenClass()
tok.setType(type)
tok.setColumn(self.inputState.tokenStartColumn)
tok.setLine(self.inputState.tokenStartLine)
return tok
except:
self.panic("unable to create new token")
return Token.badToken
def mark(self) :
return self.inputState.input.mark()
def _match_bitset(self,b) :
if b.member(self.LA(1)):
self.consume()
else:
raise MismatchedCharException(self.LA(1), b, False, self)
def _match_string(self,s) :
for c in s:
if self.LA(1) == c:
self.consume()
else:
raise MismatchedCharException(self.LA(1), c, False, self)
def match(self,item):
if isinstance(item,str) or isinstance(item,unicode):
return self._match_string(item)
else:
return self._match_bitset(item)
def matchNot(self,c) :
if self.LA(1) != c:
self.consume()
else:
raise MismatchedCharException(self.LA(1), c, True, self)
def matchRange(self,c1,c2) :
if self.LA(1) < c1 or self.LA(1) > c2 :
raise MismatchedCharException(self.LA(1), c1, c2, False, self)
else:
self.consume()
def newline(self) :
self.inputState.line += 1
self.inputState.column = 1
def tab(self) :
c = self.getColumn()
nc = ( ((c-1)/self.tabsize) + 1) * self.tabsize + 1
self.setColumn(nc)
def panic(self,s='') :
print "CharScanner: panic: " + s
sys.exit(1)
def reportError(self,ex) :
print ex
def reportError(self,s) :
if not self.getFilename():
print "error: " + str(s)
else:
print self.getFilename() + ": error: " + str(s)
def reportWarning(self,s) :
if not self.getFilename():
print "warning: " + str(s)
else:
print self.getFilename() + ": warning: " + str(s)
def resetText(self) :
self.text.setLength(0)
self.inputState.tokenStartColumn = self.inputState.column
self.inputState.tokenStartLine = self.inputState.line
def rewind(self,pos) :
self.inputState.input.rewind(pos)
def setTokenObjectClass(self,cl):
self.tokenClass = cl
def testForLiteral(self,token):
if not token:
return
assert isinstance(token,Token)
_type = token.getType()
## special tokens can't be literals
if _type in [SKIP,INVALID_TYPE,EOF_TYPE,NULL_TREE_LOOKAHEAD] :
return
_text = token.getText()
if not _text:
return
assert isinstance(_text,str) or isinstance(_text,unicode)
_type = self.testLiteralsTable(_text,_type)
token.setType(_type)
return _type
def testLiteralsTable(self,*args):
if isinstance(args[0],str) or isinstance(args[0],unicode):
s = args[0]
i = args[1]
else:
s = self.text.getString()
i = args[0]
## check whether integer has been given
      assert isinstance(i,int)
## check whether we have a dict
assert isinstance(self.literals,dict)
try:
## E0010
if not self.caseSensitiveLiterals:
s = s.__class__.lower(s)
i = self.literals[s]
except:
pass
return i
def toLower(self,c):
      return c.__class__.lower(c)
def traceIndent(self):
print ' ' * self.traceDepth
def traceIn(self,rname):
self.traceDepth += 1
self.traceIndent()
print "> lexer %s c== %s" % (rname,self.LA(1))
def traceOut(self,rname):
self.traceIndent()
print "< lexer %s c== %s" % (rname,self.LA(1))
self.traceDepth -= 1
def uponEOF(self):
pass
def append(self,c):
if self.saveConsumedInput :
self.text.append(c)
def commit(self):
self.inputState.input.commit()
def consume(self):
if not self.inputState.guessing:
c = self.LA(1)
if self.caseSensitive:
self.append(c)
else:
# use input.LA(), not LA(), to get original case
# CharScanner.LA() would toLower it.
c = self.inputState.input.LA(1)
self.append(c)
if c and c in "\t":
self.tab()
else:
self.inputState.column += 1
self.inputState.input.consume()
## Consume chars until one matches the given char
def consumeUntil_char(self,c):
while self.LA(1) != EOF_CHAR and self.LA(1) != c:
self.consume()
## Consume chars until one matches the given set
def consumeUntil_bitset(self,bitset):
      while self.LA(1) != EOF_CHAR and not bitset.member(self.LA(1)):
self.consume()
### If symbol seen is EOF then generate and set token, otherwise
### throw exception.
def default(self,la1):
if not la1 :
self.uponEOF()
self._returnToken = self.makeToken(EOF_TYPE)
else:
self.raise_NoViableAlt(la1)
def filterdefault(self,la1,*args):
if not la1:
self.uponEOF()
self._returnToken = self.makeToken(EOF_TYPE)
return
if not args:
self.consume()
raise TryAgain()
else:
### apply filter object
self.commit();
try:
func=args[0]
args=args[1:]
apply(func,args)
except RecognitionException, e:
## catastrophic failure
self.reportError(e);
self.consume();
raise TryAgain()
def raise_NoViableAlt(self,la1=None):
if not la1: la1 = self.LA(1)
fname = self.getFilename()
line = self.getLine()
col = self.getColumn()
raise NoViableAltForCharException(la1,fname,line,col)
def set_return_token(self,_create,_token,_ttype,_offset):
if _create and not _token and (not _ttype == SKIP):
string = self.text.getString(_offset)
_token = self.makeToken(_ttype)
_token.setText(string)
self._returnToken = _token
return _token
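   ### Scanner iteration sketch: a generated lexer subclass (which adds
   ### nextToken) can be driven through the iterator protocol below.
   ### MyLexer is a hypothetical subclass, not defined in this file:
   ###
   ###   lexer = MyLexer("input.txt")
   ###   for token in lexer:
   ###      print token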
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### CharScannerIterator ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class CharScannerIterator:
def __init__(self,inst):
if isinstance(inst,CharScanner):
self.inst = inst
return
raise TypeError("CharScannerIterator requires CharScanner object")
def next(self):
assert self.inst
item = self.inst.nextToken()
if not item or item.isEOF():
raise StopIteration()
return item
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### BitSet ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### I'm assuming here that a long is 64bits. It appears however, that
### a long is of any size. That means we can use a single long as the
### bitset (!), ie. Python would do almost all the work (TBD).
class BitSet(object):
BITS = 64
NIBBLE = 4
LOG_BITS = 6
MOD_MASK = BITS -1
def __init__(self,data=None):
if not data:
BitSet.__init__(self,[long(0)])
return
if isinstance(data,int):
BitSet.__init__(self,[long(data)])
return
if isinstance(data,long):
BitSet.__init__(self,[data])
return
if not isinstance(data,list):
raise TypeError("BitSet requires integer, long, or " +
"list argument")
for x in data:
if not isinstance(x,long):
raise TypeError(self,"List argument item is " +
"not a long: %s" % (x))
self.data = data
def __str__(self):
bits = len(self.data) * BitSet.BITS
s = ""
for i in xrange(0,bits):
if self.at(i):
s += "1"
else:
s += "o"
if not ((i+1) % 10):
s += '|%s|' % (i+1)
return s
def __repr__(self):
return str(self)
def member(self,item):
if not item:
return False
if isinstance(item,int):
return self.at(item)
if not (isinstance(item,str) or isinstance(item,unicode)):
raise TypeError(self,"char or unichar expected: %s" % (item))
## char is a (unicode) string with at most lenght 1, ie.
## a char.
if len(item) != 1:
raise TypeError(self,"char expected: %s" % (item))
### handle ASCII/UNICODE char
num = ord(item)
### check whether position num is in bitset
return self.at(num)
def wordNumber(self,bit):
return bit >> BitSet.LOG_BITS
def bitMask(self,bit):
pos = bit & BitSet.MOD_MASK ## bit mod BITS
return (1L << pos)
def set(self,bit,on=True):
# grow bitset as required (use with care!)
i = self.wordNumber(bit)
mask = self.bitMask(bit)
if i>=len(self.data):
d = i - len(self.data) + 1
for x in xrange(0,d):
self.data.append(0L)
assert len(self.data) == i+1
if on:
self.data[i] |= mask
else:
self.data[i] &= (~mask)
### make add an alias for set
add = set
def off(self,bit,off=True):
self.set(bit,not off)
def at(self,bit):
i = self.wordNumber(bit)
v = self.data[i]
m = self.bitMask(bit)
return v & m
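### BitSet demo (file's __main__ style): positions grow on demand via
### set()/add(), and member() accepts an int position or a single char
### (looked up via ord()).
if __name__ == "__main__":
   b = BitSet()
   b.add(ord('a'))
   assert b.member('a')
   assert not b.member('b')
   print "BitSet ok"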
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### some further funcs ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
def illegalarg_ex(func):
raise ValueError(
"%s is only valid if parser is built for debugging" %
(func.func_name))
def runtime_ex(func):
   raise RuntimeError(
"%s is only valid if parser is built for debugging" %
(func.func_name))
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### TokenBuffer ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class TokenBuffer(object):
def __init__(self,stream):
self.input = stream
self.nMarkers = 0
self.markerOffset = 0
self.numToConsume = 0
self.queue = Queue()
def reset(self) :
self.nMarkers = 0
self.markerOffset = 0
self.numToConsume = 0
self.queue.reset()
def consume(self) :
self.numToConsume += 1
def fill(self, amount):
self.syncConsume()
while self.queue.length() < (amount + self.markerOffset):
self.queue.append(self.input.nextToken())
def getInput(self):
return self.input
def LA(self,k) :
self.fill(k)
return self.queue.elementAt(self.markerOffset + k - 1).type
def LT(self,k) :
self.fill(k)
return self.queue.elementAt(self.markerOffset + k - 1)
def mark(self) :
self.syncConsume()
self.nMarkers += 1
return self.markerOffset
def rewind(self,mark) :
self.syncConsume()
self.markerOffset = mark
self.nMarkers -= 1
def syncConsume(self) :
while self.numToConsume > 0:
if self.nMarkers > 0:
# guess mode -- leave leading characters and bump offset.
self.markerOffset += 1
else:
# normal mode -- remove first character
self.queue.removeFirst()
self.numToConsume -= 1
def __str__(self):
return "(%s,%s,%s,%s,%s)" % (
self.input,
self.nMarkers,
self.markerOffset,
self.numToConsume,
self.queue)
def __repr__(self):
return str(self)
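### TokenBuffer sketch: LA(k) is the *type* of the k-th lookahead token,
### LT(k) the token itself; both pull lazily from the wrapped stream.
### (lexer below is a hypothetical TokenStream.)
###
###   tb = TokenBuffer(lexer)
###   if tb.LA(1) == MIN_USER_TYPE:
###      tok = tb.LT(1)
###      tb.consume()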
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### ParserSharedInputState ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class ParserSharedInputState(object):
def __init__(self):
self.input = None
self.reset()
def reset(self):
self.guessing = 0
self.filename = None
if self.input:
self.input.reset()
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### Parser ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class Parser(object):
def __init__(self, *args, **kwargs):
self.tokenNames = None
self.returnAST = None
self.astFactory = None
self.tokenTypeToASTClassMap = {}
self.ignoreInvalidDebugCalls = False
self.traceDepth = 0
if not args:
self.inputState = ParserSharedInputState()
return
arg0 = args[0]
assert isinstance(arg0,ParserSharedInputState)
self.inputState = arg0
return
def getTokenTypeToASTClassMap(self):
return self.tokenTypeToASTClassMap
def addMessageListener(self, l):
if not self.ignoreInvalidDebugCalls:
illegalarg_ex(addMessageListener)
def addParserListener(self,l) :
if (not self.ignoreInvalidDebugCalls) :
illegalarg_ex(addParserListener)
def addParserMatchListener(self, l) :
if (not self.ignoreInvalidDebugCalls) :
illegalarg_ex(addParserMatchListener)
def addParserTokenListener(self, l) :
if (not self.ignoreInvalidDebugCalls):
illegalarg_ex(addParserTokenListener)
def addSemanticPredicateListener(self, l) :
if (not self.ignoreInvalidDebugCalls):
illegalarg_ex(addSemanticPredicateListener)
def addSyntacticPredicateListener(self, l) :
if (not self.ignoreInvalidDebugCalls):
illegalarg_ex(addSyntacticPredicateListener)
def addTraceListener(self, l) :
if (not self.ignoreInvalidDebugCalls):
illegalarg_ex(addTraceListener)
def consume(self):
raise NotImplementedError()
def _consumeUntil_type(self,tokenType):
while self.LA(1) != EOF_TYPE and self.LA(1) != tokenType:
self.consume()
def _consumeUntil_bitset(self, set):
while self.LA(1) != EOF_TYPE and not set.member(self.LA(1)):
self.consume()
def consumeUntil(self,arg):
if isinstance(arg,int):
self._consumeUntil_type(arg)
else:
self._consumeUntil_bitset(arg)
def defaultDebuggingSetup(self):
pass
def getAST(self) :
return self.returnAST
def getASTFactory(self) :
return self.astFactory
def getFilename(self) :
return self.inputState.filename
def getInputState(self) :
return self.inputState
def setInputState(self, state) :
self.inputState = state
def getTokenName(self,num) :
return self.tokenNames[num]
def getTokenNames(self) :
return self.tokenNames
def isDebugMode(self) :
      return False
def LA(self, i):
raise NotImplementedError()
def LT(self, i):
raise NotImplementedError()
def mark(self):
return self.inputState.input.mark()
def _match_int(self,t):
if (self.LA(1) != t):
raise MismatchedTokenException(
self.tokenNames, self.LT(1), t, False, self.getFilename())
else:
self.consume()
def _match_set(self, b):
if (not b.member(self.LA(1))):
raise MismatchedTokenException(
self.tokenNames,self.LT(1), b, False, self.getFilename())
else:
self.consume()
def match(self,set) :
if isinstance(set,int):
self._match_int(set)
return
if isinstance(set,BitSet):
self._match_set(set)
return
raise TypeError("Parser.match requires integer ot BitSet argument")
def matchNot(self,t):
if self.LA(1) == t:
raise MismatchedTokenException(
            self.tokenNames, self.LT(1), t, True, self.getFilename())
else:
self.consume()
def removeMessageListener(self, l) :
if (not self.ignoreInvalidDebugCalls):
runtime_ex(removeMessageListener)
def removeParserListener(self, l) :
if (not self.ignoreInvalidDebugCalls):
runtime_ex(removeParserListener)
def removeParserMatchListener(self, l) :
if (not self.ignoreInvalidDebugCalls):
runtime_ex(removeParserMatchListener)
def removeParserTokenListener(self, l) :
if (not self.ignoreInvalidDebugCalls):
runtime_ex(removeParserTokenListener)
def removeSemanticPredicateListener(self, l) :
if (not self.ignoreInvalidDebugCalls):
runtime_ex(removeSemanticPredicateListener)
def removeSyntacticPredicateListener(self, l) :
if (not self.ignoreInvalidDebugCalls):
runtime_ex(removeSyntacticPredicateListener)
def removeTraceListener(self, l) :
if (not self.ignoreInvalidDebugCalls):
runtime_ex(removeTraceListener)
def reportError(self,x) :
fmt = "syntax error:"
f = self.getFilename()
if f:
fmt = ("%s:" % f) + fmt
if isinstance(x,Token):
         line = x.getLine()
         col = x.getColumn()
text = x.getText()
fmt = fmt + 'unexpected symbol at line %s (column %s) : "%s"'
print >>sys.stderr, fmt % (line,col,text)
else:
print >>sys.stderr, fmt,str(x)
def reportWarning(self,s):
f = self.getFilename()
if f:
print "%s:warning: %s" % (f,str(x))
else:
print "warning: %s" % (str(x))
def rewind(self, pos) :
self.inputState.input.rewind(pos)
def setASTFactory(self, f) :
self.astFactory = f
def setASTNodeClass(self, cl) :
self.astFactory.setASTNodeType(cl)
def setASTNodeType(self, nodeType) :
self.setASTNodeClass(nodeType)
def setDebugMode(self, debugMode) :
if (not self.ignoreInvalidDebugCalls):
runtime_ex(setDebugMode)
def setFilename(self, f) :
self.inputState.filename = f
def setIgnoreInvalidDebugCalls(self, value) :
self.ignoreInvalidDebugCalls = value
def setTokenBuffer(self, t) :
self.inputState.input = t
def traceIndent(self):
print " " * self.traceDepth
def traceIn(self,rname):
self.traceDepth += 1
self.trace("> ", rname)
def traceOut(self,rname):
self.trace("< ", rname)
self.traceDepth -= 1
### wh: moved from ASTFactory to Parser
def addASTChild(self,currentAST, child):
if not child:
return
if not currentAST.root:
currentAST.root = child
elif not currentAST.child:
currentAST.root.setFirstChild(child)
else:
currentAST.child.setNextSibling(child)
currentAST.child = child
currentAST.advanceChildToEnd()
### wh: moved from ASTFactory to Parser
def makeASTRoot(self,currentAST,root) :
if root:
### Add the current root as a child of new root
root.addChild(currentAST.root)
### The new current child is the last sibling of the old root
currentAST.child = currentAST.root
currentAST.advanceChildToEnd()
### Set the new root
currentAST.root = root
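   ### Sketch: for input like "a + b" a generated rule would call
   ### addASTChild for 'a', then makeASTRoot for '+' (rerooting so the
   ### operator becomes the parent), then addASTChild for 'b'.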
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### LLkParser ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class LLkParser(Parser):
def __init__(self, *args, **kwargs):
try:
arg1 = args[0]
except:
arg1 = 1
if isinstance(arg1,int):
super(LLkParser,self).__init__()
self.k = arg1
return
if isinstance(arg1,ParserSharedInputState):
super(LLkParser,self).__init__(arg1)
self.set_k(1,*args)
return
if isinstance(arg1,TokenBuffer):
super(LLkParser,self).__init__()
self.setTokenBuffer(arg1)
self.set_k(1,*args)
return
if isinstance(arg1,TokenStream):
super(LLkParser,self).__init__()
tokenBuf = TokenBuffer(arg1)
self.setTokenBuffer(tokenBuf)
self.set_k(1,*args)
return
### unknown argument
raise TypeError("LLkParser requires integer, " +
"ParserSharedInputStream or TokenStream argument")
def consume(self):
self.inputState.input.consume()
def LA(self,i):
return self.inputState.input.LA(i)
def LT(self,i):
return self.inputState.input.LT(i)
def set_k(self,index,*args):
try:
self.k = args[index]
except:
self.k = 1
def trace(self,ee,rname):
self.traceIndent()
guess = ""
if self.inputState.guessing > 0:
guess = " [guessing]"
print(ee + rname + guess)
for i in xrange(1,self.k+1):
if i != 1:
print(", ")
if self.LT(i) :
v = self.LT(i).getText()
else:
v = "null"
print "LA(%s) == %s" % (i,v)
print("\n")
def traceIn(self,rname):
self.traceDepth += 1;
self.trace("> ", rname);
def traceOut(self,rname):
self.trace("< ", rname);
self.traceDepth -= 1;
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### TreeParserSharedInputState ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class TreeParserSharedInputState(object):
def __init__(self):
self.guessing = 0
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### TreeParser ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class TreeParser(object):
def __init__(self, *args, **kwargs):
self.inputState = TreeParserSharedInputState()
self._retTree = None
self.tokenNames = []
self.returnAST = None
self.astFactory = ASTFactory()
self.traceDepth = 0
def getAST(self):
return self.returnAST
def getASTFactory(self):
return self.astFactory
def getTokenName(self,num) :
return self.tokenNames[num]
def getTokenNames(self):
return self.tokenNames
def match(self,t,set) :
assert isinstance(set,int) or isinstance(set,BitSet)
if not t or t == ASTNULL:
raise MismatchedTokenException(self.getTokenNames(), t,set, False)
if isinstance(set,int) and t.getType() != set:
raise MismatchedTokenException(self.getTokenNames(), t,set, False)
        if isinstance(set,BitSet) and not set.member(t.getType()):
raise MismatchedTokenException(self.getTokenNames(), t,set, False)
    def matchNot(self,t, ttype) :
        if not t or (t == ASTNULL) or (t.getType() == ttype):
            raise MismatchedTokenException(self.getTokenNames(), t, ttype, True)
def reportError(self,ex):
print >>sys.stderr,"error:",ex
def reportWarning(self, s):
print "warning:",s
def setASTFactory(self,f):
self.astFactory = f
def setASTNodeType(self,nodeType):
self.setASTNodeClass(nodeType)
def setASTNodeClass(self,nodeType):
self.astFactory.setASTNodeType(nodeType)
def traceIndent(self):
print " " * self.traceDepth
def traceIn(self,rname,t):
self.traceDepth += 1
self.traceIndent()
print("> " + rname + "(" +
ifelse(t,str(t),"null") + ")" +
ifelse(self.inputState.guessing>0,"[guessing]",""))
def traceOut(self,rname,t):
self.traceIndent()
print("< " + rname + "(" +
ifelse(t,str(t),"null") + ")" +
ifelse(self.inputState.guessing>0,"[guessing]",""))
self.traceDepth -= 1
### wh: moved from ASTFactory to TreeParser
def addASTChild(self,currentAST, child):
if not child:
return
if not currentAST.root:
currentAST.root = child
elif not currentAST.child:
currentAST.root.setFirstChild(child)
else:
currentAST.child.setNextSibling(child)
currentAST.child = child
currentAST.advanceChildToEnd()
### wh: moved from ASTFactory to TreeParser
def makeASTRoot(self,currentAST,root):
if root:
### Add the current root as a child of new root
root.addChild(currentAST.root)
### The new current child is the last sibling of the old root
currentAST.child = currentAST.root
currentAST.advanceChildToEnd()
### Set the new root
currentAST.root = root
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### funcs to work on trees ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
def rightmost(ast):
if ast:
while(ast.right):
ast = ast.right
return ast
def cmptree(s,t,partial):
while(s and t):
### as a quick optimization, check roots first.
if not s.equals(t):
return False
### if roots match, do full list match test on children.
if not cmptree(s.getFirstChild(),t.getFirstChild(),partial):
return False
s = s.getNextSibling()
t = t.getNextSibling()
r = ifelse(partial,not t,not s and not t)
return r
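### Illustrative sketch of cmptree's two modes: with partial=False both
### sibling lists must end together, with partial=True 't' may be a prefix.
###   cmptree(s, t, partial=False)  # exact structural + equals() match
###   cmptree(s, t, partial=True)   # True once every node of t has matched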
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### AST ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class AST(object):
def __init__(self):
pass
def addChild(self, c):
pass
def equals(self, t):
return False
def equalsList(self, t):
return False
def equalsListPartial(self, t):
return False
def equalsTree(self, t):
return False
def equalsTreePartial(self, t):
return False
def findAll(self, tree):
return None
def findAllPartial(self, subtree):
return None
def getFirstChild(self):
return self
def getNextSibling(self):
return self
def getText(self):
return ""
def getType(self):
return INVALID_TYPE
def getLine(self):
return 0
def getColumn(self):
return 0
def getNumberOfChildren(self):
return 0
    ### merged Java-style overloads: txt is optional
    def initialize(self, t, txt=None):
        pass
def setFirstChild(self, c):
pass
def setNextSibling(self, n):
pass
def setText(self, text):
pass
def setType(self, ttype):
pass
    def toString(self):
        return self.getText()
__str__ = toString
def toStringList(self):
return self.getText()
def toStringTree(self):
return self.getText()
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### ASTNULLType ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### There is only one instance of this class
class ASTNULLType(AST):
def __init__(self):
AST.__init__(self)
pass
def getText(self):
return "<ASTNULL>"
def getType(self):
return NULL_TREE_LOOKAHEAD
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### BaseAST ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class BaseAST(AST):
verboseStringConversion = False
tokenNames = None
def __init__(self):
self.down = None ## kid
self.right = None ## sibling
def addChild(self,node):
if node:
t = rightmost(self.down)
if t:
t.right = node
else:
assert not self.down
self.down = node
def getNumberOfChildren(self):
t = self.down
n = 0
while t:
n += 1
t = t.right
return n
def doWorkForFindAll(self,v,target,partialMatch):
sibling = self
while sibling:
c1 = partialMatch and sibling.equalsTreePartial(target)
if c1:
v.append(sibling)
else:
c2 = not partialMatch and sibling.equalsTree(target)
if c2:
v.append(sibling)
### regardless of match or not, check any children for matches
if sibling.getFirstChild():
sibling.getFirstChild().doWorkForFindAll(v,target,partialMatch)
sibling = sibling.getNextSibling()
### Is node t equal to 'self' in terms of token type and text?
def equals(self,t):
if not t:
return False
return self.getText() == t.getText() and self.getType() == t.getType()
### Is t an exact structural and equals() match of this tree. The
### 'self' reference is considered the start of a sibling list.
###
def equalsList(self, t):
return cmptree(self, t, partial=False)
### Is 't' a subtree of this list?
### The siblings of the root are NOT ignored.
###
def equalsListPartial(self,t):
return cmptree(self,t,partial=True)
### Is tree rooted at 'self' equal to 't'? The siblings
### of 'self' are ignored.
###
def equalsTree(self, t):
return self.equals(t) and \
cmptree(self.getFirstChild(), t.getFirstChild(), partial=False)
### Is 't' a subtree of the tree rooted at 'self'? The siblings
### of 'self' are ignored.
###
def equalsTreePartial(self, t):
if not t:
return True
return self.equals(t) and cmptree(
self.getFirstChild(), t.getFirstChild(), partial=True)
### Walk the tree looking for all exact subtree matches. Return
### an ASTEnumerator that lets the caller walk the list
### of subtree roots found herein.
def findAll(self,target):
roots = []
### the empty tree cannot result in an enumeration
if not target:
return None
# find all matches recursively
self.doWorkForFindAll(roots, target, False)
return roots
### Walk the tree looking for all subtrees. Return
### an ASTEnumerator that lets the caller walk the list
### of subtree roots found herein.
def findAllPartial(self,sub):
roots = []
### the empty tree cannot result in an enumeration
if not sub:
return None
self.doWorkForFindAll(roots, sub, True) ### find all matches recursively
return roots
    ### Get the first child of this node; None if there are no children
def getFirstChild(self):
return self.down
### Get the next sibling in line after this one
def getNextSibling(self):
return self.right
### Get the token text for this node
def getText(self):
return ""
### Get the token type for this node
def getType(self):
return 0
def getLine(self):
return 0
def getColumn(self):
return 0
    ### Remove all children
def removeChildren(self):
self.down = None
def setFirstChild(self,c):
self.down = c
def setNextSibling(self, n):
self.right = n
### Set the token text for this node
def setText(self, text):
pass
### Set the token type for this node
def setType(self, ttype):
pass
    ### static
    def setVerboseStringConversion(verbose,names):
        BaseAST.verboseStringConversion = verbose
        BaseAST.tokenNames = names
    setVerboseStringConversion = staticmethod(setVerboseStringConversion)
    ### Return an array of strings that maps token ID to its text.
    ## @since 2.7.3
    def getTokenNames():
        return BaseAST.tokenNames
    getTokenNames = staticmethod(getTokenNames)
def toString(self):
return self.getText()
### return tree as lisp string - sibling included
def toStringList(self):
ts = self.toStringTree()
sib = self.getNextSibling()
if sib:
ts += sib.toStringList()
return ts
__str__ = toStringList
### return tree as string - siblings ignored
def toStringTree(self):
ts = ""
kid = self.getFirstChild()
if kid:
ts += " ("
ts += " " + self.toString()
if kid:
ts += kid.toStringList()
ts += " )"
return ts
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### CommonAST ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### Common AST node implementation
class CommonAST(BaseAST):
def __init__(self,token=None):
super(CommonAST,self).__init__()
self.ttype = INVALID_TYPE
self.text = "<no text>"
self.initialize(token)
#assert self.text
### Get the token text for this node
def getText(self):
return self.text
### Get the token type for this node
def getType(self):
return self.ttype
def initialize(self,*args):
if not args:
return
arg0 = args[0]
if isinstance(arg0,int):
arg1 = args[1]
self.setType(arg0)
self.setText(arg1)
return
if isinstance(arg0,AST) or isinstance(arg0,Token):
self.setText(arg0.getText())
self.setType(arg0.getType())
return
### Set the token text for this node
def setText(self,text_):
assert isinstance(text_,str)
self.text = text_
### Set the token type for this node
def setType(self,ttype_):
assert isinstance(ttype_,int)
self.ttype = ttype_
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### CommonASTWithHiddenTokens ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class CommonASTWithHiddenTokens(CommonAST):
def __init__(self,*args):
CommonAST.__init__(self,*args)
self.hiddenBefore = None
self.hiddenAfter = None
def getHiddenAfter(self):
return self.hiddenAfter
def getHiddenBefore(self):
return self.hiddenBefore
def initialize(self,*args):
CommonAST.initialize(self,*args)
if args and isinstance(args[0],Token):
assert isinstance(args[0],CommonHiddenStreamToken)
            self.hiddenBefore = args[0].getHiddenBefore()
self.hiddenAfter = args[0].getHiddenAfter()
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### ASTPair ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class ASTPair(object):
def __init__(self):
self.root = None ### current root of tree
self.child = None ### current child to which siblings are added
    ### Make sure that child is the last sibling
def advanceChildToEnd(self):
if self.child:
while self.child.getNextSibling():
self.child = self.child.getNextSibling()
    ### Copy an ASTPair. Don't call it clone() because we want type-safety
def copy(self):
tmp = ASTPair()
tmp.root = self.root
tmp.child = self.child
return tmp
    def toString(self):
        r = ifelse(not self.root,"null",self.root.getText())
        c = ifelse(not self.child,"null",self.child.getText())
        return "[%s,%s]" % (r,c)
__str__ = toString
__repr__ = toString
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### ASTFactory ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class ASTFactory(object):
def __init__(self,table=None):
self._class = None
self._classmap = ifelse(table,table,None)
def create(self,*args):
if not args:
return self.create(INVALID_TYPE)
arg0 = args[0]
arg1 = None
arg2 = None
        try:
            arg1 = args[1]
            arg2 = args[2]
        except IndexError:
            pass
# ctor(int)
if isinstance(arg0,int) and not arg2:
### get class for 'self' type
c = self.getASTNodeType(arg0)
t = self.create(c)
if t:
t.initialize(arg0, ifelse(arg1,arg1,""))
return t
# ctor(int,something)
if isinstance(arg0,int) and arg2:
t = self.create(arg2)
if t:
t.initialize(arg0,arg1)
return t
# ctor(AST)
if isinstance(arg0,AST):
t = self.create(arg0.getType())
if t:
t.initialize(arg0)
return t
# ctor(token)
if isinstance(arg0,Token) and not arg1:
ttype = arg0.getType()
assert isinstance(ttype,int)
t = self.create(ttype)
if t:
t.initialize(arg0)
return t
# ctor(token,class)
if isinstance(arg0,Token) and arg1:
assert isinstance(arg1,type)
assert issubclass(arg1,AST)
# this creates instance of 'arg1' using 'arg0' as
# argument. Wow, that's magic!
t = arg1(arg0)
assert t and isinstance(t,AST)
return t
# ctor(class)
if isinstance(arg0,type):
### next statement creates instance of type (!)
t = arg0()
assert isinstance(t,AST)
return t
def setASTNodeClass(self,className=None):
if not className:
return
assert isinstance(className,type)
assert issubclass(className,AST)
self._class = className
### kind of misnomer - use setASTNodeClass instead.
setASTNodeType = setASTNodeClass
def getASTNodeClass(self):
return self._class
def getTokenTypeToASTClassMap(self):
return self._classmap
def setTokenTypeToASTClassMap(self,amap):
self._classmap = amap
def error(self, e):
import sys
print >> sys.stderr, e
def setTokenTypeASTNodeType(self, tokenType, className):
"""
Specify a mapping between a token type and a (AST) class.
"""
if not self._classmap:
self._classmap = {}
        if not className:
            try:
                del self._classmap[tokenType]
            except KeyError:
                pass
else:
### here we should also perform actions to ensure that
### a. class can be loaded
### b. class is a subclass of AST
###
assert isinstance(className,type)
assert issubclass(className,AST) ## a & b
### enter the class
self._classmap[tokenType] = className
def getASTNodeType(self,tokenType):
"""
For a given token type return the AST node type. First we
lookup a mapping table, second we try _class
and finally we resolve to "antlr.CommonAST".
"""
# first
        if self._classmap:
            try:
                c = self._classmap[tokenType]
                if c:
                    return c
            except KeyError:
                pass
# second
if self._class:
return self._class
# default
return CommonAST
### methods that have been moved to file scope - just listed
### here to be somewhat consistent with original API
def dup(self,t):
return antlr.dup(t,self)
def dupList(self,t):
return antlr.dupList(t,self)
def dupTree(self,t):
return antlr.dupTree(t,self)
### methods moved to other classes
### 1. makeASTRoot -> Parser
### 2. addASTChild -> Parser
### non-standard: create alias for longish method name
maptype = setTokenTypeASTNodeType
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### ASTVisitor ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
class ASTVisitor(object):
def __init__(self,*args):
pass
def visit(self,ast):
pass
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### static methods and variables ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
ASTNULL = ASTNULLType()
### wh: moved from ASTFactory as there's nothing ASTFactory-specific
### in this method.
def make(*nodes):
if not nodes:
return None
for i in xrange(0,len(nodes)):
node = nodes[i]
if node:
assert isinstance(node,AST)
root = nodes[0]
tail = None
if root:
root.setFirstChild(None)
for i in xrange(1,len(nodes)):
if not nodes[i]:
continue
if not root:
root = tail = nodes[i]
elif not tail:
root.setFirstChild(nodes[i])
tail = root.getFirstChild()
else:
tail.setNextSibling(nodes[i])
tail = tail.getNextSibling()
### Chase tail to last sibling
while tail.getNextSibling():
tail = tail.getNextSibling()
return root
def dup(t,factory):
if not t:
return None
if factory:
dup_t = factory.create(t.__class__)
else:
raise TypeError("dup function requires ASTFactory argument")
dup_t.initialize(t)
return dup_t
def dupList(t,factory):
result = dupTree(t,factory)
nt = result
while t:
## for each sibling of the root
t = t.getNextSibling()
nt.setNextSibling(dupTree(t,factory))
nt = nt.getNextSibling()
return result
def dupTree(t,factory):
result = dup(t,factory)
if t:
result.setFirstChild(dupList(t.getFirstChild(),factory))
return result
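### Usage sketch for the tree helpers above (illustrative only):
###   factory = ASTFactory()
###   a = factory.create(1, "a")
###   b = factory.create(2, "b")
###   tree = make(a, b)              # 'a' becomes root, 'b' its first child
###   copy = dupTree(tree, factory)
###   assert copy.equalsTree(tree)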
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
### $Id: antlr.py,v 1.2 2005/10/26 07:44:24 rvk Exp $
# Local Variables: ***
# mode: python ***
# py-indent-offset: 4 ***
# End: ***
|
edisonlz/fruit
|
web_project/base/site-packages/pyExcelerator/antlr.py
|
Python
|
apache-2.0
| 81,931
|
[
"VisIt"
] |
42dfbc6ecc65418f090b184af99188e8017f0f110eadd9793b3b4fea98cdb741
|
#!/usr/bin/env python
from keras.models import Sequential, model_from_json, model_from_yaml
from keras.layers import Dense, Dropout, Activation, Merge, Flatten, \
Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.optimizers import SGD
from keras.callbacks import EarlyStopping
from keras import backend as K
# from keras.utils.visualize_util import plot as kplt
import theano as T
from os import path
import traceback
import numpy as np
from numpy import matlib
from scipy import signal, fft, ifft
from scipy.fftpack import dct, idct
import scipy.io as sio
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from pylab import figure, plot, subplot, show, imshow, colorbar, axis, title
from mpl_toolkits.axes_grid1 import make_axes_locatable, ImageGrid
import h5py as h5
# Data Class
class DataSet:
# data = {}
# scan = {}
def __init__(self, name):
# TODO: better initialize
self.name = name
self.data = {} # storing the data
self.probe_geom = ()
self.angles = ()
self.fs = 0
self.fm = 0
self.c0 = 0
self.num_chn = 0
self.csm = () # cross spectral matrix
self.scan = {}
self.dist = ()
self.pht_pos = ()
self.scat_pos = ()
def __print_name(self, name):
print(name)
def import_data(self, file_path, file_name):
# TODO
# try:
assert path.exists(file_path+file_name), 'File not found.'
with h5.File(file_path + file_name, 'r') as hf:
print('This %s dataset contains: ' % file_name)
hf.visit(self.__print_name)
print
# except IOError, e:
# print(IOError, ':', e)
def preprocess(self):
# Cross spectral matrix
mode = 1 # 0: without average, of shape (num_angles, nFFT, num_channels, num_channels)
# 1: with average, of shape (num_angles, num_channels, num_channels)
(num_angles, num_channels, num_samples) = self.data['real'].shape
nFFT = 512 # 256
if mode == 0:
# (samples, channels, rows, cols)
self.csm = np.zeros((num_angles, nFFT, num_channels, num_channels), dtype=complex)
for k in np.arange(num_angles):
for i in np.arange(num_channels):
                    s1 = self.data['real'][k,i,:] # ignore imaginary part
for j in np.arange(i+1,num_channels):
s2 = self.data['real'][k,j,:]
                        _, self.csm[k,:,j,i] = signal.csd(s1, s2, fs=self.fs, nperseg=nFFT, \
                                nfft=nFFT, scaling='density')
# TODO
# Diagnal removal: use a better algorithm
# lambda filter map reduce
elif mode == 1:
self.csm = np.zeros((num_angles, num_channels, num_channels), dtype=float)
for k in np.arange(num_angles):
for i in np.arange(num_channels):
s1 = self.data['real'][k,i,:]
for j in np.arange(i+1,num_channels):
s2 = self.data['real'][k,j,:]
                        _, tmp = signal.csd(s1, s2, fs=self.fs, nperseg=nFFT, \
                                nfft=nFFT, scaling='density')
self.csm[k,j,i] = np.abs(np.sum(tmp) / nFFT) # sum,average,abs
self.csm[k,i,(i+1):num_channels] = self.csm[k,(i+1):num_channels,i]
print(self.csm.shape)
print(self.csm)
with h5.File('csm_h5', 'w') as hf:
hf['csm'] = self.csm
# csm_t = self.csm[1,:,:]
# img = csm_t.reshape(num_channels, num_channels)
# plt.figure()
# plt.imshow(img)
# plt.show()
# Lower triangle trim
# Normalize: /Gxx Gyy
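        # Sketch of the underlying call (illustrative): scipy.signal.csd
        # returns (freqs, Pxy); with nperseg=nfft=512 the cross spectrum Pxy
        # has nfft//2+1 = 257 complex bins, which mode 1 collapses into one
        # averaged magnitude per channel pair.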
def compute_dist(self):
# Distance matrix
num_x = len(self.scan['x_axis'])
num_z = len(self.scan['z_axis'])
self.dist = np.zeros((num_x, num_z), dtype=float)
for i in range(num_x):
for j in range(num_z):
self.dist[i,j] = np.sqrt(self.scan['x_axis'][i]**2 \
+ self.scan['z_axis'][j]**2)
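        # Equivalent vectorized form (sketch, assuming the axes are 1D arrays):
        # self.dist = np.hypot(np.asarray(self.scan['x_axis'])[:, None],
        #                      np.asarray(self.scan['z_axis'])[None, :])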
def write_data(self, filename, channel_id):
with h5.File(filename, 'w') as hf:
# DEBUG: complex value OR absolute value ??
(num_angles, num_channels, num_samples) = self.data['real'].shape
one_ch_data = np.sqrt(self.data['real'][channel_id, :, :]**2 \
+ self.data['imag'][channel_id, :, :]**2)
hf['time_data'] = one_ch_data.T
# mul_ch_data = np.sqrt( \
# self.data['real'].reshape(num_angles*num_channels, num_samples)**2 \
# + self.data['imag'].reshape(num_angles*num_channels, num_samples)**2 \
# )
# hf['time_data'] = mul_ch_data.T
def show_image(self, prange):
num_slices = self.data['real'].shape[0]
plt.figure()
for i in np.arange(num_slices):
amp = np.sqrt(self.data['real'][i, :, :]**2 + self.data['imag'][i, :, :]**2)
plt.subplot(2, 2, i+1)
plt.imshow(amp, extent=prange)
plt.title(i+1)
plt.show()
def img_norm(img):
    vmin = np.amin(img)
    vmax = np.amax(img)
    return (img-vmin) / (vmax-vmin)
def nice_show(fig, data, vmin=None, vmax=None, cmap=None):
'''
data is 3D (nCH, nCol, nRow)
'''
assert data.ndim==3, 'Data dimension must be 3!'
if cmap is None:
cmap = cm.jet
if vmin is None:
vmin = data.min()
if vmax is None:
vmax = data.max()
nCH,_,_= data.shape
nr = int(np.ceil(np.sqrt(nCH)))
assert nr<=10, 'Too many data channels (>10)!'
grid = ImageGrid(fig, 111, \
nrows_ncols=(nr, nr),\
axes_pad=0.1,\
add_all=True,\
label_mode='L')
for i in range(nCH):
ax = grid[i]
im = ax.imshow(data[i,:,:], vmin=vmin, vmax=vmax, \
interpolation='nearest', cmap=cmap)
# div = make_axes_locatable(ax)
# cax = div.append_axes('right', size='5%', pad=0.05) # colorbar axis to the right
# plt.colorbar(im, cax=cax)
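# Usage sketch for nice_show (illustrative; random data stands in for a CSM):
# fig = plt.figure()
# nice_show(fig, np.random.rand(4, 64, 64), cmap=cm.gray)
# plt.show()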
class ANN(object):
"""Docstring for ANN. """
def __init__(self):
self.in_real = ()
self.in_imag = ()
self.out_real = ()
self.out_imag = ()
def train_mlp(self, input, output):
self.in_real = input.data['real']
self.in_imag = input.data['imag']
self.out_real = output.data['real']
self.out_imag = output.data['imag']
        (i_dim_x, i_dim_y, i_dim_z) = self.in_real.shape
        in_dim = i_dim_x*i_dim_y*i_dim_z
        # one flattened sample per row, matching input_dim declared below
        input_data = self.in_real.reshape(1, in_dim)
        (o_dim_x, o_dim_y, o_dim_z) = self.out_real.shape
        out_dim = o_dim_x*o_dim_y*o_dim_z
        output_data = self.out_real.reshape(1, out_dim)
model = Sequential()
model.add(Dense(200, input_dim=in_dim, init='uniform'))
model.add(Activation('relu'))
# model.add(Dropout(0.25))
model.add(Dense(200))#, init='uniform'))
model.add(Activation('relu'))
# model.add(Dropout(0.25))
model.add(Dense(out_dim))#, init='uniform'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd',\
metrics=['accuracy'])
early_stop = EarlyStopping(monitor='val_loss', patience=2)
hist = model.fit(input_data, output_data, nb_epoch=50, \
batch_size=64, validation_split=0.2, \
shuffle=True, callbacks=[early_stop])
print(hist.history)
        #TODO: batch train, e.g. model.train_on_batch(x_batch, y_batch)
# Save model
model_to_save_json = model.to_json()
open('model_architecture.json', 'w').write(model_to_save_json)
model_to_save_yaml = model.to_yaml()
open('model_architecture.yaml', 'w').write(model_to_save_yaml)
model.save_weights('weights.h5')
def train_cnn(self, input, output):
num_samples, num_channels, num_rows, num_cols = input.shape
_, out_dim = output.shape
# Configurations
batch_size = 30 # note to adjust with the total number of samples
num_epoch = 10
model = Sequential()
model.add(ZeroPadding2D((1,1),input_shape=(num_channels, num_rows, num_cols)))
model.add(Convolution2D(64,3,3))
act1 = Activation('relu')
model.add(act1)
# model.add(BatchNormalization(mode=0, axis=1))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64,3,3))
act2 = Activation('relu')
model.add(act2)
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128,3,3))
act3 = Activation('relu')
model.add(act3)
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128,3,3))
act4 = Activation('relu')
model.add(act4)
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256,3,3))
act5 = Activation('relu')
model.add(act5)
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256,3,3))
act6 = Activation('relu')
model.add(act6)
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256,3,3))
act7 = Activation('relu')
model.add(act7)
model.add(MaxPooling2D((2,2), strides=(2,2)))
'''
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512,3,3))
act8 = Activation('relu')
model.add(act8)
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512,3,3))
act9 = Activation('relu')
model.add(act9)
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512,3,3))
act10 = Activation('relu')
model.add(act10)
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512,3,3))
act11 = Activation('relu')
model.add(act11)
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512,3,3))
act12 = Activation('relu')
model.add(act12)
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512,3,3))
act13 = Activation('relu')
model.add(act13)
model.add(MaxPooling2D((2,2), strides=(2,2)))
'''
model.add(Flatten())
model.add(Dense(4096))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(4096))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(out_dim))
'''
# Net structure
'''
# Compile
# sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# model.compile( optimizer=sgd, \
# loss='categorical_crossentropy' )
model.compile( optimizer='adam', \
loss='mean_squared_error' )
# early_stop = EarlyStopping(monitor='val_loss', patience=2)
hist = model.fit(input, output, \
batch_size=batch_size, nb_epoch=num_epoch, verbose=1, \
validation_split=0.1, shuffle=True)
# callbacks=[early_stop])
print(hist.history)
model.get_config()
# kplt(model, to_file='model.png', show_shapes=True)
# Visualization
I1 = input
print("I1 shape: ", I1.shape)
print('layer 0: ', model.layers[0].get_config())
print
l1f = T.function([model.layers[0].input], \
model.layers[1].output, allow_input_downcast=True)
l1o = np.array(l1f(I1))
print('layer 1: ', model.layers[1].get_config())
print("l1o shape: ", l1o.shape)
l1w = np.squeeze(model.layers[1].W.get_value(borrow=True))
# W1 = model.layers[1].get_weights()[0] # 0 is W, 1 is b
print("l1w shape: ", l1w.shape)
print
l2f = T.function([model.layers[1].input], \
act1.output, allow_input_downcast=True)
l2o = np.array(l2f(I1))
print('layer 2: ', model.layers[2].get_config())
print("l2o shape: ", l2o.shape)
print
l3f = T.function([model.layers[0].input], \
model.layers[3].output, allow_input_downcast=True)
l3o = np.array(l3f(I1))
print('layer 3: ', model.layers[3].get_config())
print("l3o shape: ", l3o.shape)
l4f = T.function([model.layers[0].input], \
model.layers[4].output, allow_input_downcast=True)
l4o = np.array(l4f(I1))
print('layer 4: ', model.layers[4].get_config())
print("l4o shape: ", l4o.shape)
l4w = np.squeeze(model.layers[4].W.get_value(borrow=True))
print("l4w shape: ", l4w.shape)
l5f = T.function([model.layers[1].input], \
act2.output, allow_input_downcast=True)
l5o = np.array(l5f(I1))
print('layer 5: ', model.layers[5].get_config())
print("l5o shape: ", l5o.shape)
l6f = T.function([model.layers[0].input], \
model.layers[6].output, allow_input_downcast=True)
l6o = np.array(l6f(I1))
print('layer 6: ', model.layers[6].get_config())
print("l6o shape: ", l6o.shape)
f = plt.figure()
plt.title('I1')
nice_show(f,I1[0])
f = plt.figure()
plt.title('l1w')
nice_show(f,l1w)
f = plt.figure()
plt.title('l2o')
nice_show(f,l2o[0])
f = plt.figure()
plt.title('l4w')
nice_show(f,l4w[0])
f = plt.figure()
plt.title('l5o')
nice_show(f,l5o[0])
plt.show()
        # TODO: move Prediction to a separate func
# Prediction
predict = model.predict(input, batch_size=batch_size)
# rmse = np.sqrt(((predict-output)**2).mean(axis=0))
# print("rmse = ")
# print(rmse)
# model.train_on_batch(self.in_real, out_data_r)
# model.train_on_batch(self.in_imag, out_data_i)
# TODO: save model
#model_to_save_json = model.to_json()
#open('model_architecture.json', 'w').write(model_to_save_json)
#model_to_save_yaml = model.to_yaml()
#open('model_architecture.yaml', 'w').write(model_to_save_yaml)
#model.save_weights('weights.h5')
return predict
def predict(self, X_test, Y_test):
model = model_from_json(open('model_architecture.json').read())
model = model_from_yaml(open('model_architecture.yaml').read())
model.load_weights('weights.h5')
loss_and_metrics = model.evaluate(X_test, Y_test, batch_size=32)
classes = model.predict_classes(X_test, batch_size=32)
proba = model.predict_proba(X_test, batch_size=32)
def get_interlayer_output(self, num_layer):
"""TODO: Docstring for get_interlayer_output.
:returns: TODO
"""
pass
def get_2D_dct(img):
return dct(dct(img.T, norm='ortho').T, norm='ortho')
def get_2D_idct(coeff):
return idct(idct(coeff.T, norm='ortho').T, norm='ortho')
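# Illustrative sketch (not part of the original pipeline): the code below keeps
# only the top-left dct_size x dct_size block of DCT coefficients, so a round
# trip through that truncation shows what the compressed labels encode.
def _demo_dct_truncation(label_size=32, dct_size=25):
    img = np.random.rand(label_size, label_size)
    coeff = get_2D_dct(img)
    coeff[dct_size:, :] = 0.0   # drop high-frequency rows
    coeff[:, dct_size:] = 0.0   # drop high-frequency columns
    return get_2D_idct(coeff)   # low-pass approximation of img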
def test_net():
num_samples = 86
num_channels = 1
num_rows = 66
num_cols = 66
label_size = 32
dct_size = 25
input_data = np.zeros((num_samples, num_channels, num_rows, num_cols))
with h5.File('input_h5', 'r') as hf:
input_data = np.array(hf['input_data'])
with h5.File('output_h5', 'r') as hf:
output_data = np.array(hf['output_data'])
# Train
ann = ANN()
pred = ann.train_cnn(input_data, output_data)
pred = pred.reshape(num_samples, dct_size, dct_size)
images_pred = []
for i in range(num_samples):
dct_pr = pred[i,:]
dct_pr_cp = np.zeros((label_size,label_size))
dct_pr_cp[:dct_size,:dct_size] = dct_pr.copy()
img_pr = get_2D_idct(dct_pr_cp)
images_pred.append(img_pr)
with h5.File('images_pred.h5', 'w') as hf:
hf['images_pred'] = images_pred
# print('amp_pr is ')
# print(amp_pr)
# plt.figure()
# plt.imshow(amp_pr[0,:,:], extent=(0,0.1,0,0.1))
# plt.show()
def test_import():
num_samples = 86
num_channels = 66
num_rows = 64
num_cols = 64
dataset_name = 'animals_n500_s32_30-Jul-2016'
# Read Inputs
prefix = ''.join(['./sim_data_', dataset_name])
input_data = np.zeros((num_samples, num_channels, num_rows, num_cols))
sam_id = 43
for sid in range(1):
ln_id = 100
for cid in range(1):
inname = ''.join(['phant_',str(sam_id),'_rf_ln',str(ln_id),'.mat'])
print('inname', inname)
input_data[sid,cid,:,:] = sio.loadmat(path.join(prefix,inname))['csm']
ln_id += 1
sam_id += 1
# with h5.File('input_h5', 'w') as hf:
# hf['input'] = input_data
f = plt.figure()
img = input_data[0,0,:,:].reshape(1,num_rows,num_cols)
nice_show(f,img)
plt.show()
# Read Outputs
label_data_path = './'
label_data_name = ''.join([dataset_name, '.mat'])
label_size = 32
dct_size = 25
label_data = sio.loadmat(''.join([label_data_path,label_data_name]))['phantom_c']
output_data = np.zeros((num_samples, label_size**2))
for i in range(num_samples):
output_data[i,:] = label_data[i,0][:,:,0].ravel()
img = label_data[i,0][:,:,0]
# Get DCT Coeff
dct_coeff = get_2D_dct(img)
# plt.matshow(np.abs(dct_coeff), cmap=plt.cm.Paired)
# Compress Coeff
dct_coeff_cp = dct_coeff.copy()
dct_coeff_cp[dct_size:,:] = 0.0
dct_coeff_cp[:,dct_size:] = 0.0
# Alternative
# v = np.mean(dct_coeff_cp) + 1.0*np.std(dct_coeff_cp)
# ind = np.nonzero(dct_coeff_cp<v)
# dct_coeff_cp[ind] = 0.0
# print("len ind")
        # print(len(np.array(ind).ravel()))
'''
# Reconstruction
img_re = get_2D_idct(dct_coeff_cp)
img_m = np.mean(img_re)
ind_1 = np.nonzero(img_re>img_m)
ind_0 = np.nonzero(img_re<img_m)
img_re[ind_1] = 1
img_re[ind_0] = 0
print("img_re")
print(img_re)
plt.figure()
plt.imshow(img_re)#, cmap=plt.cm.gray)
plt.show()
'''
# dct_clip = np.array(filter(lambda x:x>0.0, dct_coeff_cp.reshape(-1,1)))
dct_clip = dct_coeff_cp[:dct_size,:dct_size].ravel()
# with h5.File('output_h5', 'w') as hf:
# hf['output'] = output_data
f = plt.figure()
nice_show(f, np.array(label_data[sam_id,0])[:,:,0].reshape(1,32,32))
plt.show()
def test_results():
label_data_path = './'
label_data_name = 'animals_n100_26-Jul-2016.mat'
label_data = sio.loadmat(''.join([label_data_path,label_data_name]))['phantom_c']
print(label_data.shape)
plt.figure()
for i in range(10):
img = label_data[i,0][:,:,0]
plt.subplot(4,3,i+1)
plt.imshow(img)
    with h5.File('images_pred.h5', 'r') as hf:
images = np.array(hf['images_pred'])
plt.figure()
for i in range(10):
img_t = images[i,:,:]
vm = np.mean(img_t)
img_t[np.nonzero(img_t<vm)] = 0
img_t[np.nonzero(img_t>vm)] = 1
plt.subplot(4,3,i+1)
plt.imshow(img_t)
plt.show()
'''
l = 0
m = 0
n = 0
for i in range(1,87):
m = 0
for j in range(63,129):
tmp = sio.loadmat(path.join(prefix, \
''.join(['phant_',str(i),'_rf_ln',str(j),'.mat'])))
tstart = tmp['tstart']
rf1 = np.array(tmp['rf_data'][int(tstart*fs):])[0:lenRF,0]
n = m+1
for k in range(j+1,129):
tmp = sio.loadmat(path.join(prefix, \
''.join(['phant_',str(i),'_rf_ln',str(k),'.mat'])))
tstart = tmp['tstart']
rf2 = np.array(tmp['rf_data'][int(tstart*fs):])[0:lenRF,0]
# Cross Spectrum
_, csd = signal.csd(rf1, rf2, fs=fs, nperseg=nFFT, \
nfft=nFFT, scaling='density')
input_data[l,0,m,n] = np.abs(np.sum(csd) / nFFT) # sum,average,abs
n += 1
input_data[l,0,(m+1):,m] = input_data[l,0,m,(m+1):]
m += 1
l += 1
var_min = np.amin(input_data)
var_max = np.amax(input_data)
input_data = 10*((input_data-var_min) / (var_max-var_min))
print(input_data)
with h5.File('csm_large_h5', 'w') as hf:
hf['csm'] = input_data
'''
if __name__ == '__main__':
    test_import()
# test_net()
# test_results()
|
waynezv/ANN
|
ANN_large_v2.py
|
Python
|
mit
| 20,943
|
[
"VisIt"
] |
c3d7ebd5cdea116d2c4a825bce4a9a1a23bd54240e43d67ce69f1d951b83df19
|
"""TransformationInfo class to be used by ILCTransformation System"""
from collections import OrderedDict, defaultdict
from itertools import zip_longest
from DIRAC import gLogger, S_OK
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Core.Utilities.Proxy import UserProxy
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.TransformationSystem.Utilities.JobInfo import JobInfo
from DIRAC.WorkloadManagementSystem.Client import JobStatus
from DIRAC.WorkloadManagementSystem.Client.JobStateUpdateClient import JobStateUpdateClient
class TransformationInfo(object):
"""Hold information about a transformation."""
def __init__(self, transformationID, transInfoDict, enabled, tClient, fcClient, jobMon):
"""Store clients etc."""
self.log = gLogger.getSubLogger(__name__ + "[%s]" % transformationID)
self.enabled = enabled
self.tID = transformationID
self.transName = transInfoDict["TransformationName"]
self.tClient = tClient
self.jobMon = jobMon
self.fcClient = fcClient
self.transType = transInfoDict["Type"]
self.authorDN = transInfoDict["AuthorDN"]
self.authorGroup = transInfoDict["AuthorGroup"]
self.jobStateClient = JobStateUpdateClient()
    def checkTasksStatus(self):
        """Return a dict mapping each taskID of this transformation to a list of
        dicts with the FileID, LFN, Status and ErrorCount of its input files."""
res = self.tClient.getTransformationFiles(condDict={"TransformationID": self.tID})
if not res["OK"]:
raise RuntimeError("Failed to get transformation tasks: %s" % res["Message"])
tasksDict = defaultdict(list)
for task in res["Value"]:
taskID = task["TaskID"]
lfn = task["LFN"]
status = task["Status"]
fileID = task["FileID"]
errorCount = task["ErrorCount"]
tasksDict[taskID].append(dict(FileID=fileID, LFN=lfn, Status=status, ErrorCount=errorCount))
return tasksDict
def setJobDone(self, job):
"""set the taskID to Done"""
if not self.enabled:
return
self.__setTaskStatus(job, "Done")
if job.status != JobStatus.DONE:
self.__updateJobStatus(job.jobID, JobStatus.DONE, "Job forced to Done")
    def setJobFailed(self, job):
        """set the taskID to Failed"""
if not self.enabled:
return
self.__setTaskStatus(job, "Failed")
if job.status != JobStatus.FAILED:
self.__updateJobStatus(job.jobID, JobStatus.FAILED, "Job forced to Failed")
def setInputUnused(self, job):
"""Set the inputfiles to unused"""
self.__setInputStatus(job, "Unused")
def setInputMaxReset(self, job):
"""set the inputfile to MaxReset"""
self.__setInputStatus(job, "MaxReset")
    def setInputProcessed(self, job):
        """set the inputfile to Processed"""
        self.__setInputStatus(job, "Processed")
    def setInputDeleted(self, job):
        """set the inputfile to Deleted"""
        self.__setInputStatus(job, "Deleted")
def __setInputStatus(self, job, status):
"""set the input file to status"""
if self.enabled:
result = self.tClient.setFileStatusForTransformation(self.tID, status, job.inputFiles, force=True)
if not result["OK"]:
gLogger.error("Failed updating status", result["Message"])
raise RuntimeError("Failed updating file status")
def __setTaskStatus(self, job, status):
"""update the task in the TransformationDB"""
taskID = job.taskID
res = self.tClient.setTaskStatus(self.transName, taskID, status)
if not res["OK"]:
raise RuntimeError("Failed updating task status: %s" % res["Message"])
def __updateJobStatus(self, jobID, status, minorstatus=""):
"""Update the job status."""
if self.enabled:
source = "DataRecoveryAgent"
result = self.jobStateClient.setJobStatus(jobID, status, minorstatus, source, None, True)
else:
return S_OK("DisabledMode")
if not result["OK"]:
self.log.error("Failed to update job status", result["Message"])
raise RuntimeError("Failed to update job status")
return result
def __findAllDescendants(self, lfnList):
"""Find all descendants of a list of LFNs"""
allDescendants = []
result = self.fcClient.getFileDescendents(lfnList, list(range(1, 8)))
if not result["OK"]:
return allDescendants
for dummy_lfn, descendants in result["Value"]["Successful"].items():
allDescendants.extend(descendants)
return allDescendants
def cleanOutputs(self, jobInfo):
"""Remove all job outputs for job represented by jobInfo object.
Including removal of descendents, if defined.
"""
if len(jobInfo.outputFiles) == 0:
return
descendants = self.__findAllDescendants(jobInfo.outputFiles)
existingOutputFiles = [
lfn for lfn, status in zip_longest(jobInfo.outputFiles, jobInfo.outputFileStatus) if status == "Exists"
]
filesToDelete = existingOutputFiles + descendants
if not filesToDelete:
return
if not self.enabled:
self.log.notice("Would have removed these files: \n +++ %s " % "\n +++ ".join(filesToDelete))
return
self.log.notice("Remove these files: \n +++ %s " % "\n +++ ".join(filesToDelete))
errorReasons = defaultdict(list)
successfullyRemoved = 0
for lfnList in breakListIntoChunks(filesToDelete, 200):
with UserProxy(proxyUserDN=self.authorDN, proxyUserGroup=self.authorGroup) as proxyResult:
if not proxyResult["OK"]:
raise RuntimeError("Failed to get a proxy: %s" % proxyResult["Message"])
result = DataManager().removeFile(lfnList)
if not result["OK"]:
self.log.error("Failed to remove LFNs", result["Message"])
raise RuntimeError("Failed to remove LFNs: %s" % result["Message"])
for lfn, err in result["Value"]["Failed"].items():
reason = str(err)
errorReasons[reason].append(lfn)
successfullyRemoved += len(result["Value"]["Successful"])
for reason, lfns in errorReasons.items():
self.log.error("Failed to remove %d files with error: %s" % (len(lfns), reason))
self.log.notice("Successfully removed %d files" % successfullyRemoved)
def getJobs(self, statusList=None):
"""Get done and failed jobs.
:param list statusList: optional list of status to find jobs
:returns: 3-tuple of OrderedDict of JobInfo objects, keyed by jobID;
number of Done jobs; number of Failed jobs
"""
done = S_OK([])
failed = S_OK([])
if statusList is None:
statusList = [JobStatus.DONE, JobStatus.FAILED]
if "Done" in statusList:
self.log.notice("Getting 'Done' Jobs...")
done = self.__getJobs([JobStatus.DONE])
if "Failed" in statusList:
self.log.notice("Getting 'Failed' Jobs...")
failed = self.__getJobs([JobStatus.FAILED])
done = done["Value"]
failed = failed["Value"]
jobsUnsorted = {}
for job in done:
jobsUnsorted[int(job)] = JobInfo(job, JobStatus.DONE, self.tID, self.transType)
for job in failed:
jobsUnsorted[int(job)] = JobInfo(job, JobStatus.FAILED, self.tID, self.transType)
jobs = OrderedDict(sorted(jobsUnsorted.items(), key=lambda t: t[0]))
self.log.notice("Found %d Done Jobs " % len(done))
self.log.notice("Found %d Failed Jobs " % len(failed))
return jobs, len(done), len(failed)
def __getJobs(self, status):
"""Return list of jobs with given status.
:param list status: list of status to find
:returns: S_OK with result
:raises: RuntimeError when failing to find jobs
"""
attrDict = dict(Status=status, JobGroup="%08d" % int(self.tID))
res = self.jobMon.getJobs(attrDict)
if res["OK"]:
self.log.debug("Found Trans jobs: %s" % res["Value"])
return res
else:
self.log.error("Error finding jobs: ", res["Message"])
raise RuntimeError("Failed to get jobs")
|
DIRACGrid/DIRAC
|
src/DIRAC/TransformationSystem/Utilities/TransformationInfo.py
|
Python
|
gpl-3.0
| 8,549
|
[
"DIRAC"
] |
762d47be9eddd0d72c9ec6e1b2ad37b50c9ffebc4f02062b7f2542e1d91f75f6
|
#!/usr/bin/python
#----------------------------------------------------------------------------#
# #
# ozz-animation is hosted at http://github.com/guillaumeblanc/ozz-animation #
# and distributed under the MIT License (MIT). #
# #
# Copyright (c) 2015 Guillaume Blanc #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the "Software"), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL #
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
#----------------------------------------------------------------------------#
# CMake python helper script.
import subprocess
import multiprocessing
import shutil
import sys
import os
import re
from functools import partial
# Build global path variables.
root = os.path.abspath(os.path.join(os.getcwd(), '.'))
build_dir = os.path.join(root, 'build')
build_dir_cc = os.path.join(root, 'build-cc')
cmake_cache_file = os.path.join(build_dir, 'CMakeCache.txt')
config = 'Release'
generators = {0: 'default'}
generator = generators[0]
emscripten_path = os.environ.get('EMSCRIPTEN')
def ValidateCMake():
try:
# Test that cmake can be executed, silently...
pipe = subprocess.Popen(['cmake'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = pipe.communicate()
except OSError as e:
print("CMake is not installed or properly setup. Please visit www.cmake.org.")
return False
print("CMake is installed and setup properly.")
return True
def CheckEmscripten():
if(emscripten_path == None):
return False
    try:
        # Test that emcc can be executed, silently...
pipe = subprocess.Popen(['emcc'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = pipe.communicate()
except OSError as e:
print("Emscripten is not installed or properly setup.")
return False
print("Emscripten is installed and setup properly.")
return True
def MakeBuildDir(_build_dir = build_dir):
print("Creating out-of-source build directory: \"" + _build_dir + "\".")
if not os.path.exists(_build_dir):
os.makedirs(_build_dir)
return True
def CleanBuildDir():
print("Deleting out-of-source build directory: \"" + build_dir + "\".")
if os.path.exists(build_dir):
shutil.rmtree(build_dir)
print("Deleting out-of-source cross compilation build directory: \"" + build_dir_cc + "\".")
if os.path.exists(build_dir_cc):
shutil.rmtree(build_dir_cc)
return True
def Configure():
# Configure build process.
print("Configuring build project.")
options = ['cmake']
options += ['-D', 'CMAKE_BUILD_TYPE=' + config]
global generator
if(generator != 'default'):
options += ['-G', generator]
options += [root]
config_process = subprocess.Popen(options, cwd=build_dir)
config_process.wait()
if(config_process.returncode != 0):
print("Configuration failed.")
return False
print("Configuration succeeded.")
# Updates generator once configuration is complete
generator = DetectGenerator()
return True
def ConfigureCC():
    # Configure build process.
    print("Configuring cross compilation build project.")
    global generator
    options = ['cmake']
options += ['-D', 'CMAKE_BUILD_TYPE=' + config]
options += ['-D', 'CMAKE_TOOLCHAIN_FILE=' + emscripten_path + '/cmake/Platform/Emscripten.cmake']
options += ['-D', 'dae2anim_DIR=' + build_dir]
options += ['-D', 'dae2skel_DIR=' + build_dir]
options += ['-G', 'MinGW Makefiles']
options += [root]
config_process = subprocess.Popen(options, cwd=build_dir_cc)
config_process.wait()
if(config_process.returncode != 0):
print("Configuration failed.")
return False
print("Configuration succeeded.")
# Updates generator once configuration is complete
generator = DetectGenerator()
return True
def Build(_build_dir = build_dir):
# Configure build process.
print("Building project.")
    options = ['cmake', '--build', _build_dir, '--config', config, '--use-stderr']
# Appends parallel build option if supported by the generator.
if "Unix Makefiles" in generator:
options += ['--', '-j' + str(multiprocessing.cpu_count())]
config_process = subprocess.Popen(options, cwd=_build_dir)
config_process.wait()
if(config_process.returncode != 0):
print("Build failed.")
return False
print("Build succeeded.")
return True
def Test():
# Configure Test process.
print("Running unit tests.")
options = ['ctest' ,'--output-on-failure', '-j' + str(multiprocessing.cpu_count()), '--build-config', config]
config_process = subprocess.Popen(options, cwd=build_dir)
config_process.wait()
if(config_process.returncode != 0):
print("Testing failed.")
return False
print("Testing succeeded.")
return True
def PackSources(_type):
print("Packing sources.")
options = ['cpack', '-G', _type, '--config', 'CPackSourceConfig.cmake']
config_process = subprocess.Popen(options, cwd=build_dir)
config_process.wait()
if(config_process.returncode != 0):
print("Packing sources of type " + _type + " failed.")
return False
print("Packing sources of type " + _type + " succeeded.")
return True
def PackBinaries(_type, _build_dir = build_dir):
print("Packing binaries.")
options = ['cpack', '-G', _type, '-C', config]
config_process = subprocess.Popen(options, cwd=_build_dir)
config_process.wait()
if(config_process.returncode != 0):
print("Packing binaries of type " + _type + " failed.")
return False
print("Packing binaries of type " + _type + " succeeded.")
return True
def SelecConfig():
configs = {
1: 'Debug',
2: 'Release',
3: 'RelWithDebInfo',
4: 'MinSizeRel'}
while True:
print("Select build configuration:")
for num, message in sorted(configs.iteritems()):
print("%d: %s") % (num, message)
# Get input and check validity
        try:
            answer = int(raw_input("Enter a value: "))
        except ValueError:
            continue
if not answer in configs:
continue
# Affect global configuration variable
global config
config = configs[answer]
return True
def FindGenerators():
# Finds all generators outputted from cmake usage
process = subprocess.Popen(['cmake'], stdout=subprocess.PIPE)
stdout = process.communicate()[0]
sub_stdout = stdout[stdout.rfind('Generators'):]
matches = re.findall(r"\s*(.+)\s*=.+", sub_stdout, re.MULTILINE)
# Fills generators list
global generators
for match in matches:
generator_name = match.strip()
generators[len(generators)] = generator_name
# Appends also Win64 option if generator is VS
if "Visual Studio" in generator_name:
generators[len(generators)] = generator_name + " Win64"
def FindInCache(_regex):
    try:
        cache_file = open(cmake_cache_file)
    except IOError:
        return None
return re.search(_regex, cache_file.read())
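# Sketch: CMakeCache.txt stores entries like "CMAKE_GENERATOR:INTERNAL=Ninja",
# so FindInCache(r"CMAKE_GENERATOR:INTERNAL=(.*)") yields a match whose
# group(1) is the generator name, or None before the first configure.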
def DetectGenerator():
match = FindInCache(r"CMAKE_GENERATOR:INTERNAL=(.*)")
if match:
global generators
global generator
for num, message in sorted(generators.iteritems()):
if match.group(1) == message:
return message
return 'default'
def SelecGenerator():
global generators
while True:
print("Select generator:")
for num, message in sorted(generators.iteritems()):
print("%d: %s") % (num, message)
# Get input and check validity
        try:
            answer = int(raw_input("Enter a value: "))
        except ValueError:
            continue
if not answer in generators:
continue
# Check if this is the current generator
current_generator = DetectGenerator()
if current_generator == 'default':
global generator
generator = generators[answer]
return True
if current_generator != generators[answer]:
print("Selected generator '%s' is different from the current one '%s'.") % (generators[answer], current_generator)
clean = raw_input("Do you want to clean build directory to apply the change? (y/n): ") == "y"
if clean:
generator = generators[answer]
return CleanBuildDir()
return True
def ClearScreen():
os.system('cls' if os.name=='nt' else 'clear')
def Exit():
sys.exit(0)
return True
def main():
# Checks CMake installation is correct.
if not ValidateCMake():
return
# Emscripten is optional
CheckEmscripten()
# Detects available generators
FindGenerators()
# Update current generator
print("DetectGenerator")
global generator
generator = DetectGenerator()
options = {
'1': ["Build", [MakeBuildDir, Configure, Build]],
'2': ["Run unit tests", [MakeBuildDir, Configure, Build, Test]],
'3': ["Execute CMake generation step (don't build)", [MakeBuildDir, Configure]],
'4': ["Clean out-of-source build directory\n ------------------", [CleanBuildDir]],
'5': ["Pack binaries", [MakeBuildDir, Configure, Build, partial(PackBinaries, "ZIP"), partial(PackBinaries, "TBZ2")]],
'6': ["Pack sources\n ------------------", [MakeBuildDir, Configure, partial(PackSources, "ZIP"), partial(PackSources, "TBZ2")]],
'7': ["Select build configuration", [SelecConfig]],
'8': ["Select cmake generator\n ------------------", [SelecGenerator]],
'9': ["Exit\n------------------", [Exit]]}
# Adds emscripten
global emscripten_path
if emscripten_path != None:
options['1a'] = ["Build emscripten", [MakeBuildDir, Configure, Build, partial(MakeBuildDir, build_dir_cc), ConfigureCC, partial(Build, build_dir_cc)]]
options['5a'] = ["Pack emscripten binaries", [MakeBuildDir, Configure, Build, partial(MakeBuildDir, build_dir_cc), ConfigureCC, partial(Build, build_dir_cc), partial(PackBinaries, "ZIP", build_dir_cc)]]
while True:
# Displays options
ClearScreen()
print("ozz CMake build helper tool")
print("")
print("Selected build configuration: %s") % config
print("Selected generator: %s") % generator
print("")
print("Choose an option:")
print("------------------")
for key, message in sorted(options.iteritems()):
print(" %s: %s") % (key, message[0])
# Get input and check validity
answer = raw_input("Enter a value: ")
if not answer in options:
continue
# Execute command in a try catch to avoid crashes and allow retries.
ClearScreen()
try:
for command in options[answer][1]:
if command():
print("\nExecution success.\n")
else:
print("\nExecution failed.\n")
break
except Exception, e:
print("\nAn error occured during script execution: %s\n") % e
raw_input("Press enter to continue...")
return 0
if __name__ == '__main__':
main()
|
dgu123/ozz-animation-1
|
build-helper.py
|
Python
|
mit
| 12,044
|
[
"VisIt"
] |
9aabbfa7170bce167cf7b7fced2d48a24bfd6d979390631c011101426b4b2071
|
import copy
import types
import terrain_modifiers
import numpy as np
import terrainblocks
from game import *
import random
def get_modifiers():
"""
Obtains a reference to all the modifier functions.
:return:
"""
return [terrain_modifiers.__dict__.get(a) for a in dir(terrain_modifiers) if isinstance(getattr(terrain_modifiers, a, None), types.FunctionType)]
def destroy_circle(terrain, radius, origin):
    """
    Marks terrain within a certain radius of a point as destroyed by OR-ing in
    the wall flag (-128), the same encoding the WALL layers below use.
    :param terrain: 2D numpy array of the entire terrain.
    :param radius: Int.
    :param origin: Tuple of (x, y)
    :return: None, as it modifies the terrain in place via side effects
    """
subset = terrain[(origin[0] - radius) : (origin[0] + radius), (origin[1] - radius) : (origin[1] + radius)]
# Create a distance array to every cell
distances = np.zeros(shape=subset.shape)
for i in range(distances.shape[0]):
for j in range(distances.shape[1]):
distances[i, j] = np.sqrt((i-radius)**2 + (j-radius)**2)
subset[distances <= radius] |= -128
def get_planet_params(archetype, planet_info):
    """
    Retrieves a dictionary containing planetary information.
    :param archetype: A string naming an entry of terrain_params.
    :param planet_info: Dict of planetary factors, or None for debug defaults.
    :return: Dict of derived planet parameters.
    """
    # Load a private copy of the defaults and gradually overwrite; assigning
    # the shared module-level dict directly would leak state between calls.
    params = copy.deepcopy(default_values)
tparams = terrain_params[archetype]
# DEBUGGING ONLY
# TODO Refactor and remove the stuff used for debugging.
if planet_info is not None:
params['gravity_mean'] = planet_info['size']
params['modifier_params']['crater']['frequency'] = 0.01 + min(0.1, 0.5 / (0.1 + planet_info['dist_to_asteroid_belt'] ))
planet_seed = planet_info['seed']
        # I.e. we don't need to create these intermediate values in the params dictionary
params['temp_mean'] = tparams['mean_temp'] - 0.2 * planet_info['orbit_radius_x']
else:
planet_seed = 17
r = random.Random(planet_seed)
seed = r.getrandbits(32)
r_params = random.Random(seed)
gravity = max(0.1, r_params.gauss(params['gravity_mean'], params['gravity_sd']))
atmosphere = r_params.uniform( *tparams['atmos'] )
water_prob = max(0, r_params.gauss(tparams['mean_water'], tparams['sd_water']))
params['modifier_params']['vegetation']['seed_mod'] = 1.0 - abs(atmosphere - 0.5)
params['modifier_params']['crater']['radius_mean'] = max(6.0, 2.0 / max(0.2, atmosphere))
params['modifier_params']['tunnel']['width_mean'] = 2.0 * tparams['softness']
params['modifier_params']['tunnel']['width_sd'] = 0.1 * tparams['softness']
params['gravity'] = gravity
params['atmosphere'] = atmosphere
params['temp'] = max(100, r_params.gauss(params['temp_mean'], params['temp_sd']))
params['oxygen'] = max(0.01, r_params.gauss(tparams['mean_oxygen'], tparams['sd_oxygen']))
    params['water_prob'] = min(1, max(0, r_params.gauss(tparams['mean_water'], tparams['sd_water'])))
params['water'] = r_params.uniform(0, 1) < water_prob
return params
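# Usage sketch (illustrative values; keys mirror the ones read above):
# info = {'size': 10, 'dist_to_asteroid_belt': 2.0, 'seed': 42,
#         'orbit_radius_x': 1.0}
# params = get_planet_params('earth', info)
# print(params['gravity'], params['temp'], params['water'])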
default_values = {
'gravity_mean': 10,
'gravity_sd': 0.3,
'water_prob': 0.15,
'oxygen_mean': 0.20,
'oxygen_sd': 0.02,
'temp_mean': 300,
'temp_sd': 4,
'modifier_params':
{'tunnel': {
'frequency': 0.05,
'depth_mean': 0.3,
'depth_sd': 0.05,
'width_mean': 2,
'width_sd': 0.1
},
'crater': {
'frequency': 0.02,
'radius_mean': 10,
'radius_sd': 2,
},
'vegetation': {
'seed_mod': 1.0,
'types': [
{ # Blue fungus
'grow_block':4,
'seedrate':0.8, #TODO: Scale with environment
'root_block':14,
'root_depth':1,
'grow_height':1,
},
{ # Purple leaves
'grow_block':3,
'seedrate':0.1, #TODO: Scale with environment
'root_block':4,
'root_depth':2,
'grow_height':5,
},
{ # Grass
'grow_block':9,
'seedrate':0.9, #TODO: Scale with environment
'root_block':2,
'root_depth':1,
'grow_height':1,
},
]
},
'water': {
# ...
}
}
}
# Ground "ground level"
# Layers are "dig depth"
terrain_params = {
# Type: [ [ depth, ratio, blocktype ] ... ]
'earth': {
'atmos': (0.3, 0.5),
'softness': 1.5,
'depth': 80,
'ratio': 0.5,
'base': 1, # Rock
'mean_temp': 300,
'mean_oxygen': 0.21,
'sd_oxygen': 0.04,
'mean_water': 0.8,
'sd_water': 0.07,
'layers': [
[ 10, 0.6, 2 ], # Dirt
]
},
'rock': {
'atmos': (0.1, 0.5),
'softness': 0.7,
'depth': 80,
'ratio': 0.7,
'mean_temp': 280,
'mean_oxygen': 0.23,
'sd_oxygen': 0.04,
'mean_water': 0.2,
'sd_water': 0.05,
'base': 1, # Rock
'layers': [
[ 5, 0.3, 8 ], # Dust
[ 16, 0.6, 7 ], # Cobble
]
},
'desert': {
'atmos': (0.3, 0.9),
'softness': 2.5,
'depth': 80,
'ratio': 0.2,
'mean_temp': 320,
'mean_oxygen': 0.15,
'sd_oxygen': 0.035,
'mean_water': 0.03,
'sd_water': 0.005,
'base': 10, # Mars
'layers': [
[ 10, 0.2, 12 ], # Sand
[ 16, 0.4, 11 ], # Red Sand
]
},
'other': {
'atmos': (0.5, 0.9),
'softness': 1.8,
'depth': 80,
'mean_temp': 400,
'mean_oxygen': 0.10,
'sd_oxygen': 0.015,
'mean_water': 0.5,
'sd_water': 0.15,
'ratio': 0.7,
'base': 13, # Orange Rock
'layers': [
[ 10, 0.9, 14 ], # Pink Sponge
]
},
'ice': {
'atmos': (0.1, 0.5),
'softness': 1.2,
'depth': 80,
'ratio': 0.5,
'mean_oxygen': 0.30,
'sd_oxygen': 0.005,
'mean_temp': 260,
'mean_water': 0.9,
'sd_water': 0.05,
'base': 1, # Rock
'layers': [
[ 15, 0.3, 16 ], # Snow
[ 10, 0.2, 5 ], # Ice
[ 16, 0.8, 15 ], # Permafrost
]
},
'gas': {
'atmos': (0.8, 1.0),
'softness': 0.6,
'depth': 500,
'ratio': 0.3,
'mean_oxygen': 0.20,
'sd_oxygen': 0.005,
'mean_temp': 280,
'mean_water': 0.5,
'sd_water': 0.005,
'base': 17, # Crystal
'layers': [
[ 10, 0.6, -128|20 ], # Cloud (WALL)
[ 80, 0.5, -128|19 ], # Fog (WALL)
[ 10, 0.2, 18 ], # Crystal
]
},
}
|
AndrewJamesTurner/Every-Womans-Ground
|
terrain_utils.py
|
Python
|
gpl-3.0
| 7,057
|
[
"CRYSTAL"
] |
a4ae09b89ab1eaac32b337b3da57e6ee1b058fcb7af92af4bd70ea93c09a8ccf
|