repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
mdickinson/pcgrandom | pcgrandom/test/test_pcg_xsh_rs_v0.py | 1 | 1961 | # Copyright 2017 Mark Dickinson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the PCG_XSH_RS_V0 generator.
"""
import pkgutil
import unittest
from pcgrandom.pcg_xsh_rs_v0 import PCG_XSH_RS_V0
from pcgrandom.test.test_pcg_common import TestPCGCommon
class Test_PCG_XSH_RS_V0(TestPCGCommon, unittest.TestCase):
    """Tests specific to the PCG_XSH_RS_V0 generator."""

    gen_class = PCG_XSH_RS_V0

    def setUp(self):
        self.gen = self.gen_class(seed=15206, sequence=1729)

    def _check_against_reference(self, gen, data_filename):
        # Compare 32 raw outputs with data captured from the C++ PCG
        # reference implementation, version 0.98.
        raw = pkgutil.get_data('pcgrandom.test', data_filename)
        expected_words = raw.decode('utf-8').splitlines(False)
        actual_words = [format(gen._next_output(), '#010x') for _ in range(32)]
        self.assertEqual(actual_words, expected_words)

    def test_agrees_with_reference_implementation_explicit_sequence(self):
        self._check_against_reference(
            self.gen_class(seed=42, sequence=54),
            'data/setseq_xsh_rs_64_32.txt')

    def test_agrees_with_reference_implementation_unspecified_sequence(self):
        self._check_against_reference(
            self.gen_class(seed=123),
            'data/oneseq_xsh_rs_64_32.txt')
| apache-2.0 |
simonwydooghe/ansible | lib/ansible/modules/network/netvisor/pn_igmp_snooping.py | 52 | 6474 | #!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_igmp_snooping
author: "Pluribus Networks (@rajaspachipulusu17)"
version_added: "2.8"
short_description: CLI command to modify igmp-snooping
description:
- This module can be used to modify Internet Group Management Protocol (IGMP) snooping.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: False
type: str
state:
description:
- State the action to perform. Use C(update) to modify the igmp-snooping.
required: True
type: str
choices: ['update']
pn_enable:
description:
- enable or disable IGMP snooping.
required: False
type: bool
pn_query_interval:
description:
- IGMP query interval in seconds.
required: False
type: str
pn_igmpv2_vlans:
description:
- VLANs on which to use IGMPv2 protocol.
required: False
type: str
pn_igmpv3_vlans:
description:
- VLANs on which to use IGMPv3 protocol.
required: False
type: str
pn_enable_vlans:
description:
- enable per VLAN IGMP snooping.
required: False
type: str
pn_vxlan:
description:
- enable or disable IGMP snooping on vxlans.
required: False
type: bool
pn_query_max_response_time:
description:
- maximum response time, in seconds, advertised in IGMP queries.
required: False
type: str
pn_scope:
description:
- IGMP snooping scope - fabric or local.
required: False
choices: ['local', 'fabric']
pn_no_snoop_linklocal_vlans:
description:
- Remove snooping of link-local groups(224.0.0.0/24) on these vlans.
required: False
type: str
pn_snoop_linklocal_vlans:
description:
- Allow snooping of link-local groups(224.0.0.0/24) on these vlans.
required: False
type: str
"""
EXAMPLES = """
- name: 'Modify IGMP Snooping'
pn_igmp_snooping:
pn_cliswitch: 'sw01'
state: 'update'
pn_vxlan: True
pn_enable_vlans: '1-399,401-4092'
pn_no_snoop_linklocal_vlans: 'none'
pn_igmpv3_vlans: '1-399,401-4092'
- name: 'Modify IGMP Snooping'
pn_igmp_snooping:
pn_cliswitch: 'sw01'
state: 'update'
pn_vxlan: False
pn_enable_vlans: '1-399'
pn_no_snoop_linklocal_vlans: 'none'
pn_igmpv3_vlans: '1-399'
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the igmp-snooping command.
returned: always
type: list
stderr:
description: set of error responses from the igmp-snooping command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli, booleanArgs
def main():
    """Parse module arguments and run the igmp-snooping-modify CLI command."""
    state_map = dict(
        update='igmp-snooping-modify'
    )

    # (module parameter, CLI keyword) pairs for the plain string options,
    # appended to the command in this exact order.
    string_options = [
        ('pn_query_interval', 'query-interval'),
        ('pn_igmpv2_vlans', 'igmpv2-vlans'),
        ('pn_igmpv3_vlans', 'igmpv3-vlans'),
        ('pn_enable_vlans', 'enable-vlans'),
        ('pn_query_max_response_time', 'query-max-response-time'),
        ('pn_scope', 'scope'),
        ('pn_no_snoop_linklocal_vlans', 'no-snoop-linklocal-vlans'),
        ('pn_snoop_linklocal_vlans', 'snoop-linklocal-vlans'),
    ]

    module = AnsibleModule(
        argument_spec=dict(
            pn_cliswitch=dict(required=False, type='str'),
            state=dict(required=True, type='str',
                       choices=state_map.keys()),
            pn_enable=dict(required=False, type='bool'),
            pn_query_interval=dict(required=False, type='str'),
            pn_igmpv2_vlans=dict(required=False, type='str'),
            pn_igmpv3_vlans=dict(required=False, type='str'),
            pn_enable_vlans=dict(required=False, type='str'),
            pn_vxlan=dict(required=False, type='bool'),
            pn_query_max_response_time=dict(required=False, type='str'),
            pn_scope=dict(required=False, type='str',
                          choices=['local', 'fabric']),
            pn_no_snoop_linklocal_vlans=dict(required=False, type='str'),
            pn_snoop_linklocal_vlans=dict(required=False, type='str'),
        ),
        # The modify command needs at least one setting to change.
        required_one_of=[['pn_enable', 'pn_query_interval',
                          'pn_igmpv2_vlans',
                          'pn_igmpv3_vlans',
                          'pn_enable_vlans',
                          'pn_vxlan',
                          'pn_query_max_response_time',
                          'pn_scope',
                          'pn_no_snoop_linklocal_vlans',
                          'pn_snoop_linklocal_vlans']]
    )

    params = module.params
    command = state_map[params['state']]

    # Build the CLI command string.
    cli = pn_cli(module, params['pn_cliswitch'])
    if command == 'igmp-snooping-modify':
        cli += ' %s ' % command
        # Tri-state booleans: True -> first keyword, False -> second,
        # None -> omitted (handled by booleanArgs).
        cli += booleanArgs(params['pn_enable'], 'enable', 'disable')
        cli += booleanArgs(params['pn_vxlan'], 'vxlan', 'no-vxlan')
        for param_name, keyword in string_options:
            value = params[param_name]
            if value:
                cli += ' ' + keyword + ' ' + value

    run_cli(module, cli, state_map)


if __name__ == '__main__':
    main()
| gpl-3.0 |
qifeigit/scikit-learn | sklearn/tests/test_isotonic.py | 230 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    # Check that fit is permutation invariant.
    # Regression test for missing sorting of sample weights.
    ir = IsotonicRegression()
    x = [1, 2, 3, 4, 5, 6, 7]
    y = [1, 41, 51, 1, 2, 5, 24]
    sample_weight = [1, 2, 3, 4, 5, 6, 7]
    x_s, y_s, sw_s = shuffle(x, y, sample_weight, random_state=0)
    y_from_original = ir.fit_transform(x, y, sample_weight=sample_weight)
    y_from_shuffled = ir.fit(x_s, y_s, sample_weight=sw_s).transform(x)
    assert_array_equal(y_from_original, y_from_shuffled)
def test_check_increasing_up():
    # Increasing (non-linear) data: increasing=True and no warnings.
    x = [0, 1, 2, 3, 4, 5]
    y = [0, 1.5, 2.77, 8.99, 8.99, 50]
    assert_true(assert_no_warnings(check_increasing, x, y))


def test_check_increasing_up_extreme():
    # Perfectly linear increasing data: increasing=True and no warnings.
    x = [0, 1, 2, 3, 4, 5]
    y = [0, 1, 2, 3, 4, 5]
    assert_true(assert_no_warnings(check_increasing, x, y))


def test_check_increasing_down():
    # Decreasing (non-linear) data: increasing=False and no warnings.
    x = [0, 1, 2, 3, 4, 5]
    y = [0, -1.5, -2.77, -8.99, -8.99, -50]
    assert_false(assert_no_warnings(check_increasing, x, y))


def test_check_increasing_down_extreme():
    # Perfectly linear decreasing data: increasing=False and no warnings.
    x = [0, 1, 2, 3, 4, 5]
    y = [0, -1, -2, -3, -4, -5]
    assert_false(assert_no_warnings(check_increasing, x, y))
def test_check_ci_warn():
    # Oscillating data: expect increasing=False plus a UserWarning whose
    # message mentions "interval" (the confidence-interval warning).
    x = [0, 1, 2, 3, 4, 5]
    y = [0, -1, 2, -3, 4, -5]
    result = assert_warns_message(UserWarning, "interval",
                                  check_increasing, x, y)
    assert_false(result)
def test_isotonic_regression():
    """Basic isotonic_regression/IsotonicRegression consistency checks."""
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    y_ = np.array([3, 6, 6, 8, 8, 8, 10])
    assert_array_equal(y_, isotonic_regression(y))

    x = np.arange(len(y))
    ir = IsotonicRegression(y_min=0., y_max=1.)
    ir.fit(x, y)
    # fit().transform(), fit_transform() and predict() must all agree.
    assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
    assert_array_equal(ir.transform(x), ir.predict(x))

    # check that it is immune to permutation.  Use a seeded RNG so the test
    # is deterministic, consistent with the other tests in this file.
    rng = np.random.RandomState(42)
    perm = rng.permutation(len(y))
    ir = IsotonicRegression(y_min=0., y_max=1.)
    assert_array_equal(ir.fit_transform(x[perm], y[perm]),
                       ir.fit_transform(x, y)[perm])
    assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])

    # check we don't crash when all x are equal:
    ir = IsotonicRegression()
    assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
    # Ties on the minimum x value: tied x get their y-values averaged.
    x = [0, 1, 1, 2, 3, 4, 5]
    y = [0, 1, 2, 3, 4, 5, 6]
    expected = [0, 1.5, 1.5, 3, 4, 5, 6]
    ir = IsotonicRegression()
    via_fit = ir.fit(x, y).transform(x)
    via_fit_transform = ir.fit_transform(x, y)
    # fit().transform() and fit_transform() must agree with each other
    # and with the expected solution.
    assert_array_equal(via_fit, via_fit_transform)
    assert_array_equal(expected, via_fit_transform)


def test_isotonic_regression_ties_max():
    # Ties on the maximum x value: tied x get their y-values averaged.
    x = [1, 2, 3, 4, 5, 5]
    y = [1, 2, 3, 4, 5, 6]
    expected = [1, 2, 3, 4, 5.5, 5.5]
    ir = IsotonicRegression()
    via_fit = ir.fit(x, y).transform(x)
    via_fit_transform = ir.fit_transform(x, y)
    # fit().transform() and fit_transform() must agree with each other
    # and with the expected solution.
    assert_array_equal(via_fit, via_fit_transform)
    assert_array_equal(expected, via_fit_transform)
def test_isotonic_regression_ties_secondary_():
    """
    Test isotonic regression fit, transform and fit_transform
    against the "secondary" ties method and "pituitary" data from the R
    "isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
    Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
    (PAVA) and Active Set Methods

    Reference values were produced with the R commands from the paper:
    > library("isotone")
    > data("pituitary")
    > res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
    > res1$x

    `isotone` version: 1.0-2, 2014-09-07
    R version: R version 3.1.1 (2014-07-10)
    """
    x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
    y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
    y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
              22.22222, 22.22222, 22.22222, 24.25, 24.25]

    # Both fit().transform() and fit_transform() must reproduce the
    # reference solution to 4 decimal places.
    ir = IsotonicRegression().fit(x, y)
    assert_array_almost_equal(ir.transform(x), y_true, 4)
    assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
    # With increasing=False, consecutive fitted values are non-increasing.
    y = np.array([10, 9, 10, 7, 6, 6.1, 5])
    ir = IsotonicRegression(increasing=False)
    y_ = ir.fit_transform(np.arange(len(y)), y)
    assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
    # increasing='auto' must detect a decreasing trend without warnings.
    y = np.array([10, 9, 10, 7, 6, 6.1, 5])
    x = np.arange(len(y))
    ir = IsotonicRegression(increasing='auto')
    y_ = assert_no_warnings(ir.fit_transform, x, y)
    # First value should not be below the last one.
    assert_false(y_[0] < y_[-1])


def test_isotonic_regression_auto_increasing():
    # increasing='auto' must detect an increasing trend without warnings.
    y = np.array([5, 6.1, 6, 7, 10, 9, 10])
    x = np.arange(len(y))
    ir = IsotonicRegression(increasing='auto')
    y_ = assert_no_warnings(ir.fit_transform, x, y)
    # First value should be below the last one.
    assert_true(y_[0] < y_[-1])
def test_assert_raises_exceptions():
    # Malformed inputs to fit/transform must raise ValueError:
    # mismatched lengths and non-1d X.
    ir = IsotonicRegression()
    rng = np.random.RandomState(42)
    assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
    assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
    assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
    assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
    # Fitting with explicit unit weights must equal fitting with the
    # default (no sample_weight given).
    ir = IsotonicRegression()
    rng = np.random.RandomState(42)
    n = 100
    x = np.arange(n)
    y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
    with_unit_weights = ir.fit_transform(x, y, sample_weight=np.ones(n))
    with_default = ir.fit_transform(x, y)
    assert_array_equal(with_unit_weights, with_default)
def test_isotonic_min_max_boundaries():
    # Fitted values are bounded by y_min and y_max.
    ir = IsotonicRegression(y_min=2, y_max=4)
    n = 6
    x = np.arange(n)
    y = np.arange(n)
    expected = [2, 2, 2, 3, 4, 4]
    assert_array_equal(np.round(ir.fit_transform(x, y)), expected)
def test_isotonic_sample_weight():
    # Weighted fit must reproduce the precomputed weighted PAVA solution.
    ir = IsotonicRegression()
    x = [1, 2, 3, 4, 5, 6, 7]
    y = [1, 41, 51, 1, 2, 5, 24]
    sample_weight = [1, 2, 3, 4, 5, 6, 7]
    expected = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
    assert_array_equal(expected,
                       ir.fit_transform(x, y, sample_weight=sample_weight))
def test_isotonic_regression_oob_raise():
    # out_of_bounds="raise": predicting outside the training range errors.
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    ir = IsotonicRegression(increasing='auto', out_of_bounds="raise").fit(x, y)
    assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])


def test_isotonic_regression_oob_clip():
    # out_of_bounds="clip": out-of-range inputs are clipped so their
    # predictions match the training min/max predictions.
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    ir = IsotonicRegression(increasing='auto', out_of_bounds="clip").fit(x, y)
    outside = ir.predict([min(x) - 10, max(x) + 10])
    inside = ir.predict(x)
    assert_equal(max(outside), max(inside))
    assert_equal(min(outside), min(inside))


def test_isotonic_regression_oob_nan():
    # out_of_bounds="nan": out-of-range inputs predict NaN.
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    ir = IsotonicRegression(increasing='auto', out_of_bounds="nan").fit(x, y)
    outside = ir.predict([min(x) - 10, max(x) + 10])
    assert_equal(sum(np.isnan(outside)), 2)
def test_isotonic_regression_oob_bad():
    # An invalid out_of_bounds value must be rejected at fit time.
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
    assert_raises(ValueError, ir.fit, x, y)


def test_isotonic_regression_oob_bad_after():
    # An out_of_bounds value corrupted after fitting must be rejected
    # at transform time.
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
    ir.fit(x, y)
    ir.out_of_bounds = "xyz"
    assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
    # A pickle round-trip must preserve predictions exactly.
    y = np.array([3, 7, 5, 9, 8, 7, 10])
    x = np.arange(len(y))
    ir = IsotonicRegression(increasing='auto', out_of_bounds="clip").fit(x, y)
    ir2 = pickle.loads(pickle.dumps(ir, pickle.HIGHEST_PROTOCOL))
    np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
    # A duplicated minimum x with clipping must still yield finite
    # predictions.
    x = [0, 0, 1]
    y = [0, 0, 1]
    ir = IsotonicRegression(increasing=True, out_of_bounds="clip").fit(x, y)
    assert_true(np.all(np.isfinite(ir.predict(x))))
def test_isotonic_zero_weight_loop():
    # Non-regression test for an infinite loop with zero sample weights:
    # https://github.com/scikit-learn/scikit-learn/issues/4297
    rng = np.random.RandomState(42)
    regression = IsotonicRegression()
    n_samples = 50
    x = np.linspace(-3, 3, n_samples)
    y = x + rng.uniform(size=n_samples)
    w = rng.uniform(size=n_samples)
    w[5:8] = 0  # zero out a few weights to trigger the old hang
    regression.fit(x, y, sample_weight=w)
    # The second fit used to hang in the failure case.
    regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
davidzchen/tensorflow | tensorflow/python/ops/numpy_ops/np_array_ops_test.py | 5 | 41372 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf numpy array methods."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import numpy as np
from six.moves import range
from six.moves import zip
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.numpy_ops import np_array_ops
from tensorflow.python.ops.numpy_ops import np_arrays
from tensorflow.python.platform import test
_virtual_devices_ready = False
def set_up_virtual_devices():
  """Split the first physical CPU into two logical devices, once per process."""
  global _virtual_devices_ready
  if _virtual_devices_ready:
    return
  cpus = config.list_physical_devices('CPU')
  config.set_logical_device_configuration(
      cpus[0],
      [context.LogicalDeviceConfiguration(),
       context.LogicalDeviceConfiguration()])
  _virtual_devices_ready = True
class ArrayCreationTest(test.TestCase):
def setUp(self):
  """Builds shared fixtures: shapes, dtypes and source arrays in many forms."""
  super(ArrayCreationTest, self).setUp()
  set_up_virtual_devices()
  python_shapes = [
      0, 1, 2, (), (1,), (2,), (1, 2, 3), [], [1], [2], [1, 2, 3]
  ]
  # Each shape is replayed through several equivalent representations
  # (plain Python, np.array, np_array_ops.array, TensorShape).
  self.shape_transforms = [
      lambda x: x, lambda x: np.array(x, dtype=int),
      lambda x: np_array_ops.array(x, dtype=int), tensor_shape.TensorShape
  ]
  self.all_shapes = []
  for fn in self.shape_transforms:
    self.all_shapes.extend([fn(s) for s in python_shapes])
  if sys.version_info.major == 3:
    # There is a bug of np.empty (and alike) in Python 3 causing a crash when
    # the `shape` argument is an np_arrays.ndarray scalar (or tf.Tensor
    # scalar).
    def not_ndarray_scalar(s):
      return not (isinstance(s, np_arrays.ndarray) and s.ndim == 0)
    self.all_shapes = list(filter(not_ndarray_scalar, self.all_shapes))
  self.all_types = [
      int, float, np.int16, np.int32, np.int64, np.float16, np.float32,
      np.float64
  ]
  # Scalars plus nested tuples/lists in several combinations, covering
  # empty, ragged-looking and mixed int/float cases.
  source_array_data = [
      1,
      5.5,
      7,
      (),
      (8, 10.),
      ((), ()),
      ((1, 4), (2, 8)),
      [],
      [7],
      [8, 10.],
      [[], []],
      [[1, 4], [2, 8]],
      ([], []),
      ([1, 4], [2, 8]),
      [(), ()],
      [(1, 4), (2, 8)],
  ]
  self.array_transforms = [
      lambda x: x,
      ops.convert_to_tensor,
      np.array,
      np_array_ops.array,
  ]
  self.all_arrays = []
  for fn in self.array_transforms:
    self.all_arrays.extend([fn(s) for s in source_array_data])
def testEmpty(self):
  """empty() must match np.empty in shape/dtype (values are arbitrary)."""
  for shape in self.all_shapes:
    got, want = np_array_ops.empty(shape), np.empty(shape)
    msg = 'shape: {}'.format(shape)
    self.match_shape(got, want, msg)
    self.match_dtype(got, want, msg)
  for shape, dtype in itertools.product(self.all_shapes, self.all_types):
    got, want = np_array_ops.empty(shape, dtype), np.empty(shape, dtype)
    msg = 'shape: {}, dtype: {}'.format(shape, dtype)
    self.match_shape(got, want, msg)
    self.match_dtype(got, want, msg)

def testEmptyLike(self):
  """empty_like() must match np.empty_like in shape/dtype."""
  for a in self.all_arrays:
    got, want = np_array_ops.empty_like(a), np.empty_like(a)
    msg = 'array: {}'.format(a)
    self.match_shape(got, want, msg)
    self.match_dtype(got, want, msg)
  for a, dtype in itertools.product(self.all_arrays, self.all_types):
    got, want = np_array_ops.empty_like(a, dtype), np.empty_like(a, dtype)
    msg = 'array: {} type: {}'.format(a, dtype)
    self.match_shape(got, want, msg)
    self.match_dtype(got, want, msg)

def testZeros(self):
  """zeros() must fully match np.zeros, with and without dtype."""
  for shape in self.all_shapes:
    self.match(np_array_ops.zeros(shape), np.zeros(shape),
               'shape: {}'.format(shape))
  for shape, dtype in itertools.product(self.all_shapes, self.all_types):
    self.match(np_array_ops.zeros(shape, dtype), np.zeros(shape, dtype),
               'shape: {}, dtype: {}'.format(shape, dtype))

def testZerosLike(self):
  """zeros_like() must fully match np.zeros_like."""
  for a in self.all_arrays:
    self.match(np_array_ops.zeros_like(a), np.zeros_like(a),
               'array: {}'.format(a))
  for a, dtype in itertools.product(self.all_arrays, self.all_types):
    self.match(np_array_ops.zeros_like(a, dtype), np.zeros_like(a, dtype),
               'array: {} type: {}'.format(a, dtype))

def testOnes(self):
  """ones() must fully match np.ones, with and without dtype."""
  for shape in self.all_shapes:
    self.match(np_array_ops.ones(shape), np.ones(shape),
               'shape: {}'.format(shape))
  for shape, dtype in itertools.product(self.all_shapes, self.all_types):
    self.match(np_array_ops.ones(shape, dtype), np.ones(shape, dtype),
               'shape: {}, dtype: {}'.format(shape, dtype))

def testOnesLike(self):
  """ones_like() must fully match np.ones_like."""
  for a in self.all_arrays:
    self.match(np_array_ops.ones_like(a), np.ones_like(a),
               'array: {}'.format(a))
  for a, dtype in itertools.product(self.all_arrays, self.all_types):
    self.match(np_array_ops.ones_like(a, dtype), np.ones_like(a, dtype),
               'array: {} type: {}'.format(a, dtype))
def testEye(self):
  """Compares np_array_ops.eye with np.eye over n, m, k and all dtypes."""
  n_max = 3
  m_max = 3
  for n in range(1, n_max + 1):
    self.match(np_array_ops.eye(n), np.eye(n))
    for k in range(-n, n + 1):
      self.match(np_array_ops.eye(n, k=k), np.eye(n, k=k))
    for m in range(1, m_max + 1):
      self.match(np_array_ops.eye(n, m), np.eye(n, m))
      for k in range(-n, m):
        # NOTE(review): this call omits `m`, so it re-checks the square
        # case; possibly `eye(n, m, k=k)` was intended — the rectangular
        # case is still covered by the `eye(n, m, k)` check below.
        self.match(np_array_ops.eye(n, k=k), np.eye(n, k=k))
        self.match(np_array_ops.eye(n, m, k), np.eye(n, m, k))
  # Same sweep again with an explicit dtype.
  for dtype in self.all_types:
    for n in range(1, n_max + 1):
      self.match(np_array_ops.eye(n, dtype=dtype), np.eye(n, dtype=dtype))
      for k in range(-n, n + 1):
        self.match(
            np_array_ops.eye(n, k=k, dtype=dtype),
            np.eye(n, k=k, dtype=dtype))
      for m in range(1, m_max + 1):
        self.match(
            np_array_ops.eye(n, m, dtype=dtype), np.eye(n, m, dtype=dtype))
        for k in range(-n, m):
          self.match(
              np_array_ops.eye(n, k=k, dtype=dtype),
              np.eye(n, k=k, dtype=dtype))
          self.match(
              np_array_ops.eye(n, m, k, dtype=dtype),
              np.eye(n, m, k, dtype=dtype))
def testIdentity(self):
  """identity() must match np.identity, with and without dtype."""
  n_max = 3
  for n in range(1, n_max + 1):
    self.match(np_array_ops.identity(n), np.identity(n))
  for dtype, n in itertools.product(self.all_types, range(1, n_max + 1)):
    self.match(np_array_ops.identity(n, dtype=dtype),
               np.identity(n, dtype=dtype))
def testFull(self):
  """full() must agree with np.full across fill values and shape forms."""
  # (fill value, shape) cases.
  cases = [
      (5, ()),
      (5, (7,)),
      (5., (7,)),
      ([5, 8], (2,)),
      ([5, 8], (3, 2)),
      ([[5], [8]], (2, 3)),
      ([[5], [8]], (3, 2, 5)),
      ([[5.], [8.]], (3, 2, 5)),
      ([[3, 4], [5, 6], [7, 8]], (3, 3, 2)),
  ]
  for fill, shape_spec in cases:
    for to_fill, to_shape in itertools.product(self.array_transforms,
                                               self.shape_transforms):
      fill_value = to_fill(fill)
      shape = to_shape(shape_spec)
      self.match(np_array_ops.full(shape, fill_value),
                 np.full(shape, fill_value))
      for dtype in self.all_types:
        self.match(np_array_ops.full(shape, fill_value, dtype=dtype),
                   np.full(shape, fill_value, dtype=dtype))

def testFullLike(self):
  """full_like() must agree with np.full_like for both array flavors."""
  # (fill value, shape) cases.
  cases = [
      (5, ()),
      (5, (7,)),
      (5., (7,)),
      ([5, 8], (2,)),
      ([5, 8], (3, 2)),
      ([[5], [8]], (2, 3)),
      ([[5], [8]], (3, 2, 5)),
      ([[5.], [8.]], (3, 2, 5)),
  ]
  zeros_builders = [np_array_ops.zeros, np.zeros]
  for fill, shape_spec in cases:
    for to_fill, build_zeros, arr_dtype in itertools.product(
        self.array_transforms, zeros_builders, self.all_types):
      fill_value = to_fill(fill)
      arr = build_zeros(shape_spec, arr_dtype)
      self.match(np_array_ops.full_like(arr, fill_value),
                 np.full_like(arr, fill_value))
      for dtype in self.all_types:
        self.match(np_array_ops.full_like(arr, fill_value, dtype=dtype),
                   np.full_like(arr, fill_value, dtype=dtype))
def testArray(self):
  """Checks array() vs np.array, copy=False aliasing, and device placement."""
  ndmins = [0, 1, 2, 5]
  for a, dtype, ndmin, copy in itertools.product(self.all_arrays,
                                                 self.all_types, ndmins,
                                                 [True, False]):
    self.match(
        np_array_ops.array(a, dtype=dtype, ndmin=ndmin, copy=copy),
        np.array(a, dtype=dtype, ndmin=ndmin, copy=copy))
  zeros_list = np_array_ops.zeros(5)

  def test_copy_equal_false():
    # Backing tensor is the same if copy=False, other attributes being None.
    self.assertIs(
        np_array_ops.array(zeros_list, copy=False).data, zeros_list.data)
    self.assertIs(
        np_array_ops.array(zeros_list.data, copy=False).data, zeros_list.data)
    # Backing tensor is different if ndmin is not satisfied.
    self.assertIsNot(
        np_array_ops.array(zeros_list, copy=False, ndmin=2).data,
        zeros_list.data)
    self.assertIsNot(
        np_array_ops.array(zeros_list.data, copy=False, ndmin=2).data,
        zeros_list.data)
    self.assertIs(
        np_array_ops.array(zeros_list, copy=False, ndmin=1).data,
        zeros_list.data)
    self.assertIs(
        np_array_ops.array(zeros_list.data, copy=False, ndmin=1).data,
        zeros_list.data)
    # Backing tensor is different if dtype is not satisfied.
    self.assertIsNot(
        np_array_ops.array(zeros_list, copy=False, dtype=int).data,
        zeros_list.data)
    self.assertIsNot(
        np_array_ops.array(zeros_list.data, copy=False, dtype=int).data,
        zeros_list.data)
    self.assertIs(
        np_array_ops.array(zeros_list, copy=False, dtype=float).data,
        zeros_list.data)
    self.assertIs(
        np_array_ops.array(zeros_list.data, copy=False, dtype=float).data,
        zeros_list.data)

  # The aliasing rules must hold regardless of the active device scope.
  test_copy_equal_false()
  with ops.device('CPU:1'):
    test_copy_equal_false()

  # copy=True under a device scope places the copy on that device, while
  # the original stays where it was.
  self.assertNotIn('CPU:1', zeros_list.data.backing_device)
  with ops.device('CPU:1'):
    self.assertIn('CPU:1', np_array_ops.array(zeros_list, copy=True).data
                  .backing_device)
    self.assertIn('CPU:1', np_array_ops.array(np.array(0), copy=True).data
                  .backing_device)
def testAsArray(self):
  """asarray() must match np.asarray and be a no-op on ndarray input."""
  for a, dtype in itertools.product(self.all_arrays, self.all_types):
    self.match(np_array_ops.asarray(a, dtype=dtype),
               np.asarray(a, dtype=dtype))
  existing = np_array_ops.zeros(5)
  # Without a dtype, asarray on an ndarray returns the same object —
  # even under a different device scope.
  self.assertIs(np_array_ops.asarray(existing), existing)
  with ops.device('CPU:1'):
    self.assertIs(np_array_ops.asarray(existing), existing)
  # Requesting a different dtype forces a new instance.
  self.assertIsNot(np_array_ops.asarray(existing, dtype=int), existing)

def testAsAnyArray(self):
  """asanyarray() must match np.asanyarray and be a no-op on ndarray input."""
  for a, dtype in itertools.product(self.all_arrays, self.all_types):
    self.match(np_array_ops.asanyarray(a, dtype=dtype),
               np.asanyarray(a, dtype=dtype))
  existing = np_array_ops.zeros(5)
  # Without a dtype, asanyarray on an ndarray returns the same object —
  # even under a different device scope.
  self.assertIs(np_array_ops.asanyarray(existing), existing)
  with ops.device('CPU:1'):
    self.assertIs(np_array_ops.asanyarray(existing), existing)
  # Requesting a different dtype forces a new instance.
  self.assertIsNot(np_array_ops.asanyarray(existing, dtype=int), existing)

def testAsContiguousArray(self):
  """ascontiguousarray() must match np.ascontiguousarray."""
  for a, dtype in itertools.product(self.all_arrays, self.all_types):
    self.match(np_array_ops.ascontiguousarray(a, dtype=dtype),
               np.ascontiguousarray(a, dtype=dtype))
def testARange(self):
  """Compares arange() with np.arange over start/stop/step/dtype combos."""
  int_values = np.arange(-3, 3).tolist()
  float_values = np.arange(-3.5, 3.5).tolist()
  all_values = int_values + float_values
  for dtype in self.all_types:
    for start in all_values:
      msg = 'dtype:{} start:{}'.format(dtype, start)
      self.match(np_array_ops.arange(start), np.arange(start), msg=msg)
      self.match(
          np_array_ops.arange(start, dtype=dtype),
          np.arange(start, dtype=dtype),
          msg=msg)
      for stop in all_values:
        msg = 'dtype:{} start:{} stop:{}'.format(dtype, start, stop)
        self.match(
            np_array_ops.arange(start, stop), np.arange(start, stop), msg=msg)
        # TODO(srbs): Investigate and remove check.
        # There are some bugs when start or stop is float and dtype is int.
        if not isinstance(start, float) and not isinstance(stop, float):
          self.match(
              np_array_ops.arange(start, stop, dtype=dtype),
              np.arange(start, stop, dtype=dtype),
              msg=msg)
        # Note: We intentionally do not test with float values for step
        # because numpy.arange itself returns inconsistent results. e.g.
        # np.arange(0.5, 3, step=0.5, dtype=int) returns
        # array([0, 1, 2, 3, 4])
        for step in int_values:
          msg = 'dtype:{} start:{} stop:{} step:{}'.format(
              dtype, start, stop, step)
          if not step:
            # step == 0 must raise; the first match inside the context
            # raises, so the nested dtype check never actually runs.
            with self.assertRaises(ValueError):
              self.match(
                  np_array_ops.arange(start, stop, step),
                  np.arange(start, stop, step),
                  msg=msg)
              if not isinstance(start, float) and not isinstance(stop, float):
                self.match(
                    np_array_ops.arange(start, stop, step, dtype=dtype),
                    np.arange(start, stop, step, dtype=dtype),
                    msg=msg)
          else:
            self.match(
                np_array_ops.arange(start, stop, step),
                np.arange(start, stop, step),
                msg=msg)
            if not isinstance(start, float) and not isinstance(stop, float):
              self.match(
                  np_array_ops.arange(start, stop, step, dtype=dtype),
                  np.arange(start, stop, step, dtype=dtype),
                  msg=msg)
def testDiag(self):
  """Compares diag() with np.diag on 1-d and 2-d inputs for several k."""
  array_transforms = [
      lambda x: x,  # Identity,
      ops.convert_to_tensor,
      np.array,
      lambda x: np.array(x, dtype=np.float32),
      lambda x: np.array(x, dtype=np.float64),
      np_array_ops.array,
      lambda x: np_array_ops.array(x, dtype=np.float32),
      lambda x: np_array_ops.array(x, dtype=np.float64)
  ]

  def run_test(arr):
    for fn in array_transforms:
      # NOTE: `arr` is deliberately rebound, so each transform is applied
      # on top of the previous one's result, not on the raw input.
      arr = fn(arr)
      self.match(
          np_array_ops.diag(arr), np.diag(arr), msg='diag({})'.format(arr))
      for k in range(-3, 3):
        self.match(
            np_array_ops.diag(arr, k),
            np.diag(arr, k),
            msg='diag({}, k={})'.format(arr, k))

  # 2-d arrays.
  run_test(np.arange(9).reshape((3, 3)).tolist())
  run_test(np.arange(6).reshape((2, 3)).tolist())
  run_test(np.arange(6).reshape((3, 2)).tolist())
  run_test(np.arange(3).reshape((1, 3)).tolist())
  run_test(np.arange(3).reshape((3, 1)).tolist())
  run_test([[5]])
  run_test([[]])
  run_test([[], []])
  # 1-d arrays.
  run_test([])
  run_test([1])
  run_test([1, 2])

def testDiagFlat(self):
  """Compares diagflat() with np.diagflat on 1-d, 2-d and 3-d inputs."""
  array_transforms = [
      lambda x: x,  # Identity,
      ops.convert_to_tensor,
      np.array,
      lambda x: np.array(x, dtype=np.float32),
      lambda x: np.array(x, dtype=np.float64),
      np_array_ops.array,
      lambda x: np_array_ops.array(x, dtype=np.float32),
      lambda x: np_array_ops.array(x, dtype=np.float64)
  ]

  def run_test(arr):
    for fn in array_transforms:
      # NOTE: `arr` is deliberately rebound, so each transform is applied
      # on top of the previous one's result, not on the raw input.
      arr = fn(arr)
      self.match(
          np_array_ops.diagflat(arr),
          np.diagflat(arr),
          msg='diagflat({})'.format(arr))
      for k in range(-3, 3):
        self.match(
            np_array_ops.diagflat(arr, k),
            np.diagflat(arr, k),
            msg='diagflat({}, k={})'.format(arr, k))

  # 1-d arrays.
  run_test([])
  run_test([1])
  run_test([1, 2])
  # 2-d arrays.
  run_test([[]])
  run_test([[5]])
  run_test([[], []])
  run_test(np.arange(4).reshape((2, 2)).tolist())
  run_test(np.arange(2).reshape((2, 1)).tolist())
  run_test(np.arange(2).reshape((1, 2)).tolist())
  # 3-d arrays
  run_test(np.arange(8).reshape((2, 2, 2)).tolist())
def match_shape(self, actual, expected, msg=None):
if msg:
msg = 'Shape match failed for: {}. Expected: {} Actual: {}'.format(
msg, expected.shape, actual.shape)
self.assertEqual(actual.shape, expected.shape, msg=msg)
if msg:
msg = 'Shape: {} is not a tuple for {}'.format(actual.shape, msg)
self.assertIsInstance(actual.shape, tuple, msg=msg)
def match_dtype(self, actual, expected, msg=None):
if msg:
msg = 'Dtype match failed for: {}. Expected: {} Actual: {}.'.format(
msg, expected.dtype, actual.dtype)
self.assertEqual(actual.dtype, expected.dtype, msg=msg)
def match(self, actual, expected, msg=None, almost=False, decimal=7):
msg_ = 'Expected: {} Actual: {}'.format(expected, actual)
if msg:
msg = '{} {}'.format(msg_, msg)
else:
msg = msg_
self.assertIsInstance(actual, np_arrays.ndarray)
self.match_dtype(actual, expected, msg)
self.match_shape(actual, expected, msg)
if not almost:
if not actual.shape:
self.assertEqual(actual.tolist(), expected.tolist())
else:
self.assertSequenceEqual(actual.tolist(), expected.tolist())
else:
np.testing.assert_almost_equal(
actual.tolist(), expected.tolist(), decimal=decimal)
def testIndexedSlices(self):
dtype = dtypes.int64
iss = indexed_slices.IndexedSlices(
values=np_array_ops.ones([2, 3], dtype=dtype),
indices=constant_op.constant([1, 9]),
dense_shape=[10, 3])
a = np_array_ops.array(iss, copy=False)
expected = array_ops.scatter_nd([[1], [9]],
array_ops.ones([2, 3], dtype=dtype),
[10, 3])
self.assertAllEqual(expected, a)
class ArrayMethodsTest(test.TestCase):
  """Tests comparing tf-numpy array methods against their numpy peers."""

  def setUp(self):
    super(ArrayMethodsTest, self).setUp()
    set_up_virtual_devices()
    # Wrappers applied to every test input so each case is exercised as a
    # plain Python value, a tf Tensor, a numpy array and a tf-numpy ndarray.
    self.array_transforms = [
        lambda x: x,
        ops.convert_to_tensor,
        np.array,
        np_array_ops.array,
    ]
def testAllAny(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arr = fn(arr)
self.match(
np_array_ops.all(arr, *args, **kwargs),
np.all(arr, *args, **kwargs))
self.match(
np_array_ops.any(arr, *args, **kwargs),
np.any(arr, *args, **kwargs))
run_test(0)
run_test(1)
run_test([])
run_test([[True, False], [True, True]])
run_test([[True, False], [True, True]], axis=0)
run_test([[True, False], [True, True]], axis=0, keepdims=True)
run_test([[True, False], [True, True]], axis=1)
run_test([[True, False], [True, True]], axis=1, keepdims=True)
run_test([[True, False], [True, True]], axis=(0, 1))
run_test([[True, False], [True, True]], axis=(0, 1), keepdims=True)
run_test([5.2, 3.5], axis=0)
run_test([1, 0], axis=0)
def testCompress(self):
def run_test(condition, arr, *args, **kwargs):
for fn1 in self.array_transforms:
for fn2 in self.array_transforms:
arg1 = fn1(condition)
arg2 = fn2(arr)
self.match(
np_array_ops.compress(arg1, arg2, *args, **kwargs),
np.compress(
np.asarray(arg1).astype(np.bool), arg2, *args, **kwargs))
run_test([True], 5)
run_test([False], 5)
run_test([], 5)
run_test([True, False, True], [1, 2, 3])
run_test([True, False], [1, 2, 3])
run_test([False, True], [[1, 2], [3, 4]])
run_test([1, 0, 1], [1, 2, 3])
run_test([1, 0], [1, 2, 3])
run_test([0, 1], [[1, 2], [3, 4]])
run_test([True], [[1, 2], [3, 4]])
run_test([False, True], [[1, 2], [3, 4]], axis=1)
run_test([False, True], [[1, 2], [3, 4]], axis=0)
run_test([False, True], [[1, 2], [3, 4]], axis=-1)
run_test([False, True], [[1, 2], [3, 4]], axis=-2)
  def testCopy(self):
    """Checks np_array_ops.copy against np.copy, plus device placement."""
    def run_test(arr, *args, **kwargs):
      for fn in self.array_transforms:
        arg = fn(arr)
        self.match(
            np_array_ops.copy(arg, *args, **kwargs),
            np.copy(arg, *args, **kwargs))

    run_test([])
    run_test([1, 2, 3])
    run_test([1., 2., 3.])
    run_test([True])
    run_test(np.arange(9).reshape((3, 3)).tolist())
    # A copy made under an explicit device scope must land on that device,
    # whether the source is a tf-numpy ndarray or a plain numpy array.
    a = np_array_ops.asarray(0)
    self.assertNotIn('CPU:1', a.data.backing_device)
    with ops.device('CPU:1'):
      self.assertIn('CPU:1', np_array_ops.array(a, copy=True).data
                    .backing_device)
      self.assertIn('CPU:1', np_array_ops.array(np.array(0), copy=True).data
                    .backing_device)
def testCumProdAndSum(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
self.match(
np_array_ops.cumprod(arg, *args, **kwargs),
np.cumprod(arg, *args, **kwargs))
self.match(
np_array_ops.cumsum(arg, *args, **kwargs),
np.cumsum(arg, *args, **kwargs))
run_test([])
run_test([1, 2, 3])
run_test([1, 2, 3], dtype=float)
run_test([1, 2, 3], dtype=np.float32)
run_test([1, 2, 3], dtype=np.float64)
run_test([1., 2., 3.])
run_test([1., 2., 3.], dtype=int)
run_test([1., 2., 3.], dtype=np.int32)
run_test([1., 2., 3.], dtype=np.int64)
run_test([[1, 2], [3, 4]], axis=1)
run_test([[1, 2], [3, 4]], axis=0)
run_test([[1, 2], [3, 4]], axis=-1)
run_test([[1, 2], [3, 4]], axis=-2)
def testImag(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
self.match(
np_array_ops.imag(arg, *args, **kwargs),
# np.imag may return a scalar so we convert to a np.ndarray.
np.array(np.imag(arg, *args, **kwargs)))
run_test(1)
run_test(5.5)
run_test(5 + 3j)
run_test(3j)
run_test([])
run_test([1, 2, 3])
run_test([1 + 5j, 2 + 3j])
run_test([[1 + 5j, 2 + 3j], [1 + 7j, 2 + 8j]])
  def testAMaxAMin(self):
    """Compares amax/amin against np.amax/np.amin across axis variants."""
    def run_test(arr, *args, **kwargs):
      axis = kwargs.pop('axis', None)
      for fn1 in self.array_transforms:
        for fn2 in self.array_transforms:
          arr_arg = fn1(arr)
          # The axis itself is also fed through the transforms so tensor and
          # ndarray axis values are exercised; numpy gets the raw axis.
          axis_arg = fn2(axis) if axis is not None else None
          self.match(
              np_array_ops.amax(arr_arg, axis=axis_arg, *args, **kwargs),
              np.amax(arr_arg, axis=axis, *args, **kwargs))
          self.match(
              np_array_ops.amin(arr_arg, axis=axis_arg, *args, **kwargs),
              np.amin(arr_arg, axis=axis, *args, **kwargs))

    run_test([1, 2, 3])
    run_test([1., 2., 3.])
    run_test([[1, 2], [3, 4]], axis=1)
    run_test([[1, 2], [3, 4]], axis=0)
    run_test([[1, 2], [3, 4]], axis=-1)
    run_test([[1, 2], [3, 4]], axis=-2)
    run_test([[1, 2], [3, 4]], axis=(0, 1))
    run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(0, 2))
    run_test(
        np.arange(8).reshape((2, 2, 2)).tolist(), axis=(0, 2), keepdims=True)
    run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(2, 0))
    run_test(
        np.arange(8).reshape((2, 2, 2)).tolist(), axis=(2, 0), keepdims=True)

  def testMean(self):
    """Compares np_array_ops.mean against np.mean across axis variants."""
    def run_test(arr, *args, **kwargs):
      axis = kwargs.pop('axis', None)
      for fn1 in self.array_transforms:
        for fn2 in self.array_transforms:
          arr_arg = fn1(arr)
          # Same axis-transform scheme as testAMaxAMin above.
          axis_arg = fn2(axis) if axis is not None else None
          self.match(
              np_array_ops.mean(arr_arg, axis=axis_arg, *args, **kwargs),
              np.mean(arr_arg, axis=axis, *args, **kwargs))

    run_test([1, 2, 1])
    run_test([1., 2., 1.])
    run_test([1., 2., 1.], dtype=int)
    run_test([[1, 2], [3, 4]], axis=1)
    run_test([[1, 2], [3, 4]], axis=0)
    run_test([[1, 2], [3, 4]], axis=-1)
    run_test([[1, 2], [3, 4]], axis=-2)
    run_test([[1, 2], [3, 4]], axis=(0, 1))
    run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(0, 2))
    run_test(
        np.arange(8).reshape((2, 2, 2)).tolist(), axis=(0, 2), keepdims=True)
    run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(2, 0))
    run_test(
        np.arange(8).reshape((2, 2, 2)).tolist(), axis=(2, 0), keepdims=True)
def testProd(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
self.match(
np_array_ops.prod(arg, *args, **kwargs),
np.prod(arg, *args, **kwargs))
run_test([1, 2, 3])
run_test([1., 2., 3.])
run_test(np.array([1, 2, 3], dtype=np.int16))
run_test([[1, 2], [3, 4]], axis=1)
run_test([[1, 2], [3, 4]], axis=0)
run_test([[1, 2], [3, 4]], axis=-1)
run_test([[1, 2], [3, 4]], axis=-2)
run_test([[1, 2], [3, 4]], axis=(0, 1))
run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(0, 2))
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(), axis=(0, 2), keepdims=True)
run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(2, 0))
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(), axis=(2, 0), keepdims=True)
  def _testReduce(self, math_fun, np_fun, name):
    """Generic driver comparing a tf-numpy reduction with its numpy peer.

    Args:
      math_fun: the np_array_ops reduction under test.
      np_fun: the reference numpy reduction.
      name: short name used in failure messages.
    """
    # Axis values get their own transform set, including explicit float
    # dtypes, so non-integer axis containers are exercised too.
    axis_transforms = [
        lambda x: x,  # Identity,
        ops.convert_to_tensor,
        np.array,
        np_array_ops.array,
        lambda x: np_array_ops.array(x, dtype=np.float32),
        lambda x: np_array_ops.array(x, dtype=np.float64),
    ]

    def run_test(a, **kwargs):
      axis = kwargs.pop('axis', None)
      for fn1 in self.array_transforms:
        for fn2 in axis_transforms:
          arg1 = fn1(a)
          # numpy receives the untransformed axis; only the tf-numpy side
          # gets the wrapped one.
          axis_arg = fn2(axis) if axis is not None else None
          self.match(
              math_fun(arg1, axis=axis_arg, **kwargs),
              np_fun(arg1, axis=axis, **kwargs),
              msg='{}({}, axis={}, keepdims={})'.format(name, arg1, axis,
                                                        kwargs.get('keepdims')))

    run_test(5)
    run_test([2, 3])
    run_test([[2, -3], [-6, 7]])
    run_test([[2, -3], [-6, 7]], axis=0)
    run_test([[2, -3], [-6, 7]], axis=0, keepdims=True)
    run_test([[2, -3], [-6, 7]], axis=1)
    run_test([[2, -3], [-6, 7]], axis=1, keepdims=True)
    run_test([[2, -3], [-6, 7]], axis=(0, 1))
    run_test([[2, -3], [-6, 7]], axis=(1, 0))
  def testSum(self):
    """Runs the generic reduction checks for np_array_ops.sum."""
    self._testReduce(np_array_ops.sum, np.sum, 'sum')

  def testAmax(self):
    """Runs the generic reduction checks for np_array_ops.amax."""
    self._testReduce(np_array_ops.amax, np.amax, 'amax')

  def testSize(self):
    """Checks np_array_ops.size against np.size, including in tf.function."""
    def run_test(arr, axis=None):
      onp_arr = np.array(arr)
      self.assertEqual(np_array_ops.size(arr, axis), np.size(onp_arr, axis))

    run_test(np_array_ops.array([1]))
    run_test(np_array_ops.array([1, 2, 3, 4, 5]))
    run_test(np_array_ops.ones((2, 3, 2)))
    run_test(np_array_ops.ones((3, 2)))
    run_test(np_array_ops.zeros((5, 6, 7)))
    run_test(1)
    run_test(np_array_ops.ones((3, 2, 1)))
    run_test(constant_op.constant(5))
    run_test(constant_op.constant([1, 1, 1]))
    # A non-None axis raises NotImplementedError.
    self.assertRaises(NotImplementedError, np_array_ops.size, np.ones((2, 2)),
                      1)

    # size must also work on symbolic (unknown-shape) tensors inside a
    # tf.function.
    @def_function.function(input_signature=[tensor_spec.TensorSpec(shape=None)])
    def f(arr):
      arr = np_array_ops.asarray(arr)
      return np_array_ops.size(arr)

    self.assertEqual(f(np_array_ops.ones((3, 2))).data.numpy(), 6)
def testRavel(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
self.match(
np_array_ops.ravel(arg, *args, **kwargs),
np.ravel(arg, *args, **kwargs))
run_test(5)
run_test(5.)
run_test([])
run_test([[]])
run_test([[], []])
run_test([1, 2, 3])
run_test([1., 2., 3.])
run_test([[1, 2], [3, 4]])
run_test(np.arange(8).reshape((2, 2, 2)).tolist())
def testReal(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
self.match(
np_array_ops.real(arg, *args, **kwargs),
np.array(np.real(arg, *args, **kwargs)))
run_test(1)
run_test(5.5)
run_test(5 + 3j)
run_test(3j)
run_test([])
run_test([1, 2, 3])
run_test([1 + 5j, 2 + 3j])
run_test([[1 + 5j, 2 + 3j], [1 + 7j, 2 + 8j]])
def testRepeat(self):
def run_test(arr, repeats, *args, **kwargs):
for fn1 in self.array_transforms:
for fn2 in self.array_transforms:
arr_arg = fn1(arr)
repeats_arg = fn2(repeats)
self.match(
np_array_ops.repeat(arr_arg, repeats_arg, *args, **kwargs),
np.repeat(arr_arg, repeats_arg, *args, **kwargs))
run_test(1, 2)
run_test([1, 2], 2)
run_test([1, 2], [2])
run_test([1, 2], [1, 2])
run_test([[1, 2], [3, 4]], 3, axis=0)
run_test([[1, 2], [3, 4]], 3, axis=1)
run_test([[1, 2], [3, 4]], [3], axis=0)
run_test([[1, 2], [3, 4]], [3], axis=1)
run_test([[1, 2], [3, 4]], [3, 2], axis=0)
run_test([[1, 2], [3, 4]], [3, 2], axis=1)
run_test([[1, 2], [3, 4]], [3, 2], axis=-1)
run_test([[1, 2], [3, 4]], [3, 2], axis=-2)
def testAround(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
self.match(
np_array_ops.around(arg, *args, **kwargs),
np.around(arg, *args, **kwargs))
run_test(5.5)
run_test(5.567, decimals=2)
run_test([])
run_test([1.27, 2.49, 2.75], decimals=1)
run_test([23.6, 45.1], decimals=-1)
def testReshape(self):
def run_test(arr, newshape, *args, **kwargs):
for fn1 in self.array_transforms:
for fn2 in self.array_transforms:
arr_arg = fn1(arr)
newshape_arg = fn2(newshape)
self.match(
np_array_ops.reshape(arr_arg, newshape_arg, *args, **kwargs),
np.reshape(arr_arg, newshape, *args, **kwargs))
run_test(5, [-1])
run_test([], [-1])
run_test([1, 2, 3], [1, 3])
run_test([1, 2, 3], [3, 1])
run_test([1, 2, 3, 4], [2, 2])
run_test([1, 2, 3, 4], [2, 1, 2])
def testExpandDims(self):
def run_test(arr, axis):
self.match(np_array_ops.expand_dims(arr, axis), np.expand_dims(arr, axis))
run_test([1, 2, 3], 0)
run_test([1, 2, 3], 1)
def testSqueeze(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
# Note: np.squeeze ignores the axis arg for non-ndarray objects.
# This looks like a bug: https://github.com/numpy/numpy/issues/8201
# So we convert the arg to np.ndarray before passing to np.squeeze.
self.match(
np_array_ops.squeeze(arg, *args, **kwargs),
np.squeeze(np.array(arg), *args, **kwargs))
run_test(5)
run_test([])
run_test([5])
run_test([[1, 2, 3]])
run_test([[[1], [2], [3]]])
run_test([[[1], [2], [3]]], axis=0)
run_test([[[1], [2], [3]]], axis=2)
run_test([[[1], [2], [3]]], axis=(0, 2))
run_test([[[1], [2], [3]]], axis=-1)
run_test([[[1], [2], [3]]], axis=-3)
def testTranspose(self):
def run_test(arr, axes=None):
for fn1 in self.array_transforms:
for fn2 in self.array_transforms:
arr_arg = fn1(arr)
axes_arg = fn2(axes) if axes is not None else None
self.match(
np_array_ops.transpose(arr_arg, axes_arg),
np.transpose(arr_arg, axes))
run_test(5)
run_test([])
run_test([5])
run_test([5, 6, 7])
run_test(np.arange(30).reshape(2, 3, 5).tolist())
run_test(np.arange(30).reshape(2, 3, 5).tolist(), [0, 1, 2])
run_test(np.arange(30).reshape(2, 3, 5).tolist(), [0, 2, 1])
run_test(np.arange(30).reshape(2, 3, 5).tolist(), [1, 0, 2])
run_test(np.arange(30).reshape(2, 3, 5).tolist(), [1, 2, 0])
run_test(np.arange(30).reshape(2, 3, 5).tolist(), [2, 0, 1])
run_test(np.arange(30).reshape(2, 3, 5).tolist(), [2, 1, 0])
def match_shape(self, actual, expected, msg=None):
if msg:
msg = 'Shape match failed for: {}. Expected: {} Actual: {}'.format(
msg, expected.shape, actual.shape)
self.assertEqual(actual.shape, expected.shape, msg=msg)
if msg:
msg = 'Shape: {} is not a tuple for {}'.format(actual.shape, msg)
self.assertIsInstance(actual.shape, tuple, msg=msg)
def match_dtype(self, actual, expected, msg=None):
if msg:
msg = 'Dtype match failed for: {}. Expected: {} Actual: {}.'.format(
msg, expected.dtype, actual.dtype)
self.assertEqual(actual.dtype, expected.dtype, msg=msg)
def match(self, actual, expected, msg=None, check_dtype=True):
msg_ = 'Expected: {} Actual: {}'.format(expected, actual)
if msg:
msg = '{} {}'.format(msg_, msg)
else:
msg = msg_
self.assertIsInstance(actual, np_arrays.ndarray)
if check_dtype:
self.match_dtype(actual, expected, msg)
self.match_shape(actual, expected, msg)
if not actual.shape:
self.assertAllClose(actual.tolist(), expected.tolist())
else:
self.assertAllClose(actual.tolist(), expected.tolist())
  def testPad(self):
    """Checks 'constant', 'reflect' and 'symmetric' padding modes."""
    t = [[1, 2, 3], [4, 5, 6]]
    # One row of padding above/below, two columns left/right.
    paddings = [[
        1,
        1,
    ], [2, 2]]
    self.assertAllEqual(
        np_array_ops.pad(t, paddings, 'constant'),
        [[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 2, 3, 0, 0], [0, 0, 4, 5, 6, 0, 0],
         [0, 0, 0, 0, 0, 0, 0]])
    self.assertAllEqual(
        np_array_ops.pad(t, paddings, 'reflect'),
        [[6, 5, 4, 5, 6, 5, 4], [3, 2, 1, 2, 3, 2, 1], [6, 5, 4, 5, 6, 5, 4],
         [3, 2, 1, 2, 3, 2, 1]])
    self.assertAllEqual(
        np_array_ops.pad(t, paddings, 'symmetric'),
        [[2, 1, 1, 2, 3, 3, 2], [2, 1, 1, 2, 3, 3, 2], [5, 4, 4, 5, 6, 6, 5],
         [5, 4, 4, 5, 6, 6, 5]])

  def testTake(self):
    """Checks np_array_ops.take with flat, nested and axis-based indices."""
    a = [4, 3, 5, 7, 6, 8]
    indices = [0, 1, 4]
    self.assertAllEqual([4, 3, 6], np_array_ops.take(a, indices))
    indices = [[0, 1], [2, 3]]
    self.assertAllEqual([[4, 3], [5, 7]], np_array_ops.take(a, indices))
    a = [[4, 3, 5], [7, 6, 8]]
    # Without an axis the input is indexed as if flattened.
    self.assertAllEqual([[4, 3], [5, 7]], np_array_ops.take(a, indices))
    a = np.random.rand(2, 16, 3)
    axis = 1
    self.assertAllEqual(
        np.take(a, indices, axis=axis),
        np_array_ops.take(a, indices, axis=axis))

  def testWhere(self):
    """A length-1 condition broadcasts against the 2x2 branch operands."""
    self.assertAllEqual([[1.0, 1.0], [1.0, 1.0]],
                        np_array_ops.where([True], [1.0, 1.0],
                                           [[0, 0], [0, 0]]))

  def testShape(self):
    """shape() of a 1x2 nested list compares equal to (1, 2)."""
    self.assertAllEqual((1, 2), np_array_ops.shape([[0, 0]]))

  def testSwapaxes(self):
    """Checks swapaxes with positive and equivalent negative axis pairs."""
    x = [[1, 2, 3]]
    self.assertAllEqual([[1], [2], [3]], np_array_ops.swapaxes(x, 0, 1))
    self.assertAllEqual([[1], [2], [3]], np_array_ops.swapaxes(x, -2, -1))
    x = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
    self.assertAllEqual([[[0, 4], [2, 6]], [[1, 5], [3, 7]]],
                        np_array_ops.swapaxes(x, 0, 2))
    self.assertAllEqual([[[0, 4], [2, 6]], [[1, 5], [3, 7]]],
                        np_array_ops.swapaxes(x, -3, -1))
def testMoveaxis(self):
def _test(*args):
expected = np.moveaxis(*args)
raw_ans = np_array_ops.moveaxis(*args)
self.assertAllEqual(expected, raw_ans)
a = np.random.rand(1, 2, 3, 4, 5, 6)
# Basic
_test(a, (0, 2), (3, 5))
_test(a, (0, 2), (-1, -3))
_test(a, (-6, -4), (3, 5))
_test(a, (-6, -4), (-1, -3))
_test(a, 0, 4)
_test(a, -6, -2)
_test(a, tuple(range(6)), tuple(range(6)))
_test(a, tuple(range(6)), tuple(reversed(range(6))))
_test(a, (), ())
  def testNdim(self):
    """ndim is 0 for a scalar and 1 for a flat list."""
    self.assertAllEqual(0, np_array_ops.ndim(0.5))
    self.assertAllEqual(1, np_array_ops.ndim([1, 2]))

  def testIsscalar(self):
    """Floats, ints and bools count as scalars; lists do not."""
    self.assertTrue(np_array_ops.isscalar(0.5))
    self.assertTrue(np_array_ops.isscalar(5))
    self.assertTrue(np_array_ops.isscalar(False))
    self.assertFalse(np_array_ops.isscalar([1, 2]))

  def assertListEqual(self, a, b):
    # NOTE(review): this overrides unittest's assertListEqual with
    # element-wise assertAllEqual semantics so ndarray elements compare by
    # value -- confirm this shadowing is intentional.
    self.assertAllEqual(len(a), len(b))
    for x, y in zip(a, b):
      self.assertAllEqual(x, y)

  def testSplit(self):
    """split supports both an even section count and explicit indices."""
    x = np_array_ops.arange(9)
    y = np_array_ops.split(x, 3)
    self.assertListEqual([([0, 1, 2]), ([3, 4, 5]), ([6, 7, 8])], y)
    x = np_array_ops.arange(8)
    # Indices past the end of the array yield a trailing empty split.
    y = np_array_ops.split(x, [3, 5, 6, 10])
    self.assertListEqual([([0, 1, 2]), ([3, 4]), ([5]), ([6, 7]), ([])], y)
def testSign(self):
state = np.random.RandomState(0)
test_types = [np.float16, np.float32, np.float64, np.int32, np.int64,
np.complex64, np.complex128]
test_shapes = [(), (1,), (2, 3, 4), (2, 3, 0, 4)]
for dtype in test_types:
for shape in test_shapes:
if np.issubdtype(dtype, np.complex):
arr = (np.asarray(state.randn(*shape) * 100, dtype=dtype) +
1j * np.asarray(state.randn(*shape) * 100, dtype=dtype))
else:
arr = np.asarray(state.randn(*shape) * 100, dtype=dtype)
self.match(np_array_ops.sign(arr), np.sign(arr))
class ArrayManipulationTest(test.TestCase):
  """Tests comparing tf-numpy array manipulation ops against numpy."""

  def setUp(self):
    super(ArrayManipulationTest, self).setUp()
    # Wrappers applied to every input so each case runs as a plain Python
    # value, a tf Tensor, a numpy array and a tf-numpy ndarray.
    self.array_transforms = [
        lambda x: x,
        ops.convert_to_tensor,
        np.array,
        np_array_ops.array,
    ]

  def testBroadcastTo(self):
    """Compares np_array_ops.broadcast_to with np.broadcast_to."""

    def run_test(arr, shape):
      for fn in self.array_transforms:
        arg1 = fn(arr)
        self.match(
            np_array_ops.broadcast_to(arg1, shape),
            np.broadcast_to(arg1, shape))

    run_test(1, 2)
    run_test(1, (2, 2))
    run_test([1, 2], (2, 2))
    run_test([[1], [2]], (2, 2))
    run_test([[1, 2]], (3, 2))
    run_test([[[1, 2]], [[3, 4]], [[5, 6]]], (3, 4, 2))

  def testIx_(self):
    """Compares np_array_ops.ix_ with np.ix_ over mixed index arrays."""
    possible_arys = [[True, True], [True, False], [False, False],
                     list(range(5)), np_array_ops.empty(0, dtype=np.int64)]
    for r in range(len(possible_arys)):
      for arys in itertools.combinations_with_replacement(possible_arys, r):
        tnp_ans = np_array_ops.ix_(*arys)
        onp_ans = np.ix_(*arys)
        for t, o in zip(tnp_ans, onp_ans):
          self.match(t, o)

  def match_shape(self, actual, expected, msg=None):
    """Asserts equal shapes and that `actual.shape` is a Python tuple."""
    if msg:
      msg = 'Shape match failed for: {}. Expected: {} Actual: {}'.format(
          msg, expected.shape, actual.shape)
    self.assertEqual(actual.shape, expected.shape, msg=msg)
    if msg:
      msg = 'Shape: {} is not a tuple for {}'.format(actual.shape, msg)
    self.assertIsInstance(actual.shape, tuple, msg=msg)

  def match_dtype(self, actual, expected, msg=None):
    """Asserts that `actual` and `expected` have the same dtype."""
    if msg:
      msg = 'Dtype match failed for: {}. Expected: {} Actual: {}.'.format(
          msg, expected.dtype, actual.dtype)
    self.assertEqual(actual.dtype, expected.dtype, msg=msg)

  def match(self, actual, expected, msg=None):
    """Asserts that `actual` matches `expected` in dtype, shape and values.

    Fix: the assembled failure message is now forwarded to the final value
    assertions; previously it was computed and then dropped.
    """
    msg_ = 'Expected: {} Actual: {}'.format(expected, actual)
    if msg:
      msg = '{} {}'.format(msg_, msg)
    else:
      msg = msg_
    self.assertIsInstance(actual, np_arrays.ndarray, msg=msg)
    self.match_dtype(actual, expected, msg)
    self.match_shape(actual, expected, msg)
    if not actual.shape:
      # Scalar result: compare the unwrapped Python values.
      self.assertEqual(actual.tolist(), expected.tolist(), msg=msg)
    else:
      self.assertSequenceEqual(actual.tolist(), expected.tolist(), msg=msg)
if __name__ == '__main__':
  # Test entry point; eager execution is enabled before running the suite.
  ops.enable_eager_execution()
  test.main()
| apache-2.0 |
cryptobanana/ansible | lib/ansible/modules/cloud/ovirt/ovirt_tags.py | 75 | 7807 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_tags
short_description: Module to manage tags in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "This module manage tags in oVirt/RHV. It can also manage assignments
of those tags to entities."
options:
name:
description:
- "Name of the tag to manage."
required: true
state:
description:
- "Should the tag be present/absent/attached/detached."
- "C(Note): I(attached) and I(detached) states are supported since version 2.4."
choices: ['present', 'absent', 'attached', 'detached']
default: present
description:
description:
- "Description of the tag to manage."
parent:
description:
- "Name of the parent tag."
vms:
description:
- "List of the VMs names, which should have assigned this tag."
hosts:
description:
- "List of the hosts names, which should have assigned this tag."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create(if not exists) and assign tag to vms vm1 and vm2:
- ovirt_tags:
name: mytag
vms:
- vm1
- vm2
# Attach a tag to VM 'vm1', keeping the rest already attached tags on VM:
- ovirt_tags:
name: mytag
state: attached
vms:
- vm3
# Detach a tag from VM 'vm1', keeping the rest already attached tags on VM:
- ovirt_tags:
name: mytag
state: detached
vms:
- vm3
# To detach all VMs from tag:
- ovirt_tags:
name: mytag
vms: []
# Remove tag
- ovirt_tags:
state: absent
name: mytag
'''
RETURN = '''
id:
description: ID of the tag which is managed
returned: On success if tag is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
tag:
description: "Dictionary of all the tag attributes. Tag attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/tag."
returned: On success if tag is found.
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
get_id_by_name,
ovirt_full_argument_spec,
)
class TagsModule(BaseModule):
    """Implements create/update/assignment logic for oVirt/RHV tags."""

    def build_entity(self):
        """Builds the otypes.Tag sent to the API from module parameters."""
        return otypes.Tag(
            name=self._module.params['name'],
            description=self._module.params['description'],
            parent=otypes.Tag(
                name=self._module.params['parent'],
            ) if self._module.params['parent'] else None,
        )

    def post_create(self, entity):
        # Reuse update_check so vms/hosts lists supplied together with a
        # freshly created tag are assigned right after creation.
        self.update_check(entity)

    def _update_tag_assignments(self, entity, name):
        """Attaches/detaches this tag for entities of the given collection.

        Args:
            entity: the managed tag entity (assignments are driven purely by
                module parameters, not by this object).
            name: entity collection name, either 'vms' or 'hosts'.
        """
        if self._module.params[name] is None:
            # Parameter not supplied: leave existing assignments untouched.
            return

        state = self.param('state')
        entities_service = getattr(self._connection.system_service(), '%s_service' % name)()
        # Names of entities currently carrying this tag. NOTE(review):
        # despite the variable name, this holds hosts when name == 'hosts'.
        current_vms = [
            vm.name
            for vm in entities_service.list(search='tag=%s' % self._module.params['name'])
        ]
        # Assign tags:
        if state in ['present', 'attached', 'detached']:
            for entity_name in self._module.params[name]:
                entity_id = get_id_by_name(entities_service, entity_name)
                tags_service = entities_service.service(entity_id).tags_service()
                current_tags = [tag.name for tag in tags_service.list()]
                # Assign the tag:
                if state in ['attached', 'present']:
                    if self._module.params['name'] not in current_tags:
                        if not self._module.check_mode:
                            tags_service.add(
                                tag=otypes.Tag(
                                    name=self._module.params['name'],
                                ),
                            )
                        self.changed = True
                # Detach the tag:
                elif state == 'detached':
                    if self._module.params['name'] in current_tags:
                        tag_id = get_id_by_name(tags_service, self.param('name'))
                        if not self._module.check_mode:
                            tags_service.tag_service(tag_id).remove()
                        self.changed = True

        # Unassign tags:
        if state == 'present':
            # In 'present' state the parameter is authoritative: entities not
            # listed are detached from the tag.
            for entity_name in [e for e in current_vms if e not in self._module.params[name]]:
                if not self._module.check_mode:
                    entity_id = get_id_by_name(entities_service, entity_name)
                    tags_service = entities_service.service(entity_id).tags_service()
                    tag_id = get_id_by_name(tags_service, self.param('name'))
                    tags_service.tag_service(tag_id).remove()
                self.changed = True

    def _get_parent(self, entity):
        """Returns the parent tag's name, or None if the tag has no parent."""
        parent = None
        if entity.parent:
            parent = self._connection.follow_link(entity.parent).name
        return parent

    def update_check(self, entity):
        """Syncs assignments and reports whether the tag itself is up to date."""
        self._update_tag_assignments(entity, 'vms')
        self._update_tag_assignments(entity, 'hosts')
        return (
            equal(self._module.params.get('description'), entity.description) and
            equal(self._module.params.get('parent'), self._get_parent(entity))
        )
def main():
    """Module entry point: parses arguments and creates/removes the tag."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent', 'attached', 'detached'],
            default='present',
        ),
        name=dict(default=None, required=True),
        description=dict(default=None),
        parent=dict(default=None),
        vms=dict(default=None, type='list'),
        hosts=dict(default=None, type='list'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    check_sdk(module)

    # Fix: pre-initialise so the finally clause cannot raise NameError when
    # authentication or connection setup fails before these names are bound
    # (previously an early create_connection failure was masked by an
    # unbound-variable error in `finally`).
    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        tags_service = connection.system_service().tags_service()
        tags_module = TagsModule(
            connection=connection,
            module=module,
            service=tags_service,
        )
        state = module.params['state']
        if state in ['present', 'attached', 'detached']:
            ret = tags_module.create()
        elif state == 'absent':
            ret = tags_module.remove()

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        if connection is not None:
            # Log out only when we authenticated directly rather than with a
            # pre-existing token.
            connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
| gpl-3.0 |
praekelt/python-gitmodel | gitmodel/test/fields/models.py | 2 | 1451 | import pygit2
from gitmodel import fields
from gitmodel import models
class Person(models.GitModel):
    """Model declaring one field of each basic scalar field type."""
    slug = fields.SlugField()
    first_name = fields.CharField()
    last_name = fields.CharField()
    email = fields.EmailField()
    age = fields.IntegerField(required=False)
    account_balance = fields.DecimalField(required=False)
    birth_date = fields.DateField(required=False)
    active = fields.BooleanField(required=False)
    tax_rate = fields.FloatField(required=False)
    wake_up_call = fields.TimeField(required=False)
    date_joined = fields.DateTimeField(required=False)
class Author(models.GitModel):
    """Model exercising a field default and URL scheme restrictions."""
    first_name = fields.CharField()
    last_name = fields.CharField()
    email = fields.EmailField()
    language = fields.CharField(default='en-US')
    url = fields.URLField(schemes=('http', 'https'), required=False)
class Post(models.GitModel):
    """Model exercising related, blob and JSON fields; `slug` is the id."""
    author = fields.RelatedField(Author)
    slug = fields.SlugField(id=True)
    title = fields.CharField()
    body = fields.CharField()
    image = fields.BlobField(required=False)
    metadata = fields.JSONField(required=False)
class User(Person):
    """Person subclass adding credential and related-model fields."""
    password = fields.CharField()
    last_login = fields.DateTimeField(required=False)
    last_read = fields.RelatedField(Post, required=False)
class GitObjectTestModel(models.GitModel):
    """Model exercising GitObjectField, optionally constrained by type."""
    blob = fields.GitObjectField()
    # Restricted to commit objects via the `type` argument.
    commit = fields.GitObjectField(type=pygit2.Commit)
    tree = fields.GitObjectField()
| bsd-3-clause |
edmorley/treeherder | tests/webapp/api/test_version.py | 2 | 1331 | from django.conf import settings
from rest_framework.decorators import APIView
from rest_framework.exceptions import NotAcceptable
from rest_framework.response import Response
from rest_framework.test import APIRequestFactory
class RequestVersionView(APIView):
    """Minimal DRF view echoing the version negotiated from the request."""
    def get(self, request, *args, **kwargs):
        return Response({'version': request.version})

# Shared request factory for the tests below.
factory = APIRequestFactory()
def test_unsupported_version():
    """An unknown version in the Accept header yields an 'invalid version' error.

    Fix: `response` was only bound inside the try block; if the view had
    raised NotAcceptable, the final assertion would have failed with
    NameError instead of a meaningful message. Pre-initialise and assert
    that a response was actually produced.
    """
    view = RequestVersionView.as_view()
    request = factory.get('/endpoint/', HTTP_ACCEPT='application/json; version=foo.bar')
    response = None
    try:
        response = view(request)
    except NotAcceptable:
        pass
    assert response is not None, 'view raised NotAcceptable instead of returning a response'
    assert response.data == {u'detail': u'Invalid version in "Accept" header.'}
def test_correct_version():
    """Requesting the first allowed version echoes that version back."""
    allowed = settings.REST_FRAMEWORK['ALLOWED_VERSIONS'][0]
    accept = 'application/json; version={0}'.format(allowed)
    request = factory.get('/endpoint/', HTTP_ACCEPT=accept)
    response = RequestVersionView.as_view()(request)
    assert response.data == {'version': allowed}
def test_default_version():
    """Without an explicit version, the configured default is negotiated."""
    request = factory.get('/endpoint/', HTTP_ACCEPT='application/json')
    response = RequestVersionView.as_view()(request)
    expected = settings.REST_FRAMEWORK['DEFAULT_VERSION']
    assert response.data == {'version': expected}
| mpl-2.0 |
tensorflow/model-optimization | tensorflow_model_optimization/python/core/clustering/keras/clustering_algorithm.py | 1 | 7243 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstract base class for clustering algorithm."""
import abc
import six
import tensorflow as tf
from tensorflow_model_optimization.python.core.clustering.keras.cluster_config import GradientAggregation
@six.add_metaclass(abc.ABCMeta)
class ClusteringAlgorithm(object):
"""Class to implement highly efficient vectorised look-ups.
We do not utilise looping for that purpose, instead we `smartly` reshape and
tile arrays. The trade-off is that we are potentially using way more memory
than we would have if looping is used.
Each class that inherits from this class is supposed to implement a
particular lookup function for a certain shape.
For example, look-ups for 2D table will be different in the case of 3D.
"""
  def __init__(
      self,
      clusters_centroids,
      cluster_gradient_aggregation=GradientAggregation.SUM,
  ):
    """Generating clustered tensors.

    For generating clustered tensors we will need two things: cluster
    centroids and the final shape tensor must have.

    Args:
      clusters_centroids: A tf.Variable of shape (N,) that contains initial
        values of clusters centroids.
      cluster_gradient_aggregation: An enum that specify the aggregation
        method of the cluster gradient.

    Raises:
      ValueError: If `clusters_centroids` is not a `tf.Variable`.
    """
    if not isinstance(clusters_centroids, tf.Variable):
      raise ValueError("clusters_centroids should be a tf.Variable.")
    self.cluster_centroids = clusters_centroids
    self.cluster_gradient_aggregation = cluster_gradient_aggregation
def get_pulling_indices(self, weight):
"""Returns indices of closest cluster centroids.
Takes a weight(can be 1D, 2D or ND) and creates tf.int32 array of the
same shape that will hold indices of cluster centroids clustered arrays
elements will be pulled from.
In the current setup pulling indices are meant to be created once and
used everywhere.
Args:
weight: ND array of weights. For each weight in this array the closest
cluster centroids is found.
Returns:
ND array of the same shape as `weight` parameter of the type
tf.int32. The returned array contain weight lookup indices
"""
# We find the nearest cluster centroids and store them so that ops can build
# their kernels upon it.
pulling_indices = tf.argmin(
tf.abs(tf.expand_dims(weight, axis=-1) - self.cluster_centroids),
axis=-1)
return pulling_indices
def get_clustered_weight(self, pulling_indices, original_weight):
"""Returns clustered weights with custom gradients.
Take indices (pulling_indices) as input and then form a new array
by gathering cluster centroids based on the given pulling indices.
The original gradients will also be modified in two ways:
- By averaging the gradient of cluster_centroids based on the size of
each cluster.
- By adding an estimated gradient onto the non-differentiable
original weight.
Args:
pulling_indices: a tensor of indices used for lookup of the same size as
original_weight.
original_weight: the original weights of the wrapped layer.
Returns:
array with the same shape as `pulling_indices`. Each array element
is a member of self.cluster_centroids. The backward pass is modified by
adding custom gradients.
"""
@tf.custom_gradient
def average_centroids_gradient_by_cluster_size(cluster_centroids,
cluster_sizes):
def grad(d_cluster_centroids):
# Average the gradient based on the number of weights belonging to each
# cluster
d_cluster_centroids = tf.math.divide_no_nan(d_cluster_centroids,
cluster_sizes)
return d_cluster_centroids, None
return cluster_centroids, grad
@tf.custom_gradient
def add_gradient_to_original_weight(clustered_weight, original_weight):
"""Overrides gradients in the backprop stage.
This function overrides gradients in the backprop stage: the Jacobian
matrix of multiplication is replaced with the identity matrix, which
effectively changes multiplication into add in the backprop. Since
the gradient of tf.sign is 0, overwriting it with identity follows
the design of straight-through-estimator, which accepts all upstream
gradients and uses them to update original non-clustered weights of
the layer. Here, we assume the gradient updates on individual elements
inside a cluster will be different so that there is no point in mapping
the gradient updates back to original non-clustered weights using the LUT.
Args:
clustered_weight: clustered weights
original_weight: original weights
Returns:
result and custom gradient, as expected by @tf.custom_gradient
"""
override_weights = tf.sign(original_weight + 1e+6)
override_clustered_weight = clustered_weight * override_weights
def grad(d_override_clustered_weight):
return d_override_clustered_weight, d_override_clustered_weight
return override_clustered_weight, grad
if self.cluster_gradient_aggregation == GradientAggregation.SUM:
cluster_centroids = self.cluster_centroids
elif self.cluster_gradient_aggregation == GradientAggregation.AVG:
# Compute the size of each cluster
# (number of weights belonging to each cluster)
cluster_sizes = tf.math.bincount(
arr=tf.cast(pulling_indices, dtype=tf.int32),
minlength=tf.size(self.cluster_centroids),
dtype=self.cluster_centroids.dtype,
)
# Modify the gradient of cluster_centroids to be averaged by cluster sizes
cluster_centroids = average_centroids_gradient_by_cluster_size(
self.cluster_centroids,
tf.stop_gradient(cluster_sizes),
)
else:
raise ValueError(f"self.cluster_gradient_aggregation="
f"{self.cluster_gradient_aggregation} not implemented.")
# Gather the clustered weights based on cluster centroids and
# pulling indices.
clustered_weight = tf.gather(cluster_centroids, pulling_indices)
# Add an estimated gradient to the original weight
clustered_weight = add_gradient_to_original_weight(
clustered_weight,
# Fix the bug with MirroredVariable and tf.custom_gradient:
# tf.identity will transform a MirroredVariable into a Variable
tf.identity(original_weight),
)
return clustered_weight
| apache-2.0 |
timthelion/FreeCAD | src/Mod/Sandbox/exportDRAWEXE.py | 25 | 39258 |
#***************************************************************************
#* *
#* Copyright (c) 2014 Sebastian Hoogen <github@sebastianhoogen.de> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
__title__="FreeCAD OpenSCAD Workbench - DRAWEXE exporter"
__author__ = "Sebastian Hoogen <github@sebastianhoogen.de>"
import FreeCAD, Part
if open.__module__ == '__builtin__':
pythonopen = open
# unsupported primitives
# Part:: Wedge, Helix, Spiral, Elipsoid
# Draft: Rectangle, BSpline, BezCurve
def quaternionToString(rot):
def shorthexfloat(f):
s=f.hex()
mantisse, exponent = f.hex().split('p',1)
return '%sp%s' % (mantisse.rstrip('0'),exponent)
x,y,z,w=rot.Q
return 'q=%s+%s*i+%s*j+%s*k' % (shorthexfloat(w),shorthexfloat(x),
shorthexfloat(y),shorthexfloat(z))
def f2s(n,angle=False,axis=False):
'''convert to numerical value to string
try to remove no significant digits, by guessing a former rounding
'''
if abs(n) < 1e-14: return '0'
if angle and len(('%0.6e' % n).split('e')[0].rstrip('0') ) < 3:
return ('%0.5f' % n).rstrip('0').rstrip('.')
elif axis and len(('%0.13e' % n).split('e')[0].rstrip('0') ) < 6:
return ('%0.10f' % n).rstrip('0').rstrip('.')
else:
for i in range(20):
s = ('%%1.%df'% i) % n
if float(s) == n:
return s
for i in range(20):
s = ('%%0.%de'% i) % n
if float(s) == n:
return s
def ax2_xdir(normal):
#adaped from gp_Ax2.ccc (c) OpenCascade SAS LGPL 2.1+
xa=abs(normal.x)
ya=abs(normal.y)
za=abs(normal.z)
if ya <= xa and ya <= za:
if xa > za:
return FreeCAD.Vector(-normal.z,0, normal.x)
else:
return FreeCAD.Vector( normal.z,0,-normal.x)
elif xa <= ya and xa <= za:
if ya > za:
return FreeCAD.Vector(0,-normal.z, normal.y)
else:
return FreeCAD.Vector(0, normal.z,-normal.y)
else:
if xa > ya:
return FreeCAD.Vector(-normal.y, normal.x,0)
else:
return FreeCAD.Vector( normal.y,-normal.x,0)
def occversiontuple():
import FreeCAD,Part
occmajs,occmins,occfixs = FreeCAD.ConfigGet('OCC_VERSION').split('.')[:3]
return (int(occmajs),int(occmins),int(occfixs))
def polygonstr(r,pcount):
import math
v=FreeCAD.Vector(r,0,0)
m=FreeCAD.Matrix()
m.rotateZ(2*math.pi/pcount)
points=[]
for i in range(pcount):
points.append(v)
v=m.multiply(v)
points.append(v)
return ' '.join('%s %s %s'%(f2s(v.x),f2s(v.y),f2s(v.z)) \
for v in points)
def formatobjtype(ob):
objtype=ob.TypeId
if (ob.isDerivedFrom('Part::FeaturePython') or \
ob.isDerivedFrom('Part::Part2DObjectPython') or\
ob.isDerivedFrom('App::FeaturePython')) and \
hasattr(ob.Proxy,'__module__'):
return '%s::%s.%s' % (ob.TypeId,ob.Proxy.__module__,\
ob.Proxy.__class__.__name__)
else:
return ob.TypeId
def placement2draw(placement,name='object'):
"""converts a FreeCAD Placement to trotate and ttranslate commands"""
drawcommand=''
if not placement.Rotation.isNull():
import math
#dx,dy,dz=placement.Rotation.Axis
ax=placement.Rotation.Axis
import itertools
# denormalize rotation axis
for t in itertools.product((0,1,-1),repeat=3):
if t != (0,0,0):
if (ax-FreeCAD.Vector(*t).normalize()).Length < 1e-15:
dx,dy,dz = t
break
else:
dx,dy,dz=placement.Rotation.Axis
#drawcommand += "# %s\n" %quaternionToString(placement.Rotation)
an=math.degrees(placement.Rotation.Angle)
drawcommand += "trotate %s 0 0 0 %s %s %s %s\n" % (name,\
f2s(dx),f2s(dy),f2s(dz),\
# f2s(dx,axis=True),f2s(dy,axis=True),f2s(dz,axis=True),\
f2s(an,angle=True))
if placement.Base.Length > 1e-8:
x,y,z=placement.Base
drawcommand += "ttranslate %s %s %s %s\n" % \
(name,f2s(x),f2s(y),f2s(z))
return drawcommand
def saveShape(csg,filename,shape,name,hasplacement = True,cleanshape=False):
import os
spath,sname = os.path.split(filename)
sname=sname.replace('.','-')
uname='%s-%s' %(sname,name)
breppath=os.path.join(spath,'%s.brep'%uname)
csg.write("restore %s.brep %s\n" % (uname,name))
if cleanshape:
import Part
try:
shape = shape.cleaned()
except Part.OCCError:
shape = shape.copy()
if hasplacement is None: # saved with placement
hasplacement = False # saved with placement
shape.exportBrep(breppath)
elif not hasplacement: #doesn't matter
shape.exportBrep(breppath)
else: #remove placement
sh=shape.copy()
sh.Placement=FreeCAD.Placement()
# it not yet tested if changing the placement recreated the
# tesselation. but for now we simply do the cleaing once agian
# to stay on the safe side
if cleanshape:
shape = shape.cleaned()
sh.exportBrep(breppath)
return hasplacement
def isDraftFeature(ob):
if (ob.isDerivedFrom('Part::FeaturePython') or \
ob.isDerivedFrom('Part::Part2DObjectPython')) and \
hasattr(ob.Proxy,'__module__') and \
ob.Proxy.__module__ == 'Draft':
return True
def isDraftClone(ob):
if (ob.isDerivedFrom('Part::FeaturePython') or \
ob.isDerivedFrom('Part::Part2DObjectPython')) and \
hasattr(ob.Proxy,'__module__') and \
ob.Proxy.__module__ == 'Draft':
import Draft
return isinstance(ob.Proxy,Draft._Clone)
def isDraftCircle(ob):
if isDraftFeature(ob):
import Draft
return isinstance(ob.Proxy,Draft._Circle)
def isDraftEllipse(ob):
if isDraftFeature(ob):
import Draft
return isinstance(ob.Proxy,Draft._Ellipse)
def isDraftPolygon(ob):
if isDraftFeature(ob):
import Draft
return isinstance(ob.Proxy,Draft._Polygon)
def isDraftPoint(ob):
if isDraftFeature(ob):
import Draft
return isinstance(ob.Proxy,Draft._Point)
def isDraftWire(ob):
if isDraftFeature(ob):
import Draft
if isinstance(ob.Proxy,Draft._Wire):
#only return true if we support all options
#"Closed" append last point at the end
#"MakeFace"
#"Points" data we need
# the usage of 'start' and 'end' is not clear
if ob.Base is None and ob.Tool is None and \
ob.FilletRadius.Value == 0.0 and \
ob.ChamferSize.Value == 0.0:
return True
def isDraftShape2DView(ob):
if isDraftFeature(ob):
import Draft
return isinstance(ob.Proxy,Draft._Shape2DView)
def isOpenSCADFeature(ob):
if ob.isDerivedFrom('Part::FeaturePython') and \
hasattr(ob.Proxy,'__module__') and \
ob.Proxy.__module__ == 'OpenSCADFeatures':
return True
def isOpenSCADMultMatrixFeature(ob):
if ob.isDerivedFrom('Part::FeaturePython') and \
hasattr(ob.Proxy,'__module__') and \
ob.Proxy.__module__ == 'OpenSCADFeatures':
import OpenSCADFeatures
return isinstance(ob.Proxy,OpenSCADFeatures.MatrixTransform)
def isDeform(ob):
"""tests whether the object is a Matrix transformation
that does a non-uniform scaling"""
# the [ is important to exclude cases with additional
# rotation or mirroring.
# TBD decompose complex matrix operations
return isOpenSCADMultMatrixFeature(ob) and \
ob.Matrix.analyze().startswith('Scale [')
class Drawexporter(object):
def __init__(self, filename):
self.objectcache=set()
self.csg = pythonopen(filename,'w')
#self.csg=csg
self.filename=filename
#settings
self.alwaysexplode = True
self.cleanshape = False
def __enter__(self):
return self
def write_header(self):
import FreeCAD
#self.csg.write('#!/usr/bin/env DRAWEXE\n')
self.csg.write('#generated by FreeCAD %s\n' % \
'.'.join(FreeCAD.Version()[0:3]))
self.csg.write('pload MODELING\n')
def write_displayonly(self,objlst):
self.csg.write('donly %s\n'%' '.join([obj.Name for obj in objlst]))
def saveSweep(self,ob):
import Part
spine,subshapelst=ob.Spine
#process_object(csg,spine,filename)
explodeshape = self.alwaysexplode or self.process_object(spine,True)
if explodeshape:
self.process_object(spine)
if len(subshapelst) and spine.Shape.ShapeType != 'Edge':
#raise NotImplementedError # hit the fallback
# currently all subshapes are edges
self.csg.write('explode %s E\n' % spine.Name )
edgelst = ' '.join(('%s_%s' % (spine.Name,ss[4:]) for ss \
in subshapelst))
spinename = '%s-0-spine' % ob.Name
self.csg.write('wire %s %s\n' %(spinename,edgelst))
elif spine.Shape.ShapeType == 'Wire':
spinename = spine.Name
elif spine.Shape.ShapeType == 'Edge':
spinename = '%s-0-spine' % ob.Name
self.csg.write('wire %s %s\n' %(spinename,spine.Name))
else: # extract only the used subshape
if len(subshapelst):
path=Part.Wire([spine.Shape.getElement(subshapename) for \
subshapename in subshapelst])
elif spine.Shape.ShapeType == 'Edge':
path = spine.Shape
elif spine.Shape.ShapeType == 'Wire':
path = Part.Wire(spine.Shape)
else:
raise ValueError('Unsuitabel Shape Type')
spinename = '%s-0-spine' % ob.Name
saveShape(self.csg,self.filename, path,spinename,None,\
self.cleanshape) # placement with shape
#safePlacement(ob.Placement,ob.Name)
self.csg.write('mksweep %s\n' % spinename)
#setsweep
setoptions=[]
buildoptions=[]
if ob.Frenet:
setoptions.append('-FR')
else:
setoptions.append('-CF')
if ob.Transition == 'Transformed':
buildoptions.append('-M')
elif ob.Transition == 'Right corner':
buildoptions.append('-C')
elif ob.Transition == 'Round corner':
buildoptions.append('-R')
if ob.Solid:
buildoptions.append('-S')
self.csg.write('setsweep %s\n' % (" ".join(setoptions)))
#addsweep
sections=ob.Sections
sectionnames = []
for i,subobj in enumerate(ob.Sections):
#process_object(csg,subobj,filename)
#sectionsnames.append(subobj.Name)
#d1['basename']=subobj.Name
sectionname = '%s-0-section-%02d-%s' % (ob.Name,i,subobj.Name)
addoptions=[]
explodeshape = self.alwaysexplode or \
self.process_object(subobj,True)
if explodeshape:
sh = subobj.Shape
if sh.ShapeType == 'Vertex' or sh.ShapeType == 'Wire' or \
sh.ShapeType == 'Edge' or \
sh.ShapeType == 'Face' and len(sh.Wires) == 1:
self.process_object(subobj)
if sh.ShapeType == 'Wire' or sh.ShapeType == 'Vertex':
#csg.write('tcopy %s %s\n' %(subobj.Name,sectionname))
sectionname = subobj.Name
if sh.ShapeType == 'Edge':
self.csg.write('explode %s E\n' % subobj.Name )
self.csg.write('wire %s %s_1\n' %(sectionname,subobj.Name))
if sh.ShapeType == 'Face':
#we should use outer wire when it becomes avaiable
self.csg.write('explode %s W\n' % subobj.Name )
#csg.write('tcopy %s_1 %s\n' %(subobj.Name,sectionname))
sectionname ='%s_1' % subobj.Name
else:
explodeshape = False
if not explodeshape: # extract only the used subshape
sh = subobj.Shape
if sh.ShapeType == 'Vertex':
pass
elif sh.ShapeType == 'Wire' or sh.ShapeType == 'Edge':
sh = Part.Wire(sh)
elif sh.ShapeType == 'Face':
sh = sh.OuterWire
else:
raise ValueError('Unrecognized Shape Type')
saveShape(self.csg,self.filename,sh,sectionname,None,\
self.cleanshape) # placement with shape
self.csg.write('addsweep %s %s\n' % \
(sectionname," ".join(addoptions)))
self.csg.write('buildsweep %s %s\n' % (ob.Name," ".join(buildoptions)))
def process_object(self,ob,checksupported=False,toplevel=False):
if not checksupported and ob.Name in self.objectcache:
return # object in present
if not checksupported:
self.objectcache.add(ob.Name)
d1 = {'name':ob.Name}
if hasattr(ob,'Placement'):
hasplacement = not ob.Placement.isNull()
else:
hasplacement = False
if ob.TypeId in ["Part::Cut","Part::Fuse","Part::Common",\
"Part::Section"]:
if checksupported: return True # The object is supported
d1.update({'part':ob.Base.Name,'tool':ob.Tool.Name,\
'command':'b%s' % ob.TypeId[6:].lower()})
self.process_object(ob.Base)
self.process_object(ob.Tool)
self.csg.write("%(command)s %(name)s %(part)s %(tool)s\n"%d1)
elif ob.TypeId == "Part::Plane" :
if checksupported: return True # The object is supported
d1.update({'uname':'%s-untrimmed' % d1['name'],\
'length': f2s(ob.Length),'width': f2s(ob.Width)})
self.csg.write("plane %s 0 0 0\n"%d1['uname'])
self.csg.write(\
"mkface %(name)s %(uname)s 0 %(length)s 0 %(width)s\n"%d1)
elif ob.TypeId == "Part::Ellipse" :
if checksupported: return True # The object is supported
d1.update({'uname':'%s-untrimmed'%d1['name'], 'maj':\
f2s(ob.MajorRadius), 'min': f2s(ob.MinorRadius),\
'pf':f2s(ob.Angle0.getValueAs('rad').Value), \
'pl':f2s(ob.Angle1.getValueAs('rad').Value)})
self.csg.write("ellipse %(uname)s 0 0 0 %(maj)s %(min)s\n"%d1)
self.csg.write('mkedge %(name)s %(uname)s %(pf)s %(pl)s\n' % d1)
elif ob.TypeId == "Part::Sphere" :
if checksupported: return True # The object is supported
d1.update({'radius':f2s(ob.Radius),'angle1':f2s(ob.Angle1),\
'angle2':f2s(ob.Angle2),'angle3':f2s(ob.Angle3)})
if ob.Angle1.Value == -90 and ob.Angle2.Value == 90 and \
ob.Angle3.Value == 360:
self.csg.write('psphere %(name)s %(radius)s\n'%d1)
else:
self.csg.write('psphere %(name)s %(radius)s %(angle1)s '
'%(angle2)s %(angle3)s\n'%d1)
elif ob.TypeId == "Part::Box" :
if checksupported: return True # The object is supported
d1.update({'dx':f2s(ob.Length),'dy':f2s(ob.Width),'dz':f2s(ob.Height)})
self.csg.write('box %(name)s %(dx)s %(dy)s %(dz)s\n'%d1)
elif ob.TypeId == "Part::Cylinder" :
if checksupported: return True # The object is supported
d1.update({'radius':f2s(ob.Radius),'height':f2s(ob.Height),\
'angle':f2s(ob.Angle)})
if ob.Angle.Value == 360:
self.csg.write('pcylinder %(name)s %(radius)s %(height)s\n'%d1)
else:
self.csg.write('pcylinder %(name)s %(radius)s %(height)s '\
'%(angle)s\n'%d1)
elif ob.TypeId == "Part::Cone" :
if checksupported: return True # The object is supported
d1.update({'radius1':f2s(ob.Radius1),'radius2':f2s(ob.Radius2),\
'height':f2s(ob.Height),'angle':f2s(ob.Angle)})
if ob.Angle.Value == 360:
self.csg.write('pcone %(name)s %(radius1)s %(radius2)s '\
'%(height)s\n'%d1)
else:
self.csg.write('pcone %(name)s %(radius1)s %(radius2)s '\
'%(height)s %(angle)s\n'%d1)
elif ob.TypeId == "Part::Torus" :
if checksupported: return True # The object is supported
d1.update({'radius1':f2s(ob.Radius1),'radius2':f2s(ob.Radius2),\
'angle1': f2s(ob.Angle1),'angle2':f2s(ob.Angle2),\
'angle3': f2s(ob.Angle3)})
if ob.Angle1.Value == -180 and ob.Angle2.Value == 180 and \
ob.Angle3.Value == 360:
self.csg.write('ptorus %(name)s %(radius1)s %(radius2)s\n'%d1)
else:
self.csg.write('ptorus %(name)s %(radius1)s %(radius2)s '\
'%(angle1)s %(angle2)s %(angle3)s\n' % d1)
elif ob.TypeId == "Part::Mirroring" :
if checksupported: return True # The object is supported
self.process_object(ob.Source)
self.csg.write('tcopy %s %s\n'%(ob.Source.Name,d1['name']))
b=ob.Base
d1['x']=f2s(ob.Base.x)
d1['y']=f2s(ob.Base.y)
d1['z']=f2s(ob.Base.z)
d1['dx']=f2s(ob.Normal.x)
d1['dy']=f2s(ob.Normal.y)
d1['dz']=f2s(ob.Normal.z)
self.csg.write('tmirror %(name)s %(x)s %(y)s %(z)s %(dx)s %(dy)s %(dz)s\n' \
% d1)
elif ob.TypeId == 'Part::Compound':
if len(ob.Links) == 0:
pass
elif len(ob.Links) == 1:
if checksupported:
return self.process_object(ob.Links[0],True)
self.process_object(ob.Links[0])
self.csg.write('tcopy %s %s\n'%(ob.Links[0].Name,d1['name']))
else:
if checksupported: return True # The object is supported
basenames=[]
for i,subobj in enumerate(ob.Links):
self.process_object(subobj)
basenames.append(subobj.Name)
self.csg.write('compound %s %s\n' % (' '.join(basenames),ob.Name))
elif ob.TypeId in ["Part::MultiCommon", "Part::MultiFuse"]:
if len(ob.Shapes) == 0:
pass
elif len(ob.Shapes) == 1:
if checksupported:
return self.process_object(ob.Shapes[0],True)
self.process_object(ob.Shapes[0],)
self.csg.write('tcopy %s %s\n'%(ob.Shapes[0].Name,d1['name']))
elif ob.TypeId == "Part::MultiFuse" and \
occversiontuple() >= (6,8,1):
if checksupported: return True # The object is supported
for subobj in ob.Shapes:
self.process_object(subobj)
self.csg.write("bclearobjects\nbcleartools\n")
self.csg.write("baddobjects %s\n" % ob.Shapes[0].Name)
self.csg.write("baddtools %s\n" % " ".join(subobj.Name for \
subobj in ob.Shapes[1:]))
self.csg.write("bfillds\n")
self.csg.write("bbop %s 1\n" % ob.Name) #BOPAlgo_FUSE == 1
else:
if checksupported: return True # The object is supported
topname = ob.Name
command = 'b%s' % ob.TypeId[11:].lower()
lst1=ob.Shapes[:]
current=lst1.pop(0)
curname=current.Name
self.process_object(current)
i=1
while lst1:
if len(lst1) >= 2:
nxtname='to-%s-%03d-t'%(topname,i)
else:
nxtname=topname
nxt=lst1.pop(0)
self.process_object(nxt)
self.csg.write("%s %s %s %s\n"%(command,nxtname,curname,nxt.Name))
curname=nxtname
i+=1
elif (isDraftPolygon(ob) and ob.ChamferSize.Value == 0 and\
ob.FilletRadius.Value == 0 and ob.Support == None) or\
ob.TypeId == "Part::Prism" or \
ob.TypeId == "Part::RegularPolygon":
if checksupported: return True # The object is supported
draftpolygon = isDraftPolygon(ob)
if draftpolygon:
pcount = ob.FacesNumber
if ob.DrawMode =='inscribed':
r=ob.Radius.Value
elif ob.DrawMode =='circumscribed':
import math
r = ob.Radius.Value/math.cos(math.pi/pcount)
else:
raise ValueError
else:
pcount = ob.Polygon
r=ob.Circumradius.Value
justwire = ob.TypeId == "Part::RegularPolygon" or \
(draftpolygon and ob.MakeFace == False)
polyname = '%s-polyline' % d1['name']
if justwire:
wirename = d1['name']
else:
wirename = '%s-polywire' % d1['name']
if ob.TypeId == "Part::Prism":
facename = '%s-polyface' % d1['name']
else:
facename = d1['name']
self.csg.write('polyline %s %s\n' % (polyname,polygonstr(r,pcount)))
self.csg.write('wire %s %s\n' %(wirename,polyname))
if not justwire:
self.csg.write('mkplane %s %s\n' % (facename,polyname))
if ob.TypeId == "Part::Prism":
self.csg.write('prism %s %s 0 0 %s\n' % \
(d1['name'],facename, f2s(ob.Height.Value)))
elif ob.TypeId == "Part::Extrusion" and ob.TaperAngle.Value == 0:
if checksupported: return True # The object is supported
self.process_object(ob.Base)
#Warning does not fully ressemle the functionallity of
#Part::Extrusion
#csg.write('tcopy %s %s\n'%(ob.Base.Name,d1['name']))
facename=ob.Base.Name
self.csg.write('prism %s %s %s %s %s\n' % (d1['name'],facename,\
f2s(ob.Dir.x),f2s(ob.Dir.y),f2s(ob.Dir.z)))
elif ob.TypeId == "Part::Fillet" and True: #disabled
if checksupported: return True # The object is supported
self.process_object(ob.Base)
self.csg.write('explode %s E\n' % ob.Base.Name )
self.csg.write('blend %s %s %s\n' % (d1['name'],ob.Base.Name,\
' '.join(('%s %s'%(f2s(e[1]),'%s_%d' % (ob.Base.Name,e[0])) \
for e in ob.Edges))))
elif ob.TypeId == "Part::Thickness" and not ob.SelfIntersection and \
ob.Mode == 'Skin':
if checksupported: return True # The object is supported
jointype = {'Arc':'a','Intersection':'i','Tangent':'t'} #Join
inter = {False: 'p', True: 'c'} #Intersection
baseobj, facelist = ob.Faces
self.process_object(baseobj)
faces = ' '.join([('%s_%s' %(baseobj.Name,f[4:])) \
for f in facelist])
value = f2s(ob.Value)
self.csg.write('explode %s F\n' % baseobj.Name )
self.csg.write('offsetparameter 1e-7 %s %s\n' % \
(inter[ob.Intersection],jointype[ob.Join]))
self.csg.write('offsetload %s %s %s\n'%(baseobj.Name,value,faces))
self.csg.write('offsetperform %s\n' % d1['name'] )
elif ob.TypeId == "Part::Sweep" and True:
if checksupported: return True # The object is supported
self.saveSweep(ob)
elif ob.TypeId == "Part::Loft":
if checksupported: return True # The object is supported
sectionnames=[]
for i,subobj in enumerate(ob.Sections):
explodeshape = self.alwaysexplode or \
self.process_object(suboobj,True)
if explodeshape and False: #diabled TBD
try:
raise NotImplementedError
sectionname = '%s-%02d-section' % (ob.Name,i)
sh = subobj.Shape
if sh.isNull():
raise ValueError # hit the fallback
tempname=spine.Name
if sh.ShapeType == 'Compound':
sh = sh.childShapes()[0]
self.csg.write('explode %s\n' % tempname )
tempname = '%s_1' % tempname
if sh.ShapeType == 'Face':
#sh = sh.OuterWire #not available
if len(sh.Wires) == 1:
sh=sh.Wires[0]
self.csg.write('explode %s\n W' % tempname )
tempname = '%s_1' % tempname
else:
raise NotImplementedError
elif sh.ShapeType == 'Edge':
self.csg.write('wire %s %s\n' %(sectionname,tempname))
tempname = sectionname
sectionname = tempname
except NotImplementedError:
explodeshape = False # fallback
else:
explodeshape = False # fallback if we hit the False before
if not explodeshape: # extract only the used subshape
sh = subobj.Shape
if not sh.isNull():
if sh.ShapeType == 'Compound':
sh = sh.childShapes()[0]
if sh.ShapeType == 'Face':
sh = sh.OuterWire
elif sh.ShapeType == 'Edge':
import Part
sh = Part.Wire([sh])
elif sh.ShapeType == 'Wire':
import Part
sh = Part.Wire(sh)
elif sh.ShapeType == 'Vertex':
pass
else:
raise ValueError('Unsuitabel Shape Type')
sectionname = '%s-%02d-section' % (ob.Name,i)
saveShape(self.csg,self.filename, sh,sectionname,None,\
self.cleanshape) # placement with shape
sectionnames.append(sectionname)
if ob.Closed:
sectionnames.append(sectionnames[0])
self.csg.write('thrusections %s %d %d %s\n' % \
(ob.Name,int(ob.Solid),\
int(ob.Ruled), ' '.join(sectionnames)))
elif isDeform(ob): #non-uniform scaling
if checksupported: return True # The object is supported
m=ob.Matrix
self.process_object(ob.Base)
#csg.write('tcopy %s %s\n'%(ob.Base.Name,d1['name']))
d1['basename']=ob.Base.Name
d1['cx']=f2s(m.A11)
d1['cy']=f2s(m.A22)
d1['cz']=f2s(m.A33)
self.csg.write('deform %(name)s %(basename)s %(cx)s %(cy)s %(cz)s\n' % d1)
if m.A14 > 1e-8 or m.A24 > 1e-8 or m.A34 > 1e-8:
self.csg.write("ttranslate %s %s %s %s\n" % \
(ob.Name,f2s(m.A14),f2s(m.A24),f2s(m.A34)))
elif isDraftPoint(ob) or ob.TypeId == "Part::Vertex":
if checksupported: return True # The object is supported
d1['x']=f2s(ob.X)
d1['y']=f2s(ob.Y)
d1['z']=f2s(ob.Z)
self.csg.write('vertex %(name)s %(x)s %(y)s %(z)s\n' % d1)
elif isDraftCircle(ob) or ob.TypeId == "Part::Circle" or \
isDraftEllipse(ob):
if checksupported: return True # The object is supported
isdraftcircle=isDraftCircle(ob)
isdraftellipse=isDraftCircle(ob)
"circle name x y [z [dx dy dz]] [ux uy [uz]] radius"
curvename = '%s-curve' % d1['name']
if ob.TypeId == "Part::Circle":
radius=f2s(float(ob.Radius))
pfirst=f2s(ob.Angle0.getValueAs('rad').Value)
plast=f2s(ob.Angle1.getValueAs('rad').Value)
self.csg.write('circle %s 0 0 0 %s\n' % (curvename,radius))
self.csg.write('mkedge %s %s %s %s\n' % \
(d1['name'],curvename,pfirst,plast))
else: #draft
makeface = ob.MakeFace and \
(ob.Shape.isNull() or ob.Shape.ShapeType == 'Face')
#FreeCAD ignores a failed mkplane but it may
#break the model in DRAWEXE
edgename = '%s-edge' % d1['name']
if isdraftcircle:
pfirst=f2s(ob.FirstAngle.getValueAs('rad').Value)
plast=f2s(ob.LastAngle.getValueAs('rad').Value)
radius=f2s(ob.Radius.Value)
self.csg.write('circle %s 0 0 0 %s\n' % (curvename,radius))
else: #draft ellipse
import math
majr=f2s(float(ob.MajorRadius))
minr=f2s(float(ob.MinorRadius))
pfirst=f2s(math.radians(ob.FirstAngle))
plast =f2s(math.radians(ob.LastAngle))
self.csg.write('ellipse %s 0 0 0 %s %s\n' % \
(curvename,majr,minr))
self.csg.write('mkedge %s %s %s %s\n' % \
(edgename,curvename,pfirst,plast))
if makeface:
wirename = '%s-wire' % d1['name']
self.csg.write('wire %s %s\n' %(wirename,edgename))
self.csg.write('mkplane %s %s\n' % (d1['name'],wirename))
else:
self.csg.write('wire %s %s\n' %(d1['name'],edgename))
elif ob.TypeId == "Part::Line":
if checksupported: return True # The object is supported
self.csg.write('polyline %s %s %s %s %s %s %s\n' % \
(d1['name'],f2s(ob.X1),f2s(ob.Y1),f2s(ob.Z1),\
f2s(ob.X2),f2s(ob.Y2),f2s(ob.Z2)))
elif isDraftWire(ob):
if checksupported: return True # The object is supported
points=ob.Points
if ob.Closed:
points.append(points[0])
polyname = '%s-dwireline' % d1['name']
pointstr=' '.join('%s %s %s'%(f2s(v.x),f2s(v.y),f2s(v.z)) \
for v in points)
self.csg.write('polyline %s %s\n' % (polyname,pointstr))
if ob.MakeFace:
wirename = '%s-dwirewire' % d1['name']
self.csg.write('wire %s %s\n' %(wirename,polyname))
facename = d1['name']
self.csg.write('mkplane %s %s\n' % (facename,polyname))
else:
wirename = d1['name']
self.csg.write('wire %s %s\n' %(wirename,polyname))
elif isDraftClone(ob):
if checksupported: return True # The object is supported
x,y,z=ob.Scale
if x == y == z: #uniform scaling
d1['scale']=f2s(x)
else:
d1['cx']=f2s(x)
d1['cy']=f2s(y)
d1['cz']=f2s(z)
if len(ob.Objects) == 1:
d1['basename']=ob.Objects[0].Name
self.process_object(ob.Objects[0])
if x == y == z: #uniform scaling
self.csg.write('tcopy %(basename)s %(name)s\n' % d1)
self.csg.write('pscale %(name)s 0 0 0 %(scale)s\n' % d1)
else:
self.csg.write('deform %(name)s %(basename)s'\
' %(cx)s %(cy)s %(cz)s\n' % d1)
else: #compound
newnames=[]
for i,subobj in enumerate(ob.Objects):
self.process_object(subobj)
d1['basename']=subobj.Name
newname='%s-%2d' % (ob.Name,i)
d1['newname']=newname
newnames.append(newname)
if x == y == z: #uniform scaling
self.csg.write('tcopy %(basename)s %(newname)s\n' % d1)
self.csg.write('pscale %(newname)s 0 0 0 %(scale)s\n' % d1)
else:
self.csg.write('deform %(newname)s %(basename)s'\
' %(cx)s %(cy)s %(cz)s\n' % d1)
self.csg.write('compound %s %s\n' % (' '.join(newnames),ob.Name))
elif isDraftShape2DView(ob) and not ob.Tessellation and \
ob.ProjectionMode == "Solid" and ob.Base is not None and \
hasattr(ob.Base,'Shape'):
# not supported are groups, Arch/Sections and individual faces mode
if checksupported: return True # The object is supported
self.process_object(ob.Base)
v=ob.Projection
x=ax2_xdir(v)
self.csg.write('hprj %s_proj 0 0 0 %s %s %s %s %s %s\n' % \
( ob.Name,f2s(v.x),f2s(v.y),f2s(v.z)\
, f2s(x.x),f2s(x.y),f2s(x.z)))
self.csg.write('houtl %s_outl %s\n' % (ob.Name, ob.Base.Name))
self.csg.write('hfill %s_outl %s_proj 0\n' %(ob.Name,ob.Name)) #0?
self.csg.write('hload %s_outl\n' % (ob.Name))
self.csg.write('hsetprj %s_proj\n' % (ob.Name))
self.csg.write('hupdate\n')
self.csg.write('hhide\n')
self.csg.write('unset -nocomplain vl v1l vnl vol vil hl h1l hnl hol hil\n')
self.csg.write('hres2d\n')
if ob.HiddenLines:
self.csg.write('compound vl v1l vnl vol vil hl h1l hnl hol hil %s\n' % ob.Name)
else:
self.csg.write('compound vl v1l vnl vol vil %s\n' % ob.Name)
#elif ob.isDerivedFrom('Part::FeaturePython') and \
# hasattr(ob.Proxy,'__module__'):
# pass
elif ob.isDerivedFrom('Part::Feature') :
if ob.Shape.isNull(): #would crash in exportBrep otherwise
raise ValueError('Shape of %s is Null' % ob.Name)
if checksupported: return False # The object is not supported
self.csg.write('#saved shape of unsupported %s Object\n' % \
formatobjtype(ob))
hasplacement = saveShape(self.csg,self.filename,ob.Shape,ob.Name,\
hasplacement,self.cleanshape)
elif ob.isDerivedFrom('App::Annotation') :
return False # ignored here
#anntotations needs to be drawn after erase/donly
else: # not derived from Part::Feature
if not toplevel:
raise ValueError('Can not export child object')
else:
if ob.Name != ob.Label:
labelstr = 'Label %s' % ob.Label.encode('unicode-escape')
else:
labelstr = ''
self.csg.write('#omitted unsupported %s Object %s%s\n' %\
(formatobjtype(ob),ob.Name,labelstr))
self.csg.write('#Properties: %s\n' % \
','.join(ob.PropertiesList))
return False
#The object is not present and can not be referenced
if hasplacement:
self.csg.write(placement2draw(ob.Placement,ob.Name))
if ob.Name != ob.Label:
self.csg.write('#Object Label: %s\n' % ob.Label.encode('unicode-escape'))
return ob.Name #The object is present and can be referenced
def export_annotations(self,objlst):
for ob in objlst:
if ob.isDerivedFrom('App::Annotation') :
if ob.Name != ob.Label:
self.csg.write('#Annotation Name %s Label %s"\n' % \
(ob.Name,ob.Label.encode('unicode-escape')))
else:
self.csg.write('#Annotation %s\n' % (ob.Name))
v=ob.Position
self.csg.write('dtext %s %s %s "%s"\n' % \
(f2s(v.x),f2s(v.y),f2s(v.z), '\\n'.join(\
ob.LabelText).encode(\
'ascii', errors='xmlcharrefreplace')))
def export_objects(self,objlst,toplevel=True):
self.write_header()
toplevelobjs = [self.process_object(ob, toplevel=toplevel)\
for ob in objlst]
names = [name for name in toplevelobjs if name is not False]
self.csg.write('donly %s\n'%(' '.join(names)))
self.export_annotations(objlst)
#for ob in objlst:
# self.process_object(ob,toplevel=toplevel)
#self.write_displayonly(objlst)
def __exit__(self,exc_type, exc_val, exc_tb ):
self.csg.close()
def export(exportList,filename):
  """Entry point called by FreeCAD to export exportList to filename
  as a DRAWEXE (.tcl) script."""
  with Drawexporter(filename) as exporter:
    exporter.export_objects(exportList)

# Register the .tcl export type with FreeCAD once, at module import time.
if 'tcl' not in FreeCAD.getExportType():
  FreeCAD.addExportType("DRAWEXE script (*.tcl)","exportDRAWEXE")
| lgpl-2.1 |
brev/nupic | tests/unit/nupic/math/sparse_binary_matrix_test.py | 35 | 44937 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for sparse binary matrix."""
import cPickle
import os
import numpy
import unittest2 as unittest
from nupic.bindings.math import SM32, SM_01_32_32
_RGEN = numpy.random.RandomState(37)
def error(str):
  """Report a test failure.

  The original implementation only printed, so a failing check never
  failed the unittest run.  Now the message is still printed (same text
  as the old `print 'Error:', str`) and an AssertionError is raised so
  the test runner records the failure.

  Parameter name `str` (shadowing the builtin) is kept for
  backward compatibility with existing call sites.
  """
  print('Error: %s' % (str,))
  raise AssertionError(str)
class UnitTests(unittest.TestCase):
def setUp(self):
  """Every test builds matrices through self.Matrix.__class__, i.e. the
  0/1 sparse binary matrix type SM_01_32_32."""
  self.Matrix = SM_01_32_32(1)
def test_construction(self):
  """Constructors: from ncols, copy, numpy array, CSR string and SM32.

  Tests draw sizes/contents from the shared module-level _RGEN stream,
  so statement order must not change.
  """
  a = self.Matrix.__class__(4)
  if a.nRows() != 0 or a.nCols() != 4:
    error('constructor 1')
  b = self.Matrix.__class__(a)
  if b.nRows() != 0 or b.nCols() != 4:
    error('constructor 2A')
  if (a.toDense() != b.toDense()).any():
    error('constructor 2B')
  m = _RGEN.randint(1,10)
  n = _RGEN.randint(5,10)
  a = self.Matrix.__class__(n)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = numpy.zeros((n))  # force one empty row
  for i in range(m):
    a.appendSparseRow(numpy.where(x[i] > 0)[0].tolist())
  b = self.Matrix.__class__(a)
  if (a.toDense() != b.toDense()).any():
    error('copy constructor')
  c = self.Matrix.__class__(x)
  if (c.toDense() != x).any():
    error('constructor from numpy array')
  s = c.toCSR()
  d = self.Matrix.__class__(s)
  if (d.toDense() != x).any():
    error('constructor from csr string')
  # Test construction from a SM: nonzero entries collapse to 1.
  a = _RGEN.randint(0,10,(3,4))
  a[2] = 0
  a[:,3] = 0
  a = SM32(a)
  b = SM_01_32_32(a)
  a = a.toDense()
  w = numpy.where(a > 0)
  a[w] = 1
  if (a != b.toDense()).any():
    error('construction from SM')
def testAccessors(self):
  """nRows/nCols/nNonZeros*, getRowSparse, capacity and compact."""
  m = _RGEN.randint(1,10)
  n = _RGEN.randint(5,10)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = 0  # force one empty row
  a = self.Matrix.__class__(n)
  print a.getVersion(), a.getVersion(True)
  if a.nRows() != 0:
    error('nRows 1')
  if a.nCols() != n:
    error('nCols 1')
  for i in range(m):
    a.appendSparseRow(numpy.where(x[i] > 0)[0].tolist())
  if a.nRows() != m:
    error('nRows 2')
  if a.nCols() != n:
    error('nCols 2')
  if a.nNonZeros() != len(numpy.where(x > 0)[0]):
    error('nNonZeros')
  for i in range(m):
    if a.nNonZerosOnRow(i) != x.sum(axis=1)[i]:
      error('nNonZerosOnRow')
  if (a.nNonZerosPerRow() != x.sum(axis=1)).any():
    error('nNonZerosPerRow')
  if (a.nNonZerosPerCol() != x.sum(axis=0)).any():
    error('nNonZerosPerCol')
  for i in range(m):
    # Rebuild a dense row from the sparse column indices.
    y = numpy.zeros((n))
    for j in a.getRowSparse(i):
      y[j] = 1
    if (y != x[i]).any():
      error('getRowSparse')
  if a.capacity() < a.nNonZeros():
    error('capacity')
  m = _RGEN.randint(100,200)
  n = _RGEN.randint(100,200)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = 0
  a = self.Matrix.__class__(x)
  m1 = a.nBytes()
  a.compact()
  m2 = a.nBytes()
  # compact() must not grow the memory footprint.
  if (m2 > m1):
    error('compact')
def testCopy(self):
  """copy() makes the target equal to the source matrix."""
  m = _RGEN.randint(1,10)
  n = _RGEN.randint(5,10)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = numpy.zeros((n))
  a = self.Matrix.__class__(x)
  b = self.Matrix.__class__(1)
  b.copy(a)
  if a != b:
    error('copy')
def testClear(self):
  """clear() resets capacity, shape and nonzero count to zero."""
  m = _RGEN.randint(1,10)
  n = _RGEN.randint(5,10)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = 0
  a = self.Matrix.__class__(x)
  a.clear()
  if a.capacity() != 0:
    error('clear /capacity')
  if a.nRows() != 0:
    error('clear /nRows')
  if a.nCols() != 0:
    error('clear /nCols')
  if a.nNonZeros() != 0:
    error('clear /nNonZeros')
def testResize(self):
  """resize() to 0x0, to larger shapes (preserving nonzeros) and to
  smaller shapes (dropping the truncated nonzeros)."""
  # 1. Resize to 0,0 (equivalent to clear)
  m = _RGEN.randint(4,10)
  n = _RGEN.randint(6,10)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = 0
  a = self.Matrix.__class__(x)
  a.resize(0,0)
  if a.capacity() != 0:
    error('resize to 0,0 /capacity')
  if a.nRows() != 0:
    error('resize to 0,0 /nRows')
  if a.nCols() != 0:
    error('resize to 0,0 /nCols')
  if a.nNonZeros() != 0:
    error('resize to 0,0 /nNonZeros')
  # 2. Resize to larger size
  m = _RGEN.randint(4,10)
  n = _RGEN.randint(6,10)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = 0
  a = self.Matrix.__class__(x)
  # 2.1 More rows only
  old_nrows = a.nRows()
  old_ncols = a.nCols()
  old_nnzr = a.nNonZeros()
  a.resize(2*a.nRows(),a.nCols())
  if a.nRows() != 2*old_nrows or a.nCols() != old_ncols:
    error('resize to more rows, 1')
  if a.nNonZeros() != old_nnzr:
    error('resize to more rows, 2')
  # 2.2 More cols only
  old_nrows = a.nRows()
  a.resize(a.nRows(), 2*a.nCols())
  if a.nRows() != old_nrows or a.nCols() != 2*old_ncols:
    error('resize to more cols, 1')
  if a.nNonZeros() != old_nnzr:
    error('resize to more cols, 2')
  # 2.3 More rows and cols
  m = _RGEN.randint(4,10)
  n = _RGEN.randint(6,10)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = 0
  a = self.Matrix.__class__(x)
  old_nrows = a.nRows()
  old_ncols = a.nCols()
  old_nnzr = a.nNonZeros()
  a.resize(2*a.nRows(),2*a.nCols())
  if a.nRows() != 2*old_nrows or a.nCols() != 2*old_ncols:
    error('resize to more rows and cols, 1')
  if a.nNonZeros() != old_nnzr:
    error('resize to more rows and cols, 2')
  # 3. Resize to smaller size
  m = _RGEN.randint(10,20)
  n = _RGEN.randint(10,20)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = 0
  a = self.Matrix.__class__(x)
  # 3.1 Less rows only
  old_nrows = a.nRows()
  old_ncols = a.nCols()
  old_nnzr = a.nNonZeros()
  a.resize(a.nRows()/2,a.nCols())
  if a.nRows() != old_nrows/2 or a.nCols() != old_ncols:
    error('resize to less rows, 1')
  if a.nNonZeros() != numpy.sum(x[:old_nrows/2]):
    error('resize to less rows, 2')
  # 2.2 Less cols only
  old_nrows = a.nRows()
  a.resize(a.nRows(), a.nCols()/2)
  if a.nRows() != old_nrows or a.nCols() != old_ncols/2:
    error('resize to less cols, 1')
  if a.nNonZeros() != numpy.sum(x[:a.nRows(),:old_ncols/2]):
    error('resize to less cols, 2')
  # 2.3 Less rows and cols
  m = _RGEN.randint(10,20)
  n = _RGEN.randint(10,20)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = 0
  a = self.Matrix.__class__(x)
  old_nrows = a.nRows()
  old_ncols = a.nCols()
  old_nnzr = a.nNonZeros()
  a.resize(a.nRows()/2,a.nCols()/2)
  if a.nRows() != old_nrows/2 or a.nCols() != old_ncols/2:
    error('resize to less rows and cols, 1')
  if a.nNonZeros() != numpy.sum(x[:old_nrows/2,:old_ncols/2]):
    error('resize to less rows and cols, 2')
def testEquals(self):
  """== / != compare matrices element-wise.

  Fix: removed a dead `a = self.Matrix.__class__(n)` that was
  immediately overwritten (it drew nothing from _RGEN, so the test
  data is unchanged).
  """
  m = _RGEN.randint(1,10)
  n = _RGEN.randint(5,10)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = numpy.zeros((n))  # row m/2 is all zero
  a = self.Matrix.__class__(x)
  b = self.Matrix.__class__(x)
  if a != b:
    error('equals 1')
  # Flipping one bit in the zeroed row must break equality.
  b.set(m/2, n/2, 1)
  if a == b:
    error('equals 2')
def testSetOnRow(self):
  """set(row, col-list, value) and setForAllRows(col-list, value).

  Fix: renamed from testSet — a second method named testSet later in
  this class shadowed this one, so it was never discovered or run.
  """
  m = _RGEN.randint(1,10)
  n = _RGEN.randint(5,10)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = numpy.zeros((n))
  a = self.Matrix.__class__(x)
  a.set(m/2, [0, 2, 4], 1)
  x[m/2,0] = 1
  x[m/2,2] = 1
  x[m/2,4] = 1
  if (a != x).any():
    error('set on row 1')
  a.set(m/2, [0,2,4], 0)
  x[m/2,0] = 0
  x[m/2,2] = 0
  x[m/2,4] = 0
  if (a != x).any():
    error('set on row 1')
  m = _RGEN.randint(1,10)
  n = _RGEN.randint(5,10)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = numpy.zeros((n))
  a = self.Matrix.__class__(x)
  a.setForAllRows([0,2,4], 1)
  for i in range(m):
    x[i,0] = 1
    x[i,2] = 1
    x[i,4] = 1
  if (a != x).any():
    error('set for all rows')
def testGetAllNonZeros(self):
  """getAllNonZeros(): as a list of (i,j,...) tuples, and as two index
  lists when called with True."""
  for i in range(10):
    m = _RGEN.randint(2,10)
    n = _RGEN.randint(2,10)
    a = _RGEN.randint(0,2,(m,n))
    a[_RGEN.randint(0,m)] = 0  # force an empty row
    a[:,_RGEN.randint(0,n)] = 0  # force an empty column
    sm = self.Matrix.__class__(a)
    ans_ind = numpy.where(a > 0)
    ans_val = a[ans_ind]
    ans = [(i,j,v) for i,j,v in zip(ans_ind[0], ans_ind[1], ans_val)]
    # Returns one list of pairs by default
    all_nz = sm.getAllNonZeros()
    for x,y in zip(all_nz, ans):
      if x[0] != y[0] or x[1] != y[1]:
        error('getAllNonZeros 1 list of pairs')
    # Test option to return 2 lists instead of 1 list of pairs
    all_nz2 = sm.getAllNonZeros(True)
    for i in range(len(ans_val)):
      if all_nz2[0][i] != ans_ind[0][i] or all_nz2[1][i] != ans_ind[1][i]:
        error('getAllNonZeros 2 lists')
def testSetAllNonZeros(self):
  """setAllNonZeros(): indices in lexicographic order (default) and
  out-of-order with duplicates (clean=False)."""
  for i in range(10):
    m = _RGEN.randint(2,10)
    n = _RGEN.randint(2,10)
    a = _RGEN.randint(0,2,(m,n))
    a[_RGEN.randint(0,m)] = 0
    a[:,_RGEN.randint(0,n)] = 0
    a[0,0] = 1
    a[m-1] = 0
    a[:,n-1] = 0
    nz = numpy.where(a > 0)
    sm = self.Matrix.__class__(1)
    # Assumes lexicographic order of the indices by default
    sm.setAllNonZeros(a.shape[0], a.shape[1], nz[0],nz[1])
    if (sm.toDense() != a).any():
      error('setAllNonZeros, in order')
    # Test when values come in out of (lexicographic) order
    # and with duplicates
    p = _RGEN.permutation(len(nz[0]))
    nz_i2,nz_j2 = [],[]
    for i in p:
      nz_i2.append(nz[0][i])
      nz_j2.append(nz[1][i])
    for i in p:
      nz_i2.append(nz[0][i])
      nz_j2.append(nz[1][i])
    sm2 = self.Matrix.__class__(1)
    sm2.setAllNonZeros(a.shape[0], a.shape[1], nz_i2,nz_j2, False)
    if (sm2.toDense() != a).any():
      error('setAllNonZeros, out of order')
def testGetCol(self):
  """getCol(j) returns the dense j-th column."""
  for i in range(10):
    m = _RGEN.randint(2,10)
    n = _RGEN.randint(2,10)
    a = _RGEN.randint(0,2,(m,n)).astype(numpy.float32)
    a[_RGEN.randint(0,m)] = 0
    a[:,_RGEN.randint(0,n)] = 0
    a[0,0] = 1
    a[m/2] = 0
    a[:,n/2] = 0
    sm = self.Matrix.__class__(a)
    for j in range(n):
      if (sm.getCol(j) != a[:,j]).any():
        error('getCol')
def testSetSlice(self):
  """setSlice(x, y, slice): overwrite a rectangular region, with the
  slice given either as a sparse binary matrix or a numpy array."""
  # With a sparse matrix
  for i in range(10):
    m = _RGEN.randint(10,20)
    n = _RGEN.randint(10,20)
    a = _RGEN.randint(0,2,(m,n)).astype(numpy.float32)
    a[_RGEN.randint(0,m)] = 0
    a[:,_RGEN.randint(0,n)] = 0
    a[0,0] = 1
    a[m/2] = 0
    a[:,n/2] = 0
    sm = self.Matrix.__class__(a)
    b = _RGEN.randint(0,2,(m/4,n/4)).astype(numpy.float32)
    slice = self.Matrix.__class__(b)
    x,y = _RGEN.randint(0,m/2), _RGEN.randint(0,n/2)
    sm.setSlice(x,y,slice)
    ans = numpy.array(a)
    for i in range(b.shape[0]):
      for j in range(b.shape[1]):
        ans[x+i,y+j] = slice.get(i,j)
    if (sm.toDense() != ans).any():
      error('setSlice')
  # With a numpy array
  for i in range(10):
    m = _RGEN.randint(10,20)
    n = _RGEN.randint(10,20)
    a = _RGEN.randint(0,2,(m,n)).astype(numpy.float32)
    a[_RGEN.randint(0,m)] = 0
    a[:,_RGEN.randint(0,n)] = 0
    # NOTE(review): a holds only 0/1 here, so `a < 25` zeroes the whole
    # matrix (then a[0,0]=1) — looks like a leftover from a
    # randint(0,100) variant; confirm intent before changing.
    a[numpy.where(a < 25)] = 0
    a[0,0] = 1
    a[m/2] = 0
    a[:,n/2] = 0
    sm = self.Matrix.__class__(a)
    slice = _RGEN.randint(0,2,(m/4,n/4)).astype(numpy.float32)
    x,y = _RGEN.randint(0,m/2), _RGEN.randint(0,n/2)
    sm.setSlice(x,y,slice)
    ans = numpy.array(a)
    for i in range(slice.shape[0]):
      for j in range(slice.shape[1]):
        ans[x+i,y+j] = slice[i,j]
    if (sm.toDense() != ans).any():
      error('setSlice/dense')
def testNNonZerosPerBox(self):
  """nNonZerosPerBox: count nonzeros per quadrant (2x2 box grid)."""
  for i in range(10):
    m = _RGEN.randint(2,10)
    n = _RGEN.randint(2,10)
    a = _RGEN.randint(0,2,(m,n)).astype(numpy.float32)
    a[_RGEN.randint(0,m)] = 0
    a[:,_RGEN.randint(0,n)] = 0
    a[0,0] = 1
    a[m/2] = 0
    a[:,n/2] = 0
    sm = self.Matrix.__class__(a)
    nnzpb = sm.nNonZerosPerBox([m/2, m], [n/2, n])
    ans = numpy.zeros((2,2))
    ans[0,0] = numpy.sum(a[:m/2,:n/2])
    ans[0,1] = numpy.sum(a[:m/2,n/2:])
    ans[1,0] = numpy.sum(a[m/2:,:n/2])
    ans[1,1] = numpy.sum(a[m/2:,n/2:])
    if (nnzpb.toDense() != ans).any():
      error('nNonZerosPerBox')
def testAppendSparseRow(self):
  """appendSparseRow: append rows given as column-index lists."""
  m = _RGEN.randint(1,10)
  n = _RGEN.randint(5,10)
  a = self.Matrix.__class__(n)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = numpy.zeros((n))
  for i in range(m):
    a.appendSparseRow(numpy.where(x[i] > 0)[0].tolist())
  if (a.toDense() != x).any():
    error('appendSparseRow')
  if a.nRows() != m:
    error('appendSparseRow nRows')
  if (numpy.array(a.nNonZerosPerRow()) != x.sum(axis=1)).any():
    error('appendSparseRow nNonZerosPerRow')
  if a.nNonZeros() != len(numpy.where(x > 0)[0]):
    error('appendSparseRow nNonZeros')
def testAppendDenseRow(self):
  """appendDenseRow: append rows given as dense 0/1 vectors."""
  m = _RGEN.randint(1,10)
  n = _RGEN.randint(5,10)
  a = self.Matrix.__class__(n)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = numpy.zeros((n))
  for i in range(m):
    a.appendDenseRow(x[i])
  if (a.toDense() != x).any():
    error('appendDenseRow')
  if a.nRows() != m:
    error('appendDenseRow nRows')
  if (numpy.array(a.nNonZerosPerRow()) != x.sum(axis=1)).any():
    error('appendDenseRow nNonZerosPerRow')
  if a.nNonZeros() != len(numpy.where(x > 0)[0]):
    error('appendDenseRow nNonZeros')
def testReplaceSparseRow(self):
  """replaceSparseRow: overwrite each row with new sparse contents."""
  m = _RGEN.randint(1,10)
  n = _RGEN.randint(5,10)
  a = self.Matrix.__class__(n)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = numpy.zeros((n))
  for i in range(m):
    a.appendSparseRow(numpy.where(x[i] > 0)[0].tolist())
  for i in range(m):
    x[i] = _RGEN.randint(0,2,(n))  # new random contents for row i
    a.replaceSparseRow(i, numpy.where(x[i] > 0)[0].tolist())
  if (a.toDense() != x).any():
    error('replaceSparseRow')
  if (numpy.array(a.nNonZerosPerRow()) != x.sum(axis=1)).any():
    error('replaceSparseRow nNonZerosPerRow')
  if a.nNonZeros() != len(numpy.where(x > 0)[0]):
    error('replaceSparseRow nNonZeros')
  if a.nRows() != m:
    error('replaceSparseRow nRows')
def testFindRowSparse(self):
  """findRowSparse returns the index of a row matching the given
  sparse contents (any row with identical contents is acceptable)."""
  m = _RGEN.randint(1,10)
  n = _RGEN.randint(5,10)
  a = self.Matrix.__class__(n)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = numpy.zeros((n))
  for i in range(m):
    a.appendSparseRow(numpy.where(x[i] > 0)[0].tolist())
  for i in range(m):
    w = a.findRowSparse(numpy.where(x[i] > 0)[0].tolist())
    if (x[w] != x[i]).any():
      error('findRowSparse')
def testFindRowDense(self):
  """findRowDense: same as findRowSparse but with a dense query row."""
  m = _RGEN.randint(1,10)
  n = _RGEN.randint(5,10)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = numpy.zeros((n))
  a = self.Matrix.__class__(x)
  for i in range(m):
    w = a.findRowDense(x[i])
    if (x[w] != x[i]).any():
      error('findRowDense')
def testGet(self):
  """get(i,j) returns each element of the dense source."""
  m = _RGEN.randint(1,10)
  n = _RGEN.randint(5,10)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = numpy.zeros((n))
  a = self.Matrix.__class__(n)
  a.fromDense(x)
  for i in range(m):
    for j in range(n):
      if a.get(i,j) != x[i,j]:
        error('get')
def testSet(self):
  """set(i,j,v): element-wise writes; the 'set 2' check shows any
  nonzero value is stored as 1."""
  m = _RGEN.randint(1,10)
  n = _RGEN.randint(5,10)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = 0
  a = self.Matrix.__class__(x)
  for i in range(m):
    for j in range(n):
      v = _RGEN.randint(0,2)
      a.set(i,j,v)
      x[i,j] = v
  if (a.toDense() != x).any():
    error('set')
  # Setting 2 is expected to read back as 1.
  a.set(0,n-1,2)
  x[0,n-1] = 1
  if (a.toDense() != x).any():
    error('set 2')
  x[m/2] = 0
  a.fromDense(x)
  for j in range(n):
    a.set(m/2,j,1)
  x[m/2] = 1
  if (a.toDense() != x).any():
    error('set 3')
def testSetRangeToZero(self):
  """setRangeToZero(row, begin, end): zero [begin, end) of a row,
  including empty and full ranges."""
  m = _RGEN.randint(1,10)
  n = _RGEN.randint(5,10)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = 0
  a = self.Matrix.__class__(n)
  a.fromDense(x)
  for i in range(m):
    begin = _RGEN.randint(0,n)
    end = _RGEN.randint(begin, n+1)
    a.setRangeToZero(i, begin, end)
    x[i][begin:end] = 0
  if (a.toDense() != x).any():
    error('setRangeToZero 1')
  # Empty ranges are no-ops.
  a.setRangeToZero(0, 0, 0)
  if (a.toDense() != x).any():
    error('setRangeToZero 2')
  a.setRangeToZero(0, n, n)
  if (a.toDense() != x).any():
    error('setRangeToZero 3')
  a.setRangeToZero(0, 3, 3)
  if (a.toDense() != x).any():
    error('setRangeToZero 4')
  a.setRangeToZero(0, 0, n)
  x[0] = 0
  if (a.toDense() != x).any():
    error('setRangeToZero 5')
def testSetRangeToOne(self):
  """setRangeToOne(row, begin, end): set [begin, end) of a row to 1,
  including empty and full ranges."""
  m = _RGEN.randint(1,10)
  n = _RGEN.randint(5,10)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = numpy.zeros((n))
  a = self.Matrix.__class__(n)
  a.fromDense(x)
  for i in range(m):
    begin = _RGEN.randint(0,n)
    end = _RGEN.randint(begin, n+1)
    a.setRangeToOne(i, begin, end)
    x[i][begin:end] = 1
  if (a.toDense() != x).any():
    error('setRangeToOne 1')
  # Empty ranges are no-ops.
  a.setRangeToOne(0, 0, 0)
  if (a.toDense() != x).any():
    error('setRangeToOne 2')
  a.setRangeToOne(0, n, n)
  if (a.toDense() != x).any():
    error('setRangeToOne 3')
  a.setRangeToOne(0, 3, 3)
  if (a.toDense() != x).any():
    error('setRangeToOne 4')
  a.setRangeToOne(0, 0, n)
  x[0] = 1
  if (a.toDense() != x).any():
    error('setRangeToOne 5')
def testTranspose(self):
  """transpose() in place; applying it twice restores the matrix."""
  for k in range(10):
    m = _RGEN.randint(4,10)
    n = _RGEN.randint(5,10)
    a = self.Matrix.__class__(n)
    x = _RGEN.randint(0,2,(m,n))
    x[m/2] = 0
    x[:n/2] = 0
    for i in range(m):
      a.appendDenseRow(x[i])
    a.transpose()
    if (a.toDense() != numpy.transpose(x)).any():
      error('numpy.transpose')
    if (numpy.array(a.nNonZerosPerRow()) != x.sum(axis=0)).any():
      error('numpy.transpose nNonZerosPerRow')
    if a.nNonZeros() != len(numpy.where(x > 0)[0]):
      error('numpy.transpose nNonZeros')
    a.transpose()  # transpose back: must equal the original
    if (a.toDense() != x).any():
      error('numpy.transpose 2')
    if (numpy.array(a.nNonZerosPerRow()) != x.sum(axis=1)).any():
      error('numpy.transpose nNonZerosPerRow 2')
    if a.nNonZeros() != len(numpy.where(x > 0)[0]):
      error('numpy.transpose nNonZeros 2')
def testCSR(self):
  """toCSR/fromCSR string round-trip preserves the matrix."""
  m = _RGEN.randint(10,20)
  n = _RGEN.randint(10,20)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = 0
  x[:,n/2] = 0
  a = self.Matrix.__class__(x)
  csr = a.toCSR()
  b = self.Matrix.__class__(1)
  b.fromCSR(csr)
  if (a.toDense() != b.toDense()).any():
    error('toCSR/fromCSR')
  if (numpy.array(a.nNonZerosPerRow()) != numpy.array(b.nNonZerosPerRow())).any():
    error('toCSR/fromCSR nNonZerosPerRow')
  if b.nNonZeros() != len(numpy.where(x > 0)[0]):
    error('toCSR/fromCSR nNonZeros')
def testGetstateSetstate(self):
  """__getstate__/__setstate__ round-trip (pickle protocol support)."""
  m = _RGEN.randint(10,20)
  n = _RGEN.randint(10,20)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = 0
  x[:,n/2] = 0
  a = self.Matrix.__class__(x)
  s = a.__getstate__()
  b = self.Matrix.__class__(1)
  b.__setstate__(s)
  if a != b:
    error('__geststate__/__setstate__')
def testCSRToFromFile(self):
  """CSRSaveToFile/CSRLoadFromFile round-trip via a temp file in cwd."""
  m = _RGEN.randint(10,20)
  n = _RGEN.randint(10,20)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = 0
  x[:,n/2] = 0
  a = self.Matrix.__class__(x)
  a.CSRSaveToFile('test_csr2.txt')
  b = self.Matrix.__class__(1)
  b.CSRLoadFromFile('test_csr2.txt')
  if a != b:
    error('CSRSaveToFile/CSRLoadFromFile')
  os.unlink('test_csr2.txt')
def testCSRSize(self):
  """CSRSize() predicts the exact on-disk size of CSRSaveToFile,
  re-checked while nonzeros are progressively removed."""
  for k in range(5):
    m = _RGEN.randint(10,100)
    n = _RGEN.randint(10,100)
    x = _RGEN.randint(0,100,(m,n))
    x[m/2] = 0
    a = self.Matrix.__class__(x)
    for i in range(10):
      s_estimated = a.CSRSize()
      a.CSRSaveToFile('test_csr.txt')
      s_real = os.path.getsize('test_csr.txt')
      if s_estimated != s_real:
        error('CSRSize')
      # Sparsify further so the next iteration checks a smaller file.
      for j in range(1000):
        a.set(_RGEN.randint(0,m),_RGEN.randint(0,n), 0)
  os.unlink('test_csr.txt')
def testBinary(self):
  """binarySaveToFile/binaryLoadFromFile round-trip."""
  m = _RGEN.randint(10,20)
  n = _RGEN.randint(10,20)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = 0
  x[:,n/2] = 0
  a = self.Matrix.__class__(x)
  a.binarySaveToFile('test_binary.bin')
  b = self.Matrix.__class__(1)
  b.binaryLoadFromFile('test_binary.bin')
  if a != b:
    error('binarySaveToFile/binaryLoadFromFile')
  os.unlink('test_binary.bin')
def testToFromSparseVector(self):
  """fromSparseVector/toSparseVector: the matrix flattened row-major to
  a sparse index vector and back, run twice with different shapes."""
  m = _RGEN.randint(1,10)
  n = _RGEN.randint(5,10)
  a = self.Matrix.__class__(1)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = numpy.zeros((n))
  x = x.reshape((m*n))
  indices = numpy.where(x > 0)[0].tolist()
  a.fromSparseVector(m, n, indices)
  x = x.reshape((m,n))
  if (a.toDense() != x).any():
    error('fromSparseVector')
  if (numpy.array(a.nNonZerosPerRow()) != x.sum(axis=1)).any():
    error('fromSparseVector nNonZerosPerRow')
  if a.nNonZeros() != len(numpy.where(x > 0)[0]):
    error('fromSparseVector nNonZeros')
  x = x.reshape(m*n)
  y = a.toSparseVector()
  if (y != numpy.where(x > 0)[0].tolist()).any():
    error('toSparseVector')
  if a.nNonZeros() != len(numpy.where(x > 0)[0]):
    error('toSparseVector nNonZeros 2')
  # Need to make the same matrix can go through
  # fromSparseVector again with a different x
  x = _RGEN.randint(0,2,(n,m))
  x = x.reshape((m*n))
  indices = numpy.where(x > 0)[0].tolist()
  a.fromSparseVector(n, m, indices)
  x = x.reshape((n, m))
  if (a.toDense() != x).any():
    error('fromSparseVector 2')
  if (numpy.array(a.nNonZerosPerRow()) != x.sum(axis=1)).any():
    error('fromSparseVector nNonZerosPerRow 2')
  if a.nNonZeros() != len(numpy.where(x > 0)[0]):
    error('fromSparseVector nNonZeros 2')
  x = x.reshape(m*n)
  y = a.toSparseVector()
  if (y != numpy.where(x > 0)[0].tolist()).any():
    error('toSparseVector 2')
  if a.nNonZeros() != len(numpy.where(x > 0)[0]):
    error('toSparseVector nNonZeros 2')
def testToFromDense(self):
  """fromDense/toDense round-trip, repeated with a different shape to
  verify fromDense fully resets the matrix."""
  m = _RGEN.randint(1,10)
  n = _RGEN.randint(5,10)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = numpy.zeros((n))
  a = self.Matrix.__class__(1)
  a.fromDense(x)
  if (a.toDense() != x).any():
    error('fromDense')
  if (numpy.array(a.nNonZerosPerRow()) != x.sum(axis=1)).any():
    error('fromDense nNonZerosPerRow')
  if a.nNonZeros() != len(numpy.where(x > 0)[0]):
    error('fromDense nNonZeros')
  # Need to make sure the same matrix can go
  # through another fromDense with a different x
  x = _RGEN.randint(0,2,(n,m))
  a.fromDense(x)
  if (a.toDense() != x).any():
    error('fromDense 2')
  if (numpy.array(a.nNonZerosPerRow()) != x.sum(axis=1)).any():
    error('fromDense nNonZerosPerRow 2')
  if a.nNonZeros() != len(numpy.where(x > 0)[0]):
    error('fromDense nNonZeros 2')
def testRowToFromDense(self):
  """rowFromDense/rowToDense: per-row dense write and read-back."""
  m = _RGEN.randint(1,10)
  n = _RGEN.randint(5,10)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = numpy.zeros((n))
  b = numpy.zeros((m,n))
  a = self.Matrix.__class__(n)
  for i in range(m):
    # Append an empty row, then overwrite it in place.
    a.appendDenseRow(numpy.zeros((n)))
    a.rowFromDense(i, x[i])
  if (a.toDense() != x).any():
    error('rowFromDense')
  for i in range(m):
    b[i] = a.rowToDense(i)
  if (b != x).any():
    error('rowToDense')
def testLogicalNot(self):
  """logicalNot() flips every bit (dense result equals 1 - x)."""
  m = _RGEN.randint(1,10)
  n = _RGEN.randint(5,10)
  a = self.Matrix.__class__(n)
  x = _RGEN.randint(0,2,(m,n))
  x[m/2] = numpy.zeros((n))
  for i in range(m):
    a.appendSparseRow(numpy.where(x[i] > 0)[0].tolist())
  a.logicalNot()
  y = 1 - x
  if (a.toDense() != y).any():
    error('logicalNot')
def testLogicalOr(self):
  """logicalOr smoke test.

  NOTE(review): this exercises logicalOr (with another matrix, with
  itself, and after logicalNot/inside) but asserts nothing — output is
  only printed when show=True.  Consider adding assertions.
  """
  show = False
  a = self.Matrix.__class__(1)
  a.fromDense([[0,0,1,1,1,0,0],
               [0,1,0,0,0,1,0],
               [0,1,0,0,0,1,0],
               [0,0,1,1,1,0,0]])
  b = self.Matrix.__class__(1)
  b.fromDense([[0,0,0,0,0,0,0],
               [0,0,1,1,1,0,0],
               [0,0,1,1,1,0,0],
               [0,0,0,0,0,0,0]])
  a.logicalOr(b)
  if show: print a
  a.logicalOr(a)
  if show: print a
  a = self.Matrix.__class__(1)
  a.fromDense([[0,0,1,1,1,0,0],
               [0,1,0,0,0,1,0],
               [0,1,0,0,0,1,0],
               [0,0,1,1,1,0,0]])
  b = self.Matrix.__class__(1)
  b.fromDense([[0,0,0,0,0,0,0],
               [0,0,1,1,1,0,0],
               [0,0,1,1,1,0,0],
               [0,0,0,0,0,0,0]])
  b.logicalNot()
  if show: print b
  b.logicalOr(a)
  if show: print b
  a = self.Matrix.__class__([[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
                            ,[1,1,1,1,1,0,0,0,0,0,0,0,0,0,1]
                            ,[1,1,1,1,0,0,0,0,0,0,0,0,0,0,1]
                            ,[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
                            ,[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
                            ,[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
                            ,[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
                            ,[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
                            ,[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
                            ,[1,0,0,0,0,0,0,1,1,1,1,1,0,0,1]
                            ,[1,0,0,0,1,1,1,1,1,1,1,1,0,0,1]
                            ,[1,0,0,0,0,1,1,1,1,1,1,0,0,0,1]
                            ,[1,0,0,0,0,0,0,1,1,1,0,0,0,0,1]
                            ,[1,0,0,0,0,0,0,1,1,1,0,0,0,0,1]
                            ,[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]])
  a.logicalNot()
  if show: print a
  b = self.Matrix.__class__(a)
  b.inside()
  if show: print b
  a.logicalOr(b)
  if show: print a
def testLogicalAnd(self):
  """logicalAnd smoke test.

  NOTE(review): like testLogicalOr, this asserts nothing — results are
  only printed when show=True.  Consider adding assertions.
  """
  show = False
  a = self.Matrix.__class__(1)
  a.fromDense([[0,0,1,1,1,0,0],
               [0,1,0,0,0,1,0],
               [0,1,0,0,0,1,0],
               [0,0,1,1,1,0,0]])
  b = self.Matrix.__class__(1)
  b.fromDense([[0,0,0,0,0,0,0],
               [0,0,1,1,1,0,0],
               [0,0,1,1,1,0,0],
               [0,0,0,0,0,0,0]])
  a.logicalAnd(b)
  if show: print a
  a.logicalAnd(a)
  if show: print a
  a = self.Matrix.__class__(1)
  a.fromDense([[0,0,1,1,1,0,0],
               [0,1,0,0,0,1,0],
               [0,1,0,0,0,1,0],
               [0,0,1,1,1,0,0]])
  b = self.Matrix.__class__(1)
  b.fromDense([[0,0,0,0,0,0,0],
               [0,0,1,1,1,0,0],
               [0,0,1,1,1,0,0],
               [0,0,0,0,0,0,0]])
  b.logicalNot()
  if show: print b
  b.logicalAnd(a)
  if show: print b
def testOverlap(self):
  """overlap(v): per-row count of shared nonzero positions with v,
  checked against a hand-computed answer table."""
  x = [[0,1,1,0,0,1],
       [1,1,1,1,1,1],
       [0,0,0,0,0,0],
       [1,0,1,0,1,0],
       [1,1,1,0,0,0],
       [0,0,0,1,1,1],
       [1,1,0,0,1,1]]
  # ans[i][j] = overlap of row j with query row x[i].
  ans = [[3,3,0,1,2,1,2],
         [3,6,0,3,3,3,4],
         [0,0,0,0,0,0,0],
         [1,3,0,3,2,1,2],
         [2,3,0,2,3,0,2],
         [1,3,0,1,0,3,2],
         [2,4,0,2,2,2,4]]
  a = self.Matrix.__class__(1)
  a.fromDense(x)
  for xv,yv in zip(x,ans):
    y = a.overlap(xv)
    if (y != yv).any():
      error('overlap')
def testMaxAllowedOverlap(self):
  """maxAllowedOverlap(maxDistance, v) agrees with the Python
  reimplementation of the acceptance rule."""
  for i in range(10):
    m = _RGEN.randint(5,10)
    maxDistance = .5
    n = _RGEN.randint(10,20)
    x = _RGEN.randint(0,2,(m,n))
    a = self.Matrix.__class__(1)
    a.fromDense(x)
    for i in range(10):
      coinc = _RGEN.randint(0,2,(n))
      overlaps = a.overlap(coinc)
      longSums = numpy.maximum(a.rowSums(), coinc.sum())
      maxAllowedOverlaps = (1.0 - maxDistance) * longSums
      # Accepted iff no row overlaps the candidate too much.
      py_accepted = True
      if (overlaps > maxAllowedOverlaps).any():
        py_accepted = False
      if a.maxAllowedOverlap(maxDistance, coinc) != py_accepted:
        error('maxAllowedOverlap')
def testSubtract(self):
  """Binary subtraction b - a expressed as b AND (NOT a)."""
  a = numpy.array([[0,1,0],
                   [1,0,1],
                   [0,1,0]])
  b = numpy.array([[1,1,1],
                   [1,0,1],
                   [1,1,1]])
  c = b - a
  a = self.Matrix.__class__(a)
  b = self.Matrix.__class__(b)
  a.logicalNot()
  b.logicalAnd(a)
  if (c != b.toDense()).any():
    error('subtract')
def testInsideAndEdges(self):
  """inside()/edges() smoke test over many hand-drawn shapes.

  NOTE(review): asserts nothing — each case is only rendered
  side-by-side when show=True.  Consider adding expected outputs.
  """
  show = False
  def printSideBySide(before, after):
    # ASCII-render 'before -> after' one row per line.
    for i in range(before.nRows()):
      line = ''
      for j in range(before.nCols()):
        line += '#' if before.get(i,j) == 1 else '.'
      line += ' -> '
      for j in range(before.nCols()):
        line += '#' if after.get(i,j) == 1 else '.'
      print line
    print
  def sideBySide(a, edges=False):
    # Apply edges(2) or inside() to a copy and optionally render it.
    a = self.Matrix.__class__(a)
    orig = self.Matrix.__class__(a)
    if edges:
      a.edges(2)
    else:
      a.inside()
    if show:
      printSideBySide(orig, a)
  for edges in [False, True]:
    sideBySide([[0,0,0,0,0,0],
                [0,0,0,0,0,0],
                [0,0,0,0,0,0]], edges)
    sideBySide([[1,1,1,1,1,1],
                [1,0,0,0,0,1],
                [1,1,1,1,1,1]], edges)
    sideBySide([[1,1,1,1,1,1],
                [1,1,0,0,0,1],
                [1,1,1,1,1,1]], edges)
    sideBySide([[1,1,1,1,1,1],
                [1,1,0,0,1,1],
                [1,1,1,1,1,1]], edges)
    sideBySide([[1,1,1,1,1,1],
                [1,1,1,0,0,1],
                [1,1,1,1,1,1]], edges)
    sideBySide([[1,1,1,1,1,1],
                [1,1,1,0,1,1],
                [1,1,1,1,1,1]], edges)
    sideBySide([[1,1,1,1,1,1],
                [1,1,1,1,1,1],
                [1,1,1,1,1,1]], edges)
    sideBySide([[0,0,0,0,0,0,0,0],
                [0,1,1,1,1,1,1,0],
                [0,1,0,0,0,0,1,0],
                [0,1,1,1,1,1,1,0],
                [0,0,0,0,0,0,0,0]], edges)
    sideBySide([[0,0,0,0,0,0,0,0],
                [0,1,1,1,1,1,1,0],
                [0,1,0,0,0,0,1,0],
                [0,1,0,0,0,0,1,0],
                [0,1,1,1,1,1,1,0],
                [0,0,0,0,0,0,0,0]], edges)
    sideBySide([[0,0,0,0,0,0,0,0],
                [0,1,1,1,1,1,1,0],
                [0,1,1,0,0,0,1,0],
                [0,1,1,0,0,0,1,0],
                [0,1,1,1,1,1,1,0],
                [0,0,0,0,0,0,0,0]], edges)
    sideBySide([[0,0,0,0,0,0,0,0],
                [0,1,1,1,1,1,1,0],
                [0,1,1,1,0,0,1,0],
                [0,1,1,1,0,0,1,0],
                [0,1,1,1,1,1,1,0],
                [0,0,0,0,0,0,0,0]], edges)
    sideBySide([[1,1,1,1,1,1,0],
                [1,1,1,1,0,0,1],
                [1,1,0,0,0,0,1],
                [0,1,1,1,1,1,0]], edges)
    sideBySide([[0,0,1,1,1,0,0],
                [0,1,0,0,0,1,0],
                [0,1,0,0,0,1,0],
                [0,0,1,1,1,0,0]], edges)
    sideBySide([[0,0,1,1,1,0,0],
                [0,1,0,1,0,1,0],
                [0,1,0,1,0,1,0],
                [0,0,1,1,1,0,0]], edges)
    sideBySide([[0,0,1,1,1,0,0],
                [0,1,0,0,0,0,0],
                [0,1,0,0,0,0,0],
                [0,0,1,1,1,0,0]], edges)
    sideBySide([[0,0,1,1,1,1,0],
                [0,1,0,1,0,0,0],
                [0,1,0,1,0,0,0],
                [0,0,1,1,1,1,0]], edges)
    sideBySide([[1,1,1,1,1,1,0],
                [1,1,0,0,0,1,1],
                [1,0,0,0,0,0,1],
                [0,1,0,0,1,1,0]], edges)
    sideBySide([[1,1,1,1,1,1,0],
                [1,1,1,0,0,1,1],
                [1,1,0,0,0,0,1],
                [0,1,1,1,1,1,0]], edges)
    sideBySide([[ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
                [ 0,0,0,1,1,1,1,1,1,1,1,1,0,0,0],
                [ 0,0,0,1,1,1,1,1,1,1,1,1,1,0,0],
                [ 0,0,0,1,1,1,1,1,1,1,1,1,1,0,0],
                [ 0,0,0,1,1,1,1,1,1,1,1,1,1,0,0],
                [ 0,0,0,0,1,1,1,1,1,1,1,1,1,0,0],
                [ 0,0,0,0,1,1,1,1,1,1,1,1,1,0,0],
                [ 0,0,0,0,1,1,1,1,1,1,1,1,1,0,0],
                [ 0,0,0,1,1,1,1,1,1,1,1,1,0,0,0],
                [ 0,0,0,1,1,1,1,1,1,1,1,1,0,0,0],
                [ 0,0,0,1,1,1,1,1,1,1,1,1,0,0,0],
                [ 0,0,0,1,1,1,1,1,1,1,1,1,0,0,0],
                [ 0,0,0,1,1,1,1,1,1,1,1,1,0,0,0],
                [ 0,0,0,1,1,1,1,1,1,1,1,1,0,0,0],
                [ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]], edges)
    sideBySide([[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
               ,[1,1,1,1,1,0,0,0,0,0,0,0,0,0,1]
               ,[1,1,1,1,0,0,0,0,0,0,0,0,0,0,1]
               ,[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
               ,[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
               ,[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
               ,[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
               ,[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
               ,[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
               ,[1,0,0,0,0,0,0,1,1,1,1,1,0,0,1]
               ,[1,0,0,0,1,1,1,1,1,1,1,1,0,0,1]
               ,[1,0,0,0,0,1,1,1,1,1,1,0,0,0,1]
               ,[1,0,0,0,0,0,0,1,1,1,0,0,0,0,1]
               ,[1,0,0,0,0,0,0,1,1,1,0,0,0,0,1]
               ,[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]], edges)
    a = self.Matrix.__class__([[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
                              ,[1,1,1,1,1,0,0,0,0,0,0,0,0,0,1]
                              ,[1,1,1,1,0,0,0,0,0,0,0,0,0,0,1]
                              ,[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
                              ,[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
                              ,[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
                              ,[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
                              ,[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
                              ,[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
                              ,[1,0,0,0,0,0,0,1,1,1,1,1,0,0,1]
                              ,[1,0,0,0,1,1,1,1,1,1,1,1,0,0,1]
                              ,[1,0,0,0,0,1,1,1,1,1,1,0,0,0,1]
                              ,[1,0,0,0,0,0,0,1,1,1,0,0,0,0,1]
                              ,[1,0,0,0,0,0,0,1,1,1,0,0,0,0,1]
                              ,[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]])
    a.logicalNot()
    sideBySide(a, edges)
    sideBySide([[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
               ,[1,1,1,1,1,0,0,0,0,0,0,0,0,0,1]
               ,[1,1,1,1,0,0,0,0,0,0,0,0,0,0,1]
               ,[1,0,0,0,0,0,0,0,1,1,1,0,0,0,1]
               ,[1,0,0,0,0,0,1,1,1,0,1,0,0,0,1]
               ,[1,0,0,0,0,0,1,0,0,0,0,0,0,0,1]
               ,[1,0,0,0,0,0,1,1,1,0,0,0,0,0,1]
               ,[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
               ,[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
               ,[1,0,0,0,0,0,0,1,1,1,1,1,0,0,1]
               ,[1,0,0,0,1,1,1,1,1,1,1,1,0,0,1]
               ,[1,0,0,0,0,1,1,1,1,1,1,0,0,0,1]
               ,[1,0,0,0,0,0,0,1,1,1,0,0,0,0,1]
               ,[1,0,0,0,0,0,0,1,1,1,0,0,0,0,1]
               ,[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]], edges)
    sideBySide([[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
                [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
                [1,1,1,1,1,0,0,0,0,0,0,0,1,1,1],
                [1,1,1,1,0,0,0,0,0,0,0,0,0,0,1],
                [1,1,1,0,0,0,0,0,0,0,0,0,0,0,1],
                [1,1,0,0,0,0,0,0,0,0,0,0,0,0,1],
                [1,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
                [1,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
                [1,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
                [1,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
                [1,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
                [1,1,0,0,0,0,0,0,0,0,0,0,0,1,1],
                [1,1,1,1,1,1,0,0,0,0,0,0,1,1,1],
                [1,1,1,1,1,1,0,0,0,0,0,0,1,1,1],
                [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]], edges)
    a = self.Matrix.__class__([[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
                               [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
                               [1,1,1,1,1,0,0,0,0,0,0,0,1,1,1],
                               [1,1,1,1,0,0,0,0,0,0,0,0,0,0,1],
                               [1,1,1,0,0,0,0,0,0,0,0,0,0,0,1],
                               [1,1,0,0,0,0,0,0,0,0,0,0,0,0,1],
                               [1,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
                               [1,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
                               [1,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
                               [1,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
                               [1,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
                               [1,1,0,0,0,0,0,0,0,0,0,0,0,1,1],
                               [1,1,1,1,1,1,0,0,0,0,0,0,1,1,1],
                               [1,1,1,1,1,1,0,0,0,0,0,0,1,1,1],
                               [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]])
    a.logicalNot()
    sideBySide(a, edges)
def testRightVecSumAtNZ(self):
  """Matrix * vector on the right, every stored nonzero counting as 1.

  The _fast variant writes into a caller-supplied buffer instead of
  allocating the result.

  Fix: the tolerance check used `max(y - answer)`, which ignores cases
  where y is *smaller* than the reference; compare the absolute
  difference instead.
  """
  for i in range(10):
    m = _RGEN.randint(1,10)
    n = _RGEN.randint(5,10)
    mat = _RGEN.randint(0,2,(m,n))
    mat[m/2] = numpy.zeros((n))
    a = self.Matrix.__class__(1)
    a.fromDense(mat)
    x = _RGEN.lognormal(size=n).astype(numpy.float32)
    y = a.rightVecSumAtNZ(x)
    answer = numpy.dot(mat, x)
    if numpy.abs(y - answer).max() > 1e-5:
      error('rightVecSumAtNZ')
    y2 = numpy.zeros((m)).astype(numpy.float32)
    a.rightVecSumAtNZ_fast(x, y2)
    if (y != y2).any():
      error('rightVecSumAtNZ_fast')
def testRightVecArgMaxAtNZ(self):
  """rightVecArgMaxAtNZ: per row, the index of the max x over the
  row's nonzero columns (0 for empty rows)."""
  for k in range(10):
    m = _RGEN.randint(1,10)
    n = _RGEN.randint(5,10)
    mat = _RGEN.randint(0,2,(m,n))
    mat[m/2] = numpy.zeros((n))
    a = self.Matrix.__class__(1)
    a.fromDense(mat)
    x = _RGEN.lognormal(size=n).astype(numpy.float32)
    y = a.rightVecArgMaxAtNZ(x)
    answer = numpy.zeros(m)
    # NOTE(review): `a` is rebound from the matrix to the running max
    # here; harmless because the matrix is rebuilt each k iteration,
    # but worth renaming.
    for i in xrange(m):
      a = 0
      for j in xrange(n):
        if mat[i,j] > 0:
          if x[j] > a:
            a = x[j]
            answer[i] = j
    if (y != answer).any():
      error('rightVecArgMaxAtNZ')
def testLeftVecSumAtNZ(self):
  """Vector * matrix on the left, every stored nonzero counting as 1.

  Fixes: (1) one-sided tolerance `max(y - answer)` replaced by an
  absolute difference; (2) the failure label for the fast variant was
  copy-pasted from the right-side test ('rightVecSumAtNZ_fast').
  """
  for i in range(10):
    m = _RGEN.randint(1,10)
    n = _RGEN.randint(5,10)
    mat = _RGEN.randint(0,2,(m,n))
    mat[m/2] = numpy.zeros((n))
    a = self.Matrix.__class__(1)
    a.fromDense(mat)
    x = _RGEN.lognormal(size=m).astype(numpy.float32)
    y = a.leftVecSumAtNZ(x)
    answer = numpy.dot(x, mat)
    if numpy.abs(y - answer).max() > 1e-5:
      error('leftVecSumAtNZ')
    y2 = numpy.zeros((n)).astype(numpy.float32)
    a.leftVecSumAtNZ_fast(x, y2)
    if (y != y2).any():
      error('leftVecSumAtNZ_fast')
def testCompact(self):
  """compact() shrinks capacity to exactly the number of nonzeros."""
  m = _RGEN.randint(1,100)
  n = _RGEN.randint(5,100)
  mat = _RGEN.randint(0,2,(m,n))
  mat[m/2] = numpy.zeros((n))
  a = self.Matrix.__class__(1)
  a.fromDense(mat)
  needed = a.nNonZeros()
  a.compact()
  if a.capacity() != needed:
    error('compact')
def testPickling(self):
  """Round-trip through cPickle preserves the matrix contents.

  Fix: the old code leaked both file handles (open(...) with no close)
  and read the pickle back in text mode; use context managers and 'rb'.
  """
  m = _RGEN.randint(1,100)
  n = _RGEN.randint(5,100)
  mat = _RGEN.randint(0,2,(m,n))
  mat[m/2] = numpy.zeros((n))
  a = self.Matrix.__class__(1)
  a.fromDense(mat)
  with open('test.bin', 'wb') as f:
    cPickle.dump(a, f)
  with open('test.bin', 'rb') as f:
    b = cPickle.load(f)
  if (a.toDense() != b.toDense()).any():
    error('pickling')
  os.unlink('test.bin')
def testMinHammingDistance(self):
    """Check minHammingDistance against a brute-force reference.

    minHammingDistance is expected to return (row index, distance) of
    the matrix row closest (in Hamming distance) to the sparse query.
    """
    m = _RGEN.randint(5,10)
    n = _RGEN.randint(5,10)
    mat = _RGEN.randint(0,2,(m,n))
    mat[m/2] = numpy.zeros((n))
    a = self.Matrix.__class__(mat)
    # Bug fix: the outer loop used to reuse 'i', which the two inner
    # loops then shadowed.
    for trial in range(10):
        x = _RGEN.randint(0,2,(n))
        # Sparse representation: indices of the set bits of x.
        sparse_x = [j for j in range(n) if x[j] == 1]
        min_row, min_d = 0, 9999
        for i in range(m):
            # Hamming distance = count of positions where x and row i differ
            # (entries are 0/1, so inequality is exactly the XOR test).
            d = 0
            for j in range(n):
                if x[j] != mat[i,j]:
                    d += 1
            if d < min_d:
                min_d = d
                min_row = i
        r = a.minHammingDistance(sparse_x)
        if r[0] != min_row or r[1] != min_d:
            error('minHammingDistance')
def testFirstRowCloserThan(self):
    """Check firstRowCloserThan against a brute-force reference.

    firstRowCloserThan(sparse_x, 4) is expected to return the index of
    the first row whose Hamming distance to x is below 4, or nRows()
    (here m) when no such row exists.
    """
    m = _RGEN.randint(5,10)
    n = _RGEN.randint(5,10)
    mat = _RGEN.randint(0,2,(m,n))
    mat[m/2] = numpy.zeros((n))
    a = self.Matrix.__class__(mat)
    # Bug fix: the outer loop used to reuse 'i', which the two inner
    # loops then shadowed.
    for trial in range(10):
        x = _RGEN.randint(0,2,(n))
        sparse_x = [j for j in range(n) if x[j] == 1]
        # m acts as the "not found" sentinel, matching the tested API.
        min_row = m
        for i in range(m):
            d = 0
            for j in range(n):
                # Entries are 0/1, so inequality is exactly the XOR test.
                if x[j] != mat[i,j]:
                    d += 1
            if d < 4:
                min_row = i
                break
        r = a.firstRowCloserThan(sparse_x, 4)
        if r != min_row:
            error('firstRowCloserThan')
def testVecMaxProd(self):
    """vecMaxProd(x)[i] must equal max(x[k] for nonzero columns k of row i),
    with 0 for rows that have no nonzeros."""
    nrows = _RGEN.randint(5,10)
    ncols = _RGEN.randint(5,10)
    dense = _RGEN.randint(0,2,(nrows,ncols))
    # One row forced to all zeros.
    dense[nrows/2] = numpy.zeros((ncols))
    sm = self.Matrix.__class__(dense)
    for trial in range(10):
        vec = _RGEN.lognormal(1,2,(ncols))
        got = sm.vecMaxProd(vec)
        expected = numpy.zeros((nrows))
        for r in range(nrows):
            best = 0
            for c in range(ncols):
                if dense[r,c] > 0 and vec[c] > best:
                    best = vec[c]
            expected[r] = best
        if max(got - expected) > 1e-4:
            error('vecMaxProd')
def testLeftDenseMatSumAtNZ(self):
for i in range(10):
a = _RGEN.randint(0,2,(12,13))
m = self.Matrix.__class__(a)
b = _RGEN.randint(0,10,(11,12))
c = m.leftDenseMatSumAtNZ(b)
d = numpy.dot(b,a)
if (c != d).any():
print m
print a
print c
print d
error('leftDenseMatSumAtNZ')
def testLeftDenseMatMaxAtNZ(self):
    """Each output cell (r, c) must be the max of dense[r] restricted to
    the nonzero positions of the mask's column c (0 if the column is empty)."""
    for trial in range(10):
        mask = _RGEN.randint(0,2,(6,4))
        dense = _RGEN.randint(0,10,(5,6))
        expected = numpy.zeros((dense.shape[0], mask.shape[1])).astype(numpy.int32)
        for r in range(dense.shape[0]):
            for c in range(mask.shape[1]):
                col = mask[:,c]
                # Values of dense[r] at the column's nonzero positions.
                candidates = (dense[r] * col)[col > 0]
                if len(candidates) > 0:
                    expected[r,c] = candidates.max()
        actual = self.Matrix.__class__(mask).leftDenseMatMaxAtNZ(dense).astype(numpy.int32)
        if (expected != actual).any():
            error('leftDenseMatMaxAtNZ')
def testZeroRowsIndicator(self):
    """zeroRowsIndicator returns (count, indicator) of all-zero rows and
    must be the exact complement of nonZeroRowsIndicator."""
    for trial in range(10):
        nrows = _RGEN.randint(10, 20)
        ncols = _RGEN.randint(10, 20)
        dense = _RGEN.randint(0,100,(nrows,ncols))
        # Sparsify: keep only values >= 80.
        dense[numpy.where(dense < 80)] = 0
        # Sometimes force an all-zero row, sometimes force no zero rows.
        if _RGEN.randint(0,100) > 50:
            dense[_RGEN.randint(0,nrows)] = 0
        elif _RGEN.randint(0,100) > 50:
            for r in range(nrows):
                dense[r,0] = 1
        sm = self.Matrix.__class__(dense)
        expected_v = dense.sum(axis=1) == 0
        expected_c = expected_v.sum()
        count, indicator = sm.zeroRowsIndicator()
        if count != expected_c or (expected_v != indicator).any():
            error('zeroRowsIndicator 1')
        count2, indicator2 = sm.nonZeroRowsIndicator()
        if count + count2 != nrows:
            error('zeroRowsIndicator 2')
        for r in range(nrows):
            # The two indicators must disagree at every row.
            if indicator[r] == indicator2[r]:
                error('zeroRowsIndicator 3')
def testNonZeroRowsIndicator(self):
    """nonZeroRowsIndicator returns (count, indicator) of rows with at
    least one nonzero and must be the exact complement of zeroRowsIndicator."""
    for trial in range(10):
        nrows = _RGEN.randint(10, 20)
        ncols = _RGEN.randint(10, 20)
        dense = _RGEN.randint(0,100,(nrows,ncols))
        # Sparsify: keep only values >= 80.
        dense[numpy.where(dense < 80)] = 0
        # Sometimes force an all-zero row, sometimes force no zero rows.
        if _RGEN.randint(0,100) > 50:
            dense[_RGEN.randint(0,nrows)] = 0
        elif _RGEN.randint(0,100) > 50:
            for r in range(nrows):
                dense[r,0] = 1
        sm = self.Matrix.__class__(dense)
        expected_v = dense.sum(axis=1) != 0
        expected_c = expected_v.sum()
        count, indicator = sm.nonZeroRowsIndicator()
        if count != expected_c or (expected_v != indicator).any():
            error('nonZeroRowsIndicator 1')
        count2, indicator2 = sm.zeroRowsIndicator()
        if count + count2 != nrows:
            error('nonZeroRowsIndicator 2')
        for r in range(nrows):
            # The two indicators must disagree at every row.
            if indicator[r] == indicator2[r]:
                error('nonZeroRowsIndicator 3')
def testAppendSparseCol(self):
    """appendEmptyCols then appendSparseCol must grow nCols accordingly
    while leaving nRows untouched."""
    nrows = _RGEN.randint(10,20)
    ncols = _RGEN.randint(10,20)
    dense = _RGEN.randint(0,2,(nrows,ncols))
    sm = self.Matrix.__class__(dense)
    sm.appendEmptyCols(3)
    if sm.nRows() != nrows or sm.nCols() != ncols + 3:
        error('appendEmptyCols 1')
    # A sparse column = the row indices that should be set.
    newcol = _RGEN.permutation(nrows)[:nrows/2].astype('int32')
    sm.appendSparseCol(newcol)
    if sm.nRows() != nrows or sm.nCols() != ncols + 4:
        error('appendSparseCol 1')
@unittest.skip("Not currently using...")
def testScalability(self):
    """Stress test (currently skipped): build a 200k-row matrix, round-trip
    it through a CSR file, then grow a wide matrix row by row."""
    # Make sure we can create a long matrix
    a = self.Matrix.__class__(2)
    for i in range(200000):
        a.appendDenseRow([1,1])
    a.CSRSaveToFile('test.txt')
    b = self.Matrix.__class__(1)
    b.CSRLoadFromFile('test.txt')
    if (a.toDense() != b.toDense()).any():
        error('scalability 1')
    print 'Preparing'
    # Build 20000 sparse rows: the column indices of entries > 90
    # in each random dense row.
    n = 10000
    a = self.Matrix.__class__(n)
    mat = _RGEN.randint(0,100,(20000,n))
    x = []
    for row in mat:
        x += [numpy.where(row > 90)[0]]
    print 'Evaluating'
    for i in range(len(x)):
        if i % 100 == 0:
            print i
        # Append only when findRowSparse(x[i]) == nRows() -- presumably the
        # "row not present" sentinel; confirm against the matrix API.
        if a.findRowSparse(x[i]) == a.nRows():
            a.appendSparseRow(x[i])
if __name__ == "__main__":
    # Allow running this module directly as a test script.
    unittest.main()
| agpl-3.0 |
piotr-dobrogost/sqlalchemy-continuum | tests/schema/test_update_property_mod_flags.py | 5 | 2946 | from copy import copy
import sqlalchemy as sa
from sqlalchemy_continuum import version_class
from sqlalchemy_continuum.plugins import PropertyModTrackerPlugin
from sqlalchemy_continuum.schema import update_property_mod_flags
from tests import TestCase
class TestSchemaTools(TestCase):
    """Tests for schema.update_property_mod_flags under the 'validity'
    versioning strategy with the property-mod-tracker plugin enabled."""

    versioning_strategy = 'validity'
    plugins = [PropertyModTrackerPlugin()]

    def create_models(self):
        """Declare a minimal versioned Article model used by the tests."""
        class Article(self.Model):
            __tablename__ = 'article'
            __versioned__ = copy(self.options)

            id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
            name = sa.Column(sa.Unicode(255), nullable=False)

        self.Article = Article

    def _insert(self, values):
        """Insert one raw row into the Article version table.

        :param values: mapping of column name to value for the insert.
        """
        table = version_class(self.Article).__table__
        stmt = table.insert().values(values)
        self.session.execute(stmt)

    def test_something(self):
        """update_property_mod_flags must set name_mod exactly on versions
        where `name` first appears or changes for an object."""
        table = version_class(self.Article).__table__
        # One tuple per version row, DRY-ing up five copy-pasted dicts:
        # (id, transaction_id, end_transaction_id, name, operation_type).
        # name_mod always starts False so the flag update has work to do.
        version_rows = [
            (1, 1, 2, u'Article 1', 1),
            (1, 2, 4, u'Article 1', 2),
            (2, 3, 5, u'Article 2', 1),
            (1, 4, None, u'Article 1 updated', 2),
            (2, 5, None, u'Article 2', 2),
        ]
        for id_, tx_id, end_tx_id, name, op_type in version_rows:
            self._insert(
                {
                    'id': id_,
                    'transaction_id': tx_id,
                    'end_transaction_id': end_tx_id,
                    'name': name,
                    'name_mod': False,
                    'operation_type': op_type,
                }
            )
        update_property_mod_flags(
            table,
            ['name'],
            conn=self.session
        )
        rows = self.session.execute(
            'SELECT * FROM article_version ORDER BY transaction_id'
        ).fetchall()
        # Transactions 1 and 3 insert new articles and 4 changes a name,
        # so those versions get name_mod=True; transactions 2 and 5 keep
        # the previous name and must stay False.
        expected_mod = {1: True, 2: False, 3: True, 4: True, 5: False}
        assert [row.transaction_id for row in rows] == sorted(expected_mod)
        for row in rows:
            assert bool(row.name_mod) == expected_mod[row.transaction_id]
| bsd-3-clause |
rahul67/hue | desktop/core/ext-py/requests-2.6.0/requests/packages/chardet/big5prober.py | 2931 | 1684 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
    """Multi-byte charset prober configured for the Big5 encoding."""

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        # Pair the Big5 distribution analyser with its coding state machine.
        self._mDistributionAnalyzer = Big5DistributionAnalysis()
        self._mCodingSM = CodingStateMachine(Big5SMModel)
        self.reset()

    def get_charset_name(self):
        """Return the canonical name of the detected charset."""
        return "Big5"
| apache-2.0 |
EDUlib/edx-platform | openedx/core/djangolib/fields.py | 4 | 1479 | """
Custom Django fields.
"""
from django.db import models
class CharNullField(models.CharField):
    """
    CharField that stores NULL but returns ''
    """
    # Human-readable description used by Django introspection.
    description = "CharField that stores NULL but returns ''"

    def to_python(self, value):
        """Converts the value into the correct Python object."""
        # NOTE(review): this isinstance check against models.CharField looks
        # suspicious -- to_python receives field *values*, not field objects.
        # Confirm whether a string/instance check was intended here.
        if isinstance(value, models.CharField):
            return value
        if value is None:
            # Map database NULL to the empty string for Python callers.
            return ""
        else:
            return value

    def get_db_prep_value(self, value, connection, prepared=False):
        """Converts value to a backend-specific value."""
        if not prepared:
            value = self.get_prep_value(value)
        if value == "":
            # Store empty strings as NULL in the database (inverse of
            # the to_python mapping above).
            return None
        else:
            return value
class BigAutoField(models.AutoField):
    """
    AutoField backed by a 64-bit integer column.

    Django ships an equivalent field starting with version 1.10.
    """

    def db_type(self, connection):
        """
        Return the column type used when creating this field.
        """
        backend = type(connection).__module__
        if "mysql" in backend:
            return "bigint AUTO_INCREMENT"
        if "postgres" in backend:
            return "bigserial"
        return super().db_type(connection)

    def rel_db_type(self, connection):
        """
        Return the column type used by relations pointing to this field.
        Not used until Django 1.10.
        """
        return "bigint"
| agpl-3.0 |
aslab/rct | higgs/branches/ros-fuerte/OMRosDrivers_py/src/meta_sensor_node.py | 1 | 1309 | #!/usr/bin/env python
'''
@author: Carlos Hernandez
@organization: ASLab
@summary: module that realises perception up to the sensory processing, on rosnodes
@status: second version - basic functionality but working
'''
#import sys
#import os
#import xmlrpclib
import roslib; roslib.load_manifest('OMRosDrivers_py')
import rospy
import rosnode
import roslib.scriptutil as scriptutil
# for the sensing perception ------------------------------
# sensing is performed in a cycle with continuous sending of meta_sensing info
from sensing import OMRosSensor
#----------------------------------------------------------
#####################################################################
def main():
    """Run the meta-sensor ROS node: sense in a loop until shutdown."""
    rospy.init_node('meta_sensor')
    rospy.loginfo("sensing node started...")
    # The sensing object performs one sensing cycle per sense() call.
    meta_sensor = OMRosSensor()
    # Poll every 2 seconds until ROS requests shutdown.
    while not rospy.is_shutdown():
        meta_sensor.sense()
        rospy.sleep(2)
    rospy.loginfo("...meta_sensor ended")
#####################################################################
if __name__ == '__main__':
main() | gpl-3.0 |
tcwicklund/django | django/conf/global_settings.py | 55 | 21393 | # Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
    """Mark *s* for translation without translating it (identity)."""
    # PEP 8 (E731): a def is preferred over assigning a lambda to a name;
    # it also gives the callable a proper __name__ for tracebacks.
    return s
####################
# CORE #
####################
DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format [('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com')]
ADMINS = []
# List of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = []
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('ast', gettext_noop('Asturian')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('io', gettext_noop('Ido')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('mr', gettext_noop('Marathi')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmal')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "fa", "ur"]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
# List of strings representing installed apps.
INSTALLED_APPS = []
TEMPLATES = []
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = [
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# ]
DISALLOWED_USER_AGENTS = []
ABSOLUTE_URL_OVERRIDES = {}
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = [
# re.compile(r'^/apple-touch-icon.*\.png$'),
#     re.compile(r'^/favicon\.ico$'),
#     re.compile(r'^/robots\.txt$'),
#     re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# ]
IGNORABLE_404_URLS = []
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
]
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
]
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# this middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = [
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
]
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like ".example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_COOKIE_HTTPONLY = True
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# the first hasher in this list is the preferred algorithm. any
# password using different algorithms will be converted automatically
# upon login
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher',
'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = []
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are to start with)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = []
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = []
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = False
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
| bsd-3-clause |
mrry/tensorflow | tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py | 8 | 62383 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SequenceQueueingStateSaver and wrappers.
Please see the reading data how-to for context.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.training import queue_runner
class _SequenceInputWrapper(object):
  """A wrapper object for storing sequence-related input.
  The _SequenceInputWrapper accepts four objects:
    length: A scalar int containing the length of the input sequence.
    key: A scalar string containing the unique key of the input sequence.
    sequences: A dict mapping labels, like `input`, to tensors
      whose initial index dimension is at least size `length`.
    context: A dict mapping labels, like `global_target`, to tensors
      that represent data across the entire example.
  """
  def __init__(self, length, key, sequences, context):
    length = ops.convert_to_tensor(length, name="length")
    key = ops.convert_to_tensor(key, name="key")
    # Validate the container types before touching any of the values.
    if not isinstance(sequences, dict):
      raise TypeError("sequences must be a dict")
    if not isinstance(context, dict):
      raise TypeError("context must be a dict")
    if not sequences:
      raise ValueError("must have at least one sequence tensor")
    # Keys are later embedded in barrier component names, so they must be
    # strings without colons (the colon is the name separator).
    for label in sequences:
      if not isinstance(label, six.string_types):
        raise TypeError("sequence key must be string: %s" % label)
      if ":" in label:
        raise ValueError("sequence key may not have a colon: '%s'" % label)
    for label in context:
      if not isinstance(label, six.string_types):
        raise TypeError("context key must be string: %s" % label)
      if ":" in label:
        raise ValueError("context key may not have a colon: '%s'" % label)
    self._length = length
    self._key = key
    # Convert all values to tensors, naming each op after its label.
    self._sequences = dict(
        (label, ops.convert_to_tensor(tensor, name="sequence_%s" % label))
        for label, tensor in sequences.items())
    self._context = dict(
        (label, ops.convert_to_tensor(tensor, name="context_%s" % label))
        for label, tensor in context.items())
  @property
  def length(self):
    return self._length
  @property
  def key(self):
    return self._key
  @property
  def sequences(self):
    return self._sequences
  @property
  def context(self):
    return self._context
def _check_multiple_of(value, multiple_of):
  """Checks that value `value` is a non-zero multiple of `multiple_of`.
  Args:
    value: an int32 scalar Tensor.
    multiple_of: an int or int32 scalar Tensor.
  Returns:
    new_value: an int32 scalar Tensor matching `value`, but which includes an
      assertion that `value` is a multiple of `multiple_of`.
  """
  assert isinstance(value, ops.Tensor)
  # The check fails when value == 0 or when value % multiple_of != 0.
  is_nonzero_multiple = math_ops.logical_and(
      math_ops.equal(math_ops.mod(value, multiple_of), 0),
      math_ops.not_equal(value, 0))
  error_message = string_ops.string_join(
      ["Tensor %s should be a multiple of: " % value.name,
       string_ops.as_string(multiple_of),
       ", but saw value: ",
       string_ops.as_string(value),
       ". Consider setting pad=True."])
  check_op = control_flow_ops.Assert(is_nonzero_multiple, [error_message])
  # Tie the assertion to the returned tensor so it runs whenever the
  # result is evaluated.
  with ops.control_dependencies([check_op]):
    return array_ops.identity(value, name="multiple_of_checked")
def _check_rank(value, expected_rank):
  """Check the rank of Tensor `value`, via shape inference and assertions.
  Args:
    value: A Tensor, possibly with shape associated shape information.
    expected_rank: int32 scalar (optionally a `Tensor`).
  Returns:
    new_value: A Tensor matching `value`.  Accessing this tensor tests
      assertions on its rank.  If expected_rank is not a `Tensor`, then
      new_value's shape's rank has been set.
  Raises:
    ValueError: if `expected_rank` is not a `Tensor` and the rank of `value`
      is known and is not equal to `expected_rank`.
  """
  assert isinstance(value, ops.Tensor)
  # Runtime assertion: the dynamic rank must equal expected_rank.
  error_prefix = string_ops.string_join(
      ["Rank of tensor %s should be: " % value.name,
       string_ops.as_string(expected_rank),
       ", shape received:"])
  rank_assert = control_flow_ops.Assert(
      math_ops.equal(expected_rank, array_ops.rank(value)),
      [error_prefix, array_ops.shape(value)])
  with ops.control_dependencies([rank_assert]):
    new_value = array_ops.identity(value, name="rank_checked")
  # If expected_rank is a Tensor with a statically known value, fold it to
  # a python int so shape inference below can use it.
  if isinstance(expected_rank, ops.Tensor):
    static_rank = tensor_util.constant_value(expected_rank)
    if static_rank is not None:
      expected_rank = int(static_rank)
  # Static check: set the rank on the inferred shape when it is known.
  if not isinstance(expected_rank, ops.Tensor):
    try:
      new_value.set_shape(new_value.get_shape().with_rank(expected_rank))
    except ValueError as e:
      raise ValueError("Rank check failed for %s: %s"
                       % (value.name, str(e)))
  return new_value
def _check_shape(value, expected_shape):
  """Check the shape of Tensor `value`, via shape inference and assertions.
  Args:
    value: A Tensor, possibly with shape associated shape information.
    expected_shape: a `TensorShape`, list of `int32`, or a vector `Tensor`.
  Returns:
    new_value: A Tensor matching `value`.  Accessing this tensor tests
      assertions on its shape.  If expected_shape is not a `Tensor`, then
      new_value's shape has been set.
  Raises:
    ValueError: if `expected_shape` is not a `Tensor` and the shape of `value`
      is known and is not equal to `expected_shape`.
  """
  assert isinstance(value, ops.Tensor)
  # Normalize a TensorShape to a plain python list of dimensions.
  if isinstance(expected_shape, tensor_shape.TensorShape):
    expected_shape = expected_shape.as_list()
  # If expected_shape is a Tensor whose value is statically known, fold it
  # to a python list so the static checks below can use it.
  if isinstance(expected_shape, ops.Tensor):
    expected_shape_value = tensor_util.constant_value(expected_shape)
    if expected_shape_value is not None:
      expected_shape = [int(d) for d in expected_shape_value]
  # First verify the rank matches; when expected_shape is a python list this
  # also sets the static rank of `value`.
  if isinstance(expected_shape, ops.Tensor):
    value = _check_rank(value, array_ops.size(expected_shape))
  else:
    value = _check_rank(value, len(expected_shape))
  # Runtime assertion: the dynamic shape must equal expected_shape
  # element-wise.  The assertion is tied to the returned tensor.
  with ops.control_dependencies([
      control_flow_ops.Assert(
          math_ops.reduce_all(math_ops.equal(expected_shape, array_ops.shape(
              value))), [string_ops.string_join([
                  "Shape of tensor %s should be: " % value.name,
                  string_ops.as_string(expected_shape), ", shape received: ",
                  string_ops.as_string(array_ops.shape(value))
              ])])
  ]):
    new_value = array_ops.identity(value, name="shape_checked")
    # Static check: merge the expected shape into the inferred shape when
    # expected_shape is a python list.
    if not isinstance(expected_shape, ops.Tensor):
      try:
        new_value.set_shape(new_value.get_shape().merge_with(expected_shape))
      except ValueError as e:
        raise ValueError("Shape check failed for %s: %s"
                         % (value.name, str(e)))
    return new_value
def _check_dimensions(value, dimensions, expected_sizes, debug_prefix):
  """Check the dimensions of Tensor `value`, via shape inference and assertions.
  Args:
    value: A Tensor, with optional / partial shape associated shape information.
    dimensions: An int list, the dimensions to check.
    expected_sizes: list of mixed ints and int32 scalar tensors.
      Optionally also a vector `Tensor`.
    debug_prefix: A string, used for naming ops and printing debugging messages.
  Returns:
    new_value: A Tensor matching `value`.  Accessing this tensor tests
      assertions on its shape.  If expected_sizes is not a `Tensor`, then
      new_value's shape has been set for all `dimensions[i]` where
      `expected_sizes[i]` is not a `Tensor`.
  Raises:
    TypeError: if any of the input contains invalid types:
      if `value` is not a `Tensor`.
      if `dimensions` is not a `list` or `tuple`.
    ValueError: if input has incorrect sizes or inferred shapes do not match:
      if `dimensions` contains repeated dimensions.
      if `expected_sizes` is not a `Tensor` and its length does not match that
        `dimensions`.
      if `value`'s shape has a well-defined rank, and one of the values in
        `dimensions` is equal to or above this rank.
      if `value`'s shape is well defined for some `dimensions[i]`, and
        `expected_sizes[i]` is not a `Tensor`, and these two values do
        not match.
  """
  if not isinstance(dimensions, (list, tuple)):
    raise TypeError("dimensions must be a list or tuple")
  if len(set(dimensions)) != len(dimensions):
    raise ValueError("dimensions are not unique: %s" % dimensions)
  if not isinstance(value, ops.Tensor):
    raise TypeError("value is not a Tensor: %s" % value)
  value_shape = value.get_shape()
  # Static (graph-construction-time) checks: only possible when
  # expected_sizes is a python list rather than a Tensor.
  if not isinstance(expected_sizes, ops.Tensor):
    if len(dimensions) != len(expected_sizes):
      raise ValueError("len(dimensions) != len(expected_sizes): %d vs. %d" % (
          len(dimensions), len(expected_sizes)))
    if value_shape.ndims is not None:
      if value_shape.ndims <= max(dimensions):
        raise ValueError(
            "%s: rank of input is not greater than max(dimensions): "
            "%d vs. %d" % (debug_prefix,
                           value.get_shape().ndims,
                           max(dimensions)))
      value_dims = value_shape.as_list()
      # Merge the statically known expected sizes into the inferred shape;
      # sizes that are Tensors can only be checked dynamically below.
      for d, s in zip(dimensions, expected_sizes):
        if not isinstance(s, ops.Tensor):
          value_dims[d] = s
      try:
        value.set_shape(value.get_shape().merge_with(value_dims))
      except ValueError as e:
        raise ValueError("Dimensions check failed for %s: %s"
                         % (debug_prefix, str(e)))
  # Dynamic (run-time) checks: one Assert per checked dimension, all tied
  # to the returned identity tensor.
  with ops.control_dependencies([
      control_flow_ops.Assert(
          math_ops.equal(expected_size, array_ops.shape(value)[dimension]),
          [string_ops.string_join(
              ["Dimension %d of tensor labeled %s should be: "
               % (dimension, debug_prefix),
               string_ops.as_string(expected_size),
               ", shape received: ",
               string_ops.as_string(array_ops.shape(value))])])
      for (dimension, expected_size) in zip(dimensions, expected_sizes)]):
    new_value = array_ops.identity(value, name="dims_checked_%s" % debug_prefix)
  return new_value
def _prepare_sequence_inputs(inputs, states):
  """Convert input to tensors and validate shape information.
  Args:
    inputs: A `_SequenceInputWrapper` instance.
    states: A dictionary mapping state names to input constants or tensors.
  Returns:
    The tuple (length, key, sorted_states, sorted_sequences, sorted_context),
    where each value has been checked for valid shape, and the sorted_* dicts
    are instances of OrderedDict; with key-value pairs sorted by key.
  Raises:
    ValueError: if the shapes of inputs.context.values(), states.values(),
      or inputs.sequences.values() are not fully defined (with the exception
      of the first dimension of any `Tensor` in inputs.sequences.values()).
    TypeError: if the dtype of length is not int32 or the dtype of key is
      not string.
  """
  # Convert state initial values to tensors.
  states = dict((k, ops.convert_to_tensor(v, name="state_%s" % k))
                for k, v in states.items())
  def _assert_fully_defined(label, dict_, ignore_first_dimension=False):
    """Raises ValueError if any value of `dict_` lacks a fully defined shape."""
    start_dimension = 1 if ignore_first_dimension else 0
    for k, v in dict_.items():
      if not v.get_shape()[start_dimension:].is_fully_defined():
        raise ValueError(
            "Shape for %s %s is not fully defined %s: %s"
            % (label,
               k,
               "(ignoring first dimension)" if ignore_first_dimension else "",
               v.get_shape()))
  _assert_fully_defined("state", states)
  _assert_fully_defined("context", inputs.context)
  # Sequences' first dimension (time) may be variable.
  _assert_fully_defined(
      "sequence", inputs.sequences, ignore_first_dimension=True)
  # Get dictionaries' dtypes ordered by name - ordering is important
  # when switching between dicts and tuples for passing to Barrier.
  def _sort_by_name(d):
    """Returns an OrderedDict with the entries of `d` sorted by key."""
    return collections.OrderedDict(
        sorted(d.items(), key=lambda k_v: k_v[0]))
  sorted_sequences = _sort_by_name(inputs.sequences)
  sorted_context = _sort_by_name(inputs.context)
  sorted_states = _sort_by_name(states)
  # length and key must be scalars of the expected dtypes.
  length = _check_rank(inputs.length, 0)
  key = _check_rank(inputs.key, 0)
  if length.dtype != dtypes.int32:
    raise TypeError("length dtype must be int32, but received: %s"
                    % length.dtype)
  if key.dtype != dtypes.string:
    raise TypeError("key dtype must be string, but received: %s"
                    % key.dtype)
  return (length, key, sorted_states, sorted_sequences, sorted_context)
# NextQueuedSequenceBatch works closely with
# SequenceQueueingStateSaver and requires access to its private properties
# pylint: disable=protected-access
class NextQueuedSequenceBatch(object):
  """NextQueuedSequenceBatch stores deferred SequenceQueueingStateSaver data.
  This class is instantiated by `SequenceQueueingStateSaver` and is accessible
  via its `next_batch` property.
  """
  def __init__(self, state_saver):
    # Keep a handle to the owning state saver; the properties below expose
    # tensors the state saver created when reading from its barrier.
    self._state_saver = state_saver
  @property
  def total_length(self):
    """The lengths of the original (non-truncated) unrolled examples.
    Returns:
      An integer vector of length `batch_size`, the total lengths.
    """
    return self._state_saver._received_total_length
  @property
  def length(self):
    """The lengths of the given truncated unrolled examples.
    For initial iterations, for which `sequence * num_unroll < length`,
    this number is `num_unroll`.  For the remainder,
    this number is between `0` and `num_unroll`.
    Returns:
      An integer vector of length `batch_size`, the lengths.
    """
    return self._state_saver._received_length
  @property
  def batch_size(self):
    """The batch_size of the given batch.
    Usually, this is the batch_size requested when initializing the SQSS, but
    if allow_small_batch=True this will become smaller when inputs are
    exhausted.
    Returns:
      A scalar integer tensor, the batch_size.
    """
    return self._state_saver._received_batch_size
  @property
  def insertion_index(self):
    """The insertion indices of the examples (when they were first added).
    These indices start with the value -2**63 and increase with every
    call to the prefetch op.  Each whole example gets its own insertion
    index, and this is used to prioritize the example so that its truncated
    segments appear in adjacent iterations, even if new examples are inserted
    by the prefetch op between iterations.
    Returns:
      An int64 vector of length `batch_size`, the insertion indices.
    """
    return self._state_saver._received_indices
  @property
  def key(self):
    """The key names of the given truncated unrolled examples.
    The format of the key is:
    ```python
    "%05d_of_%05d:%s" % (sequence, sequence_count, original_key)
    ```
    where `original_key` is the unique key read in by the prefetcher.
    Returns:
      A string vector of length `batch_size`, the keys.
    """
    return self._state_saver._received_keys
  @property
  def next_key(self):
    """The key names of the next (in iteration) truncated unrolled examples.
    The format of the key is:
    ```python
    "%05d_of_%05d:%s" % (sequence + 1, sequence_count, original_key)
    ```
    if `sequence + 1 < sequence_count`, otherwise:
    ```python
    "STOP:%s" % original_key
    ```
    where `original_key` is the unique key read in by the prefetcher.
    Returns:
      A string vector of length `batch_size`, the keys.
    """
    return self._state_saver._received_next_key
  @property
  def sequence(self):
    """An int32 vector, length `batch_size`: the sequence index of each entry.
    When an input is split up, the sequence values
    ```
    0, 1, ..., sequence_count - 1
    ```
    are assigned to each split.
    Returns:
      An int32 vector `Tensor`.
    """
    return self._state_saver._received_sequence
  @property
  def sequence_count(self):
    """An int32 vector, length `batch_size`: the sequence count of each entry.
    When an input is split up, the number of splits is equal to:
    `padded_length / num_unroll`.  This is the sequence_count.
    Returns:
      An int32 vector `Tensor`.
    """
    return self._state_saver._received_sequence_count
  @property
  def context(self):
    """A dict mapping keys of `input_context` to batched context.
    Returns:
      A dict mapping keys of `input_context` to tensors.
      If we had at input:
      ```python
      context["name"].get_shape() == [d1, d2, ...]
      ```
      then for this property:
      ```python
      context["name"].get_shape() == [batch_size, d1, d2, ...]
      ```
    """
    return self._state_saver._received_context
  @property
  def sequences(self):
    """A dict mapping keys of `input_sequences` to split and rebatched data.
    Returns:
      A dict mapping keys of `input_sequences` to tensors.
      If we had at input:
      ```python
      sequences["name"].get_shape() == [None, d1, d2, ...]
      ```
      where `None` meant the sequence time was dynamic, then for this property:
      ```python
      sequences["name"].get_shape() == [batch_size, num_unroll, d1, d2, ...].
      ```
    """
    return self._state_saver._received_sequences
  def state(self, state_name):
    """Returns batched state tensors.
    Args:
      state_name: string, matches a key provided in `initial_states`.
    Returns:
      A `Tensor`: a batched set of states, either initial states (if this is
        the first run of the given example), or a value as stored during
        a previous iteration via `save_state` control flow.
      Its type is the same as `initial_states["state_name"].dtype`.
      If we had at input:
      ```python
      initial_states[state_name].get_shape() == [d1, d2, ...],
      ```
      then
      ```python
      state(state_name).get_shape() == [batch_size, d1, d2, ...]
      ```
    Raises:
      KeyError: if `state_name` does not match any of the initial states
        declared in `initial_states`.
    """
    return self._state_saver._received_states[state_name]
  def save_state(self, state_name, value, name=None):
    """Returns an op to save the current batch of state `state_name`.
    Args:
      state_name: string, matches a key provided in `initial_states`.
      value: A `Tensor`.
        Its type must match that of `initial_states[state_name].dtype`.
        If we had at input:
        ```python
        initial_states[state_name].get_shape() == [d1, d2, ...]
        ```
        then the shape of `value` must match:
        ```python
        tf.shape(value) == [batch_size, d1, d2, ...]
        ```
      name: string (optional).  The name scope for newly created ops.
    Returns:
      A control flow op that stores the new state of each entry into
      the state saver.  This op must be run for every iteration that
      accesses data from the state saver (otherwise the state saver
      will never progress through its states and run out of capacity).
    Raises:
      KeyError: if `state_name` does not match any of the initial states
        declared in `initial_states`.
    """
    if state_name not in self._state_saver._received_states.keys():
      raise KeyError("state was not declared: %s" % state_name)
    default_name = "InputQueueingStateSaver_SaveState"
    with ops.name_scope(name, default_name, values=[value]):
      # Place all operations on the CPU. Barriers and queues are only
      # implemented for CPU, but all the other book-keeping operations
      # (reshape, shape, range, ...) would be placed on GPUs if available,
      # unless we explicitly tie them to CPU.
      with ops.colocate_with(self._state_saver._capacity_queue.queue_ref):
        # States of finished sequences are dropped: only entries whose
        # sequence still has segments remaining are re-inserted into the
        # barrier, keyed by the key of their *next* segment.
        indices_where_not_done = array_ops.reshape(array_ops.where(
            math_ops.logical_not(self._state_saver._sequence_is_done)), [-1])
        keeping_next_key = array_ops.gather(
            self._state_saver._received_next_key, indices_where_not_done)
        # Verify the saved state has the same dynamic shape as the state
        # that was handed out for this batch.
        value = _check_shape(
            array_ops.identity(value, name="convert_%s" % state_name),
            array_ops.shape(self._state_saver._received_states[state_name]))
        keeping_state = array_ops.gather(value, indices_where_not_done)
        return self._state_saver._barrier.insert_many(
            self._state_saver._get_barrier_index("state", state_name),
            keeping_next_key, keeping_state,
            name="BarrierInsertState_%s" % state_name)
# pylint: enable=protected-access
class SequenceQueueingStateSaver(object):
"""SequenceQueueingStateSaver provides access to stateful values from input.
This class is meant to be used instead of, e.g., a `Queue`, for splitting
variable-length sequence inputs into segments of sequences with fixed length
and batching them into mini-batches. It maintains contexts and state for a
sequence across the segments. It can be used in conjunction with a
`QueueRunner` (see the example below).
The `SequenceQueueingStateSaver` (SQSS) accepts one example at a time via the
inputs `input_length`, `input_key`, `input_sequences` (a dict),
`input_context` (a dict), and `initial_states` (a dict).
The sequences, values in `input_sequences`, may have variable first dimension
(the `padded_length`), though this dimension must always be a multiple of
`num_unroll`. All other dimensions must be fixed and accessible via
`get_shape` calls. The length prior to padding can be recorded in
`input_length`. The context values in `input_context` must all have fixed and
well defined dimensions. The initial state values must all have fixed and
well defined dimensions.
The SQSS splits the sequences of an input example into segments of length
`num_unroll`. Across examples minibatches of size `batch_size` are formed.
These minibatches contain a segment of the sequences, copy the context values,
and maintain state, length, and key information of the original input
examples. In the first segment of an example the state is still the initial
state. It can then be updated; and updated state values are accessible in
subsequent segments of the same example. After each segment
`batch.save_state()` must be called which is done by the state_saving_rnn.
Without this call, the dequeue op associated with the SQSS will not run.
Internally, SQSS has a queue for the input examples. Its `capacity` is
configurable. If set smaller than `batch_size` then the dequeue op will block
indefinitely. A small multiple of `batch_size` is a good rule of thumb to
prevent that queue from becoming a bottleneck and slowing down training.
If set too large (and note that it defaults to unbounded) memory consumption
goes up. Moreover, when iterating over the same input examples multiple times
reusing the same `key` the `capacity` must be smaller than the number of
examples.
The prefetcher, which reads one unrolled, variable-length input sequence at
a time, is accessible via `prefetch_op`. The underlying `Barrier` object
is accessible via `barrier`. Processed minibatches, as well as
state read and write capabilities are accessible via `next_batch`.
Specifically, `next_batch` provides access to all of the minibatched
data, including the following, see `NextQueuedSequenceBatch` for details:
* `total_length`, `length`, `insertion_index`, `key`, `next_key`,
* `sequence` (the index each minibatch entry's time segment index),
* `sequence_count` (the total time segment count for each minibatch entry),
* `context` (a dict of the copied minibatched context values),
* `sequences` (a dict of the split minibatched variable-length sequences),
* `state` (to access the states of the current segments of these entries)
* `save_state` (to save the states for the next segments of these entries)
Example usage:
```python
batch_size = 32
num_unroll = 20
lstm_size = 8
cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=lstm_size)
initial_state_values = tf.zeros(cell.state_size, dtype=tf.float32)
raw_data = get_single_input_from_input_reader()
length, key, sequences, context = my_parser(raw_data)
assert "input" in sequences.keys()
assert "label" in context.keys()
initial_states = {"lstm_state": initial_state_value}
stateful_reader = tf.SequenceQueueingStateSaver(
batch_size, num_unroll,
length=length, input_key=key, input_sequences=sequences,
input_context=context, initial_states=initial_states,
capacity=batch_size*100)
batch = stateful_reader.next_batch
inputs = batch.sequences["input"]
context_label = batch.context["label"]
inputs_by_time = tf.split(1, num_unroll, inputs)
assert len(inputs_by_time) == num_unroll
lstm_output, _ = tf.nn.state_saving_rnn(
cell,
inputs_by_time,
state_saver=batch,
state_name="lstm_state")
# Start a prefetcher in the background
sess = tf.Session()
num_threads = 3
queue_runner = tf.train.QueueRunner(
stateful_reader, [stateful_reader.prefetch_op] * num_threads)
tf.train.add_queue_runner(queue_runner)
tf.train.start_queue_runners(sess=session)
while True:
# Step through batches, perform training or inference...
session.run([lstm_output])
```
**Note**: Usually the barrier is given to a QueueRunner as in the
examples above. The QueueRunner will close the barrier if the prefetch_op
receives an OutOfRange Error from upstream input queues (i.e., reaches
the end of the input). If the barrier is closed no further new examples
are added to the SQSS. The underlying barrier might, however, still
contain further unroll-steps of examples that have not undergone all
iterations. To gracefully finish all examples, the flag
`allow_small_batch` must be set to true, which causes the SQSS to issue
progressively smaller mini-batches with the remaining examples.
"""
  def __init__(self,
               batch_size,
               num_unroll,
               input_length,
               input_key,
               input_sequences,
               input_context,
               initial_states,
               capacity=None,
               allow_small_batch=False,
               name=None):
    """Creates the SequenceQueueingStateSaver.
    Args:
      batch_size: int or int32 scalar `Tensor`, how large minibatches should
        be when accessing the `state()` method and `context`, `sequences`, etc,
        properties.
      num_unroll: Python integer, how many time steps to unroll at a time.
        The input sequences of length `k` are then split into `k / num_unroll`
        many segments.
      input_length: An int32 scalar `Tensor`, the length of the sequence prior
        to padding.  This value may be at most `padded_length` for any given
        input (see below for the definition of `padded_length`).
        Batched and total lengths of the current iteration are made accessible
        via the `length` and `total_length` properties.  The shape of
        input_length (scalar) must be fully specified.
      input_key: A string scalar `Tensor`, the **unique** key for the given
        input.  This is used to keep track of the split minibatch elements
        of this input.  Batched keys of the current iteration are made
        accessible via the `key` property.  The shape of `input_key` (scalar)
        must be fully specified.
      input_sequences: A dict mapping string names to `Tensor` values.  The
        values must all have matching first dimension, called `padded_length`.
        The `SequenceQueueingStateSaver` will split these tensors along
        this first dimension into minibatch elements of dimension
        `num_unroll`.  Batched and segmented sequences of the current iteration
        are made accessible via the `sequences` property.
        **Note**: `padded_length` may be dynamic, and may vary from input
        to input, but must always be a multiple of `num_unroll`.  The remainder
        of the shape (other than the first dimension) must be fully specified.
      input_context: A dict mapping string names to `Tensor` values.  The
        values are treated as "global" across all time splits of the given
        input, and will be copied across for all minibatch elements
        accordingly.  Batched and copied context of the current iteration are
        made accessible via the `context` property.
        **Note**: All input_context values must have fully defined shapes.
      initial_states: A dict mapping string state names to multi-dimensional
        values (e.g. constants or tensors).  This input defines the set of
        states that will be kept track of during computing iterations, and
        which can be accessed via the `state` and `save_state` methods.
        **Note**: All initial_state values must have fully defined shapes.
      capacity: The max capacity of the SQSS in number of examples.  Needs to
        be at least `batch_size`.  Defaults to unbounded.
      allow_small_batch: If true, the SQSS will return smaller batches when
        there aren't enough input examples to fill a whole batch and the end
        of the input has been reached (i.e., the underlying barrier has been
        closed).
      name: An op name string (optional).
    Raises:
      TypeError: if any of the inputs is not an expected type.
      ValueError: if any of the input values is inconsistent, e.g. if
        not enough shape information is available from inputs to build
        the state saver.
    """
    if capacity is not None and capacity < batch_size:
      raise ValueError("capacity must be larger or equal to batch_size")
    # The barrier is ignorant of the number of actual examples, since a long
    # example that requires many iterations produces more elements in the
    # barrier than a short example. Furthermore, we don't have an upper bound
    # on the length of examples, and hence have to keep the capacity of the
    # barrier at infinite to avoid dead-lock. Instead we have to keep track of
    # the number of active examples in this class, and block the prefetch_op
    # when capacity is reached. To this end, we employ a FIFOQueue in which we
    # store one token (its value doesn't matter) for each input example, and
    # dequeue a token for each completed example. Since the capacity of this
    # queue is limited the enqueue operation will block if capacity is reached.
    self._capacity_queue = data_flow_ops.FIFOQueue(capacity=capacity,
                                                   dtypes=[dtypes.int32],
                                                   shapes=[[]])
    # Place all operations on the CPU. Barriers and queues are only implemented
    # for CPU, but all the other book-keeping operations
    # (reshape, shape, range, ...) would be placed on GPUs if available,
    # unless we explicitly tie them to CPU.
    with ops.colocate_with(self._capacity_queue.queue_ref):
      if not isinstance(initial_states, dict):
        raise TypeError("initial_states must be a dictionary")
      if not initial_states:
        raise ValueError(
            "initial_states may not be empty: at least one state variable is "
            "required to properly enqueue split sequences to run in separate "
            "iterations")
      # State names become barrier component names, so they must be strings
      # without colons (the colon is the name separator).
      for k in initial_states:
        if not isinstance(k, six.string_types):
          raise TypeError("state name must be a string: %s" % k)
        if ":" in k:
          raise ValueError("state name may not have a colon: '%s'" % k)
      op_vars = ([input_length, input_key]
                 + list(input_sequences.values())
                 + list(input_context.values()))
      with ops.name_scope(name, "InputQueueingStateSaver", op_vars) as scope:
        inputs = _SequenceInputWrapper(
            input_length, input_key, input_sequences, input_context)
        self._batch_size = batch_size
        self._num_unroll = num_unroll
        self._name = scope
        # This step makes sure all shapes are well defined.  We can now
        # use get_shape() on any tensor in the output of this function
        # and get a fully-defined shape.
        (self._length, self._key, self._sorted_states, self._sorted_sequences,
         self._sorted_context) = _prepare_sequence_inputs(inputs,
                                                          initial_states)
        self._padded_length = array_ops.identity(
            array_ops.shape(
                six.next(six.itervalues(self._sorted_sequences)))[0],
            name="padded_length")  # The name is useful for debugging
        # padded_length must be a non-zero multiple of num_unroll so each
        # sequence splits into an integral number of segments.
        self._padded_length = _check_multiple_of(
            self._padded_length, self._num_unroll)
        # All sequences must share the same (dynamic) first dimension,
        # the padded_length computed above.
        self._sorted_sequences = collections.OrderedDict(
            (k, _check_dimensions(v, [0], [self._padded_length],
                                  debug_prefix="sorted_sequences_%s" % k))
            for k, v in self._sorted_sequences.items())
        self._uninitialized_states = self._sorted_states
        # Once this is set, self._get_barrier_*_index are available for use.
        self._store_index_maps(
            self._sorted_sequences, self._sorted_context, self._sorted_states)
        # Make sure that the length is <= the padded_length
        with ops.control_dependencies([
            control_flow_ops.Assert(
                math_ops.less_equal(self._length, self._padded_length),
                ["Input length should be <= than length from sequences:",
                 self._length, " vs. ", self._padded_length])]):
          self._length = array_ops.identity(self._length)
        # Only create barrier; enqueue and dequeue operations happen when you
        # access prefetch_op and next_batch.
        self._create_barrier()
        self._scope = scope
        self._allow_small_batch = allow_small_batch
        self._prefetch_op = None
        self._next_batch = None
  @property
  def name(self):
    """The name scope of this state saver's ops (string)."""
    return self._name
  @property
  def barrier(self):
    """The underlying `Barrier` object used to buffer sequence segments."""
    return self._barrier
  @property
  def batch_size(self):
    """The requested minibatch size (int or int32 scalar `Tensor`)."""
    return self._batch_size
  @property
  def num_unroll(self):
    """Number of time steps per segment (python integer)."""
    return self._num_unroll
  @property
  def prefetch_op(self):
    """The op used to prefetch new data into the state saver.
    Running it once enqueues one new input example into the state saver.
    The first time this gets called, it additionally creates the prefetch_op.
    Subsequent calls simply return the previously created `prefetch_op`.
    It should be run in a separate thread via e.g. a `QueueRunner`.
    Returns:
      An `Operation` that performs prefetching.
    """
    # Lazily build the op the first time it is requested.
    if not self._prefetch_op:
      # ops.name_scope(None) clears any surrounding scope so the op is
      # created under this saver's own scope.
      with ops.name_scope(None), ops.name_scope(
          self._scope, values=[self._barrier.barrier_ref]):
        self._create_prefetch_op()
    return self._prefetch_op
  @property
  def next_batch(self):
    """The `NextQueuedSequenceBatch` providing access to batched output data.
    Also provides access to the `state` and `save_state` methods.
    The first time this gets called, it additionally prepares barrier reads
    and creates `NextQueuedSequenceBatch` / next_batch objects.  Subsequent
    calls simply return the previously created `next_batch`.
    In order to access data in `next_batch` without blocking, the `prefetch_op`
    must have been run at least `batch_size` times (ideally in a separate
    thread, or launched via a `QueueRunner`).  After processing a segment in
    `next_batch()`, `batch.save_state()` must be called which is done by the
    state_saving_rnn.  Without this call, the dequeue op associated with the
    SQSS will not run.
    Returns:
      A cached `NextQueuedSequenceBatch` instance.
    """
    # This is needed to prevent errors if next_batch is called before
    # prefetch_op is created.
    if not self._prefetch_op:
      with ops.name_scope(None), ops.name_scope(
          self._scope, values=[self._barrier.barrier_ref]):
        self._create_prefetch_op()
    # Lazily build the batch reads the first time they are requested.
    if not self._next_batch:
      with ops.name_scope(None), ops.name_scope(
          self._scope, values=[self._barrier.barrier_ref]):
        self._prepare_barrier_reads()
    return self._next_batch
  def close(self, cancel_pending_enqueues=False, name=None):
    """Closes the barrier and the FIFOQueue.
    This operation signals that no more segments of new sequences will be
    enqueued. New segments of already inserted sequences may still be enqueued
    and dequeued if there is a sufficient number filling a batch or
    allow_small_batch is true. Otherwise dequeue operations will fail
    immediately.
    Args:
      cancel_pending_enqueues: (Optional.) A boolean, defaulting to
        `False`. If `True`, all pending enqueues to the underlying queues will
        be cancelled, and completing already started sequences is not possible.
      name: Optional name for the op.
    Returns:
      The operation that closes the barrier and the FIFOQueue.
    """
    with ops.name_scope(name, "SQSSClose", [self._prefetch_op]) as name:
      barrier_close = self.barrier.close(
          cancel_pending_enqueues, "BarrierClose")
      fifo_queue_close = self._capacity_queue.close(
          cancel_pending_enqueues, "FIFOClose")
      # Group both closes into a single op so callers can run them atomically.
      return control_flow_ops.group(barrier_close, fifo_queue_close, name=name)
def _store_index_maps(self, sequences, context, states):
"""Prepares the internal dictionaries _name_to_index and _index_to_name.
These dictionaries are used to keep track of indices into the barrier.
Args:
sequences: `OrderedDict` of string, `Tensor` pairs.
context: `OrderedDict` of string, `Tensor` pairs.
states: `OrderedDict` of string, `Tensor` pairs.
"""
assert isinstance(sequences, dict)
assert isinstance(context, dict)
assert isinstance(states, dict)
self._name_to_index = dict((name, ix) for (ix, name) in enumerate(
["__length", "__total_length", "__next_key",
"__sequence", "__sequence_count"]
+ ["__sequence__%s" % k for k in sequences.keys()]
+ ["__context__%s" % k for k in context.keys()]
+ ["__state__%s" % k for k in states.keys()]))
self._index_to_name = [
name for (name, _) in sorted(
self._name_to_index.items(), key=lambda n_ix: n_ix[1])]
  def _get_barrier_length_index(self):
    """Barrier value index of the per-segment (unpadded) length."""
    return self._name_to_index["__length"]
  def _get_barrier_total_length_index(self):
    """Barrier value index of the total pre-segmentation length."""
    return self._name_to_index["__total_length"]
  def _get_barrier_next_key_index(self):
    """Barrier value index of the key of the following segment."""
    return self._name_to_index["__next_key"]
  def _get_barrier_sequence_index(self):
    """Barrier value index of the segment's position within its sequence."""
    return self._name_to_index["__sequence"]
  def _get_barrier_sequence_count_index(self):
    """Barrier value index of the sequence's total segment count."""
    return self._name_to_index["__sequence_count"]
  def _get_barrier_index(self, index_type, name):
    """Barrier value index of a named sequence/context/state component.
    Args:
      index_type: One of "sequence", "context", or "state".
      name: String name of the component within that group.
    Returns:
      The integer index into the barrier's value list.
    """
    assert index_type in ("sequence", "context", "state")
    key = "__%s__%s" % (index_type, name)
    assert key in self._name_to_index, (
        "Requested a name not in the value type %s: %s" % (index_type, name))
    return self._name_to_index[key]
  def _create_barrier(self):
    """Create the barrier.
    This method initializes the Barrier object with the right types and shapes.
    The slot order matches the index maps built by `_store_index_maps`: five
    scalar bookkeeping slots followed by sequences, context, and states.
    """
    # Create the barrier
    sequence_dtypes = [v.dtype for k, v in self._sorted_sequences.items()]
    context_dtypes = [v.dtype for k, v in self._sorted_context.items()]
    state_dtypes = [v.dtype for k, v in self._sorted_states.items()]
    types = ([dtypes.int32, # length
              dtypes.int32, # total_length
              dtypes.string, # next_keys
              dtypes.int32, # sequence
              dtypes.int32] # expanded_sequence_count
             + sequence_dtypes + context_dtypes + state_dtypes)
    # Each sequence slot holds one segment: num_unroll time steps plus the
    # input's remaining (non-time) dimensions.
    sequence_shapes = [
        [self._num_unroll] + self._sorted_sequences[k].get_shape().as_list()[1:]
        for k in self._sorted_sequences.keys()]
    context_shapes = [
        self._sorted_context[k].get_shape().as_list()
        for k in self._sorted_context.keys()]
    state_shapes = [
        self._sorted_states[k].get_shape().as_list()
        for k in self._sorted_states.keys()]
    shapes = ([(), # length
               (), # total_length
               (), # next_keys
               (), # sequence
               ()] # expanded_sequence_count
              + sequence_shapes + context_shapes + state_shapes)
    self._barrier = data_flow_ops.Barrier(types=types, shapes=shapes)
  def _create_prefetch_op(self):
    """Group insert_many ops and create prefetch_op.
    This method implements the "meat" of the logic underlying the
    `SequenceQueueingStateSaver`. It performs dynamic reshaping of
    sequences, copying of context, and initial insertion of these values,
    as well as the key, next_key, sequence, sequence_count, and initial
    states into the barrier.
    """
    # Step 1: identify how many barrier entries to split this input
    # into, store the result as a scalar
    sequence_count = math_ops.div(self._padded_length, self._num_unroll)
    sequence_count_vec = array_ops.expand_dims(sequence_count, 0)
    # The final unrolled sequence's length is num_unroll only in
    # the case that num_unroll divides it evenly.
    ones = array_ops.ones(sequence_count_vec, dtype=dtypes.int32)
    sequence = math_ops.range(sequence_count)
    # Per-segment unpadded length: num_unroll for all but (possibly) the last
    # segment, which gets the remainder (clamped at 0 for all-padding tails).
    expanded_length = math_ops.maximum(
        0, self._length - self._num_unroll * sequence)
    expanded_length = math_ops.minimum(self._num_unroll, expanded_length)
    expanded_total_length = self._length * ones
    expanded_sequence_count = sequence_count * ones
    # Step 2: build per-segment keys of the form "00003_of_00010:<key>" so
    # segments of one example sort and chain together.
    current_keys = string_ops.string_join(
        [string_ops.as_string(sequence, width=5, fill="0"),
         "_of_",
         string_ops.as_string(sequence_count, width=5, fill="0"),
         ":",
         self._key],
        name="StringJoinCurrentKeys")
    # Each segment's next_key is the following segment's key; the last
    # segment points at a sentinel "STOP:<key>".
    next_keys = array_ops.concat(
        0, [array_ops.slice(current_keys, [1], [-1]),
            array_ops.expand_dims(string_ops.string_join(
                ["STOP:", self._key], name="StringJoinStop"), 0)],
        name="concat_next_keys")
    # Step 3: reshape sequences into (sequence_count, num_unroll, ...) and
    # tile context to one copy per segment.
    reshaped_sequences = collections.OrderedDict(
        (k, _check_dimensions(
            # Reshape sequences to sequence_count rows
            array_ops.reshape(
                v, array_ops.concat(
                    0, [array_ops.expand_dims(sequence_count, 0),
                        array_ops.expand_dims(self._num_unroll, 0),
                        v.get_shape().as_list()[1:]],
                    name="concat_sequences_%s" % k),
                name="reshape_sequences_%s" % k),
            [0, 1] + list(range(2, v.get_shape().ndims + 1)),
            [sequence_count, self._num_unroll] + v.get_shape().as_list()[1:],
            debug_prefix="reshaped_sequences_%s" % k))
        for k, v in self._sorted_sequences.items())
    expanded_context = collections.OrderedDict(
        (k, _check_dimensions(
            # Copy context to be sequence_count rows
            array_ops.tile(
                array_ops.expand_dims(v, 0),
                array_ops.concat(
                    0, [array_ops.expand_dims(sequence_count, 0),
                        [1] * v.get_shape().ndims],
                    name="concat_context_%s" % k),
                name="tile_context_%s" % k),
            [0] + list(range(1, v.get_shape().ndims + 1)),
            [sequence_count] + v.get_shape().as_list(),
            debug_prefix="expanded_context_%s" % k))
        for k, v in self._sorted_context.items())
    # Storing into the barrier, for each current_key:
    # sequence_ix, sequence_count, next_key, length,
    # context... (copied), sequences... (truncated)
    # Also storing into the barrier for the first key
    # states (using initial_states).
    insert_sequence_op = self._barrier.insert_many(
        self._get_barrier_sequence_index(),
        current_keys, sequence,
        name="BarrierInsertSequence")
    insert_sequence_count_op = self._barrier.insert_many(
        self._get_barrier_sequence_count_index(),
        current_keys, expanded_sequence_count,
        name="BarrierInsertSequenceCount")
    insert_next_key_op = self._barrier.insert_many(
        self._get_barrier_next_key_index(),
        current_keys, next_keys,
        name="BarrierInsertNextKey")
    insert_length_op = self._barrier.insert_many(
        self._get_barrier_length_index(),
        current_keys, expanded_length,
        name="BarrierInsertLength")
    insert_total_length_op = self._barrier.insert_many(
        self._get_barrier_total_length_index(),
        current_keys, expanded_total_length,
        name="BarrierInsertTotalLength")
    insert_context_ops = dict(
        (name, self._barrier.insert_many(
            self._get_barrier_index("context", name),
            current_keys, value,
            name="BarrierInsertContext_%s" % name))
        for (name, value) in expanded_context.items())
    insert_sequences_ops = dict(
        (name, self._barrier.insert_many(
            self._get_barrier_index("sequence", name),
            current_keys, value,
            name="BarrierInsertSequences_%s" % name))
        for (name, value) in reshaped_sequences.items())
    # An op that blocks if we reached capacity in number of active examples.
    TOKEN_WITH_IGNORED_VALUE = 21051976  # pylint: disable=invalid-name
    insert_capacity_token_op = self._capacity_queue.enqueue((
        TOKEN_WITH_IGNORED_VALUE,))
    # Insert just the initial state. Specifically force this to run
    # the insert sequence op *first* so that the Barrier receives
    # an insert with *all* the segments and the segments all get the same index.
    with ops.control_dependencies([insert_sequence_op,
                                   insert_capacity_token_op]):
      insert_initial_state_ops = dict(
          (name, self._barrier.insert_many(
              self._get_barrier_index("state", name),
              array_ops.pack([current_keys[0]]), array_ops.pack([value]),
              name="BarrierInitialInsertState_%s" % name))
          for (name, value) in self._uninitialized_states.items())
    # Group every insert into a single prefetch op so one run enqueues a
    # complete example.
    all_inserts = (
        [insert_capacity_token_op,
         insert_sequence_op,
         insert_sequence_count_op,
         insert_next_key_op,
         insert_length_op,
         insert_total_length_op]
        + list(insert_initial_state_ops.values())
        + list(insert_context_ops.values())
        + list(insert_sequences_ops.values()))
    self._prefetch_op = control_flow_ops.group(
        *all_inserts, name="StateSaverPrefetchGroup")
  def _prepare_barrier_reads(self):
    """Creates ops for reading the barrier, as used by properties like `length`.
    """
    # Ops for reading from the barrier. These ops must be run in a
    # different thread than the prefetcher op to avoid blocking.
    received = self._barrier.take_many(self._batch_size,
                                       self._allow_small_batch,
                                       name="BarrierTakeMany")
    # take_many returns (indices, keys, list-of-values); unpack the values by
    # the slot indices established in _store_index_maps.
    self._received_indices = received[0]
    self._received_keys = received[1]
    received_values = received[2]
    self._received_sequence = received_values[
        self._get_barrier_sequence_index()]
    self._received_sequence_count = received_values[
        self._get_barrier_sequence_count_index()]
    self._received_next_key = received_values[
        self._get_barrier_next_key_index()]
    self._received_length = received_values[
        self._get_barrier_length_index()]
    self._received_total_length = received_values[
        self._get_barrier_total_length_index()]
    self._received_context = collections.OrderedDict(
        (name, received_values[self._get_barrier_index("context", name)])
        for name in self._sorted_context.keys())
    self._received_sequences = collections.OrderedDict(
        (name, received_values[self._get_barrier_index("sequence", name)])
        for name in self._sorted_sequences.keys())
    self._received_batch_size = array_ops.squeeze(array_ops.shape(
        self._received_length))
    # Which examples are we done with?
    self._sequence_is_done = (
        self._received_sequence + 1 >= self._received_sequence_count)
    # Compute the number of finished sequences and dequeue as many tokens from
    # the capacity queue.
    finished_sequences = (math_ops.reduce_sum(math_ops.cast(
        self._sequence_is_done, dtypes.int32)))
    # TODO(ebrevdo): convert to dequeue_up_to when FIFOQueue supports it.
    dequeue_op = self._capacity_queue.dequeue_many(finished_sequences)
    # Tie the dequeue_op to the received_state, such that it is definitely
    # carried out.
    with ops.control_dependencies([dequeue_op]):
      self._received_states = collections.OrderedDict((
          name, array_ops.identity(received_values[self._get_barrier_index(
              "state", name)])) for name in self._sorted_states.keys())
    self._next_batch = NextQueuedSequenceBatch(self)
def batch_sequences_with_states(input_key, input_sequences, input_context,
                                input_length, initial_states, num_unroll,
                                batch_size, num_threads=3, capacity=1000,
                                allow_small_batch=True, pad=True, name=None):
  """Creates batches of segments of sequential input.
  This method creates a `SequenceQueueingStateSaver` (SQSS) and adds it to
  the queuerunners. It returns a `NextQueuedSequenceBatch`.
  It accepts one example at a time identified by a unique `input_key`.
  `input_sequence` is a dict with values that are tensors with time as first
  dimension. This time dimension must be the same across those tensors of an
  example. It can vary across examples. Although it always has to be a multiple
  of `num_unroll`. Hence, padding may be necessary and it is turned on by
  default by `pad=True`.
  `input_length` is a Tensor scalar or an int recording the time dimension prior
  to padding. It should be between 0 and the time dimension. One reason we want
  to keep track of it is so that we can take it into consideration when
  computing the loss. If `pad=True` then `input_length` can be `None` and will
  be inferred.
  This method segments `input_sequence` into segments of length `num_unroll`.
  It batches input sequences from `batch_size` many examples. These mini-batches
  are available through the `sequence` property of the output. Moreover, for
  each entry in the batch we can access its original `input_key` in `key` and
  its input length in `total_length`. `length` records within this segment how
  many non-padded time steps there are.
  Static features of an example that do not vary across time can be part of the
  `input_context`, a dict with Tensor values. This method copies the context for
  each segment and makes it available in the `context` of the output.
  This method can maintain and update a state for each example. It accepts some
  initial_states as a dict with Tensor values. The first mini-batch that
  contains an example has initial_states as entry of the `state`. If save_state
  is called then the next segment will have the updated entry of the `state`.
  See `NextQueuedSequenceBatch` for a complete list of properties and methods.
  Example usage:
  ```python
  batch_size = 32
  num_unroll = 20
  num_enqueue_threads = 3
  lstm_size = 8
  cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=lstm_size)
  key, sequences, context = my_parser(raw_data)
  initial_state_values = tf.zeros((cell.state_size,), dtype=tf.float32)
  initial_states = {"lstm_state": initial_state_values}
  batch = tf.batch_sequences_with_states(
      input_key=key,
      input_sequences=sequences,
      input_context=context,
      initial_states=initial_states,
      num_unroll=num_unroll,
      batch_size=batch_size,
      num_threads=num_enqueue_threads,
      capacity=batch_size * num_enqueue_threads * 2)
  inputs = batch.sequences["input"]
  context_label = batch.context["label"]
  inputs_by_time = tf.split(1, num_unroll, inputs)
  assert len(inputs_by_time) == num_unroll
  lstm_output, _ = tf.nn.state_saving_rnn(
      cell,
      inputs_by_time,
      state_saver=batch,
      state_name="lstm_state")
  # Start a prefetcher in the background
  session = tf.Session()
  tf.train.start_queue_runners(sess=session)
  while True:
    # Step through batches, perform training or inference...
    session.run([lstm_output])
  ```
  Args:
    input_key: A string scalar `Tensor`, the **unique** key for the given
      input example. This is used to keep track of the split minibatch elements
      of this input. Batched keys of the current iteration are made
      accessible via the `key` property. The shape of `input_key` (scalar) must
      be fully specified.
    input_sequences: A dict mapping string names to `Tensor` values. The values
      must all have matching first dimension, called `value_length`. They may
      vary from input to input. The remainder of the shape (other than the first
      dimension) must be fully specified.
      The `SequenceQueueingStateSaver` will split these tensors along
      this first dimension into minibatch elements of dimension `num_unrolled`.
      Batched and segmented sequences of the current iteration are made
      accessible via the `sequences` property.
      **Note**: if `pad=False`, then `value_length` must always be a multiple
      of `num_unroll`.
    input_context: A dict mapping string names to `Tensor` values. The values
      are treated as "global" across all time splits of the given input example,
      and will be copied across for all minibatch elements accordingly.
      Batched and copied context of the current iteration are made
      accessible via the `context` property.
      **Note**: All input_context values must have fully defined shapes.
    input_length: None or an int32 scalar `Tensor`, the length of the sequence
      prior to padding. If `input_length=None` and `pad=True` then the length
      will be inferred and will be equal to `value_length`. If `pad=False` then
      `input_length` cannot be `None`: `input_length` must be specified. Its
      shape of `input_length` (scalar) must be fully specified. Its value may be
      at most `value_length` for any given input (see above for the definition
      of `value_length`). Batched and total lengths of the current iteration are
      made accessible via the `length` and `total_length` properties.
    initial_states: A dict mapping string state names to multi-dimensional
      values (e.g. constants or tensors). This input defines the set of
      states that will be kept track of during computing iterations, and
      which can be accessed via the `state` and `save_state` methods.
      **Note**: All initial_state values must have fully defined shapes.
    num_unroll: Python integer, how many time steps to unroll at a time.
      The input sequences of length k are then split into k / num_unroll many
      segments.
    batch_size: int or int32 scalar `Tensor`, how large minibatches should
      be when accessing the `state()` method and `context`, `sequences`, etc,
      properties.
    num_threads: The int number of threads enqueuing input examples into a
      queue.
    capacity: The max capacity of the queue in number of examples. Needs to be
      at least `batch_size`. Defaults to 1000. When iterating over the same
      input example multiple times reusing their keys the `capacity` must be
      smaller than the number of examples.
    allow_small_batch: If true, the queue will return smaller batches when
      there aren't enough input examples to fill a whole batch and the end of
      the input has been reached.
    pad: If `True`, `input_sequences` will be padded to multiple of
      `num_unroll`. In that case `input_length` may be `None` and is assumed to
      be the length of first dimension of values in `input_sequences`
      (i.e. `value_length`).
    name: An op name string (optional).
  Returns:
    A NextQueuedSequenceBatch with segmented and batched inputs and their
    states.
  Raises:
    TypeError: if any of the inputs is not an expected type.
    ValueError: if any of the input values is inconsistent, e.g. if
      not enough shape information is available from inputs to build
      the state saver.
  """
  tensor_list = (
      list(input_sequences.values()) + list(input_context.values()) +
      list(initial_states.values()))
  with ops.name_scope(name, "batch_sequences_with_states", tensor_list) as name:
    if pad:
      # Pad out to a multiple of num_unroll, inferring input_length if needed.
      length, input_sequences = _padding(input_sequences, num_unroll)
      input_length = input_length if input_length is not None else length
    elif input_sequences:
      # Assert that value_length is a multiple of num_unroll.
      for key, value in input_sequences.items():
        value_length = array_ops.shape(value)[0]
        with ops.control_dependencies([
            control_flow_ops.Assert(
                math_ops.logical_and(
                    math_ops.equal(value_length % num_unroll, 0),
                    math_ops.not_equal(value_length, 0)),
                [string_ops.string_join(
                    ["Tensor %s first dimension should be a multiple of: "
                     % key,
                     string_ops.as_string(num_unroll),
                     ", but saw value: ",
                     string_ops.as_string(value_length),
                     ". Consider setting pad=True."])])]):
          input_sequences[key] = array_ops.identity(
              value, name="multiple_of_checked")
    # setup stateful queue reader
    stateful_reader = SequenceQueueingStateSaver(
        batch_size, num_unroll,
        input_length=input_length,
        input_key=input_key,
        input_sequences=input_sequences,
        input_context=input_context,
        initial_states=initial_states,
        capacity=capacity,
        allow_small_batch=allow_small_batch)
    barrier = stateful_reader.barrier
    logging_ops.scalar_summary(
        "queue/%s/ready_segment_batches_" % barrier.name,
        math_ops.cast(barrier.ready_size(), dtypes.float32))
    # Run the prefetch op on num_threads background threads via a QueueRunner.
    q_runner = queue_runner.QueueRunner(
        stateful_reader, [stateful_reader.prefetch_op]*num_threads,
        queue_closed_exception_types=(errors.OutOfRangeError,
                                      errors.CancelledError))
    queue_runner.add_queue_runner(q_runner)
    return stateful_reader.next_batch
def _padding(sequences, num_unroll):
  """For a dictionary of sequences, pads tensors to a multiple of `num_unroll`.
  Args:
    sequences: dictionary with `Tensor` values.
    num_unroll: int specifying to what multiple to pad sequences to.
  Returns:
    length: Scalar `Tensor` of dimension 0 of all the values in sequences.
    padded_sequence: Dictionary of sequences that are padded to a multiple of
      `num_unroll`.
  Raises:
    ValueError: If `num_unroll` is not an int.
    TypeError: If `sequences` is not a dictionary or one of its keys is not a
      string.
  """
  if not isinstance(num_unroll, numbers.Integral):
    raise ValueError("Unsupported num_unroll expected int, got: %s" %
                     str(num_unroll))
  if not isinstance(sequences, dict):
    raise TypeError("Unsupported sequences expected dict, got: %s" %
                    str(sequences))
  # Validate keys up front, before any graph ops are created.
  for key, value in sequences.items():
    if not isinstance(key, six.string_types):
      raise TypeError("Unsupported sequences key expected string, got: %s" %
                      str(key))
  if not sequences:
    return 0, {}
  sequences_dict = {}
  for key, value in sequences.items():
    sequences_dict[key] = ops.convert_to_tensor(value)
  lengths = [array_ops.shape(value)[0] for value in sequences_dict.values()]
  length = lengths[0]
  # Assert (at run time) that all values share the same first dimension.
  all_lengths_equal = [
      control_flow_ops.Assert(
          math_ops.equal(l, length), [string_ops.string_join(
              ["All sequence lengths must match, but received lengths: ",
               string_ops.as_string(lengths)])])
      for l in lengths]
  length = control_flow_ops.with_dependencies(all_lengths_equal, length)
  unroll = array_ops.constant(num_unroll)
  # Round length up to the next multiple of num_unroll; the outer modulo makes
  # the pad zero when length is already a multiple.
  padded_length = length + ((unroll - (length % unroll)) % unroll)
  padded_sequences = {}
  for key, value in sequences_dict.items():
    # 1. create shape of paddings
    # first dimension of value will be increased by num_paddings to
    # padded_length
    num_paddings = [padded_length - array_ops.shape(value)[0]]
    # the shape of the paddings that we concat with the original value will be
    # [num_paddings, tf.shape(value)[1], tf.shape(value)[2], ...,
    # tf.shape(value)[tf.rank(value) - 1])]
    padding_shape = array_ops.concat(0, (
        num_paddings, array_ops.shape(value)[1:]))
    # 2. fill padding shape with dummies
    dummy = array_ops.constant("" if value.dtype == dtypes.string else 0,
                               dtype=value.dtype)
    paddings = array_ops.fill(dims=padding_shape, value=dummy)
    # 3. concat values with paddings
    padded_sequences[key] = array_ops.concat(0, [value, paddings])
  return length, padded_sequences
| apache-2.0 |
mrkm4ntr/incubator-airflow | airflow/operators/mysql_operator.py | 7 | 1146 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.mysql.operators.mysql`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.mysql.operators.mysql import MySqlOperator # noqa
# Emit a deprecation warning at import time; stacklevel=2 attributes the
# warning to the module importing this shim rather than to this file.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.mysql.operators.mysql`.",
    DeprecationWarning,
    stacklevel=2,
)
| apache-2.0 |
tccworld/qualitybots | src/client_setup/chrome_manager.py | 26 | 12730 | #!/usr/bin/python2.6
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A helper library which provides support for (windows) chrome installation.
The library provides methods to check if Chrome is installed and the ability to
install/uninstall it. Currently, there is no cleanup of previously downloaded
installers and their associated folders.
"""
import cStringIO
import csv
import os
import subprocess
import sys
import time
import urllib
import urllib2
import _winreg
import mylogger
# Module-level logger shared by all helpers in this file.
logger = mylogger.InitLogging('Chrome_SiteCompat', True, True)
# Url that contains the list of the latest Chrome builds.
OMAHA_URL = 'https://omahaproxy.appspot.com/dl_urls'
# Installer flags and executable
# NOTE(review): "SILIENT" is a typo for "SILENT"; the names are kept as-is
# because they are referenced below.
CHROME_SILIENT_INSTALL_FLAGS = ' --do-not-launch-chrome --system-level'
CHROME_SILIENT_UNINSTALL_FLAGS = (' --uninstall --force-uninstall '
                                  '--delete-profile --system-level')
# File name used for every downloaded installer (one per version directory).
INSTALLER_FILENAME = 'installer.exe'
# Registry keys
# Key whose default value holds the installed chrome.exe path.
CHROME_EXE_KEY = (r'Software\Microsoft\Windows\CurrentVersion\App Paths'
                  r'\chrome.exe')
# Key path / value name used by this tool to record the installed version.
VERSION_KEY_PATH = r'Software\Google'
VERSION_KEY = 'bots_installed_version'
class ChromeAutomationHelperException(Exception):
  """Raised when installing, uninstalling, or querying Chrome fails."""
  pass
# TODO(user): Refactor this into base class and create factory which will
# return platform specific implementation.
class ChromeAutomationHelper(object):
"""Provides methods to support chrome automation."""
def InstallChrome(self, operating_system, channel, download_info=''):
"""Silent install of Chrome for all users.
Args:
operating_system: A string representing the desired operating system for
the build. Acceptable values are ['win'].
channel: A string representing which variant is desired. Acceptable
values are ['canary', 'dev', 'beta', 'stable'].
download_info: An optional string that represents the info necessary to
download the correct Chrome browser version.
Raises:
ChromeAutomationHelperException: Raised if something went wrong
retrieving information or downloading/installing of Chrome.
"""
logger.info('Downloading latest Chrome version information.')
(url, version) = self._GetLatestChromeDownloadUrl(
operating_system, channel, download_info=download_info)
if self.IsChromeInstalled():
if self._GetInstalledVersion() == version:
logger.info('Chrome already installed. Exiting.')
return
else:
logger.info('Uninstalling current version of Chrome because a new '
'version is available and will be installed.')
self.UninstallChrome()
logger.info('Installation of Chrome has begun.')
local_file = self._DownloadLatestBuild(url, version)
command = '"' + local_file + '"' + CHROME_SILIENT_INSTALL_FLAGS
logger.info('Installation command: ' + command)
self._ExecuteCommand(command)
if not self.IsChromeInstalled():
logger.info('Chrome not installed.')
self._LogAndRaise('Something went wrong, installation can not verify '
'installation.')
# Set the version of the newly installed chrome. Upon failure uninstall.
try:
self._SetInstalledVersion(version)
except ChromeAutomationHelperException, exception:
logger.info('Chrome not installed.')
self.UninstallChrome()
self._LogAndRaise(str(exception))
logger.info('Chrome installed successfully.')
def IsChromeInstalled(self):
"""Check if Chrome is installed.
Returns:
True if installed
False if not installed
"""
is_chrome_installed = False
key = None
try:
# Check for the regkey value presence.
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, CHROME_EXE_KEY)
chrome_exe_path = _winreg.QueryValueEx(key, None)[0]
is_chrome_installed = True
logger.info('IsInstalled: Chrome is installed at %s' % chrome_exe_path)
except WindowsError:
logger.info('IsInstalled: Chrome is not installed.')
finally:
if key:
_winreg.CloseKey(key)
return is_chrome_installed
def UninstallChrome(self):
"""Silent uninstall of Chrome for all users.
Raises:
ChromeAutomationHelperException: Raised if something went wrong
uninstalling Chrome.
"""
try:
version = self._GetInstalledVersion()
except ChromeAutomationHelperException:
logger.info('No version found, nothing to uninstall.')
return
local_file = self._GetOrCreateFilename(version)
if not os.path.exists(local_file):
self._LogAndRaise('Chrome installed but no installer to use for '
'uninstall.')
logger.info('Uninstalling Chrome.')
command = '"' + local_file + '"' + CHROME_SILIENT_UNINSTALL_FLAGS
logger.info(command)
self._ExecuteCommand(command)
if self.IsChromeInstalled():
self._LogAndRaise('Failed to uninstall Chrome.')
logger.info('Chrome has been successfully uninstalled.')
# TODO(user): Determine if it should go here or before the
# the uninstall. What is more important a spare key or a spare installed
# browser?
self._RemoveVersionKey()
def _DownloadLatestBuild(self, url, version):
"""Downloads the latest build from the given url.
Args:
url: The url from which to download the installer.
version: The version of the installer.
Returns:
A string specifying where the installer is located.
Raises:
ChromeAutomationHelperException: Raised if any of the information could
not be found.
"""
local_file = self._GetOrCreateFilename(version)
try:
urllib.urlretrieve(url, local_file)
except urllib.ContentTooShortError, content_exception:
self._LogAndRaise('Failed to download installer. The given error is: ' +
str(content_exception))
except IOError, url_exception:
self._LogAndRaise('Failed to retrieve chrome installer information '
'from ' + url + '. The given error is: ' +
str(url_exception))
finally:
urllib.urlcleanup()
if not os.path.exists(local_file):
self._LogAndRaise('Failed to download installer. File does not exist.')
return local_file
def _ExecuteCommand(self, command):
"""Executes a command on the command line.
Args:
command: A string representing the command to execute.
Raises:
ChromeAutomationHelperException: Raised if the command fails.
"""
try:
p = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE)
p.stdin.close()
if p.wait() != 0:
self._LogAndRaise('Wait failed while executing command: ' + command)
except OSError, os_error:
self._LogAndRaise('An operating system error occurred with error:' +
os_error)
except subprocess.CalledProcessError, called_process_error:
self._LogAndRaise('Executed command returned a non-zero value with '
'error: ' + called_process_error)
except ValueError, value_error:
self._LogAndRaise('Invalid arguments given to the command with error: ' +
value_error)
# Sleep for few seconds to ensure all Window registries are updated.
time.sleep(5)
def _GetOrCreateFilename(self, version):
"""Creates a path to a file using the given version.
In addition to generating the path, it also will create any missing folders
needed by the path.
Args:
version: The version of chrome.
Returns:
A string representing the path to a specific installer file.
"""
local_path = os.path.join(os.path.dirname(sys.argv[0]), version)
if not os.path.exists(local_path):
os.mkdir(local_path)
local_file = os.path.join(local_path, INSTALLER_FILENAME)
return str(local_file)
def _GetInstalledVersion(self):
"""Retrieves the version number of the currently installed Chrome.
This function assumes that the installation of Chrome has already been
verified.
Returns:
A string representing the version number.
Raises:
ChromeAutomationHelperException: Raised if the version could not be
retrieved.
"""
key = None
try:
# Check for the regkey value presence.
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, VERSION_KEY_PATH)
version = _winreg.QueryValueEx(key, VERSION_KEY)[0]
return version
except WindowsError:
logger.error('Version not found.')
return None
finally:
if key:
_winreg.CloseKey(key)
def _GetLatestChromeDownloadUrl(self, operating_system, channel,
download_info=''):
"""Finds the url of the latest Chrome build.
Using an Omaha server, retrieve a list of the current builds and extract
the appropriate information. The format of each line in the downloaded
file is [os, channel, version, download url].
Args:
operating_system: A string representing the desired operating system for
the build. Acceptable values are ['win'].
channel: A string representing which variant is desired. Acceptable
values are ['canary', 'dev', 'beta', 'stable'].
download_info: An optional string that represents the info necessary to
download the correct Chrome browser version.
Returns:
Returns a tuple of strings (url, version).
Raises:
ChromeAutomationHelperException: Raised if any of the information could
not be found.
"""
retries = 10
response = None
# Access to the url can be unstable and can potentially require a large
# unknown number of retries.
if download_info:
response = cStringIO.StringIO(download_info)
else:
for retry in range(retries):
try:
response = urllib2.urlopen(OMAHA_URL)
break
except urllib2.URLError, url_exception:
logger.info('Retry (' + str(retry) + ') Failed to retrieve chrome ' +
'installer information from ' + OMAHA_URL +
'. The given error is: ' + str(url_exception))
if not response:
self._LogAndRaise('Failed to download list of latest builds.')
reader = csv.DictReader(response)
for line in reader:
if operating_system == line['os'] and channel == line['channel']:
return (line['dl_url'], line['current_version'])
self._LogAndRaise('Did not find the specified build in the list of latest '
'builds.')
  def _LogAndRaise(self, message):
    """Logs a message and then raises an exception with the same value.

    Args:
      message: A string representing the message to log/raise.

    Raises:
      ChromeAutomationHelperException: Always raised, with the given message.
    """
    # Logged at info level so the message reaches the log even when the
    # exception is caught and swallowed by a caller.
    logger.info(message)
    raise ChromeAutomationHelperException(message)
  def _RemoveVersionKey(self):
    """Removes the registry value holding the installed Chrome version.

    Raises:
      ChromeAutomationHelperException: Raised if the version value could not
        be removed from the registry.
    """
    key = None
    try:
      # Open with write access so the value can be deleted.
      key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, VERSION_KEY_PATH,
                            0, _winreg.KEY_SET_VALUE)
      _winreg.DeleteValue(key, VERSION_KEY)
    except WindowsError:
      self._LogAndRaise('Version information could not be removed.')
    finally:
      # Always release the registry handle, even on failure.
      if key:
        _winreg.CloseKey(key)
  def _SetInstalledVersion(self, version):
    """Sets the version number of the currently installed Chrome.

    Args:
      version: A string representing the version of Chrome installed.

    Raises:
      ChromeAutomationHelperException: Raised if the version could not be
        written to the registry.
    """
    key = None
    try:
      # Open with write access so the value can be stored.
      key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, VERSION_KEY_PATH,
                            0, _winreg.KEY_SET_VALUE)
      _winreg.SetValueEx(key, VERSION_KEY, 0, _winreg.REG_SZ,
                         version)
    except WindowsError:
      self._LogAndRaise('Version information could not be set.')
    finally:
      # Always release the registry handle, even on failure.
      if key:
        _winreg.CloseKey(key)
| apache-2.0 |
JohnDevitt/appengine-django-skeleton-master | lib/django/contrib/staticfiles/management/commands/findstatic.py | 463 | 1745 | from __future__ import unicode_literals
import os
from django.contrib.staticfiles import finders
from django.core.management.base import LabelCommand
from django.utils.encoding import force_text
class Command(LabelCommand):
    help = "Finds the absolute paths for the given static file(s)."
    label = 'static file'

    def add_arguments(self, parser):
        # Keep the parent's positional label arguments, then add our flag.
        super(Command, self).add_arguments(parser)
        parser.add_argument('--first', action='store_false', dest='all',
                            default=True,
                            help="Only return the first match for each static file.")

    def handle_label(self, path, **options):
        """Resolve one static file path and report where it was found."""
        verbosity = options['verbosity']
        matches = finders.find(path, all=options['all'])
        path = force_text(path)
        searched_locations = ''
        if verbosity >= 2:
            searched_locations = ("Looking in the following locations:\n %s" %
                                  "\n ".join(force_text(location)
                                             for location in finders.searched_locations))
        if not matches:
            # Nothing found: report to stderr (with search details at -v2).
            message = ["No matching file found for '%s'." % path]
            if verbosity >= 2:
                message.append(searched_locations)
            if verbosity >= 1:
                self.stderr.write('\n'.join(message))
            return
        if not isinstance(matches, (list, tuple)):
            matches = [matches]
        real_paths = (force_text(os.path.realpath(match)) for match in matches)
        if verbosity >= 1:
            file_list = '\n '.join(real_paths)
            return ("Found '%s' here:\n %s\n%s" %
                    (path, file_list, searched_locations))
        return '\n'.join(real_paths)
| bsd-3-clause |
gromez/Sick-Beard | lib/requests/api.py | 55 | 4882 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: ISC, see LICENSE for more details.
"""
from . import sessions
from .safe_mode import catch_exceptions_if_in_safe_mode
@catch_exceptions_if_in_safe_mode
def request(method, url, **kwargs):
    """Construct and send a :class:`Request <Request>`; return the
    resulting :class:`Response <Response>` object.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) dictionary or bytes appended to the query string.
    :param data: (optional) dictionary or bytes sent in the request body.
    :param headers: (optional) dictionary of HTTP headers to send.
    :param cookies: (optional) dict or CookieJar object to send.
    :param files: (optional) dictionary of 'name': file-like-objects (or
        {'name': ('filename', fileobj)}) for multipart encoding upload.
    :param auth: (optional) auth tuple to enable Basic/Digest/Custom HTTP auth.
    :param timeout: (optional) float describing the timeout of the request.
    :param allow_redirects: (optional) boolean; set to True to follow
        POST/PUT/DELETE redirects.
    :param proxies: (optional) dictionary mapping protocol to the proxy URL.
    :param return_response: (optional) if False, an un-sent Request object
        will be returned.
    :param session: (optional) a :class:`Session` object to be used for the
        request.
    :param config: (optional) a configuration dictionary; see
        ``request.defaults`` for allowed keys and their default values.
    :param verify: (optional) if ``True``, the SSL cert will be verified;
        a CA_BUNDLE path can also be provided.
    :param prefetch: (optional) if ``True``, the response content will be
        immediately downloaded.
    :param cert: (optional) if string, path to an ssl client cert file (.pem);
        if tuple, a ('cert', 'key') pair.
    """
    # A session handed in by the caller is left open (keeping its pooled
    # connections); one created just for this call is closed when done.
    session = kwargs.pop('session', None)
    owns_session = session is None
    if owns_session:
        session = sessions.session()
    try:
        return session.request(method=method, url=url, **kwargs)
    finally:
        if owns_session:
            session.close()
def get(url, **kwargs):
    """Send a GET request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param kwargs: optional arguments that ``request`` takes.
    """
    # GET follows redirects unless the caller says otherwise.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('get', url, **kwargs)
def options(url, **kwargs):
    """Send an OPTIONS request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param kwargs: optional arguments that ``request`` takes.
    """
    # OPTIONS follows redirects unless the caller says otherwise.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('options', url, **kwargs)
def head(url, **kwargs):
    """Send a HEAD request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param kwargs: optional arguments that ``request`` takes.
    """
    # Unlike GET, HEAD does not follow redirects by default.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = False
    return request('head', url, **kwargs)
def post(url, data=None, **kwargs):
    """Send a POST request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) dictionary or bytes to send in the request body.
    :param kwargs: optional arguments that ``request`` takes.
    """
    return request('post', url, data=data, **kwargs)
def put(url, data=None, **kwargs):
    """Send a PUT request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) dictionary or bytes to send in the request body.
    :param kwargs: optional arguments that ``request`` takes.
    """
    return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
    """Send a PATCH request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) dictionary or bytes to send in the request body.
    :param kwargs: optional arguments that ``request`` takes.
    """
    return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
    """Send a DELETE request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param kwargs: optional arguments that ``request`` takes.
    """
    return request('delete', url, **kwargs)
| gpl-3.0 |
ville-k/tensorflow | tensorflow/contrib/data/python/kernel_tests/bucketing_test.py | 12 | 11950 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class BucketingTest(test.TestCase):
  """Tests for `Dataset.group_by_window()` bucketing behavior."""

  def testSimple(self):
    # Square each element, then bucket by parity into batches of 4.
    components = np.random.randint(100, size=(200,)).astype(np.int64)
    iterator = dataset_ops.Iterator.from_dataset(
        dataset_ops.Dataset.from_tensor_slices(components).map(lambda x: x * x)
        .group_by_window(lambda x: x % 2, lambda _, xs: xs.batch(4), 4))
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op)
      counts = []
      with self.assertRaises(errors.OutOfRangeError):
        while True:
          result = sess.run(get_next)
          # NOTE(review): the argument here is a generator expression, so
          # assertTrue always passes — likely intended:
          # all(x % 2 == 0 for x in result) or all(x % 2 == 1 for x in result)
          self.assertTrue(
              all(x % 2 == 0 for x in result) or all(x % 2 == 1)
              for x in result)
          counts.append(result.shape[0])
      # Every input element must appear in exactly one batch.
      self.assertEqual(len(components), sum(counts))
      num_full_batches = len([c for c in counts if c == 4])
      self.assertGreaterEqual(num_full_batches, 23)
      self.assertTrue(all(c == 4 for c in counts[:num_full_batches]))

  def testImmediateOutput(self):
    components = np.array(
        [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 0, 0, 2, 2, 0, 0], dtype=np.int64)
    iterator = dataset_ops.Iterator.from_dataset(
        dataset_ops.Dataset.from_tensor_slices(components).repeat(-1)
        .group_by_window(lambda x: x % 3, lambda _, xs: xs.batch(4), 4))
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op)
      # The input is infinite, so this test demonstrates that:
      # 1. We produce output without having to consume the entire input,
      # 2. Different buckets can produce output at different rates, and
      # 3. For deterministic input, the output is deterministic.
      for _ in range(3):
        self.assertAllEqual([0, 0, 0, 0], sess.run(get_next))
        self.assertAllEqual([1, 1, 1, 1], sess.run(get_next))
        self.assertAllEqual([2, 2, 2, 2], sess.run(get_next))
        self.assertAllEqual([0, 0, 0, 0], sess.run(get_next))

  def testSmallGroups(self):
    # Buckets that never fill up are still emitted at end-of-input.
    components = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], dtype=np.int64)
    iterator = dataset_ops.Iterator.from_dataset(
        dataset_ops.Dataset.from_tensor_slices(components)
        .group_by_window(lambda x: x % 2, lambda _, xs: xs.batch(4), 4))
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op)
      self.assertAllEqual([0, 0, 0, 0], sess.run(get_next))
      self.assertAllEqual([1, 1, 1, 1], sess.run(get_next))
      # The small outputs at the end are deterministically produced in key
      # order.
      self.assertAllEqual([0, 0, 0], sess.run(get_next))
      self.assertAllEqual([1], sess.run(get_next))

  def testReduceFuncError(self):
    components = np.random.randint(100, size=(200,)).astype(np.int64)

    def reduce_func(_, xs):
      # Introduce an incorrect padded shape that cannot (currently) be
      # detected at graph construction time.
      return xs.padded_batch(
          4,
          padded_shapes=(tensor_shape.TensorShape([]),
                         constant_op.constant([5], dtype=dtypes.int64) * -1))

    iterator = dataset_ops.Iterator.from_dataset(
        dataset_ops.Dataset.from_tensor_slices(components)
        .map(lambda x: (x, ops.convert_to_tensor([x * x])))
        .group_by_window(lambda x, _: x % 2, reduce_func, 32))
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op)
      # The bad padded shape is only caught when the batch is produced.
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(get_next)

  def testConsumeWindowDatasetMoreThanOnce(self):
    components = np.random.randint(50, size=(200,)).astype(np.int64)

    def reduce_func(key, window):
      # Apply two different kinds of padding to the input: tight
      # padding, and quantized (to a multiple of 10) padding.
      return dataset_ops.Dataset.zip((window.padded_batch(
          4,
          padded_shapes=tensor_shape.TensorShape([None])), window.padded_batch(
              4, padded_shapes=ops.convert_to_tensor([(key + 1) * 10])),))

    iterator = dataset_ops.Iterator.from_dataset(
        dataset_ops.Dataset.from_tensor_slices(components)
        .map(lambda x: array_ops.fill([math_ops.cast(x, dtypes.int32)], x))
        .group_by_window(
            lambda x: math_ops.cast(array_ops.shape(x)[0] // 10, dtypes.int64),
            reduce_func, 4))
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op)
      counts = []
      with self.assertRaises(errors.OutOfRangeError):
        while True:
          tight_result, multiple_of_10_result = sess.run(get_next)
          # The quantized batch is padded to a multiple of 10 and its
          # prefix must equal the tightly-padded batch.
          self.assertEqual(0, multiple_of_10_result.shape[1] % 10)
          self.assertAllEqual(tight_result,
                              multiple_of_10_result[:, :tight_result.shape[1]])
          counts.append(tight_result.shape[0])
      self.assertEqual(len(components), sum(counts))
# NOTE(mrry): These tests are based on the tests in
# bucket_ops_test.py. Currently, different batch sizes for each key
# are not supported, although this would be possible to add to
# `Dataset.group_by_window()`.
class BucketTest(test.TestCase):
  """Bucketing tests mirroring those of `tf.contrib.training.bucket()`."""

  def _dynamicPad(self, bucket, window, window_size):
    # TODO(mrry): To match `tf.contrib.training.bucket()`, implement a
    # generic form of padded_batch that pads every component
    # dynamically and does not rely on static shape information about
    # the arguments.
    return dataset_ops.Dataset.zip(
        (dataset_ops.Dataset.from_tensors(bucket), window.padded_batch(
            32, (tensor_shape.TensorShape([]), tensor_shape.TensorShape([None]),
                 tensor_shape.TensorShape([3])))))

  def testSingleBucket(self):
    # All 32 elements map to bucket 0 and come out as one padded batch.

    def _map_fn(v):
      return (v, array_ops.fill([v], v),
              array_ops.fill([3], string_ops.as_string(v)))

    input_dataset = (
        dataset_ops.Dataset.from_tensor_slices(math_ops.range(32)).map(_map_fn))

    bucketed_dataset = input_dataset.group_by_window(
        lambda x, y, z: 0, lambda k, bucket: self._dynamicPad(k, bucket, 32),
        32)

    iterator = dataset_ops.Iterator.from_dataset(bucketed_dataset)
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)

      which_bucket, bucketed_values = sess.run(get_next)

      self.assertEqual(0, which_bucket)

      expected_scalar_int = np.arange(32, dtype=np.int64)
      # Row i is [i]*i padded with zeros out to the longest row (31).
      expected_unk_int64 = np.zeros((32, 31)).astype(np.int64)
      for i in range(32):
        expected_unk_int64[i, :i] = i
      expected_vec3_str = np.vstack(3 * [np.arange(32).astype(bytes)]).T

      self.assertAllEqual(expected_scalar_int, bucketed_values[0])
      self.assertAllEqual(expected_unk_int64, bucketed_values[1])
      self.assertAllEqual(expected_vec3_str, bucketed_values[2])

  def testEvenOddBuckets(self):
    # Elements are routed to bucket 0 (even) or bucket 1 (odd).

    def _map_fn(v):
      return (v, array_ops.fill([v], v),
              array_ops.fill([3], string_ops.as_string(v)))

    input_dataset = (
        dataset_ops.Dataset.from_tensor_slices(math_ops.range(64)).map(_map_fn))

    bucketed_dataset = input_dataset.group_by_window(
        lambda x, y, z: math_ops.cast(x % 2, dtypes.int64),
        lambda k, bucket: self._dynamicPad(k, bucket, 32), 32)

    iterator = dataset_ops.Iterator.from_dataset(bucketed_dataset)
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)

      # Get two minibatches (one containing even values, one containing odds)
      which_bucket_even, bucketed_values_even = sess.run(get_next)
      which_bucket_odd, bucketed_values_odd = sess.run(get_next)

      # Count number of bucket_tensors.
      self.assertEqual(3, len(bucketed_values_even))
      self.assertEqual(3, len(bucketed_values_odd))

      # Ensure bucket 0 was used for all minibatch entries.
      self.assertAllEqual(0, which_bucket_even)
      self.assertAllEqual(1, which_bucket_odd)

      # Test the first bucket outputted, the events starting at 0
      expected_scalar_int = np.arange(0, 32 * 2, 2, dtype=np.int64)
      expected_unk_int64 = np.zeros((32, 31 * 2)).astype(np.int64)
      for i in range(0, 32):
        expected_unk_int64[i, :2 * i] = 2 * i
      expected_vec3_str = np.vstack(
          3 * [np.arange(0, 32 * 2, 2).astype(bytes)]).T

      self.assertAllEqual(expected_scalar_int, bucketed_values_even[0])
      self.assertAllEqual(expected_unk_int64, bucketed_values_even[1])
      self.assertAllEqual(expected_vec3_str, bucketed_values_even[2])

      # Test the second bucket outputted, the odds starting at 1
      expected_scalar_int = np.arange(1, 32 * 2 + 1, 2, dtype=np.int64)
      expected_unk_int64 = np.zeros((32, 31 * 2 + 1)).astype(np.int64)
      for i in range(0, 32):
        expected_unk_int64[i, :2 * i + 1] = 2 * i + 1
      expected_vec3_str = np.vstack(
          3 * [np.arange(1, 32 * 2 + 1, 2).astype(bytes)]).T

      self.assertAllEqual(expected_scalar_int, bucketed_values_odd[0])
      self.assertAllEqual(expected_unk_int64, bucketed_values_odd[1])
      self.assertAllEqual(expected_vec3_str, bucketed_values_odd[2])

  def testEvenOddBucketsFilterOutAllOdd(self):
    # With odd elements filtered upstream, only bucket 0 ever produces.

    def _map_fn(v):
      return (v, array_ops.fill([v], v),
              array_ops.fill([3], string_ops.as_string(v)))

    input_dataset = (
        dataset_ops.Dataset.from_tensor_slices(math_ops.range(128)).map(_map_fn)
        .filter(lambda x, y, z: math_ops.equal(x % 2, 0)))

    bucketed_dataset = input_dataset.group_by_window(
        lambda x, y, z: math_ops.cast(x % 2, dtypes.int64),
        lambda k, bucket: self._dynamicPad(k, bucket, 32), 32)

    iterator = dataset_ops.Iterator.from_dataset(bucketed_dataset)
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)

      # Get two minibatches ([0, 2, ...] and [64, 66, ...])
      which_bucket0, bucketed_values_even0 = sess.run(get_next)
      which_bucket1, bucketed_values_even1 = sess.run(get_next)

      # Ensure that bucket 1 was completely filtered out
      self.assertAllEqual(0, which_bucket0)
      self.assertAllEqual(0, which_bucket1)
      self.assertAllEqual(
          np.arange(0, 64, 2, dtype=np.int64), bucketed_values_even0[0])
      self.assertAllEqual(
          np.arange(64, 128, 2, dtype=np.int64), bucketed_values_even1[0])
# Run all test cases in this module when executed as a script.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
geo-poland/frappe | frappe/widgets/form/meta.py | 25 | 5903 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# metadata
from __future__ import unicode_literals
import frappe, os
from frappe.model.meta import Meta
from frappe.modules import scrub, get_module_path, load_doctype_module
from frappe.model.workflow import get_workflow_name
from frappe.utils import get_html_format
from frappe.translate import make_dict_from_messages, extract_messages_from_code
from frappe.utils.jinja import render_include
######
def get_meta(doctype, cached=True):
	"""Return the FormMeta for *doctype*, by default via the shared cache."""
	if not cached:
		meta = FormMeta(doctype)
	else:
		# Lazily build and cache the FormMeta under a per-doctype key.
		meta = frappe.cache().get_value("form_meta:" + doctype, lambda: FormMeta(doctype))
	lang = frappe.local.lang
	if lang != 'en':
		meta.set_translations(lang)
	return meta
class FormMeta(Meta):
	"""Meta for a DocType augmented with the client-side assets the desk
	form view needs: JS/CSS, print formats, workflows, linked doctypes,
	grid templates and translations."""
	def __init__(self, doctype):
		super(FormMeta, self).__init__(doctype)
		self.load_assets()
	def load_assets(self):
		# Collect everything the form UI needs; child tables ("istable")
		# only get search fields.
		self.add_search_fields()
		if not self.istable:
			self.add_linked_with()
			self.add_code()
			self.load_print_formats()
			self.load_workflows()
			self.load_templates()
	def as_dict(self, no_nulls=False):
		# Extend the base dict with the "__"-prefixed asset keys, which the
		# base Meta.as_dict does not serialize.
		d = super(FormMeta, self).as_dict(no_nulls=no_nulls)
		for k in ("__js", "__css", "__list_js", "__calendar_js", "__map_js",
			"__linked_with", "__messages", "__print_formats", "__workflow_docs",
			"__form_grid_templates", "__listview_template"):
			d[k] = self.get(k)
		for i, df in enumerate(d.get("fields")):
			for k in ("link_doctype", "search_fields"):
				df[k] = self.get("fields")[i].get(k)
		return d
	def add_code(self):
		# Load the doctype's JS/CSS assets from its module directory.
		path = os.path.join(get_module_path(self.module), 'doctype', scrub(self.name))
		def _get_path(fname):
			return os.path.join(path, scrub(fname))
		self._add_code(_get_path(self.name + '.js'), '__js')
		self._add_code(_get_path(self.name + '.css'), "__css")
		self._add_code(_get_path(self.name + '_list.js'), '__list_js')
		self._add_code(_get_path(self.name + '_calendar.js'), '__calendar_js')
		listview_template = _get_path(self.name + '_list.html')
		if os.path.exists(listview_template):
			self.set("__listview_template", get_html_format(listview_template))
		self.add_code_via_hook("doctype_js", "__js")
		self.add_custom_script()
	def _add_code(self, path, fieldname):
		# Append the file's (include-expanded) contents to fieldname, if found.
		js = frappe.read_file(path)
		if js:
			self.set(fieldname, (self.get(fieldname) or "") + "\n\n" + render_include(js))
	def add_code_via_hook(self, hook, fieldname):
		# Let installed apps contribute extra scripts through hooks.py.
		for app_name in frappe.get_installed_apps():
			code_hook = frappe.get_hooks(hook, default={}, app_name=app_name)
			if not code_hook:
				continue
			files = code_hook.get(self.name, [])
			if not isinstance(files, list):
				files = [files]
			for file in files:
				path = frappe.get_app_path(app_name, *file.strip("/").split("/"))
				self._add_code(path, fieldname)
	def add_custom_script(self):
		"""embed all require files"""
		# custom script
		custom = frappe.db.get_value("Custom Script", {"dt": self.name,
			"script_type": "Client"}, "script") or ""
		self.set("__js", (self.get('__js') or '') + "\n\n" + custom)
	def add_search_fields(self):
		"""add search fields found in the doctypes indicated by link fields' options"""
		for df in self.get("fields", {"fieldtype": "Link", "options":["!=", "[Select]"]}):
			if df.options:
				search_fields = frappe.get_meta(df.options).search_fields
				if search_fields:
					df.search_fields = map(lambda sf: sf.strip(), search_fields.split(","))
	def add_linked_with(self):
		"""add list of doctypes this doctype is 'linked' with"""
		# Both standard fields and custom fields can link to this doctype.
		links = frappe.db.sql("""select parent, fieldname from tabDocField
			where (fieldtype="Link" and options=%s)
			or (fieldtype="Select" and options=%s)""", (self.name, "link:"+ self.name))
		links += frappe.db.sql("""select dt as parent, fieldname from `tabCustom Field`
			where (fieldtype="Link" and options=%s)
			or (fieldtype="Select" and options=%s)""", (self.name, "link:"+ self.name))
		links = dict(links)
		if not links:
			return {}
		ret = {}
		for dt in links:
			ret[dt] = { "fieldname": links[dt] }
		# When the linking doctype is a child table, report its parent
		# doctype instead and drop the child entry.
		for grand_parent, options in frappe.db.sql("""select parent, options from tabDocField
			where fieldtype="Table"
				and options in (select name from tabDocType
					where istable=1 and name in (%s))""" % ", ".join(["%s"] * len(links)) ,tuple(links)):
			ret[grand_parent] = {"child_doctype": options, "fieldname": links[options] }
			if options in ret:
				del ret[options]
		self.set("__linked_with", ret)
	def load_print_formats(self):
		# All enabled print formats for this doctype.
		print_formats = frappe.db.sql("""select * FROM `tabPrint Format`
			WHERE doc_type=%s AND docstatus<2 and ifnull(disabled, 0)=0""", (self.name,), as_dict=1,
			update={"doctype":"Print Format"})
		self.set("__print_formats", print_formats)
	def load_workflows(self):
		# get active workflow
		workflow_name = get_workflow_name(self.name)
		workflow_docs = []
		if workflow_name and frappe.db.exists("Workflow", workflow_name):
			workflow = frappe.get_doc("Workflow", workflow_name)
			workflow_docs.append(workflow)
			for d in workflow.get("workflow_document_states"):
				workflow_docs.append(frappe.get_doc("Workflow State", d.state))
		self.set("__workflow_docs", workflow_docs)
	def load_templates(self):
		# Optional per-doctype grid templates declared by the module as
		# `form_grid_templates = {fieldname: path}`.
		module = load_doctype_module(self.name)
		app = module.__name__.split(".")[0]
		templates = {}
		if hasattr(module, "form_grid_templates"):
			for key, path in module.form_grid_templates.iteritems():
				templates[key] = get_html_format(frappe.get_app_path(app, path))
			self.set("__form_grid_templates", templates)
	def set_translations(self, lang):
		self.set("__messages", frappe.get_lang_dict("doctype", self.name))
		# set translations for grid templates
		if self.get("__form_grid_templates"):
			for content in self.get("__form_grid_templates").values():
				messages = extract_messages_from_code(content)
				messages = make_dict_from_messages(messages)
				self.get("__messages").update(messages)
| mit |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/tseries/tests/test_base.py | 2 | 51601 | from __future__ import print_function
import re
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas.tseries.base import DatetimeIndexOpsMixin
from pandas.util.testing import assertRaisesRegexp, assert_isinstance
from pandas.tseries.common import is_datetimelike
from pandas import (Series, Index, Int64Index, Timestamp, DatetimeIndex, PeriodIndex,
TimedeltaIndex, Timedelta, timedelta_range, date_range, Float64Index)
import pandas.tslib as tslib
import nose
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/Asia/Singapore', 'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex)
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
def test_ops_properties(self):
self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter'])
self.check_ops_properties(['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start',
'is_quarter_end', 'is_year_start', 'is_year_end'], lambda x: isinstance(x,DatetimeIndex))
    def test_ops_properties_basic(self):
        """Datetime properties raise on a plain Series but work via attrs."""
        # sanity check that the behavior didn't change
        # GH7206
        for op in ['year','day','second','weekday']:
            # NOTE(review): assertRaises calls the lambda with no arguments,
            # so the TypeError comes from the arity mismatch, not from the
            # property access itself — the lambda should take no parameters.
            self.assertRaises(TypeError, lambda x: getattr(self.dt_series,op))
        # attribute access should still work!
        s = Series(dict(year=2000,month=1,day=10))
        self.assertEqual(s.year,2000)
        self.assertEqual(s.month,1)
        self.assertEqual(s.day,10)
        self.assertRaises(AttributeError, lambda : s.weekday)
    def test_asobject_tolist(self):
        """`.asobject` and `.tolist()` round-trip Timestamps (and NaT)."""
        # naive index
        idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx')
        expected_list = [pd.Timestamp('2013-01-31'), pd.Timestamp('2013-02-28'),
                         pd.Timestamp('2013-03-31'), pd.Timestamp('2013-04-30')]
        expected = pd.Index(expected_list, dtype=object, name='idx')
        result = idx.asobject
        self.assertTrue(isinstance(result, Index))
        self.assertEqual(result.dtype, object)
        self.assertTrue(result.equals(expected))
        self.assertEqual(result.name, expected.name)
        self.assertEqual(idx.tolist(), expected_list)
        # tz-aware index: elements keep their timezone
        idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx', tz='Asia/Tokyo')
        expected_list = [pd.Timestamp('2013-01-31', tz='Asia/Tokyo'),
                         pd.Timestamp('2013-02-28', tz='Asia/Tokyo'),
                         pd.Timestamp('2013-03-31', tz='Asia/Tokyo'),
                         pd.Timestamp('2013-04-30', tz='Asia/Tokyo')]
        expected = pd.Index(expected_list, dtype=object, name='idx')
        result = idx.asobject
        self.assertTrue(isinstance(result, Index))
        self.assertEqual(result.dtype, object)
        self.assertTrue(result.equals(expected))
        self.assertEqual(result.name, expected.name)
        self.assertEqual(idx.tolist(), expected_list)
        # index containing NaT: NaT is preserved as-is
        idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
                             pd.NaT, datetime(2013, 1, 4)], name='idx')
        expected_list = [pd.Timestamp('2013-01-01'), pd.Timestamp('2013-01-02'),
                         pd.NaT, pd.Timestamp('2013-01-04')]
        expected = pd.Index(expected_list, dtype=object, name='idx')
        result = idx.asobject
        self.assertTrue(isinstance(result, Index))
        self.assertEqual(result.dtype, object)
        self.assertTrue(result.equals(expected))
        self.assertEqual(result.name, expected.name)
        self.assertEqual(idx.tolist(), expected_list)
    def test_minmax(self):
        """min/max ignore NaT and work regardless of monotonicity."""
        for tz in self.tz:
            # monotonic
            idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
                                     '2011-01-03'], tz=tz)
            self.assertTrue(idx1.is_monotonic)
            # non-monotonic
            idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
                                     '2011-01-02', pd.NaT], tz=tz)
            self.assertFalse(idx2.is_monotonic)
            # Both indexes share the same min/max; NaT entries are skipped.
            for idx in [idx1, idx2]:
                self.assertEqual(idx.min(), pd.Timestamp('2011-01-01', tz=tz))
                self.assertEqual(idx.max(), pd.Timestamp('2011-01-03', tz=tz))
        for op in ['min', 'max']:
            # Return NaT
            obj = DatetimeIndex([])
            self.assertTrue(pd.isnull(getattr(obj, op)()))
            obj = DatetimeIndex([pd.NaT])
            self.assertTrue(pd.isnull(getattr(obj, op)()))
            obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
            self.assertTrue(pd.isnull(getattr(obj, op)()))
    def test_representation(self):
        """repr/str/unicode of DatetimeIndex match the expected layout."""
        idx1 = DatetimeIndex([], freq='D')
        idx2 = DatetimeIndex(['2011-01-01'], freq='D')
        idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
        idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
        idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
                             freq='H', tz='Asia/Tokyo')
        idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
                             tz='US/Eastern')
        # Expected strings are exact, including line breaks.
        exp1 = """<class 'pandas.tseries.index.DatetimeIndex'>
Length: 0, Freq: D, Timezone: None"""
        exp2 = """<class 'pandas.tseries.index.DatetimeIndex'>
[2011-01-01]
Length: 1, Freq: D, Timezone: None"""
        exp3 = """<class 'pandas.tseries.index.DatetimeIndex'>
[2011-01-01, 2011-01-02]
Length: 2, Freq: D, Timezone: None"""
        exp4 = """<class 'pandas.tseries.index.DatetimeIndex'>
[2011-01-01, ..., 2011-01-03]
Length: 3, Freq: D, Timezone: None"""
        exp5 = """<class 'pandas.tseries.index.DatetimeIndex'>
[2011-01-01 09:00:00+09:00, ..., 2011-01-01 11:00:00+09:00]
Length: 3, Freq: H, Timezone: Asia/Tokyo"""
        exp6 = """<class 'pandas.tseries.index.DatetimeIndex'>
[2011-01-01 09:00:00-05:00, ..., NaT]
Length: 3, Freq: None, Timezone: US/Eastern"""
        for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
                                 [exp1, exp2, exp3, exp4, exp5, exp6]):
            # All three protocols must render identically.
            for func in ['__repr__', '__unicode__', '__str__']:
                result = getattr(idx, func)()
                self.assertEqual(result, expected)
    def test_summary(self):
        """`summary()` output matches exactly for various index shapes."""
        # GH9116
        idx1 = DatetimeIndex([], freq='D')
        idx2 = DatetimeIndex(['2011-01-01'], freq='D')
        idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
        idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
        idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
                             freq='H', tz='Asia/Tokyo')
        idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
                             tz='US/Eastern')
        # Expected strings are exact, including line breaks.
        exp1 = """DatetimeIndex: 0 entries
Freq: D"""
        exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
        exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
        exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
        exp5 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 to 2011-01-01 11:00:00+09:00
Freq: H"""
        exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
        for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
                                 [exp1, exp2, exp3, exp4, exp5, exp6]):
            result = idx.summary()
            self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second', 'millisecond', 'microsecond']):
for tz in [None, 'Asia/Tokyo', 'US/Eastern']:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq, tz=tz)
self.assertEqual(idx.resolution, expected)
    def test_add_iadd(self):
        """`+`/`+=` on DatetimeIndex: deprecated union, offsets, and ints."""
        for tz in self.tz:
            # union
            rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
            other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
            expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
            rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
            other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
            expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
            rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
            other3 = pd.DatetimeIndex([], tz=tz)
            expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
            for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
                                         (rng3, other3, expected3)]:
                # GH9094: index + index warns (set-union semantics deprecated)
                with tm.assert_produces_warning(FutureWarning):
                    result_add = rng + other
                result_union = rng.union(other)
                tm.assert_index_equal(result_add, expected)
                tm.assert_index_equal(result_union, expected)
                # GH9094
                with tm.assert_produces_warning(FutureWarning):
                    rng += other
                tm.assert_index_equal(rng, expected)
            # offset: all timedelta-like flavors shift every element
            offsets = [pd.offsets.Hour(2), timedelta(hours=2),
                       np.timedelta64(2, 'h'), Timedelta(hours=2)]
            for delta in offsets:
                rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
                result = rng + delta
                expected = pd.date_range('2000-01-01 02:00', '2000-02-01 02:00', tz=tz)
                tm.assert_index_equal(result, expected)
                rng += delta
                tm.assert_index_equal(rng, expected)
            # int: adding an integer shifts by the index frequency
            rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz)
            result = rng + 1
            expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10, tz=tz)
            tm.assert_index_equal(result, expected)
            rng += 1
            tm.assert_index_equal(rng, expected)
    def test_sub_isub(self):
        """Subtraction on DatetimeIndex: difference() as the set op;
        subtracting offsets or an int shifts the index backwards."""
        for tz in self.tz:
            # diff
            rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
            other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
            expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
            rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
            other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
            expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
            rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
            other3 = pd.DatetimeIndex([], tz=tz)
            expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
            for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
                                         (rng3, other3, expected3)]:
                result_union = rng.difference(other)
                tm.assert_index_equal(result_union, expected)
            # offset
            offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
                       Timedelta(hours=2)]
            for delta in offsets:
                rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
                result = rng - delta
                expected = pd.date_range('1999-12-31 22:00', '2000-01-31 22:00', tz=tz)
                tm.assert_index_equal(result, expected)
                rng -= delta
                tm.assert_index_equal(rng, expected)
            # int: shifts by one frequency step per unit
            rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz)
            result = rng - 1
            expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10, tz=tz)
            tm.assert_index_equal(result, expected)
            rng -= 1
            tm.assert_index_equal(rng, expected)
    def test_value_counts_unique(self):
        """value_counts/unique on a DatetimeIndex with duplicates and NaT."""
        # GH 7735
        for tz in [None, 'UTC', 'Asia/Tokyo', 'US/Eastern']:
            idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
            # create repeated values, 'n'th element is repeated by n+1 times
            idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz)
            # value_counts sorts by count descending
            exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10, tz=tz)
            expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
            tm.assert_series_equal(idx.value_counts(), expected)
            expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10, tz=tz)
            tm.assert_index_equal(idx.unique(), expected)
            idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00',
                                 '2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], tz=tz)
            exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'], tz=tz)
            expected = Series([3, 2], index=exp_idx)
            tm.assert_series_equal(idx.value_counts(), expected)
            # NaT is only counted when dropna=False
            exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT], tz=tz)
            expected = Series([3, 2, 1], index=exp_idx)
            tm.assert_series_equal(idx.value_counts(dropna=False), expected)
            tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ ]
def test_ops_properties(self):
self.check_ops_properties(['days','hours','minutes','seconds','milliseconds'])
self.check_ops_properties(['microseconds','nanoseconds'])
    def test_asobject_tolist(self):
        """asobject yields an object-dtype Index of Timedelta scalars;
        tolist mirrors it, with NaT preserved in place."""
        idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
        expected_list = [Timedelta('1 days'),Timedelta('2 days'),Timedelta('3 days'),
                         Timedelta('4 days')]
        expected = pd.Index(expected_list, dtype=object, name='idx')
        result = idx.asobject
        self.assertTrue(isinstance(result, Index))
        self.assertEqual(result.dtype, object)
        self.assertTrue(result.equals(expected))
        self.assertEqual(result.name, expected.name)
        self.assertEqual(idx.tolist(), expected_list)
        # same round-trip with a NaT in the middle
        idx = TimedeltaIndex([timedelta(days=1),timedelta(days=2),pd.NaT,
                              timedelta(days=4)], name='idx')
        expected_list = [Timedelta('1 days'),Timedelta('2 days'),pd.NaT,
                         Timedelta('4 days')]
        expected = pd.Index(expected_list, dtype=object, name='idx')
        result = idx.asobject
        self.assertTrue(isinstance(result, Index))
        self.assertEqual(result.dtype, object)
        self.assertTrue(result.equals(expected))
        self.assertEqual(result.name, expected.name)
        self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
    def test_representation(self):
        """repr/str/unicode of TimedeltaIndex for 0-3 elements; long indexes
        are abbreviated with '...', mixed-resolution indexes have Freq None."""
        idx1 = TimedeltaIndex([], freq='D')
        idx2 = TimedeltaIndex(['1 days'], freq='D')
        idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
        idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
        idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
        exp1 = """<class 'pandas.tseries.tdi.TimedeltaIndex'>
Length: 0, Freq: D"""
        exp2 = """<class 'pandas.tseries.tdi.TimedeltaIndex'>
['1 days']
Length: 1, Freq: D"""
        exp3 = """<class 'pandas.tseries.tdi.TimedeltaIndex'>
['1 days', '2 days']
Length: 2, Freq: D"""
        exp4 = """<class 'pandas.tseries.tdi.TimedeltaIndex'>
['1 days', ..., '3 days']
Length: 3, Freq: D"""
        exp5 = """<class 'pandas.tseries.tdi.TimedeltaIndex'>
['1 days 00:00:01', ..., '3 days 00:00:00']
Length: 3, Freq: None"""
        for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
                                 [exp1, exp2, exp3, exp4, exp5]):
            for func in ['__repr__', '__unicode__', '__str__']:
                result = getattr(idx, func)()
                self.assertEqual(result, expected)
    def test_summary(self):
        """summary() of TimedeltaIndex: entry count, end points, and Freq
        (omitted when the index has no regular frequency)."""
        # GH9116
        idx1 = TimedeltaIndex([], freq='D')
        idx2 = TimedeltaIndex(['1 days'], freq='D')
        idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
        idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
        idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
        exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
        exp2 = """TimedeltaIndex: 1 entries, '1 days' to '1 days'
Freq: D"""
        exp3 = """TimedeltaIndex: 2 entries, '1 days' to '2 days'
Freq: D"""
        exp4 = """TimedeltaIndex: 3 entries, '1 days' to '3 days'
Freq: D"""
        exp5 = """TimedeltaIndex: 3 entries, '1 days 00:00:01' to '3 days 00:00:00'"""
        for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
                                 [exp1, exp2, exp3, exp4, exp5]):
            result = idx.summary()
            self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days','10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00','10 days 02:00:00',freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days','10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
    def test_ops_compat(self):
        """Multiplying a TimedeltaIndex by an offset raises TypeError;
        dividing by a timedelta-like yields a numeric index (NaT -> NaN)."""
        offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
                   Timedelta(hours=2)]
        rng = timedelta_range('1 days','10 days',name='foo')
        # multiply
        for offset in offsets:
            self.assertRaises(TypeError, lambda : rng * offset)
        # divide: N days / 2 hours == 12 * N
        expected = Int64Index((np.arange(10)+1)*12,name='foo')
        for offset in offsets:
            result = rng / offset
            tm.assert_index_equal(result,expected)
        # divide with nats
        rng = TimedeltaIndex(['1 days',pd.NaT,'2 days'],name='foo')
        expected = Float64Index([12,np.nan,24])
        for offset in offsets:
            result = rng / offset
            tm.assert_index_equal(result,expected)
        # don't allow division by NaT (may change in the future)
        self.assertRaises(TypeError, lambda : rng / pd.NaT)
    def test_subtraction_ops(self):
        """Subtraction between Timestamp/Timedelta scalars and dti/tdi:
        invalid combinations raise TypeError, valid ones broadcast and
        propagate NaT."""
        # with datetimes/timedelta and tdi/dti
        tdi = TimedeltaIndex(['1 days',pd.NaT,'2 days'],name='foo')
        dti = date_range('20130101',periods=3)
        td = Timedelta('1 days')
        dt = Timestamp('20130101')
        self.assertRaises(TypeError, lambda : tdi - dt)
        self.assertRaises(TypeError, lambda : tdi - dti)
        self.assertRaises(TypeError, lambda : td - dt)
        self.assertRaises(TypeError, lambda : td - dti)
        result = dt-dti
        expected = TimedeltaIndex(['0 days','-1 days','-2 days'])
        tm.assert_index_equal(result,expected)
        result = dti-dt
        expected = TimedeltaIndex(['0 days','1 days','2 days'])
        tm.assert_index_equal(result,expected)
        result = tdi-td
        expected = TimedeltaIndex(['0 days',pd.NaT,'1 days'])
        tm.assert_index_equal(result,expected)
        result = td-tdi
        expected = TimedeltaIndex(['0 days',pd.NaT,'-1 days'])
        tm.assert_index_equal(result,expected)
        result = dti-td
        expected = DatetimeIndex(['20121231','20130101','20130102'])
        tm.assert_index_equal(result,expected)
        result = dt-tdi
        expected = DatetimeIndex(['20121231',pd.NaT,'20121230'])
        tm.assert_index_equal(result,expected)
    def test_subtraction_ops_with_tz(self):
        """Timezone validation for datetime subtraction: mixing naive and
        aware operands (or two different zones) raises TypeError."""
        # check that dt/dti subtraction ops with tz are validated
        dti = date_range('20130101',periods=3)
        ts = Timestamp('20130101')
        dt = ts.to_datetime()
        dti_tz = date_range('20130101',periods=3).tz_localize('US/Eastern')
        ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
        ts_tz2 = Timestamp('20130101').tz_localize('CET')
        dt_tz = ts_tz.to_datetime()
        td = Timedelta('1 days')
        def _check(result, expected):
            # helper: value and result type must both match
            self.assertEqual(result,expected)
            self.assertIsInstance(result, Timedelta)
        # scalars
        result = ts - ts
        expected = Timedelta('0 days')
        _check(result, expected)
        result = dt_tz - ts_tz
        expected = Timedelta('0 days')
        _check(result, expected)
        result = ts_tz - dt_tz
        expected = Timedelta('0 days')
        _check(result, expected)
        # tz mismatches
        self.assertRaises(TypeError, lambda : dt_tz - ts)
        self.assertRaises(TypeError, lambda : dt_tz - dt)
        self.assertRaises(TypeError, lambda : dt_tz - ts_tz2)
        self.assertRaises(TypeError, lambda : dt - dt_tz)
        self.assertRaises(TypeError, lambda : ts - dt_tz)
        self.assertRaises(TypeError, lambda : ts_tz2 - ts)
        self.assertRaises(TypeError, lambda : ts_tz2 - dt)
        self.assertRaises(TypeError, lambda : ts_tz - ts_tz2)
        # with dti
        self.assertRaises(TypeError, lambda : dti - ts_tz)
        self.assertRaises(TypeError, lambda : dti_tz - ts)
        self.assertRaises(TypeError, lambda : dti_tz - ts_tz2)
        result = dti_tz-dt_tz
        expected = TimedeltaIndex(['0 days','1 days','2 days'])
        tm.assert_index_equal(result,expected)
        result = dt_tz-dti_tz
        expected = TimedeltaIndex(['0 days','-1 days','-2 days'])
        tm.assert_index_equal(result,expected)
        result = dti_tz-ts_tz
        expected = TimedeltaIndex(['0 days','1 days','2 days'])
        tm.assert_index_equal(result,expected)
        result = ts_tz-dti_tz
        expected = TimedeltaIndex(['0 days','-1 days','-2 days'])
        tm.assert_index_equal(result,expected)
        result = td - td
        expected = Timedelta('0 days')
        _check(result, expected)
        result = dti_tz - td
        expected = DatetimeIndex(['20121231','20130101','20130102'],tz='US/Eastern')
        tm.assert_index_equal(result,expected)
    def test_dti_dti_deprecated_ops(self):
        """+/- between two DatetimeIndexes still acts as union/difference but
        must emit a FutureWarning (GH9094); mixed tz-aware/naive '+' raises."""
        # deprecated in 0.16.0 (GH9094)
        # change to return subtraction -> TimeDeltaIndex in 0.17.0
        # should move to the appropriate sections above
        dti = date_range('20130101',periods=3)
        dti_tz = date_range('20130101',periods=3).tz_localize('US/Eastern')
        with tm.assert_produces_warning(FutureWarning):
            result = dti-dti
            expected = Index([])
            tm.assert_index_equal(result,expected)
        with tm.assert_produces_warning(FutureWarning):
            result = dti+dti
            expected = dti
            tm.assert_index_equal(result,expected)
        with tm.assert_produces_warning(FutureWarning):
            result = dti_tz-dti_tz
            expected = Index([])
            tm.assert_index_equal(result,expected)
        with tm.assert_produces_warning(FutureWarning):
            result = dti_tz+dti_tz
            expected = dti_tz
            tm.assert_index_equal(result,expected)
        with tm.assert_produces_warning(FutureWarning):
            result = dti_tz-dti
            expected = dti_tz
            tm.assert_index_equal(result,expected)
        with tm.assert_produces_warning(FutureWarning):
            result = dti-dti_tz
            expected = dti
            tm.assert_index_equal(result,expected)
        with tm.assert_produces_warning(FutureWarning):
            self.assertRaises(TypeError, lambda : dti_tz+dti)
        with tm.assert_produces_warning(FutureWarning):
            self.assertRaises(TypeError, lambda : dti+dti_tz)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days',pd.NaT,'2 days'],name='foo')
dti = date_range('20130101',periods=3)
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi-tdi
expected = TimedeltaIndex(['0 days',pd.NaT,'0 days'])
tm.assert_index_equal(result,expected)
result = tdi+tdi
expected = TimedeltaIndex(['2 days',pd.NaT,'4 days'])
tm.assert_index_equal(result,expected)
result = dti-tdi
expected = DatetimeIndex(['20121231',pd.NaT,'20130101'])
tm.assert_index_equal(result,expected)
    def test_addition_ops(self):
        """Addition between tdi/dti and scalars/indexes: lengths must match,
        NaT propagates, and numeric indexes are rejected on the left."""
        # with datetimes/timedelta and tdi/dti
        tdi = TimedeltaIndex(['1 days',pd.NaT,'2 days'],name='foo')
        dti = date_range('20130101',periods=3)
        td = Timedelta('1 days')
        dt = Timestamp('20130101')
        result = tdi + dt
        expected = DatetimeIndex(['20130102',pd.NaT,'20130103'])
        tm.assert_index_equal(result,expected)
        result = dt + tdi
        expected = DatetimeIndex(['20130102',pd.NaT,'20130103'])
        tm.assert_index_equal(result,expected)
        result = td + tdi
        expected = TimedeltaIndex(['2 days',pd.NaT,'3 days'])
        tm.assert_index_equal(result,expected)
        result = tdi + td
        expected = TimedeltaIndex(['2 days',pd.NaT,'3 days'])
        tm.assert_index_equal(result,expected)
        # unequal length
        self.assertRaises(ValueError, lambda : tdi + dti[0:1])
        self.assertRaises(ValueError, lambda : tdi[0:1] + dti)
        # random indexes
        self.assertRaises(TypeError, lambda : tdi + Int64Index([1,2,3]))
        # this is a union!
        #self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
        result = tdi + dti
        expected = DatetimeIndex(['20130102',pd.NaT,'20130105'])
        tm.assert_index_equal(result,expected)
        result = dti + tdi
        expected = DatetimeIndex(['20130102',pd.NaT,'20130105'])
        tm.assert_index_equal(result,expected)
        result = dt + td
        expected = Timestamp('20130102')
        self.assertEqual(result,expected)
        result = td + dt
        expected = Timestamp('20130102')
        self.assertEqual(result,expected)
    def test_value_counts_unique(self):
        """value_counts/unique on a TimedeltaIndex with duplicates and NaT."""
        # GH 7735
        idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
        # create repeated values, 'n'th element is repeated by n+1 times
        idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
        # value_counts sorts by count descending
        exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
        expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
        tm.assert_series_equal(idx.value_counts(), expected)
        expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
        tm.assert_index_equal(idx.unique(), expected)
        idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00', '1 days 09:00:00',
                              '1 days 08:00:00', '1 days 08:00:00', pd.NaT])
        exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
        expected = Series([3, 2], index=exp_idx)
        tm.assert_series_equal(idx.value_counts(), expected)
        # NaT is only counted when dropna=False
        exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00', pd.NaT])
        expected = Series([3, 2, 1], index=exp_idx)
        tm.assert_series_equal(idx.value_counts(dropna=False), expected)
        tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex)
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
def test_ops_properties(self):
self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter'])
self.check_ops_properties(['qyear'], lambda x: isinstance(x,PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M', name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'), pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'), pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT', '2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'), pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'), pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
for i in [0, 1, 3]:
self.assertTrue(result[i], expected[i])
self.assertTrue(result[2].ordinal, pd.tslib.iNaT)
self.assertTrue(result[2].freq, 'D')
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertTrue(result_list[i], expected_list[i])
self.assertTrue(result_list[2].ordinal, pd.tslib.iNaT)
self.assertTrue(result_list[2].freq, 'D')
    def test_minmax(self):
        """min/max of PeriodIndex skip NaT; empty/all-NaT indexes return a
        NaT Period that keeps the index frequency."""
        # monotonic
        idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
                               '2011-01-03'], freq='D')
        self.assertTrue(idx1.is_monotonic)
        # non-monotonic
        idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
                               '2011-01-02', pd.NaT], freq='D')
        self.assertFalse(idx2.is_monotonic)
        for idx in [idx1, idx2]:
            self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
            self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
        for op in ['min', 'max']:
            # Return NaT
            obj = PeriodIndex([], freq='M')
            result = getattr(obj, op)()
            self.assertEqual(result.ordinal, tslib.iNaT)
            self.assertEqual(result.freq, 'M')
            obj = PeriodIndex([pd.NaT], freq='M')
            result = getattr(obj, op)()
            self.assertEqual(result.ordinal, tslib.iNaT)
            self.assertEqual(result.freq, 'M')
            obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
            result = getattr(obj, op)()
            self.assertEqual(result.ordinal, tslib.iNaT)
            self.assertEqual(result.freq, 'M')
    def test_representation(self):
        """repr/str/unicode of PeriodIndex across frequencies (GH 7601);
        indexes of three or more elements are abbreviated with '...'."""
        # GH 7601
        idx1 = PeriodIndex([], freq='D')
        idx2 = PeriodIndex(['2011-01-01'], freq='D')
        idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
        idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
        idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
        idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
        idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
        idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
        idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
        exp1 = """<class 'pandas.tseries.period.PeriodIndex'>
Length: 0, Freq: D"""
        exp2 = """<class 'pandas.tseries.period.PeriodIndex'>
[2011-01-01]
Length: 1, Freq: D"""
        exp3 = """<class 'pandas.tseries.period.PeriodIndex'>
[2011-01-01, 2011-01-02]
Length: 2, Freq: D"""
        exp4 = """<class 'pandas.tseries.period.PeriodIndex'>
[2011-01-01, ..., 2011-01-03]
Length: 3, Freq: D"""
        exp5 = """<class 'pandas.tseries.period.PeriodIndex'>
[2011, ..., 2013]
Length: 3, Freq: A-DEC"""
        exp6 = """<class 'pandas.tseries.period.PeriodIndex'>
[2011-01-01 09:00, ..., NaT]
Length: 3, Freq: H"""
        exp7 = """<class 'pandas.tseries.period.PeriodIndex'>
[2013Q1]
Length: 1, Freq: Q-DEC"""
        exp8 = """<class 'pandas.tseries.period.PeriodIndex'>
[2013Q1, 2013Q2]
Length: 2, Freq: Q-DEC"""
        exp9 = """<class 'pandas.tseries.period.PeriodIndex'>
[2013Q1, ..., 2013Q3]
Length: 3, Freq: Q-DEC"""
        for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
                                 [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]):
            for func in ['__repr__', '__unicode__', '__str__']:
                result = getattr(idx, func)()
                self.assertEqual(result, expected)
    def test_summary(self):
        """summary() of PeriodIndex: entry count, end points, and Freq."""
        # GH9116
        idx1 = PeriodIndex([], freq='D')
        idx2 = PeriodIndex(['2011-01-01'], freq='D')
        idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
        idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
        idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
        idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
        idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
        idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
        idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
        exp1 = """PeriodIndex: 0 entries
Freq: D"""
        exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
        exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
        exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
        exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
        exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
        exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
        exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
        exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
        for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
                                 [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]):
            result = idx.summary()
            self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second', 'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05'
'2000-01-01 09:08'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4, expected4),
(rng5, other5, expected5), (rng6, other6, expected6),
(rng7, other7, expected7)]:
# GH9094
with tm.assert_produces_warning(FutureWarning):
result_add = rng + other
result_union = rng.union(other)
tm.assert_index_equal(result_add, expected)
tm.assert_index_equal(result_union, expected)
# GH 6527
# GH9094
with tm.assert_produces_warning(FutureWarning):
rng += other
tm.assert_index_equal(rng, expected)
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365), Timedelta(days=365)]:
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3), np.timedelta64(3, 'D'),
pd.offsets.Hour(72), timedelta(minutes=60*24*3),
np.timedelta64(72, 'h'), Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
pd.offsets.Minute(120), timedelta(minutes=120),
np.timedelta64(120, 'm'), Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00', freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
result = rng + delta
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = rng4
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=3)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('2006-01-01', freq='A', periods=2)
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4, expected4),
(rng5, other5, expected5), (rng6, other6, expected6),
(rng7, other7, expected7),]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3), np.timedelta64(3, 'D'),
pd.offsets.Hour(72), timedelta(minutes=60*24*3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
pd.offsets.Minute(120), timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00', freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30), np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
result = rng + delta
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_value_counts_unique(self):
    # GH 7735
    hourly = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
    # Repeat the n-th element n+1 times, so the expected counts are 10..1
    # when sorted by descending frequency.
    idx = PeriodIndex(np.repeat(hourly.values, range(1, len(hourly) + 1)),
                      freq='H')
    exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00',
                           '2011-01-01 16:00', '2011-01-01 15:00',
                           '2011-01-01 14:00', '2011-01-01 13:00',
                           '2011-01-01 12:00', '2011-01-01 11:00',
                           '2011-01-01 10:00', '2011-01-01 09:00'], freq='H')
    expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
    tm.assert_series_equal(idx.value_counts(), expected)
    tm.assert_index_equal(idx.unique(), hourly)

    # With missing values: NaT is excluded by default and only included
    # (with its own count) when dropna=False.
    idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00',
                       '2013-01-01 09:00', '2013-01-01 08:00',
                       '2013-01-01 08:00', pd.NaT], freq='H')
    exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'], freq='H')
    tm.assert_series_equal(idx.value_counts(), Series([3, 2], index=exp_idx))

    exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT],
                          freq='H')
    tm.assert_series_equal(idx.value_counts(dropna=False),
                           Series([3, 2, 1], index=exp_idx))
    tm.assert_index_equal(idx.unique(), exp_idx)
# Legacy nose-based runner; nose is unmaintained, kept for the project's
# historical test invocation (-x: stop on first failure, --pdb on errors).
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# '--with-coverage', '--cover-package=pandas.core'],
exit=False)
| gpl-2.0 |
gregdek/ansible | lib/ansible/modules/network/f5/bigip_imish_config.py | 4 | 27901 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_imish_config
short_description: Manage BIG-IP advanced routing configuration sections
description:
- This module provides an implementation for working with advanced routing
configuration sections in a deterministic way.
version_added: 2.8
options:
route_domain:
description:
- Route domain to manage BGP configuration on.
default: 0
lines:
description:
- The ordered set of commands that should be configured in the
section.
- The commands must be the exact same commands as found in the device
running-config.
- Be sure to note the configuration command syntax as some commands
are automatically modified by the device config parser.
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section or hierarchy
the commands should be checked against.
- If the C(parents) argument is omitted, the commands are checked against
the set of top level or global commands.
src:
description:
- The I(src) argument provides a path to the configuration file
to load into the remote system.
- The path can either be a full system path to the configuration
file if the value starts with / or relative to the root of the
implemented role or playbook.
- This argument is mutually exclusive with the I(lines) and
I(parents) arguments.
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made.
- This allows the playbook designer the opportunity to perform
configuration commands prior to pushing any changes without
affecting how the set of commands are matched against the system.
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made.
- Just like with I(before) this allows the playbook designer to
append a set of commands to be executed after the command set.
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config.
- If match is set to I(line), commands are matched line by line.
- If match is set to I(strict), command lines are matched with respect
to position.
- If match is set to I(exact), command lines must be an equal match.
- Finally, if match is set to I(none), the module will not attempt to
compare the source configuration with the running configuration on
the remote device.
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device.
- If the replace argument is set to I(line) then the modified lines
are pushed to the device in configuration mode.
- If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
default: line
choices: ['line', 'block']
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made.
- The backup file is written to the C(backup) folder in the playbook
root directory or role root directory, if playbook is part of an
ansible role. If the directory does not exist, it is created.
type: bool
default: 'no'
running_config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source.
- There are times when it is not desirable to have the task get the
current running-config for every task in a playbook.
- The I(running_config) argument allows the implementer to pass in
the configuration to use as the base config for comparison.
aliases: ['config']
save_when:
description:
- When changes are made to the device running-configuration, the
changes are not copied to non-volatile storage by default.
- If the argument is set to I(always), then the running-config will
always be copied to the startup-config and the I(modified) flag will
always be set to C(True).
- If the argument is set to I(modified), then the running-config
will only be copied to the startup-config if it has changed since
the last save to startup-config.
- If the argument is set to I(never), the running-config will never be
copied to the startup-config.
- If the argument is set to I(changed), then the running-config
will only be copied to the startup-config if the task has made a change.
default: never
choices: ['always', 'never', 'modified', 'changed']
diff_against:
description:
- When using the C(ansible-playbook --diff) command line argument
the module can generate diffs against different sources.
- When this option is configure as I(startup), the module will return
the diff of the running-config against the startup-config.
- When this option is configured as I(intended), the module will
return the diff of the running-config against the configuration
provided in the C(intended_config) argument.
- When this option is configured as I(running), the module will
return the before and after diff of the running-config with respect
to any changes made to the device configuration.
default: startup
choices: ['startup', 'intended', 'running']
diff_ignore_lines:
description:
- Use this argument to specify one or more lines that should be
ignored during the diff.
- This is used for lines in the configuration that are automatically
updated by the system.
- This argument takes a list of regular expressions or exact line matches.
intended_config:
description:
- The C(intended_config) provides the master configuration that
the node should conform to and is used to check the final
running-config against.
- This argument will not modify any settings on the remote device and
is strictly used to check the compliance of the current device's
configuration against.
- When specifying this argument, the task should also modify the
C(diff_against) value and set it to I(intended).
notes:
- Abbreviated commands are NOT idempotent, see
L(Network FAQ,../network/user_guide/faq.html#why-do-the-config-modules-always-return-changed-true-with-abbreviated-commands).
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: configure top level configuration and save it
bigip_imish_config:
lines: bfd slow-timer 2000
save_when: modified
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: diff the running-config against a provided config
bigip_imish_config:
diff_against: intended
intended_config: "{{ lookup('file', 'master.cfg') }}"
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Add config to a parent block
bigip_imish_config:
lines:
- bgp graceful-restart restart-time 120
- redistribute kernel route-map rhi
- neighbor 10.10.10.11 remote-as 65000
- neighbor 10.10.10.11 fall-over bfd
- neighbor 10.10.10.11 remote-as 65000
- neighbor 10.10.10.11 fall-over bfd
parents: router bgp 64664
match: exact
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Remove an existing acl before writing it
bigip_imish_config:
lines:
- access-list 10 permit 20.20.20.20
- access-list 10 permit 20.20.20.21
- access-list 10 deny any
before: no access-list 10
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: for idempotency, use full-form commands
bigip_imish_config:
lines:
# - desc My interface
- description My Interface
# parents: int ANYCAST-P2P-2
parents: interface ANYCAST-P2P-2
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
commands:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['interface ANYCAST-P2P-2', 'neighbor 20.20.20.21 remote-as 65000', 'neighbor 20.20.20.21 fall-over bfd']
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['interface ANYCAST-P2P-2', 'neighbor 20.20.20.21 remote-as 65000', 'neighbor 20.20.20.21 fall-over bfd']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/bigip_imish_config.2016-07-16@22:28:34
'''
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import os
import tempfile
from ansible.module_utils.network.common.config import NetworkConfig, dumps
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.icontrol import upload_file
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.icontrol import upload_file
class Parameters(AnsibleF5Parameters):
# No REST attribute renaming is needed for this module.
api_map = {
}
# No attributes are sent to the device API directly.
api_attributes = [
]
# Keys reported back to the playbook; '__backup__' is consumed (and
# removed) by the action plugin to write the backup file.
returnables = [
'__backup__',
'commands',
'updates'
]
# Nothing is diff-updated attribute-by-attribute; changes are computed
# from the config diff instead.
updatables = [
]
class ApiParameters(Parameters):
# Parameters as read back from the device REST API (none needed here).
pass
class ModuleParameters(Parameters):
# Parameters as supplied by the user in the task/playbook.
pass
class Changes(Parameters):
def to_return(self):
# Collect every returnable attribute that has a value. Any exception
# while reading an attribute is deliberately swallowed (best-effort
# reporting); in that case a partially filled, unfiltered dict may be
# returned.
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
# Changes in the form sent to the device API.
pass
class ReportableChanges(Changes):
# Changes in the form reported back to the user.
pass
class Difference(object):
    """Compute per-parameter differences between wanted and current state.

    ``compare(param)`` first looks for a same-named property on this class
    (a custom comparison); when none exists it falls back to a plain
    inequality check between ``want`` and ``have``.
    """

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        try:
            # A property named after the parameter implements a custom diff.
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        wanted = getattr(self.want, param)
        try:
            # Return the wanted value only when it differs from the current
            # one; otherwise fall through and return None (no change).
            if wanted != getattr(self.have, param):
                return wanted
        except AttributeError:
            # 'have' does not know this parameter at all: treat as changed.
            return wanted
class ModuleManager(object):
def __init__(self, *args, **kwargs):
# module: the AnsibleModule wrapper; client: the F5 REST client.
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
# 'want' is the state requested by the playbook; 'have' the device state.
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
    """Seed ``self.changes`` with every returnable the user supplied."""
    changed = {
        key: getattr(self.want, key)
        for key in Parameters.returnables
        if getattr(self.want, key) is not None
    }
    if changed:
        self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
# Compare want vs. have for every updatable; returns True when anything
# differed (and records the differences in self.changes).
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
# A dict means the comparison produced several keys at once.
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
    """Return True when a difference between want and have was detected."""
    return self._update_changed_options()
def exec_module(self):
    """Run the module and assemble the result dictionary for Ansible."""
    changed = self.present()
    reportable = ReportableChanges(params=self.changes.to_return())
    result = dict(reportable.to_return())
    result['changed'] = changed
    return result
def present(self):
# Orchestrates the run: optional backup, config push, save policy, diff.
result = dict(changed=False)
config = None
contents = None
# Fetch the device config up front only when it is actually needed.
if self.want.backup or (self.module._diff and self.want.diff_against == 'running'):
contents = self.read_current_from_device()
config = NetworkConfig(indent=1, contents=contents)
if self.want.backup:
# The backup file is created in the bigip_imish_config action plugin. Refer
# to that if you have questions. The key below is removed by the action plugin.
result['__backup__'] = contents
# Push configuration when 'src' or 'lines' was supplied.
if any((self.want.src, self.want.lines)):
match = self.want.match
replace = self.want.replace
candidate = self.get_candidate()
running = self.get_running_config(contents)
response = self.get_diff(
candidate=candidate,
running=running,
diff_match=match,
diff_ignore_lines=self.want.diff_ignore_lines,
path=self.want.parents,
diff_replace=replace
)
config_diff = response['config_diff']
if config_diff:
commands = config_diff.split('\n')
# 'before' commands are prepended, 'after' commands appended.
if self.want.before:
commands[:0] = self.want.before
if self.want.after:
commands.extend(self.want.after)
result['commands'] = commands
result['updates'] = commands
if not self.module.check_mode:
self.load_config(commands)
result['changed'] = True
running_config = self.want.running_config
startup_config = None
# Persist running-config to startup-config per the 'save_when' policy.
if self.want.save_when == 'always':
self.save_config(result)
elif self.want.save_when == 'modified':
output = self.execute_show_commands(['show running-config', 'show startup-config'])
running_config = NetworkConfig(indent=1, contents=output[0], ignore_lines=self.want.diff_ignore_lines)
startup_config = NetworkConfig(indent=1, contents=output[1], ignore_lines=self.want.diff_ignore_lines)
if running_config.sha1 != startup_config.sha1:
self.save_config(result)
elif self.want.save_when == 'changed' and result['changed']:
self.save_on_device()
# Build the before/after diff when ansible-playbook --diff was used.
if self.module._diff:
if not running_config:
output = self.execute_show_commands('show running-config')
contents = output[0]
else:
contents = running_config
# recreate the object in order to process diff_ignore_lines
running_config = NetworkConfig(indent=1, contents=contents, ignore_lines=self.want.diff_ignore_lines)
if self.want.diff_against == 'running':
if self.module.check_mode:
self.module.warn("unable to perform diff against running-config due to check mode")
contents = None
else:
contents = config.config_text
elif self.want.diff_against == 'startup':
if not startup_config:
output = self.execute_show_commands('show startup-config')
contents = output[0]
else:
contents = startup_config.config_text
elif self.want.diff_against == 'intended':
contents = self.want.intended_config
if contents is not None:
base_config = NetworkConfig(indent=1, contents=contents, ignore_lines=self.want.diff_ignore_lines)
if running_config.sha1 != base_config.sha1:
# Orientation of before/after depends on what we diffed against.
if self.want.diff_against == 'intended':
before = running_config
after = base_config
elif self.want.diff_against in ('startup', 'running'):
before = base_config
after = running_config
result.update({
'changed': True,
'diff': {'before': str(before), 'after': str(after)}
})
self.changes.update(result)
return result['changed']
def load_config(self, commands):
    """Upload the commands as a file to the device and source it via imish.

    A NamedTemporaryFile is created only to obtain a unique basename for
    the remote file; its local contents are never used.
    """
    payload = StringIO("\n".join(commands))
    tmp = tempfile.NamedTemporaryFile()
    remote_name = os.path.basename(tmp.name)
    self.upload_file_to_device(payload, remote_name)
    self.load_config_on_device(remote_name)
    self.remove_uploaded_file_from_device(remote_name)
def remove_uploaded_file_from_device(self, name):
    """Delete a previously uploaded file from the device via unix-rm."""
    filepath = '/var/config/rest/downloads/{0}'.format(name)
    uri = "https://{0}:{1}/mgmt/tm/util/unix-rm".format(
        self.client.provider['server'],
        self.client.provider['server_port']
    )
    resp = self.client.api.post(
        uri, json={"command": "run", "utilCmdArgs": filepath}
    )
    try:
        response = resp.json()
    except ValueError as ex:
        raise F5ModuleError(str(ex))
    # The F5 REST API reports errors in-band via a 'code' key.
    if 'code' in response and response['code'] in [400, 403]:
        if 'message' in response:
            raise F5ModuleError(response['message'])
        raise F5ModuleError(resp.content)
def upload_file_to_device(self, content, name):
    """Upload file content to the device's file-transfer endpoint."""
    url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format(
        self.client.provider['server'],
        self.client.provider['server_port']
    )
    try:
        upload_file(self.client, url, content, name)
    except F5ModuleError:
        raise F5ModuleError(
            "Failed to upload the file."
        )
def load_config_on_device(self, name):
    """Source the uploaded config file through imish in the route domain."""
    filepath = '/var/config/rest/downloads/{0}'.format(name)
    cmd = 'imish -r {0} -f {1}'.format(self.want.route_domain, filepath)
    uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
        self.client.provider['server'],
        self.client.provider['server_port']
    )
    resp = self.client.api.post(uri, json={
        "command": "run",
        "utilCmdArgs": '-c "{0}"'.format(cmd)
    })
    try:
        response = resp.json()
        # imish reports this specific failure in the command output.
        if 'commandResult' in response:
            if 'Dynamic routing is not enabled' in response['commandResult']:
                raise F5ModuleError(response['commandResult'])
    except ValueError as ex:
        raise F5ModuleError(str(ex))
    if 'code' in response and response['code'] in [400, 403]:
        if 'message' in response:
            raise F5ModuleError(response['message'])
        raise F5ModuleError(resp.content)
def read_current_from_device(self):
    """Return the imish running-config for the configured route domain."""
    cmd = 'imish -r {0} -e \\\"show running-config\\\"'.format(self.want.route_domain)
    uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
        self.client.provider['server'],
        self.client.provider['server_port']
    )
    resp = self.client.api.post(uri, json={
        "command": "run",
        "utilCmdArgs": '-c "{0}"'.format(cmd)
    })
    try:
        response = resp.json()
        # imish reports this specific failure in the command output.
        if 'commandResult' in response:
            if 'Dynamic routing is not enabled' in response['commandResult']:
                raise F5ModuleError(response['commandResult'])
    except ValueError as ex:
        raise F5ModuleError(str(ex))
    if 'code' in response and response['code'] in [400, 403]:
        if 'message' in response:
            raise F5ModuleError(response['message'])
        raise F5ModuleError(resp.content)
    return response['commandResult']
def save_on_device(self):
    """Persist the running-config via 'imish -e write' on the device."""
    uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
        self.client.provider['server'],
        self.client.provider['server_port']
    )
    resp = self.client.api.post(uri, json={
        "command": "run",
        "utilCmdArgs": '-c "{0}"'.format('imish -e write')
    })
    try:
        response = resp.json()
    except ValueError as ex:
        raise F5ModuleError(str(ex))
    if 'code' in response and response['code'] in [400, 403]:
        if 'message' in response:
            raise F5ModuleError(response['message'])
        raise F5ModuleError(resp.content)
def get_diff(self, candidate=None, running=None, diff_match='line',
             diff_ignore_lines=None, path=None, diff_replace='line'):
    """Return {'config_diff': ...} between candidate and running configs."""
    candidate_obj = NetworkConfig(indent=1)
    candidate_obj.load(candidate)
    if running and diff_match != 'none' and diff_replace != 'config':
        # Diff against the running configuration.
        running_obj = NetworkConfig(indent=1, contents=running,
                                    ignore_lines=diff_ignore_lines)
        diff_objs = candidate_obj.difference(running_obj, path=path,
                                             match=diff_match,
                                             replace=diff_replace)
    else:
        # No comparison requested: the whole candidate is the diff.
        diff_objs = candidate_obj.items
    return {'config_diff': dumps(diff_objs, 'commands') if diff_objs else ''}
def get_running_config(self, config=None):
contents = self.want.running_config
if not contents:
if config:
contents = config
else:
contents = self.read_current_from_device()
return contents
def get_candidate(self):
    """Build the candidate configuration text from 'src' or 'lines'."""
    if self.want.src:
        return self.want.src
    if self.want.lines:
        candidate_obj = NetworkConfig(indent=1)
        candidate_obj.add(self.want.lines, parents=self.want.parents or list())
        return dumps(candidate_obj, 'raw')
    # Neither source was given; an empty candidate produces no diff.
    return ''
def execute_show_commands(self, commands):
    """Run one or more imish 'show' commands and return their outputs."""
    uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
        self.client.provider['server'],
        self.client.provider['server_port']
    )
    body = []
    for command in to_list(commands):
        wrapped = 'imish -r {0} -e \\\"{1}\\\"'.format(
            self.want.route_domain, command)
        resp = self.client.api.post(uri, json={
            "command": "run",
            "utilCmdArgs": '-c "{0}"'.format(wrapped)
        })
        try:
            response = resp.json()
            # imish reports this specific failure in the command output.
            if 'commandResult' in response:
                if 'Dynamic routing is not enabled' in response['commandResult']:
                    raise F5ModuleError(response['commandResult'])
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            raise F5ModuleError(resp.content)
        body.append(response['commandResult'])
    return body
def save_config(self, result):
# Marks the task changed and persists the config, unless in check mode.
result['changed'] = True
if self.module.check_mode:
self.module.warn(
'Skipping command `copy running-config startup-config` '
'due to check_mode. Configuration not copied to '
'non-volatile storage'
)
return
self.save_on_device()
class ArgumentSpec(object):
# Declares the module's argument spec, merged with the common F5 options.
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
# NOTE(review): no type='int' is declared, so route_domain may arrive
# as a string from YAML — confirm downstream formatting tolerates it.
route_domain=dict(default=0),
src=dict(type='path'),
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block']),
running_config=dict(aliases=['config']),
intended_config=dict(),
backup=dict(type='bool', default=False),
save_when=dict(choices=['always', 'never', 'modified', 'changed'], default='never'),
diff_against=dict(choices=['running', 'startup', 'intended'], default='startup'),
diff_ignore_lines=dict(type='list'),
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
# 'src' is a complete config source; it cannot be combined with lines/parents.
self.mutually_exclusive = [
('lines', 'src'),
('parents', 'src'),
]
self.required_if = [
('match', 'strict', ['lines']),
('match', 'exact', ['lines']),
('replace', 'block', ['lines']),
('diff_against', 'intended', ['intended_config'])
]
self.add_file_common_args = True
def main():
# Build the AnsibleModule from the spec, run the manager, report results.
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
mutually_exclusive=spec.mutually_exclusive,
required_if=spec.required_if,
add_file_common_args=spec.add_file_common_args,
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
exit_json(module, results, client)
except F5ModuleError as ex:
# Module-level errors become a failed task instead of a traceback.
fail_json(module, ex, client)
# Standard Ansible module entry point.
if __name__ == '__main__':
main()
| gpl-3.0 |
ragavvenkatesan/Convolutional-Neural-Networks | pantry/tutorials/log_reg.py | 1 | 2122 | """
Notes:
This code contains one method that explains how to build a
logistic regression classifier for the MNIST dataset using
the yann toolbox.
For a more interactive tutorial refer the notebook at
yann/pantry/tutorials/notebooks/Logistic Regression.ipynb
"""
from yann.network import network
from yann.utils.graph import draw_network
def log_reg ( dataset ):
"""
Demo: build, train and test a logistic-regression (softmax) classifier
with the yann toolbox.

Args:
dataset: yann dataset location string, e.g. the value returned by
``cook_mnist(...).dataset_location()``.
"""
# Create the yann network class with empty layers.
net = network()
# Setup the datastream module and add it to network.
dataset_params = { "dataset" : dataset,
"svm" : False,
"n_classes" : 10 }
net.add_module ( type = 'datastream', params = dataset_params )
# Create an input layer that feeds from the datastream module.
net.add_layer ( type = "input", datastream_origin = 'data')
# Create a logistic regression layer.
# Creates a softmax layer.
net.add_layer ( type = "classifier", num_classes = 10 )
# Create an objective layer.
# Default is negative log likelihood.
# What ever the objective is, is always minimized.
net.add_layer ( type = "objective" )
# Cook the network.
net.cook()
# See how the network looks like.
net.pretty_print()
# Train the network.
net.train()
# Test for accuracy.
net.test()
## Boiler Plate ##
if __name__ == '__main__':
    import sys

    # Resolve the dataset location from the command line:
    #   'create_dataset'  -> cook a fresh MNIST dataset
    #   anything else     -> treated as an existing dataset location
    #   no argument       -> fall through and cook one below
    dataset = None
    if len(sys.argv) > 1:
        if sys.argv[1] == 'create_dataset':
            from yann.special.datasets import cook_mnist
            data = cook_mnist(verbose=3)
            dataset = data.dataset_location()
        else:
            dataset = sys.argv[1]
    else:
        # print(...) with a single argument works on both Python 2 and 3;
        # the original bare 'print' statement was Python-2-only syntax.
        print("provide dataset")

    if dataset is None:
        print(" creating a new dataset to run through")
        from yann.special.datasets import cook_mnist
        data = cook_mnist(verbose=3)
        dataset = data.dataset_location()

    log_reg(dataset)
hseifeddine/dashviz-mean | node_modules/node-gyp/gyp/pylib/gyp/MSVSProject.py | 2736 | 6387 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
class Tool(object):
  """Visual Studio tool."""

  def __init__(self, name, attrs=None):
    """Initializes the tool.

    Args:
      name: Tool name.
      attrs: Dict of tool attributes; may be None.
    """
    # Keep the caller's dict when one was given; the 'Name' key is added
    # (or overwritten) in place, matching the original behavior.
    self._attrs = attrs if attrs else {}
    self._attrs['Name'] = name

  def _GetSpecification(self):
    """Creates an element for the tool.

    Returns:
      A new xml.dom.Element for the tool.
    """
    return ['Tool', self._attrs]
class Filter(object):
  """Visual Studio filter - that is, a virtual folder."""

  def __init__(self, name, contents=None):
    """Initializes the folder.

    Args:
      name: Filter (folder) name.
      contents: List of filenames and/or Filter objects contained.
    """
    self.name = name
    # Always own a fresh list so later mutation of the caller's sequence
    # does not leak into this filter.
    self.contents = [] if contents is None else list(contents)
#------------------------------------------------------------------------------
class Writer(object):
"""Visual Studio XML project writer."""
def __init__(self, project_path, version, name, guid=None, platforms=None):
"""Initializes the project.
Args:
project_path: Path to the project file.
version: Format version to emit.
name: Name of the project.
guid: GUID to use for project, if not None.
platforms: Array of string, the supported platforms. If null, ['Win32']
"""
self.project_path = project_path
self.version = version
self.name = name
self.guid = guid
# Default to Win32 for platforms.
if not platforms:
platforms = ['Win32']
# Initialize the specifications of the various sections.
self.platform_section = ['Platforms']
for platform in platforms:
self.platform_section.append(['Platform', {'Name': platform}])
self.tool_files_section = ['ToolFiles']
self.configurations_section = ['Configurations']
self.files_section = ['Files']
# Keep a dict keyed on filename to speed up access.
self.files_dict = dict()
def AddToolFile(self, path):
"""Adds a tool file to the project.
Args:
path: Relative path from project to tool file.
"""
self.tool_files_section.append(['ToolFile', {'RelativePath': path}])
def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
"""Returns the specification for a configuration.
Args:
config_type: Type of configuration node.
config_name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Returns:
"""
# Handle defaults
if not attrs:
attrs = {}
if not tools:
tools = []
# Add configuration node and its attributes
node_attrs = attrs.copy()
node_attrs['Name'] = config_name
specification = [config_type, node_attrs]
# Add tool nodes and their attributes
if tools:
for t in tools:
if isinstance(t, Tool):
specification.append(t._GetSpecification())
else:
specification.append(Tool(t)._GetSpecification())
return specification
def AddConfig(self, name, attrs=None, tools=None):
"""Adds a configuration to the project.
Args:
name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
"""
spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools)
self.configurations_section.append(spec)
def _AddFilesToNode(self, parent, files):
  """Adds files and/or filters to the parent node.

  Args:
    parent: Destination node
    files: A list of Filter objects and/or relative paths to files.

  Will call itself recursively, if the files list contains Filter objects.
  """
  for entry in files:
    if isinstance(entry, Filter):
      child = ['Filter', {'Name': entry.name}]
      # Recurse into the virtual folder's contents.
      self._AddFilesToNode(child, entry.contents)
    else:
      child = ['File', {'RelativePath': entry}]
      # Remember the node so AddFileConfig can find it by path later.
      self.files_dict[entry] = child
    parent.append(child)
def AddFiles(self, files):
"""Adds files to the project.
Args:
files: A list of Filter objects and/or relative paths to files.
This makes a copy of the file/filter tree at the time of this call. If you
later add files to a Filter object which was passed into a previous call
to AddFiles(), it will not be reflected in this project.
"""
# Delegates to the recursive helper, rooted at the <Files> section.
self._AddFilesToNode(self.files_section, files)
# TODO(rspangler) This also doesn't handle adding files to an existing
# filter. That is, it doesn't merge the trees.
def AddFileConfig(self, path, config, attrs=None, tools=None):
"""Adds a configuration to a file.
Args:
path: Relative path to the file.
config: Name of configuration to add.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Raises:
ValueError: Relative path does not match any file added via AddFiles().
"""
# Find the file node with the right relative path
parent = self.files_dict.get(path)
if not parent:
raise ValueError('AddFileConfig: file "%s" not in project.' % path)
# Add the config to the file node
spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs,
tools)
parent.append(spec)
def WriteIfChanged(self):
"""Writes the project file."""
# First create XML content definition
content = [
'VisualStudioProject',
{'ProjectType': 'Visual C++',
'Version': self.version.ProjectVersion(),
'Name': self.name,
'ProjectGUID': self.guid,
'RootNamespace': self.name,
'Keyword': 'Win32Proj'
},
self.platform_section,
self.tool_files_section,
self.configurations_section,
['References'], # empty section
self.files_section,
['Globals'] # empty section
]
# Delegates the actual write (presumably skipped when content is
# unchanged — confirm in easy_xml). VS .vcproj files use Windows-1252.
easy_xml.WriteXmlIfChanged(content, self.project_path,
encoding="Windows-1252")
| mit |
ryanarnold/complaints_categorizer | categorizer/feature_selection.py | 1 | 2179 | from collections import Counter
def TFIDF(TF, complaints, term):
    """Return the TF-IDF weight of ``term``.

    Args:
        TF: raw term frequency of ``term`` in the document of interest.
        complaints: list of dicts, each with a 'body' string.
        term: the term to weight.

    Returns:
        ``log(TF + 1) * log(N / df)`` when TF >= 1, else 0, where N is the
        number of complaints and df the number of complaints containing
        the term. Assumes the term occurs in at least one complaint when
        TF >= 1 (otherwise df would be 0) — TODO confirm with callers.
    """
    # Fix: 'log' was used without ever being imported in this module
    # (only Counter was imported), so any real call raised NameError.
    from math import log

    if TF < 1:
        return 0
    n = len(complaints)
    x = sum(1 for complaint in complaints if term in complaint['body'])
    return log(TF + 1) * log(n / x)
def DF(vocab, complaints, threshold=3):
    """Select terms whose document frequency exceeds ``threshold``.

    Args:
        vocab: iterable of candidate terms.
        complaints: list of dicts, each with a 'body' string.
        threshold: minimum (exclusive) number of complaints a term must
            appear in to be kept. Defaults to 3, the value previously
            hard-coded in the function body.

    Returns:
        List of selected terms, in first-seen vocab order (duplicates in
        vocab are collapsed, matching the original dict-based behavior).
    """
    features = []
    for term in dict.fromkeys(vocab):
        doc_freq = sum(1 for complaint in complaints
                       if term in complaint['body'])
        if doc_freq > threshold:
            features.append(term)
    return features
def chi_square(vocab, complaints, categories):
    """Select features by average chi-square association with categories.

    For every term and category a 2x2 contingency table is built:
      A: term present, category matches     B: term present, other category
      C: term absent, category matches      D: term absent, other category
    The per-category chi-square scores are averaged, weighted by category
    frequency, and terms whose average exceeds 3 are kept (in vocab order).

    Fix over the previous version: a degenerate contingency table (zero
    denominator) used to fall into an except-branch that called the
    blocking `input()` debug helper and left the 'chi'/'freq' keys unset,
    which then raised KeyError in the averaging loop. Such tables now
    simply score 0 (no measurable association).
    """
    features = []
    chi_table = dict()
    N = len(complaints)
    for term in vocab:
        chi_table[term] = dict()
        for category in categories:
            chi_table[term][category] = dict()
            A = B = C = D = 0
            for complaint in complaints:
                in_body = term in complaint['body']
                same_category = complaint['category'] == category
                if in_body and same_category:
                    A += 1
                elif in_body:
                    B += 1
                elif same_category:
                    C += 1
                else:
                    D += 1
            denominator = (A + C) * (B + D) * (A + B) * (C + D)
            if denominator:
                chi_table[term][category]['chi'] = (
                    (N * ((A * D) - (C * B)) ** 2) / denominator)
            else:
                # Term occurs in every (or no) document, or a category is
                # empty: chi-square is undefined, treat as no association.
                chi_table[term][category]['chi'] = 0.0
            chi_table[term][category]['freq'] = A + C
        chi_table[term]['chi_average'] = float()
        for category in categories:
            P = chi_table[term][category]['freq'] / N
            chi_table[term]['chi_average'] += P * chi_table[term][category]['chi']
        if chi_table[term]['chi_average'] > 3:
            features.append(term)
    print('Extracted {0} features'.format(len(features)))
    return features
| mit |
shawnadelic/shuup | shuup/core/pricing/_utils.py | 2 | 3470 | # This file is part of Shuup.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from ._discounts import get_discount_modules
from ._module import get_pricing_module
def get_price_info(context, product, quantity=1):
    """
    Get price info of product for given quantity.

    The active pricing module computes the base `PriceInfo`, which is then
    passed through every configured discount module in turn.

    :type context: shuup.core.pricing.PricingContextable
    :param product: `Product` object or id of `Product`
    :type product: shuup.core.models.Product|int
    :type quantity: int
    :rtype: shuup.core.pricing.PriceInfo
    """
    (pricing_module, pricing_context) = _get_module_and_context(context)
    result = pricing_module.get_price_info(pricing_context, product, quantity)
    for discount_module in get_discount_modules():
        result = discount_module.discount_price(pricing_context, product, result)
    return result
def get_pricing_steps(context, product):
    """
    Get context-specific list pricing steps for the given product.

    The steps come from the active pricing module and are then filtered
    through every configured discount module.  See
    `PricingModule.get_pricing_steps` for the format of the result.

    :type context: shuup.core.pricing.PricingContextable
    :param product: Product or product id
    :type product: shuup.core.models.Product|int
    :rtype: list[shuup.core.pricing.PriceInfo]
    """
    (pricing_module, pricing_context) = _get_module_and_context(context)
    result = pricing_module.get_pricing_steps(pricing_context, product)
    for discount_module in get_discount_modules():
        result = discount_module.get_pricing_steps(pricing_context, product, result)
    return result
def get_price_infos(context, products, quantity=1):
    """
    Get PriceInfo objects for a bunch of products at once.

    Usually faster than calling `get_price_info` for each product
    separately.

    :param products: List of product objects or id's
    :type products: Iterable[shuup.core.models.Product|int]
    :rtype: dict[int,PriceInfo] -- keyed by product id
    """
    (pricing_module, pricing_context) = _get_module_and_context(context)
    result = pricing_module.get_price_infos(pricing_context, products, quantity)
    for discount_module in get_discount_modules():
        result = discount_module.discount_prices(pricing_context, products, result)
    return result
def get_pricing_steps_for_products(context, products):
    """
    Get pricing steps for a bunch of products at once.

    Usually faster than calling `get_pricing_steps` for each product
    separately.

    :param products: List of product objects or id's
    :type products: Iterable[shuup.core.models.Product|int]
    :rtype: dict[int,list[PriceInfo]] -- step lists keyed by product id
    """
    (pricing_module, pricing_context) = _get_module_and_context(context)
    result = pricing_module.get_pricing_steps_for_products(pricing_context, products)
    for discount_module in get_discount_modules():
        result = discount_module.get_pricing_steps_for_products(
            pricing_context, products, result)
    return result
def _get_module_and_context(context):
    """
    Resolve the active pricing module and coerce *context* into its
    pricing context.

    :type context: shuup.core.pricing.PricingContextable
    :rtype: (PricingModule,PricingContext)
    """
    module = get_pricing_module()
    return (module, module.get_context(context))
| agpl-3.0 |
kobox/achilles.pl | src/profiles/models.py | 1 | 1291 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
import uuid
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
class BaseProfile(models.Model):
    """Abstract per-user profile; the user FK doubles as the primary key."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL,
                                primary_key=True)
    # Random, non-guessable public identifier (usable in profile URLs).
    slug = models.UUIDField(default=uuid.uuid4, blank=True, editable=False)
    # Add more user profile fields here. Make sure they are nullable
    # or with default values
    picture = models.ImageField(_('Moje logo'),
                                upload_to='profile_pics/%Y-%m-%d/',
                                null=True,
                                blank=True)
    # NOTE(review): field names do not match their (Polish) labels --
    # `bio` is labelled "Advertising articles", `newsp` "Packaging and POS
    # materials", `newsl` "Services for printing".  Presumably these are
    # opt-in subscription flags; confirm before renaming anything.
    bio = models.BooleanField(_("Artykuły Reklamowe"), default=True, blank=True)
    newsp = models.BooleanField(_("Opakowania i materiały POS"), default=True, blank=True)
    newsl = models.BooleanField(_("Usługi dla Poligrafii"), default=True, blank=True)
    email_verified = models.BooleanField(_("Email zweryfikowany"), default=False)
    class Meta:
        abstract = True
@python_2_unicode_compatible
class Profile(BaseProfile):
    """Concrete profile model; all fields are inherited from BaseProfile."""
    def __str__(self):
        return "{}'s profile". format(self.user)
| mit |
florianholzapfel/home-assistant | homeassistant/components/switch/verisure.py | 29 | 1902 | """
Support for Verisure Smartplugs.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.verisure/
"""
import logging
from homeassistant.components.verisure import HUB as hub
from homeassistant.components.verisure import CONF_SMARTPLUGS
from homeassistant.components.switch import SwitchDevice
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Verisure switch platform."""
    # Smartplug support can be disabled via the hub configuration
    # (enabled by default).
    if not int(hub.config.get(CONF_SMARTPLUGS, 1)):
        return False
    hub.update_smartplugs()
    devices = [VerisureSmartplug(value.deviceLabel)
               for value in hub.smartplug_status.values()]
    add_devices(devices)
class VerisureSmartplug(SwitchDevice):
    """Representation of a single Verisure smartplug."""

    def __init__(self, device_id):
        """Store the hub device label used to look up this plug's state."""
        self._id = device_id

    @property
    def name(self):
        """Return the name or location of the smartplug."""
        return hub.smartplug_status[self._id].location

    @property
    def is_on(self):
        """Return true if the smartplug is switched on."""
        return hub.smartplug_status[self._id].status == 'on'

    @property
    def available(self):
        """Return True if the hub reports itself as reachable."""
        return hub.available

    def _apply_state(self, state):
        """Push the desired state to Verisure, wait for it, then resync."""
        hub.my_pages.smartplug.set(self._id, state)
        hub.my_pages.smartplug.wait_while_updating(self._id, state)
        self.update()

    def turn_on(self):
        """Set smartplug status on."""
        self._apply_state('on')

    def turn_off(self):
        """Set smartplug status off."""
        self._apply_state('off')

    def update(self):
        """Get the latest state of the smartplug from the hub."""
        hub.update_smartplugs()
| mit |
dmillington/ansible-modules-core | network/nxos/nxos_vrf_interface.py | 8 | 15722 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_vrf_interface
version_added: "2.1"
short_description: Manages interface specific VRF configuration.
description:
- Manages interface specific VRF configuration.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- VRF needs to be added globally with M(nxos_vrf) before
adding a VRF to an interface.
- Remove a VRF from an interface will still remove
all L3 attributes just as it does from CLI.
- VRF is not read from an interface until IP address is
configured on that interface.
options:
vrf:
description:
- Name of VRF to be managed.
required: true
interface:
description:
- Full name of interface to be managed, i.e. Ethernet1/1.
required: true
state:
description:
- Manages desired state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: Ensure vrf ntc exists on Eth1/1
nxos_vrf_interface:
vrf: ntc
interface: Ethernet1/1
host: 68.170.147.165
state: present
- name: Ensure ntc VRF does not exist on Eth1/1
nxos_vrf_interface:
vrf: ntc
interface: Ethernet1/1
host: 68.170.147.165
state: absent
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"interface": "loopback16", "vrf": "ntc"}
existing:
description: k/v pairs of existing vrf on the interface
type: dict
sample: {"interface": "loopback16", "vrf": ""}
end_state:
description: k/v pairs of vrf after module execution
returned: always
type: dict
sample: {"interface": "loopback16", "vrf": "ntc"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["interface loopback16", "vrf member ntc"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import json
import collections
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
    """Normalize *val* to a list: None -> [], sequence -> copy, scalar -> [val]."""
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig subclass with section expansion and hierarchy-aware add."""

    def expand_section(self, configobj, S=None):
        # Depth-first collection of a config object and all its descendants.
        # The `child in S` check guards against cycles/duplicates.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Find the item whose text matches the last path element and whose
        # full parent chain matches the rest of the path; None if absent.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Join the raw (indented) lines of a section into one text block.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # Return the section at *path* as text, or an empty list when the
        # path does not exist (note the asymmetric return type).
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        # Like get_section but returns the ConfigLine objects themselves;
        # raises ValueError when the path is not in the config.
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)

    def add(self, lines, parents=None):
        """Adds one or more lines of configuration, creating missing parents.
        """
        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            # Walk (and lazily create) the parent hierarchy; each missing
            # parent is indented according to its depth.
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)
                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    # for/else: only append when no existing child matched.
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
def get_network_module(**kwargs):
    # Prefer the legacy ``get_module`` factory; on Ansible releases where
    # that import failed (see the try/except import at the top of this
    # file) the name is undefined, so fall back to ``NetworkModule``.
    try:
        return get_module(**kwargs)
    except NameError:
        return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
    # NOTE(review): the ``include_defaults`` parameter is unused here; the
    # value actually honored is read from ``module.params`` below.  Kept
    # for signature compatibility -- confirm against sibling nxos modules
    # before removing.
    config = module.params['config']
    if not config:
        try:
            config = module.get_config()
        except AttributeError:
            # Newer module object: config access moved under module.config.
            defaults = module.params['include_defaults']
            config = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
    # Compute the diff between the candidate config and the device's
    # running config; only the differing lines are pushed.
    config = get_config(module)
    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            # Old API exposes configure(); newer module objects make the
            # config attribute callable instead.
            try:
                module.configure(commands)
            except AttributeError:
                module.config(commands)

            if save_config:
                # Same old/new API split for persisting the config.
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        # Reported as changed even in check mode (commands were computed).
        result['changed'] = True
        result['updates'] = commands

    return result
# END OF COMMON CODE
WARNINGS = []
def execute_config_command(commands, module):
    # Push config commands; fail the Ansible task on any shell error.
    try:
        module.configure(commands)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending CLI commands',
                         error=str(clie), commands=commands)
    except AttributeError:
        # Newer module object: enter config mode explicitly and run the
        # commands through the CLI interface instead.
        try:
            commands.insert(0, 'configure')
            module.cli.add_commands(commands, output='config')
            module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending CLI commands',
                             error=str(clie), commands=commands)
def get_cli_body_ssh_vrf_interface(command, response, module):
    """Normalize CLI (transport=cli) output into the NX-API body shape.

    These modules were written for NX-API, so this is a shim: a '^' in the
    response marks an invalid command (empty body), 'show run' output stays
    a raw string list, and anything else is parsed as a one-element list of
    decoded JSON.
    """
    first_line = response[0]
    if '^' in first_line:
        return []
    if 'show run' in command:
        return response
    return [json.loads(first_line)]
def execute_show(cmds, module, command_type=None):
    # Map the legacy NX-API command types onto the newer CLI output kinds.
    command_type_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text'
    }

    try:
        if command_type:
            response = module.execute(cmds, command_type=command_type)
        else:
            response = module.execute(cmds)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(clie))
    except AttributeError:
        # Newer module object: no execute(); use the CLI interface, with
        # the command type translated (or raw mode when none was given).
        try:
            if command_type:
                command_type = command_type_map.get(command_type)
                module.cli.add_commands(cmds, output=command_type)
                response = module.cli.run_commands()
            else:
                module.cli.add_commands(cmds, raw=True)
                response = module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending {0}'.format(cmds),
                             error=str(clie))
    return response
def execute_show_command(command, module, command_type='cli_show'):
    """Run a show command over the configured transport and return the body.

    Over 'cli', non-'show run' commands get '| json' appended and the raw
    response is normalized by get_cli_body_ssh_vrf_interface(); over
    'nxapi' the response is already structured.
    """
    transport = module.params['transport']
    if transport == 'cli':
        if 'show run' not in command:
            command += ' | json'
        response = execute_show([command], module)
        return get_cli_body_ssh_vrf_interface(command, response, module)
    elif transport == 'nxapi':
        return execute_show([command], module, command_type=command_type)
def get_interface_type(interface):
    """Classify an interface name by its first two (case-insensitive) letters."""
    prefix_to_type = (
        ('ET', 'ethernet'),      # Ethernet
        ('VL', 'svi'),           # Vlan (switched virtual interface)
        ('LO', 'loopback'),      # loopback
        ('MG', 'management'),    # mgmt
        ('MA', 'management'),    # management
        ('PO', 'portchannel'),   # port-channel
    )
    name = interface.upper()
    for prefix, intf_type in prefix_to_type:
        if name.startswith(prefix):
            return intf_type
    return 'unknown'
def get_interface_mode(interface, intf_type, module):
    """Return 'layer2', 'layer3' or 'unknown' for the given interface.

    Ethernet/port-channel interfaces are queried on the device ('access'
    and 'trunk' collapse to 'layer2'); loopback and SVI interfaces are
    layer 3 by definition.  Other types stay 'unknown'.

    Fix: the previous version contained a dead store (``interface = {}``)
    that clobbered the parameter and was never read; it has been removed.
    """
    command = 'show interface {0}'.format(interface)
    mode = 'unknown'
    if intf_type in ['ethernet', 'portchannel']:
        body = execute_show_command(command, module)[0]
        interface_table = body['TABLE_interface']['ROW_interface']
        # eth_mode is absent on routed ports, hence the 'layer3' default.
        mode = str(interface_table.get('eth_mode', 'layer3'))
        if mode == 'access' or mode == 'trunk':
            mode = 'layer2'
    elif intf_type == 'loopback' or intf_type == 'svi':
        mode = 'layer3'
    return mode
def get_vrf_list(module):
    """Return the list of VRF names configured on the device.

    Fix: when exactly one VRF exists, NX-API returns ROW_vrf as a bare
    dict rather than a one-element list; iterating the dict yields its
    keys (strings) and ``each['vrf_name']`` then raises TypeError.  The
    row is now normalized into a list before iteration.
    """
    command = 'show vrf all'
    vrf_list = []
    body = execute_show_command(command, module)[0]

    try:
        vrf_table = body['TABLE_vrf']['ROW_vrf']
    except (KeyError, AttributeError):
        return vrf_list

    if isinstance(vrf_table, dict):
        # Single-row result: wrap so the loop below sees a list of rows.
        vrf_table = [vrf_table]

    for each in vrf_table:
        vrf_list.append(str(each['vrf_name']))

    return vrf_list
def get_interface_info(interface, module):
    """Return the VRF name configured on *interface*, or '' when none.

    The VRF is scraped from the interface's running-config section since
    it is not reported elsewhere until an IP address is configured.

    Fix: the regex is now a raw string -- the previous plain string
    contained the invalid escape sequences ``\\s``/``\\S`` (a
    DeprecationWarning, and a SyntaxWarning on modern Python).
    """
    command = 'show run | section interface.{0}'.format(interface.capitalize())
    vrf_regex = r".*vrf\s+member\s+(?P<vrf>\S+).*"

    try:
        body = execute_show_command(command, module,
                                    command_type='cli_show_ascii')[0]
        match_vrf = re.match(vrf_regex, body, re.DOTALL)
        vrf = match_vrf.groupdict()["vrf"]
    except (AttributeError, TypeError):
        # No match (AttributeError on None) or non-string body.
        return ""

    return vrf
def is_default(interface, module):
    """Check whether *interface* carries no configuration beyond its header.

    Returns True when the last line of its running-config section is the
    bare ``interface ...`` line, False when extra config follows, and the
    sentinel string 'DNE' when the interface does not exist on the switch.
    """
    command = 'show run interface {0}'.format(interface)
    try:
        body = execute_show_command(command, module,
                                    command_type='cli_show_ascii')[0]
        raw_list = body.split('\n')
        return raw_list[-1].startswith('interface')
    except (KeyError, IndexError):
        return 'DNE'
def main():
    """Ansible entry point: ensure a VRF is (or is not) bound to an interface."""
    argument_spec = dict(
        vrf=dict(required=True),
        interface=dict(type='str', required=True),
        state=dict(default='present', choices=['present', 'absent'],
                   required=False),
        include_defaults=dict(default=False),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    vrf = module.params['vrf']
    interface = module.params['interface'].lower()
    state = module.params['state']

    # Warn (but do not fail) when the VRF is not defined globally yet.
    current_vrfs = get_vrf_list(module)
    if vrf not in current_vrfs:
        WARNINGS.append("The VRF is not present/active on the device. "
                        "Use nxos_vrf to fix this.")

    intf_type = get_interface_type(interface)
    # Logical interfaces cannot be verified the same way over NX-API, so
    # the existence check only runs for non-ethernet interfaces over cli.
    if (intf_type != 'ethernet' and module.params['transport'] == 'cli'):
        if is_default(interface, module) == 'DNE':
            module.fail_json(msg="interface does not exist on switch. Verify "
                                 "switch platform or create it first with "
                                 "nxos_interface if it's a logical interface")

    # A VRF can only be configured on a Layer 3 port.
    mode = get_interface_mode(interface, intf_type, module)
    if mode == 'layer2':
        module.fail_json(msg='Ensure interface is a Layer 3 port before '
                             'configuring a VRF on an interface. You can '
                             'use nxos_interface')

    proposed = dict(interface=interface, vrf=vrf)

    current_vrf = get_interface_info(interface, module)
    existing = dict(interface=interface, vrf=current_vrf)
    changed = False
    end_state = existing

    # Removing a VRF that is not on the interface is an explicit error.
    if vrf != existing['vrf'] and state == 'absent':
        module.fail_json(msg='The VRF you are trying to remove '
                             'from the interface does not exist '
                             'on that interface.',
                         interface=interface, proposed_vrf=vrf,
                         existing_vrf=existing['vrf'])

    commands = []
    if existing:
        if state == 'absent':
            if existing and vrf == existing['vrf']:
                command = 'no vrf member {0}'.format(vrf)
                commands.append(command)
        elif state == 'present':
            if existing['vrf'] != vrf:
                command = 'vrf member {0}'.format(vrf)
                commands.append(command)

    if commands:
        # Enter the interface's config context before the vrf command.
        commands.insert(0, 'interface {0}'.format(interface))

    if commands:
        if module.check_mode:
            # Check mode: report the would-be commands without applying.
            module.exit_json(changed=True, commands=commands)
        else:
            execute_config_command(commands, module)
            changed = True
            # Re-read the interface to report the post-change state.
            changed_vrf = get_interface_info(interface, module)
            end_state = dict(interface=interface, vrf=changed_vrf)
            if 'configure' in commands:
                # execute_config_command may prepend 'configure'; strip it
                # from the reported updates.
                commands.pop(0)

    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = commands
    results['changed'] = changed
    if WARNINGS:
        results['warnings'] = WARNINGS

    module.exit_json(**results)
if __name__ == '__main__':
main() | gpl-3.0 |
persandstrom/home-assistant | tests/components/hangouts/test_config_flow.py | 8 | 3316 | """Tests for the Google Hangouts config flow."""
from unittest.mock import patch
from homeassistant import data_entry_flow
from homeassistant.components.hangouts import config_flow
async def test_flow_works(hass, aioclient_mock):
    """Test config flow without 2fa."""
    flow = config_flow.HangoutsFlowHandler()
    flow.hass = hass
    # Patched get_auth raises nothing -> login succeeds immediately and
    # the flow creates an entry titled after the account email.
    with patch('hangups.get_auth'):
        result = await flow.async_step_user(
            {'email': 'test@test.com', 'password': '1232456'})
    assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result['title'] == 'test@test.com'
async def test_flow_works_with_2fa(hass, aioclient_mock):
    """Test config flow with 2fa."""
    from homeassistant.components.hangouts.hangups_utils import Google2FAError
    flow = config_flow.HangoutsFlowHandler()
    flow.hass = hass
    # First login attempt raises Google2FAError -> flow shows the 2fa form.
    with patch('hangups.get_auth', side_effect=Google2FAError):
        result = await flow.async_step_user(
            {'email': 'test@test.com', 'password': '1232456'})
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == '2fa'
    # Second attempt (pin step) succeeds and creates the entry.
    with patch('hangups.get_auth'):
        result = await flow.async_step_2fa({'2fa': 123456})
    assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result['title'] == 'test@test.com'
async def test_flow_with_unknown_2fa(hass, aioclient_mock):
    """Test config flow with an unsupported 2fa method."""
    from homeassistant.components.hangouts.hangups_utils import GoogleAuthError
    flow = config_flow.HangoutsFlowHandler()
    flow.hass = hass
    # A GoogleAuthError carrying this specific message is mapped to the
    # 'invalid_2fa_method' form error.
    with patch('hangups.get_auth',
               side_effect=GoogleAuthError('Unknown verification code input')):
        result = await flow.async_step_user(
            {'email': 'test@test.com', 'password': '1232456'})
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['errors']['base'] == 'invalid_2fa_method'
async def test_flow_invalid_login(hass, aioclient_mock):
    """Test config flow with an invalid login."""
    from homeassistant.components.hangouts.hangups_utils import GoogleAuthError
    flow = config_flow.HangoutsFlowHandler()
    flow.hass = hass
    # A bare GoogleAuthError (no special 2fa message) maps to the
    # 'invalid_login' form error.
    with patch('hangups.get_auth',
               side_effect=GoogleAuthError):
        result = await flow.async_step_user(
            {'email': 'test@test.com', 'password': '1232456'})
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['errors']['base'] == 'invalid_login'
async def test_flow_invalid_2fa(hass, aioclient_mock):
    """Test config flow with an invalid 2fa pin."""
    from homeassistant.components.hangouts.hangups_utils import Google2FAError
    flow = config_flow.HangoutsFlowHandler()
    flow.hass = hass
    # First login attempt triggers the 2fa form.
    with patch('hangups.get_auth', side_effect=Google2FAError):
        result = await flow.async_step_user(
            {'email': 'test@test.com', 'password': '1232456'})
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == '2fa'
    # A wrong pin raises Google2FAError again -> form re-shown with error.
    with patch('hangups.get_auth', side_effect=Google2FAError):
        result = await flow.async_step_2fa({'2fa': 123456})
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['errors']['base'] == 'invalid_2fa'
| apache-2.0 |
xsteadfastx/beets | beetsplug/hook.py | 11 | 4275 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Allows custom commands to be run when an event is emitted by beets"""
from __future__ import division, absolute_import, print_function
import string
import subprocess
import six
from beets.plugins import BeetsPlugin
from beets.util import shlex_split, arg_encoding
class CodingFormatter(string.Formatter):
    """A variant of `string.Formatter` that converts everything to `unicode`
    strings.

    This is necessary on Python 2, where formatting otherwise occurs on
    bytestrings. It intercepts two points in the formatting process to decode
    the format string and all fields using the specified encoding. If decoding
    fails, the values are used as-is.
    """

    def __init__(self, coding):
        """Creates a new coding formatter with the provided coding."""
        self._coding = coding

    def format(self, format_string, *args, **kwargs):
        """Formats the provided string using the provided arguments and keyword
        arguments.

        This method decodes the format string using the formatter's coding.

        See str.format and string.Formatter.format.
        """
        # NOTE(review): this class is only instantiated under six.PY2 (see
        # HookPlugin below).  On Py2, calling .decode on an
        # already-`unicode` string first implicitly ASCII-encodes it, which
        # raises UnicodeEncodeError -- that is the case being swallowed so
        # already-decoded values pass through unchanged.
        try:
            format_string = format_string.decode(self._coding)
        except UnicodeEncodeError:
            pass

        return super(CodingFormatter, self).format(format_string, *args,
                                                   **kwargs)

    def convert_field(self, value, conversion):
        """Converts the provided value given a conversion type.

        This method decodes the converted value using the formatter's coding.

        See string.Formatter.convert_field.
        """
        converted = super(CodingFormatter, self).convert_field(value,
                                                               conversion)
        # Same Py2 decode dance as in format() above.
        try:
            converted = converted.decode(self._coding)
        except UnicodeEncodeError:
            pass

        return converted
class HookPlugin(BeetsPlugin):
    """Allows custom commands to be run when an event is emitted by beets"""
    def __init__(self):
        super(HookPlugin, self).__init__()

        self.config.add({
            'hooks': []
        })

        # Register one listener per configured hook entry; each entry
        # declares the beets event name and the shell command to run.
        hooks = self.config['hooks'].get(list)

        for hook_index in range(len(hooks)):
            hook = self.config['hooks'][hook_index]

            hook_event = hook['event'].as_str()
            hook_command = hook['command'].as_str()

            self.create_and_register_hook(hook_event, hook_command)

    def create_and_register_hook(self, event, command):
        # Factory closure: binds `event`/`command` per hook so each
        # registered listener keeps its own values.
        def hook_function(**kwargs):
                if command is None or len(command) == 0:
                    self._log.error('invalid command "{0}"', command)
                    return

                # Use a string formatter that works on Unicode strings.
                if six.PY2:
                    formatter = CodingFormatter(arg_encoding())
                else:
                    formatter = string.Formatter()

                # Substitute {event} and event-payload fields into each
                # shell-split piece of the configured command.
                command_pieces = shlex_split(command)

                for i, piece in enumerate(command_pieces):
                    command_pieces[i] = formatter.format(piece, event=event,
                                                         **kwargs)

                self._log.debug(u'running command "{0}" for event {1}',
                                u' '.join(command_pieces), event)

                try:
                    subprocess.Popen(command_pieces).wait()
                except OSError as exc:
                    self._log.error(u'hook for {0} failed: {1}', event, exc)

        self.register_listener(event, hook_function)
| mit |
nikste/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/beta_test.py | 34 | 13683 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import special
from scipy import stats
from tensorflow.contrib.distributions.python.ops import beta as beta_lib
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class BetaTest(test.TestCase):
  def testSimpleShapes(self):
    # Scalar event with a 1-D batch: dynamic and static shapes must agree.
    with self.test_session():
      a = np.random.rand(3)
      b = np.random.rand(3)
      dist = beta_lib.Beta(a, b)
      self.assertAllEqual([], dist.event_shape_tensor().eval())
      self.assertAllEqual([3], dist.batch_shape_tensor().eval())
      self.assertEqual(tensor_shape.TensorShape([]), dist.event_shape)
      self.assertEqual(tensor_shape.TensorShape([3]), dist.batch_shape)
  def testComplexShapes(self):
    # Rank-3 parameters of equal shape yield a [3, 2, 2] batch.
    with self.test_session():
      a = np.random.rand(3, 2, 2)
      b = np.random.rand(3, 2, 2)
      dist = beta_lib.Beta(a, b)
      self.assertAllEqual([], dist.event_shape_tensor().eval())
      self.assertAllEqual([3, 2, 2], dist.batch_shape_tensor().eval())
      self.assertEqual(tensor_shape.TensorShape([]), dist.event_shape)
      self.assertEqual(
          tensor_shape.TensorShape([3, 2, 2]), dist.batch_shape)
  def testComplexShapesBroadcast(self):
    # b ([2, 2]) broadcasts against a ([3, 2, 2]); batch shape follows a.
    with self.test_session():
      a = np.random.rand(3, 2, 2)
      b = np.random.rand(2, 2)
      dist = beta_lib.Beta(a, b)
      self.assertAllEqual([], dist.event_shape_tensor().eval())
      self.assertAllEqual([3, 2, 2], dist.batch_shape_tensor().eval())
      self.assertEqual(tensor_shape.TensorShape([]), dist.event_shape)
      self.assertEqual(
          tensor_shape.TensorShape([3, 2, 2]), dist.batch_shape)
  def testAlphaProperty(self):
    # concentration1 mirrors the alpha (`a`) constructor argument.
    a = [[1., 2, 3]]
    b = [[2., 4, 3]]
    with self.test_session():
      dist = beta_lib.Beta(a, b)
      self.assertEqual([1, 3], dist.concentration1.get_shape())
      self.assertAllClose(a, dist.concentration1.eval())
  def testBetaProperty(self):
    # concentration0 mirrors the beta (`b`) constructor argument.
    a = [[1., 2, 3]]
    b = [[2., 4, 3]]
    with self.test_session():
      dist = beta_lib.Beta(a, b)
      self.assertEqual([1, 3], dist.concentration0.get_shape())
      self.assertAllClose(b, dist.concentration0.eval())
  def testPdfXProper(self):
    # With validate_args=True, prob() must reject samples outside (0, 1].
    a = [[1., 2, 3]]
    b = [[2., 4, 3]]
    with self.test_session():
      dist = beta_lib.Beta(a, b, validate_args=True)
      dist.prob([.1, .3, .6]).eval()
      dist.prob([.2, .3, .5]).eval()
      # Either condition can trigger.
      with self.assertRaisesOpError("sample must be positive"):
        dist.prob([-1., 0.1, 0.5]).eval()
      with self.assertRaisesOpError("sample must be positive"):
        dist.prob([0., 0.1, 0.5]).eval()
      with self.assertRaisesOpError("sample must be no larger than `1`"):
        dist.prob([.1, .2, 1.2]).eval()
  def testPdfTwoBatches(self):
    # Batched pdf at x=0.5: Beta(1,1) is uniform (pdf 1), Beta(2,2) gives 1.5.
    with self.test_session():
      a = [1., 2]
      b = [1., 2]
      x = [.5, .5]
      dist = beta_lib.Beta(a, b)
      pdf = dist.prob(x)
      self.assertAllClose([1., 3. / 2], pdf.eval())
      self.assertEqual((2,), pdf.get_shape())
  def testPdfTwoBatchesNontrivialX(self):
    # Same batched parameters as above, evaluated at asymmetric points.
    with self.test_session():
      a = [1., 2]
      b = [1., 2]
      x = [.3, .7]
      dist = beta_lib.Beta(a, b)
      pdf = dist.prob(x)
      self.assertAllClose([1, 63. / 50], pdf.eval())
      self.assertEqual((2,), pdf.get_shape())
  def testPdfUniformZeroBatch(self):
    with self.test_session():
      # This is equivalent to a uniform distribution
      a = 1.
      b = 1.
      x = np.array([.1, .2, .3, .5, .8], dtype=np.float32)
      dist = beta_lib.Beta(a, b)
      pdf = dist.prob(x)
      # Uniform pdf is 1 everywhere on (0, 1).
      self.assertAllClose([1.] * 5, pdf.eval())
      self.assertEqual((5,), pdf.get_shape())
  def testPdfAlphaStretchedInBroadcastWhenSameRank(self):
    # [1, 2] parameters broadcast against a [2, 2] sample of equal rank.
    with self.test_session():
      a = [[1., 2]]
      b = [[1., 2]]
      x = [[.5, .5], [.3, .7]]
      dist = beta_lib.Beta(a, b)
      pdf = dist.prob(x)
      self.assertAllClose([[1., 3. / 2], [1., 63. / 50]], pdf.eval())
      self.assertEqual((2, 2), pdf.get_shape())
  def testPdfAlphaStretchedInBroadcastWhenLowerRank(self):
    # Rank-1 parameters broadcast against a rank-2 sample.
    with self.test_session():
      a = [1., 2]
      b = [1., 2]
      x = [[.5, .5], [.2, .8]]
      pdf = beta_lib.Beta(a, b).prob(x)
      self.assertAllClose([[1., 3. / 2], [1., 24. / 25]], pdf.eval())
      self.assertEqual((2, 2), pdf.get_shape())
  def testPdfXStretchedInBroadcastWhenSameRank(self):
    # A [1, 2] sample broadcasts against [2, 2] parameters of equal rank.
    with self.test_session():
      a = [[1., 2], [2., 3]]
      b = [[1., 2], [2., 3]]
      x = [[.5, .5]]
      pdf = beta_lib.Beta(a, b).prob(x)
      self.assertAllClose([[1., 3. / 2], [3. / 2, 15. / 8]], pdf.eval())
      self.assertEqual((2, 2), pdf.get_shape())
  def testPdfXStretchedInBroadcastWhenLowerRank(self):
    # A rank-1 sample broadcasts against rank-2 parameters.
    with self.test_session():
      a = [[1., 2], [2., 3]]
      b = [[1., 2], [2., 3]]
      x = [.5, .5]
      pdf = beta_lib.Beta(a, b).prob(x)
      self.assertAllClose([[1., 3. / 2], [3. / 2, 15. / 8]], pdf.eval())
      self.assertEqual((2, 2), pdf.get_shape())
  def testBetaMean(self):
    # Mean must match scipy's reference implementation.
    with session.Session():
      a = [1., 2, 3]
      b = [2., 4, 1.2]
      expected_mean = stats.beta.mean(a, b)
      dist = beta_lib.Beta(a, b)
      self.assertEqual(dist.mean().get_shape(), (3,))
      self.assertAllClose(expected_mean, dist.mean().eval())
  def testBetaVariance(self):
    # Variance must match scipy's reference implementation.
    with session.Session():
      a = [1., 2, 3]
      b = [2., 4, 1.2]
      expected_variance = stats.beta.var(a, b)
      dist = beta_lib.Beta(a, b)
      self.assertEqual(dist.variance().get_shape(), (3,))
      self.assertAllClose(expected_variance, dist.variance().eval())
def testBetaMode(self):
    """mode() equals (a - 1) / (a + b - 2) when a, b > 1."""
    with session.Session():
        # a > 1 everywhere so the closed-form mode is well defined.
        a = np.array([1.1, 2, 3])
        b = np.array([2., 4, 1.2])
        expected_mode = (a - 1) / (a + b - 2)
        dist = beta_lib.Beta(a, b)
        self.assertEqual(dist.mode().get_shape(), (3,))
        self.assertAllClose(expected_mode, dist.mode().eval())
def testBetaModeInvalid(self):
    """mode() raises when undefined and allow_nan_stats is False."""
    with session.Session():
        # a == 1 makes (a - 1)/(a + b - 2) an invalid mode.
        a = np.array([1., 2, 3])
        b = np.array([2., 4, 1.2])
        dist = beta_lib.Beta(a, b, allow_nan_stats=False)
        with self.assertRaisesOpError("Condition x < y.*"):
            dist.mode().eval()
        # Symmetric failure case: b == 1.
        a = np.array([2., 2, 3])
        b = np.array([1., 4, 1.2])
        dist = beta_lib.Beta(a, b, allow_nan_stats=False)
        with self.assertRaisesOpError("Condition x < y.*"):
            dist.mode().eval()
def testBetaModeEnableAllowNanStats(self):
    """With allow_nan_stats=True an undefined mode evaluates to NaN."""
    with session.Session():
        # a == 1 makes the first element's mode undefined.
        a = np.array([1., 2, 3])
        b = np.array([2., 4, 1.2])
        dist = beta_lib.Beta(a, b, allow_nan_stats=True)
        expected_mode = (a - 1) / (a + b - 2)
        expected_mode[0] = np.nan
        self.assertEqual((3,), dist.mode().get_shape())
        self.assertAllClose(expected_mode, dist.mode().eval())
        # b == 1 is the symmetric undefined case.
        a = np.array([2., 2, 3])
        b = np.array([1., 4, 1.2])
        dist = beta_lib.Beta(a, b, allow_nan_stats=True)
        expected_mode = (a - 1) / (a + b - 2)
        expected_mode[0] = np.nan
        self.assertEqual((3,), dist.mode().get_shape())
        self.assertAllClose(expected_mode, dist.mode().eval())
def testBetaEntropy(self):
    """entropy() matches scipy.stats.beta.entropy elementwise."""
    with session.Session():
        a = [1., 2, 3]
        b = [2., 4, 1.2]
        expected_entropy = stats.beta.entropy(a, b)
        dist = beta_lib.Beta(a, b)
        self.assertEqual(dist.entropy().get_shape(), (3,))
        self.assertAllClose(expected_entropy, dist.entropy().eval())
def testBetaSample(self):
    """Samples are nonnegative and statistically match scipy's Beta."""
    with self.test_session():
        a = 1.
        b = 2.
        beta = beta_lib.Beta(a, b)
        n = constant_op.constant(100000)
        samples = beta.sample(n)
        sample_values = samples.eval()
        self.assertEqual(sample_values.shape, (100000,))
        self.assertFalse(np.any(sample_values < 0.0))
        # Kolmogorov-Smirnov statistic against the true CDF must be small.
        self.assertLess(
            stats.kstest(
                # Beta is a univariate distribution.
                sample_values,
                stats.beta(a=1., b=2.).cdf)[0],
            0.01)
        # The standard error of the sample mean is 1 / (sqrt(18 * n))
        self.assertAllClose(
            sample_values.mean(axis=0), stats.beta.mean(a, b), atol=1e-2)
        # np.cov on a 1-D array returns the (scalar) sample variance.
        self.assertAllClose(
            np.cov(sample_values, rowvar=0), stats.beta.var(a, b), atol=1e-1)
# Test that sampling with the same seed twice gives the same results.
def testBetaSampleMultipleTimes(self):
    """Identical graph-level and op-level seeds reproduce the samples."""
    with self.test_session():
        a_val = 1.
        b_val = 2.
        n_val = 100
        # First draw under a fixed graph-level seed.
        random_seed.set_random_seed(654321)
        beta1 = beta_lib.Beta(concentration1=a_val,
                              concentration0=b_val,
                              name="beta1")
        samples1 = beta1.sample(n_val, seed=123456).eval()
        # Re-seed and redraw; the streams must coincide exactly.
        random_seed.set_random_seed(654321)
        beta2 = beta_lib.Beta(concentration1=a_val,
                              concentration0=b_val,
                              name="beta2")
        samples2 = beta2.sample(n_val, seed=123456).eval()
        self.assertAllClose(samples1, samples2)
def testBetaSampleMultidimensional(self):
    """Sampling a batched Beta yields [n] + batch_shape and sane means."""
    with self.test_session():
        a = np.random.rand(3, 2, 2).astype(np.float32)
        b = np.random.rand(3, 2, 2).astype(np.float32)
        beta = beta_lib.Beta(a, b)
        n = constant_op.constant(100000)
        samples = beta.sample(n)
        sample_values = samples.eval()
        self.assertEqual(sample_values.shape, (100000, 3, 2, 2))
        self.assertFalse(np.any(sample_values < 0.0))
        # Check one batch slice's empirical mean against scipy.
        self.assertAllClose(
            sample_values[:, 1, :].mean(axis=0),
            stats.beta.mean(a, b)[1, :],
            atol=1e-1)
def testBetaCdf(self):
    """cdf() matches scipy for random parameters in both float precisions."""
    with self.test_session():
        shape = (30, 40, 50)
        for dt in (np.float32, np.float64):
            a = 10. * np.random.random(shape).astype(dt)
            b = 10. * np.random.random(shape).astype(dt)
            x = np.random.random(shape).astype(dt)
            actual = beta_lib.Beta(a, b).cdf(x).eval()
            # Sanity-check sample points lie in the Beta support [0, 1].
            # np.bool_ replaces the np.bool alias removed in NumPy 1.24.
            self.assertAllEqual(np.ones(shape, dtype=np.bool_), 0. <= x)
            self.assertAllEqual(np.ones(shape, dtype=np.bool_), 1. >= x)
            self.assertAllClose(stats.beta.cdf(x, a, b), actual, rtol=1e-4, atol=0)
def testBetaLogCdf(self):
    """exp(log_cdf()) matches scipy's cdf for random parameters."""
    with self.test_session():
        shape = (30, 40, 50)
        for dt in (np.float32, np.float64):
            a = 10. * np.random.random(shape).astype(dt)
            b = 10. * np.random.random(shape).astype(dt)
            x = np.random.random(shape).astype(dt)
            actual = math_ops.exp(beta_lib.Beta(a, b).log_cdf(x)).eval()
            # Sanity-check sample points lie in the Beta support [0, 1].
            # np.bool_ replaces the np.bool alias removed in NumPy 1.24.
            self.assertAllEqual(np.ones(shape, dtype=np.bool_), 0. <= x)
            self.assertAllEqual(np.ones(shape, dtype=np.bool_), 1. >= x)
            self.assertAllClose(stats.beta.cdf(x, a, b), actual, rtol=1e-4, atol=0)
def testBetaWithSoftplusConcentration(self):
    """Raw (even negative) inputs are mapped through softplus."""
    with self.test_session():
        a, b = -4.2, -9.1
        dist = beta_lib.BetaWithSoftplusConcentration(a, b)
        self.assertAllClose(nn_ops.softplus(a).eval(), dist.concentration1.eval())
        self.assertAllClose(nn_ops.softplus(b).eval(), dist.concentration0.eval())
def testBetaBetaKL(self):
    """KL(Beta1 || Beta2) matches the closed-form analytic expression."""
    with self.test_session() as sess:
        for shape in [(10,), (4, 5)]:
            # Random positive concentrations, bounded away from zero.
            a1 = 6.0 * np.random.random(size=shape) + 1e-4
            b1 = 6.0 * np.random.random(size=shape) + 1e-4
            a2 = 6.0 * np.random.random(size=shape) + 1e-4
            b2 = 6.0 * np.random.random(size=shape) + 1e-4
            # Take inverse softplus of values to test BetaWithSoftplusConcentration
            a1_sp = np.log(np.exp(a1) - 1.0)
            b1_sp = np.log(np.exp(b1) - 1.0)
            a2_sp = np.log(np.exp(a2) - 1.0)
            b2_sp = np.log(np.exp(b2) - 1.0)
            d1 = beta_lib.Beta(concentration1=a1, concentration0=b1)
            d2 = beta_lib.Beta(concentration1=a2, concentration0=b2)
            d1_sp = beta_lib.BetaWithSoftplusConcentration(concentration1=a1_sp,
                                                           concentration0=b1_sp)
            d2_sp = beta_lib.BetaWithSoftplusConcentration(concentration1=a2_sp,
                                                           concentration0=b2_sp)
            # Closed-form KL divergence between two Beta distributions.
            kl_expected = (special.betaln(a2, b2) - special.betaln(a1, b1) +
                           (a1 - a2) * special.digamma(a1) +
                           (b1 - b2) * special.digamma(b1) +
                           (a2 - a1 + b2 - b1) * special.digamma(a1 + b1))
            # Plain and softplus-parameterized pairs must agree.
            for dist1 in [d1, d1_sp]:
                for dist2 in [d2, d2_sp]:
                    kl = kullback_leibler.kl(dist1, dist2)
                    kl_val = sess.run(kl)
                    self.assertEqual(kl.get_shape(), shape)
                    self.assertAllClose(kl_val, kl_expected)
            # Make sure KL(d1||d1) is 0
            kl_same = sess.run(kullback_leibler.kl(d1, d1))
            self.assertAllClose(kl_same, np.zeros_like(kl_expected))
if __name__ == "__main__":
    # Run the Beta distribution test suite when executed directly.
    test.main()
| apache-2.0 |
dqnykamp/sympy | sympy/utilities/decorator.py | 24 | 5885 | """Useful utility decorators. """
from __future__ import print_function, division
import sys
import types
import inspect
from sympy.core.decorators import wraps
from sympy.core.compatibility import class_types, get_function_globals, get_function_name, iterable
def threaded_factory(func, use_add):
    """A factory for ``threaded`` decorators.

    Returns a wrapper that maps ``func`` over matrices, iterable
    containers and (when ``use_add`` is true) the terms of ``Add``
    expressions; anything else is sympified and passed through.
    """
    from sympy.core import sympify
    from sympy.matrices import Matrix

    @wraps(func)
    def threaded_func(expr, *args, **kwargs):
        if isinstance(expr, Matrix):
            # Apply elementwise over the matrix entries.
            return expr.applyfunc(lambda f: func(f, *args, **kwargs))
        elif iterable(expr):
            try:
                return expr.__class__([func(f, *args, **kwargs) for f in expr])
            except TypeError:
                # Container cannot be rebuilt from a list of mapped
                # elements; return it unchanged.
                return expr
        else:
            expr = sympify(expr)
            if use_add and expr.is_Add:
                return expr.__class__(*[ func(f, *args, **kwargs) for f in expr.args ])
            elif expr.is_Relational:
                # Thread over both sides of a relation.
                return expr.__class__(func(expr.lhs, *args, **kwargs),
                                      func(expr.rhs, *args, **kwargs))
            else:
                return func(expr, *args, **kwargs)

    return threaded_func
def threaded(func):
    """Apply ``func`` to sub--elements of an object, including :class:`Add`.

    This decorator is intended to make it uniformly possible to apply a
    function to all elements of composite objects, e.g. matrices, lists,
    tuples and other iterable containers, or just expressions.

    This version of :func:`threaded` decorator allows threading over
    elements of :class:`Add` class. If this behavior is not desirable
    use :func:`xthreaded` decorator.

    Functions using this decorator must have the following signature::

        @threaded
        def function(expr, *args, **kwargs):

    """
    # Delegate to the factory with Add-threading enabled.
    return threaded_factory(func, True)
def xthreaded(func):
    """Apply ``func`` to sub--elements of an object, excluding :class:`Add`.

    This decorator is intended to make it uniformly possible to apply a
    function to all elements of composite objects, e.g. matrices, lists,
    tuples and other iterable containers, or just expressions.

    This version of :func:`threaded` decorator disallows threading over
    elements of :class:`Add` class. If this behavior is not desirable
    use :func:`threaded` decorator.

    Functions using this decorator must have the following signature::

        @xthreaded
        def function(expr, *args, **kwargs):

    """
    # Delegate to the factory with Add-threading disabled.
    return threaded_factory(func, False)
def conserve_mpmath_dps(func):
    """Restore ``mpmath.mp.dps`` to its prior value once ``func`` returns.

    The wrapped function may change mpmath's working precision; this
    decorator guarantees the change does not leak out, even on error.
    """
    import functools
    from sympy import mpmath

    @functools.wraps(func)
    def func_wrapper():
        saved_dps = mpmath.mp.dps
        try:
            func()
        finally:
            # Always restore the precision, even if func() raised.
            mpmath.mp.dps = saved_dps

    return func_wrapper
class no_attrs_in_subclass(object):
    """Don't 'inherit' certain attributes from a base class

    >>> from sympy.utilities.decorator import no_attrs_in_subclass

    >>> class A(object):
    ...     x = 'test'

    >>> A.x = no_attrs_in_subclass(A, A.x)

    >>> class B(A):
    ...     pass

    >>> hasattr(A, 'x')
    True
    >>> hasattr(B, 'x')
    False
    """
    def __init__(self, cls, f):
        # cls: the class that legitimately owns the attribute.
        # f: the wrapped attribute value (may itself be a descriptor).
        self.cls = cls
        self.f = f

    def __get__(self, instance, owner=None):
        # Serve the attribute only when looked up on the owning class;
        # lookups from subclasses raise AttributeError, hiding it.
        if owner == self.cls:
            if hasattr(self.f, '__get__'):
                # Delegate to the wrapped descriptor (e.g. a function
                # becoming a bound method).
                return self.f.__get__(instance, owner)
            return self.f
        raise AttributeError
def doctest_depends_on(exe=None, modules=None, disable_viewers=None):
    """Adds metadata about the dependencies which need to be met for
    doctesting the docstrings of the decorated objects.

    The metadata is stored on the decorated object as the
    ``_doctest_depends_on`` attribute.
    """
    # Record up front whether pyglet is among the required modules.
    pyglet = modules is not None and 'pyglet' in modules

    def depends_on_deco(fn):
        fn._doctest_depends_on = dict(exe=exe, modules=modules,
                                      disable_viewers=disable_viewers,
                                      pyglet=pyglet)

        # once we drop py2.5 support and use class decorators this evaluates
        # to True
        if inspect.isclass(fn):
            # Bug fix: this previously assigned to the misspelled attribute
            # ``_doctest_depdends_on``, so the no-inherit wrapper was never
            # actually installed and subclasses wrongly saw the metadata.
            fn._doctest_depends_on = no_attrs_in_subclass(
                fn, fn._doctest_depends_on)
        return fn

    return depends_on_deco
def public(obj):
    """
    Append ``obj``'s name to global ``__all__`` variable (call site).

    By using this decorator on functions or classes you achieve the same goal
    as by filling ``__all__`` variables manually, you just don't have to repeat
    your self (object's name). You also know if object is public at definition
    site, not at some random location (where ``__all__`` was set).

    Note that in multiple decorator setup, in almost all cases, ``@public``
    decorator must be applied before any other decorators, because it relies
    on the pointer to object's global namespace. If you apply other decorators
    first, ``@public`` may end up modifying wrong namespace.

    Example::

        >>> from sympy.utilities.decorator import public

        >>> __all__
        Traceback (most recent call last):
        ...
        NameError: name '__all__' is not defined

        >>> @public
        ... def some_function():
        ...     pass

        >>> __all__
        ['some_function']

    """
    if isinstance(obj, types.FunctionType):
        # Plain functions carry their defining module's globals directly.
        ns = get_function_globals(obj)
        name = get_function_name(obj)
    elif isinstance(obj, (type(type), class_types)):
        # Classes (including old-style on Python 2): reach the defining
        # module's namespace via sys.modules.
        ns = sys.modules[obj.__module__].__dict__
        name = obj.__name__
    else:
        raise TypeError("expected a function or a class, got %s" % obj)

    if "__all__" not in ns:
        # First public object in this module: create __all__.
        ns["__all__"] = [name]
    else:
        ns["__all__"].append(name)

    return obj
| bsd-3-clause |
John-Lin/ryu | ryu/tests/unit/ofproto/test_nx_flow_spec.py | 27 | 3578 | # Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import ryu.ofproto.ofproto_v1_3_parser as ofpp
class Test_FlowSpec(unittest.TestCase):
    """Round-trip (serialize/parse) tests for NX flow-spec classes."""

    def _roundtrip(self, spec, on_wire):
        # Serialization must reproduce the reference wire format exactly.
        self.assertEqual(on_wire, spec.serialize())
        # Parsing the wire bytes must yield an equivalent object and
        # consume the entire buffer.
        parsed, rest = ofpp._NXFlowSpec.parse(on_wire)
        self.assertEqual(spec.to_jsondict(), parsed.to_jsondict())
        self.assertEqual(str(spec), str(parsed))
        self.assertEqual(b'', rest)

    def test_flowspec_src_0_dst_0(self):
        spec = ofpp.NXFlowSpecMatch(src=('in_port', 0),
                                    dst=('in_port', 0),
                                    n_bits=16)
        wire = (b'\x00\x10'
                b'\x80\x00\x00\x04\x00\x00'
                b'\x80\x00\x00\x04\x00\x00')
        self._roundtrip(spec, wire)

    def test_flowspec_src_1_dst_0(self):
        spec = ofpp.NXFlowSpecMatch(src=99,
                                    dst=('in_port', 0),
                                    n_bits=16)
        wire = (b'\x20\x10'
                b'\x00\x63'
                b'\x80\x00\x00\x04\x00\x00')
        self._roundtrip(spec, wire)

    def test_flowspec_src_0_dst_1(self):
        spec = ofpp.NXFlowSpecLoad(src=('in_port', 0),
                                   dst=('in_port', 0),
                                   n_bits=16)
        wire = (b'\x08\x10'
                b'\x80\x00\x00\x04\x00\x00'
                b'\x80\x00\x00\x04\x00\x00')
        self._roundtrip(spec, wire)

    def test_flowspec_src_1_dst_1(self):
        spec = ofpp.NXFlowSpecLoad(src=99,
                                   dst=('in_port', 0),
                                   n_bits=16)
        wire = (b'\x28\x10'
                b'\x00\x63'
                b'\x80\x00\x00\x04\x00\x00')
        self._roundtrip(spec, wire)

    def test_flowspec_src_0_dst_2(self):
        spec = ofpp.NXFlowSpecOutput(src=('in_port', 0),
                                     dst='',
                                     n_bits=16)
        wire = (b'\x10\x10'
                b'\x80\x00\x00\x04\x00\x00')
        self._roundtrip(spec, wire)
| apache-2.0 |
thomasballinger/lazyload | test.py | 1 | 2249 | import sys
from types import ModuleType
import unittest
from lazyload import make_lazy, _LazyModuleMarker
class LazyLoadTestCase(unittest.TestCase):
    """Tests for ``make_lazy`` and its interaction with ``sys.modules``."""

    @classmethod
    def setUpClass(cls):
        # A real stdlib module name, so a normal __import__ works too.
        cls.modname = 'abc'

    def tearDown(self):
        # Keep tests isolated: drop any (lazy or real) entry we created.
        sys.modules.pop(self.modname, None)  # remove from the modules.

    def test_adds_to_modules(self):
        """
        Tests that `make_lazy` adds an entry to `sys.modules`.
        """
        make_lazy(self.modname)
        abc = sys.modules.get(self.modname)
        self.assertIsNotNone(
            abc,
            'make_lazy failed to add {mod} to the system modules'.format(
                mod=self.modname,
            ),
        )
        self.assertIsInstance(
            abc, ModuleType, '{mod} is not a module'.format(mod=self.modname),
        )

    def test_is_lazy_module(self):
        """
        Tests that `make_lazy` adds lazy module objects
        instead of strict module objects.
        """
        make_lazy(self.modname)
        mod = __import__(self.modname)
        self.assertIsInstance(mod, _LazyModuleMarker)

    def test_no_leaking_attributes(self):
        """
        Tests that consumers of the objects added by `make_lazy`
        cannot accidently get the attributes off of the proxy.
        """
        # Import normally first: the module must NOT look lazy yet.
        mod = __import__(self.modname)
        self.assertNotIsInstance(
            mod,
            _LazyModuleMarker,
            '{mod} should not be lazy yet'.format(mod=self.modname),
        )
        self.assertFalse(
            hasattr(mod, '__mro__'),
            'The module object actually has an __mro__, pick another'
            ' attribute to test',
        )
        # Now replace it with a lazy proxy and re-check both properties.
        make_lazy(self.modname)
        mod = __import__(self.modname)
        self.assertIsInstance(
            mod,
            _LazyModuleMarker,
            '{mod} should now be lazy'.format(mod=self.modname),
        )
        self.assertFalse(
            hasattr(mod, '__mro__'),
            '{mod} should not leak the abstraction by exposing __mro__'.format(
                mod=self.modname,
            ),
        )
if __name__ == '__main__':
    # Run this suite directly, bypassing test discovery.
    unittest.TextTestRunner().run(
        unittest.defaultTestLoader.loadTestsFromTestCase(LazyLoadTestCase),
    )
| mit |
gvallarelli/inasafe | safe/common/numerics.py | 3 | 8722 | """**Numerical tools**
"""
import numpy
from utilities import verify
def ensure_numeric(A, typecode=None):
    """Ensure that sequence is a numeric array.

    Args:
        * A: Sequence. If A is already a numeric array it will be returned
                        unaltered.
                        If not, an attempt is made to convert it to a
                        numeric array.
        * A: Scalar. Return 0-dimensional array containing that value. Note
                     that a 0-dim array DOES NOT HAVE A LENGTH UNDER numpy.
        * A: String. Raises TypeError (numpy can't handle strings sensibly).
        * typecode: numeric type. If specified, use this in the conversion.
                    If not, let the numeric package decide.

    Raises:
        TypeError: if A is a string (or bytes).

    Note:
        The conversion copies only when required; this is necessary as
        a plain array(A) can cause memory overflow for large inputs.
    """
    # Reject text explicitly; basestring only exists on Python 2.
    try:
        string_types = basestring
    except NameError:
        string_types = (str, bytes)
    if isinstance(A, string_types):
        msg = 'Sorry, cannot handle strings in ensure_numeric()'
        # TypeError resolves the original FIXME: it is the appropriate
        # exception for a wrong input type and is still caught by any
        # caller handling the former generic Exception.
        raise TypeError(msg)

    if typecode is None:
        if isinstance(A, numpy.ndarray):
            return A
        return numpy.array(A)
    # asarray copies only when needed; numpy.array(..., copy=False)
    # raises under NumPy >= 2.0 whenever a copy is unavoidable.
    return numpy.asarray(A, dtype=typecode)
def nanallclose(x, y, rtol=1.0e-5, atol=1.0e-8):
    """Numpy allclose function which allows NaN.

    Args:
        * x, y: Either scalars or numpy arrays

    Returns:
        * True or False

    Note:
        Returns True if all non-nan elements pass, and NaNs appear in
        the same positions in both inputs.
    """
    xn = numpy.isnan(x)
    yn = numpy.isnan(y)
    if numpy.any(xn != yn):
        # Presence of NaNs is not the same in x and y
        return False

    if numpy.all(xn):
        # Everything is NaN.
        # This will also take care of x and y being NaN scalars
        return True

    # Filter NaN's out.  Boolean masks must be inverted with ~ :
    # unary minus on boolean arrays (the old idiom) was removed
    # from NumPy and raises TypeError.
    if numpy.any(xn):
        x = x[~xn]
        y = y[~yn]

    # Compare non NaN's and return
    return numpy.allclose(x, y, rtol=rtol, atol=atol)
def normal_cdf(x, mu=0, sigma=1):
    """Cumulative distribution function of the normal distribution.

    Args:
        * x: scalar or array of real numbers
        * mu: Mean value. Default 0
        * sigma: Standard deviation. Default 1

    Returns:
        * An approximation of the normal CDF, computed via the error
          function as (1 + erf((x - mu) / (sigma * sqrt(2)))) / 2.

    Source: http://en.wikipedia.org/wiki/Normal_distribution
    """
    z = (x - mu) / (sigma * numpy.sqrt(2))
    return (1 + erf(z)) / 2
def lognormal_cdf(x, median=1, sigma=1):
    """Cumulative Log Normal Distribution Function

    Args:
        * x: scalar or array of real numbers (must be positive)
        * median: Median of the distribution, i.e. exp(mean of log(x)).
          Default 1
        * sigma: Log normal standard deviation. Default 1

    Returns:
        * An approximation of the cdf of the log normal distribution,
          obtained as normal_cdf(log(x)) with mean log(median).

    Source: http://en.wikipedia.org/wiki/Log-normal_distribution
    """
    return normal_cdf(numpy.log(x), mu=numpy.log(median), sigma=sigma)
def erf(z):
    """Approximation to the Gauss error function.

        erf(z) = 2 / sqrt(pi) * integral(exp(-t*t), t = 0..z)

    Accepts a scalar or an array; returns the same kind.  Uses the
    Chebyshev fit from Numerical Recipes 6.2 with fractional error
    less than 1.2e-7 (subject to catastrophic cancellation for z
    very close to 0).

    Source:
        http://www.cs.princeton.edu/introcs/21function/ErrorFunction.java.html
    """
    # Detect scalar input; compute on a 1-element array and unwrap at the end.
    try:
        len(z)
    except TypeError:
        scalar = True
        z = [z]
    else:
        scalar = False
    z = numpy.array(z)

    t = 1.0 / (1.0 + 0.5 * numpy.abs(z))

    # Chebyshev coefficients, innermost term first (Numerical Recipes 6.2).
    coefficients = (0.17087277, -0.82215223, 1.48851587, -1.13520398,
                    0.27886807, -0.18628806, 0.09678418, 0.37409196,
                    1.00002368)
    # Evaluate the nested polynomial by Horner's method.
    poly = coefficients[0]
    for c in coefficients[1:]:
        poly = c + t * poly

    ans = 1 - t * numpy.exp(-z * z - 1.26551223 + t * poly)

    # erf is an odd function: erf(-z) = -erf(z).
    neg = (z < 0.0)
    ans[neg] = -ans[neg]

    return ans[0] if scalar else ans
def axes2points(x, y):
    """Generate all combinations of grid point coordinates from x and y axes.

    Args:
        * x: x coordinates (array)
        * y: y coordinates (array)

    Returns:
        * Nx2 array of coordinates for every grid point defined by the
          two axes.  The x coordinate varies fastest so that, flattened
          in row-major ('C') order, a 2D array A lines up with these
          points (A.flat[:] or A.reshape(-1)).

    Note:
        Example

        x = [1, 2, 3]
        y = [10, 20]

        P = [[1, 10],
             [2, 10],
             [3, 10],
             [1, 20],
             [2, 20],
             [3, 20]]
    """
    # Reverse y so coordinates start at the bottom of the array.
    y = numpy.flipud(y)

    # x repeats once per y value (fastest varying) ...
    xs = numpy.kron(numpy.ones(len(y)), x)
    # ... while each y value repeats once per x value (slowest varying).
    ys = numpy.kron(y, numpy.ones(len(x)))

    n = len(xs)
    verify(len(ys) == n)

    # Stack the two coordinate vectors as the columns of an Nx2 array.
    return numpy.concatenate((numpy.reshape(xs, (n, 1)),
                              numpy.reshape(ys, (n, 1))), axis=1)
def grid2points(A, x, y):
    """Convert grid data to point data.

    Args:
        * A: Array of pixel values
        * x: Longitudes corresponding to columns in A (left->right)
        * y: Latitudes corresponding to rows in A (top->bottom)

    Returns:
        * points: Nx2 array of point coordinates
        * values: N array of point values
    """
    # One (x, y) pair per element of A, in the same row-major order.
    points = axes2points(x, y)

    # Flat row-major view of A lines up with the point ordering above.
    values = A.reshape(-1)

    return points, values
def geotransform2axes(G, nx, ny):
    """Convert geotransform to coordinate axes

    Args:
        * G: GDAL geotransform (6-tuple).
             (top left x, w-e pixel resolution, rotation,
              top left y, rotation, n-s pixel resolution).
        * nx: Number of cells in the w-e direction
        * ny: Number of cells in the n-s direction

    Returns:
        * Two vectors (longitudes and latitudes) representing the grid
          defined by the geotransform.

          The values are offset by half a pixel size to correspond to
          pixel registration.

          I.e. If the grid origin (top left corner) is (105, 10) and the
          resolution is 1 degrees in each direction, then the vectors will
          take the form

          longitudes = [100.5, 101.5, ..., 109.5]
          latitudes = [0.5, 1.5, ..., 9.5]
    """
    lon_ul = float(G[0])  # Longitude of upper left corner
    lat_ul = float(G[3])  # Latitude of upper left corner
    dx = float(G[1])      # Longitudinal resolution
    dy = - float(G[5])    # Latitudinal resolution (always(?) negative)

    # Both resolutions must be strictly positive after the sign flip.
    verify(dx > 0)
    verify(dy > 0)

    # Coordinates of lower left corner
    lon_ll = lon_ul
    lat_ll = lat_ul - ny * dy

    # Coordinates of upper right corner
    lon_ur = lon_ul + nx * dx

    # Define pixel centers along each directions
    # This is to achieve pixel registration rather
    # than gridline registration
    dx2 = dx / 2
    dy2 = dy / 2

    # Define longitudes and latitudes for each axes
    x = numpy.linspace(lon_ll + dx2,
                       lon_ur - dx2, nx)
    y = numpy.linspace(lat_ll + dy2,
                       lat_ul - dy2, ny)

    # Return
    return x, y
| gpl-3.0 |
SoCo/SoCo | doc/conf.py | 2 | 12634 | #!/usr/bin/env python3
#
# soco documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 14 08:03:37 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
sys.path.insert(0, os.path.abspath('..'))
import soco
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.extlinks',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'SoCo'
copyright = '2015-2021, The SoCo Team'
# Bug fix: the author string previously contained a stray leading
# backtick ("`The SoCo Team").
author = "The SoCo Team"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = soco.__version__
# The full version, including alpha/beta/rc tags.
release = soco.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['soco.', 'soco.music_services.']
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = True
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Allow auto links into the Python and Requests docs
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'requests': ('https://requests.readthedocs.io/en/master/', None)
}
# Shortcuts to Github Issues etc. Use them like this:
# :issue:`123` (which will generate a link to issue 123)
extlinks = {
'issue': ('https://github.com/SoCo/SoCo/issues/%s', '#'),
'PR': ('https://github.com/SoCo/SoCo/pull/%s', '#')
}
# Document members by default, and in source order. This allows the stub files
# in the api directory to be much shorter.
autodoc_default_flags = ['members']
autodoc_member_order = 'bysource'
# Concatenate the class and __init__ docstrings
autoclass_content = 'both'
# Nicer inheritance graphs for RTD theme. NB the image map does not rescale
# properly, so we have had to add some javascript to handle it. See
# _templates and _static
inheritance_node_attrs = dict(
fontsize=14, height=0.75, color='dodgerblue', style='rounded',
)
inheritance_graph_attrs = dict(
rankdir="LR", size='""',
)
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'socodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'soco.tex', 'soco Documentation',
'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'soco', 'soco Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'soco', 'soco Documentation',
author, 'soco', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to
# save visual space.
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
| mit |
zheguang/voltdb | tests/scripts/examples/sql_coverage/index-count1-schema.py | 2 | 1555 | #!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2014 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# NOTE(review): this file is a bare dict expression with no imports of its own;
# presumably it is exec'd/eval'd by the sql_coverage test harness, which must
# supply `FastSerializer` in the evaluation namespace -- confirm against the
# harness driver before reusing this file standalone.
{
    "P_COUNT": {
        # (column name, VoltDB wire type) for each column.
        "columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
                    ("POINTS", FastSerializer.VOLTTYPE_INTEGER)),
        # Partitioned table: rows are distributed on the ID column.
        "partitions": ("ID",),
        # Secondary index on POINTS (exercised by index-count queries).
        "indexes": ("POINTS",)
    },
    "R_COUNT": {
        "columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
                    ("POINTS", FastSerializer.VOLTTYPE_INTEGER)),
        # Replicated table: empty partition tuple.
        "partitions": (),
        "indexes": ("POINTS",)
    }
}
| agpl-3.0 |
D-K-E/cltk | src/cltk/ner/processes.py | 4 | 4577 | """This module holds the ``Process``es for NER."""
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, List
from boltons.cacheutils import cachedproperty
from cltk.core.data_types import Doc, Process
from cltk.ner.ner import tag_ner
@dataclass
class NERProcess(Process):
    """To be inherited for each language's NER declarations.

    >>> from cltk.core.data_types import Doc
    >>> from cltk.ner.processes import NERProcess
    >>> from cltk.core.data_types import Process
    >>> issubclass(NERProcess, Process)
    True
    >>> emb_proc = NERProcess()
    """

    # ISO code of the language to tag; subclasses override this default.
    # NOTE(review): default is None despite the `str` annotation.
    language: str = None

    @cachedproperty
    def algorithm(self):
        # The shared tagging callable; cached so repeated runs reuse it.
        return tag_ner

    def run(self, input_doc: Doc) -> Doc:
        """Tag the Doc's tokens with named-entity values and return a new Doc.

        The input Doc is deep-copied so the caller's object is never mutated.
        """
        output_doc = deepcopy(input_doc)
        ner_obj = self.algorithm
        entity_values = ner_obj(
            iso_code=self.language, input_tokens=input_doc.tokens
        )  # type: List[Any]
        # One entity value per word, aligned positionally with the tokens
        # passed to the tagger.
        for index, word_obj in enumerate(output_doc.words):
            word_obj.named_entity = entity_values[index]
            output_doc.words[index] = word_obj
        return output_doc
@dataclass
class GreekNERProcess(NERProcess):
    """The default Greek NER algorithm.

    .. todo::
       Update doctest w/ production model

    >>> from cltk.core.data_types import Doc, Word
    >>> from cltk.languages.example_texts import get_example_text
    >>> from boltons.strutils import split_punct_ws
    >>> text = "ἐπὶ δ᾽ οὖν τοῖς πρώτοις τοῖσδε Περικλῆς ὁ Ξανθίππου ᾑρέθη λέγειν. καὶ ἐπειδὴ καιρὸς ἐλάμβανε, προελθὼν ἀπὸ τοῦ σήματος ἐπὶ βῆμα ὑψηλὸν πεποιημένον, ὅπως ἀκούοιτο ὡς ἐπὶ πλεῖστον τοῦ ὁμίλου, ἔλεγε τοιάδε."
    >>> tokens = [Word(string=token) for token in split_punct_ws(text)]
    >>> a_process = GreekNERProcess()
    >>> output_doc = a_process.run(Doc(raw=text, words=tokens))
    >>> output_doc.words[7].string
    'ὁ'
    >>> output_doc.words[7].named_entity
    False
    >>> output_doc.words[8].string
    'Ξανθίππου'
    >>> output_doc.words[8].named_entity
    False
    """

    # ISO 639-3 code consumed by tag_ner via NERProcess.run().
    language: str = "grc"
    description: str = "Default NER for Greek."
@dataclass
class OldEnglishNERProcess(NERProcess):
    """The default OE NER algorithm.

    .. todo::
       Update doctest w/ production model

    >>> from cltk.core.data_types import Doc, Word
    >>> from cltk.languages.example_texts import get_example_text
    >>> from boltons.strutils import split_punct_ws
    >>> text = get_example_text(iso_code="ang")
    >>> tokens = [Word(string=token) for token in split_punct_ws(text)]
    >>> a_process = OldEnglishNERProcess()
    >>> output_doc = a_process.run(Doc(raw=text, words=tokens))
    >>> output_doc.words[2].string, output_doc.words[2].named_entity
    ('Gardena', 'LOCATION')
    """

    # ISO 639-3 code consumed by tag_ner via NERProcess.run().
    language: str = "ang"
    description: str = "Default NER for Old English."
@dataclass
class LatinNERProcess(NERProcess):
    """The default Latin NER algorithm.

    >>> from cltk.core.data_types import Doc, Word
    >>> from cltk.languages.example_texts import get_example_text
    >>> from boltons.strutils import split_punct_ws
    >>> tokens = [Word(string=token) for token in split_punct_ws(get_example_text("lat"))]
    >>> a_process = LatinNERProcess()
    >>> output_doc = a_process.run(Doc(raw=get_example_text("lat"), words=tokens))
    >>> [word.named_entity for word in output_doc.words][:20]
    ['LOCATION', False, False, False, False, False, False, False, False, False, 'LOCATION', False, 'LOCATION', False, False, False, False, 'LOCATION', False, 'LOCATION']
    """

    # ISO 639-3 code consumed by tag_ner via NERProcess.run().
    language: str = "lat"
    description: str = "Default NER for Latin."
@dataclass
class OldFrenchNERProcess(NERProcess):
    """The default Old French NER algorithm.

    >>> from cltk.core.data_types import Doc, Word
    >>> from cltk.languages.example_texts import get_example_text
    >>> from boltons.strutils import split_punct_ws
    >>> tokens = [Word(string=token) for token in split_punct_ws(get_example_text("fro"))]
    >>> a_process = OldFrenchNERProcess()
    >>> output_doc = a_process.run(Doc(raw=get_example_text("fro"), words=tokens))
    >>> output_doc.words[30].string
    'Bretaigne'
    >>> output_doc.words[30].named_entity
    'LOC'
    >>> output_doc.words[31].named_entity
    False
    """

    # ISO 639-3 code consumed by tag_ner via NERProcess.run().
    language: str = "fro"
    description: str = "Default NER for Old French."
| mit |
jth/tez | tez-tools/swimlanes/swimlane.py | 8 | 8470 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys,math,os.path
import StringIO
from amlogparser import AMLog
from getopt import getopt
class ColourManager(object):
    """Hands out colours from a fixed palette, cycling when exhausted.

    The palette was chosen so that black text remains readable on top of
    every colour.  Note that ``next()`` advances *before* indexing, so the
    first call returns the second palette entry (historical behaviour,
    preserved).
    """

    def __init__(self):
        # text-printable colours
        self.colours = [
            '#E4F5FC', '#62C2A2', '#E2F2D8', '#A9DDB4', '#E2F6E1', '#D8DAD7',
            '#BBBDBA', '#FEE6CE', '#FFCF9F', '#FDAE69', '#FDE4DD', '#EDE6F2',
            '#A5BDDB', '#FDE1EE', '#D8B9D8', '#D7DCEC', '#BABDDA', '#FDC5BF',
            '#FC9FB3', '#FDE1D2', '#FBBB9E', '#DBEF9F', '#AADD8E', '#81CDBB',
            '#C7EDE8', '#96D9C8', '#E3EBF4', '#BAD3E5', '#9DBDD9', '#8996C8',
            '#CEEAC6', '#76CCC6', '#C7E9BE', '#9ED99C', '#71C572', '#EFF1EE',
            '#949693', '#FD8D3D', '#FFF7ED', '#FED3AE', '#FEBB8F', '#FCE9CA',
            '#FED49B', '#FBBC85', '#FB8E58', '#FFEEE8', '#D0D0E8', '#76A9CE',
            '#FDFFFC', '#E9E2EE', '#64A8D2', '#FAF7FC', '#F6ECF2', '#F8E7F0',
            '#C994C6', '#E063B1', '#ECEDF7', '#DDD9EB', '#9B9BCA', '#FEDFDE',
            '#F8689F', '#FC9273', '#FC6948', '#F6FDB6', '#78C67B', '#EBF9B0',
            '#C5E9B0', '#40B7C7', '#FDF7BA', '#FFE392', '#FFC34C', '#FF982A']
        self.i = 0

    def next(self):
        """Advance the cursor and return the colour it lands on."""
        self.i = self.i + 1
        palette = self.colours
        return palette[self.i % len(palette)]
def attempts(tree):
    """Yield one (vertex, name, container, start, finish) tuple for every
    task attempt in every DAG of the parsed log tree."""
    for dag in tree.dags:
        for attempt in dag.attempts():
            yield (attempt.vertex, attempt.name, attempt.container,
                   attempt.start, attempt.finish)
def attrs(args):
    """Render a dict as SVG/XML attribute text.

    Keys have ``_`` mapped to ``-`` (Python identifiers -> CSS/SVG attribute
    names).  String values are wrapped in single quotes; everything else is
    rendered via str().  Each attribute is followed by a trailing space, and
    an empty dict yields the empty string (behaviour relied on by callers
    that concatenate the result into a tag).
    """
    parts = []
    for key, value in args.items():
        key = key.replace("_", "-")  # css
        # isinstance (not `type(v) is str`) so str subclasses are quoted too.
        if isinstance(value, str):
            parts.append("%s='%s' " % (key, value))
        else:
            parts.append("%s=%s " % (key, str(value)))
    return "".join(parts)
class SVGHelper(object):
    """Accumulates an SVG document as text in a shared StringIO buffer.

    The root helper (parent=None) creates the buffer and writes the XML
    prolog; nested helpers reuse the parent's buffer so inner <svg> elements
    are appended to the same document.  NOTE(review): uses the Python 2
    ``StringIO`` module, consistent with the rest of this file.
    """

    def __init__(self, w, h, parent=None):
        self.width = w
        self.height = h
        self.parent = parent
        if(not parent):
            # Root document: own the buffer and emit the XML/DOCTYPE prolog.
            self.lines = StringIO.StringIO()
            self.write("""<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
""")
        else:
            # Nested element: share the parent's buffer.
            self.lines = parent.lines
        self.write("""<svg xmlns="http://www.w3.org/2000/svg" version="1.1" xmlns:xlink="http://www.w3.org/1999/xlink" height="%d" width="%d">""" % (h, w))
        # jQuery is embedded so the generated SVG can be scripted in-browser.
        self.write("""
<script type="text/ecmascript" xlink:href="http://code.jquery.com/jquery-2.1.1.min.js" />
""")

    def line(self, x1, y1, x2, y2, style="stroke: #000", **kwargs):
        """Emit a <line>; extra keyword args become SVG attributes via attrs()."""
        self.write("""<line x1="%d" y1="%d" x2="%d" y2="%d" style="%s" %s />""" % (x1, y1, x2, y2, style, attrs(kwargs)))

    def rect(self, left, top, right, bottom, style="", title="", link=None):
        """Emit a <rect> with a tooltip <title>, optionally wrapped in a link."""
        w = (right-left)
        h = (bottom-top)
        if link:
            self.write("<a xlink:href='%s'>" % link)
        self.write("""<rect x="%d" y="%d" width="%d" height="%d" style="%s"><title>%s</title></rect>""" % (left, top, w, h, style, title))
        if link:
            self.write("</a>")

    def text(self, x, y, text, style="", transform=""):
        """Emit a <text> element at (x, y)."""
        self.write("""<text x="%d" y="%d" style="%s" transform="%s">%s</text>""" % (x, y, style, transform, text))

    def link(self, x, y, text, link, style=""):
        """Emit a hyperlinked <text> element."""
        self.write("<a xlink:href='%s'>" % link)
        self.text(x, y, text, style)
        self.write("</a>")

    def write(self, s):
        # All output funnels through the shared buffer.
        self.lines.write(s)

    def flush(self):
        """Close this element (and, recursively, its parents) and return the markup."""
        self.write("</svg>")
        if(self.parent):
            self.parent.flush()
        return self.lines.getvalue()
def usage():
    """Write the command-line help text to stderr (returns None)."""
    help_text = """
usage: swimlane.py [-t ms-per-pixel] [-o outputfile] [-f redline-fraction] <log-file>
Input files for this tool can be prepared by "yarn logs -applicationId <application_...> | grep HISTORY".
"""
    sys.stderr.write(help_text)
def main(argv):
    """Render a swimlane SVG of container/attempt activity from an AM log.

    argv: command-line arguments (after the program name); see usage().
    Writes the SVG to stdout or to the -o file and returns None (or the
    result of usage() when no log file is given).

    NOTE(review): this file is Python 2 only (``xrange``, ``dict.has_key``,
    integer division semantics in the coordinate math).
    """
    (opts, args) = getopt(argv, "o:t:f:")
    out = sys.stdout
    ticks = -1 # precision of 1/tick
    fraction = -1
    for k,v in opts:
        if(k == "-o"):
            out = open(v, "w")
        if(k == "-t"):
            ticks = int(v)
        if(k == "-f"):
            if(int(v) < 100):
                fraction = int(v)/100.0
    if len(args) == 0:
        return usage()
    log = AMLog(args[0]).structure()
    # One horizontal lane per container, ordered by container start time.
    lanes = [c.name for c in sorted(log.containers.values(), key=lambda a: a.start)]
    marginTop = 128
    marginRight = 100;
    laneSize = 24
    y = len(lanes)*laneSize
    items = attempts(log)
    # Latest finish time across all attempts fixes the x extent.
    maxx = max([a[4] for a in items])
    if ticks == -1:
        # Auto-scale: cap at 1000 ms/pixel, aim for ~2048px of timeline.
        ticks = min(1000, (maxx - log.zero)/2048)
    # Map a wall-clock timestamp to an x pixel offset.
    xdomain = lambda t : (t - log.zero)/ticks
    x = xdomain(maxx)
    svg = SVGHelper(x+2*marginRight+256, y+2*marginTop)
    a = marginTop
    svg.text(x/2, 32, log.name, style="font-size: 32px; text-anchor: middle")
    # Lane index for each container name.
    containerMap = dict(zip(list(lanes), xrange(len(lanes))))
    svg.text(marginRight - 16, marginTop - 32, "Container ID", "text-anchor:end; font-size: 16px;")
    # draw a grid
    for l in lanes:
        a += laneSize
        svg.text(marginRight - 4, a, l, "text-anchor:end; font-size: 16px;")
        svg.line(marginRight, a, marginRight+x, a, "stroke: #ccc")
    # Vertical time gridlines every 10 ticks, plus one at the right edge.
    for x1 in set(range(0, x, 10*ticks)) | set([x]):
        svg.text(marginRight+x1, marginTop-laneSize/2, "%0.2f s" % ((x1 * ticks)/1000), "text-anchor: middle; font-size: 12px")
        svg.line(marginRight+x1, marginTop-laneSize/2, marginRight+x1, marginTop+y, "stroke: #ddd")
    # Chart border.
    svg.line(marginRight, marginTop, marginRight+x, marginTop)
    svg.line(marginRight, y+marginTop, marginRight+x, y+marginTop)
    svg.line(marginRight, marginTop, marginRight, y+marginTop)
    svg.line(marginRight+x, marginTop, marginRight+x, y+marginTop)
    colourman = ColourManager()
    # Container lifetimes: green tick at start; green/red tick at stop
    # depending on exit status; grey band over the active span.
    for c in log.containers.values():
        y1 = marginTop+(containerMap[c.name]*laneSize)
        x1 = marginRight+xdomain(c.start)
        svg.line(x1, y1, x1, y1 + laneSize, style="stroke: green")
        if c.stop > c.start:
            x2 = marginRight+xdomain(c.stop)
            if (c.status == 0):
                svg.line(x2, y1, x2, y1 + laneSize, style="stroke: green")
            else:
                svg.line(x2, y1, x2, y1 + laneSize, style="stroke: red")
                svg.text(x2, y1, "%d" % (c.status), style="text-anchor: right; font-size: 12px; stroke: red", transform="rotate(90, %d, %d)" % (x2, y1))
            svg.rect(x1, y1, x2, y1 + laneSize, style="fill: #ccc; opacity: 0.3")
        elif c.stop == -1:
            # Still-running container: band extends to the right edge.
            x2 = marginRight+x
            svg.rect(x1, y1, x2, y1 + laneSize, style="fill: #ccc; opacity: 0.3")
    for dag in log.dags:
        # DAG span markers: dashed verticals at start/finish, labelled bar on top.
        x1 = marginRight+xdomain(dag.start)
        svg.line(x1, marginTop-24, x1, marginTop+y, "stroke: black;", stroke_dasharray="8,4")
        x2 = marginRight+xdomain(dag.finish)
        svg.line(x2, marginTop-24, x2, marginTop+y, "stroke: black;", stroke_dasharray="8,4")
        svg.line(x1, marginTop-24, x2, marginTop-24, "stroke: black")
        svg.text((x1+x2)/2, marginTop-32, "%s (%0.1f s)" % (dag.name, (dag.finish-dag.start)/1000.0) , "text-anchor: middle; font-size: 12px;")
        # One colour per vertex name within this DAG.
        vertexes = set([v.name for v in dag.vertexes])
        colourmap = dict([(v,colourman.next()) for v in list(vertexes)])
        for c in dag.attempts():
            colour = colourmap[c.vertex]
            y1 = marginTop+(containerMap[c.container]*laneSize)+1
            x1 = marginRight+xdomain(c.start)
            x2 = marginRight+xdomain(c.finish)
            y2 = y1 + laneSize - 2
            # 1 = data-local, 2 = rack-local (bit-style encoding of the counters).
            locality = (c.kvs.has_key("DATA_LOCAL_TASKS") * 1) + (c.kvs.has_key("RACK_LOCAL_TASKS")*2)
            link = c.kvs["completedLogs"]
            svg.rect(x1, y1, x2, y2, title=c.name, style="fill: %s; stroke: #ccc;" % (colour), link=link)
            if locality > 1: # rack-local (no-locality isn't counted)
                svg.rect(x1, y2-4, x2, y2, style="fill: #f00; fill-opacity: 0.5;", link=link)
            # Long boxes get the full task/attempt label; narrow ones just the vertex.
            if x2 - x1 > 64:
                svg.text((x1+x2)/2, y2-12, "%s (%05d_%d)" % (c.vertex, c.tasknum, c.attemptnum), style="text-anchor: middle; font-size: 9px;")
            else:
                svg.text((x1+x2)/2, y2-12, "%s" % c.vertex, style="text-anchor: middle; font-size: 9px;")
        # Optional red "fraction complete" line (-f): time by which the given
        # percentage of attempts had finished.
        finishes = sorted([c.finish for c in dag.attempts()])
        if(len(finishes) > 10 and fraction > 0):
            percentX = finishes[int(len(finishes)*fraction)]
            svg.line(marginRight+xdomain(percentX), marginTop, marginRight+xdomain(percentX), y+marginTop, style="stroke: red")
            svg.text(marginRight+xdomain(percentX), y+marginTop+12, "%d%% (%0.1fs)" % (int(fraction*100), (percentX - dag.start)/1000.0), style="font-size:12px; text-anchor: middle")
    out.write(svg.flush())
    # NOTE(review): closes sys.stdout when -o was not given -- harmless at
    # process exit, but worth confirming if this function is ever reused.
    out.close()
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| apache-2.0 |
boompieman/iim_project | project_python2/lib/python2.7/site-packages/nltk/corpus/reader/reviews.py | 3 | 12832 | # Natural Language Toolkit: Product Reviews Corpus Reader
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Pierpaolo Pantone <24alsecondo@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
CorpusReader for reviews corpora (syntax based on Customer Review Corpus).
- Customer Review Corpus information -
Annotated by: Minqing Hu and Bing Liu, 2004.
Department of Computer Sicence
University of Illinois at Chicago
Contact: Bing Liu, liub@cs.uic.edu
http://www.cs.uic.edu/~liub
Distributed with permission.
The "product_reviews_1" and "product_reviews_2" datasets respectively contain
annotated customer reviews of 5 and 9 products from amazon.com.
Related papers:
- Minqing Hu and Bing Liu. "Mining and summarizing customer reviews".
Proceedings of the ACM SIGKDD International Conference on Knowledge
Discovery & Data Mining (KDD-04), 2004.
- Minqing Hu and Bing Liu. "Mining Opinion Features in Customer Reviews".
Proceedings of Nineteeth National Conference on Artificial Intelligence
(AAAI-2004), 2004.
- Xiaowen Ding, Bing Liu and Philip S. Yu. "A Holistic Lexicon-Based Appraoch to
Opinion Mining." Proceedings of First ACM International Conference on Web
Search and Data Mining (WSDM-2008), Feb 11-12, 2008, Stanford University,
Stanford, California, USA.
Symbols used in the annotated reviews:
[t] : the title of the review: Each [t] tag starts a review.
xxxx[+|-n]: xxxx is a product feature.
[+n]: Positive opinion, n is the opinion strength: 3 strongest, and 1 weakest.
Note that the strength is quite subjective.
You may want ignore it, but only considering + and -
[-n]: Negative opinion
## : start of each sentence. Each line is a sentence.
[u] : feature not appeared in the sentence.
[p] : feature not appeared in the sentence. Pronoun resolution is needed.
[s] : suggestion or recommendation.
[cc]: comparison with a competing product from a different brand.
[cs]: comparison with a competing product from the same brand.
Note: Some of the files (e.g. "ipod.txt", "Canon PowerShot SD500.txt") do not
provide separation between different reviews. This is due to the fact that
the dataset was specifically designed for aspect/feature-based sentiment
analysis, for which sentence-level annotation is sufficient. For document-
level classification and analysis, this peculiarity should be taken into
consideration.
"""
from __future__ import division
import re
from nltk.corpus.reader.api import *
from nltk.tokenize import *
# Patterns for the corpus annotation symbols (see the module docstring for
# the full annotation scheme).
TITLE = re.compile(r'^\[t\](.*)$')  # [t] Title: marks the start of each review
FEATURES = re.compile(r'((?:(?:\w+\s)+)?\w+)\[((?:\+|\-)\d)\]')  # find 'feature' in feature[+3]
NOTES = re.compile(r'\[(?!t)(p|u|s|cc|cs)\]')  # find 'p' in camera[+2][p]
SENT = re.compile(r'##(.*)$')  # find tokenized sentence
@compat.python_2_unicode_compatible
class Review(object):
    """A single product review: a title plus its annotated sentences
    (ReviewLine instances)."""

    def __init__(self, title=None, review_lines=None):
        """
        :param title: the title of the review.
        :param review_lines: the list of the ReviewLines that belong to the Review.
        """
        self.title = title
        self.review_lines = [] if review_lines is None else review_lines

    def add_line(self, review_line):
        """
        Add a line (ReviewLine) to the review.

        :param review_line: a ReviewLine instance that belongs to the Review.
        """
        assert isinstance(review_line, ReviewLine)
        self.review_lines.append(review_line)

    def features(self):
        """
        Return a list of features in the review. Each feature is a tuple made of
        the specific item feature and the opinion strength about that feature.

        :return: all features of the review as a list of tuples (feat, score).
        :rtype: list(tuple)
        """
        return [feat for line in self.review_lines for feat in line.features]

    def sents(self):
        """
        Return all tokenized sentences in the review.

        :return: all sentences of the review as lists of tokens.
        :rtype: list(list(str))
        """
        return [line.sent for line in self.review_lines]

    def __repr__(self):
        return 'Review(title=\"{}\", review_lines={})'.format(self.title, self.review_lines)
@compat.python_2_unicode_compatible
class ReviewLine(object):
    """One annotated sentence of a review: the tokenized sentence together
    with (optional) feature annotations and notes about the reviewed item."""

    def __init__(self, sent, features=None, notes=None):
        self.sent = sent
        self.features = [] if features is None else features
        self.notes = [] if notes is None else notes

    def __repr__(self):
        return ('ReviewLine(features={}, notes={}, sent={})'.format(
            self.features, self.notes, self.sent))
class ReviewsCorpusReader(CorpusReader):
    """
    Reader for the Customer Review Data dataset by Hu, Liu (2004).
    Note: we are not applying any sentence tokenization at the moment, just word
    tokenization.

    >>> from nltk.corpus import product_reviews_1
    >>> camera_reviews = product_reviews_1.reviews('Canon_G3.txt')
    >>> review = camera_reviews[0]
    >>> review.sents()[0]
    ['i', 'recently', 'purchased', 'the', 'canon', 'powershot', 'g3', 'and', 'am',
    'extremely', 'satisfied', 'with', 'the', 'purchase', '.']
    >>> review.features()
    [('canon powershot g3', '+3'), ('use', '+2'), ('picture', '+2'),
    ('picture quality', '+1'), ('picture quality', '+1'), ('camera', '+2'),
    ('use', '+2'), ('feature', '+1'), ('picture quality', '+3'), ('use', '+1'),
    ('option', '+1')]

    We can also reach the same information directly from the stream:

    >>> product_reviews_1.features('Canon_G3.txt')
    [('canon powershot g3', '+3'), ('use', '+2'), ...]

    We can compute stats for specific product features:

    >>> from __future__ import division
    >>> n_reviews = len([(feat,score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture'])
    >>> tot = sum([int(score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture'])
    >>> # We use float for backward compatibility with division in Python2.7
    >>> mean = tot / n_reviews
    >>> print(n_reviews, tot, mean)
    15 24 1.6
    """

    # Lazily-evaluated view class used by all the reading methods below.
    CorpusView = StreamBackedCorpusView

    def __init__(self, root, fileids, word_tokenizer=WordPunctTokenizer(),
                 encoding='utf8'):
        """
        :param root: The root directory for the corpus.
        :param fileids: a list or regexp specifying the fileids in the corpus.
        :param word_tokenizer: a tokenizer for breaking sentences or paragraphs
            into words. Default: `WordPunctTokenizer`
        :param encoding: the encoding that should be used to read the corpus.
        """
        # NOTE(review): the default tokenizer instance is shared across all
        # readers that don't pass their own -- assumed safe because
        # WordPunctTokenizer appears stateless; confirm before changing.
        CorpusReader.__init__(self, root, fileids, encoding)
        self._word_tokenizer = word_tokenizer

    def features(self, fileids=None):
        """
        Return a list of features. Each feature is a tuple made of the specific
        item feature and the opinion strength about that feature.

        :param fileids: a list or regexp specifying the ids of the files whose
            features have to be returned.
        :return: all features for the item(s) in the given file(s).
        :rtype: list(tuple)
        """
        if fileids is None:
            fileids = self._fileids
        elif isinstance(fileids, string_types):
            fileids = [fileids]
        return concat([self.CorpusView(fileid, self._read_features, encoding=enc)
                       for (fileid, enc) in self.abspaths(fileids, True)])

    def raw(self, fileids=None):
        """
        :param fileids: a list or regexp specifying the fileids of the files that
            have to be returned as a raw string.
        :return: the given file(s) as a single string.
        :rtype: str
        """
        if fileids is None:
            fileids = self._fileids
        elif isinstance(fileids, string_types):
            fileids = [fileids]
        return concat([self.open(f).read() for f in fileids])

    def readme(self):
        """
        Return the contents of the corpus README.txt file.
        """
        return self.open("README.txt").read()

    def reviews(self, fileids=None):
        """
        Return all the reviews as a list of Review objects. If `fileids` is
        specified, return all the reviews from each of the specified files.

        :param fileids: a list or regexp specifying the ids of the files whose
            reviews have to be returned.
        :return: the given file(s) as a list of reviews.
        """
        if fileids is None:
            fileids = self._fileids
        return concat([self.CorpusView(fileid, self._read_review_block, encoding=enc)
                       for (fileid, enc) in self.abspaths(fileids, True)])

    def sents(self, fileids=None):
        """
        Return all sentences in the corpus or in the specified files.

        :param fileids: a list or regexp specifying the ids of the files whose
            sentences have to be returned.
        :return: the given file(s) as a list of sentences, each encoded as a
            list of word strings.
        :rtype: list(list(str))
        """
        return concat([self.CorpusView(path, self._read_sent_block, encoding=enc)
                       for (path, enc, fileid)
                       in self.abspaths(fileids, True, True)])

    def words(self, fileids=None):
        """
        Return all words and punctuation symbols in the corpus or in the specified
        files.

        :param fileids: a list or regexp specifying the ids of the files whose
            words have to be returned.
        :return: the given file(s) as a list of words and punctuation symbols.
        :rtype: list(str)
        """
        return concat([self.CorpusView(path, self._read_word_block, encoding=enc)
                       for (path, enc, fileid)
                       in self.abspaths(fileids, True, True)])

    def _read_features(self, stream):
        """Read up to 20 lines from the stream and return their feature tuples."""
        features = []
        for i in range(20):
            line = stream.readline()
            if not line:
                return features
            features.extend(re.findall(FEATURES, line))
        return features

    def _read_review_block(self, stream):
        """Read one complete review (from a [t] title line to the next title
        or EOF) and return it as a one-element list, per CorpusView protocol."""
        while True:
            line = stream.readline()
            if not line:
                return [] # end of file.
            title_match = re.match(TITLE, line)
            if title_match:
                review = Review(title=title_match.group(1).strip()) # We create a new review
                break
        # Scan until we find another line matching the regexp, or EOF.
        while True:
            oldpos = stream.tell()
            line = stream.readline()
            # End of file:
            if not line:
                return [review]
            # Start of a new review: backup to just before it starts, and
            # return the review we've already collected.
            if re.match(TITLE, line):
                stream.seek(oldpos)
                return [review]
            # Anything else is part of the review line.
            feats = re.findall(FEATURES, line)
            notes = re.findall(NOTES, line)
            sent = re.findall(SENT, line)
            if sent:
                sent = self._word_tokenizer.tokenize(sent[0])
            review_line = ReviewLine(sent=sent, features=feats, notes=notes)
            review.add_line(review_line)

    def _read_sent_block(self, stream):
        """Return the tokenized sentences of the next review block."""
        sents = []
        for review in self._read_review_block(stream):
            sents.extend([sent for sent in review.sents()])
        return sents

    def _read_word_block(self, stream):
        """Read up to 20 lines and return their tokens as a flat list."""
        words = []
        for i in range(20): # Read 20 lines at a time.
            line = stream.readline()
            sent = re.findall(SENT, line)
            if sent:
                words.extend(self._word_tokenizer.tokenize(sent[0]))
        return words
| gpl-3.0 |
legacysurvey/legacypipe | py/legacyanalysis/test_djspsf.py | 2 | 2808 | from __future__ import print_function
import sys
if __name__ == '__main__':
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import numpy as np
from astrometry.util.file import trymakedirs
from tractor import *
from tractor.psfex import *
from legacypipe.survey import *
def main():
    """Compare the Schlegel (DJS) PSF model against the PsfEx model for one
    DECam CCD, writing a series of diagnostic PNGs (basis functions and
    spatial grids) to the current directory.

    NOTE(review): requires local survey data (PSF FITS file, CCD tables) and
    write access to the working directory; not runnable standalone.
    """
    plt.figure(figsize=(4,8))
    plt.subplots_adjust(hspace=0.2, wspace=0.2)
    #expnum = 349185
    #ccdname = 'N7'
    #djs = PixelizedPsfEx(fn='psf-c4d_140818_011313_ooi_z_v1.fits',
    #                     psfexmodel=SchlegelPsfModel)
    expnum = 396086
    ccdname = 'S31'
    djs = PixelizedPsfEx(fn='decam-00396086-S31.fits',
                         psfexmodel=SchlegelPsfModel)
    print('Schlegel PSF', djs)
    # Postage-stamp size (pixels) for all basis/grid plots below.
    stampsz = 15
    print('Plotting bases')
    djs.psfex.plot_bases(autoscale=False)
    plt.suptitle('DJS PSF basis functions')
    plt.savefig('djs-bases.png')
    djs.psfex.plot_bases(stampsize=stampsz, autoscale=False)
    plt.suptitle('DJS PSF basis functions')
    plt.savefig('djs-bases2.png')
    # Evaluation grid over the CCD: nx x ny cell centers.
    H,W = 4096, 2048
    nx,ny = 6,11
    yy = np.linspace(0., H, ny+1)
    xx = np.linspace(0., W, nx+1)
    # center of cells
    yy = yy[:-1] + (yy[1]-yy[0])/2.
    xx = xx[:-1] + (xx[1]-xx[0])/2.
    # Shared colour scale anchored to the peak basis amplitude.
    mx = djs.psfex.psfbases.max()
    kwa = dict(vmin=-0.1*mx, vmax=mx)
    plt.subplots_adjust(hspace=0.0, wspace=0.0)
    print('Plotting grid')
    djs.psfex.plot_grid(xx, yy, stampsize=stampsz, **kwa)
    plt.suptitle('DJS PSF grid')
    plt.savefig('djs-grid.png')
    # One spatial-variation plot per basis term.
    for i in range(djs.psfex.nbases):
        print('Plotting grid for parameter', i)
        djs.psfex.plot_grid(xx, yy, term=i, stampsize=stampsz, **kwa)
        plt.savefig('djs-term%i.png' % i)
    # Now the PsfEx model for the same exposure/CCD, via the survey framework.
    survey = LegacySurveyData()
    ccds = survey.find_ccds(expnum=expnum,ccdname=ccdname)
    ccd = ccds[0]
    im = survey.get_image_object(ccd)
    band = ccd.filter  # NOTE(review): unused below -- kept for parity with original
    im.run_calibs()
    tim = im.get_tractor_image(pixPsf=True, nanomaggies=False, subsky=False)
    psfex = tim.psf.psfex
    plt.subplots_adjust(hspace=0.2, wspace=0.2)
    psfex.plot_bases(autoscale=False)
    plt.suptitle('PsfEx basis functions')
    plt.savefig('psfex-bases.png')
    psfex.plot_bases(stampsize=stampsz, autoscale=False)
    plt.suptitle('PsfEx basis functions')
    plt.savefig('psfex-bases2.png')
    mx = psfex.psfbases.max()
    kwa = dict(vmin=-0.1*mx, vmax=mx)
    plt.subplots_adjust(hspace=0.0, wspace=0.0)
    print('Plotting grid')
    psfex.plot_grid(xx, yy, stampsize=stampsz, **kwa)
    plt.suptitle('PsfEx grid')
    plt.savefig('psfex-grid.png')
    for i in range(psfex.nbases):
        print('Plotting grid for parameter', i)
        psfex.plot_grid(xx, yy, term=i, stampsize=stampsz, **kwa)
        plt.savefig('psfex-term%i.png' % i)
if __name__ == '__main__':
main()
| bsd-3-clause |
navotsil/Open-Knesset | okhelptexts/migrations/0001_initial.py | 14 | 1844 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South schema migration: creates the Helptext and Keyword tables.

    Historical migration -- the frozen ORM state below mirrors models.py at
    the time this migration was written and must not be edited retroactively.
    """

    def forwards(self, orm):
        """Apply the migration: create both tables and emit creation signals."""
        # Adding model 'Helptext'
        db.create_table('okhelptexts_helptext', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('fulltext', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('okhelptexts', ['Helptext'])
        # Adding model 'Keyword' (many keywords per Helptext via FK)
        db.create_table('okhelptexts_keyword', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('helptext', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['okhelptexts.Helptext'])),
            ('kw_text', self.gf('django.db.models.fields.CharField')(max_length=200)),
        ))
        db.send_create_signal('okhelptexts', ['Keyword'])

    def backwards(self, orm):
        """Reverse the migration: drop both tables."""
        # Deleting model 'Helptext'
        db.delete_table('okhelptexts_helptext')
        # Deleting model 'Keyword'
        db.delete_table('okhelptexts_keyword')

    # Frozen ORM description used by South when running this migration.
    models = {
        'okhelptexts.helptext': {
            'Meta': {'object_name': 'Helptext'},
            'fulltext': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'okhelptexts.keyword': {
            'Meta': {'object_name': 'Keyword'},
            'helptext': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['okhelptexts.Helptext']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kw_text': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        }
    }

    complete_apps = ['okhelptexts']
| bsd-3-clause |
ATIX-AG/ansible | test/units/module_utils/basic/test_imports.py | 42 | 5657 | # -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import sys
from units.mock.procenv import ModuleTestCase
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.module_utils.six.moves import builtins
realimport = builtins.__import__
class TestImports(ModuleTestCase):
    """Verify that ansible.module_utils.basic degrades gracefully when its
    optional dependencies (syslog, selinux, simplejson, ast.literal_eval,
    systemd.journal) are unavailable, by re-importing the module with a
    patched builtins.__import__."""

    def clear_modules(self, mods):
        # Drop cached entries so a later __import__ re-executes the module's
        # top-level code (and therefore re-runs its optional-import blocks).
        for mod in mods:
            if mod in sys.modules:
                del sys.modules[mod]

    @patch.object(builtins, '__import__')
    def test_module_utils_basic_import_syslog(self, mock_import):
        def _mock_import(name, *args, **kwargs):
            # Pretend syslog is missing; delegate everything else.
            if name == 'syslog':
                raise ImportError
            return realimport(name, *args, **kwargs)

        # NOTE(review): side_effect is not yet assigned here, so the patched
        # __import__ returns a MagicMock and this first assertTrue is
        # vacuously true — confirm intent (same pattern in the tests below).
        self.clear_modules(['syslog', 'ansible.module_utils.basic'])
        mod = builtins.__import__('ansible.module_utils.basic')
        self.assertTrue(mod.module_utils.basic.HAS_SYSLOG)

        # With syslog "missing", the module must set HAS_SYSLOG to False.
        self.clear_modules(['syslog', 'ansible.module_utils.basic'])
        mock_import.side_effect = _mock_import
        mod = builtins.__import__('ansible.module_utils.basic')
        self.assertFalse(mod.module_utils.basic.HAS_SYSLOG)

    @patch.object(builtins, '__import__')
    def test_module_utils_basic_import_selinux(self, mock_import):
        def _mock_import(name, *args, **kwargs):
            if name == 'selinux':
                raise ImportError
            return realimport(name, *args, **kwargs)

        try:
            self.clear_modules(['selinux', 'ansible.module_utils.basic'])
            mod = builtins.__import__('ansible.module_utils.basic')
            self.assertTrue(mod.module_utils.basic.HAVE_SELINUX)
        except ImportError:
            # no selinux on test system, so skip
            pass

        self.clear_modules(['selinux', 'ansible.module_utils.basic'])
        mock_import.side_effect = _mock_import
        mod = builtins.__import__('ansible.module_utils.basic')
        self.assertFalse(mod.module_utils.basic.HAVE_SELINUX)

    @patch.object(builtins, '__import__')
    def test_module_utils_basic_import_json(self, mock_import):
        def _mock_import(name, *args, **kwargs):
            # Pretend stdlib json is missing but simplejson is available.
            if name == 'json':
                raise ImportError
            elif name == 'simplejson':
                sj = MagicMock()
                sj.__version__ = '3.10.0'
                return sj
            return realimport(name, *args, **kwargs)

        self.clear_modules(['json', 'ansible.module_utils.basic'])
        mod = builtins.__import__('ansible.module_utils.basic')
        self.clear_modules(['json', 'ansible.module_utils.basic'])
        mock_import.side_effect = _mock_import
        # NOTE(review): nothing is asserted below — this only checks that the
        # import succeeds when falling back to simplejson.
        mod = builtins.__import__('ansible.module_utils.basic')

    # FIXME: doesn't work yet
    # @patch.object(builtins, 'bytes')
    # def test_module_utils_basic_bytes(self, mock_bytes):
    #     mock_bytes.side_effect = NameError()
    #     from ansible.module_utils import basic

    @patch.object(builtins, '__import__')
    @unittest.skipIf(sys.version_info[0] >= 3, "literal_eval is available in every version of Python3")
    def test_module_utils_basic_import_literal_eval(self, mock_import):
        def _mock_import(name, *args, **kwargs):
            # __import__ may receive fromlist positionally or by keyword.
            try:
                fromlist = kwargs.get('fromlist', args[2])
            except IndexError:
                fromlist = []
            if name == 'ast' and 'literal_eval' in fromlist:
                raise ImportError
            return realimport(name, *args, **kwargs)

        mock_import.side_effect = _mock_import
        self.clear_modules(['ast', 'ansible.module_utils.basic'])
        mod = builtins.__import__('ansible.module_utils.basic')
        # On Python 2 without ast.literal_eval, basic must expose its own
        # fallback with equivalent behavior for common literals.
        self.assertEqual(mod.module_utils.basic.literal_eval("'1'"), "1")
        self.assertEqual(mod.module_utils.basic.literal_eval("1"), 1)
        self.assertEqual(mod.module_utils.basic.literal_eval("-1"), -1)
        self.assertEqual(mod.module_utils.basic.literal_eval("(1,2,3)"), (1, 2, 3))
        self.assertEqual(mod.module_utils.basic.literal_eval("[1]"), [1])
        self.assertEqual(mod.module_utils.basic.literal_eval("True"), True)
        self.assertEqual(mod.module_utils.basic.literal_eval("False"), False)
        self.assertEqual(mod.module_utils.basic.literal_eval("None"), None)
        # self.assertEqual(mod.module_utils.basic.literal_eval('{"a": 1}'), dict(a=1))
        self.assertRaises(ValueError, mod.module_utils.basic.literal_eval, "asdfasdfasdf")

    @patch.object(builtins, '__import__')
    def test_module_utils_basic_import_systemd_journal(self, mock_import):
        def _mock_import(name, *args, **kwargs):
            try:
                fromlist = kwargs.get('fromlist', args[2])
            except IndexError:
                fromlist = []
            # Only the `from systemd import journal` form is simulated missing.
            if name == 'systemd' and 'journal' in fromlist:
                raise ImportError
            return realimport(name, *args, **kwargs)

        self.clear_modules(['systemd', 'ansible.module_utils.basic'])
        mod = builtins.__import__('ansible.module_utils.basic')
        self.assertTrue(mod.module_utils.basic.has_journal)

        self.clear_modules(['systemd', 'ansible.module_utils.basic'])
        mock_import.side_effect = _mock_import
        mod = builtins.__import__('ansible.module_utils.basic')
        self.assertFalse(mod.module_utils.basic.has_journal)
| gpl-3.0 |
lncosie/antlr4 | runtime/Python2/src/antlr4/atn/ATNDeserializationOptions.py | 14 | 2185 | #[The "BSD license"]
# Copyright (c) 2013 Terence Parr
# Copyright (c) 2013 Sam Harwell
# Copyright (c) 2014 Eric Vergnaud
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class ATNDeserializationOptions(object):
    """Configuration flags for ATN deserialization.

    ``defaultOptions`` is a shared instance that is frozen right after
    creation. Once ``readOnly`` is True, assigning any attribute other
    than ``readOnly`` itself raises.
    """

    # Shared default configuration; populated after the class body below.
    defaultOptions = None

    def __init__(self, copyFrom = None):
        """Create options, optionally copying the flags from ``copyFrom``."""
        self.readOnly = False
        # Verify the ATN after loading (default True).
        self.verifyATN = True if copyFrom is None else copyFrom.verifyATN
        # Generate rule-bypass transitions (default False).
        self.generateRuleBypassTransitions = False if copyFrom is None else copyFrom.generateRuleBypassTransitions

    def __setattr__(self, key, value):
        # Reject mutation of frozen instances, except toggling readOnly itself.
        if key!="readOnly" and self.readOnly:
            raise Exception("The object is read only.")
        # BUGFIX: the original used super(type(self), self), which recurses
        # infinitely for any subclass that inherits this __setattr__
        # (type(self) is then the subclass, so the MRO lookup lands right
        # back here). Naming the class explicitly is correct on Py2 and Py3.
        super(ATNDeserializationOptions, self).__setattr__(key, value)

ATNDeserializationOptions.defaultOptions = ATNDeserializationOptions()
ATNDeserializationOptions.defaultOptions.readOnly = True
| bsd-3-clause |
andfoy/margffoy-tuay-server | env/lib/python2.7/site-packages/sockjs/tornado/periodic.py | 12 | 1944 | # -*- coding: utf-8 -*-
"""
sockjs.tornado.periodic
~~~~~~~~~~~~~~~~~~~~~~~
This module implements customized PeriodicCallback from tornado with
support of the sliding window.
"""
import time
import logging
LOG = logging.getLogger("tornado.general")
class Callback(object):
    """Periodic callback with a sliding execution window.

    Behaves like tornado's Callback, except that delay() can push the
    next run further into the future; _run() then re-arms the timer
    instead of firing.
    """

    def __init__(self, callback, callback_time, io_loop):
        """Store the callable, its period (milliseconds) and the io_loop."""
        self.callback = callback
        self.callback_time = callback_time
        self.io_loop = io_loop
        self._running = False
        self.next_run = None

    def calculate_next_run(self):
        """Return the absolute wall-clock time of the next scheduled run."""
        return time.time() + self.callback_time / 1000.0

    def start(self, timeout=None):
        """Arm the timer; fire at ``timeout`` or one full period from now."""
        self._running = True
        when = self.calculate_next_run() if timeout is None else timeout
        self.io_loop.add_timeout(when, self._run)

    def stop(self):
        """Prevent any further runs."""
        self._running = False

    def delay(self):
        """Slide the window: postpone the next run one full period."""
        self.next_run = self.calculate_next_run()

    def _run(self):
        if not self._running:
            return

        # delay() moved the deadline into the future: re-arm for the new
        # time instead of firing now (sliding-window behavior).
        pending = self.next_run
        if pending is not None and time.time() < pending:
            self.start(pending)
            self.next_run = None
            return

        result = None
        try:
            result = self.callback()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            LOG.error("Error in periodic callback", exc_info=True)

        # Reschedule, honoring an explicit next-run time returned by the
        # callback (None falls back to one period from now).
        if self._running:
            self.start(result)
| gpl-2.0 |
FreekingDean/home-assistant | homeassistant/components/apcupsd.py | 31 | 2422 | """
Support for status output of APCUPSd via its Network Information Server (NIS).
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/apcupsd/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.const import (CONF_HOST, CONF_PORT)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
REQUIREMENTS = ['apcaccess==0.0.4']
_LOGGER = logging.getLogger(__name__)
CONF_TYPE = 'type'
DATA = None
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 3551
DOMAIN = 'apcupsd'
KEY_STATUS = 'STATUS'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
VALUE_ONLINE = 'ONLINE'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Use config values to set up a function enabling status retrieval."""
    global DATA

    conf = config[DOMAIN]
    host = conf.get(CONF_HOST)
    port = conf.get(CONF_PORT)

    # Module-level singleton shared by this component's sensor platforms.
    DATA = APCUPSdData(host, port)

    # It doesn't really matter why we're not able to get the status, just that
    # we can't.
    # pylint: disable=broad-except
    try:
        # Fetch once now (bypassing the throttle) so a bad host/port fails
        # component setup immediately instead of at first sensor update.
        DATA.update(no_throttle=True)
    except Exception:
        _LOGGER.exception("Failure while testing APCUPSd status retrieval.")
        return False
    return True
class APCUPSdData(object):
    """Stores the data retrieved from APCUPSd.

    For each entity to use, acts as the single point responsible for fetching
    updates from the server.
    """

    def __init__(self, host, port):
        """Initialize the data object."""
        # Imported lazily so the third-party dependency is only required
        # once the component is actually configured.
        from apcaccess import status
        self._host = host
        self._port = port
        self._status = None
        # Bound references to the library helpers used by _get_status().
        self._get = status.get
        self._parse = status.parse

    @property
    def status(self):
        """Get latest update if throttle allows. Return status."""
        self.update()
        return self._status

    def _get_status(self):
        """Get the status from APCUPSd and parse it into a dict."""
        return self._parse(self._get(host=self._host, port=self._port))

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self, **kwargs):
        """Fetch the latest status from APCUPSd.

        Throttled to once per MIN_TIME_BETWEEN_UPDATES; callers can pass
        ``no_throttle=True`` (consumed by the Throttle decorator) to force
        an immediate refresh.
        """
        self._status = self._get_status()
| mit |
aldian/tensorflow | tensorflow/python/kernel_tests/betainc_op_test.py | 87 | 7919 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class BetaincTest(test.TestCase):
  """Tests math_ops.betainc against scipy.special.betainc as reference."""

  def _testBetaInc(self, a_s, b_s, x_s, dtype):
    """Compare tf betainc with scipy for the given arrays and dtype.

    Also checks out-of-range inputs, scalar broadcasting, and shape-error
    reporting. Skips (with a warning) when scipy is not installed.
    """
    try:
      from scipy import special  # pylint: disable=g-import-not-at-top
      np_dt = dtype.as_numpy_dtype

      # Test random values
      a_s = a_s.astype(np_dt)  # in (0, infty)
      b_s = b_s.astype(np_dt)  # in (0, infty)
      x_s = x_s.astype(np_dt)  # in (0, 1)
      tf_a_s = constant_op.constant(a_s, dtype=dtype)
      tf_b_s = constant_op.constant(b_s, dtype=dtype)
      tf_x_s = constant_op.constant(x_s, dtype=dtype)
      tf_out_t = math_ops.betainc(tf_a_s, tf_b_s, tf_x_s)
      with self.test_session():
        tf_out = tf_out_t.eval()
      scipy_out = special.betainc(a_s, b_s, x_s).astype(np_dt)

      # the scipy version of betainc uses a double-only implementation.
      # TODO(ebrevdo): identify reasons for (sometime) precision loss
      # with doubles
      tol = 1e-4 if dtype == dtypes.float32 else 5e-5
      self.assertAllCloseAccordingToType(scipy_out, tf_out, rtol=tol, atol=0)

      # Test out-of-range values (most should return nan output)
      combinations = list(itertools.product([-1, 0, 0.5, 1.0, 1.5], repeat=3))
      a_comb, b_comb, x_comb = np.asarray(list(zip(*combinations)), dtype=np_dt)
      with self.test_session():
        tf_comb = math_ops.betainc(a_comb, b_comb, x_comb).eval()
      scipy_comb = special.betainc(a_comb, b_comb, x_comb).astype(np_dt)
      self.assertAllCloseAccordingToType(scipy_comb, tf_comb)

      # Test broadcasting between scalars and other shapes
      with self.test_session():
        self.assertAllCloseAccordingToType(
            special.betainc(0.1, b_s, x_s).astype(np_dt),
            math_ops.betainc(0.1, b_s, x_s).eval(),
            rtol=tol,
            atol=0)
        self.assertAllCloseAccordingToType(
            special.betainc(a_s, 0.1, x_s).astype(np_dt),
            math_ops.betainc(a_s, 0.1, x_s).eval(),
            rtol=tol,
            atol=0)
        self.assertAllCloseAccordingToType(
            special.betainc(a_s, b_s, 0.1).astype(np_dt),
            math_ops.betainc(a_s, b_s, 0.1).eval(),
            rtol=tol,
            atol=0)
        self.assertAllCloseAccordingToType(
            special.betainc(0.1, b_s, 0.1).astype(np_dt),
            math_ops.betainc(0.1, b_s, 0.1).eval(),
            rtol=tol,
            atol=0)
        self.assertAllCloseAccordingToType(
            special.betainc(0.1, 0.1, 0.1).astype(np_dt),
            math_ops.betainc(0.1, 0.1, 0.1).eval(),
            rtol=tol,
            atol=0)

      # Incompatible static shapes must fail at graph-construction time...
      with self.assertRaisesRegexp(ValueError, "must be equal"):
        math_ops.betainc(0.5, [0.5], [[0.5]])

      # ...and incompatible fed shapes must fail at run time.
      with self.test_session():
        with self.assertRaisesOpError("Shapes of .* are inconsistent"):
          a_p = array_ops.placeholder(dtype)
          b_p = array_ops.placeholder(dtype)
          x_p = array_ops.placeholder(dtype)
          math_ops.betainc(a_p, b_p, x_p).eval(
              feed_dict={a_p: 0.5,
                         b_p: [0.5],
                         x_p: [[0.5]]})

    except ImportError as e:
      tf_logging.warn("Cannot test special functions: %s" % str(e))

  def testBetaIncFloat(self):
    """float32 agreement with scipy on random in-range values."""
    a_s = np.abs(np.random.randn(10, 10) * 30)  # in (0, infty)
    b_s = np.abs(np.random.randn(10, 10) * 30)  # in (0, infty)
    x_s = np.random.rand(10, 10)  # in (0, 1)
    self._testBetaInc(a_s, b_s, x_s, dtypes.float32)

  def testBetaIncDouble(self):
    """float64 agreement with scipy on random in-range values."""
    a_s = np.abs(np.random.randn(10, 10) * 30)  # in (0, infty)
    b_s = np.abs(np.random.randn(10, 10) * 30)  # in (0, infty)
    x_s = np.random.rand(10, 10)  # in (0, 1)
    self._testBetaInc(a_s, b_s, x_s, dtypes.float64)

  def testBetaIncDoubleVeryLargeValues(self):
    """float64 with extremely large a/b parameters."""
    a_s = np.abs(np.random.randn(10, 10) * 1e15)  # in (0, infty)
    b_s = np.abs(np.random.randn(10, 10) * 1e15)  # in (0, infty)
    x_s = np.random.rand(10, 10)  # in (0, 1)
    self._testBetaInc(a_s, b_s, x_s, dtypes.float64)

  def testBetaIncDoubleVerySmallValues(self):
    """float64 with extremely small a/b parameters."""
    a_s = np.abs(np.random.randn(10, 10) * 1e-16)  # in (0, infty)
    b_s = np.abs(np.random.randn(10, 10) * 1e-16)  # in (0, infty)
    x_s = np.random.rand(10, 10)  # in (0, 1)
    self._testBetaInc(a_s, b_s, x_s, dtypes.float64)

  def testBetaIncFloatVerySmallValues(self):
    """float32 with very small a/b parameters."""
    a_s = np.abs(np.random.randn(10, 10) * 1e-8)  # in (0, infty)
    b_s = np.abs(np.random.randn(10, 10) * 1e-8)  # in (0, infty)
    x_s = np.random.rand(10, 10)  # in (0, 1)
    self._testBetaInc(a_s, b_s, x_s, dtypes.float32)

  def testBetaIncFpropAndBpropAreNeverNAN(self):
    """Forward values and d/dx gradients must be finite across a wide grid."""
    with self.test_session() as sess:
      space = np.logspace(-8, 5).tolist()
      space_x = np.linspace(1e-16, 1 - 1e-16).tolist()
      ga_s, gb_s, gx_s = zip(*list(itertools.product(space, space, space_x)))
      # Test grads are never nan
      ga_s_t = constant_op.constant(ga_s, dtype=dtypes.float32)
      gb_s_t = constant_op.constant(gb_s, dtype=dtypes.float32)
      gx_s_t = constant_op.constant(gx_s, dtype=dtypes.float32)
      tf_gout_t = math_ops.betainc(ga_s_t, gb_s_t, gx_s_t)
      tf_gout, grads_x = sess.run(
          [tf_gout_t,
           gradients_impl.gradients(tf_gout_t, [ga_s_t, gb_s_t, gx_s_t])[2]])

      # Equivalent to `assertAllFalse` (if it existed).
      self.assertAllEqual(np.zeros_like(grads_x).astype(np.bool),
                          np.isnan(tf_gout))
      self.assertAllEqual(np.zeros_like(grads_x).astype(np.bool),
                          np.isnan(grads_x))

  def testBetaIncGrads(self):
    """Numeric-vs-analytic gradient w.r.t. x, including scalar broadcast."""
    err_tolerance = 1e-3
    with self.test_session():
      # Test gradient
      ga_s = np.abs(np.random.randn(2, 2) * 30)  # in (0, infty)
      gb_s = np.abs(np.random.randn(2, 2) * 30)  # in (0, infty)
      gx_s = np.random.rand(2, 2)  # in (0, 1)
      tf_ga_s = constant_op.constant(ga_s, dtype=dtypes.float64)
      tf_gb_s = constant_op.constant(gb_s, dtype=dtypes.float64)
      tf_gx_s = constant_op.constant(gx_s, dtype=dtypes.float64)
      tf_gout_t = math_ops.betainc(tf_ga_s, tf_gb_s, tf_gx_s)
      err = gradient_checker.compute_gradient_error(
          [tf_gx_s], [gx_s.shape], tf_gout_t, gx_s.shape)
      print("betainc gradient err = %g " % err)
      self.assertLess(err, err_tolerance)

      # Test broadcast gradient
      gx_s = np.random.rand()  # in (0, 1)
      tf_gx_s = constant_op.constant(gx_s, dtype=dtypes.float64)
      tf_gout_t = math_ops.betainc(tf_ga_s, tf_gb_s, tf_gx_s)
      err = gradient_checker.compute_gradient_error(
          [tf_gx_s], [()], tf_gout_t, ga_s.shape)
      print("betainc gradient err = %g " % err)
      self.assertLess(err, err_tolerance)
# Allow running this test file directly with `python betainc_op_test.py`.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
pengli09/Paddle | python/paddle/v2/dataset/tests/cifar_test.py | 16 | 1874 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2.dataset.cifar
import unittest
class TestCIFAR(unittest.TestCase):
def check_reader(self, reader):
sum = 0
label = 0
for l in reader():
self.assertEqual(l[0].size, 3072)
if l[1] > label:
label = l[1]
sum += 1
return sum, label
def test_test10(self):
instances, max_label_value = self.check_reader(
paddle.v2.dataset.cifar.test10())
self.assertEqual(instances, 10000)
self.assertEqual(max_label_value, 9)
def test_train10(self):
instances, max_label_value = self.check_reader(
paddle.v2.dataset.cifar.train10())
self.assertEqual(instances, 50000)
self.assertEqual(max_label_value, 9)
def test_test100(self):
instances, max_label_value = self.check_reader(
paddle.v2.dataset.cifar.test100())
self.assertEqual(instances, 10000)
self.assertEqual(max_label_value, 99)
def test_train100(self):
instances, max_label_value = self.check_reader(
paddle.v2.dataset.cifar.train100())
self.assertEqual(instances, 50000)
self.assertEqual(max_label_value, 99)
# Allow running this test file directly with `python cifar_test.py`.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
mKeRix/home-assistant | tests/util/test_volume.py | 23 | 1523 | """Test Home Assistant volume utility functions."""
import pytest
from homeassistant.const import (
VOLUME_FLUID_OUNCE,
VOLUME_GALLONS,
VOLUME_LITERS,
VOLUME_MILLILITERS,
)
import homeassistant.util.volume as volume_util
# Unit symbols used by the conversion tests below.
INVALID_SYMBOL = "bob"  # not a recognized volume unit
VALID_SYMBOL = VOLUME_LITERS
def test_convert_same_unit():
    """Test conversion from any unit to same unit."""
    # Identity conversions must return the input value unchanged.
    assert volume_util.convert(2, VOLUME_LITERS, VOLUME_LITERS) == 2
    assert volume_util.convert(3, VOLUME_MILLILITERS, VOLUME_MILLILITERS) == 3
    assert volume_util.convert(4, VOLUME_GALLONS, VOLUME_GALLONS) == 4
    assert volume_util.convert(5, VOLUME_FLUID_OUNCE, VOLUME_FLUID_OUNCE) == 5
def test_convert_invalid_unit():
    """Test exception is thrown for invalid units."""
    # Both the source and the target unit are validated.
    with pytest.raises(ValueError):
        volume_util.convert(5, INVALID_SYMBOL, VALID_SYMBOL)

    with pytest.raises(ValueError):
        volume_util.convert(5, VALID_SYMBOL, INVALID_SYMBOL)
def test_convert_nonnumeric_value():
    """Test exception is thrown for nonnumeric type."""
    with pytest.raises(TypeError):
        volume_util.convert("a", VOLUME_GALLONS, VOLUME_LITERS)
def test_convert_from_liters():
    """Test conversion from liters to other units."""
    liters = 5
    # NOTE(review): exact equality presumably relies on volume_util.convert
    # rounding its result (5 L ~= 1.3209 gal) — confirm against the util.
    assert volume_util.convert(liters, VOLUME_LITERS, VOLUME_GALLONS) == 1.321
def test_convert_from_gallons():
    """Test conversion from gallons to other units."""
    gallons = 5
    # NOTE(review): expected value implies a 3.785 L/gal factor with
    # rounding in volume_util.convert — confirm against the util.
    assert volume_util.convert(gallons, VOLUME_GALLONS, VOLUME_LITERS) == 18.925
| mit |
eleonrk/SickRage | lib/future/backports/http/client.py | 64 | 47192 | """HTTP/1.1 client library
A backport of the Python 3.3 http/client.py module for python-future.
<intro stuff goes here>
<other stuff, too>
HTTPConnection goes through a number of "states", which define when a client
may legally make another request or fetch the response for a particular
request. This diagram details these state transitions:
(null)
|
| HTTPConnection()
v
Idle
|
| putrequest()
v
Request-started
|
| ( putheader() )* endheaders()
v
Request-sent
|
| response = getresponse()
v
Unread-response [Response-headers-read]
|\____________________
| |
| response.read() | putrequest()
v v
Idle Req-started-unread-response
______/|
/ |
response.read() | | ( putheader() )* endheaders()
v v
Request-started Req-sent-unread-response
|
| response.read()
v
Request-sent
This diagram presents the following rules:
-- a second request may not be started until {response-headers-read}
-- a response [object] cannot be retrieved until {request-sent}
-- there is no differentiation between an unread response body and a
partially read response body
Note: this enforcement is applied by the HTTPConnection class. The
HTTPResponse class does not enforce this state machine, which
implies sophisticated clients may accelerate the request/response
pipeline. Caution should be taken, though: accelerating the states
beyond the above pattern may imply knowledge of the server's
connection-close behavior for certain requests. For example, it
is impossible to tell whether the server will close the connection
UNTIL the response headers have been read; this means that further
requests cannot be placed into the pipeline until it is known that
the server will NOT be closing the connection.
Logical State __state __response
------------- ------- ----------
Idle _CS_IDLE None
Request-started _CS_REQ_STARTED None
Request-sent _CS_REQ_SENT None
Unread-response _CS_IDLE <response_class>
Req-started-unread-response _CS_REQ_STARTED <response_class>
Req-sent-unread-response _CS_REQ_SENT <response_class>
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future.builtins import bytes, int, str, super
from future.utils import PY2
from future.backports.email import parser as email_parser
from future.backports.email import message as email_message
from future.backports.misc import create_connection as socket_create_connection
import io
import os
import socket
import collections
from future.backports.urllib.parse import urlsplit
import warnings
from array import array
__all__ = ["HTTPResponse", "HTTPConnection",
"HTTPException", "NotConnected", "UnknownProtocol",
"UnknownTransferEncoding", "UnimplementedFileMode",
"IncompleteRead", "InvalidURL", "ImproperConnectionState",
"CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
"BadStatusLine", "error", "responses"]
HTTP_PORT = 80
HTTPS_PORT = 443
_UNKNOWN = 'UNKNOWN'
# connection states
_CS_IDLE = 'Idle'
_CS_REQ_STARTED = 'Request-started'
_CS_REQ_SENT = 'Request-sent'
# status codes
# informational
CONTINUE = 100
SWITCHING_PROTOCOLS = 101
PROCESSING = 102
# successful
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
IM_USED = 226
# redirection
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
# client error
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
UPGRADE_REQUIRED = 426
PRECONDITION_REQUIRED = 428
TOO_MANY_REQUESTS = 429
REQUEST_HEADER_FIELDS_TOO_LARGE = 431
# server error
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE = 507
NOT_EXTENDED = 510
NETWORK_AUTHENTICATION_REQUIRED = 511
# Mapping status codes to official W3C names
responses = {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
428: 'Precondition Required',
429: 'Too Many Requests',
431: 'Request Header Fields Too Large',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
511: 'Network Authentication Required',
}
# maximal amount of data to read at one time in _safe_read
MAXAMOUNT = 1048576
# maximal line length when calling readline().
_MAXLINE = 65536
_MAXHEADERS = 100
class HTTPMessage(email_message.Message):
    # XXX The only usage of this method is in
    # http.server.CGIHTTPRequestHandler. Maybe move the code there so
    # that it doesn't need to be part of the public API. The API has
    # never been defined so this could cause backwards compatibility
    # issues.

    def getallmatchingheaders(self, name):
        """Find all header lines matching a given header name.

        Look through the list of headers and find all lines matching a given
        header name (and their continuation lines). A list of the lines is
        returned, without interpretation. If the header does not occur, an
        empty list is returned. If the header occurs multiple times, all
        occurrences are returned. Case is not important in the header name.
        """
        prefix = name.lower() + ':'
        prefix_len = len(prefix)
        matched = []
        in_match = False
        for line in self.keys():
            if line[:prefix_len].lower() == prefix:
                in_match = True
            elif not line[:1].isspace():
                # A new non-continuation line terminates any current match.
                in_match = False
            if in_match:
                matched.append(line)
        return matched
def parse_headers(fp, _class=HTTPMessage):
    """Parses only RFC2822 headers from a file pointer.

    email Parser wants to see strings rather than bytes.
    But a TextIOWrapper around self.rfile would buffer too many bytes
    from the stream, bytes which we later need to read as bytes.
    So we read the correct bytes here, as bytes, for email Parser
    to parse.
    """
    raw_lines = []
    while True:
        raw = fp.readline(_MAXLINE + 1)
        if len(raw) > _MAXLINE:
            raise LineTooLong("header line")
        raw_lines.append(raw)
        if len(raw_lines) > _MAXHEADERS:
            raise HTTPException("got more than %d headers" % _MAXHEADERS)
        # A blank line (or EOF) terminates the header block.
        if raw in (b'\r\n', b'\n', b''):
            break
    text = bytes(b'').join(raw_lines).decode('iso-8859-1')
    return email_parser.Parser(_class=_class).parsestr(text)

# Sentinel distinguishing "strict was not passed" from an explicit value.
_strict_sentinel = object()
class HTTPResponse(io.RawIOBase):
# See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
# The bytes from the socket object are iso-8859-1 strings.
# See RFC 2616 sec 2.2 which notes an exception for MIME-encoded
# text following RFC 2047. The basic status line parsing only
# accepts iso-8859-1.
    def __init__(self, sock, debuglevel=0, strict=_strict_sentinel, method=None, url=None):
        """Wrap ``sock`` for reading one HTTP response.

        ``url`` is accepted for backward compatibility but not used here.
        """
        # If the response includes a content-length header, we need to
        # make sure that the client doesn't read more than the
        # specified number of bytes. If it does, it will block until
        # the server times out and closes the connection. This will
        # happen if a self.fp.read() is done (without a size) whether
        # self.fp is buffered or not. So, no self.fp.read() by
        # clients unless they know what they are doing.
        self.fp = sock.makefile("rb")
        self.debuglevel = debuglevel
        # 'strict' is obsolete; passing any explicit value only warns.
        if strict is not _strict_sentinel:
            warnings.warn("the 'strict' argument isn't supported anymore; "
                "http.client now always assumes HTTP/1.x compliant servers.",
                DeprecationWarning, 2)
        self._method = method

        # The HTTPResponse object is returned via urllib. The clients
        # of http and urllib expect different attributes for the
        # headers. headers is used here and supports urllib. msg is
        # provided as a backwards compatibility layer for http
        # clients.
        self.headers = self.msg = None

        # from the Status-Line of the response
        self.version = _UNKNOWN  # HTTP-Version
        self.status = _UNKNOWN  # Status-Code
        self.reason = _UNKNOWN  # Reason-Phrase

        self.chunked = _UNKNOWN  # is "chunked" being used?
        self.chunk_left = _UNKNOWN  # bytes left to read in current chunk
        self.length = _UNKNOWN  # number of bytes left in response
        self.will_close = _UNKNOWN  # conn will close at end of response
    def _read_status(self):
        """Read and parse the status line; return (version, status, reason).

        Raises LineTooLong for an oversized line and BadStatusLine for a
        malformed or empty one.
        """
        # Status line is decoded as latin-1 per RFC 2616's basic rules.
        line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
        if len(line) > _MAXLINE:
            raise LineTooLong("status line")
        if self.debuglevel > 0:
            print("reply:", repr(line))
        if not line:
            # Presumably, the server closed the connection before
            # sending a valid response.
            raise BadStatusLine(line)
        try:
            version, status, reason = line.split(None, 2)
        except ValueError:
            try:
                # Reason phrase may legitimately be absent.
                version, status = line.split(None, 1)
                reason = ""
            except ValueError:
                # empty version will cause next test to fail.
                version = ""
        if not version.startswith("HTTP/"):
            self._close_conn()
            raise BadStatusLine(line)

        # The status code is a three-digit number
        try:
            status = int(status)
            if status < 100 or status > 999:
                raise BadStatusLine(line)
        except ValueError:
            raise BadStatusLine(line)
        return version, status, reason
    def begin(self):
        """Read the status line and headers and derive response metadata
        (version, status, chunking, body length, keep-alive behavior).

        Idempotent: returns immediately if headers were already read.
        """
        if self.headers is not None:
            # we've already started reading the response
            return

        # read until we get a non-100 response
        while True:
            version, status, reason = self._read_status()
            if status != CONTINUE:
                break
            # skip the header from the 100 response
            while True:
                skip = self.fp.readline(_MAXLINE + 1)
                if len(skip) > _MAXLINE:
                    raise LineTooLong("header line")
                skip = skip.strip()
                if not skip:
                    break
                if self.debuglevel > 0:
                    print("header:", skip)

        self.code = self.status = status
        self.reason = reason.strip()
        if version in ("HTTP/1.0", "HTTP/0.9"):
            # Some servers might still return "0.9", treat it as 1.0 anyway
            self.version = 10
        elif version.startswith("HTTP/1."):
            self.version = 11   # use HTTP/1.1 code for HTTP/1.x where x>=1
        else:
            raise UnknownProtocol(version)

        self.headers = self.msg = parse_headers(self.fp)

        if self.debuglevel > 0:
            for hdr in self.headers:
                print("header:", hdr, end=" ")

        # are we using the chunked-style of transfer encoding?
        tr_enc = self.headers.get("transfer-encoding")
        if tr_enc and tr_enc.lower() == "chunked":
            self.chunked = True
            self.chunk_left = None
        else:
            self.chunked = False

        # will the connection close at the end of the response?
        self.will_close = self._check_close()

        # do we have a Content-Length?
        # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
        self.length = None
        length = self.headers.get("content-length")

        # are we using the chunked-style of transfer encoding?
        tr_enc = self.headers.get("transfer-encoding")
        if length and not self.chunked:
            try:
                self.length = int(length)
            except ValueError:
                self.length = None
            else:
                if self.length < 0:  # ignore nonsensical negative lengths
                    self.length = None
        else:
            self.length = None

        # does the body have a fixed length? (of zero)
        if (status == NO_CONTENT or status == NOT_MODIFIED or
            100 <= status < 200 or      # 1xx codes
            self._method == "HEAD"):
            self.length = 0

        # if the connection remains open, and we aren't using chunked, and
        # a content-length was not provided, then assume that the connection
        # WILL close.
        if (not self.will_close and
            not self.chunked and
            self.length is None):
            self.will_close = True
def _check_close(self):
conn = self.headers.get("connection")
if self.version == 11:
# An HTTP/1.1 proxy is assumed to stay open unless
# explicitly closed.
conn = self.headers.get("connection")
if conn and "close" in conn.lower():
return True
return False
# Some HTTP/1.0 implementations have support for persistent
# connections, using rules different than HTTP/1.1.
# For older HTTP, Keep-Alive indicates persistent connection.
if self.headers.get("keep-alive"):
return False
# At least Akamai returns a "Connection: Keep-Alive" header,
# which was supposed to be sent by the client.
if conn and "keep-alive" in conn.lower():
return False
# Proxy-Connection is a netscape hack.
pconn = self.headers.get("proxy-connection")
if pconn and "keep-alive" in pconn.lower():
return False
# otherwise, assume it will close
return True
def _close_conn(self):
fp = self.fp
self.fp = None
fp.close()
    def close(self):
        """Close the response: set io's "closed" flag, then release the
        underlying file object if it is still attached."""
        super().close() # set "closed" flag
        if self.fp:
            self._close_conn()
# These implementations are for the benefit of io.BufferedReader.
# XXX This class should probably be revised to act more like
# the "raw stream" that BufferedReader expects.
    def flush(self):
        """Flush the underlying file object (io.BufferedReader support)."""
        super().flush()
        if self.fp:
            self.fp.flush()
    def readable(self):
        """Part of the io "raw stream" interface: this stream is always readable."""
        return True
# End of "raw stream" methods
    def isclosed(self):
        """True if the connection is closed."""
        # NOTE: it is possible that we will not ever call self.close(). This
        #       case occurs when will_close is TRUE, length is None, and we
        #       read up to the last byte, but NOT past it.
        #
        # IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
        #          called, meaning self.isclosed() is meaningful.
        return self.fp is None
    def read(self, amt=None):
        """Read and return the response body, or up to *amt* bytes of it.

        Returns b'' when the response is already consumed or the request
        was a HEAD (which never carries a body).
        """
        if self.fp is None:
            return bytes(b"")
        if self._method == "HEAD":
            self._close_conn()
            return bytes(b"")
        if amt is not None:
            # Amount is given, so call base class version
            # (which is implemented in terms of self.readinto)
            return bytes(super(HTTPResponse, self).read(amt))
        else:
            # Amount is not given (unbounded read) so we must check self.length
            # and self.chunked
            if self.chunked:
                return self._readall_chunked()
            if self.length is None:
                s = self.fp.read()
            else:
                try:
                    s = self._safe_read(self.length)
                except IncompleteRead:
                    # Body truncated mid-stream: the connection is unusable.
                    self._close_conn()
                    raise
                self.length = 0
            self._close_conn()        # we read everything
            return bytes(s)
    def readinto(self, b):
        """Read up to len(b) bytes of the body into buffer *b*.

        Returns the number of bytes read (0 on EOF/HEAD/consumed response).
        """
        if self.fp is None:
            return 0
        if self._method == "HEAD":
            self._close_conn()
            return 0
        if self.chunked:
            return self._readinto_chunked(b)
        if self.length is not None:
            if len(b) > self.length:
                # clip the read to the "end of response"
                b = memoryview(b)[0:self.length]
        # we do not use _safe_read() here because this may be a .will_close
        # connection, and the user is reading more bytes than will be provided
        # (for example, reading in 1k chunks)
        if PY2:
            # Python 2 file objects lack readinto(); emulate it with a copy.
            data = self.fp.read(len(b))
            n = len(data)
            b[:n] = data
        else:
            n = self.fp.readinto(b)
        if not n and b:
            # Ideally, we would raise IncompleteRead if the content-length
            # wasn't satisfied, but it might break compatibility.
            self._close_conn()
        elif self.length is not None:
            self.length -= n
            if not self.length:
                self._close_conn()
        return n
def _read_next_chunk_size(self):
# Read the next chunk size from the file
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("chunk size")
i = line.find(b";")
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
return int(line, 16)
except ValueError:
# close the connection as protocol synchronisation is
# probably lost
self._close_conn()
raise
def _read_and_discard_trailer(self):
# read and discard trailer up to the CRLF terminator
### note: we shouldn't have any trailers!
while True:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("trailer line")
if not line:
# a vanishingly small number of sites EOF without
# sending the trailer
break
if line in (b'\r\n', b'\n', b''):
break
    def _readall_chunked(self):
        """Read the entire chunked body, returning it as one bytes object."""
        assert self.chunked != _UNKNOWN
        # Resume a partially-consumed chunk, if any.
        chunk_left = self.chunk_left
        value = []
        while True:
            if chunk_left is None:
                try:
                    chunk_left = self._read_next_chunk_size()
                    if chunk_left == 0:
                        # last-chunk marker
                        break
                except ValueError:
                    # Bad chunk size: hand back whatever we got so far.
                    raise IncompleteRead(bytes(b'').join(value))
            value.append(self._safe_read(chunk_left))
            # we read the whole chunk, get another
            self._safe_read(2)      # toss the CRLF at the end of the chunk
            chunk_left = None
        self._read_and_discard_trailer()
        # we read everything; close the "file"
        self._close_conn()
        return bytes(b'').join(value)
    def _readinto_chunked(self, b):
        """Read chunked body data into buffer *b*; return bytes written.

        Persists any unfinished chunk in self.chunk_left so a later call
        can resume mid-chunk.
        """
        assert self.chunked != _UNKNOWN
        chunk_left = self.chunk_left
        total_bytes = 0
        mvb = memoryview(b)
        while True:
            if chunk_left is None:
                try:
                    chunk_left = self._read_next_chunk_size()
                    if chunk_left == 0:
                        # last-chunk marker
                        break
                except ValueError:
                    raise IncompleteRead(bytes(b[0:total_bytes]))
            if len(mvb) < chunk_left:
                # Buffer exhausts before the chunk does: fill it and record
                # how much of the chunk remains for the next call.
                n = self._safe_readinto(mvb)
                self.chunk_left = chunk_left - n
                return total_bytes + n
            elif len(mvb) == chunk_left:
                # Exact fit: consume the chunk and its trailing CRLF.
                n = self._safe_readinto(mvb)
                self._safe_read(2)  # toss the CRLF at the end of the chunk
                self.chunk_left = None
                return total_bytes + n
            else:
                # Chunk fits with room to spare: read it and keep looping.
                temp_mvb = mvb[0:chunk_left]
                n = self._safe_readinto(temp_mvb)
                mvb = mvb[n:]
                total_bytes += n
            # we read the whole chunk, get another
            self._safe_read(2)      # toss the CRLF at the end of the chunk
            chunk_left = None
        self._read_and_discard_trailer()
        # we read everything; close the "file"
        self._close_conn()
        return total_bytes
    def _safe_read(self, amt):
        """Read the number of bytes requested, compensating for partial reads.

        Normally, we have a blocking socket, but a read() can be interrupted
        by a signal (resulting in a partial read).

        Note that we cannot distinguish between EOF and an interrupt when zero
        bytes have been read. IncompleteRead() will be raised in this
        situation.

        This function should be used when <amt> bytes "should" be present for
        reading. If the bytes are truly not available (due to EOF), then the
        IncompleteRead exception can be used to detect the problem.
        """
        s = []
        while amt > 0:
            # Never ask the socket layer for more than MAXAMOUNT at once.
            chunk = self.fp.read(min(amt, MAXAMOUNT))
            if not chunk:
                raise IncompleteRead(bytes(b'').join(s), amt)
            s.append(chunk)
            amt -= len(chunk)
        return bytes(b"").join(s)
    def _safe_readinto(self, b):
        """Same as _safe_read, but for reading into a buffer."""
        total_bytes = 0
        mvb = memoryview(b)
        while total_bytes < len(b):
            # Cap each readinto() at MAXAMOUNT bytes via a sub-view.
            if MAXAMOUNT < len(mvb):
                temp_mvb = mvb[0:MAXAMOUNT]
                n = self.fp.readinto(temp_mvb)
            else:
                n = self.fp.readinto(mvb)
            if not n:
                raise IncompleteRead(bytes(mvb[0:total_bytes]), len(b))
            mvb = mvb[n:]
            total_bytes += n
        return total_bytes
    def fileno(self):
        """Return the file descriptor of the underlying socket file object."""
        return self.fp.fileno()
def getheader(self, name, default=None):
if self.headers is None:
raise ResponseNotReady()
headers = self.headers.get_all(name) or default
if isinstance(headers, str) or not hasattr(headers, '__iter__'):
return headers
else:
return ', '.join(headers)
    def getheaders(self):
        """Return list of (header, value) tuples."""
        if self.headers is None:
            # Headers have not been parsed yet (begin() not called).
            raise ResponseNotReady()
        return list(self.headers.items())
# We override IOBase.__iter__ so that it doesn't check for closed-ness
    def __iter__(self):
        """Iterate over response lines without IOBase's closed-ness check."""
        return self
# For compatibility with old-style urllib responses.
    def info(self):
        """Return the response headers (old urllib response compatibility)."""
        return self.headers
    def geturl(self):
        """Return the request URL (old urllib response compatibility)."""
        return self.url
    def getcode(self):
        """Return the HTTP status code (old urllib response compatibility)."""
        return self.status
class HTTPConnection(object):
    """An HTTP client connection to a single host/port.

    Tracks request/response state (the _CS_* constants) so requests and
    responses on a persistent connection stay properly interleaved.
    """
    _http_vsn = 11  # numeric HTTP version spoken on the wire
    _http_vsn_str = 'HTTP/1.1'  # version string placed in the request line
    response_class = HTTPResponse  # class used to parse server responses
    default_port = HTTP_PORT  # used when the host string carries no port
    auto_open = 1  # reconnect automatically in send() when socket is gone
    debuglevel = 0  # >0 enables diagnostic printing
    def __init__(self, host, port=None, strict=_strict_sentinel,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
        """Initialise the connection state; no socket is opened yet.

        The deprecated ``strict`` argument is accepted (and warned about)
        for backward compatibility only.
        """
        if strict is not _strict_sentinel:
            warnings.warn("the 'strict' argument isn't supported anymore; "
                "http.client now always assumes HTTP/1.x compliant servers.",
                DeprecationWarning, 2)
        self.timeout = timeout
        self.source_address = source_address
        self.sock = None
        self._buffer = []  # pending request lines awaiting _send_output()
        self.__response = None
        self.__state = _CS_IDLE
        self._method = None
        self._tunnel_host = None
        self._tunnel_port = None
        self._tunnel_headers = {}
        self._set_hostport(host, port)
def set_tunnel(self, host, port=None, headers=None):
""" Sets up the host and the port for the HTTP CONNECT Tunnelling.
The headers argument should be a mapping of extra HTTP headers
to send with the CONNECT request.
"""
self._tunnel_host = host
self._tunnel_port = port
if headers:
self._tunnel_headers = headers
else:
self._tunnel_headers.clear()
def _set_hostport(self, host, port):
if port is None:
i = host.rfind(':')
j = host.rfind(']') # ipv6 addresses have [...]
if i > j:
try:
port = int(host[i+1:])
except ValueError:
if host[i+1:] == "": # http://foo.com:/ == http://foo.com/
port = self.default_port
else:
raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
host = host[:i]
else:
port = self.default_port
if host and host[0] == '[' and host[-1] == ']':
host = host[1:-1]
self.host = host
self.port = port
    def set_debuglevel(self, level):
        """Set the debug output level (>0 prints protocol traffic)."""
        self.debuglevel = level
    def _tunnel(self):
        """Issue a CONNECT to the proxy and consume its response headers.

        Called from connect() when a tunnel host was configured via
        set_tunnel(); raises socket.error if the proxy refuses the tunnel.
        """
        self._set_hostport(self._tunnel_host, self._tunnel_port)
        connect_str = "CONNECT %s:%d HTTP/1.0\r\n" % (self.host, self.port)
        connect_bytes = connect_str.encode("ascii")
        self.send(connect_bytes)
        for header, value in self._tunnel_headers.items():
            header_str = "%s: %s\r\n" % (header, value)
            header_bytes = header_str.encode("latin-1")
            self.send(header_bytes)
        self.send(bytes(b'\r\n'))
        response = self.response_class(self.sock, method=self._method)
        (version, code, message) = response._read_status()
        if code != 200:
            self.close()
            raise socket.error("Tunnel connection failed: %d %s" % (code,
                                                        message.strip()))
        # Drain (and discard) the proxy's response headers.
        while True:
            line = response.fp.readline(_MAXLINE + 1)
            if len(line) > _MAXLINE:
                raise LineTooLong("header line")
            if not line:
                # for sites which EOF without sending a trailer
                break
            if line in (b'\r\n', b'\n', b''):
                break
    def connect(self):
        """Connect to the host and port specified in __init__."""
        self.sock = socket_create_connection((self.host,self.port),
                                             self.timeout, self.source_address)
        # Establish the CONNECT tunnel first when one is configured.
        if self._tunnel_host:
            self._tunnel()
    def close(self):
        """Close the connection to the HTTP server."""
        if self.sock:
            self.sock.close()   # close it manually... there may be other refs
            self.sock = None
        if self.__response:
            self.__response.close()
            self.__response = None
        self.__state = _CS_IDLE
def send(self, data):
"""Send `data' to the server.
``data`` can be a string object, a bytes object, an array object, a
file-like object that supports a .read() method, or an iterable object.
"""
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise NotConnected()
if self.debuglevel > 0:
print("send:", repr(data))
blocksize = 8192
# Python 2.7 array objects have a read method which is incompatible
# with the 2-arg calling syntax below.
if hasattr(data, "read") and not isinstance(data, array):
if self.debuglevel > 0:
print("sendIng a read()able")
encode = False
try:
mode = data.mode
except AttributeError:
# io.BytesIO and other file-like objects don't have a `mode`
# attribute.
pass
else:
if "b" not in mode:
encode = True
if self.debuglevel > 0:
print("encoding file using iso-8859-1")
while 1:
datablock = data.read(blocksize)
if not datablock:
break
if encode:
datablock = datablock.encode("iso-8859-1")
self.sock.sendall(datablock)
return
try:
self.sock.sendall(data)
except TypeError:
if isinstance(data, collections.Iterable):
for d in data:
self.sock.sendall(d)
else:
raise TypeError("data should be a bytes-like object "
"or an iterable, got %r" % type(data))
    def _output(self, s):
        """Add a line of output to the current request buffer.

        Assumes that the line does *not* end with \\r\\n and is already
        encoded to bytes.
        """
        self._buffer.append(s)
def _send_output(self, message_body=None):
"""Send the currently buffered request and clear the buffer.
Appends an extra \\r\\n to the buffer.
A message_body may be specified, to be appended to the request.
"""
self._buffer.extend((bytes(b""), bytes(b"")))
msg = bytes(b"\r\n").join(self._buffer)
del self._buffer[:]
# If msg and message_body are sent in a single send() call,
# it will avoid performance problems caused by the interaction
# between delayed ack and the Nagle algorithm.
if isinstance(message_body, bytes):
msg += message_body
message_body = None
self.send(msg)
if message_body is not None:
# message_body was not a string (i.e. it is a file), and
# we must run the risk of Nagle.
self.send(message_body)
    def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
        """Send a request to the server.

        `method' specifies an HTTP request method, e.g. 'GET'.
        `url' specifies the object being requested, e.g. '/index.html'.
        `skip_host' if True does not add automatically a 'Host:' header
        `skip_accept_encoding' if True does not add automatically an
           'Accept-Encoding:' header
        """
        # if a prior response has been completed, then forget about it.
        if self.__response and self.__response.isclosed():
            self.__response = None
        # in certain cases, we cannot issue another request on this connection.
        # this occurs when:
        #   1) we are in the process of sending a request.   (_CS_REQ_STARTED)
        #   2) a response to a previous request has signalled that it is going
        #      to close the connection upon completion.
        #   3) the headers for the previous response have not been read, thus
        #      we cannot determine whether point (2) is true.   (_CS_REQ_SENT)
        #
        # if there is no prior response, then we can request at will.
        #
        # if point (2) is true, then we will have passed the socket to the
        # response (effectively meaning, "there is no prior response"), and
        # will open a new one when a new request is made.
        #
        # Note: if a prior response exists, then we *can* start a new request.
        #       We are not allowed to begin fetching the response to this new
        #       request, however, until that prior response is complete.
        #
        if self.__state == _CS_IDLE:
            self.__state = _CS_REQ_STARTED
        else:
            raise CannotSendRequest(self.__state)
        # Save the method we use, we need it later in the response phase
        self._method = method
        if not url:
            url = '/'
        request = '%s %s %s' % (method, url, self._http_vsn_str)
        # Non-ASCII characters should have been eliminated earlier
        self._output(request.encode('ascii'))
        if self._http_vsn == 11:
            # Issue some standard headers for better HTTP/1.1 compliance
            if not skip_host:
                # this header is issued *only* for HTTP/1.1
                # connections. more specifically, this means it is
                # only issued when the client uses the new
                # HTTPConnection() class. backwards-compat clients
                # will be using HTTP/1.0 and those clients may be
                # issuing this header themselves. we should NOT issue
                # it twice; some web servers (such as Apache) barf
                # when they see two Host: headers
                # If we need a non-standard port, include it in the
                # header.  If the request is going through a proxy,
                # but the host of the actual URL, not the host of the
                # proxy.
                netloc = ''
                if url.startswith('http'):
                    nil, netloc, nil, nil, nil = urlsplit(url)
                if netloc:
                    try:
                        netloc_enc = netloc.encode("ascii")
                    except UnicodeEncodeError:
                        netloc_enc = netloc.encode("idna")
                    self.putheader('Host', netloc_enc)
                else:
                    try:
                        host_enc = self.host.encode("ascii")
                    except UnicodeEncodeError:
                        host_enc = self.host.encode("idna")
                    # As per RFC 2732, IPv6 address should be wrapped with []
                    # when used as Host header
                    if self.host.find(':') >= 0:
                        host_enc = bytes(b'[' + host_enc + b']')
                    if self.port == self.default_port:
                        self.putheader('Host', host_enc)
                    else:
                        host_enc = host_enc.decode("ascii")
                        self.putheader('Host', "%s:%s" % (host_enc, self.port))
            # note: we are assuming that clients will not attempt to set these
            #       headers since *this* library must deal with the
            #       consequences. this also means that when the supporting
            #       libraries are updated to recognize other forms, then this
            #       code should be changed (removed or updated).
            # we only want a Content-Encoding of "identity" since we don't
            # support encodings such as x-gzip or x-deflate.
            if not skip_accept_encoding:
                self.putheader('Accept-Encoding', 'identity')
            # we can accept "chunked" Transfer-Encodings, but no others
            # NOTE: no TE header implies *only* "chunked"
            #self.putheader('TE', 'chunked')
            # if TE is supplied in the header, then it must appear in a
            # Connection header.
            #self.putheader('Connection', 'TE')
        else:
            # For HTTP/1.0, the server will assume "not chunked"
            pass
    def putheader(self, header, *values):
        """Send a request header line to the server.

        For example: h.putheader('Accept', 'text/html')
        Multiple values are folded into one header line joined by CRLF+TAB.
        """
        if self.__state != _CS_REQ_STARTED:
            raise CannotSendHeader()
        if hasattr(header, 'encode'):
            header = header.encode('ascii')
        values = list(values)
        for i, one_value in enumerate(values):
            if hasattr(one_value, 'encode'):
                values[i] = one_value.encode('latin-1')
            elif isinstance(one_value, int):
                values[i] = str(one_value).encode('ascii')
        value = bytes(b'\r\n\t').join(values)
        header = header + bytes(b': ') + value
        self._output(header)
    def endheaders(self, message_body=None):
        """Indicate that the last header line has been sent to the server.

        This method sends the request to the server.  The optional message_body
        argument can be used to pass a message body associated with the
        request.  The message body will be sent in the same packet as the
        message headers if it is a string, otherwise it is sent as a separate
        packet.
        """
        if self.__state == _CS_REQ_STARTED:
            self.__state = _CS_REQ_SENT
        else:
            raise CannotSendHeader()
        self._send_output(message_body)
    def request(self, method, url, body=None, headers={}):
        """Send a complete request to the server.

        NOTE(review): the mutable default ``headers={}`` is a classic Python
        pitfall; it is safe here only because the dict is never mutated, and
        it is kept for interface compatibility.
        """
        self._send_request(method, url, body, headers)
def _set_content_length(self, body):
# Set the content-length based on the body.
thelen = None
try:
thelen = str(len(body))
except TypeError as te:
# If this is a file-like object, try to
# fstat its file descriptor
try:
thelen = str(os.fstat(body.fileno()).st_size)
except (AttributeError, OSError):
# Don't send a length if this failed
if self.debuglevel > 0: print("Cannot stat!!")
if thelen is not None:
self.putheader('Content-Length', thelen)
def _send_request(self, method, url, body, headers):
# Honor explicitly requested Host: and Accept-Encoding: headers.
header_names = dict.fromkeys([k.lower() for k in headers])
skips = {}
if 'host' in header_names:
skips['skip_host'] = 1
if 'accept-encoding' in header_names:
skips['skip_accept_encoding'] = 1
self.putrequest(method, url, **skips)
if body is not None and ('content-length' not in header_names):
self._set_content_length(body)
for hdr, value in headers.items():
self.putheader(hdr, value)
if isinstance(body, str):
# RFC 2616 Section 3.7.1 says that text default has a
# default charset of iso-8859-1.
body = body.encode('iso-8859-1')
self.endheaders(body)
    def getresponse(self):
        """Get the response from the server.

        If the HTTPConnection is in the correct state, returns an
        instance of HTTPResponse or of whatever object is returned by
        class the response_class variable.

        If a request has not been sent or if a previous response has
        not be handled, ResponseNotReady is raised.  If the HTTP
        response indicates that the connection should be closed, then
        it will be closed before the response is returned.  When the
        connection is closed, the underlying socket is closed.
        """
        # if a prior response has been completed, then forget about it.
        if self.__response and self.__response.isclosed():
            self.__response = None
        # if a prior response exists, then it must be completed (otherwise, we
        # cannot read this response's header to determine the connection-close
        # behavior)
        #
        # note: if a prior response existed, but was connection-close, then the
        # socket and response were made independent of this HTTPConnection
        # object since a new request requires that we open a whole new
        # connection
        #
        # this means the prior response had one of two states:
        #   1) will_close: this connection was reset and the prior socket and
        #                  response operate independently
        #   2) persistent: the response was retained and we await its
        #                  isclosed() status to become true.
        #
        if self.__state != _CS_REQ_SENT or self.__response:
            raise ResponseNotReady(self.__state)
        # Pass the debug level through to the response only when enabled.
        if self.debuglevel > 0:
            response = self.response_class(self.sock, self.debuglevel,
                                           method=self._method)
        else:
            response = self.response_class(self.sock, method=self._method)
        response.begin()
        assert response.will_close != _UNKNOWN
        self.__state = _CS_IDLE
        if response.will_close:
            # this effectively passes the connection to the response
            self.close()
        else:
            # remember this, so we can tell when it is complete
            self.__response = response
        return response
try:
import ssl
from ssl import SSLContext
except ImportError:
pass
else:
    class HTTPSConnection(HTTPConnection):
        "This class allows communication via SSL."
        default_port = HTTPS_PORT
        # XXX Should key_file and cert_file be deprecated in favour of context?
        def __init__(self, host, port=None, key_file=None, cert_file=None,
                     strict=_strict_sentinel, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                     source_address=None, **_3to2kwargs):
            # Keyword-only arguments emulated via **kwargs for Python 2
            # compatibility (produced by the 3to2 conversion).
            if 'check_hostname' in _3to2kwargs: check_hostname = _3to2kwargs['check_hostname']; del _3to2kwargs['check_hostname']
            else: check_hostname = None
            if 'context' in _3to2kwargs: context = _3to2kwargs['context']; del _3to2kwargs['context']
            else: context = None
            super(HTTPSConnection, self).__init__(host, port, strict, timeout,
                                                  source_address)
            self.key_file = key_file
            self.cert_file = cert_file
            if context is None:
                # Some reasonable defaults
                context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                context.options |= ssl.OP_NO_SSLv2
            will_verify = context.verify_mode != ssl.CERT_NONE
            if check_hostname is None:
                # Default hostname checking to whether certs are verified.
                check_hostname = will_verify
            elif check_hostname and not will_verify:
                raise ValueError("check_hostname needs a SSL context with "
                                 "either CERT_OPTIONAL or CERT_REQUIRED")
            if key_file or cert_file:
                context.load_cert_chain(cert_file, key_file)
            self._context = context
            self._check_hostname = check_hostname
        def connect(self):
            "Connect to a host on a given (SSL) port."
            sock = socket_create_connection((self.host, self.port),
                                            self.timeout, self.source_address)
            if self._tunnel_host:
                # Tunnel first over the plain socket, then wrap with TLS.
                self.sock = sock
                self._tunnel()
            server_hostname = self.host if ssl.HAS_SNI else None
            self.sock = self._context.wrap_socket(sock,
                                                  server_hostname=server_hostname)
            try:
                if self._check_hostname:
                    ssl.match_hostname(self.sock.getpeercert(), self.host)
            except Exception:
                # Tear the socket down before propagating verification errors.
                self.sock.shutdown(socket.SHUT_RDWR)
                self.sock.close()
                raise
__all__.append("HTTPSConnection")
# ######################################
# # We use the old HTTPSConnection class from Py2.7, because ssl.SSLContext
# # doesn't exist in the Py2.7 stdlib
# class HTTPSConnection(HTTPConnection):
# "This class allows communication via SSL."
# default_port = HTTPS_PORT
# def __init__(self, host, port=None, key_file=None, cert_file=None,
# strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
# source_address=None):
# HTTPConnection.__init__(self, host, port, strict, timeout,
# source_address)
# self.key_file = key_file
# self.cert_file = cert_file
# def connect(self):
# "Connect to a host on a given (SSL) port."
# sock = socket_create_connection((self.host, self.port),
# self.timeout, self.source_address)
# if self._tunnel_host:
# self.sock = sock
# self._tunnel()
# self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file)
# __all__.append("HTTPSConnection")
# ######################################
class HTTPException(Exception):
    """Base class for all exceptions raised by this HTTP client module."""
    # Subclasses that define an __init__ must call Exception.__init__
    # or define self.args.  Otherwise, str() will fail.
    pass
class NotConnected(HTTPException):
    """Raised by send() when there is no socket and auto_open is off."""
    pass
class InvalidURL(HTTPException):
    """Raised for malformed URLs, e.g. a non-numeric port."""
    pass
class UnknownProtocol(HTTPException):
    """Raised when the server's status line names an unsupported protocol."""
    def __init__(self, version):
        self.args = version,
        self.version = version
class UnknownTransferEncoding(HTTPException):
    """Raised for a Transfer-Encoding this client does not implement."""
    pass
class UnimplementedFileMode(HTTPException):
    """Raised for a file mode this client does not implement."""
    pass
class IncompleteRead(HTTPException):
    """Raised when fewer bytes arrived than the protocol promised.

    ``partial`` holds the bytes actually read; ``expected``, when known,
    is the number of additional bytes that were still due.
    """
    def __init__(self, partial, expected=None):
        self.args = partial,
        self.partial = partial
        self.expected = expected
    def __repr__(self):
        if self.expected is not None:
            e = ', %i more expected' % self.expected
        else:
            e = ''
        return 'IncompleteRead(%i bytes read%s)' % (len(self.partial), e)
    def __str__(self):
        return repr(self)
class ImproperConnectionState(HTTPException):
    """Base class for request/response state-machine violations."""
    pass
class CannotSendRequest(ImproperConnectionState):
    """Raised by putrequest() when the connection is not idle."""
    pass
class CannotSendHeader(ImproperConnectionState):
    """Raised when headers are sent outside the request-started state."""
    pass
class ResponseNotReady(ImproperConnectionState):
    """Raised when a response is requested before one can be read."""
    pass
class BadStatusLine(HTTPException):
    """Raised when the server replies with an unparsable status line."""
    def __init__(self, line):
        # Show repr() for an empty line so the message is not blank.
        if not line:
            line = repr(line)
        self.args = line,
        self.line = line
class LineTooLong(HTTPException):
    """Raised when a protocol line exceeds the _MAXLINE safety limit."""
    def __init__(self, line_type):
        HTTPException.__init__(self, "got more than %d bytes when reading %s"
                                     % (_MAXLINE, line_type))
# for backwards compatibility with httplib's module-level `error` name
error = HTTPException
| gpl-3.0 |
GenericStudent/home-assistant | tests/components/file/test_sensor.py | 6 | 2611 | """The tests for local file sensor platform."""
import pytest
from homeassistant.const import STATE_UNKNOWN
from homeassistant.setup import async_setup_component
from tests.async_mock import Mock, mock_open, patch
from tests.common import mock_registry
@pytest.fixture
def entity_reg(hass):
    """Return an empty, loaded, registry."""
    # Backed by mock_registry so no real storage is touched.
    return mock_registry(hass)
@patch("os.path.isfile", Mock(return_value=True))
@patch("os.access", Mock(return_value=True))
async def test_file_value(hass, entity_reg):
    """Test the File sensor."""
    config = {
        "sensor": {"platform": "file", "name": "file1", "file_path": "mock.file1"}
    }
    # The file contents are mocked; the platform never touches disk.
    m_open = mock_open(read_data="43\n45\n21")
    with patch(
        "homeassistant.components.file.sensor.open", m_open, create=True
    ), patch.object(hass.config, "is_allowed_path", return_value=True):
        assert await async_setup_component(hass, "sensor", config)
        await hass.async_block_till_done()
    state = hass.states.get("sensor.file1")
    # The sensor reports the last line of the file.
    assert state.state == "21"
@patch("os.path.isfile", Mock(return_value=True))
@patch("os.access", Mock(return_value=True))
async def test_file_value_template(hass, entity_reg):
    """Test the File sensor with JSON entries."""
    config = {
        "sensor": {
            "platform": "file",
            "name": "file2",
            "file_path": "mock.file2",
            "value_template": "{{ value_json.temperature }}",
        }
    }
    data = '{"temperature": 29, "humidity": 31}\n' '{"temperature": 26, "humidity": 36}'
    m_open = mock_open(read_data=data)
    with patch(
        "homeassistant.components.file.sensor.open", m_open, create=True
    ), patch.object(hass.config, "is_allowed_path", return_value=True):
        assert await async_setup_component(hass, "sensor", config)
        await hass.async_block_till_done()
    state = hass.states.get("sensor.file2")
    # The template is applied to the last line of the file.
    assert state.state == "26"
@patch("os.path.isfile", Mock(return_value=True))
@patch("os.access", Mock(return_value=True))
async def test_file_empty(hass, entity_reg):
    """Test the File sensor with an empty file."""
    config = {"sensor": {"platform": "file", "name": "file3", "file_path": "mock.file"}}
    m_open = mock_open(read_data="")
    with patch(
        "homeassistant.components.file.sensor.open", m_open, create=True
    ), patch.object(hass.config, "is_allowed_path", return_value=True):
        assert await async_setup_component(hass, "sensor", config)
        await hass.async_block_till_done()
    state = hass.states.get("sensor.file3")
    # An empty file yields no value at all.
    assert state.state == STATE_UNKNOWN
| apache-2.0 |
ujenmr/ansible | lib/ansible/plugins/become/doas.py | 9 | 4239 | # -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
become: doas
short_description: Do As user
description:
- This become plugins allows your remote/login user to execute commands as another user via the doas utility.
author: ansible (@core)
version_added: "2.8"
options:
become_user:
description: User you 'become' to execute the task
ini:
- section: privilege_escalation
key: become_user
- section: doas_become_plugin
key: user
vars:
- name: ansible_become_user
- name: ansible_doas_user
env:
- name: ANSIBLE_BECOME_USER
- name: ANSIBLE_DOAS_USER
become_exe:
description: Doas executable
default: doas
ini:
- section: privilege_escalation
key: become_exe
- section: doas_become_plugin
key: executable
vars:
- name: ansible_become_exe
- name: ansible_doas_exe
env:
- name: ANSIBLE_BECOME_EXE
- name: ANSIBLE_DOAS_EXE
become_flags:
description: Options to pass to doas
default:
ini:
- section: privilege_escalation
key: become_flags
- section: doas_become_plugin
key: flags
vars:
- name: ansible_become_flags
- name: ansible_doas_flags
env:
- name: ANSIBLE_BECOME_FLAGS
- name: ANSIBLE_DOAS_FLAGS
become_pass:
description: password for doas prompt
required: False
vars:
- name: ansible_become_password
- name: ansible_become_pass
- name: ansible_doas_pass
env:
- name: ANSIBLE_BECOME_PASS
- name: ANSIBLE_DOAS_PASS
ini:
- section: doas_become_plugin
key: password
prompt_l10n:
description:
- List of localized strings to match for prompt detection
- If empty we'll use the built in one
default: []
ini:
- section: doas_become_plugin
key: localized_prompts
vars:
- name: ansible_doas_prompt_l10n
env:
- name: ANSIBLE_DOAS_PROMPT_L10N
"""
import re
from ansible.module_utils._text import to_bytes
from ansible.plugins.become import BecomeBase
class BecomeModule(BecomeBase):
    """Become plugin that escalates privileges via the doas utility."""
    name = 'doas'
    # messages for detecting prompted password issues
    fail = ('Permission denied',)
    missing = ('Authorization required',)
    def check_password_prompt(self, b_output):
        ''' checks if the expected password prompt exists in b_output '''
        # FIXME: more accurate would be: 'doas (%s@' % remote_user
        # however become plugins don't have that information currently
        b_prompts = [to_bytes(p) for p in self.get_option('prompt_l10n')] or [br'doas \(', br'Password:']
        b_prompt = b"|".join(b_prompts)
        return bool(re.match(b_prompt, b_output))
    def build_become_command(self, cmd, shell):
        """Wrap *cmd* in a doas invocation built from the plugin options."""
        super(BecomeModule, self).build_become_command(cmd, shell)
        if not cmd:
            return cmd
        self.prompt = True
        become_exe = self.get_option('become_exe') or self.name
        flags = self.get_option('become_flags') or ''
        # Force non-interactive mode when no password is available.
        if not self.get_option('become_pass') and '-n' not in flags:
            flags += ' -n'
        user = self.get_option('become_user') or ''
        if user:
            user = '-u %s' % (user)
        success_cmd = self._build_success_command(cmd, shell, noexe=True)
        executable = getattr(shell, 'executable', shell.SHELL_FAMILY)
        return '%s %s %s %s -c %s' % (become_exe, flags, user, executable, success_cmd)
| gpl-3.0 |
mongodb/mongo-python-driver | test/test_errors.py | 2 | 4350 | # Copyright 2020-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import sys
import traceback
sys.path[0:0] = [""]
from pymongo.errors import (BulkWriteError,
EncryptionError,
NotPrimaryError,
NotMasterError,
OperationFailure)
from test import (PyMongoTestCase,
unittest)
class TestErrors(PyMongoTestCase):
    """Tests for pymongo exception classes: stringification and pickling."""
    def test_not_primary_error(self):
        exc = NotPrimaryError("not primary test", {"errmsg": "error"})
        self.assertIn("full error", str(exc))
        try:
            raise exc
        except NotPrimaryError:
            self.assertIn("full error", traceback.format_exc())
    def test_operation_failure(self):
        exc = OperationFailure("operation failure test", 10,
                               {"errmsg": "error"})
        self.assertIn("full error", str(exc))
        try:
            raise exc
        except OperationFailure:
            self.assertIn("full error", traceback.format_exc())
    def _test_unicode_strs(self, exc):
        # Helper: the message must render the non-BMP character and the full
        # error document in both str() and the raised traceback.
        if 'PyPy' in sys.version:
            # PyPy displays unicode in repr differently.
            self.assertEqual("unicode \U0001f40d, full error: {"
                             "'errmsg': 'unicode \\U0001f40d'}", str(exc))
        else:
            self.assertEqual("unicode \U0001f40d, full error: {"
                             "'errmsg': 'unicode \U0001f40d'}", str(exc))
        try:
            raise exc
        except Exception:
            self.assertIn("full error", traceback.format_exc())
    def test_unicode_strs_operation_failure(self):
        exc = OperationFailure('unicode \U0001f40d', 10,
                               {"errmsg": 'unicode \U0001f40d'})
        self._test_unicode_strs(exc)
    def test_unicode_strs_not_master_error(self):
        exc = NotPrimaryError('unicode \U0001f40d',
                              {"errmsg": 'unicode \U0001f40d'})
        self._test_unicode_strs(exc)
    def assertPyMongoErrorEqual(self, exc1, exc2):
        # Compare the attributes common to every PyMongoError.
        self.assertEqual(exc1._message, exc2._message)
        self.assertEqual(exc1._error_labels, exc2._error_labels)
        self.assertEqual(exc1.args, exc2.args)
        self.assertEqual(str(exc1), str(exc2))
    def assertOperationFailureEqual(self, exc1, exc2):
        # OperationFailure additionally carries code/details/_max_wire_version.
        self.assertPyMongoErrorEqual(exc1, exc2)
        self.assertEqual(exc1.code, exc2.code)
        self.assertEqual(exc1.details, exc2.details)
        self.assertEqual(exc1._max_wire_version, exc2._max_wire_version)
    def test_pickle_NotPrimaryError(self):
        exc = NotPrimaryError("not primary test", {"errmsg": "error"})
        self.assertPyMongoErrorEqual(exc, pickle.loads(pickle.dumps(exc)))
    def test_pickle_OperationFailure(self):
        exc = OperationFailure('error', code=5, details={}, max_wire_version=7)
        self.assertOperationFailureEqual(exc, pickle.loads(pickle.dumps(exc)))
    def test_pickle_BulkWriteError(self):
        exc = BulkWriteError({})
        self.assertOperationFailureEqual(exc, pickle.loads(pickle.dumps(exc)))
        self.assertIn("batch op errors occurred", str(exc))
    def test_pickle_EncryptionError(self):
        cause = OperationFailure('error', code=5, details={},
                                 max_wire_version=7)
        exc = EncryptionError(cause)
        exc2 = pickle.loads(pickle.dumps(exc))
        self.assertPyMongoErrorEqual(exc, exc2)
        self.assertOperationFailureEqual(cause, exc2.cause)
    def test_NotMasterError_catches_NotPrimaryError(self):
        # NotMasterError is the deprecated alias; it must still catch the
        # newer NotPrimaryError.
        with self.assertRaises(NotMasterError) as exc:
            raise NotPrimaryError("not primary test", {"errmsg": "error"})
        self.assertIn("full error", str(exc.exception))
self.assertIn("full error", str(exc.exception))
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
sdewald/madm | madm_restapi/menu/serializers.py | 1 | 1713 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from rest_framework import serializers
from rest_framework_recursive.fields import RecursiveField
class NodeAttributeSerializer(serializers.Serializer):
    """
    Serializes ``NavigationNode.attr``

    Read-only representation of the per-node attribute dict attached by
    django CMS menu modifiers (visibility flags, redirect target, etc.).
    """
    auth_required = serializers.BooleanField()
    is_home = serializers.BooleanField()
    redirect_url = serializers.CharField()
    reverse_id = serializers.CharField()
    soft_root = serializers.BooleanField()
    visible_for_anonymous = serializers.BooleanField()
    visible_for_authenticated = serializers.BooleanField()
class NavigationNodeSerializer(serializers.Serializer):
    """
    Serializes a ``NavigationNode``

    Emits the node's own fields plus flattened parent info, its ``attr``
    dict (via :class:`NodeAttributeSerializer`), and a recursive
    ``children`` list so a whole menu tree serializes in one call.
    """
    id = serializers.IntegerField()
    title = serializers.CharField()
    url = serializers.CharField()
    selected = serializers.BooleanField()
    namespace = serializers.CharField()
    visible = serializers.BooleanField()
    ancestor = serializers.BooleanField()
    descendant = serializers.BooleanField()
    sibling = serializers.BooleanField()
    # Not every menu modifier annotates these, hence required=False.
    is_leaf_node = serializers.BooleanField(required=False)
    menu_level = serializers.IntegerField(required=False)
    parent_id = serializers.IntegerField()
    parent_url = serializers.SerializerMethodField()
    parent_namespace = serializers.CharField()
    attrs = serializers.SerializerMethodField()
    # RecursiveField lets children reuse this serializer at any depth.
    children = serializers.ListField(child=RecursiveField(), required=False)

    @staticmethod
    def get_attrs(instance):
        """Serialize the node's ``attr`` dict through NodeAttributeSerializer."""
        return NodeAttributeSerializer(instance.attr, many=False).data

    @staticmethod
    def get_parent_url(instance):
        """Return the parent's URL, or None for a root node (implicit)."""
        if instance.parent:
            return instance.parent.url
| gpl-3.0 |
OCA/l10n-switzerland | l10n_ch_isr_payment_grouping/tests/test_l10n_ch_payment_isr.py | 1 | 9692 | # Copyright 2020 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import time
from odoo import tools
from odoo.modules.module import get_resource_path
from odoo.tests import Form, common, tagged
# Sample 27-digit Swiss ISR payment reference numbers used by the tests below.
ISR1 = "703192500010549027000209403"
ISR2 = "120000000000234478943216899"
@tagged("post_install", "-at_install")
class PaymentISR(common.TransactionCase):
    """Test grouping of payment by ISR reference

    Supplier invoices carrying the same ISR reference for the same partner
    must be merged into a single payment; different references, different
    partners, or group_payment=False must keep payments separate.
    """

    def _load(self, module, *args):
        """Load an XML data file from *module* as the module loader would."""
        tools.convert_file(
            self.cr,
            "l10n_ch",
            get_resource_path(module, *args),
            {},
            "init",
            False,
            "test",
            self.registry._assertion_report,
        )

    def create_supplier_invoice(
        self, supplier, ref, currency_to_use="base.CHF", inv_date=None
    ):
        """Generate and post a one-line 42.00 test vendor bill.

        :param supplier: res.partner record used as vendor
        :param ref: payment reference (ISR or free text); may be falsy
        :param currency_to_use: xmlid of the currency, CHF by default
        :param inv_date: optional invoice date; defaults to Dec 22 this year
        """
        f = Form(self.env["account.move"].with_context(default_type="in_invoice"))
        f.partner_id = supplier
        f.invoice_payment_ref = ref
        f.currency_id = self.env.ref(currency_to_use)
        f.invoice_date = inv_date or time.strftime("%Y") + "-12-22"
        with f.invoice_line_ids.new() as line:
            line.product_id = self.env.ref("product.product_product_4")
            line.quantity = 1
            line.price_unit = 42
        invoice = f.save()
        invoice.post()
        return invoice

    def create_bank_account(self, number, partner, bank=None):
        """ Generates a test res.partner.bank. """
        return self.env["res.partner.bank"].create(
            {"acc_number": number, "bank_id": bank.id, "partner_id": partner.id}
        )

    def create_isrb_account(self, number, partner):
        """Generate a test res.partner.bank with an ISR-B postal number."""
        return self.env["res.partner.bank"].create(
            {
                # acc_number must be unique, so prefix with the partner name.
                "acc_number": partner.name + number,
                "l10n_ch_postal": number,
                "partner_id": partner.id,
            }
        )

    def setUp(self):
        super().setUp()
        # Minimal chart of accounts needed to post invoices/payments.
        self._load("account", "test", "account_minimal_test.xml")
        self.payment_method_manual_in = self.env.ref(
            "account.account_payment_method_manual_in"
        )
        abs_bank = self.env["res.bank"].create(
            {"name": "Alternative Bank Schweiz", "bic": "ABSOCH22XXX"}
        )
        self.bank_journal_chf = self.env["account.journal"].create(
            {"name": "Bank", "type": "bank", "code": "BNK41"}
        )
        # Two ISR-B suppliers sharing the same postal number, plus one
        # plain-IBAN supplier, to exercise all grouping combinations.
        self.supplier_isrb1 = self.env["res.partner"].create({"name": "Supplier ISR 1"})
        self.create_isrb_account("01-162-8", self.supplier_isrb1)
        self.supplier_isrb2 = self.env["res.partner"].create({"name": "Supplier ISR 2"})
        self.create_isrb_account("01-162-8", self.supplier_isrb2)
        self.supplier_iban = self.env["res.partner"].create({"name": "Supplier IBAN"})
        self.create_bank_account(
            "CH61 0839 0107 6280 0100 0", self.supplier_iban, abs_bank
        )

    def _filter_vals_to_test(self, vals):
        """Project payment vals to (ref, partner, invoice count, amount),
        sorted for deterministic comparison."""
        return [
            (
                v["communication"],
                v["partner_id"],
                len(v["invoice_ids"][0][2]),
                v["amount"],
            )
            for v in sorted(vals, key=lambda i: (i["communication"], i["partner_id"]))
        ]

    def test_payment_isr_grouping(self):
        """Create multiple invoices to test grouping by partner and ISR
        """
        invoices = (
            self.create_supplier_invoice(self.supplier_isrb1, ISR1)
            | self.create_supplier_invoice(self.supplier_isrb1, ISR2)
            | self.create_supplier_invoice(
                self.supplier_isrb1, ISR2, inv_date=time.strftime("%Y") + "-12-23"
            )
            | self.create_supplier_invoice(self.supplier_isrb2, ISR2)
            | self.create_supplier_invoice(self.supplier_iban, "1234")
            | self.create_supplier_invoice(self.supplier_iban, "5678")
        )
        # create an invoice where ref is set instead of invoice_payment_ref
        inv_ref = self.create_supplier_invoice(self.supplier_isrb1, False)
        inv_ref.ref = ISR2
        invoices |= inv_ref
        inv_no_ref = self.create_supplier_invoice(self.supplier_iban, False)
        invoices |= inv_no_ref
        PaymentRegister = self.env["account.payment.register"]
        ctx = {"active_model": "account.move", "active_ids": invoices.ids}
        register = PaymentRegister.with_context(ctx).create({"group_payment": True})
        vals = register.get_payments_vals()
        self.assertEqual(len(vals), 4)
        expected_vals = [
            # ref, partner, invoice count, amount
            # 3 invoices #2, #3 and inv_ref grouped in one payment with a single ref
            (ISR2, self.supplier_isrb1.id, 3, 126.0),
            # different partner, different payment
            (ISR2, self.supplier_isrb2.id, 1, 42.0),
            # not ISR, standard grouping
            ("1234 5678 {}".format(inv_no_ref.name), self.supplier_iban.id, 3, 126.0,),
            # different ISR reference, different payment
            (ISR1, self.supplier_isrb1.id, 1, 42.0),
        ]
        to_test_vals = self._filter_vals_to_test(vals)
        self.assertEqual(to_test_vals, expected_vals)

    def test_payment_isr_grouping_single_supplier(self):
        """Test grouping of ISR on a single supplier
        No grouping of different ISR should apply
        """
        invoices = (
            self.create_supplier_invoice(self.supplier_isrb1, ISR1)
            | self.create_supplier_invoice(self.supplier_isrb1, ISR2)
            | self.create_supplier_invoice(
                self.supplier_isrb1, ISR2, inv_date=time.strftime("%Y") + "-12-23"
            )
        )
        PaymentRegister = self.env["account.payment.register"]
        ctx = {"active_model": "account.move", "active_ids": invoices.ids}
        register = PaymentRegister.with_context(ctx).create({"group_payment": True})
        vals = register.get_payments_vals()
        self.assertEqual(len(vals), 2)
        expected_vals = [
            # ref, partner, invoice count, amount
            # 2 invoices with same ISR are grouped
            (ISR2, self.supplier_isrb1.id, 2, 84.0),
            # the invoice with a different ISR makes a different payment
            (ISR1, self.supplier_isrb1.id, 1, 42.0),
        ]
        to_test_vals = self._filter_vals_to_test(vals)
        self.assertEqual(to_test_vals, expected_vals)

    def test_payment_isr_single_supplier(self):
        """Test no grouping of ISR on a single supplier
        No grouping on ISR should apply
        """
        invoices = (
            self.create_supplier_invoice(self.supplier_isrb1, ISR1)
            | self.create_supplier_invoice(self.supplier_isrb1, ISR2)
            | self.create_supplier_invoice(
                self.supplier_isrb1, ISR2, inv_date=time.strftime("%Y") + "-12-23"
            )
        )
        PaymentRegister = self.env["account.payment.register"]
        ctx = {"active_model": "account.move", "active_ids": invoices.ids}
        # group_payment=False: one payment per invoice regardless of ISR.
        register = PaymentRegister.with_context(ctx).create({"group_payment": False})
        vals = register.get_payments_vals()
        self.assertEqual(len(vals), 3)
        expected_vals = [
            # no grouping expected
            # ref, partner, invoice count, amount
            (ISR2, self.supplier_isrb1.id, 1, 42.0),
            (ISR2, self.supplier_isrb1.id, 1, 42.0),
            (ISR1, self.supplier_isrb1.id, 1, 42.0),
        ]
        to_test_vals = self._filter_vals_to_test(vals)
        self.assertEqual(to_test_vals, expected_vals)

    def test_payment_non_isr_grouping_single_supplier(self):
        """Test grouping of non ISR on a single partner
        Grouping on free ref should apply
        """
        invoices = self.create_supplier_invoice(
            self.supplier_iban, "INV1"
        ) | self.create_supplier_invoice(self.supplier_iban, "INV2")
        PaymentRegister = self.env["account.payment.register"]
        ctx = {"active_model": "account.move", "active_ids": invoices.ids}
        register = PaymentRegister.with_context(ctx).create({"group_payment": True})
        vals = register.get_payments_vals()
        self.assertEqual(len(vals), 1)
        expected_vals = [
            # 2 invoices grouped in one payment
            # ref, partner, invoice count, amount
            ("INV1 INV2", self.supplier_iban.id, 2, 84.0)
        ]
        to_test_vals = self._filter_vals_to_test(vals)
        self.assertEqual(to_test_vals, expected_vals)

    def test_payment_non_isr_single_supplier(self):
        """Test no grouping of non ISR on a single partner
        No grouping on free ref applies
        """
        # This differs from v12 where an automatic grouping is done anyway
        # v13 respects the choice of the user
        invoices = self.create_supplier_invoice(
            self.supplier_iban, "INV1"
        ) | self.create_supplier_invoice(self.supplier_iban, "INV2")
        PaymentRegister = self.env["account.payment.register"]
        ctx = {"active_model": "account.move", "active_ids": invoices.ids}
        register = PaymentRegister.with_context(ctx).create({"group_payment": False})
        vals = register.get_payments_vals()
        self.assertEqual(len(vals), 2)
        expected_vals = [
            # no grouping expected
            # ref, partner, invoice count, amount
            ("INV1", self.supplier_iban.id, 1, 42.0),
            ("INV2", self.supplier_iban.id, 1, 42.0),
        ]
        to_test_vals = self._filter_vals_to_test(vals)
        self.assertEqual(to_test_vals, expected_vals)
| agpl-3.0 |
DESHRAJ/django-organizations | tests/test_models.py | 2 | 6989 | # -*- coding: utf-8 -*-
from functools import partial
from django.db import IntegrityError
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from organizations.models import (Organization, OrganizationUser,
OrganizationOwner)
from organizations.utils import create_organization
from test_accounts.models import Account
from test_custom.models import Team
@override_settings(USE_TZ=True)
class ActiveManagerTests(TestCase):
    """The ``active`` manager must hide inactive organizations that the
    default manager still returns."""

    fixtures = ['users.json', 'orgs.json']

    def test_active(self):
        # Fixtures define three orgs, one of which is inactive.
        self.assertEqual(Organization.objects.count(), 3)
        self.assertEqual(Organization.active.count(), 2)

    def test_by_user(self):
        # Per-user filtering must respect the same active/inactive split.
        dave = User.objects.get(username="dave")
        self.assertEqual(Organization.objects.get_for_user(dave).count(), 3)
        self.assertEqual(Organization.active.get_for_user(dave).count(), 2)
@override_settings(USE_TZ=True)
class OrgModelTests(TestCase):
    """Exercise membership, role checks, and ownership transitions on the
    Organization model using the users/orgs fixtures."""

    fixtures = ['users.json', 'orgs.json']

    def setUp(self):
        # Fixture layout: kurt owns Nirvana (krist admin, dave member);
        # dave owns Foo Fighters; duder belongs to neither.
        self.kurt = User.objects.get(username="kurt")
        self.dave = User.objects.get(username="dave")
        self.krist = User.objects.get(username="krist")
        self.duder = User.objects.get(username="duder")
        self.nirvana = Organization.objects.get(name="Nirvana")
        self.foo = Organization.objects.get(name="Foo Fighters")

    def test_org_string_representation(self):
        """Ensure that models' string representation are error free"""
        self.foo.name = u"Föö Fíghterß"
        self.assertTrue("{0}".format(self.foo))
        self.assertTrue("{0}".format(self.foo.owner))
        self.assertTrue("{0}".format(self.foo.owner.organization_user))

    def test_relation_name(self):
        """Ensure user-related name is accessible from common attribute"""
        self.assertEqual(self.foo.user_relation_name, "organizations_organization")

    def test_duplicate_members(self):
        """Ensure that a User can only have one OrganizationUser object"""
        self.assertRaises(IntegrityError, self.nirvana.add_user, self.dave)

    def test_is_member(self):
        self.assertTrue(self.nirvana.is_member(self.kurt))
        self.assertTrue(self.nirvana.is_member(self.dave))
        self.assertTrue(self.foo.is_member(self.dave))
        self.assertFalse(self.foo.is_member(self.kurt))

    def test_is_admin(self):
        self.assertTrue(self.nirvana.is_admin(self.kurt))
        self.assertTrue(self.nirvana.is_admin(self.krist))
        self.assertFalse(self.nirvana.is_admin(self.dave))
        self.assertTrue(self.foo.is_admin(self.dave))

    def test_is_owner(self):
        self.assertTrue(self.nirvana.is_owner(self.kurt))
        self.assertTrue(self.foo.is_owner(self.dave))
        self.assertFalse(self.nirvana.is_owner(self.dave))
        self.assertFalse(self.nirvana.is_owner(self.krist))

    def test_add_user(self):
        new_guy = self.foo.add_user(self.krist)
        self.assertTrue(isinstance(new_guy, OrganizationUser))
        self.assertEqual(new_guy.organization, self.foo)

    def test_remove_user(self):
        # Fixed: the return value of add_user was bound to an unused local.
        self.foo.add_user(self.krist)
        self.foo.remove_user(self.krist)
        self.assertFalse(self.foo.users.filter(pk=self.krist.pk).exists())

    def test_get_or_add_user(self):
        """Ensure `get_or_add_user` adds a user IFF it exists"""
        new_guy, created = self.foo.get_or_add_user(self.duder)
        self.assertTrue(isinstance(new_guy, OrganizationUser))
        self.assertEqual(new_guy.organization, self.foo)
        self.assertTrue(created)
        # Second call with an existing member must not create a duplicate.
        new_guy, created = self.foo.get_or_add_user(self.dave)
        self.assertTrue(isinstance(new_guy, OrganizationUser))
        self.assertFalse(created)

    def test_delete_owner(self):
        """Deleting the owning OrganizationUser must be refused."""
        from organizations.exceptions import OwnershipRequired
        owner = self.nirvana.owner.organization_user
        self.assertRaises(OwnershipRequired, owner.delete)

    def test_change_owner(self):
        admin = self.nirvana.organization_users.get(user__username="krist")
        self.nirvana.change_owner(admin)
        owner = self.nirvana.owner.organization_user
        self.assertEqual(owner, admin)

    def test_delete_missing_owner(self):
        """Ensure an org user can be deleted when there is no owner"""
        org = Organization.objects.create(name="Some test", slug="some-test")
        # Avoid the Organization.add_user method which would make an owner
        org_user = OrganizationUser.objects.create(user=self.kurt,
                                                   organization=org)
        # Just make sure it doesn't raise an error
        org_user.delete()

    def test_nonmember_owner(self):
        """Assigning an owner from another organization must be rejected."""
        from organizations.exceptions import OrganizationMismatch
        foo_user = self.foo.owner
        self.nirvana.owner = foo_user
        self.assertRaises(OrganizationMismatch, self.nirvana.owner.save)
@override_settings(USE_TZ=True)
class OrgDeleteTests(TestCase):
    """Deleting organizations or memberships must never cascade to Users."""

    fixtures = ['users.json', 'orgs.json']

    def test_delete_account(self):
        """Deleting an org removes its ownership record, not its users."""
        self.assertEqual(OrganizationOwner.objects.count(), 3)
        self.assertEqual(User.objects.count(), 4)
        Organization.objects.get(name="Scream").delete()
        self.assertEqual(OrganizationOwner.objects.count(), 2)
        self.assertEqual(User.objects.count(), 4)

    def test_delete_orguser(self):
        """Deleting a membership leaves the underlying User intact."""
        krist = User.objects.get(username="krist")
        OrganizationUser.objects.filter(
            organization__name="Nirvana", user=krist).delete()
        self.assertTrue(krist.pk)
class CustomModelTests(TestCase):
    """Tests against custom organization models (Account, Team) built on
    the abstract base classes."""

    # Load the world as we know it.
    fixtures = ['users.json', 'orgs.json']

    def setUp(self):
        self.kurt = User.objects.get(username="kurt")
        self.dave = User.objects.get(username="dave")
        self.krist = User.objects.get(username="krist")
        self.duder = User.objects.get(username="duder")
        self.red_account = Account.objects.create(
            name="Red Account",
            monthly_subscription=1200,
        )

    def test_org_string(self):
        # Fixed: use the str() builtin instead of calling __str__ directly.
        self.assertEqual(str(self.red_account), "Red Account")

    def test_relation_name(self):
        """Ensure user-related name is accessible from common attribute"""
        self.assertEqual(self.red_account.user_relation_name,
                         "test_accounts_account")

    def test_change_user(self):
        """Ensure custom organizations validate in owner change"""
        create_team = partial(create_organization, model=Team)
        hometeam = create_team(self.dave, "Hometeam")
        duder_org_user = hometeam.add_user(self.duder)
        hometeam.owner.organization_user = duder_org_user
        hometeam.owner.save()
| bsd-2-clause |
yatinkumbhare/openstack-nova | nova/tests/functional/v3/test_flavor_access.py | 29 | 4566 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.v3 import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class FlavorAccessSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
    """API sample tests for the os-flavor-access extension: creating
    flavors, listing/adding/removing tenant access, and verifying the
    JSON responses against the stored sample templates."""

    # Flavor access management requires admin credentials.
    ADMIN_API = True
    extension_name = 'flavor-access'
    # TODO(Park): Overriding '_api_version' till all functional tests
    # are merged between v2 and v2.1. After that base class variable
    # itself can be changed to 'v2'
    _api_version = 'v2'

    def _get_flags(self):
        """Enable flavor_access plus the extensions it depends on."""
        f = super(FlavorAccessSampleJsonTests, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.'
            'flavor_access.Flavor_access')
        # FlavorAccess extension also needs Flavormanage to be loaded.
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.'
            'flavormanage.Flavormanage')
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.'
            'flavor_disabled.Flavor_disabled')
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.'
            'flavorextradata.Flavorextradata')
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.'
            'flavor_swap.Flavor_swap')
        return f

    def _add_tenant(self):
        """Grant 'fake_tenant' access to flavor 10 and verify the response."""
        subs = {
            'tenant_id': 'fake_tenant',
            'flavor_id': 10,
        }
        response = self._do_post('flavors/10/action',
                                 'flavor-access-add-tenant-req',
                                 subs)
        self._verify_response('flavor-access-add-tenant-resp',
                              subs, response, 200)

    def _create_flavor(self):
        """Create test flavor 10 via the flavormanage API."""
        subs = {
            'flavor_id': 10,
            'flavor_name': 'test_flavor'
        }
        response = self._do_post("flavors",
                                 "flavor-access-create-req",
                                 subs)
        subs.update(self._get_regexes())
        self._verify_response("flavor-access-create-resp", subs, response, 200)

    def test_flavor_access_create(self):
        self._create_flavor()

    def test_flavor_access_detail(self):
        response = self._do_get('flavors/detail')
        subs = self._get_regexes()
        self._verify_response('flavor-access-detail-resp', subs, response, 200)

    def test_flavor_access_list(self):
        self._create_flavor()
        self._add_tenant()
        flavor_id = 10
        response = self._do_get('flavors/%s/os-flavor-access' % flavor_id)
        subs = {
            'flavor_id': flavor_id,
            'tenant_id': 'fake_tenant',
        }
        self._verify_response('flavor-access-list-resp', subs, response, 200)

    def test_flavor_access_show(self):
        flavor_id = 1
        response = self._do_get('flavors/%s' % flavor_id)
        subs = {
            'flavor_id': flavor_id
        }
        subs.update(self._get_regexes())
        self._verify_response('flavor-access-show-resp', subs, response, 200)

    def test_flavor_access_add_tenant(self):
        self._create_flavor()
        self._add_tenant()

    def test_flavor_access_remove_tenant(self):
        self._create_flavor()
        self._add_tenant()
        subs = {
            'tenant_id': 'fake_tenant',
        }
        response = self._do_post('flavors/10/action',
                                 "flavor-access-remove-tenant-req",
                                 subs)
        # After removal only the requesting project keeps access.
        exp_subs = {
            "tenant_id": self.api.project_id,
            "flavor_id": "10"
        }
        self._verify_response('flavor-access-remove-tenant-resp',
                              exp_subs, response, 200)
| apache-2.0 |
wilebeast/FireFox-OS | B2G/external/sonivox/jet_tools/JetCreator/JetFile.py | 7 | 32873 | """
File:
JetFile.py
Contents and purpose:
Auditions a jet file to simulate interactive music functions
Copyright (c) 2008 Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import with_statement
import logging
import ConfigParser
import struct
import os
import sys
import midifile
from JetUtils import *
from JetDefs import *
VERSION = '0.1'

# JET file defines
JET_HEADER_STRUCT = '<4sl'  # little-endian: 4-byte tag + 32-bit length
JET_HEADER_TAG = 'JET '
JET_VERSION = 0x01000000

# JET chunk tags
JET_INFO_CHUNK = 'JINF'
JET_SMF_CHUNK = 'JSMF'
JET_DLS_CHUNK = 'JDLS'

# JINF defines
JINF_STRUCT = '<4sl4sl4sl4sl'
JINF_JET_VERSION = 'JVER'
JINF_NUM_SMF_CHUNKS = 'SMF#'
JINF_NUM_DLS_CHUNKS = 'DLS#'

# JCOP defines
JCOP_STRUCT = '<4sl'
JCOP_CHUNK = 'JCOP'

# JAPP defines
JAPP_STRUCT = '<4sl'
JAPP_CHUNK = 'JAPP'

# config file defines: section/option names used in the .jtc config file
OUTPUT_SECTION = 'output'
OUTPUT_FILENAME = 'filename'
OUTPUT_COPYRIGHT = 'copyright'
OUTPUT_APP_DATA = 'app_data'
OUTPUT_CHASE_CONTROLLERS = 'chase_controllers'
OUTPUT_OMIT_EMPTY_TRACKS = 'omit_empty_tracks'
SEGMENT_SECTION = 'segment'
SEGMENT_FILENAME = 'filename'
SEGMENT_DLSFILE = 'dlsfile'
SEGMENT_NAME = 'segname'
SEGMENT_START = 'start'
SEGMENT_END = 'end'
SEGMENT_END_MARKER = 'end_marker'
SEGMENT_QUANTIZE = 'quantize'
SEGMENT_OUTPUT = 'output'
SEGMENT_LENGTH = 'length'
SEGMENT_DUMP_FILE = 'dump'
SEGMENT_TRANSPOSE = 'transpose'
SEGMENT_REPEAT = 'repeat'
SEGMENT_MUTE_FLAGS = 'mute_flags'
LIBRARY_SECTION = 'libraries'
LIBRARY_FILENAME = 'lib'
CLIP_PREFIX = 'clip'
APP_PREFIX = 'app'

# JET events: MIDI controller numbers reserved for JET in-band signaling
JET_EVENT_MARKER = 102
JET_MARKER_LOOP_END = 0
JET_EVENT_TRIGGER_CLIP = 103
class JetSegment (object):
    """Plain data holder for one JET segment definition.

    :param segname: segment display name
    :param filename: source MIDI file
    :param start/end/length: music-time strings ("m:b:t"); optional
    :param output: optional output file for the trimmed segment
    :param quantize: optional quantization value
    :param jetevents: list of JetEvent objects attached to this segment
    :param dlsfile: optional DLS instrument library
    :param dump_file: optional debug dump target
    :param transpose/repeat/mute_flags: playback parameters used by the UI
    """
    def __init__ (self, segname, filename, start=None, end=None, length=None, output=None, quantize=None, jetevents=None, dlsfile=None, dump_file=None, transpose=0, repeat=0, mute_flags=0):
        self.segname = segname
        self.filename = filename
        self.dlsfile = dlsfile
        self.start = start
        self.end = end
        self.length = length
        self.output = output
        self.quantize = quantize
        self.dump_file = dump_file
        # Bug fix: the default was previously a mutable `[]`, shared by
        # every instance constructed without an explicit jetevents list.
        self.jetevents = jetevents if jetevents is not None else []
        #API FIELDS FOR UI
        self.transpose = transpose
        self.repeat = repeat
        self.mute_flags = mute_flags
class JetEvent (object):
    """Plain data holder for a single JET event (clip trigger, app
    controller, or end-of-segment marker) attached to a segment."""
    def __init__(self, event_name, event_type, event_id, track_num, channel_num, event_start, event_end):
        # Bind all fields in one unpacking assignment.
        (self.event_name, self.event_type, self.event_id,
         self.track_num, self.channel_num,
         self.event_start, self.event_end) = (
            event_name, event_type, event_id,
            track_num, channel_num, event_start, event_end)
class JetFileException (Exception):
    """Raised for JET configuration/validation errors (bad event ranges,
    missing config entries, out-of-range track or channel numbers)."""
    def __init__ (self, msg):
        # Fix: also initialize the Exception base so that args, repr()
        # and pickling behave correctly; keep .msg for existing callers.
        Exception.__init__(self, msg)
        self.msg = msg
    def __str__ (self):
        return self.msg
class JetSegmentFile (midifile.MIDIFile):
    """MIDI file with JET-specific operations: trimming a segment out of
    the file, re-synchronizing controller state at clip starts, and
    merging JET controller events (clip triggers, end markers, app
    controllers) into the tracks."""

    def ConvertMusicTimeToTicks (self, s):
        """Convert a "measures:beats:ticks" string into absolute ticks."""
        measures, beats, ticks = s.split(':',3)
        return self.ConvertToTicks(int(measures), int(beats), int(ticks))

    def ExtractEvents (self, start, end, length, quantize, chase_controllers):
        """Trim the file to the window [start, end) given in music time.

        end takes precedence over length; if start/end/length are all
        None the file is left unchanged. quantize defaults to 0.
        """
        if (start is None) and (end is None) and (length is None):
            logging.debug('ExtractEvents: No change')
            return
        if start is not None:
            start = self.ConvertMusicTimeToTicks(start)
        else:
            start = 0
        if end is not None:
            end = self.ConvertMusicTimeToTicks(end)
        elif length is not None:
            length = self.ConvertMusicTimeToTicks(length)
            end = start + length
        if quantize is not None:
            quantize = int(quantize)
        else:
            quantize = 0
        self.Trim(start, end, quantize, chase_controllers=chase_controllers)
        #self.DumpTracks()

    def SyncClips (self):
        """Add controller events to the start of a clip to keep it synced.

        Scans every track for clip-trigger controllers with the "start"
        bit (0x40) set, chases controller values up to that point, and
        inserts the chased state right after the trigger event.
        """
        values = None
        last_seq = 0
        for track in self.tracks:
            for event in track.events:
                # find start of clip and chase events from last save point
                if (event.msg_type == midifile.CONTROL_CHANGE) and \
                        (event.controller == JET_EVENT_TRIGGER_CLIP) and \
                        ((event.value & 0x40) == 0x40):
                    logging.debug('Syncing clip at %d ticks' % event.ticks)
                    values = track.events.ChaseControllers(event.seq, last_seq, values)
                    #BTH; Seems to fix chase controller bug when multiple clips within segment
                    #last_seq = event.seq
                    # generate event list from default values
                    clip_events = values.GenerateEventList(event.ticks)
                    #for evt in clip_events:
                    #    logging.info(evt)
                    track.events.InsertEvents(clip_events, event.seq + 1)

    def AddJetEvents (self, jetevents):
        """Merge JET controller events into the MIDI tracks.

        Clip events become paired trigger controllers (start has bit 0x40
        set, stop does not); end-of-segment events become a loop-end
        marker; app events become application controllers (IDs 80-83).
        Raises JetFileException when track/channel/ID are out of range.
        """
        for jet_event in jetevents:
            if jet_event.event_type == JetDefs.E_CLIP:
                #DumpEvent(jet_event)
                # sanity check
                if jet_event.track_num >= len(self.tracks):
                    raise JetFileException('Track number %d of out of range for clip' % jet_event.track_num)
                if jet_event.channel_num > 15:
                    raise JetFileException('Channel number %d of out of range for clip' % jet_event.channel_num)
                if jet_event.event_id > 63:
                    raise JetFileException('event_id %d of out of range for clip' % jet_event.event_id)
                logging.debug('Adding trigger event for clip %d @ %s and %s' % (jet_event.event_id, jet_event.event_start, jet_event.event_end))
                events = midifile.EventList()
                # start trigger: event_id with the 0x40 "start" bit set
                events.append(midifile.ControlChangeEvent(
                    self.ConvertMusicTimeToTicks(jet_event.event_start),
                    0,
                    jet_event.channel_num,
                    JET_EVENT_TRIGGER_CLIP,
                    jet_event.event_id | 0x40))
                # stop trigger: seq of sys.maxint sorts it after events at the same tick
                events.append(midifile.ControlChangeEvent(
                    self.ConvertMusicTimeToTicks(jet_event.event_end),
                    sys.maxint,
                    jet_event.channel_num,
                    JET_EVENT_TRIGGER_CLIP,
                    jet_event.event_id))
                # merge trigger events
                self.tracks[jet_event.track_num].events.MergeEvents(events)
            elif jet_event.event_type == JetDefs.E_EOS:
                if jet_event.track_num >= len(self.tracks):
                    raise JetFileException('Track number %d of out of range for end marker' % jet_event.track_num)
                if jet_event.channel_num > 15:
                    raise JetFileException('Channel number %d of out of range for end marker' % jet_event.channel_num)
                events = midifile.EventList()
                logging.debug('Adding end marker at %s' % jet_event.event_start)
                events.append(midifile.ControlChangeEvent(
                    self.ConvertMusicTimeToTicks(jet_event.event_start),
                    0,
                    jet_event.channel_num,
                    JET_EVENT_MARKER,
                    JET_MARKER_LOOP_END))
                self.tracks[jet_event.track_num].events.MergeEvents(events)
            elif jet_event.event_type == JetDefs.E_APP:
                if jet_event.track_num >= len(self.tracks):
                    raise JetFileException('Track number %d of out of range for app marker' % jet_event.track_num)
                if jet_event.channel_num > 15:
                    raise JetFileException('Channel number %d of out of range for app marker' % jet_event.channel_num)
                if jet_event.event_id > 83 or jet_event.event_id < 80:
                    raise JetFileException('EventID %d out of range for application controller' % jet_event.event_id)
                events = midifile.EventList()
                logging.debug('Adding application controller at %s' % jet_event.event_start)
                events.append(midifile.ControlChangeEvent(
                    self.ConvertMusicTimeToTicks(jet_event.event_start),
                    0,
                    jet_event.channel_num,
                    jet_event.event_id,
                    jet_event.event_id))
                self.tracks[jet_event.track_num].events.MergeEvents(events)
class JetFile (object):
"""Write a JET file based on a configuration file."""
def __init__ (self, config_file, options):
self.config_file = config_file
self.config = config = ConfigParser.ConfigParser()
if self.config_file == "":
self.InitializeConfig(JetDefs.UNTITLED_FILE)
if not FileExists(self.config_file):
self.InitializeConfig(self.config_file)
config.read(self.config_file)
self.ParseConfig(options)
def DumpConfig (self):
"""Drump configuration to log file."""
# dump configuration
config = self.config
for section in config.sections():
logging.debug('[%s]' % section)
for option, value in config.items(section):
logging.debug('%s: %s' % (option, value))
def ParseConfig (self, options):
"""Validate the configuration."""
# check for output name
config = self.config
if config.has_option(OUTPUT_SECTION, OUTPUT_FILENAME):
config.filename = config.get(OUTPUT_SECTION, OUTPUT_FILENAME)
else:
raise JetFileException('No output filename in configuration file')
if config.filename == '' or config.filename == None:
config.filename = FileJustRoot(self.config_file) + ".JET"
config.chase_controllers = True
if config.has_option(OUTPUT_SECTION, OUTPUT_CHASE_CONTROLLERS):
try:
config.chase_controllers = config.getboolean(OUTPUT_SECTION, OUTPUT_CHASE_CONTROLLERS)
except:
pass
config.delete_empty_tracks = False
if config.has_option(OUTPUT_SECTION, OUTPUT_OMIT_EMPTY_TRACKS):
try:
config.delete_empty_tracks = config.getboolean(OUTPUT_SECTION, OUTPUT_OMIT_EMPTY_TRACKS)
except:
pass
config.copyright = None
if config.has_option(OUTPUT_SECTION, OUTPUT_COPYRIGHT):
config.copyright = config.get(OUTPUT_SECTION, OUTPUT_COPYRIGHT)
config.app_data = None
if config.has_option(OUTPUT_SECTION, OUTPUT_APP_DATA):
config.app_data = config.get(OUTPUT_SECTION, OUTPUT_APP_DATA)
# count segments
segments = []
seg_num = 0
while 1:
# check for segment section
segment_name = SEGMENT_SECTION + str(seg_num)
if not config.has_section(segment_name):
break
# initialize some parameters
start = end = length = output = end_marker = dlsfile = dump_file = None
transpose = repeat = mute_flags = 0
jetevents = []
# get the segment parameters
segname = config.get(segment_name, SEGMENT_NAME)
filename = config.get(segment_name, SEGMENT_FILENAME)
if config.has_option(segment_name, SEGMENT_DLSFILE):
dlsfile = config.get(segment_name, SEGMENT_DLSFILE)
if config.has_option(segment_name, SEGMENT_START):
start = config.get(segment_name, SEGMENT_START)
if config.has_option(segment_name, SEGMENT_END):
end = config.get(segment_name, SEGMENT_END)
if config.has_option(segment_name, SEGMENT_LENGTH):
length = config.get(segment_name, SEGMENT_LENGTH)
if config.has_option(segment_name, SEGMENT_OUTPUT):
output = config.get(segment_name, SEGMENT_OUTPUT)
if config.has_option(segment_name, SEGMENT_QUANTIZE):
quantize = config.get(segment_name, SEGMENT_QUANTIZE)
if config.has_option(segment_name, SEGMENT_DUMP_FILE):
dump_file = config.get(segment_name, SEGMENT_DUMP_FILE)
#API FIELDS
if config.has_option(segment_name, SEGMENT_TRANSPOSE):
transpose = config.get(segment_name, SEGMENT_TRANSPOSE)
if config.has_option(segment_name, SEGMENT_REPEAT):
repeat = config.get(segment_name, SEGMENT_REPEAT)
if config.has_option(segment_name, SEGMENT_MUTE_FLAGS):
mute_flags = config.get(segment_name, SEGMENT_MUTE_FLAGS)
if config.has_option(segment_name, SEGMENT_END_MARKER):
end_marker = config.get(segment_name, SEGMENT_END_MARKER)
track_num, channel_num, event_time = end_marker.split(',',2)
#jetevents.append((JetDefs.E_EOS, 0, int(track_num), int(channel_num), event_time, ''))
jetevents.append(JetEvent(JetDefs.E_EOS, JetDefs.E_EOS, 0, int(track_num), int(channel_num), event_time, event_time))
# check for jetevents
for jetevent, location in config.items(segment_name):
if jetevent.startswith(CLIP_PREFIX):
event_name, event_id, track_num, channel_num, event_start, event_end = location.split(',', 5)
jetevents.append(JetEvent(event_name, JetDefs.E_CLIP, int(event_id), int(track_num), int(channel_num), event_start, event_end))
# check for appevents
for jetevent, location in config.items(segment_name):
if jetevent.startswith(APP_PREFIX):
event_name, event_id, track_num, channel_num, event_start, event_end = location.split(',', 5)
jetevents.append(JetEvent(event_name, JetDefs.E_APP, int(event_id), int(track_num), int(channel_num), event_start, event_end))
segments.append(JetSegment(segname, filename, start, end, length, output, quantize, jetevents, dlsfile, dump_file, int(transpose), int(repeat), int(mute_flags)))
seg_num += 1
self.segments = segments
if not len(segments):
#TODO: Check for segments when writing
#raise JetFileException('No segments defined in configuration file')
pass
# count libraries
libraries = []
lib_num = 0
while 1:
library_name = LIBRARY_FILENAME + str(lib_num)
if not config.has_option(LIBRARY_SECTION, library_name):
break
libraries.append(config.get(LIBRARY_SECTION, library_name))
lib_num += 1
self.libraries = libraries
def WriteJetFileFromConfig (self, options):
    """Write JET file from config file.

    Serializes this object's configuration, MIDI segments and DLS libraries
    into the binary .JET container at self.config.filename. Chunks are
    written sequentially; the outer header is patched at the end once the
    total size is known.
    """
    # open the output file and write the header (size 0 for now, fixed up at the end)
    output_file = open(self.config.filename, 'wb')
    jet_header = struct.pack(JET_HEADER_STRUCT, JET_HEADER_TAG, 0)
    output_file.write(jet_header)
    # write the JINF chunk (version + chunk counts)
    jet_info = struct.pack(JINF_STRUCT,
        JET_INFO_CHUNK, struct.calcsize(JINF_STRUCT) - 8,
        JINF_JET_VERSION, JET_VERSION,
        JINF_NUM_SMF_CHUNKS, len(self.segments),
        JINF_NUM_DLS_CHUNKS, len(self.libraries))
    output_file.write(jet_info)
    # write the JCOP chunk (if any); chunks are padded to an even byte count
    if self.config.copyright is not None:
        size = len(self.config.copyright) + 1  # +1 for the NUL terminator
        if size & 1:
            size += 1
            extra_byte = True
        else:
            extra_byte = False
        jet_copyright = struct.pack(JCOP_STRUCT, JCOP_CHUNK, size)
        output_file.write(jet_copyright)
        output_file.write(self.config.copyright)
        output_file.write(chr(0))
        if extra_byte:
            output_file.write(chr(0))
    # write the app data chunk (if any) -- raw contents of an external file
    if self.config.app_data is not None:
        size = os.path.getsize(self.config.app_data)
        if size & 1:
            size += 1
            extra_byte = True
        else:
            extra_byte = False
        jet_app_data = struct.pack(JAPP_STRUCT, JAPP_CHUNK, size)
        output_file.write(jet_app_data)
        with open(self.config.app_data, 'rb') as f:
            output_file.write(f.read())
        if extra_byte:
            output_file.write(chr(0))
    # copy the MIDI segments
    seg_num = 0
    for segment in self.segments:
        logging.debug('Writing segment %d' % seg_num)
        # open SMF file and read it
        jet_segfile = JetSegmentFile(segment.filename, 'rb')
        jet_segfile.ReadFromStream()
        # insert events
        jet_segfile.AddJetEvents(segment.jetevents)
        # trim to length specified in config file
        jet_segfile.ExtractEvents(segment.start, segment.end, segment.length, segment.quantize, self.config.chase_controllers)
        # chase controller events and fix them
        if self.config.chase_controllers:
            jet_segfile.SyncClips()
        # delete empty tracks
        if self.config.delete_empty_tracks:
            jet_segfile.DeleteEmptyTracks()
        # write separate output file if requested
        if segment.output is not None:
            jet_segfile.SaveAs(segment.output)
        # write dump file
        if segment.dump_file is not None:
            with open(segment.dump_file, 'w') as f:
                jet_segfile.DumpTracks(f)
        # write the segment header with a placeholder size
        header_pos = output_file.tell()
        smf_header = struct.pack(JET_HEADER_STRUCT, JET_SMF_CHUNK, 0)
        output_file.write(smf_header)
        start_pos = output_file.tell()
        # write SMF file to output file
        jet_segfile.Write(output_file, offset=start_pos)
        jet_segfile.close()
        # return to segment header and write actual size (padded to even)
        end_pos = output_file.tell()
        file_size = end_pos - start_pos
        if file_size & 1:
            file_size += 1
            end_pos += 1
        output_file.seek(header_pos, 0)
        smf_header = struct.pack(JET_HEADER_STRUCT, JET_SMF_CHUNK, file_size)
        output_file.write(smf_header)
        output_file.seek(end_pos, 0)
        seg_num += 1
    # copy the DLS segments; missing library files are silently skipped
    for library in self.libraries:
        if FileExists(library):
            # open DLS file and get size by seeking to the end
            lib_file = (open(library,'rb'))
            lib_file.seek(0,2)
            file_size = lib_file.tell()
            lib_file.seek(0)
            # write the library header
            dls_header = struct.pack(JET_HEADER_STRUCT, JET_DLS_CHUNK, file_size)
            output_file.write(dls_header)
            # copy DLS file to output file
            output_file.write(lib_file.read())
            lib_file.close()
    # patch the outer header with the real payload size
    file_size = output_file.tell()
    output_file.seek(0)
    jet_header = struct.pack(JET_HEADER_STRUCT, JET_HEADER_TAG, file_size - struct.calcsize(JET_HEADER_STRUCT))
    output_file.write(jet_header)
    output_file.close()
def GetMidiFiles(self):
    """Return the distinct MIDI filenames used by the segments, in first-seen order."""
    seen = []
    for seg in self.segments:
        name = seg.filename
        if name not in seen:
            seen.append(name)
    return seen
def GetLibraries(self):
    """Return the list of DLS library filenames (the live list, not a copy)."""
    return self.libraries
def GetEvents(self, segName):
    """Return the event list of the first segment named segName, or None."""
    for seg in self.segments:
        if seg.segname == segName:
            return seg.jetevents
    return None
def GetEvent(self, segName, eventName):
    """Return the event called eventName inside segment segName, or None."""
    for seg in self.segments:
        if seg.segname != segName:
            continue
        for evt in seg.jetevents:
            if evt.event_name == eventName:
                return evt
    return None
def AddEvent(self, segname, event_name, event_type, event_id, track_num, channel_num, event_start, event_end):
    """Append a freshly-built JetEvent to every segment whose name matches segname.

    The numeric ids/track/channel arguments are coerced with int() before
    construction, matching the config-file reader which supplies strings.
    """
    for seg in self.segments:
        if seg.segname != segname:
            continue
        seg.jetevents.append(
            JetEvent(event_name, event_type, int(event_id), int(track_num),
                     int(channel_num), event_start, event_end))
def ReplaceEvents(self, segname, newEvents):
    """Replace the whole event list of the first segment named segname.

    Returns that segment, or None (implicitly) when no segment matches.
    """
    for seg in self.segments:
        if seg.segname != segname:
            continue
        seg.jetevents = newEvents
        return seg
def UpdateEvent(self, segname, orgeventname, event_name, event_type, event_id, track_num, channel_num, event_start, event_end):
    """Overwrite every field of each event named orgeventname in each segment named segname.

    Unlike AddEvent, values are stored as passed (no int() coercion).
    """
    for seg in self.segments:
        if seg.segname != segname:
            continue
        for evt in seg.jetevents:
            if evt.event_name != orgeventname:
                continue
            evt.event_name = event_name
            evt.event_type = event_type
            evt.event_id = event_id
            evt.track_num = track_num
            evt.channel_num = channel_num
            evt.event_start = event_start
            evt.event_end = event_end
def DeleteSegmentsMatchingPrefix(self, prefix):
    """Delete every segment whose name starts with prefix (case-insensitive).

    The original implementation removed items from self.segments while
    iterating over it and restarted the scan after each removal; filtering
    into the same list object (slice assignment, so external references to
    the list stay valid) is equivalent and a single pass.
    """
    target = prefix.upper()
    plen = len(prefix)
    self.segments[:] = [
        seg for seg in self.segments
        if seg.segname[0:plen].upper() != target
    ]
def DeleteEvent(self, segname, event_name):
    """Delete every event named event_name from every segment named segname.

    Fixes a bug in the original, which called list.remove() on the list it
    was iterating: removing an element skips the next one, so with two
    matching events in adjacent positions the second survived. A single
    in-place filter removes all matches.
    """
    for seg in self.segments:
        if seg.segname == segname:
            seg.jetevents[:] = [
                evt for evt in seg.jetevents
                if evt.event_name != event_name
            ]
def DeleteEventsMatchingPrefix(self, segname, prefix):
    """Delete every event whose name starts with prefix (case-insensitive)
    from every segment named segname.

    Replaces the original remove-while-iterating loop (which restarted the
    scan after each removal) with one in-place filter pass; the surviving
    events are identical.
    """
    target = prefix.upper()
    plen = len(prefix)
    for seg in self.segments:
        if seg.segname == segname:
            seg.jetevents[:] = [
                evt for evt in seg.jetevents
                if evt.event_name[0:plen].upper() != target
            ]
def MoveEvent(self, segname, movename, event_start, event_end):
    """Set the start/end of the first event named movename in a segment
    named segname, then stop immediately."""
    for seg in self.segments:
        if seg.segname != segname:
            continue
        for evt in seg.jetevents:
            if evt.event_name == movename:
                evt.event_start = event_start
                evt.event_end = event_end
                return
def GetSegments(self):
    """Return the full segment list (the live list, not a copy)."""
    return self.segments
def GetSegment(self, segName):
    """Return the first segment named segName, or None when absent."""
    for seg in self.segments:
        if seg.segname == segName:
            return seg
    return None
def AddSegment(self, segname, filename, start, end, length, output, quantize, jetevents, dlsfile, dump_file, transpose, repeat, mute_flags):
    """Append a new JetSegment built from the given fields.

    An end or length equal to the MBT zero string is normalized to None
    before construction.
    """
    length = None if length == JetDefs.MBT_ZEROSTR else length
    end = None if end == JetDefs.MBT_ZEROSTR else end
    self.segments.append(
        JetSegment(segname, filename, start, end, length, output, quantize,
                   jetevents, dlsfile, dump_file, transpose, repeat, mute_flags))
def UpdateSegment(self, orgsegname, segname, filename, start, end, length, output, quantize, jetevents, dlsfile, dump_file, transpose, repeat, mute_flags):
    """Update the fields of every segment currently named orgsegname.

    MBT zero strings for end/length are normalized to None, matching
    AddSegment. Fixes a bug in the original: the dump_file argument was
    accepted but never copied onto the segment, so dump-file changes were
    silently lost. (jetevents is likewise accepted and unused here;
    left as-is -- events are managed via ReplaceEvents/Add/DeleteEvent.)
    """
    if length == JetDefs.MBT_ZEROSTR:
        length = None
    if end == JetDefs.MBT_ZEROSTR:
        end = None
    for segment in self.segments:
        if segment.segname == orgsegname:
            segment.segname = segname
            segment.filename = filename
            segment.start = start
            segment.end = end
            segment.length = length
            segment.output = output
            segment.quantize = quantize
            segment.dlsfile = dlsfile
            segment.dump_file = dump_file  # was missing in the original
            segment.transpose = transpose
            segment.repeat = repeat
            segment.mute_flags = mute_flags
def MoveSegment(self, segname, start, end):
    """Set the start/end of the first segment named segname, then stop."""
    for seg in self.segments:
        if seg.segname != segname:
            continue
        seg.start = start
        seg.end = end
        return
def DeleteSegment(self, segname):
    """Delete every segment named segname.

    Fixes a bug in the original, which called list.remove() on the list it
    was iterating: with two matching segments in adjacent positions the
    second one was skipped and survived. Slice assignment keeps the same
    list object alive for external references.
    """
    self.segments[:] = [
        seg for seg in self.segments if seg.segname != segname
    ]
def SaveJetConfig(self, configFile):
    """ Saves the jet config file.

    Serializes output options, one [Segment<n>] section per segment
    (including its clip/app/end-marker events) and the [Libraries]
    section to configFile via ConfigParser. self.libraries is rebuilt
    from the segments' DLS files as a side effect.
    """
    # derive the .JET output name from the config name if not set yet
    if self.config.filename == '' or self.config.filename == None:
        self.config.filename = FileJustRoot(configFile) + ".JET"
    config = ConfigParser.ConfigParser()
    config.add_section(OUTPUT_SECTION)
    config.set(OUTPUT_SECTION, OUTPUT_FILENAME, self.config.filename)
    config.set(OUTPUT_SECTION, OUTPUT_CHASE_CONTROLLERS, self.config.chase_controllers)
    config.set(OUTPUT_SECTION, OUTPUT_OMIT_EMPTY_TRACKS, self.config.delete_empty_tracks)
    if self.config.copyright is not None:
        config.set(OUTPUT_SECTION, OUTPUT_COPYRIGHT, self.config.copyright)
    if self.config.app_data is not None:
        config.set(OUTPUT_SECTION, OUTPUT_APP_DATA, self.config.app_data)
    # rebuilt below from the segments' DLS files
    self.libraries = []
    seg_num = 0
    for segment in self.segments:
        segment_name = SEGMENT_SECTION + str(seg_num)
        config.add_section(segment_name)
        config.set(segment_name, SEGMENT_NAME, segment.segname)
        config.set(segment_name, SEGMENT_FILENAME, segment.filename)
        config.set(segment_name, SEGMENT_DLSFILE, segment.dlsfile)
        # collect each existing DLS file once
        if FileExists(segment.dlsfile):
            if not segment.dlsfile in self.libraries:
                self.libraries.append(segment.dlsfile)
        config.set(segment_name, SEGMENT_START, segment.start)
        # NOTE(review): these are Python 2 string comparisons against the
        # MBT zero string -- presumably "anything past 0:0:0"; confirm.
        if segment.end > JetDefs.MBT_ZEROSTR and len(segment.end) > 0:
            config.set(segment_name, SEGMENT_END, segment.end)
        if segment.length > JetDefs.MBT_ZEROSTR and len(segment.length) > 0:
            config.set(segment_name, SEGMENT_LENGTH, segment.length)
        config.set(segment_name, SEGMENT_OUTPUT, segment.output)
        config.set(segment_name, SEGMENT_QUANTIZE, segment.quantize)
        if segment.dump_file is not None:
            config.set(segment_name, SEGMENT_DUMP_FILE, segment.dump_file)
        config.set(segment_name, SEGMENT_TRANSPOSE, segment.transpose)
        config.set(segment_name, SEGMENT_REPEAT, segment.repeat)
        config.set(segment_name, SEGMENT_MUTE_FLAGS, segment.mute_flags)
        # events: clips and app events get numbered keys; the (single)
        # end-of-segment marker gets a fixed key
        clip_num = 0
        app_num = 0
        for jet_event in segment.jetevents:
            if jet_event.event_type == JetDefs.E_CLIP:
                clip_name = CLIP_PREFIX + str(clip_num)
                s = "%s,%s,%s,%s,%s,%s" % (jet_event.event_name, jet_event.event_id, jet_event.track_num, jet_event.channel_num, jet_event.event_start, jet_event.event_end)
                config.set(segment_name, clip_name, s)
                clip_num += 1
            elif jet_event.event_type == JetDefs.E_APP:
                app_name = APP_PREFIX + str(app_num)
                s = "%s,%s,%s,%s,%s,%s" % (jet_event.event_name, jet_event.event_id, jet_event.track_num, jet_event.channel_num, jet_event.event_start, jet_event.event_end)
                config.set(segment_name, app_name, s)
                app_num += 1
            elif jet_event.event_type == JetDefs.E_EOS:
                s = "%s,%s,%s" % (jet_event.track_num, jet_event.channel_num, jet_event.event_start)
                config.set(segment_name, SEGMENT_END_MARKER, s)
        seg_num += 1
    # library list collected above
    lib_num = 0
    config.add_section(LIBRARY_SECTION)
    for library in self.libraries:
        library_name = LIBRARY_FILENAME + str(lib_num)
        config.set(LIBRARY_SECTION, library_name, library)
        lib_num += 1
    # remove any stale file, then write the fresh configuration
    FileKillClean(configFile)
    cfgfile = open(configFile,'w')
    config.write(cfgfile)
    cfgfile.close()
def InitializeConfig(self, configFile):
    """Reset this JET file to an empty default state and persist it.

    Derives the .JET output filename from configFile, clears segments and
    libraries, and immediately writes the fresh configuration to disk.
    """
    self.config.filename = FileJustRoot(configFile) + ".JET"
    self.config.chase_controllers = True
    self.config.delete_empty_tracks = False
    self.config.copyright = None
    self.config.app_data = None
    self.config_file = configFile
    self.segments = []
    self.libraries = []
    self.SaveJetConfig(configFile)
#---------------------------------------------------------------
# main: command-line driver that converts each config file given
# on the command line into a .JET container.
#---------------------------------------------------------------
if __name__ == '__main__':
    # NOTE(review): __import__ is used where a plain "import sys, optparse"
    # would do -- behavior is identical.
    sys = __import__('sys')
    optparse = __import__('optparse')
    # parse command line options
    parser = optparse.OptionParser(version=VERSION)
    parser.set_defaults(log_level=logging.INFO, log_file=None)
    parser.add_option('-d', '--debug', action="store_const", const=logging.DEBUG, dest='log_level', help='Enable debug output')
    parser.add_option('-l', '--log_file', dest='log_file', help='Write debug output to log file')
    (options, args) = parser.parse_args()
    # get master logger
    logger = logging.getLogger('')
    logger.setLevel(options.log_level)
    # create console logger
    console_logger = logging.StreamHandler()
    console_logger.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(console_logger)
    # create file logger when requested (plain FileHandler, not rotating)
    if options.log_file is not None:
        file_logger = logging.FileHandler(options.log_file, 'w')
        file_logger.setFormatter(logging.Formatter('%(message)s'))
        logger.addHandler(file_logger)
    # process files (Python 2 print statement -- this module is Python 2 only)
    for arg in args:
        print arg
        jet_file = JetFile(arg, options)
        jet_file.WriteJetFileFromConfig(options)
| apache-2.0 |
nazeehshoura/crawler | env/lib/python2.7/site-packages/django/contrib/auth/urls.py | 113 | 1203 | # The views used below are normally mapped in django.contrib.admin.urls.py
# This URLs file is used to provide a reliable view deployment for test purposes.
# It is also provided as a convenience to those who want to deploy these URLs
# elsewhere.
from django.conf.urls import patterns, url
# NOTE(review): patterns('', ...) with dotted-string view references is the
# pre-1.8 Django URLconf style used by this vendored copy; a modern Django
# would need a plain list of url()/path() entries with view callables.
urlpatterns = patterns('',
    url(r'^login/$', 'django.contrib.auth.views.login', name='login'),
    url(r'^logout/$', 'django.contrib.auth.views.logout', name='logout'),
    url(r'^password_change/$', 'django.contrib.auth.views.password_change', name='password_change'),
    url(r'^password_change/done/$', 'django.contrib.auth.views.password_change_done', name='password_change_done'),
    url(r'^password_reset/$', 'django.contrib.auth.views.password_reset', name='password_reset'),
    url(r'^password_reset/done/$', 'django.contrib.auth.views.password_reset_done', name='password_reset_done'),
    # uidb64 identifies the user; token is the one-time reset token
    url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        'django.contrib.auth.views.password_reset_confirm',
        name='password_reset_confirm'),
    url(r'^reset/done/$', 'django.contrib.auth.views.password_reset_complete', name='password_reset_complete'),
)
| mit |
coderbone/SickRage | lib/requests/packages/urllib3/util/retry.py | 699 | 9924 | import time
import logging
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
)
from ..packages import six
log = logging.getLogger(__name__)
class Retry(object):
    """ Retry configuration.

    Each retry attempt will create a new Retry object with updated values, so
    they can be safely reused.

    Retries can be defined as a default for a pool::

        retries = Retry(connect=5, read=2, redirect=5)
        http = PoolManager(retries=retries)
        response = http.request('GET', 'http://example.com/')

    Or per-request (which overrides the default for the pool)::

        response = http.request('GET', 'http://example.com/', retries=Retry(10))

    Retries can be disabled by passing ``False``::

        response = http.request('GET', 'http://example.com/', retries=False)

    Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
    retries are disabled, in which case the causing exception will be raised.

    :param int total:
        Total number of retries to allow. Takes precedence over other counts.

        Set to ``None`` to remove this constraint and fall back on other
        counts. It's a good idea to set this to some sensibly-high value to
        account for unexpected edge cases and avoid infinite retry loops.

        Set to ``0`` to fail on the first retry.

        Set to ``False`` to disable and imply ``raise_on_redirect=False``.

    :param int connect:
        How many connection-related errors to retry on.

        These are errors raised before the request is sent to the remote server,
        which we assume has not triggered the server to process the request.

        Set to ``0`` to fail on the first retry of this type.

    :param int read:
        How many times to retry on read errors.

        These errors are raised after the request was sent to the server, so the
        request may have side-effects.

        Set to ``0`` to fail on the first retry of this type.

    :param int redirect:
        How many redirects to perform. Limit this to avoid infinite redirect
        loops.

        A redirect is a HTTP response with a status code 301, 302, 303, 307 or
        308.

        Set to ``0`` to fail on the first retry of this type.

        Set to ``False`` to disable and imply ``raise_on_redirect=False``.

    :param iterable method_whitelist:
        Set of uppercased HTTP method verbs that we should retry on.

        By default, we only retry on methods which are considered to be
        idempotent (multiple requests with the same parameters end with the
        same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.

    :param iterable status_forcelist:
        A set of HTTP status codes that we should force a retry on.

        By default, this is disabled with ``None``.

    :param float backoff_factor:
        A backoff factor to apply between attempts. urllib3 will sleep for::

            {backoff factor} * (2 ^ ({number of total retries} - 1))

        seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
        for [0.1s, 0.2s, 0.4s, ...] between retries. It will never be longer
        than :attr:`Retry.BACKOFF_MAX`.

        By default, backoff is disabled (set to 0).

    :param bool raise_on_redirect: Whether, if the number of redirects is
        exhausted, to raise a MaxRetryError, or to return a response with a
        response code in the 3xx range.
    """

    DEFAULT_METHOD_WHITELIST = frozenset([
        'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])

    #: Maximum backoff time.
    BACKOFF_MAX = 120

    def __init__(self, total=10, connect=None, read=None, redirect=None,
                 method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
                 backoff_factor=0, raise_on_redirect=True, _observed_errors=0):
        self.total = total
        self.connect = connect
        self.read = read
        # Disabling redirects entirely also implies not raising on them.
        if redirect is False or total is False:
            redirect = 0
            raise_on_redirect = False
        self.redirect = redirect
        self.status_forcelist = status_forcelist or set()
        self.method_whitelist = method_whitelist
        self.backoff_factor = backoff_factor
        self.raise_on_redirect = raise_on_redirect
        self._observed_errors = _observed_errors  # TODO: use .history instead?

    def new(self, **kw):
        """Return a copy of this configuration with selected fields overridden by ``kw``."""
        params = dict(
            total=self.total,
            connect=self.connect, read=self.read, redirect=self.redirect,
            method_whitelist=self.method_whitelist,
            status_forcelist=self.status_forcelist,
            backoff_factor=self.backoff_factor,
            raise_on_redirect=self.raise_on_redirect,
            _observed_errors=self._observed_errors,
        )
        params.update(kw)
        return type(self)(**params)

    @classmethod
    def from_int(cls, retries, redirect=True, default=None):
        """ Backwards-compatibility for the old retries format.

        Accepts an int, a Retry instance, None (use ``default`` or
        ``cls.DEFAULT``) or False/True, and returns a Retry instance.
        """
        if retries is None:
            retries = default if default is not None else cls.DEFAULT
        if isinstance(retries, Retry):
            return retries
        # redirect=True means "no explicit cap" (None); False means 0.
        redirect = bool(redirect) and None
        new_retries = cls(retries, redirect=redirect)
        log.debug("Converted retries value: %r -> %r" % (retries, new_retries))
        return new_retries

    def get_backoff_time(self):
        """ Formula for computing the current backoff

        :rtype: float
        """
        # No backoff before the second consecutive error.
        if self._observed_errors <= 1:
            return 0
        backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
        return min(self.BACKOFF_MAX, backoff_value)

    def sleep(self):
        """ Sleep between retry attempts using an exponential backoff.

        By default, the backoff factor is 0 and this method will return
        immediately.
        """
        backoff = self.get_backoff_time()
        if backoff <= 0:
            return
        time.sleep(backoff)

    def _is_connection_error(self, err):
        """ Errors when we're fairly sure that the server did not receive the
        request, so it should be safe to retry.
        """
        return isinstance(err, ConnectTimeoutError)

    def _is_read_error(self, err):
        """ Errors that occur after the request has been started, so we should
        assume that the server began processing it.
        """
        return isinstance(err, (ReadTimeoutError, ProtocolError))

    def is_forced_retry(self, method, status_code):
        """ Is this method/status code retryable? (Based on method/codes whitelists)
        """
        if self.method_whitelist and method.upper() not in self.method_whitelist:
            return False
        # NOTE: returns the (possibly empty) forcelist itself when the method
        # check passes and the forcelist is falsy -- truthy/falsy, not bool.
        return self.status_forcelist and status_code in self.status_forcelist

    def is_exhausted(self):
        """ Are we out of retries? """
        retry_counts = (self.total, self.connect, self.read, self.redirect)
        # filter(None, ...) drops both None (no limit) and 0 counts here.
        retry_counts = list(filter(None, retry_counts))
        if not retry_counts:
            return False
        return min(retry_counts) < 0

    def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None):
        """ Return a new Retry object with incremented retry counters.

        :param response: A response object, or None, if the server did not
            return a response.
        :type response: :class:`~urllib3.response.HTTPResponse`
        :param Exception error: An error encountered during the request, or
            None if the response was received successfully.

        :return: A new ``Retry`` object.
        """
        if self.total is False and error:
            # Disabled, indicate to re-raise the error.
            raise six.reraise(type(error), error, _stacktrace)
        total = self.total
        if total is not None:
            total -= 1
        _observed_errors = self._observed_errors
        connect = self.connect
        read = self.read
        redirect = self.redirect
        cause = 'unknown'
        if error and self._is_connection_error(error):
            # Connect retry?
            if connect is False:
                raise six.reraise(type(error), error, _stacktrace)
            elif connect is not None:
                connect -= 1
            _observed_errors += 1
        elif error and self._is_read_error(error):
            # Read retry?
            if read is False:
                raise six.reraise(type(error), error, _stacktrace)
            elif read is not None:
                read -= 1
            _observed_errors += 1
        elif response and response.get_redirect_location():
            # Redirect retry?
            if redirect is not None:
                redirect -= 1
            cause = 'too many redirects'
        else:
            # Incrementing because of a server error like a 500 in
            # status_forcelist and the given method is in the whitelist
            _observed_errors += 1
            cause = ResponseError.GENERIC_ERROR
            if response and response.status:
                cause = ResponseError.SPECIFIC_ERROR.format(
                    status_code=response.status)
        new_retry = self.new(
            total=total,
            connect=connect, read=read, redirect=redirect,
            _observed_errors=_observed_errors)
        if new_retry.is_exhausted():
            raise MaxRetryError(_pool, url, error or ResponseError(cause))
        log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
        return new_retry

    def __repr__(self):
        return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
                'read={self.read}, redirect={self.redirect})').format(
            cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9): module-level default
# used by Retry.from_int() when retries is None.
Retry.DEFAULT = Retry(3)
| gpl-3.0 |
adobe-flash/avmplus | build/buildbot/slaves/android/scripts/shell-client-android.py | 8 | 1874 | #!/usr/bin/env python
# -*- python -*-
# ex: set syntax=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys,socket,os,time,re
# Python 2 client script: forwards its command-line arguments to a shell
# server (host/port taken from SHELLSERVER/SHELLPORT env vars or a
# --shellserver=host:port first argument), collects the server's output
# until an EXITCODE marker / prompt / timeout, then exits with that code.
port=None
host=None
if os.environ.has_key("SHELLPORT"):
    try:
        port=int(os.environ.get("SHELLPORT"))
    except:
        print("error: parsing SHELLPORT")
if os.environ.has_key("SHELLSERVER"):
    host=os.environ.get("SHELLSERVER")
# a --shellserver=host:port argument overrides the environment
if len(sys.argv)>1 and re.search('^--shellserver=',sys.argv[1]):
    shellserver=sys.argv[1][14:]
    if shellserver.find(':')>-1:
        host=shellserver[0:shellserver.find(':')]
        try:
            port=int(shellserver[shellserver.find(':')+1:])
        except:
            # malformed port: keep whatever SHELLPORT provided
            True
    sys.argv=sys.argv[1:]
if (host==None or port==None):
    print("error: SHELLPORT and SHELLSERVER must be set")
    sys.exit(1)
# join the remaining arguments into one space-separated command string
args=""
for item in sys.argv[1:]:
    args+=item+" "
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,port))
s.send("abc_android %s" % args)
result=''
timeout=300
starttime=time.time()
# accumulate output until an exit marker, a prompt, or the timeout
while True:
    newdata=s.recv(1024)
    # print("data: "+newdata)
    result+=newdata
    if re.search("EXITCODE=[0-9]+\s",result):
        break
    if result.find("-- application argument")>-1:
        break
    if result.find("\n$")>-1:
        break
    if time.time()-starttime>timeout:
        print("error: timeout detected")
        break
s.close()
# parse the numeric exit code out of "EXITCODE=<n>" and strip the marker
if re.search("EXITCODE=[0-9]+\s",result):
    exitcode=result[result.find("EXITCODE")+9:]
    if exitcode.find("$")>-1:
        exitcode=exitcode[0:exitcode.find("$")]
    try:
        exitcode=int(exitcode.strip())
    except:
        # unparsable code: fall through with the raw string (original behavior)
        True
    result=result[0:result.find("EXITCODE")]
else:
    exitcode=0
print(result)
sys.exit(exitcode)
| mpl-2.0 |
asm666/sympy | sympy/galgebra/tests/test_ga.py | 44 | 24800 | # sympy/galgebra/tests/test_ga.py
"""
The reference D&L is "Geometric Algebra for Physicists" by Doran and Lasenby
"""
from sympy.core import expand, Rational, S, Symbol, symbols
from sympy.core.compatibility import range
from sympy.functions import sin, cos
from sympy.galgebra.ga import MV, Nga, Com
from sympy.galgebra.printing import GA_Printer
from sympy.matrices import Matrix
from sympy.simplify import collect, simplify
from sympy.utilities.pytest import XFAIL, slow
def F(x, n, nbar):
    """
    Conformal Mapping Function from 3D Euclidean space to 5D conformal space
    where the images of all maps are null vectors.
    """
    # keep operand order: geometric products are not commutative in general
    x_sq = x*x
    return Rational(1, 2)*(x_sq*n + 2*x - nbar)
def make_vector(a, m=3):
    """Map a to a conformal point F(a); a string argument is first expanded
    into an m-component symbolic vector MV padded with two zero components."""
    global n, nbar
    if isinstance(a, str):
        names = ''
        for i in range(m):
            names += a + str(i + 1) + ' '
        components = list(symbols(names))
        components += [S.Zero, S.Zero]
        a = MV(components, 'vector')
    return F(a, n, nbar)
def test_rmul():
    """
    Test for commutative scalar multiplication. Leftover from when sympy and
    numpy were not working together and __mul__ and __rmul__ would not give the
    same answer.
    """
    x, y, z = MV.setup('x y z')
    a, b, c = symbols('a b c')
    half = Rational(1, 2)
    assert x*5 == 5*x
    assert x*half == half*x
    assert x*a == a*x
def test_contraction():
    """
    Test for inner product and left and right contraction
    """
    e_1, e_2, e_3 = MV.setup('e_1 e_2 e_3', '1 0 0, 0 1 0, 0 0 1')
    blade = e_1 ^ e_3
    # contracting the bivector with e_1 from either side
    assert (blade | e_1) == -e_3
    assert (blade > e_1) == -e_3
    assert (e_1 | blade) == e_3
    assert (e_1 < blade) == e_3
    # the "wrong way" contractions vanish
    assert (blade < e_1) == 0
    assert (e_1 > blade) == 0
def test_substitution():
    """Substituting numeric values for the symbolic coefficients of a vector."""
    e_x, e_y, e_z = MV.setup('e_x e_y e_z', '1 0 0, 0 1 0, 0 0 1')
    x, y, z = symbols('x y z')
    X = x*e_x + y*e_y + z*e_z
    expected = 2*e_x + 3*e_y + 4*e_z
    assert X.subs([(x, 2), (y, 3), (z, 4)]) == expected
def test_vector_extraction():
    """
    Show that conformal bivector encodes two points. See D&L Section 10.4.1
    """
    # '#' entries leave the metric coefficient symbolic
    metric = ' 0 -1 #,' + \
             '-1 0 #,' + \
             ' # # #,'
    P1, P2, a = MV.setup('P1 P2 a', metric)
    """
    P1 and P2 are null vectors and hence encode points in conformal space.
    Show that P1 and P2 can be extracted from the bivector B = P1^P2. a is a
    third vector in the conformal space with a.B not 0.
    """
    B = P1 ^ P2
    Bsq = B*B
    ap = a - (a ^ B)*B
    Ap = ap + ap*B
    Am = ap - ap*B
    # placeholder symbols standing in for the inner products P1.a and P2.a
    P1dota = Symbol('(P1.a)')
    P2dota = Symbol('(P2.a)')
    Ap_test = (-2*P2dota)*P1
    Am_test = (-2*P1dota)*P2
    assert Ap == Ap_test
    assert Am == Am_test
    # both extracted vectors are null, as conformal points must be
    Ap2 = Ap*Ap
    Am2 = Am*Am
    assert Ap2 == S.Zero
    assert Am2 == S.Zero
def test_metrics():
    """
    Test specific metrics (diagpq, arbitrary_metric, arbitrary_metric_conformal)
    """
    from sympy.galgebra.ga import diagpq, arbitrary_metric
    x1, y1, z1 = symbols('x1 y1 z1')
    x2, y2, z2 = symbols('x2 y2 z2')
    # the geometric product must decompose into inner + outer part in
    # both a diagonal and a fully symbolic metric
    for metric in (diagpq(3), arbitrary_metric(3)):
        p1, p2, p3 = MV.setup('p1 p2 p3', metric, debug=0)
        v1 = x1*p1 + y1*p2 + z1*p3
        v2 = x2*p1 + y2*p2 + z2*p3
        diff = v1*v2 - ((v1 | v2) + (v1 ^ v2))
        assert diff == MV(S.Zero)
@XFAIL
def test_metrics_xfail():
    # NOTE(review): x1..z2 are never defined in this function (they are locals
    # of test_metrics), so this currently xfails with a NameError before it
    # ever exercises arbitrary_metric_conformal -- confirm the intended
    # xfail cause before "fixing" the names.
    from sympy.galgebra.ga import arbitrary_metric_conformal
    metric = arbitrary_metric_conformal(3)
    p1, p2, p3 = MV.setup('p1 p2 p3', metric, debug=0)
    v1 = x1*p1 + y1*p2 + z1*p3
    v2 = x2*p1 + y2*p2 + z2*p3
    prod1 = v1*v2
    prod2 = (v1|v2) + (v1^v2)
    diff = prod1 - prod2
    assert diff == MV(S.Zero)
def test_geometry():
    """
    Test conformal geometric description of circles, lines, spheres, and planes.
    """
    # 3D Euclidean basis plus the null vectors n, nbar with n.nbar = 2
    metric = '1 0 0 0 0,' + \
             '0 1 0 0 0,' + \
             '0 0 1 0 0,' + \
             '0 0 0 0 2,' + \
             '0 0 0 2 0'
    e0, e1, e2, n, nbar = MV.setup('e0 e1 e2 n nbar', metric, debug=0)
    e = n + nbar
    #conformal representation of points
    A = F(e0, n, nbar)     # point a = (1,0,0)  A = F(a)
    B = F(e1, n, nbar)     # point b = (0,1,0)  B = F(b)
    C = F(-1*e0, n, nbar)  # point c = (-1,0,0) C = F(c)
    D = F(e2, n, nbar)     # point d = (0,0,1)  D = F(d)
    # X = F(x) for a generic symbolic point x
    x0, x1, x2 = symbols('x0 x1 x2')
    X = F(MV([x0, x1, x2], 'vector'), n, nbar)
    # geometric objects as outer products of conformal points
    Circle = A ^ B ^ C ^ X
    Line = A ^ B ^ n ^ X
    Sphere = A ^ B ^ C ^ D ^ X
    Plane = A ^ B ^ n ^ D ^ X
    #Circle through a, b, and c
    Circle_test = -x2*(e0 ^ e1 ^ e2 ^ n) + x2*(
        e0 ^ e1 ^ e2 ^ nbar) + Rational(1, 2)*(-1 + x0**2 + x1**2 + x2**2)*(e0 ^ e1 ^ n ^ nbar)
    diff = Circle - Circle_test
    assert diff == S.Zero
    #Line through a and b
    Line_test = -x2*(e0 ^ e1 ^ e2 ^ n) + \
        Rational(1, 2)*(-1 + x0 + x1)*(e0 ^ e1 ^ n ^ nbar) + \
        (Rational(1, 2)*x2)*(e0 ^ e2 ^ n ^ nbar) + \
        (-Rational(1, 2)*x2)*(e1 ^ e2 ^ n ^ nbar)
    diff = Line - Line_test
    assert diff == S.Zero
    #Sphere through a, b, c, and d
    Sphere_test = Rational(1, 2)*(1 - x0**2 - x1**2 - x2**2)*(e0 ^ e1 ^ e2 ^ n ^ nbar)
    diff = Sphere - Sphere_test
    assert diff == S.Zero
    #Plane through a, b, and d
    Plane_test = Rational(1, 2)*(1 - x0 - x1 - x2)*(e0 ^ e1 ^ e2 ^ n ^ nbar)
    diff = Plane - Plane_test
    assert diff == S.Zero
@slow
def test_extract_plane_and_line():
    """
    Show that conformal trivector encodes planes and lines. See D&L section
    10.4.2
    """
    # symbolic 3D metric plus the null vectors n, nbar with n.nbar = 2
    metric = '# # # 0 0,' + \
             '# # # 0 0,' + \
             '# # # 0 0,' + \
             '0 0 0 0 2,' + \
             '0 0 0 2 0'
    p1, p2, p3, n, nbar = MV.setup('p1 p2 p3 n nbar', metric, debug=0)
    # conformal images of the three points
    P1 = F(p1, n, nbar)
    P2 = F(p2, n, nbar)
    P3 = F(p3, n, nbar)
    #Line through p1 and p2
    L = P1 ^ P2 ^ n
    delta = (L | n) | nbar
    delta_test = 2*p1 - 2*p2
    diff = delta - delta_test
    assert diff == S.Zero
    #Plane through p1, p2, and p3
    C = P1 ^ P2 ^ P3
    delta = ((C ^ n) | n) | nbar
    delta_test = 2*(p1 ^ p2) - 2*(p1 ^ p3) + 2*(p2 ^ p3)
    diff = delta - delta_test
    assert diff == S.Zero
@XFAIL
def test_reciprocal_frame():
    """
    Test of formula for general reciprocal frame of three vectors.
    Let three independent vectors be e1, e2, and e3. The reciprocal
    vectors E1, E2, and E3 obey the relations:

    e_i.E_j = delta_ij*(e1^e2^e3)**2

    Fix over the original: the first off-diagonal contraction (E1|e2) was
    computed but its ``assert w == 0`` was missing, so that case was never
    checked; every other pair was asserted.
    """
    metric = '1 # #,' + \
             '# 1 #,' + \
             '# # 1,'
    e1, e2, e3 = MV.setup('e1 e2 e3', metric)
    E = e1 ^ e2 ^ e3
    Esq = (E*E)()
    Esq_inv = 1/Esq
    E1 = (e2 ^ e3)*E
    E2 = (-1)*(e1 ^ e3)*E
    E3 = (e1 ^ e2)*E
    # off-diagonal contractions must vanish
    w = (E1 | e2)
    w.collect(MV.g)
    w = w().expand()
    assert w == 0
    w = (E1 | e3)
    w.collect(MV.g)
    w = w().expand()
    assert w == 0
    w = (E2 | e1)
    w.collect(MV.g)
    w = w().expand()
    assert w == 0
    w = (E2 | e3)
    w.collect(MV.g)
    w = w().expand()
    assert w == 0
    w = (E3 | e1)
    w.collect(MV.g)
    w = w().expand()
    assert w == 0
    w = (E3 | e2)
    w.collect(MV.g)
    w = w().expand()
    assert w == 0
    # diagonal contractions equal Esq
    w = (E1 | e1)
    w = w().expand()
    Esq = Esq.expand()
    assert w/Esq == 1
    w = (E2 | e2)
    w = w().expand()
    assert w/Esq == 1
    w = (E3 | e3)
    w = w().expand()
    assert w/Esq == 1
@XFAIL
def test_derivative():
    """Geometric derivative identities for a position vector X."""
    coords = x, y, z = symbols('x y z')
    e_x, e_y, e_z, _ = MV.setup('e', '1 0 0, 0 1 0, 0 0 1', coords=coords)
    X = x*e_x + y*e_y + z*e_z
    a = MV('a', 'vector')
    assert ((X | a).grad()) == a
    assert ((X*X).grad()) == 2*X
    assert (X*X*X).grad() == 5*X*X
    # divergence of the position vector in 3D
    assert X.grad_int() == 3
@XFAIL
def test_str():
e_1, e_2, e_3 = MV.setup('e_1 e_2 e_3', '1 0 0, 0 1 0, 0 0 1')
X = MV('x')
assert str(X) == 'x + x__1*e_1 + x__2*e_2 + x__3*e_3 + x__12*e_1^e_2 + x__13*e_1^e_3 + x__23*e_2^e_3 + x__123**e_1^e_2^e_3'
Y = MV('y', 'spinor')
assert str(Y) == 'y + y__12*e_1^e_2 + y__13*e_1^e_3 + y__23*e_2^e_3'
Z = X + Y
assert str(Z) == 'x + y + x__1*e_1 + x__2*e_2 + x__3*e_3 + (x__12 + y__12)*e_1^e_2 + (x__13 + y__13)*e_1^e_3 + (x__23 + y__23)*e_2^e_3 + x__123*e_1^e_2^e_3'
assert str(e_1 | e_1) == '1'
@XFAIL
def test_metric():
MV.setup('e_1 e_2 e_3', '[1,1,1]')
assert MV.metric == Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
@XFAIL
def test_constructor():
"""
Test various multivector constructors
"""
e_1, e_2, e_3 = MV.setup('e_1 e_2 e_3', '[1,1,1]')
assert str(MV('a', 'scalar')) == 'a'
assert str(MV('a', 'vector')) == 'a__1*e_1 + a__2*e_2 + a__3*e_3'
assert str(MV('a', 'pseudo')) == 'a__123*e_1^e_2^e_3'
assert str(MV('a', 'spinor')) == 'a + a__12*e_1^e_2 + a__13*e_1^e_3 + a__23*e_2^e_3'
assert str(MV('a')) == 'a + a__1*e_1 + a__2*e_2 + a__3*e_3 + a__12*e_1^e_2 + a__13*e_1^e_3 + a__23*e_2^e_3 + a__123*e_1^e_2^e_3'
assert str(MV([2, 'a'], 'grade')) == 'a__12*e_1^e_2 + a__13*e_1^e_3 + a__23*e_2^e_3'
assert str(MV('a', 'grade2')) == 'a__12*e_1^e_2 + a__13*e_1^e_3 + a__23*e_2^e_3'
def test_basic_multivector_operations():
    with GA_Printer():
        # 'e*x|y|z' declares basis e_x, e_y, e_z with symbolic metric.
        (ex, ey, ez) = MV.setup('e*x|y|z')

        A = MV('A', 'mv')
        # Same assertion repeated three times: presumably a regression guard
        # that printing does not mutate the multivector -- TODO confirm.
        assert str(A) == 'A + A__x*e_x + A__y*e_y + A__z*e_z + A__xy*e_x^e_y + A__xz*e_x^e_z + A__yz*e_y^e_z + A__xyz*e_x^e_y^e_z'
        assert str(A) == 'A + A__x*e_x + A__y*e_y + A__z*e_z + A__xy*e_x^e_y + A__xz*e_x^e_z + A__yz*e_y^e_z + A__xyz*e_x^e_y^e_z'
        assert str(A) == 'A + A__x*e_x + A__y*e_y + A__z*e_z + A__xy*e_x^e_y + A__xz*e_x^e_z + A__yz*e_y^e_z + A__xyz*e_x^e_y^e_z'

        # Geometric, outer and inner products of two symbolic vectors.
        X = MV('X', 'vector')
        Y = MV('Y', 'vector')
        assert str(X) == 'X__x*e_x + X__y*e_y + X__z*e_z'
        assert str(Y) == 'Y__x*e_x + Y__y*e_y + Y__z*e_z'
        assert str((X*Y)) == '(e_x.e_x)*X__x*Y__x + (e_x.e_y)*X__x*Y__y + (e_x.e_y)*X__y*Y__x + (e_x.e_z)*X__x*Y__z + (e_x.e_z)*X__z*Y__x + (e_y.e_y)*X__y*Y__y + (e_y.e_z)*X__y*Y__z + (e_y.e_z)*X__z*Y__y + (e_z.e_z)*X__z*Y__z + (X__x*Y__y - X__y*Y__x)*e_x^e_y + (X__x*Y__z - X__z*Y__x)*e_x^e_z + (X__y*Y__z - X__z*Y__y)*e_y^e_z'
        assert str((X ^ Y)) == '(X__x*Y__y - X__y*Y__x)*e_x^e_y + (X__x*Y__z - X__z*Y__x)*e_x^e_z + (X__y*Y__z - X__z*Y__y)*e_y^e_z'
        assert str((X | Y)) == '(e_x.e_x)*X__x*Y__x + (e_x.e_y)*X__x*Y__y + (e_x.e_y)*X__y*Y__x + (e_x.e_z)*X__x*Y__z + (e_x.e_z)*X__z*Y__x + (e_y.e_y)*X__y*Y__y + (e_y.e_z)*X__y*Y__z + (e_y.e_z)*X__z*Y__y + (e_z.e_z)*X__z*Y__z'

        # 2D algebra, symbolic metric: vector against spinor products.
        (ex, ey) = MV.setup('e*x|y')

        X = MV('X', 'vector')
        A = MV('A', 'spinor')
        assert str(X) == 'X__x*e_x + X__y*e_y'
        assert str(A) == 'A + A__xy*e_x^e_y'
        # '|' (inner), '<' (left contraction) and '>' (right contraction).
        assert str((X | A)) == '(-A__xy*((e_x.e_y)*X__x + (e_y.e_y)*X__y))*e_x + (A__xy*((e_x.e_x)*X__x + (e_x.e_y)*X__y))*e_y'
        assert str((X < A)) == '(-A__xy*((e_x.e_y)*X__x + (e_y.e_y)*X__y))*e_x + (A__xy*((e_x.e_x)*X__x + (e_x.e_y)*X__y))*e_y'
        assert str((A > X)) == '(A__xy*((e_x.e_y)*X__x + (e_y.e_y)*X__y))*e_x + (-A__xy*((e_x.e_x)*X__x + (e_x.e_y)*X__y))*e_y'

        # Same products again with an explicit Euclidean metric.
        (ex, ey) = MV.setup('e*x|y', metric='[1,1]')

        X = MV('X', 'vector')
        A = MV('A', 'spinor')
        assert str(X) == 'X__x*e_x + X__y*e_y'
        assert str(A) == 'A + A__xy*e_x^e_y'
        assert str((X*A)) == '(A*X__x - A__xy*X__y)*e_x + (A*X__y + A__xy*X__x)*e_y'
        assert str((X | A)) == '-A__xy*X__y*e_x + A__xy*X__x*e_y'
        assert str((X < A)) == '-A__xy*X__y*e_x + A__xy*X__x*e_y'
        assert str((X > A)) == 'A*X__x*e_x + A*X__y*e_y'
        assert str((A*X)) == '(A*X__x + A__xy*X__y)*e_x + (A*X__y - A__xy*X__x)*e_y'
        assert str((A | X)) == 'A__xy*X__y*e_x - A__xy*X__x*e_y'
        assert str((A < X)) == 'A*X__x*e_x + A*X__y*e_y'
        assert str((A > X)) == 'A__xy*X__y*e_x - A__xy*X__x*e_y'
    return
@slow
def test_check_generalized_BAC_CAB_formulas():
    # Generalizations of the vector identity a x (b x c) = b(a.c) - c(a.b)
    # ("BAC-CAB") to blades of higher grade.
    with GA_Printer():
        (a, b, c, d, e) = MV.setup('a b c d e')

        assert str(a | (b*c)) == '-(a.c)*b + (a.b)*c'
        assert str(a | (b ^ c)) == '-(a.c)*b + (a.b)*c'
        assert str(a | (b ^ c ^ d)) == '(a.d)*b^c - (a.c)*b^d + (a.b)*c^d'
        # Jacobi-like cyclic identity.
        assert str((a | (b ^ c)) + (c | (a ^ b)) + (b | (c ^ a))) == '0'
        assert str(a*(b ^ c) - b*(a ^ c) + c*(a ^ b)) == '3*a^b^c'
        assert str(a*(b ^ c ^ d) - b*(a ^ c ^ d) + c*(a ^ b ^ d) - d*(a ^ b ^ c)) == '4*a^b^c^d'
        assert str((a ^ b) | (c ^ d)) == '-(a.c)*(b.d) + (a.d)*(b.c)'
        assert str(((a ^ b) | c) | d) == '-(a.c)*(b.d) + (a.d)*(b.c)'
        # Com is presumably the commutator product -- TODO confirm.
        assert str(Com(a ^ b, c ^ d)) == '-(b.d)*a^c + (b.c)*a^d + (a.d)*b^c - (a.c)*b^d'
        assert str((a | (b ^ c)) | (d ^ e)) == '(-(a.b)*(c.e) + (a.c)*(b.e))*d + ((a.b)*(c.d) - (a.c)*(b.d))*e'
    return
def test_derivatives_in_rectangular_coordinates():
    # grad applied to scalar, vector, bivector and general multivector
    # fields in a Cartesian frame; 'D{x}f' denotes the partial df/dx.
    with GA_Printer():
        X = (x, y, z) = symbols('x y z')
        (ex, ey, ez, grad) = MV.setup('e_x e_y e_z', metric='[1,1,1]', coords=X)

        f = MV('f', 'scalar', fct=True)
        A = MV('A', 'vector', fct=True)
        B = MV('B', 'grade2', fct=True)
        C = MV('C', 'mv', fct=True)
        assert str(f) == 'f'
        assert str(A) == 'A__x*e_x + A__y*e_y + A__z*e_z'
        assert str(B) == 'B__xy*e_x^e_y + B__xz*e_x^e_z + B__yz*e_y^e_z'
        assert str(C) == 'C + C__x*e_x + C__y*e_y + C__z*e_z + C__xy*e_x^e_y + C__xz*e_x^e_z + C__yz*e_y^e_z + C__xyz*e_x^e_y^e_z'

        # Gradient, divergence, full geometric derivative and curl.
        assert str(grad*f) == 'D{x}f*e_x + D{y}f*e_y + D{z}f*e_z'
        assert str(grad | A) == 'D{x}A__x + D{y}A__y + D{z}A__z'
        assert str(grad*A) == 'D{x}A__x + D{y}A__y + D{z}A__z + (-D{y}A__x + D{x}A__y)*e_x^e_y + (-D{z}A__x + D{x}A__z)*e_x^e_z + (-D{z}A__y + D{y}A__z)*e_y^e_z'

        assert str(-MV.I*(grad ^ A)) == '(-D{z}A__y + D{y}A__z)*e_x + (D{z}A__x - D{x}A__z)*e_y + (-D{y}A__x + D{x}A__y)*e_z'
        assert str(grad*B) == '(-(D{y}B__xy + D{z}B__xz))*e_x + (D{x}B__xy - D{z}B__yz)*e_y + (D{x}B__xz + D{y}B__yz)*e_z + (D{z}B__xy - D{y}B__xz + D{x}B__yz)*e_x^e_y^e_z'
        assert str(grad ^ B) == '(D{z}B__xy - D{y}B__xz + D{x}B__yz)*e_x^e_y^e_z'
        assert str(grad | B) == '(-(D{y}B__xy + D{z}B__xz))*e_x + (D{x}B__xy - D{z}B__yz)*e_y + (D{x}B__xz + D{y}B__yz)*e_z'

        # Left/right contraction variants of the derivative.
        assert str(grad < A) == 'D{x}A__x + D{y}A__y + D{z}A__z'
        assert str(grad > A) == 'D{x}A__x + D{y}A__y + D{z}A__z'
        assert str(grad < B) == '(-(D{y}B__xy + D{z}B__xz))*e_x + (D{x}B__xy - D{z}B__yz)*e_y + (D{x}B__xz + D{y}B__yz)*e_z'
        assert str(grad > B) == '0'
        assert str(grad < C) == 'D{x}C__x + D{y}C__y + D{z}C__z + (-(D{y}C__xy + D{z}C__xz))*e_x + (D{x}C__xy - D{z}C__yz)*e_y + (D{x}C__xz + D{y}C__yz)*e_z + D{z}C__xyz*e_x^e_y - D{y}C__xyz*e_x^e_z + D{x}C__xyz*e_y^e_z'
        assert str(grad > C) == 'D{x}C__x + D{y}C__y + D{z}C__z + D{x}C*e_x + D{y}C*e_y + D{z}C*e_z'
    return
def test_derivatives_in_spherical_coordinates():
    # grad in curvilinear coordinates: curv supplies the embedding of
    # (r, theta, phi) into R^3 and the scale factors (1, r, r*sin(theta)).
    with GA_Printer():
        X = (r, th, phi) = symbols('r theta phi')
        curv = [[r*cos(phi)*sin(th), r*sin(phi)*sin(th), r*cos(th)], [1, r, r*sin(th)]]
        (er, eth, ephi, grad) = MV.setup('e_r e_theta e_phi', metric='[1,1,1]', coords=X, curv=curv)

        f = MV('f', 'scalar', fct=True)
        A = MV('A', 'vector', fct=True)
        B = MV('B', 'grade2', fct=True)

        assert str(f) == 'f'
        assert str(A) == 'A__r*e_r + A__theta*e_theta + A__phi*e_phi'
        assert str(B) == 'B__rtheta*e_r^e_theta + B__rphi*e_r^e_phi + B__thetaphi*e_theta^e_phi'

        # Standard spherical gradient, divergence and curl expressions.
        assert str(grad*f) == 'D{r}f*e_r + D{theta}f/r*e_theta + D{phi}f/(r*sin(theta))*e_phi'
        assert str(grad | A) == 'D{r}A__r + 2*A__r/r + A__theta*cos(theta)/(r*sin(theta)) + D{theta}A__theta/r + D{phi}A__phi/(r*sin(theta))'
        assert str(-MV.I*(grad ^ A)) == '((A__phi*cos(theta)/sin(theta) + D{theta}A__phi - D{phi}A__theta/sin(theta))/r)*e_r + (-D{r}A__phi - A__phi/r + D{phi}A__r/(r*sin(theta)))*e_theta + (D{r}A__theta + A__theta/r - D{theta}A__r/r)*e_phi'
        assert str(grad ^ B) == '(D{r}B__thetaphi - B__rphi*cos(theta)/(r*sin(theta)) + 2*B__thetaphi/r - D{theta}B__rphi/r + D{phi}B__rtheta/(r*sin(theta)))*e_r^e_theta^e_phi'
    return
def test_rounding_numerical_components():
    # Nga presumably rounds numerical coefficients of a multivector to the
    # requested number of significant digits -- TODO confirm.
    with GA_Printer():
        (ex, ey, ez) = MV.setup('e_x e_y e_z', metric='[1,1,1]')

        X = 1.2*ex + 2.34*ey + 0.555*ez
        Y = 0.333*ex + 4*ey + 5.3*ez

        assert str(X) == '1.20000000000000*e_x + 2.34000000000000*e_y + 0.555000000000000*e_z'
        assert str(Nga(X, 2)) == '1.2*e_x + 2.3*e_y + 0.55*e_z'
        assert str(X*Y) == '12.7011000000000 + 4.02078000000000*e_x^e_y + 6.17518500000000*e_x^e_z + 10.1820000000000*e_y^e_z'
        assert str(Nga(X*Y, 2)) == '13. + 4.0*e_x^e_y + 6.2*e_x^e_z + 10.*e_y^e_z'
    return
def test_noneuclidian_distance_calculation():
    # Derivation of the hyperbolic distance between two points X and Y in a
    # non-Euclidean (null-vector) model; follows a chain of symbolic
    # manipulations whose intermediate forms are pinned by string asserts.
    from sympy import solve, sqrt
    with GA_Printer():
        metric = '0 # #,# 0 #,# # 1'
        (X, Y, e) = MV.setup('X Y e', metric)

        assert str((X ^ Y)*(X ^ Y)) == '(X.Y)**2'

        L = X ^ Y ^ e
        B = L*e
        assert str(B) == 'X^Y - (Y.e)*X^e + (X.e)*Y^e'
        Bsq = B*B
        assert str(Bsq) == '(X.Y)*((X.Y) - 2*(X.e)*(Y.e))'
        Bsq = Bsq.scalar()
        assert str(B) == 'X^Y - (Y.e)*X^e + (X.e)*Y^e'

        BeBr = B*e*B.rev()
        assert str(BeBr) == '((X.Y)*(-(X.Y) + 2*(X.e)*(Y.e)))*e'
        assert str(B*B) == '(X.Y)*((X.Y) - 2*(X.e)*(Y.e))'
        assert str(L*L) == '(X.Y)*((X.Y) - 2*(X.e)*(Y.e))'

        # Scalar placeholders for inner products and hyperbolic functions;
        # e.g. '(1/B)' stands for the inverse magnitude of B.
        (s, c, Binv, M, BigS, BigC, alpha, XdotY, Xdote, Ydote) = symbols('s c (1/B) M S C alpha (X.Y) (X.e) (Y.e)')

        # Rotor R = cosh + sinh*Bhat; apply it to X and expand.
        Bhat = Binv*B
        R = c + s*Bhat
        assert str(R) == 'c + (1/B)*s*X^Y - (1/B)*(Y.e)*s*X^e + (1/B)*(X.e)*s*Y^e'

        Z = R*X*R.rev()
        Z.obj = expand(Z.obj)
        Z.obj = Z.obj.collect([Binv, s, c, XdotY])
        assert str(Z) == '((1/B)**2*(X.Y)**2*s**2 - 2*(1/B)**2*(X.Y)*(X.e)*(Y.e)*s**2 + 2*(1/B)*(X.Y)*c*s - 2*(1/B)*(X.e)*(Y.e)*c*s + c**2)*X + 2*(1/B)*(X.e)**2*c*s*Y + (2*(1/B)*(X.Y)*(X.e)*s*(-(1/B)*(X.Y)*s + 2*(1/B)*(X.e)*(Y.e)*s - c))*e'
        W = Z | Y
        # From this point forward all calculations are with sympy scalars
        W = W.scalar()
        assert str(W) == '(1/B)**2*(X.Y)**3*s**2 - 4*(1/B)**2*(X.Y)**2*(X.e)*(Y.e)*s**2 + 4*(1/B)**2*(X.Y)*(X.e)**2*(Y.e)**2*s**2 + 2*(1/B)*(X.Y)**2*c*s - 4*(1/B)*(X.Y)*(X.e)*(Y.e)*c*s + (X.Y)*c**2'
        W = expand(W)
        W = simplify(W)
        W = W.collect([s*Binv])

        M = 1/Bsq
        W = W.subs(Binv**2, M)
        W = simplify(W)
        Bmag = sqrt(XdotY**2 - 2*XdotY*Xdote*Ydote)
        W = W.collect([Binv*c*s, XdotY])

        #Double angle substitutions
        W = W.subs(2*XdotY**2 - 4*XdotY*Xdote*Ydote, 2/(Binv**2))
        W = W.subs(2*c*s, BigS)
        W = W.subs(c**2, (BigC + 1)/2)
        W = W.subs(s**2, (BigC - 1)/2)
        W = simplify(W)
        W = expand(W)
        W = W.subs(1/Binv, Bmag)
        assert str(W) == '(X.Y)*C - (X.e)*(Y.e)*C + (X.e)*(Y.e) + S*sqrt((X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e))'

        # Split W into the coefficients of 1, C = cosh(2*alpha), S = sinh(2*alpha).
        Wd = collect(W, [BigC, BigS], exact=True, evaluate=False)

        Wd_1 = Wd[S.One]
        Wd_C = Wd[BigC]
        Wd_S = Wd[BigS]
        assert str(Wd_1) == '(X.e)*(Y.e)'
        assert str(Wd_C) == '(X.Y) - (X.e)*(Y.e)'
        assert str(Wd_S) == 'sqrt((X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e))'

        assert str(Bmag) == 'sqrt((X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e))'
        Wd_1 = Wd_1.subs(Bmag, 1/Binv)
        Wd_C = Wd_C.subs(Bmag, 1/Binv)
        Wd_S = Wd_S.subs(Bmag, 1/Binv)

        # Square both sides to eliminate S, then reduce to a quadratic in C.
        lhs = Wd_1 + Wd_C*BigC
        rhs = -Wd_S*BigS
        lhs = lhs**2
        rhs = rhs**2
        W = expand(lhs - rhs)
        W = expand(W.subs(1/Binv**2, Bmag**2))
        W = expand(W.subs(BigS**2, BigC**2 - 1))
        W = W.collect([BigC, BigC**2], evaluate=False)

        a = simplify(W[BigC**2])
        b = simplify(W[BigC])
        c = simplify(W[S.One])
        assert str(a) == '(X.e)**2*(Y.e)**2'
        assert str(b) == '2*(X.e)*(Y.e)*((X.Y) - (X.e)*(Y.e))'
        assert str(c) == '(X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e) + (X.e)**2*(Y.e)**2'

        # Solving the quadratic gives cosh of the distance in closed form.
        x = Symbol('x')
        C = solve(a*x**2 + b*x + c, x)[0]
        assert str(expand(simplify(expand(C)))) == '-(X.Y)/((X.e)*(Y.e)) + 1'
    return
def test_conformal_representations_of_circles_lines_spheres_and_planes():
    # n and nbar are published as globals because make_vector (defined
    # earlier in this file) presumably reads them -- TODO confirm.
    global n, nbar
    with GA_Printer():

        metric = '1 0 0 0 0,0 1 0 0 0,0 0 1 0 0,0 0 0 0 2,0 0 0 2 0'

        (e1, e2, e3, n, nbar) = MV.setup('e_1 e_2 e_3 n nbar', metric)

        # e = n + nbar is the conformal vector orthogonal to the null pair.
        e = n + nbar

        #conformal representation of points
        A = make_vector(e1)
        B = make_vector(e2)
        C = make_vector(-e1)
        D = make_vector(e3)
        X = make_vector('x', 3)

        assert str(A) == 'e_1 + 1/2*n - 1/2*nbar'
        assert str(B) == 'e_2 + 1/2*n - 1/2*nbar'
        assert str(C) == '-e_1 + 1/2*n - 1/2*nbar'
        assert str(D) == 'e_3 + 1/2*n - 1/2*nbar'
        assert str(X) == 'x1*e_1 + x2*e_2 + x3*e_3 + ((x1**2 + x2**2 + x3**2)/2)*n - 1/2*nbar'

        # Wedging points (and n / e) yields circles, lines, spheres, planes.
        assert str((A ^ B ^ C ^ X)) == '-x3*e_1^e_2^e_3^n + x3*e_1^e_2^e_3^nbar + ((x1**2 + x2**2 + x3**2 - 1)/2)*e_1^e_2^n^nbar'
        assert str((A ^ B ^ n ^ X)) == '-x3*e_1^e_2^e_3^n + ((x1 + x2 - 1)/2)*e_1^e_2^n^nbar + x3/2*e_1^e_3^n^nbar - x3/2*e_2^e_3^n^nbar'
        assert str((((A ^ B) ^ C) ^ D) ^ X) == '((-x1**2 - x2**2 - x3**2 + 1)/2)*e_1^e_2^e_3^n^nbar'
        assert str((A ^ B ^ n ^ D ^ X)) == '((-x1 - x2 - x3 + 1)/2)*e_1^e_2^e_3^n^nbar'

        L = (A ^ B ^ e) ^ X

        assert str(L) == '-x3*e_1^e_2^e_3^n - x3*e_1^e_2^e_3^nbar + (-x1**2/2 + x1 - x2**2/2 + x2 - x3**2/2 - 1/2)*e_1^e_2^n^nbar + x3*e_1^e_3^n^nbar - x3*e_2^e_3^n^nbar'
    return
@slow
def test_properties_of_geometric_objects():
    # Same extraction identities as test_extract_plane_and_line, but pinned
    # via string comparison instead of symbolic difference.
    with GA_Printer():
        metric = '# # # 0 0,' + \
                 '# # # 0 0,' + \
                 '# # # 0 0,' + \
                 '0 0 0 0 2,' + \
                 '0 0 0 2 0'

        (p1, p2, p3, n, nbar) = MV.setup('p1 p2 p3 n nbar', metric)

        P1 = F(p1, n, nbar)
        P2 = F(p2, n, nbar)
        P3 = F(p3, n, nbar)

        # Direction of the line through p1 and p2.
        L = P1 ^ P2 ^ n
        delta = (L | n) | nbar
        assert str(delta) == '2*p1 - 2*p2'

        # Tangent bivector of the circle through p1, p2, p3.
        C = P1 ^ P2 ^ P3
        delta = ((C ^ n) | n) | nbar
        assert str(delta) == '2*p1^p2 - 2*p1^p3 + 2*p2^p3'
        assert str((p2 - p1) ^ (p3 - p1)) == 'p1^p2 - p1^p3 + p2^p3'
    return
def test_extracting_vectors_from_conformal_2_blade():
    # Recover the two null points P1, P2 from the 2-blade B = P1^P2.
    with GA_Printer():
        metric = ' 0 -1 #,' + \
                 '-1 0 #,' + \
                 ' # # #,'

        (P1, P2, a) = MV.setup('P1 P2 a', metric)

        B = P1 ^ P2
        Bsq = B*B
        assert str(Bsq) == '1'
        ap = a - (a ^ B)*B
        assert str(ap) == '-(P2.a)*P1 - (P1.a)*P2'

        # Projecting ap onto the +/- eigenspaces of B isolates P1 and P2;
        # both results square to zero since P1, P2 are null.
        Ap = ap + ap*B
        Am = ap - ap*B

        assert str(Ap) == '-2*(P2.a)*P1'
        assert str(Am) == '-2*(P1.a)*P2'

        assert str(Ap*Ap) == '0'
        assert str(Am*Am) == '0'

        aB = a | B
        assert str(aB) == '-(P2.a)*P1 + (P1.a)*P2'
    return
def test_reciprocal_frame_test():
    # Working version of test_reciprocal_frame above: checks
    # e_i.E_j == 0 for i != j and e_i.E_i == Esq.
    with GA_Printer():
        metric = '1 # #,' + \
                 '# 1 #,' + \
                 '# # 1,'

        (e1, e2, e3) = MV.setup('e1 e2 e3', metric)

        E = e1 ^ e2 ^ e3
        Esq = (E*E).scalar()
        assert str(E) == 'e1^e2^e3'
        assert str(Esq) == '(e1.e2)**2 - 2*(e1.e2)*(e1.e3)*(e2.e3) + (e1.e3)**2 + (e2.e3)**2 - 1'
        Esq_inv = 1/Esq

        # Reciprocal frame vectors (scaled by Esq).
        E1 = (e2 ^ e3)*E
        E2 = (-1)*(e1 ^ e3)*E
        E3 = (e1 ^ e2)*E

        assert str(E1) == '((e2.e3)**2 - 1)*e1 + ((e1.e2) - (e1.e3)*(e2.e3))*e2 + (-(e1.e2)*(e2.e3) + (e1.e3))*e3'
        assert str(E2) == '((e1.e2) - (e1.e3)*(e2.e3))*e1 + ((e1.e3)**2 - 1)*e2 + (-(e1.e2)*(e1.e3) + (e2.e3))*e3'
        assert str(E3) == '(-(e1.e2)*(e2.e3) + (e1.e3))*e1 + (-(e1.e2)*(e1.e3) + (e2.e3))*e2 + ((e1.e2)**2 - 1)*e3'

        # Off-diagonal inner products vanish.
        w = (E1 | e2)
        w = w.expand()
        assert str(w) == '0'

        w = (E1 | e3)
        w = w.expand()
        assert str(w) == '0'

        w = (E2 | e1)
        w = w.expand()
        assert str(w) == '0'

        w = (E2 | e3)
        w = w.expand()
        assert str(w) == '0'

        w = (E3 | e1)
        w = w.expand()
        assert str(w) == '0'

        w = (E3 | e2)
        w = w.expand()
        assert str(w) == '0'

        # Diagonal inner products equal Esq.
        w = (E1 | e1)
        w = (w.expand()).scalar()
        Esq = expand(Esq)
        assert str(simplify(w/Esq)) == '1'

        w = (E2 | e2)
        w = (w.expand()).scalar()
        assert str(simplify(w/Esq)) == '1'

        w = (E3 | e3)
        w = (w.expand()).scalar()
        assert str(simplify(w/Esq)) == '1'
    return
| bsd-3-clause |
handroissuazo/tensorflow | tensorflow/contrib/learn/python/learn/estimators/__init__.py | 12 | 11510 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An estimator is a rule for calculating an estimate of a given quantity.
# Estimators
* **Estimators** are used to train and evaluate TensorFlow models.
They support regression and classification problems.
* **Classifiers** are functions that have discrete outcomes.
* **Regressors** are functions that predict continuous values.
## Choosing the correct estimator
* For **Regression** problems use one of the following:
* `LinearRegressor`: Uses linear model.
* `DNNRegressor`: Uses DNN.
* `DNNLinearCombinedRegressor`: Uses Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest. Use `.predict()` for
regression problems.
* `Estimator`: Use when you need a custom model.
* For **Classification** problems use one of the following:
* `LinearClassifier`: Multiclass classifier using Linear model.
* `DNNClassifier`: Multiclass classifier using DNN.
* `DNNLinearCombinedClassifier`: Multiclass classifier using Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest. Use `.predict_proba()` when
using for binary classification problems.
* `SVM`: Binary classifier using linear SVMs.
* `LogisticRegressor`: Use when you need custom model for binary
classification.
* `Estimator`: Use when you need custom model for N class classification.
## Pre-canned Estimators
Pre-canned estimators are machine learning estimators premade for general
purpose problems. If you need more customization, you can always write your
own custom estimator as described in the section below.
Pre-canned estimators are tested and optimized for speed and quality.
### Define the feature columns
Here are some possible types of feature columns used as inputs to a pre-canned
estimator.
Feature columns may vary based on the estimator used. So you can see which
feature columns are fed to each estimator in the below section.
```python
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
```
### Create the pre-canned estimator
DNNClassifier, DNNRegressor, and DNNLinearCombinedClassifier are all pretty
similar to each other in how you use them. You can easily plug in an
optimizer and/or regularization to those estimators.
#### DNNClassifier
A classifier for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNClassifier(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNRegressor
A regressor for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNLinearCombinedClassifier
A classifier for TensorFlow Linear and DNN joined training models.
* Wide and deep model
* Multi class (2 by default)
```python
my_linear_features = [crossed_feature_a_x_b]
my_deep_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNLinearCombinedClassifier(
# Common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# Wide settings
linear_feature_columns=my_linear_features,
linear_optimizer=tf.train.FtrlOptimizer(...),
# Deep settings
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
```
#### LinearClassifier
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearClassifier(
feature_columns=my_features,
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### LinearRegressor
Train a linear regression model to predict a label value given observation of
feature values.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearRegressor(
feature_columns=my_features)
```
### LogisticRegressor
Logistic regression estimator for binary classification.
```python
# See tf.contrib.learn.Estimator(...) for details on model_fn structure
def my_model_fn(...):
pass
estimator = LogisticRegressor(model_fn=my_model_fn)
# Input builders
def input_fn_train:
pass
estimator.fit(input_fn=input_fn_train)
estimator.predict(x=x)
```
#### SVM - Support Vector Machine
Support Vector Machine (SVM) model for binary classification.
Currently only linear SVMs are supported.
```python
my_features = [real_feature, sparse_feature_a]
estimator = SVM(
example_id_column='example_id',
feature_columns=my_features,
l2_regularization=10.0)
```
#### TensorForestEstimator
Supports regression and binary classification.
```python
params = tf.contrib.tensor_forest.python.tensor_forest.ForestHParams(
num_classes=2, num_features=40, num_trees=10, max_nodes=1000)
# Estimator using the default graph builder.
estimator = TensorForestEstimator(params, model_dir=model_dir)
# Or estimator using TrainingLossForest as the graph builder.
estimator = TensorForestEstimator(
params, graph_builder_class=tensor_forest.TrainingLossForest,
model_dir=model_dir)
# Input builders
def input_fn_train: # returns x, y
...
def input_fn_eval: # returns x, y
...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
### Use the estimator
There are two main functions for using estimators, one of which is for
training, and one of which is for evaluation.
You can specify different data sources for each one in order to use different
datasets for train and eval.
```python
# Input builders
def input_fn_train: # returns x, Y
...
estimator.fit(input_fn=input_fn_train)
def input_fn_eval: # returns x, Y
...
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
## Creating Custom Estimator
To create a custom `Estimator`, provide a function to `Estimator`'s
constructor that builds your model (`model_fn`, below):
```python
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
model_dir=model_dir) # Where the model's data (e.g., checkpoints)
# are saved.
```
Here is a skeleton of this function, with descriptions of its arguments and
return values in the accompanying tables:
```python
def model_fn(features, targets, mode, params):
# Logic to do the following:
# 1. Configure the model via TensorFlow operations
# 2. Define the loss function for training/evaluation
# 3. Define the training operation/optimizer
# 4. Generate predictions
return predictions, loss, train_op
```
You may use `mode` and check against
`tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}` to parameterize `model_fn`.
In the Further Reading section below, there is an end-to-end TensorFlow
tutorial for building a custom estimator.
## Additional Estimators
There is an additional estimator under
`tensorflow.contrib.factorization.python.ops`:
* Gaussian mixture model (GMM) clustering
## Further reading
For further reading, there are several tutorials with relevant topics,
including:
* [Overview of linear models](../../../tutorials/linear/overview.md)
* [Linear model tutorial](../../../tutorials/wide/index.md)
* [Wide and deep learning tutorial](../../../tutorials/wide_and_deep/index.md)
* [Custom estimator tutorial](../../../tutorials/estimators/index.md)
* [Building input functions](../../../tutorials/input_fn/index.md)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.constants import ProblemType
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
from tensorflow.contrib.learn.python.learn.estimators.kmeans import KMeansClustering
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey
from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import Environment
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import TaskType
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
| apache-2.0 |
nno/MOxUnit | tools/matlab_tokenizer.py | 4 | 5432 | #
# simple Tokenizer for Matlab / Octave code
#
import re
class Token(object):
    """A single lexical token of Matlab/Octave source.

    ``content`` is the raw text and ``type`` one of the class-level
    constants below.
    """
    NAME = 1        # identifiers, keywords, numbers
    OP = 2          # single-character operators/punctuation
    COMMENT = 3     # '%' to end of line
    NL = 4          # newline
    WHITESPACE = 5  # runs of spaces/tabs

    def __init__(self, content, type):
        self.content = content
        self.type = type

    def __str__(self):
        return '"%s" [%s]' % (self.content, self.type)

    def __repr__(self):
        return self.__str__()

    @staticmethod
    def join(tks, sep=''):
        """Concatenate token contents, separated by *sep*."""
        return sep.join(tk.content for tk in tks)

    @staticmethod
    def get_lines(tks, line_prefix=''):
        """Reassemble tokens into source lines, prefixing each with
        *line_prefix*.  A trailing partial line (no NL token) is kept."""
        lines = []
        cur_line_parts = []

        def insert_prefix(line):
            return line_prefix + line

        for tk in tks:
            if tk.type == Token.NL:
                cur_line = insert_prefix(''.join(cur_line_parts))
                lines.append(cur_line)
                cur_line_parts = []
            else:
                cur_line_parts.append(tk.content)

        if len(cur_line_parts):
            cur_line = insert_prefix(''.join(cur_line_parts))
            lines.append(cur_line)

        return lines


class Tokenizer(object):
    '''
    Simple tokenizer of Matlab code

    Splits a string up in tokens representing a name, operator, comment,
    newline or comment.

    Current limitations:
    - no support for line continuations
    - floating point numbers may be represented by two name and one operator
    - multi-line comments are not recognized
    '''
    OP_CHARS = ['(', ')', '{', '}', '@', '^', '&', '*', '-', '+'
                , '=', ';', ':', '|', '<', '>', ',', ',/', '[', '\]', '.']

    @staticmethod
    def tokenize(string):
        """Tokenize *string*, returning a list of Token objects.

        Every source line (including the last) contributes a trailing NL
        token.
        """
        lines = string.split('\n')
        tokens = []
        ws_sp = re.compile(r'(\s+)')
        # Bug fix: escape the operator characters before embedding them in a
        # character class.  Previously the unescaped join produced '*-+',
        # where '-' forms a regex range, so '-' itself was never matched as
        # an operator.
        op_sp = re.compile('([%s])' % re.escape(''.join(Tokenizer.OP_CHARS)))

        for line in lines:
            inside_str = False
            comment_start = None

            # Find the first '%' that is not inside a quoted string; that is
            # where the comment starts.
            for i, c in enumerate(line):
                if c == "'":
                    inside_str = not inside_str
                elif not inside_str and c == '%':
                    # Bug fix: stop at the FIRST unquoted '%'.  The old code
                    # kept updating comment_start, so "x % a % b" recorded
                    # the comment as starting at the second '%'.
                    comment_start = i
                    break

            if comment_start is None:
                code_line = line
            else:
                code_line = line[:comment_start]

            # Split on whitespace with a capturing group: odd-indexed pieces
            # are the whitespace runs themselves.
            for i, s in enumerate(ws_sp.split(code_line)):
                is_whitespace = i % 2 == 1

                if is_whitespace:
                    tk = Token(s, Token.WHITESPACE)
                    tokens.append(tk)
                else:
                    # Same trick for operators: odd-indexed pieces are ops.
                    for j, t in enumerate(op_sp.split(s)):
                        is_op = j % 2 == 1
                        if len(t) == 0:
                            continue
                        type = Token.OP if is_op else Token.NAME
                        tk = Token(t, type)
                        tokens.append(tk)

            if comment_start is not None:
                comment = line[comment_start:]
                tk = Token(comment, Token.COMMENT)
                tokens.append(tk)

            tokens.append(Token('\n', Token.NL))

        return tokens

    @staticmethod
    def from_file(fn):
        """Tokenize the contents of file *fn*."""
        with open(fn) as f:
            string = f.read()
        return Tokenizer.tokenize(string)
class TokenPattern(object):
    """Base class for patterns matched against a token sequence.

    Subclasses implement ``_matches(tks, pos)`` returning the position just
    after the consumed tokens (or the position where the next pattern should
    continue), or None if the pattern does not match at *pos*.
    """
    def __init__(self, content, type):
        self.content = content
        self.type = type

    def _matches(self, tks, pos):
        # Consistency fix: the base signature now matches the subclasses'
        # (previously it was declared as _matches(self, tk)).
        raise NotImplementedError

    def __str__(self):
        return '%s(%s,%s)' % (self.__class__.__name__,
                              self.content,
                              self.type)

    @staticmethod
    def find(token_pats, tks, start=0, stop=None):
        """Find the first occurrence of the pattern sequence *token_pats*
        in *tks*, searching start positions in ``[start, stop)``.

        Returns a ``(first, after_last)`` index pair, or None.
        """
        n_pat = len(token_pats)
        n_tk = len(tks)

        # Bug fix (off-by-one): the upper bound must be inclusive of
        # n_tk - n_pat, otherwise a match ending at the last token is
        # never found.  Also range instead of xrange for Python 3.
        for i_start in range(start, n_tk - n_pat + 1):
            if stop is not None and i_start >= stop:
                break

            matches = True
            tk_pos = i_start

            for j, token_pat in enumerate(token_pats):
                tk_pos = token_pat._matches(tks, tk_pos)
                if tk_pos is None:
                    matches = False
                    break

            if matches:
                return (i_start, tk_pos)

        return None


class LiteralTokenPattern(TokenPattern):
    """Match exactly one token; None in content/type acts as a wildcard."""
    def _matches(self, tks, pos):
        tk = tks[pos]
        if (self.content is not None and
                self.content != tk.content):
            return None
        if (self.type is not None and
                self.type != tk.type):
            return None
        return pos + 1


class OptionalTypeTokenPattern(TokenPattern):
    """Greedily consume zero or more tokens of the given type."""
    def __init__(self, type):
        super(OptionalTypeTokenPattern, self).__init__(None, type)

    def _matches(self, tks, pos):
        # Bug fix: the old loop returned len(tks) - 1 when every remaining
        # token matched (dropping the last one), and raised NameError when
        # pos == len(tks).
        i = pos
        n = len(tks)
        while i < n and tks[i].type == self.type:
            i += 1
        return i


class SkipUntilTokenPattern(TokenPattern):
    """Advance to the index of the first token matching content and type
    (the matching token itself is left for the next pattern)."""
    def _matches(self, tks, pos):
        for i in range(pos, len(tks)):
            tk = tks[i]
            matches_content = self.content is None or tk.content == self.content
            matches_type = self.type == tk.type
            if matches_content and matches_type:
                return i
        return None


class SkipUntilTypeTokenPattern(SkipUntilTokenPattern):
    """Advance to the first token of the given type, any content."""
    def __init__(self, type):
        super(SkipUntilTypeTokenPattern, self).__init__(None, type)
| mit |
radoondas/elasticbeat | vendor/github.com/elastic/beats/dev-tools/aggregate_coverage.py | 10 | 1735 | #!/usr/bin/env python
"""Simple script to concatenate coverage reports.
"""
import os
import sys
import argparse
import fnmatch
def main(arguments):
    """Concatenate Go coverage profiles found under a directory.

    Recursively collects ``*.cov`` files below ``dir``, merges the hit
    counts of identical code positions, and writes a single
    ``mode: atomic`` profile to ``--outfile`` (stdout by default).
    """
    parser = argparse.ArgumentParser(description=__doc__,
                        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument('dir', help="Input dir to search recursively for .cov files")
    parser.add_argument('-o', '--outfile', help="Output file",
                        default=sys.stdout, type=argparse.FileType('w'))
    args = parser.parse_args(arguments)

    # Recursively find all matching .cov files.
    matches = []
    for root, dirnames, filenames in os.walk(args.dir):
        for filename in fnmatch.filter(filenames, '*.cov'):
            matches.append(os.path.join(root, filename))

    # Merge hit counts per source position.  Each profile line has the form
    # "<position> <statement count> <hit count>".
    lines = {}
    args.outfile.write('mode: atomic\n')
    for m in matches:
        # Skip the output file itself in case it also matches *.cov.
        if os.path.abspath(args.outfile.name) != os.path.abspath(m):
            with open(m) as f:
                for line in f:
                    if not line.startswith('mode:') and "vendor" not in line:
                        (position, stmt, count) = line.split(" ")
                        stmt = int(stmt)
                        count = int(count)
                        prev_count = 0
                        # Bug fix: dict.has_key() is Python-2-only; use the
                        # portable membership test instead.
                        if position in lines:
                            (_, prev_stmt, prev_count) = lines[position]
                            # The statement count for a position must agree
                            # across profiles.
                            assert prev_stmt == stmt
                        lines[position] = (position, stmt, prev_count + count)

    for line in sorted(["%s %d %d\n" % lines[key] for key in lines.keys()]):
        args.outfile.write(line)

    # Close (and thereby flush) the output file so callers see the full
    # result; leave stdout untouched.
    if args.outfile is not sys.stdout:
        args.outfile.close()


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| apache-2.0 |
jeromeetienne/neoip | libxml2-2.6.27/python/tests/validate.py | 87 | 1710 | #!/usr/bin/python -u
import sys
import libxml2
# Memory debug specific
libxml2.debugMemory(1)

# Parse a known-good document with DTD validation enabled and check both
# the parsed tree and the validity flag.
# NOTE: print is written in call form ("print(...)") so this script runs
# under both Python 2 and Python 3; the old statement form was a syntax
# error on Python 3.
ctxt = libxml2.createFileParserCtxt("valid.xml")
ctxt.validate(1)
ctxt.parseDocument()
doc = ctxt.doc()
valid = ctxt.isValid()
if doc.name != "valid.xml":
    print("doc.name failed")
    sys.exit(1)
root = doc.children
if root.name != "doc":
    print("root.name failed")
    sys.exit(1)
if valid != 1:
    print("validity check failed")
    sys.exit(1)
doc.freeDoc()

# Re-parse the valid document many times to exercise leak-free reuse.
i = 1000
while i > 0:
    ctxt = libxml2.createFileParserCtxt("valid.xml")
    ctxt.validate(1)
    ctxt.parseDocument()
    doc = ctxt.doc()
    valid = ctxt.isValid()
    doc.freeDoc()
    if valid != 1:
        print("validity check failed")
        sys.exit(1)
    i = i - 1

# Deactivate error messages from the validation
def noerr(ctx, str):
    pass

libxml2.registerErrorHandler(noerr, None)

# Same checks against an invalid document: parsing still yields a tree,
# but the validity flag must be 0.
ctxt = libxml2.createFileParserCtxt("invalid.xml")
ctxt.validate(1)
ctxt.parseDocument()
doc = ctxt.doc()
valid = ctxt.isValid()
if doc.name != "invalid.xml":
    print("doc.name failed")
    sys.exit(1)
root = doc.children
if root.name != "doc":
    print("root.name failed")
    sys.exit(1)
if valid != 0:
    print("validity check failed")
    sys.exit(1)
doc.freeDoc()

i = 1000
while i > 0:
    ctxt = libxml2.createFileParserCtxt("invalid.xml")
    ctxt.validate(1)
    ctxt.parseDocument()
    doc = ctxt.doc()
    valid = ctxt.isValid()
    doc.freeDoc()
    if valid != 0:
        print("validity check failed")
        sys.exit(1)
    i = i - 1
del ctxt

# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
    print("OK")
else:
    print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
    libxml2.dumpMemory()
| gpl-3.0 |
pietern/caffe2 | caffe2/python/control_test.py | 4 | 13092 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import control, core, test_util, workspace
import logging
logger = logging.getLogger(__name__)
class TestControl(test_util.TestCase):
    """Tests for the caffe2.python.control flow helpers (For/While/Until,
    If/IfNot, Switch, BoolNet, condition combining/merging).

    setUp builds a set of small reusable nets that the tests assemble
    into execution steps:

      * init_net_: creates a counter and INT64 scalar constants N (=10)
        and 0, and zero-fills every count output blob so it exists
        before any counting net runs.
      * cnt_net_ / cnt_2_net_: bump the counter once / twice and expose
        the current count as an external output.
      * cond_net_ / not_cond_net_: output (count < N) / (count >= N).
      * true_cond_net_ / false_cond_net_: constant-true / constant-false
        condition nets (0 < N / 0 > N).
      * idle_net_: does nothing observable; filler for multi-net steps.
    """

    def setUp(self):
        super(TestControl, self).setUp()
        self.N_ = 10

        # Counter plus scalar constants shared by all condition nets.
        self.init_net_ = core.Net("init-net")
        cnt = self.init_net_.CreateCounter([], init_count=0)
        const_n = self.init_net_.ConstantFill(
            [], shape=[], value=self.N_, dtype=core.DataType.INT64)
        const_0 = self.init_net_.ConstantFill(
            [], shape=[], value=0, dtype=core.DataType.INT64)

        # Net that increments the counter once per run.
        self.cnt_net_ = core.Net("cnt-net")
        self.cnt_net_.CountUp([cnt])
        curr_cnt = self.cnt_net_.RetrieveCount([cnt])
        # Zero-fill the output in init so FetchBlob works even when the
        # counting net never ran.
        self.init_net_.ConstantFill(
            [], [curr_cnt], shape=[], value=0, dtype=core.DataType.INT64)
        self.cnt_net_.AddExternalOutput(curr_cnt)

        # Net that increments the counter twice per run.
        self.cnt_2_net_ = core.Net("cnt-2-net")
        self.cnt_2_net_.CountUp([cnt])
        self.cnt_2_net_.CountUp([cnt])
        curr_cnt_2 = self.cnt_2_net_.RetrieveCount([cnt])
        self.init_net_.ConstantFill(
            [], [curr_cnt_2], shape=[], value=0, dtype=core.DataType.INT64)
        self.cnt_2_net_.AddExternalOutput(curr_cnt_2)

        # Condition: count < N.
        self.cond_net_ = core.Net("cond-net")
        cond_blob = self.cond_net_.LT([curr_cnt, const_n])
        self.cond_net_.AddExternalOutput(cond_blob)

        # Negated condition: count >= N.
        self.not_cond_net_ = core.Net("not-cond-net")
        cond_blob = self.not_cond_net_.GE([curr_cnt, const_n])
        self.not_cond_net_.AddExternalOutput(cond_blob)

        # Always-true condition (0 < N).
        self.true_cond_net_ = core.Net("true-cond-net")
        true_blob = self.true_cond_net_.LT([const_0, const_n])
        self.true_cond_net_.AddExternalOutput(true_blob)

        # Always-false condition (0 > N).
        self.false_cond_net_ = core.Net("false-cond-net")
        false_blob = self.false_cond_net_.GT([const_0, const_n])
        self.false_cond_net_.AddExternalOutput(false_blob)

        # No-op filler net.
        self.idle_net_ = core.Net("idle-net")
        self.idle_net_.ConstantFill(
            [], shape=[], value=0, dtype=core.DataType.INT64)

    def CheckNetOutput(self, nets_and_expects):
        """
        Check the net output is expected
        nets_and_expects is a list of tuples (net, expect)
        """
        for net, expect in nets_and_expects:
            output = workspace.FetchBlob(
                net.Proto().external_output[-1])
            self.assertEqual(output, expect)

    def CheckNetAllOutput(self, net, expects):
        """
        Check the net output is expected
        expects is a list of bools.
        """
        self.assertEqual(len(net.Proto().external_output), len(expects))
        for i in range(len(expects)):
            output = workspace.FetchBlob(
                net.Proto().external_output[i])
            self.assertEqual(output, expects[i])

    def BuildAndRunPlan(self, step):
        """Run 'init' followed by `step` in a fresh plan; assert success."""
        plan = core.Plan("test")
        plan.AddStep(control.Do('init', self.init_net_))
        plan.AddStep(step)
        self.assertEqual(workspace.RunPlan(plan), True)

    def ForLoopTest(self, nets_or_steps):
        """Run `nets_or_steps` N_ times via control.For; count must be N_."""
        step = control.For('myFor', nets_or_steps, self.N_)
        self.BuildAndRunPlan(step)
        self.CheckNetOutput([(self.cnt_net_, self.N_)])

    def testForLoopWithNets(self):
        self.ForLoopTest(self.cnt_net_)
        self.ForLoopTest([self.cnt_net_, self.idle_net_])

    def testForLoopWithStep(self):
        step = control.Do('count', self.cnt_net_)
        self.ForLoopTest(step)
        self.ForLoopTest([step, self.idle_net_])

    def WhileLoopTest(self, nets_or_steps):
        """Loop while count < N_; final count must be N_."""
        step = control.While('myWhile', self.cond_net_, nets_or_steps)
        self.BuildAndRunPlan(step)
        self.CheckNetOutput([(self.cnt_net_, self.N_)])

    def testWhileLoopWithNet(self):
        self.WhileLoopTest(self.cnt_net_)
        self.WhileLoopTest([self.cnt_net_, self.idle_net_])

    def testWhileLoopWithStep(self):
        step = control.Do('count', self.cnt_net_)
        self.WhileLoopTest(step)
        self.WhileLoopTest([step, self.idle_net_])

    def UntilLoopTest(self, nets_or_steps):
        """Loop until count >= N_; final count must be N_."""
        step = control.Until('myUntil', self.not_cond_net_, nets_or_steps)
        self.BuildAndRunPlan(step)
        self.CheckNetOutput([(self.cnt_net_, self.N_)])

    def testUntilLoopWithNet(self):
        self.UntilLoopTest(self.cnt_net_)
        self.UntilLoopTest([self.cnt_net_, self.idle_net_])

    def testUntilLoopWithStep(self):
        step = control.Do('count', self.cnt_net_)
        self.UntilLoopTest(step)
        self.UntilLoopTest([step, self.idle_net_])

    def DoWhileLoopTest(self, nets_or_steps):
        """Body-first while loop; final count must be N_."""
        step = control.DoWhile('myDoWhile', self.cond_net_, nets_or_steps)
        self.BuildAndRunPlan(step)
        self.CheckNetOutput([(self.cnt_net_, self.N_)])

    def testDoWhileLoopWithNet(self):
        self.DoWhileLoopTest(self.cnt_net_)
        self.DoWhileLoopTest([self.idle_net_, self.cnt_net_])

    def testDoWhileLoopWithStep(self):
        step = control.Do('count', self.cnt_net_)
        self.DoWhileLoopTest(step)
        self.DoWhileLoopTest([self.idle_net_, step])

    def DoUntilLoopTest(self, nets_or_steps):
        """Body-first until loop; final count must be N_."""
        step = control.DoUntil('myDoUntil', self.not_cond_net_, nets_or_steps)
        self.BuildAndRunPlan(step)
        self.CheckNetOutput([(self.cnt_net_, self.N_)])

    def testDoUntilLoopWithNet(self):
        self.DoUntilLoopTest(self.cnt_net_)
        self.DoUntilLoopTest([self.cnt_net_, self.idle_net_])

    def testDoUntilLoopWithStep(self):
        step = control.Do('count', self.cnt_net_)
        self.DoUntilLoopTest(step)
        self.DoUntilLoopTest([self.idle_net_, step])

    def IfCondTest(self, cond_net, expect, cond_on_blob):
        """Run cnt_net_ under If; `expect` is the resulting count (1 when
        the branch is taken, 0 when it is not). `cond_on_blob` selects
        whether the condition is passed as a blob name or as a net."""
        if cond_on_blob:
            step = control.Do(
                'if-all',
                control.Do('count', cond_net),
                control.If('myIf', cond_net.Proto().external_output[-1],
                           self.cnt_net_))
        else:
            step = control.If('myIf', cond_net, self.cnt_net_)
        self.BuildAndRunPlan(step)
        self.CheckNetOutput([(self.cnt_net_, expect)])

    def testIfCondTrueOnNet(self):
        self.IfCondTest(self.true_cond_net_, 1, False)

    def testIfCondTrueOnBlob(self):
        self.IfCondTest(self.true_cond_net_, 1, True)

    def testIfCondFalseOnNet(self):
        self.IfCondTest(self.false_cond_net_, 0, False)

    def testIfCondFalseOnBlob(self):
        self.IfCondTest(self.false_cond_net_, 0, True)

    def IfElseCondTest(self, cond_net, cond_value, expect, cond_on_blob):
        """If/else: cnt_net_ runs on true, cnt_2_net_ on false; check the
        winning net's count equals `expect`."""
        if cond_value:
            run_net = self.cnt_net_
        else:
            run_net = self.cnt_2_net_
        if cond_on_blob:
            step = control.Do(
                'if-else-all',
                control.Do('count', cond_net),
                control.If('myIfElse', cond_net.Proto().external_output[-1],
                           self.cnt_net_, self.cnt_2_net_))
        else:
            step = control.If('myIfElse', cond_net,
                              self.cnt_net_, self.cnt_2_net_)
        self.BuildAndRunPlan(step)
        self.CheckNetOutput([(run_net, expect)])

    def testIfElseCondTrueOnNet(self):
        self.IfElseCondTest(self.true_cond_net_, True, 1, False)

    def testIfElseCondTrueOnBlob(self):
        self.IfElseCondTest(self.true_cond_net_, True, 1, True)

    def testIfElseCondFalseOnNet(self):
        self.IfElseCondTest(self.false_cond_net_, False, 2, False)

    def testIfElseCondFalseOnBlob(self):
        self.IfElseCondTest(self.false_cond_net_, False, 2, True)

    def IfNotCondTest(self, cond_net, expect, cond_on_blob):
        """Like IfCondTest but with IfNot (branch taken on false)."""
        if cond_on_blob:
            step = control.Do(
                'if-not',
                control.Do('count', cond_net),
                control.IfNot('myIfNot', cond_net.Proto().external_output[-1],
                              self.cnt_net_))
        else:
            step = control.IfNot('myIfNot', cond_net, self.cnt_net_)
        self.BuildAndRunPlan(step)
        self.CheckNetOutput([(self.cnt_net_, expect)])

    def testIfNotCondTrueOnNet(self):
        self.IfNotCondTest(self.true_cond_net_, 0, False)

    def testIfNotCondTrueOnBlob(self):
        self.IfNotCondTest(self.true_cond_net_, 0, True)

    def testIfNotCondFalseOnNet(self):
        self.IfNotCondTest(self.false_cond_net_, 1, False)

    def testIfNotCondFalseOnBlob(self):
        self.IfNotCondTest(self.false_cond_net_, 1, True)

    def IfNotElseCondTest(self, cond_net, cond_value, expect, cond_on_blob):
        """IfNot/else: cnt_2_net_ runs on true, cnt_net_ on false."""
        if cond_value:
            run_net = self.cnt_2_net_
        else:
            run_net = self.cnt_net_
        if cond_on_blob:
            step = control.Do(
                'if-not-else',
                control.Do('count', cond_net),
                control.IfNot('myIfNotElse',
                              cond_net.Proto().external_output[-1],
                              self.cnt_net_, self.cnt_2_net_))
        else:
            step = control.IfNot('myIfNotElse', cond_net,
                                 self.cnt_net_, self.cnt_2_net_)
        self.BuildAndRunPlan(step)
        self.CheckNetOutput([(run_net, expect)])

    def testIfNotElseCondTrueOnNet(self):
        self.IfNotElseCondTest(self.true_cond_net_, True, 2, False)

    def testIfNotElseCondTrueOnBlob(self):
        self.IfNotElseCondTest(self.true_cond_net_, True, 2, True)

    def testIfNotElseCondFalseOnNet(self):
        self.IfNotElseCondTest(self.false_cond_net_, False, 1, False)

    def testIfNotElseCondFalseOnBlob(self):
        self.IfNotElseCondTest(self.false_cond_net_, False, 1, True)

    def testSwitch(self):
        # Only the branch with a true condition runs (the second one here).
        step = control.Switch(
            'mySwitch',
            (self.false_cond_net_, self.cnt_net_),
            (self.true_cond_net_, self.cnt_2_net_)
        )
        self.BuildAndRunPlan(step)
        self.CheckNetOutput([(self.cnt_net_, 0), (self.cnt_2_net_, 2)])

    def testSwitchNot(self):
        # SwitchNot inverts each condition, so the first branch runs.
        step = control.SwitchNot(
            'mySwitchNot',
            (self.false_cond_net_, self.cnt_net_),
            (self.true_cond_net_, self.cnt_2_net_)
        )
        self.BuildAndRunPlan(step)
        self.CheckNetOutput([(self.cnt_net_, 1), (self.cnt_2_net_, 0)])

    def testBoolNet(self):
        bool_net = control.BoolNet(('a', True))
        step = control.Do('bool', bool_net)
        self.BuildAndRunPlan(step)
        self.CheckNetAllOutput(bool_net, [True])

        bool_net = control.BoolNet(('a', True), ('b', False))
        step = control.Do('bool', bool_net)
        self.BuildAndRunPlan(step)
        self.CheckNetAllOutput(bool_net, [True, False])

        # BoolNet also accepts a single list of (blob, value) pairs.
        bool_net = control.BoolNet([('a', True), ('b', False)])
        step = control.Do('bool', bool_net)
        self.BuildAndRunPlan(step)
        self.CheckNetAllOutput(bool_net, [True, False])

    def testCombineConditions(self):
        # combined by 'Or'
        combine_net = control.CombineConditions(
            'test', [self.true_cond_net_, self.false_cond_net_], 'Or')
        step = control.Do('combine',
                          self.true_cond_net_,
                          self.false_cond_net_,
                          combine_net)
        self.BuildAndRunPlan(step)
        self.CheckNetOutput([(combine_net, True)])

        # combined by 'And'
        combine_net = control.CombineConditions(
            'test', [self.true_cond_net_, self.false_cond_net_], 'And')
        step = control.Do('combine',
                          self.true_cond_net_,
                          self.false_cond_net_,
                          combine_net)
        self.BuildAndRunPlan(step)
        self.CheckNetOutput([(combine_net, False)])

    def testMergeConditionNets(self):
        # merged by 'Or'
        merge_net = control.MergeConditionNets(
            'test', [self.true_cond_net_, self.false_cond_net_], 'Or')
        step = control.Do('merge', merge_net)
        self.BuildAndRunPlan(step)
        self.CheckNetOutput([(merge_net, True)])

        # merged by 'And'
        merge_net = control.MergeConditionNets(
            'test', [self.true_cond_net_, self.false_cond_net_], 'And')
        step = control.Do('merge', merge_net)
        self.BuildAndRunPlan(step)
        self.CheckNetOutput([(merge_net, False)])
| apache-2.0 |
GoogleCloudPlatform/Data-Pipeline | app/src/csvmatchreplace/timestamp_test.py | 1 | 2460 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the timestamp processing functions."""
import logging
from src import basetest
from src.csvmatchreplace import timestamp
class TestTimestamp(basetest.TestCase):
    """Unit tests for the timestamp helper module."""

    def testConvertFmtToRe(self):
        """%-codes expand via the lookup table; literal text is escaped."""
        lookup = {'a': 'apple', 'b': 'bat', 'c': 'cat'}
        cases = (
            ('', ''),
            ('%a', 'apple'),
            ('%a ', r'apple\ '),
            (' %a', r'\ apple'),
            (' %a ', r'\ apple\ '),
            ('%a-%b-%c', r'apple\-bat\-cat'),
            ('%b%b%b', r'batbatbat'),
            # check that optional things are optional
            ('%b%b%b%c', r'batbatbat(cat)?'),
            ('%b%b%b%c%c', r'batbatbat(cat(cat)?)?')
        )
        for fmt, want in cases:
            got = timestamp.ConvertFmtToRe(fmt, lookup=lookup)
            self.assertEquals(want, got)

    def testLooksLikeTimestamp(self):
        """A variety of ISO-ish date/time strings must be recognized."""
        samples = ('1989-10-02 05:23:48',
                   '1958-06-24T12:18:35.5803',
                   '1988-08-15T19:06:56.235',
                   '2012-12-12',
                   '2012-12-12 19:45')
        for sample in samples:
            self.assertTrue(timestamp.LooksLikeTimestamp(sample),
                            '%s should look like a timestamp' % sample)

    def testNormalizeTimeStamp(self):
        """Normalization pads to full microsecond precision + trailing space."""
        cases = (
            ('1989-10-02 05:23:48', '1989-10-02 05:23:48.000000 '),
            ('2006-06-05', '2006-06-05 00:00:00.000000 '),
            ('10OCT2012:05:20:00.000000', '2012-10-10 05:20:00.000000 '),
            ('1983-01-28 15:12:31.488416', '1983-01-28 15:12:31.488416 '),
            (u'1971-09-01 04:00:30.942295 ', '1971-09-01 04:00:30.942295 ')
        )
        for raw, want in cases:
            self.assertEquals(want, timestamp.NormalizeTimeStamp(raw))

    def testOneOff(self):
        """Useful for --test_arg=TestTimestamp.testOneOff to test one thing."""
        already_normal = u'1971-09-01 04:00:30.942295 '
        self.assertEquals(already_normal,
                          timestamp.NormalizeTimeStamp(already_normal))
if __name__ == '__main__':
  # Run the test suite when this module is executed directly.
  basetest.main()
| apache-2.0 |
mdanielwork/intellij-community | python/lib/Lib/site-packages/django/contrib/formtools/preview.py | 229 | 6829 | """
Formtools Preview application.
"""
import cPickle as pickle
from django.conf import settings
from django.http import Http404
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.utils.hashcompat import md5_constructor
from django.utils.crypto import constant_time_compare
from django.contrib.formtools.utils import security_hash
AUTO_ID = 'formtools_%s' # Each form here uses this as its auto_id parameter.

class FormPreview(object):
    """Two-stage form workflow: display the form, then a preview of the
    validated submission protected by a security hash, and finally call
    ``done()`` with the cleaned data once the hash checks out.

    Subclasses must implement ``done()``; the other hook methods below
    are optional overrides.
    """
    preview_template = 'formtools/preview.html'
    form_template = 'formtools/form.html'

    # METHODS SUBCLASSES SHOULDN'T OVERRIDE ###################################

    def __init__(self, form):
        # form should be a Form class, not an instance.
        self.form, self.state = form, {}

    def __call__(self, request, *args, **kwargs):
        # The hidden 'stage' POST field drives dispatch: '1' -> preview,
        # '2' -> post; anything else falls back to the preview stage.
        stage = {'1': 'preview', '2': 'post'}.get(request.POST.get(self.unused_name('stage')), 'preview')
        self.parse_params(*args, **kwargs)
        try:
            # e.g. 'preview_get', 'post_post', ... built from stage + verb.
            method = getattr(self, stage + '_' + request.method.lower())
        except AttributeError:
            raise Http404
        return method(request)

    def unused_name(self, name):
        """
        Given a first-choice name, adds an underscore to the name until it
        reaches a name that isn't claimed by any field in the form.

        This is calculated rather than being hard-coded so that no field names
        are off-limits for use in the form.
        """
        while 1:
            try:
                f = self.form.base_fields[name]
            except KeyError:
                break # This field name isn't being used by the form.
            name += '_'
        return name

    def preview_get(self, request):
        "Displays the form"
        f = self.form(auto_id=self.get_auto_id(), initial=self.get_initial(request))
        return render_to_response(self.form_template,
            self.get_context(request, f),
            context_instance=RequestContext(request))

    def preview_post(self, request):
        "Validates the POST data. If valid, displays the preview page. Else, redisplays form."
        f = self.form(request.POST, auto_id=self.get_auto_id())
        context = self.get_context(request, f)
        if f.is_valid():
            self.process_preview(request, f, context)
            # Hash of the submitted data travels with the preview page so
            # post_post() can detect tampering between the two stages.
            context['hash_field'] = self.unused_name('hash')
            context['hash_value'] = self.security_hash(request, f)
            return render_to_response(self.preview_template, context, context_instance=RequestContext(request))
        else:
            return render_to_response(self.form_template, context, context_instance=RequestContext(request))

    def _check_security_hash(self, token, request, form):
        # Constant-time comparison avoids leaking hash prefixes via timing.
        expected = self.security_hash(request, form)
        if constant_time_compare(token, expected):
            return True
        else:
            # Fall back to Django 1.2 method, for compatibility with forms that
            # are in the middle of being used when the upgrade occurs. However,
            # we don't want to do this fallback if a subclass has provided their
            # own security_hash method - because they might have implemented a
            # more secure method, and this would punch a hole in that.
            # PendingDeprecationWarning <- left here to remind us that this
            # compatibility fallback should be removed in Django 1.5
            FormPreview_expected = FormPreview.security_hash(self, request, form)
            if expected == FormPreview_expected:
                # They didn't override security_hash, do the fallback:
                old_expected = security_hash(request, form)
                return constant_time_compare(token, old_expected)
            else:
                return False

    def post_post(self, request):
        "Validates the POST data. If valid, calls done(). Else, redisplays form."
        f = self.form(request.POST, auto_id=self.get_auto_id())
        if f.is_valid():
            if not self._check_security_hash(request.POST.get(self.unused_name('hash'), ''),
                                             request, f):
                return self.failed_hash(request) # Security hash failed.
            return self.done(request, f.cleaned_data)
        else:
            return render_to_response(self.form_template,
                self.get_context(request, f),
                context_instance=RequestContext(request))

    # METHODS SUBCLASSES MIGHT OVERRIDE IF APPROPRIATE ########################

    def get_auto_id(self):
        """
        Hook to override the ``auto_id`` kwarg for the form. Needed when
        rendering two form previews in the same template.
        """
        return AUTO_ID

    def get_initial(self, request):
        """
        Takes a request argument and returns a dictionary to pass to the form's
        ``initial`` kwarg when the form is being created from an HTTP get.
        """
        return {}

    def get_context(self, request, form):
        "Context for template rendering."
        return {'form': form, 'stage_field': self.unused_name('stage'), 'state': self.state}

    def parse_params(self, *args, **kwargs):
        """
        Given captured args and kwargs from the URLconf, saves something in
        self.state and/or raises Http404 if necessary.

        For example, this URLconf captures a user_id variable:

            (r'^contact/(?P<user_id>\d{1,6})/$', MyFormPreview(MyForm)),

        In this case, the kwargs variable in parse_params would be
        {'user_id': 32} for a request to '/contact/32/'. You can use that
        user_id to make sure it's a valid user and/or save it for later, for
        use in done().
        """
        pass

    def process_preview(self, request, form, context):
        """
        Given a validated form, performs any extra processing before displaying
        the preview page, and saves any extra data in context.
        """
        pass

    def security_hash(self, request, form):
        """
        Calculates the security hash for the given HttpRequest and Form instances.

        Subclasses may want to take into account request-specific information,
        such as the IP address.
        """
        return security_hash(request, form)

    def failed_hash(self, request):
        "Returns an HttpResponse in the case of an invalid security hash."
        return self.preview_post(request)

    # METHODS SUBCLASSES MUST OVERRIDE ########################################

    def done(self, request, cleaned_data):
        """
        Does something with the cleaned_data and returns an
        HttpResponseRedirect.
        """
        raise NotImplementedError('You must define a done() method on your %s subclass.' % self.__class__.__name__)
| apache-2.0 |
willprice/weboob | modules/nova/module.py | 7 | 3545 | # -*- coding: utf-8 -*-
# Copyright(C) 2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from cStringIO import StringIO
from weboob.capabilities.radio import CapRadio, Radio
from weboob.capabilities.audiostream import BaseAudioStream
from weboob.tools.capabilities.streaminfo import StreamInfo
from weboob.capabilities.collection import CapCollection
from weboob.tools.backend import Module
from weboob.deprecated.browser import StandardBrowser
from weboob.deprecated.browser.parsers import get_parser
__all__ = ['NovaModule']


class NovaModule(Module, CapRadio, CapCollection):
    """Weboob backend exposing the Radio Nova stream and its current track."""
    NAME = 'nova'
    MAINTAINER = u'Romain Bignon'
    EMAIL = 'romain@weboob.org'
    VERSION = '1.1'
    DESCRIPTION = u'Nova French radio'
    LICENSE = 'AGPLv3+'
    BROWSER = StandardBrowser

    # Static registry: radio id -> (title, description, stream URL).
    _RADIOS = {'nova': (u'Radio Nova', u'Radio nova', u'http://broadcast.infomaniak.net:80/radionova-high.mp3'),
               }

    def create_default_browser(self):
        # The on-air endpoint returns JSON, hence the json parser.
        return self.create_browser(parser='json')

    def iter_resources(self, objs, split_path):
        """Yield every known Radio when radios are requested at the root."""
        if Radio in objs:
            self._restrict_level(split_path)

            for id in self._RADIOS.iterkeys():
                yield self.get_radio(id)

    def iter_radios_search(self, pattern):
        """Case-insensitive substring search over title and description."""
        for radio in self.iter_resources((Radio, ), []):
            if pattern.lower() in radio.title.lower() or pattern.lower() in radio.description.lower():
                yield radio

    def get_radio(self, radio):
        """Build a fully-populated Radio object (metadata, current track,
        single 128kbps MP3 stream), or None for an unknown id."""
        if not isinstance(radio, Radio):
            radio = Radio(radio)

        if radio.id not in self._RADIOS:
            return None

        title, description, url = self._RADIOS[radio.id]
        radio.title = title
        radio.description = description

        artist, title = self.get_current()
        current = StreamInfo(0)
        current.who = artist
        current.what = title
        radio.current = current

        stream = BaseAudioStream(0)
        stream.bitrate=128
        stream.format=u'mp3'
        stream.title = u'128kbits/s'
        stream.url = url
        radio.streams = [stream]
        return radio

    def get_current(self):
        """Fetch the currently playing (artist, title) from the on-air API.

        The API returns JSON whose 'markup' field is an HTML snippet; the
        artist/title are scraped from its div elements.
        """
        doc = self.browser.location('http://www.novaplanet.com/radionova/ontheair?origin=/')
        html = doc['track']['markup']
        parser = get_parser()()
        doc = parser.parse(StringIO(html))
        artist = u' '.join([txt.strip() for txt in doc.xpath('//div[@class="artist"]')[0].itertext()])
        title = u' '.join([txt.strip() for txt in doc.xpath('//div[@class="title"]')[0].itertext()])
        return unicode(artist).strip(), unicode(title).strip()

    def fill_radio(self, radio, fields):
        # Lazy-fill hook: only refresh the 'current' track on demand.
        if 'current' in fields:
            if not radio.current:
                radio.current = StreamInfo(0)
            radio.current.who, radio.current.what = self.get_current()
        return radio

    OBJECTS = {Radio: fill_radio}
| agpl-3.0 |
semonte/intellij-community | python/lib/Lib/site-packages/django/contrib/messages/storage/fallback.py | 627 | 2171 | from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import CookieStorage
from django.contrib.messages.storage.session import SessionStorage
class FallbackStorage(BaseStorage):
    """
    Tries to store all messages in the first backend, storing any unstored
    messages in each subsequent backend.
    """
    # Tried in order: cookie first (cheap), session as the fallback.
    storage_classes = (CookieStorage, SessionStorage)

    def __init__(self, *args, **kwargs):
        super(FallbackStorage, self).__init__(*args, **kwargs)
        # One instance of each backend class, in declaration order.
        self.storages = [storage_class(*args, **kwargs)
                         for storage_class in self.storage_classes]
        # Backends that yielded messages in _get(); they must be flushed
        # on store even when nothing remains to be saved.
        self._used_storages = set()

    def _get(self, *args, **kwargs):
        """
        Gets a single list of messages from all storage backends.
        """
        all_messages = []
        for storage in self.storages:
            messages, all_retrieved = storage._get()
            # If the backend hasn't been used, no more retrieval is necessary.
            if messages is None:
                break
            if messages:
                self._used_storages.add(storage)
            all_messages.extend(messages)
            # If this storage class contained all the messages, no further
            # retrieval is necessary
            if all_retrieved:
                break
        return all_messages, all_retrieved

    def _store(self, messages, response, *args, **kwargs):
        """
        Stores the messages, returning any unstored messages after trying all
        backends.

        For each storage backend, any messages not stored are passed on to the
        next backend.
        """
        for storage in self.storages:
            if messages:
                # remove_oldest=False: overflow spills to the next backend
                # instead of silently dropping the oldest messages.
                messages = storage._store(messages, response,
                                          remove_oldest=False)
            # Even if there are no more messages, continue iterating to ensure
            # storages which contained messages are flushed.
            elif storage in self._used_storages:
                storage._store([], response)
                self._used_storages.remove(storage)
        return messages
| apache-2.0 |
eusi/MissionPlanerHM | Lib/site-packages/scipy/stats/stats.py | 53 | 122717 | # Copyright (c) Gary Strangman. All rights reserved
#
# Disclaimer
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
#
# Heavily adapted for use by SciPy 2002 by Travis Oliphant
"""
stats.py module
#################################################
####### Written by: Gary Strangman ###########
#################################################
A collection of basic statistical functions for python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package where they work on arbitrary sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful
CENTRAL TENDENCY: gmean (geometric mean)
hmean (harmonic mean)
medianscore
mode
MOMENTS: moment
variation
skew
kurtosis
normaltest (for arrays only)
MOMENTS HANDLING NAN: nanmean
nanmedian
nanstd
ALTERED VERSIONS: tmean
tvar
tstd
tsem
describe
FREQUENCY STATS: freqtable
itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
VARIABILITY: obrientransform
signaltonoise (for arrays only)
sem
TRIMMING FCNS: threshold (for arrays only)
trimboth
trim1
around (round all vals to 'n' decimals)
CORRELATION FCNS: paired
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
linregress
INFERENTIAL STATS: ttest_1samp
ttest_ind
ttest_rel
chisquare
ks_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
PROBABILITY CALCS: chisqprob
zprob
fprob
betai
## Note that scipy.stats.distributions has many more statistical probability
## functions defined.
ANOVA FUNCTIONS: f_oneway
f_value
SUPPORT FUNCTIONS: ss
square_of_sums
shellsort
rankdata
References
----------
[CRCProbStat2000]_
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
## CHANGE LOG:
## ===========
## since 2001-06-25 ... see scipy SVN changelog
## 05-11-29 ... fixed default axis to be 0 for consistency with scipy;
## cleanup of redundant imports, dead code, {0,1} -> booleans
## 02-02-10 ... require Numeric, eliminate "list-only" functions
## (only 1 set of functions now and no Dispatch class),
## removed all references to aXXXX functions.
## 00-04-13 ... pulled all "global" statements, except from aanova()
## added/fixed lots of documentation, removed io.py dependency
## changed to version 0.5
## 99-11-13 ... added asign() function
## 99-11-01 ... changed version to 0.4 ... enough incremental changes now
## 99-10-25 ... added acovariance and acorrelation functions
## 99-10-10 ... fixed askew/akurtosis to avoid divide-by-zero errors
## added aglm function (crude, but will be improved)
## 99-10-04 ... upgraded acumsum, ass, asummult, asamplevar, var, etc. to
## all handle lists of 'dimension's and keepdims
## REMOVED ar0, ar2, ar3, ar4 and replaced them with around
## reinserted fixes for abetai to avoid math overflows
## 99-09-05 ... rewrote achisqprob/aerfcc/aksprob/afprob/abetacf/abetai to
## handle multi-dimensional arrays (whew!)
## 99-08-30 ... fixed l/amoment, l/askew, l/akurtosis per D'Agostino (1990)
## added anormaltest per same reference
## re-wrote azprob to calc arrays of probs all at once
## 99-08-22 ... edited attest_ind printing section so arrays could be rounded
## 99-08-19 ... fixed amean and aharmonicmean for non-error(!) overflow on
## short/byte arrays (mean of #s btw 100-300 = -150??)
## 99-08-09 ... fixed asum so that the None case works for Byte arrays
## 99-08-08 ... fixed 7/3 'improvement' to handle t-calcs on N-D arrays
## 99-07-03 ... improved attest_ind, attest_rel (zero-division errortrap)
## 99-06-24 ... fixed bug(?) in attest_ind (n1=a.shape[0])
## 04/11/99 ... added asignaltonoise, athreshold functions, changed all
## max/min in array section to maximum/minimum,
## fixed square_of_sums to prevent integer overflow
## 04/10/99 ... !!! Changed function name ... sumsquared ==> square_of_sums
## 03/18/99 ... Added ar0, ar2, ar3 and ar4 rounding functions
## 02/28/99 ... Fixed aobrientransform to return an array rather than a list
## 01/15/99 ... Essentially ceased updating list-versions of functions (!!!)
## 01/13/99 ... CHANGED TO VERSION 0.3
## fixed bug in a/lmannwhitneyu p-value calculation
## 12/31/98 ... fixed variable-name bug in ldescribe
## 12/19/98 ... fixed bug in findwithin (fcns needed pstat. prefix)
## 12/16/98 ... changed amedianscore to return float (not array) for 1 score
## 12/14/98 ... added atmin and atmax functions
## removed umath from import line (not needed)
## l/ageometricmean modified to reduce chance of overflows (take
## nth root first, then multiply)
## 12/07/98 ... added __version__variable (now 0.2)
## removed all 'stats.' from anova() fcn
## 12/06/98 ... changed those functions (except shellsort) that altered
## arguments in-place ... cumsum, ranksort, ...
## updated (and fixed some) doc-strings
## 12/01/98 ... added anova() function (requires NumPy)
## incorporated Dispatch class
## 11/12/98 ... added functionality to amean, aharmonicmean, ageometricmean
## added 'asum' function (added functionality to add.reduce)
## fixed both moment and amoment (two errors)
## changed name of skewness and askewness to skew and askew
## fixed (a)histogram (which sometimes counted points <lowerlimit)
# Standard library imports.
import warnings
import math
# friedmanchisquare patch uses python sum
pysum = sum # save it before it gets overwritten
# Scipy imports.
from numpy import array, asarray, dot, ma, zeros, sum
import scipy.special as special
import scipy.linalg as linalg
import numpy as np
import futil
import distributions
# Local imports.
import _support
from _support import _chk_asarray, _chk2_asarray
# Public API of this module; `from scipy.stats.stats import *` exposes
# exactly these names.
__all__ = ['gmean', 'hmean', 'cmedian', 'mode',
           'tmean', 'tvar', 'tmin', 'tmax', 'tstd', 'tsem',
           'moment', 'variation', 'skew', 'kurtosis', 'describe',
           'skewtest', 'kurtosistest', 'normaltest',
           'itemfreq', 'scoreatpercentile', 'percentileofscore',
           'histogram', 'histogram2', 'cumfreq', 'relfreq',
           'obrientransform', 'signaltonoise', 'sem', 'zmap', 'zscore',
           'threshold', 'sigmaclip', 'trimboth', 'trim1', 'trim_mean',
           'f_oneway', 'pearsonr', 'fisher_exact',
           'spearmanr', 'pointbiserialr', 'kendalltau', 'linregress',
           'ttest_1samp', 'ttest_ind', 'ttest_rel',
           'kstest', 'chisquare', 'ks_2samp', 'mannwhitneyu',
           'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
           'zprob', 'chisqprob', 'ksprob', 'fprob', 'betai',
           'glm', 'f_value_wilks_lambda',
           'f_value', 'f_value_multivariate',
           'ss', 'square_of_sums',
           'fastsort', 'rankdata',
           'nanmean', 'nanstd', 'nanmedian',
          ]
def find_repeats(arr):
    """Return the repeated values in `arr` together with their counts.

    Delegates the search to the compiled helper ``futil.dfreps``, which
    also reports how many distinct repeated values were found; only that
    many leading entries of its output arrays are meaningful.

    Returns
    -------
    (repeats, repeat_count) : tuple of ndarrays
    """
    repeated_vals, repeat_counts, nfound = futil.dfreps(arr)
    return repeated_vals[:nfound], repeat_counts[:nfound]
#######
### NAN friendly functions
########
def nanmean(x, axis=0):
    """
    Compute the mean over the given axis ignoring nans.

    Parameters
    ----------
    x : ndarray
        Input array.
    axis : int, optional
        Axis along which the mean is computed. Default is 0, i.e. the
        first axis.

    Returns
    -------
    m : float
        The mean of `x`, ignoring nans.

    See Also
    --------
    nanstd, nanmedian

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.linspace(0, 4, 3)
    >>> a[-1] = np.nan
    >>> stats.nanmean(a)
    1.0
    """
    x, axis = _chk_asarray(x, axis)
    x = x.copy()
    total = x.shape[axis]
    nan_mask = np.isnan(x)
    # Fraction of entries along `axis` that are valid (non-nan). Zeroing
    # the nans and dividing the resulting mean by this fraction yields the
    # mean over valid entries only.
    valid_frac = 1.0 - np.sum(nan_mask, axis) * 1.0 / total
    x[nan_mask] = 0
    return np.mean(x, axis) / valid_frac
def nanstd(x, axis=0, bias=False):
    """
    Compute the standard deviation over the given axis, ignoring nans.

    Parameters
    ----------
    x : array_like
        Input array.
    axis : int or None, optional
        Axis along which the standard deviation is computed. Default is 0.
        If None, compute over the whole array `x`.
    bias : bool, optional
        If True, the biased (normalized by N) definition is used. If False
        (default), the unbiased definition is used.

    Returns
    -------
    s : float
        The standard deviation.

    See Also
    --------
    nanmean, nanmedian

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(10, dtype=float)
    >>> a[1:3] = np.nan
    >>> np.std(a)
    nan
    >>> stats.nanstd(a)
    2.9154759474226504
    >>> stats.nanstd(a.reshape(2, 5), axis=1)
    array([ 2.0817,  1.5811])
    >>> stats.nanstd(a.reshape(2, 5), axis=None)
    2.9154759474226504
    """
    x, axis = _chk_asarray(x,axis)
    x = x.copy()
    Norig = x.shape[axis]
    # Count of nans along `axis`; multiplied by 1.0 so `n` below is float.
    Nnan = np.sum(np.isnan(x),axis)*1.0
    n = Norig - Nnan
    # Zero out nans so ordinary sums can be used; the spurious
    # contributions of these zeros are subtracted out below.
    x[np.isnan(x)] = 0.
    m1 = np.sum(x,axis)/n
    # For axis >= 1 the reduced mean must be re-expanded so it broadcasts
    # against `x`; for axis 0 (including a raveled array from axis=None)
    # trailing-dimension broadcasting already lines the shapes up.
    if axis:
        d = (x - np.expand_dims(m1, axis))**2.0
    else:
        d = (x - m1)**2.0
    # Each nan position (now zero) contributed (0 - m1)**2 == m1**2 to the
    # sum of squared deviations; remove those Nnan spurious terms.
    m2 = np.sum(d,axis)-(m1*m1)*Nnan
    if bias:
        m2c = m2 / n  # biased: normalize by number of valid observations
    else:
        m2c = m2 / (n - 1.)  # unbiased: n-1 degrees of freedom
    return np.sqrt(m2c)
def _nanmedian(arr1d): # This only works on 1d arrays
"""Private function for rank a arrays. Compute the median ignoring Nan.
Parameters
----------
arr1d : ndarray
Input array, of rank 1.
Results
-------
m : float
The median.
"""
cond = 1-np.isnan(arr1d)
x = np.sort(np.compress(cond,arr1d,axis=-1))
if x.size == 0:
return np.nan
return np.median(x)
def nanmedian(x, axis=0):
    """
    Compute the median along the given axis ignoring nan values.

    Parameters
    ----------
    x : array_like
        Input array.
    axis : int, optional
        Axis along which the median is computed. Default is 0, i.e. the
        first axis.

    Returns
    -------
    m : float
        The median of `x` along `axis`.

    See Also
    --------
    nanstd, nanmean

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([0, 3, 1, 5, 5, np.nan])
    >>> stats.nanmedian(a)
    array(3.0)
    >>> b = np.array([0, 3, 1, 5, 5, np.nan, 5])
    >>> stats.nanmedian(b)
    array(4.0)
    """
    x, axis = _chk_asarray(x, axis)
    if x.ndim == 0:
        # 0-d input: nothing to reduce over, just return the scalar.
        return float(x.item())
    # Apply the 1-d nan-ignoring median along the requested axis of a copy
    # (the helper must not mutate the caller's data).
    result = np.apply_along_axis(_nanmedian, axis, x.copy())
    if result.ndim == 0:
        # Reduction collapsed to a scalar; unwrap the rank-0 array.
        result = float(result.item())
    return result
#####################################
######## CENTRAL TENDENCY ########
#####################################
def gmean(a, axis=0, dtype=None):
    """
    Compute the geometric mean along the specified axis.

    Returns the geometric average of the array elements, i.e. the n-th
    root of (x1 * x2 * ... * xn), computed as exp(mean(log(a))).

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : int, optional, default axis=0
        Axis along which the geometric mean is computed.
    dtype : dtype, optional
        Type of the returned array and of the accumulator in which the
        elements are summed. If not specified, defaults to the dtype of
        `a` (promoted to the default platform integer for narrow ints).

    Returns
    -------
    gmean : ndarray, see dtype parameter above

    See Also
    --------
    numpy.mean : Arithmetic average
    numpy.average : Weighted average
    hmean : Harmonic mean

    Notes
    -----
    float64 intermediate and return values are used for integer inputs.
    Use masked arrays to ignore any non-finite values in the input or that
    arise in the calculations, since masked arrays automatically mask any
    non-finite values.
    """
    if not isinstance(a, np.ndarray):
        # Plain sequence: convert, honoring the requested dtype.
        arr = np.array(a, dtype=dtype)
    elif dtype:
        # Re-wrap the existing ndarray with the requested accumulator
        # dtype, preserving masked-ness.
        caster = np.ma.asarray if isinstance(a, np.ma.MaskedArray) else np.asarray
        arr = caster(a, dtype=dtype)
    else:
        arr = a
    log_a = np.log(arr)
    # Geometric mean == exp of the arithmetic mean of the logs.
    return np.exp(log_a.mean(axis=axis))
def hmean(a, axis=0, dtype=None):
    """
    Calculates the harmonic mean along the specified axis.

    That is: n / (1/x1 + 1/x2 + ... + 1/xn)

    Parameters
    ----------
    a : array_like
        Input array, masked array or object that can be converted to an array.
    axis : int, optional, default axis=0
        Axis along which the harmonic mean is computed.
    dtype : dtype, optional
        Type of the returned array and of the accumulator in which the
        elements are summed. If `dtype` is not specified, it defaults to the
        dtype of `a`, unless `a` has an integer `dtype` with a precision less
        than that of the default platform integer. In that case, the default
        platform integer is used.

    Returns
    -------
    hmean : ndarray,
        see `dtype` parameter above

    Raises
    ------
    ValueError
        If any element of `a` is not strictly positive.

    See Also
    --------
    numpy.mean : Arithmetic average
    numpy.average : Weighted average
    gmean: Geometric mean

    Notes
    -----
    The harmonic mean is computed over a single dimension of the input
    array, axis=0 by default, or all values in the array if axis=None.
    float64 intermediate and return values are used for integer inputs.
    Use masked arrays to ignore any non-finite values in the input or that
    arise in the calculations such as Not a Number and infinity.
    """
    if not isinstance(a, np.ndarray):
        a = np.array(a, dtype=dtype)
    if np.all(a > 0):  # Harmonic mean only defined if greater than zero
        if isinstance(a, np.ma.MaskedArray):
            # Count only unmasked entries along the axis.
            size = a.count(axis)
        else:
            # `is None`, not `== None`: identity comparison is the correct
            # (and robust) way to test for None.
            if axis is None:
                a = a.ravel()
                size = a.shape[0]
            else:
                size = a.shape[axis]
        return size / np.sum(1.0 / a, axis=axis, dtype=dtype)
    else:
        raise ValueError("Harmonic mean only defined if all elements greater than zero")
def cmedian(a, numbins=1000):
    """
    Returns the computed median value of an array.

    All of the values in the input array are used. The input array is first
    histogrammed using `numbins` bins. The bin containing the median is
    selected by searching for the halfway point in the cumulative histogram.
    The median value is then computed by linearly interpolating across that
    bin.

    Parameters
    ----------
    a : array_like
        Input array.
    numbins : int
        The number of bins used to histogram the data. More bins give greater
        accuracy to the approximation of the median.

    Returns
    -------
    cmedian : float
        An approximation of the median.

    References
    ----------
    [CRCProbStat2000]_ Section 2.2.6

    .. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.
    """
    # TODO: numpy.median() always seems to be a better choice.
    # A better version of this function would take already-histogrammed data
    # and compute the median from that.
    a = np.ravel(a)
    n = float(len(a))
    # We will emulate the (fixed!) bounds selection scheme used by
    # scipy.stats.histogram(), but use numpy.histogram() since it is faster.
    amin = a.min()
    amax = a.max()
    # Pad the range by half a bin on each side so the extreme data values
    # fall inside the first/last bins rather than on their edges.
    estbinwidth = (amax - amin)/float(numbins - 1)
    binsize = (amax - amin + estbinwidth) / float(numbins)
    (hist, bins) = np.histogram(a, numbins,
                                range=(amin-binsize*0.5, amax+binsize*0.5))
    # Recompute binsize from the actual edges numpy produced.
    binsize = bins[1] - bins[0]
    cumhist = np.cumsum(hist)  # make cumulative histogram
    # Index of the first bin whose cumulative count reaches n/2.
    cfbin = np.searchsorted(cumhist, n/2.0)
    LRL = bins[cfbin]  # get lower read limit of that bin
    if cfbin == 0:
        cfbelow = 0.0
    else:
        cfbelow = cumhist[cfbin-1]  # cum. freq. below bin
    freq = hist[cfbin]  # frequency IN the 50%ile bin
    # Linear interpolation across the median bin.
    median = LRL + ((n/2.0-cfbelow)/float(freq))*binsize  # MEDIAN
    return median
def mode(a, axis=0):
    """
    Returns an array of the modal (most common) value in the passed array.

    If there is more than one such value, only the first (smallest) is
    returned. The bin-count for the modal bins is also returned.

    Parameters
    ----------
    a : array_like
        n-dimensional array of which to find mode(s).
    axis : int, optional
        Axis along which to operate. Default is 0, i.e. the first axis.

    Returns
    -------
    vals : ndarray
        Array of modal values.
    counts : ndarray
        Array of counts for each mode.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([[6, 8, 3, 0],
                      [3, 2, 1, 7],
                      [8, 1, 8, 4],
                      [5, 3, 0, 5],
                      [4, 7, 5, 9]])
    >>> stats.mode(a)
    (array([[ 3.,  1.,  0.,  0.]]), array([[ 1.,  1.,  1.,  1.]]))
    """
    a, axis = _chk_asarray(a, axis)
    candidates = np.unique(np.ravel(a))  # every distinct value, sorted
    out_shape = list(a.shape)
    out_shape[axis] = 1
    best_vals = np.zeros(out_shape)
    best_counts = np.zeros(out_shape)
    # One pass per distinct value: count its occurrences along `axis` and
    # keep it wherever it strictly beats the current best (strict '>' means
    # ties go to the smaller value, seen first in sorted order).
    for candidate in candidates:
        occurrences = np.expand_dims(np.sum(a == candidate, axis), axis)
        best_vals = np.where(occurrences > best_counts, candidate, best_vals)
        best_counts = np.maximum(occurrences, best_counts)
    return best_vals, best_counts
def mask_to_limits(a, limits, inclusive):
    """Mask an array for values outside of given limits.

    This is primarily a utility function.

    Parameters
    ----------
    a : array
    limits : (float or None, float or None)
        A tuple consisting of the (lower limit, upper limit). Values in the
        input array less than the lower limit or greater than the upper limit
        will be masked out. None implies no limit.
    inclusive : (bool, bool)
        A tuple consisting of the (lower flag, upper flag). These flags
        determine whether values exactly equal to lower or upper are allowed.

    Returns
    -------
    A MaskedArray.

    Raises
    ------
    A ValueError if there are no values within the given limits.
    """
    (low, high) = limits
    (low_ok, high_ok) = inclusive
    masked = ma.MaskedArray(a)
    if low is not None:
        # An inclusive bound keeps the bound value itself, so mask only
        # strictly-less; an exclusive bound masks the bound value too.
        mask_below = ma.masked_less if low_ok else ma.masked_less_equal
        masked = mask_below(masked, low)
    if high is not None:
        mask_above = ma.masked_greater if high_ok else ma.masked_greater_equal
        masked = mask_above(masked, high)
    if masked.count() == 0:
        raise ValueError("No array values within given limits")
    return masked
def tmean(a, limits=None, inclusive=(True, True)):
    """
    Compute the trimmed mean.

    This function finds the arithmetic mean of given values, ignoring values
    outside the given `limits`.

    Parameters
    ----------
    a : array_like
        array of values
    limits : None or (lower limit, upper limit), optional
        Values in the input array less than the lower limit or greater than
        the upper limit will be ignored. When limits is None, then all values
        are used. Either of the limit values in the tuple can also be None
        representing a half-open interval. The default value is None.
    inclusive : (bool, bool), optional
        A tuple consisting of the (lower flag, upper flag). These flags
        determine whether values exactly equal to the lower or upper limits
        are included. The default value is (True, True).

    Returns
    -------
    tmean : float
    """
    a = asarray(a)
    if issubclass(a.dtype.type, np.integer):
        # Promote integer arrays to float; float arrays are left alone to
        # preserve their precision.
        a = a.astype(float)
    if limits is None:
        # Nothing to trim: plain mean over the flattened array.
        return np.mean(a, None)
    trimmed = mask_to_limits(a.ravel(), limits, inclusive)
    return trimmed.mean()
def masked_var(am):
    # Unbiased (n-1 denominator) variance of a masked array.
    center = am.mean()
    sum_sq = ma.add.reduce((am - center) ** 2)
    return sum_sq / (am.count() - 1.0)
def tvar(a, limits=None, inclusive=(1,1)):
    """
    Compute the trimmed variance.

    This function computes the sample variance of an array of values,
    while ignoring values which are outside of given `limits`.

    Parameters
    ----------
    a : array_like
        array of values
    limits : None or (lower limit, upper limit), optional
        Values in the input array less than the lower limit or greater than
        the upper limit will be ignored. When limits is None, then all values
        are used. Either of the limit values in the tuple can also be None
        representing a half-open interval. The default value is None.
    inclusive : (bool, bool), optional
        A tuple consisting of the (lower flag, upper flag). These flags
        determine whether values exactly equal to the lower or upper limits
        are included. The default value is (True, True).

    Returns
    -------
    tvar : float
    """
    a = asarray(a)
    a = a.astype(float).ravel()
    if limits is None:
        # Untrimmed: rescale the biased var() into the unbiased estimate.
        n = len(a)
        return a.var() * (n / (n - 1.))
    return masked_var(mask_to_limits(a, limits, inclusive))
def tmin(a, lowerlimit=None, axis=0, inclusive=True):
    """
    Compute the trimmed minimum.

    This function finds the minimum value of an array `a` along the
    specified axis, but only considering values greater than a specified
    lower limit.

    Parameters
    ----------
    a : array_like
        array of values
    lowerlimit : None or float, optional
        Values in the input array less than the given limit will be ignored.
        When lowerlimit is None, then all values are used. The default value
        is None.
    axis : None or int, optional
        Operate along this axis. None means to use the flattened array and
        the default is zero.
    inclusive : {True, False}, optional
        This flag determines whether values exactly equal to the lower limit
        are included. The default value is True.

    Returns
    -------
    tmin : float
    """
    a, axis = _chk_asarray(a, axis)
    masked = mask_to_limits(a, (lowerlimit, None), (inclusive, False))
    return ma.minimum.reduce(masked, axis)
def tmax(a, upperlimit, axis=0, inclusive=True):
    """
    Compute the trimmed maximum.

    This function computes the maximum value of an array along a given axis,
    while ignoring values larger than a specified upper limit.

    Parameters
    ----------
    a : array_like
        array of values
    upperlimit : None or float, optional
        Values in the input array greater than the given limit will be
        ignored. When upperlimit is None, then all values are used. The
        default value is None.
    axis : None or int, optional
        Operate along this axis. None means to use the flattened array and
        the default is zero.
    inclusive : {True, False}, optional
        This flag determines whether values exactly equal to the upper limit
        are included. The default value is True.

    Returns
    -------
    tmax : float
    """
    a, axis = _chk_asarray(a, axis)
    masked = mask_to_limits(a, (None, upperlimit), (False, inclusive))
    return ma.maximum.reduce(masked, axis)
def tstd(a, limits=None, inclusive=(1,1)):
    """
    Compute the trimmed sample standard deviation.

    This function finds the sample standard deviation of given values,
    ignoring values outside the given `limits`: the square root of the
    trimmed variance computed by `tvar`.

    Parameters
    ----------
    a : array_like
        array of values
    limits : None or (lower limit, upper limit), optional
        Values in the input array less than the lower limit or greater than
        the upper limit will be ignored. When limits is None, then all values
        are used. Either of the limit values in the tuple can also be None
        representing a half-open interval. The default value is None.
    inclusive : (bool, bool), optional
        A tuple consisting of the (lower flag, upper flag). These flags
        determine whether values exactly equal to the lower or upper limits
        are included. The default value is (True, True).

    Returns
    -------
    tstd : float
    """
    trimmed_variance = tvar(a, limits, inclusive)
    return np.sqrt(trimmed_variance)
def tsem(a, limits=None, inclusive=(True,True)):
    """
    Compute the trimmed standard error of the mean.

    This function finds the standard error of the mean for given
    values, ignoring values outside the given `limits`.

    Parameters
    ----------
    a : array_like
        array of values
    limits : None or (lower limit, upper limit), optional
        Values in the input array less than the lower limit or greater than
        the upper limit will be ignored. When limits is None, then all values
        are used. Either of the limit values in the tuple can also be None
        representing a half-open interval. The default value is None.
    inclusive : (bool, bool), optional
        A tuple consisting of the (lower flag, upper flag). These flags
        determine whether values exactly equal to the lower or upper limits
        are included. The default value is (True, True).

    Returns
    -------
    tsem : float

    Notes
    -----
    For the untrimmed branch the (biased) population standard deviation is
    used, matching the historical behaviour of this function.
    """
    a = np.asarray(a).ravel()
    if limits is None:
        n = float(len(a))
        return a.std() / np.sqrt(n)
    am = mask_to_limits(a, limits, inclusive)
    sd = np.sqrt(masked_var(am))
    # BUG FIX: the standard error of the mean is sd / sqrt(n). The previous
    # code divided by the count itself (sd / n), understating the error by
    # a factor of sqrt(n); later SciPy releases apply the same correction.
    return sd / np.sqrt(am.count())
#####################################
############ MOMENTS #############
#####################################
def moment(a, moment=1, axis=0):
    """
    Calculates the nth moment about the mean for a sample.

    Generally used to calculate coefficients of skewness and kurtosis.

    Parameters
    ----------
    a : array_like
        data
    moment : int
        order of central moment that is returned
    axis : int or None
        Axis along which the central moment is computed. If None, then the
        data array is raveled. The default axis is zero.

    Returns
    -------
    n-th central moment : ndarray or float
        The appropriate moment along the given axis or over all values if
        axis is None. The denominator for the moment calculation is the
        number of observations, no degrees of freedom correction is done.
    """
    a, axis = _chk_asarray(a, axis)
    if moment == 1:
        # The first moment about the mean is identically zero; produce a
        # result of the right shape without touching the data.
        out_shape = list(a.shape)
        del out_shape[axis]
        if out_shape:
            return np.zeros(out_shape, dtype=float)
        # 1-D input reduces to a scalar rather than a rank-0 array.
        return np.float64(0.0)
    mn = np.expand_dims(np.mean(a, axis), axis)
    deviations = np.power(a - mn, moment)
    return np.mean(deviations, axis)
def variation(a, axis=0):
    """
    Computes the coefficient of variation, the ratio of the biased standard
    deviation to the mean.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None
        Axis along which to calculate the coefficient of variation.

    Returns
    -------
    variation : ndarray or float
        std(a, axis) / mean(a, axis).

    References
    ----------
    [CRCProbStat2000]_ Section 2.2.20

    .. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.
    """
    a, axis = _chk_asarray(a, axis)
    # (Removed an unused local `n = a.shape[axis]` left over from an older
    # revision.)
    return a.std(axis) / a.mean(axis)
def skew(a, axis=0, bias=True):
    """
    Computes the skewness of a data set.

    For normally distributed data, the skewness should be about 0. A skewness
    value > 0 means that there is more weight in the left tail of the
    distribution. The function `skewtest` can be used to determine if the
    skewness value is close enough to 0, statistically speaking.

    Parameters
    ----------
    a : ndarray
        data
    axis : int or None
        axis along which skewness is calculated
    bias : bool
        If False, then the calculations are corrected for statistical bias.

    Returns
    -------
    skewness : ndarray
        The skewness of values along an axis, returning 0 where all values
        are equal.

    References
    ----------
    [CRCProbStat2000]_ Section 2.2.24.1

    .. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.
    """
    a, axis = _chk_asarray(a,axis)
    n = a.shape[axis]
    m2 = moment(a, 2, axis)
    m3 = moment(a, 3, axis)
    # Where the variance is zero (all values equal) the ratio is 0/0;
    # define the skewness there as 0.
    zero = (m2 == 0)
    vals = np.where(zero, 0, m3 / m2**1.5)
    if not bias:
        # Bias correction is only applicable where there are more than two
        # observations and a nonzero variance.
        can_correct = (n > 2) & (m2 > 0)
        if can_correct.any():
            # Work on just the correctable entries, then write the
            # corrected values back into `vals` in place.
            m2 = np.extract(can_correct, m2)
            m3 = np.extract(can_correct, m3)
            nval = np.sqrt((n-1.0)*n)/(n-2.0)*m3/m2**1.5
            np.place(vals, can_correct, nval)
    if vals.ndim == 0:
        # 1-D input reduced to a rank-0 array; return a plain scalar.
        return vals.item()
    return vals
def kurtosis(a, axis=0, fisher=True, bias=True):
    """
    Computes the kurtosis (Fisher or Pearson) of a dataset.

    Kurtosis is the fourth central moment divided by the square of the
    variance. If Fisher's definition is used, then 3.0 is subtracted from
    the result to give 0.0 for a normal distribution.

    If bias is False then the kurtosis is calculated using k statistics to
    eliminate bias coming from biased moment estimators

    Use `kurtosistest` to see if result is close enough to normal.

    Parameters
    ----------
    a : array
        data for which the kurtosis is calculated
    axis : int or None
        Axis along which the kurtosis is calculated
    fisher : bool
        If True, Fisher's definition is used (normal ==> 0.0). If False,
        Pearson's definition is used (normal ==> 3.0).
    bias : bool
        If False, then the calculations are corrected for statistical bias.

    Returns
    -------
    kurtosis : array
        The kurtosis of values along an axis. If all values are equal,
        return -3 for Fisher's definition and 0 for Pearson's definition.

    References
    ----------
    [CRCProbStat2000]_ Section 2.2.25

    .. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.
    """
    a, axis = _chk_asarray(a, axis)
    n = a.shape[axis]
    m2 = moment(a,2,axis)
    m4 = moment(a,4,axis)
    # Where the variance is zero (all values equal) the ratio is 0/0;
    # define the (Pearson) kurtosis there as 0.
    zero = (m2 == 0)
    vals = np.where(zero, 0, m4/ m2**2.0)
    if not bias:
        # Bias correction needs more than three observations and a nonzero
        # variance.
        can_correct = (n > 3) & (m2 > 0)
        if can_correct.any():
            # Correct just those entries and write them back in place.
            # `nval` is the Fisher (excess) form, hence the +3.0 when
            # storing into the Pearson-form `vals`.
            m2 = np.extract(can_correct, m2)
            m4 = np.extract(can_correct, m4)
            nval = 1.0/(n-2)/(n-3)*((n*n-1.0)*m4/m2**2.0-3*(n-1)**2.0)
            np.place(vals, can_correct, nval+3.0)
    if vals.ndim == 0:
        vals = vals.item()  # array scalar
    if fisher:
        # Excess kurtosis: subtract the normal distribution's value of 3.
        return vals - 3
    else:
        return vals
def describe(a, axis=0):
    """
    Computes several descriptive statistics of the passed array.

    Parameters
    ----------
    a : array_like
        data
    axis : int or None
        axis along which statistics are calculated. If axis is None, then
        the data array is raveled. The default axis is zero.

    Returns
    -------
    size of the data : int
        length of data along axis
    (min, max): tuple of ndarrays or floats
        minimum and maximum value of data array
    arithmetic mean : ndarray or float
        mean of data along axis
    unbiased variance : ndarray or float
        variance of the data along axis, denominator is number of
        observations minus one.
    biased skewness : ndarray or float
        skewness, based on moment calculations with denominator equal to
        the number of observations, i.e. no degrees of freedom correction
    biased kurtosis : ndarray or float
        kurtosis (Fisher), normalized so that it is zero for the normal
        distribution. No degrees of freedom or bias correction is used.

    See Also
    --------
    skew
    kurtosis
    """
    a, axis = _chk_asarray(a, axis)
    n = a.shape[axis]
    minmax = (np.min(a, axis=axis), np.max(a, axis=axis))
    mean = np.mean(a, axis=axis)
    unbiased_var = np.var(a, axis=axis, ddof=1)
    return n, minmax, mean, unbiased_var, skew(a, axis), kurtosis(a, axis)
#####################################
######## NORMALITY TESTS ##########
#####################################
def skewtest(a, axis=0):
    """
    Tests whether the skew is different from the normal distribution.

    This function tests the null hypothesis that the skewness of
    the population that the sample was drawn from is the same
    as that of a corresponding normal distribution.

    Parameters
    ----------
    a : array
    axis : int or None

    Returns
    -------
    z-score : float
        The computed z-score for this test.
    p-value : float
        a 2-sided p-value for the hypothesis test

    Notes
    -----
    The sample size should be at least 8.
    """
    a, axis = _chk_asarray(a, axis)
    if axis is None:
        a = np.ravel(a)
        axis = 0
    b2 = skew(a,axis)
    n = float(a.shape[axis])
    if n < 8:
        # Small samples give unreliable results; warn but proceed.
        warnings.warn(
            "skewtest only valid for n>=8 ... continuing anyway, n=%i" %
            int(n))
    # The remaining lines transform the sample skewness b2 into an
    # approximately standard-normal statistic Z (this looks like the
    # D'Agostino-style normalizing transformation -- the constants below
    # are taken as given; verify against the original reference before
    # changing them).
    y = b2 * math.sqrt(((n+1)*(n+3)) / (6.0*(n-2)) )
    beta2 = ( 3.0*(n*n+27*n-70)*(n+1)*(n+3) ) / ( (n-2.0)*(n+5)*(n+7)*(n+9) )
    W2 = -1 + math.sqrt(2*(beta2-1))
    delta = 1/math.sqrt(0.5*math.log(W2))
    alpha = math.sqrt(2.0/(W2-1))
    # Avoid log(0) below when the transformed skewness is exactly zero.
    y = np.where(y==0, 1, y)
    # log(y/alpha + sqrt((y/alpha)**2 + 1)) == arcsinh(y/alpha).
    Z = delta*np.log(y/alpha + np.sqrt((y/alpha)**2+1))
    # Two-sided p-value from the standard normal survival function.
    return Z, 2 * distributions.norm.sf(np.abs(Z))
def kurtosistest(a, axis=0):
    """
    Tests whether a dataset has normal kurtosis.

    This function tests the null hypothesis that the kurtosis
    of the population from which the sample was drawn is that
    of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.

    Parameters
    ----------
    a : array
        array of the sample data
    axis : int or None
        the axis to operate along, or None to work on the whole array.
        The default is the first axis.

    Returns
    -------
    z-score : float
        The computed z-score for this test.
    p-value : float
        The 2-sided p-value for the hypothesis test

    Notes
    -----
    Valid only for n>20. The Z-score is set to 0 for bad entries.
    """
    a, axis = _chk_asarray(a, axis)
    n = float(a.shape[axis])
    if n < 20:
        # The approximation below is poor for small samples; warn but
        # proceed anyway.
        warnings.warn(
            "kurtosistest only valid for n>=20 ... continuing anyway, n=%i" %
            int(n))
    # Pearson kurtosis of the sample, and its expectation/variance under
    # the null hypothesis of normality.
    b2 = kurtosis(a, axis, fisher=False)
    E = 3.0*(n-1) /(n+1)
    varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1)*(n+3)*(n+5))
    # Standardized kurtosis.
    x = (b2-E)/np.sqrt(varb2)
    # The following transforms x to an approximately standard-normal Z
    # (this resembles the Anscombe/Glynn cube-root transformation -- the
    # constants are taken as given; verify against the original reference
    # before changing them).
    sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5))/
                                                        (n*(n-2)*(n-3)))
    A = 6.0 + 8.0/sqrtbeta1 *(2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
    term1 = 1 -2/(9.0*A)
    denom = 1 +x*np.sqrt(2/(A-4.0))
    # A non-positive denominator marks a "bad" entry: replace it with the
    # sentinel 99, and afterwards force Z to 0 at those positions.
    denom = np.where(denom < 0, 99, denom)
    term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom,1/3.0))
    Z = ( term1 - term2 ) / np.sqrt(2/(9.0*A))
    Z = np.where(denom == 99, 0, Z)
    if Z.ndim == 0:
        # Unwrap a rank-0 array into a scalar.
        Z = Z[()]
    #JPNote: p-value sometimes larger than 1
    #zprob uses upper tail, so Z needs to be positive
    return Z, 2 * distributions.norm.sf(np.abs(Z))
def normaltest(a, axis=0):
    """
    Tests whether a sample differs from a normal distribution.

    This function tests the null hypothesis that a sample comes
    from a normal distribution. It is based on D'Agostino and
    Pearson's [1]_, [2]_ test that combines skew and kurtosis to
    produce an omnibus test of normality.

    Parameters
    ----------
    a : array
    axis : int or None

    Returns
    -------
    k2 : float
        The omnibus statistic s**2 + k**2 where s and k are the z-scores
        of `skewtest` and `kurtosistest`.
    p-value : float
        A 2-sided chi squared probability for the hypothesis test.

    References
    ----------
    .. [1] D'Agostino, R. B. and Pearson, E. S. (1971), "An Omnibus Test of
           Normality for Moderate and Large Sample Size,"
           Biometrika, 58, 341-348
    .. [2] D'Agostino, R. B. and Pearson, E. S. (1973), "Testing for
           departures from Normality," Biometrika, 60, 613-622
    """
    a, axis = _chk_asarray(a, axis)
    # Combine the two component z-scores; their p-values are not needed.
    s, _ = skewtest(a, axis)
    k, _ = kurtosistest(a, axis)
    k2 = s * s + k * k
    # Under the null, k2 is approximately chi-squared with 2 dof.
    return k2, chisqprob(k2, 2)
# Martinez-Iglewicz test
# K-S test
#####################################
###### FREQUENCY FUNCTIONS #######
#####################################
def itemfreq(a):
    """
    Returns a 2D array of item frequencies.

    Parameters
    ----------
    a : array_like of rank 1
        Input array.

    Returns
    -------
    itemfreq : ndarray of rank 2
        A two-column frequency table: column 1 contains the sorted unique
        item values, column 2 contains their respective counts.

    Notes
    -----
    This uses a loop that is only reasonably fast if the number of unique
    elements is not large. For integers, numpy.bincount is much faster.
    This function currently does not support strings or multi-dimensional
    scores.

    Examples
    --------
    >>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
    >>> stats.itemfreq(a)
    array([[ 0.,  2.],
           [ 1.,  4.],
           [ 2.,  2.],
           [ 4.,  1.],
           [ 5.,  1.]])
    >>> np.bincount(a)
    array([2, 4, 2, 0, 1, 1])
    """
    # comment: fortunately, this function doesn't appear to be used elsewhere
    scores = np.sort(_support.unique(a))
    freq = zeros(len(scores))
    # One full pass over `a` per unique value.
    for i, score in enumerate(scores):
        freq[i] = np.add.reduce(np.equal(a, score))
    return array(_support.abut(scores, freq))
def _interpolate(a, b, fraction):
"""Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
return a + (b - a)*fraction;
def scoreatpercentile(a, per, limit=()):
    """
    Calculate the score at the given `per` percentile of the sequence `a`.

    For example, the score at per=50 is the median. If the desired quantile
    lies between two data points, we interpolate between them. If the
    parameter `limit` is provided, it should be a tuple (lower, upper) of
    two values. Values of `a` outside this (closed) interval will be
    ignored.

    Parameters
    ----------
    a : ndarray
        Values from which to extract score.
    per : int or float
        Percentile at which to extract score.
    limit : tuple, optional
        Tuple of two scalars, the lower and upper limits within which to
        compute the percentile.

    Returns
    -------
    score : float
        Score at percentile.

    See Also
    --------
    percentileofscore

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(100)
    >>> stats.scoreatpercentile(a, 50)
    49.5
    """
    # TODO: this should be a simple wrapper around a well-written quantile
    # function.  GNU R provides 9 quantile algorithms (!), with differing
    # behaviour at, for example, discontinuities.
    values = np.sort(a, axis=0)
    if limit:
        values = values[(limit[0] <= values) & (values <= limit[1])]

    # Fractional position of the requested percentile in the sorted data.
    idx = per / 100. * (values.shape[0] - 1)
    if idx % 1 == 0:
        # BUG FIX: `idx` is a float even when integral; indexing an ndarray
        # with a float is deprecated/illegal in numpy, so cast to int.
        return values[int(idx)]
    else:
        # Interpolate between the two surrounding data points.
        return _interpolate(values[int(idx)], values[int(idx) + 1], idx % 1)
def percentileofscore(a, score, kind='rank'):
    '''
    The percentile rank of a score relative to a list of scores.

    A `percentileofscore` of, for example, 80% means that 80% of the
    scores in `a` are below the given score. In the case of gaps or
    ties, the exact definition depends on the optional keyword, `kind`.

    Parameters
    ----------
    a: array like
        Array of scores to which `score` is compared.
    score: int or float
        Score that is compared to the elements in `a`.
    kind: {'rank', 'weak', 'strict', 'mean'}, optional
        This optional parameter specifies the interpretation of the
        resulting score:

        - "rank": Average percentage ranking of score.  In case of
                  multiple matches, average the percentage rankings of
                  all matching scores.
        - "weak": This kind corresponds to the definition of a cumulative
                  distribution function.  A percentileofscore of 80%
                  means that 80% of values are less than or equal
                  to the provided score.
        - "strict": Similar to "weak", except that only values that are
                    strictly less than the given score are counted.
        - "mean": The average of the "weak" and "strict" scores, often
                  used in testing.  See
                  http://en.wikipedia.org/wiki/Percentile_rank

    Returns
    -------
    pcos : float
        Percentile-position of score (0-100) relative to `a`.

    Examples
    --------
    Three-quarters of the given values lie below a given score:

    >>> percentileofscore([1, 2, 3, 4], 3)
    75.0

    Only 2/5 values are strictly less than 3:

    >>> percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
    40.0

    But 4/5 values are less than or equal to 3:

    >>> percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
    80.0

    The average between the weak and the strict scores is

    >>> percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
    60.0
    '''
    a = np.array(a)
    n = len(a)

    if kind == 'rank':
        if not np.any(a == score):
            # Score absent: insert it so it acquires a rank; ranks are then
            # 0-based (historical behaviour of this branch).
            a = np.append(a, score)
            a_len = np.array(range(len(a)))
        else:
            a_len = np.array(range(len(a))) + 1.0

        a = np.sort(a)
        # BUG FIX: index with the boolean mask itself; wrapping the mask in
        # a list ([a == score]) is deprecated/removed numpy indexing.
        matches = (a == score)
        pct = (np.mean(a_len[matches]) / n) * 100.0
        return pct
    elif kind == 'strict':
        return sum(a < score) / float(n) * 100
    elif kind == 'weak':
        return sum(a <= score) / float(n) * 100
    elif kind == 'mean':
        return (sum(a < score) + sum(a <= score)) * 50 / float(n)
    else:
        raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
def histogram2(a, bins):
    """
    Compute histogram using divisions in bins.

    Count the number of times values from array `a` fall into
    numerical ranges defined by `bins`. Range x is given by
    bins[x] <= range_x < bins[x+1] where x =0,N and N is the
    length of the `bins` array. The last range is given by
    bins[N] <= range_N < infinity. Values less than bins[0] are
    not included in the histogram.

    Parameters
    ----------
    a : array_like of rank 1
        The array of values to be assigned into bins
    bins : array_like of rank 1
        Defines the ranges of values to use during histogramming.

    Returns
    -------
    histogram2 : ndarray of rank 1
        Each value represents the occurrences for a given bin (range) of
        values.
    """
    # comment: probably obsoleted by numpy.histogram()
    # searchsorted on the sorted data gives, for each bin edge, how many
    # values fall strictly below it; appending len(a) closes the final
    # (half-infinite) bin, and successive differences are the per-bin
    # counts.
    below_edge = np.searchsorted(np.sort(a), bins)
    below_edge = np.concatenate([below_edge, [len(a)]])
    return below_edge[1:] - below_edge[:-1]
def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
    """
    Separates the range into several bins and returns the number of instances
    of `a` in each bin.  Based on numpy's histogram, but uses a slightly
    larger range by default when no limits are given.

    Parameters
    ----------
    a : array_like
        Array of scores which will be put into bins.
    numbins : int, optional
        The number of bins to use for the histogram. Default is 10.
    defaultlimits : tuple (lower, upper), optional
        The lower and upper values for the range of the histogram.
        If no value is given, a range slightly larger than the range of
        the values in `a` is used: ``(a.min() - s, a.max() + s)``, where
        ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        The weights for each value in `a`. Default is None, which gives
        each value a weight of 1.0.
    printextras : bool, optional
        If True, warn about the number of points outside the histogram
        range. Default is False.

    Returns
    -------
    histogram : ndarray
        Number of points (or sum of weights) in each bin.
    low_range : float
        Lowest value of histogram, the lower limit of the first bin.
    binsize : float
        The size of the bins (all bins have the same size).
    extrapoints : int
        The number of points outside the range of the histogram.

    See Also
    --------
    numpy.histogram
    """
    flat = np.ravel(a)  # flatten any >1D arrays
    if defaultlimits is None:
        # derive limits from the data, padded so the extreme values fall
        # strictly inside the first and last bins
        lowest = flat.min()
        highest = flat.max()
        pad = (highest - lowest) / (2. * (numbins - 1.))
        defaultlimits = (lowest - pad, highest + pad)
    # delegate the actual binning to numpy
    hist, bin_edges = np.histogram(flat, bins=numbins, range=defaultlimits,
                                   weights=weights)
    # hist is not always float; convert to keep the historical output type
    hist = np.array(hist, dtype=float)
    # numpy produces equal-width bins for an integer `bins`, so a single
    # width describes them all
    binsize = bin_edges[1] - bin_edges[0]
    # count points falling outside the histogram range
    extrapoints = len([v for v in flat
                       if defaultlimits[0] > v or v > defaultlimits[1]])
    if printextras and extrapoints > 0:
        warnings.warn("Points outside given histogram range = %s"
                      % extrapoints)
    return (hist, defaultlimits[0], binsize, extrapoints)
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
    """
    Returns a cumulative frequency histogram, using the histogram function.

    Parameters
    ----------
    a : array_like
        Input array.
    numbins : int, optional
        The number of bins to use for the histogram. Default is 10.
    defaultreallimits : tuple (lower, upper), optional
        The lower and upper values for the range of the histogram.
        If no value is given, a range slightly larger than the range of
        the values in `a` is used: ``(a.min() - s, a.max() + s)``, where
        ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        The weights for each value in `a`. Default is None, which gives
        each value a weight of 1.0.

    Returns
    -------
    cumfreq : ndarray
        Binned values of cumulative frequency.
    lowerreallimit : float
        Lower real limit.
    binsize : float
        Width of each bin.
    extrapoints : int
        Extra points.

    Examples
    --------
    >>> x = [1, 4, 2, 1, 3, 1]
    >>> cumfreqs, lowlim, binsize, extrapoints = sp.stats.cumfreq(x, numbins=4)
    >>> cumfreqs
    array([ 3.,  4.,  5.,  6.])
    """
    counts, lowerlimit, binsize, extrapoints = histogram(
        a, numbins, defaultreallimits, weights=weights)
    # running total across bins turns frequencies into cumulative counts
    cumhist = np.cumsum(counts * 1, axis=0)
    return cumhist, lowerlimit, binsize, extrapoints
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
    """
    Returns a relative frequency histogram, using the histogram function.

    Parameters
    ----------
    a : array_like
        Input array.
    numbins : int, optional
        The number of bins to use for the histogram. Default is 10.
    defaultreallimits : tuple (lower, upper), optional
        The lower and upper values for the range of the histogram.
        If no value is given, a range slightly larger than the range of
        the values in `a` is used: ``(a.min() - s, a.max() + s)``, where
        ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        The weights for each value in `a`. Default is None, which gives
        each value a weight of 1.0.

    Returns
    -------
    relfreq : ndarray
        Binned values of relative frequency.
    lowerreallimit : float
        Lower real limit.
    binsize : float
        Width of each bin.
    extrapoints : int
        Extra points.

    Examples
    --------
    >>> a = np.array([1, 4, 2, 1, 3, 1])
    >>> relfreqs, lowlim, binsize, extrapoints = sp.stats.relfreq(a, numbins=4)
    >>> relfreqs
    array([ 0.5       ,  0.16666667,  0.16666667,  0.16666667])
    """
    counts, lowerlimit, binsize, extrapoints = histogram(
        a, numbins, defaultreallimits, weights=weights)
    # normalize the counts by the total number of input values
    relative = np.array(counts / float(np.array(a).shape[0]))
    return relative, lowerlimit, binsize, extrapoints
#####################################
###### VARIABILITY FUNCTIONS #####
#####################################
def obrientransform(*args):
    """
    Computes a transform on input data (any number of columns), used to
    test for homogeneity of variance prior to running one-way stats.

    Each array in `*args` is one level of a factor.  If an F_oneway() run
    on the transformed data is found significant, variances are unequal.
    From Maxwell and Delaney, p.112.

    Returns
    -------
    Transformed data for use in an ANOVA.

    Raises
    ------
    ValueError
        If the transform fails its internal consistency check (the mean of
        each transformed column should equal the variance of the original).
    """
    TINY = 1e-10
    k = len(args)
    counts = zeros(k)
    variances = zeros(k)
    means = zeros(k)
    transformed = []
    for idx in range(k):
        column = args[idx].astype(float)
        transformed.append(column)
        counts[idx] = float(len(column))
        variances[idx] = np.var(column, ddof=1)
        means[idx] = np.mean(column)
    for j in range(k):
        nj = counts[j]
        for i in range(int(nj)):
            t1 = (nj - 1.5) * nj * (transformed[j][i] - means[j]) ** 2
            t2 = 0.5 * variances[j] * (nj - 1.0)
            t3 = (nj - 1.0) * (nj - 2.0)
            transformed[j][i] = (t1 - t2) / float(t3)
    # By construction, the mean of each transformed column equals the sample
    # variance of the original column; a larger deviation signals failure.
    for j in range(k):
        if variances[j] - np.mean(transformed[j]) > TINY:
            raise ValueError('Lack of convergence in obrientransform.')
    return array(transformed)
def signaltonoise(a, axis=0, ddof=0):
    """
    Calculates the signal-to-noise ratio, defined as the ratio between the
    mean and the standard deviation.

    Parameters
    ----------
    a : array_like
        An array-like object containing the sample data.
    axis : int or None, optional
        If axis is None, the array is first ravel'd. If axis is an
        integer, this is the axis over which to operate. Default is 0.
    ddof : int, optional
        Degrees of freedom correction for the standard deviation.
        Default is 0.

    Returns
    -------
    Array containing the ratio of the mean to the standard deviation along
    `axis`, or 0 where the standard deviation equals 0.
    """
    data = np.asanyarray(a)
    mean_val = data.mean(axis)
    std_val = data.std(axis=axis, ddof=ddof)
    # guard against division by zero: report 0 where the std is 0
    return np.where(std_val == 0, 0, mean_val / std_val)
def sem(a, axis=0, ddof=1):
    """
    Calculates the standard error of the mean (or standard error of
    measurement) of the values in the input array.

    Parameters
    ----------
    a : array_like
        An array containing the values for which the standard error is
        returned.
    axis : int or None, optional
        If axis is None, ravel `a` first. If axis is an integer, this will
        be the axis over which to operate. Defaults to 0.
    ddof : int, optional
        Delta degrees-of-freedom. How many degrees of freedom to adjust
        for bias in limited samples relative to the population estimate
        of variance. Defaults to 1.

    Returns
    -------
    s : ndarray or float
        The standard error of the mean in the sample(s), along the input
        axis.

    Notes
    -----
    The default value for `ddof` differs from the default (0) used by other
    ddof-accepting routines, such as np.std and stats.nanstd.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(20).reshape(5,4)
    >>> stats.sem(a)
    array([ 2.8284,  2.8284,  2.8284,  2.8284])
    """
    a, axis = _chk_asarray(a, axis)
    count = a.shape[axis]
    # std of the sample scaled down by sqrt(n)  #JP check normalization
    return np.std(a, axis=axis, ddof=ddof) / np.sqrt(count)
def zscore(a, axis=0, ddof=0):
    """
    Calculates the z score of each value in the sample, relative to the
    sample mean and standard deviation.

    Parameters
    ----------
    a : array_like
        An array like object containing the sample data.
    axis : int or None, optional
        If `axis` is equal to None, the array is first raveled. If `axis`
        is an integer, this is the axis over which to operate. Default is 0.
    ddof : int, optional
        Degrees of freedom correction in the calculation of the
        standard deviation. Default is 0.

    Returns
    -------
    zscore : array_like
        The z-scores, standardized by mean and standard deviation of input
        array `a`.

    Notes
    -----
    This function preserves ndarray subclasses, and works also with
    matrices and masked arrays (it uses `asanyarray` instead of `asarray`
    for parameters).

    Examples
    --------
    >>> a = np.array([ 0.7972,  0.0767,  0.4383,  0.7866,  0.8091,  0.1954,
                       0.6307,  0.6599,  0.1065,  0.0508])
    >>> from scipy import stats
    >>> stats.zscore(a)
    array([ 1.1273, -1.247 , -0.0552,  1.0923,  1.1664, -0.8559,  0.5786,
            0.6748, -1.1488, -1.3324])
    """
    a = np.asanyarray(a)
    mns = a.mean(axis=axis)
    sstd = a.std(axis=axis, ddof=ddof)
    if axis and mns.ndim < a.ndim:
        # BUG FIX: the original misplaced a parenthesis and computed
        # ``a - (mns / sstd)`` instead of ``(a - mns) / sstd``.
        return ((a - np.expand_dims(mns, axis=axis)) /
                np.expand_dims(sstd, axis=axis))
    else:
        return (a - mns) / sstd
def zmap(scores, compare, axis=0, ddof=0):
    """
    Calculates the relative z-scores.

    Returns an array of z-scores, i.e., scores that are standardized to
    zero mean and unit variance, where mean and variance are calculated
    from the comparison array.

    Parameters
    ----------
    scores : array_like
        The input for which z-scores are calculated.
    compare : array_like
        The input from which the mean and standard deviation of the
        normalization are taken; assumed to have the same dimension as
        `scores`.
    axis : int or None, optional
        Axis over which mean and variance of `compare` are calculated.
        Default is 0.
    ddof : int, optional
        Degrees of freedom correction in the calculation of the
        standard deviation. Default is 0.

    Returns
    -------
    zscore : array_like
        Z-scores, in the same shape as `scores`.

    Notes
    -----
    This function preserves ndarray subclasses, and works also with
    matrices and masked arrays (it uses `asanyarray` instead of `asarray`
    for parameters).
    """
    scores, compare = map(np.asanyarray, [scores, compare])
    mns = compare.mean(axis=axis)
    sstd = compare.std(axis=axis, ddof=ddof)
    if axis and mns.ndim < compare.ndim:
        # BUG FIX: the original misplaced a parenthesis and computed
        # ``scores - (mns / sstd)`` instead of ``(scores - mns) / sstd``.
        return ((scores - np.expand_dims(mns, axis=axis)) /
                np.expand_dims(sstd, axis=axis))
    else:
        return (scores - mns) / sstd
#####################################
####### TRIMMING FUNCTIONS #######
#####################################
def threshold(a, threshmin=None, threshmax=None, newval=0):
    """
    Clip array to a given value.

    Similar to numpy.clip(), except that values less than `threshmin` or
    greater than `threshmax` are replaced by `newval`, instead of by
    `threshmin` and `threshmax` respectively.

    Parameters
    ----------
    a : array_like
        Data to threshold.
    threshmin : float, int or None, optional
        Minimum threshold, defaults to None.
    threshmax : float, int or None, optional
        Maximum threshold, defaults to None.
    newval : float or int, optional
        Value to put in place of values in `a` outside of bounds.
        Defaults to 0.

    Returns
    -------
    out : ndarray
        The clipped input array, with values less than `threshmin` or
        greater than `threshmax` replaced with `newval`.

    Examples
    --------
    >>> a = np.array([9, 9, 6, 3, 1, 6, 1, 0, 0, 8])
    >>> from scipy import stats
    >>> stats.threshold(a, threshmin=2, threshmax=8, newval=-1)
    array([-1, -1,  6,  3, -1,  6, -1, -1, -1,  8])
    """
    out = asarray(a).copy()
    # accumulate a boolean mask of every element outside the bounds
    out_of_bounds = zeros(out.shape, dtype=bool)
    if threshmin is not None:
        out_of_bounds |= (out < threshmin)
    if threshmax is not None:
        out_of_bounds |= (out > threshmax)
    out[out_of_bounds] = newval
    return out
def sigmaclip(a, low=4., high=4.):
"""
Iterative sigma-clipping of array elements.
The output array contains only those elements of the input array `c`
that satisfy the conditions ::
mean(c) - std(c)*low < c < mean(c) + std(c)*high
Starting from the full sample, all elements outside the critical range are
removed. The iteration continues with a new critical range until no
elements are outside the range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
c : ndarray
Input array with clipped elements removed.
critlower : float
Lower threshold value use for clipping.
critlupper : float
Upper threshold value use for clipping.
Examples
--------
>>> a = np.concatenate((np.linspace(9.5,10.5,31), np.linspace(0,20,5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5,10.5,11),
np.linspace(-100,-50,3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5,10.5,11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std*low
critupper = c_mean + c_std*high
c = c[(c>critlower) & (c<critupper)]
delta = size-c.size
return c, critlower, critupper
def trimboth(a, proportiontocut):
    """
    Slices off a proportion of items from both ends of an array.

    Slices off the passed proportion of items from both ends of the passed
    array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
    rightmost 10% of scores).  You must pre-sort the array if you want
    'proper' trimming.  Slices off less if proportion results in a
    non-integer slice index (i.e., conservatively slices off
    `proportiontocut`).

    Parameters
    ----------
    a : array_like
        Data to trim.
    proportiontocut : float or int
        Proportion of total data set to trim off each end.

    Returns
    -------
    out : ndarray
        Trimmed version of array `a`.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(20)
    >>> b = stats.trimboth(a, 0.1)
    >>> b.shape
    (16,)
    """
    data = asarray(a)
    # conservative cut: the float proportion is truncated toward zero
    cut = int(proportiontocut * len(data))
    upper = len(data) - cut
    if cut >= upper:
        raise ValueError("Proportion too big.")
    return data[cut:upper]
def trim1(a, proportiontocut, tail='right'):
    """
    Slices off a proportion of items from ONE end of the passed array
    distribution.

    If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
    10% of scores.  Slices off LESS if proportion results in a non-integer
    slice index (i.e., conservatively slices off `proportiontocut`).

    Parameters
    ----------
    a : array_like
        Input array.
    proportiontocut : float
        Fraction to cut off of 'left' or 'right' of distribution.
    tail : string, {'left', 'right'}, optional
        Defaults to 'right'.

    Returns
    -------
    trim1 : ndarray
        Trimmed version of array `a`.

    Raises
    ------
    ValueError
        If `tail` is neither 'left' nor 'right'.
    """
    a = asarray(a)
    if tail.lower() == 'right':
        lowercut = 0
        uppercut = len(a) - int(proportiontocut * len(a))
    elif tail.lower() == 'left':
        lowercut = int(proportiontocut * len(a))
        uppercut = len(a)
    else:
        # BUG FIX: an unrecognized `tail` previously fell through and
        # raised an opaque UnboundLocalError on the return statement.
        raise ValueError("tail must be 'left' or 'right'")
    return a[lowercut:uppercut]
def trim_mean(a, proportiontocut):
    """
    Return mean of array after trimming the distribution from both lower
    and upper tails.

    If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10%
    of scores.  Slices off LESS if proportion results in a non-integer
    slice index (i.e., conservatively slices off `proportiontocut`).

    Parameters
    ----------
    a : array_like
        Input array.
    proportiontocut : float
        Fraction to cut off of both tails of the distribution.

    Returns
    -------
    trim_mean : ndarray
        Mean of trimmed array.
    """
    # sort first so the cuts remove the actual extremes, then average
    trimmed = trimboth(np.sort(a), proportiontocut)
    return np.mean(trimmed, axis=0)
def f_oneway(*args):
    """
    Performs a 1-way ANOVA.

    The one-way ANOVA tests the null hypothesis that two or more groups
    have the same population mean.  The test is applied to samples from
    two or more groups, possibly with differing sizes.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The sample measurements for each group.

    Returns
    -------
    F-value : float
        The computed F-value of the test.
    p-value : float
        The associated p-value from the F-distribution.

    Notes
    -----
    The ANOVA test has important assumptions that must be satisfied in
    order for the associated p-value to be valid:

    1. The samples are independent.
    2. Each sample is from a normally distributed population.
    3. The population standard deviations of the groups are all equal
       (homoscedasticity).

    If these assumptions are not true for a given set of data, it may
    still be possible to use the Kruskal-Wallis H-test (`stats.kruskal`)
    although with some loss of power.

    The algorithm is from Heiman[2], pp.394-7.

    References
    ----------
    .. [1] Lowry, Richard.  "Concepts and Applications of Inferential
           Statistics". Chapter 14.
           http://faculty.vassar.edu/lowry/ch14pt1.html
    .. [2] Heiman, G.W.  Research Methods in Statistics. 2002.
    """
    na = len(args)  # ANOVA on 'na' groups, each in its own array
    # NOTE: the original built `tmp = map(np.array, args)` and never used
    # it (a no-op lazy iterator on Python 3); it has been removed.
    alldata = np.concatenate(args)
    bign = len(alldata)
    # total sum of squares about the grand mean
    sstot = ss(alldata) - (square_of_sums(alldata) / float(bign))
    # between-groups sum of squares
    ssbn = 0
    for a in args:
        ssbn = ssbn + square_of_sums(np.asarray(a)) / float(len(a))
    ssbn = ssbn - (square_of_sums(alldata) / float(bign))
    # within-groups (error) sum of squares
    sswn = sstot - ssbn
    dfbn = na - 1
    dfwn = bign - na
    msb = ssbn / float(dfbn)
    msw = sswn / float(dfwn)
    f = msb / msw
    prob = fprob(dfbn, dfwn, f)
    return f, prob
def pearsonr(x, y):
    """
    Calculates a Pearson correlation coefficient and the p-value for
    testing non-correlation.

    The Pearson correlation coefficient measures the linear relationship
    between two datasets.  Strictly speaking, Pearson's correlation
    requires that each dataset be normally distributed.  Like other
    correlation coefficients, this one varies between -1 and +1 with 0
    implying no correlation.  Correlations of -1 or +1 imply an exact
    linear relationship.

    The p-value roughly indicates the probability of an uncorrelated
    system producing datasets that have a Pearson correlation at least as
    extreme as the one computed from these datasets.  The p-values are not
    entirely reliable but are probably reasonable for datasets larger than
    500 or so.

    Parameters
    ----------
    x : 1D array
    y : 1D array the same length as x

    Returns
    -------
    (Pearson's correlation coefficient, 2-tailed p-value)

    References
    ----------
    http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
    """
    # x and y should have same length
    x = np.asarray(x)
    y = np.asarray(y)
    n = len(x)
    x_dev = x - x.mean()
    y_dev = y - y.mean()
    numerator = n * (np.add.reduce(x_dev * y_dev))
    denominator = n * np.sqrt(ss(x_dev) * ss(y_dev))
    r = numerator / denominator
    # |r| can exceed 1 only through floating-point artifacts; clamp it
    r = max(min(r, 1.0), -1.0)
    df = n - 2
    if abs(r) == 1.0:
        prob = 0.0
    else:
        t_squared = r * r * (df / ((1.0 - r) * (1.0 + r)))
        prob = betai(0.5 * df, 0.5, df / (df + t_squared))
    return r, prob
def fisher_exact(table) :
"""Performs a Fisher exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table.
Returns
-------
oddsratio : float
This is prior odds ratio and not a posterior estimate.
p_value : float
P-value for 2-sided hypothesis of independence.
Notes
-----
The calculated odds ratio is different from the one R uses. In R language,
this implementation returns the (more common) "unconditional Maximum
Likelihood Estimate", while R uses the "conditional Maximum Likelihood
Estimate".
Examples
--------
>>> fisher_exact([[100, 2], [1000, 5]])
(0.25, 0.13007593634330314)
"""
hypergeom = distributions.hypergeom
c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if c[1,0] > 0 and c[0,1] > 0:
odssratio = c[0,0] * c[1,1] / float(c[1,0] * c[0,1])
else:
odssratio = np.inf
n1 = c[0,0] + c[0,1]
n2 = c[1,0] + c[1,1]
n = c[0,0] + c[1,0]
mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0,0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
if float(np.abs(pexact - pmode)) / np.abs(np.max(pexact, pmode)) <= 1 - epsilon:
return odssratio, 1
elif c[0,0] < mode:
plower = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return odssratio, plower
# Binary search for where to begin upper half.
minval = mode
maxval = n
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if pguess <= pexact and hypergeom.pmf(guess - 1, n1 + n2, n1, n) > pexact:
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
p = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
if p > 1.0:
p = 1.0
return odssratio, p
else:
pupper = hypergeom.sf(c[0,0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return odssratio, pupper
# Binary search for where to begin lower half.
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if pguess <= pexact and hypergeom.pmf(guess + 1, n1 + n2, n1, n) > pexact:
break
elif pguess <= pexact:
minval = guess
else:
maxval = guess
if guess == -1:
guess = minval
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
p = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
if p > 1.0:
p = 1.0
return odssratio, p
def spearmanr(a, b=None, axis=0):
    """
    Calculates a Spearman rank-order correlation coefficient and the
    p-value to test for non-correlation.

    The Spearman correlation is a nonparametric measure of the
    monotonicity of the relationship between two datasets.  Unlike the
    Pearson correlation, the Spearman correlation does not assume that
    both datasets are normally distributed.  Like other correlation
    coefficients, this one varies between -1 and +1 with 0 implying no
    correlation.  Correlations of -1 or +1 imply an exact monotonic
    relationship.

    The p-value roughly indicates the probability of an uncorrelated
    system producing datasets that have a Spearman correlation at least
    as extreme as the one computed from these datasets.  The p-values are
    not entirely reliable but are probably reasonable for datasets larger
    than 500 or so.

    Parameters
    ----------
    a, b : 1D or 2D array_like, b is optional
        One or two 1-D or 2-D arrays containing multiple variables and
        observations.  Each column of `a` and `b` represents a variable,
        and each row entry a single observation of those variables.  See
        also `axis`.  Both arrays need to have the same length in the
        `axis` dimension.
    axis : int or None, optional
        If axis=0 (default), then each column represents a variable, with
        observations in the rows.  If axis=1, the relationship is
        transposed: each row represents a variable, while the columns
        contain observations.  If axis=None, then both arrays will be
        raveled.

    Returns
    -------
    rho : float or ndarray (2-D square)
        Spearman correlation matrix or correlation coefficient (if only 2
        variables are given as parameters).  Correlation matrix is square
        with length equal to total number of variables (columns or rows)
        in `a` and `b` combined.
    p-value : float
        The two-sided p-value for a hypothesis test whose null hypothesis
        is that two sets of data are uncorrelated; has same dimension as
        rho.

    References
    ----------
    [CRCProbStat2000]_ Section 14.7

    .. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC
       Standard Probability and Statistics Tables and Formulae. Chapman &
       Hall: New York. 2000.

    Examples
    --------
    >>> spearmanr([1,2,3,4,5],[5,6,7,8,7])
    (0.82078268166812329, 0.088587005313543798)
    """
    a, axisout = _chk_asarray(a, axis)
    ranks_a = np.apply_along_axis(rankdata, axisout, a)
    ranks_b = None
    if b is not None:
        b, axisout = _chk_asarray(b, axis)
        ranks_b = np.apply_along_axis(rankdata, axisout, b)
    n = a.shape[axisout]
    rs = np.corrcoef(ranks_a, ranks_b, rowvar=axisout)

    # rs can contain elements equal to exactly 1, which would otherwise
    # trigger a divide-by-zero warning below
    old_settings = np.seterr(divide='ignore')
    try:
        t = rs * np.sqrt((n - 2) / ((rs + 1.0) * (1.0 - rs)))
    finally:
        np.seterr(**old_settings)
    prob = distributions.t.sf(np.abs(t), n - 2) * 2

    if rs.shape == (2, 2):
        # exactly two variables: return scalars rather than matrices
        return rs[1, 0], prob[1, 0]
    return rs, prob
def pointbiserialr(x, y):
    # comment: I am changing the semantics somewhat. The original function
    # is fairly general and accepts an x sequence that has any type of
    # thing in it as long as there are only two unique items. I am going
    # to restrict this to a boolean array for my sanity.
    """Calculates a point biserial correlation coefficient and the
    associated p-value.

    The point biserial correlation is used to measure the relationship
    between a binary variable, x, and a continuous variable, y.  Like
    other correlation coefficients, this one varies between -1 and +1
    with 0 implying no correlation.  Correlations of -1 or +1 imply a
    determinative relationship.

    This function uses a shortcut formula but produces the same result as
    `pearsonr`.

    Parameters
    ----------
    x : array_like of bools
        Input array.
    y : array_like
        Input array.

    Returns
    -------
    r : float
        R value.
    p-value : float
        2-tailed p-value.

    References
    ----------
    http://www.childrens-mercy.org/stats/definitions/biserial.htm

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([0, 0, 0, 1, 1, 1, 1])
    >>> b = np.arange(7)
    >>> stats.pointbiserialr(a, b)
    (0.8660254037844386, 0.011724811003954652)
    """
    ## Test data: http://support.sas.com/ctx/samples/index.jsp?sid=490&tab=output
    x = np.asarray(x, dtype=bool)
    y = np.asarray(y, dtype=float)
    n = len(x)

    # fraction of x values that are True
    frac_true = x.sum() / float(len(x))
    mean_when_false = y[~x].mean()
    mean_when_true = y[x].mean()

    # frac_true - frac_true**2 is more stable than frac_true*(1-frac_true)
    rpb = ((mean_when_true - mean_when_false) *
           np.sqrt(frac_true - frac_true ** 2) / y.std())

    df = n - 2
    # fixme: see comment about TINY in pearsonr()
    TINY = 1e-20
    t = rpb * np.sqrt(df / ((1.0 - rpb + TINY) * (1.0 + rpb + TINY)))
    prob = betai(0.5 * df, 0.5, df / (df + t * t))
    return rpb, prob
def kendalltau(x, y, initial_lexsort=True):
    """
    Calculates Kendall's tau, a correlation measure for ordinal data.

    Kendall's tau is a measure of the correspondence between two rankings.
    Values close to 1 indicate strong agreement, values close to -1
    indicate strong disagreement.  This is the tau-b version of Kendall's
    tau which accounts for ties.

    Parameters
    ----------
    x, y : array_like
        Arrays of rankings, of the same shape. If arrays are not 1-D, they
        will be flattened to 1-D.
    initial_lexsort : bool, optional
        Whether to use lexsort or quicksort as the sorting method for the
        initial sort of the inputs. Default is lexsort (True), for which
        `kendalltau` is of complexity O(n log(n)). If False, the
        complexity is O(n^2), but with a smaller pre-factor (so quicksort
        may be faster for small arrays).

    Returns
    -------
    Kendall's tau : float
        The tau statistic.
    p-value : float
        The two-sided p-value for a hypothesis test whose null hypothesis
        is an absence of association, tau = 0.

    Notes
    -----
    The definition of Kendall's tau that is used is::

        tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))

    where P is the number of concordant pairs, Q the number of discordant
    pairs, T the number of ties only in `x`, and U the number of ties only
    in `y`.  If a tie occurs for the same pair in both `x` and `y`, it is
    not added to either T or U.

    References
    ----------
    W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
    Ungrouped Data", Journal of the American Statistical Association,
    Vol. 61, No. 314, Part 1, pp. 436-439, 1966.

    Examples
    --------
    >>> x1 = [12, 2, 1, 12, 2]
    >>> x2 = [1, 4, 7, 1, 0]
    >>> tau, p_value = sp.stats.kendalltau(x1, x2)
    >>> tau
    -0.47140452079103173
    >>> p_value
    0.24821309157521476
    """
    x = np.asarray(x).ravel()
    y = np.asarray(y).ravel()
    n = len(x)
    # support structure used by mergesort.
    # BUG FIX: was `temp = range(n)`, which is not item-assignable on
    # Python 3; list() keeps it a mutable list on both Python 2 and 3.
    temp = list(range(n))
    # this closure recursively sorts sections of perm[] by comparing
    # elements of y[perm[]] using temp[] as support
    # returns the number of swaps required by an equivalent bubble sort
    def mergesort(offs, length):
        exchcnt = 0
        if length == 1:
            return 0
        if length == 2:
            if y[perm[offs]] <= y[perm[offs+1]]:
                return 0
            t = perm[offs]
            perm[offs] = perm[offs+1]
            perm[offs+1] = t
            return 1
        length0 = length // 2
        length1 = length - length0
        middle = offs + length0
        exchcnt += mergesort(offs, length0)
        exchcnt += mergesort(middle, length1)
        if y[perm[middle - 1]] < y[perm[middle]]:
            return exchcnt
        # merging
        i = j = k = 0
        while j < length0 or k < length1:
            if k >= length1 or (j < length0 and y[perm[offs + j]] <=
                                                y[perm[middle + k]]):
                temp[i] = perm[offs + j]
                d = i - j
                j += 1
            else:
                temp[i] = perm[middle + k]
                d = (offs + i) - (middle + k)
                k += 1
            if d > 0:
                exchcnt += d
            i += 1
        perm[offs:offs+length] = temp[0:length]
        return exchcnt
    # initial sort on values of x and, if tied, on values of y
    if initial_lexsort:
        # sort implemented as mergesort, worst case: O(n log(n))
        perm = np.lexsort((y, x))
    else:
        # sort implemented as quicksort, 30% faster but with worst case: O(n^2)
        # BUG FIX: was `perm = range(n)`; range objects have no .sort() on
        # Python 3, so materialize a list first.
        perm = list(range(n))
        perm.sort(key=lambda a: (x[a], y[a]))
    # compute joint ties
    first = 0
    t = 0
    for i in range(1, n):
        if x[perm[first]] != x[perm[i]] or y[perm[first]] != y[perm[i]]:
            t += ((i - first) * (i - first - 1)) // 2
            first = i
    t += ((n - first) * (n - first - 1)) // 2
    # compute ties in x
    first = 0
    u = 0
    for i in range(1, n):
        if x[perm[first]] != x[perm[i]]:
            u += ((i - first) * (i - first - 1)) // 2
            first = i
    u += ((n - first) * (n - first - 1)) // 2
    # count exchanges
    exchanges = mergesort(0, n)
    # compute ties in y after mergesort with counting
    first = 0
    v = 0
    for i in range(1, n):
        if y[perm[first]] != y[perm[i]]:
            v += ((i - first) * (i - first - 1)) // 2
            first = i
    v += ((n - first) * (n - first - 1)) // 2
    tot = (n * (n - 1)) // 2
    if tot == u and tot == v:
        return 1    # Special case for all ties in both ranks
    tau = ((tot - (v + u - t)) - 2.0 * exchanges) / \
        np.sqrt((tot - u) * (tot - v))
    # what follows reproduces the ending of Gary Strangman's original
    # stats.kendalltau() in SciPy
    svar = (4.0 * n + 10.0) / (9.0 * n * (n - 1))
    z = tau / np.sqrt(svar)
    prob = special.erfc(np.abs(z) / 1.4142136)
    return tau, prob
def linregress(x, y=None):
    """Calculate a least-squares regression line for two sets of measurements.

    Parameters
    ----------
    x, y : array_like
        Two sets of measurements.  Both arrays should have the same length.
        If only `x` is given (and ``y=None``), it must be a two-dimensional
        array where one dimension has length 2; the two sets of measurements
        are then found by splitting the array along the length-2 dimension.

    Returns
    -------
    slope : float
        Slope of the regression line.
    intercept : float
        Intercept of the regression line.
    r-value : float
        Correlation coefficient.
    p-value : float
        Two-sided p-value for a hypothesis test whose null hypothesis is
        that the slope is zero.
    stderr : float
        Standard error of the estimate.

    Raises
    ------
    ValueError
        If only `x` is given and its shape is neither (2, N) nor (N, 2).
    """
    # Guards the t-statistic denominator against 0/0 when |r| == 1.
    TINY = 1.0e-20
    if y is None:  # x is a (2, N) or (N, 2) shaped array_like
        x = asarray(x)
        if x.shape[0] == 2:
            x, y = x
        elif x.shape[1] == 2:
            x, y = x.T
        else:
            msg = ("If only `x` is given as input, it has to be of shape "
                   "(2, N) or (N, 2), provided shape was %s" % str(x.shape))
            raise ValueError(msg)
    else:
        x = asarray(x)
        y = asarray(y)
    n = len(x)
    xmean = np.mean(x, None)
    ymean = np.mean(y, None)
    # Average sums of squares from the (biased, i.e. divided by n)
    # covariance matrix; ssyxm equals ssxym and is unused.
    ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
    r_num = ssxym
    r_den = np.sqrt(ssxm * ssym)
    if r_den == 0.0:
        # x or y is constant: correlation is undefined, report 0.
        r = 0.0
    else:
        r = r_num / r_den
        # Clamp to [-1, 1] to absorb floating-point error.  The previous
        # code clamped only the upper bound, so r could drop below -1
        # for a perfect negative fit and produce a negative (1 - r*r).
        if r > 1.0:
            r = 1.0
        elif r < -1.0:
            r = -1.0
    df = n - 2
    # t-statistic for H0: slope == 0; TINY avoids division by zero.
    t = r * np.sqrt(df / ((1.0 - r + TINY) * (1.0 + r + TINY)))
    prob = distributions.t.sf(np.abs(t), df) * 2
    slope = r_num / ssxm
    intercept = ymean - slope * xmean
    # Standard error of the estimate.
    sterrest = np.sqrt((1 - r * r) * ssym / ssxm / df)
    return slope, intercept, r, prob, sterrest
#####################################
##### INFERENTIAL STATISTICS #####
#####################################
def ttest_1samp(a, popmean, axis=0):
    """Calculates the T-test for the mean of ONE group of scores `a`.

    Two-sided test of the null hypothesis that the expected value (mean)
    of a sample of independent observations equals the population mean,
    `popmean`.

    Parameters
    ----------
    a : array_like
        Sample observation.
    popmean : float or array_like
        Expected value under the null hypothesis; if array_like it must
        have the same shape as `a` excluding the axis dimension.
    axis : int, optional
        Axis over which to operate; None ravels the array first.
        Default is 0.

    Returns
    -------
    t : float or array
        t-statistic.
    prob : float or array
        Two-tailed p-value.
    """
    a, axis = _chk_asarray(a, axis)
    nobs = a.shape[axis]
    dof = nobs - 1
    diff = np.mean(a, axis) - popmean
    svar = np.var(a, axis, ddof=1)
    tstat = diff / np.sqrt(svar / float(nobs))
    # 0/0 (identical mean and zero variance) is defined as t = 1.
    tstat = np.where((diff == 0) * (svar == 0), 1.0, tstat)
    # np.abs selects the upper tail; factor 2 makes the test two-sided.
    pval = distributions.t.sf(np.abs(tstat), dof) * 2
    # distributions.t.sf currently does not propagate nans, so restore
    # them by hand; this can be dropped once it does (and then the
    # prob[()] unwrapping below goes with it).
    pval = np.where(np.isnan(tstat), np.nan, pval)
    if tstat.ndim == 0:
        tstat = tstat[()]
        pval = pval[()]
    return tstat, pval
def ttest_ind(a, b, axis=0):
    """Calculates the T-test for the means of TWO INDEPENDENT samples of scores.

    Two-sided test of the null hypothesis that two independent samples
    have identical average (expected) values.

    Parameters
    ----------
    a, b : sequence of ndarrays
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int, optional
        Axis over which to operate; None ravels both arrays first.

    Returns
    -------
    t : float or array
        t-statistic.
    prob : float or array
        Two-tailed p-value.

    References
    ----------
    http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    """
    a, b, axis = _chk2_asarray(a, b, axis)
    var1 = np.var(a, axis, ddof=1)
    var2 = np.var(b, axis, ddof=1)
    nobs1 = a.shape[axis]
    nobs2 = b.shape[axis]
    dof = nobs1 + nobs2 - 2
    diff = np.mean(a, axis) - np.mean(b, axis)
    # Pooled sample variance of the two groups.
    svar = ((nobs1 - 1) * var1 + (nobs2 - 1) * var2) / float(dof)
    tstat = diff / np.sqrt(svar * (1.0 / nobs1 + 1.0 / nobs2))
    # 0/0 (identical means and zero pooled variance) is defined as t = 1.
    tstat = np.where((diff == 0) * (svar == 0), 1.0, tstat)
    # np.abs selects the upper tail; factor 2 makes the test two-sided.
    pval = distributions.t.sf(np.abs(tstat), dof) * 2
    # distributions.t.sf currently does not propagate nans; restore them.
    pval = np.where(np.isnan(tstat), np.nan, pval)
    if tstat.ndim == 0:
        tstat = tstat[()]
        pval = pval[()]
    return tstat, pval
def ttest_rel(a,b,axis=0):
    """Calculates the T-test on TWO RELATED samples of scores, a and b.

    Two-sided test of the null hypothesis that two related or repeated
    samples have identical average (expected) values.

    Parameters
    ----------
    a, b : sequence of ndarrays
        The arrays must have the same shape.
    axis : int, optional
        Axis over which to operate; None ravels both arrays first.
        Default is 0.

    Returns
    -------
    t : float or array
        t-statistic.
    prob : float or array
        Two-tailed p-value.

    Raises
    ------
    ValueError
        If the two samples differ in length along `axis`.

    References
    ----------
    http://en.wikipedia.org/wiki/T-test#Dependent_t-test
    """
    a, b, axis = _chk2_asarray(a, b, axis)
    if a.shape[axis] != b.shape[axis]:
        raise ValueError('unequal length arrays')
    nobs = a.shape[axis]
    dof = float(nobs - 1)
    # Work on the paired differences, promoted to double precision.
    diff = (a - b).astype('d')
    svar = np.var(diff, axis, ddof=1)
    dmean = np.mean(diff, axis)
    tstat = dmean / np.sqrt(svar / float(nobs))
    # 0/0 (zero mean difference and zero variance) is defined as t = 1.
    tstat = np.where((dmean == 0) * (svar == 0), 1.0, tstat)
    # np.abs selects the upper tail; factor 2 makes the test two-sided.
    pval = distributions.t.sf(np.abs(tstat), dof) * 2
    # distributions.t.sf currently does not propagate nans; restore them.
    pval = np.where(np.isnan(tstat), np.nan, pval)
    if tstat.ndim == 0:
        tstat = tstat[()]
        pval = pval[()]
    return tstat, pval
#import scipy.stats
#import distributions
def kstest(rvs, cdf, args=(), N=20, alternative = 'two_sided', mode='approx',**kwds):
    """
    Perform the Kolmogorov-Smirnov test for goodness of fit
    This performs a test of the distribution G(x) of an observed
    random variable against a given distribution F(x). Under the null
    hypothesis the two distributions are identical, G(x)=F(x). The
    alternative hypothesis can be either 'two_sided' (default), 'less'
    or 'greater'. The KS test is only valid for continuous distributions.
    Parameters
    ----------
    rvs : string or array or callable
        string: name of a distribution in scipy.stats
        array: 1-D observations of random variables
        callable: function to generate random variables, requires keyword
        argument `size`
    cdf : string or callable
        string: name of a distribution in scipy.stats, if rvs is a string then
        cdf can evaluate to `False` or be the same as rvs
        callable: function to evaluate cdf
    args : tuple, sequence
        distribution parameters, used if rvs or cdf are strings
    N : int
        sample size if rvs is string or callable
    alternative : 'two_sided' (default), 'less' or 'greater'
        defines the alternative hypothesis (see explanation)
    mode : 'approx' (default) or 'asymp'
        defines the distribution used for calculating p-value
        'approx' : use approximation to exact distribution of test statistic
        'asymp' : use asymptotic distribution of test statistic
    Returns
    -------
    D : float
        KS test statistic, either D, D+ or D-
    p-value :  float
        one-tailed or two-tailed p-value
    Notes
    -----
    In the one-sided test, the alternative is that the empirical
    cumulative distribution function of the random variable is "less"
    or "greater" than the cumulative distribution function F(x) of the
    hypothesis, G(x)<=F(x), resp. G(x)>=F(x).
    Examples
    --------
    >>> from scipy import stats
    >>> import numpy as np
    >>> from scipy.stats import kstest
    >>> x = np.linspace(-15,15,9)
    >>> kstest(x,'norm')
    (0.44435602715924361, 0.038850142705171065)
    >>> np.random.seed(987654321) # set random seed to get the same result
    >>> kstest('norm','',N=100)
    (0.058352892479417884, 0.88531190944151261)
    is equivalent to this
    >>> np.random.seed(987654321)
    >>> kstest(stats.norm.rvs(size=100),'norm')
    (0.058352892479417884, 0.88531190944151261)
    Test against one-sided alternative hypothesis:
    >>> np.random.seed(987654321)
    Shift distribution to larger values, so that cdf_dgp(x)< norm.cdf(x):
    >>> x = stats.norm.rvs(loc=0.2, size=100)
    >>> kstest(x,'norm', alternative = 'less')
    (0.12464329735846891, 0.040989164077641749)
    Reject equal distribution against alternative hypothesis: less
    >>> kstest(x,'norm', alternative = 'greater')
    (0.0072115233216311081, 0.98531158590396395)
    Don't reject equal distribution against alternative hypothesis: greater
    >>> kstest(x,'norm', mode='asymp')
    (0.12464329735846891, 0.08944488871182088)
    Testing t distributed random variables against normal distribution:
    With 100 degrees of freedom the t distribution looks close to the normal
    distribution, and the kstest does not reject the hypothesis that the sample
    came from the normal distribution
    >>> np.random.seed(987654321)
    >>> stats.kstest(stats.t.rvs(100,size=100),'norm')
    (0.072018929165471257, 0.67630062862479168)
    With 3 degrees of freedom the t distribution looks sufficiently different
    from the normal distribution, that we can reject the hypothesis that the
    sample came from the normal distribution at a alpha=10% level
    >>> np.random.seed(987654321)
    >>> stats.kstest(stats.t.rvs(3,size=100),'norm')
    (0.131016895759829, 0.058826222555312224)
    """
    # Resolve string specs: if `rvs` names a distribution, take both the
    # sampler and (unless given separately) the cdf from scipy.stats.
    # NOTE: basestring is Python-2-only; this module targets Python 2.
    if isinstance(rvs, basestring):
        #cdf = getattr(stats, rvs).cdf
        if (not cdf) or (cdf == rvs):
            cdf = getattr(distributions, rvs).cdf
            rvs = getattr(distributions, rvs).rvs
        else:
            raise AttributeError('if rvs is string, cdf has to be the same distribution')
    if isinstance(cdf, basestring):
        cdf = getattr(distributions, cdf).cdf
    if callable(rvs):
        # Draw a fresh sample of size N from the generator.
        kwds = {'size':N}
        vals = np.sort(rvs(*args,**kwds))
    else:
        # `rvs` is the data itself; N is taken from the data length.
        vals = np.sort(rvs)
        N = len(vals)
    cdfvals = cdf(vals, *args)
    # D+ : largest positive gap ECDF - F; used for 'greater' and 'two_sided'.
    if alternative in ['two_sided', 'greater']:
        Dplus = (np.arange(1.0, N+1)/N - cdfvals).max()
        if alternative == 'greater':
            return Dplus, distributions.ksone.sf(Dplus,N)
    # D- : largest positive gap F - ECDF; used for 'less' and 'two_sided'.
    if alternative in ['two_sided', 'less']:
        Dmin = (cdfvals - np.arange(0.0, N)/N).max()
        if alternative == 'less':
            return Dmin, distributions.ksone.sf(Dmin,N)
    if alternative == 'two_sided':
        D = np.max([Dplus,Dmin])
        if mode == 'asymp':
            return D, distributions.kstwobign.sf(D*np.sqrt(N))
        if mode == 'approx':
            pval_two = distributions.kstwobign.sf(D*np.sqrt(N))
            # Asymptotic distribution is accurate for large N or large
            # p-values; otherwise use twice the one-sided exact tail.
            if N > 2666 or pval_two > 0.80 - N*0.3/1000.0 :
                return D, distributions.kstwobign.sf(D*np.sqrt(N))
            else:
                return D, distributions.ksone.sf(D,N)*2
    # NOTE(review): an unrecognized `alternative` (or two_sided with an
    # unrecognized `mode`) falls through and returns None implicitly.
def chisquare(f_obs, f_exp=None, ddof=0):
    """Calculates a one-way chi square test.

    The chi square test tests the null hypothesis that the categorical
    data has the given frequencies.

    Parameters
    ----------
    f_obs : array
        Observed frequencies in each category.
    f_exp : array, optional
        Expected frequencies in each category.  By default the categories
        are assumed to be equally likely.
    ddof : int, optional
        Adjustment to the degrees of freedom for the p-value.

    Returns
    -------
    chisquare statistic : float
        The chisquare test statistic.
    p : float
        The p-value of the test, from a chi square distribution with
        ``k - 1 - ddof`` degrees of freedom.

    Notes
    -----
    The test is invalid when the observed or expected frequencies in any
    category are too small; a typical rule is that all of them should be
    at least 5.  The default degrees of freedom, k-1, apply when no
    parameters of the distribution were estimated from the data; use
    `ddof` to correct for estimated parameters.

    References
    ----------
    .. [1] Lowry, Richard.  "Concepts and Applications of Inferential
           Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
    """
    observed = asarray(f_obs)
    num_bins = len(observed)
    if f_exp is None:
        # Equal likelihood: spread the total count evenly over the bins.
        f_exp = array([np.sum(observed, axis=0) / float(num_bins)] * num_bins,
                      float)
    f_exp = f_exp.astype(float)
    stat = np.add.reduce((observed - f_exp) ** 2 / f_exp)
    return stat, chisqprob(stat, num_bins - 1 - ddof)
def ks_2samp(data1, data2):
    """Computes the Kolmogorov-Smirnov statistic on 2 samples.

    This is a two-sided test for the null hypothesis that 2 independent
    samples are drawn from the same continuous distribution.

    Parameters
    ----------
    data1, data2 : sequence of 1-D ndarrays
        Two arrays of sample observations assumed to be drawn from a
        continuous distribution; sample sizes can be different.

    Returns
    -------
    D : float
        KS statistic.
    p-value : float
        Two-tailed p-value, from the asymptotic Kolmogorov-Smirnov
        distribution.

    Notes
    -----
    If the K-S statistic is small or the p-value is high, then we cannot
    reject the hypothesis that the distributions of the two samples are
    the same.  One-sided variants are not implemented.
    """
    data1, data2 = map(asarray, (data1, data2))
    # Sample sizes (previously computed twice, via shape[0] and len()).
    n1 = data1.shape[0]
    n2 = data2.shape[0]
    data1 = np.sort(data1)
    data2 = np.sort(data2)
    data_all = np.concatenate([data1, data2])
    # Empirical CDFs of both samples evaluated at every pooled data point.
    cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0 * n1)
    cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0 * n2)
    # Note: d is the absolute, not signed, distance.
    d = np.max(np.absolute(cdf1 - cdf2))
    en = np.sqrt(n1 * n2 / float(n1 + n2))
    try:
        # Asymptotic two-sided p-value with a small-sample correction term.
        prob = ksprob((en + 0.12 + 0.11 / en) * d)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit.  Keep the best-effort fallback for degenerate input.
        prob = 1.0
    return d, prob
def mannwhitneyu(x, y, use_continuity=True):
    """Computes the Mann-Whitney rank test on samples x and y.

    Parameters
    ----------
    x, y : array_like
        Array of samples, should be one-dimensional.
    use_continuity : bool, optional
        Whether a continuity correction (1/2.) should be taken into
        account. Default is True.

    Returns
    -------
    u : float
        The Mann-Whitney statistic (the smaller of the two U values).
    prob : float
        One-sided p-value assuming an asymptotic normal distribution.

    Raises
    ------
    ValueError
        If all numbers are identical (tie correction factor is zero).

    Notes
    -----
    Use only when the number of observations in each sample is > 20 and
    you have 2 independent samples of ranks.  This test corrects for ties
    and by default uses a continuity correction.  The reported p-value is
    one-sided; multiply by 2 for the two-sided p-value.
    """
    x = asarray(x)
    y = asarray(y)
    n1 = len(x)
    n2 = len(y)
    ranked = rankdata(np.concatenate((x, y)))
    rankx = ranked[0:n1]  # ranks of the x sample
    u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0)  # calc U for x
    u2 = n1*n2 - u1  # remainder is U for y
    bigu = max(u1, u2)
    smallu = min(u1, u2)
    T = tiecorrect(ranked)  # correction factor for tied scores
    if T == 0:
        # Message fixed: previously said 'amannwhitneyu', a leftover of an
        # older function name.
        raise ValueError('All numbers are identical in mannwhitneyu')
    sd = np.sqrt(T*n1*n2*(n1+n2+1)/12.0)
    if use_continuity:
        # Normal approximation for prob calc with continuity correction.
        z = abs((bigu-0.5-n1*n2/2.0) / sd)
    else:
        z = abs((bigu-n1*n2/2.0) / sd)  # normal approximation for prob calc
    return smallu, distributions.norm.sf(z)
def tiecorrect(rankvals):
    """Tie-corrector for ties in Mann Whitney U and Kruskal Wallis H tests.

    Parameters
    ----------
    rankvals : array_like
        The rank values (e.g. the output of ``rankdata``).

    Returns
    -------
    float
        The correction factor T for U or H: 1.0 means no ties; the factor
        shrinks toward 0.0 as ties dominate.

    References
    ----------
    See Siegel, S. (1956) Nonparametric Statistics for the Behavioral
    Sciences.  New York: McGraw-Hill.  Code adapted from |Stat rankind.c
    code.
    """
    # np.sort replaces the file-local fastsort helper (only the sorted
    # values were used, the argsort was discarded), and the local name no
    # longer shadows the builtin `sorted`.
    ranks = np.sort(asarray(rankvals))
    n = len(ranks)
    T = 0.0
    i = 0
    while i < n - 1:
        if ranks[i] == ranks[i + 1]:
            # Count the full run of equal values and accumulate t**3 - t.
            nties = 1
            while (i < n - 1) and (ranks[i] == ranks[i + 1]):
                nties += 1
                i += 1
            T = T + nties ** 3 - nties
        i += 1
    return 1.0 - T / float(n ** 3 - n)
def ranksums(x, y):
    """Compute the Wilcoxon rank-sum statistic for two samples.

    The Wilcoxon rank-sum test tests the null hypothesis that two sets of
    measurements are drawn from the same distribution; the alternative is
    that values in one sample are more likely to be larger than values in
    the other.  Use it for two samples from continuous distributions; it
    does not handle ties (for tie-handling and an optional continuity
    correction see `stats.mannwhitneyu`).

    Parameters
    ----------
    x, y : array_like
        The data from the two samples.

    Returns
    -------
    z-statistic : float
        The test statistic under the large-sample approximation that the
        rank sum statistic is normally distributed.
    p-value : float
        The two-sided p-value of the test.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
    """
    x, y = map(np.asarray, (x, y))
    n1 = len(x)
    n2 = len(y)
    combined = np.concatenate((x, y))
    ranked = rankdata(combined)
    # Sum of the ranks belonging to the first sample.
    rank_sum = np.sum(ranked[:n1], axis=0)
    # Expected rank sum and standard deviation under the null hypothesis.
    expected = n1 * (n1 + n2 + 1) / 2.0
    z = (rank_sum - expected) / np.sqrt(n1 * n2 * (n1 + n2 + 1) / 12.0)
    prob = 2 * distributions.norm.sf(abs(z))
    return z, prob
def kruskal(*args):
    """
    Compute the Kruskal-Wallis H-test for independent samples
    The Kruskal-Wallis H-test tests the null hypothesis that the population
    median of all of the groups are equal. It is a non-parametric version of
    ANOVA. The test works on 2 or more independent samples, which may have
    different sizes. Note that rejecting the null hypothesis does not
    indicate which of the groups differs. Post-hoc comparisons between
    groups are required to determine which groups are different.
    Parameters
    ----------
    sample1, sample2, ... : array_like
        Two or more arrays with the sample measurements can be given as
        arguments.
    Returns
    -------
    H-statistic : float
        The Kruskal-Wallis H statistic, corrected for ties
    p-value : float
        The p-value for the test using the assumption that H has a chi
        square distribution
    Notes
    -----
    Due to the assumption that H has a chi square distribution, the number
    of samples in each group must not be too small. A typical rule is
    that each sample must have at least 5 measurements.
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
    """
    if len(args) < 2:
        raise ValueError("Need at least two groups in stats.kruskal()")
    # NOTE(review): relies on Python 2's map() returning a list -- `n` is
    # indexed below, which would fail with Python 3's lazy map object.
    n = map(len,args)
    all = []  # NOTE: shadows the builtin all(); kept as-is
    # .tolist() implies the samples are expected to be ndarrays -- confirm.
    for i in range(len(args)):
        all.extend(args[i].tolist())
    # Pooled ranks over every observation, then the tie correction factor.
    ranked = list(rankdata(all))
    T = tiecorrect(ranked)
    # Split the pooled ranks back into per-group rank lists (consuming
    # `ranked` from the front, in the original argument order).
    args = list(args)
    for i in range(len(args)):
        args[i] = ranked[0:n[i]]
        del ranked[0:n[i]]
    # Sum of squared per-group rank sums, each normalized by group size.
    rsums = []
    for i in range(len(args)):
        rsums.append(np.sum(args[i],axis=0)**2)
        rsums[i] = rsums[i] / float(n[i])
    ssbn = np.sum(rsums,axis=0)
    totaln = np.sum(n,axis=0)
    # Kruskal-Wallis H statistic, then divided by the tie correction.
    h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
    df = len(args) - 1
    if T == 0:
        raise ValueError('All numbers are identical in kruskal')
    h = h / float(T)
    return h, chisqprob(h,df)
def friedmanchisquare(*args):
    """
    Computes the Friedman test for repeated measurements
    The Friedman test tests the null hypothesis that repeated measurements of
    the same individuals have the same distribution. It is often used
    to test for consistency among measurements obtained in different ways.
    For example, if two measurement techniques are used on the same set of
    individuals, the Friedman test can be used to determine if the two
    measurement techniques are consistent.
    Parameters
    ----------
    measurements1, measurements2, measurements3... : array_like
        Arrays of measurements.  All of the arrays must have the same number
        of elements.  At least 3 sets of measurements must be given.
    Returns
    -------
    friedman chi-square statistic : float
        the test statistic, correcting for ties
    p-value : float
        the associated p-value assuming that the test statistic has a chi
        squared distribution
    Notes
    -----
    Due to the assumption that the test statistic has a chi squared
    distribution, the p-value is only reliable for n > 10 and more than
    6 repeated measurements.
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Friedman_test
    """
    k = len(args)
    if k < 3:
        raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n')
    n = len(args[0])
    for i in range(1,k):
        # NOTE: '<>' is the Python-2-only inequality operator; this module
        # predates Python 3 and must stay on Python 2.
        if len(args[i]) <> n:
            raise ValueError('Unequal N in friedmanchisquare. Aborting.')
    # Rank data.  apply() is also Python-2-only; _support.abut presumably
    # column-binds the samples into one 2-D array -- confirm.
    data = apply(_support.abut,args)
    data = data.astype(float)
    for i in range(len(data)):
        data[i] = rankdata(data[i])
    # Handle ties: accumulate t*(t^2 - 1) over every group of repeats.
    ties = 0
    for i in range(len(data)):
        replist, repnum = find_repeats(array(data[i]))
        for t in repnum:
            ties += t*(t*t-1)
    # Tie-correction factor for the chi-square approximation.
    c = 1 - ties / float(k*(k*k-1)*n)
    ssbn = pysum(pysum(data)**2)
    # Friedman chi-square statistic with tie correction applied.
    chisq = ( 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1) ) / c
    return chisq, chisqprob(chisq,k-1)
#####################################
#### PROBABILITY CALCULATIONS ####
#####################################
# Alias for the standard normal CDF, P(Z <= z) (special.ndtr).
zprob = special.ndtr
def chisqprob(chisq, df):
    """Probability value (1-tail) for the Chi^2 probability distribution.

    Broadcasting rules apply.

    Parameters
    ----------
    chisq : array_like or float > 0
        Chi-square statistic value(s).
    df : array_like or float, probably int >= 1
        Degrees of freedom.

    Returns
    -------
    chisqprob : ndarray
        The area from `chisq` to infinity under the Chi^2 probability
        distribution with degrees of freedom `df`.
    """
    # special.chdtrc takes (df, x), hence the swapped argument order.
    return special.chdtrc(df, chisq)
# Survival function of the Kolmogorov distribution, used by the K-S tests.
ksprob = special.kolmogorov
# Upper tail (survival function) of the F distribution.
fprob = special.fdtrc
def betai(a, b, x):
    """Returns the incomplete beta function I_x(a, b).

    I_x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
    where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
    function of a.  The standard broadcasting rules apply to a, b, and x.

    Parameters
    ----------
    a : array_like or float > 0
    b : array_like or float > 0
    x : array_like or float
        x will be clipped to be no greater than 1.0.

    Returns
    -------
    betai : ndarray
        Incomplete beta function.
    """
    x = np.asarray(x)
    # Values above 1.0 are treated as 1.0, for which I_1(a, b) == 1.
    x = np.where(x < 1.0, x, 1.0)
    return special.betainc(a, b, x)
#####################################
####### ANOVA CALCULATIONS #######
#####################################
def glm(data, para):
    """Calculates a linear model fit ...
    anova/ancova/lin-regress/t-test/etc. Taken from:
    Peterson et al. Statistical limitations in functional neuroimaging
    I. Non-inferential methods and statistical models. Phil Trans Royal Soc
    Lond B 354: 1239-1260.
    Returns: statistic, p-value ???
    """
    if len(para) != len(data):
        # NOTE(review): message mentions 'aglm', the old function name.
        raise ValueError("data and para must be same length in aglm")
    n = len(para)
    # Unique group labels; one design-matrix column per label.
    p = _support.unique(para)
    x = zeros((n,len(p)))  # design matrix
    for l in range(len(p)):
        # Indicator column: 1 where the observation belongs to group p[l].
        x[:,l] = para == p[l]
    # fixme: normal equations are bad. Use linalg.lstsq instead.
    b = dot(dot(linalg.inv(dot(np.transpose(x),x)),    # i.e., b=inv(X'X)X'Y
            np.transpose(x)),data)
    diffs = (data - dot(x,b))
    # Residual variance estimate with n - len(p) degrees of freedom.
    s_sq = 1./(n-len(p)) * dot(np.transpose(diffs), diffs)
    if len(p) == 2:  # ttest_ind
        # Contrast between the two group means.
        c = array([1,-1])
        df = n-2
        fact = np.sum(1.0/np.sum(x,0),axis=0)  # i.e., 1/n1 + 1/n2 + 1/n3 ...
        t = dot(c,b) / np.sqrt(s_sq*fact)
        probs = betai(0.5*df,0.5,float(df)/(df+t*t))
        return t, probs
    else:
        raise ValueError("only ttest_ind implemented")
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
    """Calculation of Wilks lambda F-statistic for multivarite data, per
    Maxwell & Delaney p.657.

    `ER` and `EF` are the error matrices of the restricted and full models
    (scalars are promoted to 1x1 matrices); `a` and `b` are the factor
    level counts.  Note that `dfnum` and `dfden` are accepted but not used
    by the computation.
    """
    if isinstance(ER, (int, float)):
        ER = array([[ER]])
    if isinstance(EF, (int, float)):
        EF = array([[EF]])
    lmbda = linalg.det(EF) / linalg.det(ER)
    a1 = a - 1
    b1 = b - 1
    # The special case a1^2 + b1^2 == 5 would make the general formula
    # divide by zero; q is defined as 1 there.
    if a1 ** 2 + b1 ** 2 == 5:
        q = 1
    else:
        q = np.sqrt((a1 ** 2 * b1 ** 2 - 2) / (a1 ** 2 + b1 ** 2 - 5))
    lam_root = lmbda ** (1.0 / q)
    numerator = (1 - lam_root) * a1 * b1
    denominator = lam_root / (numerator * q - 0.5 * a1 * b1 + 1)
    return numerator / denominator
def f_value(ER, EF, dfR, dfF):
    """Returns an F-statistic for a restricted vs. unrestricted model.

    Parameters
    ----------
    ER : float
        Sum of squared residuals for the restricted model or null
        hypothesis.
    EF : float
        Sum of squared residuals for the unrestricted model or alternate
        hypothesis.
    dfR : int
        Degrees of freedom in the restricted model.
    dfF : int
        Degrees of freedom in the unrestricted model.

    Returns
    -------
    F-statistic : float
    """
    numerator = (ER - EF) / float(dfR - dfF)
    denominator = EF / float(dfF)
    return numerator / denominator
def f_value_multivariate(ER, EF, dfnum, dfden):
    """Returns a multivariate F-statistic.

    Parameters
    ----------
    ER : ndarray
        Error associated with the null hypothesis (the Restricted model),
        from a multivariate F calculation.
    EF : ndarray
        Error associated with the alternate hypothesis (the Full model),
        from a multivariate F calculation.
    dfnum : int
        Degrees of freedom of the Restricted model.
    dfden : int
        Degrees of freedom associated with the Restricted model.

    Returns
    -------
    fstat : float
        The computed F-statistic.
    """
    # Promote scalar errors to 1x1 matrices so linalg.det applies.
    if isinstance(ER, (int, float)):
        ER = array([[ER]])
    if isinstance(EF, (int, float)):
        EF = array([[EF]])
    numerator = (linalg.det(ER) - linalg.det(EF)) / float(dfnum)
    denominator = linalg.det(EF) / float(dfden)
    return numerator / denominator
#####################################
####### SUPPORT FUNCTIONS ########
#####################################
def ss(a, axis=0):
    """Return the sum of squares of the elements of `a` along `axis`.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        The axis along which to calculate.  If None, use the whole array.
        Default is 0, i.e. along the first axis.

    Returns
    -------
    ss : ndarray
        The sum along the given axis for (a**2).

    See also
    --------
    square_of_sums : The square(s) of the sum(s) (the opposite of `ss`).

    Examples
    --------
    >>> from scipy import stats
    >>> stats.ss(np.array([1., 2., 5.]))
    30.0
    """
    arr, axis = _chk_asarray(a, axis)
    return np.sum(arr * arr, axis)
def square_of_sums(a, axis=0):
    """Sum the elements of `a` and return the square(s) of that sum.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        If axis is None, ravel `a` first.  If `axis` is an integer, this
        will be the axis over which to operate.  Defaults to 0.

    Returns
    -------
    ss : float or ndarray
        The square of the sum over `axis`.

    See also
    --------
    ss : The sum of squares (the opposite of `square_of_sums`).

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(20).reshape(5,4)
    >>> stats.square_of_sums(a)
    array([ 1600.,  2025.,  2500.,  3025.])
    """
    arr, axis = _chk_asarray(a, axis)
    total = np.sum(arr, axis)
    # Promote to float before squaring, matching both branches below.
    if np.isscalar(total):
        return float(total) * total
    return total.astype(float) * total
def fastsort(a):
    """Sort an array and return the sorted values with their argsort.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    sorted : ndarray
        The values of `a` in ascending order.
    argsort : ndarray of type int
        Indices such that ``a[argsort]`` equals the sorted values.
    """
    order = np.argsort(a)
    return a[order], order
def rankdata(a):
    """Ranks the data, dealing with ties appropriately.

    Equal values are assigned a rank that is the average of the ranks that
    would have been otherwise assigned to all of the values within that
    set.  Ranks begin at 1, not 0.

    Parameters
    ----------
    a : array_like
        This array is first flattened.

    Returns
    -------
    rankdata : ndarray
        An array of length equal to the size of `a`, containing rank
        scores.

    Examples
    --------
    >>> stats.rankdata([0, 2, 2, 3])
    array([ 1. ,  2.5,  2.5,  4. ])
    """
    a = np.ravel(a)
    n = len(a)
    # np.argsort replaces the file-local fastsort helper directly, and
    # range() replaces the Python-2-only xrange so the function runs
    # unchanged on both Python 2 and Python 3.
    ivec = np.argsort(a)
    svec = a[ivec]
    sumranks = 0
    dupcount = 0
    newarray = np.zeros(n, float)
    for i in range(n):
        sumranks += i
        dupcount += 1
        # End of a run of equal values: give each member the average rank
        # (+1 because ranks are 1-based).
        if i == n - 1 or svec[i] != svec[i + 1]:
            averank = sumranks / float(dupcount) + 1
            for j in range(i - dupcount + 1, i + 1):
                newarray[ivec[j]] = averank
            sumranks = 0
            dupcount = 0
    return newarray
| gpl-3.0 |
olapaola/olapaola-android-scripting | python/src/Lib/DocXMLRPCServer.py | 61 | 10599 | """Self documenting XML-RPC Server.
This module can be used to create XML-RPC servers that
serve pydoc-style documentation in response to HTTP
GET requests. This documentation is dynamically generated
based on the functions and methods registered with the
server.
This module is built upon the pydoc and SimpleXMLRPCServer
modules.
"""
import pydoc
import inspect
import re
import sys
from SimpleXMLRPCServer import (SimpleXMLRPCServer,
SimpleXMLRPCRequestHandler,
CGIXMLRPCRequestHandler,
resolve_dotted_attribute)
class ServerHTMLDoc(pydoc.HTMLDoc):
    """Class used to generate pydoc HTML document for a server"""

    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
        """Mark up some plain text, given a context of symbols to look for.
        Each context dictionary maps object names to anchor names."""
        escape = escape or self.escape
        results = []
        here = 0

        # XXX Note that this regular expression does not allow for the
        # hyperlinking of arbitrary strings being used as method
        # names. Only methods with names consisting of word characters
        # and '.'s are hyperlinked.
        pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
                                r'RFC[- ]?(\d+)|'
                                r'PEP[- ]?(\d+)|'
                                r'(self\.)?((?:\w|\.)+))\b')
        while 1:
            match = pattern.search(text, here)
            if not match: break
            start, end = match.span()
            results.append(escape(text[here:start]))

            all, scheme, rfc, pep, selfdot, name = match.groups()
            if scheme:
                # Escape double quotes as well: the URL is interpolated into
                # a double-quoted href attribute below.  (Previously this
                # replace was a no-op, leaving raw quotes in the attribute.)
                url = escape(all).replace('"', '&quot;')
                results.append('<a href="%s">%s</a>' % (url, url))
            elif rfc:
                url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif pep:
                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif text[end:end+1] == '(':
                results.append(self.namelink(name, methods, funcs, classes))
            elif selfdot:
                results.append('self.<strong>%s</strong>' % name)
            else:
                results.append(self.namelink(name, classes))
            here = end
        results.append(escape(text[here:]))
        return ''.join(results)

    def docroutine(self, object, name, mod=None,
                   funcs={}, classes={}, methods={}, cl=None):
        """Produce HTML documentation for a function or method object."""
        anchor = (cl and cl.__name__ or '') + '-' + name
        note = ''

        title = '<a name="%s"><strong>%s</strong></a>' % (
            self.escape(anchor), self.escape(name))

        if inspect.ismethod(object):
            args, varargs, varkw, defaults = inspect.getargspec(object.im_func)
            # exclude the argument bound to the instance, it will be
            # confusing to the non-Python user
            argspec = inspect.formatargspec(
                    args[1:],
                    varargs,
                    varkw,
                    defaults,
                    formatvalue=self.formatvalue
                )
        elif inspect.isfunction(object):
            args, varargs, varkw, defaults = inspect.getargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, formatvalue=self.formatvalue)
        else:
            argspec = '(...)'

        if isinstance(object, tuple):
            # (argstring, docstring) pair supplied by the server instance
            # instead of a real callable.
            argspec = object[0] or argspec
            docstring = object[1] or ""
        else:
            docstring = pydoc.getdoc(object)

        decl = title + argspec + (note and self.grey(
               '<font face="helvetica, arial">%s</font>' % note))

        doc = self.markup(
            docstring, self.preformat, funcs, classes, methods)
        doc = doc and '<dd><tt>%s</tt></dd>' % doc
        return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)

    def docserver(self, server_name, package_documentation, methods):
        """Produce HTML documentation for an XML-RPC server."""
        # Map both the method name and the method object itself to the
        # same in-page anchor so markup() can hyperlink either form.
        fdict = {}
        for key, value in methods.items():
            fdict[key] = '#-' + key
            fdict[value] = fdict[key]

        server_name = self.escape(server_name)
        head = '<big><big><strong>%s</strong></big></big>' % server_name
        result = self.heading(head, '#ffffff', '#7799ee')

        doc = self.markup(package_documentation, self.preformat, fdict)
        doc = doc and '<tt>%s</tt>' % doc
        result = result + '<p>%s</p>\n' % doc

        contents = []
        method_items = sorted(methods.items())
        for key, value in method_items:
            contents.append(self.docroutine(value, key, funcs=fdict))
        result = result + self.bigsection(
            'Methods', '#ffffff', '#eeaa77', pydoc.join(contents))

        return result
class XMLRPCDocGenerator:
    """Mix-in that adds self-documentation support to an XML-RPC server.

    This class is designed as a mix-in and should not be constructed
    directly; the class it is combined with supplies system_listMethods(),
    self.funcs and self.instance.
    """

    def __init__(self):
        # Defaults shown on the generated documentation page.
        self.server_name = 'XML-RPC Server Documentation'
        self.server_documentation = \
            "This server exports the following methods through the XML-RPC "\
            "protocol."
        self.server_title = 'XML-RPC Server Documentation'

    def set_server_title(self, server_title):
        """Set the HTML title of the generated server documentation"""
        self.server_title = server_title

    def set_server_name(self, server_name):
        """Set the name of the generated HTML server documentation"""
        self.server_name = server_name

    def set_server_documentation(self, server_documentation):
        """Set the documentation string for the entire server."""
        self.server_documentation = server_documentation

    def generate_html_documentation(self):
        """generate_html_documentation() => html documentation for the server

        Generates HTML documentation for the server using introspection for
        installed functions and instances that do not implement the
        _dispatch method. Alternatively, instances can choose to implement
        the _get_method_argstring(method_name) method to provide the
        argument string used in the documentation and the
        _methodHelp(method_name) method to provide the help text used
        in the documentation."""
        methods = {}

        for method_name in self.system_listMethods():
            # Plain registered functions document themselves.
            if method_name in self.funcs:
                methods[method_name] = self.funcs[method_name]
                continue

            if self.instance is None:
                assert 0, "Could not find method in self.functions and no "\
                          "instance installed"

            # Ask the instance for an explicit argstring/help pair first.
            argstring = None
            helptext = None
            if hasattr(self.instance, '_get_method_argstring'):
                argstring = self.instance._get_method_argstring(method_name)
            if hasattr(self.instance, '_methodHelp'):
                helptext = self.instance._methodHelp(method_name)

            method_info = (argstring, helptext)
            if method_info != (None, None) or hasattr(self.instance, '_dispatch'):
                # Either the instance supplied documentation, or it routes
                # calls through _dispatch so its attributes can't be probed.
                methods[method_name] = method_info
            else:
                try:
                    methods[method_name] = resolve_dotted_attribute(
                            self.instance,
                            method_name
                        )
                except AttributeError:
                    methods[method_name] = method_info

        documenter = ServerHTMLDoc()
        documentation = documenter.docserver(
                self.server_name,
                self.server_documentation,
                methods
            )

        return documenter.page(self.server_title, documentation)
class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
    """XML-RPC and documentation request handler class.

    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.

    Handles all HTTP GET requests and interprets them as requests
    for documentation.
    """

    def do_GET(self):
        """Handles the HTTP GET request.

        Interpret all HTTP GET requests as requests for server
        documentation.
        """
        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return

        # The server mixes in XMLRPCDocGenerator, which supplies
        # generate_html_documentation().
        response = self.server.generate_html_documentation()
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)

        # shut down the connection
        self.wfile.flush()
        # Half-close the socket for writing so the client sees EOF.
        self.connection.shutdown(1)
class DocXMLRPCServer( SimpleXMLRPCServer,
                       XMLRPCDocGenerator):
    """XML-RPC and HTML documentation server.

    Adds the ability to serve server documentation to the capabilities
    of SimpleXMLRPCServer.
    """

    def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler,
                 logRequests=1, allow_none=False, encoding=None,
                 bind_and_activate=True):
        # Initialise the XML-RPC machinery first, then the doc-generator
        # mix-in (which only sets up default title/name/description strings).
        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
                                    allow_none, encoding, bind_and_activate)
        XMLRPCDocGenerator.__init__(self)
class DocCGIXMLRPCRequestHandler( CGIXMLRPCRequestHandler,
                                  XMLRPCDocGenerator):
    """Handler for XML-RPC data and documentation requests passed through
    CGI"""

    def handle_get(self):
        """Handles the HTTP GET request.

        Interpret all HTTP GET requests as requests for server
        documentation.
        """
        response = self.generate_html_documentation()

        # Emit a minimal CGI response header followed by the HTML body.
        print 'Content-Type: text/html'
        print 'Content-Length: %d' % len(response)
        print
        sys.stdout.write(response)

    def __init__(self):
        CGIXMLRPCRequestHandler.__init__(self)
        XMLRPCDocGenerator.__init__(self)
| apache-2.0 |
elba7r/lite-system | erpnext/patches/v5_4/fix_missing_item_images.py | 41 | 4287 | from __future__ import unicode_literals
import frappe
import os
from frappe.utils import get_files_path
from frappe.utils.file_manager import get_content_hash
def execute():
	"""Patch entry point: re-attach orphaned item image files and repair
	their File records, then commit."""
	files_path = get_files_path()

	# Files on disk whose File record exists but is not attached to any doc.
	orphans = get_unlinked_files(files_path)
	if not orphans:
		return

	repaired = fix_files_for_item(files_path, orphans)

	# Anything not repaired via an Item link still gets renamed/hashed.
	for file_url in orphans:
		if file_url not in repaired:
			rename_and_set_content_hash(files_path, orphans, file_url)

	frappe.db.commit()
def fix_files_for_item(files_path, unlinked_files):
fixed_files = []
# make a list of files/something and /files/something to check in child table's image column
file_urls = [key for key in unlinked_files.keys()] + ["/" + key for key in unlinked_files.keys()]
file_item_code = get_file_item_code(file_urls)
for (file_url, item_code), children in file_item_code.items():
new_file_url = "/files/{0}".format(unlinked_files[file_url]["file_name"])
for row in children:
# print file_url, new_file_url, item_code, row.doctype, row.name
# replace image in these rows with the new file url
frappe.db.set_value(row.doctype, row.name, "image", new_file_url, update_modified=False)
# set it as attachment of this item code
file_data = frappe.get_doc("File", unlinked_files[file_url]["file"])
file_data.attached_to_doctype = "Item"
file_data.attached_to_name = item_code
file_data.flags.ignore_folder_validate = True
try:
file_data.save()
except IOError:
print "File {0} does not exist".format(new_file_url)
# marking fix to prevent further errors
fixed_files.append(file_url)
continue
# set it as image in Item
if not frappe.db.get_value("Item", item_code, "image"):
frappe.db.set_value("Item", item_code, "image", new_file_url, update_modified=False)
rename_and_set_content_hash(files_path, unlinked_files, file_url)
fixed_files.append(file_url)
# commit
frappe.db.commit()
return fixed_files
def rename_and_set_content_hash(files_path, unlinked_files, file_url):
	"""Rename the on-disk file to its recorded file_name and backfill the
	File record's content_hash if it is missing."""
	entry = unlinked_files[file_url]
	src = os.path.join(files_path, entry["file"])
	dest = os.path.join(files_path, entry["file_name"])
	if not os.path.exists(dest):
		os.rename(src, dest)

	# set content hash if missing
	if not frappe.db.get_value("File", entry["file"], "content_hash"):
		with open(dest, "r") as f:
			frappe.db.set_value("File", entry["file"], "content_hash",
				get_content_hash(f.read()))
def get_unlinked_files(files_path):
	# find files that have the same name as a File doc
	# and the file_name mentioned in that File doc doesn't exist
	# and it isn't already attached to a doc
	#
	# Returns {"files/<name>": file_data} where file_data also carries the
	# on-disk filename under the "file" key.
	unlinked_files = {}
	files = os.listdir(files_path)
	for file in files:
		if not frappe.db.exists("File", {"file_name": file}):
			# NOTE(review): looks up the File whose docname equals the
			# on-disk filename — presumably these records were created with
			# the filename as the name; verify against actual data.
			file_data = frappe.db.get_value("File", {"name": file},
				["file_name", "attached_to_doctype", "attached_to_name"], as_dict=True)
			if (file_data
				and file_data.file_name
				and file_data.file_name not in files
				and not file_data.attached_to_doctype
				and not file_data.attached_to_name):
				file_data["file"] = file
				# Key uses the URL form stored in image fields ("files/<name>").
				unlinked_files["files/{0}".format(file)] = file_data
	return unlinked_files
def get_file_item_code(file_urls):
	"""Map (image URL, item code) pairs to the child-table rows whose image
	field must be rewritten."""
	file_item_code = {}

	# Child doctypes that carry both an item_code and an image field.
	doctypes = frappe.db.sql_list("""select name from `tabDocType` dt
		where istable=1
		and exists (select name from `tabDocField` df where df.parent=dt.name and df.fieldname='item_code')
		and exists (select name from `tabDocField` df where df.parent=dt.name and df.fieldname='image')""")

	placeholders = ", ".join(["%s"] * len(file_urls))
	for doctype in doctypes:
		rows = frappe.db.sql("""select name, image, item_code, '{0}' as doctype from `tab{0}`
			where image in ({1})""".format(doctype, placeholders),
			file_urls, as_dict=True)

		for row in rows:
			file_item_code.setdefault((row.image, row.item_code), []).append(row)

	return file_item_code
| gpl-3.0 |
faun/django_test | django/contrib/webdesign/lorem_ipsum.py | 439 | 4872 | """
Utility functions for generating "lorem ipsum" Latin text.
"""
import random
# The canonical first paragraph of "lorem ipsum", emitted verbatim when
# callers ask for "common" text (see paragraphs() and words()).
COMMON_P = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'

# Pool of Latin words that random sentences and paragraphs are sampled from.
WORDS = ('exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
        'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
        'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
        'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
        'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
        'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
        'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
        'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
        'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
        'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
        'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
        'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
        'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
        'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
        'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
        'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
        'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
        'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
        'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
        'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
        'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
        'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
        'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
        'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
        'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
        'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
        'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
        'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
        'maxime', 'corrupti')

# The first 19 words of the canonical paragraph, used by words(common=True).
COMMON_WORDS = ('lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
        'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
        'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua')
def sentence():
    """
    Returns a randomly generated sentence of lorem ipsum text.

    The first word is capitalized, and the sentence ends in either a period or
    question mark. Commas are added at random.
    """
    # Draw 1-5 comma-separated sections, each made of 3-12 sampled words.
    sections = []
    for _ in range(random.randint(1, 5)):
        sections.append(u' '.join(random.sample(WORDS, random.randint(3, 12))))
    body = u', '.join(sections)
    # Sentence-case the result and close with random end punctuation.
    return u'%s%s%s' % (body[0].upper(), body[1:], random.choice('?.'))
def paragraph():
    """
    Returns a randomly generated paragraph of lorem ipsum text.

    The paragraph consists of between 1 and 4 sentences, inclusive.
    """
    return u' '.join(sentence() for _ in range(random.randint(1, 4)))
def paragraphs(count, common=True):
    """
    Returns a list of paragraphs as returned by paragraph().

    If `common` is True, then the first paragraph will be the standard
    'lorem ipsum' paragraph. Otherwise, the first paragraph will be random
    Latin text. Either way, subsequent paragraphs will be random Latin text.
    """
    return [COMMON_P if common and index == 0 else paragraph()
            for index in range(count)]
def words(count, common=True):
    """
    Returns a string of `count` lorem ipsum words separated by a single space.

    If `common` is True, then the first 19 words will be the standard
    'lorem ipsum' words. Otherwise, all words will be selected randomly.
    """
    chosen = list(COMMON_WORDS) if common else []
    remaining = count - len(chosen)
    if remaining > 0:
        # Top up with random samples; each batch is duplicate-free but
        # separate batches may repeat words.
        while remaining > 0:
            batch = min(remaining, len(WORDS))
            chosen += random.sample(WORDS, batch)
            remaining -= batch
    else:
        chosen = chosen[:count]
    return u' '.join(chosen)
| bsd-3-clause |
VeNoMouS/Sick-Beard | sickbeard/metadata/generic.py | 1 | 34333 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os.path
try:
import xml.etree.cElementTree as etree
except ImportError:
import elementtree.ElementTree as etree
import re
import sickbeard
from sickbeard import exceptions, helpers
from sickbeard.metadata import helpers as metadata_helpers
from sickbeard import logger
from sickbeard import encodingKludge as ek
from sickbeard.exceptions import ex
from lib.tvdb_api import tvdb_api, tvdb_exceptions
class GenericMetadata():
    """
    Base class for all metadata providers. Default behavior is meant to mostly
    follow XBMC 12+ metadata standards. Has support for:
    - show metadata file
    - episode metadata file
    - episode thumbnail
    - show fanart
    - show poster
    - show banner
    - season thumbnails (poster)
    - season thumbnails (banner)
    - season all poster
    - season all banner
    """

    def __init__(self,
                 show_metadata=False,
                 episode_metadata=False,
                 fanart=False,
                 poster=False,
                 banner=False,
                 episode_thumbnails=False,
                 season_posters=False,
                 season_banners=False,
                 season_all_poster=False,
                 season_all_banner=False):
        # Each boolean flag enables one artifact type. The flags are
        # (de)serialized as a pipe-delimited string by get_config() /
        # set_config(), so their order must stay stable.

        # Display name; subclasses override this.
        self.name = "Generic"

        # File-naming conventions (XBMC 12+ style defaults).
        self._ep_nfo_extension = "nfo"
        self._show_metadata_filename = "tvshow.nfo"

        self.fanart_name = "fanart.jpg"
        self.poster_name = "poster.jpg"
        self.banner_name = "banner.jpg"
        self.season_all_poster_name = "season-all-poster.jpg"
        self.season_all_banner_name = "season-all-banner.jpg"

        # Feature toggles.
        self.show_metadata = show_metadata
        self.episode_metadata = episode_metadata
        self.fanart = fanart
        self.poster = poster
        self.banner = banner
        self.episode_thumbnails = episode_thumbnails
        self.season_posters = season_posters
        self.season_banners = season_banners
        self.season_all_poster = season_all_poster
        self.season_all_banner = season_all_banner
def get_config(self):
config_list = [self.show_metadata, self.episode_metadata, self.fanart, self.poster, self.banner, self.episode_thumbnails, self.season_posters, self.season_banners, self.season_all_poster, self.season_all_banner]
return '|'.join([str(int(x)) for x in config_list])
    def get_id(self):
        """Return a sanitized identifier derived from self.name (see makeID)."""
        return GenericMetadata.makeID(self.name)
@staticmethod
def makeID(name):
name_id = re.sub("[+]", "plus", name)
name_id = re.sub("[^\w\d_]", "_", name_id).lower()
return name_id
def set_config(self, string):
config_list = [bool(int(x)) for x in string.split('|')]
self.show_metadata = config_list[0]
self.episode_metadata = config_list[1]
self.fanart = config_list[2]
self.poster = config_list[3]
self.banner = config_list[4]
self.episode_thumbnails = config_list[5]
self.season_posters = config_list[6]
self.season_banners = config_list[7]
self.season_all_poster = config_list[8]
self.season_all_banner = config_list[9]
    def _has_show_metadata(self, show_obj):
        """Return True if the show metadata file already exists on disk."""
        result = ek.ek(os.path.isfile, self.get_show_file_path(show_obj))
        logger.log(u"Checking if " + self.get_show_file_path(show_obj) + " exists: " + str(result), logger.DEBUG)
        return result

    def _has_episode_metadata(self, ep_obj):
        """Return True if the episode metadata (nfo) file already exists."""
        result = ek.ek(os.path.isfile, self.get_episode_file_path(ep_obj))
        logger.log(u"Checking if " + self.get_episode_file_path(ep_obj) + " exists: " + str(result), logger.DEBUG)
        return result

    def _has_fanart(self, show_obj):
        """Return True if the show's fanart image already exists."""
        result = ek.ek(os.path.isfile, self.get_fanart_path(show_obj))
        logger.log(u"Checking if " + self.get_fanart_path(show_obj) + " exists: " + str(result), logger.DEBUG)
        return result

    def _has_poster(self, show_obj):
        """Return True if the show's poster image already exists."""
        result = ek.ek(os.path.isfile, self.get_poster_path(show_obj))
        logger.log(u"Checking if " + self.get_poster_path(show_obj) + " exists: " + str(result), logger.DEBUG)
        return result

    def _has_banner(self, show_obj):
        """Return True if the show's banner image already exists."""
        result = ek.ek(os.path.isfile, self.get_banner_path(show_obj))
        logger.log(u"Checking if " + self.get_banner_path(show_obj) + " exists: " + str(result), logger.DEBUG)
        return result

    def _has_episode_thumb(self, ep_obj):
        """Return True if the episode thumbnail already exists.

        get_episode_thumb_path() returns None when the episode's media file
        itself is missing; that case counts as "no thumbnail".
        """
        location = self.get_episode_thumb_path(ep_obj)
        result = location != None and ek.ek(os.path.isfile, location)
        if location:
            logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG)
        return result

    def _has_season_poster(self, show_obj, season):
        """Return True if the poster for the given season already exists."""
        location = self.get_season_poster_path(show_obj, season)
        result = location != None and ek.ek(os.path.isfile, location)
        if location:
            logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG)
        return result

    def _has_season_banner(self, show_obj, season):
        """Return True if the banner for the given season already exists."""
        location = self.get_season_banner_path(show_obj, season)
        result = location != None and ek.ek(os.path.isfile, location)
        if location:
            logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG)
        return result

    def _has_season_all_poster(self, show_obj):
        """Return True if the season-all poster already exists."""
        result = ek.ek(os.path.isfile, self.get_season_all_poster_path(show_obj))
        logger.log(u"Checking if " + self.get_season_all_poster_path(show_obj) + " exists: " + str(result), logger.DEBUG)
        return result

    def _has_season_all_banner(self, show_obj):
        """Return True if the season-all banner already exists."""
        result = ek.ek(os.path.isfile, self.get_season_all_banner_path(show_obj))
        logger.log(u"Checking if " + self.get_season_all_banner_path(show_obj) + " exists: " + str(result), logger.DEBUG)
        return result
    def get_show_file_path(self, show_obj):
        """Full path of the show metadata file inside the show directory."""
        return ek.ek(os.path.join, show_obj.location, self._show_metadata_filename)

    def get_episode_file_path(self, ep_obj):
        """Path of the episode nfo: the media file with its extension swapped."""
        return helpers.replaceExtension(ep_obj.location, self._ep_nfo_extension)

    def get_fanart_path(self, show_obj):
        """Full path of the show's fanart image."""
        return ek.ek(os.path.join, show_obj.location, self.fanart_name)

    def get_poster_path(self, show_obj):
        """Full path of the show's poster image."""
        return ek.ek(os.path.join, show_obj.location, self.poster_name)

    def get_banner_path(self, show_obj):
        """Full path of the show's banner image."""
        return ek.ek(os.path.join, show_obj.location, self.banner_name)
def get_episode_thumb_path(self, ep_obj):
"""
Returns the path where the episode thumbnail should be stored.
ep_obj: a TVEpisode instance for which to create the thumbnail
"""
if ek.ek(os.path.isfile, ep_obj.location):
tbn_filename = ep_obj.location.rpartition(".")
if tbn_filename[0] == "":
tbn_filename = ep_obj.location + "-thumb.jpg"
else:
tbn_filename = tbn_filename[0] + "-thumb.jpg"
else:
return None
return tbn_filename
def get_season_poster_path(self, show_obj, season):
"""
Returns the full path to the file for a given season poster.
show_obj: a TVShow instance for which to generate the path
season: a season number to be used for the path. Note that season 0
means specials.
"""
# Our specials thumbnail is, well, special
if season == 0:
season_poster_filename = 'season-specials'
else:
season_poster_filename = 'season' + str(season).zfill(2)
return ek.ek(os.path.join, show_obj.location, season_poster_filename + '-poster.jpg')
def get_season_banner_path(self, show_obj, season):
"""
Returns the full path to the file for a given season banner.
show_obj: a TVShow instance for which to generate the path
season: a season number to be used for the path. Note that season 0
means specials.
"""
# Our specials thumbnail is, well, special
if season == 0:
season_banner_filename = 'season-specials'
else:
season_banner_filename = 'season' + str(season).zfill(2)
return ek.ek(os.path.join, show_obj.location, season_banner_filename + '-banner.jpg')
    def get_season_all_poster_path(self, show_obj):
        """Full path of the season-all poster inside the show directory."""
        return ek.ek(os.path.join, show_obj.location, self.season_all_poster_name)

    def get_season_all_banner_path(self, show_obj):
        """Full path of the season-all banner inside the show directory."""
        return ek.ek(os.path.join, show_obj.location, self.season_all_banner_name)
    def _show_data(self, show_obj):
        """
        This should be overridden by the implementing class. It should
        provide the content of the show metadata file.

        Expected to return an ElementTree object (see write_show_file) or a
        falsy value to skip writing.
        """
        return None

    def _ep_data(self, ep_obj):
        """
        This should be overridden by the implementing class. It should
        provide the content of the episode metadata file.

        Expected to return an ElementTree object (see write_ep_file) or a
        falsy value to skip writing.
        """
        return None
    def create_show_metadata(self, show_obj):
        """Write the show metadata file if enabled and not already present."""
        if self.show_metadata and show_obj and not self._has_show_metadata(show_obj):
            logger.log(u"Metadata provider " + self.name + " creating show metadata for " + show_obj.name, logger.DEBUG)
            return self.write_show_file(show_obj)
        return False

    def create_episode_metadata(self, ep_obj):
        """Write the episode metadata file if enabled and not already present."""
        if self.episode_metadata and ep_obj and not self._has_episode_metadata(ep_obj):
            logger.log(u"Metadata provider " + self.name + " creating episode metadata for " + ep_obj.prettyName(), logger.DEBUG)
            return self.write_ep_file(ep_obj)
        return False

    def create_fanart(self, show_obj):
        """Download and save fanart if enabled and not already present."""
        if self.fanart and show_obj and not self._has_fanart(show_obj):
            logger.log(u"Metadata provider " + self.name + " creating fanart for " + show_obj.name, logger.DEBUG)
            return self.save_fanart(show_obj)
        return False

    def create_poster(self, show_obj):
        """Download and save the show poster if enabled and not already present."""
        if self.poster and show_obj and not self._has_poster(show_obj):
            logger.log(u"Metadata provider " + self.name + " creating poster for " + show_obj.name, logger.DEBUG)
            return self.save_poster(show_obj)
        return False

    def create_banner(self, show_obj):
        """Download and save the show banner if enabled and not already present."""
        if self.banner and show_obj and not self._has_banner(show_obj):
            logger.log(u"Metadata provider " + self.name + " creating banner for " + show_obj.name, logger.DEBUG)
            return self.save_banner(show_obj)
        return False

    def create_episode_thumb(self, ep_obj):
        """Download and save the episode thumbnail if enabled and missing."""
        if self.episode_thumbnails and ep_obj and not self._has_episode_thumb(ep_obj):
            logger.log(u"Metadata provider " + self.name + " creating episode thumbnail for " + ep_obj.prettyName(), logger.DEBUG)
            return self.save_thumbnail(ep_obj)
        return False
def create_season_posters(self, show_obj):
if self.season_posters and show_obj:
result = []
for season, episodes in show_obj.episodes.iteritems(): # @UnusedVariable
if not self._has_season_poster(show_obj, season):
logger.log(u"Metadata provider " + self.name + " creating season posters for " + show_obj.name, logger.DEBUG)
result = result + [self.save_season_posters(show_obj, season)]
return all(result)
return False
def create_season_banners(self, show_obj):
if self.season_banners and show_obj:
result = []
for season, episodes in show_obj.episodes.iteritems(): # @UnusedVariable
if not self._has_season_banner(show_obj, season):
logger.log(u"Metadata provider " + self.name + " creating season banners for " + show_obj.name, logger.DEBUG)
result = result + [self.save_season_banners(show_obj, season)]
return all(result)
return False
    def create_season_all_poster(self, show_obj):
        """Download and save the season-all poster if enabled and missing."""
        if self.season_all_poster and show_obj and not self._has_season_all_poster(show_obj):
            logger.log(u"Metadata provider " + self.name + " creating season all poster for " + show_obj.name, logger.DEBUG)
            return self.save_season_all_poster(show_obj)
        return False

    def create_season_all_banner(self, show_obj):
        """Download and save the season-all banner if enabled and missing."""
        if self.season_all_banner and show_obj and not self._has_season_all_banner(show_obj):
            logger.log(u"Metadata provider " + self.name + " creating season all banner for " + show_obj.name, logger.DEBUG)
            return self.save_season_all_banner(show_obj)
        return False
    def _get_episode_thumb_url(self, ep_obj):
        """
        Returns the URL to use for downloading an episode's thumbnail. Uses
        theTVDB.com data. Returns None if no thumb is available or TVDB is
        unreachable.

        ep_obj: a TVEpisode object for which to grab the thumb URL
        """
        # Multi-episode files share one thumb; try each grouped episode.
        all_eps = [ep_obj] + ep_obj.relatedEps

        tvdb_lang = ep_obj.show.lang

        # get a TVDB object
        try:
            # There's gotta be a better way of doing this but we don't wanna
            # change the language value elsewhere
            ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()

            if tvdb_lang and not tvdb_lang == 'en':
                ltvdb_api_parms['language'] = tvdb_lang

            t = tvdb_api.Tvdb(actors=True, **ltvdb_api_parms)
            tvdb_show_obj = t[ep_obj.show.tvdbid]
        except tvdb_exceptions.tvdb_shownotfound, e:
            raise exceptions.ShowNotFoundException(e.message)
        except tvdb_exceptions.tvdb_error, e:
            logger.log(u"Unable to connect to TVDB while creating meta files - skipping - " + ex(e), logger.ERROR)
            return None

        # try all included episodes in case some have thumbs and others don't
        for cur_ep in all_eps:
            try:
                myEp = tvdb_show_obj[cur_ep.season][cur_ep.episode]
            except (tvdb_exceptions.tvdb_episodenotfound, tvdb_exceptions.tvdb_seasonnotfound):
                logger.log(u"Unable to find episode " + str(cur_ep.season) + "x" + str(cur_ep.episode) + " on tvdb... has it been removed? Should I delete from db?")
                continue

            thumb_url = myEp["filename"]

            if thumb_url:
                return thumb_url

        # No episode in the group had a thumbnail on TVDB.
        return None
    def write_show_file(self, show_obj):
        """
        Generates and writes show_obj's metadata under the given path to the
        filename given by get_show_file_path()

        show_obj: TVShow object for which to create the metadata

        path: An absolute or relative path where we should put the file. Note that
                the file name will be the default show_file_name.

        Note that this method expects that _show_data will return an ElementTree
        object. If your _show_data returns data in another format you'll need to
        override this method.
        """
        data = self._show_data(show_obj)

        if not data:
            return False

        nfo_file_path = self.get_show_file_path(show_obj)
        nfo_file_dir = ek.ek(os.path.dirname, nfo_file_path)

        try:
            if not ek.ek(os.path.isdir, nfo_file_dir):
                logger.log(u"Metadata dir didn't exist, creating it at " + nfo_file_dir, logger.DEBUG)
                ek.ek(os.makedirs, nfo_file_dir)
                # Match the parent directory's permissions on the new dir.
                helpers.chmodAsParent(nfo_file_dir)

            logger.log(u"Writing show nfo file to " + nfo_file_path, logger.DEBUG)

            nfo_file = ek.ek(open, nfo_file_path, 'w')

            data.write(nfo_file, encoding="utf-8")
            nfo_file.close()
            helpers.chmodAsParent(nfo_file_path)
        except IOError, e:
            logger.log(u"Unable to write file to " + nfo_file_path + " - are you sure the folder is writable? " + ex(e), logger.ERROR)
            return False

        return True
    def write_ep_file(self, ep_obj):
        """
        Generates and writes ep_obj's metadata under the given path with the
        given filename root. Uses the episode's name with the extension in
        _ep_nfo_extension.

        ep_obj: TVEpisode object for which to create the metadata

        file_name_path: The file name to use for this metadata. Note that the extension
                will be automatically added based on _ep_nfo_extension. This should
                include an absolute path.

        Note that this method expects that _ep_data will return an ElementTree
        object. If your _ep_data returns data in another format you'll need to
        override this method.
        """
        data = self._ep_data(ep_obj)

        if not data:
            return False

        nfo_file_path = self.get_episode_file_path(ep_obj)
        nfo_file_dir = ek.ek(os.path.dirname, nfo_file_path)

        try:
            if not ek.ek(os.path.isdir, nfo_file_dir):
                logger.log(u"Metadata dir didn't exist, creating it at " + nfo_file_dir, logger.DEBUG)
                ek.ek(os.makedirs, nfo_file_dir)
                # Match the parent directory's permissions on the new dir.
                helpers.chmodAsParent(nfo_file_dir)

            logger.log(u"Writing episode nfo file to " + nfo_file_path, logger.DEBUG)

            nfo_file = ek.ek(open, nfo_file_path, 'w')

            data.write(nfo_file, encoding="utf-8")
            nfo_file.close()
            helpers.chmodAsParent(nfo_file_path)
        except IOError, e:
            logger.log(u"Unable to write file to " + nfo_file_path + " - are you sure the folder is writable? " + ex(e), logger.ERROR)
            return False

        return True
def save_thumbnail(self, ep_obj):
    """
    Retrieves a thumbnail and saves it to the correct spot. This method should not need to
    be overridden by implementing classes, changing get_episode_thumb_path and
    _get_episode_thumb_url should suffice.

    ep_obj: a TVEpisode object for which to generate a thumbnail
    """
    thumb_path = self.get_episode_thumb_path(ep_obj)
    if not thumb_path:
        logger.log(u"Unable to find a file path to use for this thumbnail, not generating it", logger.DEBUG)
        return False

    url = self._get_episode_thumb_url(ep_obj)

    # if we can't find one then give up
    if not url:
        logger.log(u"No thumb is available for this episode, not creating a thumb", logger.DEBUG)
        return False

    if not self._write_image(metadata_helpers.getShowImage(url), thumb_path):
        return False

    # flag every episode sharing this media file as having a thumbnail now
    for related_ep in [ep_obj] + ep_obj.relatedEps:
        related_ep.hastbn = True

    return True
def save_fanart(self, show_obj, which=None):
    """
    Downloads a fanart image and saves it to the filename specified by fanart_name
    inside the show's root folder.

    show_obj: a TVShow object for which to download fanart
    which: optional, a specific numbered fanart to look for
    """
    # use the default fanart name
    dest_path = self.get_fanart_path(show_obj)

    img_data = self._retrieve_show_image('fanart', show_obj, which)
    if not img_data:
        logger.log(u"No fanart image was retrieved, unable to write fanart", logger.DEBUG)
        return False

    return self._write_image(img_data, dest_path)
def save_poster(self, show_obj, which=None):
    """
    Downloads a poster image and saves it to the filename specified by poster_name
    inside the show's root folder.

    show_obj: a TVShow object for which to download a poster
    which: optional, a specific numbered poster to look for
    """
    # use the default poster name
    dest_path = self.get_poster_path(show_obj)

    img_data = self._retrieve_show_image('poster', show_obj, which)
    if not img_data:
        logger.log(u"No show poster image was retrieved, unable to write poster", logger.DEBUG)
        return False

    return self._write_image(img_data, dest_path)
def save_banner(self, show_obj, which=None):
    """
    Downloads a banner image and saves it to the filename specified by banner_name
    inside the show's root folder.

    show_obj: a TVShow object for which to download a banner
    which: optional, a specific numbered banner to look for
    """
    # use the default banner name
    dest_path = self.get_banner_path(show_obj)

    img_data = self._retrieve_show_image('banner', show_obj, which)
    if not img_data:
        logger.log(u"No show banner image was retrieved, unable to write banner", logger.DEBUG)
        return False

    return self._write_image(img_data, dest_path)
def save_season_posters(self, show_obj, season):
    """
    Saves all season posters to disk for the given show.

    show_obj: a TVShow object for which to save the season thumbs
    season: season number to save a poster for

    Cycles through all seasons and saves the season posters if possible. This
    method should not need to be overridden by implementing classes, changing
    _season_posters_dict and get_season_poster_path should be good enough.

    Returns True when every attempted write succeeded, False when any write
    failed or no poster was written at all.
    """
    season_dict = self._season_posters_dict(show_obj, season)

    # _season_posters_dict returns a nested dictionary of season art with
    # the season number as primary key. It's really overkill but gives the
    # option to present to user via ui to pick down the road.
    result = []
    for cur_season in season_dict:
        cur_season_art = season_dict[cur_season]
        if len(cur_season_art) == 0:
            continue

        # Just grab whatever's there for now
        art_id, season_url = cur_season_art.popitem()  # @UnusedVariable

        season_poster_file_path = self.get_season_poster_path(show_obj, cur_season)

        if not season_poster_file_path:
            logger.log(u"Path for season " + str(cur_season) + " came back blank, skipping this season", logger.DEBUG)
            continue

        seasonData = metadata_helpers.getShowImage(season_url)

        if not seasonData:
            logger.log(u"No season poster data available, skipping this season", logger.DEBUG)
            continue

        # append instead of rebuilding the list with concatenation
        result.append(self._write_image(seasonData, season_poster_file_path))

    # NOTE: the old trailing "return True" was unreachable dead code --
    # this if/else already covers every case, so it has been removed.
    if result:
        return all(result)
    else:
        return False
def save_season_banners(self, show_obj, season):
    """
    Saves all season banners to disk for the given show.

    show_obj: a TVShow object for which to save the season thumbs
    season: season number to save a banner for

    Cycles through all seasons and saves the season banners if possible. This
    method should not need to be overridden by implementing classes, changing
    _season_banners_dict and get_season_banner_path should be good enough.

    Returns True when every attempted write succeeded, False when any write
    failed or no banner was written at all.
    """
    season_dict = self._season_banners_dict(show_obj, season)

    # _season_banners_dict returns a nested dictionary of season art with
    # the season number as primary key. It's really overkill but gives the
    # option to present to user via ui to pick down the road.
    result = []
    for cur_season in season_dict:
        cur_season_art = season_dict[cur_season]
        if len(cur_season_art) == 0:
            continue

        # Just grab whatever's there for now
        art_id, season_url = cur_season_art.popitem()  # @UnusedVariable

        season_banner_file_path = self.get_season_banner_path(show_obj, cur_season)

        if not season_banner_file_path:
            logger.log(u"Path for season " + str(cur_season) + " came back blank, skipping this season", logger.DEBUG)
            continue

        seasonData = metadata_helpers.getShowImage(season_url)

        if not seasonData:
            logger.log(u"No season banner data available, skipping this season", logger.DEBUG)
            continue

        # append instead of rebuilding the list with concatenation
        result.append(self._write_image(seasonData, season_banner_file_path))

    # NOTE: the old trailing "return True" was unreachable dead code --
    # this if/else already covers every case, so it has been removed.
    if result:
        return all(result)
    else:
        return False
def save_season_all_poster(self, show_obj, which=None):
    """Download and write the 'season all' poster into the show folder.

    show_obj: a TVShow object for which to download the poster
    which: optional, a specific numbered poster to look for
    """
    # use the default season all poster name
    dest_path = self.get_season_all_poster_path(show_obj)

    img_data = self._retrieve_show_image('poster', show_obj, which)
    if not img_data:
        logger.log(u"No show poster image was retrieved, unable to write season all poster", logger.DEBUG)
        return False

    return self._write_image(img_data, dest_path)
def save_season_all_banner(self, show_obj, which=None):
    """Download and write the 'season all' banner into the show folder.

    show_obj: a TVShow object for which to download the banner
    which: optional, a specific numbered banner to look for
    """
    # use the default season all banner name
    dest_path = self.get_season_all_banner_path(show_obj)

    img_data = self._retrieve_show_image('banner', show_obj, which)
    if not img_data:
        logger.log(u"No show banner image was retrieved, unable to write season all banner", logger.DEBUG)
        return False

    return self._write_image(img_data, dest_path)
def _write_image(self, image_data, image_path):
    """
    Saves the data in image_data to the location image_path. Returns True/False
    to represent success or failure.

    image_data: binary image data to write to file
    image_path: file location to save the image to
    """
    # don't bother overwriting it
    if ek.ek(os.path.isfile, image_path):
        logger.log(u"Image already exists, not downloading", logger.DEBUG)
        return False

    if not image_data:
        logger.log(u"Unable to retrieve image, skipping", logger.WARNING)
        return False

    image_dir = ek.ek(os.path.dirname, image_path)

    try:
        if not ek.ek(os.path.isdir, image_dir):
            logger.log(u"Metadata dir didn't exist, creating it at " + image_dir, logger.DEBUG)
            ek.ek(os.makedirs, image_dir)
            helpers.chmodAsParent(image_dir)

        outFile = ek.ek(open, image_path, 'wb')
        try:
            outFile.write(image_data)
        finally:
            # close the handle even when the write fails part-way,
            # otherwise the file descriptor leaks
            outFile.close()
        helpers.chmodAsParent(image_path)
    except IOError as e:
        logger.log(u"Unable to write image to " + image_path + " - are you sure the show folder is writable? " + ex(e), logger.ERROR)
        return False

    return True
def _retrieve_show_image(self, image_type, show_obj, which=None):
    """
    Gets an image URL from theTVDB.com, downloads it and returns the data.

    image_type: type of image to retrieve (currently supported: fanart, poster, banner)
    show_obj: a TVShow object to use when searching for the image
    which: optional, a specific numbered poster to look for

    Returns: the binary image data if available, or else None
    """
    tvdb_lang = show_obj.lang

    try:
        # There's gotta be a better way of doing this but we don't wanna
        # change the language value elsewhere
        ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
        if tvdb_lang and not tvdb_lang == 'en':
            ltvdb_api_parms['language'] = tvdb_lang
        t = tvdb_api.Tvdb(banners=True, **ltvdb_api_parms)
        tvdb_show_obj = t[show_obj.tvdbid]
    except (tvdb_exceptions.tvdb_error, IOError) as e:
        logger.log(u"Unable to look up show on TVDB, not downloading images: " + ex(e), logger.ERROR)
        return None

    # refuse anything outside the supported image kinds
    if image_type not in ('fanart', 'poster', 'banner'):
        logger.log(u"Invalid image type " + str(image_type) + ", couldn't find it in the TVDB object", logger.ERROR)
        return None

    image_url = tvdb_show_obj[image_type]
    return metadata_helpers.getShowImage(image_url, which)
def _season_posters_dict(self, show_obj, season):
    """
    Should return a dict like:

    result = {<season number>:
                {1: '<url 1>', 2: <url 2>, ...},}

    Looks up the show on theTVDB and collects the URLs of all
    English-language, poster-style artwork for the requested season.
    Returns an empty dict when the lookup fails or no art exists.
    """
    # This holds our resulting dictionary of season art
    result = {}

    tvdb_lang = show_obj.lang

    try:
        # There's gotta be a better way of doing this but we don't wanna
        # change the language value elsewhere
        ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()

        if tvdb_lang and not tvdb_lang == 'en':
            ltvdb_api_parms['language'] = tvdb_lang

        t = tvdb_api.Tvdb(banners=True, **ltvdb_api_parms)
        tvdb_show_obj = t[show_obj.tvdbid]
    except (tvdb_exceptions.tvdb_error, IOError), e:
        logger.log(u"Unable to look up show on TVDB, not downloading images: " + ex(e), logger.ERROR)
        return result

    # if we have no season banners then just finish
    if 'season' not in tvdb_show_obj['_banners'] or 'season' not in tvdb_show_obj['_banners']['season']:
        return result

    # Give us just the normal poster-style season graphics
    seasonsArtObj = tvdb_show_obj['_banners']['season']['season']

    # Returns a nested dictionary of season art with the season
    # number as primary key. It's really overkill but gives the option
    # to present to user via ui to pick down the road.
    result[season] = {}

    # find the correct season in the tvdb object and just copy the dict into our result dict
    for seasonArtID in seasonsArtObj.keys():
        # only keep English art belonging to the requested season
        if int(seasonsArtObj[seasonArtID]['season']) == season and seasonsArtObj[seasonArtID]['language'] == 'en':
            result[season][seasonArtID] = seasonsArtObj[seasonArtID]['_bannerpath']

    return result
def _season_banners_dict(self, show_obj, season):
    """
    Should return a dict like:

    result = {<season number>:
                {1: '<url 1>', 2: <url 2>, ...},}

    Looks up the show on theTVDB and collects the URLs of all
    English-language, wide (banner-style) artwork for the requested
    season. Returns an empty dict when the lookup fails or no art exists.
    """
    # This holds our resulting dictionary of season art
    result = {}

    tvdb_lang = show_obj.lang

    try:
        # There's gotta be a better way of doing this but we don't wanna
        # change the language value elsewhere
        ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()

        if tvdb_lang and not tvdb_lang == 'en':
            ltvdb_api_parms['language'] = tvdb_lang

        t = tvdb_api.Tvdb(banners=True, **ltvdb_api_parms)
        tvdb_show_obj = t[show_obj.tvdbid]
    except (tvdb_exceptions.tvdb_error, IOError), e:
        logger.log(u"Unable to look up show on TVDB, not downloading images: " + ex(e), logger.ERROR)
        return result

    # if we have no season banners then just finish
    if 'season' not in tvdb_show_obj['_banners'] or 'seasonwide' not in tvdb_show_obj['_banners']['season']:
        return result

    # Give us just the normal season graphics
    seasonsArtObj = tvdb_show_obj['_banners']['season']['seasonwide']

    # Returns a nested dictionary of season art with the season
    # number as primary key. It's really overkill but gives the option
    # to present to user via ui to pick down the road.
    result[season] = {}

    # find the correct season in the tvdb object and just copy the dict into our result dict
    for seasonArtID in seasonsArtObj.keys():
        # only keep English art belonging to the requested season
        if int(seasonsArtObj[seasonArtID]['season']) == season and seasonsArtObj[seasonArtID]['language'] == 'en':
            result[season][seasonArtID] = seasonsArtObj[seasonArtID]['_bannerpath']

    return result
def retrieveShowMetadata(self, folder):
    """
    Used only when mass adding Existing Shows, using previously generated Show metadata to reduce the need to query TVDB.

    folder: show directory expected to contain self._show_metadata_filename

    Returns a (tvdb_id, name) tuple, or (None, None) when the metadata file
    is missing, unparseable or incomplete.
    """
    empty_return = (None, None)

    metadata_path = ek.ek(os.path.join, folder, self._show_metadata_filename)

    if not ek.ek(os.path.isdir, folder) or not ek.ek(os.path.isfile, metadata_path):
        logger.log(u"Can't load the metadata file from " + repr(metadata_path) + ", it doesn't exist", logger.DEBUG)
        return empty_return

    logger.log(u"Loading show info from metadata file in " + folder, logger.DEBUG)

    try:
        with ek.ek(open, metadata_path, 'r') as xmlFileObj:
            showXML = etree.ElementTree(file=xmlFileObj)

        # a usable NFO needs at least a title and one of the two id fields
        # (PEP 8: compare to None with "is"/"is not", not ==/!=)
        if showXML.findtext('title') is None \
                or (showXML.findtext('tvdbid') is None and showXML.findtext('id') is None):
            logger.log(u"Invalid info in tvshow.nfo (missing name or id):" \
                + str(showXML.findtext('title')) + " " \
                + str(showXML.findtext('tvdbid')) + " " \
                + str(showXML.findtext('id')))
            return empty_return

        name = showXML.findtext('title')

        # prefer <tvdbid>, fall back to a non-empty <id>
        if showXML.findtext('tvdbid') is not None:
            tvdb_id = int(showXML.findtext('tvdbid'))
        elif showXML.findtext('id'):
            tvdb_id = int(showXML.findtext('id'))
        else:
            logger.log(u"Empty <id> or <tvdbid> field in NFO, unable to find an ID", logger.WARNING)
            return empty_return

        if not tvdb_id:
            logger.log(u"Invalid tvdb id (" + str(tvdb_id) + "), not using metadata file", logger.WARNING)
            return empty_return

    except Exception as e:
        # deliberately broad: any parse problem means "no usable metadata",
        # logged and reported via the empty return rather than crashing
        logger.log(u"There was an error parsing your existing metadata file: '" + metadata_path + "' error: " + ex(e), logger.WARNING)
        return empty_return

    return (tvdb_id, name)
| gpl-3.0 |
spygg/liusblog | liusblog/urls.py | 1 | 1124 | """liusblog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include
from django.conf import settings
from django.conf.urls.static import static
import comment.views
# URL routing table: admin site, blog app (site root), CKEditor uploads
# and the comment endpoint.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('blog.urls')),
    # NOTE(review): 'ckeditor' has no trailing slash, unlike the other
    # prefixes -- confirm this is intentional
    path('ckeditor', include('ckeditor_uploader.urls')),
    path('comment/', comment.views.comment, name='comment'),
]
# Serve uploaded MEDIA files through Django's static() helper
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
openstack/tosca-parser | toscaparser/tests/test_toscatpl.py | 1 | 45468 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from toscaparser.common import exception
import toscaparser.elements.interfaces as ifaces
from toscaparser.elements.nodetype import NodeType
from toscaparser.elements.portspectype import PortSpec
from toscaparser.functions import GetInput
from toscaparser.functions import GetProperty
from toscaparser.nodetemplate import NodeTemplate
from toscaparser.tests.base import TestCase
from toscaparser.tosca_template import ToscaTemplate
from toscaparser.utils.gettextutils import _
import toscaparser.utils.yamlparser
class ToscaTemplateTest(TestCase):

    '''TOSCA template.'''

    # Shared fixtures, built once at class-definition time and reused by
    # the tests below: the single-instance wordpress template (pre-parsed
    # as self.tosca), the ELK template path and the repositories template
    # path.
    tosca_tpl = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "data/tosca_single_instance_wordpress.yaml")
    params = {'db_name': 'my_wordpress', 'db_user': 'my_db_user',
              'db_root_pwd': '12345678'}
    tosca = ToscaTemplate(tosca_tpl, parsed_params=params)
    tosca_elk_tpl = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "data/tosca_elk.yaml")
    tosca_repo_tpl = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "data/repositories/tosca_repositories_test_definition.yaml")
def test_version(self):
self.assertEqual(self.tosca.version, "tosca_simple_yaml_1_0")
def test_description(self):
expected_description = "TOSCA simple profile with wordpress, " \
"web server and mysql on the same server."
self.assertEqual(self.tosca.description, expected_description)
def test_inputs(self):
self.assertEqual(
['cpus', 'db_name', 'db_port',
'db_pwd', 'db_root_pwd', 'db_user'],
sorted([input.name for input in self.tosca.inputs]))
input_name = "db_port"
expected_description = "Port for the MySQL database."
for input in self.tosca.inputs:
if input.name == input_name:
self.assertEqual(input.description, expected_description)
    def test_node_tpls(self):
        '''Test nodetemplate names.'''
        self.assertEqual(
            ['mysql_database', 'mysql_dbms', 'server',
             'webserver', 'wordpress'],
            sorted([tpl.name for tpl in self.tosca.nodetemplates]))

        # Expectations for the mysql_database node template, checked in
        # the loop below.
        tpl_name = "mysql_database"
        expected_type = "tosca.nodes.Database"
        expected_properties = ['name', 'password', 'user']
        expected_capabilities = ['database_endpoint', 'feature']
        expected_requirements = [{'host': 'mysql_dbms'}]
        ''' TODO: needs enhancement in tosca_elk.yaml..
        expected_relationshp = ['tosca.relationships.HostedOn']
        expected_host = ['mysql_dbms']
        '''
        expected_interface = [ifaces.LIFECYCLE_SHORTNAME]

        for tpl in self.tosca.nodetemplates:
            if tpl_name == tpl.name:
                '''Test node type.'''
                self.assertEqual(tpl.type, expected_type)

                '''Test properties.'''
                self.assertEqual(
                    expected_properties,
                    sorted(tpl.get_properties().keys()))

                '''Test capabilities.'''
                self.assertEqual(
                    expected_capabilities,
                    sorted(tpl.get_capabilities().keys()))

                '''Test requirements.'''
                self.assertEqual(
                    expected_requirements, tpl.requirements)

                '''Test relationship.'''
                ''' needs enhancements in tosca_elk.yaml
                self.assertEqual(
                    expected_relationshp,
                    [x.type for x in tpl.relationships.keys()])
                self.assertEqual(
                    expected_host,
                    [y.name for y in tpl.relationships.values()])
                '''

                '''Test interfaces.'''
                self.assertEqual(
                    expected_interface,
                    [x.type for x in tpl.interfaces])

            if tpl.name == 'server':
                '''Test property value'''
                props = tpl.get_properties()
                if props and 'mem_size' in props.keys():
                    self.assertEqual(props['mem_size'].value, '4096 MB')
                '''Test capability'''
                caps = tpl.get_capabilities()
                self.assertIn('os', caps.keys())
                os_props_objs = None
                os_props = None
                os_type_prop = None
                if caps and 'os' in caps.keys():
                    capability = caps['os']
                    os_props_objs = capability.get_properties_objects()
                    os_props = capability.get_properties()
                    os_type_prop = capability.get_property_value('type')
                    # the 'server' node's os capability has been captured;
                    # stop iterating and verify it below
                    break
        self.assertEqual(
            ['Linux'],
            [p.value for p in os_props_objs if p.name == 'type'])
        self.assertEqual(
            'Linux',
            os_props['type'].value if 'type' in os_props else '')
        self.assertEqual('Linux', os_props['type'].value)
        self.assertEqual('Linux', os_type_prop)
def test_node_inheritance_type(self):
wordpress_node = [
node for node in self.tosca.nodetemplates
if node.name == 'wordpress'][0]
self.assertTrue(
wordpress_node.is_derived_from("tosca.nodes.WebApplication"))
self.assertTrue(
wordpress_node.is_derived_from("tosca.nodes.Root"))
self.assertFalse(
wordpress_node.is_derived_from("tosca.policies.Root"))
    def test_nodetype_without_relationship(self):
        """Nodes are classified by whether requirements declare a relationship."""
        # Nodes that contain "relationship" in "requirements"
        depend_node_types = (
            "tosca.nodes.SoftwareComponent",
        )
        # Nodes that do not contain "relationship" in "requirements"
        non_depend_node_types = (
            "tosca.nodes.Compute",
            "sample.SC",
        )
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/test_nodetype_without_relationship.yaml")
        tosca = ToscaTemplate(tosca_tpl)

        nodetemplates = tosca.nodetemplates
        for node in nodetemplates:
            # related_nodes is truthy only for nodes that depend on another
            node_depend = node.related_nodes
            if node_depend:
                self.assertIn(
                    node.type,
                    depend_node_types
                )
            else:
                self.assertIn(
                    node.type,
                    non_depend_node_types
                )
def test_outputs(self):
self.assertEqual(
['website_url'],
sorted([output.name for output in self.tosca.outputs]))
    def test_interfaces(self):
        """The wordpress node exposes 'create' and 'configure' lifecycle ops."""
        wordpress_node = [
            node for node in self.tosca.nodetemplates
            if node.name == 'wordpress'][0]
        interfaces = wordpress_node.interfaces
        self.assertEqual(2, len(interfaces))
        for interface in interfaces:
            if interface.name == 'create':
                self.assertEqual(ifaces.LIFECYCLE_SHORTNAME,
                                 interface.type)
                self.assertEqual('wordpress/wordpress_install.sh',
                                 interface.implementation)
                self.assertIsNone(interface.inputs)
            elif interface.name == 'configure':
                self.assertEqual(ifaces.LIFECYCLE_SHORTNAME,
                                 interface.type)
                self.assertEqual('wordpress/wordpress_configure.sh',
                                 interface.implementation)
                self.assertEqual(3, len(interface.inputs))
                # everything below is disabled until bug #1440247 is fixed
                self.skipTest('bug #1440247')
                wp_db_port = interface.inputs['wp_db_port']
                self.assertIsInstance(wp_db_port, GetProperty)
                self.assertEqual('get_property', wp_db_port.name)
                self.assertEqual(['SELF',
                                  'database_endpoint',
                                  'port'],
                                 wp_db_port.args)
                result = wp_db_port.result()
                self.assertIsInstance(result, GetInput)
            else:
                raise AssertionError(
                    'Unexpected interface: {0}'.format(interface.name))
    def test_normative_type_by_short_name(self):
        # test template with a short name Compute
        template = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/test_tosca_normative_type_by_shortname.yaml")
        tosca_tpl = ToscaTemplate(template)
        # the short name must resolve to the fully-qualified node type
        expected_type = "tosca.nodes.Compute"
        for tpl in tosca_tpl.nodetemplates:
            self.assertEqual(tpl.type, expected_type)
        for tpl in tosca_tpl.nodetemplates:
            # ...and carry the full normative capability set
            compute_type = NodeType(tpl.type)
            self.assertEqual(
                sorted(['tosca.capabilities.Container',
                        'tosca.capabilities.Endpoint.Admin',
                        'tosca.capabilities.Node',
                        'tosca.capabilities.OperatingSystem',
                        'tosca.capabilities.network.Bindable',
                        'tosca.capabilities.Scalable']),
                sorted([c.type
                        for c in compute_type.get_capabilities_objects()]))
def test_template_with_no_inputs(self):
tosca_tpl = self._load_template('test_no_inputs_in_template.yaml')
self.assertEqual(0, len(tosca_tpl.inputs))
def test_template_with_no_outputs(self):
tosca_tpl = self._load_template('test_no_outputs_in_template.yaml')
self.assertEqual(0, len(tosca_tpl.outputs))
def test_template_file_with_suffix_yml(self):
tosca_tpl = self._load_template('custom_types/wordpress.yml')
self.assertIsNotNone(tosca_tpl)
    def test_relationship_interface(self):
        """Relationship templates on 'logstash' carry a Configure interface."""
        template = ToscaTemplate(self.tosca_elk_tpl)
        for node_tpl in template.nodetemplates:
            if node_tpl.name == 'logstash':
                config_interface = 'Configure'
                artifact = 'logstash/configure_elasticsearch.py'
                relation = node_tpl.relationships
                for key in relation.keys():
                    rel_tpl = relation.get(key).get_relationship_template()
                    # only relationships backed by an explicit template are
                    # checked; implicit relationships return nothing here
                    if rel_tpl:
                        self.assertTrue(rel_tpl[0].is_derived_from(
                            "tosca.relationships.Root"))
                        interfaces = rel_tpl[0].interfaces
                        for interface in interfaces:
                            self.assertEqual(config_interface,
                                             interface.type)
                            self.assertEqual('pre_configure_source',
                                             interface.name)
                            self.assertEqual(artifact,
                                             interface.implementation)
    def test_relationship(self):
        """'paypal_pizzastore' connects to a database and is hosted on a web server."""
        template = ToscaTemplate(self.tosca_elk_tpl)
        for node_tpl in template.nodetemplates:
            if node_tpl.name == 'paypal_pizzastore':
                expected_relationships = ['tosca.relationships.ConnectsTo',
                                          'tosca.relationships.HostedOn']
                expected_hosts = ['tosca.nodes.Database',
                                  'tosca.nodes.WebServer']
                self.assertEqual(len(node_tpl.relationships), 2)
                self.assertEqual(
                    expected_relationships,
                    sorted([k.type for k in node_tpl.relationships.keys()]))
                self.assertEqual(
                    expected_hosts,
                    sorted([v.type for v in node_tpl.relationships.values()]))
def test_repositories(self):
template = ToscaTemplate(self.tosca_repo_tpl)
self.assertEqual(
['repo_code0', 'repo_code1', 'repo_code2'],
sorted([input.name for input in template.repositories]))
input_name = "repo_code2"
expected_url = "https://github.com/nandinivemula/intern/master"
for input in template.repositories:
if input.name == input_name:
self.assertEqual(input.url, expected_url)
def test_template_macro(self):
template = ToscaTemplate(self.tosca_elk_tpl)
for node_tpl in template.nodetemplates:
if node_tpl.name == 'mongo_server':
self.assertEqual(
['disk_size', 'mem_size', 'num_cpus'],
sorted(node_tpl.get_capability('host').
get_properties().keys()))
    def test_template_requirements(self):
        """Test different formats of requirements

        The requirements can be defined in few different ways,
        1. Requirement expressed as a capability with an implicit relationship.
        2. Requirement expressed with explicit relationship.
        3. Requirement expressed with a relationship template.
        4. Requirement expressed via TOSCA types to provision a node
           with explicit relationship.
        5. Requirement expressed via TOSCA types with a filter.
        """
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/requirements/test_requirements.yaml")
        tosca = ToscaTemplate(tosca_tpl)
        for node_tpl in tosca.nodetemplates:
            if node_tpl.name == 'my_app':
                # two requirements: connection to the db, hosting web server
                expected_relationship = [
                    ('tosca.relationships.ConnectsTo', 'mysql_database'),
                    ('tosca.relationships.HostedOn', 'my_webserver')]
                actual_relationship = sorted([
                    (relation.type, node.name) for
                    relation, node in node_tpl.relationships.items()])
                self.assertEqual(expected_relationship, actual_relationship)
            if node_tpl.name == 'mysql_database':
                self.assertEqual(
                    [('tosca.relationships.HostedOn', 'my_dbms')],
                    [(relation.type, node.name) for
                     relation,
                     node in node_tpl.relationships.items()])
            if node_tpl.name == 'my_server':
                self.assertEqual(
                    [('tosca.relationships.AttachesTo', 'my_storage')],
                    [(relation.type, node.name) for
                     relation,
                     node in node_tpl.relationships.items()])
    def test_template_requirements_not_implemented(self):
        # TODO(spzala): replace this test with new one once TOSCA types look up
        # support is implemented.
        """Requirements that yet need to be implemented

        The following requirement formats are not yet implemented,
        due to look up dependency:
        1. Requirement expressed via TOSCA types to provision a node
           with explicit relationship.
        2. Requirement expressed via TOSCA types with a filter.
        """
        tpl_snippet_1 = '''
        node_templates:
          mysql_database:
            type: tosca.nodes.Database
            description: Requires a particular node type and relationship.
                        To be full-filled via lookup into node repository.
            requirements:
              - req1:
                  node: tosca.nodes.DBMS
                  relationship: tosca.relationships.HostedOn
        '''

        tpl_snippet_2 = '''
        node_templates:
          my_webserver:
            type: tosca.nodes.WebServer
            description: Requires a particular node type with a filter.
                        To be full-filled via lookup into node repository.
            requirements:
              - req1:
                  node: tosca.nodes.Compute
                  target_filter:
                    properties:
                      num_cpus: { in_range: [ 1, 4 ] }
                      mem_size: { greater_or_equal: 2 }
                    capabilities:
                      - tosca.capabilities.OS:
                          properties:
                            architecture: x86_64
                            type: linux
        '''

        tpl_snippet_3 = '''
        node_templates:
          my_webserver2:
            type: tosca.nodes.WebServer
            description: Requires a node type with a particular capability.
                        To be full-filled via lookup into node repository.
            requirements:
              - req1:
                  node: tosca.nodes.Compute
                  relationship: tosca.relationships.HostedOn
                  capability: tosca.capabilities.Container
        '''
        # each snippet must currently fail with NotImplementedError
        self._requirements_not_implemented(tpl_snippet_1, 'mysql_database')
        self._requirements_not_implemented(tpl_snippet_2, 'my_webserver')
        self._requirements_not_implemented(tpl_snippet_3, 'my_webserver2')
def _requirements_not_implemented(self, tpl_snippet, tpl_name):
nodetemplates = (toscaparser.utils.yamlparser.
simple_parse(tpl_snippet))['node_templates']
self.assertRaises(
NotImplementedError,
lambda: NodeTemplate(tpl_name, nodetemplates).relationships)
    # Test the following:
    # 1. Custom node type derived from 'WebApplication' named 'TestApp'
    #    with a custom Capability Type 'TestCapability'
    # 2. Same as #1, but referencing a custom 'TestCapability' Capability Type
    #    that is not defined
    def test_custom_capability_type_definition(self):
        tpl_snippet = '''
        node_templates:
          test_app:
            type: tosca.nodes.WebApplication.TestApp
            capabilities:
              test_cap:
                properties:
                  test: 1
        '''
        # custom node type definition with custom capability type definition
        custom_def = '''
        tosca.nodes.WebApplication.TestApp:
          derived_from: tosca.nodes.WebApplication
          capabilities:
            test_cap:
               type: tosca.capabilities.TestCapability
        tosca.capabilities.TestCapability:
          derived_from: tosca.capabilities.Root
          properties:
            test:
              type: integer
              required: false
        '''
        expected_capabilities = ['app_endpoint', 'feature', 'test_cap']
        nodetemplates = (toscaparser.utils.yamlparser.
                         simple_parse(tpl_snippet))['node_templates']
        custom_def = (toscaparser.utils.yamlparser.
                      simple_parse(custom_def))
        name = list(nodetemplates.keys())[0]
        tpl = NodeTemplate(name, nodetemplates, custom_def)
        self.assertEqual(
            expected_capabilities,
            sorted(tpl.get_capabilities().keys()))

        # custom definition without valid capability type definition
        custom_def = '''
        tosca.nodes.WebApplication.TestApp:
          derived_from: tosca.nodes.WebApplication
          capabilities:
            test_cap:
               type: tosca.capabilities.TestCapability
        '''
        custom_def = (toscaparser.utils.yamlparser.
                      simple_parse(custom_def))
        tpl = NodeTemplate(name, nodetemplates, custom_def)
        # resolving capabilities must fail: the referenced capability type
        # is never defined in this second custom_def
        err = self.assertRaises(
            exception.InvalidTypeError,
            lambda: NodeTemplate(name, nodetemplates,
                                 custom_def).get_capabilities_objects())
        self.assertEqual('Type "tosca.capabilities.TestCapability" is not '
                         'a valid type.', str(err))
    def test_capability_without_properties(self):
        expected_version = "tosca_simple_yaml_1_0"
        expected_description = \
            "Test resources for which properties are not defined in "\
            "the parent of capabilitytype. "\
            "TestApp has capabilities->test_cap, "\
            "and the type of test_cap is TestCapabilityAA. "\
            "The parents of TestCapabilityAA is TestCapabilityA, "\
            "and TestCapabilityA has no properties."
        # the raw node_templates dict expected back from the parsed template
        expected_nodetemplates = {
            "test_app": {
                "type": "tosca.nodes.WebApplication.TestApp",
                "capabilities": {
                    "test_cap": {
                        "properties": {
                            "test": 1
                        }
                    }
                }
            }
        }
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/test_capability_without_properties.yaml")
        tosca = ToscaTemplate(tosca_tpl)

        self.assertEqual(expected_version, tosca.version)
        self.assertEqual(expected_description, tosca.description)
        self.assertEqual(
            expected_nodetemplates,
            tosca.nodetemplates[0].templates,
        )
def test_local_template_with_local_relpath_import(self):
tosca_tpl = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"data/tosca_single_instance_wordpress.yaml")
params = {'db_name': 'my_wordpress', 'db_user': 'my_db_user',
'db_root_pwd': '12345678'}
tosca = ToscaTemplate(tosca_tpl, parsed_params=params)
self.assertTrue(tosca.topology_template.custom_defs)
def test_local_template_with_url_import(self):
tosca_tpl = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"data/tosca_single_instance_wordpress_with_url_import.yaml")
tosca = ToscaTemplate(tosca_tpl,
parsed_params={'db_root_pwd': '123456'})
self.assertTrue(tosca.topology_template.custom_defs)
    def test_url_template_with_local_relpath_import(self):
        # a URL-based template may import custom types via a path relative
        # to the template's own URL
        tosca_tpl = ('https://raw.githubusercontent.com/openstack/'
                     'tosca-parser/master/toscaparser/tests/data/'
                     'tosca_single_instance_wordpress.yaml')
        tosca = ToscaTemplate(tosca_tpl, a_file=False,
                              parsed_params={"db_name": "mysql",
                                             "db_user": "mysql",
                                             "db_root_pwd": "1234",
                                             "db_pwd": "5678",
                                             "db_port": 3306,
                                             "cpus": 4})
        self.assertTrue(tosca.topology_template.custom_defs)
    def test_url_template_with_local_abspath_import(self):
        # absolute local paths are forbidden inside URL-based templates and
        # must surface as a ValidationError wrapping an ImportError
        tosca_tpl = ('https://raw.githubusercontent.com/openstack/'
                     'tosca-parser/master/toscaparser/tests/data/'
                     'tosca_single_instance_wordpress_with_local_abspath_'
                     'import.yaml')
        self.assertRaises(exception.ValidationError, ToscaTemplate, tosca_tpl,
                          None, False)
        err_msg = (_('Absolute file name "/tmp/tosca-parser/toscaparser/tests'
                     '/data/custom_types/wordpress.yaml" cannot be used in a '
                     'URL-based input template "%(tpl)s".')
                   % {'tpl': tosca_tpl})
        exception.ExceptionCollector.assertExceptionMessage(ImportError,
                                                            err_msg)
def test_url_template_with_url_import(self):
tosca_tpl = ('https://raw.githubusercontent.com/openstack/'
'tosca-parser/master/toscaparser/tests/data/'
'tosca_single_instance_wordpress_with_url_import.yaml')
tosca = ToscaTemplate(tosca_tpl, a_file=False,
parsed_params={"db_root_pwd": "1234"})
self.assertTrue(tosca.topology_template.custom_defs)
    def test_csar_parsing_wordpress(self):
        """A local CSAR (zip) archive parses successfully."""
        csar_archive = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            'data/CSAR/csar_wordpress.zip')
        self.assertTrue(ToscaTemplate(csar_archive,
                                      parsed_params={"db_name": "mysql",
                                                     "db_user": "mysql",
                                                     "db_root_pwd": "1234",
                                                     "db_pwd": "5678",
                                                     "db_port": 3306,
                                                     "cpus": 4}))

    def test_csar_parsing_elk_url_based(self):
        """A URL-hosted CSAR archive parses successfully."""
        csar_archive = ('https://github.com/openstack/tosca-parser/raw/master/'
                        'toscaparser/tests/data/CSAR/csar_elk.zip')
        self.assertTrue(ToscaTemplate(csar_archive, a_file=False,
                                      parsed_params={"my_cpus": 4}))
    def test_nested_imports_in_templates(self):
        """Custom types from nested (transitive) imports are all collected."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/test_instance_nested_imports.yaml")
        tosca = ToscaTemplate(tosca_tpl)
        expected_custom_types = ['tosca.nodes.SoftwareComponent.Kibana',
                                 'tosca.nodes.WebApplication.WordPress',
                                 'test_namespace_prefix.Rsyslog',
                                 'Test2ndRsyslogType',
                                 'test_2nd_namespace_prefix.Rsyslog',
                                 'tosca.nodes.SoftwareComponent.Logstash',
                                 'tosca.nodes.SoftwareComponent.Rsyslog.'
                                 'TestRsyslogType']
        self.assertCountEqual(tosca.topology_template.custom_defs.keys(),
                              expected_custom_types)

    def test_invalid_template_file(self):
        """A nonexistent template path raises ValidationError with a message."""
        template_file = 'invalid template file'
        expected_msg = (_('"%s" is not a valid file.') % template_file)
        self.assertRaises(
            exception.ValidationError,
            ToscaTemplate, template_file, None, False)
        exception.ExceptionCollector.assertExceptionMessage(ValueError,
                                                            expected_msg)
    def test_multiple_validation_errors(self):
        """All validation errors in a bad template are collected in one pass.

        The parser is expected to keep going after each failure and record
        every error in the ExceptionCollector rather than stopping at the
        first one; each err*_msg below checks one collected error.
        """
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/test_multiple_validation_errors.yaml")
        self.assertRaises(exception.ValidationError, ToscaTemplate, tosca_tpl,
                          None)
        valid_versions = '", "'.join(ToscaTemplate.VALID_TEMPLATE_VERSIONS)
        err1_msg = (_('The template version "tosca_simple_yaml_1" is invalid. '
                      'Valid versions are "%s".') % valid_versions)
        exception.ExceptionCollector.assertExceptionMessage(
            exception.InvalidTemplateVersion, err1_msg)
        err2_msg = _('Import "custom_types/not_there.yaml" is not valid.')
        exception.ExceptionCollector.assertExceptionMessage(
            ImportError, err2_msg)
        err3_msg = _('Type "tosca.nodes.WebApplication.WordPress" is not a '
                     'valid type.')
        exception.ExceptionCollector.assertExceptionMessage(
            exception.InvalidTypeError, err3_msg)
        err4_msg = _('Node template "wordpress" contains unknown field '
                     '"requirement". Refer to the definition to verify valid '
                     'values.')
        exception.ExceptionCollector.assertExceptionMessage(
            exception.UnknownFieldError, err4_msg)
        err5_msg = _('\'Property "passwords" was not found in node template '
                     '"mysql_database".\'')
        exception.ExceptionCollector.assertExceptionMessage(
            KeyError, err5_msg)
        err6_msg = _('Template "mysql_dbms" is missing required field "type".')
        exception.ExceptionCollector.assertExceptionMessage(
            exception.MissingRequiredFieldError, err6_msg)
        err7_msg = _('Node template "mysql_dbms" contains unknown field '
                     '"type1". Refer to the definition to verify valid '
                     'values.')
        exception.ExceptionCollector.assertExceptionMessage(
            exception.UnknownFieldError, err7_msg)
        err8_msg = _('\'Node template "server1" was not found in '
                     '"webserver".\'')
        exception.ExceptionCollector.assertExceptionMessage(
            KeyError, err8_msg)
        err9_msg = _('"relationship" used in template "webserver" is missing '
                     'required field "type".')
        exception.ExceptionCollector.assertExceptionMessage(
            exception.MissingRequiredFieldError, err9_msg)
        err10_msg = _('Type "tosca.nodes.XYZ" is not a valid type.')
        exception.ExceptionCollector.assertExceptionMessage(
            exception.InvalidTypeError, err10_msg)
    def test_invalid_section_names(self):
        """Each misspelled top-level section name is reported separately."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/test_invalid_section_names.yaml")
        self.assertRaises(exception.ValidationError, ToscaTemplate, tosca_tpl,
                          None)
        err1_msg = _('Template contains unknown field '
                     '"tosca_definitions_versions". Refer to the definition '
                     'to verify valid values.')
        exception.ExceptionCollector.assertExceptionMessage(
            exception.UnknownFieldError, err1_msg)
        err2_msg = _('Template contains unknown field "descriptions". '
                     'Refer to the definition to verify valid values.')
        exception.ExceptionCollector.assertExceptionMessage(
            exception.UnknownFieldError, err2_msg)
        err3_msg = _('Template contains unknown field "import". Refer to '
                     'the definition to verify valid values.')
        exception.ExceptionCollector.assertExceptionMessage(
            exception.UnknownFieldError, err3_msg)
        err4_msg = _('Template contains unknown field "topology_templates". '
                     'Refer to the definition to verify valid values.')
        exception.ExceptionCollector.assertExceptionMessage(
            exception.UnknownFieldError, err4_msg)

    # NOTE(review): method name has a typo ("extenstion"); kept as-is so
    # test discovery and history are unaffected.
    def test_csar_with_alternate_extenstion(self):
        """A CSAR with a .csar extension (instead of .zip) parses."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/CSAR/csar_elk.csar")
        tosca = ToscaTemplate(tosca_tpl, parsed_params={"my_cpus": 2})
        self.assertTrue(tosca.topology_template.custom_defs)
    def test_available_rel_tpls(self):
        """Traversing relationships of relationship targets must not raise."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/test_available_rel_tpls.yaml")
        tosca = ToscaTemplate(tosca_tpl)
        for node in tosca.nodetemplates:
            for relationship, target in node.relationships.items():
                try:
                    # Accessing .relationships on the target used to raise
                    # TypeError; this guards against a regression.
                    target.relationships
                except TypeError as error:
                    self.fail(error)

    def test_no_input(self):
        """Constructing with neither a path nor yaml_dict_tpl is rejected."""
        self.assertRaises(exception.ValidationError, ToscaTemplate, None,
                          None, False, None)
        err_msg = (('No path or yaml_dict_tpl was provided. '
                    'There is nothing to parse.'))
        exception.ExceptionCollector.assertExceptionMessage(ValueError,
                                                            err_msg)
    def test_path_and_yaml_dict_tpl_input(self):
        """Passing both a path and a pre-parsed dict parses successfully."""
        test_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/tosca_helloworld.yaml")
        yaml_dict_tpl = toscaparser.utils.yamlparser.load_yaml(test_tpl)
        tosca = ToscaTemplate(test_tpl, yaml_dict_tpl=yaml_dict_tpl)
        self.assertEqual(tosca.version, "tosca_simple_yaml_1_0")

    def test_yaml_dict_tpl_input(self):
        """A pre-parsed template dict alone is accepted."""
        test_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/tosca_helloworld.yaml")
        yaml_dict_tpl = toscaparser.utils.yamlparser.load_yaml(test_tpl)
        tosca = ToscaTemplate(yaml_dict_tpl=yaml_dict_tpl)
        self.assertEqual(tosca.version, "tosca_simple_yaml_1_0")

    def test_yaml_dict_tpl_with_params_and_url_import(self):
        """Pre-parsed dict input with params and a URL import parses."""
        test_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/tosca_single_instance_wordpress_with_url_import.yaml")
        yaml_dict_tpl = toscaparser.utils.yamlparser.load_yaml(test_tpl)
        params = {'db_name': 'my_wordpress', 'db_user': 'my_db_user',
                  'db_root_pwd': 'mypasswd'}
        tosca = ToscaTemplate(parsed_params=params,
                              yaml_dict_tpl=yaml_dict_tpl)
        self.assertEqual(tosca.version, "tosca_simple_yaml_1_0")

    def test_yaml_dict_tpl_with_rel_import(self):
        """Relative-path imports are rejected for pre-parsed dict input."""
        test_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/tosca_single_instance_wordpress.yaml")
        yaml_dict_tpl = toscaparser.utils.yamlparser.load_yaml(test_tpl)
        params = {'db_name': 'my_wordpress', 'db_user': 'my_db_user',
                  'db_root_pwd': '12345678'}
        self.assertRaises(exception.ValidationError, ToscaTemplate, None,
                          params, False, yaml_dict_tpl)
        err_msg = (_('Relative file name "custom_types/wordpress.yaml" '
                     'cannot be used in a pre-parsed input template.'))
        exception.ExceptionCollector.assertExceptionMessage(ImportError,
                                                            err_msg)

    def test_yaml_dict_tpl_with_fullpath_import(self):
        """Absolute-path imports are allowed for pre-parsed dict input."""
        test_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/tosca_single_instance_wordpress.yaml")
        yaml_dict_tpl = toscaparser.utils.yamlparser.load_yaml(test_tpl)
        # Rewrite the relative import to an absolute path so it is legal
        # for a pre-parsed template.
        yaml_dict_tpl['imports'] = [os.path.join(os.path.dirname(
            os.path.abspath(__file__)), "data/custom_types/wordpress.yaml")]
        params = {'db_name': 'my_wordpress', 'db_user': 'my_db_user',
                  'db_root_pwd': 'mypasswd'}
        tosca = ToscaTemplate(parsed_params=params,
                              yaml_dict_tpl=yaml_dict_tpl)
        self.assertEqual(tosca.version, "tosca_simple_yaml_1_0")
    def test_policies_for_node_templates(self):
        """Policies targeting node templates expose type, targets, props."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/policies/tosca_policy_template.yaml")
        tosca = ToscaTemplate(tosca_tpl)
        for policy in tosca.topology_template.policies:
            self.assertTrue(
                policy.is_derived_from("tosca.policies.Root"))
            if policy.name == 'my_compute_placement_policy':
                self.assertEqual('tosca.policies.Placement', policy.type)
                self.assertEqual(['my_server_1', 'my_server_2'],
                                 policy.targets)
                self.assertEqual('node_templates', policy.get_targets_type())
                for node in policy.targets_list:
                    if node.name == 'my_server_1':
                        '''Test property value'''
                        props = node.get_properties()
                        if props and 'mem_size' in props.keys():
                            self.assertEqual(props['mem_size'].value,
                                             '4096 MB')

    def test_policies_for_groups(self):
        """Policies targeting groups resolve the group's member nodes."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/policies/tosca_policy_template.yaml")
        tosca = ToscaTemplate(tosca_tpl)
        for policy in tosca.topology_template.policies:
            self.assertTrue(
                policy.is_derived_from("tosca.policies.Root"))
            if policy.name == 'my_groups_placement':
                self.assertEqual('mycompany.mytypes.myScalingPolicy',
                                 policy.type)
                self.assertEqual(['webserver_group'], policy.targets)
                self.assertEqual('groups', policy.get_targets_type())
                group = policy.get_targets_list()[0]
                for node in group.get_member_nodes():
                    if node.name == 'my_server_2':
                        '''Test property value'''
                        props = node.get_properties()
                        if props and 'mem_size' in props.keys():
                            self.assertEqual(props['mem_size'].value,
                                             '4096 MB')
    # Test the following:
    # check the inheritance between custom policies.
    # It will first parse the tosca template located at
    # data/policies/tosca_custom_policy_template.yaml where
    # two empty custom policies have been created. The child
    # empty custom policy tosca.policies.Adva.Failure.Restart
    # is derived from its parent empty custom policy
    # tosca.policies.Adva.Failure which is also derived
    # from its parent empty policy tosca.policies.Root.
    def test_policies_for_custom(self):
        """Verify inheritance chain of empty custom policy types."""
        host_prop = {}
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/policies/tosca_custom_policy_template.yaml")
        tosca = ToscaTemplate(tosca_tpl)
        for policy in tosca.topology_template.policies:
            self.assertTrue(
                policy.is_derived_from("tosca.policies.Root"))
            if policy.name == 'My_failure_policy_restart':
                self.assertEqual('tosca.policies.Adva.Failure.Restart',
                                 policy.type)
                targets = policy.targets
                for target in targets:
                    if ('my_server_1' == target):
                        '''Test property value'''
                        # Walk the target node's capabilities and collect
                        # the 'host' capability's property values.
                        for nodetemplate in tosca.nodetemplates:
                            if nodetemplate.name == target:
                                caps = nodetemplate.get_capabilities()
                                for cap in caps.keys():
                                    generic_cap = \
                                        nodetemplate.get_capability(cap)
                                    if generic_cap:
                                        for prop in \
                                            generic_cap.\
                                                get_properties_objects():
                                            host_prop[prop.name] = prop.value
                                        if cap == 'host':
                                            self.assertEqual(host_prop
                                                             ['mem_size'],
                                                             '512 MB')
    def test_node_filter(self):
        """Template with node_filter definitions parses without error."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/node_filter/test_node_filter.yaml")
        ToscaTemplate(tosca_tpl)

    def test_attributes_inheritance(self):
        """Inherited attributes parse without error."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/test_attributes_inheritance.yaml")
        ToscaTemplate(tosca_tpl)

    def test_repositories_definition(self):
        """Repository definitions parse without error."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/repositories/test_repositories_definition.yaml")
        ToscaTemplate(tosca_tpl)

    def test_custom_caps_def(self):
        """Custom capability definitions parse without error."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/test_custom_caps_def.yaml")
        ToscaTemplate(tosca_tpl)

    def test_custom_caps_with_custom_datatype(self):
        """Custom capabilities using custom datatypes parse without error."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/test_custom_caps_with_datatype.yaml")
        ToscaTemplate(tosca_tpl)

    def test_custom_rel_with_script(self):
        """A custom relationship with a Configure interface is exposed."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/test_tosca_custom_rel_with_script.yaml")
        tosca = ToscaTemplate(tosca_tpl)
        rel = tosca.relationship_templates[0]
        self.assertEqual(rel.type, "tosca.relationships.HostedOn")
        self.assertTrue(rel.is_derived_from("tosca.relationships.Root"))
        self.assertEqual(len(rel.interfaces), 1)
        self.assertEqual(rel.interfaces[0].type, "Configure")
    def test_various_portspec_errors(self):
        """PortSpec values outside their declared ranges are each reported."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/datatypes/test_datatype_portspec_add_req.yaml")
        self.assertRaises(exception.ValidationError, ToscaTemplate, tosca_tpl,
                          None)
        # TODO(TBD) find way to reuse error messages from constraints.py
        # Test value at the default range boundary (source port 0).
        msg = (_('The value "%(pvalue)s" of property "%(pname)s" is out of '
                 'range "(min:%(vmin)s, max:%(vmax)s)".') %
               dict(pname=PortSpec.SOURCE,
                    pvalue='0',
                    vmin='1',
                    vmax='65535'))
        exception.ExceptionCollector.assertExceptionMessage(
            exception.ValidationError, msg)
        # Test value below range min.
        msg = (_('The value "%(pvalue)s" of property "%(pname)s" is out of '
                 'range "(min:%(vmin)s, max:%(vmax)s)".') %
               dict(pname=PortSpec.SOURCE,
                    pvalue='1',
                    vmin='2',
                    vmax='65534'))
        exception.ExceptionCollector.assertExceptionMessage(
            exception.RangeValueError, msg)
        # Test value above range max.
        msg = (_('The value "%(pvalue)s" of property "%(pname)s" is out of '
                 'range "(min:%(vmin)s, max:%(vmax)s)".') %
               dict(pname=PortSpec.SOURCE,
                    pvalue='65535',
                    vmin='2',
                    vmax='65534'))
        exception.ExceptionCollector.assertExceptionMessage(
            exception.RangeValueError, msg)

    def test_containers(self):
        """Docker container template parses without error."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/containers/test_container_docker_mysql.yaml")
        ToscaTemplate(tosca_tpl, parsed_params={"mysql_root_pwd": "12345678"})
    def test_endpoint_on_compute(self):
        """Endpoint capability on a Compute node parses without error."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/test_endpoint_on_compute.yaml")
        ToscaTemplate(tosca_tpl)

    def test_nested_dsl_def(self):
        """Nested dsl_definitions (YAML anchors) parse without error."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/dsl_definitions/test_nested_dsl_def.yaml")
        self.assertIsNotNone(ToscaTemplate(tosca_tpl))

    def test_multiple_policies(self):
        """All policies declared in an NFV template are collected."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/policies/test_tosca_nfv_multiple_policies.yaml")
        tosca = ToscaTemplate(tosca_tpl)
        self.assertEqual(
            ['ALRM1', 'SP1', 'SP2'],
            sorted([policy.name for policy in tosca.policies]))

    def test_custom_capability(self):
        """Custom capability type parses without error."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/test_custom_capabilty.yaml")
        ToscaTemplate(tosca_tpl)

    def test_csar_multilevel_imports_relative_path(self):
        """CSAR whose imports are themselves relative imports parses."""
        csar_archive = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            'data/CSAR/csar_relative_path_import_check.zip')
        self.assertTrue(ToscaTemplate(csar_archive))
    def test_csar_multiple_deployment_flavours(self):
        """Each nested topology template carries its own flavour_id."""
        csar_archive = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            'data/CSAR/csar_multiple_deployment_flavour.zip')
        tosca = ToscaTemplate(csar_archive)
        flavours = list()
        for tp in tosca.nested_tosca_templates_with_topology:
            flavour_id = tp.substitution_mappings.properties.get('flavour_id')
            flavour = {'flavour_id': flavour_id}
            flavours.append(flavour)
        self.assertEqual(flavours[0]['flavour_id'], 'simple')
        self.assertEqual(flavours[1]['flavour_id'], 'complex')

    def test_custom_rel_get_type(self):
        """Relationship templates of targets report the custom rel type."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/test_tosca_custom_rel.yaml")
        tosca = ToscaTemplate(tosca_tpl)
        for src in tosca.nodetemplates:
            for rel, trgt in src.relationships.items():
                rel_tpls = trgt.get_relationship_template()
                self.assertEqual(rel_tpls[0].type, "MyAttachesTo")

    def test_policies_without_required_property(self):
        """A policy missing a required property fails validation."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/policies/test_policies_without_required_property.yaml")
        self.assertRaises(exception.ValidationError, ToscaTemplate,
                          tosca_tpl, None)
| apache-2.0 |
gpoulter/pydedupe | tests/test_linkcsv.py | 1 | 1959 | #!/usr/bin/env python
import logging
import sys
import unittest
from os.path import dirname, join
sys.path.insert(0, dirname(dirname(dirname(__file__))))
from dedupe import block, sim, linkcsv
def classify(comparisons, threshold=0.5):
    """Split comparison vectors into match and non-match pairs.

    A pair is a match when the first component of its similarity vector
    exceeds *threshold*.

    :type comparisons: {(R, R):[float, ...]}
    :param comparisons: similarity vectors for pairs of records
    :param threshold: cutoff on the first similarity component
        (default 0.5, preserving the original behavior).
    :return: (matches, nomatches) dicts mapping pair to score.

    >>> comparisons = {(1, 2): [0.8], (2, 3): [0.2]}
    >>> classify(comparisons)
    ({(1, 2): 1.0}, {(2, 3): 0.0})
    """
    matches, nomatches = {}, {}
    # NOTE: loop variable renamed from `sim` — the original shadowed the
    # imported `dedupe.sim` module.  `.items()` instead of the Python-2-only
    # `.iteritems()` keeps the function working on both Python 2 and 3.
    for pair, simvec in comparisons.items():
        if simvec[0] > threshold:
            matches[pair] = 1.0
        else:
            nomatches[pair] = 0.0
    return matches, nomatches
class FakeOpen:
    """Stand-in for the builtin ``open`` that echoes writes to stdout.

    Used by the tests to capture what linkcsv would have written to disk;
    supports the minimal file protocol plus context-manager use.
    """

    def __init__(self, name, mode):
        # Only the name is retained; the mode is accepted for signature
        # compatibility with open() and otherwise ignored.
        self.name = name

    def write(self, text):
        sys.stdout.write("%s: %s" % (self.name, text))
        sys.stdout.flush()

    def close(self):
        pass

    def __enter__(self):
        return self

    def __exit__(self, a, b, c):
        pass
class TestLinkCSV(unittest.TestCase):
    # End-to-end exercise of linkcsv.LinkCSV: output files are replaced by
    # FakeOpen so all "written" CSV ends up on stdout.

    def test(self):
        """Run LinkCSV in single-dataset mode and in master-linkage mode."""
        # fudge the built-in open function for linkcsv
        linkcsv.open = FakeOpen
        logging.open = FakeOpen
        # set up parameters
        records = [("A", "5.5"), ("B", "3.5"), ("C", "5.25")]
        # key on the integer part of the numeric second field
        makekey = lambda r: [int(float(r[1]))]
        # two values compare equal (1.0) when their int parts match
        vcompare = lambda x, y: float(int(x) == int(y))
        indexing = [("Idx", block.Index, makekey)]
        comparator = sim.Record(
            ("Compare", sim.Field(vcompare, 1, float)),
        )
        # link and print the output
        linker = linkcsv.LinkCSV(
            "/single", indexing, comparator, classify, records)
        linker.write_all()
        # link against master and print the output
        linker = linkcsv.LinkCSV(
            "/master", indexing, comparator, classify, records, master=records)
        linker.write_all()


if __name__ == "__main__":
    unittest.main()
| gpl-3.0 |
usc-isi/essex-baremetal-support | nova/network/linux_net.py | 1 | 46295 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implements vlans, bridges, and iptables rules using linux utilities."""
import calendar
import inspect
import netaddr
import os
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova import utils
LOG = logging.getLogger(__name__)

# Configuration options controlling Linux networking behaviour (dnsmasq,
# bridges, NAT, and HA ARP announcements).  Registered on FLAGS below.
linux_net_opts = [
    cfg.StrOpt('dhcpbridge_flagfile',
               default='/etc/nova/nova-dhcpbridge.conf',
               help='location of flagfile for dhcpbridge'),
    cfg.StrOpt('networks_path',
               default='$state_path/networks',
               help='Location to keep network config files'),
    cfg.StrOpt('public_interface',
               default='eth0',
               help='Interface for public IP addresses'),
    cfg.StrOpt('network_device_mtu',
               default=None,
               help='MTU setting for vlan'),
    cfg.StrOpt('dhcpbridge',
               default='$bindir/nova-dhcpbridge',
               help='location of nova-dhcpbridge'),
    cfg.StrOpt('routing_source_ip',
               default='$my_ip',
               help='Public IP of network host'),
    cfg.IntOpt('dhcp_lease_time',
               default=120,
               help='Lifetime of a DHCP lease in seconds'),
    cfg.StrOpt('dns_server',
               default=None,
               help='if set, uses specific dns server for dnsmasq'),
    cfg.StrOpt('dmz_cidr',
               default='10.128.0.0/24',
               help='dmz range that should be accepted'),
    cfg.StrOpt('dnsmasq_config_file',
               default="",
               help='Override the default dnsmasq settings with this file'),
    cfg.StrOpt('linuxnet_interface_driver',
               default='nova.network.linux_net.LinuxBridgeInterfaceDriver',
               help='Driver used to create ethernet devices.'),
    cfg.StrOpt('linuxnet_ovs_integration_bridge',
               default='br-int',
               help='Name of Open vSwitch bridge used with linuxnet'),
    cfg.BoolOpt('send_arp_for_ha',
                default=False,
                help='send gratuitous ARPs for HA setup'),
    cfg.BoolOpt('use_single_default_gateway',
                default=False,
                help='Use single default gateway. Only first nic of vm will '
                     'get default gateway from dhcp server'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(linux_net_opts)

# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
#             add up to 12 characters to binary_name which is used as a prefix,
#             so we limit it to 16 characters.
#             (max_chain_name_length - len('-POSTROUTING') == 16)
# The prefix is derived from the name of the top-level script that started
# this process (e.g. 'nova-network'), so each Nova binary manages its own
# wrapped chains without clobbering the others'.
binary_name = os.path.basename(inspect.stack()[-1][1])[:16]
class IptablesRule(object):
    """A single iptables rule bound to a chain.

    You shouldn't need to use this class directly, it's only used by
    IptablesManager.  ``wrap`` controls whether the chain name is prefixed
    with this binary's name when rendered; ``top`` requests placement at the
    top of the chain.
    """

    def __init__(self, chain, rule, wrap=True, top=False):
        self.chain = chain
        self.rule = rule
        self.wrap = wrap
        self.top = top

    def __eq__(self, other):
        # Two rules are equal when every distinguishing attribute matches.
        return ((self.chain, self.rule, self.top, self.wrap) ==
                (other.chain, other.rule, other.top, other.wrap))

    def __ne__(self, other):
        return not self == other

    def __str__(self):
        # Render in iptables-restore syntax; wrapped chains carry the
        # per-binary prefix so each Nova worker owns its own chains.
        if self.wrap:
            return '-A %s-%s %s' % (binary_name, self.chain, self.rule)
        return '-A %s %s' % (self.chain, self.rule)
class IptablesTable(object):
    """An in-memory model of one iptables table (e.g. 'filter' or 'nat').

    Holds chain names and IptablesRule objects; IptablesManager renders
    the whole thing through iptables-restore.  Nothing here touches the
    system — mutations take effect only on IptablesManager.apply().
    """

    def __init__(self):
        # rules: ordered list of IptablesRule
        # chains: names that get the per-binary prefix when rendered
        # unwrapped_chains: names used verbatim (shared across binaries)
        self.rules = []
        self.chains = set()
        self.unwrapped_chains = set()

    def add_chain(self, name, wrap=True):
        """Adds a named chain to the table.

        The chain name is wrapped to be unique for the component creating
        it, so different components of Nova can safely create identically
        named chains without interfering with one another.

        At the moment, its wrapped name is <binary name>-<chain name>,
        so if nova-compute creates a chain named 'OUTPUT', it'll actually
        end up named 'nova-compute-OUTPUT'.
        """
        if wrap:
            self.chains.add(name)
        else:
            self.unwrapped_chains.add(name)

    def remove_chain(self, name, wrap=True):
        """Remove named chain.

        This removal "cascades". All rule in the chain are removed, as are
        all rules in other chains that jump to it.

        If the chain is not found, this is merely logged.
        """
        if wrap:
            chain_set = self.chains
        else:
            chain_set = self.unwrapped_chains
        if name not in chain_set:
            LOG.debug(_('Attempted to remove chain %s which does not exist'),
                      name)
            return
        chain_set.remove(name)
        # Drop the chain's own rules...
        # NOTE(review): relies on Python 2 filter() returning a list.
        self.rules = filter(lambda r: r.chain != name, self.rules)
        # ...and any rule elsewhere that jumps to it.
        if wrap:
            jump_snippet = '-j %s-%s' % (binary_name, name)
        else:
            jump_snippet = '-j %s' % (name,)
        self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)

    def add_rule(self, chain, rule, wrap=True, top=False):
        """Add a rule to the table.

        This is just like what you'd feed to iptables, just without
        the '-A <chain name>' bit at the start.

        However, if you need to jump to one of your wrapped chains,
        prepend its name with a '$' which will ensure the wrapping
        is applied correctly.
        """
        if wrap and chain not in self.chains:
            raise ValueError(_('Unknown chain: %r') % chain)
        if '$' in rule:
            # Expand $chain references to their wrapped names token by token.
            rule = ' '.join(map(self._wrap_target_chain, rule.split(' ')))
        self.rules.append(IptablesRule(chain, rule, wrap, top))

    def _wrap_target_chain(self, s):
        # Helper for add_rule: '$name' -> '<binary>-name', others unchanged.
        if s.startswith('$'):
            return '%s-%s' % (binary_name, s[1:])
        return s

    def remove_rule(self, chain, rule, wrap=True, top=False):
        """Remove a rule from a chain.

        Note: The rule must be exactly identical to the one that was added.
        You cannot switch arguments around like you can with the iptables
        CLI tool.
        """
        try:
            self.rules.remove(IptablesRule(chain, rule, wrap, top))
        except ValueError:
            LOG.debug(_('Tried to remove rule that was not there:'
                        ' %(chain)r %(rule)r %(wrap)r %(top)r'),
                      {'chain': chain, 'rule': rule,
                       'top': top, 'wrap': wrap})

    def empty_chain(self, chain, wrap=True):
        """Remove all rules from a chain."""
        chained_rules = [rule for rule in self.rules
                         if rule.chain == chain and rule.wrap == wrap]
        for rule in chained_rules:
            self.rules.remove(rule)
class IptablesManager(object):
    """Wrapper for iptables.

    See IptablesTable for some usage docs

    A number of chains are set up to begin with.

    First, nova-filter-top. It's added at the top of FORWARD and OUTPUT. Its
    name is not wrapped, so it's shared between the various nova workers. It's
    intended for rules that need to live at the top of the FORWARD and OUTPUT
    chains. It's in both the ipv4 and ipv6 set of tables.

    For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
    are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
    the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
    "local" which is jumped to from nova-filter-top.

    For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
    wrapped in the same was as the built-in filter chains. Additionally,
    there's a snat chain that is applied after the POSTROUTING chain.
    """

    def __init__(self, execute=None):
        # `execute` is injectable for testing; defaults to the module-level
        # _execute (root-wrapped command runner).
        if not execute:
            self.execute = _execute
        else:
            self.execute = execute

        self.ipv4 = {'filter': IptablesTable(),
                     'nat': IptablesTable()}
        self.ipv6 = {'filter': IptablesTable()}

        # Add a nova-filter-top chain. It's intended to be shared
        # among the various nova components. It sits at the very top
        # of FORWARD and OUTPUT.
        for tables in [self.ipv4, self.ipv6]:
            tables['filter'].add_chain('nova-filter-top', wrap=False)
            tables['filter'].add_rule('FORWARD', '-j nova-filter-top',
                                      wrap=False, top=True)
            tables['filter'].add_rule('OUTPUT', '-j nova-filter-top',
                                      wrap=False, top=True)

            tables['filter'].add_chain('local')
            tables['filter'].add_rule('nova-filter-top', '-j $local',
                                      wrap=False)

        # Wrap the built-in chains
        builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD'],
                              'nat': ['PREROUTING', 'OUTPUT', 'POSTROUTING']},
                          6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}

        for ip_version in builtin_chains:
            if ip_version == 4:
                tables = self.ipv4
            elif ip_version == 6:
                tables = self.ipv6

            for table, chains in builtin_chains[ip_version].iteritems():
                for chain in chains:
                    tables[table].add_chain(chain)
                    tables[table].add_rule(chain, '-j $%s' % (chain,),
                                           wrap=False)

        # Add a nova-postrouting-bottom chain. It's intended to be shared
        # among the various nova components. We set it as the last chain
        # of POSTROUTING chain.
        self.ipv4['nat'].add_chain('nova-postrouting-bottom', wrap=False)
        self.ipv4['nat'].add_rule('POSTROUTING', '-j nova-postrouting-bottom',
                                  wrap=False)

        # We add a snat chain to the shared nova-postrouting-bottom chain
        # so that it's applied last.
        self.ipv4['nat'].add_chain('snat')
        self.ipv4['nat'].add_rule('nova-postrouting-bottom', '-j $snat',
                                  wrap=False)

        # And then we add a float-snat chain and jump to first thing in
        # the snat chain.
        self.ipv4['nat'].add_chain('float-snat')
        self.ipv4['nat'].add_rule('snat', '-j $float-snat')

    @utils.synchronized('iptables', external=True)
    def apply(self):
        """Apply the current in-memory set of iptables rules.

        This will blow away any rules left over from previous runs of the
        same component of Nova, and replace them with our current set of
        rules. This happens atomically, thanks to iptables-restore.
        """
        s = [('iptables', self.ipv4)]
        if FLAGS.use_ipv6:
            s += [('ip6tables', self.ipv6)]

        for cmd, tables in s:
            for table in tables:
                # Dump the live table, splice our rules in, and restore it in
                # one shot.  attempts=5 retries around transient xtables-lock
                # contention with other iptables users.
                current_table, _err = self.execute('%s-save' % (cmd,),
                                                   '-t', '%s' % (table,),
                                                   run_as_root=True,
                                                   attempts=5)
                current_lines = current_table.split('\n')
                new_filter = self._modify_rules(current_lines,
                                                tables[table])
                self.execute('%s-restore' % (cmd,), run_as_root=True,
                             process_input='\n'.join(new_filter),
                             attempts=5)
        LOG.debug(_("IPTablesManager.apply completed with success"))

    def _modify_rules(self, current_lines, table, binary=None):
        """Merge `table`'s rules into an iptables-save dump.

        Strips every line previously written by this binary, then inserts
        our chain declarations and rules immediately after the dump's chain
        declaration section.  Returns the new list of lines.

        NOTE(review): the `binary` parameter appears unused here — confirm
        before removing.
        """
        unwrapped_chains = table.unwrapped_chains
        chains = table.chains
        rules = table.rules

        # Remove any trace of our rules
        new_filter = filter(lambda line: binary_name not in line,
                            current_lines)

        seen_chains = False
        rules_index = 0
        # Find the first line after the ':CHAIN ...' declaration block; our
        # chains and rules are inserted there.
        for rules_index, rule in enumerate(new_filter):
            if not seen_chains:
                if rule.startswith(':'):
                    seen_chains = True
            else:
                if not rule.startswith(':'):
                    break

        our_rules = []
        for rule in rules:
            rule_str = str(rule)
            if rule.top:
                # rule.top == True means we want this rule to be at the top.
                # Further down, we weed out duplicates from the bottom of the
                # list, so here we remove the dupes ahead of time.
                new_filter = filter(lambda s: s.strip() != rule_str.strip(),
                                    new_filter)
            our_rules += [rule_str]

        new_filter[rules_index:rules_index] = our_rules

        new_filter[rules_index:rules_index] = [':%s - [0:0]' % (name,)
                                               for name in unwrapped_chains]
        new_filter[rules_index:rules_index] = [':%s-%s - [0:0]' %
                                               (binary_name, name,)
                                               for name in chains]

        seen_lines = set()

        def _weed_out_duplicates(line):
            # Keep the first occurrence seen during the (reversed) scan.
            line = line.strip()
            if line in seen_lines:
                return False
            else:
                seen_lines.add(line)
                return True

        # We filter duplicates, letting the *last* occurrence take
        # precedence.
        new_filter.reverse()
        new_filter = filter(_weed_out_duplicates, new_filter)
        new_filter.reverse()
        return new_filter
# NOTE(jkoelker) This is just a nice little stub point since mocking
# builtins with mox is a nightmare
# NOTE(jkoelker) This is just a nice little stub point since mocking
#                builtins with mox is a nightmare
def write_to_file(file, data, mode='w'):
    """Write *data* to *file* using open mode *mode*, always closing it.

    NOTE(review): the parameter name `file` shadows the Python 2 builtin;
    kept for keyword-argument compatibility with existing callers.
    """
    handle = open(file, mode)
    try:
        handle.write(data)
    finally:
        handle.close()
def ensure_path(path):
    """Ensure *path* exists as a directory, creating it if needed.

    Tolerates the directory being created by another process between the
    existence check and makedirs() (classic check-then-act race), which
    matters since multiple nova workers share the networks_path.
    """
    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except OSError:
            # Another process may have created it first; only re-raise
            # when the directory still does not exist.
            if not os.path.isdir(path):
                raise
def metadata_forward():
    """Create forwarding rule for metadata."""
    # DNAT traffic aimed at the EC2 metadata magic address
    # (169.254.169.254:80) to the configured metadata service host:port.
    iptables_manager.ipv4['nat'].add_rule('PREROUTING',
                                          '-s 0.0.0.0/0 -d 169.254.169.254/32 '
                                          '-p tcp -m tcp --dport 80 -j DNAT '
                                          '--to-destination %s:%s' %
                                          (FLAGS.metadata_host,
                                           FLAGS.metadata_port))
    iptables_manager.apply()


def metadata_accept():
    """Create the filter accept rule for metadata."""
    # Allow inbound TCP to the metadata service itself.
    iptables_manager.ipv4['filter'].add_rule('INPUT',
                                             '-s 0.0.0.0/0 -d %s '
                                             '-p tcp -m tcp --dport %s '
                                             '-j ACCEPT' %
                                             (FLAGS.metadata_host,
                                              FLAGS.metadata_port))
    iptables_manager.apply()
def add_snat_rule(ip_range):
    """SNAT outbound traffic from *ip_range* to the routing source IP."""
    iptables_manager.ipv4['nat'].add_rule('snat',
                                          '-s %s -j SNAT --to-source %s' %
                                          (ip_range,
                                           FLAGS.routing_source_ip))
    iptables_manager.apply()


def init_host(ip_range=None):
    """Basic networking setup goes here."""

    # NOTE(devcamcar): Cloud public SNAT entries and the default
    # SNAT rule for outbound traffic.
    if not ip_range:
        ip_range = FLAGS.fixed_range

    add_snat_rule(ip_range)

    # Exempt metadata, DMZ, and (non-DNAT'd) intra-range traffic from SNAT
    # so those flows keep their original source addresses.
    iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
                                          '-s %s -d %s/32 -j ACCEPT' %
                                          (ip_range, FLAGS.metadata_host))

    iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
                                          '-s %s -d %s -j ACCEPT' %
                                          (ip_range, FLAGS.dmz_cidr))

    iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
                                          '-s %(range)s -d %(range)s '
                                          '-m conntrack ! --ctstate DNAT '
                                          '-j ACCEPT' %
                                          {'range': ip_range})
    iptables_manager.apply()
def bind_floating_ip(floating_ip, device):
    """Bind ip to public interface."""
    # Exit codes 2/254 are tolerated: the address may already be assigned.
    _execute('ip', 'addr', 'add', str(floating_ip) + '/32',
             'dev', device,
             run_as_root=True, check_exit_code=[0, 2, 254])
    if FLAGS.send_arp_for_ha:
        # Gratuitous ARP so upstream switches/routers learn the new binding
        # quickly after a failover; arping failure is non-fatal.
        _execute('arping', '-U', floating_ip,
                 '-A', '-I', device,
                 '-c', 1, run_as_root=True, check_exit_code=False)


def unbind_floating_ip(floating_ip, device):
    """Unbind a public ip from public interface."""
    _execute('ip', 'addr', 'del', str(floating_ip) + '/32',
             'dev', device,
             run_as_root=True, check_exit_code=[0, 2, 254])


def ensure_metadata_ip():
    """Sets up local metadata ip."""
    # Assign the metadata magic address to loopback so locally generated
    # metadata traffic has a destination.
    _execute('ip', 'addr', 'add', '169.254.169.254/32',
             'scope', 'link', 'dev', 'lo',
             run_as_root=True, check_exit_code=[0, 2, 254])
def ensure_vpn_forward(public_ip, port, private_ip):
    """Sets up forwarding rules for vlan."""
    # Accept and DNAT the cloudpipe VPN port (OpenVPN's 1194) to the
    # project's VPN instance.
    iptables_manager.ipv4['filter'].add_rule('FORWARD',
                                             '-d %s -p udp '
                                             '--dport 1194 '
                                             '-j ACCEPT' % private_ip)
    iptables_manager.ipv4['nat'].add_rule('PREROUTING',
                                          '-d %s -p udp '
                                          '--dport %s -j DNAT --to %s:1194' %
                                          (public_ip, port, private_ip))
    iptables_manager.ipv4['nat'].add_rule("OUTPUT",
                                          "-d %s -p udp "
                                          "--dport %s -j DNAT --to %s:1194" %
                                          (public_ip, port, private_ip))
    iptables_manager.apply()


def ensure_floating_forward(floating_ip, fixed_ip):
    """Ensure floating ip forwarding rule."""
    for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
        iptables_manager.ipv4['nat'].add_rule(chain, rule)
    iptables_manager.apply()


def remove_floating_forward(floating_ip, fixed_ip):
    """Remove forwarding for floating ip."""
    # Mirror image of ensure_floating_forward: removes the same rule set.
    for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
        iptables_manager.ipv4['nat'].remove_rule(chain, rule)
    iptables_manager.apply()
def floating_forward_rules(floating_ip, fixed_ip):
    """Return (chain, rule) pairs implementing NAT for one floating ip.

    DNAT in PREROUTING/OUTPUT maps the floating address to the fixed one;
    the float-snat rule rewrites outbound traffic from the fixed address
    back to the floating address.
    """
    dnat_rule = '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)
    snat_rule = '-s %s -j SNAT --to %s' % (fixed_ip, floating_ip)
    return [('PREROUTING', dnat_rule),
            ('OUTPUT', dnat_rule),
            ('float-snat', snat_rule)]
def initialize_gateway_device(dev, network_ref):
    """Configure *dev* as the gateway device for a network.

    Ensures the dhcp server address is the first (primary) address on the
    device, moving any other existing global ips behind it and restoring
    the default route if it had to be removed in the process.
    """
    if not network_ref:
        return
    # NOTE(vish): The ip for dnsmasq has to be the first address on the
    # bridge for it to respond to requests properly
    full_ip = '%s/%s' % (network_ref['dhcp_server'],
                         network_ref['cidr'].rpartition('/')[2])
    new_ip_params = [[full_ip, 'brd', network_ref['broadcast']]]
    old_ip_params = []
    out, err = _execute('ip', 'addr', 'show', 'dev', dev,
                        'scope', 'global', run_as_root=True)
    # Collect the device's current global addresses from `ip addr show`.
    for line in out.split('\n'):
        fields = line.split()
        if fields and fields[0] == 'inet':
            ip_params = fields[1:-1]
            old_ip_params.append(ip_params)
            if ip_params[0] != full_ip:
                new_ip_params.append(ip_params)
    # Only rewrite addressing when the dhcp ip is not already primary.
    if not old_ip_params or old_ip_params[0][0] != full_ip:
        gateway = None
        out, err = _execute('route', '-n', run_as_root=True)
        for line in out.split('\n'):
            fields = line.split()
            if fields and fields[0] == '0.0.0.0' and fields[-1] == dev:
                gateway = fields[1]
                # Temporarily drop the default route; it is re-added
                # below once the addresses are rewritten.
                _execute('route', 'del', 'default', 'gw', gateway,
                         'dev', dev, run_as_root=True,
                         check_exit_code=[0, 7])
        for ip_params in old_ip_params:
            _execute(*_ip_bridge_cmd('del', ip_params, dev),
                     run_as_root=True, check_exit_code=[0, 2, 254])
        for ip_params in new_ip_params:
            _execute(*_ip_bridge_cmd('add', ip_params, dev),
                     run_as_root=True, check_exit_code=[0, 2, 254])
        if gateway:
            _execute('route', 'add', 'default', 'gw', gateway,
                     run_as_root=True, check_exit_code=[0, 7])
        if FLAGS.send_arp_for_ha:
            # Gratuitous ARP so neighbors learn the (re)bound address.
            _execute('arping', '-U', network_ref['dhcp_server'],
                     '-A', '-I', dev,
                     '-c', 1, run_as_root=True, check_exit_code=False)
    if(FLAGS.use_ipv6):
        _execute('ip', '-f', 'inet6', 'addr',
                 'change', network_ref['cidr_v6'],
                 'dev', dev, run_as_root=True)
def get_dhcp_leases(context, network_ref):
    """Return a network's hosts config in dnsmasq leasefile format."""
    hosts = []
    host = None
    # In multi_host mode only include ips served by this host.
    if network_ref['multi_host']:
        host = FLAGS.host
    for data in db.network_get_associated_fixed_ips(context,
                                                    network_ref['id'],
                                                    host=host):
        hosts.append(_host_lease(data))
    return '\n'.join(hosts)
def get_dhcp_hosts(context, network_ref):
    """Get network's hosts config in dhcp-host format.

    Mirrors get_dhcp_leases() but formats each entry with _host_dhcp()
    (dnsmasq --dhcp-hostsfile syntax) instead of leasefile syntax.
    """
    hosts = []
    host = None
    # In multi_host mode only include ips served by this host.
    if network_ref['multi_host']:
        host = FLAGS.host
    for data in db.network_get_associated_fixed_ips(context,
                                                    network_ref['id'],
                                                    host=host):
        hosts.append(_host_dhcp(data))
    return '\n'.join(hosts)
def _add_dnsmasq_accept_rules(dev):
    """Allow DHCP and DNS traffic through to dnsmasq."""
    table = iptables_manager.ipv4['filter']
    # 67 = DHCP server (bootps), 53 = DNS; open both over udp and tcp.
    for port in [67, 53]:
        for proto in ['udp', 'tcp']:
            args = {'dev': dev, 'port': port, 'proto': proto}
            table.add_rule('INPUT',
                           '-i %(dev)s -p %(proto)s -m %(proto)s '
                           '--dport %(port)s -j ACCEPT' % args)
    iptables_manager.apply()
def get_dhcp_opts(context, network_ref):
    """Get network's hosts config in dhcp-opts format.

    For instances with more than one virtual interface, only the first
    vif is offered a default gateway; every *other* vif of such an
    instance gets a suppress-gateway opts line (see _host_dhcp_opts).
    """
    hosts = []
    host = None
    # In multi_host mode only include ips served by this host.
    if network_ref['multi_host']:
        host = FLAGS.host
    data = db.network_get_associated_fixed_ips(context,
                                               network_ref['id'],
                                               host=host)
    if data:
        #set of instance ids
        instance_set = set([datum['instance_id'] for datum in data])
        default_gw_vif = {}
        for instance_id in instance_set:
            vifs = db.virtual_interface_get_by_instance(context, instance_id)
            if vifs:
                #offer a default gateway to the first virtual interface
                default_gw_vif[instance_id] = vifs[0]['id']
        for datum in data:
            # BUGFIX: previously this loop reused the stale ``instance_id``
            # left over from the loop above, so every datum was compared
            # against one arbitrary instance.  Look up the datum's own
            # instance instead.
            instance_id = datum['instance_id']
            if instance_id in default_gw_vif:
                # we don't want default gateway for this fixed ip
                if default_gw_vif[instance_id] != datum['vif_id']:
                    hosts.append(_host_dhcp_opts(datum))
    return '\n'.join(hosts)
def release_dhcp(dev, address, mac_address):
    """Release a dhcp lease via the dhcp_release utility (dnsmasq-utils)."""
    utils.execute('dhcp_release', dev, address, mac_address, run_as_root=True)
def update_dhcp(context, dev, network_ref):
    """Rewrite the dnsmasq hosts file for *dev* and (re)start dnsmasq."""
    conffile = _dhcp_file(dev, 'conf')
    write_to_file(conffile, get_dhcp_hosts(context, network_ref))
    restart_dhcp(context, dev, network_ref)
def update_dhcp_hostfile_with_text(dev, hosts_text):
    """Overwrite the dnsmasq hosts file for *dev* with *hosts_text*.

    Unlike update_dhcp() this does NOT signal dnsmasq; the caller is
    responsible for triggering a reload if one is needed.
    """
    conffile = _dhcp_file(dev, 'conf')
    write_to_file(conffile, hosts_text)
def kill_dhcp(dev):
    """Forcibly terminate the dnsmasq instance recorded for *dev*, if any."""
    pid = _dnsmasq_pid_for(dev)
    if pid:
        # NOTE(review): unlike restart_dhcp() this does not verify that the
        # pid still belongs to dnsmasq -- a stale pid file would SIGKILL an
        # unrelated process.  Confirm whether that check is needed here.
        _execute('kill', '-9', pid, run_as_root=True)
# NOTE(ja): Sending a HUP only reloads the hostfile, so any
# configuration options (like dhcp-range, vlan, ...)
# aren't reloaded.
@utils.synchronized('dnsmasq_start')
def restart_dhcp(context, dev, network_ref):
    """(Re)starts a dnsmasq server for a given network.

    If a dnsmasq instance is already running then send a HUP
    signal causing it to reload, otherwise spawn a new instance.
    """
    conffile = _dhcp_file(dev, 'conf')
    if FLAGS.use_single_default_gateway:
        # NOTE(vish): this will have serious performance implications if we
        # are not in multi_host mode.
        optsfile = _dhcp_file(dev, 'opts')
        write_to_file(optsfile, get_dhcp_opts(context, network_ref))
        os.chmod(optsfile, 0644)
    # Make sure dnsmasq can actually read it (it setuid()s to "nobody")
    os.chmod(conffile, 0644)
    pid = _dnsmasq_pid_for(dev)
    # if dnsmasq is already running, then tell it to reload
    if pid:
        out, _err = _execute('cat', '/proc/%d/cmdline' % pid,
                             check_exit_code=False)
        # Using symlinks can cause problems here so just compare the name
        # of the file itself
        if conffile.split("/")[-1] in out:
            try:
                _execute('kill', '-HUP', pid, run_as_root=True)
                return
            except Exception as exc:  # pylint: disable=W0703
                # Fall through and relaunch a fresh dnsmasq below.
                LOG.error(_('Hupping dnsmasq threw %s'), exc)
        else:
            LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid)
    # Build the full dnsmasq command line for a fresh launch.
    cmd = ['FLAGFILE=%s' % FLAGS.dhcpbridge_flagfile,
           'NETWORK_ID=%s' % str(network_ref['id']),
           'dnsmasq',
           '--strict-order',
           '--bind-interfaces',
           '--conf-file=%s' % FLAGS.dnsmasq_config_file,
           '--domain=%s' % FLAGS.dhcp_domain,
           '--pid-file=%s' % _dhcp_file(dev, 'pid'),
           '--listen-address=%s' % network_ref['dhcp_server'],
           '--except-interface=lo',
           '--dhcp-range=%s,static,%ss' % (network_ref['dhcp_start'],
                                           FLAGS.dhcp_lease_time),
           '--dhcp-lease-max=%s' % len(netaddr.IPNetwork(network_ref['cidr'])),
           '--dhcp-hostsfile=%s' % _dhcp_file(dev, 'conf'),
           '--dhcp-script=%s' % FLAGS.dhcpbridge,
           '--leasefile-ro']
    if FLAGS.dns_server:
        # -h: no /etc/hosts, -R: no /etc/resolv.conf; forward to dns_server.
        cmd += ['-h', '-R', '--server=%s' % FLAGS.dns_server]
    if FLAGS.use_single_default_gateway:
        cmd += ['--dhcp-optsfile=%s' % _dhcp_file(dev, 'opts')]
    _execute(*cmd, run_as_root=True)
    _add_dnsmasq_accept_rules(dev)
@utils.synchronized('radvd_start')
def update_ra(context, dev, network_ref):
    """(Re)start radvd to advertise the network's IPv6 prefix on *dev*."""
    conffile = _ra_file(dev, 'conf')
    conf_str = """
interface %s
{
AdvSendAdvert on;
MinRtrAdvInterval 3;
MaxRtrAdvInterval 10;
prefix %s
{
AdvOnLink on;
AdvAutonomous on;
};
};
""" % (dev, network_ref['cidr_v6'])
    write_to_file(conffile, conf_str)
    # Make sure radvd can actually read it (it setuid()s to "nobody")
    os.chmod(conffile, 0644)
    pid = _ra_pid_for(dev)
    # if radvd is already running, then tell it to reload
    if pid:
        out, _err = _execute('cat', '/proc/%d/cmdline'
                             % pid, check_exit_code=False)
        if conffile in out:
            try:
                # Plain SIGTERM: the old radvd is stopped and a fresh one
                # is launched below (radvd has no in-place reload here).
                _execute('kill', pid, run_as_root=True)
            except Exception as exc:  # pylint: disable=W0703
                LOG.debug(_('killing radvd threw %s'), exc)
        else:
            LOG.debug(_('Pid %d is stale, relaunching radvd'), pid)
    cmd = ['radvd',
           '-C', '%s' % _ra_file(dev, 'conf'),
           '-p', '%s' % _ra_file(dev, 'pid')]
    _execute(*cmd, run_as_root=True)
def _host_lease(data):
    """Return a host string for an address in leasefile format."""
    # Prefer the instance's last-updated time, falling back to creation.
    if data['instance_updated']:
        timestamp = data['instance_updated']
    else:
        timestamp = data['instance_created']
    seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
    # leasefile columns: <expiry-epoch> <mac> <ip> <hostname> <client-id>
    return '%d %s %s %s *' % (seconds_since_epoch + FLAGS.dhcp_lease_time,
                              data['vif_address'],
                              data['address'],
                              data['instance_hostname'] or '*')
def _host_dhcp_network(data):
return 'NW-%s' % data['vif_id']
def _host_dhcp(data):
    """Return a host string for an address in dhcp-host format."""
    if FLAGS.use_single_default_gateway:
        # Tag the entry with its network (net:NW-<vif_id>) so the matching
        # dhcp-opts line from _host_dhcp_opts() applies to it.
        return '%s,%s.%s,%s,%s' % (data['vif_address'],
                                   data['instance_hostname'],
                                   FLAGS.dhcp_domain,
                                   data['address'],
                                   "net:" + _host_dhcp_network(data))
    else:
        return '%s,%s.%s,%s' % (data['vif_address'],
                                data['instance_hostname'],
                                FLAGS.dhcp_domain,
                                data['address'])
def _host_dhcp_opts(data):
    """Return an empty gateway option."""
    # DHCP option 3 is "router"; emitting it with no value for this
    # network tag tells dnsmasq not to send a default gateway.
    return '%s,%s' % (_host_dhcp_network(data), 3)
def _execute(*cmd, **kwargs):
    """Wrapper around utils.execute honoring fake_network.

    When FLAGS.fake_network is set the command is only logged and a
    canned ('fake', 0) (stdout, stderr-ish) pair is returned instead of
    running anything.
    """
    if FLAGS.fake_network:
        LOG.debug('FAKE NET: %s', ' '.join(map(str, cmd)))
        return 'fake', 0
    else:
        return utils.execute(*cmd, **kwargs)
def _device_exists(device):
    """Check if ethernet device exists."""
    # Any stderr output from `ip link show` is taken to mean the device
    # does not exist (the exit code is deliberately ignored).
    (_out, err) = _execute('ip', 'link', 'show', 'dev', device,
                           check_exit_code=False)
    return not err
def _dhcp_file(dev, kind):
    """Return path to a pid, leases or conf file for a bridge/device.

    Also ensures FLAGS.networks_path exists as a side effect.
    """
    ensure_path(FLAGS.networks_path)
    return os.path.abspath('%s/nova-%s.%s' % (FLAGS.networks_path,
                                              dev,
                                              kind))
def _ra_file(dev, kind):
    """Return path to a pid or conf file for a bridge/device (radvd).

    Also ensures FLAGS.networks_path exists as a side effect.
    """
    ensure_path(FLAGS.networks_path)
    return os.path.abspath('%s/nova-ra-%s.%s' % (FLAGS.networks_path,
                                                 dev,
                                                 kind))
def _dnsmasq_pid_for(dev):
    """Returns the pid for prior dnsmasq instance for a bridge/device.

    Returns None if no pid file exists, or if it is unreadable/corrupt.

    If machine has rebooted pid might be incorrect (caller should check).
    """
    pid_file = _dhcp_file(dev, 'pid')
    if os.path.exists(pid_file):
        try:
            with open(pid_file, 'r') as f:
                return int(f.read())
        except (ValueError, IOError):
            # Corrupt or vanished pid file: same as no pid at all.
            return None
def _ra_pid_for(dev):
    """Returns the pid for prior radvd instance for a bridge/device.

    Returns None if no pid file exists, or if it is unreadable/corrupt.

    If machine has rebooted pid might be incorrect (caller should check).
    """
    pid_file = _ra_file(dev, 'pid')
    if os.path.exists(pid_file):
        try:
            with open(pid_file, 'r') as f:
                return int(f.read())
        except (ValueError, IOError):
            # Corrupt or vanished pid file: treat as no pid, mirroring
            # _dnsmasq_pid_for() for consistency (previously a garbage
            # pid file would raise and abort update_ra()).
            return None
def _ip_bridge_cmd(action, params, device):
"""Build commands to add/del ips to bridges/devices."""
cmd = ['ip', 'addr', action]
cmd.extend(params)
cmd.extend(['dev', device])
return cmd
# Similar to compute virt layers, the Linux network node
# code uses a flexible driver model to support different ways
# of creating ethernet interfaces and attaching them to the network.
# In the case of a network host, these interfaces
# act as gateway/dhcp/vpn/etc. endpoints not VM interfaces.
# Cached singleton instance of the configured interface driver; created
# lazily by _get_interface_driver() below.
interface_driver = None
def _get_interface_driver():
    """Lazily instantiate and cache the configured interface driver.

    The driver class is named by FLAGS.linuxnet_interface_driver.
    """
    global interface_driver
    if not interface_driver:
        interface_driver = utils.import_object(FLAGS.linuxnet_interface_driver)
    return interface_driver
def plug(network, mac_address, gateway=True):
    """Module-level convenience wrapper around the driver's plug()."""
    return _get_interface_driver().plug(network, mac_address, gateway)


def unplug(network):
    """Module-level convenience wrapper around the driver's unplug()."""
    return _get_interface_driver().unplug(network)


def get_dev(network):
    """Module-level convenience wrapper around the driver's get_dev()."""
    return _get_interface_driver().get_dev(network)
class LinuxNetInterfaceDriver(object):
    """Abstract class that defines the generic network host API
    for all Linux interface drivers.

    Previously the second docstring line was a separate, dead string
    statement ("for for ..."); it is now part of the real docstring.
    """

    def plug(self, network, mac_address):
        """Create Linux device, return device name"""
        raise NotImplementedError()

    def unplug(self, network):
        """Destroy Linux device, return device name"""
        raise NotImplementedError()

    def get_dev(self, network):
        """Get device name"""
        raise NotImplementedError()
# plugs interfaces using Linux Bridge
class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
    """Interface driver that gates networks through a Linux bridge,
    optionally stacked on a vlan sub-interface.
    """

    def plug(self, network, mac_address, gateway=True):
        """Ensure the bridge (and vlan, if the network has one) exists."""
        if network.get('vlan', None) is not None:
            iface = FLAGS.vlan_interface or network['bridge_interface']
            LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
                network['vlan'],
                network['bridge'],
                iface,
                network,
                mac_address)
        else:
            iface = FLAGS.flat_interface or network['bridge_interface']
            LinuxBridgeInterfaceDriver.ensure_bridge(
                network['bridge'],
                iface,
                network, gateway)
        # NOTE(vish): applying here so we don't get a lock conflict
        iptables_manager.apply()
        return network['bridge']

    def unplug(self, network):
        """No-op teardown; the bridge itself is left in place."""
        return self.get_dev(network)

    def get_dev(self, network):
        """Return the bridge device name for *network*."""
        return network['bridge']

    @classmethod
    def ensure_vlan_bridge(_self, vlan_num, bridge, bridge_interface,
                           net_attrs=None, mac_address=None):
        """Create a vlan and bridge unless they already exist."""
        interface = LinuxBridgeInterfaceDriver.ensure_vlan(vlan_num,
                                              bridge_interface, mac_address)
        LinuxBridgeInterfaceDriver.ensure_bridge(bridge, interface, net_attrs)
        return interface

    @classmethod
    @utils.synchronized('ensure_vlan', external=True)
    def ensure_vlan(_self, vlan_num, bridge_interface, mac_address=None):
        """Create a vlan unless it already exists."""
        interface = 'vlan%s' % vlan_num
        if not _device_exists(interface):
            LOG.debug(_('Starting VLAN inteface %s'), interface)
            _execute('ip', 'link', 'add', 'link', bridge_interface,
                     'name', interface, 'type', 'vlan',
                     'id', vlan_num, run_as_root=True)
            # (danwent) the bridge will inherit this address, so we want to
            # make sure it is the value set from the NetworkManager
            if mac_address:
                _execute('ip', 'link', 'set', interface, "address",
                         mac_address, run_as_root=True)
            _execute('ip', 'link', 'set', interface, 'up', run_as_root=True)
            if FLAGS.network_device_mtu:
                _execute('ip', 'link', 'set', interface, 'mtu',
                         FLAGS.network_device_mtu, run_as_root=True)
        return interface

    @classmethod
    @utils.synchronized('ensure_bridge', external=True)
    def ensure_bridge(_self, bridge, interface, net_attrs=None, gateway=True):
        """Create a bridge unless it already exists.

        :param interface: the interface to create the bridge on.
        :param net_attrs: dictionary with attributes used to create bridge.

        If net_attrs is set, it will add the net_attrs['gateway'] to the
        bridge using net_attrs['broadcast'] and net_attrs['cidr'].  It will
        also add the ip_v6 address specified in net_attrs['cidr_v6'] if
        use_ipv6 is set.

        The code will attempt to move any ips that already exist on the
        interface onto the bridge and reset the default gateway if necessary.
        """
        if not _device_exists(bridge):
            LOG.debug(_('Starting Bridge interface for %s'), interface)
            _execute('brctl', 'addbr', bridge, run_as_root=True)
            _execute('brctl', 'setfd', bridge, 0, run_as_root=True)
            # _execute('brctl setageing %s 10' % bridge, run_as_root=True)
            _execute('brctl', 'stp', bridge, 'off', run_as_root=True)
            # (danwent) bridge device MAC address can't be set directly.
            # instead it inherits the MAC address of the first device on the
            # bridge, which will either be the vlan interface, or a
            # physical NIC.
            _execute('ip', 'link', 'set', bridge, 'up', run_as_root=True)
        if interface:
            out, err = _execute('brctl', 'addif', bridge, interface,
                                check_exit_code=False, run_as_root=True)
            # NOTE(vish): This will break if there is already an ip on the
            # interface, so we move any ips to the bridge
            old_gateway = None
            out, err = _execute('route', '-n', run_as_root=True)
            for line in out.split('\n'):
                fields = line.split()
                if (fields and fields[0] == '0.0.0.0' and
                        fields[-1] == interface):
                    old_gateway = fields[1]
                    # Temporarily drop the default route; restored below.
                    _execute('route', 'del', 'default', 'gw', old_gateway,
                             'dev', interface, run_as_root=True,
                             check_exit_code=[0, 7])
            out, err = _execute('ip', 'addr', 'show', 'dev', interface,
                                'scope', 'global', run_as_root=True)
            for line in out.split('\n'):
                fields = line.split()
                if fields and fields[0] == 'inet':
                    params = fields[1:-1]
                    _execute(*_ip_bridge_cmd('del', params, fields[-1]),
                             run_as_root=True, check_exit_code=[0, 2, 254])
                    _execute(*_ip_bridge_cmd('add', params, bridge),
                             run_as_root=True, check_exit_code=[0, 2, 254])
            if old_gateway:
                _execute('route', 'add', 'default', 'gw', old_gateway,
                         run_as_root=True, check_exit_code=[0, 7])
            # NOTE(review): this comparison must match brctl's exact error
            # text (note: no space after the ';' in the concatenated
            # literal) -- verify against the brctl version in use.
            if (err and err != "device %s is already a member of a bridge;"
                    "can't ensubordinate it to bridge %s.\n" % (interface,
                                                                bridge)):
                raise exception.Error('Failed to add interface: %s' % err)
        # Don't forward traffic unless we were told to be a gateway
        ipv4_filter = iptables_manager.ipv4['filter']
        if gateway:
            ipv4_filter.add_rule('FORWARD',
                                 '--in-interface %s -j ACCEPT' % bridge)
            ipv4_filter.add_rule('FORWARD',
                                 '--out-interface %s -j ACCEPT' % bridge)
        else:
            ipv4_filter.add_rule('FORWARD',
                                 '--in-interface %s -j DROP' % bridge)
            ipv4_filter.add_rule('FORWARD',
                                 '--out-interface %s -j DROP' % bridge)
# plugs interfaces using Open vSwitch
class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver):
    """Interface driver that creates an internal port on the OVS
    integration bridge for each network gateway.
    """

    def plug(self, network, mac_address, gateway=True):
        """Create the gateway port on the OVS bridge if it doesn't exist."""
        dev = self.get_dev(network)
        if not _device_exists(dev):
            bridge = FLAGS.linuxnet_ovs_integration_bridge
            _execute('ovs-vsctl',
                     '--', '--may-exist', 'add-port', bridge, dev,
                     '--', 'set', 'Interface', dev, "type=internal",
                     '--', 'set', 'Interface', dev,
                     "external-ids:iface-id=%s" % dev,
                     '--', 'set', 'Interface', dev,
                     "external-ids:iface-status=active",
                     '--', 'set', 'Interface', dev,
                     "external-ids:attached-mac=%s" % mac_address,
                     run_as_root=True)
            _execute('ip', 'link', 'set', dev, "address", mac_address,
                     run_as_root=True)
            if FLAGS.network_device_mtu:
                _execute('ip', 'link', 'set', dev, 'mtu',
                         FLAGS.network_device_mtu, run_as_root=True)
            _execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
            if not gateway:
                # If we weren't instructed to act as a gateway then add the
                # appropriate flows to block all non-dhcp traffic.
                _execute('ovs-ofctl',
                         'add-flow', bridge, "priority=1,actions=drop",
                         run_as_root=True)
                _execute('ovs-ofctl', 'add-flow', bridge,
                         "udp,tp_dst=67,dl_dst=%s,priority=2,actions=normal" %
                         mac_address, run_as_root=True)
                # .. and make sure iptables won't forward it as well.
                iptables_manager.ipv4['filter'].add_rule('FORWARD',
                        '--in-interface %s -j DROP' % bridge)
                iptables_manager.ipv4['filter'].add_rule('FORWARD',
                        '--out-interface %s -j DROP' % bridge)
            else:
                iptables_manager.ipv4['filter'].add_rule('FORWARD',
                        '--in-interface %s -j ACCEPT' % bridge)
                iptables_manager.ipv4['filter'].add_rule('FORWARD',
                        '--out-interface %s -j ACCEPT' % bridge)
        return dev

    def unplug(self, network):
        """Remove the gateway port from the integration bridge."""
        dev = self.get_dev(network)
        bridge = FLAGS.linuxnet_ovs_integration_bridge
        _execute('ovs-vsctl', '--', '--if-exists', 'del-port',
                 bridge, dev, run_as_root=True)
        return dev

    def get_dev(self, network):
        """Return the gateway port name, derived from the network uuid."""
        dev = "gw-" + str(network['uuid'][0:11])
        return dev
# plugs interfaces using Linux Bridge when using QuantumManager
class QuantumLinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
    """Interface driver for QuantumManager: a tap gateway device attached
    to a per-network Linux bridge.
    """

    # Device name prefixes; both are suffixed with the first 11 chars of
    # the network uuid (see get_dev / get_bridge).
    BRIDGE_NAME_PREFIX = "brq"
    GATEWAY_INTERFACE_PREFIX = "gw-"

    def plug(self, network, mac_address, gateway=True):
        """Create the tap gateway device and its bridge as needed.

        Note: when gateway=False only DROP rules are installed and the
        bridge name (not the tap device) is returned.
        """
        dev = self.get_dev(network)
        bridge = self.get_bridge(network)
        if not gateway:
            # If we weren't instructed to act as a gateway then add the
            # appropriate flows to block all non-dhcp traffic.
            # .. and make sure iptables won't forward it as well.
            iptables_manager.ipv4['filter'].add_rule('FORWARD',
                    '--in-interface %s -j DROP' % bridge)
            iptables_manager.ipv4['filter'].add_rule('FORWARD',
                    '--out-interface %s -j DROP' % bridge)
            return bridge
        else:
            iptables_manager.ipv4['filter'].add_rule('FORWARD',
                    '--in-interface %s -j ACCEPT' % bridge)
            iptables_manager.ipv4['filter'].add_rule('FORWARD',
                    '--out-interface %s -j ACCEPT' % bridge)
        QuantumLinuxBridgeInterfaceDriver.create_tap_dev(dev, mac_address)
        if not _device_exists(bridge):
            LOG.debug(_("Starting bridge %s "), bridge)
            utils.execute('brctl', 'addbr', bridge, run_as_root=True)
            utils.execute('brctl', 'setfd', bridge, str(0), run_as_root=True)
            utils.execute('brctl', 'stp', bridge, 'off', run_as_root=True)
            utils.execute('ip', 'link', 'set', bridge, "address", mac_address,
                          run_as_root=True)
            utils.execute('ip', 'link', 'set', bridge, 'up', run_as_root=True)
            LOG.debug(_("Done starting bridge %s"), bridge)
            full_ip = '%s/%s' % (network['dhcp_server'],
                                 network['cidr'].rpartition('/')[2])
            utils.execute('ip', 'address', 'add', full_ip, 'dev', bridge,
                          run_as_root=True)
        return dev

    def unplug(self, network):
        """Delete the tap gateway device; returns None if it is absent."""
        dev = self.get_dev(network)
        if not _device_exists(dev):
            return None
        else:
            try:
                utils.execute('ip', 'link', 'delete', dev, run_as_root=True)
            except exception.ProcessExecutionError:
                LOG.warning(_("Failed unplugging gateway interface '%s'"),
                            dev)
                raise
            LOG.debug(_("Unplugged gateway interface '%s'"), dev)
            return dev

    @classmethod
    def create_tap_dev(_self, dev, mac_address=None):
        """Create a tap device (via ip tuntap, falling back to tunctl)."""
        if not _device_exists(dev):
            try:
                # First, try with 'ip'
                utils.execute('ip', 'tuntap', 'add', dev, 'mode', 'tap',
                              run_as_root=True)
            except exception.ProcessExecutionError:
                # Second option: tunctl
                utils.execute('tunctl', '-b', '-t', dev, run_as_root=True)
            if mac_address:
                utils.execute('ip', 'link', 'set', dev, "address", mac_address,
                              run_as_root=True)
            utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True)

    def get_dev(self, network):
        """Return the gateway tap device name for *network*."""
        dev = self.GATEWAY_INTERFACE_PREFIX + str(network['uuid'][0:11])
        return dev

    def get_bridge(self, network):
        """Return the bridge name for *network*."""
        bridge = self.BRIDGE_NAME_PREFIX + str(network['uuid'][0:11])
        return bridge
# Module-level singleton through which all helpers above stage and apply
# their iptables rules.
iptables_manager = IptablesManager()
| apache-2.0 |
nens/openradar | openradar/scripts/sync.py | 1 | 2563 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
from openradar import config
from openradar import loghelper
import logging
import ftplib
import os
import io
def ftp_transfer(source, target, name):
    """Copy the file *name* from the source FTP connection to the target.

    The whole file is buffered in memory, so this is only suitable for
    reasonably small files.
    """
    retr_cmd = 'RETR ' + name
    stor_cmd = 'STOR ' + name
    buf = io.BytesIO()
    source.retrbinary(retr_cmd, buf.write)
    buf.seek(0)
    target.storbinary(stor_cmd, buf)
def ftp_sync(source, target):
    """Make the target FTP working directory mirror the source directory.

    Files present only on the target are deleted; files present only on
    the source are copied over.  Hidden files (leading dot) on the source
    are ignored.  A summary line is logged at the end.
    """
    source_names = [n for n in source.nlst() if not n.startswith('.')]
    target_names = target.nlst()
    source_set = set(source_names)
    target_set = set(target_names)
    # Drop files that no longer exist on the source.
    removed = 0
    for name in target_names:
        if name not in source_set:
            target.delete(name)
            logging.debug('Removed {}.'.format(name))
            removed += 1
    # Bring over files the target is missing.
    copied = 0
    for name in source_names:
        if name in target_set:
            continue
        try:
            ftp_transfer(source=source, target=target, name=name)
        except IOError:
            logging.warning('Could not transfer {}.'.format(name))
        else:
            logging.debug('Copied {}.'.format(name))
            copied += 1
    logging.info('Files removed: {}, copied: {}.'.format(removed, copied))
def sync():
    """Synchronize specific remote ftp folders with our ftp.

    The local ("target") server comes from the FTP_* settings in config;
    each entry of config.FTP_THROUGH names a remote ("source") server and
    directory to mirror into a directory on the target.
    """
    loghelper.setup_logging(os.path.join(config.LOG_DIR, 'sync.log'))
    # Check sync possible
    if not hasattr(config, 'FTP_HOST') or config.FTP_HOST == '':
        logging.warning('FTP not configured, FTP syncing not possible.')
        return
    try:
        target = ftplib.FTP(config.FTP_HOST,
                            config.FTP_USER,
                            config.FTP_PASSWORD)
        for name, info in config.FTP_THROUGH.items():
            logging.info('Syncing {}...'.format(name))
            # Make the connection
            source = ftplib.FTP(
                info['host'],
                info['user'],
                info['password'],
            )
            # Change to proper directories
            source.cwd(info['path'])
            target.cwd(info['target'])
            # Sync
            ftp_sync(source=source, target=target)
            source.quit()
        # BUGFIX: quit the target connection only after all sources are
        # synced; previously it was quit inside the loop, breaking every
        # iteration after the first.
        target.quit()
    except Exception:
        # BUGFIX: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception so those still propagate.
        logging.exception('Error:')
    logging.info('Sync done.')
def main():
    """Console-script entry point: run a single sync pass."""
    return sync()
| gpl-3.0 |
Zhongqilong/mykbengineer | kbe/res/scripts/common/Lib/test/test_urllib_response.py | 96 | 1728 | """Unit tests for code in urllib.response."""
import socket
import tempfile
import urllib.response
import unittest
class TestResponse(unittest.TestCase):
    """Tests for urllib.response's addbase/addclosehook/addinfo/addinfourl
    file-object wrappers.
    """

    def setUp(self):
        # A real (unconnected) socket supplies a genuine file object to wrap.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.fp = self.sock.makefile('rb')
        self.test_headers = {"Host": "www.python.org",
                             "Connection": "close"}

    def test_with(self):
        # addbase works as a context manager and closes the fp on exit.
        addbase = urllib.response.addbase(self.fp)
        self.assertIsInstance(addbase, tempfile._TemporaryFileWrapper)

        def f():
            with addbase as spam:
                pass
        self.assertFalse(self.fp.closed)
        f()
        self.assertTrue(self.fp.closed)
        # Re-entering a closed addbase raises ValueError.
        self.assertRaises(ValueError, f)

    def test_addclosehook(self):
        # The hook must fire exactly when close() is called.
        closehook_called = False

        def closehook():
            nonlocal closehook_called
            closehook_called = True

        closehook = urllib.response.addclosehook(self.fp, closehook)
        closehook.close()
        self.assertTrue(self.fp.closed)
        self.assertTrue(closehook_called)

    def test_addinfo(self):
        # info() returns the headers the wrapper was constructed with.
        info = urllib.response.addinfo(self.fp, self.test_headers)
        self.assertEqual(info.info(), self.test_headers)

    def test_addinfourl(self):
        # addinfourl exposes headers, url, and status code accessors.
        url = "http://www.python.org"
        code = 200
        infourl = urllib.response.addinfourl(self.fp, self.test_headers,
                                             url, code)
        self.assertEqual(infourl.info(), self.test_headers)
        self.assertEqual(infourl.geturl(), url)
        self.assertEqual(infourl.getcode(), code)

    def tearDown(self):
        self.sock.close()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| lgpl-3.0 |
berendkleinhaneveld/VTK | ThirdParty/Twisted/twisted/trial/test/test_log.py | 40 | 7158 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test the interaction between trial and errors logged during test run.
"""
from __future__ import division
import time
from twisted.internet import reactor, task
from twisted.python import failure, log
from twisted.trial import unittest, reporter
def makeFailure():
    """
    Return a new, realistic failure.

    The failure wraps a genuine ZeroDivisionError raised (and caught)
    here, so it carries a real traceback.
    """
    try:
        1/0
    except ZeroDivisionError:
        f = failure.Failure()
    return f
class Mask(object):
    """
    Hide C{MockTest}s from Trial's automatic test finder.
    """
    # NOTE(review): later code references Mask.SynchronousFailureLogging and
    # Mask.AsynchronousFailureLogging, which suggests the FailureLogging
    # classes below were originally nested inside this class -- confirm
    # against upstream; nesting/indentation may have been lost in this copy.
class FailureLoggingMixin(object):
    """
    Mixin whose test methods log zero, one, or two errors via log.err,
    used to exercise trial's handling of errors logged during a run.
    """
    def test_silent(self):
        """
        Don't log any errors.
        """

    def test_single(self):
        """
        Log a single error.
        """
        log.err(makeFailure())

    def test_double(self):
        """
        Log two errors.
        """
        log.err(makeFailure())
        log.err(makeFailure())
class SynchronousFailureLogging(FailureLoggingMixin, unittest.SynchronousTestCase):
    """
    Failure-logging test methods run under the synchronous TestCase.
    """
    pass
class AsynchronousFailureLogging(FailureLoggingMixin, unittest.TestCase):
    """
    Failure-logging test methods run under the asynchronous TestCase.
    """
    def test_inCallback(self):
        """
        Log an error in an asynchronous callback.
        """
        return task.deferLater(reactor, 0, lambda: log.err(makeFailure()))
class TestObserver(unittest.SynchronousTestCase):
    """
    Tests for L{unittest._LogObserver}, a helper for the implementation of
    L{SynchronousTestCase.flushLoggedErrors}.
    """
    def setUp(self):
        self.result = reporter.TestResult()
        self.observer = unittest._LogObserver()

    def test_msg(self):
        """
        Test that a standard log message doesn't go anywhere near the result.
        """
        self.observer.gotEvent({'message': ('some message',),
                                'time': time.time(), 'isError': 0,
                                'system': '-'})
        self.assertEqual(self.observer.getErrors(), [])

    def test_error(self):
        """
        Test that an observed error gets added to the result
        """
        f = makeFailure()
        self.observer.gotEvent({'message': (),
                                'time': time.time(), 'isError': 1,
                                'system': '-', 'failure': f,
                                'why': None})
        self.assertEqual(self.observer.getErrors(), [f])

    def test_flush(self):
        """
        Check that flushing the observer with no args removes all errors.
        """
        # Seed the observer with one error first.
        self.test_error()
        flushed = self.observer.flushErrors()
        self.assertEqual(self.observer.getErrors(), [])
        self.assertEqual(len(flushed), 1)
        self.assertTrue(flushed[0].check(ZeroDivisionError))

    def _makeRuntimeFailure(self):
        # Helper: a failure of a different type than makeFailure() produces.
        return failure.Failure(RuntimeError('test error'))

    def test_flushByType(self):
        """
        Check that flushing the observer remove all failures of the given type.
        """
        self.test_error()  # log a ZeroDivisionError to the observer
        f = self._makeRuntimeFailure()
        self.observer.gotEvent(dict(message=(), time=time.time(), isError=1,
                                    system='-', failure=f, why=None))
        flushed = self.observer.flushErrors(ZeroDivisionError)
        # Only the RuntimeError failure remains after the typed flush.
        self.assertEqual(self.observer.getErrors(), [f])
        self.assertEqual(len(flushed), 1)
        self.assertTrue(flushed[0].check(ZeroDivisionError))

    def test_ignoreErrors(self):
        """
        Check that C{_ignoreErrors} actually causes errors to be ignored.
        """
        self.observer._ignoreErrors(ZeroDivisionError)
        f = makeFailure()
        self.observer.gotEvent({'message': (),
                                'time': time.time(), 'isError': 1,
                                'system': '-', 'failure': f,
                                'why': None})
        self.assertEqual(self.observer.getErrors(), [])

    def test_clearIgnores(self):
        """
        Check that C{_clearIgnores} ensures that previously ignored errors
        get captured.
        """
        self.observer._ignoreErrors(ZeroDivisionError)
        self.observer._clearIgnores()
        f = makeFailure()
        self.observer.gotEvent({'message': (),
                                'time': time.time(), 'isError': 1,
                                'system': '-', 'failure': f,
                                'why': None})
        self.assertEqual(self.observer.getErrors(), [f])
class LogErrorsMixin(object):
    """
    High-level tests demonstrating the expected behaviour of logged errors
    during tests.
    """
    def setUp(self):
        self.result = reporter.TestResult()

    def tearDown(self):
        # Drain the errors this mixin's tests deliberately logged.
        self.flushLoggedErrors(ZeroDivisionError)

    def test_singleError(self):
        """
        Test that a logged error gets reported as a test error.
        """
        test = self.MockTest('test_single')
        test(self.result)
        self.assertEqual(len(self.result.errors), 1)
        self.assertTrue(self.result.errors[0][1].check(ZeroDivisionError),
                        self.result.errors[0][1])
        self.assertEqual(0, self.result.successes)

    def test_twoErrors(self):
        """
        Test that when two errors get logged, they both get reported as test
        errors.
        """
        test = self.MockTest('test_double')
        test(self.result)
        self.assertEqual(len(self.result.errors), 2)
        self.assertEqual(0, self.result.successes)

    def test_errorsIsolated(self):
        """
        Check that an error logged in one test doesn't fail the next test.
        """
        t1 = self.MockTest('test_single')
        t2 = self.MockTest('test_silent')
        t1(self.result)
        t2(self.result)
        self.assertEqual(len(self.result.errors), 1)
        self.assertEqual(self.result.errors[0][0], t1)
        self.assertEqual(1, self.result.successes)

    def test_boundedObservers(self):
        """
        There are no extra log observers after a test runs.
        """
        # XXX trial is *all about* global log state.  It should really be
        # fixed.
        observer = unittest._LogObserver()
        self.patch(unittest, '_logObserver', observer)
        observers = log.theLogPublisher.observers[:]
        test = self.MockTest()
        test(self.result)
        self.assertEqual(observers, log.theLogPublisher.observers)
class SynchronousLogErrorsTests(LogErrorsMixin, unittest.SynchronousTestCase):
    """
    Logged-error behaviour for synchronous test cases.
    """
    MockTest = Mask.SynchronousFailureLogging
class AsynchronousLogErrorsTests(LogErrorsMixin, unittest.TestCase):
    """
    Logged-error behaviour for asynchronous test cases, including errors
    logged from Deferred callbacks.
    """
    MockTest = Mask.AsynchronousFailureLogging

    def test_inCallback(self):
        """
        Test that errors logged in callbacks get reported as test errors.
        """
        test = self.MockTest('test_inCallback')
        test(self.result)
        self.assertEqual(len(self.result.errors), 1)
        self.assertTrue(self.result.errors[0][1].check(ZeroDivisionError),
                        self.result.errors[0][1])
kzhong1991/Flight-AR.Drone-2 | src/3rdparty/Qt4.8.4/src/3rdparty/webkit/Source/ThirdParty/gyp/test/sibling/gyptest-relocate.py | 151 | 1144 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('build/all.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('build/all.gyp', test.ALL, chdir='relocate/src')
chdir = 'relocate/src/build'
# The top-level Makefile is in the directory where gyp was run.
# TODO(mmoss) Should the Makefile go in the directory of the passed in .gyp
# file? What about when passing in multiple .gyp files? Would sub-project
# Makefiles (see http://codereview.chromium.org/340008 comments) solve this?
if test.format == 'make':
chdir = 'relocate/src'
if test.format == 'xcode':
chdir = 'relocate/src/prog1'
test.run_built_executable('prog1',
chdir=chdir,
stdout="Hello from prog1.c\n")
if test.format == 'xcode':
chdir = 'relocate/src/prog2'
test.run_built_executable('prog2',
chdir=chdir,
stdout="Hello from prog2.c\n")
test.pass_test()
| bsd-3-clause |
Work4Labs/lettuce | tests/integration/lib/Django-1.2.5/tests/modeltests/expressions/tests.py | 92 | 7256 | from django.core.exceptions import FieldError
from django.db.models import F
from django.test import TestCase
from models import Company, Employee
class ExpressionsTests(TestCase):
    """
    Exercise F() query expressions: filtering against other columns,
    arithmetic in updates, spanning joins, and the restrictions on
    assigning F() to attributes before save().
    """

    def test_filter(self):
        # Three companies with known employee/chair counts; each gets a
        # freshly created CEO.
        Company.objects.create(
            name="Example Inc.", num_employees=2300, num_chairs=5,
            ceo=Employee.objects.create(firstname="Joe", lastname="Smith")
        )
        Company.objects.create(
            name="Foobar Ltd.", num_employees=3, num_chairs=4,
            ceo=Employee.objects.create(firstname="Frank", lastname="Meyer")
        )
        Company.objects.create(
            name="Test GmbH", num_employees=32, num_chairs=1,
            ceo=Employee.objects.create(firstname="Max", lastname="Mustermann")
        )

        # Deterministic ordering so the expected lists below are stable.
        company_query = Company.objects.values(
            "name", "num_employees", "num_chairs"
        ).order_by(
            "name", "num_employees", "num_chairs"
        )

        # We can filter for companies where the number of employees is greater
        # than the number of chairs.
        self.assertQuerysetEqual(
            company_query.filter(num_employees__gt=F("num_chairs")), [
                {
                    "num_chairs": 5,
                    "name": "Example Inc.",
                    "num_employees": 2300,
                },
                {
                    "num_chairs": 1,
                    "name": "Test GmbH",
                    "num_employees": 32
                },
            ],
            lambda o: o
        )

        # We can set one field to have the value of another field
        # Make sure we have enough chairs
        company_query.update(num_chairs=F("num_employees"))
        self.assertQuerysetEqual(
            company_query, [
                {
                    "num_chairs": 2300,
                    "name": "Example Inc.",
                    "num_employees": 2300
                },
                {
                    "num_chairs": 3,
                    "name": "Foobar Ltd.",
                    "num_employees": 3
                },
                {
                    "num_chairs": 32,
                    "name": "Test GmbH",
                    "num_employees": 32
                }
            ],
            lambda o: o
        )

        # We can perform arithmetic operations in expressions
        # Make sure we have 2 spare chairs
        company_query.update(num_chairs=F("num_employees")+2)
        self.assertQuerysetEqual(
            company_query, [
                {
                    'num_chairs': 2302,
                    'name': u'Example Inc.',
                    'num_employees': 2300
                },
                {
                    'num_chairs': 5,
                    'name': u'Foobar Ltd.',
                    'num_employees': 3
                },
                {
                    'num_chairs': 34,
                    'name': u'Test GmbH',
                    'num_employees': 32
                }
            ],
            lambda o: o,
        )

        # Law of order of operations is followed
        company_query.update(
            num_chairs=F('num_employees') + 2 * F('num_employees')
        )
        self.assertQuerysetEqual(
            company_query, [
                {
                    'num_chairs': 6900,
                    'name': u'Example Inc.',
                    'num_employees': 2300
                },
                {
                    'num_chairs': 9,
                    'name': u'Foobar Ltd.',
                    'num_employees': 3
                },
                {
                    'num_chairs': 96,
                    'name': u'Test GmbH',
                    'num_employees': 32
                }
            ],
            lambda o: o,
        )

        # Law of order of operations can be overridden by parentheses
        company_query.update(
            num_chairs=((F('num_employees') + 2) * F('num_employees'))
        )
        self.assertQuerysetEqual(
            company_query, [
                {
                    'num_chairs': 5294600,
                    'name': u'Example Inc.',
                    'num_employees': 2300
                },
                {
                    'num_chairs': 15,
                    'name': u'Foobar Ltd.',
                    'num_employees': 3
                },
                {
                    'num_chairs': 1088,
                    'name': u'Test GmbH',
                    'num_employees': 32
                }
            ],
            lambda o: o,
        )

        # The relation of a foreign key can become copied over to an other
        # foreign key.
        self.assertEqual(
            Company.objects.update(point_of_contact=F('ceo')),
            3
        )

        self.assertQuerysetEqual(
            Company.objects.all(), [
                "Joe Smith",
                "Frank Meyer",
                "Max Mustermann",
            ],
            lambda c: unicode(c.point_of_contact),
        )

        c = Company.objects.all()[0]
        c.point_of_contact = Employee.objects.create(firstname="Guido", lastname="van Rossum")
        c.save()

        # F Expressions can also span joins
        self.assertQuerysetEqual(
            Company.objects.filter(ceo__firstname=F("point_of_contact__firstname")), [
                "Foobar Ltd.",
                "Test GmbH",
            ],
            lambda c: c.name
        )

        Company.objects.exclude(
            ceo__firstname=F("point_of_contact__firstname")
        ).update(name="foo")
        self.assertEqual(
            Company.objects.exclude(
                ceo__firstname=F('point_of_contact__firstname')
            ).get().name,
            "foo",
        )

        # ...but a joined F() on the right-hand side of update() is an error.
        self.assertRaises(FieldError,
            lambda: Company.objects.exclude(
                ceo__firstname=F('point_of_contact__firstname')
            ).update(name=F('point_of_contact__lastname'))
        )

        # F expressions can be used to update attributes on single objects
        test_gmbh = Company.objects.get(name="Test GmbH")
        self.assertEqual(test_gmbh.num_employees, 32)
        test_gmbh.num_employees = F("num_employees") + 4
        test_gmbh.save()
        test_gmbh = Company.objects.get(pk=test_gmbh.pk)
        self.assertEqual(test_gmbh.num_employees, 36)

        # F expressions cannot be used to update attributes which are foreign
        # keys, or attributes which involve joins.
        test_gmbh.point_of_contact = None
        test_gmbh.save()
        self.assertTrue(test_gmbh.point_of_contact is None)

        # Assigning F() to a FK attribute must raise immediately.
        def test():
            test_gmbh.point_of_contact = F("ceo")
        self.assertRaises(ValueError, test)

        test_gmbh.point_of_contact = test_gmbh.ceo
        test_gmbh.save()
        test_gmbh.name = F("ceo__last_name")
        self.assertRaises(FieldError, test_gmbh.save)

        # F expressions cannot be used to update attributes on objects which do
        # not yet exist in the database
        acme = Company(
            name="The Acme Widget Co.", num_employees=12, num_chairs=5,
            ceo=test_gmbh.ceo
        )
        acme.num_employees = F("num_employees") + 16
        self.assertRaises(TypeError, acme.save)
| gpl-3.0 |
franklingu/leetcode-solutions | questions/powerful-integers/Solution.py | 1 | 1157 | """
Given three integers x, y, and bound, return a list of all the powerful integers that have a value less than or equal to bound.
An integer is powerful if it can be represented as x^i + y^j for some integers i >= 0 and j >= 0.
You may return the answer in any order. In your answer, each value should occur at most once.
Example 1:
Input: x = 2, y = 3, bound = 10
Output: [2,3,4,5,7,9,10]
Explanation:
2 = 2^0 + 3^0
3 = 2^1 + 3^0
4 = 2^0 + 3^1
5 = 2^1 + 3^1
7 = 2^2 + 3^1
9 = 2^3 + 3^0
10 = 2^0 + 3^2
Example 2:
Input: x = 3, y = 5, bound = 15
Output: [2,4,6,8,10,14]
Constraints:
1 <= x, y <= 100
0 <= bound <= 10^6
"""
class Solution:
    def powerfulIntegers(self, x: int, y: int, bound: int) -> List[int]:
        """Return all distinct values x**i + y**j (i, j >= 0) not exceeding bound.

        Walks the powers of x in the outer loop and the powers of y in
        the inner loop, stopping each loop as soon as the sum can no
        longer fit under the bound.  A set removes duplicates.
        """
        found = set()
        px = 1  # current power of x (x**i)
        while px < bound:
            py = 1  # current power of y (y**j)
            while px + py <= bound:
                found.add(px + py)
                py *= y
                if py == 1:
                    # y == 1: powers never grow, avoid an infinite loop.
                    break
            px *= x
            if px == 1:
                # x == 1: same degenerate case for the outer loop.
                break
        return list(found)
henrykironde/deletedret | retriever/__main__.py | 1 | 10187 | """Data Retriever
This module handles the CLI for the Data retriever.
"""
import os
import sys
from retriever.engines import engine_list, choose_engine
from retriever.lib.datasets import datasets, dataset_names, license
from retriever.lib.defaults import sample_script, CITATION, SCRIPT_SEARCH_PATHS, LICENSE
from retriever.lib.engine_tools import reset_retriever
from retriever.lib.get_opts import parser
from retriever.lib.install import _install
from retriever.lib.repository import check_for_updates
from retriever.lib.scripts import SCRIPT_LIST, reload_scripts, get_script, name_matches, get_script_citation
from retriever.lib.create_scripts import create_package
from retriever.lib.provenance import commit, commit_log
def main():
    """This function launches the Data Retriever.

    Parses the command line, handles the informational sub-commands
    (defaults, update, citation, license, new, reset, autocreate, ls,
    commit, log) inline, and otherwise installs/downloads the requested
    dataset(s) with the chosen engine.
    """
    if len(sys.argv) == 1:
        # if no command line args are passed, show the help options
        parser.parse_args(['-h'])
    else:
        # otherwise, parse them
        args = parser.parse_args()
        reset_or_update = args.command in ["reset", "update"]
        # First run (no script dir yet): fetch and load the recipe scripts.
        if (not reset_or_update and not os.path.isdir(SCRIPT_SEARCH_PATHS[1]) and not [
                f for f in os.listdir(SCRIPT_SEARCH_PATHS[-1])
                if os.path.exists(SCRIPT_SEARCH_PATHS[-1])
        ]):
            check_for_updates()
            reload_scripts()
        script_list = SCRIPT_LIST()

        if args.command == "install" and not args.engine:
            parser.parse_args(["install", "-h"])

        if args.quiet:
            sys.stdout = open(os.devnull, "w")

        if args.command == "help":
            parser.parse_args(["-h"])

        if hasattr(args, "compile") and args.compile:
            script_list = reload_scripts()

        if args.command == "defaults":
            # Print each engine's required options and their defaults.
            for engine_item in engine_list:
                print("Default options for engine ", engine_item.name)
                for default_opts in engine_item.required_opts:
                    print(default_opts[0], " ", default_opts[2])
                print()
            return

        if args.command == "update":
            check_for_updates()
            reload_scripts()
            return

        if args.command == "citation":
            if args.dataset is None:
                # No dataset given: cite the retriever itself.
                print("\nCitation for retriever:\n")
                print(CITATION)
            else:
                citations = get_script_citation(args.dataset)
                for citation in citations:
                    print("Citation: {}".format(citation))
            return

        if args.command == 'license':
            if args.dataset is None:
                print(LICENSE)
            else:
                dataset_license = license(args.dataset)
                if dataset_license:
                    print(dataset_license)
                else:
                    print("There is no license information for {}".format(args.dataset))
            return

        if args.command == 'new':
            # Write a skeleton recipe script to the given filename.
            f = open(args.filename, 'w')
            f.write(sample_script)
            f.close()
            return

        if args.command == 'reset':
            reset_retriever(args.scope)
            return

        if args.command == 'autocreate':
            # Exactly one of -f (file) / -d (directory) must be given.
            if sum([args.f, args.d]) == 1:
                file_flag = bool(args.f)
                create_package(args.path, args.dt, file_flag, args.o, args.skip_lines,
                               args.e)
            else:
                print('Please use one and only one of the flags -f -d')
            return

        if args.command == 'ls':
            # scripts should never be empty because check_for_updates is run on SCRIPT_LIST init
            if not (args.l or args.k or isinstance(args.v, list)):
                # Plain `ls`: columnar listing of all datasets,
                # online-only ones flagged with *.
                all_scripts = dataset_names()
                from retriever import lscolumns

                all_scripts_combined = []
                for dataset in all_scripts['offline']:
                    all_scripts_combined.append((dataset, True))
                for dataset in all_scripts['online']:
                    if dataset in all_scripts['offline']:
                        continue
                    all_scripts_combined.append((dataset, False))
                all_scripts_combined = sorted(all_scripts_combined, key=lambda x: x[0])
                print("Available datasets : {}\n".format(len(all_scripts_combined)))
                lscolumns.printls(all_scripts_combined)
                print("\nThe symbol * denotes the online datasets.")
                print("To see the full list of available online datasets, visit\n"
                      "https://github.com/weecology/retriever-recipes.")
            elif isinstance(args.v, list):
                # `ls -v [names...]`: verbose details, optionally for
                # specific datasets only.
                online_scripts = []
                if args.v:
                    try:
                        all_scripts = [get_script(dataset) for dataset in args.v]
                    except KeyError:
                        all_scripts = []
                        print("Dataset(s) is not found.")
                else:
                    scripts = datasets()
                    all_scripts = scripts['offline']
                    online_scripts = scripts['online']
                count = 1
                if not args.v:
                    print("Offline datasets : {}\n".format(len(all_scripts)))
                for script in all_scripts:
                    print("{count}. {title}\n {name}\n"
                          "{keywords}\n{description}\n"
                          "{licenses}\n{citation}\n"
                          "".format(
                              count=count,
                              title=script.title,
                              name=script.name,
                              keywords=script.keywords,
                              description=script.description,
                              licenses=str(script.licenses[0]['name']),
                              citation=script.citation,
                          ))
                    count += 1

                count = 1
                offline_scripts = [script.name for script in all_scripts]
                set_online_scripts = []
                for script in online_scripts:
                    if script in offline_scripts:
                        continue
                    set_online_scripts.append(script)
                if not args.v:
                    print("Online datasets : {}\n".format(len(set_online_scripts)))
                for script in set_online_scripts:
                    print("{count}. {name}".format(count=count, name=script))
                    count += 1
            else:
                # `ls -l/-k`: filter datasets by license and/or keyword.
                param_licenses = args.l if args.l else None
                keywords = args.k if args.k else None

                # search
                searched_scripts = datasets(keywords, param_licenses)
                offline_mesg = "Available offline datasets : {}\n"
                online_mesg = "Available online datasets : {}\n"
                if not searched_scripts:
                    print("No available datasets found")
                else:
                    print(offline_mesg.format(len(searched_scripts['offline'])))
                    count = 1
                    for script in searched_scripts['offline']:
                        print("{count}. {title}\n{name}\n"
                              "{keywords}\n{licenses}\n".format(
                                  count=count,
                                  title=script.title,
                                  name=script.name,
                                  keywords=script.keywords,
                                  licenses=str(script.licenses[0]['name']),
                              ))
                        count += 1
                    count = 1
                    searched_scripts_offline = [
                        script.name for script in searched_scripts["offline"]
                    ]
                    searched_scripts_online = []
                    for script in searched_scripts['online']:
                        if script in searched_scripts_offline:
                            continue
                        searched_scripts_online.append(script)
                    print(online_mesg.format(len(searched_scripts_online)))
                    for script in searched_scripts_online:
                        print("{count}. {name}".format(count=count, name=script))
                        count += 1
            return

        if args.command == 'commit':
            commit(
                dataset=args.dataset,
                path=os.path.normpath(args.path) if args.path else None,
                commit_message=args.message,
            )
            return

        if args.command == 'log':
            commit_log(dataset=args.dataset)
            return

        # Anything past this point needs a concrete engine instance.
        engine = choose_engine(args.__dict__)

        if hasattr(args, 'debug') and args.debug:
            debug = True
        else:
            debug = False
            # Suppress tracebacks for end users when not debugging.
            sys.tracebacklimit = 0

        # NOTE(review): this guards on the 'debug' attribute but reads
        # 'not_cached' — looks suspicious; confirm the intended attribute.
        if hasattr(args, 'debug') and args.not_cached:
            use_cache = False
        else:
            use_cache = True
        engine.use_cache = use_cache

        if args.dataset is not None:
            scripts = name_matches(script_list, args.dataset)
        else:
            raise Exception("no dataset specified.")
        if scripts:
            # Zip archives / provenance hashes go through _install().
            if args.dataset.endswith('.zip') or (hasattr(args, 'hash_value') and
                                                 args.hash_value):
                _install(vars(args), debug=debug, use_cache=use_cache)
                return
            for dataset in scripts:
                print("=> Installing", dataset.name)
                try:
                    dataset.download(engine, debug=debug)
                    dataset.engine.final_cleanup()
                except KeyboardInterrupt:
                    # Allow the user to skip a dataset with Ctrl-C.
                    pass
                except Exception as e:
                    print(e)
                    if debug:
                        raise
            print("Done!")
        else:
            print("Run 'retriever ls' to see a list of currently available datasets.")


if __name__ == "__main__":
    main()
| mit |
AnhellO/DAS_Sistemas | Ago-Dic-2017/Enrique Castillo/Ordinario/test/Lib/site-packages/pip/utils/logging.py | 516 | 3327 | from __future__ import absolute_import
import contextlib
import logging
import logging.handlers
import os
try:
import threading
except ImportError:
import dummy_threading as threading
from pip.compat import WINDOWS
from pip.utils import ensure_dir
try:
from pip._vendor import colorama
# Lots of different errors can come from this, including SystemError and
# ImportError.
except Exception:
colorama = None
# Shared, per-thread indentation state for log output.  Note that
# attributes set on a threading.local instance are only visible in the
# thread that set them.
_log_state = threading.local()
_log_state.indentation = 0


@contextlib.contextmanager
def indent_log(num=2):
    """
    A context manager which will cause the log output to be indented for any
    log messages emitted inside it.

    :param num: number of extra spaces to indent nested log lines by.
    """
    # Bug fix: use getattr with a default instead of ``+=``.  In any
    # thread other than the one that imported this module,
    # ``_log_state`` has no ``indentation`` attribute yet, so the old
    # ``_log_state.indentation += num`` raised AttributeError.
    _log_state.indentation = getattr(_log_state, 'indentation', 0) + num
    try:
        yield
    finally:
        _log_state.indentation -= num


def get_indentation():
    """Return the current indentation level for this thread (0 if unset)."""
    return getattr(_log_state, 'indentation', 0)
class IndentingFormatter(logging.Formatter):
    """Formatter that indents messages by the current ``indent_log`` level."""

    def format(self, record):
        """
        Format ``record`` with the standard formatter, then prefix every
        line of the result with the current indentation.
        """
        prefix = " " * get_indentation()
        rendered = logging.Formatter.format(self, record)
        return "".join(
            prefix + line for line in rendered.splitlines(True)
        )
def _color_wrap(*colors):
    """Return a function wrapping its argument in *colors* plus a reset code."""
    def wrapped(inp):
        parts = list(colors)
        parts.append(inp)
        parts.append(colorama.Style.RESET_ALL)
        return "".join(parts)
    return wrapped
class ColorizedStreamHandler(logging.StreamHandler):
    """Stream handler that colorizes WARNING/ERROR records when possible."""

    # Don't build up a list of colors if we don't have colorama
    if colorama:
        COLORS = [
            # This needs to be in order from highest logging level to lowest.
            (logging.ERROR, _color_wrap(colorama.Fore.RED)),
            (logging.WARNING, _color_wrap(colorama.Fore.YELLOW)),
        ]
    else:
        COLORS = []

    def __init__(self, stream=None):
        logging.StreamHandler.__init__(self, stream)

        # On Windows, wrap the stream so ANSI escape sequences are
        # translated into Win32 console calls.
        if WINDOWS and colorama:
            self.stream = colorama.AnsiToWin32(self.stream)

    def should_color(self):
        # Don't colorize things if we do not have colorama
        if not colorama:
            return False

        # Unwrap AnsiToWin32 so the isatty check sees the real stream.
        real_stream = (
            self.stream if not isinstance(self.stream, colorama.AnsiToWin32)
            else self.stream.wrapped
        )

        # If the stream is a tty we should color it
        if hasattr(real_stream, "isatty") and real_stream.isatty():
            return True

        # If we have an ANSI term we should color it
        if os.environ.get("TERM") == "ANSI":
            return True

        # If anything else we should not color it
        return False

    def format(self, record):
        msg = logging.StreamHandler.format(self, record)

        if self.should_color():
            # Apply the first (most severe) matching color.
            for level, color in self.COLORS:
                if record.levelno >= level:
                    msg = color(msg)
                    break

        return msg
class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler):
    """RotatingFileHandler that creates the log directory if it is missing."""

    def _open(self):
        # Ensure the parent directory exists before the base class tries
        # to open the file.
        ensure_dir(os.path.dirname(self.baseFilename))
        return logging.handlers.RotatingFileHandler._open(self)
class MaxLevelFilter(logging.Filter):
    """Logging filter passing only records strictly below a given level."""

    def __init__(self, level):
        # Records with levelno >= ``level`` are rejected by filter().
        self.level = level

    def filter(self, record):
        """Return true for records less severe than the cutoff level."""
        return self.level > record.levelno
| mit |
xzturn/tensorflow | tensorflow/lite/testing/op_tests/elementwise.py | 7 | 2848 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for elementwise ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
def _make_elementwise_tests(op):
  """Make a set of tests to do element-wise operations."""

  def test_fn(options):
    """Generate examples for the single-input element-wise `op`."""
    # One parameter set: float32 inputs across a range of ranks/shapes.
    test_parameters = [{
        "input_dtype": [tf.float32],
        "input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
    }]

    def build_graph(parameters):
      """Build the unary op testing graph."""
      inp = tf.compat.v1.placeholder(
          dtype=parameters["input_dtype"],
          name="input1",
          shape=parameters["input_shape"])
      return [inp], [op(inp)]

    def build_inputs(parameters, sess, inputs, outputs):
      """Feed random data through the graph and capture its outputs."""
      data = create_tensor_data(parameters["input_dtype"],
                                parameters["input_shape"])
      return [data], sess.run(outputs, feed_dict={inputs[0]: data})

    make_zip_of_tests(options, test_parameters, build_graph, build_inputs)

  return test_fn
@register_make_test_function()
def make_sin_tests(options):
  """Generate element-wise test cases for the sin op."""
  test_fn = _make_elementwise_tests(tf.sin)
  return test_fn(options)
@register_make_test_function()
def make_log_tests(options):
  """Generate element-wise test cases for the natural-log op."""
  test_fn = _make_elementwise_tests(tf.math.log)
  return test_fn(options)
@register_make_test_function()
def make_sqrt_tests(options):
  """Generate element-wise test cases for the sqrt op."""
  test_fn = _make_elementwise_tests(tf.sqrt)
  return test_fn(options)
@register_make_test_function()
def make_rsqrt_tests(options):
  """Generate element-wise test cases for the reciprocal-sqrt op."""
  test_fn = _make_elementwise_tests(tf.math.rsqrt)
  return test_fn(options)
@register_make_test_function()
def make_square_tests(options):
  """Generate element-wise test cases for the square op."""
  test_fn = _make_elementwise_tests(tf.square)
  return test_fn(options)
ndemir/scrapy | scrapy/contrib_exp/iterators.py | 25 | 1383 | from scrapy.http import Response
from scrapy.selector import XmlXPathSelector
def xmliter_lxml(obj, nodename, namespace=None):
    """Yield an XmlXPathSelector for every <nodename> node streamed from obj."""
    from lxml import etree
    reader = _StreamReader(obj)
    if namespace:
        tag = '{%s}%s' % (namespace, nodename)
        selxpath = '//x:%s' % nodename
    else:
        tag = nodename
        selxpath = '//%s' % nodename
    for _, node in etree.iterparse(reader, tag=tag, encoding=reader.encoding):
        nodetext = etree.tostring(node)
        # Free the parsed subtree so memory stays bounded on big feeds.
        node.clear()
        xs = XmlXPathSelector(text=nodetext)
        if namespace:
            xs.register_namespace('x', namespace)
        yield xs.select(selxpath)[0]
class _StreamReader(object):
    """
    Minimal file-like wrapper used to feed ``etree.iterparse``.

    Accepts either a ``Response`` or a raw string; unicode input is
    encoded to UTF-8 chunk by chunk.  (Python 2 code: uses the
    ``unicode`` builtin.)
    """

    def __init__(self, obj):
        # Read position into the underlying text.
        self._ptr = 0
        if isinstance(obj, Response):
            self._text, self.encoding = obj.body, obj.encoding
        else:
            self._text, self.encoding = obj, 'utf-8'

        self._is_unicode = isinstance(self._text, unicode)

    def read(self, n=65535):
        # On first call, replace this method with the specialized reader
        # for the text type; lstrip the first chunk so leading
        # whitespace does not confuse the XML parser.
        self.read = self._read_unicode if self._is_unicode else self._read_string
        return self.read(n).lstrip()

    def _read_string(self, n=65535):
        # Plain byte-string chunked read.
        s, e = self._ptr, self._ptr + n
        self._ptr = e
        return self._text[s:e]

    def _read_unicode(self, n=65535):
        # Unicode chunked read; encode to UTF-8 for the parser.
        s, e = self._ptr, self._ptr + n
        self._ptr = e
        return self._text[s:e].encode('utf-8')
| bsd-3-clause |
kjc88/sl4a | python/src/Demo/tkinter/guido/mbox.py | 43 | 7478 | #! /usr/bin/env python
# Scan MH folder, display results in window
import os
import sys
import re
import getopt
import string
import mhlib
from Tkinter import *
from dialog import dialog
# Root of the user's MH mail tree.
mailbox = os.environ['HOME'] + '/Mail'
def main():
    """Parse the command line, build the Tk UI, scan the initial folder
    and enter the Tk main loop.  (Python 2 code.)"""
    global root, tk, top, mid, bot
    global folderbox, foldermenu, scanbox, scanmenu, viewer
    global folder, seq
    global mh, mhf

    # Parse command line options: +folder selects a folder, anything
    # else is taken as the message sequence.
    folder = 'inbox'
    seq = 'all'
    try:
        opts, args = getopt.getopt(sys.argv[1:], '')
    except getopt.error, msg:
        print msg
        sys.exit(2)
    for arg in args:
        if arg[:1] == '+':
            folder = arg[1:]
        else:
            seq = arg

    # Initialize MH
    mh = mhlib.MH()
    mhf = mh.openfolder(folder)

    # Build widget hierarchy
    root = Tk()
    tk = root.tk

    top = Frame(root)
    top.pack({'expand': 1, 'fill': 'both'})

    # Build right part: folder list
    right = Frame(top)
    right.pack({'fill': 'y', 'side': 'right'})

    folderbar = Scrollbar(right, {'relief': 'sunken', 'bd': 2})
    folderbar.pack({'fill': 'y', 'side': 'right'})

    folderbox = Listbox(right, {'exportselection': 0})
    folderbox.pack({'expand': 1, 'fill': 'both', 'side': 'left'})

    # Context menu for the folder list (posted on button 3).
    foldermenu = Menu(root)
    foldermenu.add('command',
                   {'label': 'Open Folder',
                    'command': open_folder})
    foldermenu.add('separator')
    foldermenu.add('command',
                   {'label': 'Quit',
                    'command': 'exit'})
    foldermenu.bind('<ButtonRelease-3>', folder_unpost)

    folderbox['yscrollcommand'] = (folderbar, 'set')
    folderbar['command'] = (folderbox, 'yview')
    folderbox.bind('<Double-1>', open_folder, 1)
    folderbox.bind('<3>', folder_post)

    # Build left part: scan list
    left = Frame(top)
    left.pack({'expand': 1, 'fill': 'both', 'side': 'left'})

    scanbar = Scrollbar(left, {'relief': 'sunken', 'bd': 2})
    scanbar.pack({'fill': 'y', 'side': 'right'})

    scanbox = Listbox(left, {'font': 'fixed'})
    scanbox.pack({'expand': 1, 'fill': 'both', 'side': 'left'})

    # Context menu for the message list.
    scanmenu = Menu(root)
    scanmenu.add('command',
                 {'label': 'Open Message',
                  'command': open_message})
    scanmenu.add('command',
                 {'label': 'Remove Message',
                  'command': remove_message})
    scanmenu.add('command',
                 {'label': 'Refile Message',
                  'command': refile_message})
    scanmenu.add('separator')
    scanmenu.add('command',
                 {'label': 'Quit',
                  'command': 'exit'})
    scanmenu.bind('<ButtonRelease-3>', scan_unpost)

    scanbox['yscrollcommand'] = (scanbar, 'set')
    scanbar['command'] = (scanbox, 'yview')
    scanbox.bind('<Double-1>', open_message)
    scanbox.bind('<3>', scan_post)

    # Separator between middle and bottom part
    rule2 = Frame(root, {'bg': 'black'})
    rule2.pack({'fill': 'x'})

    # Build bottom part: current message
    bot = Frame(root)
    bot.pack({'expand': 1, 'fill': 'both'})
    #
    viewer = None

    # Window manager commands
    root.minsize(800, 1) # Make window resizable

    # Fill folderbox with text
    setfolders()

    # Fill scanbox with text
    rescan()

    # Enter mainloop
    root.mainloop()
def folder_post(e):
    """Pop up the folder context menu near the mouse position."""
    x, y = e.x_root, e.y_root
    foldermenu.post(x - 10, y - 10)
    foldermenu.grab_set()
def folder_unpost(e):
    """Dismiss the folder menu and invoke the entry under the pointer."""
    tk.call('update', 'idletasks')
    foldermenu.grab_release()
    foldermenu.unpost()
    foldermenu.invoke('active')
def scan_post(e):
    """Pop up the message context menu near the mouse position."""
    x, y = e.x_root, e.y_root
    scanmenu.post(x - 10, y - 10)
    scanmenu.grab_set()
def scan_unpost(e):
    """Dismiss the message menu and invoke the entry under the pointer."""
    tk.call('update', 'idletasks')
    scanmenu.grab_release()
    scanmenu.unpost()
    scanmenu.invoke('active')
# Matches (and captures) the leading message number in a line of
# scan(1) output.
scanparser = re.compile('^ *([0-9]+)')
def open_folder(e=None):
    """Open the folder selected in the folder listbox and rescan it."""
    global folder, mhf
    sel = folderbox.curselection()
    if len(sel) != 1:
        if len(sel) > 1:
            msg = "Please open one folder at a time"
        else:
            msg = "Please select a folder to open"
        dialog(root, "Can't Open Folder", msg, "", 0, "OK")
        return
    i = sel[0]
    folder = folderbox.get(i)
    mhf = mh.openfolder(folder)
    rescan()
def open_message(e=None):
    """Open the message selected in the scan listbox in a MimeViewer."""
    global viewer
    sel = scanbox.curselection()
    if len(sel) != 1:
        if len(sel) > 1:
            msg = "Please open one message at a time"
        else:
            msg = "Please select a message to open"
        dialog(root, "Can't Open Message", msg, "", 0, "OK")
        return
    cursor = scanbox['cursor']
    scanbox['cursor'] = 'watch'
    tk.call('update', 'idletasks')
    i = sel[0]
    line = scanbox.get(i)
    # Bug fix: `re` pattern objects keep no last-match state, so the old
    # `scanparser.match(line) >= 0` / `scanparser.group(1)` idiom (a
    # leftover from the defunct `regex` module) raised AttributeError.
    # Keep the match object and query it instead.
    mo = scanparser.match(line)
    if mo:
        num = string.atoi(mo.group(1))
        m = mhf.openmessage(num)
        if viewer: viewer.destroy()
        from MimeViewer import MimeViewer
        viewer = MimeViewer(bot, '+%s/%d' % (folder, num), m)
        viewer.pack()
        viewer.show()
    scanbox['cursor'] = cursor
def interestingheader(header):
    """Return true for headers worth displaying (everything but Received)."""
    return not (header == 'received')
def remove_message(e=None):
    """Remove all selected messages from the current folder."""
    itop = scanbox.nearest(0)
    sel = scanbox.curselection()
    if not sel:
        dialog(root, "No Message To Remove",
               "Please select a message to remove", "", 0, "OK")
        return
    todo = []
    for i in sel:
        line = scanbox.get(i)
        # Bug fix: use the re match object; `re` patterns have no
        # last-match state (old `regex`-module idiom raised
        # AttributeError on scanparser.group).
        mo = scanparser.match(line)
        if mo:
            todo.append(string.atoi(mo.group(1)))
    if not todo:
        # No selected line carried a message number; nothing to remove
        # (and min(todo) below would raise ValueError).
        return
    mhf.removemessages(todo)
    rescan()
    fixfocus(min(todo), itop)
# Cache of the most recent refile target so repeated refiles to the
# same folder reuse one open MH folder object.
lastrefile = ''
tofolder = None
def refile_message(e=None):
    """Refile the selected messages into the folder selected on the right."""
    global lastrefile, tofolder
    itop = scanbox.nearest(0)
    sel = scanbox.curselection()
    if not sel:
        dialog(root, "No Message To Refile",
               "Please select a message to refile", "", 0, "OK")
        return
    foldersel = folderbox.curselection()
    if len(foldersel) != 1:
        if not foldersel:
            msg = "Please select a folder to refile to"
        else:
            msg = "Please select exactly one folder to refile to"
        dialog(root, "No Folder To Refile", msg, "", 0, "OK")
        return
    refileto = folderbox.get(foldersel[0])
    todo = []
    for i in sel:
        line = scanbox.get(i)
        # Bug fix: use the re match object; `re` patterns have no
        # last-match state (old `regex`-module idiom raised
        # AttributeError on scanparser.group).
        mo = scanparser.match(line)
        if mo:
            todo.append(string.atoi(mo.group(1)))
    if not todo:
        # No selected line carried a message number; nothing to refile.
        return
    if lastrefile != refileto or not tofolder:
        lastrefile = refileto
        tofolder = None
        tofolder = mh.openfolder(lastrefile)
    mhf.refilemessages(todo, tofolder)
    rescan()
    fixfocus(min(todo), itop)
def fixfocus(near, itop):
    """Re-select the message numbered closest to `near` (searching down)
    and restore the scan list's top line to `itop`."""
    n = scanbox.size()
    for i in range(n):
        line = scanbox.get(repr(i))
        # Bug fix: query the re match object; `re` pattern objects have
        # no .group() (old `regex`-module idiom).
        mo = scanparser.match(line)
        if mo:
            num = string.atoi(mo.group(1))
            if num >= near:
                break
    else:
        # No message at or past `near`: select past the end.
        i = 'end'
    scanbox.select_from(i)
    scanbox.yview(itop)
def setfolders():
    """Fill the folder listbox with every MH folder."""
    folderbox.delete(0, 'end')
    for fn in mh.listallfolders():
        folderbox.insert('end', fn)
def rescan():
    """Refresh the scan listbox from the current folder; close any open
    message viewer first."""
    global viewer
    if viewer:
        viewer.destroy()
        viewer = None
    scanbox.delete(0, 'end')
    for line in scanfolder(folder, seq):
        scanbox.insert('end', line)
def scanfolder(folder = 'inbox', sequence = 'all'):
    """Run scan(1) on the given folder/sequence and return its output
    lines with trailing newlines stripped."""
    pipe = os.popen('scan +%s %s' % (folder, sequence), 'r')
    return [line[:-1] for line in pipe.readlines()]
# Start the application at import/run time (module-level entry point).
main()
| apache-2.0 |
pleaseproject/python-for-android | python3-alpha/python3-src/Lib/test/test_dummy_threading.py | 182 | 1807 | from test import support
import unittest
import dummy_threading as _threading
import time
class DummyThreadingTestCase(unittest.TestCase):
    """Exercise dummy_threading's Thread/BoundedSemaphore/RLock stand-ins."""

    class TestThread(_threading.Thread):

        def run(self):
            # Shared state lives in module globals set up by setUp():
            # `sema` bounds concurrency, `mutex` guards `running`.
            global running
            global sema
            global mutex
            # Uncomment if testing another module, such as the real 'threading'
            # module.
            #delay = random.random() * 2
            delay = 0
            if support.verbose:
                print('task', self.name, 'will run for', delay, 'sec')
            sema.acquire()
            mutex.acquire()
            running += 1
            if support.verbose:
                print(running, 'tasks are running')
            mutex.release()
            time.sleep(delay)
            if support.verbose:
                print('task', self.name, 'done')
            mutex.acquire()
            running -= 1
            if support.verbose:
                print(self.name, 'is finished.', running, 'tasks are running')
            mutex.release()
            sema.release()

    def setUp(self):
        # Number of worker "threads" to launch per test.
        self.numtasks = 10
        global sema
        sema = _threading.BoundedSemaphore(value=3)
        global mutex
        mutex = _threading.RLock()
        global running
        running = 0
        self.threads = []

    def test_tasks(self):
        # Start all tasks, then join them all; with dummy_threading each
        # start() runs the task to completion synchronously.
        for i in range(self.numtasks):
            t = self.TestThread(name="<thread %d>"%i)
            self.threads.append(t)
            t.start()

        if support.verbose:
            print('waiting for all tasks to complete')
        for t in self.threads:
            t.join()
        if support.verbose:
            print('all tasks done')
def test_main():
    """Entry point used by the regrtest framework."""
    support.run_unittest(DummyThreadingTestCase)


if __name__ == '__main__':
    test_main()
| apache-2.0 |
xcyan/models | skip_thoughts/skip_thoughts/skip_thoughts_model_test.py | 19 | 6755 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_models.skip_thoughts.skip_thoughts_model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from skip_thoughts import configuration
from skip_thoughts import skip_thoughts_model
class SkipThoughtsModel(skip_thoughts_model.SkipThoughtsModel):
  """Subclass of SkipThoughtsModel without the disk I/O."""

  def build_inputs(self):
    if self.mode == "encode":
      # Encode mode doesn't read from disk, so defer to parent.
      return super(SkipThoughtsModel, self).build_inputs()
    else:
      # Replace disk I/O with random Tensors.
      # Sequence length is fixed at 15 for these tests; ids are drawn
      # uniformly from the vocabulary and all mask positions are valid.
      self.encode_ids = tf.random_uniform(
          [self.config.batch_size, 15],
          minval=0,
          maxval=self.config.vocab_size,
          dtype=tf.int64)
      self.decode_pre_ids = tf.random_uniform(
          [self.config.batch_size, 15],
          minval=0,
          maxval=self.config.vocab_size,
          dtype=tf.int64)
      self.decode_post_ids = tf.random_uniform(
          [self.config.batch_size, 15],
          minval=0,
          maxval=self.config.vocab_size,
          dtype=tf.int64)
      self.encode_mask = tf.ones_like(self.encode_ids)
      self.decode_pre_mask = tf.ones_like(self.decode_pre_ids)
      self.decode_post_mask = tf.ones_like(self.decode_post_ids)
class SkipThoughtsModelTest(tf.test.TestCase):
  """Shape and parameter-count checks for the skip-thoughts model graph."""

  def setUp(self):
    super(SkipThoughtsModelTest, self).setUp()
    self._model_config = configuration.model_config()

  def _countModelParameters(self):
    """Counts the number of parameters in the model at top level scope."""
    counter = {}
    for v in tf.global_variables():
      name = v.op.name.split("/")[0]
      num_params = v.get_shape().num_elements()
      if not num_params:
        self.fail("Could not infer num_elements from Variable %s" % v.op.name)
      counter[name] = counter.get(name, 0) + num_params
    return counter

  def _checkModelParameters(self):
    """Verifies the number of parameters in the model."""
    param_counts = self._countModelParameters()
    expected_param_counts = {
        # vocab_size * embedding_size
        "word_embedding": 12400000,
        # GRU Cells
        "encoder": 21772800,
        "decoder_pre": 21772800,
        "decoder_post": 21772800,
        # (encoder_dim + 1) * vocab_size
        "logits": 48020000,
        "global_step": 1,
    }
    self.assertDictEqual(expected_param_counts, param_counts)

  def _checkOutputs(self, expected_shapes, feed_dict=None):
    """Verifies that the model produces expected outputs.

    Args:
      expected_shapes: A dict mapping Tensor or Tensor name to expected output
        shape.
      feed_dict: Values of Tensors to feed into Session.run().
    """
    # NOTE(review): indexing keys() below assumes Python 2 list
    # semantics (dict views are not indexable on Python 3) — confirm.
    fetches = expected_shapes.keys()

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      outputs = sess.run(fetches, feed_dict)

    for index, output in enumerate(outputs):
      tensor = fetches[index]
      expected = expected_shapes[tensor]
      actual = output.shape
      if expected != actual:
        self.fail("Tensor %s has shape %s (expected %s)." % (tensor, actual,
                                                             expected))

  def testBuildForTraining(self):
    model = SkipThoughtsModel(self._model_config, mode="train")
    model.build()

    self._checkModelParameters()

    expected_shapes = {
        # [batch_size, length]
        model.encode_ids: (128, 15),
        model.decode_pre_ids: (128, 15),
        model.decode_post_ids: (128, 15),
        model.encode_mask: (128, 15),
        model.decode_pre_mask: (128, 15),
        model.decode_post_mask: (128, 15),
        # [batch_size, length, word_embedding_dim]
        model.encode_emb: (128, 15, 620),
        model.decode_pre_emb: (128, 15, 620),
        model.decode_post_emb: (128, 15, 620),
        # [batch_size, encoder_dim]
        model.thought_vectors: (128, 2400),
        # [batch_size * length]
        model.target_cross_entropy_losses[0]: (1920,),
        model.target_cross_entropy_losses[1]: (1920,),
        # [batch_size * length]
        model.target_cross_entropy_loss_weights[0]: (1920,),
        model.target_cross_entropy_loss_weights[1]: (1920,),
        # Scalar
        model.total_loss: (),
    }
    self._checkOutputs(expected_shapes)

  def testBuildForEval(self):
    model = SkipThoughtsModel(self._model_config, mode="eval")
    model.build()

    self._checkModelParameters()

    expected_shapes = {
        # [batch_size, length]
        model.encode_ids: (128, 15),
        model.decode_pre_ids: (128, 15),
        model.decode_post_ids: (128, 15),
        model.encode_mask: (128, 15),
        model.decode_pre_mask: (128, 15),
        model.decode_post_mask: (128, 15),
        # [batch_size, length, word_embedding_dim]
        model.encode_emb: (128, 15, 620),
        model.decode_pre_emb: (128, 15, 620),
        model.decode_post_emb: (128, 15, 620),
        # [batch_size, encoder_dim]
        model.thought_vectors: (128, 2400),
        # [batch_size * length]
        model.target_cross_entropy_losses[0]: (1920,),
        model.target_cross_entropy_losses[1]: (1920,),
        # [batch_size * length]
        model.target_cross_entropy_loss_weights[0]: (1920,),
        model.target_cross_entropy_loss_weights[1]: (1920,),
        # Scalar
        model.total_loss: (),
    }
    self._checkOutputs(expected_shapes)

  def testBuildForEncode(self):
    model = SkipThoughtsModel(self._model_config, mode="encode")
    model.build()

    # Test feeding a batch of word embeddings to get skip thought vectors.
    encode_emb = np.random.rand(64, 15, 620)
    encode_mask = np.ones((64, 15), dtype=np.int64)
    feed_dict = {model.encode_emb: encode_emb, model.encode_mask: encode_mask}
    expected_shapes = {
        # [batch_size, encoder_dim]
        model.thought_vectors: (64, 2400),
    }
    self._checkOutputs(expected_shapes, feed_dict)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
wwjiang007/flink | flink-python/pyflink/__init__.py | 9 | 1969 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#################################################################################
import sys
from functools import wraps
# Extend __path__ so that "pyflink" can behave as a namespace package split
# across multiple installation directories.
__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore

# Fail fast at import time with a clear message on unsupported interpreters.
if sys.version_info < (3, 6):
    raise RuntimeError(
        'Python versions prior to 3.6 are not supported for PyFlink [' +
        str(sys.version_info) + '].')
def keyword(func):
    """
    A decorator that forces keyword arguments usage and store actual
    input keyword arguments in `_input_kwargs`.

    Positional arguments (other than ``self``) raise ``TypeError`` because
    the wrapper only accepts ``**kwargs``.
    """
    @wraps(func)
    def wrapped(self, **kwargs):
        # Remember exactly what the caller passed before delegating.
        self._input_kwargs = kwargs
        return func(self, **kwargs)

    return wrapped
def add_version_doc(f, version):
    """
    Annotates a function to append the version the function was added.

    The ``.. versionadded::`` directive is indented to match the smallest
    indentation of the existing docstring body so Sphinx renders it inline.
    """
    import re

    doc = f.__doc__ or ""
    # Collect the leading spaces of every non-blank line after a newline.
    leading = re.compile(r'\n( *)[^\n ]').findall(doc)
    pad = (' ' * min(len(s) for s in leading)) if leading else ''
    f.__doc__ = doc.rstrip() + "\n\n%s.. versionadded:: %s" % (pad, version)
| apache-2.0 |
wooga/airflow | airflow/contrib/operators/azure_cosmos_operator.py | 5 | 1212 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.microsoft.azure.operators.azure_cosmos`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.microsoft.azure.operators.azure_cosmos import AzureCosmosInsertDocumentOperator # noqa
# Emit the deprecation warning at import time so users migrating to the
# providers package are pointed at the new module location.  stacklevel=2
# attributes the warning to the importing module rather than to this shim.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.microsoft.azure.operators.azure_cosmos`.",
    DeprecationWarning, stacklevel=2
)
| apache-2.0 |
thegodone/pyms | Deconvolution/BillerBiemann/Function.py | 7 | 11308 | """
BillerBiemann deconvolution algorithm
Provides functions to perform Biller and Biemann deconvolution
"""
#############################################################################
# #
# PyMS software for processing of metabolomic mass-spectrometry data #
# Copyright (C) 2005-2012 Vladimir Likic #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License version 2 as #
# published by the Free Software Foundation. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. #
# #
#############################################################################
import numpy
import copy
from pyms.Utils.Error import error
from pyms.Utils.Utils import is_list, is_number, is_int
from pyms.GCMS.Class import IonChromatogram, MassSpectrum
from pyms.Peak.Class import Peak
# If psyco is installed, use it to speed up running time.  Catch only
# ImportError: the original bare "except:" also swallowed unrelated errors
# (including KeyboardInterrupt), hiding real failures in psyco.full().
try:
    import psyco
    psyco.full()
except ImportError:
    # psyco is optional; silently continue without the speed-up.
    pass
#######################
# structure
# 1) find local maxima per ion, store intensity and scan index
# 2) sum across N scans to compensate for scan type
# 3) sum ions belonging to each maxima scan
#######################
def BillerBiemann(im, points=3, scans=1):
    """
    @summary: BillerBiemann Deconvolution

        Deconvolution based on the algorithm of Biller and Biemann (1974)

    @param im: An IntensityMatrix object
    @type im: pyms.GCMS.Class.IntensityMatrix
    @param points: Peak if maxima over 'points' number of scans (Default 3)
    @type points: IntType
    @param scans: To compensate for spectra skewing,
        peaks from 'scans' scans are combined (Default 1).
    @type scans: IntType

    @return: List of Peak objects
    @rtype: ListType

    @author: Andrew Isaac
    """
    times = im.get_time_list()
    masses = im.get_mass_list()
    maxima_im = get_maxima_matrix(im, points, scans)

    peaks = []
    for scan_idx in range(len(maxima_im)):
        intensities = maxima_im[scan_idx]
        # Rows with no surviving maxima contribute no peak.
        if sum(intensities) <= 0:
            continue
        spectrum = MassSpectrum(masses, intensities)
        peak = Peak(times[scan_idx], spectrum)
        peak.set_pt_bounds([0, scan_idx, 0])  # store IM index for convenience
        peaks.append(peak)

    return peaks
def rel_threshold(pl, percent=2):
    """
    @summary: Remove ions with relative intensities less than the given
        relative percentage of the maximum intensity.

    @param pl: A list of Peak objects
    @type pl: ListType
    @param percent: Threshold for relative percentage of intensity (Default 2%)
    @type percent: FloatType

    @return: A new list of Peak objects with threshold ions
    @rtype: ListType

    @author: Andrew Isaac
    """
    if not is_number(percent) or percent <= 0:
        error("'percent' must be a number > 0")

    thresholded = []
    # Work on deep copies so the caller's peaks are never mutated.
    for peak in copy.deepcopy(pl):
        spectrum = peak.get_mass_spectrum()
        intensities = spectrum.mass_spec
        # Divide the maximum by 100 first: intensities can be large.
        cutoff = (max(intensities)/100.0)*float(percent)
        for idx in range(len(intensities)):
            if intensities[idx] < cutoff:
                intensities[idx] = 0
        spectrum.mass_spec = intensities
        peak.set_mass_spectrum(spectrum)
        thresholded.append(peak)

    return thresholded
def num_ions_threshold(pl, n, cutoff):
    """
    @summary: Remove Peaks where there are less than a given number of ion
        intensities above the given threshold

    @param pl: A list of Peak objects
    @type pl: ListType
    @param n: Minimum number of ions that must have intensities above the cutoff
    @type n: IntType
    @param cutoff: The minimum intensity threshold
    @type cutoff: FloatType

    @return: A new list of Peak objects
    @rtype: ListType

    @author: Andrew Isaac
    """
    filtered = []
    # Work on deep copies so the returned peaks are independent of the input.
    for peak in copy.deepcopy(pl):
        intensities = peak.get_mass_spectrum().mass_spec
        above = sum(1 for intensity in intensities if intensity >= cutoff)
        if above >= n:
            filtered.append(peak)
    return filtered
def sum_maxima(im, points=3, scans=1):
    """
    @summary: Reconstruct the TIC as sum of maxima

    @param im: An IntensityMatrix object
    @type im: pyms.GCMS.Class.IntensityMatrix
    @param points: Peak if maxima over 'points' number of scans
    @type points: IntType
    @param scans: To compensate for spectra skewing,
        peaks from 'scans' scans are combined.
    @type scans: IntType

    @return: The reconstructed TIC
    @rtype: pyms.GCMS.Class.IonChromatogram

    @author: Andrew Isaac
    """
    maxima_im = get_maxima_matrix(im, points)
    numrows = len(maxima_im)
    half = int(scans/2)

    sums = []
    for row in range(numrows):
        # Sum maxima over a window of 'scans' rows centred on this row,
        # clipping the window at the matrix boundaries.
        total = 0
        for offset in range(scans):
            neighbour = row - half + offset
            if 0 <= neighbour < numrows:
                total += maxima_im[neighbour].sum()
        sums.append(total)

    return IonChromatogram(numpy.array(sums), im.get_time_list())
def get_maxima_indices(ion_intensities, points=3):
    """
    @summary: Find local maxima.

        Slides a window of 'points' scans over the intensities and records
        the index of each window centre that is a strict local maximum.
        A plateau entered by a rise and left by a fall is reported once,
        at the centre of the plateau.

    @param ion_intensities: A list of intensities for a single ion
    @type ion_intensities: ListType
    @param points: Peak if maxima over 'points' number of scans
    @type points: IntType

    @return: A list of scan indices
    @rtype: ListType

    @author: Andrew Isaac
    """
    if not is_list(ion_intensities) or not is_number(ion_intensities[0]):
        error("'ion_intensities' must be a List of numbers")
    # find peak inflection points
    # use a 'points' point window
    # for a plateau after a rise, need to check if it is the left edge of
    # a peak
    peak_point = []
    edge = -1  # index where the latest rising-edge plateau started (-1: none)
    points = int(points)
    half = int(points/2)
    points = 2*half+1 # ensure odd number of points
    # Window layout: 'half' values left, the centre, 'half' values right.
    for index in range(len(ion_intensities)-points+1):
        left = ion_intensities[index:index+half]
        mid = ion_intensities[index+half]
        right = ion_intensities[index+half+1:index+points]
        # max in middle
        if mid > max(left) and mid > max(right):
            peak_point.append(index+half)
            edge = -1 # ignore previous rising edge
        # flat from rise (left of peak?)
        if mid > max(left) and mid == max(right):
            edge = index+half # ignore previous rising edge, update latest
        # fall from flat
        if mid == max(left) and mid > max(right):
            if edge > -1:
                # Plateau confirmed as a peak: report its midpoint.
                centre = int((edge+index+half)/2) # mid point
                peak_point.append(centre)
            edge = -1
    return peak_point
def get_maxima_list(ic, points=3):
    """
    @summary: List of retention time and intensity of local maxima for ion

    @param ic: An IonChromatogram object
    @type ic: pyms.GCMS.Class.IonChromatogram
    @param points: Peak if maxima over 'points' number of scans
    @type points: IntType

    @return: A list of retention time and intensity of local maxima for ion
    @rtype: ListType

    @author: Andrew Isaac
    """
    # Translate each maximum's scan index into a [retention time, intensity]
    # pair.
    return [
        [ic.get_time_at_index(idx), ic.get_intensity_at_index(idx)]
        for idx in get_maxima_indices(ic.get_intensity_array(), points)
    ]
def get_maxima_list_reduced(ic, mp_rt, points=13, window=3):
    """
    @summary: List of retention time and intensity of local maxima for ion
              Only peaks around a specific retention time are recorded
              created for use with gap filling algorithm

    @param ic: An IonChromatogram object
    @type ic: pyms.GCMS.Class.IonChromatogram
    @param mp_rt: The retention time of the missing peak
    @type mp_rt: FloatType
    @param points: Peak if maxima over 'points' number of scans
    @type points: IntType
    @param window: The window around the mp_rt where peaks should
                   be recorded
    @type window: IntType

    @return: A list of retention time and intensity of local maxima for ion
    @rtype: ListType

    @author: Andrew Isaac
    """
    # Fixed: docstring previously mislabelled mp_rt's type tag as "@type ic";
    # also removed the dead "else: pass" branch from the filter loop.
    peak_point = get_maxima_indices(ic.get_intensity_array(), points)
    mlist = []
    for index in peak_point:
        rt = ic.get_time_at_index(index)
        # Keep only maxima strictly inside (mp_rt - window, mp_rt + window).
        if float(mp_rt) - window < rt < float(mp_rt) + window:
            intens = ic.get_intensity_at_index(index)
            mlist.append([rt, intens])
    return mlist
def get_maxima_matrix(im, points=3, scans=1):
    """
    @summary: Get matrix of local maxima for each ion

    @param im: An IntensityMatrix object (code calls get_size() and
        get_matrix_list(); the original docstring's "list" description
        appears to be a copy-paste error)
    @type im: pyms.GCMS.Class.IntensityMatrix
    @param points: Peak if maxima over 'points' number of scans
    @type points: IntType
    @param scans: To compensate for spectra skewing,
        peaks from 'scans' scans are combined (Default 1).
    @type scans: IntType

    @return: A matrix of each ion and scan and intensity at ion peaks
    @rtype: ListType

    @author: Andrew Isaac
    """
    numrows, numcols = im.get_size()
    # zeroed matrix, size numrows*numcols
    maxima_im = numpy.zeros((numrows, numcols))
    raw_im = numpy.array(im.get_matrix_list())
    for col in range(numcols): # assume all rows have same width
        # 1st, find maxima
        maxima = get_maxima_indices(raw_im[:,col], points)
        # 2nd, fill intensities
        for row in maxima:
            maxima_im[row, col] = raw_im[row, col]
    # combine spectra within 'scans' scans.
    half = int(scans/2)
    for row in range(numrows):
        tic = 0
        best = 0
        loc = 0
        # find best in scans: the window row with the largest summed intensity
        for ii in range(scans):
            if row - half + ii >= 0 and row - half + ii < numrows:
                tic = maxima_im[row - half + ii].sum()
                # find largest tic of scans
                if tic > best:
                    best = tic
                    loc = ii
        # move and add others to best: fold every other window row's
        # intensities into the winning row and zero them out
        for ii in range(scans):
            if row - half + ii >= 0 and row - half + ii < numrows and ii != loc:
                for col in range(numcols):
                    maxima_im[row - half + loc, col] += \
                        maxima_im[row - half + ii, col]
                    maxima_im[row - half + ii, col] = 0
    return maxima_im
| gpl-2.0 |
edeposit/marcxml_parser | src/marcxml_parser/tools/resorted.py | 1 | 1135 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
# Functions & classes =========================================================
def resorted(values):
    """
    Sort values, but put numbers after alphabetically sorted words.

    This function is here to make outputs diff-compatible with Aleph.

    Example::

        >>> sorted(["b", "1", "a"])
        ['1', 'a', 'b']
        >>> resorted(["b", "1", "a"])
        ['a', 'b', '1']

    Args:
        values (iterable): any iterable object/list/tuple/whatever.

    Returns:
        list of sorted values, but with numbers after words
    """
    if not values:
        return values

    ordered = sorted(values)

    # Locate the first element that starts with a non-digit character;
    # everything before it sorted as a "number".
    split_at = None
    for position, item in enumerate(ordered):
        if item and not item[0].isdigit():
            split_at = position
            break

    # Nothing but numbers: the plain sort already has the right order.
    if split_at is None:
        return ordered

    # Words first, then the numeric prefix moved to the end.
    return ordered[split_at:] + ordered[:split_at]
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.