input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SavedModelCLI tool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import pickle
import platform
import shutil
import sys
from absl.testing import parameterized
import numpy as np
from six import StringIO
from tensorflow.core.example import example_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import save
from tensorflow.python.tools import saved_model_cli
from tensorflow.python.training.tracking import tracking
SAVED_MODEL_PATH = ('cc/saved_model/testdata/half_plus_two/00000123')
@contextlib.contextmanager
def captured_output():
  """Temporarily replace sys.stdout/sys.stderr with in-memory buffers.

  Yields:
    A `(stdout, stderr)` pair of `StringIO` objects capturing everything
    written while the context is active. The real streams are restored on
    exit, even if the body raises.
  """
  captured_out = StringIO()
  captured_err = StringIO()
  saved_streams = (sys.stdout, sys.stderr)
  sys.stdout, sys.stderr = captured_out, captured_err
  try:
    yield captured_out, captured_err
  finally:
    # Always restore the real streams so a failing test does not poison
    # subsequent output.
    sys.stdout, sys.stderr = saved_streams
class SavedModelCLITestCase(test.TestCase, parameterized.TestCase):
  """Functional tests for the `saved_model_cli` command-line tool.

  Each test builds an argparse namespace via `saved_model_cli.create_parser`,
  invokes the corresponding CLI entry point, and compares the captured stdout
  against an expected transcript.
  """

  def setUp(self):
    # The tool is known to fail on Windows, so all tests are skipped there.
    super(SavedModelCLITestCase, self).setUp()
    if platform.system() == 'Windows':
      self.skipTest('Skipping failing tests on Windows.')

  def testShowCommandAll(self):
    """`show --dir ... --all` prints every SignatureDef of the test model."""
    base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
    self.parser = saved_model_cli.create_parser()
    args = self.parser.parse_args(['show', '--dir', base_path, '--all'])
    with captured_output() as (out, err):
      saved_model_cli.show(args)
    output = out.getvalue().strip()
    # pylint: disable=line-too-long
    exp_out = """MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:
signature_def['classify_x2_to_y3']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: x2:0
The given SavedModel SignatureDef contains the following output(s):
outputs['scores'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y3:0
Method name is: tensorflow/serving/classify
signature_def['classify_x_to_y']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_STRING
shape: unknown_rank
name: tf_example:0
The given SavedModel SignatureDef contains the following output(s):
outputs['scores'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y:0
Method name is: tensorflow/serving/classify
signature_def['regress_x2_to_y3']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: x2:0
The given SavedModel SignatureDef contains the following output(s):
outputs['outputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y3:0
Method name is: tensorflow/serving/regress
signature_def['regress_x_to_y']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_STRING
shape: unknown_rank
name: tf_example:0
The given SavedModel SignatureDef contains the following output(s):
outputs['outputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y:0
Method name is: tensorflow/serving/regress
signature_def['regress_x_to_y2']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_STRING
shape: unknown_rank
name: tf_example:0
The given SavedModel SignatureDef contains the following output(s):
outputs['outputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y2:0
Method name is: tensorflow/serving/regress
signature_def['serving_default']:
The given SavedModel SignatureDef contains the following input(s):
inputs['x'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: x:0
The given SavedModel SignatureDef contains the following output(s):
outputs['y'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y:0
Method name is: tensorflow/serving/predict"""
    # pylint: enable=line-too-long
    self.maxDiff = None  # Produce a useful error msg if the comparison fails
    self.assertMultiLineEqual(output, exp_out)
    self.assertEqual(err.getvalue().strip(), '')

  def testShowAllWithFunctions(self):
    """`show --all` also lists the tf.functions saved on the object."""

    class DummyModel(tracking.AutoTrackable):
      """Model with callable polymorphic functions specified."""

      @def_function.function
      def func1(self, a, b, c):
        if c:
          return a + b
        else:
          return a * b

      @def_function.function(input_signature=[
          tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32)
      ])
      def func2(self, x):
        return x + 2

      @def_function.function
      def __call__(self, y, c=7):
        return y + 2 * c

    saved_model_dir = os.path.join(test.get_temp_dir(), 'dummy_model')
    dummy_model = DummyModel()
    # Call with specific values to create new polymorphic function traces.
    dummy_model.func1(constant_op.constant(5), constant_op.constant(9), True)
    dummy_model(constant_op.constant(5))
    save.save(dummy_model, saved_model_dir)
    self.parser = saved_model_cli.create_parser()
    args = self.parser.parse_args(['show', '--dir', saved_model_dir, '--all'])
    with captured_output() as (out, err):
      saved_model_cli.show(args)
    output = out.getvalue().strip()
    exp_out = """MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:
signature_def['__saved_model_init_op']:
The given SavedModel SignatureDef contains the following input(s):
The given SavedModel SignatureDef contains the following output(s):
outputs['__saved_model_init_op'] tensor_info:
dtype: DT_INVALID
shape: unknown_rank
name: NoOp
Method name is:
signature_def['serving_default']:
The given SavedModel SignatureDef contains the following input(s):
inputs['x'] tensor_info:
dtype: DT_FLOAT
shape: (2, 2)
name: serving_default_x:0
The given SavedModel SignatureDef contains the following output(s):
outputs['output_0'] tensor_info:
dtype: DT_FLOAT
shape: (2, 2)
name: PartitionedCall:0
Method name is: tensorflow/serving/predict
Defined Functions:
Function Name: '__call__'
Option #1
Callable with:
Argument #1
y: TensorSpec(shape=(), dtype=tf.int32, name='y')
Argument #2
DType: int
Value: 7
Function Name: 'func1'
Option #1
Callable with:
Argument #1
a: TensorSpec(shape=(), dtype=tf.int32, name='a')
Argument #2
b: TensorSpec(shape=(), dtype=tf.int32, name='b')
Argument #3
DType: bool
Value: True
Function Name: 'func2'
Option #1
Callable with:
Argument #1
x: TensorSpec(shape=(2, 2), dtype=tf.float32, name='x')
""".strip() # pylint: enable=line-too-long
    self.maxDiff = None  # Produce a useful error msg if the comparison fails
    self.assertMultiLineEqual(output, exp_out)
    self.assertEqual(err.getvalue().strip(), '')

  def testShowAllWithPureConcreteFunction(self):
    """`show --all` lists a saved concrete function (no tf.function wrapper)."""

    class DummyModel(tracking.AutoTrackable):
      """Model with a callable concrete function."""

      def __init__(self):
        function = def_function.function(
            self.multiply,
            input_signature=[
                tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
                tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32)
            ])
        self.pure_concrete_function = function.get_concrete_function()
        super(DummyModel, self).__init__()

      def multiply(self, a, b):
        return a * b

    saved_model_dir = os.path.join(test.get_temp_dir(), 'dummy_model')
    dummy_model = DummyModel()
    save.save(dummy_model, saved_model_dir)
    self.parser = saved_model_cli.create_parser()
    args = self.parser.parse_args(['show', '--dir', saved_model_dir, '--all'])
    with captured_output() as (out, err):
      saved_model_cli.show(args)
    output = out.getvalue().strip()
    exp_out = """MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:
signature_def['__saved_model_init_op']:
The given SavedModel SignatureDef contains the following input(s):
The given SavedModel SignatureDef contains the following output(s):
outputs['__saved_model_init_op'] tensor_info:
dtype: DT_INVALID
shape: unknown_rank
name: NoOp
Method name is:
signature_def['serving_default']:
The given SavedModel SignatureDef contains the following input(s):
inputs['a'] tensor_info:
dtype: DT_FLOAT
shape: ()
name: serving_default_a:0
inputs['b'] tensor_info:
dtype: DT_FLOAT
shape: ()
name: serving_default_b:0
The given SavedModel SignatureDef contains the following output(s):
outputs['output_0'] tensor_info:
dtype: DT_FLOAT
shape: ()
name: PartitionedCall:0
Method name is: tensorflow/serving/predict
Defined Functions:
Function Name: 'pure_concrete_function'
Option #1
Callable with:
Argument #1
a: TensorSpec(shape=(), dtype=tf.float32, name='a')
Argument #2
b: TensorSpec(shape=(), dtype=tf.float32, name='b')
""".strip() # pylint: enable=line-too-long
    self.maxDiff = None  # Produce a useful error msg if the comparison fails
    self.assertMultiLineEqual(output, exp_out)
    self.assertEqual(err.getvalue().strip(), '')

  def testShowCommandTags(self):
    """`show --dir` with no other flags lists the available tag-sets."""
    base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
    self.parser = saved_model_cli.create_parser()
    args = self.parser.parse_args(['show', '--dir', base_path])
    with captured_output() as (out, err):
      saved_model_cli.show(args)
    output = out.getvalue().strip()
    exp_out = 'The given SavedModel contains the following tag-sets:\n\'serve\''
    self.assertMultiLineEqual(output, exp_out)
    self.assertEqual(err.getvalue().strip(), '')

  def testShowCommandSignature(self):
    """`show --tag_set` lists the SignatureDef keys of that MetaGraphDef."""
    base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
    self.parser = saved_model_cli.create_parser()
    args = self.parser.parse_args(
        ['show', '--dir', base_path, '--tag_set', 'serve'])
    with captured_output() as (out, err):
      saved_model_cli.show(args)
    output = out.getvalue().strip()
    exp_header = ('The given SavedModel MetaGraphDef contains SignatureDefs '
                  'with the following keys:')
    exp_start = 'SignatureDef key: '
    exp_keys = [
        '"classify_x2_to_y3"', '"classify_x_to_y"', '"regress_x2_to_y3"',
        '"regress_x_to_y"', '"regress_x_to_y2"', '"serving_default"'
    ]
    # Order of signatures does not matter
    self.assertMultiLineEqual(
        output,
        '\n'.join([exp_header] + [exp_start + exp_key for exp_key in exp_keys]))
    self.assertEqual(err.getvalue().strip(), '')

  def testShowCommandErrorNoTagSet(self):
    """An unknown --tag_set raises RuntimeError."""
    base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
    self.parser = saved_model_cli.create_parser()
    args = self.parser.parse_args(
        ['show', '--dir', base_path, '--tag_set', 'badtagset'])
    with self.assertRaises(RuntimeError):
      saved_model_cli.show(args)

  def testShowCommandInputsOutputs(self):
    """`show --signature_def` prints that signature's inputs and outputs."""
    base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
    self.parser = saved_model_cli.create_parser()
    args = self.parser.parse_args([
        'show', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
        'serving_default'
    ])
    with captured_output() as (out, err):
      saved_model_cli.show(args)
    output = out.getvalue().strip()
    expected_output = (
        'The given SavedModel SignatureDef contains the following input(s):\n'
        ' inputs[\'x\'] tensor_info:\n'
        ' dtype: DT_FLOAT\n shape: (-1, 1)\n name: x:0\n'
        'The given SavedModel SignatureDef contains the following output(s):\n'
        ' outputs[\'y\'] tensor_info:\n'
        ' dtype: DT_FLOAT\n shape: (-1, 1)\n name: y:0\n'
        'Method name is: tensorflow/serving/predict')
    self.assertEqual(output, expected_output)
    self.assertEqual(err.getvalue().strip(), '')

  def testPrintREFTypeTensor(self):
    """Reference dtypes (e.g. DT_FLOAT_REF) are printed by name."""
    ref_tensor_info = meta_graph_pb2.TensorInfo()
    ref_tensor_info.dtype = types_pb2.DT_FLOAT_REF
    with captured_output() as (out, err):
      saved_model_cli._print_tensor_info(ref_tensor_info)
    self.assertTrue('DT_FLOAT_REF' in out.getvalue().strip())
    self.assertEqual(err.getvalue().strip(), '')

  def testInputPreProcessFormats(self):
    """Parses both `--inputs` file specs and `--input_exprs` expressions."""
    input_str = 'input1=/path/file.txt[ab3];input2=file2'
    input_expr_str = 'input3=np.zeros([2,2]);input4=[4,5]'
    input_dict = saved_model_cli.preprocess_inputs_arg_string(input_str)
    input_expr_dict = saved_model_cli.preprocess_input_exprs_arg_string(
        input_expr_str, safe=False)
    self.assertTrue(input_dict['input1'] == ('/path/file.txt', 'ab3'))
    self.assertTrue(input_dict['input2'] == ('file2', None))
    # NOTE(review): leftover debug print; harmless in the test log.
    print(input_expr_dict['input3'])
    self.assertAllClose(input_expr_dict['input3'], np.zeros([2, 2]))
    self.assertAllClose(input_expr_dict['input4'], [4, 5])
    self.assertTrue(len(input_dict) == 2)
    self.assertTrue(len(input_expr_dict) == 2)

  def testInputPreProcessExamplesWithStrAndBytes(self):
    """str and bytes feature values both serialize into bytes_list features."""
    input_examples_str = 'inputs=[{"text":["foo"], "bytes":[b"bar"]}]'
    input_dict = saved_model_cli.preprocess_input_examples_arg_string(
        input_examples_str)
    feature = example_pb2.Example.FromString(input_dict['inputs'][0])
    self.assertProtoEquals(
        """
features {
feature {
key: "bytes"
value {
bytes_list {
value: "bar"
}
}
}
feature {
key: "text"
value {
bytes_list {
value: "foo"
}
}
}
}
""", feature)

  def testInputPreprocessExampleWithCodeInjection(self):
    """Arbitrary code in an `--input_examples` value must be rejected."""
    input_examples_str = 'inputs=os.system("echo hacked")'
    with self.assertRaisesRegex(RuntimeError, 'not a valid python literal.'):
      saved_model_cli.preprocess_input_examples_arg_string(input_examples_str)

  def testInputPreProcessFileNames(self):
    """Windows paths containing ':' and spaces parse correctly."""
    input_str = (r'inputx=C:\Program Files\data.npz[v:0];'
                 r'input:0=c:\PROGRA~1\data.npy')
    input_dict = saved_model_cli.preprocess_inputs_arg_string(input_str)
    self.assertTrue(input_dict['inputx'] == (r'C:\Program Files\data.npz',
                                             'v:0'))
    self.assertTrue(input_dict['input:0'] == (r'c:\PROGRA~1\data.npy', None))

  def testInputPreProcessErrorBadFormat(self):
    """Malformed `--inputs` / `--input_exprs` strings raise RuntimeError."""
    input_str = 'inputx=file[[v1]v2'
    with self.assertRaises(RuntimeError):
      saved_model_cli.preprocess_inputs_arg_string(input_str)
    input_str = 'inputx:file'
    with self.assertRaises(RuntimeError):
      saved_model_cli.preprocess_inputs_arg_string(input_str)
    input_str = 'inputx:np.zeros((5))'
    with self.assertRaisesRegex(RuntimeError, 'format is incorrect'):
      saved_model_cli.preprocess_input_exprs_arg_string(input_str, safe=False)
| |
"""Base class for modeling portfolio and measuring its performance.
The job of the `Portfolio` class is to create a series of positions allocated
against a cash component, produce an equity curve, incorporate basic transaction costs
and produce a set of statistics about its performance. In particular it outputs
position/profit metrics and drawdown information.
## Workflow
`Portfolio` class can be instantiated using main price of the asset, initial capital,
records of filled orders, and cash and shares balances (as a result of filling orders).
It also accepts many other parameters such as annualization factor.
* Order records are used to track trades and positions, and to measure their performance.
* Main price, initial capital, and balances are used to compute risk and performance metrics.
To simplify creation of order records and keeping track of balances, it exposes several convenience methods
with prefix `from_`. For example, you can use `Portfolio.from_signals` method to create and fill orders
based on entry and exit signals. Alternatively, you can use `Portfolio.from_order_func` to define
a custom order function. The results are then automatically passed to the constructor method of
`Portfolio` and you will receive a portfolio instance ready to be used for performance measurements.
## Properties
The `Portfolio` class offers numerous properties for measuring the performance of a strategy.
They can be categorized as follows:
* Time series indexed by time, such as `Portfolio.returns`.
* Metrics indexed by columns, such as `Portfolio.total_profit`.
* Group objects with own time series and metrics, such as `Portfolio.positions`.
### Caching
Each property is cached, thus properties can effectively build upon each other, without side effects.
!!! note
Due to caching, `Portfolio` class is meant to be atomic and immutable, thus each public attribute
is marked as read-only. To change any parameter, you need to create a new `Portfolio` instance.
## Indexing
In addition, you can use pandas indexing on the `Portfolio` class itself, which forwards
indexing operation to each `__init__` argument with pandas type:
```python-repl
>>> import vectorbt as vbt
>>> import numpy as np
>>> import pandas as pd
>>> from numba import njit
>>> from datetime import datetime
>>> price = pd.Series([1, 2, 3, 2, 1], index=pd.Index([
... datetime(2020, 1, 1),
... datetime(2020, 1, 2),
... datetime(2020, 1, 3),
... datetime(2020, 1, 4),
... datetime(2020, 1, 5)
... ]), name='a')
>>> orders = pd.DataFrame({
... 'a': [np.inf, 0, 0, 0, 0],
... 'b': [1, 1, 1, 1, -np.inf],
... 'c': [np.inf, -np.inf, np.inf, -np.inf, np.inf]
... }, index=price.index)
>>> portfolio = vbt.Portfolio.from_orders(price, orders, init_capital=100)
>>> portfolio.equity
a b c
2020-01-01 100.0 100.0 100.000000
2020-01-02 200.0 101.0 200.000000
2020-01-03 300.0 103.0 200.000000
2020-01-04 200.0 100.0 133.333333
2020-01-05 100.0 96.0 133.333333
>>> portfolio['a'].equity
2020-01-01 100.0
2020-01-02 200.0
2020-01-03 300.0
2020-01-04 200.0
2020-01-05 100.0
Name: a, dtype: float64
```
!!! note
Changing index (time axis) is not supported."""
import numpy as np
import pandas as pd
from vectorbt import defaults
from vectorbt.utils import checks
from vectorbt.utils.decorators import cached_property
from vectorbt.base import reshape_fns
from vectorbt.base.indexing import PandasIndexer
from vectorbt.base.array_wrapper import ArrayWrapper
from vectorbt.generic import nb as generic_nb
from vectorbt.portfolio import nb
from vectorbt.portfolio.enums import SizeType, AccumulateExitMode, ConflictMode
from vectorbt.records import Orders, Trades, Positions, Drawdowns
def _indexing_func(obj, pd_indexing_func):
    """Perform indexing on `Portfolio`.

    Applies `pd_indexing_func` (e.g. a `loc`/`iloc`/`__getitem__` closure) to
    every pandas-like attribute of `obj` and builds a new `Portfolio` from the
    results. Only column selection is supported; the time axis must not change.
    """
    if obj.wrapper.ndim == 1:
        raise TypeError("Indexing on Series is not supported")
    n_rows = len(obj.wrapper.index)
    n_cols = len(obj.wrapper.columns)
    # Build a frame whose cells hold each column's integer position, run the
    # user's indexing operation on it, then read back which columns survived.
    col_mapper = obj.wrapper.wrap(np.broadcast_to(np.arange(n_cols), (n_rows, n_cols)))
    col_mapper = pd_indexing_func(col_mapper)
    if not pd.Index.equals(col_mapper.index, obj.wrapper.index):
        raise NotImplementedError("Changing index (time axis) is not supported")
    # First row suffices: every row carries the same column positions.
    new_cols = col_mapper.values[0]

    # Array-like params
    def index_arraylike_param(param):
        # Scalar params apply to all columns and pass through unchanged.
        # Per-column params are first broadcast to the column axis of
        # `main_price`, then subset to the selected columns.
        if np.asarray(param).ndim > 0:
            param = reshape_fns.broadcast_to_axis_of(param, obj.main_price, 1)
            param = param[new_cols]
        return param

    factor_returns = obj.factor_returns
    if factor_returns is not None:
        # A benchmark frame must match `main_price` shape before the same
        # indexing operation can be applied to it.
        if checks.is_frame(factor_returns):
            factor_returns = reshape_fns.broadcast_to(factor_returns, obj.main_price)
        factor_returns = pd_indexing_func(factor_returns)

    # Create new Portfolio instance
    return obj.__class__(
        pd_indexing_func(obj.main_price),
        obj.init_capital.iloc[new_cols],
        pd_indexing_func(obj.orders),  # Orders class supports indexing
        pd_indexing_func(obj.cash),
        pd_indexing_func(obj.shares),
        freq=obj.freq,
        year_freq=obj.year_freq,
        levy_alpha=index_arraylike_param(obj.levy_alpha),
        risk_free=index_arraylike_param(obj.risk_free),
        required_return=index_arraylike_param(obj.required_return),
        cutoff=index_arraylike_param(obj.cutoff),
        factor_returns=factor_returns,
        incl_unrealized_stats=obj.incl_unrealized_stats
    )
class Portfolio(PandasIndexer):
"""Class for modeling portfolio and measuring its performance.
Args:
main_price (pandas_like): Main price of the asset.
init_capital (float or pd.Series): The initial capital.
Each element must correspond to a column in `main_price`.
orders (vectorbt.records.orders.Orders): Order records.
cash (pandas_like): Cash held at each time step.
Must have the same metadata as `main_price`.
shares (pandas_like): Shares held at each time step.
Must have the same metadata as `main_price`.
freq (any): Index frequency in case `main_price.index` is not datetime-like.
year_freq (any): Year frequency for working with returns.
levy_alpha (float or array_like): Scaling relation (Levy stability exponent).
Single value or value per column.
risk_free (float or array_like): Constant risk-free return throughout the period.
Single value or value per column.
required_return (float or array_like): Minimum acceptance return of the investor.
Single value or value per column.
cutoff (float or array_like): Decimal representing the percentage cutoff for the
bottom percentile of returns.
Single value or value per column.
factor_returns (array_like): Benchmark return to compare returns against. Will broadcast.
By default it's `None`, but it's required by some return-based metrics.
incl_unrealized_stats (bool): Whether to include unrealized metrics in `Portfolio.stats`.
!!! note
Use class methods with `from_` prefix to build a portfolio.
The `__init__` method is reserved for indexing purposes.
All array objects must have the same metadata as `main_price`."""
def __init__(self, main_price, init_capital, orders, cash, shares, freq=None,
             year_freq=None, levy_alpha=None, risk_free=None, required_return=None,
             cutoff=None, factor_returns=None, incl_unrealized_stats=False):
    """Validate and store the portfolio inputs.

    See the class docstring for argument semantics. All pandas-like arguments
    must share metadata with `main_price`; unspecified parameters fall back to
    `vectorbt.defaults.portfolio`.

    Raises:
        ValueError: if neither the index nor `freq` yields a frequency, or if
            `year_freq` cannot be determined.
    """
    # Perform checks
    checks.assert_type(main_price, (pd.Series, pd.DataFrame))
    if checks.is_frame(main_price):
        # One initial-capital value per column, aligned by column label.
        checks.assert_type(init_capital, pd.Series)
        checks.assert_same(main_price.columns, init_capital.index)
    else:
        checks.assert_ndim(init_capital, 0)
    checks.assert_same_meta(main_price, cash)
    checks.assert_same_meta(main_price, shares)

    # Store passed arguments
    self._main_price = main_price
    self._init_capital = init_capital
    self._orders = orders
    self._cash = cash
    self._shares = shares
    self._incl_unrealized_stats = incl_unrealized_stats

    freq = main_price.vbt(freq=freq).freq
    if freq is None:
        raise ValueError("Couldn't parse the frequency of index. You must set `freq`.")
    self._freq = freq

    year_freq = main_price.vbt.returns(year_freq=year_freq).year_freq
    # BUG FIX: this previously re-tested `freq is None` (always False by this
    # point), so a missing `year_freq` silently slipped through and broke
    # annualized metrics later.
    if year_freq is None:
        raise ValueError("You must set `year_freq`.")
    self._year_freq = year_freq

    # Parameters: fall back to global defaults when not provided.
    self._levy_alpha = defaults.portfolio['levy_alpha'] if levy_alpha is None else levy_alpha
    self._risk_free = defaults.portfolio['risk_free'] if risk_free is None else risk_free
    self._required_return = defaults.portfolio['required_return'] if required_return is None else required_return
    self._cutoff = defaults.portfolio['cutoff'] if cutoff is None else cutoff
    self._factor_returns = defaults.portfolio['factor_returns'] if factor_returns is None else factor_returns

    # Supercharge
    PandasIndexer.__init__(self, _indexing_func)
    self.wrapper = ArrayWrapper.from_obj(main_price, freq=freq)
# ############# Class methods ############# #
@classmethod
def from_signals(cls, main_price, entries, exits, size=np.inf, size_type=SizeType.Shares,
entry_price=None, exit_price=None, init_capital=None, fees=None, fixed_fees=None,
slippage=None, accumulate=None, accumulate_exit_mode=None, conflict_mode=None,
broadcast_kwargs={}, freq=None, **kwargs):
"""Build portfolio from entry and exit signals.
For each signal in `entries`, buys `size` of shares for `entry_price` to enter
the position. For each signal in `exits`, sells everything for `exit_price`
to exit the position. Accumulation of orders is disabled by default.
For more details, see `vectorbt.portfolio.nb.simulate_from_signals_nb`.
Args:
main_price (pandas_like): Main price of the asset, such as close. Will broadcast.
entries (array_like): Boolean array of entry signals. Will broadcast.
exits (array_like): Boolean array of exit signals. Will broadcast.
size (float or array_like): The amount of shares to order. Will broadcast.
To buy/sell everything, set the size to `np.inf`.
size_type (int or array_like): See `vectorbt.portfolio.enums.SizeType`.
Only `SizeType.Shares` and `SizeType.Cash` are supported.
entry_price (array_like): Entry price. Defaults to `main_price`. Will broadcast.
exit_price (array_like): Exit price. Defaults to `main_price`. Will broadcast.
init_capital (float or array_like): The initial capital. Will broadcast.
Allowed is either a single value or value per column.
fees (float or array_like): Fees in percentage of the order value. Will broadcast.
fixed_fees (float or array_like): Fixed amount of fees to pay per order. Will broadcast.
slippage (float or array_like): Slippage in percentage of price. Will broadcast.
accumulate (bool): If `accumulate` is `True`, entering the market when already
in the market will be allowed to increase a position.
accumulate_exit_mode: See `vectorbt.portfolio.enums.AccumulateExitMode`.
conflict_mode: See `vectorbt.portfolio.enums.ConflictMode`.
broadcast_kwargs: Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.
freq (any): Index frequency in case `main_price.index` is not datetime-like.
**kwargs: Keyword arguments passed to the `__init__` method.
For defaults, see `vectorbt.defaults.portfolio`.
All time series will be broadcasted together using `vectorbt.base.reshape_fns.broadcast`.
At the end, they will have the same metadata.
Example:
Portfolio from various signal sequences:
```python-repl
>>> entries = pd.DataFrame({
... 'a': [True, False, False, False, False],
... 'b': [True, False, True, False, True],
... 'c': [True, True, True, True, True]
... }, index=index)
>>> exits = pd.DataFrame({
... 'a': [False, False, False, False, False],
... 'b': [False, True, False, True, False],
... 'c': [True, True, True, True, True]
... }, index=index)
>>> portfolio = vbt.Portfolio.from_signals(
... price, | |
<reponame>snapwire-media/arion<filename>tests/functional/test.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import unittest
import json
from subprocess import Popen, PIPE
class TestArion(unittest.TestCase):
# Path to the arion binary built by the project's CMake build.
ARION_PATH = '../../build/arion'
# Images for general purpose testing (leave off file:// for testing)
IMAGE_1_PATH = '../../examples/images/image-1.jpg'
IMAGE_2_PATH = '../../examples/images/image-2.jpg'
IMAGE_3_PATH = '../../examples/images/image-3.jpg'
# Images for JPG orientation tests (include file:// for testing)
# Images from https://github.com/recurser/exif-orientation-examples
# Copyright (c) 2010 <NAME>.
# One image per EXIF orientation value 1-8.
LANDSCAPE_1_PATH = 'file://../images/Landscape_1.jpg'
LANDSCAPE_2_PATH = 'file://../images/Landscape_2.jpg'
LANDSCAPE_3_PATH = 'file://../images/Landscape_3.jpg'
LANDSCAPE_4_PATH = 'file://../images/Landscape_4.jpg'
LANDSCAPE_5_PATH = 'file://../images/Landscape_5.jpg'
LANDSCAPE_6_PATH = 'file://../images/Landscape_6.jpg'
LANDSCAPE_7_PATH = 'file://../images/Landscape_7.jpg'
LANDSCAPE_8_PATH = 'file://../images/Landscape_8.jpg'
# Directory where test operations write their result images.
OUTPUT_IMAGE_PATH = 'output/'
# -------------------------------------------------------------------------------
# Helper function for calling Arion
# -------------------------------------------------------------------------------
def call_arion(self, input_url, operations, *additional_root_params):
  """Run the arion binary on `input_url` with the given operation list.

  Args:
    input_url: Source image URL/path passed as `input_url`.
    operations: List of operation dicts forming the `operations` array.
    *additional_root_params: Optional single dict merged into the root of
        the JSON request (e.g. `{'write_meta': ...}`).

  Returns:
    The parsed JSON dict emitted by arion on stdout.
  """
  input_dict = {'input_url': input_url,
                'correct_rotation': True,
                'operations': operations}
  # Idiomatic truthiness check (was `if (additional_root_params):`).
  if additional_root_params:
    input_dict = self.merge_two_dicts(input_dict, additional_root_params[0])
  input_string = json.dumps(input_dict, separators=(',', ':'))
  p = Popen([self.ARION_PATH, "--input", input_string], stdout=PIPE)
  cmd_output = p.communicate()
  # communicate() returns (stdout, stderr); arion writes its JSON result
  # to stdout.
  return json.loads(cmd_output[0])
# -------------------------------------------------------------------------------
# Helper function for reading data back about an image
# -------------------------------------------------------------------------------
def read_image(self, input_url):
  """Fetch metadata for `input_url` via a single read_meta operation."""
  read_meta_op = {
      'type': 'read_meta',
      'params': {
          'info': True
      }
  }
  return self.call_arion(input_url, [read_meta_op])
# -------------------------------------------------------------------------------
# Helper function for copying an image
# -------------------------------------------------------------------------------
def copy_image(self, input_url, output_url):
  """Copy the image at `input_url` to `output_url` via arion's copy op."""
  copy_op = {
      'type': 'copy',
      'params': {
          'output_url': output_url
      }
  }
  return self.call_arion(input_url, [copy_op])
# -------------------------------------------------------------------------------
# Helper function for checking for successful output
# -------------------------------------------------------------------------------
def verifySuccess(self, output, expected_width=-1, expected_height=-1):
  """Assert `output` reports one operation that succeeded.

  Dimension checks are skipped when the expected value is negative.
  """
  self.assertTrue(output['result'])
  self.assertEqual(output['failed_operations'], 0)
  self.assertEqual(output['total_operations'], 1)
  for key, expected in (('width', expected_width),
                        ('height', expected_height)):
    if expected >= 0:
      self.assertEqual(output[key], expected)
# -------------------------------------------------------------------------------
# Helper function for checking for failed output
# -------------------------------------------------------------------------------
def verifyFailure(self, output):
  """Assert `output` reports exactly one operation and that it failed."""
  self.assertEqual(output['total_operations'], 1)
  self.assertEqual(output['failed_operations'], 1)
  self.assertFalse(output['result'])
# -------------------------------------------------------------------------------
# Helper function for creating output url
# -------------------------------------------------------------------------------
def outputUrlHelper(self, filename):
  """Return `filename` prefixed with the shared output directory."""
  return ''.join((self.OUTPUT_IMAGE_PATH, filename))
# -------------------------------------------------------------------------------
# Helper function for testing fill operation
# -------------------------------------------------------------------------------
def imageResizeHelper(self, srcPath, outputPrefix, options):
  """Run one resize described by `options` and verify the written image.

  `options` must provide 'width', 'height', 'type' and 'gravity'. The output
  filename encodes those values so repeated runs do not collide.
  """
  width = options['width']
  height = options['height']
  resize_type = options['type']
  outputUrl = self.outputUrlHelper(
      str(outputPrefix) + str(width) + 'x' + str(height) +
      '_' + str(resize_type) + '.jpg')
  resize_operation = {
      'type': 'resize',
      'params': {
          'width': width,
          'height': height,
          'type': resize_type,
          'gravity': options['gravity'],
          'output_url': outputUrl
      }
  }
  output = self.call_arion(srcPath, [resize_operation])
  self.verifySuccess(output)
  # -----------------------------
  # Now read back image data
  # -----------------------------
  readback = self.read_image(outputUrl)
  self.verifySuccess(readback, width, height)
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
def testImageFormats(self):
  """Resize + adaptive watermark succeeds for each input format.

  The original body repeated the identical operation three times
  (copy-paste); it is now data-driven over (input, output) pairs, with the
  parameters unchanged.
  """
  watermark_url = '../images/watermark.png'
  # One (source image, output filename) pair per supported input format:
  # JPG, PNG, TIFF.
  format_cases = [
      ('../images/small_input.jpg', 'test_format_jpg.jpg'),
      ('../images/small_input.png', 'test_format_png.jpg'),
      ('../images/small_input.tif', 'test_format_tif.jpg'),
  ]
  for input_url, output_name in format_cases:
    resize_operation = {
        'type': 'resize',
        'params': {
            'width': 100,
            'height': 400,
            'type': 'width',
            'quality': 92,
            'watermark_url': watermark_url,
            'watermark_type': 'adaptive',
            'watermark_min': 0.3,
            'watermark_max': 1.0,
            'output_url': self.outputUrlHelper(output_name)
        }
    }
    output = self.call_arion(input_url, [resize_operation])
    self.verifySuccess(output)
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
def testWatermark(self):
    """Watermark rendering across relative watermark/output sizes.

    Covers: standard vs adaptive compositing at 1:1, and adaptive
    compositing when the output is smaller, larger, wider-but-shorter and
    narrower-but-taller than the watermark image.

    Note: the original version of the last case listed the 'watermark_url'
    key twice in the params dict (a silent duplicate-key bug -- the second
    entry simply overwrote the first); the duplicate is removed here.
    """
    # (input, watermark, output name, width, height, watermark params)
    cases = [
        # Standard 1:1
        ('../images/watermark_test_input.jpg', '../images/watermark.png',
         'test_watermark_1_standard.jpg', 400, 400,
         {'watermark_type': 'standard', 'watermark_amount': 0.1}),
        # Adaptive 1:1
        ('../images/watermark_test_input.jpg', '../images/watermark.png',
         'test_watermark_2_adaptive.jpg', 400, 400,
         {'watermark_type': 'adaptive',
          'watermark_min': 0.1, 'watermark_max': 0.5}),
        # Output size is smaller than watermark
        (self.IMAGE_1_PATH, '../images/watermark2.png',
         'test_watermark_2_photo.jpg', 200, 200,
         {'watermark_type': 'adaptive',
          'watermark_min': 0.1, 'watermark_max': 0.5}),
        # Output size is larger than watermark
        (self.IMAGE_1_PATH, '../images/watermark2.png',
         'test_watermark_3_photo.jpg', 1000, 1000,
         {'watermark_type': 'adaptive',
          'watermark_min': 0.1, 'watermark_max': 0.5}),
        # Output width is larger than watermark, but height is smaller
        (self.IMAGE_1_PATH, '../images/watermark2.png',
         'test_watermark_4_photo.jpg', 1000, 200,
         {'watermark_type': 'adaptive',
          'watermark_min': 0.1, 'watermark_max': 0.5}),
        # Output height is larger than watermark, but width is smaller
        (self.IMAGE_1_PATH, '../images/watermark2.png',
         'test_watermark_5_photo.jpg', 200, 1000,
         {'watermark_type': 'adaptive',
          'watermark_min': 0.1, 'watermark_max': 0.5}),
    ]
    for input_url, watermark_url, output_name, width, height, wm in cases:
        params = {
            'width': width,
            'height': height,
            'type': 'fill',
            'quality': 92,
            'watermark_url': watermark_url,
            'output_url': self.outputUrlHelper(output_name),
        }
        params.update(wm)
        operations = [{'type': 'resize', 'params': params}]
        output = self.call_arion(input_url, operations)
        self.verifySuccess(output)
# -------------------------------------------------------------------------------
# Here we have a tall source image and we are always cropping a tall portion at
# the center of the image
# -------------------------------------------------------------------------------
def test100x200TallCenter(self):
    """Tall source image; always crop a tall portion at the image center.

    Three cases: a plain crop, a shrinking crop and an enlarging crop.
    """
    src_path = "file://../images/100x200_tall_center.png"
    output_prefix = "100x200_tall_center_to_"
    for gravity, width, height in (('center', 50, 200),   # just a crop
                                   ('north', 25, 100),    # shrink
                                   ('south', 100, 400)):  # enlarge
        opts = {
            'type': 'fill',
            'gravity': gravity,
            'width': width,
            'height': height,
        }
        self.imageResizeHelper(src_path, output_prefix, opts)
# -------------------------------------------------------------------------------
# Here we have a tall source image and we are always cropping a tall portion at
# the left of the image
# -------------------------------------------------------------------------------
def test100x200TallLeft(self):
    """Tall source image; always crop a tall portion at the left edge.

    Three cases: a plain crop, a shrinking crop and an enlarging crop,
    each anchored to a west-side gravity.
    """
    src_path = "file://../images/100x200_tall_left.png"
    output_prefix = "100x200_tall_left_to_"
    for gravity, width, height in (('west', 50, 200),        # just a crop
                                   ('northwest', 25, 100),   # shrink
                                   ('southwest', 100, 400)):  # enlarge
        opts = {
            'type': 'fill',
            'gravity': gravity,
            'width': width,
            'height': height,
        }
        self.imageResizeHelper(src_path, output_prefix, opts)
# -------------------------------------------------------------------------------
# Here we have a tall source image and we are always cropping a tall portion
# at the right of the image
# -------------------------------------------------------------------------------
def test100x200TallRight(self):
    """Tall source image; always crop a tall portion at the right edge.

    Three cases: a plain crop, a shrinking crop and an enlarging crop,
    each anchored to an east-side gravity.
    """
    src_path = "file://../images/100x200_tall_right.png"
    output_prefix = "100x200_tall_right_to_"
    for gravity, width, height in (('east', 50, 200),        # just a crop
                                   ('northeast', 25, 100),   # shrink
                                   ('southeast', 100, 400)):  # enlarge
        opts = {
            'type': 'fill',
            'gravity': gravity,
            'width': width,
            'height': height,
        }
        self.imageResizeHelper(src_path, output_prefix, opts)
# -------------------------------------------------------------------------------
# Here we have a tall source image and we are always cropping a wide portion
# at the bottom of the image
# -------------------------------------------------------------------------------
def test100x200WideBottom(self):
    """Tall source image; always crop a wide portion at the bottom edge.

    Three cases: a plain crop, a shrinking crop and an enlarging crop,
    each anchored to a south-side gravity.
    """
    src_path = "file://../images/100x200_wide_bottom.png"
    output_prefix = "100x200_wide_bottom_to_"
    for gravity, width, height in (('south', 100, 50),       # just a crop
                                   ('southeast', 50, 25),    # shrink
                                   ('southwest', 200, 100)):  # enlarge
        opts = {
            'type': 'fill',
            'gravity': gravity,
            'width': width,
            'height': height,
        }
        self.imageResizeHelper(src_path, output_prefix, opts)
# -------------------------------------------------------------------------------
# Here we have a tall source image and we are always cropping a wide portion
# at the bottom of the image
# -------------------------------------------------------------------------------
def test100x200WideCenter(self):
srcPath = "file://../images/100x200_wide_center.png"
outputPrefix = "100x200_wide_center_to_"
# Just a crop, take the bottom
opts = {
'type': 'fill',
'gravity': 'center',
'width': 100,
'height': 50,
}
self.imageResizeHelper(srcPath, outputPrefix, opts)
# Shrink, take the bottom
opts = {
'type': 'fill',
'gravity': 'east',
'width': 50,
'height': 25,
}
self.imageResizeHelper(srcPath, outputPrefix, opts)
# Enlarge, take | |
<filename>tensorflow/python/autograph/pyct/cfg.py<gh_stars>1-10
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Control flow graph (CFG) structure for Python AST representation.
The CFG is a digraph with edges representing valid control flow. Each
node is associated with exactly one AST node, but not all AST nodes may have
a corresponding CFG counterpart.
Once built, the CFG itself is immutable, but the values it holds need not be;
they are usually annotated with information extracted by walking the graph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import weakref
from enum import Enum
# pylint:disable=g-bad-import-order
import gast
# pylint:enable=g-bad-import-order
from tensorflow.python.autograph.pyct import compiler
class Node(object):
  """A node in the CFG.

  Although new instances of this class are mutable, the objects that a user
  finds in the CFG are typically not.

  The nodes represent edges in the CFG graph, and maintain pointers to allow
  efficient walking in both forward and reverse order. The following property
  holds for all nodes: "child in node.next" iff "node in child.prev".

  Attributes:
    next: FrozenSet[Node, ...], the nodes that follow this node, in control
      flow order
    prev: FrozenSet[Node, ...], the nodes that precede this node, in reverse
      control flow order
    ast_node: ast.AST, the AST node corresponding to this CFG node
  """

  def __init__(self, next_, prev, ast_node):
    self.next = next_
    self.prev = prev
    self.ast_node = ast_node

  def freeze(self):
    """Makes the successor set immutable; predecessors become weak refs."""
    self.next = frozenset(self.next)
    # All CFG nodes have identical life spans because the graph owns them, so
    # holding predecessors weakly is safe and avoids strong reference cycles.
    self.prev = weakref.WeakSet(self.prev)

  def __repr__(self):
    ast_node = self.ast_node
    if isinstance(ast_node, gast.FunctionDef):
      return 'def %s' % ast_node.name
    if isinstance(ast_node, gast.withitem):
      return compiler.ast_to_source(ast_node.context_expr).strip()
    return compiler.ast_to_source(ast_node).strip()
class Graph(
    collections.namedtuple(
        'Graph',
        ['entry', 'exit', 'error', 'index', 'stmt_prev', 'stmt_next'])):
  """A Control Flow Graph.

  The CFG maintains an index to allow looking up a CFG node by the AST node to
  which it is associated. The index can also be enumerated in top-down, depth
  first order.

  Walking the graph in forward or reverse order is supported by double
  parent-child links.

  Note: the error nodes are not wired to their corresponding finally guards,
  because these are shared, and wiring them would create a reverse path from
  normal control flow into the error nodes, which we want to avoid.

  The graph also maintains edges corresponding to higher level statements
  like for-else loops. A node is considered successor of a statement if there
  is an edge from a node that is lexically a child of that statement to a node
  that is not. Statement predecessors are analogously defined.

  Attributes:
    entry: Node, the entry node
    exit: FrozenSet[Node, ...], the exit nodes
    error: FrozenSet[Node, ...], nodes that exit due to an explicitly raised
      error (errors propagated from function calls are not accounted)
    index: Dict[ast.Node, Node], mapping AST nodes to the respective CFG
      node
    stmt_prev: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST
      nodes to their predecessor CFG nodes
    stmt_next: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST
      nodes to their successor CFG nodes
  """

  def __repr__(self):
    # Renders the graph in Graphviz DOT syntax: one vertex per CFG node,
    # identified by the node object's id() and labeled with its repr, followed
    # by one directed edge per forward link. Handy for visual debugging.
    result = 'digraph CFG {\n'
    for node in self.index.values():
      result += '  %s [label="%s"];\n' % (id(node), node)
    for node in self.index.values():
      for next_ in node.next:
        result += '  %s -> %s;\n' % (id(node), id(next_))
    result += '}'
    return result
class _WalkMode(Enum):
  # Traversal direction used by GraphVisitor._visit_internal: FORWARD follows
  # node.next from the entry node; REVERSE follows node.prev from the exits.
  FORWARD = 1
  REVERSE = 2
# TODO(mdan): Rename to DataFlowAnalyzer.
# TODO(mdan): Consider specializations that use gen/kill/transfer abstractions.
class GraphVisitor(object):
  """Base class for a CFG visitors.

  This implementation is not thread safe.

  The visitor has some facilities to simplify dataflow analyses. In particular,
  it allows revisiting the nodes at the decision of the subclass. This can be
  used to visit the graph until the state reaches a fixed point.

  For more details on dataflow analysis, see
  https://www.seas.harvard.edu/courses/cs252/2011sp/slides/Lec02-Dataflow.pdf

  Note: the literature generally suggests visiting successor nodes only when the
  state of the current node changed, regardless of whether that successor has
  ever been visited. This implementation visits every successor at least once.

  Attributes:
    graph: Graph
    in_: Dict[Node, Any], stores node-keyed state during a visit
    out: Dict[Node, Any], stores node-keyed state during a visit
  """

  def __init__(self, graph):
    self.graph = graph
    self.reset()

  def init_state(self, node):
    """State initialization function. Optional to overload.

    An in/out state slot will be created for each node in the graph. Subclasses
    must overload this to control what that is initialized to.

    Args:
      node: Node
    """
    raise NotImplementedError('Subclasses must implement this.')

  def visit_node(self, node):
    """Visitor function.

    Args:
      node: Node
    Returns:
      bool, whether the node should be revisited; subclasses can visit every
        reachable node exactly once by always returning False
    """
    raise NotImplementedError('Subclasses must implement this.')

  def reset(self):
    # Fresh per-node state slots; the values come from the subclass's
    # init_state.
    self.in_ = {
        node: self.init_state(node) for node in self.graph.index.values()
    }
    self.out = {
        node: self.init_state(node) for node in self.graph.index.values()
    }

  def _visit_internal(self, mode):
    """Visits the CFG, breadth-first, in the direction given by mode."""
    assert mode in (_WalkMode.FORWARD, _WalkMode.REVERSE)
    # A deque gives O(1) pops from the front; the original list.pop(0) was
    # O(n) per pop. The FIFO visit order is unchanged.
    if mode == _WalkMode.FORWARD:
      open_ = collections.deque([self.graph.entry])
    else:
      open_ = collections.deque(self.graph.exit)
    closed = set()
    while open_:
      node = open_.popleft()
      closed.add(node)
      should_revisit = self.visit_node(node)
      children = node.next if mode == _WalkMode.FORWARD else node.prev
      for next_ in children:
        # Unvisited nodes are always queued; visited ones only when the
        # subclass requested a revisit (e.g. until a fixed point).
        if should_revisit or next_ not in closed:
          open_.append(next_)

  def visit_forward(self):
    self._visit_internal(_WalkMode.FORWARD)

  def visit_reverse(self):
    self._visit_internal(_WalkMode.REVERSE)
class GraphBuilder(object):
"""Builder that constructs a CFG from a given AST.
This GraphBuilder facilitates constructing the DAG that forms the CFG when
nodes
are supplied in lexical order (i.e., top-down, depth first). Under these
conditions, it supports building patterns found in typical structured
programs.
This builder ignores the flow generated by exceptions, which are assumed to
always be catastrophic and present purely for diagnostic purposes (e.g. to
print debug information). Statements like raise and try/catch sections are
allowed and will generate control flow edges, but ordinaty statements are
assumed not to raise exceptions.
Finally sections are also correctly interleaved between break/continue/return
nodes and their subsequent statements.
Important concepts:
* nodes - nodes refer refer to CFG nodes; AST nodes are qualified explicitly
* leaf set - since the graph is constructed gradually, a leaf set maintains
the CFG nodes that will precede the node that the builder expects to
receive next; when an ordinary node is added, it is connected to the
existing leaves and it in turn becomes the new leaf
* jump nodes - nodes that should generate edges other than what
ordinary nodes would; these correspond to break, continue and return
statements
* sections - logical delimiters for subgraphs that require special
edges; there are various types of nodes, each admitting various
types of jump nodes; sections are identified by their corresponding AST
node
"""
# TODO(mdan): Perhaps detail this in a markdown doc.
# TODO(mdan): Add exception support.
  def __init__(self, parent_ast_node):
    """Creates a builder for the CFG rooted at the given AST node.

    Args:
      parent_ast_node: ast.AST, the node under which the graph is built
        (typically a function definition).
    """
    self.reset()
    self.parent = parent_ast_node
def reset(self):
"""Resets the state of this factory."""
self.head = None
self.errors = set()
self.node_index = {}
# TODO(mdan): Too many primitives. Use classes.
self.leaves = set()
# Note: This mechanism requires that nodes are added in lexical order (top
# to bottom, depth first).
self.active_stmts = set()
self.owners = {} # type: Set[any]
self.forward_edges = set() # type: Tuple[Node, Node] # (from, to)
self.finally_sections = {}
# Dict values represent (entry, exits)
self.finally_section_subgraphs = {
} # type: Dict[ast.AST, Tuple[Node, Set[Node]]]
# Whether the guard section can be reached from the statement that precedes
# it.
self.finally_section_has_direct_flow = {}
# Finally sections that await their first node.
self.pending_finally_sections = set()
# Exit jumps keyed by the section they affect.
self.exits = {}
# The entry of loop sections, keyed | |
#!/usr/bin/python
#
# jvm-blocking-monitor.py Monitor JVM threads and prints stacktraces for long blocking threads.
# For Linux, uses BCC, eBPF.
#
# USAGE: jvm-blocking-monitor.py [-h] [-p PID | -u | -k] [-U | -K]
#
# Copyright 2021 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License")
#
#
# This program includes code taken from offcputime of bcc: https://github.com/iovisor/bcc which is licensed as follows.
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 13-Jan-2016 <NAME> Created this.
from __future__ import print_function
from bcc import BPF
from sys import stderr, stdout
from time import sleep, strftime, time, localtime
import argparse
import errno
import signal
import os
import stat
import json
from collections import namedtuple
import tempfile
from subprocess import Popen
import logging
import logging.handlers
# arg validation
def positive_int(val):
    """argparse type function: parses val as a non-negative integer.

    Raises argparse.ArgumentTypeError if val is not an integer or is negative.
    """
    try:
        parsed = int(val)
    except ValueError:
        raise argparse.ArgumentTypeError("must be an integer")
    if parsed < 0:
        raise argparse.ArgumentTypeError("must be positive")
    return parsed
def positive_nonzero_int(val):
    """argparse type function: parses val as a strictly positive integer."""
    parsed = positive_int(val)
    if not parsed:
        raise argparse.ArgumentTypeError("must be nonzero")
    return parsed
def stack_id_err(stack_id):
    """Returns True when a get_stackid result is a real error.

    get_stackid returns a negative errno on failure. -EFAULT normally just
    means the stack trace is not available (such as asking for a kernel stack
    while in userspace code), so it is not treated as an error.
    """
    if stack_id >= 0:
        return False
    return stack_id != -errno.EFAULT
# arguments
examples = """examples:
    ./jvm-blocking-monitor.py -p JVM_PID -m 1000000 # monitor threads that blocks more than 10 seconds
"""
# NOTE(review): 1000000 us is 1 second, but the example's inline comment says
# "10 seconds" -- confirm which is intended.
parser = argparse.ArgumentParser(
    description="Monitor JVM threads and prints stacktraces for long blocking threads",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
# Note: this script provides --pid and --tid flags but their arguments are
# referred to internally using kernel nomenclature: TGID and PID.
parser.add_argument("-p", "--pid", metavar="PID", dest="tgid",
    help="trace this PID only", type=positive_int, required=True)
stack_group = parser.add_mutually_exclusive_group()
stack_group.add_argument("-U", "--user-stacks-only", action="store_true",
    help="show stacks from user space only (no kernel space stacks)")
stack_group.add_argument("-K", "--kernel-stacks-only", action="store_true",
    help="show stacks from kernel space only (no user space stacks)")
parser.add_argument("--stack-storage-size", default=1024,
    type=positive_nonzero_int,
    help="the number of unique stack traces that can be stored and "
    "displayed (default 1024)")
parser.add_argument("-m", "--min-block-time", default=1,
    type=positive_nonzero_int,
    help="the amount of time in microseconds over which we " +
    "store traces (default 1)")
parser.add_argument("-M", "--max-block-time", default=(1 << 64) - 1,
    type=positive_nonzero_int,
    help="the amount of time in microseconds under which we " +
    "store traces (default U64_MAX)")
parser.add_argument("--state", type=positive_int,
    help="filter on this thread state bitmask (eg, 2 == TASK_UNINTERRUPTIBLE" +
    ") see include/linux/sched.h")
parser.add_argument("--kernel3x", action="store_true",
    help="3.x kernel mode. Signal for JVMTI agent will be sent from user process + some kprobe function signature adjust")
parser.add_argument("-o", "--output", action="store", default="-",
    help="Base path of the file to output events")
parser.add_argument("--ebpf", action="store_true",
    help=argparse.SUPPRESS)
args = parser.parse_args()
debug = 0
# define BPF program
# The C template below contains placeholders (THREAD_FILTER, STATE_FILTER,
# MINBLOCK_US_VALUE, FN_ONCPU, ...) that are textually substituted further
# down, since BPF programs cannot take runtime parameters directly.
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#define MINBLOCK_US MINBLOCK_US_VALUEULL
#define MAXBLOCK_US MAXBLOCK_US_VALUEULL
struct event_t {
    u32 pid;
    u32 tgid;
    int user_stack_id;
    int kernel_stack_id;
    char name[TASK_COMM_LEN];
    u64 offtime;
    u64 t_start;
    u64 t_end;
};
BPF_HASH(start, u32);
BPF_STACK_TRACE(stack_traces, STACK_STORAGE_SIZE);
BPF_PERF_OUTPUT(events);
FN_ONCPU {
    u32 pid = prev->pid;
    u32 tgid = prev->tgid;
    u64 ts, *tsp, t_start;
    // record previous thread sleep time
    if ((THREAD_FILTER) && (STATE_FILTER)) {
        ts = bpf_ktime_get_ns();
        start.update(&pid, &ts);
    }
    // get the current thread's start time
    pid = bpf_get_current_pid_tgid();
    tgid = bpf_get_current_pid_tgid() >> 32;
    tsp = start.lookup(&pid);
    if (!tsp) {
        return 0; // missed start or filtered
    }
    t_start = *tsp;
    // calculate current thread's delta time
    u64 t_end = bpf_ktime_get_ns();
    start.delete(&pid);
    if (!(THREAD_FILTER)) {
        // There's a possibility such a task id that previously belonged to tgid = 1234
        // is now re-used and is a task id of a different process.
        return 0;
    }
    if (t_start > t_end) {
        return 0;
    }
    u64 delta = t_end - t_start;
    delta = delta / 1000;
    if ((delta < MINBLOCK_US) || (delta > MAXBLOCK_US)) {
        return 0;
    }
    // create and submit an event
    struct event_t event = {};
    event.pid = pid;
    event.tgid = tgid;
    event.user_stack_id = USER_STACK_GET;
    event.kernel_stack_id = KERNEL_STACK_GET;
    bpf_get_current_comm(&event.name, sizeof(event.name));
    event.offtime = delta;
    event.t_start = t_start;
    event.t_end = t_end;
    events.perf_submit(ctx, &event, sizeof(event));
    // Signal target thread for taking call trace
    SEND_SIGNAL_TO_TASK;
    return 0;
}
"""
# set thread filter
thread_context = "PID %d" % args.tgid
thread_filter = 'tgid == %d' % args.tgid
if args.state == 0:
    state_filter = 'prev->state == 0'
elif args.state:
    # these states are sometimes bitmask checked
    state_filter = 'prev->state & %d' % args.state
else:
    # No --state given: match any thread state.
    state_filter = '1'
bpf_text = bpf_text.replace('THREAD_FILTER', thread_filter)
bpf_text = bpf_text.replace('STATE_FILTER', state_filter)
# set stack storage size
bpf_text = bpf_text.replace('STACK_STORAGE_SIZE', str(args.stack_storage_size))
bpf_text = bpf_text.replace('MINBLOCK_US_VALUE', str(args.min_block_time))
bpf_text = bpf_text.replace('MAXBLOCK_US_VALUE', str(args.max_block_time))
# In --kernel3x mode the kprobe handler takes an extra struct rq * argument
# (see the --kernel3x help text: "some kprobe function signature adjust").
bpf_text = bpf_text.replace('FN_ONCPU',
    "struct rq;\nint oncpu(struct pt_regs *ctx, struct rq *rq, struct task_struct *prev)"
    if args.kernel3x else
    "int oncpu(struct pt_regs *ctx, struct task_struct *prev)");
# 27 == SIGPROF, matching the user-space fallback in print_event below; on
# 3.x kernels bpf_send_signal_thread() is unavailable, so no in-kernel signal
# is emitted and the signal is sent from this process instead.
bpf_text = bpf_text.replace('SEND_SIGNAL_TO_TASK',
    "" if args.kernel3x else "bpf_send_signal_thread(27)");
# handle stack args
kernel_stack_get = "stack_traces.get_stackid(ctx, 0)"
user_stack_get = "stack_traces.get_stackid(ctx, BPF_F_USER_STACK)"
stack_context = ""
if args.user_stacks_only:
    stack_context = "user"
    kernel_stack_get = "-1"
elif args.kernel_stacks_only:
    stack_context = "kernel"
    user_stack_get = "-1"
else:
    stack_context = "user + kernel"
bpf_text = bpf_text.replace('USER_STACK_GET', user_stack_get)
bpf_text = bpf_text.replace('KERNEL_STACK_GET', kernel_stack_get)
if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        # --ebpf: dump the generated C program and exit without tracing.
        exit()
# Events go through a logger so they can be written either to stdout ("-")
# or to a daily-rotated file keeping 14 days of history.
logger = logging.getLogger("EventsOutput")
logger.setLevel(logging.INFO)
if args.output == "-":
    handler = logging.StreamHandler(stream=stdout)
else:
    handler = logging.handlers.TimedRotatingFileHandler(args.output, when='d', interval=1, backupCount=14)
logger.addHandler(handler)
# initialize BPF
b = BPF(text=bpf_text)
b.attach_kprobe(event="finish_task_switch", fn_name="oncpu")
matched = b.num_open_kprobes()
if matched == 0:
    print("error: 0 functions traced. Exiting.", file=stderr)
    exit(1)
print("Tracing off-CPU time (us) of %s by %s stack" % (thread_context, stack_context))
def pid_alive(pid):
    """Best-effort liveness check for pid using the signal-0 probe.

    os.kill(pid, 0) performs no signalling; it only checks that the target
    exists. The original version returned False on *any* OSError, which
    mis-reports a live process owned by another user as dead: kill(2) fails
    with EPERM when the process exists but we lack permission to signal it.
    errno is compared explicitly (rather than catching PermissionError) to
    stay compatible with Python 2, which this script otherwise supports.
    """
    try:
        os.kill(pid, 0)
    except OSError as e:
        # EPERM: the process exists, we just may not signal it.
        return e.errno == errno.EPERM
    return True
class AsyncProfiler(object):
    """Drives async-profiler's command script around a target JVM process.

    Used as a context manager: entering starts profiling with output streamed
    to a world-writable temporary file; exiting stops profiling (only if the
    JVM is still alive) and removes the temporary file.
    """
    def __init__(self, profiler_cmd_path, pid):
        # profiler_cmd_path: path to the async-profiler launcher script.
        # pid: target JVM process id.
        self.profiler_cmd_path = profiler_cmd_path
        self.pid = pid
        self.tmpfile = None
    def __enter__(self):
        self.start()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
    def output_path(self):
        # Path of the temp file events are streamed to, or None before start().
        if self.tmpfile:
            return self.tmpfile.name
        return None
    def gen_ap_cmd(self, subcommand):
        # Builds the profiler argv: no sampling event ("-e none"), "stream"
        # output format, writing into our temp file, acting on self.pid.
        return [self.profiler_cmd_path, "-e", "none", "-o", "stream", "-f", self.output_path(), subcommand, str(self.pid)]
    def exec_profiler_cmd(self, subcommand):
        """Runs the profiler with the given subcommand; raises on nonzero exit."""
        cmd = self.gen_ap_cmd(subcommand)
        status = Popen(cmd, stdout=open(os.devnull, 'w')).wait()
        if status != 0:
            raise ValueError("profiler.sh exit with error: {}".format(status))
    def start(self):
        # NOTE(review): file mode "rw" is not a valid mode on Python 3 (it
        # raises ValueError); this appears to be Python 2 only -- confirm, or
        # switch to "w+" for Python 3 compatibility.
        self.tmpfile = tempfile.NamedTemporaryFile("rw", prefix="jbm-ap-")
        # World read/write so the JVM (possibly running as another user) can
        # write its stream output into the file.
        os.chmod(self.tmpfile.name, stat.S_IRUSR|stat.S_IWUSR|stat.S_IRGRP|stat.S_IWGRP|stat.S_IROTH|stat.S_IWOTH)
        self.exec_profiler_cmd("start")
    def stop(self):
        if pid_alive(self.pid): # If the target PID has already gone, no need to stop
            self.exec_profiler_cmd("stop")
        self.tmpfile.close()
class AsyncProfileStream(object):
    """Reads JSON-lines events produced by async-profiler's stream output."""

    def __init__(self, path):
        self.fp = open(path, "r")

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def next(self):
        """Returns the next decoded event dict, or None at end of file."""
        raw = self.fp.readline()
        return json.loads(raw) if raw else None

    def close(self):
        self.fp.close()
# Max forward clock skew (ms) between a BPF event and an async-profiler event
# for them to be reported together (see EventQueues.sweep).
EVENT_MATCH_TIME_THRESHOLD_MS = 5000
# How long (ms) to wait for a matching JVM stack before reporting a BPF event
# with only its native stacktrace (see EventQueues.sweep).
EVENT_MATCH_GIVEUP_MS = 30000
# One native stack frame as resolved from the eBPF stack trace table.
StackFrame = namedtuple('StackFrame', ['address', 'symbol'])
# One blocking event emitted by the BPF program (timestamps in ms).
BpfEvent = namedtuple('BpfEvent', ['timestamp', 'pid', 'tid', 'comm', 'duration_us', 'frames'])
def format_time(t):
    """Renders a millisecond epoch timestamp as local 'Y-m-d H:M:S.millis'.

    Note: the millisecond part is not zero-padded (matches original output).
    """
    stamp = strftime("%Y-%m-%d %H:%M:%S", localtime(t / 1000))
    return "{}.{}".format(stamp, t % 1000)
def trash_ap_event(ap_event):
    """Logs a discarded async-profiler event (with its frames) to stderr."""
    # TODO: should go to a separate file?
    header = "{} DISCARDED AP EVENT TID: {}".format(
        format_time(ap_event['timestamp']), ap_event['tid'])
    print(header, file=stderr)
    for i, frame in enumerate(ap_event['frames']):
        print(" {}: [0x{:x}] {}".format(i, frame['methodId'], frame['symbol']), file=stderr)
def report_event(bpf_event, ap_event):
    """Logs one blocking event: native (eBPF) stack plus JVM stack if matched.

    ap_event may be None when no async-profiler event arrived in time; in that
    case only the native stack section is emitted.
    """
    parts = ["=== {} PID: {}, TID: {} ({}), DURATION: {} us\n".format(
        format_time(bpf_event.timestamp), bpf_event.pid, bpf_event.tid,
        bpf_event.comm, bpf_event.duration_us)]
    parts.append("Native Stack:\n")
    for i, frame in enumerate(bpf_event.frames):
        parts.append(" {}: [0x{:x}] {}\n".format(i, frame.address, frame.symbol))
    if ap_event:
        parts.append("--------------------------------------------------------------------------------\n")
        parts.append("JVM Stack (took: {}):\n".format(format_time(ap_event['timestamp'])))
        for i, frame in enumerate(ap_event['frames']):
            if i > 0:
                parts.append("\n")
            parts.append(" {}: [0x{:x}] {}".format(i, frame['methodId'], frame['symbol']))
    logger.info("".join(parts))
class EventQueues(object):
    """Buffers eBPF blocking events and async-profiler (AP) events per TID.

    Events from the two sources arrive independently; sweep() pairs them up
    by thread id and timestamp proximity and reports whatever is ready.
    Both maps go from TID to a list of pending events in arrival order, and
    emptied queues are removed from the maps.
    """
    def __init__(self):
        self.bpf_queues = {}
        self.ap_queues = {}

    @staticmethod
    def get_or_init_queue(queues, key):
        """Returns queues[key], creating an empty list on first access."""
        if key not in queues:
            queues[key] = []
        return queues[key]

    def fill_ap_queue(self, ap_stream):
        """Drains every currently-available event from the AP stream."""
        while True:
            event = ap_stream.next()
            if not event:
                break
            EventQueues.get_or_init_queue(self.ap_queues, event['tid']).append(event)

    def add_bpf_event(self, event):
        """Queues one BpfEvent for later matching in sweep()."""
        EventQueues.get_or_init_queue(self.bpf_queues, event.tid).append(event)

    def sweep(self):
        """Matches BPF events against AP events and reports what is ready.

        A BPF event is reported with the first AP event of the same TID whose
        timestamp falls within EVENT_MATCH_TIME_THRESHOLD_MS after it; AP
        events older than the BPF event are discarded. A BPF event unmatched
        for EVENT_MATCH_GIVEUP_MS is reported with only its native stack.
        """
        now = int(time() * 1000)
        # Fix: iterate over a snapshot of the keys. The loop deletes emptied
        # queues from the dict, and on Python 3 mutating a dict while
        # iterating its live .keys() view raises RuntimeError ("dictionary
        # changed size during iteration"). On Python 2, .keys() already
        # returned a list, so this is a no-op there.
        for tid in list(self.bpf_queues.keys()):
            bpf_queue = self.bpf_queues[tid]
            ap_queue = self.ap_queues.get(tid)
            while bpf_queue:
                bpf_event = bpf_queue[0]
                reported = False
                while ap_queue:
                    ap_event = ap_queue.pop(0)
                    if not ap_queue:
                        # Remove ap queue from per-TID list if it is now empty
                        del self.ap_queues[tid]
                    ts_diff = ap_event['timestamp'] - bpf_event.timestamp
                    if ts_diff < 0:
                        # There should be no corresponding event for this, trash it.
                        trash_ap_event(ap_event)
                    elif ts_diff < EVENT_MATCH_TIME_THRESHOLD_MS:
                        report_event(bpf_event, ap_event)
                        bpf_queue.pop(0)
                        reported = True
                        break
                if reported:
                    continue
                if now - bpf_event.timestamp >= EVENT_MATCH_GIVEUP_MS:
                    # No corresponding event found within the timeout, print the
                    # event only with stacktraces from eBPF
                    bpf_queue.pop(0)
                    report_event(bpf_event, None)
                else:
                    # An event from eBPF has no corresponding event yet, and
                    # hasn't waited enough. Subsequent events must have higher
                    # timestamps, so they will fall into the same situation too.
                    break
            if not bpf_queue:
                # Remove bpf queue from per-TID list if it is now empty
                del self.bpf_queues[tid]
# Handle to the BPF stack-trace table declared in the C program above.
stack_traces = b.get_table("stack_traces")
event_queues = EventQueues()
# NOTE(review): presumably gates a periodic check of stack table occupancy
# every 100 events -- the consuming code is further below; confirm.
STACK_STORAGE_SIZE_CHECK_COUNT = 100
# Running event counter -- incremented by the perf-buffer callback below.
event_count = 0
def print_event(cpu, data, size):
timestamp = int(time() * 1000)
event = b["events"].event(data)
# Signal target thread for taking call trace
if args.kernel3x:
# In kernel before 5.x, bpf_send_signal_thread() isn't supported.
# Send signal from this process instead.
try:
os.kill(event.pid, signal.SIGPROF)
except OSError as e:
print("Failed to signal TID {}: {}".format(event.pid, e), file=stderr)
# user | |
<filename>src/plottoolbox/functions/kde.py
# -*- coding: utf-8 -*-
"""Collection of functions for the manipulation of time series."""
from __future__ import absolute_import, division, print_function
import itertools
import os
import warnings
import mando
import numpy as np
import pandas as pd
from mando.rst_text_formatter import RSTHelpFormatter
from tstoolbox import tsutils
from .. import plotutils
# NOTE(review): blanket-suppresses every warning module-wide; presumably to
# silence noisy plotting/library deprecation warnings -- confirm the intent,
# as this also hides warnings from user code importing this module.
warnings.filterwarnings("ignore")
@mando.command("kde", formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(plotutils.ldocstrings)
def kde_cli(
input_ts="-",
columns=None,
start_date=None,
end_date=None,
clean=False,
skiprows=None,
index_type="datetime",
names=None,
ofilename="plot.png",
xtitle="",
ytitle="",
title="",
figsize="10,6.0",
legend=None,
legend_names=None,
subplots=False,
sharex=True,
sharey=False,
colors="auto",
linestyles="auto",
markerstyles=" ",
bar_hatchstyles="auto",
style="auto",
logx=False,
logy=False,
xaxis="arithmetic",
yaxis="arithmetic",
xlim=None,
ylim=None,
secondary_y=False,
mark_right=True,
scatter_matrix_diagonal="kde",
bootstrap_size=50,
bootstrap_samples=500,
norm_xaxis=False,
norm_yaxis=False,
lognorm_xaxis=False,
lognorm_yaxis=False,
xy_match_line="",
grid=False,
label_rotation=None,
label_skip=1,
force_freq=None,
drawstyle="default",
por=False,
invert_xaxis=False,
invert_yaxis=False,
round_index=None,
plotting_position="weibull",
prob_plot_sort_values="descending",
source_units=None,
target_units=None,
lag_plot_lag=1,
plot_styles="bright",
hlines_y=None,
hlines_xmin=None,
hlines_xmax=None,
hlines_colors=None,
hlines_linestyles="-",
vlines_x=None,
vlines_ymin=None,
vlines_ymax=None,
vlines_colors=None,
vlines_linestyles="-",
):
r"""Kernel density estimation of probability density function.
"kde" will create a plot of estimation of the probability density function
based on the data called kernel density estimation (KDE).
{ydata}
Parameters
----------
{input_ts}
ofilename : str
[optional, defaults to 'plot.png']
Output filename for the plot. Extension defines
the type, for example 'filename.png' will create a PNG file.
If used within Python, and `ofilename` is None will return the
Matplotlib figure that can then be changed or added to as
needed.
lag_plot_lag
[optional, default to 1]
The lag used if ``type`` "lag_plot" is chosen.
xtitle : str
[optional, default depends on ``type``]
Title of x-axis.
ytitle : str
[optional, default depends on ``type``]
Title of y-axis.
title : str
[optional, defaults to '']
Title of chart.
figsize : str
[optional, defaults to '10,6.5']
The 'width,height' of plot in inches.
legend
[optional, defaults to True]
Whether to display the legend.
legend_names : str
[optional, defaults to None]
Legend would normally use the time-series names associated with
the input data. The 'legend_names' option allows you to
override the names in the data set. You must supply a comma
separated list of strings for each time-series in the data set.
subplots
[optional, defaults to False]
Make separate subplots for each time series.
sharex
[optional, default to True]
In case subplots=True, share x axis.
sharey
[optional, default to False]
In case subplots=True, share y axis.
colors
[optional, default is 'auto']
The default 'auto' will cycle through matplotlib colors in the chosen
style.
At the command line supply a comma separated matplotlib
color codes, or within Python a list of color code strings.
Can identify colors in four different ways.
1. Use 'CN' where N is a number from 0 to 9 that gets the Nth color
from the current style.
2. Single character code from the table below.
+------+---------+
| Code | Color |
+======+=========+
| b | blue |
+------+---------+
| g | green |
+------+---------+
| r | red |
+------+---------+
| c | cyan |
+------+---------+
| m | magenta |
+------+---------+
| y | yellow |
+------+---------+
| k | black |
+------+---------+
3. Number between 0 and 1 that represents the level of gray, where 0 is
        white and 1 is black.
4. Any of the HTML color names.
+------------------+
| HTML Color Names |
+==================+
| red |
+------------------+
| burlywood |
+------------------+
| chartreuse |
+------------------+
| ...etc. |
+------------------+
Color reference:
http://matplotlib.org/api/colors_api.html
linestyles
[optional, default to 'auto']
If 'auto' will iterate through the available matplotlib line types.
Otherwise on the command line a comma separated list, or a list of
strings if using the Python API.
To not display lines use a space (' ') as the linestyle code.
Separated 'colors', 'linestyles', and 'markerstyles' instead of using
the 'style' keyword.
+---------+--------------+
| Code | Lines |
+=========+==============+
| ``-`` | solid |
+---------+--------------+
| -- | dashed |
+---------+--------------+
| -. | dash_dot |
+---------+--------------+
| : | dotted |
+---------+--------------+
| None | draw nothing |
+---------+--------------+
| ' ' | draw nothing |
+---------+--------------+
| '' | draw nothing |
+---------+--------------+
Line reference:
http://matplotlib.org/api/artist_api.html
markerstyles
[optional, default to ' ']
The default ' ' will not plot a marker. If 'auto' will iterate through
the available matplotlib marker types. Otherwise on the command line
a comma separated list, or a list of strings if using the Python API.
Separated 'colors', 'linestyles', and 'markerstyles' instead of using
the 'style' keyword.
+-------+----------------+
| Code | Markers |
+=======+================+
| . | point |
+-------+----------------+
| o | circle |
+-------+----------------+
| v | triangle down |
+-------+----------------+
| ^ | triangle up |
+-------+----------------+
| < | triangle left |
+-------+----------------+
| > | triangle right |
+-------+----------------+
| 1 | tri_down |
+-------+----------------+
| 2 | tri_up |
+-------+----------------+
| 3 | tri_left |
+-------+----------------+
| 4 | tri_right |
+-------+----------------+
| 8 | octagon |
+-------+----------------+
| s | square |
+-------+----------------+
| p | pentagon |
+-------+----------------+
| ``*`` | star |
+-------+----------------+
| h | hexagon1 |
+-------+----------------+
| H | hexagon2 |
+-------+----------------+
| ``+`` | plus |
+-------+----------------+
| x | x |
+-------+----------------+
| D | diamond |
+-------+----------------+
| d | thin diamond |
+-------+----------------+
| _ | hlines_y |
+-------+----------------+
| None | nothing |
+-------+----------------+
| ' ' | nothing |
+-------+----------------+
| '' | nothing |
+-------+----------------+
Marker reference:
http://matplotlib.org/api/markers_api.html
style
[optional, default is None]
Still available, but if None is replaced by 'colors', 'linestyles', and
'markerstyles' options. Currently the 'style' option will override the
others.
Comma separated matplotlib style strings per time-series. Just
combine codes in 'ColorMarkerLine' order, for example 'r*--' is
a red dashed line with star marker.
bar_hatchstyles
[optional, default to "auto", only used if type equal to "bar", "barh",
"bar_stacked", and "barh_stacked"]
If 'auto' will iterate through the available matplotlib hatch types.
Otherwise on the command line a comma separated list, or a list of
strings if using the Python API.
+-----------------+-------------------+
| bar_hatchstyles | Description |
+=================+===================+
| / | diagonal hatching |
+-----------------+-------------------+
| ``\`` | back diagonal |
+-----------------+-------------------+
| ``|`` | vertical |
+-----------------+-------------------+
| - | horizontal |
+-----------------+-------------------+
| + | crossed |
+-----------------+-------------------+
| x | crossed diagonal |
+-----------------+-------------------+
| o | small circle |
+-----------------+-------------------+
| O | large circle |
+-----------------+-------------------+
| . | dots |
+-----------------+-------------------+
| * | stars |
+-----------------+-------------------+
logx
DEPRECATED: use '--xaxis="log"' instead.
logy
DEPRECATED: use '--yaxis="log"' instead.
xlim
[optional, default is based on range of x values]
Comma separated lower and upper limits for the x-axis of the
plot. For example, '--xlim 1,1000' would limit the plot from
1 to 1000, where '--xlim ,1000' would base the lower limit on
the data and set the upper limit to 1000.
ylim
[optional, default is based on range of y values]
Comma separated lower and upper limits for the y-axis of the
plot. See `xlim` for examples.
xaxis : str
[optional, default is 'arithmetic']
Defines the type of the xaxis. One of 'arithmetic', 'log'.
yaxis : str
[optional, default is 'arithmetic']
Defines the type of the yaxis. One of 'arithmetic', 'log'.
secondary_y
[optional, default is False]
Whether to plot on the secondary y-axis. If a list/tuple, which
time-series to plot on secondary y-axis.
mark_right
[optional, default is True]
When using a secondary_y axis, should the legend label the axis of the
various time-series automatically.
scatter_matrix_diagonal : str
[optional, defaults to 'kde']
If plot type is 'scatter_matrix', this specifies the plot along the
diagonal. One of 'kde' for Kernel Density Estimation or 'hist'
for a histogram.
bootstrap_size : int
[optional, defaults to 50]
The size of the random subset for 'bootstrap' plot.
bootstrap_samples
[optional, defaults to 500]
The number of random subsets of 'bootstrap_size'.
norm_xaxis
DEPRECATED: use '--type="norm_xaxis"' instead.
norm_yaxis
DEPRECATED: use '--type="norm_yaxis"' instead.
lognorm_xaxis
DEPRECATED: use '--type="lognorm_xaxis"' instead.
lognorm_yaxis
DEPRECATED: use '--type="lognorm_yaxis"' instead.
xy_match_line : str
        [optional, defaults to '']
Will add a match line where x == y. Set to a line style code.
grid
[optional, default is False]
Whether to plot grid | |
# Source repository: ZeayW/graph-contrastive-learning
from dataset_gcl import Dataset_gcl
from options import get_options
from model import *
from unity import CV_plot
import dgl
import pickle
import numpy as np
import os
from MyDataLoader_ud import *
from time import time
import math
import networkx as nx
from random import shuffle
import random
def preprocess(data_path, device, options):
    """Create the cached training dataset and a freshly initialised model.

    Side effects only (nothing is returned):
      * ensures ``<data_path>/i<num_input>/`` exists and, when the pickled
        split is missing, builds it via ``Dataset_gcl`` and dumps
        ``(batch_graph, POs, depth)`` to ``i<num_input>/<split>.pkl``;
      * builds a new GNN + projection head (or loads a pre-trained pair
        when ``options.pre_train`` is set) and pickles
        ``(options, model, proj_head)`` into
        ``options.model_saving_dir/model.pkl``;
      * truncates/creates ``res.txt`` in the model directory.
    """
    # datapaths = ['../dataset/test/ICCAD2014/v/']
    # datapaths = ["../dc/boom/implementation/"]
    # save_file = 'iog_dc_cd5.pkl'
    # Dataset = Dataset_dc
    # NOTE(review): in_dim is read from the freshly-parsed global CLI options
    # rather than the ``options`` argument — confirm this asymmetry is intended.
    in_dim = get_options().in_dim
    nlabels = options.nlabels  # NOTE(review): unused local
    label2id = {}  # NOTE(review): unused local
    # Make sure the per-input-count cache directory exists.
    if not os.path.exists(os.path.join(data_path, 'i{}'.format(options.num_input))):
        os.makedirs(os.path.join(data_path, 'i{}'.format(options.num_input)))
    data_file = os.path.join(data_path, 'i{}/{}.pkl'.format(options.num_input, options.split))
    val_data_file = os.path.join(data_path, 'rocket2.pkl')
    # if os.path.exists(val_data_file) is False:
    #     datapaths = ["../dc/rocket/implementation/"]
    #     th.multiprocessing.set_sharing_strategy('file_system')
    #     #print(dataset_not_edge.Dataset_n)
    #     dataset = Dataset_dc("Rocket",datapaths,label2id)
    #     g = dataset.batch_graph
    #     #print(g.ndata)
    #     print(g.ndata)
    #     #print(g.edata['r'])
    #     with open(val_data_file,'wb') as f:
    #         pickle.dump(g,f)
    # Build and cache the dataset only when its pickle is absent.
    if os.path.exists(data_file) is False:
        datapath = "../truthtables/i{}/implementation/".format(options.num_input)
        th.multiprocessing.set_sharing_strategy('file_system')
        # print(dataset_not_edge.Dataset_n)
        dataset = Dataset_gcl(datapath, options.split)
        # print(g.ndata)
        print(dataset.batch_graph.ndata)
        # print(g.edata['r'])
        with open(data_file, 'wb') as f:
            pickle.dump((dataset.batch_graph, dataset.POs, dataset.depth), f)
    # split_data(g,split_save_file,options)
    # with open(save_file, 'rb') as f:
    #     g = pickle.load(f)
    # print(g.edata)
    if options.pre_train:
        # Reuse a previously trained model and projection head.
        with open(os.path.join(options.pre_model_dir, 'model.pkl'), 'rb') as f:
            _, model,proj_head = pickle.load(f)
    else:
        if options.gnn:
            network = GNN_1l
        else:
            network = FuncGCN
        num_heads = options.num_heads
        in_nlayers = options.in_nlayers
        model = network(
            label=options.label,
            include=options.include,
            device=device,
            in_dim=in_dim,
            hidden_dim=options.hidden_dim,
            out_dim=options.out_dim,
            num_heads=num_heads,
            n_layers=in_nlayers,
            dropout=options.gcn_dropout,
            aggregation_type=options.agg_type,
            combine_type=options.combine,
        )
        # Projection head applied on top of the GNN embeddings
        # (presumably for the contrastive objective — see train()).
        proj_head = Projection_Head(
            in_feats=options.out_dim,
            out_feats=options.out_dim
        ).to(device)
    print("creating model in:", options.model_saving_dir)
    print(model)
    if os.path.exists(options.model_saving_dir) is False:
        os.makedirs(options.model_saving_dir)
    # Persist options + freshly created (or pre-trained) modules so that
    # load_model() can restore them later.
    with open(os.path.join(options.model_saving_dir, 'model.pkl'), 'wb') as f:
        parameters = options
        pickle.dump((parameters, model,proj_head), f)
    # Truncate / create the results log.
    with open(os.path.join(options.model_saving_dir, 'res.txt'), 'w') as f:
        pass
def load_model(device,options):
    """Load the pickled (options, model, projection-head) triple from disk.

    Parameters
    ----------
    device : torch device (or device string) the loaded modules are moved to.
    options : namespace providing ``model_saving_dir`` plus the
        ``change_lr`` / ``change_alpha`` override flags.

    Returns
    -------
    tuple
        ``(param, classifier, proj_head)`` on success, or
        ``(None, None, None)`` when ``model.pkl`` does not exist.
    """
    model_dir = options.model_saving_dir
    model_file = os.path.join(model_dir, 'model.pkl')
    if os.path.exists(model_file) is False:
        # BUG FIX: the original returned a 2-tuple here while every caller
        # unpacks three values (param, model, proj_head), raising
        # "ValueError: not enough values to unpack" on the missing-model path.
        return None, None, None
    with open(model_file, 'rb') as f:
        #print(f)
        param, classifier, proj_head = pickle.load(f)
        #print(classifier)
    # Keep the save directory of the *current* run, not the pickled one.
    param.model_saving_dir = options.model_saving_dir
    classifier = classifier.to(device)
    proj_head = proj_head.to(device)
    # Optionally override hyper-parameters stored with the checkpoint.
    if options.change_lr:
        param.learning_rate = options.learning_rate
    if options.change_alpha:
        param.alpha = options.alpha
    return param, classifier, proj_head
def unlabel_low(g, unlabel_threshold):
    """Zero the 'label_o' entry of every node whose 'position' is at or
    below ``unlabel_threshold``; other labels are left untouched.
    Mutates ``g`` in place and returns nothing.
    """
    g.ndata['label_o'][g.ndata['position'] <= unlabel_threshold] = 0
def load_valdata(val_data_file, options):
    """Load a pickled validation graph and prepare per-node tensors.

    Parameters
    ----------
    val_data_file : path of a pickle containing one batched DGL graph.
    options : namespace providing ``hidden_dim``.

    Returns
    -------
    list of the individual graphs obtained by unbatching the loaded graph.
    """
    with open(val_data_file, 'rb') as f:
        val_g = pickle.load(f)
    # Nodes at position <= 1 are treated as unlabeled (label forced to 0).
    unlabel_low(val_g, 1)
    label_name = 'label_o'  # NOTE(review): unused local, kept as-is
    # Collapse label value 2 into 1 so 'label_o' becomes binary {0, 1}.
    val_g.ndata['label_o'][val_g.ndata['label_o'].squeeze(-1) == 2] = 1
    # Constant all-ones node features — presumably so the GNN relies on
    # graph structure alone; TODO confirm against the model.
    val_g.ndata['f_input'] = th.ones(size=(val_g.number_of_nodes(), options.hidden_dim), dtype=th.float)
    val_g.ndata['temp'] = th.ones(size=(val_g.number_of_nodes(), options.hidden_dim), dtype=th.float)
    # Dense node-type id derived from the one-hot 'ntype' encoding.
    val_g.ndata['ntype2'] = th.argmax(val_g.ndata['ntype'], dim=1).squeeze(-1)
    val_graphs = dgl.unbatch(val_g)
    return val_graphs
def check_sim(embeddings, neg_embeddings, boom_embeddings):
    """Print and return mean cosine similarities of each embedding against
    the negative pool and (optionally) the cross-design pool.

    NOTE(review): the positive-similarity accumulator is never updated —
    the line that would compute it is disabled — so the reported
    'avg pos sim' is always 0. That behaviour is preserved here.

    Returns (avg_pos_sim, avg_cross_sim, avg_neg_sim); avg_cross_sim is 0
    when ``boom_embeddings`` is None.
    """
    total_pos_sim = 0
    total_neg_sim = 0
    total_cross_sim = 0
    avg_cross_sim = 0
    count = embeddings.shape[0]
    for idx in range(count):
        # Mean similarity of this sample against every negative embedding.
        total_neg_sim += th.cosine_similarity(embeddings[idx], neg_embeddings, dim=-1).sum() / len(neg_embeddings)
        if boom_embeddings is not None:
            total_cross_sim += th.cosine_similarity(embeddings[idx], boom_embeddings, dim=-1).sum() / len(boom_embeddings)
    avg_pos_sim = total_pos_sim / len(embeddings)
    avg_neg_sim = total_neg_sim / len(embeddings)
    if boom_embeddings is not None:
        avg_cross_sim = total_cross_sim / len(embeddings)
    print('avg pos sim :{:.4f}, avg cross sim :{:.4f}, avg neg sim:{:.4f}'.format(avg_pos_sim, avg_cross_sim,
                                                                                  avg_neg_sim))
    return avg_pos_sim, avg_cross_sim, avg_neg_sim
def validate_sim(val_g, boom_embeddings,sampler, device, model,res_sim):
    """Embed all nodes of a validation graph and record similarity stats.

    Runs the full graph through ``model`` in one batch (batch size equals
    the node count), splits embeddings into positive / negative groups by
    'label_o', and appends the ``check_sim`` tuple to ``res_sim``.
    ``res_sim`` is mutated in place; nothing is returned.

    Parameters
    ----------
    val_g : DGL validation graph with 'label_o' and 'f_input' node data.
    boom_embeddings : embeddings of a second design for cross-similarity,
        or None to skip the cross term.
    sampler : block sampler used to build the message-flow blocks.
    device : device the blocks are moved to before the forward pass.
    model : GNN producing one embedding per destination node.
    res_sim : output list; one (pos_sim, cross_sim, neg_sim) tuple is
        appended per loader batch.
    """
    #res_sim = []
    val_nodes = th.tensor(range(val_g.number_of_nodes()))
    # Boolean node masks: label 1 -> positive, label 0 -> negative.
    pos_mask = (val_g.ndata['label_o'] == 1).squeeze(1)
    neg_mask = (val_g.ndata['label_o'] == 0).squeeze(1)
    # Batch size == number of nodes, so the loop below runs a single time.
    loader = MyNodeDataLoader(
        True,
        val_g,
        val_nodes,
        sampler,
        bs=val_g.num_nodes(),
        batch_size=val_g.num_nodes(),
        shuffle=False,
        drop_last=False,
    )
    for ni, (central_nodes, input_nodes, blocks) in enumerate(loader):
        blocks = [b.to(device) for b in blocks]
        input_features = blocks[0].srcdata["f_input"]
        output_labels = blocks[-1].dstdata['label_o'].squeeze(1)  # NOTE(review): unused
        embeddings = model(blocks, input_features)
        pos_embeddings = embeddings[pos_mask]
        # print(sorted(pos_embeddings.cpu().detach().numpy().tolist()))
        # for ni,embed in enumerate(sorted(pos_embeddings.cpu().detach().numpy().tolist())):
        #     print(ni,embed[:7])
        # print(len(pos_embeddings))
        # exit()
        neg_embeddings = embeddings[neg_mask]
        # print(embeddings)
        pos_sim, cross_sim, neg_sim = check_sim(pos_embeddings, neg_embeddings, boom_embeddings)
        res_sim.append((pos_sim, cross_sim, neg_sim))
    #return res_sim
def NCEloss(pos1, pos2, neg, tao):
    """InfoNCE-style contrastive loss for one positive pair.

    The numerator is exp(cos(pos1, pos2)/tao); the denominator sums
    exp(cos(pos1, neg_i)/tao) over the negative pool and subtracts
    exp(1/tao) to discount the self-similarity term contained in ``neg``.

    Returns the scalar loss tensor -log(numerator / denominator).
    """
    sim_pos = th.cosine_similarity(pos1, pos2, dim=-1)
    sim_neg = th.cosine_similarity(pos1, neg, dim=-1)
    numerator = th.exp(sim_pos / tao)
    denominator = th.sum(th.exp(sim_neg / tao)) - math.exp(1 / tao)
    return -th.log(numerator / denominator)
def shuffle_nids(nids):
    """Shuffle node ids while keeping (even-index, odd-index) pairs aligned.

    The ids at even positions and the ids at odd positions are shuffled
    with the same RNG seed, so both halves undergo the identical
    permutation and each original (nids[2k], nids[2k+1]) pair survives the
    shuffle. The result interleaves the two shuffled halves.

    BUG FIX: the original indexed ``nids2[i]`` for every ``i`` in
    ``range(len(nids1))``, which raised IndexError whenever ``len(nids)``
    was odd; the leftover id is now appended at the end instead.

    Returns a new list; ``nids`` itself is not modified.
    """
    evens = list(nids[0::2])
    odds = list(nids[1::2])
    # Re-seed both shuffles identically so the permutations match.
    seed = random.randint(1, 100)
    random.seed(seed)
    random.shuffle(evens)
    random.seed(seed)
    random.shuffle(odds)
    interleaved = []
    for even_id, odd_id in zip(evens, odds):
        interleaved.append(even_id)
        interleaved.append(odd_id)
    if len(evens) > len(odds):  # odd-length input: keep the unpaired id
        interleaved.append(evens[-1])
    return interleaved
def train(options):
batch_sizes = {}
start_input, start_aug = options.start[0], options.start[1]
end_input, end_aug = options.end[0], options.end[1]
print(start_input,start_aug)
print(end_input,end_aug)
loss_thred = options.loss_thred
th.multiprocessing.set_sharing_strategy('file_system')
device = th.device("cuda:" + str(options.gpu) if th.cuda.is_available() else "cpu")
# Dump the preprocessing result to save time!
# for region detecion, the data_path is 'data/region', for boundary(io) detection, the data_path is 'data/boundary'
data_path = '../data/gcl_new/'
train_data_files = []
# for num_aug in range(1,4):
# for num_input in range(5, 8):
# train_data_files.append((num_input,num_aug))
for num_input in range(5,8):
for num_aug in range(1,4):
train_data_files.append((num_input,num_aug))
start_pos,end_pos = 0,0
for i , (num_input,num_aug) in enumerate(train_data_files):
if num_input == start_input and num_aug == start_aug:
start_pos = i
if num_input == end_input and num_aug == end_aug:
end_pos = i
train_data_files = train_data_files[start_pos:end_pos+1]
# for num_input,num_aug in train_data_files:
# if num_aug == 1:
# batch_sizes.append(350)
# elif num_aug == 2:
# batch_sizes.append(400)
# elif num_aug == 3:
# batch_sizes.append(512)
for num_input,num_aug in train_data_files:
if num_input == 5:
batch_sizes[(num_input,num_aug)] = 350
else:
batch_sizes[(num_input,num_aug)] = 512
print(train_data_files)
#exit()
# train_data_file = os.path.join(data_path,'i{}.pkl'.format(options.num_input))W
# neg_data_file = os.path.join(data_path, 'rocket2.pkl')
# val_data_file = os.path.join(data_path,'rocket2.pkl')
# split_dir = 'splits/rokcet'
in_nlayers, out_nlayers = options.in_nlayers, options.out_nlayers
num_epoch = options.num_epoch
if options.preprocess:
preprocess(data_path, device, options)
return
print(options)
print("Loading data...")
val_data_file = os.path.join('../data/simplify9', 'rocket2.pkl')
val_graphs = load_valdata(val_data_file, options)
val_sampler = Sampler([None] * (in_nlayers + 1), include_dst_in_src=options.include)
val_data_file1 = os.path.join('../data/simplify9', 'boom2.pkl')
val_graphs1 = load_valdata(val_data_file1,options)
val_graph1 = dgl.batch(val_graphs1)
val1_pos_nodes = th.tensor(range(val_graph1.number_of_nodes()))[val_graph1.ndata['label_o'].squeeze(1)>=1]
val_dataloader1 = MyNodeDataLoader(
True,
val_graph1,
val1_pos_nodes,
val_sampler,
bs=len(val1_pos_nodes),
batch_size=len(val1_pos_nodes),
shuffle=False,
drop_last=False,
)
data_loaders = []
for i,(num_input,num_aug) in enumerate(train_data_files):
file = os.path.join(data_path, 'i{}/aug{}.pkl'.format(num_input, num_aug))
with open(file, 'rb') as f:
train_g, POs, depth = pickle.load(f)
train_g.ndata['f_input'] = th.ones(size=(train_g.number_of_nodes(), options.hidden_dim), dtype=th.float)
train_g.ndata['temp'] = th.ones(size=(train_g.number_of_nodes(), options.hidden_dim), dtype=th.float)
train_g.ndata['ntype2'] = th.argmax(train_g.ndata['ntype'], dim=1).squeeze(-1)
data_size = len(POs)
# for po in POs.keys():
for po in POs:
assert len(train_g.successors(po)) == 0
if data_size > options.batch_size:
data_size = int(len(POs) / options.batch_size) * options.batch_size
POs = POs[:data_size]
if options.gat:
add_self_loop = True
else:
add_self_loop = False
sampler = Sampler(depth * [options.degree], include_dst_in_src=options.include, add_self_loop=add_self_loop)
print('aug{}, depth:{},num_nodes:{}, num_pos:{}'.format(num_aug, depth, train_g.number_of_nodes(), len(POs)))
# train_blocks = sampler.sample_blocks(train_g,POs)
# train_blocks = [b.to(device) for b in train_blocks]
# pos_pairs = None
#
# print(train_blocks)
# print(pos_pairs)
# print(po_depths)
# check(train_g,POs,depth)
data_loaders.append(
(num_input, num_aug, MyNodeDataLoader(
False,
train_g,
POs,
sampler,
bs = batch_sizes[(num_input, num_aug)],
batch_size=batch_sizes[(num_input, num_aug)],
shuffle=False,
drop_last=False,
))
)
print("Data successfully loaded")
options, model,proj_head = load_model(device, options)
if model is None:
print("No model, please prepocess first , or choose a pretrain model")
return
print(model)
print(proj_head)
optim = th.optim.Adam(
model.parameters(), options.learning_rate, weight_decay=options.weight_decay
)
model.train()
print(options.alpha)
max_val_recall, max_val_precision = 0.0, 0.0
print("Start training")
max_F1_score = 0
pre_loss = 100
stop_score = 0
dic = {}
for num_input, aug_indx, data_loader in data_loaders:
print('dataset:',num_input,aug_indx)
for epoch in range(num_epoch):
POs = data_loader.nids
g = data_loader.g
POs = shuffle_nids(POs)
# print(POs[:100])
# data_loader.collator.nids = POs
# data_loader.dataloader = DataLoader(data_loader.collator.dataset,
# collate_fn=data_loader.collator.collate,
# **dataloader_kwargs)
sampler = data_loader.block_sampler
data_loader = MyNodeDataLoader(
False,
g,
POs,
sampler,
bs=batch_sizes[(num_input, aug_indx)],
batch_size=batch_sizes[(num_input, aug_indx)],
shuffle=False,
drop_last=False,
)
runtime = 0
total_num, total_loss, correct, fn, fp, tn, tp = 0, 0.0, 0, 0, 0, 0, 0
pos_count, neg_count = 0, 0
for ni, (central_nodes, input_nodes, blocks) in enumerate(data_loader):
#print(central_nodes)
if ni==len(data_loader)-1:
continue
# continue
start_time = time()
neg_embeddings = []
blocks = [b.to(device) for b in blocks]
loss = 0
embeddings = model(blocks, blocks[0].srcdata['f_input'])
#embeddings = proj_head(embeddings)
for i in range(0, len(embeddings), 2):
loss += NCEloss(embeddings[i], embeddings[i + 1], embeddings, options.tao)
loss += NCEloss(embeddings[i + 1], embeddings[i], embeddings, options.tao)
loss = loss / len(embeddings)
#print(ni, loss.item())
#if num_input >= 7: print(ni, loss.item())
total_num += 1
total_loss += loss
endtime = time()
runtime += endtime - start_time
# print(loss.item())
# val_acc, val_recall, val_precision, val_F1_score = validate(valdataloader, label_name, device,
# model, Loss, options.alpha, beta,
# depth, width, num_aug, po_depths,query_embedding,
# thredshold=0.0)
start_time = time()
optim.zero_grad()
loss.backward()
# print(model.GCN1.layers[0].attn_n.grad)
optim.step()
endtime = time()
runtime += endtime - start_time
Train_loss = total_loss / total_num
boom_embeddings = None
for _,_, blocks in val_dataloader1:
blocks = [b.to(device) for b in blocks]
boom_embeddings = model(blocks,blocks[0].srcdata['f_input'])
print("epoch[{:d}]".format(epoch))
print("training runtime: ", runtime)
print(" train:")
print("loss:{:.8f}".format(Train_loss.item()))
res_sims = []
for val_g in val_graphs:
validate_sim(val_g,boom_embeddings, val_sampler, device, model,res_sims)
with open(os.path.join(options.model_saving_dir, 'res.txt'), 'a') as f:
f.write(str(round(Train_loss.item(), 3)))
for pos_sim,cross_sim,neg_sim in res_sims:
f.write('\n'+str(round(cross_sim.item(),4))+'\t'+str(round(neg_sim.item(),4)))
f.write('\n')
# judgement = val_F1_score > max_F1_score
judgement = True
#judgement = Train_loss < 100
if judgement:
| |
= QLabel("")
img = QPixmap("icons/fan.png")
img_library.setContentsMargins(60,0,0,0)
img_library.setPixmap(img)
right_bottom_layout.addWidget(img_library)
fan_main_right_layout.addLayout(right_bottom_layout,60)
def tabChanged(self,i):
self.getTemperature()
self.getHumiditiy()
self.getWater()
self.getLight()
self.getCarbondioksit()
self.getSeraKapak()
self.getUye()
self.getParca()
self.getFan()
self.gethava()
def getTemperature(self):
self.temperature_table.setFont(QFont("Times", 12))
for i in reversed(range(self.temperature_table.rowCount())):
self.temperature_table.removeRow(i)
query = cursor.execute("SELECT temperature_id,tarih,sicaklik,sicaklik2,sicaklik3,sicaklik4 FROM temperature")
for row_data in query:
row_number = self.temperature_table.rowCount()
self.temperature_table.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.temperature_table.setItem(row_number, column_number, QTableWidgetItem(str(data)))
#QTimer.singleShot(1000,self.getTemperature)
self.temperature_table.setEditTriggers(QAbstractItemView.NoEditTriggers) # belgeler
def temperatureSearchBooks(self):
value = self.temperature_search_entry.text()
print(value)
if value == "":
QMessageBox.information(self, "Uyarı", "Arama bölümü boş olamaz")
else:
query = cursor.execute(
"SELECT temperature_id,tarih,sicaklik,sicaklik2,sicaklik3,sicaklik4 FROM temperature "
"WHERE tarih LIKE ? or sicaklik LIKE ? or sicaklik2 LIKE ? or sicaklik3 LIKE ? or sicaklik4 LIKE ?",
(value,
value , value , value , value )).fetchall() # içeren kelimelerin hepsini gösterir ve değerlerin hepsini getir
print(query)
if query == []:
QMessageBox.information(self, "Uyarı", "Böyle bir ifade olamaz")
else:
for i in reversed(range(self.temperature_table.rowCount())):
self.temperature_table.removeRow(i)
for row_data in query:
row_number = self.temperature_table.rowCount()
self.temperature_table.insertRow(row_number)
for column_numbber, data in enumerate(row_data):
self.temperature_table.setItem(row_number, column_numbber, QTableWidgetItem(str(data)))
def temperatureRadioButton(self):
print(self.temperature_radio_btn1.isChecked())
if self.temperature_radio_btn1.isChecked() == True:
query = cursor.execute(
"SELECT temperature_id,tarih,sicaklik,sicaklik2,sicaklik3,sicaklik4 FROM temperature")
for i in reversed(range(self.temperature_table.rowCount())):
self.temperature_table.removeRow(i)
for row_data in query:
row_number = self.temperature_table.rowCount()
self.temperature_table.insertRow(row_number)
for column_numbber, data in enumerate(row_data):
self.temperature_table.setItem(row_number, column_numbber, QTableWidgetItem(str(data)))
def getHumiditiy(self):
self.humidity_table.setFont(QFont("Times", 12))
for i in reversed(range(self.humidity_table.rowCount())):
self.humidity_table.removeRow(i)
query = cursor.execute("SELECT humidity_id,tarih,nem,nem2,nem3,nem4,nem5,nem6,nem7,nem8 FROM humidity")
for row_data in query:
row_number = self.humidity_table.rowCount()
self.humidity_table.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.humidity_table.setItem(row_number, column_number, QTableWidgetItem(str(data)))
# QTimer.singleShot(1000,self.getTemperature)
self.humidity_table.setEditTriggers(QAbstractItemView.NoEditTriggers) # belgeler
def humiditySearchBooks(self):
value = self.humidity_search_entry.text()
print(value)
if value == "":
QMessageBox.information(self, "Uyarı", "Arama bölümü boş olamaz")
else:
query = cursor.execute(
"SELECT humidity_id,tarih,nem FROM humidity "
"WHERE tarih LIKE ? or nem LIKE ? or nem2 LIKE ? or nem3 LIKE ? or nem4 LIKE ? or nem5 LIKE ? or nem6 LIKE ? or nem7 LIKE ? or nem8 LIKE ?",
(value,
value,value,value,value,value,value,value,value,value)).fetchall() # içeren kelimelerin hepsini gösterir ve değerlerin hepsini getir
print(query)
if query == []:
QMessageBox.information(self, "Uyarı!!!", "Böyle bir nem değeri yok")
else:
for i in reversed(range(self.humidity_table.rowCount())):
self.humidity_table.removeRow(i)
for row_data in query:
row_number = self.humidity_table.rowCount()
self.humidity_table.insertRow(row_number)
for column_numbber, data in enumerate(row_data):
self.humidity_table.setItem(row_number, column_numbber, QTableWidgetItem(str(data)))
def humidityRadioButton(self):
print(self.humidity_radio_btn1.isChecked())
if self.humidity_radio_btn1.isChecked() == True:
query = cursor.execute(
"SELECT humidity_id,tarih,nem,nem2,nem3,nem4,nem5,nem6,nem7,nem8 FROM humidity")
for i in reversed(range(self.humidity_table.rowCount())):
self.humidity_table.removeRow(i)
for row_data in query:
row_number = self.humidity_table.rowCount()
self.humidity_table.insertRow(row_number)
for column_numbber, data in enumerate(row_data):
self.humidity_table.setItem(row_number, column_numbber, QTableWidgetItem(str(data)))
def getWater(self):
self.water_table.setFont(QFont("Times", 12))
for i in reversed(range(self.water_table.rowCount())):
self.water_table.removeRow(i)
query = cursor.execute("SELECT harcanansu_id,tarih,harcanan_su,toplam_su,pH,ec FROM harcanansumiktari")
for row_data in query:
row_number = self.water_table.rowCount()
self.water_table.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.water_table.setItem(row_number, column_number, QTableWidgetItem(str(data)))
# QTimer.singleShot(1000,self.getTemperature)
self.water_table.setEditTriggers(QAbstractItemView.NoEditTriggers) # belgeler
def waterSearchBooks(self):
value = self.water_search_entry.text()
print(value)
if value == "":
QMessageBox.information(self, "Uyarı", "Arama bölümü boş olamaz")
else:
query = cursor.execute(
"SELECT harcanansu_id,tarih,harcanan_su,toplam_su,pH,ec FROM harcanansumiktari "
"WHERE tarih LIKE ? or harcanan_su LIKE ? or toplam_su LIKE ?, pH LIKE ?, ec LIKE ? ",
(value,value,value,value,value)).fetchall() # içeren kelimelerin hepsini gösterir ve değerlerin hepsini getir
print(query)
if query == []:
QMessageBox.information(self, "Uyarı", "Böyle bir ifade yok")
else:
for i in reversed(range(self.water_table.rowCount())):
self.water_table.removeRow(i)
for row_data in query:
row_number = self.water_table.rowCount()
self.water_table.insertRow(row_number)
for column_numbber, data in enumerate(row_data):
self.water_table.setItem(row_number, column_numbber, QTableWidgetItem(str(data)))
def waterRadioButton(self):
print(self.water_radio_btn1.isChecked())
if self.water_radio_btn1.isChecked() == True:
query = cursor.execute(
"SELECT harcanansu_id,tarih,harcanan_su,toplam_su,pH,ec FROM harcanansumiktari")
for i in reversed(range(self.water_table.rowCount())):
self.water_table.removeRow(i)
for row_data in query:
row_number = self.water_table.rowCount()
self.water_table.insertRow(row_number)
for column_numbber, data in enumerate(row_data):
self.water_table.setItem(row_number, column_numbber, QTableWidgetItem(str(data)))
def gethava(self):
self.hava_table.setFont(QFont("Times", 12))
for i in reversed(range(self.hava_table.rowCount())):
self.hava_table.removeRow(i)
query = cursor.execute("SELECT hava_id,hava_tarih,hava_nem FROM hava")
for row_data in query:
row_number = self.hava_table.rowCount()
self.hava_table.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.hava_table.setItem(row_number, column_number, QTableWidgetItem(str(data)))
# QTimer.singleShot(1000,self.getTemperature)
self.hava_table.setEditTriggers(QAbstractItemView.NoEditTriggers) # belgeler
def havaSearchBooks(self):
value = self.hava_search_entry.text()
print(value)
if value == "":
QMessageBox.information(self, "Uyarı", "Arama bölümü boş olamaz")
else:
query = cursor.execute(
"SELECT hava_id,hava_tarih,hava_nem FROM hava "
"WHERE hava_tarih LIKE ? or hava_nem LIKE ?",
('%' + value + '%',
'%' + value + '%')).fetchall() # içeren kelimelerin hepsini gösterir ve değerlerin hepsini getir
print(query)
if query == []:
QMessageBox.information(self, "Uyarı", "Böyle bir ifade yok")
else:
for i in reversed(range(self.water_table.rowCount())):
self.hava_table.removeRow(i)
for row_data in query:
row_number = self.hava_table.rowCount()
self.hava_table.insertRow(row_number)
for column_numbber, data in enumerate(row_data):
self.hava_table.setItem(row_number, column_numbber, QTableWidgetItem(str(data)))
def havaRadioButton(self):
print(self.hava_radio_btn1.isChecked())
if self.hava_radio_btn1.isChecked() == True:
query = cursor.execute(
"SELECT hava_id,hava_tarih,hava_nem FROM hava")
for i in reversed(range(self.water_table.rowCount())):
self.hava_table.removeRow(i)
for row_data in query:
row_number = self.hava_table.rowCount()
self.hava_table.insertRow(row_number)
for column_numbber, data in enumerate(row_data):
self.hava_table.setItem(row_number, column_numbber, QTableWidgetItem(str(data)))
def getLight(self):
self.light_table.setFont(QFont("Times", 12))
for i in reversed(range(self.light_table.rowCount())):
self.light_table.removeRow(i)
query = cursor.execute("SELECT isik_id,tarih,isik_alma_süresi FROM isikalmasüresi")
for row_data in query:
row_number = self.light_table.rowCount()
self.light_table.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.light_table.setItem(row_number, column_number, QTableWidgetItem(str(data)))
# QTimer.singleShot(1000,self.getTemperature)
self.light_table.setEditTriggers(QAbstractItemView.NoEditTriggers) # belgeler
def lightSearchBooks(self):
value = self.light_search_entry.text()
print(value)
if value == "":
QMessageBox.information(self, "Uyarı", "Arama bölümü boş olamaz")
else:
query = cursor.execute(
"SELECT isik_id,tarih,isik_alma_süresi FROM isikalmasüresi "
"WHERE tarih LIKE ? or isik_alma_süresi LIKE ?",
('%' + value + '%',
'%' + value + '%')).fetchall() # içeren kelimelerin hepsini gösterir ve değerlerin hepsini getir
print(query)
if query == []:
QMessageBox.information(self, "Uyarı", "Böyle bir ifade yok")
else:
for i in reversed(range(self.light_table.rowCount())):
self.light_table.removeRow(i)
for row_data in query:
row_number = self.light_table.rowCount()
self.light_table.insertRow(row_number)
for column_numbber, data in enumerate(row_data):
self.light_table.setItem(row_number, column_numbber, QTableWidgetItem(str(data)))
def lightRadioButton(self):
print(self.light_radio_btn1.isChecked())
if self.light_radio_btn1.isChecked() == True:
query = cursor.execute(
"SELECT isik_id,tarih,isik_alma_süresi FROM isikalmasüresi")
for i in reversed(range(self.light_table.rowCount())):
self.light_table.removeRow(i)
for row_data in query:
row_number = self.light_table.rowCount()
self.light_table.insertRow(row_number)
for column_numbber, data in enumerate(row_data):
self.light_table.setItem(row_number, column_numbber, QTableWidgetItem(str(data)))
def getCarbondioksit(self):
self.carbondioksit_table.setFont(QFont("Times", 12))
for i in reversed(range(self.carbondioksit_table.rowCount())):
self.carbondioksit_table.removeRow(i)
query = cursor.execute("SELECT carbondioksit_id,tarih,carbondioksit_alma_miktari FROM carbondioksitmiktari")
for row_data in query:
row_number = self.carbondioksit_table.rowCount()
self.carbondioksit_table.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.carbondioksit_table.setItem(row_number, column_number, QTableWidgetItem(str(data)))
# QTimer.singleShot(1000,self.getTemperature)
self.carbondioksit_table.setEditTriggers(QAbstractItemView.NoEditTriggers) # belgeler
def carbondioksitSearchBooks(self):
value = self.carbondioksit_search_entry.text()
print(value)
if value == "":
QMessageBox.information(self, "Uyarı", "Arama bölümü boş olamaz")
else:
query = cursor.execute(
"SELECT carbondioksit_id,tarih,carbondioksit_alma_miktari FROM carbondioksitmiktari "
"WHERE tarih LIKE ? or carbondioksit_alma_miktari LIKE ?",
('%' + value + '%',
'%' + value + '%')).fetchall() # içeren kelimelerin hepsini gösterir ve değerlerin hepsini getir
print(query)
if query == []:
QMessageBox.information(self, "Uyarı", "Böyle bir ifade yok")
else:
for i in reversed(range(self.carbondioksit_table.rowCount())):
self.carbondioksit_table.removeRow(i)
for row_data in query:
row_number = self.carbondioksit_table.rowCount()
self.carbondioksit_table.insertRow(row_number)
for column_numbber, data in enumerate(row_data):
self.carbondioksit_table.setItem(row_number, column_numbber, QTableWidgetItem(str(data)))
def carbondioksitRadioButton(self):
print(self.carbondioksit_radio_btn1.isChecked())
if self.carbondioksit_radio_btn1.isChecked() == True:
query = cursor.execute(
"SELECT carbondioksit_id,tarih,carbondioksit_alma_miktari FROM carbondioksitmiktari")
for i in reversed(range(self.carbondioksit_table.rowCount())):
self.carbondioksit_table.removeRow(i)
for row_data in query:
row_number = self.carbondioksit_table.rowCount()
self.carbondioksit_table.insertRow(row_number)
for column_numbber, data in enumerate(row_data):
self.carbondioksit_table.setItem(row_number, column_numbber, QTableWidgetItem(str(data)))
def getSeraKapak(self):
self.sera_cati_table.setFont(QFont("Times", 12))
for i in reversed(range(self.sera_cati_table.rowCount())):
self.sera_cati_table.removeRow(i)
query = cursor.execute("SELECT sera_kapak_id,tarih,durum FROM seradurum")
for row_data in query:
row_number = self.sera_cati_table.rowCount()
self.sera_cati_table.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.sera_cati_table.setItem(row_number, column_number, QTableWidgetItem(str(data)))
# QTimer.singleShot(1000,self.getTemperature)
self.sera_cati_table.setEditTriggers(QAbstractItemView.NoEditTriggers) # belgeler
def catiSearchBooks(self):
value = self.kapak_search_entry.text()
print(value)
if value == "":
QMessageBox.information(self, "Uyarı", "Arama bölümü boş olamaz")
else:
query = cursor.execute(
"SELECT sera_kapak_id,tarih,durum FROM seradurum "
"WHERE tarih LIKE ? or durum LIKE ?",
('%' + value + '%',
'%' + value + '%')).fetchall() # içeren kelimelerin hepsini gösterir ve değerlerin hepsini getir
print(query)
if query == []:
QMessageBox.information(self, "Uyarı", "Böyle bir ifade yok")
else:
for i in reversed(range(self.sera_cati_table.rowCount())):
self.sera_cati_table.removeRow(i)
for row_data in query:
row_number = self.sera_cati_table.rowCount()
self.sera_cati_table.insertRow(row_number)
for column_numbber, data in enumerate(row_data):
self.sera_cati_table.setItem(row_number, column_numbber, QTableWidgetItem(str(data)))
def catiRadioButton(self):
print(self.kapak_radio_btn1.isChecked())
if self.kapak_radio_btn1.isChecked() == True:
query = cursor.execute(
"SELECT sera_kapak_id,tarih,durum FROM seradurum")
for i in reversed(range(self.sera_cati_table.rowCount())):
self.sera_cati_table.removeRow(i)
for row_data in query:
row_number = self.sera_cati_table.rowCount()
self.sera_cati_table.insertRow(row_number)
for column_numbber, data in enumerate(row_data):
self.sera_cati_table.setItem(row_number, column_numbber, QTableWidgetItem(str(data)))
elif self.kapak_radio_btn2.isChecked() == True:
print(self.kapak_radio_btn2.isChecked())
query = cursor.execute(
"SELECT sera_kapak_id,tarih,durum FROM seradurum WHERE durum =?",
("Açık",))
for i in reversed(range(self.sera_cati_table.rowCount())):
self.sera_cati_table.removeRow(i)
for row_data in query:
row_number = self.sera_cati_table.rowCount()
self.sera_cati_table.insertRow(row_number)
for column_numbber, data in enumerate(row_data):
self.sera_cati_table.setItem(row_number, column_numbber, QTableWidgetItem(str(data)))
elif self.kapak_radio_btn3.isChecked() == True:
query = cursor.execute(
"SELECT sera_kapak_id,tarih,durum FROM seradurum WHERE durum =?",
("Kapalı",))
for i in reversed(range(self.sera_cati_table.rowCount())):
self.sera_cati_table.removeRow(i)
for row_data in query:
row_number = self.sera_cati_table.rowCount()
self.sera_cati_table.insertRow(row_number)
for column_numbber, data in enumerate(row_data):
self.sera_cati_table.setItem(row_number, column_numbber, QTableWidgetItem(str(data)))
def getUye(self):
self.uye_table.setFont(QFont("Times", 12))
for i in reversed(range(self.uye_table.rowCount())):
self.uye_table.removeRow(i)
query = cursor.execute("SELECT üye_id,isim,telefon FROM üyeler")
for row_data in query:
row_number = self.uye_table.rowCount()
self.uye_table.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.uye_table.setItem(row_number, column_number, QTableWidgetItem(str(data)))
# QTimer.singleShot(1000,self.getTemperature)
self.uye_table.setEditTriggers(QAbstractItemView.NoEditTriggers) # belgeler
def searchMember(self):
value = self.uye_search_entry.text()
print(value)
if value == "":
QMessageBox.information(self, "Uyarı", "Arama bölümü boş olamaz")
else:
query = cursor.execute(
"SELECT üye_id,isim,telefon FROM üyeler "
| |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 19 17:06:15 2014
@author: <NAME>
"""
# make sure the rest of the ABXpy package is accessible
import os
import sys
package_path = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
if not(package_path in sys.path):
sys.path.append(package_path)
import ABXpy.misc.type_fitting as type_fitting
# FIXME should remove above dependency on rest of ABX...
import h5py
import collections
import os
import numpy as np
import np2h5
# API functions:
# __init__, write, sort, find, read
class H5IO(object):
# example call without shared indexes and without fused datasets: H5IO('file.h5', ['talker', 'language', 'age'], {'talker': ['t1', 't2', 't3'], 'language': ['French', 'English']})
# example call with shared indexes and with fused datasets:
# H5IO('file.h5', {'talker1': 'talker', 'talker2': 'talker', 'language':
# 'language', 'age1': None, 'age2': None}, {'talker': ['t1', 't2', 't3'],
# 'language': ['French', 'English']}, {'talkers': ['talker1', 'talker2']})
def __init__(self, filename, datasets=None, indexes=None, fused=None, group='/'):
# format and check inputs
if indexes is None:
indexes = {}
if fused is None:
fused = {}
if datasets is not None:
if isinstance(datasets, collections.Mapping):
indexed_datasets = [
key for key, value in datasets.iteritems() if not(value is None)]
indexed_datasets_indexes = [
value for value in datasets.values() if not(value is None)]
if not(set(datasets.values()).difference([None]) == set(indexes.keys())):
raise ValueError(
'Indexes and datasets declaration are inconsistent.')
datasets = datasets.keys()
else:
indexed_datasets = indexes.keys()
indexed_datasets_indexes = indexes.keys()
if not(set(indexes.keys()).issubset(datasets)):
raise ValueError(
'Indexes and datasets declaration are inconsistent.')
# check that all datasets to be fused are indexed
all_fused_dsets = [dset for dsets in fused.values()
for dset in dsets]
for dset in all_fused_dsets:
if not(dset in indexed_datasets):
raise ValueError(
'Only datasets for which an index was provided can be fused.')
# create HDF5 file if it doesn't exist
try: # first check if file exists
with open(filename):
if not(datasets is None):
raise IOError('File %s already exists' % filename)
with h5py.File(filename) as f:
try:
f[group]
except KeyError:
raise IOError(
"File %s doesn't contain a group named %s" % (filename, group))
except IOError: # if file doesn't exist create it
if datasets is None:
raise IOError("File %s doesn't exist" % filename)
with h5py.File(filename) as f:
if not(group in f): # handler error here ...
g = f.create_group(group)
else:
g = f[group]
# general structure
g.attrs['empty'] = True
g.attrs['sorted'] = False
# h5 dtype for storing variable length strings
str_dtype = h5py.special_dtype(vlen=unicode)
g.create_dataset(
'managed_datasets', data=datasets, dtype=str_dtype)
raw_datasets = list(set(datasets).difference(indexed_datasets))
if raw_datasets:
g.create_dataset(
'raw_datasets', data=raw_datasets, dtype=str_dtype)
# indexed datasets
if indexed_datasets:
g.create_dataset(
'indexed_datasets', data=indexed_datasets, dtype=str_dtype)
g.create_dataset(
'indexed_datasets_indexes', data=indexed_datasets_indexes, dtype=str_dtype)
index_group = g.create_group('indexes')
for key, value in indexes.iteritems():
index_group.create_dataset(
key, data=value, dtype=get_dtype(value))
non_fused = [
dset for dset in indexed_datasets if not(dset in all_fused_dsets)]
if non_fused:
g.create_dataset(
'non_fused_datasets', data=non_fused, dtype=str_dtype)
# fused datasets
if fused:
g.create_dataset(
'fused_datasets', data=fused.keys(), dtype=str_dtype)
h = g.create_group('fused')
for name, fused_dsets in fused.iteritems():
i = h.create_group(name)
i.create_dataset(
'datasets', data=fused_dsets, dtype=str_dtype)
nb_levels = [len(indexes[indexed_datasets_indexes[
indexed_datasets.index(dset)]]) for dset in fused_dsets]
i.create_dataset(
'nb_levels', data=nb_levels, dtype=np.uint64)
# instantiate h5io runtime object from (possibly newly created) file
self.filename = filename
self.group = group
self.__load__()
def __load__(self):
self.__load_metadata__()
# FIXME: self.load_data() # this implementation supposes that the
# datasets can be held in memory without problems
def __load_metadata__(self):
with h5py.File(self.filename) as f:
g = f[self.group]
self.is_empty = g.attrs['empty']
self.is_sorted = g.attrs['sorted']
self.managed_datasets = list(g['managed_datasets'][...])
if 'raw_datasets' in g:
self.raw_datasets = list(g['raw_datasets'][...])
else:
self.raw_datasets = []
if 'indexed_datasets' in g:
self.indexed_datasets = list(g['indexed_datasets'][...])
self.indexed_datasets_indexes = list(
g['indexed_datasets_indexes'][...])
self.indexes = {}
for dset in g['indexes']:
self.indexes[dset] = list(g['indexes'][dset][...])
else:
self.indexed_datasets = []
if 'non_fused_datasets' in g:
self.non_fused_datasets = list(g['non_fused_datasets'][...])
else:
self.non_fused_datasets = []
if 'fused_datasets' in g:
self.fused_datasets = list(g['fused_datasets'][...])
self.fused_members = {}
self.key_weights = {}
self.nb_levels = {}
for fused_dataset in g['fused']:
self.fused_members[fused_dataset] = list(
g['fused'][fused_dataset]['datasets'][...])
if fused_dataset + '/key_weights' in g['fused']:
self.key_weights[fused_dataset] = g['fused'][
fused_dataset]['key_weigths'][...]
else:
self.nb_levels[fused_dataset] = g['fused'][
fused_dataset]['nb_levels'][...]
else:
self.fused_datasets = []
# FIXME h5io should be developed as a subclass of np2h5
def __enter__(self):
try:
self.np2h5 = np2h5.NP2H5(self.filename)
self.np2h5.__enter__()
return self
except:
# FIXME if this fails might need a try block to ignore the
# exception?
del self.np2h5
raise
    def __exit__(self, eType, eValue, eTrace):
        """Leave the writing context: close the NP2H5 writer (flushing its
        buffers) and drop the reference so write() refuses to run outside a
        'with' block."""
        try:
            self.np2h5.__exit__(eType, eValue, eTrace)
        # FIXME here could need to ignore/log a second exception if eValue is
        # not None
        finally:
            # always drop the writer, even if closing it failed
            del self.np2h5
    def write(self, data, append=True, iterate=False, indexed=False):
        """Write *data* to the managed datasets.

        :param data: one batch (a sequence or mapping covering all managed
            datasets), or an iterator of such batches when *iterate* is True
        :param append: if False, refuse to write to an already-filled file
        :param iterate: treat *data* as an iterator of batches
        :param indexed: if True, values are assumed to already be integer
            indexes and are not converted through the declared indexes
        :raises RuntimeError: when called outside a 'with' block
        :raises IOError: when the file is filled and *append* is False
        """
        if not(hasattr(self, 'np2h5')):
            raise RuntimeError(
                "Writing to h5io objects must be done inside a context manager ('with' statemt)")
        if not(self.is_empty) and not(append):
            raise IOError('File %s is already filled' % self.filename)
        # if necessary, instantiate datasets
        if self.is_empty:
            # peek at one batch to discover dimensions and dtypes
            if iterate:
                sample_data = data.next()
            else:
                sample_data = data
            self.__initialize_datasets__(sample_data)
        else:
            sample_data = None
            # FIXME for now have to check that np2h5 was initialized
            if not(self.np2h5.buffers):
                raise ValueError(
                    "Current implementation does not allow to complete non-empty datasets")
        # set flags
        with h5py.File(self.filename) as f:
            if self.is_empty:
                self.is_empty = False
                f[self.group].attrs['empty'] = False
            if self.is_sorted:
                self.is_sorted = False
                f[self.group].attrs['sorted'] = False
        # the sample batch consumed from the iterator above still has to be
        # written out
        if not(sample_data is None) and iterate:
            self.__write__(sample_data, indexed)
        if iterate:
            for d in data:
                self.__write__(d, indexed)
        else:
            self.__write__(data, indexed)
def __parse_input_data__(self, data):
if not(isinstance(data, collections.Mapping)):
data_dict = {}
for dataset, d in zip(self.managed_datasets, data):
data_dict[dataset] = d
data = data_dict
if not(set(data.keys()) == set(self.managed_datasets)):
raise ValueError(
'It is necessary to write to all of the managed datasets simultaneously.')
return data
    def __convert_input_data__(self, data):
        """Return a copy of *data* with every value coerced to a numpy array;
        1-D arrays are reshaped into (n, 1) column vectors so downstream
        concatenation along axis=1 works uniformly."""
        res = {}
        for dset, d in data.iteritems():
            if not(hasattr(d, 'shape')):
                d = np.array(d)  # risky type conversion ?
            if len(d.shape) == 1:
                # to avoid shape problems, maybe non optimal
                d = np.reshape(d, (d.shape[0], 1))
            res[dset] = d
        return res
    def __initialize_datasets__(self, sample_data):
        """Create the on-disk datasets and np2h5 output buffers, sizing
        column counts and integer dtypes from one sample batch.

        Populates ``self.out`` with one writer per raw dataset, one shared
        'indexed' writer for all non-fused indexed datasets, and one writer
        per fused dataset; also records key weights for fused datasets.
        """
        self.out = {}
        sample_data = self.__parse_input_data__(sample_data)
        sample_data = self.__convert_input_data__(sample_data)
        # number of columns contributed by each dataset
        dims = {dset: 1 if len(data.shape) == 1 else data.shape[
            1] for dset, data in sample_data.iteritems()}
        # needed for raw_datasets only
        dtypes = {
            dset: get_dtype(sample_data[dset]) for dset in self.raw_datasets}
        # init raw datasets
        for dset in self.raw_datasets:
            # a dataset name may embed an HDF5 group path
            (group, dataset) = os.path.split(dset)
            if not(group):
                group = '/'
            self.out[dset] = self.np2h5.add_dataset(group, dataset, n_columns=dims[dset], item_type=dtypes[
                dset], fixed_size=False)  # FIXME at some point should become super.add_dataset(...)
        # init not fused indexed datasets, in this implementation they are all
        # encoded in the same matrix
        if self.non_fused_datasets:
            indexed_dims = [dims[dset] for dset in self.non_fused_datasets]
            indexed_levels = [len(self.indexes[dset])
                              for dset in self.non_fused_datasets]
            dim = sum(indexed_dims)
            # smallest unsigned integer dtype compatible with all
            # indexed_datasets
            d_type = type_fitting.fit_integer_type(
                max(indexed_levels), is_signed=False)
            # FIXME at some point should become super.add_dataset(...)
            self.out['indexed'] = self.np2h5.add_dataset(
                self.group, 'indexed_data', n_columns=dim, item_type=d_type, fixed_size=False)
            with h5py.File(self.filename) as f:
                # necessary to access the part of the data corresponding to a
                # particular dataset
                f[self.group].create_dataset(
                    'indexed_cumudims', data=np.cumsum(indexed_dims), dtype=np.uint64)
        # fused datasets have a separate one dimensional dataset each
        self.key_weights = {}
        for fused_dset in self.fused_datasets:
            fused_dims = np.array(
                [dims[dset] for dset in self.fused_members[fused_dset]], dtype=np.uint64)
            # largest mixed-radix key over all member levels/columns
            max_key = np.prod(
                self.nb_levels[fused_dset] ** fused_dims) - np.uint64(1)
            if max_key >= 2 ** 64:
                raise ValueError('fused dataset %s in file %s cannot be created because 64 bits keys are not sufficient to cover all possible combinations of the fused datasets' % (
                    fused_dset, self.filename))
            # smallest unsigned integer dtype compatible
            d_type = type_fitting.fit_integer_type(max_key, is_signed=False)
            # FIXME at some point should become super.add_dataset(...)
            self.out[fused_dset] = self.np2h5.add_dataset(
                self.group, fused_dset, n_columns=1, item_type=d_type, fixed_size=False)
            # per-column radix of the mixed-radix encoding (each member's
            # level count repeated once per column it contributes)
            nb_levels_with_multiplicity = np.concatenate([np.array(
                n, dtype=d_type) * np.ones(d, dtype=d_type) for n, d in zip(self.nb_levels[fused_dset], fused_dims)])
            # positional weights: cumulative products shifted by one
            self.key_weights[fused_dset] = np.concatenate(
                [np.array([1], dtype=d_type), np.cumprod(d_type(nb_levels_with_multiplicity))[:-1]])
            with h5py.File(self.filename) as f:
                f[self.group]['fused'][fused_dset].create_dataset(
                    'key_weights', data=self.key_weights[fused_dset], dtype=d_type)
    def __write__(self, data, indexed=False):
        """Write one batch to the np2h5 output buffers: raw datasets as-is,
        non-fused indexed datasets concatenated into the shared 'indexed'
        matrix, and fused datasets encoded into mixed-radix keys.

        :param indexed: if True, values are already integer indexes and the
            index lookup is skipped
        """
        data = self.__parse_input_data__(data)
        if not(indexed):
            data = self.__compute_indexes__(data)
        data = self.__convert_input_data__(data)
        # write raw data
        for dset in self.raw_datasets:
            # need type conversion sometimes here? (np.array(data[dset]))
            self.out[dset].write(data[dset])
        # write indexed data
        if self.non_fused_datasets:
            # FIXME check that values are in correct range of index ?
            indexed_values = [data[dset] for dset in self.non_fused_datasets]
            # need type conversion sometimes here?
            self.out['indexed'].write(np.concatenate(indexed_values, axis=1))
        # write fused data
        for fused_dset in self.fused_datasets:
            keys = self.__compute_keys__(fused_dset, np.concatenate(
                [data[key] for key in self.fused_members[fused_dset]], axis=1))  # need type conversion sometimes here?
            self.out[fused_dset].write(keys)
# this function might be optimized if useful (using searchsorted and
# stuff?)
def __compute_indexes__(self, data):
data = dict([(dset, [self.indexes[self.indexed_datasets_indexes[self.indexed_datasets.index(dset)]].index(
e) for e in d]) if dset in self.indexed_datasets else (dset, d) for | |
that is necessary to
# capture RDKit error/warning messages. See
# https://stackoverflow.com/questions/24277488/in-python-how-to-capture-the-stdout-from-a-c-shared-library-to-a-variable
stderr_fileno = sys.stderr.fileno()
stderr_save = os.dup(stderr_fileno)
stderr_pipe = os.pipe()
os.dup2(stderr_pipe[1], stderr_fileno)
os.close(stderr_pipe[1])
mol = Chem.MolFromSmiles(smiles_str)
os.close(stderr_fileno)
os.close(stderr_pipe[0])
os.dup2(stderr_save, stderr_fileno)
os.close(stderr_save)
# Check that there are None type errors Chem.MolFromSmiles has
# sanitize on which means if there is even a small error in the SMILES
# (kekulize, nitrogen charge...) then mol=None. ie.
# Chem.MolFromSmiles("C[N]=[N]=[N]") = None this is an example of an
# nitrogen charge error. It is cased in a try statement to be overly
# cautious.
return None if mol is None else mol
    @staticmethod
    def eprint(*args, **kwargs):
        """Print *args* to STDERR with print() semantics (error messages
        should not pollute STDOUT). See
        https://stackoverflow.com/questions/5574702/how-to-print-to-stderr-in-python"""
        print(*args, file=sys.stderr, **kwargs)
class LoadSMIFile(object):
    """A generator class for loading in the SMILES strings from a file, one at
    a time."""

    def __init__(self, filename, args):
        """Initializes this class.

        :param filename: The filename or file object (i.e., StringIO).
        :type filename: str or StringIO
        :param args: Parsed arguments; the "silent" key (when present and
            falsy) enables skip warnings in next().
        :type args: dict
        """
        self.args = args
        if isinstance(filename, str):
            # It's a filename
            self.f = open(filename, "r")
        else:
            # It's a file object (i.e., StringIO)
            self.f = filename

    def __iter__(self):
        """Returns this generator object.

        :return: This generator object.
        :rtype: LoadSMIFile
        """
        return self

    def __next__(self):
        """Ensure Python3 compatibility.

        :return: A dict, where the "smiles" key contains the canonical SMILES
            string and the "data" key contains the remaining information
            (e.g., the molecule name).
        :rtype: dict
        """
        return self.next()

    def next(self):
        """Get the data associated with the next usable line.

        Blank lines and lines whose SMILES cannot be parsed, neutralized or
        dehydrogenated are skipped (with a warning unless args["silent"]).

        :raises StopIteration: If there are no more lines left in the file.
        :return: A dict, where the "smiles" key contains the canonical SMILES
            string and the "data" key contains the remaining information
            (e.g., the molecule name).
        :rtype: dict
        """
        # Loop instead of recursing on skipped lines (the original called
        # self.next() recursively, which could exhaust the stack on inputs
        # with many malformed/blank lines). Also removed the unreachable
        # 'return' that followed 'raise StopIteration()'.
        while True:
            line = self.f.readline()
            if line == "":
                # EOF
                self.f.close()
                raise StopIteration()
            # Divide line into smi and data
            splits = line.split()
            if len(splits) == 0:
                # Blank line? Go to next one.
                continue
            # Generate mol object
            smiles_str = splits[0]
            # Convert from SMILES string to RDKIT Mol. This series of tests
            # is to make sure the SMILES string is properly formed and to get
            # it into a canonical form. Filter if failed.
            mol = UtilFuncs.convert_smiles_str_to_mol(smiles_str)
            if mol is None:
                self._warn_skipped(line)
                continue
            # Handle nuetralizing the molecules. Filter if failed.
            mol = UtilFuncs.neutralize_mol(mol)
            if mol is None:
                self._warn_skipped(line)
                continue
            # Remove the hydrogens.
            try:
                mol = Chem.RemoveHs(mol)
            except Exception:
                self._warn_skipped(line)
                continue
            if mol is None:
                self._warn_skipped(line)
                continue
            # Regenerate the smiles string (to standardize).
            new_mol_string = Chem.MolToSmiles(mol, isomericSmiles=True)
            return {"smiles": new_mol_string, "data": splits[1:]}

    def _warn_skipped(self, line):
        """Warn (to STDERR) that *line* was skipped, unless silenced."""
        if "silent" in self.args and not self.args["silent"]:
            UtilFuncs.eprint(
                "WARNING: Skipping poorly formed SMILES string: " + line
            )
class Protonate(object):
    """A generator class for protonating SMILES strings, one at a time."""

    def __init__(self, args):
        """Initialize the generator.

        :param args: A dictionary containing the arguments.
        :type args: dict
        """
        # Make the args an object variable.
        self.args = args
        # A list to store the protonated SMILES strings associated with a
        # single input model.
        self.cur_prot_SMI = []
        # Clean and normalize the args (overwrites the raw assignment above).
        self.args = ArgParseFuncs.clean_args(args)
        # Make sure functions in ProtSubstructFuncs have access to the args.
        # NOTE(review): this is the *raw* args, not the cleaned self.args —
        # confirm that is intended.
        ProtSubstructFuncs.args = args
        # Load the substructures that can be protonated.
        self.subs = ProtSubstructFuncs.load_protonation_substructs_calc_state_for_ph(
            self.args["min_ph"], self.args["max_ph"], self.args["pka_precision"]
        )

    def __iter__(self):
        """Returns this generator object.

        :return: This generator object.
        :rtype: Protonate
        """
        return self

    def __next__(self):
        """Ensure Python3 compatibility.

        :return: The next protonated output line (see next()).
        :rtype: str
        """
        return self.next()

    def next(self):
        """Return the next protonated SMILES output line.

        :raises StopIteration: If there are no more lines left in the file.
        :return: A tab-separated line: the SMILES string, the molecule's
            tag/name data and, when args["label_states"] is set, the target
            protonation states.
        :rtype: str
        """
        # If there are any SMILES strings in self.cur_prot_SMI, just return
        # the first one and update the list to include only the remaining.
        if len(self.cur_prot_SMI) > 0:
            first, self.cur_prot_SMI = self.cur_prot_SMI[0], self.cur_prot_SMI[1:]
            return first
        # self.cur_prot_SMI is empty, so try to add more to it.
        # Get the next SMILES string from the input file.
        try:
            smile_and_datum = self.args["smiles_and_data"].next()
        except StopIteration:
            # There are no more input smiles strings...
            raise StopIteration()
        # Keep track of the original smiles string for reporting, starting the
        # protonation process, etc.
        orig_smi = smile_and_datum["smiles"]
        # Dimorphite-DL may protonate some sites in ways that produce invalid
        # SMILES. We need to keep track of all smiles so we can "rewind" to
        # the last valid one, should things go south.
        properly_formed_smi_found = [orig_smi]
        # Everything on SMILES line but the SMILES string itself (e.g., the
        # molecule name).
        data = smile_and_datum["data"]
        # Collect the data associated with this smiles (e.g., the molecule
        # name).
        tag = " ".join(data)
        # sites is a list of (atom index, "PROTONATED|DEPROTONATED|BOTH",
        # reaction name, mol). Note that the second entry indicates what state
        # the site SHOULD be in (not the one it IS in per the SMILES string).
        # It's calculated based on the probablistic distributions obtained
        # during training.
        (
            sites,
            mol_used_to_idx_sites,
        ) = ProtSubstructFuncs.get_prot_sites_and_target_states(orig_smi, self.subs)
        new_mols = [mol_used_to_idx_sites]
        if len(sites) > 0:
            for site in sites:
                # Make a new smiles with the correct protonation state. Note that
                # new_smis is a growing list. This is how multiple protonation
                # sites are handled.
                new_mols = ProtSubstructFuncs.protonate_site(new_mols, site)
                if len(new_mols) > self.args["max_variants"]:
                    # Cap the combinatorial growth of variants.
                    new_mols = new_mols[: self.args["max_variants"]]
                    if "silent" in self.args and not self.args["silent"]:
                        UtilFuncs.eprint(
                            "WARNING: Limited number of variants to "
                            + str(self.args["max_variants"])
                            + ": "
                            + orig_smi
                        )
            # Go through each of these new molecules and add them to the
            # properly_formed_smi_found, in case you generate a poorly
            # formed SMILES in the future and have to "rewind."
            properly_formed_smi_found += [Chem.MolToSmiles(m) for m in new_mols]
        else:
            # Deprotonate the mols (because protonate_site never called to do
            # it).
            mol_used_to_idx_sites = Chem.RemoveHs(mol_used_to_idx_sites)
            new_mols = [mol_used_to_idx_sites]
            # Go through each of these new molecules and add them to the
            # properly_formed_smi_found, in case you generate a poorly formed
            # SMILES in the future and have to "rewind."
            properly_formed_smi_found.append(Chem.MolToSmiles(mol_used_to_idx_sites))
        # In some cases, the script might generate redundant molecules.
        # Phosphonates, when the pH is between the two pKa values and the
        # stdev value is big enough, for example, will generate two identical
        # BOTH states. Let's remove this redundancy.
        new_smis = list(
            set(
                [
                    Chem.MolToSmiles(m, isomericSmiles=True, canonical=True)
                    for m in new_mols
                ]
            )
        )
        # Sometimes Dimorphite-DL generates molecules that aren't actually
        # possible. Simply convert these to mol objects to eliminate the bad
        # ones (that are None).
        new_smis = [
            s for s in new_smis if UtilFuncs.convert_smiles_str_to_mol(s) is not None
        ]
        # If there are no smi left, return the input one at the very least.
        # All generated forms have apparently been judged
        # inappropriate/malformed.
        if len(new_smis) == 0:
            # Walk back from the most recently generated SMILES to the
            # original, keeping the first one that still parses.
            properly_formed_smi_found.reverse()
            for smi in properly_formed_smi_found:
                if UtilFuncs.convert_smiles_str_to_mol(smi) is not None:
                    new_smis = [smi]
                    break
        # If the user wants to see the target states, add those to the ends of
        # each line.
        if self.args["label_states"]:
            states = "\t".join([x[1] for x in sites])
            new_lines = [x + "\t" + tag + "\t" + states for x in new_smis]
        else:
            new_lines = [x + "\t" + tag for x in new_smis]
        # Queue the lines and recurse once to pop the first of them.
        self.cur_prot_SMI = new_lines
        return self.next()
class ProtSubstructFuncs:
"""A namespace to store functions for loading the substructures that can
be protonated. To keep things organized."""
args | |
1, Pin name = IO_L40P_GCLK11_M1A5, Sch name = E-GTXCLK
Subsignal("gtx", Pins("L12")),
# NET "phyrxclk" LOC = "K15"; # Bank = 1, Pin name = IO_L41P_GCLK9_IRDY1_M1RASN, Sch name = E-RXCLK
Subsignal("rx", Pins("K15")),
IOStandard(LVCMOS_BANK1)
),
("eth", 0,
# NET "phyrst" LOC = "G13"; # Bank = 1, Pin name = IO_L32N_A16_M1A9, Sch name = E-RESET
Subsignal("rst_n", Pins("G13")),
# NET "phyint" LOC = "L16"; # Bank = 1, Pin name = IO_L42N_GCLK6_TRDY1_M1LDM, Sch name = E-INT
Subsignal("int_n", Pins("L16")),
# NET "phymdi" LOC = "N17"; # Bank = 1, Pin name = IO_L48P_HDC_M1DQ8, Sch name = E-MDIO
Subsignal("mdio", Pins("N17")),
# NET "phymdc" LOC = "F16"; # Bank = 1, Pin name = IO_L1N_A24_VREF, Sch name = E-MDC
Subsignal("mdc", Pins("F16")),
# NET "phyrxdv" LOC = "F17"; # Bank = 1, Pin name = IO_L35P_A11_M1A7, Sch name = E-RXDV
Subsignal("rx_dv", Pins("F17")),
# NET "phyrxer" LOC = "F18"; # Bank = 1, Pin name = IO_L35N_A10_M1A2, Sch name = E-RXER
Subsignal("rx_er", Pins("F18")),
# NET "phyRXD<0>" LOC = "G16"; # Bank = 1, Pin name = IO_L38P_A5_M1CLK, Sch name = E-RXD0
# NET "phyRXD<1>" LOC = "H14"; # Bank = 1, Pin name = IO_L36N_A8_M1BA1, Sch name = E-RXD1
# NET "phyRXD<2>" LOC = "E16"; # Bank = 1, Pin name = IO_L33P_A15_M1A10, Sch name = E-RXD2
# NET "phyRXD<3>" LOC = "F15"; # Bank = 1, Pin name = IO_L1P_A25, Sch name = E-RXD3
# NET "phyRXD<4>" LOC = "F14"; # Bank = 1, Pin name = IO_L30P_A21_M1RESET, Sch name = E-RXD4
# NET "phyRXD<5>" LOC = "E18"; # Bank = 1, Pin name = IO_L33N_A14_M1A4, Sch name = E-RXD5
# NET "phyRXD<6>" LOC = "D18"; # Bank = 1, Pin name = IO_L31N_A18_M1A12, Sch name = E-RXD6
# NET "phyRXD<7>" LOC = "D17"; # Bank = 1, Pin name = IO_L31P_A19_M1CKE, Sch name = E-RXD7
Subsignal("rx_data", Pins("G16 H14 E16 F15 F14 E18 D18 D17")),
# NET "phytxen" LOC = "H15"; # Bank = 1, Pin name = IO_L37P_A7_M1A0, Sch name = E-TXEN
Subsignal("tx_en", Pins("H15")),
# NET "phytxer" LOC = "G18"; # Bank = 1, Pin name = IO_L38N_A4_M1CLKN, Sch name = E-TXER
Subsignal("tx_er", Pins("G18")),
# NET "phyTXD<0>" LOC = "H16"; # Bank = 1, Pin name = IO_L37N_A6_M1A1, Sch name = E-TXD0
# NET "phyTXD<1>" LOC = "H13"; # Bank = 1, Pin name = IO_L36P_A9_M1BA0, Sch name = E-TXD1
# NET "phyTXD<2>" LOC = "K14"; # Bank = 1, Pin name = IO_L39N_M1ODT, Sch name = E-TXD2
# NET "phyTXD<3>" LOC = "K13"; # Bank = 1, Pin name = IO_L34N_A12_M1BA2, Sch name = E-TXD3
# NET "phyTXD<4>" LOC = "J13"; # Bank = 1, Pin name = IO_L39P_M1A3, Sch name = E-TXD4
# NET "phyTXD<5>" LOC = "G14"; # Bank = 1, Pin name = IO_L30N_A20_M1A11, Sch name = E-TXD5
# NET "phyTXD<6>" LOC = "H12"; # Bank = 1, Pin name = IO_L32P_A17_M1A8, Sch name = E-TXD6
# NET "phyTXD<7>" LOC = "K12"; # Bank = 1, Pin name = IO_L34P_A13_M1WE, Sch name = E-TXD7
Subsignal("tx_data", Pins("H16 H13 K14 K13 J13 G14 H12 K12")),
# C17 - from Atlys reference manual, not listed in UCF file?
Subsignal("col", Pins("C17")),
# C18 - from Atlys reference manual, not listed in UCF file?
Subsignal("crs", Pins("C18")),
IOStandard(LVCMOS_BANK1)
),
## DDR2
# 128Mbyte DDR2 16-bit wide data @ 800MHz
# Older boards - MT47H64M16HR-25E - DDR2 - 2.5ns @ CL = 5 (DDR2-800)
# Newer boards - MIRA P3R1GE3EGF G8E DDR2 -
#
# The interface supports SSTL18 signaling. Address and control signals
# are terminated through 47-ohm resistors to a 0.9V VTT, and data
# signals use the On-Die-Termination (ODT) feature of the DDR2 chip.
#
# When generating a MIG core for the MIRA part, selecting the
# “EDE1116AXXX-8E” device will result in the correct timing parameters
# being set. When generating a component for the Micron part, it can be
# selected by name within the wizard. The part loaded on your Atlys can
# be determined by examining the print on the DDR2 component (IC13).
#
# NET "DDR2CLK0" LOC = "G3"; # Bank = 3, Pin name = IO_L46P_M3CLK, Sch name = DDR-CK_P
# NET "DDR2CLK1" LOC = "G1"; # Bank = 3, Pin name = IO_L46N_M3CLKN, Sch name = DDR-CK_N
("ddram_clock", 0,
Subsignal("p", Pins("G3")),
Subsignal("n", Pins("G1")),
IOStandard("DIFF_SSTL18_II"), Misc("IN_TERM=NONE")
),
("ddram", 0,
# NET "DDR2CKE" LOC = "H7"; # Bank = 3, Pin name = IO_L53P_M3CKE, Sch name = DDR-CKE
Subsignal("cke", Pins("H7"), IOStandard("SSTL18_II")),
# NET "DDR2RASN" LOC = "L5"; # Bank = 3, Pin name = IO_L43P_GCLK23_M3RASN, Sch name = DDR-RAS
Subsignal("ras_n", Pins("L5"), IOStandard("SSTL18_II")),
# NET "DDR2CASN" LOC = "K5"; # Bank = 3, Pin name = IO_L43N_GCLK22_IRDY2_M3CASN, Sch name = DDR-CAS
Subsignal("cas_n", Pins("K5"), IOStandard("SSTL18_II")),
# NET "DDR2WEN" LOC = "E3"; # Bank = 3, Pin name = IO_L50P_M3WE, Sch name = DDR-WE
Subsignal("we_n", Pins("E3"), IOStandard("SSTL18_II")),
# NET "DDR2BA0" LOC = "F2"; # Bank = 3, Pin name = IO_L48P_M3BA0, Sch name = DDR-BA0
# NET "DDR2BA1" LOC = "F1"; # Bank = 3, Pin name = IO_L48N_M3BA1, Sch name = DDR-BA1
# NET "DDR2BA2" LOC = "E1"; # Bank = 3, Pin name = IO_L50N_M3BA2, Sch name = DDR-BA2
Subsignal("ba", Pins("F2 F1 E1"), IOStandard("SSTL18_II")),
# NET "DDR2A0" LOC = "J7"; # Bank = 3, Pin name = IO_L47P_M3A0, Sch name = DDR-A0
# NET "DDR2A1" LOC = "J6"; # Bank = 3, Pin name = IO_L47N_M3A1, Sch name = DDR-A1
# NET "DDR2A2" LOC = "H5"; # Bank = 3, Pin name = IO_L49N_M3A2, Sch name = DDR-A2
# NET "DDR2A3" LOC = "L7"; # Bank = 3, Pin name = IO_L45P_M3A3, Sch name = DDR-A3
# NET "DDR2A4" LOC = "F3"; # Bank = 3, Pin name = IO_L51N_M3A4, Sch name = DDR-A4
# NET "DDR2A5" LOC = "H4"; # Bank = 3, Pin name = IO_L44P_GCLK21_M3A5, Sch name = DDR-A5
# NET "DDR2A6" LOC = "H3"; # Bank = 3, Pin name = IO_L44N_GCLK20_M3A6, Sch name = DDR-A6
# NET "DDR2A7" LOC = "H6"; # Bank = 3, Pin name = IO_L49P_M3A7, Sch name = DDR-A7
# NET "DDR2A8" LOC = "D2"; # Bank = 3, Pin name = IO_L52P_M3A8, Sch name = DDR-A8
# NET "DDR2A9" LOC = "D1"; # Bank = 3, Pin name = IO_L52N_M3A9, Sch name = DDR-A9
# NET "DDR2A10" LOC = "F4"; # Bank = 3, Pin name = IO_L51P_M3A10, Sch name = DDR-A10
# NET "DDR2A11" LOC = "D3"; # Bank = 3, Pin name = IO_L54N_M3A11, Sch name = DDR-A11
# NET "DDR2A12" LOC = "G6"; # Bank = 3, Pin name = IO_L53N_M3A12, Sch name = DDR-A12
Subsignal("a", Pins("J7 J6 H5 L7 F3 H4 H3 H6 D2 D1 F4 D3 G6"), IOStandard("SSTL18_II")),
# NET "DDR2DQ0" LOC = "L2"; # Bank = 3, Pin name = IO_L37P_M3DQ0, Sch name = DDR-DQ0
# NET "DDR2DQ1" LOC = "L1"; # Bank = 3, Pin name = IO_L37N_M3DQ1, Sch name = DDR-DQ1
# NET "DDR2DQ2" LOC = "K2"; # Bank = 3, Pin name = IO_L38P_M3DQ2, Sch name = DDR-DQ2
# NET "DDR2DQ3" LOC = "K1"; # Bank = 3, Pin name = IO_L38N_M3DQ3, Sch name = DDR-DQ3
# NET "DDR2DQ4" LOC = "H2"; # Bank = 3, Pin name = IO_L41P_GCLK27_M3DQ4, Sch name = DDR-DQ4
# NET "DDR2DQ5" LOC = "H1"; # Bank = 3, Pin name = IO_L41N_GCLK26_M3DQ5, Sch name = DDR-DQ5
# NET "DDR2DQ6" LOC = "J3"; # Bank = 3, Pin name = IO_L40P_M3DQ6, Sch name = DDR-DQ6
# NET "DDR2DQ7" LOC = "J1"; # Bank = 3, Pin name = IO_L40N_M3DQ7, Sch name = DDR-DQ7
# NET "DDR2DQ8" LOC = "M3"; # Bank = 3, Pin name = IO_L36P_M3DQ8, Sch name = DDR-DQ8
# NET "DDR2DQ9" LOC = "M1"; # Bank = 3, Pin name = IO_L36N_M3DQ9, Sch name = | |
activation='linear')(x)
outputs_gain = Lambda(lambda x: x * output_gain)(predictions)
# this creates a model that includes
# the Input layer and three Dense layers
opt_q_fa = RMSprop(lr = self.alpha)
self.q_fa = Model(input=inputs_q_fa, output=outputs_gain)
self.q_fa.compile(optimizer=opt_q_fa, loss='mse')
self.q_fa_training_cnt = 0
self.q_fa_training_loss = 0
# this returns a tensor
# inputs_q_Q_fa = Input(shape=(3,))
inputs_q_Q_fa = Input(shape=(2 + len(self.actions),))
inputs_gain = Lambda(lambda x: x * input_gain)(inputs_q_Q_fa)
# inputs_squared = Lambda(lambda x: (x ** 2) * 0.1)(inputs_q_Q_fa)
# inputs_combined = Merge(mode="concat", concat_axis=1)([inputs_gain, inputs_squared])
# a layer instance is callable on a tensor, and returns a tensor
x = Dense(layer_1_num_units, activation='tanh')(inputs_gain)
x = Dense(layer_2_num_units, activation='tanh')(x)
predictions = Dense(1, activation='linear')(x)
outputs_gain = Lambda(lambda x: x * output_gain)(predictions)
# this creates a model that includes
# the Input layer and three Dense layers
opt_q_Q_fa = RMSprop(lr = self.alpha)
self.q_Q_fa = Model(input=inputs_q_Q_fa, output=outputs_gain)
self.q_Q_fa.compile(optimizer=opt_q_Q_fa, loss='mse')
self.q_Q_fa_training_cnt = 0
self.q_Q_fa_training_loss = 0
# this returns a tensor
inputs_q_SARSA_fa = Input(shape=(2 + len(self.actions),))
inputs_gain = Lambda(lambda x: x * input_gain)(inputs_q_SARSA_fa)
# inputs_squared = Lambda(lambda x: (x ** 2) * 0.1)(inputs_q_SARSA_fa)
# inputs_combined = Merge(mode="concat", concat_axis=1)([inputs_gain, inputs_squared])
# a layer instance is callable on a tensor, and returns a tensor
x = Dense(layer_1_num_units, activation='tanh')(inputs_gain)
x = Dense(layer_2_num_units, activation='tanh')(x)
predictions = Dense(1, activation='linear')(x)
outputs_gain = Lambda(lambda x: x * output_gain)(predictions)
# this creates a model that includes
# the Input layer and three Dense layers
opt_q_SARSA_fa = RMSprop(lr = self.alpha)
self.q_SARSA_fa = Model(input=inputs_q_SARSA_fa, output=outputs_gain)
self.q_SARSA_fa.compile(optimizer=opt_q_SARSA_fa, loss='mse')
self.q_SARSA_fa_training_cnt = 0
self.q_SARSA_fa_training_loss = 0
def v_fa_predict(self, s):
    """Predict the state value V(s) with the Keras function approximator.

    Feeds the (x, y) position components of the state column vector to
    the model as a single-row batch.
    """
    # First two rows of the state column vector are the (x, y) position.
    position = s[:2, 0].reshape((1, 2))
    # '* 1.0' forces a float copy, exactly as the original code did.
    prediction = self.v_fa.predict(position * 1.0)
    return prediction * 1.0
def v_fa_update(self, s):
    """Train the state-value approximator on a one-step TD(0) target.

    :param s: current sensory state column vector [x, y, reward].T
    """
    # print "s", s
    # NOTE(review): v_fa_tm1 is computed but never used below -- looks
    # like a leftover from an earlier TD-error formulation; confirm.
    v_fa_tm1 = self.v(self.s_tm1)
    # Bootstrapped value of the current state.
    v_fa = self.v(s)
    # Network input: (x, y) position of the previous state.
    x = self.s_tm1[:2,0].reshape((1,2))
    # TD target: reward plus discounted value of the current state.
    y = s[2,0] + self.gamma * v_fa
    # 'True or ...' disables the warm-up/reward gate -- training always runs.
    if True or self.v_fa_training_cnt > 100 or s[2,0] > 0.0:
        # target_weight = (1.0 + s[2] * 10.0).reshape()
        # Rewarded transitions get a larger sample weight (1 + 10*reward).
        target_weight = np.ones((1,)) + s[2] * 10.0
        self.v_fa_training_loss = self.v_fa.train_on_batch(x * 1.0, y * 1.0, sample_weight = target_weight) # starts training
    self.v_fa_training_cnt += 1
def q_fa_predict(self, s, a):
    """Predict the action value Q(s, a) with the Keras approximator.

    The action index is one-hot encoded and stacked below the (x, y)
    position before being fed to the model as a single-row batch.
    """
    one_hot = np.zeros((len(self.actions), 1))
    one_hot[int(a[0, 0]), 0] = 1.0
    position = s[:2, 0].reshape((2, 1))
    features = np.vstack((position, one_hot))
    # Row-vector input; '* 1.0' forces float copies as in the original.
    return self.q_fa.predict(features.T * 1.0) * 1.0
def q_fa_update(self, s, a):
    """Train the Q(s, a) approximator on a one-step TD(0) target.

    The input row is the previous (x, y) position stacked with a one-hot
    encoding of the previous action; the target bootstraps on the value
    of the current state/action pair.

    :param s: current sensory state column vector [x, y, reward].T
    :param a: current action column vector
    """
    # print "s", s
    # One-hot encode the previous action.
    a_tm1_ = np.zeros((len(self.actions),1))
    a_tm1_[int(self.a_tm1[0,0]),0] = 1.0
    # print "a_tm1_", a_tm1_
    # q_fa_tm1 = self.q(self.s_tm1, self.a_tm1)
    # Bootstrapped value of the current state/action pair.
    q_fa = self.q(s, a)
    # x = np.vstack((self.s_tm1[:2,0].reshape((2,1)), self.a_tm1)).T
    # Input row: previous position plus one-hot previous action.
    x = np.vstack((self.s_tm1[:2,0].reshape((2,1)), a_tm1_)).T
    # print "x", x
    # TD target: reward plus discounted bootstrap value.
    y = s[2,0] + self.gamma * q_fa
    # 'True or ...' disables the warm-up/reward gate -- training always runs.
    if True or self.q_fa_training_cnt > 100 or s[2,0] > 0.0:
        # Rewarded transitions get a larger sample weight (1 + 10*reward).
        target_weight = np.ones((1,)) + s[2] * 10.0
        self.q_fa_training_loss = self.q_fa.train_on_batch(x * 1.0, y * 1.0, sample_weight = target_weight) # starts training
    self.q_fa_training_cnt += 1
def q_Q_fa_predict(self, s, a):
    """Predict the Q-learning action value Q(s, a) with the approximator.

    The action index is one-hot encoded and stacked below the (x, y)
    position before being fed to the model as a single-row batch.
    """
    a_ = np.zeros((len(self.actions), 1))
    # BUG FIX: cast the action index to int, as q_fa_predict already
    # does -- indexing with a float raises IndexError on modern NumPy.
    a_[int(a[0, 0]), 0] = 1.0
    x = np.vstack((s[:2, 0].reshape((2, 1)), a_))
    # x = np.vstack((s[:2,0].reshape((2,1)), a))
    return self.q_Q_fa.predict(x.T)
def q_Q_fa_update(self, s, a):
    """Train the Q-learning approximator on a max-bootstrapped target.

    Evaluates Q(s, a') for every action a' and bootstraps the target on
    the maximum (off-policy Q-learning backup).

    :param s: current sensory state column vector [x, y, reward].T
    :param a: current action column vector (unused in the target)
    """
    # print "s", s
    # One-hot encode the previous action (the one being updated).
    a_tm1_ = np.zeros((len(self.actions),1))
    a_tm1_[int(self.a_tm1[0,0]),0] = 1.0
    # q_Q_fa_tm1 = self.q_Q(self.s_tm1, self.a_tm1)
    # NOTE(review): the max is taken over self.s rather than the s
    # parameter -- equivalent only if the caller set self.s = s first
    # (step() does); confirm this invariant.
    q_Q_fa_ = []
    for a_ in range(len(self.actions)):
        q_Q_fa_.append(self.q_Q(self.s, np.array([[a_]])))
    q_Q_fa_ = np.array([q_Q_fa_])
    q_Q_fa_max = np.max(q_Q_fa_)
    # Rewrap the scalar max as a (1, 1) array for train_on_batch.
    q_Q_fa_max = np.array([[q_Q_fa_max]]) # ?
    # print "argmax", q_Q_fa_max
    # Input row: previous position plus one-hot previous action.
    x = np.vstack((self.s_tm1[:2,0].reshape((2,1)), a_tm1_)).T
    # Q-learning target: reward plus discounted max over actions.
    y = s[2,0] + self.gamma * q_Q_fa_max
    # print "x", x, "y", y
    # 'True or ...' disables the warm-up/reward gate -- training always runs.
    if True or self.q_Q_fa_training_cnt > 100 or s[2,0] > 0.0:
        # Rewarded transitions get a larger sample weight (1 + 10*reward).
        target_weight = np.ones((1,)) + s[2] * 10.0
        self.q_Q_fa_training_loss = self.q_Q_fa.train_on_batch(x, y, sample_weight = target_weight) # starts training
    self.q_Q_fa_training_cnt += 1
def q_SARSA_fa_predict(self, s, a):
    """Predict the SARSA action value Q(s, a) with the approximator.

    The action index is one-hot encoded and stacked below the (x, y)
    position before being fed to the model as a single-row batch.
    """
    a_ = np.zeros((len(self.actions), 1))
    # BUG FIX: cast the action index to int, as q_fa_predict already
    # does -- indexing with a float raises IndexError on modern NumPy.
    a_[int(a[0, 0]), 0] = 1.0
    x = np.vstack((s[:2, 0].reshape((2, 1)), a_))
    # x = np.vstack((s[:2,0].reshape((2,1)), a))
    return self.q_SARSA_fa.predict(x.T)
def q_SARSA_fa_update(self, s, a):
    """Train the SARSA approximator on an on-policy TD target.

    Bootstraps on Q(s, a) for the action actually taken (on-policy),
    unlike the max-backup used by q_Q_fa_update.

    :param s: current sensory state column vector [x, y, reward].T
    :param a: current action column vector
    """
    # print "s", s
    # One-hot encode the previous action (the one being updated).
    a_tm1_ = np.zeros((len(self.actions),1))
    a_tm1_[int(self.a_tm1[0,0]),0] = 1.0
    # On-policy bootstrap value of the current state/action pair.
    q_SARSA_fa = self.q_SARSA(s, a)
    # Input row: previous position plus one-hot previous action.
    x = np.vstack((self.s_tm1[:2,0].reshape((2,1)), a_tm1_)).T
    # SARSA target: reward plus discounted on-policy bootstrap value.
    y = s[2,0] + self.gamma * q_SARSA_fa
    # 'True or ...' disables the warm-up/reward gate -- training always runs.
    if True or self.q_SARSA_fa_training_cnt > 100 or s[2,0] > 0.0:
        # Rewarded transitions get a larger sample weight (1 + 10*reward).
        target_weight = np.ones((1,)) + s[2] * 10.0
        self.q_SARSA_fa_training_loss = self.q_SARSA_fa.train_on_batch(x, y, sample_weight = target_weight) # starts training
    self.q_SARSA_fa_training_cnt += 1
################################################################################
def update_get_indices(self, s, s_tm1, a_tm1):
    """Extract integer grid and action indices from state/action vectors.

    :return: tuple (x, y, x_tm1, y_tm1, a_tm1) of plain ints.
    """
    current = (int(s[0, 0]), int(s[1, 0]))
    previous = (int(s_tm1[0, 0]), int(s_tm1[1, 0]))
    action = int(a_tm1[0, 0])
    return current + previous + (action,)
def v_tbl_predict(self, s):
    """Look up the tabular state value V(s) for the grid cell of s."""
    col = int(s[0, 0])
    row = int(s[1, 0])
    return self.v_tbl[col, row]
def q_tbl_predict(self, s, a):
    """Look up the tabular action value Q(s, a) for the grid cell of s."""
    col, row = int(s[0, 0]), int(s[1, 0])
    action = int(a[0, 0])
    return self.q_tbl[col, row, action]
def q_Q_tbl_predict(self, s, a):
    """Look up the tabular Q-learning value Q(s, a) for the cell of s."""
    col, row = int(s[0, 0]), int(s[1, 0])
    action = int(a[0, 0])
    return self.q_Q_tbl[col, row, action]
def q_SARSA_tbl_predict(self, s, a):
    """Look up the tabular SARSA value Q(s, a) for the grid cell of s."""
    col, row = int(s[0, 0]), int(s[1, 0])
    action = int(a[0, 0])
    return self.q_SARSA_tbl[col, row, action]
def v_tbl_update(self, s):
    """Tabular TD(0) update of the state value for the previous cell.

    :param s: current sensory state column vector [x, y, reward].T
    """
    l_x, l_y, l_x_tm1, l_y_tm1, l_a_tm1 = self.update_get_indices(s, self.s_tm1, self.a_tm1)
    # back up old state value once
    # self.v_tbl_s_tm1 = self.v_tbl[l_x_tm1, l_y_tm1].copy()
    self.v_tbl_s_tm1 = self.v(self.s_tm1).copy()
    # perform update, SB2nded pg. ?, eq. ?
    # NOTE(review): the extra 0.1 factor further scales the learning rate
    # for the value table only -- presumably deliberate; confirm.
    # self.v_tbl[l_x_tm1, l_y_tm1] = self.v_tbl_s_tm1 + self.alpha * 0.1 * (s[2,0] + self.gamma * self.v_tbl[l_x, l_y] - self.v_tbl_s_tm1)
    self.v_tbl[l_x_tm1, l_y_tm1] = self.v_tbl_s_tm1 + self.alpha * 0.1 * (s[2,0] + self.gamma * self.v(s) - self.v_tbl_s_tm1)
def q_tbl_update(self, s, a):
    """Tabular TD(0) update of Q for the previous state/action pair.

    :param s: current sensory state column vector [x, y, reward].T
    :param a: current action column vector (not used directly below)
    """
    l_x, l_y, l_x_tm1, l_y_tm1, l_a_tm1 = self.update_get_indices(s, self.s_tm1, self.a_tm1)
    # back up old state-action value once
    # self.q_tbl_sa_tm1 = self.q_tbl[l_x_tm1, l_y_tm1, l_a_tm1].copy()
    self.q_tbl_sa_tm1 = self.q(self.s_tm1, self.a_tm1).copy()
    # perform update, SB2nded pg. ?, eq. ?
    # NOTE(review): the reward is read from self.s[2,0] rather than the s
    # parameter (equivalent only when the caller assigned self.s = s, as
    # step() does), and the bootstrap uses the previous action self.a_tm1
    # in the current state -- confirm both are intended.
    # self.q_tbl[l_x_tm1, l_y_tm1, l_a_tm1] = self.q_tbl_sa_tm1 + self.alpha * (self.s[2,0] + self.gamma * self.q_tbl[l_x, l_y, l_a_tm1] - self.q_tbl_sa_tm1)
    self.q_tbl[l_x_tm1, l_y_tm1, l_a_tm1] = self.q_tbl_sa_tm1 + self.alpha * (self.s[2,0] + self.gamma * self.q(s, self.a_tm1) - self.q_tbl_sa_tm1)
def q_Q_tbl_update(self, s, a):
    """Tabular Q-learning update for the previous state/action pair.

    Bootstraps on the maximum table entry of the current cell
    (off-policy backup).

    :param s: current sensory state column vector [x, y, reward].T
    :param a: current action column vector (unused in the target)
    """
    l_x, l_y, l_x_tm1, l_y_tm1, l_a_tm1 = self.update_get_indices(s, self.s_tm1, self.a_tm1)
    # back up old state-action value once Q-Learning
    # self.q_Q_tbl_tm1 = self.q_Q_tbl[l_x_tm1, l_y_tm1, l_a_tm1].copy()
    self.q_Q_tbl_tm1 = self.q_Q(self.s_tm1, self.a_tm1).copy()
    # perform update, SB2nded pg. ?, eq. ?
    # print "q_Q update max(Q_q(S, a))", np.max(self.q_Q_tbl[l_x, l_y, l_a_tm1])
    # print "self.q_Q_tbl[l_x, l_y, l_a_tm1]", self.q_Q_tbl[l_x, l_y, :]
    # NOTE(review): reward is read from self.s[2,0] rather than the s
    # parameter; equivalent only when the caller assigned self.s = s.
    self.q_Q_tbl[l_x_tm1, l_y_tm1, l_a_tm1] = self.q_Q_tbl_tm1 + self.alpha * (self.s[2,0] + self.gamma * np.max(self.q_Q_tbl[l_x, l_y, :]) - self.q_Q_tbl_tm1)
    # self.q_Q_tbl[l_x_tm1, l_y_tm1, l_a_tm1] = self.q_Q_tbl_tm1 + self.alpha * (self.s[2,0] + self.gamma * np.max(self.q_Q_tbl[l_x, l_y, l_a_tm1]) - self.q_Q_tbl_tm1)
def q_SARSA_tbl_update(self, s, a):
    """Tabular SARSA update for the previous state/action pair.

    Bootstraps on Q(s, a) for the action actually taken (on-policy),
    unlike the max-backup in q_Q_tbl_update.

    :param s: current sensory state column vector [x, y, reward].T
    :param a: current action column vector
    """
    l_x, l_y, l_x_tm1, l_y_tm1, l_a_tm1 = self.update_get_indices(s, self.s_tm1, self.a_tm1)
    # back up old state-action value once Q-Learning
    # self.q_SARSA_tbl_tm1 = self.q_SARSA_tbl[l_x_tm1, l_y_tm1, l_a_tm1].copy()
    self.q_SARSA_tbl_tm1 = self.q_SARSA(self.s_tm1, self.a_tm1).copy()
    # perform update, SB2nded pg. ?, eq. ?
    # NOTE(review): reward is read from self.s[2,0] rather than the s
    # parameter; equivalent only when the caller assigned self.s = s.
    # self.q_SARSA_tbl[l_x_tm1, l_y_tm1, l_a_tm1] = self.q_SARSA_tbl_tm1 + self.alpha * (self.s[2,0] + (self.gamma * self.q_SARSA_tbl[l_x, l_y, self.a]) - self.q_SARSA_tbl_tm1)
    self.q_SARSA_tbl[l_x_tm1, l_y_tm1, l_a_tm1] = self.q_SARSA_tbl_tm1 + self.alpha * (self.s[2,0] + (self.gamma * self.q_SARSA(s, a)) - self.q_SARSA_tbl_tm1)
# policies
def policy(self, q, s, epsilon = 0.0):
    """Dispatch action selection to the configured policy function.

    NOTE(review): the epsilon argument is accepted but NOT forwarded to
    self.policy_func, so callers passing epsilon=... (see step()) have
    no effect on exploration. Forwarding it would break policy_random,
    which takes no epsilon -- confirm the intended behavior.
    """
    return self.policy_func(q, s)
def policy_random(self, q, s):
    """Sample a uniformly random action index, shaped like self.a."""
    num_actions = len(self.actions)
    return np.random.randint(num_actions, size=self.a.shape)
def policy_epsilon_greedy(self, q, s, epsilon = 0.05):
    """Epsilon-greedy action selection over a tabular Q function.

    With probability epsilon a random action is taken; otherwise the
    argmax over the current cell's action values is returned, shaped
    like self.a.
    """
    if np.random.uniform() < epsilon:
        return self.policy_random(q, s)
    # Greedy branch: best action for the (x, y) cell of the state.
    cell_values = q[int(s[0, 0]), int(s[1, 0])]
    # print "%s.policy_epsilon_greedy q_s = %s" % (self.__class__.__name__, cell_values)
    best_action = np.argmax(cell_values)
    return best_action.reshape(self.a.shape)
def step(self, s):
# stop episode
if self.terminal:
self.terminal_ -= 1
if self.repr == "approximation":
if not hasattr(self, "avg_loss"):
self.avg_loss = 0.0
self.avg_loss = 0.9 * self.avg_loss + 0.1 * np.sum([self.v_fa_training_loss, self.q_fa_training_loss, self.q_Q_fa_training_loss, self.q_SARSA_fa_training_loss])
print "tc", self.v_fa_training_cnt, self.v_fa_training_loss, self.q_fa_training_cnt, self.q_fa_training_loss, self.q_Q_fa_training_cnt, self.q_Q_fa_training_loss, self.q_SARSA_fa_training_cnt, self.q_SARSA_fa_training_loss
print "avg loss", self.avg_loss
# sensory measurement: [x, y, reward].T
self.s = s.copy()
# print "%s.step s = %s" % (self.__class__.__name__, self.s)
# current state
l_x = int(self.s[0,0])
l_y = int(self.s[1,0])
# last state
l_x_tm1 = int(self.s_tm1[0,0])
l_y_tm1 = int(self.s_tm1[1,0])
l_a_tm1 = int(self.a_tm1[0,0])
# print "l", l_x, l_y, "l_tm1", l_x_tm1, l_y_tm1
# update v
# print "v", l_x, l_y, self.v_tbl[l_x, l_y]
# update value functions
# v
self.v_update(self.s)
# q with td0 update
self.q_update(self.s, self.a)
# q with Q update
self.q_Q_update(self.s, self.a)
# policy: some functional thing that produces an action
if self.sensorimotor_loop == "td_0_prediction":
self.a = self.policy(self.q_tbl, self.s)
elif self.sensorimotor_loop == "td_0_off_policy_control":
# back up old q_Q for off policy foo
self.a = self.policy(self.q_Q_tbl, self.s, epsilon = self.epsilon)
elif self.sensorimotor_loop == "td_0_on_policy_control":
self.a = self.policy(self.q_SARSA_tbl, self.s, epsilon = self.epsilon)
# print self.a
# q | |
it to the list of
# batches.
batch_info['type'] = batch_type
batches.append(batch_info)
prev_batch_info = batch_info
prev_batch_type = batch_type
# Now let's perform one last pass, this time through the new
# consolidated batches. That information will be used to generate
# the SQL and combined state needed during the execute_tasks() and
# execute() stages.
if hinted:
hinted_evolution = evolver.initial_diff.evolution()
else:
hinted_evolution = None
for batch_info in batches:
if batch_info['type'] == UpgradeMethod.EVOLUTIONS:
new_models = batch_info.pop('new_models', None)
if new_models:
# We can now calculate the SQL for all these models.
#
# We'll also need to grab each unique task in order. For
# that, use an OrderedDict's keys, simulating an ordered
# set.
new_models_nodes = batch_info.pop('new_models_nodes')
assert new_models_nodes
batch_info.update({
'new_models_sql': sql_create_models(
new_models,
db_name=database_name),
'new_models_tasks': filter_dup_list_items(
node.state['task']
for node in new_models_nodes
),
})
# For each task introducing evolutions to apply, we need to
# determine the pending mutations and resulting SQL for
# applying those mutations. Since we have a whole batch that
# we know we'll be applying at once, we can safely optimize
# those at this stage.
#
# Note that we'll have one task per app to evolve.
task_evolutions = batch_info.get('task_evolutions', {})
for (batch_task,
batch_task_info) in six.iteritems(task_evolutions):
# This is going to look pretty similar to what's already
# been done in the prepare() stage, and it is. The
# difference is that we're now running operations on the
# batch's set of evolutions rather than the task's.
#
# There's not much we can do to share this logic between
# here and prepare().
if batch_task._evolutions:
# Custom evolutions were passed to the task. Build the
# list of mutations for all evolutions in this task
# in the correct order.
mutations_map = {
_info['label']: _info['mutations']
for _info in batch_task._evolutions
}
pending_mutations = list(itertools.chain.from_iterable(
mutations_map[_label]
for _label in batch_task_info['evolutions']
))
elif hinted:
# This is a hinted mutation, so grab the mutations
# hinted for this task's app.
pending_mutations = \
hinted_evolution.get(batch_task.app_label)
else:
# This is our standard case: An actual evolution from
# written evolution files. Generate the set of
# mutations to apply for all queued evolutions in
# this task.
pending_mutations = get_app_pending_mutations(
app=batch_task.app,
evolution_labels=batch_task_info['evolutions'],
old_project_sig=evolver.project_sig,
project_sig=evolver.target_project_sig,
database=database_name)
if pending_mutations:
# We have pending mutations for this task. Generate
# the final optimized SQL and list of mutations and
# store them for later execution.
#
# This will modify the signature in the Evolver.
mutations_info = batch_task.generate_mutations_info(
pending_mutations)
if mutations_info:
batch_task_info.update({
'mutations': mutations_info['mutations'],
'sql': mutations_info['sql'],
})
return batches
@classmethod
def _create_models(cls, sql_executor, evolver, tasks, sql):
    """Create tables for models in the database.

    Args:
        sql_executor (django_evolution.utils.sql.SQLExecutor):
            The SQL executor used to run any SQL on the database.

        evolver (Evolver):
            The evolver executing the tasks.

        tasks (list of EvolveAppTask):
            The list of tasks containing models to create.

        sql (list):
            The list of SQL statements to execute.

    Returns:
        list:
        The list of SQL statements used to create the model. This is
        used primarily for unit tests.

    Raises:
        django_evolution.errors.EvolutionExecutionError:
            There was an unexpected error creating database models.
    """
    assert sql_executor
    assert tasks
    assert sql

    # All models are created in a single pass so that Django can defer
    # SQL referencing a model until after that model exists. Because of
    # that, every creating_models signal fires before any SQL runs, and
    # every created_models signal fires after.
    for task in tasks:
        assert task
        creating_models.send(sender=evolver,
                             app_label=task.app_label,
                             model_names=task.new_model_names)

    try:
        result = sql_executor.run_sql(sql=sql,
                                      execute=True,
                                      capture=True)
    except Exception as e:
        # Common error details regardless of how many tasks we have.
        error_kwargs = {
            'detailed_error': six.text_type(e),
            'last_sql_statement': getattr(e, 'last_sql_statement', None),
        }

        if len(tasks) == 1:
            # A single app failed, so the error can name it.
            app_label = tasks[0].app_label

            raise EvolutionExecutionError(
                _('Error creating database models for %s: %s')
                % (app_label, e),
                app_label=app_label,
                **error_kwargs)

        raise EvolutionExecutionError(
            _('Error creating database models: %s') % e,
            **error_kwargs)

    for task in tasks:
        created_models.send(sender=evolver,
                            app_label=task.app_label,
                            model_names=task.new_model_names)

    return result
def __init__(self, evolver, app, evolutions=None, migrations=None):
    """Initialize the task.

    Args:
        evolver (Evolver):
            The evolver that will execute the task.

        app (module):
            The app module to evolve.

        evolutions (list of dict, optional):
            Optional evolutions to use for the app instead of loading
            from a file. This is intended for testing purposes.

            Each dictionary needs a ``label`` key for the evolution label
            and a ``mutations`` key for a list of
            :py:class:`~django_evolution.mutations.BaseMutation` instances.

        migrations (list of django.db.migrations.Migration, optional):
            Optional migrations to use for the app instead of loading from
            files. This is intended for testing purposes.
    """
    super(EvolveAppTask, self).__init__(
        task_id='evolve-app:%s' % app.__name__,
        evolver=evolver)

    # The app module and its canonical/legacy labels.
    self.app = app
    self.app_label = get_app_label(app)
    self.legacy_app_label = get_legacy_app_label(app)

    # Signature state; filled in during prepare().
    self.app_sig = None
    self.app_sig_is_new = False

    # Models that still need to be created in the database; filled in
    # during prepare().
    self.new_model_names = []
    self.new_models = []

    # Upgrade bookkeeping for the app.
    self.upgrade_method = None
    self.applied_migrations = None
    self.hinted_evolution = None

    # Internal state for SQL generation and mutation tracking.
    self._new_models_sql = []
    self._evolutions = evolutions
    self._migrations = migrations
    self._mutations = None
    self._pending_mutations = None
def generate_mutations_info(self, pending_mutations, update_evolver=True):
    """Generate information on a series of mutations.

    This optimizes and runs the list of pending mutations against the
    evolver's stored signature, returning the optimized mutations and
    SQL along with some information on the app.

    By default the evolver's signature is updated; pass
    ``update_evolver=False`` to gather the information without making
    any changes.

    Args:
        pending_mutations (list of
                           django_evolution.mutations.BaseMutation):
            The list of pending mutations to run.

        update_evolver (bool, optional):
            Whether to update the evolver's signature.

    Returns:
        dict:
        The resulting information from running the mutations, with
        ``app_mutator``, ``applied_migrations``, ``mutations``, ``sql``,
        and ``upgrade_method`` keys, or ``None`` if no mutations remain
        after filtering out the non-mutable ones.
    """
    app_label = self.app_label
    legacy_app_label = self.legacy_app_label

    # Keep only the mutations that can actually mutate this app.
    mutations = []

    for mutation in pending_mutations:
        if self.is_mutation_mutable(mutation, app_label=app_label):
            mutations.append(mutation)

    if not mutations:
        return None

    app_mutator = AppMutator.from_evolver(
        evolver=self.evolver,
        app_label=app_label,
        legacy_app_label=legacy_app_label,
        update_evolver=update_evolver)
    app_mutator.run_mutations(mutations)

    project_sig = app_mutator.project_sig
    app_sig = project_sig.get_app_sig(app_label)

    if app_sig is None:
        app_sig = project_sig.get_app_sig(legacy_app_label)

    if app_sig is None:
        # The evolutions didn't make any changes to an existing app
        # signature (there may not have been one), so there's no
        # migration or upgrade state to report.
        applied_migrations = []
        upgrade_method = None
    else:
        applied_migrations = app_sig.applied_migrations
        upgrade_method = app_sig.upgrade_method

    return {
        'app_mutator': app_mutator,
        'applied_migrations': applied_migrations,
        'mutations': mutations,
        'sql': app_mutator.to_sql(),
        'upgrade_method': upgrade_method,
    }
def prepare(self, hinted=False, **kwargs):
"""Prepare state for this task.
This will determine if there are any unapplied evolutions in the app,
and record that state and the SQL needed to apply the evolutions.
Args:
hinted (bool, optional):
Whether to prepare the task for hinted evolutions.
**kwargs (dict, unused):
Additional keyword arguments passed for task preparation.
"""
app = self.app
app_label = self.app_label
evolver = self.evolver
database_name = evolver.database_name
project_sig = evolver.project_sig
# Check if there are any models for this app that don't yet exist
# in the database.
new_models = db_get_installable_models_for_app(
app=app,
db_state=evolver.database_state)
self.new_models = new_models
self.new_model_names = [
model._meta.object_name
for model in new_models
]
# See if we're already tracking this app in the signature.
app_sig = (project_sig.get_app_sig(app_label) or
project_sig.get_app_sig(self.legacy_app_label))
app_sig_is_new = app_sig is None
self.app_sig_is_new = app_sig_is_new
orig_upgrade_method = None
upgrade_method = None
target_project_sig = evolver.target_project_sig
target_app_sig = target_project_sig.get_app_sig(app_label,
required=True)
evolutions = []
if new_models:
# Record what we know so far about the state. We might find that
# we can't simulate once we process evolutions.
self.can_simulate = True
self.evolution_required = True
if app_sig_is_new:
# We're adding this app for the first time. If there are models
# here, then copy the entire signature from the target, and mark
# all evolutions for the app as applied.
if new_models:
app_sig = target_app_sig.clone()
project_sig.add_app_sig(app_sig)
orig_upgrade_method = app_sig.upgrade_method
app_upgrade_info = get_app_upgrade_info(app,
simulate_applied=True,
database=database_name)
upgrade_method = app_upgrade_info.get('upgrade_method')
evolutions = get_evolution_sequence(app)
else:
orig_upgrade_method = app_sig.upgrade_method
# Copy only the models from the target signature | |
# -*- coding: utf-8 -*-
'''Simplicity masking and scoring classes.
'''
import os
import shutil
# 3rd-party packages
import pyfaidx
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# module packages
from . import cli
from .common import *
#
# global constants
#
TERM_CHAR = '$'
NUM_HISTOGRAM_BINS = 25
DEFAULT_SMOOTH_WINDOW = 11 # best if this is odd
WINDOW_TYPES = ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']
BWT_ONLY = False
DENSITY_CUTOFF = 0.1
#
# class definitions
#
class RunlengthSimplicity(SimplicityObject):
    '''Simplicity scoring that flags runs of repeated characters.

    A position is considered simple when it starts a run of ``cutoff``
    identical (case-insensitive) characters.
    '''

    def __init__(self, default_cutoff=DEFAULT_SIMPLICITY_CUTOFF):
        super().__init__(default_cutoff=default_cutoff)
        self.label = 'runlength'
        self.desc = 'runlength (repeated characters)'

    def _runlength(self, s):
        # True at index i when s[i .. i+cutoff-1] are all the same char.
        run = self.cutoff
        flags = []
        for start in range(len(s) - run + 1):
            first = s[start]
            flags.append(all(s[start + offset] == first
                             for offset in range(1, run)))
        return flags

    def mask(self, seq, print_results=False):
        '''Mask high-simplicity positions in a string.

        :param seq: Input sequence (str or mutable sequence type).
        :return: Input sequence with masked positions changed to lower-case.
        '''
        for pos, hit in enumerate(self._runlength(to_str(seq).upper())):
            if not hit:
                continue
            if isinstance(seq, str):
                # Strings are immutable: rebuild with the run lowered.
                seq = (seq[:pos] +
                       seq[pos:pos + self.cutoff].lower() +
                       seq[pos + self.cutoff:])
            else:
                lowered = to_str(seq[pos:pos + self.cutoff]).lower()
                seq[pos:pos + self.cutoff] = lowered
        return super().mask(seq)
class LetterFrequencySimplicity(SimplicityObject):
    '''Define simplicity by the frequency of letters within a window.

    A position is masked when the same letter occurs at least ``cutoff``
    times within ``window_size`` residues of it.
    '''

    def __init__(self,
                 default_cutoff=DEFAULT_SIMPLICITY_CUTOFF,
                 window_size=None):
        global config_obj
        super().__init__(default_cutoff=default_cutoff)
        # Window size falls back to the configured value, then the default.
        if window_size is None:
            try:
                self.window_size = config_obj.config_dict['simplicity_window']
            except KeyError:
                self.window_size = DEFAULT_SIMPLICITY_WINDOW
        else:
            self.window_size = window_size
        self.label = 'letterfreq%d' % self.window_size
        self.desc = 'letter frequency in window of %d residues' % self.window_size

    def mask(self, seq, print_results=False):
        '''Mask high-simplicity positions in a string.

        :param seq: Input sequence (str or mutable sequence type).
        :param print_results: Unused here; kept for interface symmetry.
        :return: Input sequence with masked positions changed to lower-case.
        '''
        out_str = to_str(seq)
        end_idx = len(out_str) - 1
        byte_arr = np.array([char for char in to_bytes(out_str.upper())])
        mask_positions = set()
        #
        # test character by character for number of occurrences over window
        #
        for char in set(byte_arr):
            char_positions = list(np.where(byte_arr == char)[0])
            while len(char_positions) >= self.cutoff:
                testpos = char_positions.pop(0)
                next_positions = char_positions[:self.cutoff - 1]
                if not next_positions:
                    # BUG FIX: with cutoff == 1 there are no following
                    # positions; the original indexed next_positions[-1]
                    # and raised IndexError. A single occurrence already
                    # meets the cutoff, so mask it directly.
                    mask_positions.add(testpos)
                elif next_positions[-1] - testpos < self.window_size:
                    # All cutoff occurrences fall within the window.
                    # (set.update avoids rebuilding the set each time.)
                    mask_positions.update([testpos] + next_positions)
        #
        # mask everything
        #
        for pos in mask_positions:
            out_str = out_str[:pos] + out_str[pos].lower() + out_str[pos + 1:]
        if isinstance(seq, str):  # strings need to have whole length set
            seq = out_str
        else:  # may be MutableSeq that needs lengths
            seq[:end_idx] = out_str[:end_idx]
        return super().mask(seq)
class GenerisSimplicity(SimplicityObject):
    '''Define simplicity by repeat patterns in direct and Burrows-Wheeler space.

    Runs of repeated characters are masked both in the raw sequence and
    in its Burrows-Wheeler transform; the BWT pass catches dispersed
    repeats that are not contiguous in the original sequence.
    '''
    def __init__(self,
                 default_cutoff=DEFAULT_SIMPLICITY_CUTOFF,
                 window_size=None):
        super().__init__(default_cutoff=default_cutoff)
        # Window size falls back to the configured value, then the default.
        if window_size is None:
            try:
                self.window_size = config_obj.config_dict['simplicity_window']
            except KeyError:
                self.window_size = DEFAULT_SIMPLICITY_WINDOW
        else:
            self.window_size = window_size
        self.label = 'generis%d' % self.window_size
        self.desc = 'pattern by BW xform in window of %d residues' % self.window_size
    def _runlength(self, s):
        # True at position i when s[i .. i+cutoff-1] are identical chars.
        return [all([s[i + j + 1] == s[i] for j in range(self.cutoff - 1)])
                for i in range(len(s) - self.cutoff + 1)]
    def _bwt(self, s):
        '''Burrows-Wheeler Transform.

        :param s: Input string. Must not contain TERM_CHAR.
        :return: Transformed string.
        '''
        # Naive construction: sort all rotations of s + TERM_CHAR and
        # take the last column. O(n^2 log n); fine for short sequences.
        s = s + TERM_CHAR
        return ''.join([x[-1] for x in sorted([s[i:] + s[:i]
                                               for i in range(len(s))])])
    def _ibwt(self, s):
        '''Inverse Burrows-Wheeler Transform on uppercase-only comparisons.

        :param s: Transformed string with mixed upper and lower case.
        :return: Untransformed string with original order.
        '''
        # Standard naive inversion, but sorting case-insensitively
        # (key=str.upper) so the lower-case mask marks survive the
        # round trip without disturbing the reconstruction order.
        L = [''] * len(s)
        for i in range(len(s)):
            L = sorted([s[i] + L[i] for i in range(len(s))],
                       key=str.upper)
        # The row ending in TERM_CHAR is the original (sans terminator).
        return [x for x in L if x.endswith(TERM_CHAR)][0][:-1]
    def merge_mask_regions(self, mask, max_run):
        "Merge masked regions separated by runs of no more than max_run."
        # NOTE(review): relies on self.run_lengths() from the base class
        # (not visible here); the indexing of runs[] against mask[]
        # assumes they are parallel -- confirm against the base class.
        runs = self.run_lengths(mask)
        for i in range(len(runs)):
            if runs[i] <= max_run:
                mask[i] = True
        return mask
    def unset_small_regions(self, mask, min_run):
        "Unset masked regions shorter than min_run."
        runs = self.run_lengths([int(not i) for i in mask])
        for i in range(len(runs)):
            if mask[i] and (runs[i] < min_run-1):
                mask[i] = False
        return mask
    def mask(self, seq, print_results=False):
        '''Mask high-simplicity positions in a string.

        :param seq: Input string, compared in all-uppercase.
        :param print_results: If True, log the intermediate mask stages.
        :return: Input string with masked positions changed to lower-case.
        '''
        out_str = to_str(seq)
        end_idx = len(out_str) - 1
        upper_str = out_str.upper()
        # Pass 1: run-length mask in direct (sequence) space.
        if not BWT_ONLY:
            dir_mask = self._runlength(upper_str)
            for pos in [i for i, masked in
                        enumerate(dir_mask)
                        if masked]:
                out_str = out_str[:pos] + out_str[pos:pos +\
                    self.cutoff].lower() +\
                    out_str[pos + self.cutoff:]
            if print_results:
                logger.info(' rlm: %s', colorize_string(out_str))
        # Pass 2: run-length mask in Burrows-Wheeler space.
        bwts = self._bwt(upper_str)
        bwt_mask = self._runlength(bwts)
        #bwt_mask = self.merge_mask_regions(bwt_mask, 2)
        #bwt_mask = self.unset_small_regions(bwt_mask, self.cutoff)
        for pos in [i for i, masked in
                    enumerate(bwt_mask)
                    if masked]:
            bwts = bwts[:pos] + bwts[pos:pos + \
                self.cutoff].lower() + bwts[pos + self.cutoff:]
        # Invert the transform; lower-case marks map back to sequence space.
        ibwts = self._ibwt(bwts)
        if print_results:
            logger.info(' bwt: %s', colorize_string(bwts))
            logger.info(' inv. bwt: %s', colorize_string(ibwts))
            logger.info('runlength: %s', colorize_string(out_str))
        # Union of both masks: lower any position the inverse BWT lowered.
        for pos in [i for i, char in
                    enumerate(ibwts) if char.islower()]:
            out_str = out_str[:pos] + out_str[pos].lower() + out_str[pos + 1:]
        if print_results:
            logger.info(' generis: %s', colorize_string(out_str))
        if isinstance(seq, str):  # strings need to have whole length set
            seq = out_str
        else:  # may be MutableSeq that needs lengths
            seq[:end_idx] = out_str[:end_idx]
        return super().mask(seq)
#
# Instantiations of classes.
#
# One module-level singleton per masking strategy. Commands retrieve the
# active one via get_user_context_obj()['simplicity_object'] (see
# demo_simplicity below).
NULL_SIMPLICITY = SimplicityObject()
RUNLENGTH_SIMPLICITY = RunlengthSimplicity()
LETTERFREQ_SIMPLICITY = LetterFrequencySimplicity()
GENERIS_SIMPLICITY = GenerisSimplicity()
@cli.command()
@click.option('--smooth/--no-smooth', default=True, is_flag=True,
              help='Finish with real-space smoothing.')
@click.option('--cutoff', default=DEFAULT_SIMPLICITY_CUTOFF, show_default=True,
              help='Maximum simplicity to keep.')
@click.option('-k', default=DEFAULT_K, show_default=True,
              help='k-mer size for score calculation.')
def demo_simplicity(smooth, cutoff, k):
    '''Demo self-provided simplicity outputs.

    :param smooth: Whether to finish with real-space smoothing.
    :param cutoff: Simplicity value cutoff, lower is less complex.
    :param k: k-mer size for score calculation.
    :return:
    '''
    user_ctx = get_user_context_obj()
    simplicity_obj = user_ctx['simplicity_object']
    simplicity_obj.set_cutoff(cutoff)
    logger.info('Simplicity function is %s with cutoff of %d.',
                simplicity_obj.desc, cutoff)
    simplicity_obj.set_k(k)
    simplicity_obj.use_smoother(smooth)
    logger.info(' Mask window demo for %d-mers:', k)
    mask_test = 'AAAAAAAAAAaaaaAAAAAAAAAAAaaaaaAAAAAAAAAAAAaaaaaaAAAAAAAAAAAAAaaaaaaaAAAAAAAAAAAAAA'
    logger.info(' in: %s\n S-score: %s\n', mask_test,
                ''.join(['%X' % i for i in
                         simplicity_obj.score(mask_test)]))
    # NOTE(review): window_size is computed but never used below --
    # looks like a leftover; kept in case the lookup matters. Confirm.
    try:
        window_size = user_ctx['simplicity_window']
    except KeyError:
        window_size = DEFAULT_SIMPLICITY_WINDOW
    smoother_tests = [('L end', 'aAAAAAAAAAAAAA'),
                      ('R end', 'AAAAAAAAAAAAAa'),
                      ('singleton', 'AAAAAaAAAAAAAAaaaaAAAAA'),
                      ('non-windowed singleton', 'AaAAAAaAAAAaAAAAAaAAAAAaAAAAAA')]
    # Lazy %-args instead of eager % formatting (same logged message).
    # NOTE(review): the message says "window size" but logs k -- confirm.
    logger.info('Demo smoother with window size of %d', simplicity_obj.k)
    for label, pattern in smoother_tests:
        logger.info('%s: %s', label, colorize_string(pattern))
        smoothed = simplicity_obj.smoother(pattern)
        logger.info('smoothed: %s', colorize_string(smoothed))
    for desc, case in simplicity_obj.testcases:
        # BUG FIX: compare string contents with '==', not 'is' -- identity
        # of string literals is interning-dependent and raises a
        # SyntaxWarning on Python >= 3.8.
        if case == '':
            logger.info(' %s', desc)
        else:
            logger.info('\n%s:', desc)
            logger.info(' in: %s', case)
            masked_str = simplicity_obj.mask(case, print_results=True)
            logger.info(' smoothed: %s', colorize_string(masked_str))
def num_masked(seq):
    """Count the number of lower-case characters in a sequence.

    :param seq: Sequence of characters.
    :type seq: str, bytes, or other convertible sequence type.
    :return: Count of lower-case characters.
    :rtype: int
    """
    # bool is a subclass of int, so summing islower() results counts the
    # masked (lower-case) positions directly -- replaces the original
    # side-effecting list comprehension.
    return sum(char.islower() for char in to_str(seq))
class Smoother(object):
    '''Smooth data by convolution with a normalized window function.'''

    def __init__(self,
                 window_len=DEFAULT_SMOOTH_WINDOW,
                 window_type='flat'):
        '''Build the normalized smoothing window.

        :param window_len: Length of the window; must be >= 3 (best odd).
        :param window_type: One of WINDOW_TYPES; 'flat' is a moving
            average, the others are numpy window functions.
        :raises ValueError: If window_len or window_type is invalid.
        '''
        if window_len < 3:
            # BUG FIX: message now matches the check (3 itself is legal).
            raise ValueError('Window length must be >= 3')
        self.window_len = window_len
        if window_type not in WINDOW_TYPES:
            raise ValueError('Window type "%s" unknown' % window_type)
        if window_type == 'flat':  # moving average
            self.window = np.ones(window_len, 'd')
        else:
            # BUG FIX: look the numpy window function up with getattr
            # instead of eval() -- same result, no string evaluation.
            self.window = getattr(np, window_type)(window_len)
        self.window = self.window / self.window.sum()  # normalize

    def smooth(self, x, reflect=False):
        """Convolve the window with the signal.

        :param x: 1-D signal array.
        :param reflect: If True, pad both ends with reflected copies of
            the signal so the 'valid' convolution spans the whole input.
        :return: Smoothed signal as a numpy array.
        """
        if reflect:
            signal = np.r_[x[self.window_len - 1:0:-1],
                           x,
                           x[-2:-self.window_len - 1:-1]]
        else:
            signal = x
        return np.convolve(self.window, signal, mode='valid')
@cli.command()
@click.option('--cutoff',
default=DEFAULT_SIMPLICITY_CUTOFF,
help='Minimum simplicity level to unmask.')
@click.option('--smooth/--no-smooth',
default=True,
help='Smooth mask profile over window.')
@click.argument('infilename', type=str)
@click.argument('outfilestem', type=str)
@click.argument('setlist', nargs=-1, type=DATA_SET_VALIDATOR)
def peptide_simplicity_mask(cutoff, smooth, infilename, outfilestem, setlist):
'''Lower-case high-simplicity regions in FASTA.
:param infilename: Name of input FASTA files for every directory in setlist.
:param outfilestem: Stem of output filenames.
:param cutoff: Minimum simplicity level to unmask.
:param plot: If specified, make a histogram of masked fraction.
:param setlist: List of defined sets to iterate over.
:return:
Note that this calculation is single-threaded and may be time-consuming, so
starting multiple processes may be a good idea.
'''
global config_obj
user_ctx = get_user_context_obj()
setlist = DATA_SET_VALIDATOR.multiple_or_empty_set(setlist)
simplicity_obj = user_ctx['simplicity_object']
simplicity_obj.set_cutoff(cutoff)
simplicity_obj.use_smoother(smooth)
logger.info('Simplicity function is %s with cutoff of %d.',
simplicity_obj.desc, cutoff)
if simplicity_obj.smooth:
logger.info('Mask will be smoothed over window of %d residues',
simplicity_obj.k)
logger.debug('Reading from FASTA file "%s".', infilename)
instem, ext = os.path.splitext(infilename)
outfilename = outfilestem + ext
logger.debug('Output FASTA file name is "%s".', outfilename)
for calc_set in setlist:
dir = config_obj.config_dict[calc_set]['dir']
inpath = os.path.join(dir, infilename)
outpath = os.path.join(dir, outfilename)
shutil.copy(inpath, outpath)
fasta = pyfaidx.Fasta(outpath, mutable=True)
if user_ctx['first_n']:
keys = list(fasta.keys())[:user_ctx['first_n']]
else:
keys = fasta.keys()
if user_ctx['progress']:
with click.progressbar(keys, label='%s genes processed' % calc_set,
length=len(keys)) as bar:
for key in bar:
masked_gene = simplicity_obj.mask(fasta[key])
else:
for key | |
1.0)
assert(X_cols[0][1][1] == 4.0)
assert(X_cols[1][1][0] == 2.0)
assert(X_cols[1][1][1] == 5.0)
assert(X_cols[2][1][0] == 3.0)
assert(X_cols[2][1][1] == 6.0)
def test_unify_feature_names_types_pandas_names1():
    """A pd.Series of int feature names should be stringified in order."""
    X = pd.DataFrame()
    X["feature1"] = [1, 4]
    X["feature2"] = [2, 5]
    X["feature3"] = [3, 6]
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'continuous', 'continuous']
    feature_names_in = unify_feature_names(
        X,
        feature_names_given=pd.Series([0, 1, 2]),
        feature_types_given=feature_types_given,
    )
    assert feature_names_in == ["0", "1", "2"]
    X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)],
                                feature_names_in, feature_types_given,
                                min_unique_continuous=0))
    assert len(X_cols) == 3
    expected = [(1.0, 4.0), (2.0, 5.0), (3.0, 6.0)]
    for col, (first, second) in zip(X_cols, expected):
        assert col[1][0] == first
        assert col[1][1] == second
def test_unify_feature_names_types_pandas_names2():
    """A mixed list of int and str names is stringified element-wise."""
    X = pd.DataFrame({"feature1": [1, 4], "feature2": [2, 5], "feature3": [3, 6]})
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'continuous', 'continuous']
    feature_names_in = unify_feature_names(X, feature_names_given=[0, "SOMETHING", 2], feature_types_given=feature_types_given)
    assert feature_names_in == ["0", "SOMETHING", "2"]
    X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
    assert len(X_cols) == 3
    for col, expected in zip(X_cols, ([1.0, 4.0], [2.0, 5.0], [3.0, 6.0])):
        assert col[1][0] == expected[0]
        assert col[1][1] == expected[1]
def test_unify_feature_names_types_ignored_names1():
    """An 'ignore' feature type keeps its name but is not extracted."""
    X = np.array([[1, 2, 3], [4, 5, 6]])
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'ignore', 'continuous']
    feature_names_in = unify_feature_names(X, feature_names_given=pd.Series([0, 1, 2]), feature_types_given=feature_types_given)
    assert feature_names_in == ["0", "1", "2"]
    X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
    assert len(X_cols) == 2
    for col, expected in zip(X_cols, ([1.0, 4.0], [3.0, 6.0])):
        assert col[1][0] == expected[0]
        assert col[1][1] == expected[1]
def test_unify_feature_names_types_ignored_names2():
    """An ignored column may carry an arbitrary string name."""
    X = np.array([[1, 2, 3], [4, 5, 6]])
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'ignore', 'continuous']
    feature_names_in = unify_feature_names(X, feature_names_given=[0, "SOMETHING", 2], feature_types_given=feature_types_given)
    assert feature_names_in == ["0", "SOMETHING", "2"]
    X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
    assert len(X_cols) == 2
    for col, expected in zip(X_cols, ([1.0, 4.0], [3.0, 6.0])):
        assert col[1][0] == expected[0]
        assert col[1][1] == expected[1]
def test_unify_feature_names_types_ignored_pandas_names1():
    """Ignore type with Series names on a pandas DataFrame."""
    X = pd.DataFrame({"feature1": [1, 4], "feature2": [2, 5], "feature3": [3, 6]})
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'ignore', 'continuous']
    feature_names_in = unify_feature_names(X, feature_names_given=pd.Series([0, 1, 2]), feature_types_given=feature_types_given)
    assert feature_names_in == ["0", "1", "2"]
    X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
    assert len(X_cols) == 2
    for col, expected in zip(X_cols, ([1.0, 4.0], [3.0, 6.0])):
        assert col[1][0] == expected[0]
        assert col[1][1] == expected[1]
def test_unify_feature_names_types_ignored_pandas_names2():
    """Ignore type with a mixed-type name list on a pandas DataFrame."""
    X = pd.DataFrame({"feature1": [1, 4], "feature2": [2, 5], "feature3": [3, 6]})
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'ignore', 'continuous']
    feature_names_in = unify_feature_names(X, feature_names_given=[0, "SOMETHING", 2], feature_types_given=feature_types_given)
    assert feature_names_in == ["0", "SOMETHING", "2"]
    X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
    assert len(X_cols) == 2
    for col, expected in zip(X_cols, ([1.0, 4.0], [3.0, 6.0])):
        assert col[1][0] == expected[0]
        assert col[1][1] == expected[1]
def test_unify_feature_names_types_dropped_names1():
    """The ignored column may be physically absent from the ndarray."""
    X = np.array([[1, 3], [4, 6]])
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'ignore', 'continuous']
    feature_names_in = unify_feature_names(X, feature_names_given=pd.Series([0, 1, 2]), feature_types_given=feature_types_given)
    assert feature_names_in == ["0", "1", "2"]
    X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
    assert len(X_cols) == 2
    for col, expected in zip(X_cols, ([1.0, 4.0], [3.0, 6.0])):
        assert col[1][0] == expected[0]
        assert col[1][1] == expected[1]
def test_unify_feature_names_types_dropped_names2():
    """Dropped ignored column with a mixed-type name list."""
    X = np.array([[1, 3], [4, 6]])
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'ignore', 'continuous']
    feature_names_in = unify_feature_names(X, feature_names_given=[0, "SOMETHING", 2], feature_types_given=feature_types_given)
    assert feature_names_in == ["0", "SOMETHING", "2"]
    X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
    assert len(X_cols) == 2
    for col, expected in zip(X_cols, ([1.0, 4.0], [3.0, 6.0])):
        assert col[1][0] == expected[0]
        assert col[1][1] == expected[1]
def test_unify_feature_names_types_dropped_pandas_names1():
    """Dropped ignored column in a pandas DataFrame, Series names."""
    X = pd.DataFrame({"feature1": [1, 4], "feature3": [3, 6]})
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'ignore', 'continuous']
    feature_names_in = unify_feature_names(X, feature_names_given=pd.Series([0, 1, 2]), feature_types_given=feature_types_given)
    assert feature_names_in == ["0", "1", "2"]
    X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
    assert len(X_cols) == 2
    for col, expected in zip(X_cols, ([1.0, 4.0], [3.0, 6.0])):
        assert col[1][0] == expected[0]
        assert col[1][1] == expected[1]
def test_unify_feature_names_types_dropped_pandas_names2():
    """Dropped ignored column in a pandas DataFrame, list names."""
    X = pd.DataFrame({"feature1": [1, 4], "feature3": [3, 6]})
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'ignore', 'continuous']
    feature_names_in = unify_feature_names(X, feature_names_given=[0, "SOMETHING", 2], feature_types_given=feature_types_given)
    assert feature_names_in == ["0", "SOMETHING", "2"]
    X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
    assert len(X_cols) == 2
    for col, expected in zip(X_cols, ([1.0, 4.0], [3.0, 6.0])):
        assert col[1][0] == expected[0]
        assert col[1][1] == expected[1]
def test_unify_feature_names_types_nondropped2_names2():
    """When names skip the ignored column, a synthetic name is generated."""
    X = np.array([[1, 2, 3], [4, 5, 6]])
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'ignore', 'continuous']
    feature_names_in = unify_feature_names(X, feature_names_given=[0, 2], feature_types_given=feature_types_given)
    assert feature_names_in == ["0", "feature_0001", "2"]
    X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
    assert len(X_cols) == 2
    for col, expected in zip(X_cols, ([1.0, 4.0], [3.0, 6.0])):
        assert col[1][0] == expected[0]
        assert col[1][1] == expected[1]
def test_unify_feature_names_types_nondropped2_pandas_names1():
    """Series names that skip the ignored column get a synthetic fill-in."""
    X = pd.DataFrame({"feature1": [1, 4], "feature2": [2, 5], "feature3": [3, 6]})
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'ignore', 'continuous']
    feature_names_in = unify_feature_names(X, feature_names_given=pd.Series([0, 2]), feature_types_given=feature_types_given)
    assert feature_names_in == ["0", "feature_0001", "2"]
    X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
    assert len(X_cols) == 2
    for col, expected in zip(X_cols, ([1.0, 4.0], [3.0, 6.0])):
        assert col[1][0] == expected[0]
        assert col[1][1] == expected[1]
def test_unify_feature_names_types_dropped2_names2():
    """Skipping names AND dropping the ignored column from the data."""
    X = np.array([[1, 3], [4, 6]])
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'ignore', 'continuous']
    feature_names_in = unify_feature_names(X, feature_names_given=[0, 2], feature_types_given=feature_types_given)
    assert feature_names_in == ["0", "feature_0001", "2"]
    X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
    assert len(X_cols) == 2
    for col, expected in zip(X_cols, ([1.0, 4.0], [3.0, 6.0])):
        assert col[1][0] == expected[0]
        assert col[1][1] == expected[1]
def test_unify_feature_names_types_dropped2_pandas_names1():
    """Series names skipping the ignored column, DataFrame missing it too."""
    X = pd.DataFrame({"feature1": [1, 4], "feature3": [3, 6]})
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'ignore', 'continuous']
    feature_names_in = unify_feature_names(X, feature_names_given=pd.Series([0, 2]), feature_types_given=feature_types_given)
    assert feature_names_in == ["0", "feature_0001", "2"]
    X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
    assert len(X_cols) == 2
    for col, expected in zip(X_cols, ([1.0, 4.0], [3.0, 6.0])):
        assert col[1][0] == expected[0]
        assert col[1][1] == expected[1]
def test_unify_feature_names_types_keep_pandas_names1():
    """With no names given, DataFrame column labels are kept as-is."""
    X = pd.DataFrame({"feature1": [1, 4], "feature2": [2, 5], "feature3": [3, 6]})
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'continuous', 'continuous']
    feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
    assert feature_names_in == ["feature1", "feature2", "feature3"]
    X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
    assert len(X_cols) == 3
    for col, expected in zip(X_cols, ([1.0, 4.0], [2.0, 5.0], [3.0, 6.0])):
        assert col[1][0] == expected[0]
        assert col[1][1] == expected[1]
def test_unify_feature_names_types_dropped3_pandas_names1():
    """No names given; ignored column present and keeps its label."""
    X = pd.DataFrame({"feature1": [1, 4], "feature2": [2, 5], "feature3": [3, 6]})
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'ignore', 'continuous']
    feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
    assert feature_names_in == ["feature1", "feature2", "feature3"]
    X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
    assert len(X_cols) == 2
    for col, expected in zip(X_cols, ([1.0, 4.0], [3.0, 6.0])):
        assert col[1][0] == expected[0]
        assert col[1][1] == expected[1]
def test_unify_feature_names_types_dropped3_pandas_names2():
    """No names given and ignored column absent: synthetic name appears."""
    X = pd.DataFrame({"feature1": [1, 4], "feature3": [3, 6]})
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'ignore', 'continuous']
    feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
    assert feature_names_in == ["feature1", "feature_0001", "feature3"]
    X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
    assert len(X_cols) == 2
    for col, expected in zip(X_cols, ([1.0, 4.0], [3.0, 6.0])):
        assert col[1][0] == expected[0]
        assert col[1][1] == expected[1]
def test_unify_feature_names_types_rearrange1_drop1():
    """Given names reorder the DataFrame columns; ignored column dropped."""
    X = pd.DataFrame({"width": [1, 4], "UNUSED": [2, 5], "length": [3, 6]})
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'ignore', 'continuous']
    feature_names_in = unify_feature_names(X, feature_names_given=["length", "SOMETHING", "width"], feature_types_given=feature_types_given)
    assert feature_names_in == ["length", "SOMETHING", "width"]
    X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
    assert len(X_cols) == 2
    # length comes out first, then width, per the given name ordering
    for col, expected in zip(X_cols, ([3.0, 6.0], [1.0, 4.0])):
        assert col[1][0] == expected[0]
        assert col[1][1] == expected[1]
def test_unify_feature_names_types_rearrange1_drop2():
    """Reordering names with the ignored column absent from the data."""
    X = pd.DataFrame({"width": [1, 4], "length": [3, 6]})
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'ignore', 'continuous']
    feature_names_in = unify_feature_names(X, feature_names_given=["length", "SOMETHING", "width"], feature_types_given=feature_types_given)
    assert feature_names_in == ["length", "SOMETHING", "width"]
    X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
    assert len(X_cols) == 2
    for col, expected in zip(X_cols, ([3.0, 6.0], [1.0, 4.0])):
        assert col[1][0] == expected[0]
        assert col[1][1] == expected[1]
def test_unify_feature_names_types_rearrange2_drop1():
    """Names omit the ignored slot; columns reordered, filler generated."""
    X = pd.DataFrame({"width": [1, 4], "UNUSED": [2, 5], "length": [3, 6]})
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'ignore', 'continuous']
    feature_names_in = unify_feature_names(X, feature_names_given=["length", "width"], feature_types_given=feature_types_given)
    assert feature_names_in == ["length", "feature_0001", "width"]
    X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
    assert len(X_cols) == 2
    for col, expected in zip(X_cols, ([3.0, 6.0], [1.0, 4.0])):
        assert col[1][0] == expected[0]
        assert col[1][1] == expected[1]
def test_unify_feature_names_types_rearrange2_drop2():
    """Names omit the ignored slot and the data omits the column too."""
    X = pd.DataFrame({"width": [1, 4], "length": [3, 6]})
    X, n_samples = clean_X(X)
    assert n_samples == 2
    feature_types_given = ['continuous', 'ignore', 'continuous']
    feature_names_in = unify_feature_names(X, feature_names_given=["length", "width"], feature_types_given=feature_types_given)
    assert feature_names_in == ["length", "feature_0001", "width"]
    X_cols = list(unify_columns(X, [(0, None), (2, None)], feature_names_in, feature_types_given, min_unique_continuous=0))
    assert len(X_cols) == 2
    for col, expected in zip(X_cols, ([3.0, 6.0], [1.0, 4.0])):
        assert col[1][0] == expected[0]
        assert col[1][1] == expected[1]
def test_unify_feature_names_types_rearrange_more1():
X = pd.DataFrame()
| |
<reponame>tamuhey/pytype<filename>pytype/tools/xref/indexer.py
#!/usr/bin/env python
"""Generate cross references from a project."""
from __future__ import print_function
import collections
from pytype import abstract
from pytype import analyze
from pytype import errors
from pytype import io
from pytype import load_pytd
from pytype import module_utils
from pytype.tools.xref import utils as xref_utils
from pytype.tools.xref import kythe
from typed_ast import ast27t as ast27
from typed_ast import ast3
# A global "ast" variable that we set to ast27 or ast3 depending on the target
# python version.
#
# NOTE: this deliberately shadows the stdlib "ast" name within this module;
# it is rebound elsewhere once the target python version is known.
#
# TODO(mdemello): Use typed_ast.convert to coerce everything into ast3
ast = None
# A mapping of offsets between a node's start position and the symbol being
# defined. e.g. in the declaration "class X" the X is at +6 from the start.
DEF_OFFSETS = {
    "ClassDef": 6,  # class X
    "FunctionDef": 4,  # def f
}
# Marker for a link to a file rather than a node within the file.
IMPORT_FILE_MARKER = "<__FILE__>"
def children(node):
  """Children to recurse over.

  For a handful of node types we only want to visit specific fields;
  everything else falls back to generic ast field iteration.
  """
  recurse_fields = {
      ast.Module: ["body"],
      ast.ClassDef: ["bases", "body"],
      ast.FunctionDef: ["body"],
      ast.Assign: ["targets", "value"],
  }
  fields = recurse_fields.get(node.__class__)
  if not fields:
    return ast.iter_fields(node)
  return [(field, getattr(node, field)) for field in fields]
def typename(node):
  """Return the class name of *node*."""
  cls = node.__class__
  return cls.__name__
def get_id(node):
  """Construct an id based on node type."""
  cls = node.__class__
  if cls == ast.FunctionDef:
    return "function %s" % node.name
  if cls == ast.ClassDef:
    return "class %s" % node.name
  if cls == ast.Module:
    return "module"
  # Only modules, classes and functions open scopes.
  raise ValueError("Unexpected scope: %r" % node)
def qualified_method(data):
  """Fully qualify a method call with its class scope."""
  if not isinstance(data, abstract.BoundFunction):
    return [data.name]
  # Bound functions carry their class binding(s) in repr_names().
  return data.repr_names()
def get_name(node):
  """Nodes have different name attributes; normalize them to a string."""
  if isinstance(node, ast.Attribute):
    # Qualified attribute access: recurse on the object being accessed.
    return "%s.%s" % (get_name(node.value), node.attr)
  if isinstance(node, ast.arg):
    return node.arg
  if isinstance(node, str):
    return node
  if hasattr(node, "name"):
    return node.name
  if hasattr(node, "id"):
    return node.id
  # Fallback: a placeholder built from the node's type.
  return "[%s]" % typename(node)
def get_location(node):
  """Return the (line, column) position of *node* in the source."""
  # TODO(mdemello): The column offset for nodes like "class A" needs to be
  # adjusted to the start of the symbol.
  return node.lineno, node.col_offset
def has_decorator(f, decorator):
  """Check whether *f* carries a decorator with the given name."""
  return any(
      isinstance(d, ast.Name) and d.id == decorator
      for d in f.decorator_list)
def get_opcodes(traces, lineno, op_list):
  """Get all opcode traces on a given line whose name is in op_list."""
  return [trace for trace in traces[lineno] if trace[0] in op_list]
# Internal datatypes
class AttrError(Exception):
  # NOTE(review): raised by attribute-handling code elsewhere in this module;
  # the raising sites are not visible in this chunk.
  pass
class SourceFile(object):
  """Line-based source code access.

  Wraps raw source text plus pytype's opcode traces, providing 1-indexed
  line access, byte-offset computation and simple text search.
  """

  def __init__(self, src, raw_traces, filename):
    self.text = src
    self.traces = self.collect_traces(raw_traces)
    self.filename = filename
    self.lines = src.split("\n")
    self.offsets = []
    self._init_byte_offsets()

  def _init_byte_offsets(self):
    """Record the byte offset at which every line starts."""
    offset = 0
    for line in self.lines:
      self.offsets.append(offset)
      offset += len(line) + 1  # account for the \n

  def get_offset(self, line, column):
    """Convert a (1-indexed line, column) position to a byte offset."""
    return self.offsets[line - 1] + column

  def collect_traces(self, raw_traces):
    """Postprocess pytype's opcode traces, grouping them by line number."""
    out = collections.defaultdict(list)
    for op, symbol, data in raw_traces:
      out[op.line].append((op.name, symbol, data))
    return out

  def line(self, n):
    """Index source lines from 1."""
    return self.lines[n - 1]

  def find_text(self, start_line, end_line, text):
    """Find text within a range of lines.

    Returns (line, column) of the first uncommented occurrence, or
    (None, None) if the text was not found.
    """
    for l in range(start_line, end_line):
      col = self.line(l).find(text)
      if col > -1:
        # TODO(mdemello): Temporary hack, replace with a token stream!
        # This will break if we have a # in a string before our desired text.
        comment_marker = self.line(l).find("#")
        if comment_marker > -1 and comment_marker < col:
          continue
        return (l, col)
    return None, None

  def next_non_comment_line(self, line):
    """Return the first line after *line* that is not a comment, or None."""
    # BUG FIX: since line() is 1-indexed, the upper bound must be
    # len(self.lines) + 1; the previous bound of len(self.lines) made the
    # final line of the file unreachable.
    for l in range(line + 1, len(self.lines) + 1):
      if self.line(l).lstrip().startswith("#"):
        continue
      return l
    return None

  def display_traces(self):
    """Debug printing of source + traces per line."""
    for line in sorted(self.traces.keys()):
      print("%d %s" % (line, self.line(line)))
      for name, symbol, data in self.traces[line]:
        print("  %s : %s <- %s %s" % (
            name, symbol, data, data and [typename(x) for x in data]))
      print("-------------------")
class PytypeValue(object):
  """Stores a value inferred by pytype."""

  def __init__(self, module, name, typ):
    self.module = module  # module the value belongs to
    self.name = name      # symbol name
    self.typ = typ        # kind of value, e.g. "Class" or "BoundFunction"
    self.id = self.module + "." + self.name

  def format(self):
    """Human-readable representation for debugging output."""
    return "%s { %s.%s : %s }" % (
        self.id, self.module, self.typ, self.name)

  @classmethod
  def _from_data(cls, data):
    """Construct a PytypeValue from a single datum.

    May return a Remote instead when the datum refers to another module,
    or None when no useful value can be derived.
    """
    if not data:
      return None
    if isinstance(data, abstract.PyTDClass):
      if data.module:
        # If we have a remote reference, return Remote rather than PytypeValue.
        return Remote(data.module, data.name, resolved=True)
      else:
        # This is a namedtuple or some other special case pytype has generated a
        # local PyTDClass for. We need special cases for them too.
        return None
    elif isinstance(data, abstract.Module):
      # A module import: link to the file itself rather than a symbol in it.
      return Remote(data.name, IMPORT_FILE_MARKER, resolved=True)
    elif isinstance(data, abstract.InterpreterClass):
      return cls("module", data.name, "Class")
    elif isinstance(data, abstract.BoundFunction):
      # TODO(mdemello): Handle multiple class bindings.
      name = data.repr_names(callself_repr=typename)[0]
      return cls("module", name, "BoundFunction")
    else:
      # TODO(mdemello): We need to infer the module here.
      return cls("module", str(data), typename(data))

  @classmethod
  def from_data(cls, data):
    """Construct a PytypeValue from a list of data."""
    if not data:
      return None
    else:
      return [cls._from_data(x) for x in data]

  def to_signature(self):
    """Signature string used for cross-reference output."""
    return self.module + "." + self.name
class Module(object):
  """A reference to a (possibly remote) module by name."""

  def __init__(self, name):
    self.name = name

  def attr(self, attr_name):
    """A symbol defined as an attribute of this module."""
    return Remote(self.name, attr_name, resolved=True)

  def submodule(self, attr_name):
    """A submodule of this module, linked as a whole file."""
    qualified = "%s.%s" % (self.name, attr_name)
    return Remote(qualified, IMPORT_FILE_MARKER, resolved=True)
class Dummy(object):
  """Work around a python3 issue with calling super with kwargs."""

  def __init__(self, *args, **kwargs):
    # Swallow all constructor arguments so that namedtuple subclasses below
    # can chain super().__init__(...) without hitting object.__init__'s
    # "takes no parameters" error on python3.
    pass
class DocString(collections.namedtuple(
    "docstring", ["text", "location", "length"])):
  """Store the text and location of a docstring."""

  @classmethod
  def from_node(cls, node):
    """If the first element in node.body is a string, create a docstring."""
    # This should only be called on ClassDef and FunctionDef
    assert isinstance(node, (ast.ClassDef, ast.FunctionDef))
    if (node.body and
        isinstance(node.body[0], ast.Expr) and
        isinstance(node.body[0].value, ast.Str)):
      doc_node = node.body[0]
      doc = doc_node.value.s
      length = len(doc)  # we want to preserve the byte length
      if isinstance(doc, bytes):
        # In target 2.7 mode we get docstrings as bytes.
        doc = doc.decode("utf-8")
      return cls(doc, get_location(doc_node), length)
    # No docstring present.
    return None
class Definition(collections.namedtuple(
    "defn", ["name", "typ", "scope", "target", "doc"]), Dummy):
  """A symbol definition.

  Attributes:
    name: The symbol name
    typ: The definition type (e.g. ClassDef)
    scope: The namespace id (e.g. module:class A:function f:x)
    target: The LHS of an attribute (e.g. for x.foo, target = typeof(x))
    doc: The docstring, if any, for function and class defs
    id: The id
  """

  def __init__(self, name, typ, scope, target, doc):
    # Dummy in the MRO absorbs the args so object.__init__ is not called
    # with parameters (a python3 TypeError otherwise).
    super(Definition, self).__init__(name, typ, scope, target, doc)
    self.id = self.scope + "." + self.name

  def format(self):
    return self.id

  def to_signature(self):
    return self.id

  def doc_signature(self):
    """Signature for the definition's docstring."""
    return self.to_signature() + ".__doc__"

  def node_kind(self):
    """Map the AST definition type to a coarse kind label."""
    # TODO(mdemello): Add more node types.
    if self.typ == "ClassDef":
      return "class"
    elif self.typ == "FunctionDef":
      return "function"
    else:
      return "variable"
class Remote(collections.namedtuple(
    "remote", ["module", "name", "resolved"]), Dummy):
  """A symbol from another module."""

  def __init__(self, module, name, resolved):
    # Dummy in the MRO absorbs the args (python3 super-with-args workaround).
    super(Remote, self).__init__(module, name, resolved)
    self.id = self.module + "/module." + self.name

  def attr(self, attr_name):
    """A new Remote representing an attribute access on this symbol."""
    return Remote(self.module, self.name + "." + attr_name, self.resolved)

  def format(self):
    return self.id
class DefLocation(collections.namedtuple("defloc", ["def_id", "location"])):
  """A location of a symbol definition.

  Attributes:
    def_id: The definition id (scope + name)
    location: The location of the definition in the source code.

  Note that a single definition can have multiple locations, for symbols that
  are redefined in the code.
  """

  pass
class Reference(collections.namedtuple(
    "refr", [
        "name", "typ", "data", "scope", "ref_scope", "target", "location"])
    , Dummy):
  """A symbol holding a reference to a definition.

  Attributes:
    name: The symbol name
    typ: The symbol type (e.g. Attribute)
    data: The pytype data attached to the symbol
    scope: The namespace id (e.g. module.A.f)
    ref_scope: The namespace id of the referred symbol (if we can determine it)
    target: The LHS of an attribute (e.g. for x.foo, target = typeof(x))
    location: The line and column of the symbol in the source code
    id: The id
  """

  def __init__(self, name, typ, data, scope, ref_scope, target, location):
    # Dummy in the MRO absorbs the args (python3 super-with-args workaround).
    super(Reference, self).__init__(
        name, typ, data, scope, ref_scope, target, location)
    self.id = self.scope + "." + self.name

  def format(self):
    return self.id
class Funcall(object):
  """A function call site: the callee, its name, and where it occurs."""

  def __init__(self, name, func, location):
    # name: name of the called symbol; func: pytype data for the callee;
    # location: (line, column) of the call in the source.
    self.name = name
    self.func = func
    self.location = location
class Env(object):
"""A collection of namespaced symbols."""
def __init__(self, scope, parent, cls):
"""Initialize an environment.
Arguments:
scope: The namespace key (e.g. module:class A:function f)
parent: The env of the directly enclosing namespace
cls: The class currently being defined
(None if we are not in a class definition)
Other attributes defined:
env: The dictionary holding the symbol table for | |
for this step
"""
if self._measure_shadow_work:
self.addComputeSum("old_ke", self._kinetic_energy)
# update velocities
if self._mts:
self.addComputePerDof(
"v", "v + ((dt / {}) * f{} / m)".format(
self._force_group_nV[force_group], force_group))
else:
self.addComputePerDof(
"v", "v + (dt / {}) * f / m".format(self._force_group_nV["0"]))
self.addConstrainVelocities()
if self._measure_shadow_work:
self.addComputeSum("new_ke", self._kinetic_energy)
self.addComputeGlobal("shadow_work",
"shadow_work + (new_ke - old_ke)")
def _add_O_step(self):
"""Add an O step (stochastic velocity update)
"""
if self._measure_heat:
self.addComputeSum("old_ke", self._kinetic_energy)
# update velocities
self.addComputePerDof("v", "(a * v) + (b * sigma * gaussian)")
self.addConstrainVelocities()
if self._measure_heat:
self.addComputeSum("new_ke", self._kinetic_energy)
self.addComputeGlobal("heat", "heat + (new_ke - old_ke)")
def _substep_function(self, step_string):
"""Take step string, and add the appropriate R, V, O step with appropriate parameters.
The step string input here is a single character (or character + number, for MTS)
"""
function, can_accept_force_groups = self._step_dispatch_table[
step_string[0]]
if can_accept_force_groups:
force_group = step_string[1:]
function(force_group)
else:
function()
    def _parse_splitting_string(self, splitting_string):
        """Parse the splitting string to check for simple errors and extract necessary information
        Parameters
        ----------
        splitting_string : str
            The string that specifies how to do the integrator splitting
        Returns
        -------
        ORV_counts : dict
            Number of O, R, and V steps
        mts : bool
            Whether the splitting specifies an MTS integrator
        force_group_n_V : dict
            Specifies the number of V steps per force group. {"0": nV} if not MTS
        """
        # convert the string to all caps
        splitting_string = splitting_string.upper()
        # sanity check the splitting string
        self._sanity_check(splitting_string)
        ORV_counts = dict()
        # count number of R, V, O steps:
        for step_symbol in self._step_dispatch_table:
            ORV_counts[step_symbol] = splitting_string.count(step_symbol)
        # split by delimiter (space)
        step_list = splitting_string.split(" ")
        # populate a list with all the force groups in the system
        force_group_list = []
        for step in step_list:
            # if the length of the step is greater than one, it has a digit after it
            # (e.g. "V1" is a V step restricted to force group 1)
            if step[0] == "V" and len(step) > 1:
                force_group_list.append(step[1:])
        # Make a set to count distinct force groups
        force_group_set = set(force_group_list)
        # check if force group list cast to set is longer than one
        # If it is, then multiple force groups are specified
        if len(force_group_set) > 1:
            mts = True
        else:
            mts = False
        # If the integrator is MTS, count how many times the V steps appear for each
        if mts:
            force_group_n_V = {
                force_group: 0
                for force_group in force_group_set
            }
            for step in step_list:
                if step[0] == "V":
                    # ensure that there are no V-all steps if it's MTS
                    assert len(step) > 1
                    # extract the index of the force group from the step
                    force_group_idx = step[1:]
                    # increment the number of V calls for that force group
                    force_group_n_V[force_group_idx] += 1
        else:
            # Non-MTS: all V steps act on the full force ("group 0").
            force_group_n_V = {"0": ORV_counts["V"]}
        return ORV_counts, mts, force_group_n_V
def _add_metropolize_start(self):
"""Save the current x and v for a metropolization step later"""
self.addComputePerDof("xold", "x")
self.addComputePerDof("vold", "v")
    def _add_metropolize_finish(self):
        """Add a Metropolization (based on shadow work) step to the integrator.
        When Metropolization occurs, shadow work is reset.
        """
        # Accept with probability min(1, exp(-shadow_work/kT)); "uniform" is
        # a per-step uniform random variate inside the integrator program.
        self.addComputeGlobal("accept",
                              "step(exp(-(shadow_work)/kT) - uniform)")
        self.addComputeGlobal("ntrials", "ntrials + 1")
        self.beginIfBlock("accept != 1")
        # Rejected: restore the saved positions and flip the velocities.
        self.addComputePerDof("x", "xold")
        self.addComputePerDof("v", "-vold")
        self.addComputeGlobal("nreject", "nreject + 1")
        self.endBlock()
        self.addComputeGlobal("naccept", "ntrials - nreject")
        self.addComputeGlobal("shadow_work", "0")
class AlchemicalNonequilibriumLangevinIntegrator(
NonequilibriumLangevinIntegrator):
def __init__(self,
alchemical_functions=None,
splitting="O { V R H R V } O",
nsteps_neq=100,
*args,
**kwargs):
if alchemical_functions is None:
alchemical_functions = dict()
if (nsteps_neq < 0) or (nsteps_neq != int(nsteps_neq)):
raise Exception('nsteps_neq must be an integer >= 0')
self._alchemical_functions = alchemical_functions
self._n_steps_neq = nsteps_neq # number of integrator steps
# collect the system parameters.
self._system_parameters = {
system_parameter
for system_parameter in alchemical_functions.keys()
}
# call the base class constructor
kwargs['splitting'] = splitting
super(AlchemicalNonequilibriumLangevinIntegrator, self).__init__(
*args, **kwargs)
@property
def _step_dispatch_table(self):
"""dict: The dispatch table step_name -> add_step_function."""
# TODO use methoddispatch (see yank.utils) when dropping Python 2 support.
dispatch_table = super(AlchemicalNonequilibriumLangevinIntegrator,
self)._step_dispatch_table
dispatch_table['H'] = (self._add_alchemical_perturbation_step, False)
return dispatch_table
def _add_global_variables(self):
"""Add the appropriate global parameters to the CustomIntegrator. nsteps refers to the number of
total steps in the protocol.
Parameters
----------
nsteps : int, greater than 0
The number of steps in the switching protocol.
"""
super(AlchemicalNonequilibriumLangevinIntegrator,
self)._add_global_variables()
self.addGlobalVariable('Eold',
0) #old energy value before perturbation
self.addGlobalVariable('Enew', 0) #new energy value after perturbation
self.addGlobalVariable(
'lambda', 0.0
) # parameter switched from 0 <--> 1 during course of integrating internal 'nsteps' of dynamics
self.addGlobalVariable(
'nsteps', self._n_steps_neq
) # total number of NCMC steps to perform; this SHOULD NOT BE CHANGED during the protocol
self.addGlobalVariable(
'step', 0
) # step counter for handling initialization and terminating integration
# Keep track of number of Hamiltonian updates per nonequilibrium switch
n_H = self._ORV_counts['H'] # number of H updates per integrator step
self._n_lambda_steps = self._n_steps_neq * n_H # number of Hamiltonian increments per switching step
if self._n_steps_neq == 0:
self._n_lambda_steps = 1 # instantaneous switching
self.addGlobalVariable(
'n_lambda_steps', self._n_lambda_steps
) # total number of NCMC steps to perform; this SHOULD NOT BE CHANGED during the protocol
self.addGlobalVariable('lambda_step', 0)
def _add_update_alchemical_parameters_step(self):
"""
Add step to update Context parameters according to provided functions.
"""
for context_parameter in self._alchemical_functions:
if context_parameter in self._system_parameters:
self.addComputeGlobal(
context_parameter,
self._alchemical_functions[context_parameter])
def _add_alchemical_perturbation_step(self):
"""
Add alchemical perturbation step, accumulating protocol work.
TODO: Extend this to be able to handle force groups?
"""
# Store initial potential energy
self.addComputeGlobal("Eold", "energy")
# Update lambda and increment that tracks updates.
self.addComputeGlobal('lambda', '(lambda_step+1)/n_lambda_steps')
self.addComputeGlobal('lambda_step', 'lambda_step + 1')
# Update all slaved alchemical parameters
self._add_update_alchemical_parameters_step()
# Accumulate protocol work
self.addComputeGlobal("Enew", "energy")
self.addComputeGlobal("protocol_work", "protocol_work + (Enew-Eold)")
def _add_integrator_steps(self):
"""
Override the base class to insert reset steps around the integrator.
"""
# First step: Constrain positions and velocities and reset work accumulators and alchemical integrators
self.beginIfBlock('step = 0')
self.addConstrainPositions()
self.addConstrainVelocities()
self._add_reset_protocol_work_step()
self._add_alchemical_reset_step()
self.endBlock()
# Main body
if self._n_steps_neq == 0:
# If nsteps = 0, we need to force execution on the first step only.
self.beginIfBlock('step = 0')
super(AlchemicalNonequilibriumLangevinIntegrator,
self)._add_integrator_steps()
self.addComputeGlobal("step", "step + 1")
self.endBlock()
else:
#call the superclass function to insert the appropriate steps, provided the step number is less than n_steps
self.beginIfBlock("step < nsteps")
super(AlchemicalNonequilibriumLangevinIntegrator,
self)._add_integrator_steps()
self.addComputeGlobal("step", "step + 1")
self.endBlock()
def _add_alchemical_reset_step(self):
    """
    Reset the alchemical lambda to its starting value

    Zeroes `lambda`, the accumulated `protocol_work` and both step
    counters, then recomputes every dependent Context parameter so the
    system sits at the start of the switching protocol again.
    """
    self.addComputeGlobal("lambda", "0")
    self.addComputeGlobal("protocol_work", "0")
    self.addComputeGlobal("step", "0")
    self.addComputeGlobal("lambda_step", "0")
    # Add all dependent parameters
    self._add_update_alchemical_parameters_step()
# --- Ad-hoc driver: exercise the alchemical NCMC integrator on alanine
# --- dipeptide in vacuum.  NOTE(review): this runs at import time.
temperature = 300 * unit.kelvin
collision_rate = 1 / unit.picoseconds
timestep = 1.0 * unit.femtoseconds
n_steps = 100  # number of nonequilibrium switching steps
alchemical_functions = {'lambda_sterics': 'lambda'}
from openmmtools import testsystems, alchemy
# Build an alchemically modified alanine dipeptide system; all 22 atoms
# are placed in the alchemical region.
factory = alchemy.AbsoluteAlchemicalFactory(consistent_exceptions=False)
ala = testsystems.AlanineDipeptideVacuum()
alchemical_region = alchemy.AlchemicalRegion(alchemical_atoms=range(22))
alchemical_atoms = list(alchemical_region.alchemical_atoms)
alanine_alchemical_system = factory.create_alchemical_system(
    reference_system=ala.system, alchemical_regions=alchemical_region)
alchemical_state = alchemy.AlchemicalState.from_system(
    alanine_alchemical_system)
thermo_state = ThermodynamicState(
    system=alanine_alchemical_system, temperature=300 * unit.kelvin)
compound_state = CompoundThermodynamicState(
    thermo_state, composable_states=[alchemical_state])
sampler_state = SamplerState(positions=ala.positions)
print(compound_state.lambda_sterics)
print(compound_state.lambda_electrostatics)
# Pair the NCMC switching integrator (index 0) with a plain Langevin
# integrator (index 1) inside one CompoundIntegrator.
ncmc_integrator = AlchemicalNonequilibriumLangevinIntegrator(
    alchemical_functions,
    splitting='H R V O V R H',
    #splitting='O { V R H R V } O',
    temperature=temperature,
    collision_rate=collision_rate,
    timestep=timestep,
    nsteps_neq=n_steps,
    measure_heat=True)
integrator = LangevinIntegrator(
    temperature=temperature,
    timestep=timestep,
    collision_rate=collision_rate,
    measure_heat=True)
#print(integrator)
#print(dir(integrator))
#print(integrator.getGlobalVariableByName('heat'))
compound_integrator = CompoundIntegrator()
compound_integrator.addIntegrator(ncmc_integrator)
compound_integrator.addIntegrator(integrator)
compound_integrator.setCurrentIntegrator(1)  # start on the Langevin integrator
dir(compound_integrator)  # NOTE(review): result unused — debugging leftover
context_cache = cache.global_context_cache
context, compound_integrator = context_cache.get_context(
    compound_state, compound_integrator)
dir(compound_integrator)
# If we reassign velocities, we can ignore the ones in sampler_state.
sampler_state.apply_to_context(context)
context.setVelocitiesToTemperature(compound_state.temperature)
#langevin_integrator = compound_integrator.getIntegrator(1)
#RestorableOpenMMObject.restore_interface(langevin_integrator)
#global_variables = {
#    langevin_integrator.getGlobalVariableName(index): index
#    for index in range(langevin_integrator.getNumGlobalVariables())
#}
#print(global_variables)
dir(compound_integrator)
compound_integrator.step(5)  # advance 5 steps with the current integrator
# global_variables = {
#     langevin_integrator.getGlobalVariableName(index): index
#     for index in range(langevin_integrator.getNumGlobalVariables())
# }
# print(global_variables)
# NOTE(review): restore_interface() on the Langevin copy is commented out
# below, yet reset() is still called on it — verify it works unrestored.
langevin_integrator = compound_integrator.getIntegrator(1)
dir(langevin_integrator)
#RestorableOpenMMObject.restore_interface(langevin_integrator)
print(langevin_integrator)
#print(dir(langevin_integrator))
langevin_integrator.reset()
alch_integrator = compound_integrator.getIntegrator(0)
RestorableOpenMMObject.restore_interface(alch_integrator)
print(alch_integrator)
#print(dir(alch_integrator))
alch_integrator.reset()
class MCMCSampler(object):
"""
Markov chain Monte Carlo (MCMC) sampler.
This is a minimal functional implementation placeholder until we can replace this with MCMCSampler from `openmmmcmc`.
Properties
----------
positions : simtk.unit.Quantity of size [nparticles,3] with units compatible with nanometers
The current positions.
iteration : int
Iterations completed.
verbose : bool
If True, verbose output is printed
Examples
--------
>>> # Create a test system
>>> test = testsystems.AlanineDipeptideVacuum()
>>> # Create a sampler state.
>>> sampler_state = SamplerState(positions=test.positions)
>>> # Create a thermodynamic state.
>>> thermodynamic_state = ThermodynamicState(system=test.system, temperature=298.0*unit.kelvin)
>>> # Create an MCMC sampler
>>> sampler = MCMCSampler(thermodynamic_state, sampler_state)
>>> # Run the sampler
>>> sampler.verbose = False
>>> sampler.run()
"""
def __init__(self,
thermodynamic_state=None,
sampler_state=None,
platform=None,
ncfile=None):
"""
Create an MCMC sampler.
Parameters
----------
thermodynamic_state : ThermodynamicState
The thermodynamic state to simulate
sampler_state : SamplerState
The initial sampler state to simulate from.
platform : simtk.openmm.Platform, optional, default=None
If specified, this platform will be used
ncfile : netCDF4.Dataset, optional, default=None
NetCDF storage file.
"""
if thermodynamic_state is None:
raise Exception("'thermodynamic_state' must be specified")
if sampler_state is None:
raise Exception("'sampler_state' | |
import pandas as pd
import numpy as np
import random
import datetime
from tqdm import tqdm
from tr.core.resources import f1_in_tasks, f1_in_checks
from tr.core.utils import dict_to_list, diff_time_list, get_slots, diff_time_list_peak_season
from tr.core.utils import advance_date, days_between_dates, convert_iso_to_timestamp
from tr.core.utils import look_o_dict, load_pickle, save_pickle
from collections import OrderedDict, defaultdict
def excel_to_book(file_input: str):
    """Parse an Excel workbook into a dict of DataFrames.

    Parameters
    ----------
    file_input : str
        Path to the .xlsx file.

    Returns
    -------
    dict
        Mapping of sheet name -> pandas.DataFrame (``sheet_name=None``
        reads every sheet, in workbook order).

    Raises
    ------
    Exception
        Re-raises whatever ``pd.read_excel`` raised.  The original code
        swallowed the error and then crashed with an unrelated
        UnboundLocalError on ``return book``, hiding the real cause.
    """
    print("INFO: parsing xlsx to runtime book")
    try:
        # returns an ordered dict of sheet name -> DataFrame
        book = pd.read_excel(file_input, sheet_name=None)
    except Exception as e:
        print(e)
        print('Error parsing the excel file into a dict book buddy!')
        raise  # propagate the real error instead of failing on `return book`
    print("INFO: xlsx to runtime book completed")
    return book
# Function 4: Given a string, checks if it encodes a month/year interval.
def preprocessMonths(x):
    """Convert a calendar-interval string to a number of months.

    'N M' or 'NM' -> N months; 'N Y' or 'NY' -> N*12 months; any other
    string -> 0; non-strings -> None (callers .fillna(0) afterwards).

    The original sliced off the last TWO characters, which crashed with
    ``float('')`` on compact values like '6M'; stripping only the suffix
    character handles both '6M' and '6 M' (float tolerates the blank).
    """
    if not isinstance(x, str):
        return None
    if x.endswith('M'):
        return float(x[:-1])
    if x.endswith('Y'):
        return float(x[:-1]) * 12
    return 0
# Function 5: Given a string, checks if it encodes a day interval.
def preprocessDays(x):
    """Convert a calendar-interval string to a number of days.

    'N D' or 'ND' -> N days; any other string -> 0; non-strings -> None
    (callers .fillna(0) afterwards).

    Same fix as preprocessMonths: strip only the suffix character so
    compact values like '7D' no longer crash with ``float('')``.
    """
    if not isinstance(x, str):
        return None
    if x.endswith('D'):
        return float(x[:-1])
    return 0
# Marks populated "tasks by block" cells: any string cell becomes 1.
def preprocesstask(x):
    """Return 1 when x is a string, otherwise None (NaN after .apply)."""
    # `type(x) is str` (not isinstance) reproduces the original exactly,
    # including its behaviour for str subclasses.
    return 1 if type(x) is str else None
def book_to_kwargs_MPO(book):
    """Reorganize an MPO checks workbook into scheduler kwargs.

    Parameters
    ----------
    book : dict
        Sheet name -> DataFrame, as produced by ``excel_to_book``.

    Returns
    -------
    dict
        ``{'aircraft_info': ..., 'restrictions': ...}`` where the
        restrictions dict holds per-check-type blocked dates, slot
        resources and the planning horizon.
    """
    print("#########################")
    print("INFO: processing from runtime checks book")
    aircraft_info = get_aircraft_info_MPO(book)
    calendar_restrictions = get_restrictions_MPO(book)
    # Each type of maintenance has several restrictions; we divide them in
    # two kinds: time restrictions and hangar (slot) restrictions.
    # (The original initialized m_type_restriction twice; the dead first
    # assignment has been removed.)
    m_type_restriction = {'time_type': 'day'}
    a_time = dict_to_list(calendar_restrictions['A_NOT_ALLOWED']['DATE'])
    c_time = diff_time_list(calendar_restrictions['C_NOT_ALLOWED'])
    c_peak = diff_time_list_peak_season(calendar_restrictions['C_PEAK'])
    all_time = dict_to_list(calendar_restrictions['PUBLIC_HOLIDAYS']['DATE'])
    a_resources = {'slots': get_slots(calendar_restrictions['MORE_A_SLOTS'])}
    c_resources = {'slots': get_slots(calendar_restrictions['MORE_C_SLOTS'])}
    m_type_restriction['a-type'] = {'time': a_time, 'resources': a_resources}
    m_type_restriction['c-type'] = {
        'time': c_time,
        'resources': c_resources,
        'c_peak': c_peak,
        'c_allowed': c_time
    }
    m_type_restriction['all'] = {'time': all_time}
    # Planning horizon: start date comes from the ADDITIONAL sheet; the
    # end is hard-coded to 2023-01-01.
    end = datetime.datetime(2023, 1, 1, 0, 0)
    start_date = pd.to_datetime(book['ADDITIONAL'][2019][1])
    end_date = pd.to_datetime(end)
    m_type_restriction['start_date'] = start_date
    m_type_restriction['end_date'] = end_date
    print("INFO: information from runtime parsed with success")
    print("#########################")
    return {
        'aircraft_info': aircraft_info,
        'restrictions': m_type_restriction,
    }
def get_restrictions_MPO(book):
    """Collect every sheet WITHOUT an 'A/C TAIL' column as restrictions.

    Returns an OrderedDict mapping sheet name -> sheet.to_dict(), in
    workbook order; sheets describing aircraft are skipped.
    """
    print('INFO: gathering restrictions info')
    restrictions_info = OrderedDict(
        (sheet_name, sheet.to_dict())
        for sheet_name, sheet in book.items()
        if 'A/C TAIL' not in sheet.keys()
    )
    print('INFO: restrictions info completed')
    return restrictions_info
def get_aircraft_info_MPO(book):
    """Group every sheet that HAS an 'A/C TAIL' column by aircraft id.

    Returns an OrderedDict: aircraft id -> sheet name -> column -> value.
    Pandas already suffixes duplicated column names on load, so columns
    are unique; for a tail id repeated across rows, the last row wins.
    """
    print('INFO: gathering aircraft info')
    aircraft_info = OrderedDict()
    for sheet_name, sheet in book.items():
        if 'A/C TAIL' not in sheet.keys():
            continue
        tails = sheet['A/C TAIL']
        # Make sure nested dicts exist for every aircraft on this sheet.
        for a_id in tails:
            aircraft_info.setdefault(a_id, OrderedDict())
            aircraft_info[a_id].setdefault(sheet_name, OrderedDict())
        # Copy every non-tail column value under its aircraft id.
        for column_idx in sheet.keys():
            if column_idx == 'A/C TAIL':
                continue
            column = sheet[column_idx]
            for row, a_id in enumerate(tails):
                aircraft_info[a_id][sheet_name][column_idx] = column[row]
    print('INFO: aircraft info completed')
    return aircraft_info
def book_to_kwargs_tasks(book):
print("#########################")
print("INFO: processing from runtime tasks book")
# given an MPO input, compute dict where keys are
# aircraft ids and the rest of sheet info is organized by aircraft id
sheet_name = 'TASK_LIST'
df = book[sheet_name]
# equivalent to Preprocess.py/PreprocessTasks
def process_df(df):
    """Clean the raw TASK_LIST frame: strip string cells, parse date
    columns, default missing blocks to C-CHECK and drop LINE MAINTENANCE
    rows.  Returns a fresh, re-indexed DataFrame.
    """
    # Trim stray whitespace from every string cell in every column.
    for _ in df.keys():
        df[_] = df[_].apply(lambda x: x.strip() if type(x) is str else x)
    # df['PER FH'].fillna(0, inplace=True)
    # df['PER FC'].fillna(0, inplace=True)
    # df['LIMIT FH'].fillna(False, inplace=True)
    # df['LIMIT FC'].fillna(False, inplace=True)
    # df['LIMIT EXEC DT'].fillna(False, inplace=True)
    # df['LAST EXEC FC'].fillna(False, inplace=True)
    # df['LAST EXEC FH'].fillna(False, inplace=True)
    # df['LAST EXEC DT'].fillna(False, inplace=True)
    # Anonymized exports ship dates as %m/%d/%Y strings; parse them once.
    if not isinstance(df['LIMIT EXEC DT'][0], pd.Timestamp):
        df['LIMIT EXEC DT'] = pd.to_datetime(df['LIMIT EXEC DT'], format='%m/%d/%Y')
        df['LAST EXEC DT'] = pd.to_datetime(df['LAST EXEC DT'], format='%m/%d/%Y')
        # NOTE(review): print placed inside the date-parsing branch; the
        # original indentation was ambiguous — confirm.
        print("INFO: using anonymized data")
    # df['PER CALEND'].fillna(0, inplace=True)
    # Tasks without an explicit block default to the C-CHECK block.
    df['TASK BY BLOCK'].fillna("C-CHECK", inplace=True)
    # do not use things without due dates
    # df = df[(df['PER FH'] != False) | (df['PER FC'] != False) |
    #         (df['PER CALEND'] != False)]
    df = df[df['TASK BY BLOCK'] != 'LINE MAINTENANCE']
    df = df.reset_index(drop=True)
    return df
df = process_df(df)
assert 'A/C' in df.keys()
# maps aircrafts, to items, to task number (unique indentifier) to stuffs,
# I think it makes sense,
# but we should also return the df for searching purposes!
aircraft_tasks = OrderedDict()
for _ in df['A/C'].unique():
aircraft_tasks[_] = OrderedDict()
skills = []
for _ in book['SKILL_TYPE']['SKILL TYPE']:
skills.append(_)
skills_ratios_A = OrderedDict()
for _ in book['A-CHECK_NRS_RATIO']['SKILL GI']:
skills_ratios_A[_] = {}
skills_ratios_C = OrderedDict()
for _ in book['C-CHECK_NRS_RATIO']['SKILL GI']:
skills_ratios_C[_] = {}
for _ in book['A-CHECK_NRS_RATIO']['SKILL GI'].keys():
skill = book['A-CHECK_NRS_RATIO']['SKILL GI'][_]
skill_block = book['A-CHECK_NRS_RATIO']['BLOCK'][_]
skill_modifier = book['A-CHECK_NRS_RATIO']['SKILL MDO'][_]
skill_ratio = book['A-CHECK_NRS_RATIO']['RATIO'][_]
if skill_block not in skills_ratios_A[skill].keys():
skills_ratios_A[skill][skill_block] = {}
skills_ratios_A[skill][skill_block][skill_modifier] = skill_ratio
for _ in book['C-CHECK_NRS_RATIO']['SKILL GI'].keys():
skill = book['C-CHECK_NRS_RATIO']['SKILL GI'][_]
skill_block = book['C-CHECK_NRS_RATIO']['BLOCK'][_]
skill_modifier = book['C-CHECK_NRS_RATIO']['SKILL MDO'][_]
skill_ratio = book['C-CHECK_NRS_RATIO']['RATIO'][_]
if skill_block not in skills_ratios_C[skill].keys():
skills_ratios_C[skill][skill_block] = {}
skills_ratios_C[skill][skill_block][skill_modifier] = skill_ratio
def get_man_personnel(book):
    """Average available technicians per skill for each weekday.

    Returns an OrderedDict: weekday counter (0=Monday, matching
    ``date.weekday()``, in sheet order) -> skill column -> mean head
    count over every row of that weekday.  The bookkeeping columns
    'Weekday', 'Week Number' and 'Date' are excluded.
    """
    df_personnel = book['NUMBER_OF_TECHNICIANS']
    non_skill = ('Weekday', 'Week Number', 'Date')
    man_personnel = OrderedDict()
    for ctx, weekday in enumerate(df_personnel['Weekday'].unique()):
        mask = df_personnel['Weekday'] == weekday
        man_personnel[ctx] = OrderedDict(
            (m_key, np.mean(df_personnel[m_key][mask]))
            for m_key in df_personnel.keys() if m_key not in non_skill
        )
    return man_personnel
def get_man_hours(man_personnel):
    """Convert daily technician head counts into effective man-hours.

    Fixed NDT crews (7 heads each) are injected for every weekday first,
    then every skill's head count is multiplied by the effective working
    hours per technician per day.  Mutates and returns `man_personnel`.
    """
    effective_hours = 4  # productive hours per technician per day
    h_ndt = 7  # 'HM-NDT' head count
    l_ndt = 7  # 'LM-NDT' head count
    for week_day in man_personnel.keys():
        man_personnel[week_day]['HM-NDT'] = h_ndt
        man_personnel[week_day]['LM-NDT'] = l_ndt
        for skill_day in man_personnel[week_day].keys():
            # Use the named constant; the original multiplied by a bare
            # literal 4, silently decoupled from `effective_hours`.
            man_personnel[week_day][skill_day] *= effective_hours
    return man_personnel
man_personnel = get_man_personnel(book)
man_personnel_hours = get_man_hours(man_personnel)
man_hours_skills = OrderedDict()
def shred_tasks(df_aircraft):
    """Clean one aircraft's task table and drop tasks with no due-date basis.

    Returns (df_aircraft, tasks_no_date, index_labels, amount_remove):
    tasks_no_date is the np.where tuple of positional indices whose
    FH/FC/month/day intervals are all zero, index_labels the same indices
    as a plain list, and amount_remove the count np.count_nonzero reports
    for that tuple.
    """
    # Preprocess data
    # Excel file 1 maintenance tasks
    # 1. Modification/expanding dataset section
    # Modification/expanding 1: nan values to zero
    # Replace nan values with zero in the PER FH and PER FC columns
    df_aircraft["PER FH"] = df_aircraft["PER FH"].fillna(0)
    df_aircraft["PER FC"] = df_aircraft["PER FC"].fillna(0)
    df_aircraft["PER CALEND"] = df_aircraft["PER CALEND"].fillna(0)
    # df_aircraft["TASK BY BLOCK"] = df_aircraft["TASK BY BLOCK"].apply(
    #     preprocesstask).fillna(0)
    # Modification/expanding 2: new columns added for month and day
    # The CAL column needs a special treatment. The years are transformed to months.
    # Two new columns will be created: PER Month (only months and years (expressed in months))
    # and PER DAY (only in days!)
    df_aircraft['PER MONTH'] = df_aircraft['PER CALEND'].apply(preprocessMonths).fillna(0)
    df_aircraft['PER DAY'] = df_aircraft['PER CALEND'].apply(preprocessDays).fillna(0)
    # Anonymized exports ship dates as %m/%d/%Y strings; parse them once.
    if not isinstance(df_aircraft['LIMIT EXEC DT'][0], pd.Timestamp):
        df_aircraft['LIMIT EXEC DT'] = pd.to_datetime(df_aircraft['LIMIT EXEC DT'],
                                                      format='%m/%d/%Y')
        df_aircraft['LAST EXEC DT'] = pd.to_datetime(df_aircraft['LAST EXEC DT'],
                                                     format='%m/%d/%Y')
    # import ipdb;
    # ipdb.set_trace()
    # Modification/expanding 3: new column with nr task added to dataset
    # Each of the tasks will be represented by a task nr starting from 0.
    # This can be found in the column 'NR TASK'
    df_aircraft['NR TASK'] = range(len(df_aircraft))
    # Modification/expanding 4: Remove list that have no given limit in FH/FC/CALEND
    # The tasks that have no PER FH, PER FC, PER CALEND will be removed from the tasks list.
    tasks_no_date = np.where((df_aircraft['PER FH'] + df_aircraft['PER FC'] +
                              df_aircraft['PER MONTH'] + df_aircraft['PER DAY']) == 0)
    # NOTE(review): count_nonzero over the index tuple ignores a match at
    # position 0 (it is a zero); len(tasks_no_date[0]) would count rows —
    # confirm which quantity callers expect.
    amount_remove = np.count_nonzero(tasks_no_date)
    index_labels = []
    for i in range(len(tasks_no_date[0])):
        index_labels.append(tasks_no_date[0][i])
    # Now dropping the rows without due dates
    df_aircraft = df_aircraft.drop(df_aircraft.index[index_labels])
    return df_aircraft, tasks_no_date, index_labels, amount_remove
aircraft_clustered_tasks = OrderedDict()
df_aircraft_shaved = OrderedDict()
for aircraft in aircraft_tasks.keys():
df_aircraft = df.copy(deep=True)
df_aircraft = df_aircraft[df_aircraft['A/C'] == aircraft]
df_aircraft = df_aircraft.reset_index(drop=True)
df_aircraft, tasks_no_date, index_labels, amount_remove = shred_tasks(df_aircraft)
df_aircraft_shaved[aircraft] = df_aircraft
aircraft_clustered_tasks[aircraft] = OrderedDict()
for line_idx in list(df_aircraft.index):
item = df_aircraft['ITEM'][line_idx]
if item not in aircraft_tasks[aircraft].keys():
aircraft_tasks[aircraft][item] = OrderedDict()
aircraft_tasks[aircraft][item][line_idx] = OrderedDict()
for column_idx in df_aircraft.keys():
if column_idx != 'A/C' and column_idx != 'ITEM':
value = df_aircraft[column_idx][line_idx]
aircraft_tasks[aircraft][item][line_idx][column_idx] = value
##############################################
# setting skills as A or C check
##############################################
# task by block 1: A-check, 0 C-check
per_fc = aircraft_tasks[aircraft][item][line_idx]['PER FC']
per_fh = aircraft_tasks[aircraft][item][line_idx]['PER FH']
per_months = aircraft_tasks[aircraft][item][line_idx]['PER MONTH']
per_block = aircraft_tasks[aircraft][item][line_idx]['TASK BY BLOCK']
per_fc_boolean = per_fc if not per_fc else (per_fc < 5000)
per_fh_boolean = per_fh if not per_fh else | |
= chess.Move(squares[0], squares[1])
if not mv in board.legal_moves:
mv = chess.Move(squares[1], squares[0])
elif len(squares) == 3: # En passant capture.
if board.turn == chess.WHITE: # White move.
if squares[1] == squares[2] - 8: # Taken black pawn square.
mv = chess.Move(squares[0], squares[2])
else:
mv = chess.Move(squares[1], squares[2])
else: # Black move.
if squares[1] == squares[0] + 8: # Taken white pawn square.
mv = chess.Move(squares[2], squares[0])
else:
mv = chess.Move(squares[1], squares[0])
else: # Castling move.
if squares[0] == 4: # White king side castling.
mv = chess.Move(4, 6)
elif squares[0] == 0: # White queen side castling.
mv = chess.Move(4, 2)
elif squares[0] == 60: # Black king side castling.
mv = chess.Move(60, 62)
else: # Black queen side castling.
mv = chess.Move(60, 58)
if mv in board.legal_moves: move = mv
return move
def Show_Position(board, changed_squares=set()):
    ''' Display a chess diagram image showing the position on the
        python-chess board, highlighting any changed squares.

        NOTE(review): the mutable default `changed_squares=set()` is only
        safe because the function never mutates it — keep it that way.
    '''
    sqsize = 64        # pixel size of one square
    bdsize = 8*64      # pixel size of the whole board
    RED = (255, 70, 70)  # RGB light red.
    # Create a light grey bdsize x bdsize image.
    img = Image.new('RGBA', (bdsize,bdsize), (200,200,200))
    draw = ImageDraw.Draw(img)
    # Draw the dark squares (the checker pattern is symmetric under the
    # 180-degree flip, so no White_bottom handling is needed here).
    for sq in range(64):
        x, y = sq % 8, sq // 8
        p1 = x * sqsize, y * sqsize
        p2 = p1[0] + sqsize, p1[1] + sqsize
        if (x + y) % 2 == 1:
            draw.rectangle([p1, p2], (0,128,43))
    # Highlight any changed squares, flipping coordinates when Black is
    # displayed at the bottom.
    for sq in changed_squares:
        if White_bottom:
            p1 = (sq % 8 ) * sqsize, (7 - sq//8) * sqsize
        else:
            p1 = (7 - sq % 8 ) * sqsize, (sq//8) * sqsize
        p2 = p1[0] + sqsize, p1[1] + sqsize
        draw.rectangle([p1, p2], RED)
    # Draw the pieces on the board.
    for sq in range(64):
        piece = board.piece_at(sq)
        if piece:
            piece_img = PIECES[piece.symbol()]
            if White_bottom:
                x, y = (sq % 8)*sqsize, (7 - sq//8)*sqsize
            else:
                x, y = (7 - sq % 8)*sqsize, (sq // 8)*sqsize
            # The piece image doubles as its own alpha mask.
            img.paste(piece_img, (x, y), mask=piece_img)
    img = ImageTk.PhotoImage(img)
    posn_label.configure(image=img)
    posn_label.image = img  # keep a reference so Tk doesn't drop the image
def Show_Image(image):
    ''' Display an OpenCV image.
    '''
    # Colour frames have 3 dims (BGR); anything else is treated as grey.
    is_colour = len(np.shape(image)) == 3
    conversion = cv2.COLOR_BGR2RGB if is_colour else cv2.COLOR_GRAY2RGB
    rgb = cv2.cvtColor(image, conversion)
    pil_img = Image.fromarray(rgb).resize((512,512))
    tk_img = ImageTk.PhotoImage(pil_img)
    posn_label.configure(image=tk_img)
    posn_label.image = tk_img  # keep a reference for Tk
def Show_Board_Image(board_image, posn):
    ''' Overlay the detected piece symbol of each square on the rectified
        board image, then display it.
    '''
    font = cv2.FONT_HERSHEY_SIMPLEX
    for square in range(64):
        col = square % 8
        row = 7 - square // 8
        origin = (col*SQSIZE+18, row*SQSIZE+29)
        cv2.putText(board_image, posn[square], origin, font, 0.5, (0,0,255),
                    1, cv2.LINE_AA)
    Show_Image(board_image)
def Show_Message_Wait(message, color='red'):
    ''' Display a message and wait for the Next>> button or Return
        key to be pressed.

        message is echoed to stdout and shown in the message label;
        color is a Tk colour name for the label text.
    '''
    print(message)
    mess_label.configure(text=message, fg=color)
    root.wait_variable(wait_var)  # Wait for Next>> to be pressed.
def Save_Game():
    ''' Ask if the user wants to save the game. If so, write the PGN
        for the game to a file, or append it to the file if it
        already exists.
    '''
    global close_enabled
    if board.move_stack:  # If there are moves to save.
        # Block window closing while the modal dialog is up.
        close_enabled = False
        answer = messagebox.askyesno(TITLE,\
            'Do you want to save the game?')
        close_enabled = True
        if answer:
            header = date.today().strftime('%d/%m/%Y')
            game = str(chess.pgn.Game().from_board(board)).split('\n')
            # Drop the first 8 lines — presumably the seven-tag PGN
            # roster plus its blank separator; verify against the
            # python-chess PGN output format.
            game = '\n'.join(game[8:])  # Strip the empty PGN header.
            print(header + '\n' + game)
            with open(GAMES_PATH, 'a') as f:
                f.write(header + '\n' + game + '\n\n')
def Make_Engine_Move():
    ''' Get a move from the engine, make it on the python-chess
        board and display it.  If the game is already over, just show
        the final position and the result instead.
    '''
    global engine, engine_move
    if board.is_game_over():
        # Show the final position.
        Show_Position(board)
        # Show the result.
        Show_Message_Wait('Game Over: ' + board.result(), 'black')
        return
    # Ask the engine for a move with a 1-second think limit.
    engine_move = engine.play(board, chess.engine.Limit(time=1.0)).move
    changed_squares = {engine_move.from_square, engine_move.to_square}
    # Show the engine move on the chess board diagram.
    Show_Position(board, changed_squares)
    # Build the SAN text BEFORE pushing the move (variation_san needs the
    # pre-move position), then push it.
    mv_str = 'Engine Move: ' + board.variation_san([engine_move])
    board.push(engine_move)
    if board.is_game_over():
        mv_str = mv_str + ' ' + board.result()
    Show_Message_Wait(mv_str, 'black')
def On_Closing():
    ''' The main window close button has been clicked. Close the
        program down if close_enabled is True, otherwise return.
    '''
    global close_enabled
    # Ignore the close request while a modal dialog is open.
    # (Idiom fix: `if close_enabled == False` -> `if not close_enabled`.)
    if not close_enabled:
        return
    Save_Game()
    root.destroy()
def On_Next(event=None):
    ''' The Next button (or Return key) has been pressed.

        `event` is unused; it exists so one callback serves both as a
        button command and as a key binding.
    '''
    wait_var.set(1)  # Release the wait in Show_Message.
def On_Rotate(event=None):
    ''' Rotate the board image through 180 degrees.
    '''
    global board_image, Corners
    # Swapping the two opposite corner pairs rotates the detected
    # quadrilateral by 180 degrees.  (Idiom fix: tuple swaps replace the
    # original temp-variable shuffles.)
    Corners[0], Corners[2] = Corners[2], Corners[0]
    Corners[1], Corners[3] = Corners[3], Corners[1]
    board_image, Corners = Transform_Image(Corners, image)
    wait_var.set(1)  # Release the wait in Show_Message.
def On_Takeback(event=None):
    ''' The Takeback button (or T key) has been pressed. A move should
        have been taken back on the physical chess board. Take back the
        move on the python-chess board.
    '''
    global board
    try:
        move = board.pop()
    # board.pop() raises IndexError on an empty move stack; the original
    # bare `except:` would also have swallowed KeyboardInterrupt etc.
    except IndexError:
        Show_Message_Wait('No Move to Take Back')
        return
    print('Move taken back')
    Show_Position(board)
    mess_label.configure(text='Move Taken Back')
def On_Engine(event=None):
    ''' Toggle the engine. When turning it on, remember which colour it
        plays, highlight the button in pink and immediately request a
        move; when turning it off, restore the normal button colour.
    '''
    global engine_on, engine_color
    if not engine_on:
        engine_color = board.turn
        engine_on = True
        engine_button.configure(bg='pink', activebackground='pink')
        # Get a move from the engine, and display it.
        Make_Engine_Move()
    else:
        engine_on = False
        engine_button.configure(bg='light gray',
                                activebackground='light gray')
def On_Level(event=None):
    ''' The Level button (or L key) has been pressed. Ask the user to
        set the engine skill level (if that option is available).
    '''
    # (Fix: the docstring was originally placed AFTER the `global`
    # statement, so it was a discarded expression, not a docstring.)
    global close_enabled
    if 'Skill Level' in eo:
        min_lv = eo['Skill Level'].min
        max_lv = eo['Skill Level'].max
        mess = 'Set Skill Level (' + str(min_lv) + '-' + \
               str(max_lv) + ')'
        # Block window closing while the modal dialog is open.
        close_enabled = False
        level = simpledialog.askinteger(TITLE, mess,
                                        minvalue=min_lv,
                                        maxvalue=max_lv)
        close_enabled = True
        if level:
            print('Skill level', level)
            mess_label.configure(text='Skill Level '+str(level))
            engine.configure({'Skill Level': level})
    next_button.focus_set()
def On_Flip(event=None):
    ''' Rotate chess diagrams through 180 degrees.

        Toggles the module-level White_bottom flag and redraws the
        current position from the other side.
    '''
    global White_bottom
    White_bottom = not White_bottom
    Show_Position(board)
# Create a window to display board images, messages and command buttons.
root = tk.Tk()
root.title(TITLE)
wait_var = tk.IntVar()  # set by On_Next to release wait_variable() waits
# Placeholder 512x512 black image until the first capture is displayed.
img = Image.new('RGBA', (512,512), (0,0,0))
img = ImageTk.PhotoImage(img)
posn_frame = tk.Frame(root, width=512, bd=6, relief=tk.FLAT)
posn_frame.pack(side=tk.TOP)
posn_label = tk.Label(posn_frame, image=img)
posn_label.pack()
# Message area below the board image.
mess_frame = tk.Frame(root, width=512, bd=4, relief=tk.GROOVE)
mess_frame.pack(fill='both', expand=True)
mess_label = tk.Label(mess_frame, text='Set Up an Empty Board',\
    height=2, font=('Liberation', 24), pady=8)
mess_label.pack(side=tk.TOP)
# Command buttons.  Only Next>> is packed now; the others are packed /
# removed later as the program moves through its phases.
button_frame = tk.Frame(root, bd=3)
button_frame.pack(side=tk.TOP)
bfont = ('Liberation', 12)
next_button = tk.Button(button_frame, text='Next>>', font=bfont,\
    bd=3, command=On_Next)
next_button.pack(side=tk.LEFT)
rotate_button = tk.Button(button_frame, text='Rotate', font=bfont,\
    bd=3, underline=0, command=On_Rotate)
takeback_button = tk.Button(button_frame, text='Takeback', font=bfont,\
    bd=3, underline=0, command=On_Takeback)
engine_button = tk.Button(button_frame, text='Engine', font=bfont,\
    bd=3, underline=0, command=On_Engine)
level_button = tk.Button(button_frame, text='Level', font=bfont,\
    bd=3, underline=0, command=On_Level)
f_button = tk.Button(button_frame, text='Flip', font=bfont,\
    bd=3, underline=0, command=On_Flip)
next_button.focus_set()
# Keyboard shortcuts mirroring the buttons.
root.bind('<Return>', On_Next)
root.bind('t', On_Takeback)
root.bind('e', On_Engine)
root.bind('l', On_Level)
root.bind('f', On_Flip)
root.bind('r', On_Rotate)
root.protocol("WM_DELETE_WINDOW", On_Closing)
# Identify the chess board: keep capturing frames until the four corners
# are found and a rectified board image can be produced.
while True:
    image = Get_Image()
    if image is None:
        Show_Message_Wait('Image Capture Failed')
        continue
    Corners = Find_Corners(image)
    if Corners is None:
        Show_Image(image)
        Show_Message_Wait('Chess Board Not Found')
        continue
    board_image, Corners = Transform_Image(Corners, image)
    if board_image is not None: break
    Show_Image(image)
    Show_Message_Wait('Chess Board Not Found')
Show_Image(image)
Show_Message_Wait('Chess Board Found\nSet Up Start Position', 'black')
rotate_button.pack(side=tk.LEFT)  # allow a 180-degree correction during setup
rotate_button.pack(side=tk.LEFT)
# Identify the start position.
while True:
while True:
image = Get_Image()
if image is None:
Show_Message_Wait('Image Capture Failed')
continue
board_image, Corners = Transform_Image(Corners, image)
if board_image is not None: break
Show_Image(image)
Show_Message_Wait('Chess Board Not Found')
Show_Image(board_image)
Show_Message_Wait('White at Bottom?\nIf not Rotate', 'black')
trWoB, trBoB, trWoW, trWB = Optimise_Thresholds(board_image)
posn = Identify_Position(board_image)
if posn == START_POSN: break
print(posn)
Show_Board_Image(board_image, posn)
Show_Message_Wait('Start Position Not Found')
rotate_button.pack_forget() # Remove the Rotate button.
root.unbind('r')
# Show the Takeback, Engine, Level and R buttons.
takeback_button.pack(side=tk.LEFT)
engine_button.pack(side=tk.LEFT)
level_button.pack(side=tk.LEFT)
f_button.pack(side=tk.LEFT)
Show_Position(board)
Show_Message_Wait('Make a Move or Press Engine', 'black')
# Respond to moves.
while True:
if engine_on and engine_color==board.turn and not engine_move:
# Make a move from the engine, and display it.
Make_Engine_Move()
image = Get_Image()
if image is None:
Show_Message_Wait('Image Capture Failed')
break
board_image, Corners = Transform_Image(Corners, image)
if board_image is None:
Show_Image(image)
Show_Message_Wait('Chess Board Not Found')
continue
posn = Identify_Position(board_image)
#Show_Board_Image(board_image, posn)
#Show_Message_Wait('Position from Image', 'black')
if posn == START_POSN:
# New game: the start position has been set up again.
mess_label.configure(text='New Game')
Save_Game() # Ask the user if he wants to save the old game.
board = chess.Board() # Set up a new game.
engine_on = False
engine_button.configure(bg='light gray',\
activebackground='light gray')
Show_Position(board)
root.wait_variable(wait_var) # Wait for Next>> to be pressed.
continue
before_mv_posn = Get_Posn(board) # | |
<filename>src/icosahedron.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 26 09:41:29 2015
@author: sacim
"""
import numpy as np
import math
#=============================================================================
class Icosahedron:
def __init__(self, ngrid, name='ellipsoid'):
    """Build the triangulated icosahedral sphere.

    Parameters
    ----------
    ngrid : int
        Grid resolution per domain; the mesh has 10*2*ngrid**2 triangles.
    name : str
        A short name for this object.
    """
    self.name = name  # A short name of our icosahedron
    self.ngrid = ngrid  # grid size for each domain
    self.NT = 10*2*ngrid**2  # Number of triangles
    self.TP = np.zeros((3*self.NT,3))  # Vertices of triangles
    self.NP = np.zeros((3*self.NT,3))  # Normal at each triangle vertex
    # vertex and normal locations (for use with pycollada)
    self.indices = np.zeros(3*2*self.NT,dtype=int)
    self.create_icosahedron()
def create_icosahedron(self):
    """
    Creates an icosahedron

    A regular icosahedron is initialized with 12 vertices defined by array
    A (see code). This defines a regular icosahedron with vertices
    normalised to unity. Each vertex is then rotated by Beta (see code)
    about the y-axis. This gives us a regular icosahedron with 'poles'
    along the z-axis.

    From this point the icosahedron is split into 10 domains, consisting
    of 2 joined equilateral triangles, 5 with a vertex position at the
    north pole, and five with a vertex at the south pole.

    Within each domain, ngrid-1 geodesic points are calculated between
    adjacent vertices, giving ngrid+1 points along each domain edge.
    The remaining geodesic points are then calculated by walking through
    suitable pairs of domain edge points, and calculating the suitable
    number of points between these pairs.

    Each domain is split into triangle points and stored in a numpy array
    TP. TP triplets consist of x, y, and z coordinates of the calculated
    triangles.

    The numpy array NP holds the normal of each vertex of a TP triangle.
    the numpy array indices holds the indices of the triangles and their
    associated normals in a format suitable for processing by the
    pycollada package.
    """
    ngrid=self.ngrid
    a=1.
    tau = 0.5*(math.sqrt(5.)+1.)  # golden ratio
    rho=tau-1.
    u=a/(math.sqrt(1.+rho**2.))
    v=rho*u
    # The 12 vertices of a unit regular icosahedron.
    A =np.zeros((12,3))
    A[0][0] = v; A[0][1] = 0; A[0][2] = u;
    A[1][0] = u; A[1][1] = v; A[1][2] = 0;
    A[2][0] = 0; A[2][1] = u; A[2][2] = v;
    A[3][0] =-v; A[3][1] = 0; A[3][2] = u;
    A[4][0] = 0; A[4][1] =-u; A[4][2] = v;
    A[5][0] = u; A[5][1] =-v; A[5][2] = 0;
    A[6][0] = 0; A[6][1] = u; A[6][2] =-v;
    A[7][0] =-u; A[7][1] = v; A[7][2] = 0;
    A[8][0] =-u; A[8][1] =-v; A[8][2] = 0;
    A[9][0] = 0; A[9][1] =-u; A[9][2] =-v;
    A[10][0]= v; A[10][1]= 0; A[10][2]=-u;
    A[11][0]=-v; A[11][1]= 0; A[11][2]=-u;
    # Rotate every vertex about y so vertices 0 and 11 become the poles.
    Beta= math.atan(v/u);
    Ad=np.zeros((12,3))
    for i in range(12):
        Ad[i,:]=rotate_point_about_yaxis(Beta,A[i,:])
    # Flat coordinate arrays for the (ngrid+1)^2 grid of each of the 10
    # domains; a point (row k, col l) of domain id lives at index
    # id*(ngrid+1)**2 + k*(ngrid+1) + l.
    xn=np.zeros(10*(ngrid+1)**2)
    yn=np.zeros(10*(ngrid+1)**2)
    zn=np.zeros(10*(ngrid+1)**2)
    # Define Domain corners
    for id in range(10):
        if id<5:
            # Northern Hemisphere
            # 0,0
            xn[id*(ngrid+1)**2+0] = Ad[0][0]
            yn[id*(ngrid+1)**2+0] = Ad[0][1]
            zn[id*(ngrid+1)**2+0] = Ad[0][2]
            # mt, 0
            xn[id*(ngrid+1)**2+ngrid] = Ad[id+1][0]
            yn[id*(ngrid+1)**2+ngrid] = Ad[id+1][1]
            zn[id*(ngrid+1)**2+ngrid] = Ad[id+1][2]
            # 0,mt  (domain 4 wraps around to vertex 1)
            if id==4:
                xn[id*(ngrid+1)**2+(ngrid+1)*ngrid] = Ad[1][0]
                yn[id*(ngrid+1)**2+(ngrid+1)*ngrid] = Ad[1][1]
                zn[id*(ngrid+1)**2+(ngrid+1)*ngrid] = Ad[1][2]
            else:
                xn[id*(ngrid+1)**2+(ngrid+1)*ngrid] = Ad[id+2][0]
                yn[id*(ngrid+1)**2+(ngrid+1)*ngrid] = Ad[id+2][1]
                zn[id*(ngrid+1)**2+(ngrid+1)*ngrid] = Ad[id+2][2]
            # mt,mt
            xn[id*(ngrid+1)**2+(ngrid+1)**2-1] = Ad[id+6][0]
            yn[id*(ngrid+1)**2+(ngrid+1)**2-1] = Ad[id+6][1]
            zn[id*(ngrid+1)**2+(ngrid+1)**2-1] = Ad[id+6][2]
        # Southern Hemisphere
        else:
            # South Pole
            xn[id*(ngrid+1)**2+0] = Ad[id-4][0];
            yn[id*(ngrid+1)**2+0] = Ad[id-4][1];
            zn[id*(ngrid+1)**2+0] = Ad[id-4][2];
            # mt,0  (domain 5 wraps around to vertex 10)
            if id==5:
                xn[id*(ngrid+1)**2+ngrid] = Ad[10][0];
                yn[id*(ngrid+1)**2+ngrid] = Ad[10][1];
                zn[id*(ngrid+1)**2+ngrid] = Ad[10][2];
            else:
                xn[id*(ngrid+1)**2+ngrid] = Ad[id][0];
                yn[id*(ngrid+1)**2+ngrid] = Ad[id][1];
                zn[id*(ngrid+1)**2+ngrid] = Ad[id][2];
            # 0,mt
            xn[id*(ngrid+1)**2+(ngrid+1)*ngrid] = Ad[id+1][0];
            yn[id*(ngrid+1)**2+(ngrid+1)*ngrid] = Ad[id+1][1];
            zn[id*(ngrid+1)**2+(ngrid+1)*ngrid] = Ad[id+1][2];
            # mt,mt
            xn[id*(ngrid+1)**2+(ngrid+1)**2-1] = Ad[11][0];
            yn[id*(ngrid+1)**2+(ngrid+1)**2-1] = Ad[11][1];
            zn[id*(ngrid+1)**2+(ngrid+1)**2-1] = Ad[11][2];
    # Define Domain points between corners: linear interpolation between
    # the corner pairs, renormalized onto the unit sphere.
    for id in range(10):
        for k in range(1,ngrid):
            # upper left line
            index1 = id*(ngrid+1)**2 + 0
            index2 = id*(ngrid+1)**2 + ngrid
            index  = id*(ngrid+1)**2 + k
            xn[index] = xn[index1] + 1.*k*(xn[index2]-xn[index1])/ngrid
            yn[index] = yn[index1] + 1.*k*(yn[index2]-yn[index1])/ngrid
            zn[index] = zn[index1] + 1.*k*(zn[index2]-zn[index1])/ngrid
            (xn[index],yn[index],zn[index]) = normalize(np.array((xn[index],yn[index],zn[index])))
            # upper right line
            index1 = id*(ngrid+1)**2 + 0
            index2 = id*(ngrid+1)**2 + (ngrid+1)*ngrid
            index  = id*(ngrid+1)**2 + (ngrid+1)*k
            xn[index] = xn[index1] + 1.*k*(xn[index2]-xn[index1])/ngrid
            yn[index] = yn[index1] + 1.*k*(yn[index2]-yn[index1])/ngrid
            zn[index] = zn[index1] + 1.*k*(zn[index2]-zn[index1])/ngrid
            (xn[index],yn[index],zn[index]) = normalize(np.array((xn[index],yn[index],zn[index])))
            # lower left line
            index1 = id*(ngrid+1)**2 + ngrid
            index2 = id*(ngrid+1)**2 + (ngrid+1)**2-1
            index  = id*(ngrid+1)**2 + (ngrid+1)*(k+1)-1
            xn[index] = xn[index1] + 1.*k*(xn[index2]-xn[index1])/ngrid
            yn[index] = yn[index1] + 1.*k*(yn[index2]-yn[index1])/ngrid
            zn[index] = zn[index1] + 1.*k*(zn[index2]-zn[index1])/ngrid
            (xn[index],yn[index],zn[index]) = normalize(np.array((xn[index],yn[index],zn[index])))
            # lower right line
            index1 = id*(ngrid+1)**2 + (ngrid+1)*ngrid
            index2 = id*(ngrid+1)**2 + (ngrid+1)**2-1
            index  = id*(ngrid+1)**2 + (ngrid+1)*ngrid+k
            xn[index] = xn[index1] + 1.*k*(xn[index2]-xn[index1])/ngrid
            yn[index] = yn[index1] + 1.*k*(yn[index2]-yn[index1])/ngrid
            zn[index] = zn[index1] + 1.*k*(zn[index2]-zn[index1])/ngrid
            (xn[index],yn[index],zn[index]) = normalize(np.array((xn[index],yn[index],zn[index])))
            # middle line (the shared diagonal of the two triangles)
            index1 = id*(ngrid+1)**2 + ngrid
            index2 = id*(ngrid+1)**2 + (ngrid+1)*ngrid
            index  = id*(ngrid+1)**2 + (ngrid+1)*k+ngrid-k
            xn[index] = xn[index1] + 1.*k*(xn[index2]-xn[index1])/ngrid
            yn[index] = yn[index1] + 1.*k*(yn[index2]-yn[index1])/ngrid
            zn[index] = zn[index1] + 1.*k*(zn[index2]-zn[index1])/ngrid
            (xn[index],yn[index],zn[index]) = normalize(np.array((xn[index],yn[index],zn[index])))
        # Define Domain points all over
        # top half triangle
        for k in range(2,ngrid):
            for l in range(1,k):
                index1 = id*(ngrid+1)**2 + k
                index2 = id*(ngrid+1)**2 + k*(ngrid+1)
                index  = id*(ngrid+1)**2 + k + l*(ngrid)
                xn[index] = xn[index1] + 1.*l*(xn[index2]-xn[index1])/k
                yn[index] = yn[index1] + 1.*l*(yn[index2]-yn[index1])/k
                zn[index] = zn[index1] + 1.*l*(zn[index2]-zn[index1])/k
                (xn[index],yn[index],zn[index]) = normalize(np.array((xn[index],yn[index],zn[index])))
        # bottom half triangle
        for k in range(2,ngrid):
            for l in range(1,ngrid+1-k):
                index1 = id*(ngrid+1)**2 + k*(ngrid+1) - 1
                index2 = id*(ngrid+1)**2 + (ngrid+1)*ngrid + k -1
                index  = id*(ngrid+1)**2 + k*(ngrid+1) -1 + l*(ngrid)
                xn[index] = xn[index1] + 1.*l*(xn[index2]-xn[index1])/(ngrid+1-k)
                yn[index] = yn[index1] + 1.*l*(yn[index2]-yn[index1])/(ngrid+1-k)
                zn[index] = zn[index1] + 1.*l*(zn[index2]-zn[index1])/(ngrid+1-k)
                (xn[index],yn[index],zn[index]) = normalize(np.array((xn[index],yn[index],zn[index])))
    TP1=np.zeros((self.NT,3))  # triangle point 1 x,y,z positions
    TP2=np.zeros((self.NT,3))  # "        2 "
    TP3=np.zeros((self.NT,3))  # "        3 "
    # Create triangles: each grid cell yields one upper and one lower triangle.
    for id in range(10):
        for k in range(ngrid):
            for l in range(ngrid):
                Tindex=id*2*ngrid*ngrid + k*2*ngrid + l
                index1=id*(ngrid+1)**2 + k*(ngrid+1) +l
                index2=id*(ngrid+1)**2 + k*(ngrid+1) +l+1
                index3=id*(ngrid+1)**2 + (k+1)*(ngrid+1) +l
                TP1[Tindex][0] = xn[index1]
                TP1[Tindex][1] = yn[index1]
                TP1[Tindex][2] = zn[index1]
                TP2[Tindex][0] = xn[index2]
                TP2[Tindex][1] = yn[index2]
                TP2[Tindex][2] = zn[index2]
                TP3[Tindex][0] = xn[index3]
                TP3[Tindex][1] = yn[index3]
                TP3[Tindex][2] = zn[index3]
            for l in range(0,ngrid):
                Tindex=id*2*ngrid*ngrid + k*2*ngrid + ngrid + l
                index1=id*(ngrid+1)**2 + k*(ngrid+1) + l+1
                index2=id*(ngrid+1)**2 + (k+1)*(ngrid+1) + l+1
                index3=id*(ngrid+1)**2 + (k+1)*(ngrid+1) + l
                TP1[Tindex][0] = xn[index1]
                TP1[Tindex][1] = yn[index1]
                TP1[Tindex][2] = zn[index1]
                TP2[Tindex][0] = xn[index2]
                TP2[Tindex][1] = yn[index2]
                TP2[Tindex][2] = zn[index2]
                TP3[Tindex][0] = xn[index3]
                TP3[Tindex][1] = yn[index3]
                TP3[Tindex][2] = zn[index3]
    # Interleave the three vertex arrays into self.TP.
    self.TP[0::3,:]=TP1[:,:]
    self.TP[1::3,:]=TP2[:,:]
    self.TP[2::3,:]=TP3[:,:]
    # Per-triangle (flat) normals: one cross product per triangle, shared
    # by its three vertices.
    NTP1=np.zeros((self.NT,3))
    NTP2=np.zeros((self.NT,3))
    NTP3=np.zeros((self.NT,3))
    for t in range(self.NT):
        NTP1[t,:] = np.cross(TP1[t,:]-TP2[t,:],TP3[t,:]-TP2[t,:])
        NTP3[t,:] = NTP2[t,:] = NTP1[t,:]
    self.NP[0::3,:]=NTP1[:,:]
    self.NP[1::3,:]=NTP2[:,:]
    self.NP[2::3,:]=NTP3[:,:]
    self.NP=-1.*self.NP  # flip so the normals point outward
    # Interleaved (vertex, normal) index pairs, as pycollada expects.
    for i in range(3*self.NT):
        self.indices[2*i+0]=self.indices[2*i+1]=i
#=============================================================================
def get_centre(self):
"""
Finds the centre of the ellipsoid, simply by averaging two points
lying on the (perhaps rotated) x semi-axis.
"""
centre=np.zeros(3)
centre[0]=0.5*(self.TP[0,0]+self.TP[-2,0])
centre[1]=0.5*(self.TP[0,1]+self.TP[-2,1])
centre[2]=0.5*(self.TP[0,2]+self.TP[-2,2])
return centre
#=============================================================================
def translate(self,x,y,z):
"""
Shifts the origin by x,y,z
"""
self.TP[:,0]=self.TP[:,0]+x
self.TP[:,1]=self.TP[:,1]+y
self.TP[:,2]=self.TP[:,2]+z
#=============================================================================
def translateTo(self,x,y,z):
"""
Shifts the origin to x,y,z
"""
centre=self.get_centre()
dx=x-centre[0]
dy=y-centre[1]
dz=z-centre[2]
self.translate(dx,dy,dz)
#=============================================================================
def stretch(self,A,B,C):
"""
Stretches the icosahedron by a factor of A,B,C along x,y,z axes respectively
Before scaling, the icosahedron is centred at the origin, and returned
to its original position afterwards.
"""
# translate to origin before stretch
centre=self.get_centre()
self.translateTo(0.,0.,0.)
self.TP[:,0]=A*self.TP[:,0]
self.TP[:,1]=B*self.TP[:,1]
self.TP[:,2]=C*self.TP[:,2]
self.NP[:,0]=A*self.NP[:,0]
self.NP[:,1]=B*self.NP[:,1]
self.NP[:,2]=C*self.NP[:,2]
# return to original position
self.translateTo(centre[0],centre[1],centre[2])
#=============================================================================
def rotate_about_xaxis(self, alpha):
"""
Rotates the icosahedron by beta radians about the x axis. Rotation is
anti-clockwise as you look down the axis (from a positive viewpoint)
towards the origin.
"""
alpha=-alpha
Rx=np.array([[ 1, 0, 0 ],
[ 0, math.cos(alpha), math.sin(alpha) ],
[ 0, -math.sin(alpha), math.cos(alpha) ]])
for i in range(len(self.TP)):
point = [self.TP[i,0],self.TP[i,1],self.TP[i,2]]
(self.TP[i,0],
self.TP[i,1],
self.TP[i,2]) = np.dot(Rx,np.transpose(point))
npoint = [self.NP[i,0],self.NP[i,1],self.NP[i,2]]
(self.NP[i,0],
self.NP[i,1],
self.NP[i,2]) = np.dot(Rx,np.transpose(npoint))
#=============================================================================
def rotate_about_yaxis(self, beta):
"""
Rotates the icosahedron by beta radians about the y axis. Rotation is
anti-clockwise as you look down the axis (from a positive viewpoint)
towards the origin.
"""
beta=-beta
Ry=np.array([[ np.cos(beta), 0, -np.sin(beta) ],
| |
<filename>WDL/runtime/task_container.py
"""
Abstract interface for task container runtime + default Docker Swarm backend
"""
import os
import logging
import time
import json
import contextlib
import shutil
import random
import threading
import base64
import uuid
import hashlib
import shlex
from typing import Callable, Iterable, List, Set, Tuple, Type, Any, Dict, Optional
from abc import ABC, abstractmethod
import docker
from .. import Error
from .._util import TerminationSignalFlag, path_really_within, chmod_R_plus, PygtailLogger
from .._util import StructuredLogMessage as _
from . import config, _statusbar
from .error import OutputError, Interrupted, Terminated, CommandFailed, RunFailed, error_json
class TaskContainer(ABC):
    """
    Base class for task containers, subclassed by runtime-specific backends (e.g. Docker).
    """
    # class stuff
    @classmethod
    def global_init(cls, cfg: config.Loader, logger: logging.Logger) -> None:
        """
        Perform any necessary one-time initialization of the underlying container backend. Must be
        invoked once per process prior to any instantiation of the class.
        """
        raise NotImplementedError()
    @classmethod
    def detect_resource_limits(cls, cfg: config.Loader, logger: logging.Logger) -> Dict[str, int]:
        """
        Detect the maximum resources (cpu and mem_bytes) that the underlying container backend
        would be able to provision.
        If determining this is at all costly, then backend should memoize (thread-safely and
        perhaps front-loaded in global_init).
        """
        raise NotImplementedError()
    # instance stuff
    # run_id: identifier for this task run attempt, set by __init__
    run_id: str
    host_dir: str
    """
    :type: str
    The host path to the scratch directory that will be mounted inside the
    container.
    """
    container_dir: str
    """
    :type: str
    The scratch directory's mounted path inside the container. The task
    command's working directory will be ``{container_dir}/work/``.
    """
    input_file_map: Dict[str, str]
    """
    :type: Dict[str,str]
    A mapping of host input file paths to in-container mounted paths,
    maintained by ``add_files``.
    """
    # reverse of input_file_map: in-container mounted path -> host path
    input_file_map_rev: Dict[str, str]
    runtime_values: Dict[str, Any]
    """
    Evaluated task runtime{} section. Typically the TaskContainer backend needs to honor
    cpu, memory_limit, memory_reservation, docker. Resources must have already been fit to
    get_resource_limits(). Retry logic (maxRetries, preemptible) is handled externally.
    """
    stderr_callback: Optional[Callable[[str], None]]
    """
    A function called line-by-line for the task's standard error stream, iff verbose logging is
    enabled. If provided by a plugin then it overrides the default standard error logging, which
    writes each line to the 'stderr' child of the task logger.
    """
    # True only while run() is executing; guards against misuse of add_files/run
    _running: bool
    def __init__(self, cfg: config.Loader, run_id: str, host_dir: str) -> None:
        """
        Record configuration and identifiers, then create the host-side scratch
        ``work`` directory (raises if it already exists).
        """
        self.cfg = cfg
        self.run_id = run_id
        self.host_dir = host_dir
        self.container_dir = "/mnt/miniwdl_task_container"
        self.input_file_map = {}
        self.input_file_map_rev = {}
        self.stderr_callback = None
        self._running = False
        self.runtime_values = {}
        os.makedirs(os.path.join(self.host_dir, "work"))
    def add_files(self, host_files: Iterable[str]) -> None:
        """
        Use before running the container to add a list of host files to mount
        inside the container as inputs. The host-to-container path mapping is
        maintained in ``input_file_map``.
        Although ``add_files`` can be used multiple times, files should be
        added together where possible, as this allows heuristics for dealing
        with any name collisions among them.

        :raises Error.InputError: if any path in ``host_files`` is not an existing file
        """
        assert not self._running
        # partition the files by host directory
        host_files_by_dir = {}
        for host_file in host_files:
            if host_file not in self.input_file_map:
                if not os.path.isfile(host_file):
                    raise Error.InputError("input file not found: " + host_file)
                host_files_by_dir.setdefault(os.path.dirname(host_file), set()).add(host_file)
        # for each such partition of files
        # - if there are no basename collisions under input subdirectory 0, then mount them there.
        # - otherwise, mount them in a fresh subdirectory
        for files in host_files_by_dir.values():
            based = os.path.join(self.container_dir, "work/_miniwdl_inputs")
            subd = "0"
            # first pass: detect any basename collision with already-mapped files
            for host_file in files:
                container_file = os.path.join(based, subd, os.path.basename(host_file))
                if container_file in self.input_file_map_rev:
                    # collision: switch to a fresh, unused subdirectory number
                    subd = str(len(self.input_file_map) + 1)
            # second pass: record the final mapping for every file in the partition
            for host_file in files:
                container_file = os.path.join(based, subd, os.path.basename(host_file))
                assert container_file not in self.input_file_map_rev
                self.input_file_map[host_file] = container_file
                self.input_file_map_rev[container_file] = host_file
    def copy_input_files(self, logger: logging.Logger) -> None:
        # After add_files has been used as needed, copy the input files from their original
        # locations to the appropriate subdirectories of the container working directory. This may
        # not be necessary e.g. if the container backend supports bind-mounting the input
        # files from their original host paths.
        # called once per task run (attempt)
        for host_filename, container_filename in self.input_file_map.items():
            assert container_filename.startswith(self.container_dir)
            # host-side destination mirroring the in-container layout
            host_copy_filename = os.path.join(
                self.host_dir, os.path.relpath(container_filename, self.container_dir)
            )
            logger.info(_("copy host input file", input=host_filename, copy=host_copy_filename))
            os.makedirs(os.path.dirname(host_copy_filename), exist_ok=True)
            shutil.copy(host_filename, host_copy_filename)
    def run(self, logger: logging.Logger, command: str,) -> None:
        """
        1. Container is instantiated with the configured mounts and resources
        2. The mounted directory and all subdirectories have u+rwx,g+rwx permission bits; all files
           within have u+rw,g+rw permission bits.
        3. Command is executed in ``{host_dir}/work/`` (where {host_dir} is mounted to
           {container_dir} inside the container)
        4. Standard output is written to ``{host_dir}/stdout.txt``
        5. Standard error is written to ``{host_dir}/stderr.txt`` and logged at VERBOSE level
        6. Raises CommandFailed for nonzero exit code
        7. Raises Terminated if TerminationSignalFlag detected, or Interrupted if the backend
           cancels on us for some reason that isn't our fault.
        The container is torn down in any case, including SIGTERM/SIGHUP signal which is trapped.
        """
        # container-specific logic should be in _run(). this wrapper traps signals
        assert not self._running
        if command.strip():  # if the command is empty then don't bother with any of this
            with TerminationSignalFlag(logger) as terminating:
                # bail out early if a termination signal already arrived
                if terminating():
                    raise Terminated(quiet=True)
                self._running = True
                try:
                    exit_status = self._run(logger, terminating, command)
                finally:
                    self._running = False
                # prefer Terminated over CommandFailed when a signal caused the failure
                if exit_status != 0:
                    raise CommandFailed(
                        exit_status, os.path.join(self.host_dir, "stderr.txt")
                    ) if not terminating() else Terminated()
    @abstractmethod
    def _run(self, logger: logging.Logger, terminating: Callable[[], bool], command: str,) -> int:
        # run command in container & return exit status
        raise NotImplementedError()
    def reset(self, logger: logging.Logger, retries: int, delete_work: bool = False) -> None:
        """
        After a container/command failure, reset the working directory state so that
        copy_input_files() and run() can be retried.

        Artifacts of the failed attempt are either archived under
        ``failed_tries/{retries}`` or deleted outright when ``delete_work``.
        """
        artifacts_dir = os.path.join(self.host_dir, "failed_tries", str(retries))
        artifacts = []
        for artifact in ["work", "command", "stdout.txt", "stderr.txt", "stderr.txt.offset"]:
            src = os.path.join(self.host_dir, artifact)
            if os.path.exists(src):
                artifacts.append(src)
                if delete_work:
                    # rmtree for directories, unlink for plain files
                    (shutil.rmtree if os.path.isdir(src) else os.unlink)(src)
                else:
                    os.renames(src, os.path.join(artifacts_dir, artifact))
        logger.info(
            _("deleted failed task artifacts", artifacts=artifacts)
            if delete_work
            else _("archived failed task artifacts", artifacts=artifacts, dest=artifacts_dir)
        )
        # recreate a fresh, empty work directory for the retry
        os.makedirs(os.path.join(self.host_dir, "work"))
    def host_file(self, container_file: str, inputs_only: bool = False) -> Optional[str]:
        """
        Map an output file's in-container path under ``container_dir`` to a host path under
        ``host_dir``. Return None if the designated file does not exist.
        SECURITY: except for input files, this method must only return host paths under
        ``host_dir`` and prevent any reference to other host files (e.g. /etc/passwd), including
        via sneaky symlinks
        """
        if os.path.isabs(container_file):
            # handle output of std{out,err}.txt
            if container_file in [
                os.path.join(self.container_dir, pipe_file)
                for pipe_file in ["stdout.txt", "stderr.txt"]
            ]:
                return os.path.join(self.host_dir, os.path.basename(container_file))
            # handle output of an input file
            if container_file in self.input_file_map_rev:
                return self.input_file_map_rev[container_file]
            if inputs_only:
                raise Error.InputError(
                    "task inputs attempted to use a non-input or non-existent file "
                    + container_file
                )
            # relativize the path to the provisioned working directory
            container_file = os.path.relpath(
                container_file, os.path.join(self.container_dir, "work")
            )
        host_workdir = os.path.join(self.host_dir, "work")
        ans = os.path.join(host_workdir, container_file)
        if os.path.isfile(ans):
            # path_really_within also defeats symlink escapes out of the workdir
            if path_really_within(ans, host_workdir):
                return ans
            raise OutputError(
                "task outputs attempted to use a file outside its working directory: "
                + container_file
            )
        return None
# Registry of available container backends (name -> implementing class),
# populated lazily from plugins on first use; all access is guarded by
# _backends_lock (see new() below).
_backends: Dict[str, Type[TaskContainer]] = dict()
_backends_lock: threading.Lock = threading.Lock()
def new(cfg: config.Loader, logger: logging.Logger, run_id: str, host_dir: str) -> TaskContainer:
    """
    Instantiate a TaskContainer from the configured backend, including any necessary global
    initialization.
    """
    global _backends
    with _backends_lock:
        # lazily populate the backend registry from plugins on first call
        if not _backends:
            for plugin_name, plugin_cls in config.load_plugins(cfg, "container_backend"):
                _backends[plugin_name] = plugin_cls  # pyre-fixme
        backend_cls = _backends[cfg["scheduler"]["container_backend"]]
        # run the backend's global_init exactly once per process
        if not getattr(backend_cls, "_global_init", False):
            backend_cls.global_init(cfg, logger)
            setattr(backend_cls, "_global_init", True)
    container = backend_cls(cfg, run_id, host_dir)
    assert isinstance(container, TaskContainer)
    return container
class SwarmContainer(TaskContainer):
"""
TaskContainer docker (swarm) runtime
"""
_limits: Dict[str, int] = {}
@classmethod
def global_init(cls, cfg: config.Loader, logger: logging.Logger) -> None:
client = docker.from_env()
worker_nodes = []
try:
logger.debug("dockerd :: " + json.dumps(client.version())[1:-1])
# initialize swarm
state = "(unknown)"
while True:
info = client.info()
if "Swarm" in info and "LocalNodeState" in info["Swarm"]:
logger.debug(_("swarm info", **info["Swarm"]))
state = info["Swarm"]["LocalNodeState"]
# https://github.com/moby/moby/blob/e7b5f7dbe98c559b20c0c8c20c0b31a6b197d717/api/types/swarm/swarm.go#L185
if state == "active":
if info["Swarm"]["ControlAvailable"]:
worker_nodes = [
node
for node in client.nodes.list()
if node.attrs["Spec"]["Availability"] == "active"
and node.attrs["Status"]["State"] == "ready"
]
if worker_nodes:
break
else:
logging.warning(
"this host is a docker swarm worker but not a manager; "
"WDL task scheduling requires manager access"
)
elif state == "inactive" and cfg["docker_swarm"].get_bool("auto_init"):
logger.warning(
"docker swarm is inactive on this host; "
"performing `docker swarm init --advertise-addr 127.0.0.1 --listen-addr 127.0.0.1`"
)
try:
client.swarm.init(advertise_addr="127.0.0.1", listen_addr="127.0.0.1")
except Exception as | |
static_values_shape = None
with self.cached_session() as sess:
for op in ops:
if static_indices_shape is None:
static_indices_shape = op.indices.get_shape()
else:
self.assertAllEqual(
static_indices_shape.as_list(), op.indices.get_shape().as_list())
if static_values_shape is None:
static_values_shape = op.values.get_shape()
else:
self.assertAllEqual(
static_values_shape.as_list(), op.values.get_shape().as_list())
dynamic_indices_shape_ops.append(array_ops.shape(op.indices))
dynamic_values_shape_ops.append(array_ops.shape(op.values))
results = sess.run(
list(ops) + dynamic_indices_shape_ops + dynamic_values_shape_ops)
op_count = len(ops)
op_results = results[0:op_count]
dynamic_indices_shapes = results[op_count:2 * op_count]
dynamic_values_shapes = results[2 * op_count:3 * op_count]
# Assert static and dynamic tensor shapes, and result shapes, are all
# consistent.
static_indices_shape.assert_is_compatible_with(dynamic_indices_shapes[0])
static_values_shape.assert_is_compatible_with(dynamic_values_shapes[0])
self.assertAllEqual(dynamic_indices_shapes[0], op_results[0].indices.shape)
self.assertAllEqual(dynamic_values_shapes[0], op_results[0].values.shape)
# Assert dynamic shapes and values are the same for all ops.
for i in range(1, len(ops)):
self.assertAllEqual(dynamic_indices_shapes[0], dynamic_indices_shapes[i])
self.assertAllEqual(dynamic_values_shapes[0], dynamic_values_shapes[i])
self.assertAllEqual(op_results[0].indices, op_results[i].indices)
self.assertAllEqual(op_results[0].values, op_results[i].values)
self.assertAllEqual(op_results[0].dense_shape, op_results[i].dense_shape)
return op_results[0]
def _set_intersection(self, a, b):
# Validate that we get the same results with or without `validate_indices`,
# and with a & b swapped.
ops = (
sets.set_intersection(
a, b, validate_indices=True),
sets.set_intersection(
a, b, validate_indices=False),
sets.set_intersection(
b, a, validate_indices=True),
sets.set_intersection(
b, a, validate_indices=False),)
for op in ops:
self._assert_static_shapes(a, op)
return self._run_equivalent_set_ops(ops)
def _set_intersection_count(self, a, b):
op = sets.set_size(sets.set_intersection(a, b))
with self.cached_session() as sess:
return sess.run(op)
def test_set_difference_multirow_2d(self):
for dtype in _DTYPES:
self._test_set_difference_multirow_2d(dtype)
  def _test_set_difference_multirow_2d(self, dtype):
    """Checks a-b and b-a differences on multi-row 2-D inputs of `dtype`.

    Covers dense-to-sparse and sparse-to-sparse operand combinations, and
    verifies both the result tensor and the per-row difference counts.
    """
    a_values = [[1, 1, 1], [1, 5, 9], [4, 5, 3], [5, 5, 1]]
    b_values = [[], [1, 2], [1, 2, 2], []]
    # a - b.
    expected_indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1], [2, 2], [3, 0],
                        [3, 1]]
    expected_values = _values([1, 5, 9, 3, 4, 5, 1, 5], dtype)
    expected_shape = [4, 3]
    expected_counts = [1, 2, 3, 2]
    # Dense to sparse.
    a = _constant(a_values, dtype=dtype)
    sp_b = _dense_to_sparse(b_values, dtype=dtype)
    difference = self._set_difference(a, sp_b, True)
    self._assert_set_operation(
        expected_indices,
        expected_values,
        expected_shape,
        difference,
        dtype=dtype)
    self.assertAllEqual(expected_counts,
                        self._set_difference_count(a, sp_b, True))
    # Sparse to sparse.
    sp_a = _dense_to_sparse(a_values, dtype=dtype)
    difference = self._set_difference(sp_a, sp_b, True)
    self._assert_set_operation(
        expected_indices,
        expected_values,
        expected_shape,
        difference,
        dtype=dtype)
    self.assertAllEqual(expected_counts,
                        self._set_difference_count(sp_a, sp_b, True))
    # b - a.
    expected_indices = [[1, 0], [2, 0], [2, 1]]
    expected_values = _values([2, 1, 2], dtype)
    expected_shape = [4, 2]
    expected_counts = [0, 1, 2, 0]
    # Dense to sparse.
    difference = self._set_difference(a, sp_b, False)
    self._assert_set_operation(
        expected_indices,
        expected_values,
        expected_shape,
        difference,
        dtype=dtype)
    self.assertAllEqual(expected_counts,
                        self._set_difference_count(a, sp_b, False))
    # Sparse to sparse.
    difference = self._set_difference(sp_a, sp_b, False)
    self._assert_set_operation(
        expected_indices,
        expected_values,
        expected_shape,
        difference,
        dtype=dtype)
    self.assertAllEqual(expected_counts,
                        self._set_difference_count(sp_a, sp_b, False))
def test_dense_set_difference_multirow_2d(self):
for dtype in _DTYPES:
self._test_dense_set_difference_multirow_2d(dtype)
  def _test_dense_set_difference_multirow_2d(self, dtype):
    """Checks a-b and b-a differences for dense-to-dense 2-D inputs of `dtype`."""
    a_values = [[1, 5, 9], [4, 5, 3]]
    b_values = [[1, 2, 6], [1, 2, 2]]
    # a - b.
    expected_indices = [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]]
    expected_values = _values([5, 9, 3, 4, 5], dtype)
    expected_shape = [2, 3]
    expected_counts = [2, 3]
    # Dense to dense.
    a = _constant(a_values, dtype=dtype)
    b = _constant(b_values, dtype=dtype)
    difference = self._set_difference(a, b, True)
    self._assert_set_operation(
        expected_indices,
        expected_values,
        expected_shape,
        difference,
        dtype=dtype)
    self.assertAllEqual(expected_counts, self._set_difference_count(a, b, True))
    # b - a.
    expected_indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
    expected_values = _values([2, 6, 1, 2], dtype)
    expected_shape = [2, 2]
    expected_counts = [2, 2]
    # Dense to dense.
    difference = self._set_difference(a, b, False)
    self._assert_set_operation(
        expected_indices,
        expected_values,
        expected_shape,
        difference,
        dtype=dtype)
    self.assertAllEqual(expected_counts,
                        self._set_difference_count(a, b, False))
def test_sparse_set_difference_multirow_2d(self):
for dtype in _DTYPES:
self._test_sparse_set_difference_multirow_2d(dtype)
  def _test_sparse_set_difference_multirow_2d(self, dtype):
    """Checks a-b and b-a differences for sparse-to-sparse 2-D inputs.

    The fixtures include empty rows and duplicated values within a row.
    """
    sp_a = _dense_to_sparse(
        [[], [1, 5, 9], [4, 5, 3, 3, 4, 5], [5, 1]], dtype=dtype)
    sp_b = _dense_to_sparse([[], [1, 2], [1, 2, 2], []], dtype=dtype)
    # a - b.
    expected_indices = [[1, 0], [1, 1], [2, 0], [2, 1], [2, 2], [3, 0], [3, 1]]
    expected_values = _values([5, 9, 3, 4, 5, 1, 5], dtype)
    expected_shape = [4, 3]
    expected_counts = [0, 2, 3, 2]
    difference = self._set_difference(sp_a, sp_b, True)
    self._assert_set_operation(
        expected_indices,
        expected_values,
        expected_shape,
        difference,
        dtype=dtype)
    self.assertAllEqual(expected_counts,
                        self._set_difference_count(sp_a, sp_b, True))
    # b - a.
    expected_indices = [[1, 0], [2, 0], [2, 1]]
    expected_values = _values([2, 1, 2], dtype)
    expected_shape = [4, 2]
    expected_counts = [0, 1, 2, 0]
    difference = self._set_difference(sp_a, sp_b, False)
    self._assert_set_operation(
        expected_indices,
        expected_values,
        expected_shape,
        difference,
        dtype=dtype)
    self.assertAllEqual(expected_counts,
                        self._set_difference_count(sp_a, sp_b, False))
def test_set_difference_duplicates_2d(self):
for dtype in _DTYPES:
self._test_set_difference_duplicates_2d(dtype)
def _test_set_difference_duplicates_2d(self, dtype):
a_values = [[1, 1, 3]]
b_values = [[1, 2, 2]]
# a - b.
expected_indices = [[0, 0]]
expected_values = _values([3], dtype)
expected_shape = [1, 1]
expected_counts = [1]
# Dense to sparse.
a = _constant(a_values, dtype=dtype)
sp_b = _dense_to_sparse(b_values, dtype=dtype)
difference = self._set_difference(a, sp_b, True)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
difference,
dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(a, sp_b, True))
# Sparse to sparse.
sp_a = _dense_to_sparse(a_values, dtype=dtype)
difference = self._set_difference(sp_a, sp_b, True)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
difference,
dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(a, sp_b, True))
# b - a.
expected_indices = [[0, 0]]
expected_values = _values([2], dtype)
expected_shape = [1, 1]
expected_counts = [1]
# Dense to sparse.
difference = self._set_difference(a, sp_b, False)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
difference,
dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(a, sp_b, False))
# Sparse to sparse.
difference = self._set_difference(sp_a, sp_b, False)
self._assert_set_operation(
expected_indices,
expected_values,
expected_shape,
difference,
dtype=dtype)
self.assertAllEqual(expected_counts,
self._set_difference_count(a, sp_b, False))
def test_sparse_set_difference_3d(self):
for dtype in _DTYPES:
self._test_sparse_set_difference_3d(dtype)
def test_sparse_set_difference_3d_invalid_indices(self):
for dtype in _DTYPES:
self._test_sparse_set_difference_3d(dtype, invalid_indices=True)
  def _test_sparse_set_difference_3d(self, dtype, invalid_indices=False):
    """Checks a-b and b-a differences for rank-3 sparse inputs of `dtype`.

    When `invalid_indices` is set, sp_a is built with out-of-order indices and
    the op is expected to raise; otherwise result tensors and per-group counts
    are verified. The trailing comments on the fixture literals label which
    (row, column) group of the 3-D set each entry belongs to.
    """
    if invalid_indices:
      indices = constant_op.constant(
          [
              [0, 1, 0],
              [0, 1, 1],  # 0,1
              [1, 0, 0],  # 1,0
              [1, 1, 0],
              [1, 1, 1],
              [1, 1, 2],  # 1,1
              [0, 0, 0],
              [0, 0, 2],  # 0,0
              # 2,0
              [2, 1, 1]  # 2,1
              # 3,*
          ],
          dtypes.int64)
    else:
      indices = constant_op.constant(
          [
              [0, 0, 0],
              [0, 0, 2],  # 0,0
              [0, 1, 0],
              [0, 1, 1],  # 0,1
              [1, 0, 0],  # 1,0
              [1, 1, 0],
              [1, 1, 1],
              [1, 1, 2],  # 1,1
              # 2,0
              [2, 1, 1]  # 2,1
              # 3,*
          ],
          dtypes.int64)
    sp_a = sparse_tensor_lib.SparseTensor(
        indices,
        _constant(
            [
                1,
                9,  # 0,0
                3,
                3,  # 0,1
                1,  # 1,0
                9,
                7,
                8,  # 1,1
                # 2,0
                5  # 2,1
                # 3,*
            ],
            dtype),
        constant_op.constant([4, 2, 3], dtypes.int64))
    sp_b = sparse_tensor_lib.SparseTensor(
        constant_op.constant(
            [
                [0, 0, 0],
                [0, 0, 3],  # 0,0
                # 0,1
                [1, 0, 0],  # 1,0
                [1, 1, 0],
                [1, 1, 1],  # 1,1
                [2, 0, 1],  # 2,0
                [2, 1, 1],  # 2,1
                [3, 0, 0],  # 3,0
                [3, 1, 0]  # 3,1
            ],
            dtypes.int64),
        _constant(
            [
                1,
                3,  # 0,0
                # 0,1
                3,  # 1,0
                7,
                8,  # 1,1
                2,  # 2,0
                5,  # 2,1
                4,  # 3,0
                4  # 3,1
            ],
            dtype),
        constant_op.constant([4, 2, 4], dtypes.int64))
    if invalid_indices:
      with self.assertRaisesRegexp(errors_impl.OpError, "out of order"):
        self._set_difference(sp_a, sp_b, False)
      with self.assertRaisesRegexp(errors_impl.OpError, "out of order"):
        self._set_difference(sp_a, sp_b, True)
    else:
      # a-b
      expected_indices = [
          [0, 0, 0],  # 0,0
          [0, 1, 0],  # 0,1
          [1, 0, 0],  # 1,0
          [1, 1, 0],  # 1,1
          # 2,*
          # 3,*
      ]
      expected_values = _values(
          [
              9,  # 0,0
              3,  # 0,1
              1,  # 1,0
              9,  # 1,1
              # 2,*
              # 3,*
          ],
          dtype)
      expected_shape = [4, 2, 1]
      expected_counts = [
          [
              1,  # 0,0
              1  # 0,1
          ],
          [
              1,  # 1,0
              1  # 1,1
          ],
          [
              0,  # 2,0
              0  # 2,1
          ],
          [
              0,  # 3,0
              0  # 3,1
          ]
      ]
      difference = self._set_difference(sp_a, sp_b, True)
      self._assert_set_operation(
          expected_indices,
          expected_values,
          expected_shape,
          difference,
          dtype=dtype)
      self.assertAllEqual(expected_counts,
                          self._set_difference_count(sp_a, sp_b))
      # b-a
      expected_indices = [
          [0, 0, 0],  # 0,0
          # 0,1
          [1, 0, 0],  # 1,0
          # 1,1
          [2, 0, 0],  # 2,0
          # 2,1
          [3, 0, 0],  # 3,0
          [3, 1, 0]  # 3,1
      ]
      expected_values = _values(
          [
              3,  # 0,0
              # 0,1
              3,  # 1,0
              # 1,1
              2,  # 2,0
              # 2,1
              4,  # 3,0
              4,  # 3,1
          ],
          dtype)
      expected_shape = [4, 2, 1]
      expected_counts = [
          [
              1,  # 0,0
              0  # 0,1
          ],
          [
              1,  # 1,0
              0  # 1,1
          ],
          [
              1,  # 2,0
              0  # 2,1
          ],
          [
              1,  # 3,0
              1  # 3,1
          ]
      ]
      difference = self._set_difference(sp_a, sp_b, False)
      self._assert_set_operation(
          expected_indices,
          expected_values,
          expected_shape,
          difference,
          dtype=dtype)
      self.assertAllEqual(expected_counts,
                          self._set_difference_count(sp_a, sp_b, False))
def _set_difference(self, a, b, aminusb=True):
# Validate that we get the same results with or without `validate_indices`,
# and with a | |
0.00000000371 * mu.cost(2.23508025241 + 4996.17273089800 * x)
R0 += 0.00000000300 * mu.cost(6.04069385215 + 9499.25986200560 * x)
R0 += 0.00000000345 * mu.cost(1.74260482164 + 5617.91076994730 * x)
R0 += 0.00000000302 * mu.cost(3.01127349940 + 7483.58877584580 * x)
R0 += 0.00000000289 * mu.cost(0.39479288685 + 2412.77245819700 * x)
R0 += 0.00000000289 * mu.cost(2.21430640344 + 11140.59307220220 * x)
R0 += 0.00000000330 * mu.cost(5.81605457596 + 4246.06912336460 * x)
R0 += 0.00000000394 * mu.cost(2.12229107240 + 6475.03930496240 * x)
R0 += 0.00000000301 * mu.cost(5.26147877814 + 9945.57120882380 * x)
R0 += 0.00000000338 * mu.cost(4.94717046909 + 5625.77507647350 * x)
R0 += 0.00000000372 * mu.cost(5.46968662800 + 3561.02506913860 * x)
R0 += 0.00000000279 * mu.cost(0.54063870001 + 3226.21331978640 * x)
R0 += 0.00000000291 * mu.cost(5.21021494024 + 13171.00144068760 * x)
R0 += 0.00000000384 * mu.cost(3.23921380878 + 10022.81760116760 * x)
R0 += 0.00000000309 * mu.cost(3.17514941939 + 14047.49376102520 * x)
R0 += 0.00000000273 * mu.cost(1.68203034215 + 4253.18267036540 * x)
R0 += 0.00000000272 * mu.cost(0.11218647217 + 7314.00859271280 * x)
R0 += 0.00000000281 * mu.cost(5.15132055967 + 2825.14855560680 * x)
R0 += 0.00000000292 * mu.cost(3.33720586058 + 9468.26787725700 * x)
R0 += 0.00000000316 * mu.cost(1.41719074976 + 589.06482700820 * x)
R0 += 0.00000000264 * mu.cost(0.48845594730 + 16699.53901514999 * x)
R0 += 0.00000000266 * mu.cost(1.69694779915 + 647.01083331480 * x)
R0 += 0.00000000318 * mu.cost(2.47072726153 + 8436.28750316460 * x)
R0 += 0.00000000260 * mu.cost(2.54459932529 + 20.35531939880 * x)
R0 += 0.00000000275 * mu.cost(2.78608579994 + 2970.91261075940 * x)
R0 += 0.00000000295 * mu.cost(5.96367554548 + 4025.65648092580 * x)
R0 += 0.00000000268 * mu.cost(3.01034973031 + 6518.75821726740 * x)
R0 += 0.00000000259 * mu.cost(5.20888482695 + 7366.26890762560 * x)
R0 += 0.00000000298 * mu.cost(2.71010678192 + 6652.77566593180 * x)
R0 += 0.00000000276 * mu.cost(0.78545108300 + 3735.23831175900 * x)
R0 += 0.00000000305 * mu.cost(6.19137255377 + 6677.34351804160 * x)
R0 += 0.00000000265 * mu.cost(3.25145629239 + 24889.57479599160 * x)
R0 += 0.00000000260 * mu.cost(3.99612605351 + 2171.02417529120 * x)
R0 += 0.00000000252 * mu.cost(4.14773813625 + 5642.19824260920 * x)
R0 += 0.00000000254 * mu.cost(1.38470256851 + 846.08283475120 * x)
R0 += 0.00000000258 * mu.cost(2.03261985834 + 2089.78223039900 * x)
R0 += 0.00000000298 * mu.cost(3.81212222628 + 28230.18722269139 * x)
R0 += 0.00000000241 * mu.cost(2.96550398155 + 27682.14074415640 * x)
R0 += 0.00000000259 * mu.cost(4.79545870271 + 6657.34641565180 * x)
R0 += 0.00000000238 * mu.cost(1.18977479528 + 3171.03224356680 * x)
R0 += 0.00000000256 * mu.cost(1.01427800277 + 568.82187402740 * x)
R0 += 0.00000000236 * mu.cost(5.56425829084 + 14.22709400160 * x)
R0 += 0.00000000304 * mu.cost(3.81556245925 + 1190.92389187560 * x)
R0 += 0.00000000237 * mu.cost(1.37222961867 + 2277.29834324750 * x)
R0 += 0.00000000239 * mu.cost(2.47752611026 + 5430.39465709880 * x)
R0 += 0.00000000234 * mu.cost(4.34929504798 + 6675.70192909220 * x)
R0 += 0.00000000239 * mu.cost(0.14012746335 + 3742.28454845700 * x)
R0 += 0.00000000286 * mu.cost(5.04045301355 + 5607.61582920880 * x)
R0 += 0.00000000305 * mu.cost(4.59739079660 + 6685.10618875760 * x)
R0 += 0.00000000254 * mu.cost(5.03693878366 + 1905.46476494040 * x)
R0 += 0.00000000323 * mu.cost(1.67390215145 + 4922.57177498280 * x)
R0 += 0.00000000232 * mu.cost(4.82565548677 + 9070.11887384880 * x)
R0 += 0.00000000236 * mu.cost(2.40662610715 + 3620.39893105220 * x)
R0 += 0.00000000260 * mu.cost(5.72282468723 + 17468.85519794540 * x)
R0 += 0.00000000259 * mu.cost(6.15179402778 + 16706.58525184800 * x)
R0 += 0.00000000263 * mu.cost(0.63922292958 + 2008.55753915900 * x)
R0 += 0.00000000300 * mu.cost(3.78527265088 + 34363.36559755600 * x)
R0 += 0.00000000226 * mu.cost(1.86970344963 + 6418.14093002680 * x)
R0 += 0.00000000239 * mu.cost(0.04616997400 + 13362.38239649640 * x)
R0 += 0.00000000241 * mu.cost(4.85896907298 + 14158.74771361560 * x)
R0 += 0.00000000225 * mu.cost(1.70179250908 + 18451.07854656599 * x)
R0 += 0.00000000288 * mu.cost(2.26316945288 + 6621.85099148600 * x)
R0 += 0.00000000231 * mu.cost(2.19861265305 + 3936.79080070880 * x)
R0 += 0.00000000251 * mu.cost(5.51232121883 + 3416.87849797540 * x)
R0 += 0.00000000245 * mu.cost(3.30613942274 + 1197.97012857360 * x)
R0 += 0.00000000253 * mu.cost(4.54308131689 + 2285.16264977370 * x)
R0 += 0.00000000225 * mu.cost(5.50822507089 + 4936.79886898440 * x)
R0 += 0.00000000249 * mu.cost(1.06089727346 + 3313.21087060300 * x)
R0 += 0.00000000309 * mu.cost(6.21936675838 + 16304.91313009080 * x)
R0 += 0.00000000244 * mu.cost(1.94855224181 + 3.59042865180 * x)
R0 += 0.00000000287 * mu.cost(5.70461951656 + 792.77488846740 * x)
R0 += 0.00000000254 * mu.cost(5.34446995416 + 5401.43028077160 * x)
R0 += 0.00000000263 * mu.cost(1.49663212332 + 6364.83298374300 * x)
R0 += 0.00000000223 * mu.cost(2.66825139116 + 31968.94865279940 * x)
R0 += 0.00000000222 * mu.cost(2.48370132727 + 5355.23588148860 * x)
R0 += 0.00000000220 * mu.cost(5.20799024654 + 23017.06265793620 * x)
R0 += 0.00000000215 * mu.cost(2.72743996418 + 6740.59871531320 * x)
R0 += 0.00000000218 * mu.cost(1.30797197521 + 29822.78323632420 * x)
R0 += 0.00000000270 * mu.cost(0.90714939427 + 6155.05700665400 * x)
R0 += 0.00000000216 * mu.cost(4.73975263349 + 6679.74038069130 * x)
R0 += 0.00000000211 * mu.cost(3.72756562629 + 10042.61267559180 * x)
R0 += 0.00000000211 * mu.cost(2.61999755641 + 10124.93005431800 * x)
R0 += 0.00000000293 * mu.cost(6.07059383381 + 14061.72085502680 * x)
R0 += 0.00000000219 * mu.cost(2.98472846458 + 131.54196168640 * x)
R0 += 0.00000000210 * mu.cost(5.27496906319 + 13355.33615979840 * x)
R0 += 0.00000000259 * mu.cost(1.25267305830 + 2641.34127847220 * x)
R0 += 0.00000000208 * mu.cost(3.30241021109 + 6850.80503653260 * x)
R0 += 0.00000000226 * mu.cost(5.48438086246 + 7203.80227149340 * x)
R0 += 0.00000000243 * mu.cost(2.44748800604 + 3311.18291816379 * x)
R0 += 0.00000000208 * mu.cost(1.13500579457 + 5888.44996493220 * x)
R0 += 0.00000000251 * mu.cost(4.67012983729 + 6666.99775939800 * x)
R0 += 0.00000000227 * mu.cost(1.59926413307 + 10001.06188460700 * x)
R0 += 0.00000000264 * mu.cost(3.72622435628 + 6747.71226231400 * x)
R0 += 0.00000000216 * mu.cost(0.34122804918 + 6686.74777770700 * x)
R0 += 0.00000000260 * mu.cost(3.67749190896 + 6645.19698672220 * x)
R0 += 0.00000000209 * mu.cost(4.31928920378 + 3337.86091608880 * x)
R0 += 0.00000000218 * mu.cost(4.08068730999 + 3378.74546233760 * x)
R0 += 0.00000000275 * mu.cost(1.64274205426 + 2011.10033643980 * x)
R0 += 0.00000000204 * mu.cost(0.73237459784 + 3.93215326310 * x)
R0 += 0.00000000219 * mu.cost(0.88584017263 + 19513.98359510420 * x)
R0 += 0.00000000205 * mu.cost(2.60851826933 + 2771.79055267240 * x)
R0 += 0.00000000215 * mu.cost(2.99299817517 + 10824.20120254560 * x)
R0 += 0.00000000249 * mu.cost(0.99914444136 + 5753.38488489680 * x)
R0 += 0.00000000225 * mu.cost(0.23309143434 + 4782.87363546000 * x)
R0 += 0.00000000275 * mu.cost(0.86215660461 + 8749.15625447220 * x)
R0 += 0.00000000201 * mu.cost(2.87118854159 + 21548.96236929180 * x)
R0 += 0.00000000246 * mu.cost(3.34468800742 + 3333.92876282570 * x)
R0 += 0.00000000203 * mu.cost(4.11410443276 + 31570.79964939120 * x)
R0 += 0.00000000202 * mu.cost(4.96805650734 + 8166.15734309380 * x)
R0 += 0.00000000232 * mu.cost(2.69372584349 + 5989.06725217280 * x)
R0 += 0.00000000214 * mu.cost(4.83852070026 + 6681.64492949320 * x)
R0 += 0.00000000258 * mu.cost(2.66551831451 + 1062.90504853820 * x)
R0 += 0.00000000197 * mu.cost(0.55202541352 + 735.87651353180 * x)
R0 += 0.00000000256 * mu.cost(2.76274941586 + 2480.30249794700 * x)
R0 += 0.00000000216 * mu.cost(4.02506717011 + 3133.91168940320 * x)
R0 += 0.00000000193 * mu.cost(1.52645682146 + 949.17560896980 * x)
R0 += 0.00000000209 * mu.cost(0.67348618655 + 13892.14067189380 * x)
R0 += 0.00000000220 * mu.cost(1.52502617699 + 6660.86953400080 * x)
R0 += 0.00000000223 * mu.cost(1.09348882524 + 6148.01076995600 * x)
R0 += 0.00000000192 * mu.cost(2.90571322264 + 8799.98871377800 * x)
R0 += 0.00000000243 * mu.cost(3.36420301442 + 8965.97846825920 * x)
R0 += 0.00000000206 * mu.cost(5.11175800472 + 4140.43355186520 * x)
R0 += 0.00000000189 * mu.cost(1.06641624209 + 9374.82644678460 * x)
R0 += 0.00000000240 * mu.cost(5.92078519032 + 13362.51701710200 * x)
R0 += 0.00000000248 * mu.cost(5.79997873732 + 15806.14683944200 * x)
R0 += 0.00000000193 * mu.cost(3.19008521814 + 6756.00645196690 * x)
R0 += 0.00000000237 * mu.cost(4.11979030463 + 22487.37169284160 * x)
R0 += 0.00000000218 * mu.cost(0.84212090761 + 6717.25272007700 * x)
R0 += 0.00000000200 * mu.cost(2.43100846630 + 10018.24685144760 * x)
R0 += 0.00000000199 * mu.cost(5.81238461796 + 4289.78803566960 * x)
R0 += 0.00000000214 * mu.cost(5.95026024979 + 6680.80477730600 * x)
R0 += 0.00000000192 * mu.cost(3.06285109033 + 32765.24665961580 * x)
R0 += 0.00000000229 * mu.cost(1.68884404323 + 11614.43329373220 * x)
R0 += 0.00000000185 * mu.cost(3.13072183492 + 3253.30422216000 * x)
R0 += 0.00000000246 * mu.cost(2.58151525126 + 1795.25844372100 * x)
R0 += 0.00000000187 * mu.cost(4.06797969837 + 14577.18472611980 * x)
R0 += 0.00000000185 * mu.cost(0.96747889626 + 2604.73591316800 * x)
R0 += 0.00000000184 * mu.cost(1.46731725339 + 1437.17561419860 * x)
R0 += 0.00000000186 * mu.cost(2.55094700069 + 3188.71514561460 * x)
R0 += 0.00000000211 * mu.cost(4.23522784526 + 16703.07938715119 * x)
R0 += 0.00000000196 * mu.cost(2.80582160764 + 2796.69436760360 * x)
R0 += 0.00000000198 * mu.cost(5.92372067560 + 4133.38731516720 * x)
R0 += 0.00000000238 * | |
= points
self.rotation = rotation
def to_inch(self):
self.start_point = tuple([inch(x) for x in self.start_point])
self.points = tuple([(inch(x), inch(y)) for x, y in self.points])
def to_metric(self):
self.start_point = tuple([metric(x) for x in self.start_point])
self.points = tuple([(metric(x), metric(y)) for x, y in self.points])
def to_gerber(self, settings=None):
data = dict(
code=self.code,
exposure="1" if self.exposure == "on" else "0",
n_points=len(self.points),
start_point="%.6g,%.6g" % self.start_point,
points=",\n".join(["%.6g,%.6g" % point for point in self.points]),
rotation=str(self.rotation)
)
return "{code},{exposure},{n_points},{start_point},{points},{rotation}*".format(**data)
def to_primitive(self, units):
"""
Convert this to a drawable primitive. This uses the Outline instead of Line
primitive to handle differences in end caps when rotated.
"""
lines = []
prev_point = rotate_point(self.start_point, self.rotation)
for point in self.points:
cur_point = rotate_point(point, self.rotation)
lines.append(Line(prev_point, cur_point, Circle((0,0), 0)))
prev_point = cur_point
if lines[0].start != lines[-1].end:
raise ValueError('Outline must be closed')
return Outline(lines, units=units, level_polarity=self._level_polarity)
class AMPolygonPrimitive(AMPrimitive):
    """ Aperture Macro Polygon primitive. Code 5.
    A polygon primitive is a regular polygon defined by the number of
    vertices, the center point, and the diameter of the circumscribed circle.
    .. seealso::
        `The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
            **Section 4.12.3.8:** Polygon, primitive code 5.
    Parameters
    ----------
    code : int
        PolygonPrimitive code. Must be 5.
    exposure : string
        'on' or 'off'
    vertices : int, 3 <= vertices <= 12
        Number of vertices
    position : tuple (<float>, <float>)
        X and Y coordinates of polygon center
    diameter : float
        diameter of circumscribed circle.
    rotation : float
        polygon rotation about the origin.
    Returns
    -------
    PolygonPrimitive : :class:`gerbers.am_statements.AMPolygonPrimitive`
        An initialized AMPolygonPrimitive
    Raises
    ------
    ValueError, TypeError
    """
    @classmethod
    def from_primitive(cls, primitive):
        """Build a code-5 macro primitive from a drawable Polygon (exposure 'on')."""
        return cls(5, 'on', primitive.sides, primitive.position, primitive.diameter, primitive.rotation)
    @classmethod
    def from_gerber(cls, primitive):
        """Parse a code-5 polygon primitive from its Gerber text form."""
        modifiers = primitive.strip(' *').split(",")
        code = int(modifiers[0])
        exposure = "on" if float(modifiers[1]) == 1 else "off"
        vertices = int(float(modifiers[2]))
        position = (float(modifiers[3]), float(modifiers[4]))
        try:
            diameter = float(modifiers[5])
        except (IndexError, ValueError):
            # Diameter modifier may be missing or malformed; treat as zero.
            # (Was a bare `except:` which also hid unrelated errors.)
            diameter = 0
        rotation = float(modifiers[6])
        return cls(code, exposure, vertices, position, diameter, rotation)
    def __init__(self, code, exposure, vertices, position, diameter, rotation):
        """ Initialize AMPolygonPrimitive
        """
        if code != 5:
            raise ValueError('PolygonPrimitive code is 5')
        super(AMPolygonPrimitive, self).__init__(code, exposure)
        if vertices < 3 or vertices > 12:
            raise ValueError('Number of vertices must be between 3 and 12')
        self.vertices = vertices
        validate_coordinates(position)
        self.position = position
        self.diameter = diameter
        self.rotation = rotation
    def to_inch(self):
        """Convert position and diameter to inches, in place."""
        self.position = tuple([inch(x) for x in self.position])
        self.diameter = inch(self.diameter)
    def to_metric(self):
        """Convert position and diameter to millimeters, in place."""
        self.position = tuple([metric(x) for x in self.position])
        self.diameter = metric(self.diameter)
    def to_gerber(self, settings=None):
        """Serialize this polygon primitive to its Gerber aperture-macro text form."""
        data = dict(
            code=self.code,
            exposure="1" if self.exposure == "on" else "0",
            vertices=self.vertices,
            position="%.4g,%.4g" % self.position,
            diameter='%.4g' % self.diameter,
            rotation=str(self.rotation)
        )
        fmt = "{code},{exposure},{vertices},{position},{diameter},{rotation}*"
        return fmt.format(**data)
    def to_primitive(self, units):
        """Convert to a drawable Polygon primitive (radius = diameter / 2)."""
        return Polygon(self.position, self.vertices, self.diameter / 2.0, 0, rotation=math.radians(self.rotation), units=units, level_polarity=self._level_polarity)
class AMMoirePrimitive(AMPrimitive):
    """ Aperture Macro Moire primitive. Code 6.
    The moire primitive is a cross hair centered on concentric rings (annuli).
    Exposure is always on.
    .. seealso::
        `The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
            **Section 4.12.3.9:** Moire, primitive code 6.
    Parameters
    ----------
    code : int
        Moire Primitive code. Must be 6.
    position : tuple (<float>, <float>)
        X and Y coordinates of moire center
    diameter : float
        outer diameter of outer ring.
    ring_thickness : float
        thickness of concentric rings.
    gap : float
        gap between concentric rings.
    max_rings : float
        maximum number of rings
    crosshair_thickness : float
        thickness of crosshairs
    crosshair_length : float
        length of crosshairs
    rotation : float
        moire rotation about the origin.
    Returns
    -------
    MoirePrimitive : :class:`gerbers.am_statements.AMMoirePrimitive`
        An initialized AMMoirePrimitive
    Raises
    ------
    ValueError, TypeError
    """
    # Attributes that carry a linear dimension, in serialization order.
    @classmethod
    def from_gerber(cls, primitive):
        """Parse a code-6 moire primitive from its Gerber text form."""
        fields = primitive.strip(' *').split(",")
        return cls(
            int(fields[0]),
            (float(fields[1]), float(fields[2])),
            float(fields[3]),
            float(fields[4]),
            float(fields[5]),
            int(float(fields[6])),
            float(fields[7]),
            float(fields[8]),
            float(fields[9]),
        )
    def __init__(self, code, position, diameter, ring_thickness, gap, max_rings, crosshair_thickness, crosshair_length, rotation):
        """Initialize AMMoirePrimitive. Exposure is forced to 'on'."""
        if code != 6:
            raise ValueError('MoirePrimitive code is 6')
        super(AMMoirePrimitive, self).__init__(code, 'on')
        validate_coordinates(position)
        self.position = position
        self.diameter = diameter
        self.ring_thickness = ring_thickness
        self.gap = gap
        self.max_rings = max_rings
        self.crosshair_thickness = crosshair_thickness
        self.crosshair_length = crosshair_length
        self.rotation = rotation
    def _convert(self, converter):
        """Apply `converter` to every linear dimension, in place."""
        self.position = tuple(converter(v) for v in self.position)
        self.diameter = converter(self.diameter)
        self.ring_thickness = converter(self.ring_thickness)
        self.gap = converter(self.gap)
        self.crosshair_thickness = converter(self.crosshair_thickness)
        self.crosshair_length = converter(self.crosshair_length)
    def to_inch(self):
        """Convert all linear dimensions to inches, in place."""
        self._convert(inch)
    def to_metric(self):
        """Convert all linear dimensions to millimeters, in place."""
        self._convert(metric)
    def to_gerber(self, settings=None):
        """Serialize this moire primitive to its Gerber aperture-macro text form."""
        return "%s,%s,%s,%s,%s,%s,%s,%s,%s*" % (
            self.code,
            "%.4g,%.4g" % self.position,
            self.diameter,
            self.ring_thickness,
            self.gap,
            self.max_rings,
            self.crosshair_thickness,
            self.crosshair_length,
            self.rotation,
        )
    def to_primitive(self, units):
        # Rendering of moire primitives is not implemented; callers must
        # tolerate a None primitive here.
        return None
class AMThermalPrimitive(AMPrimitive):
    """ Aperture Macro Thermal primitive. Code 7.
    The thermal primitive is a ring (annulus) interrupted by four gaps.
    Exposure is always on.
    .. seealso::
        `The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
            **Section 4.12.3.10:** Thermal, primitive code 7.
    Parameters
    ----------
    code : int
        Thermal Primitive code. Must be 7.
    position : tuple (<float>, <float>)
        X and Y coordinates of thermal center
    outer_diameter : float
        outer diameter of thermal.
    inner_diameter : float
        inner diameter of thermal.
    gap : float
        gap thickness
    rotation : float
        thermal rotation about the origin.
    Returns
    -------
    ThermalPrimitive : :class:`gerbers.am_statements.AMThermalPrimitive`
        An initialized AMThermalPrimitive
    Raises
    ------
    ValueError, TypeError
    """
    @classmethod
    def from_gerber(cls, primitive):
        """Parse a code-7 thermal primitive from its Gerber text form."""
        modifiers = primitive.strip(' *').split(",")
        code = int(modifiers[0])
        position = (float(modifiers[1]), float(modifiers[2]))
        outer_diameter = float(modifiers[3])
        inner_diameter = float(modifiers[4])
        gap = float(modifiers[5])
        rotation = float(modifiers[6])
        return cls(code, position, outer_diameter, inner_diameter, gap, rotation)
    def __init__(self, code, position, outer_diameter, inner_diameter, gap, rotation):
        """Initialize AMThermalPrimitive. Exposure is forced to 'on'."""
        if code != 7:
            raise ValueError('ThermalPrimitive code is 7')
        super(AMThermalPrimitive, self).__init__(code, 'on')
        validate_coordinates(position)
        self.position = position
        self.outer_diameter = outer_diameter
        self.inner_diameter = inner_diameter
        self.gap = gap
        self.rotation = rotation
    def to_inch(self):
        """Convert position, diameters and gap to inches, in place."""
        self.position = tuple([inch(x) for x in self.position])
        self.outer_diameter = inch(self.outer_diameter)
        self.inner_diameter = inch(self.inner_diameter)
        self.gap = inch(self.gap)
    def to_metric(self):
        """Convert position, diameters and gap to millimeters, in place."""
        self.position = tuple([metric(x) for x in self.position])
        self.outer_diameter = metric(self.outer_diameter)
        self.inner_diameter = metric(self.inner_diameter)
        self.gap = metric(self.gap)
    def to_gerber(self, settings=None):
        """Serialize this thermal primitive to its Gerber aperture-macro text form."""
        data = dict(
            code=self.code,
            position="%.4g,%.4g" % self.position,
            outer_diameter=self.outer_diameter,
            inner_diameter=self.inner_diameter,
            gap=self.gap,
            rotation=self.rotation
        )
        fmt = "{code},{position},{outer_diameter},{inner_diameter},{gap},{rotation}*"
        return fmt.format(**data)
    def _approximate_arc_cw(self, start_angle, end_angle, radius, center):
        """
        Get an arc as a series of points
        Parameters
        ----------
        start_angle : The start angle in radians
        end_angle : The end angle in radians
        radius`: Radius of the arc
        center : The center point of the arc (x, y) tuple
        Returns
        -------
        array of point tuples
        """
        # The total sweep
        sweep_angle = end_angle - start_angle
        num_steps = 10
        angle_step = sweep_angle / num_steps
        # (removed redundant self-assignments of radius/center here)
        points = []
        for i in range(num_steps + 1):
            current_angle = start_angle + (angle_step * i)
            nextx = (center[0] + math.cos(current_angle) * radius)
            nexty = (center[1] + math.sin(current_angle) * radius)
            points.append((nextx, nexty))
        return points
    def to_primitive(self, units):
        """Render the thermal as four Outline quadrants (annulus with four gaps)."""
        # We start with calculating the top right section, then duplicate it
        inner_radius = self.inner_diameter / 2.0
        outer_radius = self.outer_diameter / 2.0
        # Calculate the start angle relative to the horizontal axis
        # NOTE(review): raises ZeroDivisionError if inner_diameter is 0 —
        # confirm upstream validation guarantees a positive inner diameter.
        inner_offset_angle = asin(self.gap / 2.0 / inner_radius)
        outer_offset_angle = asin(self.gap / 2.0 / outer_radius)
        rotation_rad = math.radians(self.rotation)
        inner_start_angle = inner_offset_angle + rotation_rad
        inner_end_angle = math.pi / 2 - inner_offset_angle + rotation_rad
        outer_start_angle = outer_offset_angle + rotation_rad
        outer_end_angle = math.pi / 2 - outer_offset_angle + rotation_rad
        outlines = []
        aperture = Circle((0, 0), 0)
        points = (self._approximate_arc_cw(inner_start_angle, inner_end_angle, inner_radius, self.position)
                  + list(reversed(self._approximate_arc_cw(outer_start_angle, outer_end_angle, outer_radius, self.position))))
        # Add in the last point since outlines should be closed
        points.append(points[0])
        # There are four outlines at rotated sections
        for rotation in [0, 90.0, 180.0, 270.0]:
            lines = []
            prev_point = rotate_point(points[0], rotation, self.position)
            for point in points[1:]:
                cur_point = rotate_point(point, rotation, self.position)
                lines.append(Line(prev_point, cur_point, aperture))
                prev_point = cur_point
            outlines.append(Outline(lines, units=units, level_polarity=self._level_polarity))
        return outlines
class AMCenterLinePrimitive(AMPrimitive):
""" Aperture Macro Center Line primitive. Code 21.
The center line primitive is a rectangle defined by its width, height, and center point.
.. seealso::
`The Gerber File Format Specification <http://www.ucamco.com/files/downloads/file/81/the_gerber_file_format_specification.pdf>`_
**Section 4.12.3.4:** Center Line, primitive code 21.
Parameters
----------
code : int
Center Line Primitive code. Must be 21.
exposure : str
'on' or 'off'
width : float
Width of rectangle
height : float
Height of rectangle
center | |
"""
Webserver module based on aiohttp to handle web/api requests
"""
# flake8: noqa
# pylint: disable=wrong-import-position, wrong-import-order
import aiohttp
setattr(aiohttp.http, 'SERVER_SOFTWARE', '')
import sys
import re
import argparse
import uuid
import gc
from typing import Callable, Optional, Type, List, Dict, Tuple, Any, Union, Coroutine
from functools import partial
from datetime import datetime, timezone
import logging
import asyncio
from aiohttp import web
from aiohttp.web_response import Response
import aiohttp_cors # type: ignore
from aiohttp_cors import CorsConfig
import aiojobs # type: ignore
import aiojobs.aiohttp as aiojobs_http # type: ignore
from aiojobs import Scheduler
from stringcase import snakecase, titlecase # type: ignore
from hopeit.server import api
from hopeit.server.steps import find_datatype_handler
from hopeit.toolkit import auth
from hopeit.dataobjects.payload import Payload
from hopeit.app.context import EventContext, NoopMultiparReader, PostprocessHook, PreprocessHook
from hopeit.dataobjects import DataObject, EventPayload, EventPayloadType
from hopeit.app.errors import Unauthorized, BadRequest
from hopeit.server.engine import Server, AppEngine
from hopeit.server.config import parse_server_config_json, ServerConfig, AuthType
from hopeit.server.logger import engine_logger, extra_logger, combined, EngineLoggerWrapper
from hopeit.server.metrics import metrics
from hopeit.server.errors import ErrorInfo
from hopeit.server.names import route_name
from hopeit.server.api import app_route_name
from hopeit.app.config import AppConfig, EventType, EventDescriptor, parse_app_config_json, EventPlugMode
from hopeit.server import runtime
# Public API of this module.
__all__ = ['parse_args',
           'main',
           'start_server',
           'start_app',
           'stop_server']
# Starts as a plain stdlib logger; replaced by the engine logger in init_logger().
logger: EngineLoggerWrapper = logging.getLogger(__name__)  # type: ignore
extra = extra_logger()
# Responses may be in-memory bodies or file streams.
ResponseType = Union[web.Response, web.FileResponse]
# server = Server()
# Module-level aiohttp application; routes are registered into it by start_app().
web_server = web.Application()
aiojobs_http.setup(web_server)
# Default auth info merged into every request context (e.g. auth domain).
auth_info_default = {}
def main(host: Optional[str], port: Optional[int], path: Optional[str], start_streams: bool,
         config_files: List[str], api_file: Optional[str]):
    """
    Bootstrap and run the web server.

    :param host: bind host, or None for default
    :param port: bind port, or None for default
    :param path: unix socket path, or None
    :param start_streams: start stream consumers for every app on startup
    :param config_files: first entry is the server config, the rest are app configs
    :param api_file: optional OpenAPI spec file to load
    """
    loop = asyncio.get_event_loop()
    scheduler = loop.run_until_complete(aiojobs.create_scheduler())
    # Still the stdlib logger at this point (engine logger is installed by
    # start_server -> init_logger), hence the %-style call.
    logger.info("Loading engine config file=%s...", config_files[0])  # type: ignore
    server_config = _load_engine_config(config_files[0])
    # Server must start before apps: app engines are registered on runtime.server.
    loop.run_until_complete(start_server(server_config))
    if server_config.auth.domain:
        auth_info_default['domain'] = server_config.auth.domain
    if api_file is not None:
        api.load_api_file(api_file)
    api.register_server_config(server_config)
    apps_config = []
    for config_file in config_files[1:]:
        logger.info(__name__, f"Loading app config file={config_file}...")
        config = _load_app_config(config_file)
        config.server = server_config
        apps_config.append(config)
    # API registration happens before route setup so swagger reflects all apps.
    api.register_apps(apps_config)
    api.enable_swagger(server_config, web_server)
    for config in apps_config:
        loop.run_until_complete(start_app(config, scheduler, start_streams))
    logger.debug(__name__, "Performing forced garbage collection...")
    gc.collect()
    # Blocks until the server is stopped.
    web.run_app(web_server, path=path, port=port, host=host)
def init_logger():
    """Swap the module logger for the engine logger (valid once the server started)."""
    global logger
    logger = engine_logger()
async def start_server(config: ServerConfig):
    """
    Start the server engine and install the engine logger.
    """
    await runtime.server.start(config=config)
    init_logger()
async def stop_server():
    """Stop the running engine and shut down the aiohttp application."""
    global web_server
    await runtime.server.stop()
    await web_server.shutdown()
    # Recreate fresh instances — presumably so the module can be started
    # again within the same process (e.g. in tests); confirm with callers.
    runtime.server = Server()
    web_server = web.Application()
async def start_app(config: AppConfig, scheduler: Scheduler, start_streams: bool = False):
    """
    Start Hopeit app specified by config
    :param config: AppConfig, configuration for the app to start
    :param scheduler: aiojobs scheduler used to spawn stream consumers
    :param start_streams: if True all stream events in app will start consuming
    """
    app_engine = await runtime.server.start_app(app_config=config)
    # CORS is configured only when the app declares an allowed origin.
    cors_origin = aiohttp_cors.setup(web_server, defaults={
        config.engine.cors_origin: aiohttp_cors.ResourceOptions(
            allow_credentials=True,
            expose_headers="*",
            allow_headers="*",
        )
    }) if config.engine.cors_origin else None
    _setup_app_event_routes(app_engine)
    # Plugin events are mounted on this app's engine as well.
    for plugin in config.plugins:
        plugin_engine = runtime.server.app_engine(app_key=plugin.app_key())
        _setup_app_event_routes(app_engine, plugin_engine)
    if cors_origin:
        app = app_engine.app_config.app
        _enable_cors(route_name('api', app.name, app.version), cors_origin)
    if start_streams:
        await _start_streams(app_engine, scheduler)
def _effective_events(app_engine: AppEngine, plugin: Optional[AppEngine] = None):
    """
    Select which events an engine should serve: without a plugin, the app's
    own STANDALONE events; with a plugin, the plugin events meant to run ON_APP.
    """
    if plugin is None:
        source, wanted_mode = app_engine, EventPlugMode.STANDALONE
    else:
        source, wanted_mode = plugin, EventPlugMode.ON_APP
    return {
        name: info for name, info in source.effective_events.items()
        if info.plug_mode == wanted_mode
    }
def _load_engine_config(path: str):
    """
    Read and parse server (engine) configuration from a json file
    """
    with open(path) as config_file:
        raw_json = config_file.read()
    return parse_server_config_json(raw_json)
def _load_app_config(path: str):
    """
    Read and parse app configuration from a json file
    """
    with open(path) as config_file:
        raw_json = config_file.read()
    return parse_app_config_json(raw_json)
def _enable_cors(prefix: str, cors: CorsConfig):
    """Attach CORS handling to every registered route under the given path prefix."""
    matching_routes = (
        route for route in web_server.router.routes()
        if route.resource and route.resource.canonical.startswith(prefix)
    )
    for route in matching_routes:
        cors.add(route)
def _setup_app_event_routes(app_engine: AppEngine,
                            plugin: Optional[AppEngine] = None):
    """
    Setup http routes for existing events in app,
    in existing web_server global instance.
    Supports:
    * GET requests with query params
    * POST requests with query params and payload sent in body
    * MULTIPART requests
    * STREAM/SERVICE start/stop management endpoints
    :param app_engine: AppEngine, initialized application engine
    :param plugin: optional AppEngine, when the implementation of the route
        is handled by a plugin app, if not specified methods will be handled
        by same app_engine
    :raises ValueError: for unsupported event types (or STREAM/SERVICE
        declared by a plugin, which falls through to the else branch)
    """
    for event_name, event_info in _effective_events(app_engine, plugin).items():
        if event_info.type == EventType.POST:
            web_server.add_routes([
                _create_post_event_route(
                    app_engine, plugin=plugin, event_name=event_name, override_route_name=event_info.route
                )
            ])
        elif event_info.type == EventType.GET:
            web_server.add_routes([
                _create_get_event_route(
                    app_engine, plugin=plugin, event_name=event_name, override_route_name=event_info.route
                )
            ])
        elif event_info.type == EventType.MULTIPART:
            web_server.add_routes([
                _create_multipart_event_route(
                    app_engine, plugin=plugin, event_name=event_name, override_route_name=event_info.route
                )
            ])
        elif event_info.type == EventType.STREAM and plugin is None:
            # Management endpoints only; stream consumption itself is started elsewhere.
            web_server.add_routes(
                _create_event_management_routes(
                    app_engine, event_name=event_name, event_info=event_info
                )
            )
        elif event_info.type == EventType.SERVICE and plugin is None:
            web_server.add_routes(
                _create_event_management_routes(
                    app_engine, event_name=event_name, event_info=event_info
                )
            )
        else:
            raise ValueError(f"Invalid event_type:{event_info.type} for event:{event_name}")
def _auth_types(app_engine: AppEngine, event_name: str):
    """Auth methods configured for an event, falling back to the server defaults."""
    assert app_engine.app_config.server
    configured = app_engine.app_config.events[event_name].auth
    if configured:
        return configured
    return app_engine.app_config.server.auth.default_auth_methods
def _create_post_event_route(
        app_engine: AppEngine, *,
        plugin: Optional[AppEngine] = None,
        event_name: str,
        override_route_name: Optional[str]) -> web.RouteDef:
    """
    Creates route for handling POST event.
    The event implementation comes from `plugin` when given, otherwise from
    `app_engine`; the payload datatype is resolved from the app config.
    """
    datatype = find_datatype_handler(app_config=app_engine.app_config, event_name=event_name)
    route = app_route_name(app_engine.app_config.app, event_name=event_name,
                           plugin=None if plugin is None else plugin.app_config.app,
                           override_route_name=override_route_name)
    logger.info(__name__, f"POST path={route} input={str(datatype)}")
    impl = plugin if plugin else app_engine
    handler = partial(_handle_post_invocation, app_engine, impl,
                      event_name, datatype, _auth_types(impl, event_name))
    # NOTE(review): __closure__/__code__ are overwritten so downstream
    # introspection of the partial sees the underlying handler — confirm
    # which consumer (api module / aiojobs) relies on this.
    setattr(handler, '__closure__', None)
    setattr(handler, '__code__', _handle_post_invocation.__code__)
    api_handler = api.add_route('post', route, handler)
    return web.post(route, api_handler)
def _create_get_event_route(
        app_engine: AppEngine, *,
        plugin: Optional[AppEngine] = None,
        event_name: str,
        override_route_name: Optional[str]) -> web.RouteDef:
    """
    Creates route for handling GET requests.
    The event implementation comes from `plugin` when given, otherwise
    from `app_engine`.
    """
    route = app_route_name(app_engine.app_config.app, event_name=event_name,
                           plugin=None if plugin is None else plugin.app_config.app,
                           override_route_name=override_route_name)
    logger.info(__name__, f"GET path={route}")
    impl = plugin if plugin else app_engine
    handler = partial(_handle_get_invocation, app_engine, impl, event_name, _auth_types(impl, event_name))
    setattr(handler, '__closure__', None)
    # NOTE(review): __code__ is taken from _handle_post_invocation even though
    # the handler wraps _handle_get_invocation — looks like a copy-paste slip;
    # confirm whether introspection consumers depend on this before changing.
    setattr(handler, '__code__', _handle_post_invocation.__code__)
    api_handler = api.add_route('get', route, handler)
    return web.get(route, api_handler)
def _create_multipart_event_route(
        app_engine: AppEngine, *,
        plugin: Optional[AppEngine] = None,
        event_name: str,
        override_route_name: Optional[str]) -> web.RouteDef:
    """
    Creates route for handling MULTIPART event (POST with multipart body).
    The event implementation comes from `plugin` when given, otherwise
    from `app_engine`.
    """
    datatype = find_datatype_handler(app_config=app_engine.app_config, event_name=event_name)
    route = app_route_name(app_engine.app_config.app, event_name=event_name,
                           plugin=None if plugin is None else plugin.app_config.app,
                           override_route_name=override_route_name)
    logger.info(__name__, f"MULTIPART path={route} input={str(datatype)}")
    impl = plugin if plugin else app_engine
    handler = partial(_handle_multipart_invocation, app_engine, impl,
                      event_name, datatype, _auth_types(impl, event_name))
    # Expose the wrapped function's code for downstream introspection of the partial.
    setattr(handler, '__closure__', None)
    setattr(handler, '__code__', _handle_multipart_invocation.__code__)
    api_handler = api.add_route('post', route, handler)
    return web.post(route, api_handler)
def _create_event_management_routes(
        app_engine: AppEngine, *,
        event_name: str,
        event_info: EventDescriptor) -> List[web.RouteDef]:
    """
    Create routes to start and stop processing of STREAM and SERVICE events.
    Returns a GET `<route>/start` and a GET `<route>/stop` route under the
    'mgmt' prefix.
    """
    # '.' and '$' in event names become path separators in the mgmt route.
    evt = event_name.replace('.', '/').replace('$', '/')
    base_route = app_route_name(app_engine.app_config.app, event_name=evt,
                                prefix='mgmt', override_route_name=event_info.route)
    logger.info(__name__, f"{event_info.type.value.upper()} path={base_route}/[start|stop]")
    handler: Optional[partial[Coroutine[Any, Any, Response]]] = None
    if event_info.type == EventType.STREAM:
        handler = partial(_handle_stream_start_invocation, app_engine, event_name)
    elif event_info.type == EventType.SERVICE:
        handler = partial(_handle_service_start_invocation, app_engine, event_name)
    assert handler is not None, f"No handler for event={event_name} type={event_info.type}"
    return [
        web.get(base_route + '/start', handler),
        web.get(
            base_route + '/stop',
            partial(_handle_event_stop_invocation, app_engine, event_name)
        )
    ]
def _response(*, track_ids: Dict[str, str], key: str,
              payload: EventPayload, hook: PostprocessHook) -> ResponseType:
    """
    Creates a web response object from a given payload (body), header track ids
    and applies a postprocess hook (headers, cookies, status, file response).
    """
    response: ResponseType
    # Track ids are echoed back as X-* headers (e.g. track.request_id -> X-Request-Id).
    headers = {
        **hook.headers,
        **{f"X-{re.sub(' ', '-', titlecase(k))}": v for k, v in track_ids.items()}
    }
    if hook.file_response is not None:
        # File responses stream from disk; payload is ignored in this branch.
        response = web.FileResponse(
            path=hook.file_response,
            headers={'Content-Type': hook.content_type, **headers}
        )
    else:
        # Pick a serializer by content type; fall back to plain-text rendering.
        serializer: Callable[..., str] = CONTENT_TYPE_BODY_SER.get(
            hook.content_type, _text_response
        )
        body = serializer(payload, key=key)
        response = web.Response(
            body=body,
            headers=headers,
            content_type=hook.content_type
        )
    for name, cookie in hook.cookies.items():
        value, args, kwargs = cookie
        response.set_cookie(name, value, *args, **kwargs)
    for name, args, kwargs in hook.del_cookies:
        response.del_cookie(name, *args, **kwargs)
    # Hook status (when set) overrides the default 200.
    if hook.status:
        response.set_status(hook.status)
    return response
def _response_info(response: ResponseType):
    """Extra log fields describing a response (currently just its status code)."""
    status_text = str(response.status)
    return extra(prefix='response.', status=status_text)
def _track_ids(request: web.Request) -> Dict[str, str]:
    """
    Build the tracking ids for a request: fresh operation/request ids and a
    UTC timestamp, merged with any incoming X-Track-* headers (which win).
    """
    track: Dict[str, str] = {
        'track.operation_id': str(uuid.uuid4()),
        'track.request_id': str(uuid.uuid4()),
        'track.request_ts': datetime.now().astimezone(timezone.utc).isoformat(),
    }
    for header, value in request.headers.items():
        if header.lower().startswith('x-track-'):
            # strip the 'x-track-' prefix (8 chars) and snake_case the rest
            track['track.' + snakecase(header[8:].lower())] = value
    return track
def _failed_response(context: Optional[EventContext],
                     e: Exception) -> web.Response:
    """Log an unhandled error (with event context when available) and build a 500 json response."""
    if not context:
        logger.error(__name__, e)
    else:
        logger.error(context, e)
        logger.failed(context)
    error_body = Payload.to_json(ErrorInfo.from_exception(e))
    return web.Response(status=500, body=error_body)
def _ignored_response(context: Optional[EventContext],
                      status: int,
                      e: BaseException) -> web.Response:
    """Log an ignored error (with event context when available) and build a json response with the given status."""
    if not context:
        logger.error(__name__, e)
    else:
        logger.error(context, e)
        logger.ignored(context)
    error_body = Payload.to_json(ErrorInfo.from_exception(e))
    return web.Response(status=status, body=error_body)
def _request_start(app_engine: AppEngine,
                   plugin: AppEngine,
                   event_name: str,
                   request: web.Request) -> EventContext:
    """
    Extracts context and track information from a request and logs start of event.
    The returned context carries the module-level default auth info; actual
    credentials are validated later per request.
    """
    context = EventContext(
        app_config=app_engine.app_config,
        plugin_config=plugin.app_config,
        event_name=event_name,
        track_ids=_track_ids(request),
        auth_info=auth_info_default
    )
    logger.start(context)
    return context
def _extract_auth_header(request: web.Request, context: EventContext) -> Optional[str]:
    """Return the raw Authorization header, or None if absent."""
    return request.headers.get("Authorization")
def _extract_refresh_cookie(request: web.Request, context: EventContext) -> Optional[str]:
    """Return the app-scoped refresh-token cookie, or None if absent."""
    return request.cookies.get(f"{context.app_key}.refresh")
def _ignore_auth(request: web.Request, context: EventContext) -> str:
    """Extractor for unsecured endpoints: always yields the unsecured marker."""
    return 'Unsecured -'
# Maps each supported auth type to the callable that extracts its credential
# from the incoming request; a None result means "credential not present".
AUTH_HEADER_EXTRACTORS = {
    AuthType.BASIC: _extract_auth_header,
    AuthType.BEARER: _extract_auth_header,
    AuthType.REFRESH: _extract_refresh_cookie,
    AuthType.UNSECURED: _ignore_auth
}
def _extract_authorization(auth_methods: List[AuthType], request: web.Request, context: EventContext):
    """Return the first credential found for the allowed auth methods, else the unsecured marker."""
    candidates = (
        AUTH_HEADER_EXTRACTORS[auth_type](request, context)
        for auth_type in auth_methods
    )
    return next((header for header in candidates if header is not None), 'Unsecured -')
def _validate_authorization(app_config: AppConfig,
context: EventContext,
auth_types: List[AuthType],
request: web.Request):
"""
Validates Authorization header from request to provide valid credentials
for the methods supported in event configuration.
:raise `Unauthorized` if authorization is not valid
"""
auth_methods = context.event_info.auth
if (len(auth_methods) == 0) and (app_config.server is not None):
auth_methods = app_config.server.auth.default_auth_methods
auth_header = _extract_authorization(auth_methods, request, context)
try:
method, data = auth_header.split(" ")
except ValueError as e:
raise BadRequest("Malformed Authorization") from e
context.auth_info['allowed'] = False
for auth_type in auth_types:
if method.upper() == auth_type.name.upper():
auth.validate_auth_method(auth_type, data, context)
if | |
'3_dx', e1_crop_3.get())
config.set('Crop', '3_dy', e2_crop_3.get())
config.set('Crop', '3_width', e3_crop_3.get())
config.set('Crop', '3_height', e4_crop_3.get())
config.set('Crop', 'gravity', img_crop_gravity.get())
config.add_section('Border')
config.set('Border', 'on', str(img_border_on.get()))
config.set('Border', 'color', img_border_color.get())
config.set('Border', 'size', e_border.get())
config.add_section('Color')
config.set('Color', 'on', str(img_bw_on.get()))
config.set('Color', 'black-white', str(img_bw.get()))
config.set('Color', 'sepia', e_bw_sepia.get())
config.add_section('Normalize')
config.set('Normalize', 'on', str(img_normalize_on.get()))
config.set('Normalize', 'normalize', str(img_normalize.get()))
config.set('Normalize', 'channel', co_normalize_channel.get())
config.add_section('Contrast')
config.set('Contrast', 'on', str(img_contrast_on.get()))
config.set('Contrast', 'contrast', str(img_contrast.get()))
config.set('Contrast', 'selection', co_contrast_selection.get())
config.set('Contrast', 'contrast_stretch_1', e1_contrast.get())
config.set('Contrast', 'contrast_stretch_2', e2_contrast.get())
config.add_section('Mirror')
config.set('Mirror', 'on', str(img_mirror_on.get()))
config.set('Mirror', 'flip', str(img_mirror_flip.get()))
config.set('Mirror', 'flop', str(img_mirror_flop.get()))
config.add_section('Logo')
config.set('Logo', 'on', str(img_logo_on.get()))
config.set('Logo', 'logo', file_logo_path.get())
config.set('Logo', 'gravity', img_logo_gravity.get())
config.set('Logo', 'width', e_logo_width.get())
config.set('Logo', 'height', e_logo_height.get())
config.set('Logo', 'dx', e_logo_dx.get())
config.set('Logo', 'dy', e_logo_dy.get())
config.add_section('Custom')
config.set('Custom', 'on', str(img_custom_on.get()))
# save to a file
try:
with open(FILE_INI, 'w', encoding='utf-8', buffering=1) as configfile:
config.write(configfile)
except:
log.write_log("ini_save: cannot save config file: " + FILE_INI, "E")
def help_info(event):
    """ Info window: show the LICENSE file contents, or a short copyright
    line when the file cannot be read. """
    # global PWD
    try:
        license_file = os.path.join(PWD, "LICENSE")
        with open(license_file, "r", encoding="utf8") as licensef:
            # Read the whole file at once instead of line-by-line string concatenation.
            message = licensef.read()
    except OSError:
        # Missing/unreadable LICENSE is not fatal (was a bare `except:`
        # which also hid unrelated errors).
        log.write_log("help_info: error during loading license file", "W")
        message = "Copyright " + version.__copyright__ + " " + version.__author__ + " under MIT license"
    messagebox.showinfo(title=_("License"), message=message)
def close_window():
    """ close program """
    root.quit()     # stop the Tk mainloop
    root.destroy()  # release Tk resources
    sys.exit()      # end the process
def win_deleted():
    """ close program window: log the shutdown, then exit via close_window """
    log.write_log("closed", "M")
    close_window()
def mouse_crop_NW(event):
    """ Left-Upper corner: convert a click on the preview into original-image
    coordinates and store them in the active crop (or text position) fields. """
    x_preview = event.x
    y_preview = event.y
    # Fix: second argument must be the image height, not the width again
    # (matches the call in preview_orig); otherwise the Y scale is wrong
    # for non-square images.
    xy_max = common.mouse_crop_calculation(file_in_width.get(),
                                           file_in_height.get(),
                                           int(co_preview_selector_orig.get()))
    # Scale preview pixels back to original-image pixels.
    width = int(x_preview*xy_max['x_orig']/xy_max['x_max'])
    height = int(y_preview*xy_max['y_orig']/xy_max['y_max'])
    if img_crop_on.get() == 1:
        if img_crop.get() == 1:
            # Absolute-coordinates crop: fill x0/y0
            e1_crop_1.delete(0, "end")
            e1_crop_1.insert(0, width)
            e2_crop_1.delete(0, "end")
            e2_crop_1.insert(0, height)
        elif img_crop.get() == 2:
            # Offset+size crop: fill dx/dy
            e1_crop_2.delete(0, "end")
            e1_crop_2.insert(0, width)
            e2_crop_2.delete(0, "end")
            e2_crop_2.insert(0, height)
        preview_orig()
    if img_text_on.get() and not img_text_gravity_onoff.get():
        # Clicking also positions the text overlay when absolute mode is active.
        e_text_x.delete(0, "end")
        e_text_x.insert(0, width)
        e_text_y.delete(0, "end")
        e_text_y.insert(0, height)
def mouse_crop_SE(event):
    """ Right-Lower corner: convert a click on the preview into original-image
    coordinates and store them as the crop end point. """
    if img_crop_on.get() == 1:
        x_preview = event.x
        y_preview = event.y
        # Fix: second argument must be the image height, not the width again
        # (matches the call in preview_orig); otherwise the Y scale is wrong
        # for non-square images.
        xy_max = common.mouse_crop_calculation(file_in_width.get(),
                                               file_in_height.get(),
                                               int(co_preview_selector_orig.get()))
        width = int(x_preview*xy_max['x_orig']/xy_max['x_max'])
        height = int(y_preview*xy_max['y_orig']/xy_max['y_max'])
        e3_crop_1.delete(0, "end")
        e3_crop_1.insert(0, width)
        e4_crop_1.delete(0, "end")
        e4_crop_1.insert(0, height)
        preview_orig()
def preview_orig_refresh(event):
    """ callback after selection of preview size: redraw the original preview """
    preview_orig()
def preview_orig():
    """
    generation preview of original picture
    and add crop rectangle
    """
    if co_preview_selector_orig.get() != "none":
        if os.path.isfile(file_in_path.get()):
            if img_crop_on.get() == 1:
                # draw crop rectangle on preview
                xy_max = common.mouse_crop_calculation(file_in_width.get(),
                                                       file_in_height.get(),
                                                       int(co_preview_selector_orig.get()))
                # Three crop modes: 1 = absolute corners, 2 = offset+size,
                # 3 = gravity-based (converted to absolute by convert module).
                # NOTE(review): x0..y1 stay unbound if img_crop is not 1/2/3 —
                # presumably the Radiobutton guarantees that range; confirm.
                if img_crop.get() == 1:
                    x0 = int(e1_crop_1.get())
                    y0 = int(e2_crop_1.get())
                    x1 = int(e3_crop_1.get())
                    y1 = int(e4_crop_1.get())
                    # do_nothing = 0
                elif img_crop.get() == 2:
                    x0 = int(e1_crop_2.get())
                    y0 = int(e2_crop_2.get())
                    x1 = x0 + int(e3_crop_2.get())
                    y1 = y0 + int(e4_crop_2.get())
                    # do_nothing = 0
                elif img_crop.get() == 3:
                    coord_for_crop = (int(e1_crop_3.get()), int(e2_crop_3.get()),
                                      int(e3_crop_3.get()), int(e4_crop_3.get()),
                                      img_crop_gravity.get())
                    coord = convert.convert_preview_crop_gravity(coord_for_crop,
                                                                 xy_max['x_orig'],
                                                                 xy_max['y_orig'])
                    x0 = coord[0]
                    y0 = coord[1]
                    x1 = coord[2]
                    y1 = coord[3]
                    # do_nothing = 0
                # Scale original-image coordinates down to preview size.
                ratio_X = xy_max['x_max'] / xy_max['x_orig']
                ratio_Y = xy_max['y_max'] / xy_max['y_orig']
                x0 = int(x0 * ratio_X)
                y0 = int(y0 * ratio_Y)
                x1 = int(x1 * ratio_X)
                y1 = int(y1 * ratio_Y)
                # x0y0x1y1 = str(x0) + "," + str(y0) + " " + str(x1) + "," + str(y1)
                # command = " -fill none -draw \"stroke '#FFFF00' rectangle " \
                #     + x0y0x1y1 + "\" "
                crop_rectangle = (x0, y0, x1, y1)
            else:
                # command = " "
                # No crop: preview helper treats a non-4-tuple as "no rectangle".
                crop_rectangle = (" ")
            # it works with every pictures
            # preview_picture = preview.preview_convert(file_in_path.get(),
            #                                   command,
            #                                   int(co_preview_selector_orig.get()),
            #                                   GM_or_IM)
            # it can has problem with RGBA png/gif files
            preview_picture = preview.preview_pillow(file_in_path.get(),
                                                     int(co_preview_selector_orig.get()),
                                                     crop_rectangle)
            try:
                pi_preview_orig.configure(file=common.spacja(preview_picture['filename']))
                l_preview_orig_pi.configure(image=pi_preview_orig)
            except:
                # NOTE(review): bare except — consider narrowing to Exception.
                log.write_log("preview_orig: Cannot load preview", "E")
            try:
                l_preview_orig.configure(text=preview_picture['width'] + "x" \
                                         + preview_picture['height'] \
                                         + " - " \
                                         + preview_picture['size'])
            except:
                # NOTE(review): bare except — consider narrowing to Exception.
                log.write_log("preview_orig: Cannot load image size", "E")
            if img_histograms_on.get() == 1:
                pi_histogram_orig.configure(
                    file=preview.preview_histogram(file_in_path.get(),
                                                   GM_or_IM))
                l_histogram_orig.configure(image=pi_histogram_orig)
        else:
            # Input file missing: blank out the preview widgets.
            preview_orig_clear()
def preview_logo():
    """Generate and display the preview of the selected logo file.

    Reads the path from file_logo_path; on success updates the filename
    label, the preview image and the size label. Errors are logged, not
    raised, so the UI stays responsive.
    """
    if os.path.isfile(file_logo_path.get()):
        l_logo_filename.configure(text=os.path.basename(file_logo_path.get()))
        # preview_pillow() is not used here because PIL has problems with
        # RGBA->RGB conversion, so we go through convert/GM instead
        preview_info = preview.preview_convert(file_logo_path.get(), " ", PREVIEW_LOGO, GM_or_IM)
        try:
            pi_logo_preview.configure(file=preview_info['filename'])
        except Exception:
            # was a bare 'except:'; narrowed so Ctrl-C etc. still propagate
            log.write_log("Preview_logo: Cannot display file", "E")
        l_logo_preview.configure(text=preview_info['width'] + "x" + preview_info['height'])
    else:
        log.write_log("Preview_logo: Cannot load file", "E")
def preview_logo_clear():
    """Reset the logo preview widgets when no logo file is selected."""
    # blank the preview image first, then the caption labels
    pi_logo_preview.configure(file="")
    l_logo_preview.configure(text="")
    l_logo_filename.configure(text=_("No file selected"))
def tools_set_event(event):
    """Event-handler wrapper for the theme selector: refresh the tool
    frames and regenerate the original-picture preview.

    The *event* argument (required by the Tk binding) is ignored.
    """
    tools_set(1)
def tools_set_on():
    """Refresh the tool frames AND regenerate the original-picture preview."""
    tools_set(1)
def tools_set_off():
    """Refresh the tool frames WITHOUT regenerating the original preview."""
    tools_set(0)
def tools_set(preview_on):
    """Show or hide the tool frames according to the tool on/off checkboxes.

    Each tool has an IntVar (1 = enabled): its frame is grid()-ed when the
    variable is set and grid_remove()-d otherwise.  Afterwards the selected
    ttk theme is applied; when *preview_on* is truthy the original-picture
    preview is regenerated as well.
    """
    # (checkbox IntVar, frames controlled by it); the histogram switch
    # drives two frames at once
    toggles = (
        (img_custom_on, (frame_custom,)),
        (img_histograms_on, (frame_histogram_orig, frame_histogram_new)),
        (img_resize_on, (frame_resize,)),
        (img_crop_on, (frame_crop,)),
        (img_text_on, (frame_text,)),
        (img_rotate_on, (frame_rotate,)),
        (img_border_on, (frame_border,)),
        (img_bw_on, (frame_bw,)),
        (img_contrast_on, (frame_contrast,)),
        (img_normalize_on, (frame_normalize,)),
        (img_mirror_on, (frame_mirror,)),
        (img_logo_on, (frame_logo,)),
    )
    for variable, frames in toggles:
        for frame in frames:
            if variable.get():
                frame.grid()
            else:
                frame.grid_remove()
    style.theme_use(co_theme_selector.get())
    if preview_on:
        preview_orig()
def crop_tool_hide_show():
    """Show only the crop widgets relevant to the selected crop mode.

    Mode 1: crop by two clicked corner coordinates.
    Mode 2: crop by origin + width/height entries.
    Mode 3: crop by gravity-relative offset entries.
    Finishes by regenerating the original-picture preview.
    """
    # widgets belonging to each crop mode
    mode_widgets = {
        1: (f_clickL_crop, f_clickR_crop),
        2: (e1_crop_2, e2_crop_2, e3_crop_2, e4_crop_2),
        3: (e1_crop_3, e2_crop_3, e3_crop_3, e4_crop_3, frame_crop_gravity),
    }
    mode = img_crop.get()
    if mode in mode_widgets:
        # show the active mode's widgets, hide all the others;
        # an unknown mode (matching the original behavior) toggles nothing
        for group_mode, widgets in mode_widgets.items():
            for widget in widgets:
                if group_mode == mode:
                    widget.grid()
                else:
                    widget.grid_remove()
    preview_orig()
def text_tool_hide_show():
    """Show the text-tool widgets matching the inside/outside and gravity
    settings.

    Outside: only the six corner anchors are available and the x/y entries
    plus the gravity checkbox are hidden.  Inside: the x/y entries and the
    gravity checkbox are shown; with gravity enabled all nine anchors are
    shown and the entries mean offsets (dx/dy), otherwise absolute (x/y).
    """
    def _set_visible(widgets, show):
        # grid() or grid_remove() every widget in *widgets*
        for widget in widgets:
            if show:
                widget.grid()
            else:
                widget.grid_remove()

    xy_widgets = (l_text_xy_x, l_text_xy_y, e_text_x, e_text_y)
    corner_buttons = (rb_text_NW, rb_text_N, rb_text_NE,
                      rb_text_SW, rb_text_S, rb_text_SE)
    edge_buttons = (rb_text_W, rb_text_C, rb_text_E)

    if img_text_inout.get():
        # Text outside the picture
        _set_visible(xy_widgets, False)
        _set_visible(corner_buttons, True)
        _set_visible(edge_buttons, False)
        cb_text_gravity.grid_remove()
    else:
        # Text inside the picture
        _set_visible(xy_widgets, True)
        cb_text_gravity.grid()
        gravity = bool(img_text_gravity_onoff.get())
        _set_visible(corner_buttons, gravity)
        _set_visible(edge_buttons, gravity)
        # with gravity the entries are offsets, otherwise absolute coordinates
        l_text_xy_x.configure(text=_("dx") if gravity else _("x"))
        l_text_xy_y.configure(text=_("dy") if gravity else _("y"))
###############################################################################
# GUI okno główne
###############################################################################
root = Tk()
# Enable the "show hidden files" toggle in Tk's file dialogs.
# https://code.activestate.com/lists/python-tkinter-discuss/3723/
try:
    # call a dummy dialog with an impossible option to initialize the file
    # dialog without really getting a dialog window; this will throw a
    # TclError, so we need a try...except :
    try:
        root.tk.call('tk_getOpenFile', '-foobarbaz')
    except TclError:
        pass
    # now set the magic variables accordingly
    root.tk.call('set', '::tk::dialog::file::showHiddenBtn', '1')
    root.tk.call('set', '::tk::dialog::file::showHiddenVar', '0')
except Exception:
    # best effort -- not every Tk build has these variables
    # (was a bare 'except:'; narrowed so KeyboardInterrupt still propagates)
    pass
# ttk styling: named styles used to color-code buttons and labelframes
style = ttk.Style()
theme_list = style.theme_names() # read available themes
style.configure("Blue.TButton", foreground="blue")
style.configure("Brown.TButton", foreground="#8B0000")
style.configure("Blue.TLabelframe.Label", foreground="blue")
style.configure("Fiolet.TLabelframe.Label", foreground="#800080")
##########################
# Zmienne globalne (global variables, mostly Tk control variables)
FILE_INI = os.path.join(os.path.expanduser("~"), ".fotokilof.ini")
PWD = os.getcwd()
log_level = StringVar()  # E(rror), W(arning), M(essage)
work_dir = StringVar()  # default: "FotoKilof"
work_sub_dir = StringVar()  # subdir for resized pictures
work_sub_dir.set("")  # default none
file_dir_selector = IntVar()
file_in_path = StringVar()  # fullpath original picture
file_in_width = IntVar()  # width original picture
file_in_height = IntVar()  # height original picture
file_in_size = IntVar()  # size original picture (bytes)
img_histograms_on = IntVar()
img_logo_on = IntVar()  # Logo
file_logo_path = StringVar()  # fullpath logo file
img_logo_gravity = StringVar()
img_resize_on = IntVar()  # Resize
img_resize = IntVar()  # (1, 2, 3, 4, 5)
img_text_on = IntVar()  # Text
img_text_gravity = StringVar()
img_text_gravity_onoff = IntVar()
img_text_font = StringVar()
img_text_font_dict = {}  # dict with available fonts, from fonts()
img_text_color = StringVar()
img_text_box = IntVar()
img_text_box_color = StringVar()
img_text_inout = IntVar()  # Text inside or outside picture
img_rotate_on = IntVar()  # Rotate
img_rotate = IntVar()
img_crop_on = IntVar()  # Crop
img_crop = IntVar()  # (1, 2, 3)
img_crop_gravity = StringVar()
img_border_on = IntVar()  # Border
img_border_color = StringVar()
img_normalize_on = IntVar()  # Normalize
img_normalize = IntVar()  # (1,2,3)
# channel names accepted by ImageMagick's -normalize/-channel
normalize_channels = ("None", "Red", "Green", "Blue", "Alpha", "Gray",
                      "Cyan", "Magenta", "Yellow", "Black", "Opacity",
                      "Index", "RGB", "RGBA", "CMYK", "CMYKA")
img_bw_on = IntVar()  # Black-white
img_bw = IntVar()
img_contrast_on = IntVar()  # Contrast
img_contrast = IntVar()  # (1, 2)
img_mirror_on = IntVar()  # Mirror
img_mirror_flip = IntVar()  # (0, 1)
img_mirror_flop = IntVar()  # (0, 1)
contrast_selection = ("+3", "+2", "+1", "0", "-1", "-2", "-3")
img_custom_on = IntVar()  # Custom
progress_var = IntVar()  # progressbar
progress_files = StringVar()
# picture extensions handled by the batch conversion
file_extension = (".jpeg", ".jpg", ".png", ".tif")
magick_commands = ("composite", "convert")
#magick_commands = ("animate", "compare", "composite", "conjure", "convert",
#                   "identify", "import", "mogrify", "montage", "stream")
######################################################################
# Karty (main layout: menu bar, tools bar, resizable work area)
######################################################################
main_menu = ttk.Frame()
main_tools = ttk.Frame()
main = PanedWindow()
main_menu.pack(side='top', expand=0, fill='both')
main_tools.pack(side='top', expand=0, fill='both')
main.pack(side='bottom', expand=1, fill='both')
# validator callback shared by numeric Entry widgets
validation = main.register(gui.only_numbers) # Entry validation
####################################################################
# main_menu row
####################################################################
###########################
# Picture selection
###########################
# labelframe with: file chooser, screenshot/clipboard grab, and
# first/previous/next/last navigation within the current directory
frame_file_select = ttk.Labelframe(main_menu, text=_("Image"),
                                   style="Fiolet.TLabelframe")
frame_file_select.grid(row=1, column=1, sticky=(N, W, E, S), padx=5, pady=5)
b_file_select = ttk.Button(frame_file_select, text=_("File selection"),
                           command=open_file, style="Brown.TButton")
b_file_select_screenshot = ttk.Button(frame_file_select, text=_("Screenshot"),
                                      command=open_screenshot)
# on Windows/macOS the grab comes from the clipboard, not a screenshot tool
if mswindows.windows() or mswindows.macos():
    b_file_select_screenshot.configure(text=_("Clipboard"))
b_file_select_first = ttk.Button(frame_file_select, text=_("First"),
                                 command=open_file_first)
b_file_select_prev = ttk.Button(frame_file_select, text=_("Previous"),
                                command=open_file_prev)
b_file_select_next = ttk.Button(frame_file_select, text=_("Next"),
                                command=open_file_next)
b_file_select_last = ttk.Button(frame_file_select, text=_("Last"),
                                command=open_file_last)
b_file_select.grid(column=1, row=1, padx=5, pady=5, sticky=W)
#
b_file_select_screenshot.grid(column=2, row=1, padx=10, pady=5, sticky=W)
#
b_file_select_first.grid(column=5, row=1, padx=5, pady=5, sticky=W)
b_file_select_prev.grid(column=6, row=1, padx=5, pady=5, sticky=W)
b_file_select_next.grid(column=7, row=1, padx=5, pady=5, sticky=W)
b_file_select_last.grid(column=8, row=1, padx=5, pady=5, sticky=W)
frame_apply = ttk.LabelFrame(main_menu, text=_("Execute all"),
style="Fiolet.TLabelframe")
frame_apply.grid(row=1, column=2, sticky=(N, W, | |
list.
"""
table = {"i":_readInt, "f":_readFloat, "s":_readString, "b":_readBlob, "d":_readDouble, "t":_readTimeTag}
decoded = []
address, rest = _readString(data)
if address.startswith(","):
typetags = address
address = ""
else:
typetags = ""
if address == "#bundle":
time, rest = _readTimeTag(rest)
decoded.append(address)
decoded.append(time)
while len(rest)>0:
length, rest = _readInt(rest)
decoded.append(decodeOSC(rest[:length]))
rest = rest[length:]
elif len(rest)>0:
if not len(typetags):
typetags, rest = _readString(rest)
decoded.append(address)
decoded.append(typetags)
if typetags.startswith(","):
for tag in typetags[1:]:
value, rest = table[tag](rest)
decoded.append(value)
else:
raise OSCError("OSCMessage's typetag-string lacks the magic ','")
return decoded
######
#
# Utility functions
#
######
def hexDump(bytes):
    """ Useful utility; prints the string in hexadecimal.

    Accepts str (encoded as latin-1) or a bytes-like object. Output is
    16 bytes per line, each line followed by the repr() of those bytes.
    (The parameter name shadows the builtin 'bytes'; kept for interface
    compatibility.)
    """
    print("byte 0 1 2 3 4 5 6 7 8 9 A B C D E F")
    if isinstance(bytes, str):
        bytes = bytes.encode('latin1')
    num = len(bytes)
    line = ""  # bug fix: was unbound when the final partial-line print ran
    for i in range(num):
        if i % 16 == 0:
            # bug fix: i/16 is a float in Python 3 and '%X' then raises
            # TypeError -- use integer division for the line label
            line = "%02X0 : " % (i // 16)
        line += "%02X " % bytes[i]
        if (i + 1) % 16 == 0:
            print("%s: %s" % (line, repr(bytes[i - 15:i + 1])))
            line = ""
    bytes_left = num % 16
    if bytes_left:
        print("%s: %s" % (line.ljust(54), repr(bytes[-bytes_left:])))
def getUrlStr(*args):
    """Convert provided arguments to a string in 'host:port/prefix' format
    Args can be:
      - (host, port)
      - (host, port), prefix
      - host, port
      - host, port, prefix
    Empty / '0.0.0.0' hosts are rendered as 'localhost'; if port is not an
    int it is omitted entirely.
    """
    if not len(args):
        return ""
    # split host/port off the argument list
    if isinstance(args[0], tuple):  # idiom fix: was 'type(args[0]) == tuple'
        host = args[0][0]
        port = args[0][1]
        args = args[1:]
    else:
        host = args[0]
        port = args[1]
        args = args[2:]
    prefix = args[0] if len(args) else ""
    if len(host) and (host != '0.0.0.0'):
        try:
            # prefer a resolvable hostname over a raw address
            (host, _, _) = socket.gethostbyaddr(host)
        except socket.error:
            pass
    else:
        host = 'localhost'
    if isinstance(port, int):
        return "%s:%d%s" % (host, port, prefix)
    else:
        return host + prefix
def parseUrlStr(url):
    """Convert provided string in 'host:port/prefix' format to it's components
    Returns ((host, port), prefix)
    """
    if not (isinstance(url, str) and len(url)):
        return (None, '')
    # drop any leading scheme, e.g. 'osc://'
    scheme_end = url.find("://")
    if scheme_end > -1:
        url = url[scheme_end + 3:]
    # split off the host at the first ':'
    host, sep, tail = url.partition(':')
    if sep:
        host = host.strip()
        tail = tail.strip()
    else:
        tail = host  # no colon: the whole string is the tail
        host = ''
    # the leading digits of the tail form the port number
    digits = 0
    while digits < len(tail) and tail[digits].isdigit():
        digits += 1
    portstr = tail[:digits].strip()
    tail = tail[digits:].strip()
    # the prefix starts at the first path separator or filter operator
    cut = len(tail)
    for op in ('/', '+', '-', '*'):
        pos = tail.find(op)
        if -1 < pos < cut:
            cut = pos
    head = tail[:cut].strip()
    prefix = tail[cut:].strip().strip('/')
    if len(prefix) and prefix[0] not in ('+', '-', '*'):
        prefix = '/' + prefix
    # anything before the operator counts as the host if none was given
    if len(head) and not len(host):
        host = head
    if len(host):
        try:
            host = socket.gethostbyname(host)
        except socket.error:
            pass
    try:
        port = int(portstr)
    except ValueError:
        port = None
    return ((host, port), prefix)
######
#
# OSCClient class
#
######
class OSCClient(object):
    """Simple OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
    """
    # set outgoing socket buffer size
    sndbuf_size = 4096 * 8

    def __init__(self, server=None):
        """Construct an OSC Client.
        When the 'address' argument is given this client is connected to a specific remote server.
          - address ((host, port) tuple): the address of the remote server to send all messages to
        Otherwise it acts as a generic client:
          If address == 'None', the client doesn't connect to a specific remote server,
          and the remote address must be supplied when calling sendto()
          - server: Local OSCServer-instance this client will use the socket of for transmissions.
          If none is supplied, a socket will be created.
        """
        self.socket = None
        if server is None:  # idiom fix: was '== None'
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
            self._fd = self.socket.fileno()
            self.server = None
        else:
            self.setServer(server)
        self.client_address = None

    def setServer(self, server):
        """Associate this Client with given server.
        The Client will send from the Server's socket.
        The Server will use this Client instance to send replies.
        """
        if not isinstance(server, OSCServer):
            raise ValueError("'server' argument is not a valid OSCServer object")
        # drop any socket we created ourselves before adopting the server's
        if self.socket is not None:
            self.close()
        self.socket = server.socket.dup()
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
        self._fd = self.socket.fileno()
        self.server = server
        # a server keeps at most one reply-client; replace any previous one
        if self.server.client is not None:
            self.server.client.close()
        self.server.client = self

    def close(self):
        """Disconnect & close the Client's socket
        """
        if self.socket is not None:
            self.socket.close()
            self.socket = None

    def __str__(self):
        """Returns a string containing this Client's Class-name, software-version
        and the remote-address it is connected to (if any)
        """
        out = self.__class__.__name__
        out += " v%s.%s-%s" % version
        addr = self.address()
        if addr:
            out += " connected to osc://%s" % getUrlStr(addr)
        else:
            out += " (unconnected)"
        return out

    def __eq__(self, other):
        """Compare function.
        Two Clients compare equal when they use the same socket and, when
        both are bound to a server, the same server.
        (Bug fix: the original used the Python-2-only cmp() builtin and the
        removed socket._sock attribute, so it always raised under Python 3.)
        """
        if not isinstance(other, self.__class__):
            return False
        if self.socket is not other.socket:
            return False
        if self.server is not None and other.server is not None:
            return self.server == other.server
        return True

    def __ne__(self, other):
        """Compare function.
        """
        return not self.__eq__(other)

    def address(self):
        """Returns a (host,port) tuple of the remote server this client is
        connected to or None if not connected to any server.
        """
        try:
            return self.socket.getpeername()
        except socket.error:
            return None

    def connect(self, address):
        """Bind to a specific OSC server:
        the 'address' argument is a (host, port) tuple
          - host:  hostname of the remote OSC server,
          - port:  UDP-port the remote OSC server listens to.
        """
        try:
            self.socket.connect(address)
            self.client_address = address
        except socket.error as e:
            self.client_address = None
            raise OSCClientError("SocketError: %s" % str(e))
        if self.server is not None:
            self.server.return_port = address[1]

    def sendto(self, msg, address, timeout=None):
        """Send the given OSCMessage to the specified address.
          - msg:  OSCMessage (or OSCBundle) to be sent
          - address:  (host, port) tuple specifing remote server to send the message to
          - timeout:  A timeout value for attempting to send. If timeout == None,
                this call blocks until socket is available for writing.
        Raises OSCClientError when timing out while waiting for the socket.
        """
        if not isinstance(msg, OSCMessage):
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
        # wait (up to 'timeout') for the socket to become writable
        ret = select.select([], [self._fd], [], timeout)
        try:
            ret[1].index(self._fd)
        except ValueError:  # was a bare 'except:'
            # for the very rare case this might happen
            raise OSCClientError("Timed out waiting for file descriptor")
        try:
            self.socket.connect(address)
            self.socket.sendall(msg.getBinary())
            # re-connect to the original peer, if there was one
            if self.client_address:
                self.socket.connect(self.client_address)
        except socket.error as e:
            # bug fix: exceptions are not indexable in Python 3 -- use e.errno
            if e.errno in (7, 65):  # 7 = 'no address associated with nodename', 65 = 'no route to host'
                raise e
            else:
                raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))

    def send(self, msg, timeout=None):
        """Send the given OSCMessage.
        The Client must be already connected.
          - msg:  OSCMessage (or OSCBundle) to be sent
          - timeout:  A timeout value for attempting to send. If timeout == None,
                this call blocks until socket is available for writing.
        Raises OSCClientError when timing out while waiting for the socket,
        or when the Client isn't connected to a remote server.
        """
        if not isinstance(msg, OSCMessage):
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
        ret = select.select([], [self._fd], [], timeout)
        try:
            ret[1].index(self._fd)
        except ValueError:  # was a bare 'except:'
            # for the very rare case this might happen
            raise OSCClientError("Timed out waiting for file descriptor")
        try:
            self.socket.sendall(msg.getBinary())
        except socket.error as e:
            # bug fix: exceptions are not indexable in Python 3 -- use e.errno
            if e.errno in (7, 65):  # 7 = 'no address associated with nodename', 65 = 'no route to host'
                raise e
            else:
                raise OSCClientError("while sending: %s" % str(e))
######
#
# FilterString Utility functions
#
######
def parseFilterStr(args):
    """Convert Message-Filter settings in '+<addr> -<addr> ...' format to a dict of the form
    { '<addr>':True, '<addr>':False, ... }
    Returns a list: ['<prefix>', filters]
    """
    filters = {}
    if isinstance(args, str):
        args = [args]
    prefix = None
    for arg in args:
        head = None
        # every '+'-separated chunk may carry '-'-separated deny addresses
        for plus_part in arg.split('+'):
            minus_parts = plus_part.split('-')
            allow = minus_parts.pop(0).strip()
            if len(allow):
                allow = '/' + allow.strip('/')
                if head is None and allow != "/*":
                    # first non-wildcard allow-address becomes the prefix
                    head = allow
                elif allow == '/*':
                    filters = {'/*': True}  # reset all previous filters
                else:
                    filters[allow] = True
            for deny in minus_parts:
                deny = deny.strip()
                if len(deny):
                    deny = '/' + deny.strip('/')
                    if deny == '/*':
                        filters = {'/*': False}  # reset all previous filters
                    else:
                        filters[deny] = False
        if prefix is None:
            prefix = head
    return [prefix, filters]
def getFilterStr(filters):
    """Return the given 'filters' dict as a list of
    '+<addr>' | '-<addr>' filter-strings
    """
    if not len(filters):
        return []
    # the first entry states the catch-all default
    if '/*' in filters:
        default = "+/*" if filters['/*'] else "-/*"
    else:
        # allow-all default when some address is explicitly denied
        default = "+/*" if False in list(filters.values()) else "-/*"
    result = [default]
    for addr, allowed in filters.items():
        if addr == '/*':
            continue
        result.append(('+' if allowed else '-') + addr)
    return result
# A translation-table for mapping OSC-address expressions to Python 're' expressions
OSCtrans = str.maketrans("{,}?","(|).")

def getRegEx(pattern):
    """Compile the given OSC address-pattern into a Python regex object.

    OSC wildcards map onto 're' syntax: '*' -> '.*', '?' -> '.',
    '{a,b}' -> '(a|b)'; literal '.', '(' and ')' are escaped first.
    """
    escaped = pattern.replace(".", r"\.").replace("(", r"\(").replace(")", r"\)")
    widened = escaped.replace("*", r".*")  # '*' matches any run of characters
    return re.compile(widened.translate(OSCtrans))
######
#
# OSCMultiClient class
#
######
class OSCMultiClient(OSCClient):
    """'Multiple-Unicast' OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
    This client keeps a dict of 'OSCTargets'. and sends each OSCMessage to each OSCTarget
    The OSCTargets are simply (host, port) tuples, and may be associated with an OSC-address prefix.
    the OSCTarget's prefix gets prepended to each OSCMessage sent to that target.
    """
    def __init__(self, server=None):
        """Construct a "Multi" OSC Client.
          - server: Local OSCServer-instance this client will use the socket of for transmissions.
          If none is supplied, a socket will be created.
        """
        super(OSCMultiClient, self).__init__(server)
        # subscribed OSCTargets, keyed by (host, port) tuple
        self.targets = {}
def _searchHostAddr(self, host):
"""Search the subscribed OSCTargets for (the first occurence of) given host.
Returns a (host, port) tuple
"""
try:
host = socket.gethostbyname(host)
except socket.error:
pass
for addr in list(self.targets.keys()):
if host == addr[0]:
return addr
raise NotSubscribedError((host, None))
    def _updateFilters(self, dst, src):
        """Update a 'filters' dict with values form another 'filters' dict:
         - src[a] == True and dst[a] == False: del dst[a]
         - src[a] == False and dst[a] == True: del dst[a]
         - a not in dst: dst[a] == src[a]

        NOTE: mutates BOTH arguments -- '/*' is popped from *src* and *dst*
        may be cleared; callers pass copies if they need the originals.
        """
        if '/*' in list(src.keys()): # reset filters
            dst.clear() # 'match everything' == no filters
            if not src.pop('/*'):
                dst['/*'] = False # 'match nothing'
        # conflicting entries cancel out (fall back to the default);
        # otherwise the src entry wins
        for (addr, bool) in list(src.items()):
            if (addr in list(dst.keys())) and (dst[addr] != bool):
                del dst[addr]
            else:
                dst[addr] = bool
def _setTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
- address ((host, port) tuple): | |
<reponame>fgonzalezvenegas/GridReconstructrion
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 16 18:02:09 2020
Pandapower !
@author: U546416
"""
#import mobility as mb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection, PatchCollection
import matplotlib.patches as ptc
#import polygons as pg
import matplotlib.patheffects as pe
import util
import pandapower as pp
import pandapower.topology as ppt
import pandapower.plotting as ppp
import pandapower.control as ppc
import pandapower.timeseries as ppts
import time as time
import datetime as dt
from ppgrid import *
from grid import *
import VoltVar
#%% OPTIONAL: Load processed data
# Reads the pre-processed MV grid description (lines, substation, MV/LV
# transformers, nodes) and the per-IRIS load profiles from CSV.
print('Loading MV grid data')
folder = r'c:\user\U546416\Documents\PhD\Data\MVGrids\Boriette\\'
subf = r'Data_Boriette_2020-06-28\\'
lines = pd.read_csv(folder + subf + 'MVLines.csv', engine='python', index_col=0)
ss = pd.read_csv(folder + subf + 'SS.csv', engine='python', index_col=0)
lv = pd.read_csv(folder + subf + 'MVLV.csv', engine='python', index_col=0)
nodes = pd.read_csv(folder + subf + 'Nodes.csv', engine='python', index_col=0)
# geometry columns are stored as stringified tuples -- re-hydrate them.
# NOTE(review): eval() on CSV cells assumes the data files are trusted
lines.ShapeGPS = lines.ShapeGPS.apply(eval)
nodes.xyGPS = nodes.xyGPS.apply(eval)
folder_profiles = r'c:\user\U546416\Documents\PhD\Data\MVGrids\Boriette\Profiles\\'
profiles_load = pd.read_csv(folder_profiles + r'profiles_iris.csv',
                            engine='python', index_col=0)
print('Loading tech data')
folder_tech = r'c:\user\U546416\Documents\PhD\Data\MVGrids\\'
file_tech = 'line_data_France_MV_grids.xlsx'
tech = pd.read_excel(folder_tech + file_tech, index_col=0)
#%% OPTIONAL: Load IRIS polygon data
print('Loading IRIS polygons')
# TODO: Load IRIS polygons
folder_iris = r'c:\user\U546416\Documents\PhD\Data\DataGeo\\'
file_iris = 'IRIS_all_geo_'+str(2016)+'.csv'
iris_poly = pd.read_csv(folder_iris+file_iris,
                        engine='python', index_col=0)
print('\tDone loading polygons')
#%% OPTIONAL: Load conso data
# per-IRIS consumption statistics and normalized (p.u.) load profiles
print('Loading Conso per IRIS')
folder_consodata = r'c:\user\U546416\Documents\PhD\Data\Mobilité\Data_Traitee\Conso'
iris = pd.read_csv(folder_consodata + r'\IRIS_enedis_2017.csv',
                   engine='python', index_col=0)
print('Loading profiles')
profiles_all = pd.read_csv(folder_consodata + r'\conso_all_pu.csv',
                           engine='python', index_col=0)
profiles_all.drop(['ENT', 'NonAffecte'], axis=1, inplace=True)
#%% Load pv data per departement
folder_pv = r'c:\user\U546416\Documents\PhD\Data\Conso-Reseau\Réseau'
dep_parc_prod = pd.read_csv(folder_pv + r'\parc-pv-departement.csv',
                            engine='python', sep=',')
# installed PV capacity [MW] aggregated per departement and PV type
pv_dep = dep_parc_prod.groupby(['DEP_CODE', 'TYPE_PV']).P_MW.sum()
#% load number of LV trafos per iris
folder_lv = r'c:\user\U546416\Documents\PhD\Data\Conso-Reseau\Réseau\\'
lv_iris = pd.read_csv(folder_lv+'Nb_BT_IRIS2016.csv',
                      engine='python', index_col=0)
#%% showing data
n0 = ss.node.iloc[0]
# Reducing polygons to consider to +- 0.5 degrees of latitude/longitude to data
# NOTE(review): 'dt' shadows the 'datetime as dt' module imported above --
# rename one of the two if datetime is needed later in the script
dt = 0.5
lonmin, lonmax, latmin, latmax = nodes.xGPS.min(), nodes.xGPS.max(), nodes.yGPS.min(), nodes.yGPS.max()
polys = iris_poly[(iris_poly.Lon > lonmin-dt) &
                  (iris_poly.Lon < lonmax+dt) &
                  (iris_poly.Lat > latmin-dt) &
                  (iris_poly.Lat < latmax+dt)][['IRIS_NAME', 'Polygon', 'Lon', 'Lat']]
polys.columns = ['Name', 'Polygon', 'xGPS', 'yGPS']
polys.Polygon = pd.Series(util.do_polygons(polys, plot=False))
#plot_quick(lines, lv, ss, nodes, GPS=True)
# interactive view to toggle lines on/off on the map
off = on_off_lines(lines, n0, ss=ss, lv=lv, GPS=True, geo=polys, tech=tech, nodes=nodes)
#%% Transforming data in pandapower DataFrames
# Either build the pandapower nets from the loaded CSV data, or load
# previously-saved nets from JSON.
v = util.input_y_n('Do you want to create the grid from loaded data (Y) or load existing grid (N):')
if v in ['Y', 'y', True]:
    print('\tCreating grids')
    n0 = ss.node.iloc[0]
    #% Create grid
    print('\t\tCreating base grid')
    net = create_pp_grid(nodes, lines, tech, lv, n0=0,
                         hv=True, ntrafos_hv=2, vn_kv=20,
                         tanphi=0.3, hv_trafo_controller=True,
                         verbose=True)
    # Check connectedness
    # Check unsupplied buses from External grid
    ub = ppt.unsupplied_buses(net)
    if len(ub) == 0:
        print('Connectedness ok')
    else:
        print('There are Non supplied buses!')
    print('Running')
    # NOTE(review): 'import time as time' binds the *module*, so 'time()'
    # is not callable unless a star-import above rebinds the name --
    # 'time.time()' may be intended here and two lines below; confirm
    t = time()
    pp.runpp(net, run_control=True)
    print('Run! dt={:.2f}'.format(time()-t))
    plot_v_profile(net)
    plt.title('Voltage profile')
    # TODO: Find out why time series of Pandapower doesnt work
    #%% Create Net with RES
    print('\t\tCreating RES grid')
    net_res = create_pp_grid(nodes, lines, tech, lv, n0=0,
                             hv=True, ntrafos_hv=2, vn_kv=20,
                             tanphi=0.3, verbose=True)
    # IRIS in the net
    ies = net_res.load.zone.unique()
    if not 'iris' in net_res.bus:
        if not 'Geo' in nodes:
            assign_polys(nodes, polys)
        net_res.bus['iris'] = list(nodes.Geo.values) + [0]
    # ADDING PV
    # Region of net
    reg = int(iris.REGION_CODE[ies].unique()[0])
    ies_reg = iris[iris.REGION_CODE == reg].index
    # PV growth factor
    national_target = 35000 # MW of PV installed
    current_pv = dep_parc_prod.P_MW.sum() * 1.2 # times 1.2 because i only have Enedis data
    growth = national_target / current_pv
    # Regional PV targets [MW]
    pv_reg = dep_parc_prod.groupby(['REG_CODE', 'TYPE_PV']).P_MW.sum() * growth
    # RESIDENTIAL
    # Computing penetration of RES PV
    pv_res_cap = 3 #kW
    # number of individual (non-collective) houses in the region
    nb_res_reg = (iris.Nb_RES[ies_reg] * (100 - iris.Taux_logements_collectifs[ies_reg])/100).sum()
    RES_penetration = pv_reg[reg, 'home_rooftop'] / (pv_res_cap/1000 * nb_res_reg)
    # Add home rooftop PV
    add_res_pv_rooftop(net_res, pv_penetration=RES_penetration,
                       iris_data=iris, lv_per_geo=lv_iris.Nb_BT, pv_cap_kw=pv_res_cap)
    # COMMERCIAL
    # Add commercial rooftop PV
    pv_comm_cap = 0.120 #MW
    # Computing potential sites per region and in the SS
    # Parameters for proxy of commercial sites
    tertiary_threshold = 600 #MWh/year
    industry_threshold = 600 #MWh/year
    agri_multiplier = 2
    # Potential sites per iris
    ncomm = ((iris.Conso_Tertiaire // tertiary_threshold).round(decimals=0) +
             (iris.Conso_Industrie // industry_threshold).round(decimals=0) +
             iris.Nb_Agriculture * agri_multiplier)
    # target MW of SS based on Ratio of potential sites in SS vs region
    ncomm_ss = ncomm[ies] * net_res.load.zone.value_counts() / lv_iris.Nb_BT[ies]
    target_comm = pv_reg[reg, 'commercial_rooftop'] * ncomm_ss.sum() / ncomm[ies_reg].sum()
    # Random buses to be selected to host commercial PV
    buses = np.concatenate([np.random.choice(net_res.load[net_res.load.zone == i].bus,
                                             size=int(round(ncomm_ss[i],0)))
                            for i in ncomm_ss.index])
    add_random_pv_rooftop(net_res, mean_size=pv_comm_cap, std_dev=pv_comm_cap/6,
                          total_mw=target_comm, buses=buses, replace=True,
                          name='Comm_PV_', sgtype='Comm_PV')
    # Add solar farms
    # Target MW of solar farms based on ratio of Rural communes / total communes in the region
    pv_farm_cap = 2 #MW
    # Number of rural communes in the region
    ncommunes_region = iris.loc[ies_reg][iris.IRIS_TYPE[ies_reg] == 'Z'].COMM_CODE.nunique()
    # We'll put the solar farms far from the center of the city, only in rural IRIS
    # number of rural communes in the studies area
    ies_rural = iris.loc[ies][iris.IRIS_TYPE[ies] == 'Z'].index
    ncommunes_SS = len(iris.COMM_CODE[ies_rural].unique())
    target_farms = pv_reg[reg, 'solar_farm'] * ncommunes_SS / ncommunes_region
    # Selecting buses of rural communes
    buses = net_res.bus[net_res.bus.iris.isin(ies_rural)].index
    add_random_pv_rooftop(net_res, mean_size=pv_farm_cap, std_dev=pv_farm_cap/6, total_mw=target_farms,
                          buses=buses, replace=True, name='Solar_PV_', sgtype='Farm_PV')
#%% Alternative Load existing grid:
else:
    # NOTE(review): folder_grids is assigned twice with the same value --
    # one of the two assignments is redundant
    folder_grids = r'c:\user\U546416\Documents\PhD\Data\MVGrids\Boriette\PPGrid\\'
    print('\t loading grids from folder {}'.format(folder_grids))
    # load predefined grid
    folder_grids = r'c:\user\U546416\Documents\PhD\Data\MVGrids\Boriette\PPGrid\\'
    net = pp.from_json(folder_grids + 'base_grid.json')
    net_res = pp.from_json(folder_grids + 'res_grid.json')
    #net_res_ev = pp.from_json(folder_grids + 'res_ev_grid.json')
#%% Compute PV inst cap per IRIS // energy produced
ies = net_res.load.zone.unique()
# Compute installed RES per IRIS
conso_prod_iris = {}
for i in net_res.bus.iris.unique():
if not np.isnan(i):
bss = net_res.bus[net_res.bus.iris == i].index
conso_prod_iris[int(i)] = [net_res.load[net_res.load.bus.isin(bss)].p_mw.sum(),
net_res.sgen[(net_res.sgen.type == 'RES_PV') & (net_res.sgen.bus.isin(bss))].p_mw.sum(),
net_res.sgen[(net_res.sgen.type == 'Comm_PV') & (net_res.sgen.bus.isin(bss))].p_mw.sum(),
net_res.sgen[(net_res.sgen.type == 'Farm_PV') & (net_res.sgen.bus.isin(bss))].p_mw.sum()]
conso_prod_iris = pd.DataFrame(conso_prod_iris, index=['Conso_MWh', 'PV_Home_MW','PV_Commercial_MW', 'PV_farm_MW']).T
# Plot installed PV per IRIS
pls = util.list_polygons(polys.Polygon, ies)
f, axs = plt.subplots(1,3)
cmap = plt.get_cmap('plasma')
for i, ax in enumerate(axs):
maxpv = conso_prod_iris.iloc[:,1:].max().max()
colors = cmap(conso_prod_iris.iloc[:,i+1].loc[ies]/maxpv)
util.plot_polygons(pls, ax=ax, color=colors, alpha=0.8)
plot_lines(lines, col='ShapeGPS', ax=ax, color='k', linewidth=0.5)
ax.set_title('{}\nTotal {:.1f} MW'.format(conso_prod_iris.columns[i+1], conso_prod_iris.iloc[:,i+1].sum()))
f.set_size_inches(12.04, 4.76)
f.tight_layout()
# TODO: add legend
dv_pv = np.round(maxpv/0.5,0)*0.5
tranches = np.arange(0,maxpv+0.1, maxpv/7)
labels = ['{:.2f} MW'.format(i) for i in tranches]
colors = cmap(tranches/maxpv)
util.do_labels(labels, colors, ax, f=True)
#f.title('PV installed capacity per IRIS')
# Plot type IRIS
c_t = {'A':'r', 'D':'gray', 'Z':'g', 'H':'b'}
colors = [c_t[iris_poly.IRIS_TYPE[i]] for i in ies]
ax=util.plot_polygons(pls, color=colors)
plot_lines(lines, col='ShapeGPS', ax=ax, color='k', linewidth=0.5)
nodes_farms = net_res.sgen[net_res.sgen.type=='Farm_PV'].bus
plt.plot(net_res.bus_geodata.x[nodes_farms],net_res.bus_geodata.y[nodes_farms],'Xr', markersize=5, label='PV farms')
util.do_labels(['Activité', 'Divers', 'Rural', 'Habitation'], list(c_t.values()), ax=ax)
plt.title('Type IRIS')
# Plot share of PV per feeder
plt.subplots()
# Keep only valid feeder labels: drop NaN (f != f) and None entries
fs = [f for f in net_res.bus.Feeder.unique() if (f==f and (not (f is None)))]
fs = np.sort(fs)
# Aggregate installed load and PV generation (MW) per feeder
load_pv_feeder = {}
for f in fs:
    bs = net_res.bus[net_res.bus.Feeder == f].index
    load_pv_feeder[f] = {'load': net_res.load[net_res.load.bus.isin(bs)].p_mw.sum(),
                         'PV_gen': net_res.sgen[net_res.sgen.bus.isin(bs)].p_mw.sum()}
load_pv_feeder = pd.DataFrame(load_pv_feeder).T
plt.pie(load_pv_feeder.PV_gen, labels=load_pv_feeder.index)
plt.title('Share of total PV installed capacity among feeders. \nTotal {:.2f} MW'.format(load_pv_feeder.PV_gen.sum()))
plt.subplots()
plt.bar(load_pv_feeder.PV_gen.index, load_pv_feeder.PV_gen)
plt.title('Installed PV capacity per feeder. \nTotal {:.2f} MW'.format(load_pv_feeder.PV_gen.sum()))
plt.xticks(rotation=90)
plt.ylabel('PV capacity [MW]')
#plt.legend()
# Map of feeder lines, one colour per feeder, line width proportional to
# the feeder's installed PV capacity (MW)
colors = ['r', 'g', 'b', 'gray', 'c', 'y', 'm', 'darkblue', 'purple', 'brown',
          'maroon', 'olive', 'deeppink', 'blueviolet', 'darkturquoise', 'darkorange']
f,ax = plt.subplots()
for i, f in enumerate(fs):
    # Only labels starting with 'F' are feeders (skips e.g. the '0SS' substation entry)
    if f[0]=='F':
        plot_lines(lines[lines.Feeder==f], color=colors[i%len(colors)], linewidth=load_pv_feeder.PV_gen[f]/1,
                   label=f, col='ShapeGPS', ax=ax)
plt.legend()
## Saving grids
# Interactive confirmation before serialising both pandapower nets to JSON
v = util.input_y_n('Do you want to save grids created grids?')
if v in ['Y', 'y', True]:
    pp.to_json(net, folder + r'PPGrid/base_grid.json')
    pp.to_json(net_res, folder + r'PPGrid/res_grid_voltvar.json')
#%% Run base grid time series
v = util.input_y_n('Run base grid time series?')
if v in ['Y', 'y', True]:
    # Creating profiler
    profiler = Profiler(net=net, profiles_db=profiles_load)
    # Attach each load's 'scaling' variable to the profile named after its zone
    for n in net.load.zone.unique():
        if not n is None:
            profiler.add_profile_idx('load', net.load[net.load.zone==n].index, variable='scaling', profile=str(n))
    # Setting iterator
    time_steps=profiler.profiles_db.index
    iterator = Iterator(net=net, profiler=profiler)
    of = folder + r'\Results_Base'  # NOTE(review): backslash separator — Windows-only path?
    # Only the first 10 time steps are simulated here — presumably a smoke test; TODO confirm
    iterator.iterate(time_steps=time_steps[0:10], save=True, outputfolder=of, ultraverbose=False)
#%% Plot some results - base case
# critical V:
# For each zone, find the bus farthest (per dist_to_node_nx) from node n0
farther_bus = pd.Series(index=[i for i in net.bus.zone.unique() if not i is None])
d = dist_to_node_nx(lines, n0)
for i in farther_bus.index:
    farther_bus[i] = int(d[net.bus[net.bus.zone == i].index].idxmax())
# Critical feeders
fs = ['0SS', 'F06', 'F16', 'F17', 'F18', 'F19']
# Time step with the lowest voltage anywhere in the grid
critday = iterator.ow.res_v.min(axis=1).idxmin()
# Net load at transformer
f, ax = plt.subplots()
# [::2] plots every 2nd step — presumably down-sampling half-hourly data; TODO confirm
ax.plot(iterator.ow.global_res.TrafoOut_MW[::2])
plt.xticks(np.arange(0,8760,8761/6), rotation=45)
ax.set_xticklabels(['jan', 'mar', 'may', 'jul', 'sep', 'nov'])
plt.axhline(y=0, linestyle='--', color='gray', linewidth=0.8)
plt.xlim(0,8760)
plt.title('Net load at transformer')
# plot V whole year
# One trace per critical feeder, at that feeder's farthest bus
f, ax = plt.subplots()
for f in fs:
    plt.plot(iterator.ow.res_v[farther_bus[f]][::2], label=f)
plt.legend()
plt.xticks(np.arange(0,8760,8761/6), rotation=45)
ax.set_xticklabels(['jan', 'mar', 'may', 'jul', 'sep', 'nov'])
plt.xlim(0,8760)
plt.title('Yearly voltage at selected buses')
# Plot V profiles
# Set critical day as res: inject the worst-voltage hour into net.res_bus
critday = iterator.ow.res_v.min(axis=1).idxmin()
net.res_bus.vm_pu = iterator.ow.res_v.loc[critday]
f, ax = plt.subplots()
plot_v_profile(net, ax=ax)
plt.title('Voltage at critical hour, {}'.format(critday))
# Same plot at the hour of maximum total load
critpower = iterator.ow.global_res.TotLoad_MW.idxmax()
net.res_bus.vm_pu = iterator.ow.res_v.loc[critpower]
f, ax = plt.subplots()
plot_v_profile(net, ax=ax)
plt.title('Voltage at max load, {}'.format(critpower))
#%% Adding profiles RES
# loading solar profiles => Download from renewables.ninja
# NOTE(review): hard-coded local Windows path — parameterise before reuse
folder_profiles = r'c:\user\U546416\Documents\PhD\Data\MVGrids\Boriette\Profiles\\'
prof_solar_farm = pd.read_csv(folder_profiles + 'solar_farm.csv',
                              engine='python', skiprows=3)
prof_solar_roof = pd.read_csv(folder_profiles + 'solar_roof.csv',
engine='python', | |
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_SCCHC.append({S[i], C[j], C[k], H[l], C[m]})
STRAIGHT_SCCHC.append({S[9], C[10], C[11], H[12], C[0]})
def _make_straights(c1, c2, c3, c4, c5):
    """Return all 5-card straights for one suit pattern.

    Each argument is the 13-card list of one suit (index = rank).  The
    result is a list of ten 5-card sets: the nine consecutive runs
    ``{c1[i], c2[i+1], c3[i+2], c4[i+3], c5[i+4]}`` for ``i`` in 0..8,
    plus the top straight ``{c1[9], c2[10], c3[11], c4[12], c5[0]}``.

    The original quintuple-nested O(12**5) scan only ever matched index
    chains j = i+1, k = i+2, l = i+3, m = i+4 with m <= 12, i.e. exactly
    i in range(9); a single loop therefore produces the same sets in the
    same order at a tiny fraction of the cost.
    """
    runs = [{c1[i], c2[i + 1], c3[i + 2], c4[i + 3], c5[i + 4]}
            for i in range(9)]
    runs.append({c1[9], c2[10], c3[11], c4[12], c5[0]})
    return runs

# Straight tables per suit pattern (S/C/H/D = the four suit card lists).
STRAIGHT_SCCHH = _make_straights(S, C, C, H, H)
STRAIGHT_SCCHD = _make_straights(S, C, C, H, D)
STRAIGHT_SCCDS = _make_straights(S, C, C, D, S)
STRAIGHT_SCCDC = _make_straights(S, C, C, D, C)
STRAIGHT_SCCDH = _make_straights(S, C, C, D, H)
STRAIGHT_SCCDD = _make_straights(S, C, C, D, D)
STRAIGHT_SCHSS = _make_straights(S, C, H, S, S)
STRAIGHT_SCHSC = _make_straights(S, C, H, S, C)
STRAIGHT_SCHSH = _make_straights(S, C, H, S, H)
STRAIGHT_SCHSD = _make_straights(S, C, H, S, D)
STRAIGHT_SCHCS = _make_straights(S, C, H, C, S)
STRAIGHT_SCHCC = _make_straights(S, C, H, C, C)
STRAIGHT_SCHCH = _make_straights(S, C, H, C, H)
STRAIGHT_SCHCD = _make_straights(S, C, H, C, D)
STRAIGHT_SCHHS = _make_straights(S, C, H, H, S)
STRAIGHT_SCHHC = _make_straights(S, C, H, H, C)
STRAIGHT_SCHHH = _make_straights(S, C, H, H, H)
STRAIGHT_SCHHD = _make_straights(S, C, H, H, D)
STRAIGHT_SCHDS = _make_straights(S, C, H, D, S)
STRAIGHT_SCHDC = _make_straights(S, C, H, D, C)
STRAIGHT_SCHDH = _make_straights(S, C, H, D, H)
STRAIGHT_SCHDD = []
for i in range(13):
for j in range(1, | |
ID
"""
group_id = group_id or self.id_group(role)
try:
group_id = int(group_id)
except:
group_id = self.id_group(group_id) # interpret group_id as a role
if not user_id and self.user:
user_id = self.user.id
membership = self.settings.table_membership
record = membership(user_id = user_id,
group_id = group_id,
pe_id = entity,
)
if record:
return record.id
else:
membership_id = membership.insert(group_id = group_id,
user_id = user_id,
pe_id = entity,
)
self.update_groups()
self.log_event(self.messages.add_membership_log,
{"user_id": user_id,
"group_id": group_id,
})
return membership_id
# -------------------------------------------------------------------------
def verify_email(self,
next = DEFAULT,
log = DEFAULT,
):
"""
Action when user clicks the link in the verification email
"""
settings = self.settings
request = current.request
# Customise the resource
customise = current.deployment_settings.customise_resource("auth_user")
if customise:
customise(request, "auth_user")
key = request.args[-1]
utable = settings.table_user
query = (utable.registration_key == key)
user = current.db(query).select(limitby = (0, 1),
).first()
if not user:
redirect(settings.verify_email_next)
if log == DEFAULT:
log = self.messages.verify_email_log
if next == DEFAULT:
next = settings.verify_email_next
approved = self.s3_verify_user(user)
if approved:
# Log them in
user = Storage(utable._filter_fields(user, id=True))
self.login_user(user)
if log:
self.log_event(log, user)
redirect(next)
# -------------------------------------------------------------------------
    def profile(self,
                next = DEFAULT,
                onvalidation = DEFAULT,
                onaccept = DEFAULT,
                log = DEFAULT,
                ):
        """
            Returns a form that lets the user change his/her profile

            Patched for S3 to use s3_mark_required

            Args:
                next: URL to redirect to after successful update
                      (defaults to settings.profile_next)
                onvalidation: form validation callback
                onaccept: callback after the form has been accepted
                log: audit log message template
        """

        if not self.is_logged_in():
            redirect(self.settings.login_url)

        messages = self.messages
        settings = self.settings
        utable = settings.table_user
        passfield = settings.password_field
        # Password is changed via a separate workflow, not on the profile form
        utable[passfield].writable = False

        request = current.request
        session = current.session
        deployment_settings = current.deployment_settings

        # Users should not be able to change their Org affiliation
        # - also hide popup-link to create a new Org (makes
        #   no sense here if the field is read-only anyway)
        utable.organisation_id.writable = False
        utable.organisation_id.comment = None

        ## Only allowed to select Orgs that the user has update access to
        #utable.organisation_id.requires = \
        #    current.s3db.org_organisation_requires(updateable = True)

        # Resolve DEFAULT arguments from request vars / deployment settings
        if next == DEFAULT:
            next = request.get_vars._next \
                or request.post_vars._next \
                or settings.profile_next
        if onvalidation == DEFAULT:
            onvalidation = settings.profile_onvalidation
        if onaccept == DEFAULT:
            onaccept = settings.profile_onaccept
        if log == DEFAULT:
            log = messages.profile_log
        labels = s3_mark_required(utable)[0]

        formstyle = deployment_settings.get_ui_formstyle()
        current.response.form_label_separator = ""
        form = SQLFORM(utable,
                       self.user.id,
                       fields = settings.profile_fields,
                       labels = labels,
                       hidden = {"_next": next},
                       showid = settings.showid,
                       submit_button = messages.profile_save_button,
                       delete_label = messages.delete_label,
                       upload = settings.download_url,
                       formstyle = formstyle,
                       separator = ""
                       )
        form.add_class("auth_profile")
        if deployment_settings.get_auth_openid():
            # Append the user's OpenID list below the profile form
            from gluon.contrib.login_methods.openid_auth import OpenIDAuth
            openid_login_form = OpenIDAuth(self)
            form = DIV(form, openid_login_form.list_user_openids())
        if form.accepts(request, session,
                        formname = "profile",
                        onvalidation = onvalidation,
                        hideerror = settings.hideerror,
                        ):
            #self.s3_auth_user_register_onaccept(form.vars.email, self.user.id)
            # Keep the session user record in sync with the DB update
            self.user.update(utable._filter_fields(form.vars))
            session.flash = messages.profile_updated
            if log:
                self.log_event(log, self.user)
            callback(onaccept, form)
            # Normalise 'next' before redirecting (string, list or URL template)
            if not next:
                next = self.url(args = request.args)
            elif isinstance(next, (list, tuple)): ### fix issue with 2.6
                next = next[0]
            elif next and not next[0] == "/" and next[:4] != "http":
                next = self.url(next.replace("[id]", str(form.vars.id)))
            redirect(next)
        return form
# -------------------------------------------------------------------------
@property
def user_represent(self):
"""
Common auth_UserRepresent instance for meta-fields (lazy property)
Returns:
S3Represent instance
"""
represent = self._user_represent
if represent is None:
if current.deployment_settings.get_ui_auth_user_represent() == "name":
show_name = True
show_email = False
else:
show_name = False
show_email = True
represent = current.s3db.auth_UserRepresent(show_name = show_name,
show_email = show_email,
show_link = False,
)
self._user_represent = represent
return represent
# -------------------------------------------------------------------------
    def configure_user_fields(self, pe_ids=None):
        """
            Configure User Fields - for registration, user administration & profile

            Args:
                pe_ids: an optional list of pe_ids for the Org Filter
                        i.e. org_admin coming from admin.py/user()
        """

        from .s3validators import IS_ONE_OF

        T = current.T
        db = current.db
        s3db = current.s3db
        request = current.request
        messages = self.messages
        cmessages = current.messages
        settings = self.settings
        deployment_settings = current.deployment_settings

        # Use multi-select widgets for dropdowns if so configured
        if deployment_settings.get_ui_multiselect_widget():
            from .s3widgets import S3MultiSelectWidget
            multiselect_widget = True
        else:
            multiselect_widget = False

        utable = self.settings.table_user

        utable.password.label = T("Password") #messages.label_password

        # First name is always mandatory
        first_name = utable.first_name
        first_name.label = T("First Name") #messages.label_first_name
        first_name.requires = IS_NOT_EMPTY(error_message = messages.is_empty)

        # Last name is mandatory only if so configured for the locale
        last_name = utable.last_name
        last_name.label = T("Last Name") #messages.label_last_name
        if deployment_settings.get_L10n_mandatory_lastname():
            last_name.notnull = True
            last_name.requires = IS_NOT_EMPTY(error_message = messages.is_empty)

        # Enforce uniqueness of the login field when it is not the email
        userfield = settings.login_userfield
        if userfield != "email":
            utable[userfield].requires = \
                IS_NOT_IN_DB(db, "%s.%s" % (utable._tablename,
                                            userfield,
                                            ))

        # Email: valid, lower-cased and unique
        email = utable.email
        email.label = T("Email") #messages.label_email
        email.requires = [IS_EMAIL(error_message = messages.invalid_email),
                          IS_LOWER(),
                          IS_NOT_IN_DB(db,
                                       "%s.email" % utable._tablename,
                                       error_message = messages.duplicate_email,
                                       )
                          ]

        # Language preference: only exposed if the site offers more than one
        language = utable.language
        languages = deployment_settings.get_L10n_languages()
        if len(languages) > 1:
            language.label = T("Language")
            language.comment = DIV(_class = "tooltip",
                                   _title = "%s|%s" % (T("Language"),
                                                       T("The language you wish the site to be displayed in.")
                                                       ),
                                   )
            requires = IS_ISO639_2_LANGUAGE_CODE(sort = True,
                                                 translate = True,
                                                 zero = None,
                                                 )
            language.represent = requires.represent
            language.requires = requires
            # Default the profile language to the one currently active
            language.default = T.accepted_language
            if multiselect_widget:
                language.widget = S3MultiSelectWidget(multiple=False)
        else:
            # Single-language site => fix the value and hide the field
            language.default = list(languages.keys())[0]
            language.readable = language.writable = False

        utable.registration_key.label = messages.label_registration_key
        #utable.reset_password_key.label = messages.label_registration_key

        # Organisation
        is_admin = self.s3_has_role("ADMIN")
        if is_admin:
            show_org = deployment_settings.get_auth_admin_sees_organisation()
        else:
            show_org = deployment_settings.get_auth_registration_requests_organisation()
        if show_org:
            if pe_ids and not is_admin:
                # Filter orgs to just those belonging to the Org Admin's Org
                # & Descendants (or realms for which they are Org Admin):
                filterby = "pe_id"
                filter_opts = pe_ids
                # If the current user can only register users for certain orgs,
                # then they must not leave this field empty:
                org_required = True
            else:
                filterby = None
                filter_opts = None
                org_required = deployment_settings.get_auth_registration_organisation_required()
            organisation_id = utable.organisation_id
            organisation_id.label = messages.label_organisation_id
            organisation_id.readable = organisation_id.writable = True
            organisation_id.default = deployment_settings.get_auth_registration_organisation_default()
            org_represent = s3db.org_organisation_represent
            organisation_id.represent = org_represent

            requires = IS_ONE_OF(db, "org_organisation.id",
                                 org_represent,
                                 filterby = filterby,
                                 filter_opts = filter_opts,
                                 orderby = "org_organisation.name",
                                 sort = True,
                                 )
            if org_required:
                organisation_id.requires = requires
            else:
                organisation_id.requires = IS_EMPTY_OR(requires)

            # Optionally allow creating a new Org from the registration form
            if deployment_settings.get_auth_registration_organisation_link_create():
                from s3layouts import S3PopupLink
                org_crud_strings = s3db.crud_strings["org_organisation"]
                organisation_id.comment = S3PopupLink(c = "org",
                                                      f = "organisation",
                                                      label = org_crud_strings.label_create,
                                                      title = org_crud_strings.title_list,
                                                      )
                #from .s3widgets import S3OrganisationAutocompleteWidget
                #organisation_id.widget = S3OrganisationAutocompleteWidget()
                #organisation_id.comment = DIV(_class="tooltip",
                #                              _title="%s|%s" % (T("Organization"),
                #                                                cmessages.AUTOCOMPLETE_HELP))

            if multiselect_widget:
                organisation_id.widget = S3MultiSelectWidget(multiple=False)

        # Organisation Group
        if deployment_settings.get_auth_registration_requests_organisation_group():
            org_group_id = utable.org_group_id
            org_group_id.label = messages.label_org_group_id
            org_group_id.readable = org_group_id.writable = True
            org_group_represent = s3db.org_group_represent
            org_group_id.represent = org_group_represent
            requires = IS_ONE_OF(db, "org_group.id",
                                 org_group_represent,
                                 # @ToDo: Filter org groups to just those belonging to the Org Admin's Org
                                 # @ToDo: Dynamically filter groups to just those that the selected Org is a member of
                                 #filterby = filterby,
                                 #filter_opts = filter_opts,
                                 orderby = "org_group.name",
                                 sort = True,
                                 )
            if deployment_settings.get_auth_registration_organisation_group_required():
                org_group_id.requires = requires
            else:
                org_group_id.requires = IS_EMPTY_OR(requires)
            #from s3layouts import S3PopupLink
            #ogroup_crud_strings = s3db.crud_strings["org_group"]
            #org_group_id.comment = S3PopupLink(c = "org",
            #                                   f = "group",
            #                                   label = ogroup_crud_strings.label_create,
            #                                   title = ogroup_crud_strings.title_list,
            #                                   )
            if multiselect_widget:
                org_group_id.widget = S3MultiSelectWidget(multiple=False)

        # Site
        if deployment_settings.get_auth_registration_requests_site():
            site_id = request.get_vars.get("site_id", None)
            field = utable.site_id
            field.label = deployment_settings.get_org_site_label()
            site_represent = s3db.org_site_represent
            field.represent = site_represent
            if site_id:
                # Site pre-selected via URL var => show it read-only
                field.default = site_id
                field.readable = True
            else:
                field.readable = field.writable = True
                #field.default = deployment_settings.get_auth_registration_site_id_default()
                site_required = deployment_settings.get_auth_registration_site_required()
                if show_org:
                    # Sites get filtered client-side by the selected Org
                    from .s3validators import IS_ONE_OF_EMPTY
                    requires = IS_ONE_OF_EMPTY(db, "org_site.site_id",
                                               site_represent,
                                               orderby = "org_site.name",
                                               sort = True,
                                               )
                    if site_required:
                        site_optional = ""
                    else:
                        site_optional = ''',
 'optional': true'''
                    current.response.s3.jquery_ready.append('''
$.filterOptionsS3({
 'trigger':'organisation_id',
 'target':'site_id',
 'lookupField':'site_id',
 'lookupResource':'site',
 'lookupURL':S3.Ap.concat('/org/sites_for_org.json/')%s
})''' % site_optional)
                else:
                    requires = IS_ONE_OF(db, "org_site.site_id",
                                         site_represent,
                                         orderby = "org_site.name",
                                         sort = True,
                                         )
                #from .s3widgets import S3SiteAutocompleteWidget
                #field.widget = S3SiteAutocompleteWidget()
                field.comment = DIV(_class = "tooltip",
                                    _title = "%s|%s" % (T("Facility"),
                                                        T("Select the default site.")
                                                        ),
                                    )
                if site_required:
                    field.requires = requires
                else:
                    field.requires = IS_EMPTY_OR(requires)

        # Link User to Organisation (as staff, volunteer, or member)
        if any(m in request.args for m in ("profile", "user_profile")):
            # Irrelevant in personal profile
            link_user_to_opts = False
        else:
            link_user_to_opts = deployment_settings.get_auth_registration_link_user_to()
        if link_user_to_opts:
            link_user_to = utable.link_user_to
            link_user_to_default = deployment_settings.get_auth_registration_link_user_to_default()
            req_vars = request.vars
            # URL vars like link_user_to_staff force the respective link type
            # NOTE(review): this appends to the list returned by the settings
            # getter — confirm that getter returns a fresh list per call
            for hrtype in ["staff", "volunteer", "member"]:
                if "link_user_to_%s" % hrtype in req_vars:
                    link_user_to_default.append(hrtype)
            if link_user_to_default:
                # Pre-determined link types => no need to expose the field
                link_user_to.default = link_user_to_default
            else:
                # Let the user choose how to register
                link_user_to.readable = link_user_to.writable = True
                link_user_to.label = T("Register As")
                link_user_to.requires = IS_IN_SET(link_user_to_opts,
                                                  multiple = True
                                                  )
                link_user_to.represent = lambda ids: \
                    ids and ", ".join([str(link_user_to_opts[id]) for id in ids]) or cmessages["NONE"]
                #if multiselect_widget:
                #    link_user_to.widget = S3MultiSelectWidget()
                #else:
                link_user_to.widget = SQLFORM.widgets.checkboxes.widget
                link_user_to.comment = DIV(_class = "tooltip",
                                           _title = "%s|%s" % (link_user_to.label,
                                                               T("Will create and link your user account to the following records")
                                                               ),
                                           )
# -------------------------------------------------------------------------
def s3_import_prep(self, tree):
"""
Looks up Pseudo-reference Integer fields from Names, e.g.:
| |
)
# self.plot_enc_attn.append(plot_attn_chunk)
# --------------------
hs_pad_temp1 = hs_pad_temp
mem_size = self.hwsize//self.compressive_rate
if self.conv1d2decoder is not None and not self.usespk_version2:
hs_pad_temp_mask = torch.ones(hs_pad_temp.size(0),1,hs_pad_temp.size(1)).type(memsh_mask.dtype).to(memsh_mask.device)
hs_pad_mask.append(torch.cat((memsh_mask[:,:,-self.memspeech_size-mem_size:-mem_size]
,hs_pad_temp_mask),dim=2))
hs_pad_temp = torch.cat((last_memsh[:,-self.memspeech_size:,:],hs_pad_temp),dim=1) # batch, mem+windows_size, adim
memsph_tmp = self.conv1d2decoder(hs_pad_temp1.transpose(-1,-2)).transpose(-1,-2)
last_memsh = torch.cat((last_memsh,memsph_tmp),dim=1)
# analysis
self.whole_hs = self.whole_hs + (hs_pad_temp1,)
elif self.alllayer2ctc is None and self.conv1d2decoder is None:
hs_pad_ctc = hs_pad_ctc + (hs_pad_temp1,)
if self.usespk2decoder:
if self.usespk_version2 and self.conv1d2decoder is not None:
hs_pad_temp_mask = torch.ones(hs_pad_temp.size(0),1,hs_pad_temp.size(1)).type(memsh_mask.dtype).to(memsh_mask.device)
hs_pad_mask.append(torch.cat((memspk_mask[:,:,-self.memspeaker_size:]
,memsh_mask[:,:,-self.memspeech_size-mem_size:-mem_size]
,hs_pad_temp_mask),dim=2))
hs_pad_temp = torch.cat((memspk[:,-self.memspeaker_size:,:],last_memsh[:,-self.memspeech_size:,:],hs_pad_temp),dim=1)
mem_whole = self.conv1d2decoder(hs_pad_temp1.transpose(-1,-2)).transpose(-1,-2)
mem_softlin = self.soft_linear(mem_whole).view(mem_whole.size(0),mem_whole.size(1),2,self.adim)
mem_softmax = torch.nn.functional.softmax(mem_softlin,dim=2)
memspk_tmp = self.spklinear2em(mem_softmax[:,:,0,:]*mem_whole)
memsph_tmp = self.shlinear2em(mem_softmax[:,:,1,:]*mem_whole)
last_memsh = torch.cat((last_memsh,memsph_tmp),dim=1)
memspk = torch.cat((memspk, memspk_tmp) ,dim=1)
memspk_mask = torch.cat((memspk_mask
, torch.ones(xs_pad.size(0),1,mem_size).type(memspk_mask.dtype).to(memspk_mask.device)),dim=2)
else:
hs_pad_mask[-1] = torch.cat((memspk_mask[:,:,-self.memspeaker_size:],hs_pad_mask[-1]),dim=2)
hs_pad_temp = torch.cat((memspk[:,-self.memspeaker_size:,:],hs_pad_temp),dim=1)
memspk_mask = torch.cat((memspk_mask
, torch.ones(xs_pad.size(0),1,mem_size).type(memspk_mask.dtype).to(memspk_mask.device)),dim=2)
memspk_tmp = self.speaker2decoder(hs_pad_temp1.transpose(-1,-2)).transpose(-1,-2)
# memspk_tmp = torch.mean(memspk_tmp, dim=1).unsqueeze(1)
memspk = torch.cat((memspk, memspk_tmp) ,dim=1)
if self.memattnloss_decoder:
x_mems.append(self.encoder.encoders[-1].x_mem.detach()) #use detach no grad
commems.append(torch.cat((memspk_tmp,memsph_tmp),dim=1))
hs_pad = hs_pad + (hs_pad_temp.unsqueeze(1),) # batch, 1, windows_size, adim
mem_loss_total = mem_loss_total + mem_loss if mem_loss_total is not None else mem_loss
# self.plot_encoders_attn(plot_enc_attn) # plot attn
if self.conv1d2decoder is not None:
self.whole_hs = torch.cat(self.whole_hs,dim=1)
memsh_final = self.memsh_final = last_memsh[:,self.memspeech_size:,:] #anaylsis
hs_pad_mask = torch.stack(hs_pad_mask,dim=1)
hs_len = memsh_mask.view(memsh_final.size(0), -1).sum(1)
elif self.alllayer2ctc is None :
# memsh_final = memsh[-1][:,self.memspeech_size:,:]
# hs_len = memsh_mask.view(memsh_final.size(0), -1).sum(1)
memsh_final = torch.cat(hs_pad_ctc,dim=1) # batch, all_window, adim
hs_len = pred_len*self.hwsize
# memsh_final = memsh[-1][:,self.memspeech_size:,:] # torch.mean(memsh,dim=0)[:,self.memspeech_size:,:] last layer
else:
memsh_final = torch.cat(memsh,dim=-1)[:,self.memspeech_size:,:] # batch, mem, dim
memsh_final = self.alllayer2ctc(memsh_final)
hs_len = memsh_mask.view(memsh_final.size(0), -1).sum(1)
if self.memattnloss_decoder:
x_mems = torch.stack(x_mems,dim=1)
commems = torch.stack(commems,dim=1)
self.mem_loss_total = mem_loss_total
hs_pad = torch.cat(hs_pad,dim=1) # batch, n_chunk, windows_size, adim
self.hs_pad = hs_pad
self.memspk = memspk
# 1.5. transducer preparation related
ys_in_pad, target, ys_mask , target_len = prepare_loss_inputs(ys_pad) # hs_mask_reshape[:,:,-1]
# 2. decoder
if self.dtype == "transformer":
ys_in_pad = ys_in_pad.unsqueeze(1).expand(-1, n_chunk, -1) #(batch_size, chunk, tgtsize)
ys_mask = ys_mask.unsqueeze(1).expand(-1, n_chunk, -1, -1) #(batch_size, chunk, tgtsize, tgtsize)
hs_mask = hs_pad_mask if len(hs_pad_mask) else None # hs_pad_mask for conv1d2decoder
x_mems = x_mems if len(x_mems) else None
commems = commems if len(commems) else None
pred_pad, loss_attndec = self.decoder(ys_in_pad, ys_mask, hs_pad, hs_mask, x_mems,commems) # None is hs_mask, using pred_len to mask
# plot dec attn
# self.plot_dec_attn = []
# for m in self.decoder.decoders:
# self.plot_dec_attn.append(
# m.src_attn.attn[0].cpu().detach().numpy() #(nchunk, head, nenc, ndec)
# )
# self.plot_decoders_attn(self.plot_dec_attn)
self.pred_pad = pred_pad # (batch_size,nchunk,nseq,tgtsize)
# pred_pad = torch.log_softmax(pred_pad,dim=2) #log_softmax
# 3. loss computation
loss_ctc = self.ctc(memsh_final, hs_len, ys_pad)
loss = self.criterion(pred_pad, target, pred_len, target_len)
self.loss = loss + self.mem_loss_total + loss_ctc #0.3*
if self.usespk2decoder:
pred_spk = self.spklinear(memspk[:,self.memspeaker_size:,:])
real_spk = spkid.unsqueeze(1).expand(-1,pred_spk.size(1)).reshape(-1)
loss_spk = self.spkloss(pred_spk.reshape(-1,self.spkodim),real_spk)
self.loss += loss_spk
loss_spk_data = float(loss_spk)
else:
loss_spk_data = None
if loss_attndec is not None:
self.loss += loss_attndec
loss_attndec_data = float(loss_attndec)
else:
loss_attndec_data = None
loss_rnnt_data = float(loss)
loss_ctc_data = float(loss_ctc)
loss_mem_data = float(self.mem_loss_total)
# if(not torch.equal(torch.tensor([ilens[0]]*ilens.size(0)),ilens)):
# logging.warning(str(("ilens: ",ilens,"pred_lens: ",pred_len)))
if math.isnan(float(self.loss)):
print("outof mem")
pass
if not math.isnan(loss_rnnt_data):
self.reporter.report(float(self.loss), loss_rnnt_data, loss_ctc_data, loss_mem_data,loss_spk_data,loss_attndec_data)
else:
logging.warning("loss (=%f) is not correct", loss_rnnt_data)
return self.loss
def recognize(self, x, recog_args, char_list=None, rnnlm=None):
"""Recognize input features.
Args:
x (ndarray): input acoustic feature (T, D)
recog_args (namespace): argument Namespace containing options
char_list (list): list of characters
rnnlm (torch.nn.Module): language model module
Returns:
y (list): n-best decoding results
"""
self.eval()
# logging.INFO("beam_size: ",recog_args.beam_size)
with torch.no_grad():
h = self.encode_transformer(x) #h, memsh =
h = h.squeeze(0)
n_chunk = h.size(0)
# memsh = torch.stack(memsh,dim=0)
# memsh = torch.mean(memsh,dim=0)[:,self.memspeech_size:,:]
recog_args.hwsize = self.hwsize
recog_args.n_chunk = n_chunk
params = [h, self.hs_pad_mask, recog_args]
if recog_args.beam_size == 1 or recog_args.beam_size==0:
nbest_hyps = self.decoder.recognize(*params)
else:
nbest_hyps = self.decoder_recognize(h,recog_args)
# params.append(rnnlm)
# nbest_hyps = self.decoder.recognize_beam(*params)
return nbest_hyps
def online_recognize_setup(self, beam_size):
self.src_mask = torch.tensor([True]*(self.hwsize*4+3)).reshape((1,1,-1))
self.memsh, self.memsh_mask = self.initial_state(1,self.memspeech_size,len(self.encoder.encoders))
self.memsh_mask = self.memsh_mask.type(self.src_mask.dtype)
self.last_memsh = None
if self.conv1d2decoder is not None:
self.last_memsh = to_device(self, torch.zeros(1,self.memspeech_size,self.adim))
if beam_size == 1 or beam_size==0:
hyp = {"score": 0.0, "yseq": [self.blank_id]}
else:
hyp = {"score": 0.0, "yseq": torch.tensor([self.blank_id], dtype=torch.long)}
self.hyps = [hyp]
    def online_recognize_each_chunk(self, x, recog_args):
        """Decode one streaming input chunk and update the running hypotheses.

        Args:
            x (ndarray): acoustic features of the current chunk
            recog_args (namespace): decoding options (beam_size, ...)

        Returns:
            list: updated hypothesis list (also stored in self.hyps)
        """
        self.eval()
        x = to_device(self,torch.as_tensor(x).unsqueeze(0))
        # Encode the chunk against the running compressive memory; memory
        # compression itself is deferred to update_commem()
        self.hs_pad_temp, _, _, _ = self.encoder(x, self.src_mask ,self.memsh,
                                        self.memsh_mask,is_inference=True,is_compress=False)
        # batch, windows_size,adim
        if self.conv1d2decoder is not None:
            # Prepend the decoder-side memory so the decoder attends over
            # memory + current chunk
            hs = torch.cat((self.last_memsh[:,-self.memspeech_size:,:],self.hs_pad_temp),dim=1) # batch, mem+windows_size, adim
            hs_mask_tmp=torch.ones(1,1,self.hwsize).type(self.memsh_mask.dtype).to(self.memsh_mask.device)
            hs_mask = torch.cat((self.memsh_mask[:,:,-self.memspeech_size:],hs_mask_tmp),dim=2)
        else:
            hs = self.hs_pad_temp
            hs_mask = None
        hs = hs.squeeze(0)
        if recog_args.beam_size == 1 or recog_args.beam_size==0:
            # Greedy path carries a single hypothesis dict
            self.hyps = self.hyps[0]
            self.hyps = self.decoder.recognize_each_chunk(self.hyps , hs, h_mask=hs_mask)
            self.hyps = [self.hyps]
        else:
            if hs_mask is not None:
                hs_mask = hs_mask.squeeze(0)
            self.hyps =self.decoder_each_chunk_beam_search(self.hyps, hs, h_mask=hs_mask)
        return self.hyps
def update_commem(self): #for online
self.memsh=self.encoder.update_commem(self.memsh)
cp_size = self.hwsize//self.compressive_rate
mask_tmp = torch.ones(1,1,cp_size).type(self.memsh_mask.dtype).to(self.memsh_mask.device)
self.memsh_mask = torch.cat((self.memsh_mask,mask_tmp),dim=2)
if self.conv1d2decoder is not None:
mem_tmp = self.conv1d2decoder(self.hs_pad_temp.transpose(-1,-2)).transpose(-1,-2)
self.last_memsh = torch.cat((self.last_memsh,mem_tmp),dim=1)
    def encode_transformer(self, x):
        """Encode acoustic features chunk by chunk for offline decoding.

        Runs the streaming encoder over reshaped chunks while maintaining a
        compressive speech memory (and, optionally, a speaker memory), and
        collects per-chunk encoder outputs plus matching attention masks.
        Side effects: sets self.memsh_final, self.hs_pad_mask and, when
        speaker memory is used, self.memspk.

        Args:
            x (ndarray): input acoustic feature (T, D)
        Returns:
            x (torch.Tensor): encoded features, stacked per chunk
                (batch, n_chunk, window_size[+memory], attention_dim)
        """
        self.eval()  # inference mode
        x = to_device(self,torch.as_tensor(x).unsqueeze(0))
        x_reshape = self.reshapetochunk(x)
        n_chunk = x_reshape.size(1)
        src_mask = make_non_pad_mask([n_chunk]).to(x.device) # (batch, nchunk)
        src_mask_expand = src_mask.unsqueeze(-1).unsqueeze(-2).expand(-1,-1,-1,self.hwsize*4+3) # (batch, nchunk, 1,hwsize*4+3)
        # Fresh per-utterance memories: one speech memory per encoder layer.
        memsh, memsh_mask = self.initial_state(x_reshape.size(0),self.memspeech_size,len(self.encoder.encoders))
        memsh_mask = memsh_mask.type(src_mask_expand.dtype)
        memspk = to_device(self, torch.zeros(x_reshape.size(0),self.memspeaker_size,self.adim))
        memspk_mask = to_device(self, torch.zeros(x_reshape.size(0),1,self.memspeaker_size)).type(src_mask_expand.dtype)
        if self.conv1d2decoder is not None:
            last_memsh = to_device(self, torch.zeros(x_reshape.size(0),self.memspeech_size,self.adim))
        hs_pad = tuple()
        hs_pad_ctc = tuple()
        hs_pad_mask = []
        for i in range(n_chunk):
            hs_pad_temp, memsh_mask, memsh, _ = self.encoder(x_reshape[:,i,:,:], src_mask_expand[:,i,:,:] ,
                                                memsh, memsh_mask,is_inference=True) # batch, windows_size,adim
            # Keep the raw chunk output before any memory is prepended; it is
            # what gets compressed into the memories below.
            hs_pad_temp1 = hs_pad_temp
            mem_size = self.hwsize//self.compressive_rate
            if self.conv1d2decoder is not None and not self.usespk_version2:
                # Build a mask covering (past speech memory + current chunk)
                # and prepend the decoder-side speech memory to the chunk.
                hs_pad_temp_mask = torch.ones(hs_pad_temp.size(0),1,hs_pad_temp.size(1)).type(memsh_mask.dtype).to(memsh_mask.device)
                hs_pad_mask.append(torch.cat((memsh_mask[:,:,-self.memspeech_size-mem_size:-mem_size]
                                            ,hs_pad_temp_mask),dim=2))
                hs_pad_temp = torch.cat((last_memsh[:,-self.memspeech_size:,:],hs_pad_temp),dim=1) # batch, mem+windows_size, adim
                memsph_tmp = self.conv1d2decoder(hs_pad_temp1.transpose(-1,-2)).transpose(-1,-2)
                last_memsh = torch.cat((last_memsh,memsph_tmp),dim=1)
            elif self.alllayer2ctc is None and self.conv1d2decoder is None:
                # No decoder-side memory: accumulate raw chunk outputs for CTC.
                hs_pad_ctc = hs_pad_ctc + (hs_pad_temp1,)
            if self.usespk2decoder:
                if self.usespk_version2 and self.conv1d2decoder is not None:
                    # v2: one conv produces a joint embedding that a 2-way
                    # softmax splits into speaker and speech memory parts.
                    hs_pad_temp_mask = torch.ones(hs_pad_temp.size(0),1,hs_pad_temp.size(1)).type(memsh_mask.dtype).to(memsh_mask.device)
                    hs_pad_mask.append(torch.cat((memspk_mask[:,:,-self.memspeaker_size:]
                                                ,memsh_mask[:,:,-self.memspeech_size-mem_size:-mem_size]
                                                ,hs_pad_temp_mask),dim=2))
                    hs_pad_temp = torch.cat((memspk[:,-self.memspeaker_size:,:],last_memsh[:,-self.memspeech_size:,:],hs_pad_temp),dim=1)
                    mem_whole = self.conv1d2decoder(hs_pad_temp1.transpose(-1,-2)).transpose(-1,-2)
                    mem_softlin = self.soft_linear(mem_whole).view(mem_whole.size(0),mem_whole.size(1),2,self.adim)
                    mem_softmax = torch.nn.functional.softmax(mem_softlin,dim=2)
                    memspk_tmp = self.spklinear2em(mem_softmax[:,:,0,:]*mem_whole)
                    memsph_tmp = self.shlinear2em(mem_softmax[:,:,1,:]*mem_whole)
                    last_memsh = torch.cat((last_memsh,memsph_tmp),dim=1)
                    memspk = torch.cat((memspk, memspk_tmp) ,dim=1)
                    memspk_mask = torch.cat((memspk_mask
                            , torch.ones(x_reshape.size(0),1,mem_size).type(memspk_mask.dtype).to(memspk_mask.device)),dim=2)
                else:
                    # v1: separate speaker projection; extend the mask built
                    # in the speech-memory branch above.
                    hs_pad_mask[-1] = torch.cat((memspk_mask[:,:,-self.memspeaker_size:],hs_pad_mask[-1]),dim=2)
                    hs_pad_temp = torch.cat((memspk[:,-self.memspeaker_size:,:],hs_pad_temp),dim=1)
                    memspk_mask = torch.cat((memspk_mask
                            , torch.ones(x_reshape.size(0),1,mem_size).type(memspk_mask.dtype).to(memspk_mask.device)),dim=2)
                    memspk_tmp = self.speaker2decoder(hs_pad_temp1.transpose(-1,-2)).transpose(-1,-2)
                    # memspk_tmp = torch.mean(memspk_tmp, dim=1).unsqueeze(1)
                    memspk = torch.cat((memspk, memspk_tmp) ,dim=1)
            hs_pad = hs_pad + (hs_pad_temp.unsqueeze(1),)
        # Choose the CTC input and decoder mask depending on configuration.
        if self.conv1d2decoder is not None:
            self.memsh_final = last_memsh[:,self.memspeech_size:,:]
            self.hs_pad_mask = torch.stack(hs_pad_mask,dim=1)
        elif self.alllayer2ctc is None:
            self.memsh_final = torch.cat(hs_pad_ctc,dim=1) # batch, all_window, adim
            # self.memsh_final = memsh[-1][:,self.memspeech_size:,:]
            # torch.mean(memsh,dim=0)[:,self.memspeech_size:,:] use last layer for now
            self.hs_pad_mask = None
        else:
            # Project the concatenation of all layers' memories for CTC.
            memsh_final = torch.cat(memsh,dim=-1)[:,self.memspeech_size:,:] # batch, mem, dim
            self.memsh_final = self.alllayer2ctc(memsh_final)
            self.hs_pad_mask = None
        if self.usespk2decoder is not None :
            self.memspk = memspk[:,self.memspeaker_size:,:]
        hs_pad = torch.cat(hs_pad,dim=1) # batch, n_chunk, windows_size, adim
        return hs_pad #hs_pad,memsh
    def decoder_recognize(self,h,recog_args):
        """Decode pre-encoded chunked features with chunk-wise beam search.

        Args:
            h: per-chunk encoder outputs iterated chunk by chunk — presumably
               the batch-1 output of encode_transformer (TODO confirm caller).
            recog_args: recognition config; beam_size, nbest and ctc_weight
               are read here.

        Returns:
            list: the nbest best hypotheses sorted by descending score.
        """
        # search parms
        beam = recog_args.beam_size
        nbest = recog_args.nbest
        ctc_weight = recog_args.ctc_weight
        if ctc_weight > 0.0:
            # CTC log-probs over self.memsh_final (set by encode_transformer).
            lpz = self.ctc.log_softmax(self.memsh_final)
            lpz = lpz.squeeze(0)
        else:
            lpz = None
        #initialize hypothesis
        hyp = {"score": 0.0, "yseq": torch.tensor([self.blank_id], dtype=torch.long)}
        if lpz is not None:
            # NOTE(review): `numpy` here must resolve to the numpy module
            # (imported as `numpy`, not only as `np`) — verify file imports.
            self.ctc_prefix_score = CTCPrefixScore(lpz.detach().numpy(), 0, self.eos, numpy)
            hyp["ctc_state_prev"] = self.ctc_prefix_score.initial_state()
            hyp["ctc_score_prev"] = 0.0
            if ctc_weight != 1.0:
                # pre-pruning based on attention scores
                ctc_beam = min(lpz.shape[-1], int(beam)) #beam is enough, not beam * CTC_SCORING_RATIO
            else:
                ctc_beam = lpz.shape[-1]
        else:
            ctc_beam = 0
        hyps = [hyp]
        for i,hi in enumerate(h):
            # Per-chunk decoder mask prepared by encode_transformer (or None).
            h_mask = self.hs_pad_mask[0,i] if self.hs_pad_mask is not None else None
            hyps = self.decoder_each_chunk_beam_search(hyps, hi, h_mask=h_mask, beam=beam,
                                                       ctc_beam=ctc_beam,ctc_weight=ctc_weight)
        nbest_hyps = sorted(hyps, key=lambda x: x["score"], reverse=True)[:nbest]
        return nbest_hyps
def decoder_each_chunk_beam_search(self, hyps, hi, h_mask=None,
beam=5, times=3,ctc_beam=0, ctc_weight=0):
hyps_yseq = [h["yseq"] for h in hyps]
hyps_len = [len(h["yseq"]) for h in hyps]
hyps_score = torch.tensor([h["score"] for h in hyps])
ys = to_device(self, pad_list(hyps_yseq, self.blank_id)).unsqueeze(1) #(batch,1, tgtsize)
hi = hi.unsqueeze(0).unsqueeze(1).expand(ys.size(0),-1,-1,-1) # (batch,1,nwindow, adim)
ys_mask = to_device(
self, subsequent_mask(ys.size(-1)).unsqueeze(0).unsqueeze(0) #(1, 1, tgtsize, tgtsize)
)
h_mask = h_mask.unsqueeze(0).unsqueeze(0) if h_mask is not None else None
scores=self.decoder.forward_one_step_forbatch(ys, ys_mask, hyps_len, hi, h_mask)
n_tokens = scores.size(1)-1
hyps_blank_score = hyps_score + scores[:,0]
expan_blank_score, expan_hyps_yseq = [], []
if ctc_beam>0:
expan_ctc_score, expan_ctc_status = [], []
hyps_ctc_score = [h["ctc_score_prev"] for h in hyps]
hyps_ctc_state = [h["ctc_state_prev"] for h in hyps]
for ex_i in range(times): # means one chunk generate 2 word at most
if ex_i==0:
if ctc_beam>0:
ctc_expan_scores, ctc_expan_ids = torch.topk(scores[:,1:], ctc_beam, dim=1)
ctc_scores_list, ctc_states_list = [], []
ctc_local_scores_list = []
n_size = ctc_expan_scores.size(0)
for k in range(n_size):
| |
# juanmed/singleshot6Dpose -- test2.py
# import support libraries
import os
import time
import numpy as np
# import main working libraries
import cv2
import torch
from torch.autograd import Variable
from torchvision import transforms
from PIL import Image
# import app libraries
from darknet import Darknet
from utils import *
from MeshPly import MeshPly
class Line():
    """A 2D line defined by two sample points.

    Vertical lines are flagged by storing the string sentinel "NaN" in both
    the slope and the intercept; callers compare against that literal, so
    the sentinel is kept for backward compatibility.
    """
    def __init__(self,p1,p2):
        run = p2[0] - p1[0]
        if run == 0.0:
            # Vertical line: no finite slope or intercept.
            self.m = "NaN"
            self.b = "NaN"
        else:
            self.m = (p2[1] - p1[1]) / run
            self.b = -1.0 * self.m * p1[0] + p1[1]
        # Keep one of the defining points around for later reference.
        self.p = p1
    def eval(self,x):
        """Return the y coordinate of the line at *x*.

        Undefined for vertical lines (the "NaN" sentinel makes the
        arithmetic raise TypeError).
        """
        # TODO verify if line is vertical
        return x * self.m + self.b
def find_intersection(l1, l2):
    """Return (x, y) where two non-parallel, non-vertical lines cross.

    Raises ZeroDivisionError for parallel lines (equal slopes) and
    TypeError for vertical lines (string "NaN" slope/intercept).
    """
    x_cross = (l2.b - l1.b) / (l1.m - l2.m)  # equate the two line equations
    return x_cross, l1.eval(x_cross)
# estimate bounding box
# author contact: <EMAIL>
def test(datacfg, cfgfile, weightfile, imgfile):
# ******************************************#
# PARAMETERS PREPARATION #
# ******************************************#
#parse configuration files
options = read_data_cfg(datacfg)
meshname = options['mesh']
name = options['name']
#Parameters for the network
seed = int(time.time())
gpus = '0' # define gpus to use
test_width = 608 # define test image size
test_height = 608
torch.manual_seed(seed) # seed torch random
use_cuda = True
if use_cuda:
os.environ['CUDA_VISIBLE_DEVICES'] = gpus
torch.cuda.manual_seed(seed) # seed cuda random
conf_thresh = 0.1
num_classes = 1
# Read object 3D model, get 3D Bounding box corners
mesh = MeshPly(meshname)
vertices = np.c_[np.array(mesh.vertices), np.ones((len(mesh.vertices), 1))].transpose()
#print("Vertices are:\n {} Shape: {} Type: {}".format(vertices,vertices.shape, type(vertices)))
corners3D = get_3D_corners(vertices)
feet_cm = 30.48 # 1 ft = 30.48 cm
corners3D[0] = np.array([-11*feet_cm/2.0, -11*feet_cm/2.0, -11*feet_cm/2.0, -11*feet_cm/2.0, 11*feet_cm/2.0, 11*feet_cm/2.0, 11*feet_cm/2.0, 11*feet_cm/2.0])
corners3D[1] = np.array([-feet_cm/2.0, -feet_cm/2.0, feet_cm/2.0, feet_cm/2.0, -feet_cm/2.0, -feet_cm/2.0, feet_cm/2.0, feet_cm/2.0])
corners3D[2] = np.array([-11*feet_cm/2.0, 11*feet_cm/2.0, -11*feet_cm/2.0, 11*feet_cm/2.0, -11*feet_cm/2.0, 11*feet_cm/2.0, -11*feet_cm/2.0, 11*feet_cm/2.0])
#print("3D Corners are:\n {} Shape: {} Type: {}".format(corners3D,corners3D.shape, type(corners3D)))
diam = float(options['diam'])
# now configure camera intrinsics
internal_calibration = get_camera_intrinsic()
# ******************************************#
# NETWORK CREATION #
# ******************************************#
# Create the network based on cfg file
model = Darknet(cfgfile)
#model.print_network()
model.load_weights(weightfile)
model.cuda()
model.eval()
# ******************************************#
# INPUT IMAGE PREPARATION FOR NN #
# ******************************************#
# Now prepare image: convert to RGB, resize, transform to Tensor
# use cuda,
img = Image.open(imgfile).convert('RGB')
ori_size = img.size # store original size
img = img.resize((test_width, test_height))
t1 = time.time()
img = transforms.Compose([transforms.ToTensor(),])(img)#.float()
img = Variable(img, requires_grad = True)
img = img.unsqueeze(0)
img = img.cuda()
# ******************************************#
# PASS IT TO NETWORK AND GET PREDICTION #
# ******************************************#
# Forward pass
output = model(img).data
#print("Output Size: {}".format(output.size(0)))
t2 = time.time()
# ******************************************#
# EXTRACT PREDICTIONS #
# ******************************************#
# Using confidence threshold, eliminate low-confidence predictions
# and get only boxes over the confidence threshold
all_boxes = get_region_boxes(output, conf_thresh, num_classes)
boxes = all_boxes[0]
# iterate through boxes to find the one with highest confidence
best_conf_est = -1
best_box_index = -1
for j in range(len(boxes)):
# the confidence is in index = 18
if( boxes[j][18] > best_conf_est):
box_pr = boxes[j] # get bounding box
best_conf_est = boxes[j][18]
best_box_index = j
#print("Best box is: {} and 2D prediction is {}".format(best_box_index,box_pr))
#print("Confidence is: {}".format(best_conf_est))
print(best_conf_est.item(),type(best_conf_est.item()))
# Denormalize the corner predictions
# This are the predicted 2D points with which a bounding cube can be drawn
corners2D_pr = np.array(np.reshape(box_pr[:18], [9, 2]), dtype='float32')
corners2D_pr[:, 0] = corners2D_pr[:, 0] * ori_size[0] # Width
corners2D_pr[:, 1] = corners2D_pr[:, 1] * ori_size[1] # Height
t3 = time.time()
# **********************************************#
# GET OBJECT POSE ESTIMATION #
# Remember the problem in 6D Pose estimation #
# is exactly to estimate the pose - position #
# and orientation of the object of interest #
# with reference to a camera frame. That is #
# why although the 2D projection of the 3D #
# bounding cube are ready, we still need to #
# compute the rotation matrix -orientation- #
# and a translation vector -position- for the #
# object #
# #
# **********************************************#
# get rotation matrix and transform
R_pr, t_pr = pnp(np.array(np.transpose(np.concatenate((np.zeros((3, 1)), corners3D[:3, :]), axis=1)), dtype='float32'), corners2D_pr, np.array(internal_calibration, dtype='float32'))
t4 = time.time()
# ******************************************#
# DISPLAY IMAGE WITH BOUNDING CUBE #
# ******************************************#
# Reload Original img
img = cv2.imread(imgfile)
# create a window to display image
wname = "Prediction"
cv2.namedWindow(wname)
# draw each predicted 2D point
for i, (x,y) in enumerate(corners2D_pr):
# get colors to draw the lines
col1 = 28*i
col2 = 255 - (28*i)
col3 = np.random.randint(0,256)
cv2.circle(img, (x,y), 3, (col1,col2,col3), -1)
cv2.putText(img, str(i), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (col1, col2, col3), 1)
# Get each predicted point and the centroid
p1 = corners2D_pr[1]
p2 = corners2D_pr[2]
p3 = corners2D_pr[3]
p4 = corners2D_pr[4]
p5 = corners2D_pr[5]
p6 = corners2D_pr[6]
p7 = corners2D_pr[7]
p8 = corners2D_pr[8]
center = corners2D_pr[0]
# Draw cube lines around detected object
# draw front face
line_point = 2
cv2.line(img,(p1[0],p1[1]),(p2[0],p2[1]), (0,255,0),line_point)
cv2.line(img,(p2[0],p2[1]),(p4[0],p4[1]), (0,255,0),line_point)
cv2.line(img,(p4[0],p4[1]),(p3[0],p3[1]), (0,255,0),line_point)
cv2.line(img,(p3[0],p3[1]),(p1[0],p1[1]), (0,255,0),line_point)
# draw back face
cv2.line(img,(p5[0],p5[1]),(p6[0],p6[1]), (0,255,0),line_point)
cv2.line(img,(p7[0],p7[1]),(p8[0],p8[1]), (0,255,0),line_point)
cv2.line(img,(p6[0],p6[1]),(p8[0],p8[1]), (0,255,0),line_point)
cv2.line(img,(p5[0],p5[1]),(p7[0],p7[1]), (0,255,0),line_point)
# draw right face
cv2.line(img,(p2[0],p2[1]),(p6[0],p6[1]), (0,255,0),line_point)
cv2.line(img,(p1[0],p1[1]),(p5[0],p5[1]), (0,255,0),line_point)
# draw left face
cv2.line(img,(p3[0],p3[1]),(p7[0],p7[1]), (0,255,0),line_point)
cv2.line(img,(p4[0],p4[1]),(p8[0],p8[1]), (0,255,0),line_point)
# Calculate gate dimensions
min_x = np.min(corners3D[0,:]) # this are the gate outermost corners
max_x = np.max(corners3D[0,:])
min_y = np.min(corners3D[1,:])
max_y = np.max(corners3D[1,:])
min_z = np.min(corners3D[2,:])
max_z = np.max(corners3D[2,:])
gate_dim_z = max_z - min_z
gate_dim_x = max_x - min_x
gate_dim_y = max_y - min_y
############################################################
# PREDICT FLYABLE AREA BASED ON ESTIMATED 2D PROJECTIONS
############################################################
# Calculate Fly are based based on offset from predicted 2D
# Projection
flyarea_side = 243.84 #cm 8ft
offset_z = (gate_dim_z - flyarea_side)/2.0
offset_x = (gate_dim_x - flyarea_side)/2.0
offset_z_ratio = (offset_z/gate_dim_z) # calculate as ratio wrt side, to use with pixels later
offset_x_ratio = (offset_x/gate_dim_x)
#print("Offset X ratio: {}, Offset Z ratio: {}".format(offset_x_ratio,offset_z_ratio))
# GATE FRONT
#
# array to store all 4 points
flyarea_corners_front = np.zeros((4,2), dtype = 'float32')
# corner 1
flyarea_corners_front[0][0] = p4[0] + int((p2[0]-p4[0])*offset_x_ratio)
flyarea_corners_front[0][1] = p4[1] + int((p3[1]-p4[1])*offset_z_ratio)
# corner 2
flyarea_corners_front[1][0] = p2[0] + int((p4[0]-p2[0])*offset_x_ratio)
flyarea_corners_front[1][1] = p2[1] + int((p1[1]-p2[1])*offset_x_ratio)
# corner 3
flyarea_corners_front[2][0] = p1[0] + int((p3[0]-p1[0])*offset_x_ratio)
flyarea_corners_front[2][1] = p1[1] + int((p2[1]-p1[1])*offset_x_ratio)
# corner 4
flyarea_corners_front[3][0] = p3[0] + int((p1[0]-p3[0])*offset_x_ratio)
flyarea_corners_front[3][1] = p3[1] + int((p4[1]-p3[1])*offset_x_ratio)
#print("Front points: {}".format(flyarea_corners_front))
# draw front gate area
fa_p1_f = flyarea_corners_front[0]
fa_p2_f = flyarea_corners_front[1]
fa_p3_f = flyarea_corners_front[2]
fa_p4_f = flyarea_corners_front[3]
"""
cv2.line(img,(fa_p1_f[0],fa_p1_f[1]),(fa_p2_f[0],fa_p2_f[1]), (255,0,255),line_point)
cv2.line(img,(fa_p2_f[0],fa_p2_f[1]),(fa_p3_f[0],fa_p3_f[1]), (255,0,255),line_point)
cv2.line(img,(fa_p4_f[0],fa_p4_f[1]),(fa_p1_f[0],fa_p1_f[1]), (255,0,255),line_point)
cv2.line(img,(fa_p3_f[0],fa_p3_f[1]),(fa_p4_f[0],fa_p4_f[1]), (255,0,255),line_point)
"""
# GATE BACK
#
# array to store all 4 points
flyarea_corners_back = np.zeros((4,2), dtype = 'float32')
# corner 1
flyarea_corners_back[0][0] = p8[0] + int((p6[0]-p8[0])*offset_x_ratio)
flyarea_corners_back[0][1] = p8[1] + int((p7[1]-p8[1])*offset_z_ratio)
# corner 2
flyarea_corners_back[1][0] = p6[0] + int((p8[0]-p6[0])*offset_x_ratio)
flyarea_corners_back[1][1] = p6[1] + int((p5[1]-p6[1])*offset_x_ratio)
# corner 3
flyarea_corners_back[2][0] = p5[0] + int((p7[0]-p5[0])*offset_x_ratio)
flyarea_corners_back[2][1] = p5[1] + int((p6[1]-p5[1])*offset_x_ratio)
# corner 4
flyarea_corners_back[3][0] = p7[0] + int((p5[0]-p7[0])*offset_x_ratio)
flyarea_corners_back[3][1] = p7[1] + int((p8[1]-p7[1])*offset_x_ratio)
#print("Back points: {}".format(flyarea_corners_back))
# draw back gate area
fa_p1_b = flyarea_corners_back[0]
fa_p2_b = flyarea_corners_back[1]
fa_p3_b = flyarea_corners_back[2]
fa_p4_b = flyarea_corners_back[3]
"""
cv2.line(img,(fa_p1_b[0],fa_p1_b[1]),(fa_p2_b[0],fa_p2_b[1]), (255,0,255),line_point)
cv2.line(img,(fa_p2_b[0],fa_p2_b[1]),(fa_p3_b[0],fa_p3_b[1]), (255,0,255),line_point)
cv2.line(img,(fa_p4_b[0],fa_p4_b[1]),(fa_p1_b[0],fa_p1_b[1]), (255,0,255),line_point)
cv2.line(img,(fa_p3_b[0],fa_p3_b[1]),(fa_p4_b[0],fa_p4_b[1]), (255,0,255),line_point)
"""
"""
# draw each predicted 2D point
for i, (x,y) in enumerate(flyarea_corners_front):
# get colors to draw the lines
col1 = 0#np.random.randint(0,256)
col2 = 0#np.random.randint(0,256)
col3 = 255#np.random.randint(0,256)
cv2.circle(img, (x,y), 3, (col1,col2,col3), -1)
cv2.putText(img, str(i), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (col1, col2, col3), 1)
# draw each predicted 2D point
for i, (x,y) in enumerate(flyarea_corners_back):
# get colors to draw the lines
col1 = 0#np.random.randint(0,256)
col2 = 0#np.random.randint(0,256)
col3 = 255#np.random.randint(0,256)
cv2.circle(img, (x,y), 3, (col1,col2,col3), -1)
cv2.putText(img, str(i+4), (int(x) + 5, int(y) + 5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (col1, col2, col3), 1)
"""
# GATE ALL FRONT AND BACK
# LINES
# FRONT
front_up = Line(flyarea_corners_front[0],flyarea_corners_front[1])
front_right = Line(flyarea_corners_front[1],flyarea_corners_front[2])
front_down = Line(flyarea_corners_front[2],flyarea_corners_front[3])
front_left = Line(flyarea_corners_front[3],flyarea_corners_front[0])
#print("Front Up Line: m {:.4f} b{:.4f}".format(front_up.m, front_up.b))
#print("Front Right Line: m {:.4f} b{:.4f}".format(front_right.m, front_right.b))
#print("Front Down Line: m {:.4f} b{:.4f}".format(front_down.m, front_down.b))
#print("Front Left Line: m {:.4f} b{:.4f}".format(front_left.m, front_left.b))
# BACK
back_up = Line(flyarea_corners_back[0],flyarea_corners_back[1])
back_right = Line(flyarea_corners_back[1],flyarea_corners_back[2])
back_down = Line(flyarea_corners_back[2],flyarea_corners_back[3])
back_left = Line(flyarea_corners_back[3],flyarea_corners_back[0])
#print("Back Up Line: m {:.4f} b{:.4f}".format(back_up.m, back_up.b))
| |
to remove in #5389
assert {perm["code"].lower() for perm in data["user"]["permissions"]} == {
permission_manage_orders.codename,
}
def test_staff_update_out_of_scope_user(
    staff_api_client,
    superuser_api_client,
    permission_manage_staff,
    permission_manage_orders,
    media_root,
):
    """Ensure that staff user cannot update user with wider scope of permission.

    Ensure superuser pass restrictions.
    """
    query = STAFF_UPDATE_MUTATIONS
    # Target holds manage_orders, which the requestor lacks -> out of scope.
    staff_user = User.objects.create(email="<EMAIL>", is_staff=True)
    staff_user.user_permissions.add(permission_manage_orders)
    # `user_id` instead of `id` to avoid shadowing the builtin.
    user_id = graphene.Node.to_global_id("User", staff_user.id)
    variables = {"id": user_id, "input": {"isActive": False}}

    # for staff user: the mutation must fail with OUT_OF_SCOPE_USER
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_staff]
    )
    content = get_graphql_content(response)
    data = content["data"]["staffUpdate"]
    assert not data["user"]
    assert len(data["staffErrors"]) == 1
    assert data["staffErrors"][0]["field"] == "id"
    assert data["staffErrors"][0]["code"] == AccountErrorCode.OUT_OF_SCOPE_USER.name

    # for superuser: the update succeeds regardless of scope
    response = superuser_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content["data"]["staffUpdate"]
    assert data["user"]["email"] == staff_user.email
    assert data["user"]["isActive"] is False
    assert not data["staffErrors"]
def test_staff_update_out_of_scope_groups(
    staff_api_client,
    superuser_api_client,
    permission_manage_staff,
    media_root,
    permission_manage_users,
    permission_manage_orders,
    permission_manage_products,
):
    """Ensure that staff user cannot add to groups which permission scope is wider
    than user's scope.

    Ensure superuser pass restrictions.
    """
    query = STAFF_UPDATE_MUTATIONS
    groups = Group.objects.bulk_create(
        [
            Group(name="manage users"),
            Group(name="manage orders"),
            Group(name="manage products"),
        ]
    )
    group1, group2, group3 = groups
    group1.permissions.add(permission_manage_users)
    group2.permissions.add(permission_manage_orders)
    group3.permissions.add(permission_manage_products)

    staff_user = User.objects.create(email="<EMAIL>", is_staff=True)
    # Requestor only holds manage_orders, so group1 (users) and
    # group3 (products) are outside its scope.
    staff_api_client.user.user_permissions.add(permission_manage_orders)
    # `user_id` instead of `id` to avoid shadowing the builtin.
    user_id = graphene.Node.to_global_id("User", staff_user.id)
    variables = {
        "id": user_id,
        "input": {
            "isActive": False,
            "addGroups": [
                graphene.Node.to_global_id("Group", gr.pk) for gr in [group1, group2]
            ],
            "removeGroups": [graphene.Node.to_global_id("Group", group3.pk)],
        },
    }

    # for staff user: both out-of-scope group operations are rejected
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_staff]
    )
    content = get_graphql_content(response)
    data = content["data"]["staffUpdate"]
    errors = data["staffErrors"]
    assert not data["user"]
    assert len(errors) == 2
    expected_errors = [
        {
            "field": "addGroups",
            "code": AccountErrorCode.OUT_OF_SCOPE_GROUP.name,
            "permissions": None,
            "groups": [graphene.Node.to_global_id("Group", group1.pk)],
        },
        {
            "field": "removeGroups",
            "code": AccountErrorCode.OUT_OF_SCOPE_GROUP.name,
            "permissions": None,
            "groups": [graphene.Node.to_global_id("Group", group3.pk)],
        },
    ]
    for error in errors:
        error.pop("message")  # the message text is not part of this contract
        assert error in expected_errors

    # for superuser: groups are added/removed without scope errors
    response = superuser_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content["data"]["staffUpdate"]
    errors = data["staffErrors"]
    assert not errors
    assert data["user"]["email"] == staff_user.email
    assert {group["name"] for group in data["user"]["permissionGroups"]} == {
        group1.name,
        group2.name,
    }
def test_staff_update_duplicated_input_items(
    staff_api_client,
    permission_manage_staff,
    media_root,
    permission_manage_orders,
    permission_manage_users,
):
    """Groups appearing in both addGroups and removeGroups are reported as
    DUPLICATED_INPUT_ITEM."""
    query = STAFF_UPDATE_MUTATIONS
    groups = Group.objects.bulk_create(
        [Group(name="manage users"), Group(name="manage orders"), Group(name="empty")]
    )
    group1, group2, group3 = groups
    group1.permissions.add(permission_manage_users)
    group2.permissions.add(permission_manage_orders)

    staff_user = User.objects.create(email="<EMAIL>", is_staff=True)
    staff_api_client.user.user_permissions.add(
        permission_manage_orders, permission_manage_users
    )
    # `user_id` instead of `id` to avoid shadowing the builtin.
    user_id = graphene.Node.to_global_id("User", staff_user.id)
    # group1 and group2 appear in both lists -> duplicated input items
    variables = {
        "id": user_id,
        "input": {
            "addGroups": [
                graphene.Node.to_global_id("Group", gr.pk) for gr in [group1, group2]
            ],
            "removeGroups": [
                graphene.Node.to_global_id("Group", gr.pk)
                for gr in [group1, group2, group3]
            ],
        },
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_staff]
    )
    content = get_graphql_content(response)
    data = content["data"]["staffUpdate"]
    errors = data["staffErrors"]
    assert len(errors) == 1
    assert errors[0]["field"] is None
    assert errors[0]["code"] == AccountErrorCode.DUPLICATED_INPUT_ITEM.name
    assert set(errors[0]["groups"]) == {
        graphene.Node.to_global_id("Group", gr.pk) for gr in [group1, group2]
    }
    assert errors[0]["permissions"] is None
def test_staff_update_doesnt_change_existing_avatar(
    staff_api_client, permission_manage_staff, media_root, staff_users,
):
    """Updating a staff user must leave avatars untouched."""
    # Removed dead setup: the original built a MagicMock(spec=File) that was
    # never referenced anywhere in the test.
    query = STAFF_UPDATE_MUTATIONS
    staff_user, staff_user1, _ = staff_users
    # `user_id` instead of `id` to avoid shadowing the builtin.
    user_id = graphene.Node.to_global_id("User", staff_user1.id)
    variables = {"id": user_id, "input": {"isActive": False}}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_staff]
    )
    content = get_graphql_content(response)
    data = content["data"]["staffUpdate"]
    assert data["staffErrors"] == []
    # NOTE(review): this checks staff_user (the first fixture user), not
    # staff_user1 (the one actually updated) — confirm that is intended.
    staff_user.refresh_from_db()
    assert not staff_user.avatar
def test_staff_update_deactivate_with_manage_staff_left_not_manageable_perms(
    staff_api_client,
    superuser_api_client,
    staff_users,
    permission_manage_users,
    permission_manage_staff,
    permission_manage_orders,
    media_root,
):
    """Ensure that staff user can't and superuser can deactivate user where some
    permissions will be not manageable.
    """
    query = STAFF_UPDATE_MUTATIONS
    groups = Group.objects.bulk_create(
        [
            Group(name="manage users"),
            Group(name="manage staff"),
            Group(name="manage orders"),
        ]
    )
    group1, group2, group3 = groups
    group1.permissions.add(permission_manage_users)
    group2.permissions.add(permission_manage_staff)
    group3.permissions.add(permission_manage_orders)

    staff_user, staff_user1, staff_user2 = staff_users
    # staff_user1 is the only member of group1, so deactivating it leaves
    # manage_users with no active manageable owner.
    group1.user_set.add(staff_user1)
    group2.user_set.add(staff_user2, staff_user1)
    group3.user_set.add(staff_user2)

    staff_user.user_permissions.add(permission_manage_users, permission_manage_orders)
    # `user_id` instead of `id` to avoid shadowing the builtin.
    user_id = graphene.Node.to_global_id("User", staff_user1.id)
    variables = {"id": user_id, "input": {"isActive": False}}

    # for staff user: refused with LEFT_NOT_MANAGEABLE_PERMISSION
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_staff]
    )
    content = get_graphql_content(response)
    data = content["data"]["staffUpdate"]
    errors = data["staffErrors"]
    assert not data["user"]
    assert len(errors) == 1
    assert errors[0]["field"] == "isActive"
    assert errors[0]["code"] == AccountErrorCode.LEFT_NOT_MANAGEABLE_PERMISSION.name
    assert len(errors[0]["permissions"]) == 1
    assert errors[0]["permissions"][0] == AccountPermissions.MANAGE_USERS.name

    # for superuser: deactivation succeeds despite the orphaned permission
    response = superuser_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content["data"]["staffUpdate"]
    errors = data["staffErrors"]
    staff_user1.refresh_from_db()
    assert data["user"]["email"] == staff_user1.email
    assert data["user"]["isActive"] is False
    assert not errors
    assert not staff_user1.is_active
def test_staff_update_deactivate_with_manage_staff_all_perms_manageable(
    staff_api_client,
    staff_users,
    permission_manage_users,
    permission_manage_staff,
    permission_manage_orders,
    media_root,
):
    """Deactivation succeeds when every permission of the target user is
    still held by another active staff member."""
    query = STAFF_UPDATE_MUTATIONS
    groups = Group.objects.bulk_create(
        [
            Group(name="manage users"),
            Group(name="manage staff"),
            Group(name="manage orders"),
        ]
    )
    group1, group2, group3 = groups
    group1.permissions.add(permission_manage_users)
    group2.permissions.add(permission_manage_staff)
    group3.permissions.add(permission_manage_orders)

    staff_user, staff_user1, staff_user2 = staff_users
    # staff_user2 shares every group of staff_user1, so no permission becomes
    # unmanageable after the deactivation.
    group1.user_set.add(staff_user1, staff_user2)
    group2.user_set.add(staff_user2, staff_user1)
    group3.user_set.add(staff_user2)

    staff_user.user_permissions.add(permission_manage_users, permission_manage_orders)
    # `user_id` instead of `id` to avoid shadowing the builtin.
    user_id = graphene.Node.to_global_id("User", staff_user1.id)
    variables = {"id": user_id, "input": {"isActive": False}}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_staff]
    )
    content = get_graphql_content(response)
    data = content["data"]["staffUpdate"]
    errors = data["staffErrors"]
    staff_user1.refresh_from_db()
    assert not errors
    assert staff_user1.is_active is False
STAFF_DELETE_MUTATION = """
mutation DeleteStaff($id: ID!) {
staffDelete(id: $id) {
staffErrors {
field
code
message
permissions
}
user {
id
}
}
}
"""
def test_staff_delete(staff_api_client, permission_manage_staff):
    """A requestor with manage_staff can delete a plain staff user."""
    target = User.objects.create(email="<EMAIL>", is_staff=True)
    variables = {"id": graphene.Node.to_global_id("User", target.id)}
    response = staff_api_client.post_graphql(
        STAFF_DELETE_MUTATION, variables, permissions=[permission_manage_staff]
    )
    payload = get_graphql_content(response)["data"]["staffDelete"]
    assert payload["staffErrors"] == []
    # The user row must be gone.
    assert not User.objects.filter(pk=target.id).exists()
def test_staff_delete_app_no_permission(app_api_client, permission_manage_staff):
    """App clients may not call staffDelete, even with manage_staff granted."""
    target = User.objects.create(email="<EMAIL>", is_staff=True)
    variables = {"id": graphene.Node.to_global_id("User", target.id)}
    response = app_api_client.post_graphql(
        STAFF_DELETE_MUTATION, variables, permissions=[permission_manage_staff]
    )
    assert_no_permission(response)
def test_staff_delete_out_of_scope_user(
    staff_api_client,
    superuser_api_client,
    permission_manage_staff,
    permission_manage_products,
):
    """Ensure staff user cannot delete users even when some of user permissions are
    out of requestor scope.
    Ensure superuser pass restrictions.
    """
    query = STAFF_DELETE_MUTATION
    # Target holds manage_products, which the requestor lacks -> out of scope.
    staff_user = User.objects.create(email="<EMAIL>", is_staff=True)
    staff_user.user_permissions.add(permission_manage_products)
    user_id = graphene.Node.to_global_id("User", staff_user.id)
    variables = {"id": user_id}
    # for staff user
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_staff]
    )
    content = get_graphql_content(response)
    data = content["data"]["staffDelete"]
    # The staff requestor is rejected with OUT_OF_SCOPE_USER; user survives.
    assert not data["user"]
    assert len(data["staffErrors"]) == 1
    assert data["staffErrors"][0]["field"] == "id"
    assert data["staffErrors"][0]["code"] == AccountErrorCode.OUT_OF_SCOPE_USER.name
    # for superuser
    response = superuser_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content["data"]["staffDelete"]
    # Superuser bypasses the scope check and the user is removed.
    assert data["staffErrors"] == []
    assert not User.objects.filter(pk=staff_user.id).exists()
def test_staff_delete_left_not_manageable_permissions(
    staff_api_client,
    superuser_api_client,
    staff_users,
    permission_manage_staff,
    permission_manage_users,
    permission_manage_orders,
):
    """Ensure staff user can't and superuser can delete staff user when some of
    permissions will be not manageable.
    """
    query = STAFF_DELETE_MUTATION
    groups = Group.objects.bulk_create(
        [
            Group(name="manage users"),
            Group(name="manage staff"),
            Group(name="manage orders"),
        ]
    )
    group1, group2, group3 = groups
    group1.permissions.add(permission_manage_users)
    group2.permissions.add(permission_manage_staff)
    group3.permissions.add(permission_manage_orders)
    staff_user, staff_user1, staff_user2 = staff_users
    # staff_user1 is the sole member of group1 and group3, so deleting it
    # leaves manage_users and manage_orders with no manageable owner.
    group1.user_set.add(staff_user1)
    group2.user_set.add(staff_user2, staff_user1)
    group3.user_set.add(staff_user1)
    user_id = graphene.Node.to_global_id("User", staff_user1.id)
    variables = {"id": user_id}
    # for staff user
    staff_user.user_permissions.add(permission_manage_users, permission_manage_orders)
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_staff]
    )
    content = get_graphql_content(response)
    data = content["data"]["staffDelete"]
    errors = data["staffErrors"]
    # Staff requestor is blocked; both orphaned permissions are reported.
    assert len(errors) == 1
    assert errors[0]["field"] == "id"
    assert errors[0]["code"] == AccountErrorCode.LEFT_NOT_MANAGEABLE_PERMISSION.name
    assert set(errors[0]["permissions"]) == {
        AccountPermissions.MANAGE_USERS.name,
        OrderPermissions.MANAGE_ORDERS.name,
    }
    assert User.objects.filter(pk=staff_user1.id).exists()
    # for superuser
    # NOTE(review): this re-adds permissions already granted above — it looks
    # redundant but is harmless (Django's add() is idempotent).
    staff_user.user_permissions.add(permission_manage_users, permission_manage_orders)
    response = superuser_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content["data"]["staffDelete"]
    errors = data["staffErrors"]
    # Superuser may delete even though permissions become unmanageable.
    assert not errors
    assert not User.objects.filter(pk=staff_user1.id).exists()
def test_staff_delete_all_permissions_manageable(
    staff_api_client,
    staff_users,
    permission_manage_staff,
    permission_manage_users,
    permission_manage_orders,
):
    """Deletion succeeds when every permission of the target stays manageable."""
    users_group, staff_group, users_orders_group = Group.objects.bulk_create(
        [
            Group(name="manage users"),
            Group(name="manage staff"),
            Group(name="manage users and orders"),
        ]
    )
    users_group.permissions.add(permission_manage_users)
    staff_group.permissions.add(permission_manage_staff)
    users_orders_group.permissions.add(
        permission_manage_users, permission_manage_orders
    )

    requestor, target, other = staff_users
    users_group.user_set.add(target)
    staff_group.user_set.add(other)
    users_orders_group.user_set.add(target)

    requestor.user_permissions.add(permission_manage_users, permission_manage_orders)
    variables = {"id": graphene.Node.to_global_id("User", target.id)}
    response = staff_api_client.post_graphql(
        STAFF_DELETE_MUTATION, variables, permissions=[permission_manage_staff]
    )
    payload = get_graphql_content(response)["data"]["staffDelete"]
    assert len(payload["staffErrors"]) == 0
    assert not User.objects.filter(pk=target.id).exists()
def test_user_delete_errors(staff_user, admin_user):
    """UserDelete.clean_instance rejects self-deletion and superusers."""
    info = Mock(context=Mock(user=staff_user))

    # A user may not delete their own account.
    with pytest.raises(ValidationError) as exc_info:
        UserDelete.clean_instance(info, staff_user)
    assert (
        exc_info.value.error_dict["id"][0].message
        == "You cannot delete your own account."
    )

    # A superuser account cannot be deleted through this mutation.
    info = Mock(context=Mock(user=staff_user))
    with pytest.raises(ValidationError) as exc_info:
        UserDelete.clean_instance(info, admin_user)
    assert (
        exc_info.value.error_dict["id"][0].message == "Cannot delete this account."
    )
def test_staff_delete_errors(staff_user, customer_user, admin_user):
    """StaffDelete.clean_instance rejects non-staff targets only."""
    # Attempting to staff-delete a plain customer raises.
    info = Mock(context=Mock(user=staff_user))
    with pytest.raises(ValidationError) as exc_info:
        StaffDelete.clean_instance(info, customer_user)
    assert (
        exc_info.value.error_dict["id"][0].message
        == "Cannot delete a non-staff users."
    )

    # A superuser deleting a staff account should not raise.
    StaffDelete.clean_instance(Mock(context=Mock(user=admin_user)), staff_user)
def test_staff_update_errors(staff_user, customer_user, admin_user):
    """Validate StaffUpdate.clean_is_active error accumulation.

    Covers: is_active=None is a no-op, deactivating one's own account is an
    error, and deactivating a superuser records two distinct error codes.
    """
    errors = defaultdict(list)
    # Renamed from `input`, which shadowed the builtin of the same name.
    cleaned_input = {"is_active": None}
    StaffUpdate.clean_is_active(cleaned_input, customer_user, staff_user, errors)
    # is_active=None means "leave unchanged": no error expected.
    assert not errors["is_active"]

    cleaned_input["is_active"] = False
    # Requestor deactivating their own account -> one error.
    StaffUpdate.clean_is_active(cleaned_input, staff_user, staff_user, errors)
    assert len(errors["is_active"]) == 1
    assert (
        errors["is_active"][0].code.upper()
        == AccountErrorCode.DEACTIVATE_OWN_ACCOUNT.name
    )

    errors = defaultdict(list)
    # Deactivating a superuser -> two errors with distinct codes.
    StaffUpdate.clean_is_active(cleaned_input, admin_user, staff_user, errors)
    assert len(errors["is_active"]) == 2
    assert {error.code.upper() for error in errors["is_active"]} == {
        AccountErrorCode.DEACTIVATE_SUPERUSER_ACCOUNT.name,
        AccountErrorCode.LEFT_NOT_MANAGEABLE_PERMISSION.name,
    }

    errors = defaultdict(list)
    # Deactivating a plain customer account should not record errors.
    StaffUpdate.clean_is_active(cleaned_input, customer_user, staff_user, errors)
    assert not errors["is_active"]
SET_PASSWORD_MUTATION = """
mutation SetPassword($email: String!, $token: String!, $password: String!) {
setPassword(email: $email, token: $token, password: $password) {
errors {
field
message
}
accountErrors {
field
message
code
}
user {
id
}
token
refreshToken
}
}
"""
@freeze_time("2018-05-31 12:00:01")
def test_set_password(user_api_client, customer_user):
token = default_token_generator.make_token(customer_user)
password = "<PASSWORD>"
variables = {"email": customer_user.email, "password": password, "token": token}
response = user_api_client.post_graphql(SET_PASSWORD_MUTATION, variables)
content = get_graphql_content(response)
data = content["data"]["setPassword"]
assert data["user"]["id"]
assert data["token"]
customer_user.refresh_from_db()
assert | |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests cluster_device_api."""
import datetime
import unittest
import mock
from protorpc import protojson
import pytz
from tradefed_cluster.util import ndb_shim as ndb
from tradefed_cluster import api_messages
from tradefed_cluster import api_test
from tradefed_cluster import common
from tradefed_cluster import datastore_entities
from tradefed_cluster import datastore_test_util
from tradefed_cluster import device_manager
from tradefed_cluster import note_manager
class ClusterDeviceApiTest(api_test.ApiTest):
TIMESTAMP = datetime.datetime(2015, 10, 9)
TIMESTAMP_0 = datetime.datetime(2015, 10, 5)
TIMESTAMP_1 = datetime.datetime(2015, 10, 6)
def _assertDeviceCount(self, request, count):
"""Helper function for checking device list count given a request."""
api_response = self.testapp.post_json(
'/_ah/api/ClusterDeviceApi.ListDevices', request)
device_collection = protojson.decode_message(
api_messages.DeviceInfoCollection, api_response.body)
self.assertEqual('200 OK', api_response.status)
self.assertEqual(count, len(device_collection.device_infos))
def _setDeviceState(self, serial, state):
"""Helper function to set a device's state."""
device = datastore_entities.DeviceInfo.query().filter(
datastore_entities.DeviceInfo.device_serial == serial).get()
device.state = state
device.put()
  def setUp(self):
    """Seeds hosts, devices, a device note and state history for the tests."""
    api_test.ApiTest.setUp(self)
    # Visible host 'host_0' with two visible devices.
    self.ndb_host_0 = datastore_test_util.CreateHost('free', 'host_0')
    self.ndb_device_0 = datastore_test_util.CreateDevice(
        'free', 'host_0', 'device_0', 'lab-name-1', timestamp=self.TIMESTAMP)
    self.ndb_device_1 = datastore_test_util.CreateDevice(
        'free', 'host_0', 'device_1', timestamp=self.TIMESTAMP)
    # Visible host 'host_1' in lab 'alab' with one hidden and one NULL device.
    self.ndb_host_1 = datastore_test_util.CreateHost(
        'paid', 'host_1', lab_name='alab')
    self.ndb_device_2 = datastore_test_util.CreateDevice(
        'paid', 'host_1', 'device_2', hidden=True, lab_name='alab')
    self.ndb_device_3 = datastore_test_util.CreateDevice(
        'paid',
        'host_1',
        'device_3',
        lab_name='alab',
        device_type=api_messages.DeviceTypeMessage.NULL)
    # Hidden host 'host_2' whose device is itself visible.
    self.ndb_host_2 = datastore_test_util.CreateHost(
        'free', 'host_2', hidden=True)
    self.ndb_device_4 = datastore_test_util.CreateDevice(
        'free',
        'host_2',
        'device_4',
        device_type=api_messages.DeviceTypeMessage.NULL)
    # One note attached to device_0.
    self.note = datastore_entities.Note(
        type=common.NoteType.DEVICE_NOTE,
        device_serial='device_0',
        user='user0',
        timestamp=self.TIMESTAMP,
        message='Hello, World')
    self.note.put()
    # Two state-history entries for device_0 (Available then Allocated).
    self.device_history_0 = datastore_entities.DeviceStateHistory(
        device_serial='device_0',
        parent=self.ndb_device_0.key,
        timestamp=self.TIMESTAMP_0,
        state='Available')
    device_history_0_key = self.device_history_0.put()
    self.device_history_1 = datastore_entities.DeviceStateHistory(
        device_serial='device_0',
        parent=self.ndb_device_0.key,
        timestamp=self.TIMESTAMP_1,
        state='Allocated')
    device_history_1_key = self.device_history_1.put()
    self.history = [device_history_0_key, device_history_1_key]
def testListDevices(self):
"""Tests ListDevices returns all devices."""
api_request = {}
api_response = self.testapp.post_json(
'/_ah/api/ClusterDeviceApi.ListDevices', api_request)
device_collection = protojson.decode_message(
api_messages.DeviceInfoCollection, api_response.body)
self.assertEqual('200 OK', api_response.status)
# ListDevices counts non-hidden devices under hidden host.
self.assertEqual(4, len(device_collection.device_infos))
def testListDevices_filterCluster(self):
"""Tests ListDevices returns devices filtered by cluster."""
api_request = {'cluster_id': 'free'}
api_response = self.testapp.post_json(
'/_ah/api/ClusterDeviceApi.ListDevices', api_request)
device_collection = protojson.decode_message(
api_messages.DeviceInfoCollection, api_response.body)
self.assertEqual('200 OK', api_response.status)
# ListDevices counts non-hidden devices under hidden host.
self.assertEqual(3, len(device_collection.device_infos))
for device in device_collection.device_infos:
self.assertEqual('free', device.cluster)
def testListDevices_filterLabName(self):
"""Tests ListDevices returns devices filtered by lab_name."""
api_request = {'lab_name': 'alab'}
api_response = self.testapp.post_json(
'/_ah/api/ClusterDeviceApi.ListDevices', api_request)
device_collection = protojson.decode_message(
api_messages.DeviceInfoCollection, api_response.body)
self.assertEqual('200 OK', api_response.status)
# It will not get the hidden device.
self.assertEqual(1, len(device_collection.device_infos))
for device in device_collection.device_infos:
self.assertEqual('alab', device.lab_name)
def testListDevices_filterDeviceSerial(self):
"""Tests ListDevices returns device filtered by device serial."""
api_request = {'device_serial': self.ndb_device_3.device_serial}
api_response = self.testapp.post_json(
'/_ah/api/ClusterDeviceApi.ListDevices', api_request)
device_collection = protojson.decode_message(
api_messages.DeviceInfoCollection, api_response.body)
self.assertEqual('200 OK', api_response.status)
self.assertEqual(1, len(device_collection.device_infos))
self.assertEqual(self.ndb_device_3.device_serial,
device_collection.device_infos[0].device_serial)
def testListDevices_filterTestHarness(self):
"""Tests ListDevices returns devices filtered by test harness."""
self.ndb_device_0 = datastore_test_util.CreateDevice(
'mh_cluster',
'mh_host',
'mh_device',
timestamp=self.TIMESTAMP,
test_harness='mh')
api_request = {'test_harness': 'mh'}
api_response = self.testapp.post_json(
'/_ah/api/ClusterDeviceApi.ListDevices', api_request)
device_collection = protojson.decode_message(
api_messages.DeviceInfoCollection, api_response.body)
self.assertEqual('200 OK', api_response.status)
# It will not get the hidden device.
self.assertEqual(1, len(device_collection.device_infos))
self.assertEqual('mh', device_collection.device_infos[0].test_harness)
def testListDevices_filterMultiTestHarness(self):
"""Tests ListDevices returns devices filtered by multiple test harness."""
datastore_test_util.CreateDevice(
'mh_cluster',
'mh_host',
'mh_device',
test_harness='mh')
datastore_test_util.CreateDevice(
'goats_cluster',
'goats_host',
'goats_device',
test_harness='goats')
api_request = {'test_harness': ['mh', 'goats']}
api_response = self.testapp.post_json(
'/_ah/api/ClusterDeviceApi.ListDevices', api_request)
device_collection = protojson.decode_message(
api_messages.DeviceInfoCollection, api_response.body)
self.assertEqual('200 OK', api_response.status)
# It will not get the hidden device.
self.assertEqual(2, len(device_collection.device_infos))
self.assertEqual('goats', device_collection.device_infos[0].test_harness)
self.assertEqual('mh', device_collection.device_infos[1].test_harness)
def testListDevices_filterHostname(self):
"""Tests ListDevices returns devices filtered by hostname."""
api_request = {'hostname': 'host_1'}
api_response = self.testapp.post_json(
'/_ah/api/ClusterDeviceApi.ListDevices', api_request)
device_collection = protojson.decode_message(
api_messages.DeviceInfoCollection, api_response.body)
self.assertEqual('200 OK', api_response.status)
# It will not get the hidden device.
self.assertEqual(1, len(device_collection.device_infos))
for device in device_collection.device_infos:
self.assertEqual('host_1', device.hostname)
def testListDevices_filterHostnames(self):
"""Tests ListDevices returns devices filtered by hostnames."""
api_request = {'hostnames': ['host_0', 'host_1']}
api_response = self.testapp.post_json(
'/_ah/api/ClusterDeviceApi.ListDevices', api_request)
device_collection = protojson.decode_message(
api_messages.DeviceInfoCollection, api_response.body)
self.assertEqual('200 OK', api_response.status)
self.assertEqual(3, len(device_collection.device_infos))
self.assertEqual(self.ndb_device_0.device_serial,
device_collection.device_infos[0].device_serial)
self.assertEqual(self.ndb_device_1.device_serial,
device_collection.device_infos[1].device_serial)
self.assertEqual(self.ndb_device_3.device_serial,
device_collection.device_infos[2].device_serial)
def testListDevices_filterPools(self):
"""Tests ListDevices returns devices filtered by pools."""
datastore_test_util.CreateDevice('cluster_01', 'host_01', 'device_01',
pools='pools_A')
datastore_test_util.CreateDevice('cluster_01', 'host_01', 'device_02',
pools='pools_B')
api_request = {'pools': ['pools_A', 'pools_B']}
api_response = self.testapp.post_json(
'/_ah/api/ClusterDeviceApi.ListDevices', api_request)
device_collection = protojson.decode_message(
api_messages.DeviceInfoCollection, api_response.body)
self.assertEqual('200 OK', api_response.status)
self.assertEqual(2, len(device_collection.device_infos))
self.assertEqual(['pools_A'], device_collection.device_infos[0].pools)
self.assertEqual(['pools_B'], device_collection.device_infos[1].pools)
def testListDevices_includeHidden(self):
"""Tests ListDevices returns both hidden and non-hidden devices."""
api_request = {'include_hidden': True}
api_response = self.testapp.post_json(
'/_ah/api/ClusterDeviceApi.ListDevices', api_request)
device_collection = protojson.decode_message(
api_messages.DeviceInfoCollection, api_response.body)
self.assertEqual('200 OK', api_response.status)
self.assertEqual(5, len(device_collection.device_infos))
def testListDevices_deviceTypePhysicalAndNull(self):
"""Tests ListDevices returns physical and null devices."""
api_request = {'device_types': ['PHYSICAL', 'NULL']}
api_response = self.testapp.post_json(
'/_ah/api/ClusterDeviceApi.ListDevices', api_request)
device_collection = protojson.decode_message(
api_messages.DeviceInfoCollection, api_response.body)
self.assertEqual('200 OK', api_response.status)
# ListDevices counts non-hidden devices under hidden host.
self.assertEqual(4, len(device_collection.device_infos))
def testListDevices_deviceTypeNull(self):
"""Tests ListDevices returns null devices."""
api_request = {'device_types': ['NULL']}
api_response = self.testapp.post_json(
'/_ah/api/ClusterDeviceApi.ListDevices', api_request)
device_collection = protojson.decode_message(
api_messages.DeviceInfoCollection, api_response.body)
self.assertEqual('200 OK', api_response.status)
# ListDevices counts non-hidden devices under hidden host.
self.assertEqual(2, len(device_collection.device_infos))
for d in device_collection.device_infos:
self.assertEqual(api_messages.DeviceTypeMessage.NULL, d.device_type)
def testListDevices_filterHostGroups(self):
"""Tests ListDevices returns devices filtered by host groups."""
datastore_test_util.CreateDevice(
'cluster_01',
'host_01',
'device_01',
host_group='hg_01')
datastore_test_util.CreateDevice(
'cluster_01',
'host_01',
'device_02',
host_group='hg_02')
datastore_test_util.CreateDevice(
'cluster_01',
'host_01',
'device_03',
host_group='hg_02')
datastore_test_util.CreateDevice(
'cluster_01',
'host_01',
'device_04',
host_group='hg_03')
api_request = {'host_groups': ['hg_01', 'hg_02']}
api_response = self.testapp.post_json(
'/_ah/api/ClusterDeviceApi.ListDevices', api_request)
device_collection = protojson.decode_message(
api_messages.DeviceInfoCollection, api_response.body)
self.assertEqual('200 OK', api_response.status)
self.assertEqual(3, len(device_collection.device_infos))
self.assertEqual('hg_01', device_collection.device_infos[0].host_group)
self.assertEqual('hg_02', device_collection.device_infos[1].host_group)
self.assertEqual('hg_02', device_collection.device_infos[2].host_group)
  def testListDevices_withOffset(self):
    """Tests ListDevices pagination via the count parameter.

    Note: despite the method name no offset is sent; only `count` is
    exercised, and the presence of a next-page cursor is verified.
    """
    api_request = {'include_hidden': True, 'count': '2'}
    api_response = self.testapp.post_json(
        '/_ah/api/ClusterDeviceApi.ListDevices', api_request)
    device_collection = protojson.decode_message(
        api_messages.DeviceInfoCollection, api_response.body)
    self.assertEqual('200 OK', api_response.status)
    self.assertEqual(2, len(device_collection.device_infos))
    # 5 devices exist in total, so more pages remain after the first 2.
    self.assertTrue(device_collection.more)
    self.assertIsNotNone(device_collection.next_cursor)
  def testListDevices_withCursorAndOffsetAndLastPage(self):
    """Tests ListDevices cursor pagination through to the last page.

    This test retrieves the last page which should have less devices than the
    specified count.
    """
    # 5 devices with count=3: the first page holds 3 devices and the cursor
    # then yields a final page of the remaining 2.
    api_request = {'include_hidden': True, 'count': '3'}
    api_response = self.testapp.post_json(
        '/_ah/api/ClusterDeviceApi.ListDevices', api_request)
    device_collection = protojson.decode_message(
        api_messages.DeviceInfoCollection, api_response.body)
    device_infos = device_collection.device_infos
    self.assertEqual('200 OK', api_response.status)
    self.assertEqual(3, len(device_infos))
    self.assertEqual('device_0', device_infos[0].device_serial)
    self.assertEqual('device_1', device_infos[1].device_serial)
    self.assertEqual('device_2', device_infos[2].device_serial)
    # Fetch the second (last) page using the cursor from the first response.
    api_request = {
        'include_hidden': True,
        'count': '3',
        'cursor': device_collection.next_cursor
    }
    api_response = self.testapp.post_json(
        '/_ah/api/ClusterDeviceApi.ListDevices', api_request)
    device_collection = protojson.decode_message(
        api_messages.DeviceInfoCollection, api_response.body)
    device_infos = device_collection.device_infos
    self.assertEqual('200 OK', api_response.status)
    self.assertEqual(2, len(device_collection.device_infos))
    self.assertEqual('device_3', device_infos[0].device_serial)
    self.assertEqual('device_4', device_infos[1].device_serial)
    self.assertFalse(device_collection.more)
def testListDevices_filterRunTargets(self):
"""Tests ListDevices returns devices filtered by run targets."""
datastore_test_util.CreateDevice('cluster_01', 'host_01', 'device_01',
run_target='run_target_01')
datastore_test_util.CreateDevice('cluster_01', 'host_01', 'device_02',
run_target='run_target_02')
datastore_test_util.CreateDevice('cluster_01', 'host_01', 'device_03',
run_target='run_target_03')
api_request = {'run_targets': ['run_target_01', 'run_target_02']}
api_response = self.testapp.post_json(
'/_ah/api/ClusterDeviceApi.ListDevices', api_request)
device_collection = protojson.decode_message(
api_messages.DeviceInfoCollection, api_response.body)
self.assertEqual('200 OK', api_response.status)
self.assertEqual(2, len(device_collection.device_infos))
self.assertEqual('run_target_01',
device_collection.device_infos[0].run_target)
self.assertEqual('run_target_02',
device_collection.device_infos[1].run_target)
def testListDevices_filterState(self):
"""Tests ListDevices returns devices filtered by states."""
datastore_test_util.CreateDevice(
'cluster_01',
'host_01',
'device_01',
state=common.DeviceState.ALLOCATED)
datastore_test_util.CreateDevice(
'cluster_01', 'host_01', 'device_02', state=common.DeviceState.UNKNOWN)
api_request = {
'device_states': [
common.DeviceState.ALLOCATED, common.DeviceState.UNKNOWN
]
}
api_response = self.testapp.post_json(
'/_ah/api/ClusterDeviceApi.ListDevices', api_request)
device_collection = protojson.decode_message(
api_messages.DeviceInfoCollection, api_response.body)
self.assertEqual('200 OK', api_response.status)
self.assertEqual(2, len(device_collection.device_infos))
self.assertEqual(common.DeviceState.ALLOCATED,
device_collection.device_infos[0].state)
self.assertEqual(common.DeviceState.UNKNOWN,
device_collection.device_infos[1].state)
def testListDevices_filterExtraInfo(self):
"""Tests ListDevices returns devices filtered by extra info."""
extra_info_0 = {}
extra_info_0['sim_state'] = 'ready'
extra_info_0['key2'] = 'value2'
datastore_test_util.CreateDevice(
'cluster_01',
'host_01',
'device_01',
state=common.DeviceState.ALLOCATED,
extra_info=extra_info_0)
datastore_test_util.CreateDevice(
'cluster_01', 'host_01', 'device_02', state=common.DeviceState.UNKNOWN)
api_request = {
'flated_extra_info': 'sim_state:ready'
}
api_response = self.testapp.post_json(
'/_ah/api/ClusterDeviceApi.ListDevices', api_request)
device_collection = protojson.decode_message(
api_messages.DeviceInfoCollection, api_response.body)
self.assertEqual('200 OK', api_response.status)
self.assertEqual(1, len(device_collection.device_infos))
self.assertEqual('device_01',
device_collection.device_infos[0].device_serial)
  def testBatchGetLatestNotesByDevice(self):
    """Tests BatchGetLatestNotesByDevice returns each device's newest note."""
    # Two notes per device with distinct timestamps; only the later note of
    # each device should be returned.
    note_entities = [
        datastore_entities.Note(
            type=common.NoteType.DEVICE_NOTE,
            user='user1',
            device_serial=self.ndb_device_2.device_serial,
            timestamp=datetime.datetime(1987, 10, 19),
            message='message_0'),
        datastore_entities.Note(
            type=common.NoteType.DEVICE_NOTE,
            user='user1',
            device_serial=self.ndb_device_2.device_serial,
            timestamp=datetime.datetime(2020, 3, 12),
            message='message_3'),
        datastore_entities.Note(
            type=common.NoteType.DEVICE_NOTE,
            user='user1',
            device_serial=self.ndb_device_4.device_serial,
            timestamp=datetime.datetime(2001, 9, 17),
            message='message_1'),
        datastore_entities.Note(
            type=common.NoteType.DEVICE_NOTE,
            user='user1',
            device_serial=self.ndb_device_4.device_serial,
            timestamp=datetime.datetime(2008, 10, 15),
            message='message_2'),
    ]
    ndb.put_multi(note_entities)
    api_request = {
        'device_serials': [
            self.ndb_device_2.device_serial,
            self.ndb_device_4.device_serial,
        ]
    }
    api_response = self.testapp.post_json(
        '/_ah/api/ClusterDeviceApi.BatchGetLatestNotesByDevice', api_request)
    device_note_collection = protojson.decode_message(
        api_messages.NoteCollection, api_response.body)
    self.assertEqual('200 OK', api_response.status)
    self.assertEqual(2, len(device_note_collection.notes))
    # Latest note for device_2, then latest note for device_4.
    self.assertEqual('message_3',
                     device_note_collection.notes[0].message)
    self.assertEqual('message_2',
                     device_note_collection.notes[1].message)
  def testBatchGetLatestNotesByDevice_noNotesFound(self):
    """Tests BatchGetLatestNotesByDevice for devices that have no notes."""
    api_request = {
        'device_serials': [
            self.ndb_device_2.device_serial,
            self.ndb_device_4.device_serial,
        ]
    }
    api_response = self.testapp.post_json(
        '/_ah/api/ClusterDeviceApi.BatchGetLatestNotesByDevice', api_request)
    device_note_collection = protojson.decode_message(
        api_messages.NoteCollection, api_response.body)
    self.assertEqual('200 OK', api_response.status)
    # Neither device has notes, so the collection is empty.
    self.assertEqual(0, len(device_note_collection.notes))
def testGetDevice(self):
"""Tests GetDevice without including notes."""
api_request = {'device_serial': self.ndb_device_0.device_serial}
api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.GetDevice',
api_request)
device = protojson.decode_message(api_messages.DeviceInfo,
api_response.body)
self.assertEqual('200 OK', api_response.status)
self.assertEqual(self.ndb_device_0.device_serial, device.device_serial)
self.assertEqual(self.ndb_device_0.hostname, device.hostname)
self.assertEqual(self.ndb_device_0.physical_cluster, device.cluster)
self.assertEqual(self.ndb_device_0.battery_level, device.battery_level)
self.assertEqual(self.ndb_device_0.hidden, device.hidden)
self.assertEqual(0, len(device.notes))
  def testGetDevice_withHostname(self):
    """Tests GetDevice when a hostname is supplied alongside the serial."""
    api_request = {
        'device_serial': self.ndb_device_0.device_serial,
        'hostname': self.ndb_device_0.hostname
    }
    api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.GetDevice',
                                          api_request)
    device = protojson.decode_message(api_messages.DeviceInfo,
                                      api_response.body)
    self.assertEqual('200 OK', api_response.status)
    self.assertEqual(self.ndb_device_0.device_serial, device.device_serial)
    self.assertEqual(self.ndb_device_0.hostname, device.hostname)
def testGetDevice_notFound(self):
"""Tests GetDevice where it does not exist."""
api_request = {'device_serial': 'fake_device_serial'}
api_response = self.testapp.post_json(
'/_ah/api/ClusterDeviceApi.GetDevice', api_request, expect_errors=True)
self.assertEqual('404 Not Found', api_response.status)
def testGetDevice_includeNotes(self):
| |
<gh_stars>1-10
# Author:柠檬班-木森
# E-mail:<EMAIL>
import copy
import re
import json
import os
from numbers import Number
import requests
import jsonpath
from apin.core.dataParser import DataParser
from apin.core.initEvn import ENV, func_tools, DB
from apin.core.basecase import BaseTestCase
from apin.core.basecase import CaseLog
from requests_toolbelt.multipart import MultipartEncoder
class CaseData:
    """Container describing a single HTTP test case.

    Holds the request arguments that map onto ``requests`` (url, method,
    params, data, json, files, headers, cookies, auth, timeout,
    allow_redirects, proxies, hooks, stream, verify, cert) plus the
    case-level metadata (title, host, interface, extract, verification).
    """
    # Attribute names forwarded verbatim to requests as keyword arguments.
    __attrs = ['url', 'method', 'params', 'data', 'json', 'files', 'headers', 'cookies', 'auth', 'timeout',
               'allow_redirects', 'proxies', 'hooks', 'stream', 'verify', 'cert']

    def __init__(self, title="", host="", interface="", extract=None, verification=None, url=None, method=None,
                 params=None, data=None, headers=None, cookies=None, files=None,
                 auth=None, timeout=None, allow_redirects=True, proxies=None,
                 hooks=None, stream=None, verify=None, cert=None, json=None, ):
        """Store the case data; nothing is sent here.

        The request parameters mirror ``requests.Session.request`` — see the
        requests documentation for their exact semantics.  (The previous
        docstring was copied from requests and wrongly claimed this method
        sends the request and returns a Response.)

        :param title: human-readable case title, used in reports/logs.
        :param host: base URL of the service under test.
        :param interface: path appended to ``host`` to build the request URL.
        :param extract: mapping describing variables to pull from the
            response, ``{name: (level, method, expression)}``.
        :param verification: assertion rules, a list of
            ``[assert_method, expected, actual]`` triples.
        """
        self.title = title
        self.host = host
        self.interface = interface
        self.verification = verification
        self.extract = extract
        self.url = url
        self.method = method
        self.params = params
        self.data = data
        self.json = json
        self.files = files
        self.headers = headers
        self.cookies = cookies
        self.auth = auth
        self.timeout = timeout
        self.allow_redirects = allow_redirects
        self.proxies = proxies
        self.hooks = hooks
        self.stream = stream
        self.verify = verify
        self.cert = cert
        # Final keyword arguments for requests, filled by data_handle().
        self.datas = {}

    def data_handle(self, test):
        """Resolve the request data before the case is sent.

        Fills host/interface/method/headers from the test class or the
        global ENV when the case did not set them, builds ``self.url``,
        converts file uploads into a multipart body, and collects the
        requests kwargs into ``self.datas``.

        :param test: the running test instance, used for attribute fallbacks.
        :raises ValueError: when host, interface or method cannot be resolved.
        """
        # Fall back to test-class attributes, then the global ENV, for any
        # request field the case itself left empty.
        if not self.host:
            host = getattr(test, 'host', None) or ENV.get('host')
            if host:
                setattr(self, 'host', host)
            else:
                raise ValueError('用例参数host不能为空')
        if not self.interface:
            interface = getattr(test, 'interface', None) or ENV.get('interface')
            if interface:
                setattr(self, 'interface', interface)
            else:
                raise ValueError('用例参数interface不能为空')
        self.url = self.host + self.interface
        if not getattr(self, 'method', None):
            method = getattr(test, 'method', None) or ENV.get('method')
            if method:
                setattr(self, 'method', method)
            else:
                raise ValueError('用例参数method不能为空')
        if not getattr(self, 'headers', None):
            headers = getattr(test, 'headers', None) or ENV.get('headers')
            if headers:
                setattr(self, 'headers', headers)
        # ======= file upload handling ===============
        # (Removed an unused local that read self.datas['files'] before
        # self.datas was ever populated.)
        if self.files:
            if isinstance(self.files, dict):
                file_data = self.files.items()
            else:
                file_data = self.files
            field = []
            for name, file_info in file_data:
                # A 3-tuple whose middle element is an existing file path is
                # treated as an upload; anything else is a plain form field.
                if len(file_info) == 3 and os.path.isfile(file_info[1]):
                    field.append([name, (file_info[0], open(file_info[1], 'rb'), file_info[2])])
                else:
                    field.append([name, file_info])
            form_data = MultipartEncoder(fields=field)
            self.headers["Content-Type"] = form_data.content_type
            self.data = form_data
            self.files = None
        else:
            # No upload: drop a stale Content-Type so requests can set its own.
            # NOTE(review): assumes headers resolved to a dict above — confirm
            # behaviour when no headers are configured anywhere.
            if self.headers.get("Content-Type"):
                del self.headers["Content-Type"]
        # Collect only the attributes that requests understands.
        for k, v in self.__dict__.items():
            if k in self.__attrs:
                self.datas[k] = v

    def get(self, attr):
        """Return the attribute named ``attr``, or None when it is unset."""
        return getattr(self, attr, None)
class Extract:
    """Helpers that pull values out of an HTTP response."""

    def json_extract(self, response, ext):
        """Extract a value from the JSON body via a jsonpath expression.

        Returns the first match, or '' when nothing matches.
        """
        matches = jsonpath.jsonpath(response.json(), ext)
        return matches[0] if matches else ''

    def re_extract(self, response, ext):
        """Extract group(1) of a regex match against the response text.

        Returns '' when the pattern does not match.
        """
        match = re.search(ext, response.text)
        return match.group(1) if match else ''
class HttpCase(BaseTestCase, Extract, CaseLog):
    """Data-driven HTTP test case: sends requests, extracts, asserts."""
    # Class-level (local) variable environment, shared by cases in the class.
    env = {}
    # Default request settings; individual cases may override these.
    host = None
    interface = None
    headers = None
    method = None
    # NOTE(review): populated elsewhere — presumably the collected case list.
    Cases = []
    # Test results (aggregated outside this chunk).
    test_result = []
    # Shared requests session; cookies persist across requests made with it.
    session = requests.Session()
    def perform(self, case):
        """Execute one test case end to end.

        Flow: log env -> run the optional DB pre-check (a generator paused at
        its first yield) -> send the HTTP request -> extract variables ->
        assert on the response -> resume the DB generator for post-checks.
        """
        self.__run_log()
        # Pre-request database check: the hook is a generator; advancing it
        # once runs the "before" queries and pauses at the yield.
        db_func = self.__get_db_check(case)
        if db_func:
            self.DBCheck = db_func(self, DB, ENV, self.env)
            try:
                self.debug_log('执行前置sql语句')
                next(self.DBCheck)
            except StopIteration:
                # The hook has no yield: nothing to resume after the request.
                del self.DBCheck
        # Send the HTTP request.
        response = self.http_requests(case)
        # Extract variables from the response.
        self.data_extraction(response, case)
        self.__run_log()
        # Assert on the response.
        self.assert_result(response, case)
        # Resume the DB generator for post-request queries and checks.
        self.__assert_db()
    def data_extraction(self, response, case):
        """Extract variables from the response into ENV or self.env.

        :param response: response object
        :param case: case data; its 'extract' mapping has the shape
            {name: (level, method, expression)} where level is 'ENV' (global)
            or 'env' (class-local) and method is 'jsonpath' or 're'.
        :return: None
        """
        exts = case.get('extract') or getattr(self, 'extract', None)
        # Nothing to do unless a non-empty dict of extraction specs exists.
        if not (isinstance(exts, dict) and exts): return
        self.info_log("从响应结果中开始提取数据")
        self.extras = []
        # Walk each extraction spec.
        for name, ext in exts.items():
            # Dispatch on the extraction method.
            if len(ext) == 3 and ext[1] == "jsonpath":
                value = self.json_extract(response, ext[2])
            elif len(ext) == 3 and ext[1] == "re":
                value = self.re_extract(response, ext[2])
            else:
                self.error_log("变量{},的提取表达式 :{}格式不对!".format(name, ext))
                self.extras.append((name, ext, '提取失败!'))
                # NOTE(review): this `break` abandons all remaining
                # extractions, while the invalid-level branch below uses
                # `continue` — confirm the asymmetry is intentional.
                break
            # Store at the requested level: ENV is global, env is class-local.
            if ext[0] == 'ENV':
                ENV[name] = value
            elif ext[0] == 'env':
                self.env[name] = value
            else:
                self.error_log("错误的变量级别,变量提取表达式中的变量级别只能为ENV,或者env".format(ext[1]))
                continue
            self.extras.append((name, ext, value))
            self.info_log("提取变量:{},提取方式【{}】,提取表达式:{},提取值为:{}".format(name, ext[1], ext[2], value))
    def http_requests(self, case):
        """Send the case's HTTP request, running request/response hooks.

        Returns the response object and also stores it on self.response.
        """
        # Run the user-supplied pre-request hook, then resolve variables and
        # functions embedded in the case data.
        self.__request_hook(case, self.env, ENV, DB)
        case = self.__handle_data(case)
        self.info_log('正在发送请求:')
        # Request is defined elsewhere in this module.
        response = Request(case, self).request_api()
        self.__response_hook(case, response, self.env, ENV, DB)
        self.response = response
        return response
    def assert_result(self, response, case):
        """Run the case's verification rules against the response.

        Each rule is a [assert_method, expected, actual] triple; malformed
        rules raise ValueError.
        """
        self.assert_info = []
        # Fetch the verification rules (case-level first, then class-level).
        assert_list = case.get('verification') or getattr(self, 'verification', None)
        # Only assert when rules are present.
        if assert_list and isinstance(assert_list, list):
            # Check each rule in order.
            for item in assert_list:
                # Each rule must be a 3-element list.
                if isinstance(item, list) and len(item) == 3:
                    self.__verification(response, item)
                else:
                    raise ValueError("断言表达式 {} 格式错误:,\n断言表达式必须为如下格式:[断言方式,预期结果,实际结果]".format(item))
        elif assert_list:
            raise ValueError("""{}verification字段格式错误
        verification字段必须为如下格式:[
        [断言方式,预期结果,实际结果]
        ]""".format(assert_list))
    def __verification(self, response, item: list):
        """Evaluate one [method, expected, actual] assertion rule.

        'status_code' as the actual field compares against the HTTP status
        (eq only); any other actual value is resolved via variable/jsonpath
        /regex substitution before comparison.
        """
        self.info_log('断言表达式:{}'.format(item))
        # Special case: asserting on the HTTP status code.
        if item[2] == "status_code":
            if item[0] == 'eq':
                actual = response.status_code
                expected = item[1]
                self.info_log('断言http响应状态码是否和预期一致')
                return self.__assert(self.assertEqual, expected, actual, 'eq')
            else:
                self.error_log('http状态码,断言方式必须使用eq')
                raise ValueError('http状态码,断言方式必须使用eq')
        # Resolve the actual side (placeholders -> response values) and the
        # expected side (variable substitution).
        actual = self.__actualDataHandle(response, item[2])
        expected = item[1]
        expected = DataParser.parser_variable(self.env, expected) if expected else expected
        if item[0] == 'eq':
            self.__assert(self.assertEqual, expected, actual, 'eq')
        elif item[0] == 'contains':
            self.__assert(self.assertIn, expected, actual, 'contains')
        else:
            raise ValueError('断言方法有误!断言方式只支持 eq 和 contains')
    def __handle_data(self, case):
        """Normalize a case (dict or CaseData) and resolve embedded
        functions/variables in every field except extract/verification.

        Returns the prepared CaseData instance.
        """
        if isinstance(case, CaseData):
            data = case
        elif isinstance(case, dict):
            data = CaseData()
            for k, v in case.items():
                setattr(data, k, v)
        else:
            raise TypeError('用例数据只能为dict类型CaseData类型')
        # Safe to setattr while iterating: only existing keys are rebound,
        # so the dict's size never changes.
        for k, v in data.__dict__.items():
            if k not in ["extract", "verification"]:
                # Resolve ${func()} style function calls in the value.
                v = DataParser.parser_func(self.env, v)
                # Resolve variable references in the value.
                v = DataParser.parser_variable(self.env, v)
                setattr(data, k, v)
        data.data_handle(self)
        return data
def __run_log(self):
"""输出当前环境变量数据的日志"""
self.l_env = ['{}:{}\n'.format(k, repr(v)) for k, v in self.env.items()]
self.g_env = ['{}:{}\n'.format(k, repr(v)) for k, v in ENV.items()]
self.debug_log("全局变量:\n{}".format(''.join(self.g_env)))
self.debug_log("局部变量:\n{}".format(''.join(self.l_env)))
    def __actualDataHandle(self, response, act):
        """Resolve the 'actual' side of an assertion rule.

        Replaces V{{expr}} placeholders — in a scalar, or per entry of a
        dict/list — with values extracted from the response, preferring a
        jsonpath lookup and falling back to a regex search. Numbers pass
        through untouched.
        """
        if isinstance(act, Number):
            return act
        actual_pattern = r"V{{(.+?)}}"
        actual = DataParser.parser_variable(self.env, act) if act else act
        if isinstance(actual, dict):
            for k, v in actual.items():
                res = re.search(actual_pattern, v)
                if res:
                    path = res.group(1)
                    v1 = self.json_extract(response, path)
                    v2 = self.re_extract(response, path)
                    # Prefer jsonpath; the `or v1 == 0` keeps a literal 0.
                    value = v1 if v1 or v1 == 0 else v2
                    actual[k] = value
        elif isinstance(actual, list):
            # Iterate a deep copy so in-place writes don't affect iteration.
            for k, v in enumerate(copy.deepcopy(actual)):
                res = re.search(actual_pattern, v)
                if res:
                    path = res.group(1)
                    v1 = self.json_extract(response, path)
                    v2 = self.re_extract(response, path)
                    value = v1 if v1 or v1 == 0 else v2
                    actual[k] = value
        else:
            res = re.search(actual_pattern, actual)
            if res:
                path = res.group(1)
                v1 = self.json_extract(response, path)
                v2 = self.re_extract(response, path)
                value = v1 if v1 or v1 == 0 else v2
                actual = value
        return actual
def __assert(self, assert_method, expected, actual, method):
"""断言"""
self.info_log("预期结果:{} ".format(expected))
self.info_log("实际结果:{}".format(actual))
try:
assert_method(expected, actual)
except AssertionError as e:
self.assert_info.append((repr(expected), repr(actual), 'fail', method))
self.warning_log('断言未通过')
raise e
else:
self.assert_info.append((repr(expected), repr(actual), 'pass', method))
self.info_log('断言通过!')
    def __assert_db(self):
        # Post-request database verification: resume the DBCheck generator
        # (created from db_check_hook); when it finishes, its StopIteration
        # carries the list of assertion rules to run against the response.
        if hasattr(self, 'DBCheck'):
            try:
                self.debug_log('执行后置sql语句')
                next(self.DBCheck)
            except StopIteration as e:
                # A generator's return value travels on StopIteration.value.
                assert_list = e.value
                if assert_list and isinstance(assert_list, list):
                    self.info_log('开始数据库校验')
                    # Iterate over the assertion rules
                    for item in assert_list:
                        # Each rule must be a 3-item list: [method, expected, actual]
                        if isinstance(item, list) and len(item) == 3:
                            self.__verification(self.response, item)
                        else:
                            raise ValueError("断言表达式 {} 格式错误:,\n断言表达式必须为如下格式:[断言方式,预期结果,实际结果]".format(item))
                else:
                    raise TypeError("""db_check_hook中返回的数据库校验规则,必须为如下格式:[[断言方式,预期结果,实际结果]]""")
def __request_hook(self, case, env, ENV, db):
"""请求钩子函数"""
test = self
request_shell = case.get('request_hook') or self.get('request_hook')
if request_shell:
self.info_log('执行请求前置脚本')
try:
exec(request_shell)
except Exception as e:
self.error_log('请求前置脚本执行错误:\n{}'.format(e))
def __response_hook(self, case, response, env, ENV, db):
test = self
"""响应钩子函数"""
response_shell = case.get('response_hook') or self.get('response_hook')
if response_shell:
self.info_log('执行请求后置脚本')
try:
exec(response_shell)
except Exception as e:
self.error_log('执行请求后置脚本执行错误:\n{}'.format(e))
def __get_db_check(self, case):
hook = self.get('db_check_hook') or case.get('db_check_hook')
if not hook: return
# 执行setup_hook方法
if not isinstance(hook, str):
raise ValueError('db_check_hook只能传递funcTools中定义的函数名')
func = getattr(func_tools, hook)
if not func:
raise ValueError('函数引用错误:\n{}\n中的函数{}未定义!,'.format(func_tools, hook))
return func
@staticmethod
| |
<gh_stars>0
#!/usr/local/bin/python3
# DISCLAIMER:
"""
This script is for demo purposes only which provides customers with programming information regarding the Developer APIs. This script is supplied "AS IS" without any warranties and support.
We assume no responsibility or liability for the use of the script, convey no license or title under any patent or copyright.
We reserve the right to make changes in the script without notification and make no representation or warranty that such application will be suitable for the specified use without further testing or modification.
"""
# HISTORY
"""
28/02/2018 - Initial version.
03/07/2018 - Added CLI tool compatible menus and functions. New feature to activate only based on loadid and version.
By <NAME>
Akamai Solutions Architect
"""
import argparse
import configparser  # used by init_config's NoSectionError handler; was missing
import json
import os
import sys
import urllib
from urllib.parse import urljoin

import requests
from akamai.edgegrid import EdgeGridAuth, EdgeRc
class MyArgumentParser(argparse.ArgumentParser):
    """ArgumentParser that prints the full help text on a usage error and
    exits with status 0 instead of argparse's default status 2."""

    def error(self, message):
        self.print_help(sys.stderr)
        self.exit(0, '{}: error: {}\n'.format(self.prog, message))
# Function to get the Load Balancing ID version active.
def load_id_version(get_active, loadid):
    """Return (as a string) the version of *loadid* active on the network
    named by *get_active* ('STAGING'/'PRODUCTION'), or 'null' if absent."""
    reply = session.get(urljoin(baseurl, '/cloudlets/api/v2/origins/currentActivations'))
    activations = json.loads(reply.text)
    try:
        return str(activations[loadid][get_active]['version'])
    except Exception:
        print('ERROR: load balancing ID or active version(s) not found')
        return 'null'
# Function to get the Load Balancing ID in Prod.
def get_load_version(version, loadid, verbose):
    """Fetch the JSON definition of one load balancing ID version (validation off)."""
    endpoint = urljoin(baseurl, '/cloudlets/api/v2/origins/' + loadid + '/versions/' + version + '?validate=false')
    payload = json.loads(session.get(endpoint).text)
    if verbose:
        print('DEBUG: API Endpoint:', endpoint)
        print('DEBUG:', payload, '\n')
    return payload
# In the json response the data centers are under the same key, so this function indexes them in order to call them later on individually. This allows the user's data centers to be provided out of order and without necessarily providing all of them.
def create_dc_index(balancing):
    """Map each data center's originId to its list position.

    Example result: {'Alpharetta': 0, 'Dallas': 1, 'Fairfield': 2}
    """
    return {entry['originId']: position
            for position, entry in enumerate(balancing['dataCenters'])}
# Modify the json response stored in 'balancing' with the new % values for the data centers
def modify_datacenters(balancing, dc_index, my_datacenters):
    """Apply the user-supplied weights to the matching data centers.

    my_datacenters example: {'Dallas': 10.0, 'Fairfield': 30.0, 'Alpharetta': 60.0}
    Not every data center has to be listed; order does not matter.

    Returns the modified *balancing* object, or the sentinel strings
    'dc_not_found' (unknown data center name) / 'wrong_weights'
    (weights do not sum to 100.0).
    """
    try:
        for dc_name, dc_value in my_datacenters.items():
            # Hoisted out of the entry loop: the index lookup is loop-invariant
            # and a missing name raises KeyError -> handled below.
            dc_number = dc_index[dc_name]
            for entry in balancing['dataCenters']:
                if entry['originId'] == dc_name:
                    balancing['dataCenters'][dc_number]['percent'] = dc_value
                    print('INFO: DC', dc_name, 'Found and New weight set to', dc_value)
    except Exception:
        print('ERROR: data center', dc_name, 'not found')
        return 'dc_not_found'
    # Weights are validated only after all user values are applied, because
    # the user is allowed to specify a subset of the data centers.
    if verify_percentage(balancing) is False:
        balancing = 'wrong_weights'
    return balancing
# Check the weights sum is always 100.0. This check is executed at this point once the request has been constructed with the user's weights because the script allows not all of the DCs to be specified.
def verify_percentage(balancing):
    """Return True iff the data-center weights sum to exactly 100.0."""
    total = sum(entry['percent'] for entry in balancing['dataCenters'])
    if total != 100.0:
        print('ERROR: weights sum =', total)
        return False
    return True
# This function can be used to further manipulate any parameters in the json object 'balancing' before sending the POST request to create a new version. For now only the description will be modified.
def update_params_balancing(balancing, version):
    """Stamp the new version's description with the cloned-from note.

    Extend this function to tweak further parameters before the POST.
    """
    description = 'INFO: cloned from version {}'.format(version)
    balancing['description'] = description
    print(description)
    return balancing
# Push the new weights to the new load id version
def push_change(balancing, loadid, verbose):
    """POST the modified configuration as a new version; return its version number."""
    print('INFO: uploading new version')
    endpoint = urljoin(baseurl, '/cloudlets/api/v2/origins/' + loadid + '/versions')
    # The API expects a JSON string body, not a Python object.
    body = json.dumps(balancing)
    reply = session.post(endpoint, data=body, headers={'content-type': 'application/json'})
    reply = json.loads(reply.text)
    if verbose:
        print('\nDEBUG: API Endpoint:', endpoint)
        print('DEBUG:', reply, '\n')
    return reply['version']
# Activate load balancing ID
def activate_load_id(data, loadid, verbose):
    """Activate a load balancing version on the network given inside *data*."""
    print('INFO: activating the new load balancing version')
    version = data['version']
    endpoint = urljoin(baseurl, '/cloudlets/api/v2/origins/' + loadid + '/activations')
    reply = session.post(endpoint, data=json.dumps(data),
                         headers={'content-type': 'application/json'})
    if reply.status_code != 200:
        print('ERROR: load balancing ID', loadid, 'or version', str(version), 'not found')
        return ()
    body = json.loads(reply.text)
    if verbose:
        print('\nDEBUG: API Endpoint:', endpoint)
        print('DEBUG: ', body, '\n')
    return ()
# Get the policyId, property names and production version
def get_single_policy_associations(policyName, cloudletId):
    """Return (policyId, active version, property-name set) for one policy.

    Exits the program when the policy name is not found.
    """
    reply = session.get(urljoin(baseurl, '/cloudlets/api/v2/policies?cloudletId=' + cloudletId))
    policies = json.loads(reply.text)
    properties = []
    policyId = False
    for policy in policies:
        if policy['name'] == policyName:
            policyId = str(policy['policyId'])
            version = str(policy['activations'][0]['policyInfo']['version'])
            for activation in policy['activations']:
                properties.append(activation['propertyInfo']['name'])
    if policyId is False:
        print('Policy Not Found')
        exit()
    return (policyId, version, set(properties))
# Similar to the previous function, except this is global for all policyNames.
def get_all_policy_associations(cloudletId):
    """Populate the global dict d with every policy's associations.

    Builds {PolicyName: [policyId, version, [property names]]}; the load
    balancing IDs slot is appended later by get_all_associated_balancing_ids.
    Exits the program when no policy at all was found.
    """
    reply = session.get(urljoin(baseurl, '/cloudlets/api/v2/policies?cloudletId=' + cloudletId))
    policies = json.loads(reply.text)
    policyId = False
    for policy in policies:
        policyName = policy['name']
        policyId = str(policy['policyId'])
        try:
            version = str(policy['activations'][0]['policyInfo']['version'])
        except Exception:
            # Policy has no activation yet.
            version = 'null'
        properties = [activation['propertyInfo']['name']
                      for activation in policy['activations']]
        d.setdefault(policyName, []).extend(
            [policyId, version, list(set(properties))])
    if policyId is False:
        print('Policy Not Found')
        exit()
    return ()
# Get the associated balancing Ids to a policyId/policyName
def get_associated_balancing_ids(policyId, version):
    """Return the load balancing originIds referenced by a policy version's
    match rules, or () when the version has no match rules."""
    reply = session.get(urljoin(baseurl, '/cloudlets/api/v2/policies/' + policyId + '/versions/' + version))
    version_data = json.loads(reply.text)
    if version_data['matchRules'] is None:
        return ()
    return [rule['forwardSettings']['originId'] for rule in version_data['matchRules']]
# Get all the associated balancing Ids using the partially built dictionary {PolicyName: [policyId, version, {properties}]}
def get_all_associated_balancing_ids():
    """Append the load balancing IDs slot to every entry of the global dict d.

    Uses the partially built {PolicyName: [policyId, version, [properties]]}
    mapping; entries with no active version get the string 'null' instead.
    """
    for policyName, content in d.items():
        policyId, version = content[0], content[1]
        if version == 'null':
            # No active version: mark the load IDs slot as unavailable.
            d.setdefault(policyName, []).append('null')
        else:
            balancing_ids = get_associated_balancing_ids(policyId, version)
            d.setdefault(policyName, []).append(list(set(balancing_ids)))
    return ()
# Get the origins associated to the Load Balancing ID.
def get_associated_origins(loadbalancing_name):
    """Return the originIds configured in the STAGING version of a load
    balancing ID.

    Returns an empty list when the ID has no staging version, so callers can
    iterate the result unconditionally (previously the not-found path fell
    through returning None, which crashed search_results_print's loop).
    """
    version = load_id_version('STAGING', loadbalancing_name)
    if version == 'null':
        print('Policy Not Found')
        return []
    reply = session.get(urljoin(baseurl, '/cloudlets/api/v2/origins/' + loadbalancing_name + '/versions/' + version + '?validate=false'))
    response = json.loads(reply.text)
    return [dataCenter['originId'] for dataCenter in response['dataCenters']]
# Print the properties, policies and load balancing Ids in a tree view.
def search_results_print(properties, policy, loadbalancing_ids):
    """Print a tree view of property -> policy -> load ID -> origin associations."""
    print('INFO: this is a tree view of the properties, policies, load IDs and origins associations')
    print('|--- Property\n |--------- Policy\n |------------ Load ID\n |------------ Origin\n')
    for property_name in properties:
        print('|---', property_name)
        print(' |---------', policy)
        for loadbalancing_name in loadbalancing_ids:
            print(' |------------', loadbalancing_name)
            for origin_name in get_associated_origins(loadbalancing_name):
                print(' |------------', origin_name)
    return ()
# Initialization of section and edgerc.
def init_config(edgerc_file, section):
    """Resolve the .edgerc file and section, then build the API session.

    Falls back to $AKAMAI_EDGERC / ~/.edgerc for the file and to
    $AKAMAI_EDGERC_SECTION / 'cloudlets' for the section. Sets the module
    globals baseurl and session; exits the program on any failure.
    """
    global baseurl, session
    if not edgerc_file:
        # Empty env var falls through to the home-directory default.
        edgerc_file = os.getenv("AKAMAI_EDGERC") or os.path.join(os.path.expanduser("~"), '.edgerc')
    if not os.access(edgerc_file, os.R_OK):
        print("Unable to read edgerc file \"%s\"" % edgerc_file)
        exit(1)
    if not section:
        section = os.getenv("AKAMAI_EDGERC_SECTION") or "cloudlets"
    try:
        edgerc = EdgeRc(edgerc_file)
        baseurl = 'https://%s' % edgerc.get(section, 'host')
        session = requests.Session()
        session.auth = EdgeGridAuth.from_edgerc(edgerc, section)
        return (baseurl, session)
    except configparser.NoSectionError:
        # NOTE(review): configparser is not in this module's import list —
        # confirm it is imported, otherwise this handler raises NameError.
        print("Edgerc section \"%s\" not found" % section)
        exit(1)
    except Exception:
        print("Unknown error occurred trying to read edgerc file (%s)" % edgerc_file)
        exit(1)
# Main function
def main():
global args
parser = MyArgumentParser(
description='Application Load Balancer Cloudlet Weitghts Updater', add_help=False
)
parser.add_argument('--version', action='version', version='ALB Cloudlet Weights Updater v2.0')
subparsers = parser.add_subparsers(title='Commands', dest='command', metavar="")
create_parser = subparsers.add_parser('help', help='Show available help').add_argument('args', metavar="", nargs=argparse.REMAINDER)
parser_update = subparsers.add_parser('update', help='Modify target rule', add_help=False)
parser_activate = subparsers.add_parser('activate', help='Activate a Load Balancing ID to Staging or Production', add_help=False)
parser_search = subparsers.add_parser('search', help='Search for policies and load balancing IDs', add_help=False)
mandatory_up = parser_update.add_argument_group('required arguments')
mandatory_up.add_argument('--loadid', required=True, help='Load Balancing ID name')
mandatory_up.add_argument('--datacenters', required=True, help='Data Center Name and Percentge Value, Example: \'DC1:20,DC2:35,DC3:45\'')
optional_up = parser_update.add_argument_group('optional arguments')
optional_up.add_argument('--stage', action='store_true', help='Work based on the last active verion in staging. By default the script works on the last version activated in production')
optional_up.add_argument('--activate', choices={'STAGING', 'PRODUCTION'}, help='Activate the policy to the specified network. Default is PRODUCTION')
optional_up.add_argument('--edgerc', | |
word[1] == "j" :
toGuess = toGuess[:1] + "j" + toGuess[2:]
if word[2] == "J" or word[2] == "j" :
toGuess = toGuess[:2] + "j" + toGuess[3:]
if word[3] == "J" or word[3] == "j" :
toGuess = toGuess[:3] + "j" + toGuess[4:]
if word[4] == "J" or word[4] == "j" :
toGuess = toGuess[:4] + "j" + toGuess[5:]
if word[5] == "J" or word[5] == "j" :
toGuess = toGuess[:5] + "j" + toGuess[6:]
if word[1] != "J" and word[1] != "j" and word[2] != "J" and word[2] != "j" and word[3] != "J" and word[3] != "j" and word[4] != "J" and word[4] != "j" and word[5] != "J" and word[5] != "j" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "j" + ", "
# Handle guesses for the letters k through s: reveal every matching position
# (word positions 1-5) in the masked string, or count an error and record the
# wrong letter when none of those positions match.
for letter in "klmnopqrs":
    if guessChar == letter.upper() or guessChar == letter:
        matched = False
        for position in range(1, 6):
            if word[position] == letter.upper() or word[position] == letter:
                toGuess = toGuess[:position] + letter + toGuess[position + 1:]
                matched = True
        if not matched:
            print("\nWrong!\n")
            numberOfErrors = numberOfErrors + 1
            wrongChars = wrongChars + letter + ", "
if guessChar == | |
Hera': ['Hera Boss'],
'Agahnims Tower': ['Tower Agahnim 1'],
'Palace of Darkness': ['PoD Boss'],
'Swamp Palace': ['Swamp Boss'],
'Skull Woods': ['Skull 1 Lobby', 'Skull 2 East Lobby', 'Skull 2 West Lobby', 'Skull Boss'],
'Thieves Town': ['Thieves Boss', ('Thieves Blind\'s Cell', 'Thieves Boss')],
'Ice Palace': ['Ice Boss'],
'Misery Mire': ['Mire Boss'],
'Turtle Rock': ['TR Main Lobby', 'TR Lazy Eyes', 'TR Big Chest Entrance', 'TR Eye Bridge', 'TR Boss'],
'Ganons Tower': ['GT Agahnim 2']
}
if world.mode[player] == 'standard':
paths['Hyrule Castle'].append('Hyrule Dungeon Cellblock')
# noinspection PyTypeChecker
paths['Hyrule Castle'].append(('Hyrule Dungeon Cellblock', 'Sanctuary'))
if world.doorShuffle[player] in ['basic']:
paths['Thieves Town'].append('Thieves Attic Window')
return paths
def overworld_prep(world, player):
    # Overworld pre-processing for door shuffling: first mark the regions the
    # player cannot reach from the spawn points, then (order matters) create
    # logical doors for the exits of those regions.
    find_inaccessible_regions(world, player)
    add_inaccessible_doors(world, player)
def find_inaccessible_regions(world, player):
    """BFS the non-dungeon region graph from the spawn points and record the
    names of the unreachable regions in world.inaccessible_regions[player]."""
    world.inaccessible_regions[player] = []
    if world.mode[player] == 'inverted':
        start_regions = ['Inverted Links House', 'Inverted Dark Sanctuary']
    else:
        start_regions = ['Links House', 'Sanctuary']
    frontier = deque(convert_regions(start_regions, world, player))
    candidate_regions = set(r for r in world.regions
                            if r.player == player and r.type is not RegionType.Dungeon)
    seen = set()
    while frontier:
        region = frontier.popleft()
        seen.add(region)
        if region.name == 'Inverted Dark Sanctuary':  # special spawn point in cave
            # Spawn caves are entered backwards, so walk entrances too.
            for ent in region.entrances:
                parent = ent.parent_region
                if parent and parent.type is not RegionType.Dungeon \
                        and parent not in frontier and parent not in seen:
                    frontier.append(parent)
        for ext in region.exits:
            nxt = ext.connected_region
            if nxt and nxt.type is not RegionType.Dungeon \
                    and nxt not in frontier and nxt not in seen:
                frontier.append(nxt)
    world.inaccessible_regions[player].extend(
        r.name for r in candidate_regions.difference(seen) if valid_inaccessible_region(r))
    if world.mode[player] == 'standard':
        world.inaccessible_regions[player].append('Hyrule Castle Ledge')
        world.inaccessible_regions[player].append('Sewer Drop')
    logger = logging.getLogger('')
    logger.debug('Inaccessible Regions:')
    for region_name in world.inaccessible_regions[player]:
        logger.debug('%s', region_name)
def valid_inaccessible_region(r):
    """A non-cave region always counts; a cave only counts when it has at
    least one exit and is not one of the special-cased rooms."""
    if r.type is not RegionType.Cave:
        return True
    return len(r.exits) > 0 and r.name not in ['Links House', '<NAME> Room']
def add_inaccessible_doors(world, player):
    """Create a logical door for every exit of each inaccessible region."""
    # todo: ignore standard mode hyrule castle ledge?
    for region_name in world.inaccessible_regions[player]:
        region = world.get_region(region_name, player)
        for ext in region.exits:
            create_door(world, player, ext.name, region.name)
def create_door(world, player, entName, region_name):
    """Attach logical doors to both sides of the entrance named *entName*.

    Adds a door for the entrance itself and for every exit of the connected
    region that leads back to *region_name*.
    """
    entrance = world.get_entrance(entName, player)
    target = entrance.connected_region
    for ext in target.exits:
        if ext.connected_region is not None and ext.connected_region.name == region_name:
            world.doors += (Door(player, ext.name, DoorType.Logical, ext),)
            connect_door_only(world, ext.name, ext.connected_region, player)
    world.doors += (Door(player, entName, DoorType.Logical, entrance),)
    connect_door_only(world, entName, target, player)
def check_required_paths(paths, world, player):
    """Verify every required path of each dungeon layout is reachable.

    paths maps dungeon name -> list of destinations; a destination is either a
    region name (reached from the dungeon's entrances) or a (start, dest)
    tuple. Raises Exception on the first unreachable destination.
    """
    for dungeon_name in paths.keys():
        builder = world.dungeon_layouts[player][dungeon_name]
        if len(paths[dungeon_name]) > 0:
            # Group destinations by the start regions they must be reached from.
            states_to_explore = defaultdict(list)
            for path in paths[dungeon_name]:
                if type(path) is tuple:
                    # Explicit (start, destination) pair.
                    states_to_explore[tuple([path[0]])].append(path[1])
                else:
                    # Plain destination: reached from the dungeon entrances.
                    states_to_explore[tuple(builder.path_entrances)].append(path)
            cached_initial_state = None
            for start_regs, dest_regs in states_to_explore.items():
                check_paths = convert_regions(dest_regs, world, player)
                start_regions = convert_regions(start_regs, world, player)
                initial = start_regs == tuple(builder.path_entrances)
                if not initial or cached_initial_state is None:
                    init = determine_init_crystal(initial, cached_initial_state, start_regions)
                    state = ExplorationState(init, dungeon_name)
                    for region in start_regions:
                        state.visit_region(region)
                        state.add_all_doors_check_unattached(region, world, player)
                    explore_state(state, world, player)
                    # Cache the entrance-based exploration so later plain
                    # destinations do not re-explore from scratch.
                    if initial and cached_initial_state is None:
                        cached_initial_state = state
                else:
                    state = cached_initial_state
                valid, bad_region = check_if_regions_visited(state, check_paths)
                if not valid:
                    # The Skull Pinball trap door can one-way a path; try the
                    # fix and re-explore before declaring failure.
                    if check_for_pinball_fix(state, bad_region, world, player):
                        explore_state(state, world, player)
                        valid, bad_region = check_if_regions_visited(state, check_paths)
                if not valid:
                    raise Exception('%s cannot reach %s' % (dungeon_name, bad_region.name))
def determine_init_crystal(initial, state, start_regions):
    """Choose the CrystalBarrier state for a new exploration.

    Entrance-based explorations start Orange; otherwise the single start
    region's visit record in the cached *state* decides Blue/Orange/Either.

    Raises Exception when there is no cached state or the start region was
    never reached, and NotImplementedError for multiple non-entrance starts.
    """
    if initial:
        return CrystalBarrier.Orange
    if state is None:
        raise Exception('Please start path checking from the entrances')
    if len(start_regions) > 1:
        raise NotImplementedError('Path checking for multiple start regions (not the entrances) not implemented, use more paths instead')
    start_region = start_regions[0]
    if start_region in state.visited_blue and start_region in state.visited_orange:
        return CrystalBarrier.Either
    elif start_region in state.visited_blue:
        return CrystalBarrier.Blue
    elif start_region in state.visited_orange:
        return CrystalBarrier.Orange
    # Bug fix: the region name was passed as a second Exception argument
    # (logging style) instead of being %-formatted into the message.
    raise Exception('Can\'t get to %s from initial state' % start_region.name)
def explore_state(state, world, player):
    """Drain the exploration frontier, visiting every traversable region."""
    while len(state.avail_doors) > 0:
        door = state.next_avail_door().door
        target = world.get_entrance(door.name, player).connected_region
        if not state.can_traverse(door):
            continue
        if state.visited(target) or not valid_region_to_explore(target, world, player):
            continue
        state.visit_region(target)
        state.add_all_doors_check_unattached(target, world, player)
def check_if_regions_visited(state, check_paths):
    """Return (all_visited, first_unvisited_region_or_None)."""
    for region_target in check_paths:
        if not state.visited_at_all(region_target):
            return False, region_target
    return True, None
def check_for_pinball_fix(state, bad_region, world, player):
    # If the Skull Pinball trap door blocked the route to 'Skull 2 West
    # Lobby', convert it back to a normal door and reconnect both sides so
    # exploration can continue. Returns True when the fix was applied.
    pinball_region = world.get_region('Skull Pinball', player)
    if bad_region.name == 'Skull 2 West Lobby' and state.visited_at_all(pinball_region): #revisit this for entrance shuffle
        door = world.get_door('Skull Pinball WS', player)
        room = world.get_room(door.roomIndex, player)
        if room.doorList[door.doorListPos][1] == DoorKind.Trap:
            # Patch the room data: door kind, trap flag, blocked state, then
            # create the two-way connection the trap previously prevented.
            room.change(door.doorListPos, DoorKind.Normal)
            door.trapFlag = 0x0
            door.blocked = False
            connect_two_way(world, door.name, door.dest.name, player)
            state.add_all_doors_check_unattached(pinball_region, world, player)
            return True
    return False
@unique
class DROptions(Flag):
    """Bit flags controlling door-randomizer ROM options."""
    NoOptions = 0x00
    Eternal_Mini_Bosses = 0x01  # If on, GT minibosses marked as defeated when they try to spawn a heart
    Town_Portal = 0x02  # If on, Players will start with mirror scroll
    Open_Desert_Wall = 0x80  # If on, pre opens the desert wall, no fire required
# DATA GOES DOWN HERE
logical_connections = [
('Hyrule Dungeon North Abyss Catwalk Dropdown', 'Hyrule Dungeon North Abyss'),
('Sewers Secret Room Push Block', 'Sewers Secret Room Blocked Path'),
('Eastern Hint Tile Push Block', 'Eastern Hint Tile'),
('Eastern Map Balcony Hook Path', 'Eastern Map Room'),
('Eastern Map Room Drop Down', 'Eastern Map Balcony'),
('Desert Main Lobby Left Path', 'Desert Left Alcove'),
('Desert Main Lobby Right Path', 'Desert Right Alcove'),
('Desert Left Alcove Path', 'Desert Main Lobby'),
('Desert Right Alcove Path', 'Desert Main Lobby'),
('Hera Big Chest Landing Exit', 'Hera 4F'),
('PoD Pit Room Block Path N', 'PoD Pit Room Blocked'),
('PoD Pit Room Block Path S', 'PoD Pit Room'),
('PoD Arena Bonk Path', 'PoD Arena Bridge'),
('PoD Arena Main Crystal Path', 'PoD Arena Crystal'),
('PoD Arena Crystal Path', 'PoD Arena Main'),
('PoD Arena Main Orange Barrier', 'PoD Arena North'),
('PoD Arena North Drop Down', 'PoD Arena Main'),
('PoD Arena Bridge Drop Down', 'PoD Arena Main'),
('PoD Map Balcony Drop Down', 'PoD Sexy Statue'),
('PoD Basement Ledge Drop Down', 'PoD Stalfos Basement'),
('PoD Falling Bridge Path N', 'PoD Falling Bridge Ledge'),
('PoD Falling Bridge Path S', 'PoD Falling Bridge'),
('Swamp Lobby Moat', 'Swamp Entrance'),
('Swamp Entrance Moat', 'Swamp Lobby'),
('Swamp Trench 1 Approach Dry', 'Swamp Trench 1 Nexus'),
('Swamp Trench 1 Approach Key', 'Swamp Trench 1 Key Ledge'),
('Swamp Trench 1 Approach Swim Depart', 'Swamp Trench 1 Departure'),
('Swamp Trench 1 Nexus Approach', 'Swamp Trench 1 Approach'),
('Swamp Trench 1 Nexus Key', 'Swamp Trench 1 Key Ledge'),
('Swamp Trench 1 Key Ledge Dry', 'Swamp Trench 1 Nexus'),
('Swamp Trench 1 Key Approach', 'Swamp Trench 1 Approach'),
('Swamp Trench 1 Key Ledge Depart', 'Swamp Trench 1 Departure'),
('Swamp Trench 1 Departure Dry', 'Swamp Trench 1 Nexus'),
('Swamp Trench 1 Departure Approach', 'Swamp Trench 1 Approach'),
('Swamp Trench 1 Departure Key', 'Swamp Trench 1 Key Ledge'),
('Swamp Hub Hook Path', 'Swamp Hub North Ledge'),
('Swamp Hub North Ledge Drop Down', 'Swamp Hub'),
('Swamp Compass Donut Push Block', 'Swamp Donut Top'),
('Swamp Shortcut Blue Barrier', 'Swamp Trench 2 Pots'),
('Swamp Trench 2 Pots Blue Barrier', 'Swamp Shortcut'),
('Swamp Trench 2 Pots Dry', 'Swamp Trench 2 Blocks'),
('Swamp Trench 2 Pots Wet', 'Swamp Trench 2 Departure'),
('Swamp Trench 2 Blocks Pots', 'Swamp Trench 2 Pots'),
('Swamp Trench 2 Departure Wet', 'Swamp Trench 2 Pots'),
('Swamp West Shallows Push Blocks', 'Swamp West Block Path'),
('Swamp West Block Path Drop Down', 'Swamp West Shallows'),
('Swamp West Ledge Drop Down', 'Swamp West Shallows'),
('Swamp West Ledge Hook Path', 'Swamp Barrier Ledge'),
('Swamp Barrier Ledge Drop Down', 'Swamp West Shallows'),
('Swamp Barrier Ledge - Orange', 'Swamp Barrier'),
('Swamp Barrier - Orange', 'Swamp Barrier Ledge'),
('Swamp Barrier Ledge Hook Path', 'Swamp West Ledge'),
('Swamp Drain Right Switch', 'Swamp Drain Left'),
('Swamp Flooded Spot Ladder', 'Swamp Flooded Room'),
('Swamp Flooded Room Ladder', 'Swamp Flooded Spot'),
('Skull Pot Circle Star Path', 'Skull Map Room'),
('Skull Big Chest Hookpath', 'Skull 1 Lobby'),
('Skull Back Drop Star Path', 'Skull Small Hall'),
('Thieves Rail Ledge Drop Down', 'Thieves BK Corner'),
('Thieves Hellway Orange Barrier', 'Thieves Hellway S Crystal'),
('Thieves Hellway Crystal Orange Barrier', 'Thieves Hellway'),
('Thieves Hellway Blue Barrier', 'Thieves Hellway N Crystal'),
('Thieves Hellway Crystal Blue Barrier', 'Thieves Hellway'),
('Thieves Basement Block Path', 'Thieves Blocked Entry'),
('Thieves Blocked Entry Path', 'Thieves Basement Block'),
('Thieves Conveyor Bridge Block Path', 'Thieves Conveyor Block'),
('Thieves Conveyor Block Path', 'Thieves Conveyor Bridge'),
('Ice Cross Bottom Push Block Left', 'Ice Floor Switch'),
('Ice Cross Right Push Block Top', 'Ice Bomb Drop'),
('Ice Big Key Push Block', 'Ice Dead End'),
('Ice Bomb Jump Ledge Orange | |
= property(fget=lambda self: self._get_header_byte_range()[0])
def _populatePrco(self):
"Populate the package object with the needed PRCO interface."
tag2prco = { "OBSOLETE": share_data("obsoletes"),
"CONFLICT": share_data("conflicts"),
"REQUIRE": share_data("requires"),
"PROVIDE": share_data("provides") }
for tag in tag2prco:
name = decode_value( self.hdr[getattr(rpm, 'RPMTAG_%sNAME' % tag)] )
name = list(map(share_data, name))
if not name: # empty or none or whatever, doesn't matter
continue
lst = decode_value( self.hdr[getattr(rpm, 'RPMTAG_%sFLAGS' % tag)] )
flag = list(map(flagToString, lst))
flag = list(map(share_data, flag))
lst = decode_value( self.hdr[getattr(rpm, 'RPMTAG_%sVERSION' % tag)] )
vers = list(map(stringToVersion, lst))
vers = [(share_data(x[0]), share_data(x[1]), share_data(x[2])) for x in vers]
prcotype = tag2prco[tag]
self.prco[prcotype] = list(map(share_data, list(zip(name, flag, vers))))
def inPrcoRange(self, prcotype, reqtuple):
"""returns true if the package has a the prco that satisfies
the reqtuple range, assume false.
Takes: prcotype, requested prco tuple"""
return bool(self.matchingPrcos(prcotype, reqtuple))
    def checkPrco(self, prcotype, prcotuple):
        """returns 1 or 0 if the pkg contains the requested tuple/tuple range"""
        # get rid of simple cases - nothing
        if prcotype not in self.prco:
            return 0
        # First try and exact match, then search
        # Make it faster, if it's "big".
        # Small lists: a linear "in" scan is cheaper than building a set.
        if len(self.prco[prcotype]) <= 8:
            if prcotuple in self.prco[prcotype]:
                return 1
        else:
            # Larger lists: lazily build and cache one lookup set per PRCO
            # type so repeated exact-match tests are O(1).
            if not hasattr(self, '_prco_lookup'):
                self._prco_lookup = {'obsoletes': None, 'conflicts': None,
                                     'requires': None, 'provides': None}
            if self._prco_lookup[prcotype] is None:
                self._prco_lookup[prcotype] = set(self.prco[prcotype])
            if prcotuple in self._prco_lookup[prcotype]:
                return 1
        # make us look it up and compare
        (reqn, reqf, (reqe, reqv, reqr)) = prcotuple
        if reqf is not None:
            # A comparison flag ("EQ", "GE", ...) means a version-range test.
            return self.inPrcoRange(prcotype, prcotuple)
        else:
            # No flag: any entry with a matching name satisfies the request.
            # NOTE(review): comparison is done on encoded bytes, presumably
            # to tolerate mixed str/bytes names -- confirm.
            for (n, f, (e, v, r)) in self.returnPrco(prcotype):
                if reqn.encode() == n.encode():
                    return 1
        return 0
    def returnPrco(self, prcotype, printable=False):
        """return list of provides, requires, conflicts or obsoletes"""
        # Lazily decode PRCO data from the rpm header on first access; the
        # flag is set only after a successful populate.
        if not self._prcoPopulated:
            self._populatePrco()
            self._prcoPopulated = True
        # ``printable`` is accepted for API compatibility; unused here.
        return self.prco.get(prcotype, [])
def returnPrcoNames(self, prcotype):
if not hasattr(self, '_cache_prco_names_' + prcotype):
data = [n for (n, f, v) in self.returnPrco(prcotype)]
setattr(self, '_cache_prco_names_' + prcotype, data)
return getattr(self, '_cache_prco_names_' + prcotype)
requires = property(fget=lambda self: self.returnPrco('requires'))
provides = property(fget=lambda self: self.returnPrco('provides'))
obsoletes = property(fget=lambda self: self.returnPrco('obsoletes'))
conflicts = property(fget=lambda self: self.returnPrco('conflicts'))
provides_names = property(fget=lambda self: self.returnPrcoNames('provides'))
requires_names = property(fget=lambda self: self.returnPrcoNames('requires'))
conflicts_names = property(fget=lambda self: self.returnPrcoNames('conflicts'))
obsoletes_names = property(fget=lambda self: self.returnPrcoNames('obsoletes'))
def _loadFiles(self):
files = decode_value( self.hdr['filenames'] )
fileflags = decode_value( self.hdr['fileflags'] )
filemodes = decode_value( self.hdr['filemodes'] )
filetuple = list(zip(files, filemodes, fileflags))
if not self._loadedfiles:
for (fn, mode, flag) in filetuple:
# garbage checks
if mode is None or mode == '':
if 'file' not in self.files:
self.files['file'] = []
self.files['file'].append(fn)
continue
if mode not in self._mode_cache:
self._mode_cache[mode] = stat.S_ISDIR(mode)
fkey = 'file'
if self._mode_cache[mode]:
fkey = 'dir'
elif flag is not None and (flag & 64):
fkey = 'ghost'
self.files.setdefault(fkey, []).append(fn)
self._loadedfiles = True
def returnFileEntries(self, ftype='file', primary_only=False):
"""return list of files based on type, you can pass primary_only=True
to limit to those files in the primary repodata"""
self._loadFiles()
if self.files:
if ftype in self.files:
if primary_only:
if ftype == 'dir':
match = re_primary_dirname
else:
match = re_primary_filename
return [fn for fn in self.files[ftype] if match(fn)]
return self.files[ftype]
return []
filelist = property(fget=lambda self: self.returnFileEntries(ftype='file'))
dirlist = property(fget=lambda self: self.returnFileEntries(ftype='dir'))
ghostlist = property(fget=lambda self: self.returnFileEntries(ftype='ghost'))
def _is_pre_req(self, flag):
"""check the flags for a requirement, return 1 or 0 whether or not requires
is a pre-requires or a not"""
# FIXME this should probably be put in rpmUtils.miscutils since
# - that's what it is
if flag is not None:
# Note: RPMSENSE_PREREQ == 0 since rpm-4.4'ish
if flag & (rpm.RPMSENSE_PREREQ |
rpm.RPMSENSE_SCRIPT_PRE |
rpm.RPMSENSE_SCRIPT_POST):
return 1
return 0
def _requires_with_pre(self):
"""returns requires with pre-require bit"""
name = decode_value( self.hdr[rpm.RPMTAG_REQUIRENAME] )
lst = decode_value( self.hdr[rpm.RPMTAG_REQUIREFLAGS] )
flag = list(map(flagToString, lst))
pre = list(map(self._is_pre_req, lst))
lst = decode_value( self.hdr[rpm.RPMTAG_REQUIREVERSION] )
vers = list(map(stringToVersion, lst))
if name is not None:
lst = list(zip(name, flag, vers, pre))
mylist = list(set(lst))
return mylist
def _dump_base_items(self):
packager = url = ''
if self.packager:
packager = to_xml(self.packager)
if self.url:
url = to_xml(self.url)
msg = """
<name>{0}</name>
<arch>{1}</arch>
<version epoch="{2}" ver="{3}" rel="{4}"/>
<checksum type="{5}" pkgid="YES">{6}</checksum>
<summary>{7}</summary>
<description>{8}</description>
<packager>{9}</packager>
<url>{10}</url>
<time file="{11}" build="{12}"/>
<size package="{13}" installed="{14}" archive="{15}"/>\n""".format(self.name, self.arch, self.epoch, self.ver, self.rel, 'sha256', self.checksum, to_xml(self.summary), to_xml(self.description), packager, url, self.filetime, self.buildtime, self.packagesize, self.size, self.archivesize)
msg += """<location href="{0}"/>\n""".format( to_xml(self.relpath, attrib=True) )
return msg
def _dump_format_items(self):
msg = " <format>\n"
if self.license:
msg += """ <rpm:license>{0}</rpm:license>\n""".format( to_xml(self.license) )
else:
msg += """ <rpm:license/>\n"""
if self.vendor:
msg += """ <rpm:vendor>{0}</rpm:vendor>\n""".format( to_xml(self.vendor) )
else:
msg += """ <rpm:vendor/>\n"""
if self.group:
msg += """ <rpm:group>{0}</rpm:group>\n""".format( to_xml(self.group) )
else:
msg += """ <rpm:group/>\n"""
if self.buildhost:
msg += """ <rpm:buildhost>{0}</rpm:buildhost>\n""".format( to_xml(self.buildhost) )
else:
msg += """ <rpm:buildhost/>\n"""
if self.sourcerpm:
msg += """ <rpm:sourcerpm>{0}</rpm:sourcerpm>\n""".format( to_xml(self.sourcerpm) )
else: # b/c yum 2.4.3 and OLD y-m-p willgfreak out if it is not there.
msg += """ <rpm:sourcerpm/>\n"""
msg += """ <rpm:header-range start="{0}" end="{1}"/>""".format( self.hdrstart, self.hdrend)
msg += self._dump_pco('provides')
msg += self._dump_requires()
msg += self._dump_pco('conflicts')
msg += self._dump_pco('obsoletes')
msg += self._dump_files(True)
if msg[-1] != '\n':
msg += """\n"""
msg += """ </format>"""
return msg
def _dump_pco(self, pcotype):
msg = ""
mylist = getattr(self, pcotype)
if mylist:
msg = "\n <rpm:{0}>\n".format( pcotype )
for (name, flags, (e, v, r)) in mylist:
pcostring = ''' <rpm:entry name="{0}"'''.format( to_xml(name, attrib=True) )
if flags:
pcostring += ''' flags="{0}"'''.format( to_xml(flags, attrib=True) )
if e:
pcostring += ''' epoch="{0}"'''.format( to_xml(e, attrib=True) )
if v:
pcostring += ''' ver="{0}"'''.format( to_xml(v, attrib=True) )
if r:
pcostring += ''' rel="{0}"'''.format( to_xml(r, attrib=True) )
pcostring += "/>\n"
msg += pcostring
if mylist:
msg += " </rpm:{0}>".format( pcotype )
return msg
def _dump_requires(self):
"""returns deps in XML format"""
mylist = self._requires_with_pre()
msg = ""
if mylist:
msg = "\n <rpm:requires>\n"
if getattr(self, '_collapse_libc_requires', False):
libc_requires = [x for x in mylist if x[0].startswith('libc.so.6')]
if libc_requires:
rest = sorted(libc_requires, cmp=compareVerOnly, key=itemgetter(0))
best = rest.pop()
if len(rest) > 0 and best[0].startswith('libc.so.6()'): # rpmvercmp will sort this one as 'highest' so we need to remove it from the list
best = rest.pop()
newlist = []
for i in mylist:
if i[0].startswith('libc.so.6') and i != best:
continue
newlist.append(i)
mylist = newlist
for (name, flags, (e, v, r), pre) in mylist:
if name.startswith('rpmlib('):
continue
# this drops out requires that the pkg provides for itself.
if name in self.provides_names or (name.startswith('/') and (name in self.filelist or name in self.dirlist or name in self.ghostlist)):
if not flags:
continue
else:
if self.checkPrco('provides', (name, flags, (e, v, r))):
continue
prcostring = ''' <rpm:entry name="{0}"'''.format( to_xml(name, attrib=True) )
if flags:
prcostring += ''' flags="{0}"'''.format( to_xml(flags, attrib=True) )
if e:
prcostring += ''' epoch="{0}"'''.format( to_xml(e, attrib=True) )
if v:
prcostring += ''' ver="{0}"'''.format( to_xml(v, attrib=True) )
if r:
prcostring += ''' rel="{0}"'''.format( to_xml(r, attrib=True) )
if pre:
prcostring += ''' pre="{0}"'''.format( pre )
prcostring += "/>\n"
msg += prcostring
if mylist:
msg += " </rpm:requires>"
return msg
def _dump_files(self, primary=False):
msg = "\n"
if not primary:
files = self.returnFileEntries('file')
dirs = self.returnFileEntries('dir')
ghosts = self.returnFileEntries('ghost')
else:
files = self.returnFileEntries('file', primary_only=True)
dirs = self.returnFileEntries('dir', primary_only=True)
ghosts = self.returnFileEntries('ghost', primary_only=True)
for fn in files:
msg += """ <file>{0}</file>\n""".format( to_xml(fn) )
for fn in dirs:
msg += """ <file type="dir">{0}</file>\n""".format( to_xml(fn) )
for fn in ghosts:
msg += """ <file type="ghost">{0}</file>\n""".format( to_xml(fn) )
return msg
def _dump_changelog(self, clog_limit):
if not self.changelog:
return ""
msg = "\n"
# We need to output them "backwards", so the oldest is first
if not clog_limit:
clogs = self.changelog
else:
clogs = self.changelog[:clog_limit]
last_ts = 0
hack_ts = 0
for (ts, author, content) in reversed(clogs):
if ts != last_ts:
hack_ts = 0
else:
hack_ts += 1
last_ts = ts
ts += hack_ts
msg += """<changelog author="{0}" date="{1}">{2}</changelog>\n""".format( to_xml(author, attrib=True), to_xml(str(ts)), to_xml(content))
return msg
def xml_dump_primary_metadata(self):
msg = """\n<package type="rpm">"""
msg += self._dump_base_items()
msg += self._dump_format_items()
msg += """\n</package>"""
return msg
def xml_dump_filelists_metadata(self):
msg = """\n<package pkgid="{0}" name="{1}" arch="{2}">
<version epoch="{3}" ver="{4}" rel="{5}"/>\n""".format(self.checksum, self.name, self.arch, self.epoch, self.ver, self.rel)
msg += self._dump_files()
msg += "</package>\n"
return msg
def xml_dump_other_metadata(self, clog_limit=0):
msg = """\n<package pkgid="{0}" name="{1}" arch="{2}">
<version epoch="{3}" | |
# -*- coding: utf-8 -*-
"""
Temporary file handling
AUTHORS:
- <NAME>, <NAME> (2012-10-18): move these functions here
from sage/misc/misc.py and make them secure, see :trac:`13579`.
- <NAME> (2013-03-17): add :class:`atomic_write`,
see :trac:`14292`.
"""
# ****************************************************************************
# Copyright (C) 2012 <NAME> <<EMAIL>>
# Copyright (C) 2012 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from __future__ import print_function
import io
import os
import tempfile
import atexit
def delete_tmpfiles():
    """
    Remove the directory ``SAGE_TMP``.
    TESTS:
    This is automatically run when Sage exits, test this by running a
    separate session of Sage::
        sage: from sage.tests.cmdline import test_executable
        sage: child_SAGE_TMP, err, ret = test_executable(["sage", "-c", "print(SAGE_TMP)"])
        sage: err, ret
        ('', 0)
        sage: os.path.exists(child_SAGE_TMP)  # indirect doctest
        False
    The parent directory should exist::
        sage: parent_SAGE_TMP = os.path.normpath(child_SAGE_TMP + '/..')
        sage: os.path.isdir(parent_SAGE_TMP)
        True
    """
    import shutil
    from sage.misc.misc import SAGE_TMP
    # ignore_errors: exit-time cleanup must never raise, even if files
    # inside SAGE_TMP are unremovable or already gone.
    shutil.rmtree(str(SAGE_TMP), ignore_errors=True)
# Run when Python shuts down
atexit.register(delete_tmpfiles)
#################################################################
# temporary directory
#################################################################
def tmp_dir(name="dir_", ext=""):
    r"""
    Create and return a temporary directory in
    ``$HOME/.sage/temp/hostname/pid/``
    The temporary directory is deleted automatically when Sage exits.
    INPUT:
    - ``name`` -- (default: ``"dir_"``) A prefix for the directory name.
    - ``ext`` -- (default: ``""``) A suffix for the directory name.
    OUTPUT:
    The absolute path of the temporary directory created, with a
    trailing slash (or whatever the path separator is on your OS).
    EXAMPLES::
        sage: d = tmp_dir('dir_testing_', '.extension')
        sage: d   # random output
        '/home/username/.sage/temp/hostname/7961/dir_testing_XgRu4p.extension/'
        sage: os.chdir(d)
        sage: f = open('file_inside_d', 'w')
    Temporary directories are inaccessible by other users::
        sage: os.stat(d).st_mode & 0o077
        0
        sage: f.close()
    """
    from sage.misc.misc import SAGE_TMP
    # mkdtemp guarantees a fresh name and creates the directory with
    # mode 0o700, which keeps it private to the current user.
    tmp = tempfile.mkdtemp(prefix=name, suffix=ext, dir=str(SAGE_TMP))
    name = os.path.abspath(tmp)
    # Callers rely on the trailing separator for path concatenation.
    return name + os.sep
#################################################################
# temporary filename
#################################################################
def tmp_filename(name="tmp_", ext=""):
    r"""
    Create and return a temporary file in
    ``$HOME/.sage/temp/hostname/pid/``
    The temporary file is deleted automatically when Sage exits.
    .. warning::
        If you need a particular file extension always use
        ``tmp_filename(ext=".foo")``, this will ensure that the file
        does not yet exist. If you were to use
        ``tmp_filename()+".foo"``, then you might overwrite an
        existing file!
    INPUT:
    - ``name`` -- (default: ``"tmp_"``) A prefix for the file name.
    - ``ext`` -- (default: ``""``) A suffix for the file name. If you
      want a filename extension in the usual sense, this should start
      with a dot.
    OUTPUT:
    The absolute path of the temporary file created.
    EXAMPLES::
        sage: fn = tmp_filename('just_for_testing_', '.extension')
        sage: fn   # random
        '/home/username/.sage/temp/hostname/8044/just_for_testing_tVVHsn.extension'
        sage: f = open(fn, 'w')
    Temporary files are inaccessible by other users::
        sage: os.stat(fn).st_mode & 0o077
        0
        sage: f.close()
    """
    from sage.misc.misc import SAGE_TMP
    # mkstemp atomically creates the file (mode 0o600) and returns an
    # open descriptor; we only need the name, so close the handle.
    handle, tmp = tempfile.mkstemp(prefix=name, suffix=ext, dir=str(SAGE_TMP))
    os.close(handle)
    name = os.path.abspath(tmp)
    return name
#################################################################
# write to a temporary file and move it in place
#################################################################
class atomic_write(object):
"""
Write to a given file using a temporary file and then rename it
to the target file. This renaming should be atomic on modern
operating systems. Therefore, this class can be used to avoid race
conditions when a file might be read while it is being written.
It also avoids having partially written files due to exceptions
or crashes.
This is to be used in a ``with`` statement, where a temporary file
is created when entering the ``with`` and is moved in place of the
target file when exiting the ``with`` (if no exceptions occured).
INPUT:
- ``target_filename`` -- the name of the file to be written.
Normally, the contents of this file will be overwritten.
- ``append`` -- (boolean, default: False) if True and
``target_filename`` is an existing file, then copy the current
contents of ``target_filename`` to the temporary file when
entering the ``with`` statement. Otherwise, the temporary file is
initially empty.
- ``mode`` -- (default: ``0o666``) mode bits for the file. The
temporary file is created with mode ``mode & ~umask`` and the
resulting file will also have these permissions (unless the
mode bits of the file were changed manually). (Not to be confused with
the file opening mode.)
- ``binary`` -- (boolean, default: True on Python 2, False on Python 3) the
underlying file is opened in binary mode. If False then it is opened in
text mode and an encoding with which to write the file may be supplied.
- ``**kwargs`` -- additional keyword arguments passed to the underlying
`io.open` call.
EXAMPLES::
sage: from sage.misc.temporary_file import atomic_write
sage: target_file = tmp_filename()
sage: with open(target_file, 'w') as f:
....: _ = f.write("Old contents")
sage: with atomic_write(target_file) as f:
....: _ = f.write("New contents")
....: f.flush()
....: with open(target_file, 'r') as f2:
....: f2.read()
'Old contents'
sage: with open(target_file, 'r') as f:
....: f.read()
'New contents'
The name of the temporary file can be accessed using ``f.name``.
It is not a problem to close and re-open the temporary file::
sage: from sage.misc.temporary_file import atomic_write
sage: target_file = tmp_filename()
sage: with open(target_file, 'w') as f:
....: _ = f.write("Old contents")
sage: with atomic_write(target_file) as f:
....: f.close()
....: with open(f.name, 'w') as f2:
....: _ = f2.write("Newer contents")
sage: with open(target_file, 'r') as f:
....: f.read()
'Newer contents'
If an exception occurs while writing the file, the target file is
not touched::
sage: with atomic_write(target_file) as f:
....: _ = f.write("Newest contents")
....: raise RuntimeError
Traceback (most recent call last):
...
RuntimeError
sage: with open(target_file, 'r') as f:
....: f.read()
'Newer contents'
Some examples of using the ``append`` option. Note that the file
is never opened in "append" mode, it is possible to overwrite
existing data::
sage: target_file = tmp_filename()
sage: with atomic_write(target_file, append=True) as f:
....: _ = f.write("Hello")
sage: with atomic_write(target_file, append=True) as f:
....: _ = f.write(" World")
sage: with open(target_file, 'r') as f:
....: f.read()
'Hello World'
sage: with atomic_write(target_file, append=True) as f:
....: _ = f.seek(0)
....: _ = f.write("HELLO")
sage: with open(target_file, 'r') as f:
....: f.read()
'HELLO World'
If the target file is a symbolic link, the link is kept and the
target of the link is written to::
sage: link_to_target = os.path.join(tmp_dir(), "templink")
sage: os.symlink(target_file, link_to_target)
sage: with atomic_write(link_to_target) as f:
....: _ = f.write("Newest contents")
sage: with open(target_file, 'r') as f:
....: f.read()
'Newest contents'
We check the permission bits of the new file. Note that the old
permissions do not matter::
sage: os.chmod(target_file, 0o600)
sage: _ = os.umask(0o022)
sage: with atomic_write(target_file) as f:
....: pass
sage: '{:#o}'.format(os.stat(target_file).st_mode & 0o777)
'0o644'
sage: _ = os.umask(0o077)
sage: with atomic_write(target_file, mode=0o777) as f:
....: pass
sage: '{:#o}'.format(os.stat(target_file).st_mode & 0o777)
'0o700'
Test writing twice to the same target file. The outermost ``with``
"wins"::
sage: with open(target_file, 'w') as f:
....: _ = f.write('>>> ')
sage: with atomic_write(target_file, append=True) as f, \
....: atomic_write(target_file, append=True) as g:
....: _ = f.write("AAA"); f.close()
....: _ = g.write("BBB"); g.close()
sage: with open(target_file, 'r') as f:
....: f.read()
'>>> AAA'
Supplying an encoding means we're writing the file in "text mode" (in the
same sense as `io.open`) and so we must write unicode strings::
sage: target_file = tmp_filename()
sage: with atomic_write(target_file, binary=False,
....: encoding='utf-8') as f:
....: _ = f.write(u'Hélas')
sage: import io
sage: with io.open(target_file, encoding='utf-8') as f:
....: print(f.read())
Hélas
Supplying an encoding in binary mode (or other arguments that don't
make sense to `io.open` in binary mode) is an error::
sage: writer = atomic_write(target_file, binary=True,
....: encoding='utf-8')
sage: with writer as f:
....: _ = f.write(u'Hello')
Traceback (most recent call last):
...
ValueError: binary mode doesn't take an encoding argument
sage: os.path.exists(writer.tempname)
False
"""
def __init__(self, target_filename, append=False, mode=0o666,
binary=None, **kwargs):
"""
TESTS::
sage: from sage.misc.temporary_file import atomic_write
sage: link_to_target = os.path.join(tmp_dir(), "templink")
sage: os.symlink("/foobar", link_to_target)
sage: aw = atomic_write(link_to_target)
sage: print(aw.target)
/foobar
sage: print(aw.tmpdir)
/
"""
self.target = os.path.realpath(target_filename)
self.tmpdir = os.path.dirname(self.target)
self.append = append
# Remove umask bits from mode
umask = os.umask(0); os.umask(umask)
self.mode = mode & (~umask)
# 'binary' mode is the default on Python 2, whereas 'text' mode is the
# default on Python | |
'''
To run a Bokeh application on a Bokeh server from a single Python script,
pass the script name to ``bokeh serve`` on the command line:
.. code-block:: sh
bokeh serve app_script.py
By default, the Bokeh application will be served by the Bokeh server on a
default port ({DEFAULT_PORT}) at localhost, under the path ``/app_script``,
i.e.,
.. code-block:: none
http://localhost:{DEFAULT_PORT}/app_script
It is also possible to run the same command with jupyter notebooks:
.. code-block:: sh
bokeh serve app_notebook.ipynb
This will generate the same results as described with a python script
and the application will be served on a default port ({DEFAULT_PORT})
at localhost, under the path ``/app_notebook``
Applications can also be created from directories. The directory should
contain a ``main.py`` (and any other helper modules that are required) as
well as any additional assets (e.g., theme files). Pass the directory name
to ``bokeh serve`` to run the application:
.. code-block:: sh
bokeh serve app_dir
It is possible to run multiple applications at once:
.. code-block:: sh
bokeh serve app_script.py app_dir
If you would like to automatically open a browser to display the HTML
page(s), you can pass the ``--show`` option on the command line:
.. code-block:: sh
bokeh serve app_script.py app_dir --show
This will open two pages, for ``/app_script`` and ``/app_dir``,
respectively.
If you would like to pass command line arguments to Bokeh applications,
you can pass the ``--args`` option as the LAST option on the command
line:
.. code-block:: sh
bokeh serve app_script.py myapp.py --args foo bar --baz
Everything that follows ``--args`` will be included in ``sys.argv`` when
the application runs. In this case, when ``myapp.py`` executes, the
contents of ``sys.argv`` will be ``['myapp.py', 'foo', 'bar', '--baz']``,
consistent with standard Python expectations for ``sys.argv``.
Note that if multiple scripts or directories are provided, they
all receive the same set of command line arguments (if any) given by
``--args``.
If you have only one application, the server root will redirect to it.
Otherwise, You can see an index of all running applications at the server root:
.. code-block:: none
http://localhost:5006/
This index can be disabled with the ``--disable-index`` option, and the redirect
behavior can be disabled with the ``--disable-index-redirect`` option.
Network Configuration
~~~~~~~~~~~~~~~~~~~~~
To control the port that the Bokeh server listens on, use the ``--port``
argument:
.. code-block:: sh
bokeh serve app_script.py --port 8080
To listen on an arbitrary port, pass ``0`` as the port number. The actual
port number will be logged at startup.
Similarly, a specific network address can be specified with the
``--address`` argument. For example:
.. code-block:: sh
bokeh serve app_script.py --address 0.0.0.0
will have the Bokeh server listen all available network addresses.
Bokeh server can fork the underlying tornado server into multiprocess. This is
useful when trying to handle multiple connections especially in the context of
apps which require high computational loads. Default behavior is one process.
using 0 will auto-detect the number of cores and spin up corresponding number of
processes
.. code-block:: sh
bokeh serve app_script.py --num-procs 2
Note that due to limitations inherent in Tornado, Windows does not support
``--num-procs`` values greater than one! In this case consider running multiple
Bokeh server instances behind a load balancer.
By default, cross site connections to the Bokeh server websocket are not
allowed. You can enable websocket connections originating from additional
hosts by specifying them with the ``--allow-websocket-origin`` option:
.. code-block:: sh
bokeh serve app_script.py --allow-websocket-origin foo.com:8081
It is possible to specify multiple allowed websocket origins by adding
the ``--allow-websocket-origin`` option multiple times.
The Bokeh server can also add an optional prefix to all URL paths.
This can often be useful in conjunction with "reverse proxy" setups.
.. code-block:: sh
bokeh serve app_script.py --prefix foobar
Then the application will be served under the following URL:
.. code-block:: none
http://localhost:{DEFAULT_PORT}/foobar/app_script
If needed, Bokeh server can send keep-alive pings at a fixed interval.
To configure this feature, set the ``--keep-alive`` option:
.. code-block:: sh
bokeh serve app_script.py --keep-alive 10000
The value is specified in milliseconds. The default keep-alive interval
is 37 seconds. Give a value of 0 to disable keep-alive pings.
To control how often statistic logs are written, set the
``--stats-log-frequency`` option:
.. code-block:: sh
bokeh serve app_script.py --stats-log-frequency 30000
The value is specified in milliseconds. The default interval for
logging stats is 15 seconds. Only positive integer values are accepted.
Bokeh can also optionally log process memory usage. This feature requires
the optional ``psutil`` package to be installed. To enable memory logging
set the ``--mem-log-frequency`` option:
.. code-block:: sh
bokeh serve app_script.py --mem-log-frequency 30000
The value is specified in milliseconds. The default interval for
logging stats is 0 (disabled). Only positive integer values are accepted.
To have the Bokeh server override the remote IP and URI scheme/protocol for
all requests with ``X-Real-Ip``, ``X-Forwarded-For``, ``X-Scheme``,
``X-Forwarded-Proto`` headers (if they are provided), set the
``--use-xheaders`` option:
.. code-block:: sh
bokeh serve app_script.py --use-xheaders
This is typically needed when running a Bokeh server behind a reverse proxy
that is SSL-terminated.
.. warning::
It is not advised to set this option on a Bokeh server directly facing
the Internet.
Session ID Options
~~~~~~~~~~~~~~~~~~
Typically, each browser tab connected to a Bokeh server will have
its own session ID. When the server generates an ID, it will make
it cryptographically unguessable. This keeps users from accessing
one another's sessions.
To control who can use a Bokeh application, the server can sign
sessions with a secret key and reject "made up" session
names. There are three modes, controlled by the ``--session-ids``
argument:
.. code-block:: sh
bokeh serve app_script.py --session-ids signed
The available modes are: {SESSION_ID_MODES}
In ``unsigned`` mode, the server will accept any session ID
provided to it in the URL. For example,
``http://localhost/app_script?bokeh-session-id=foo`` will create a
session ``foo``. In ``unsigned`` mode, if the session ID isn't
provided with ``?bokeh-session-id=`` in the URL, the server will
still generate a cryptographically-unguessable ID. However, the
server allows clients to create guessable or deliberately-shared
sessions if they want to.
``unsigned`` mode is most useful when the server is running
locally for development, for example you can have multiple
processes access a fixed session name such as
``default``. ``unsigned`` mode is also convenient because there's
no need to generate or configure a secret key.
In ``signed`` mode, the session ID must be in a special format and
signed with a secret key. Attempts to use the application with an
invalid session ID will fail, but if no ``?bokeh-session-id=``
parameter is provided, the server will generate a fresh, signed
session ID. The result of ``signed`` mode is that only secure
session IDs are allowed but anyone can connect to the server.
In ``external-signed`` mode, the session ID must be signed but the
server itself won't generate a session ID; the
``?bokeh-session-id=`` parameter will be required. To use this
mode, you would need some sort of external process (such as
another web app) which would use the
``bokeh.util.session_id.generate_session_id()`` function to create
valid session IDs. The external process and the Bokeh server must
share the same ``BOKEH_SECRET_KEY`` environment variable.
``external-signed`` mode is useful if you want another process to
authenticate access to the Bokeh server; if someone is permitted
to use the Bokeh application, you would generate a session ID for
them, then redirect them to the Bokeh server with that valid
session ID. If you don't generate a session ID for someone, then
they can't load the app from the Bokeh server.
In both ``signed`` and ``external-signed`` mode, the secret key
must be kept secret; anyone with the key can generate a valid
session ID.
The secret key should be set in a ``BOKEH_SECRET_KEY`` environment
variable and should be a cryptographically random string with at
least 256 bits (32 bytes) of entropy. You can generate a new
secret key with the ``bokeh secret`` command.
Session Expiration Options
~~~~~~~~~~~~~~~~~~~~~~~~~~
To configure how often to check for unused sessions, set the
--check-unused-sessions option:
.. code-block:: sh
bokeh serve app_script.py --check-unused-sessions 10000
The value is specified in milliseconds. The default interval for
checking for unused sessions is 17 seconds. Only positive integer
values are accepted.
To configure how long unused sessions last, set the
--unused-session-lifetime option:
.. code-block:: sh
bokeh serve app_script.py --unused-session-lifetime 60000
The value is specified in milliseconds. The default lifetime interval
for unused sessions is 15 seconds. Only positive integer values are
accepted.
Logging Options
~~~~~~~~~~~~~~~
The logging level can be controlled by the ``--log-level`` argument:
.. code-block:: sh
bokeh serve app_script.py --log-level debug
The available log levels are: {LOGLEVELS}
The log format can be controlled by the ``--log-format`` argument:
.. code-block:: sh
bokeh serve app_script.py --log-format "%(levelname)s: %(message)s"
The default log format is ``"{DEFAULT_LOG_FORMAT}"``
'''
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
import argparse
from bokeh.application import Application
from bokeh.resources import DEFAULT_SERVER_PORT
from bokeh.util.logconfig import basicConfig
from bokeh.util.string import nice_join, format_docstring
from | |
import tkinter as tk
from tkinter import ttk
import pandas as pd
from os import path
import random
import csv
import time
from datetime import datetime
from rfid.rfid import RFID
from pump.pump import Pump
class GUI(tk.Tk):
file_path = "/home/pi/Documents/CS179J-Smart-Water-Station/data/user_data.csv"
already_counted_goal = 0
    def __init__(self):
        """Build the main window, ensure the data file exists, create all
        page frames, and initialize the RFID reader and pump interfaces."""
        super().__init__()
        self.frame = None                # currently raised page frame
        self.container = None            # parent frame holding all pages
        self.frame_dictionary = {}       # page class -> live frame instance
        self.frame_object_list = []      # page classes; filled by create_frames()
        self.csv_initialize()            # ensure the user-data CSV exists first
        self.setup_gui()
        self.create_container()
        self.create_frames()
        self.rfid = RFID()               # RFID card reader interface
        self.card_uid = ''               # UID of the most recently scanned card
        self.card_state = False          # registration status of the scanned card
        self.pump = Pump()               # water pump interface
    def setup_gui(self):
        """Set the window title and fix the window at a non-resizable 800x480."""
        self.title("Smart Water Station")
        self.geometry('800x480')
        self.resizable(width=False, height=False)
    def create_container(self):
        """Create the parent frame that hosts every page frame."""
        self.container = tk.Frame(self)
        self.container.pack()
def create_frames(self):
self.frame_object_list = [IdlePage, RFIDPage, RefillPage, UserRegistrationPage, UserHomeScreen, SettingsPage,
DeletionConfirmationPage, DeletionPage, MoreInfoPage, ChangeAttributesPage,
EditAttributes]
for the_frame in self.frame_object_list:
self.frame = the_frame(self, self.container)
self.frame_dictionary[the_frame] = self.frame
self.frame.grid(row=0, column=0, sticky="news")
self.change_frame(IdlePage)
def update_frame(self, f):
self.frame = self.frame_dictionary[f]
for the_frame in self.frame_object_list:
self.frame.destroy()
for the_frame in self.frame_object_list:
self.frame = the_frame(self, self.container)
self.frame_dictionary[the_frame] = self.frame
self.frame.grid(row=0, column=0, sticky="news")
def change_frame(self, f):
self.frame = self.frame_dictionary[f]
self.frame.tkraise()
def csv_initialize(self):
if not path.exists(self.file_path):
columns = ['card_uid', 'registration_state', 'name', 'age', 'sex', 'activity_level',
'daily_hydration_lower', 'daily_hydration_upper', 'water_dispensed', 'total_dispensed',
'percent_dispensed_of_daily', 'num_days', 'num_days_goal', 'avg_intake', 'last_login'
]
user_data = [
['734a266f', False, ' ', 0, ' ', ' ', 0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0, ' '],
['5d81e96d', False, ' ', 0, ' ', ' ', 0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0, ' '],
['4d71f56d', False, ' ', 0, ' ', ' ', 0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0, ' '],
['fdd1a46b', False, ' ', 0, ' ', ' ', 0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0, ' '],
['1d4ba46b', False, ' ', 0, ' ', ' ', 0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0, ' '],
['dd8b9f6b', False, ' ', 0, ' ', ' ', 0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0, ' ']
]
# open file, write data to file
with open(self.file_path, 'w', newline='') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(columns)
writer.writerows(user_data)
csv_file.close()
def scan_rfid_card(self):
self.rfid.scan_card()
self.card_uid = self.rfid.get_uid()
def check_rfid_card_registration(self):
self.card_state = self.rfid.check_registration(self.card_uid)
def register_card(self):
self.rfid.register_card(self.card_uid)
self.check_rfid_card_registration()
def unregister_card(self):
self.rfid.unregister_card(self.card_uid)
self.check_rfid_card_registration()
def get_card_uid(self):
return self.card_uid
def get_card_state(self):
return self.card_state
class IdlePage(tk.Frame):
    """Attract/idle screen that cycles through water facts every 15 seconds.

    Shown until the user presses a button to scan a card or to refill the
    container.
    """

    def __init__(self, container, parent):
        tk.Frame.__init__(self, parent)
        # 5x3 grid: header row, "Did you know?" row, fact body, buttons, margin
        self.rowconfigure(0, minsize=70, weight=1)
        self.rowconfigure(1, minsize=50, weight=1)
        self.rowconfigure(2, minsize=230, weight=1)
        self.rowconfigure(3, minsize=100, weight=1)
        self.rowconfigure(4, minsize=30, weight=1)
        self.columnconfigure(0, minsize=120, weight=1)
        self.columnconfigure(1, minsize=480, weight=0)
        self.columnconfigure(2, minsize=200, weight=1)
        # WaterData is defined elsewhere; presumably supplies (fact, source)
        # string pairs -- TODO confirm against its module
        self.water_data = WaterData()
        self.fact, self.source = self.water_data.get_fact_source()
        # define GUI labels and buttons
        self.idle_label = tk.Label(self, text="IDLE MODE", font=("Calibri", 12))
        self.did_you_know_label = tk.Label(self, text="Did you know?\n\n", font=("Calibri", 12, "bold"))
        self.fact_source_label = tk.Label(self, text=self.fact + "\n\n" + self.source, font=("Calibri", 12),
                                          justify="left", anchor="w")
        # NOTE(review): .place() returns None, so these two attributes hold
        # None; the buttons themselves stay alive as children of this frame
        self.next_btn = tk.Button(self, text="-- Press this button to continue --", font=("Calibri", 12), bg = "light green",
                                  command=lambda: container.change_frame(RFIDPage)).place(x= 120, y=380)
        self.refil_btn = tk.Button(self, text="-- Refill Container --", font=("Calibri", 12),
                                   command=lambda: container.change_frame(RefillPage)).place(x=470, y=380)
        # structure the GUI page using a grid
        self.idle_label.grid(row=0, column=0, sticky="nw", padx=7, pady=7)
        self.did_you_know_label.grid(row=1, column=1, sticky="nw")
        self.fact_source_label.grid(row=2, column=1, sticky="nw")
        self.fact_source_label.after(15000, self.update_text)

    def update_text(self):
        """Swap in a new fact/source pair, then reschedule itself."""
        self.fact, self.source = self.water_data.get_fact_source()
        self.fact_source_label.config(text=self.fact + "\n\n" + self.source, font=("Calibri", 12), justify="left",
                                      anchor="w")
        # 15000 = 15 seconds, this can change to a different value if need be
        self.fact_source_label.after(15000, self.update_text)
class RefillPage(tk.Frame):
    """Page that walks the user through refilling the water container.

    Offers a button that unlocks the pump and a "Done" button that returns
    to the idle page.
    """

    def __init__(self, container, parent):
        tk.Frame.__init__(self, parent)
        # NOTE: .place() returns None, so these attributes hold None; the
        # widgets themselves stay alive as children of this frame.
        # Fix: user-facing typo "nossle" -> "nozzle" in the instructions.
        self.scan_card_label = tk.Label(self, text= "Remove Hose\n\n Place hose in new container\n\n Place cup under nozzle\n\n Push button until water is dispensed",
                                        font=("Calibri", 20)).place(x=170, y=10)
        self.refil_btn = tk.Button(self, text="-- Unlock Pump --", font=("Calibri", 12),
                                   command=lambda: container.pump.pump_active()).place(x=315, y=280)
        self.back_btn = tk.Button(self, text="Done", font=("Calibri", 12),
                                  command=lambda: container.change_frame(IdlePage)).place(x=360, y=380)
class RFIDPage(tk.Frame):
    """Card-scan page: routes the user to their home screen or to registration.

    Scanning a card caches its UID on this page, on ``UserHomeScreen`` and on
    ``MoreInfoPage``, then raises the home screen (registered card) or the
    registration form (unknown card).
    """

    def __init__(self, container, parent):
        tk.Frame.__init__(self, parent)
        self.file_path = GUI.file_path  # shared per-user CSV store
        self.uid = ''                   # UID of the last-scanned card
        self.state = False              # True when that card is registered
        # .place() returns None; widgets stay alive as children of this frame
        self.scan_card_label = tk.Label(self, text="PLEASE SCAN YOUR RFID CARD TO CONTINUE",
                                        font=("Calibri", 20)).place(x=100, y=10)
        self.back_btn = tk.Button(self, text="Go Back", font=("Calibri", 12),
                                  command=lambda: container.change_frame(IdlePage)).place(x=380, y=350)
        self.scan_card_btn = tk.Button(self, text="Scan your RFID Card now", font=("Calibri", 12),
                                       command=lambda: self.scan_rfid_card(container)).place(x=300, y=200)

    def scan_rfid_card(self, container):
        """Scan a card through the main window and route to the next page.

        ``container`` is the GUI root; it owns the RFID reader and the frame
        stack.  The target page is rebuilt (``update_frame``) before being
        raised so it shows this card's data.
        """
        container.scan_rfid_card()
        container.check_rfid_card_registration()
        self.uid = container.get_card_uid()
        self.state = container.get_card_state()
        # publish the UID to the pages that read it at construction time
        UserHomeScreen.uid = self.uid
        MoreInfoPage.uid = self.uid
        if self.state:
            self.update_last_login_num_days()
            container.update_frame(UserHomeScreen)
            container.change_frame(UserHomeScreen)
        else:
            container.update_frame(UserRegistrationPage)
            container.change_frame(UserRegistrationPage)

    def update_last_login_num_days(self):
        """Roll the user's daily counters when a new day has started.

        If at least one full day passed since ``last_login``, increments
        ``num_days`` and zeroes the per-day dispense counters, then stamps
        the current time back into the CSV.
        """
        df = pd.read_csv(self.file_path)
        row_num = df.index[df['card_uid'] == self.uid].tolist()
        now = datetime.now()
        date_time = now.strftime("%m/%d/%Y %H:%M:%S")
        old_time = datetime.strptime(df.at[row_num[0], 'last_login'], "%m/%d/%Y %H:%M:%S")
        time_diff = (now - old_time).days
        if time_diff > 0:
            df.at[row_num[0], 'num_days'] += 1
            df.at[row_num[0], 'percent_dispensed_of_daily'] = 0.0
            df.at[row_num[0], 'water_dispensed'] = 0.0
            # allow today's goal to be counted again
            GUI.already_counted_goal = 0
        df.at[row_num[0], 'last_login'] = date_time
        df.to_csv(self.file_path, index=False)
class UserRegistrationPage(tk.Frame):
    """Registration form shown when an unregistered card is scanned.

    Collects name, age, sex and activity level, then registers the card and
    writes a fresh row of defaults for its UID into the user-data CSV.
    """

    def __init__(self, container, parent):
        tk.Frame.__init__(self, parent)
        self.file_path = GUI.file_path
        self.uid = ''
        # keystroke validators; '%S' passes the inserted text to the callback
        self.valid_command_name = (self.register(self.input_validation_name), '%S')
        self.valid_command_age = (self.register(self.input_validation_age), '%S')
        self.welcome_new_user_screen = tk.Label(self, text="Hello, New User!", font=("Calibri", 20)).place(x=320, y=0)
        self.user_intro = tk.Label(self, text="What is your: ", font=("Calibri", 15)).place(x=240, y=120)
        self.user_name = tk.Label(self, text="Name").place(x=240, y=160)
        self.user_age = tk.Label(self, text="Age").place(x=240, y=200)
        self.input_name = tk.StringVar()
        self.usr_name_in = tk.Entry(self, width=30, textvariable=self.input_name, validate="key",
                                    validatecommand=self.valid_command_name).place(x=310, y=160)
        self.input_age = tk.StringVar()
        self.usr_age_in = tk.Entry(self, textvariable=self.input_age, width=30, validate="key",
                                   validatecommand=self.valid_command_age).place(x=310, y=200)
        self.usr_S = tk.Label(self, text="Are you: ").place(x=240, y=240)
        self.s = tk.StringVar()
        self.usr_SSelection = ttk.Combobox(self, width=7, textvariable=self.s)
        self.usr_SSelection.place(x=310, y=240)
        self.usr_SSelection['values'] = ('Male', 'Female')
        self.usr_SSelection.current()
        self.usr_S2 = tk.Label(self, text="What is your activity level? ").place(x=240, y=280)
        self.s2 = tk.StringVar()
        self.usr_SSelection2 = ttk.Combobox(self, width=20, textvariable=self.s2)
        self.usr_SSelection2.place(x=310, y=310)
        self.usr_SSelection2['values'] = ('Sedentary', 'Moderate', 'Active')
        self.usr_SSelection2.current()
        self.submit = tk.Button(self, text="Submit",
                                command=lambda: [self.save_command(container), UserHomeScreen.research_data(self),
                                                 container.update_frame(UserHomeScreen),
                                                 container.change_frame(UserHomeScreen)]).place(x=385, y=350)
        self.go_back_btn = tk.Button(self, text="Go Back", font=("Calibri", 12),
                                     command=lambda: container.change_frame(RFIDPage)).place(x=375, y=400)

    def input_validation_name(self, keypress):
        """Allow only letters and whitespace in the name field."""
        # idiom fix: return the boolean expression directly instead of
        # `if cond: return True else: return False`
        return keypress.isalpha() or keypress.isspace()

    def input_validation_age(self, keypress):
        """Allow only digits in the age field."""
        return keypress.isnumeric()

    def save_command(self, container):
        """Register the scanned card and persist the form values to the CSV."""
        self.uid = container.get_card_uid()
        container.register_card()
        df = pd.read_csv(self.file_path)
        row_num = df.index[df['card_uid'] == self.uid].tolist()
        now = datetime.now()
        date_time = now.strftime("%m/%d/%Y %H:%M:%S")
        df.at[row_num[0], 'name'] = self.input_name.get()
        df.at[row_num[0], 'age'] = self.input_age.get()
        df.at[row_num[0], 'sex'] = self.s.get()
        df.at[row_num[0], 'activity_level'] = self.s2.get()
        # all counters start from a clean slate for a new registration
        df.at[row_num[0], 'daily_hydration_lower'] = 0.0
        df.at[row_num[0], 'daily_hydration_upper'] = 0.0
        df.at[row_num[0], 'water_dispensed'] = 0.0
        df.at[row_num[0], 'total_dispensed'] = 0.0
        df.at[row_num[0], 'percent_dispensed_of_daily'] = 0.0
        df.at[row_num[0], 'num_days'] = 1
        df.at[row_num[0], 'num_days_goal'] = 0
        df.at[row_num[0], 'avg_intake'] = 0.0
        df.at[row_num[0], 'last_login'] = date_time
        df.to_csv(self.file_path, index=False)
class UserHomeScreen(tk.Frame):
    """Dashboard shown after a registered user scans their card."""

    # UID of the most recently scanned card; published by RFIDPage before
    # this frame is rebuilt via GUI.update_frame.
    uid = ''

    def __init__(self, container, parent):
        tk.Frame.__init__(self, parent)
        self.file_path = GUI.file_path
        self.uid = UserHomeScreen.uid
        df = pd.read_csv(self.file_path)
        row_num = df.index[df['card_uid'] == self.uid].tolist()
        # fall back to row 0 when the uid is unknown (e.g. the frame is built
        # at startup before any card has been scanned)
        if not row_num:  # bug fix: was `len(row_num) is 0` (identity test on an int literal)
            row_num.append(0)
        self.welcome_home_screen = tk.Label(self, text="Hello, " + str(df.at[row_num[0], 'name']) + "!",
                                            font=("Calibri", 20)).place(x=320, y=10)
        self.hydration_percentage_header = tk.Label(self, text="Current Hydration Level:",
                                                    font=("Calibri", 30)).place(x=160, y=150)
        self.hydration_percentage = tk.Label(self, text=str(df.at[row_num[0], 'percent_dispensed_of_daily']) + "%",
                                             font=("Calibri", 30)).place(x=370, y=210)
        self.settings_btn = tk.Button(self, text="Settings", font=("Calibri", 12),
                                      command=lambda: container.change_frame(SettingsPage)).place(x=660, y=400)
        self.logout_btn = tk.Button(self, text="Log Out", font=("Calibri", 12),
                                    command=lambda: container.change_frame(IdlePage)).place(x=380, y=400)
        self.more_info_btn = tk.Button(self, text="More Info", font=("Calibri", 12),
                                       command=lambda: [self.research_data(), container.update_frame(MoreInfoPage),
                                                        container.change_frame(MoreInfoPage)]).place(x=50, y=400)
        self.dispense_btn = tk.Button(self, text="Enable Dispenser", font=("Calibri", 12), fg="green",
                                      command=lambda: [container.pump.dispense_water(container, False),
                                                       container.update_frame(UserHomeScreen),
                                                       container.change_frame(UserHomeScreen)]).place(x=340, y=320)
        df.to_csv(self.file_path, index=False)

    def research_data(self):
        """Recompute the user's daily hydration bounds and goal counter.

        Bounds (in ml/day, per the hard-coded age/sex table below) are written
        back to the CSV, and `num_days_goal` is incremented once per day when
        the daily percentage reaches 100.
        """
        self.file_path = GUI.file_path
        self.uid = UserHomeScreen.uid
        df = pd.read_csv(self.file_path)
        row_num = df.index[df['card_uid'] == self.uid].tolist()
        if not row_num:  # bug fix: was `len(row_num) is 0`
            row_num.append(0)
        if df.at[row_num[0], 'age'] == 2 or df.at[row_num[0], 'age'] == 3:
            df.at[row_num[0], 'daily_hydration_lower'] = 1000
            df.at[row_num[0], 'daily_hydration_upper'] = 1400
        elif df.at[row_num[0], 'sex'] == "Male":
            if 3 < df.at[row_num[0], 'age'] <= 8:
                df.at[row_num[0], 'daily_hydration_lower'] = 1400
                df.at[row_num[0], 'daily_hydration_upper'] = 1600
            elif 8 < df.at[row_num[0], 'age'] <= 13:
                df.at[row_num[0], 'daily_hydration_lower'] = 1800
                df.at[row_num[0], 'daily_hydration_upper'] = 2000
            elif 13 < df.at[row_num[0], 'age'] <= 18:
                df.at[row_num[0], 'daily_hydration_lower'] = 2400
                df.at[row_num[0], 'daily_hydration_upper'] = 2800
            elif 18 < df.at[row_num[0], 'age'] <= 30:
                df.at[row_num[0], 'daily_hydration_lower'] = 2600
                df.at[row_num[0], 'daily_hydration_upper'] = 2800
            elif 30 < df.at[row_num[0], 'age'] <= 50:
                df.at[row_num[0], 'daily_hydration_lower'] = 2400
                df.at[row_num[0], 'daily_hydration_upper'] = 2600
            elif df.at[row_num[0], 'age'] > 50:
                df.at[row_num[0], 'daily_hydration_lower'] = 2200
                df.at[row_num[0], 'daily_hydration_upper'] = 2400
        elif df.at[row_num[0], 'sex'] == "Female":
            if 3 < df.at[row_num[0], 'age'] <= 8:
                df.at[row_num[0], 'daily_hydration_lower'] = 1400
                df.at[row_num[0], 'daily_hydration_upper'] = 1600
            elif 8 < df.at[row_num[0], 'age'] <= 13:
                df.at[row_num[0], 'daily_hydration_lower'] = 1600
                df.at[row_num[0], 'daily_hydration_upper'] = 2000
            elif 13 < df.at[row_num[0], 'age'] <= 18:
                df.at[row_num[0], 'daily_hydration_lower'] = 2000
                df.at[row_num[0], 'daily_hydration_upper'] = 2000
            elif 18 < df.at[row_num[0], 'age'] <= 30:
                df.at[row_num[0], 'daily_hydration_lower'] = 2000
                df.at[row_num[0], 'daily_hydration_upper'] = 2200
            elif 30 < df.at[row_num[0], 'age'] <= 50:
                df.at[row_num[0], 'daily_hydration_lower'] = 2000
                df.at[row_num[0], 'daily_hydration_upper'] = 2000
            elif df.at[row_num[0], 'age'] > 50:
                df.at[row_num[0], 'daily_hydration_lower'] = 1800
                df.at[row_num[0], 'daily_hydration_upper'] = 1800
        if df.at[row_num[0], 'percent_dispensed_of_daily'] >= 100 and GUI.already_counted_goal == 0:
            df.at[row_num[0], 'num_days_goal'] += 1
            GUI.already_counted_goal = 1
        df.to_csv(self.file_path, index=False)
class SettingsPage(tk.Frame):
def __init__(self, container, parent):
tk.Frame.__init__(self, parent)
self.settings_intro_header = tk.Label(self, text="What Would You Like To Do?",
font=("Calibri", 20)).place(x=210, y=0)
| |
ims, markers=markers, marker_kwargs=marker_kwargs, save_file=save_file, **kwargs)
# plot differences
if interp_func.__name__ == 'interpolate_2d':
# use upper left corner as base frame for whole grid
base_im = ims[0][0]
ims_diff = [[None for _ in range(n_frames)] for _ in range(n_frames)]
for r, ims_list_y in enumerate(ims):
for c, im in enumerate(ims_list_y):
ims_diff[r][c] = 0.5 + (im - base_im)
else:
# use left-most column as base frame for each row
ims_diff = [[None for _ in range(n_frames)] for _ in range(len(ims))]
for r, ims_list_y in enumerate(ims):
for c, im in enumerate(ims_list_y):
ims_diff[r][c] = 0.5 + (im - ims[r][0]) # compare across rows
plot_func(
ims_diff, markers=markers, marker_kwargs=marker_kwargs, save_file=save_file, **kwargs)
else:
"""plot generated frames and differences together"""
if crop_type:
plot_func(
ims, markers=None, marker_kwargs=marker_kwargs, save_file=save_file, **kwargs)
else:
plot_func(
ims, markers=None, marker_kwargs=marker_kwargs, save_file=save_file, **kwargs)
def plot_frame_array_latents(
        hparams, ims, plot_func, interp_func, n_latents, crop_type, markers, save_outputs=False,
        **kwargs):
    """Plot a grid of frames produced by manipulating autoencoder latents.

    Dispatches to ``plot_func``; when it is ``plot_2d_frame_array`` and a 2D
    latent interpolation was used, difference images against the top row and
    against the left column are plotted as well.
    """
    n_cols = len(ims[0])
    # cropped frames get larger markers than full frames
    size, width = (30, 8) if crop_type else (20, 5)
    marker_kwargs = {
        'markersize': size, 'markeredgewidth': width, 'markeredgecolor': [1, 1, 0],
        'fillstyle': 'none'}
    save_file = None
    if save_outputs:
        save_file = os.path.join(
            get_user_dir('fig'),
            'ae', 'D=%02i_latent-manipulation_%s_%s-crop.png' %
                  (hparams['n_ae_latents'], hparams['session'], crop_type))
    if plot_func.__name__ == 'plot_2d_frame_array':
        # plot generated frames and differences separately; markers are
        # suppressed on cropped frames
        frame_markers = None if crop_type else markers
        plot_func(ims, markers=frame_markers, marker_kwargs=marker_kwargs, save_file=save_file)
        if n_latents == 2 and interp_func.__name__ == 'interpolate_2d':
            # differences against the top-most row (compare across columns)
            diffs = [[None] * n_cols for _ in range(n_cols)]
            for r, row in enumerate(ims):
                for c, frame in enumerate(row):
                    diffs[r][c] = 0.5 + (frame - ims[0][c])
            plot_func(
                diffs, markers=markers, marker_kwargs=marker_kwargs, save_file=save_file,
                **kwargs)
            # differences against the left-most column (compare across rows)
            diffs = [[None] * n_cols for _ in range(len(ims))]
            for r, row in enumerate(ims):
                for c, frame in enumerate(row):
                    diffs[r][c] = 0.5 + (frame - row[0])
            plot_func(
                diffs, markers=markers, marker_kwargs=marker_kwargs, save_file=save_file, **kwargs)
    else:
        # plot generated frames and differences together
        if crop_type:
            raise NotImplementedError
        plot_func(
            ims, markers=None, marker_kwargs=marker_kwargs, save_file=save_file, **kwargs)
def get_cluster_prototype_ims(dataset, n_clusters, as_numpy=False):
    """Cluster AE latents for a dataset and return one prototype frame per cluster.

    Loads a pre-trained VAE for the named dataset, k-means clusters its
    test-trial latents, and for each cluster returns the frame whose latent
    vector is closest to the cluster center.

    Parameters
    ----------
    dataset : :obj:`str`
        'ibl' | 'dipoppa' | 'musall'
    n_clusters : :obj:`int`
        number of k-means clusters
    as_numpy : :obj:`bool`, optional
        if True, return frames as 2D numpy arrays; otherwise keep them as
        batch tensors sliced from the data generator

    Returns
    -------
    :obj:`tuple`
        - example_idxs: index of each prototype in the concatenated latents
        - trial_idxs: trial containing each prototype frame
        - frame_idxs: frame index of each prototype within its trial
        - ims: the prototype frames themselves
    """
    import pickle
    from sklearn.cluster import KMeans
    # ----------------------
    # load AE model
    # ----------------------
    # per-dataset training configuration used to locate the saved model
    if dataset == 'ibl':
        lab = 'ibl'
        expt = 'ephys'
        iters = 200
        frac = '0.5'
        n_ae_latents = 6
    elif dataset == 'dipoppa':
        lab = 'dipoppa'
        expt = 'pupil'
        iters = 200
        frac = '0.5'
        n_ae_latents = 5
    elif dataset == 'musall':
        lab = 'musall'
        expt = 'vistrained'
        iters = 200
        frac = '0.5'
        n_ae_latents = 7
    else:
        raise Exception
    # set model info
    version = 'best'  # test-tube version; 'best' finds the version with the lowest mse
    sess_idx = 0  # when using a multisession, this determines which session is used
    hparams = {
        'data_dir': get_user_dir('data'),
        'save_dir': get_user_dir('save'),
        'experiment_name': 'iters-%i_frac-%s' % (iters, frac),
        'model_class': 'vae',
        'model_type': 'conv',
        'n_ae_latents': n_ae_latents,
        'rng_seed_data': 0,
        'trial_splits': '8;1;1;0',
        'train_frac': float(frac),
        'rng_seed_model': 0,
        'conditional_encoder': False,
    }
    # programmatically fill out other hparams options
    get_lab_example(hparams, lab, expt)
    hparams['session_dir'], sess_ids = get_session_dir(hparams)
    hparams['expt_dir'] = get_expt_dir(hparams)
    # build model(s)
    if hparams['model_class'] == 'ae':
        from behavenet.models import AE as Model
    elif hparams['model_class'] == 'vae':
        from behavenet.models import VAE as Model
    else:
        raise NotImplementedError
    model_ae, data_generator = get_best_model_and_data(hparams, Model, version=version)
    # ----------------------
    # cluster latents
    # ----------------------
    # load latents (exported to a pickle next to the model if not present)
    sess_id = str('%s_%s_%s_%s_latents.pkl' % (
        hparams['lab'], hparams['expt'], hparams['animal'], hparams['session']))
    filename = os.path.join(
        hparams['expt_dir'], 'version_%i' % 0, sess_id)
    if not os.path.exists(filename):
        print('exporting latents...', end='')
        from behavenet.fitting.eval import export_latents
        export_latents(data_generator, model_ae)
        print('done')
    latent_dict = pickle.load(open(filename, 'rb'))
    # get all test latents, remembering each frame's (trial, frame) origin
    dtype = 'test'
    latents = []
    trials = []
    frames = []
    for trial in latent_dict['trials'][dtype]:
        ls = latent_dict['latents'][trial]
        n_frames_batch = ls.shape[0]
        latents.append(ls)
        trials.append([trial] * n_frames_batch)
        frames.append(np.arange(n_frames_batch))
        # print('trial: %i, frames: %i' % (trial, n_frames_batch))
    latents = np.concatenate(latents)
    trials = np.concatenate(trials)
    frames = np.concatenate(frames)
    np.random.seed(0)  # to reproduce clusters
    kmeans = KMeans(init='k-means++', n_clusters=n_clusters, n_init=10)
    distances = kmeans.fit_transform(latents)
    clust_id = kmeans.predict(latents)
    # ----------------------
    # get representative example from each cluster
    # ----------------------
    example_idxs = []
    trial_idxs = []
    frame_idxs = []
    ims = []
    # for clust in range(n_clusters):
    for clust in range(n_clusters):
        # get any frame in this cluster
        # frame_idx = np.where(clust_ids==clust)[0][0]
        # get frame that is closest to cluster center
        frame_idx = np.argmin(distances[:, clust])
        example_idxs.append(frame_idx)
        trial_curr = trials[frame_idx]
        frame_curr = frames[frame_idx]
        batch = data_generator.datasets[0][trial_curr]
        if as_numpy:
            im = batch['images'].cpu().detach().numpy()[frame_curr, 0]
        else:
            im = batch['images'][None, frame_curr]
        trial_idxs.append(trial_curr)
        frame_idxs.append(frame_curr)
        ims.append(im)
    return example_idxs, trial_idxs, frame_idxs, ims
def interpolate_points(points, n_frames):
    """Linearly interpolate between consecutive points.

    For each pair of consecutive points, produces ``n_frames`` evenly spaced
    vectors starting at the first point and stopping short of the second
    (the final endpoint is excluded).

    ``n_frames`` may be a single int (used for every segment) or a sequence
    with one entry per segment.  Returns a list of (1, D) arrays.
    """
    n_points = len(points)
    if isinstance(n_frames, int):
        n_frames = [n_frames] * (n_points - 1)
    assert len(n_frames) == (n_points - 1)
    inputs_list = []
    for seg in range(n_points - 1):
        start = points[None, seg]
        step = (points[None, seg + 1] - start) / n_frames[seg]
        inputs_list.extend(start + k * step for k in range(n_frames[seg]))
    return inputs_list
def interpolate_point_path(
        interp_type, model, ims_0, latents_0, labels_0, points, n_frames=10, ch=0,
        crop_kwargs=None, apply_inverse_transform=True):
    """Return reconstructed images created by interpolating through multiple points.
    Parameters
    ----------
    interp_type : :obj:`str`
        'latents' | 'labels'
    model : :obj:`behavenet.models` object
        autoencoder model
    ims_0 : :obj:`np.ndarray`
        base images for interpolating labels, of shape (1, n_channels, y_pix, x_pix)
    latents_0 : :obj:`np.ndarray`
        base latents of shape (1, n_latents); these values will be used if
        `interp_type='labels'`, and they will be ignored if `inter_type='latents'`
        (since `points` will be used)
    labels_0 : :obj:`np.ndarray`
        base labels of shape (1, n_labels); these values will be used if
        `interp_type='latents'`, and they will be ignored if `inter_type='labels'`
        (since `points` will be used)
    points : :obj:`list`
        one entry for each point in path; each entry is an np.ndarray of shape (n_latents,)
    n_frames : :obj:`int` or :obj:`array-like`
        number of interpolation points between each point; can be an integer that is used
        for all paths, or an array/list of length one less than number of points
    ch : :obj:`int`, optional
        specify which channel of input images to return (can only be a single value)
    crop_kwargs : :obj:`dict`, optional
        if not None, crop each frame using keys 'y_0', 'y_ext', 'x_0', 'x_ext';
        requires `ch` to be an int
    apply_inverse_transform : :obj:`bool`, optional
        forwarded to :func:`get_reconstruction` when interpolating latents
    Returns
    -------
    :obj:`tuple`
        - ims_list (:obj:`list` of :obj:`np.ndarray`) interpolated images
        - inputs_list (:obj:`list` of :obj:`np.ndarray`) interpolated values
    """
    if model.hparams.get('conditional_encoder', False):
        raise NotImplementedError
    n_points = len(points)
    if isinstance(n_frames, int):
        n_frames = [n_frames] * (n_points - 1)
    assert len(n_frames) == (n_points - 1)
    ims_list = []
    inputs_list = []
    # walk each segment of the path; the final endpoint of each segment is
    # excluded (it is the start of the next segment)
    for p in range(n_points - 1):
        p0 = points[None, p]
        p1 = points[None, p + 1]
        p_vec = (p1 - p0) / n_frames[p]
        for pn in range(n_frames[p]):
            vec = p0 + pn * p_vec
            if interp_type == 'latents':
                # conditional models additionally need the fixed base labels
                if model.hparams['model_class'] == 'cond-ae' \
                        or model.hparams['model_class'] == 'cond-vae':
                    im_tmp = get_reconstruction(
                        model, vec, apply_inverse_transform=apply_inverse_transform,
                        labels=torch.from_numpy(labels_0).float().to(model.hparams['device']))
                else:
                    im_tmp = get_reconstruction(
                        model, vec, apply_inverse_transform=apply_inverse_transform)
            elif interp_type == 'labels':
                if model.hparams['model_class'] == 'cond-ae-msp' \
                        or model.hparams['model_class'] == 'sss-vae':
                    im_tmp = get_reconstruction(
                        model, vec, apply_inverse_transform=True)
                else:  # cond-ae
                    im_tmp = get_reconstruction(
                        model, ims_0,
                        labels=torch.from_numpy(vec).float().to(model.hparams['device']))
            else:
                raise NotImplementedError
            if crop_kwargs is not None:
                if not isinstance(ch, int):
                    raise ValueError('"ch" must be an integer to use crop_kwargs')
                ims_list.append(get_crop(
                    im_tmp[0, ch],
                    crop_kwargs['y_0'], crop_kwargs['y_ext'],
                    crop_kwargs['x_0'], crop_kwargs['x_ext']))
            else:
                if isinstance(ch, int):
                    ims_list.append(np.copy(im_tmp[0, ch]))
                else:
                    ims_list.append(np.copy(concat(im_tmp[0])))
            inputs_list.append(vec)
    return ims_list, inputs_list
def make_interpolated(
ims, save_file, markers=None, text=None, text_title=None, text_color=[1, 1, 1],
frame_rate=20, scale=3, markersize=10, markeredgecolor='w', markeredgewidth=1, ax=None):
n_frames = len(ims)
y_pix, x_pix = ims[0].shape
if ax is None:
fig_width = scale / 2
fig_height = y_pix / x_pix * scale / 2
fig = plt.figure(figsize=(fig_width, fig_height), dpi=300)
ax = plt.gca()
return_ims = False
else:
return_ims = True
ax.set_xticks([])
ax.set_yticks([])
default_kwargs = {'animated': True, 'cmap': 'gray', 'vmin': 0, 'vmax': 1}
txt_kwargs = {
'fontsize': 4, 'color': text_color, 'fontname': 'monospace',
'horizontalalignment': 'left', 'verticalalignment': 'center',
'transform': ax.transAxes}
# ims is a list of lists, each row is a list of artists to draw in the current frame; here we
# are just animating one artist, the image, in each frame
ims_ani = []
for i, im in enumerate(ims):
im_tmp = []
im_tmp.append(ax.imshow(im, **default_kwargs))
# [s.set_visible(False) for s in ax.spines.values()]
if markers is not None:
im_tmp.append(ax.plot(
markers[i, 0], markers[i, 1], '.r', markersize=markersize,
markeredgecolor=markeredgecolor, markeredgewidth=markeredgewidth)[0])
if text is not | |
<filename>gen/SessionTypeParser.py
# Generated from /Users/lorenzobacchiani/Desktop/session-subtyping-tool/SessionType.g4 by ANTLR 4.9
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
    """Return the serialized ATN string for SessionTypeParser.

    Machine-generated by ANTLR 4.9 from SessionType.g4; do not edit the
    encoded payload by hand -- regenerate the parser from the grammar instead.
    """
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\21")
        buf.write("\u00e3\4\2\t\2\4\3\t\3\4\4\t\4\3\2\3\2\3\2\3\2\3\3\3\3")
        buf.write("\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\7\3\32\n")
        buf.write("\3\f\3\16\3\35\13\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3")
        buf.write("\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\7\3/\n\3\f\3\16\3\62\13")
        buf.write("\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3")
        buf.write("\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3")
        buf.write("\3\3\3\3\3\3\3\3\3\3\3\7\3T\n\3\f\3\16\3W\13\3\3\3\3\3")
        buf.write("\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3")
        buf.write("\3\3\3\3\3\3\3\3\3\3\3\3\3\7\3o\n\3\f\3\16\3r\13\3\3\3")
        buf.write("\3\3\3\3\5\3w\n\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4")
        buf.write("\3\4\3\4\3\4\3\4\7\4\u0086\n\4\f\4\16\4\u0089\13\4\3\4")
        buf.write("\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3")
        buf.write("\4\3\4\7\4\u009b\n\4\f\4\16\4\u009e\13\4\3\4\3\4\3\4\3")
        buf.write("\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4")
        buf.write("\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7")
        buf.write("\4\u00be\n\4\f\4\16\4\u00c1\13\4\3\4\3\4\3\4\3\4\3\4\3")
        buf.write("\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4")
        buf.write("\3\4\3\4\3\4\7\4\u00d9\n\4\f\4\16\4\u00dc\13\4\3\4\3\4")
        buf.write("\3\4\5\4\u00e1\n\4\3\4\2\2\5\2\4\6\2\2\2\u00f6\2\b\3\2")
        buf.write("\2\2\4v\3\2\2\2\6\u00e0\3\2\2\2\b\t\5\4\3\2\t\n\b\2\1")
        buf.write("\2\n\13\7\2\2\3\13\3\3\2\2\2\f\r\7\3\2\2\r\16\7\4\2\2")
        buf.write("\16\17\b\3\1\2\17\20\7\20\2\2\20\21\7\b\2\2\21\22\5\4")
        buf.write("\3\2\22\33\b\3\1\2\23\24\7\t\2\2\24\25\7\20\2\2\25\26")
        buf.write("\7\b\2\2\26\27\5\4\3\2\27\30\b\3\1\2\30\32\3\2\2\2\31")
        buf.write("\23\3\2\2\2\32\35\3\2\2\2\33\31\3\2\2\2\33\34\3\2\2\2")
        buf.write("\34\36\3\2\2\2\35\33\3\2\2\2\36\37\b\3\1\2\37 \7\5\2\2")
        buf.write(" w\3\2\2\2!\"\7\13\2\2\"#\7\4\2\2#$\b\3\1\2$%\7\20\2\2")
        buf.write("%&\7\b\2\2&\'\5\4\3\2\'\60\b\3\1\2()\7\t\2\2)*\7\20\2")
        buf.write("\2*+\7\b\2\2+,\5\4\3\2,-\b\3\1\2-/\3\2\2\2.(\3\2\2\2/")
        buf.write("\62\3\2\2\2\60.\3\2\2\2\60\61\3\2\2\2\61\63\3\2\2\2\62")
        buf.write("\60\3\2\2\2\63\64\b\3\1\2\64\65\7\5\2\2\65w\3\2\2\2\66")
        buf.write("\67\7\f\2\2\678\7\20\2\289\7\n\2\29:\5\6\4\2:;\b\3\1\2")
        buf.write(";w\3\2\2\2<=\7\20\2\2=w\b\3\1\2>?\7\r\2\2?w\b\3\1\2@A")
        buf.write("\7\16\2\2AB\7\20\2\2BC\7\b\2\2CD\5\4\3\2DE\b\3\1\2Ew\3")
        buf.write("\2\2\2FG\7\6\2\2GH\7\16\2\2HI\7\20\2\2IJ\7\b\2\2JK\5\4")
        buf.write("\3\2KU\b\3\1\2LM\7\t\2\2MN\7\16\2\2NO\7\20\2\2OP\7\b\2")
        buf.write("\2PQ\5\4\3\2QR\b\3\1\2RT\3\2\2\2SL\3\2\2\2TW\3\2\2\2U")
        buf.write("S\3\2\2\2UV\3\2\2\2VX\3\2\2\2WU\3\2\2\2XY\7\7\2\2YZ\b")
        buf.write("\3\1\2Zw\3\2\2\2[\\\7\17\2\2\\]\7\20\2\2]^\7\b\2\2^_\5")
        buf.write("\4\3\2_`\b\3\1\2`w\3\2\2\2ab\7\6\2\2bc\7\17\2\2cd\7\20")
        buf.write("\2\2de\7\b\2\2ef\5\4\3\2fp\b\3\1\2gh\7\t\2\2hi\7\17\2")
        buf.write("\2ij\7\20\2\2jk\7\b\2\2kl\5\4\3\2lm\b\3\1\2mo\3\2\2\2")
        buf.write("ng\3\2\2\2or\3\2\2\2pn\3\2\2\2pq\3\2\2\2qs\3\2\2\2rp\3")
        buf.write("\2\2\2st\7\7\2\2tu\b\3\1\2uw\3\2\2\2v\f\3\2\2\2v!\3\2")
        buf.write("\2\2v\66\3\2\2\2v<\3\2\2\2v>\3\2\2\2v@\3\2\2\2vF\3\2\2")
        buf.write("\2v[\3\2\2\2va\3\2\2\2w\5\3\2\2\2xy\7\3\2\2yz\7\4\2\2")
        buf.write("z{\b\4\1\2{|\7\20\2\2|}\7\b\2\2}~\5\4\3\2~\u0087\b\4\1")
        buf.write("\2\177\u0080\7\t\2\2\u0080\u0081\7\20\2\2\u0081\u0082")
        buf.write("\7\b\2\2\u0082\u0083\5\4\3\2\u0083\u0084\b\4\1\2\u0084")
        buf.write("\u0086\3\2\2\2\u0085\177\3\2\2\2\u0086\u0089\3\2\2\2\u0087")
        buf.write("\u0085\3\2\2\2\u0087\u0088\3\2\2\2\u0088\u008a\3\2\2\2")
        buf.write("\u0089\u0087\3\2\2\2\u008a\u008b\b\4\1\2\u008b\u008c\7")
        buf.write("\5\2\2\u008c\u00e1\3\2\2\2\u008d\u008e\7\13\2\2\u008e")
        buf.write("\u008f\7\4\2\2\u008f\u0090\b\4\1\2\u0090\u0091\7\20\2")
        buf.write("\2\u0091\u0092\7\b\2\2\u0092\u0093\5\4\3\2\u0093\u009c")
        buf.write("\b\4\1\2\u0094\u0095\7\t\2\2\u0095\u0096\7\20\2\2\u0096")
        buf.write("\u0097\7\b\2\2\u0097\u0098\5\4\3\2\u0098\u0099\b\4\1\2")
        buf.write("\u0099\u009b\3\2\2\2\u009a\u0094\3\2\2\2\u009b\u009e\3")
        buf.write("\2\2\2\u009c\u009a\3\2\2\2\u009c\u009d\3\2\2\2\u009d\u009f")
        buf.write("\3\2\2\2\u009e\u009c\3\2\2\2\u009f\u00a0\b\4\1\2\u00a0")
        buf.write("\u00a1\7\5\2\2\u00a1\u00e1\3\2\2\2\u00a2\u00a3\7\f\2\2")
        buf.write("\u00a3\u00a4\7\20\2\2\u00a4\u00a5\7\n\2\2\u00a5\u00a6")
        buf.write("\5\6\4\2\u00a6\u00a7\b\4\1\2\u00a7\u00e1\3\2\2\2\u00a8")
        buf.write("\u00a9\7\r\2\2\u00a9\u00e1\b\4\1\2\u00aa\u00ab\7\16\2")
        buf.write("\2\u00ab\u00ac\7\20\2\2\u00ac\u00ad\7\b\2\2\u00ad\u00ae")
        buf.write("\5\4\3\2\u00ae\u00af\b\4\1\2\u00af\u00e1\3\2\2\2\u00b0")
        buf.write("\u00b1\7\6\2\2\u00b1\u00b2\7\16\2\2\u00b2\u00b3\7\20\2")
        buf.write("\2\u00b3\u00b4\7\b\2\2\u00b4\u00b5\5\4\3\2\u00b5\u00bf")
        buf.write("\b\4\1\2\u00b6\u00b7\7\t\2\2\u00b7\u00b8\7\16\2\2\u00b8")
        buf.write("\u00b9\7\20\2\2\u00b9\u00ba\7\b\2\2\u00ba\u00bb\5\4\3")
        buf.write("\2\u00bb\u00bc\b\4\1\2\u00bc\u00be\3\2\2\2\u00bd\u00b6")
        buf.write("\3\2\2\2\u00be\u00c1\3\2\2\2\u00bf\u00bd\3\2\2\2\u00bf")
        buf.write("\u00c0\3\2\2\2\u00c0\u00c2\3\2\2\2\u00c1\u00bf\3\2\2\2")
        buf.write("\u00c2\u00c3\7\7\2\2\u00c3\u00c4\b\4\1\2\u00c4\u00e1\3")
        buf.write("\2\2\2\u00c5\u00c6\7\17\2\2\u00c6\u00c7\7\20\2\2\u00c7")
        buf.write("\u00c8\7\b\2\2\u00c8\u00c9\5\4\3\2\u00c9\u00ca\b\4\1\2")
        buf.write("\u00ca\u00e1\3\2\2\2\u00cb\u00cc\7\6\2\2\u00cc\u00cd\7")
        buf.write("\17\2\2\u00cd\u00ce\7\20\2\2\u00ce\u00cf\7\b\2\2\u00cf")
        buf.write("\u00d0\5\4\3\2\u00d0\u00da\b\4\1\2\u00d1\u00d2\7\t\2\2")
        buf.write("\u00d2\u00d3\7\17\2\2\u00d3\u00d4\7\20\2\2\u00d4\u00d5")
        buf.write("\7\b\2\2\u00d5\u00d6\5\4\3\2\u00d6\u00d7\b\4\1\2\u00d7")
        buf.write("\u00d9\3\2\2\2\u00d8\u00d1\3\2\2\2\u00d9\u00dc\3\2\2\2")
        buf.write("\u00da\u00d8\3\2\2\2\u00da\u00db\3\2\2\2\u00db\u00dd\3")
        buf.write("\2\2\2\u00dc\u00da\3\2\2\2\u00dd\u00de\7\7\2\2\u00de\u00df")
        buf.write("\b\4\1\2\u00df\u00e1\3\2\2\2\u00e0x\3\2\2\2\u00e0\u008d")
        buf.write("\3\2\2\2\u00e0\u00a2\3\2\2\2\u00e0\u00a8\3\2\2\2\u00e0")
        buf.write("\u00aa\3\2\2\2\u00e0\u00b0\3\2\2\2\u00e0\u00c5\3\2\2\2")
        buf.write("\u00e0\u00cb\3\2\2\2\u00e1\7\3\2\2\2\f\33\60Upv\u0087")
        buf.write("\u009c\u00bf\u00da\u00e0")
        return buf.getvalue()
class SessionTypeParser ( Parser ):
grammarFileName = "SessionType.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "'+'", "'{'", "'}'", "'['", "']'", "';'",
"','", "'.'", "'&'", "'rec'", "'end'", "'!'", "'?'" ]
symbolicNames = [ "<INVALID>", "PLUS", "CLPAR", "CRPAR", "SLPAR", "SRPAR",
"SEMIC", "COMMA", "DOT", "AND", "REC", "END", "OUT",
"IN", "ID", "WHITESP" ]
RULE_start = 0
RULE_stype = 1
RULE_guarded = 2
ruleNames = [ "start", "stype", "guarded" ]
EOF = Token.EOF
PLUS=1
CLPAR=2
CRPAR=3
SLPAR=4
SRPAR=5
SEMIC=6
COMMA=7
DOT=8
AND=9
REC=10
END=11
OUT=12
IN=13
ID=14
WHITESP=15
    def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
        # Standard ANTLR-generated parser setup: check that the installed
        # runtime matches the generator version (4.9) and install the
        # ATN-driven prediction simulator.
        super().__init__(input, output)
        self.checkVersion("4.9")
        self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
        self._predicates = None
    class StartContext(ParserRuleContext):
        """Parse-tree context for the `start` rule (stype EOF)."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
            self.type = None  # synthesized attribute filled in by the rule action
            self.s = None # StypeContext
        def EOF(self):
            return self.getToken(SessionTypeParser.EOF, 0)
        def stype(self):
            return self.getTypedRuleContext(SessionTypeParser.StypeContext,0)
        def getRuleIndex(self):
            return SessionTypeParser.RULE_start
    def start(self):
        """Parse the ``start`` rule: ``stype EOF``.

        Returns a ``StartContext`` whose ``type`` attribute carries the
        translated type string produced by the grammar actions.  On a
        syntax error the exception is recorded on the context and the
        standard ANTLR error handler recovers.
        """
        localctx = SessionTypeParser.StartContext(self, self._ctx, self.state)
        self.enterRule(localctx, 0, self.RULE_start)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 6
            localctx.s = self.stype()
            # Grammar action: propagate the child's translated string
            # (empty string when the child produced none).
            localctx.type = localctx.s.type if localctx.s.type is not None else ""
            self.state = 8
            self.match(SessionTypeParser.EOF)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class StypeContext(ParserRuleContext):
        """Parse-tree context for the ``stype`` rule.

        The labeled attributes below (``i``, ``s``, ``st``, ...) mirror the
        labels used in the grammar's alternatives; the rule actions read them
        to assemble the ``type`` string.
        """

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
            self.type = None  # translated type string, built by rule actions
            self.i = None # Token
            self.s = None # Token
            self.st = None # StypeContext
            self.c = None # Token
            self.i2 = None # Token
            self.s2 = None # Token
            self.st2 = None # StypeContext
            self.r = None # Token
            self.d = None # Token
            self.g = None # GuardedContext
            self.e = None # Token
            self.o = None # Token
            self.sl = None # Token
            self.o2 = None # Token
            self.sr = None # Token
            self.n = None # Token
            self.n2 = None # Token

        # --- Generated token/subrule accessors.  The ``i`` parameter selects
        # --- the i-th occurrence; with ``i is None`` all occurrences are
        # --- returned as a list.

        def PLUS(self):
            return self.getToken(SessionTypeParser.PLUS, 0)

        def CLPAR(self):
            return self.getToken(SessionTypeParser.CLPAR, 0)

        def CRPAR(self):
            return self.getToken(SessionTypeParser.CRPAR, 0)

        def ID(self, i:int=None):
            if i is None:
                return self.getTokens(SessionTypeParser.ID)
            else:
                return self.getToken(SessionTypeParser.ID, i)

        def SEMIC(self, i:int=None):
            if i is None:
                return self.getTokens(SessionTypeParser.SEMIC)
            else:
                return self.getToken(SessionTypeParser.SEMIC, i)

        def stype(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(SessionTypeParser.StypeContext)
            else:
                return self.getTypedRuleContext(SessionTypeParser.StypeContext,i)

        def COMMA(self, i:int=None):
            if i is None:
                return self.getTokens(SessionTypeParser.COMMA)
            else:
                return self.getToken(SessionTypeParser.COMMA, i)

        def AND(self):
            return self.getToken(SessionTypeParser.AND, 0)

        def REC(self):
            return self.getToken(SessionTypeParser.REC, 0)

        def DOT(self):
            return self.getToken(SessionTypeParser.DOT, 0)

        def guarded(self):
            return self.getTypedRuleContext(SessionTypeParser.GuardedContext,0)

        def END(self):
            return self.getToken(SessionTypeParser.END, 0)

        def OUT(self, i:int=None):
            if i is None:
                return self.getTokens(SessionTypeParser.OUT)
            else:
                return self.getToken(SessionTypeParser.OUT, i)

        def SLPAR(self):
            return self.getToken(SessionTypeParser.SLPAR, 0)

        def SRPAR(self):
            return self.getToken(SessionTypeParser.SRPAR, 0)

        def IN(self, i:int=None):
            if i is None:
                return self.getTokens(SessionTypeParser.IN)
            else:
                return self.getToken(SessionTypeParser.IN, i)

        def getRuleIndex(self):
            return SessionTypeParser.RULE_stype
def stype(self):
localctx = SessionTypeParser.StypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_stype)
self._la = 0 # Token type
try:
self.state = 116
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,4,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 10
self.match(SessionTypeParser.PLUS)
self.state = 11
self.match(SessionTypeParser.CLPAR)
localctx.type = "["
self.state = 13
localctx.i = self.match(SessionTypeParser.ID)
self.state = 14
localctx.s = self.match(SessionTypeParser.SEMIC)
self.state = 15
localctx.st = self.stype()
localctx.type += "!" + (None if localctx.i is None else localctx.i.text) + (None if localctx.s is None else localctx.s.text) + (localctx.st.type if not localctx.st.type == None else "")
self.state = 25
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SessionTypeParser.COMMA:
self.state = 17
localctx.c = self.match(SessionTypeParser.COMMA)
self.state = 18
localctx.i2 = self.match(SessionTypeParser.ID)
self.state = 19
localctx.s2 = self.match(SessionTypeParser.SEMIC)
self.state = 20
localctx.st2 = self.stype()
localctx.type += (None if localctx.c is None else localctx.c.text) + "!" + (None if localctx.i2 is None else localctx.i2.text) + (None if localctx.s2 is None else localctx.s2.text) + (localctx.st2.type if not localctx.st2.type == None else "")
self.state = 27
self._errHandler.sync(self)
_la = self._input.LA(1)
localctx.type += "]"
self.state = 29
self.match(SessionTypeParser.CRPAR)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 31
self.match(SessionTypeParser.AND)
self.state = 32
self.match(SessionTypeParser.CLPAR)
localctx.type = "["
self.state = 34
localctx.i = self.match(SessionTypeParser.ID)
self.state = 35
localctx.s = self.match(SessionTypeParser.SEMIC)
self.state = 36
localctx.st = self.stype()
localctx.type += "?" + (None if localctx.i is None else localctx.i.text) + (None if localctx.s is None else localctx.s.text) + (localctx.st.type if not localctx.st.type == None else "")
self.state = 46
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SessionTypeParser.COMMA:
self.state = 38
localctx.c = self.match(SessionTypeParser.COMMA)
self.state = 39
localctx.i2 = self.match(SessionTypeParser.ID)
self.state = 40
localctx.s2 = self.match(SessionTypeParser.SEMIC)
self.state = 41
localctx.st2 = self.stype()
localctx.type += (None if localctx.c is None else localctx.c.text) + "?" + (None if localctx.i2 is None else localctx.i2.text) + (None if localctx.s2 is None else localctx.s2.text) + (localctx.st2.type if not localctx.st2.type == None else "")
self.state = 48
self._errHandler.sync(self)
_la = self._input.LA(1)
localctx.type += "]"
self.state = 50
self.match(SessionTypeParser.CRPAR)
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 52
localctx.r = self.match(SessionTypeParser.REC)
self.state = 53
localctx.i = self.match(SessionTypeParser.ID)
self.state = 54
localctx.d = self.match(SessionTypeParser.DOT)
self.state = 55
localctx.g = self.guarded()
localctx.type = (None if localctx.r is None else localctx.r.text) + " V" + (None if localctx.i is None else localctx.i.text) + " " + (None if localctx.d is None else localctx.d.text) + (localctx.g.type if not localctx.g.type == None else "")
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 58
localctx.i = self.match(SessionTypeParser.ID)
localctx.type = "V" + (None if localctx.i is None else localctx.i.text)
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 60
localctx.e = self.match(SessionTypeParser.END)
localctx.type = (None if localctx.e is None else localctx.e.text)
pass
elif la_ == 6:
self.enterOuterAlt(localctx, 6)
self.state = 62
localctx.o = self.match(SessionTypeParser.OUT)
self.state = 63
localctx.i = self.match(SessionTypeParser.ID)
self.state = 64
localctx.s = self.match(SessionTypeParser.SEMIC)
self.state = 65
localctx.st = self.stype()
localctx.type = (None if localctx.o is None else localctx.o.text) + (None if localctx.i is None else localctx.i.text) + (None if localctx.s is None else localctx.s.text) + (localctx.st.type if not localctx.st.type == None else "")
pass
elif la_ == 7:
self.enterOuterAlt(localctx, 7)
self.state = 68
localctx.sl = self.match(SessionTypeParser.SLPAR)
self.state = 69
localctx.o = self.match(SessionTypeParser.OUT)
self.state = 70
localctx.i = self.match(SessionTypeParser.ID)
self.state = 71
localctx.s = self.match(SessionTypeParser.SEMIC)
self.state = 72
localctx.st = self.stype()
localctx.type = (None if localctx.sl is None else localctx.sl.text) + (None if localctx.o is None else localctx.o.text) + (None if localctx.i is None else localctx.i.text) + (None if localctx.s is None else localctx.s.text) + (localctx.st.type if not localctx.st.type == None else "")
self.state = 83
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==SessionTypeParser.COMMA:
self.state = 74
localctx.c = self.match(SessionTypeParser.COMMA)
self.state = 75
localctx.o2 = self.match(SessionTypeParser.OUT)
self.state = 76
localctx.i2 = self.match(SessionTypeParser.ID)
self.state = 77
localctx.s2 = self.match(SessionTypeParser.SEMIC)
self.state = 78
localctx.st2 = self.stype()
localctx.type += (None if localctx.c is None else localctx.c.text) + (None if localctx.o2 is None else localctx.o2.text) + (None if localctx.i2 is None else localctx.i2.text) + (None if localctx.s2 is None else localctx.s2.text) + (localctx.st2.type if not localctx.st2.type == None else "")
self.state = 85
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 86
localctx.sr = self.match(SessionTypeParser.SRPAR)
localctx.type += (None if localctx.sr is None else localctx.sr.text)
pass
elif la_ == 8:
self.enterOuterAlt(localctx, 8)
self.state = 89
localctx.n = self.match(SessionTypeParser.IN)
self.state = 90
localctx.i = self.match(SessionTypeParser.ID)
self.state = 91
localctx.s = self.match(SessionTypeParser.SEMIC)
self.state = 92
localctx.st = self.stype()
localctx.type = (None if localctx.n is None else localctx.n.text) + (None if | |
# <gh_stars>0 -- repository-metadata artifact left by the dataset exporter; not Python code.
# Translator of mathematical expressions into code in Assembler language.
from pythonds.basic.stack import Stack
from pythonds.trees.binaryTree import BinaryTree
from graphviz import Digraph
from sys import argv
from re import match
# Character/token classes shared by the tokenizer and the code generator.
# The sets mix single-character operators with the assembler mnemonics so the
# same membership tests work on both raw tokens and converted operators.
OPERATORS = frozenset("+-*/") | {'mul', 'add', 'sub', 'div'}
DIGITS = frozenset("0123456789")
ALPHAS = frozenset("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_")
ASSIGNMENT = frozenset("=")
COMMUTATIVE = frozenset("+*") | {'add', 'mul'}     # operand order is irrelevant
NONCOMMUTATIVE = frozenset("-/") | {'sub', 'div'}  # operand order matters
class BadExpressionError(Exception):
    """Raised when the input mathematical expression is malformed.

    Derives from ``Exception`` rather than ``BaseException``:
    ``BaseException`` is reserved for system-exiting exceptions
    (``KeyboardInterrupt``, ``SystemExit``), and subclassing it would let
    this domain error slip past generic ``except Exception`` handlers.
    """
    pass
class SimpleBinaryTree:
    """A minimal binary-tree node: a payload plus two optional subtrees."""

    def __init__(self, head, left=None, right=None):
        # Payload and children; absent children stay None.
        self.head = head
        self.left = left
        self.right = right

    def __str__(self):
        # A node prints as its payload only (children are not rendered).
        return "%s" % (self.head,)
class MathExprParser:
    """Static helpers that tokenize a math expression, build parse trees
    (both a pythonds ``BinaryTree`` and the local ``SimpleBinaryTree``)
    and render them as text, postorder, or a GraphViz drawing."""

    @staticmethod
    def tokenize(math_expr: str) -> list:
        """
        Split a string into tokens.
        Tokens: operators (['+', '-', '*', '/']), '(', ')', unsigned numbers,
        variables (it can start with a latin symbol and end with a latin symbol
        or a number).
        :param math_expr: input math expression
        :return: list of tokens
        :raises BadExpressionError: on a character outside the token alphabet
        """
        tokens = list()
        idx = 0
        math_expr = math_expr.replace(' ', '')
        while idx < len(math_expr):
            symbol = math_expr[idx]
            if symbol in OPERATORS or symbol in ('(', '=', ')'):
                tokens.append(symbol)
            elif symbol in DIGITS:
                # Accumulate a multi-digit number.  The slice idx:idx+1
                # yields '' at end-of-string, which is not in DIGITS and so
                # safely terminates the loop without an IndexError.
                number = str()
                while math_expr[idx:idx + 1] in DIGITS:
                    number += math_expr[idx]
                    idx += 1
                tokens.append(number)
                continue
            elif symbol in ALPHAS:
                # Identifier: a letter/underscore followed by letters/digits.
                variable = str()
                while math_expr[idx:idx + 1] in ALPHAS | DIGITS:
                    variable += math_expr[idx]
                    idx += 1
                tokens.append(variable)
                continue
            else:
                err_string = "Illegal symbol in the string. ({} in pos {})."
                raise BadExpressionError(err_string.format(symbol, idx))
            idx += 1
        return tokens

    @staticmethod
    def build_bad_parse_tree_with_parentheses(math_expr: str) -> BinaryTree:
        """
        Building a tree according to the incoming mathematical expression with
        the operators "+, -, *, /" and with parentheses.
        All expressions should start with "<var_name> = ...", where <var_name>
        is the name of the variable.
        Since this is a binary tree, each expression must be placed in
        parentheses.
        :param math_expr: input math expression
        :return: binary tree
        """
        tokens = MathExprParser.tokenize(math_expr)
        stack = Stack()
        tree = BinaryTree('')
        tree_head = tree
        # The root is the '=' operator with the assigned variable on the left.
        # NOTE(review): assumes tokens[0] is the variable and tokens[1] is
        # '='; inputs without an assignment are not handled here -- confirm
        # against callers.
        tree.setRootVal(tokens[1])
        tree.insertLeft(tokens[0])
        tree.insertRight('')
        tree = tree.rightChild
        stack.push(tree)
        for token in tokens[2:]:
            if token == '(':
                # Descend into a fresh left child for the sub-expression.
                tree.insertLeft('')
                stack.push(tree)
                tree = tree.getLeftChild()
            elif token not in OPERATORS and token != ')':
                # Operand: fill the current node and climb back to its parent.
                tree.setRootVal(token)
                parent = stack.pop()
                tree = parent
            elif token in OPERATORS:
                # Operator goes into the current node; continue on the right.
                tree.setRootVal(token)
                tree.insertRight('')
                stack.push(tree)
                tree = tree.getRightChild()
            elif token == ')':
                # Closing parenthesis: pop back to the enclosing node.
                tree = stack.pop()
            else:
                err_string = f"The character {token} can not be recognized."
                raise BadExpressionError(err_string)
        return tree_head

    @staticmethod
    def build_parse_tree(math_expr: str) -> SimpleBinaryTree:
        """
        Construction of left binary tree according to left given mathematical
        expression.

        Recursive-descent parser with the layered grammar
        sum -> sub -> product -> div -> node.
        NOTE(review): the right-recursive grammar makes every operator
        right-associative, e.g. "a-b-c" parses as "a-(b-c)" -- confirm this
        is intended before relying on evaluation order.
        :param math_expr: input math expression
        :return: binary tree
        """
        tokens = MathExprParser.tokenize(math_expr)
        tokens.append('end')  # sentinel: lookahead never runs off the list

        def get_token(token_list: list, expected_symbol: str) -> bool:
            # Consume the leading token iff it equals expected_symbol.
            condition = token_list[0] == expected_symbol
            if condition:
                del token_list[0]
            return condition

        def get_sum(token_list: list) -> SimpleBinaryTree:
            # sum := sub ('+' sum)?
            left = get_sub(token_list)
            if get_token(token_list, '+'):
                right = get_sum(token_list)
                return SimpleBinaryTree('+', left, right)
            else:
                return left

        def get_sub(token_list: list) -> SimpleBinaryTree:
            # sub := product ('-' sub)?
            left = get_product(token_list)
            if get_token(token_list, '-'):
                right = get_sub(token_list)
                return SimpleBinaryTree('-', left, right)
            else:
                return left

        def get_product(token_list: list) -> SimpleBinaryTree:
            # product := div ('*' product)?
            left = get_div(token_list)
            if get_token(token_list, '*'):
                right = get_product(token_list)
                return SimpleBinaryTree('*', left, right)
            else:
                return left

        def get_div(token_list: list) -> SimpleBinaryTree:
            # div := node ('/' div)?
            left = get_node(token_list)
            if get_token(token_list, '/'):
                right = get_div(token_list)
                return SimpleBinaryTree('/', left, right)
            else:
                return left

        def get_node(token_list: list) -> SimpleBinaryTree:
            # node := '(' sum ')' | leaf
            if get_token(token_list, '('):
                node = get_sum(token_list)
                if not get_token(token_list, ')'):
                    raise BadExpressionError('missing parentheses')
                return node
            else:
                node = token_list[0]
                token_list[:] = token_list[1:]
                return SimpleBinaryTree(node)

        if tokens[1] == '=':
            # Assignment form: root '=', variable on the left, expression
            # on the right (tokens[2:] is a copy, so the nested parsers may
            # consume it freely).
            simple_binary_tree = SimpleBinaryTree('=')
            simple_binary_tree.left = SimpleBinaryTree(tokens[0])
            simple_binary_tree.right = get_sum(tokens[2:])
        else:
            simple_binary_tree = get_sum(tokens)
        return simple_binary_tree

    @staticmethod
    def print_tree(tree: (BinaryTree, SimpleBinaryTree),
                   level: int = 1) -> None:
        """
        Output the tree to the screen (rotated 90 degrees: right subtree on
        top, left subtree below).
        Don't change the level indentation parameter 'level'.
        :param tree: top of input binary tree
        :param level: indentation parameter
        :return:
        :raises TypeError: if the node is neither tree flavor
        """
        if tree:
            if isinstance(tree, BinaryTree):
                MathExprParser.print_tree(tree.getRightChild(), level + 1)
                for i in range(level):
                    print(5 * " ", end='', sep='')
                print(tree.getRootVal())
                MathExprParser.print_tree(tree.getLeftChild(), level + 1)
            elif isinstance(tree, SimpleBinaryTree):
                MathExprParser.print_tree(tree.right, level + 1)
                for i in range(level):
                    print(5 * " ", end='', sep='')
                print(tree.head)
                MathExprParser.print_tree(tree.left, level + 1)
            else:
                raise TypeError

    @staticmethod
    def print_tree_postorder(tree: (BinaryTree, SimpleBinaryTree)) -> None:
        # Print the node values in (right, left, root) order on one line.
        if tree:
            if isinstance(tree, BinaryTree):
                MathExprParser.print_tree_postorder(tree.getRightChild())
                MathExprParser.print_tree_postorder(tree.getLeftChild())
                print(tree.getRootVal(), end=' ', sep='')
            elif isinstance(tree, SimpleBinaryTree):
                MathExprParser.print_tree_postorder(tree.right)
                MathExprParser.print_tree_postorder(tree.left)
                print(tree.head, end=' ', sep='')

    @staticmethod
    def draw_tree(tree: SimpleBinaryTree, math_expr: str) -> None:
        """
        Drawing a tree using the GraphViz tool.  Renders a PDF named 'tree'
        into the 'output-files' directory.
        :param math_expr: expression used as the figure caption
        :param tree: top of input binary tree
        :return: None
        """
        graph = Digraph(name='Tree', format='pdf')

        def generator(index: int = 0) -> str:
            """
            Generation of unique names of vertices.
            :param index: initial index
            :return: a string with a unique index
            """
            while True:
                index += 1
                yield f"V{index}"

        gen = generator()

        def connecting_vertices(
                g: Digraph,
                t: SimpleBinaryTree,
                top: str = None) -> None:
            """
            The connection of the vertices of the graph with each other.
            :param top: additional parameter to connect head vertices in graph
                        with each other.
                        Example: tree.left.head is connected to tree.head when a
                        recursive call of the left or right child occurs.
            :param g: input Digraph
            :param t: input simple BinaryTree
            :return:
            """
            nonlocal gen
            if top is None:
                # Only the root creates its own node; children are created
                # by their parent before the recursive call.
                head = 'Head ' + next(gen)
                g.node(head, label=t.head)
            else:
                head = top
            left = 'Left ' + next(gen)
            right = 'Right ' + next(gen)
            if t.left:
                g.node(left, label=t.left.head)
                g.edge(head, left)
                connecting_vertices(g, t.left, left)
            if t.right:
                g.node(right, label=t.right.head)
                g.edge(head, right)
                connecting_vertices(g, t.right, right)

        connecting_vertices(graph, tree)
        graph.attr(label="\n\n" + math_expr, fontsize='16')
        graph.node_attr.update(shape='ellipse')
        # more shapes: https://graphviz.gitlab.io/_pages/doc/info/shapes.html
        # more examples: http://graphviz.readthedocs.io/en/stable/examples.html
        graph.render('tree', directory='output-files', view=False)
code = ''  # Legacy module-level buffer; holds the most recent generate_code() result.


def generate_code(tree: SimpleBinaryTree) -> str:
    """
    Generate code in the Assembler language for the input binary tree.

    The value of every (sub)expression is left in register EAX.  Each call
    is self-contained: the instructions are accumulated locally instead of
    in the module-level ``code`` variable, which fixes two defects of the
    previous revision:

    * recursive calls written as ``code += generate_code(subtree)``
      appended the whole accumulated program twice, because the callee had
      already extended the global buffer before returning it;
    * ``isinstance(tree.left.head, SimpleBinaryTree)`` tested the *head*
      string instead of the left child, so the "subtree OP constant"
      branch was unreachable.

    The module-level ``code`` is still updated with the latest result for
    backward compatibility with external readers.

    :param tree: input simple BinaryTree
    :return: code of the mathematical expression in the Assembler language
    """
    global code

    def is_constant(string_obj: str) -> bool:
        """
        True if *string_obj* is an unsigned integer or a variable name.

        Uses [A-Za-z_] instead of the former [A-z_]: the ASCII range A-z
        also matched the punctuation characters [ \\ ] ^ ` .
        """
        return bool(match(r'\d+', string_obj) or
                    match(r'[A-Za-z_][A-Za-z0-9_]*', string_obj))

    def convert_operator(op: str) -> str:
        """Map a binary operator to its Assembler mnemonic."""
        return {'+': 'add', '-': 'sub', '*': 'mul', '/': 'div'}[op]

    def is_leaf(node: SimpleBinaryTree) -> bool:
        """A leaf carries a constant/variable and has no children."""
        return node.left is None and node.right is None \
            and is_constant(node.head)

    def emit(node: SimpleBinaryTree) -> str:
        """Recursively produce the instructions for *node*."""
        out = ''
        if node.head in ASSIGNMENT:
            # <var> = <rhs>: compute the right side into EAX, then store it.
            var_name = node.left.head
            rhs = node.right
            if is_leaf(rhs):
                out += f'mov EAX, {rhs.head}\n'
            else:
                out += emit(rhs)
            out += f'mov {var_name}, EAX\n'
        elif node.head in OPERATORS:
            operator = convert_operator(node.head)
            if is_leaf(node.left) and is_leaf(node.right):
                # constant OP constant
                out += f'mov EAX, {node.left.head}\n'
                out += f'{operator} EAX, {node.right.head}\n'
            elif is_leaf(node.right):
                # subtree OP constant: subtree result is already in EAX.
                out += emit(node.left)
                out += f'{operator} EAX, {node.right.head}\n'
            elif is_leaf(node.left) and operator in COMMUTATIVE:
                # constant OP subtree, order irrelevant: fold the constant in.
                out += emit(node.right)
                out += f'{operator} EAX, {node.left.head}\n'
            elif is_leaf(node.left) and operator in NONCOMMUTATIVE:
                # constant OP subtree, order matters: swap via EDX first.
                out += emit(node.right)
                out += f'mov EDX, {node.left.head}\n'
                out += 'xchg EAX, EDX\n'
                out += f'{operator} EAX, EDX\n'
            else:
                # subtree OP subtree: park the right result on the stack
                # while the left side is evaluated.
                out += emit(node.right)
                out += 'push EAX\n'
                out += emit(node.left)
                out += 'pop EDX\n'
                out += f'{operator} EAX, EDX\n'
        return out

    result = emit(tree)
    code = result  # keep the historical global in sync for external readers
    return result
def write_code_to_file(math_expr: str, g_code: str, filename: str) -> None:
"""
Writing code to file.
:param math_expr: input math expression
:param g_code: generated code in Assembler | |
# Source file: dlkit/abstract_osid/mapping/managers.py (exporter artifact converted to a comment).
"""Implementations of mapping abstract base class managers."""
# pylint: disable=invalid-name
# Method names comply with OSID specification.
# pylint: disable=no-init
# Abstract classes do not define __init__.
# pylint: disable=too-few-public-methods
# Some interfaces are specified as 'markers' and include no methods.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
# pylint: disable=too-many-arguments
# Argument signature defined in specification.
# pylint: disable=duplicate-code
# All apparent duplicates have been inspected. They aren't.
import abc
class MappingProfile:
"""The mapping profile describes the interoperability among mapping services."""
__metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def supports_visible_federation(self):
        """Tests if any map federation is exposed.

        Federation is exposed when a specific map may be identified,
        selected and used to create a lookup or admin session.
        Federation is not exposed when a set of maps appears as a single
        map.

        :return: ``true`` if visible federation is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_location_lookup(self):
        """Tests if looking up locations is supported.

        :return: ``true`` if location lookup is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_location_query(self):
        """Tests if querying locations is supported.

        :return: ``true`` if location query is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_location_search(self):
        """Tests if searching locations is supported.

        :return: ``true`` if location search is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_location_admin(self):
        """Tests if a location administrative service is supported.

        :return: ``true`` if location administration is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_location_notification(self):
        """Tests if a location notification service is supported.

        :return: ``true`` if location notification is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean
    @abc.abstractmethod
    def supports_location_hierarchy(self):
        """Tests if a location hierarchy service is supported.

        :return: ``true`` if location hierarchy is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_location_hierarchy_design(self):
        """Tests if a location hierarchy design service is supported.

        :return: ``true`` if location hierarchy design is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_location_map(self):
        """Tests if a location map lookup service is supported.

        :return: ``true`` if a location map lookup service is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_location_map_assignment(self):
        """Tests if a location map assignment service is supported.

        :return: ``true`` if a location to map assignment service is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_location_smart_map(self):
        """Tests if a location smart map service is supported.

        :return: ``true`` if a location smart map service is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_location_adjacency(self):
        """Tests if a location adjacency service is supported.

        :return: ``true`` if a location adjacency service is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_location_spatial(self):
        """Tests if a location spatial service is supported.

        :return: ``true`` if a location spatial service is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean
    @abc.abstractmethod
    def supports_resource_location(self):
        """Tests if a resource location service is supported.

        :return: ``true`` if a resource location service is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_resource_location_update(self):
        """Tests if a resource location update service is supported.

        :return: ``true`` if a resource location update service is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_resource_location_notification(self):
        """Tests if a resource location notification service is supported.

        :return: ``true`` if a resource location notification service is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_resource_position_notification(self):
        """Tests if a resource position notification service is supported.

        :return: ``true`` if a resource position notification service is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_my_location(self):
        """Tests if a location service is supported for the current agent.

        :return: ``true`` if my location is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean
    @abc.abstractmethod
    def supports_map_lookup(self):
        """Tests if looking up maps is supported.

        :return: ``true`` if map lookup is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_map_query(self):
        """Tests if querying maps is supported.

        :return: ``true`` if a map query service is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_map_search(self):
        """Tests if searching maps is supported.

        :return: ``true`` if map search is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_map_admin(self):
        """Tests if a map administrative service is supported.

        :return: ``true`` if map administration is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_map_notification(self):
        """Tests if a map notification service is supported.

        :return: ``true`` if map notification is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_map_hierarchy(self):
        """Tests for the availability of a map hierarchy traversal service.

        :return: ``true`` if map hierarchy traversal is available, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented in all
        providers.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_map_hierarchy_design(self):
        """Tests for the availability of a map hierarchy design service.

        :return: ``true`` if map hierarchy design is available, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_mapping_batch(self):
        """Tests if the mapping batch service is supported.

        :return: ``true`` if the mapping batch service is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_mapping_path(self):
        """Tests if the mapping path service is supported.

        :return: ``true`` if the mapping path service is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def supports_mapping_route(self):
        """Tests if the mapping route service is supported.

        :return: ``true`` if the mapping route service is supported, ``false`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean
    @abc.abstractmethod
    def get_location_record_types(self):
        """Gets the supported ``Location`` record types.

        :return: a list containing the supported ``Location`` record types
        :rtype: ``osid.type.TypeList``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.type.TypeList

    # Property alias mandated by the OSID specification.
    location_record_types = abc.abstractproperty(fget=get_location_record_types)

    @abc.abstractmethod
    def supports_location_record_type(self, location_record_type):
        """Tests if the given ``Location`` record type is supported.

        :param location_record_type: a ``Type`` indicating a ``Location`` record type
        :type location_record_type: ``osid.type.Type``
        :return: ``true`` if the given record type is supported, ``false`` otherwise
        :rtype: ``boolean``
        :raise: ``NullArgument`` -- ``location_record_type`` is ``null``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def get_location_search_record_types(self):
        """Gets the supported ``Location`` search types.

        :return: a list containing the supported ``Location`` search types
        :rtype: ``osid.type.TypeList``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.type.TypeList

    # Property alias mandated by the OSID specification.
    location_search_record_types = abc.abstractproperty(fget=get_location_search_record_types)
@abc.abstractmethod
def supports_location_search_record_type(self, location_search_record_type):
"""Tests if the given ``Location`` search type is supported.
:param location_search_record_type: a ``Type`` indicating a ``Location`` search type
:type location_search_record_type: ``osid.type.Type``
:return: ``true`` if the given ``Type`` is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``location_search_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # | |
# Source file: rstoolbox/utils/tools.py (exporter artifact converted to a comment).
# -*- coding: utf-8 -*-
"""
.. codeauthor:: <NAME> <<EMAIL>>
.. affiliation::
Laboratory of Protein Design and Immunoengineering <lpdi.epfl.ch>
<NAME> <<EMAIL>>
.. func:: format_Ipython
.. func:: use_qgrid
.. func:: add_column
.. func:: split_values
.. func:: make_rosetta_app_path
.. func:: execute_process
.. func:: report
.. func:: concat_fragments
"""
# Standard Libraries
import os
import copy
import textwrap
import subprocess # nosec
import shlex
import re
# External Libraries
import pandas as pd
from six import string_types
# This Library
__all__ = ['format_Ipython', 'highlight', 'use_qgrid', 'add_column', 'split_values', 'make_rosetta_app_path',
'execute_process', 'report', 'concat_fragments', 'split_dataframe_rows']
def format_Ipython():
    """Ensure ``monospace`` representation of :class:`~pandas.DataFrame`
    in **Jupyter Notebooks**.

    Just need to call it after importing the library.

    .. note::
        In order for this function to work, it is important that is the last
        one in the Jupyter cell to be called.

    :raises:
        :ImportError: If [Ipython library](https://ipython.org/) is not present.
    """
    pd.set_option("display.max_columns", None)
    pd.set_option("display.max_rows", None)
    pd.set_option("display.max_seq_items", 3)
    # ``None`` disables column-width truncation. The old ``-1`` sentinel was
    # deprecated in pandas 1.0 and raises ValueError on modern pandas.
    pd.set_option("display.max_colwidth", None)
    from IPython.core.display import HTML
    CSS = textwrap.dedent("""
        table.dataframe, div.slick-cell {
            font-family: monospace !important;
        }
        div.q-grid-toolbar > button:nth-of-type(1) {
            visibility: hidden;
        }
        div.q-grid-toolbar > button:nth-of-type(2) {
            visibility: hidden;
        }
    """)
    return HTML('<style>{}</style>'.format(CSS))
def highlight( row, selection, color='yellow', text_color='black', bold=True, for_image=False ):
    """Highlight rows in **Jupyter Notebooks** whose index is in ``selection``.

    Intended for ``DataFrame.style.apply(..., axis=1)``: for each row it
    returns one CSS declaration string per cell.

    :param row: Row being styled (supplied by ``Styler.apply``).
    :type row: :class:`~pandas.Series`
    :param selection: Indexes to highlight; a DataFrame is reduced to its index.
    :type selection: Union[:class:`~pandas.Index`, :class:`~pandas.DataFrame`]
    :param str color: CSS color name for the background.
    :param str text_color: CSS color name for the text.
    :param bool bold: Make highlighted text bold.
    :param bool for_image: Add monospace/right-align rules for image export
        (e.g. via ``imgkit`` + ``wkhtmltopdf``).
    :return: :func:`list` of CSS property strings, one per cell.
    :raises NotImplementedError: If ``selection`` is not an Index or DataFrame.
    """
    if not isinstance(selection, (pd.Index, pd.DataFrame)):
        raise NotImplementedError('Unknown selection type provided.')
    if isinstance(selection, pd.DataFrame):
        selection = selection.index
    rules = []
    if for_image:
        rules.extend(['font-family: monospace', 'text-align: right'])
    if row.name in selection:
        rules.extend(['background-color: {}'.format(color), 'color: {}'.format(text_color)])
        if bold:
            rules.append('font-weight: bold')
    css = ';'.join(rules)
    return [css] * len(row)
def use_qgrid( df, **kwargs ):
    """Wrap a :class:`~pandas.DataFrame` in an interactive ``QgridWidget``
    (`qgrid library <https://qgrid.readthedocs.io/en/latest/>`_) for
    **Jupyter Notebooks**.

    Any keyword accepted by ``qgrid.show_grid`` may be forwarded. The single
    difference from a raw ``show_grid`` call: when the frame has more than 4
    columns, ``grid_options['forceFitColumns']`` is forced to :data:`False`.

    The edited/selected data can be recovered with ``get_changed_df()`` /
    ``get_selected_df()`` on the returned widget. Best used together with
    :func:`.format_Ipython`.

    :param df: Data container.
    :type df: :class:`~pandas.DataFrame`
    :return: `QgridWidget <https://qgrid.readthedocs.io/en/latest/#qgrid.QgridWidget>`_
    :raises:
        :ImportError: If the qgrid library is not present.
    """
    try:
        import qgrid
    except ImportError:
        raise ImportError('qgrid (not mandatory on rstoolbox install) is necessary to execute this function.')
    grid_options = kwargs.pop('grid_options', {})
    if df.shape[1] > 4:
        grid_options['forceFitColumns'] = False
    return qgrid.show_grid(df, grid_options=grid_options, **kwargs)
def add_column( df, name, value ):
    """Return a copy of ``df`` extended with a constant-valued column.

    :param df: Data container.
    :type df: :class:`~pandas.DataFrame`
    :param str name: Name of the new column.
    :param value: Value assigned to every row of the new column (any type).
    :return: :class:`~pandas.DataFrame` - The data container with the new column.
    """
    constant = pd.Series([value] * df.shape[0], index=df.index)
    # assign() only takes identifier keywords, so create under a placeholder
    # name and rename afterwards (``name`` may not be a valid identifier)
    return df.assign(_placeholder=constant).rename(columns={"_placeholder": name})
def split_values( df, keys ):
    """Reshape the data to aide plotting of multiple comparable scores.

    .. note::
        This might change the data in a way that a decoy would be repeated
        multiple times.

    The dictionary that needs to be provided to split the data container has
    three main keys:

    #. ``keep``: Identify the columns to keep (they cannot be the ones that
       split). If not provided, all non-split columns are kept.
    #. ``split``: List with columns to split. Each position is a tuple. The
       first position is the name of the column to split and the rest will be
       the value names that will be used to identify it.
    #. ``names``: Names of the new columns. The first one will be the name of
       the column where the values will be assigned, the rest will be the
       names of the columns for the rest of the identifiers.

    Unlike earlier revisions, the caller's ``keys`` dictionary is NOT mutated:
    a working copy of the ``keep`` list is used internally.

    :param df: Data container.
    :type df: :class:`~pandas.DataFrame`
    :param dict keys: Selection of the columns to keep and split.
    :return: :class:`~pandas.DataFrame` - Altered Data container.
    """
    split_columns = [s[0] for s in keys['split']]
    names = keys['names']
    # copy so we never mutate the caller's dict/list (the previous version
    # used setdefault + in-place sort on the input)
    if 'keep' in keys:
        keep = list(keys['keep'])
    else:
        keep = list(set(df.columns).difference(set(split_columns)))
    # preserve the original column order of the frame
    keep.sort(key=lambda c: list(df.columns.values).index(c))
    dataframes = []
    for entry in keys['split']:
        cols = keep + [entry[0]]
        wdf = df[cols]
        n = len(wdf[cols[0]])
        # first identifier: always present
        wdf = wdf.assign(tmpkey1=pd.Series([entry[1]] * n).values).copy(True)
        wdf = wdf.rename(index=str, columns={
            entry[0]: names[0],
            "tmpkey1": names[1]
        })
        # optional second identifier
        if len(entry) > 2:
            wdf = wdf.assign(tmpkey2=pd.Series([entry[2]] * n).values).copy(True)
            wdf = wdf.rename(index=str, columns={
                "tmpkey2": names[2]
            })
        dataframes.append(wdf)
    return pd.concat(dataframes)
def split_dataframe_rows(df, column_selectors, row_delimiter=None):
    """Given a dataframe in which certain columns are lists, it splits these
    lists making new rows in the :class:`~pandas.DataFrame` out of itself.

    When multiple columns have lists of similar lengths, it assumes that same
    index positions on the list go in the same new row; shorter lists are
    padded with the empty string.

    :param df: Input data.
    :type df: :class:`~pandas.DataFrame`
    :param column_selectors: List of columns containing same-sized lists.
    :type column_selectors: :func:`list` of :class:`str`
    :param str row_delimiter: If provided, instead of list, it assumes data are
        strings and uses the delimiter to make those strings into lists.
    :return: :class:`~pandas.DataFrame` with one row per list element.
    """
    # https://gist.github.com/jlln/338b4b0b55bd6984f883#gistcomment-2698588
    # we need to keep track of the ordering of the columns
    # (fixed: the helper's third parameter used to be named ``column_selector``
    # while actually receiving the selectors *list*, then was shadowed below)
    def _split_list_to_rows(row, row_accumulator, selectors, delimiter):
        split_rows = {}
        max_split = 0
        for col in selectors:
            if delimiter is not None:
                cell = row[col].split(delimiter)
            else:
                # deepcopy: the lists are popped from below and must not
                # mutate the original frame
                cell = copy.deepcopy(row[col])
            split_rows[col] = cell
            if len(cell) > max_split:
                max_split = len(cell)
        for _ in range(max_split):
            new_row = row.to_dict()
            for col in selectors:
                try:
                    new_row[col] = split_rows[col].pop(0)
                except IndexError:
                    new_row[col] = ''
            row_accumulator.append(new_row)

    new_rows = []
    df.apply(_split_list_to_rows, axis=1, args=(new_rows, column_selectors, row_delimiter))
    new_df = pd.DataFrame(new_rows, columns=df.columns)
    return new_df
def make_rosetta_app_path( application ):
    """Provided the expected Rosetta application, add path and suffix.

    .. note::
        Depends on :ref:`rosetta.path <options>` and
        :ref:`rosetta.compilation <options>` global options.

    :param str application: Name of the application to call.
    :return: :class:`str` - full path to the executable.
    :raise:
        :IOError: If the final path created does not exist.
    """
    import rstoolbox.core as core
    rosetta_dir = core.get_option("rosetta", "path")
    suffix = core.get_option("rosetta", "compilation")
    exe = os.path.join(rosetta_dir, "{0}.{1}".format(application, suffix))
    if not os.path.isfile(exe):
        raise IOError("The expected Rosetta executable {0} is not found".format(exe))
    return exe
def execute_process( command ): # pragma: no cover
"""Execute the provided command.
:param command: Command to be executed.
:type command: Union(:class:`str`, :func:`list`)
:param bool subp: When :data:`True` return ``subprocess`` otherwise return
the execution status as 0 (OK) or another number if failed.
:return: Output info of the execution
"""
if isinstance(command, string_types):
command | |
get_thread_full(9403)
return render_template_g('usermedals.html.jinja',
page_title = '勋章墙',
medals = medals,
t = t,
)
@app.route('/p/<int:pid>')
def getpost(pid):
    """Redirect /p/<pid> to the post's canonical thread URL with a cacheable 307."""
    p = get_post(pid)  # looked up first; presumably validates existence -- confirm
    resp = make_response('', 307)
    resp.headers['Location'] = get_url_to_post(str(pid))
    resp.headers['Cache-Control'] = 'max-age=86400, stale-while-revalidate=864000'
    # view-counter bump for non-authors was disabled here:
    # user_is_self = p['uid'] == g.selfuid
    # if not user_is_self: increment_view_counter('post', pid)
    return resp
@app.route('/p/<int:pid>/code')
def getpostcode(pid):
    """Raw source of a post, restricted to users with the view_code permission."""
    if not can_do_to(g.current_user, 'view_code', -1):
        abort(404, 'forbidden')
    post = aql('for p in posts filter p._key==@k return p', k=str(pid), silent=True)[0]
    return make_text_response(post['content'])
@app.route('/p/<int:pid>/votes')
def getpostvotes(pid):
    """Plain-text vote log of a post: one '<vote> <timestamp> <username>' per line, newest first."""
    must_be_logged_in()
    # NOTE(review): near-duplicate of getthreadvotes; only the vote type differs.
    votes = aql('''
    for v in votes filter v.type=="post" and v.id==@id
    sort v.t_c desc
    let user = (for i in users filter i.uid==v.uid return i)[0]
    return merge(v, {user})
    ''', id=pid, silent=True)
    votes = [' '.join([str(v['vote']), v['t_c'], v['user']['name']]) for v in votes]
    votes = '\n'.join(votes)
    return make_text_response(votes)
@app.route('/t/<int:pid>/votes')
def getthreadvotes(pid):
    """Plain-text vote log of a thread: one '<vote> <timestamp> <username>' per line, newest first."""
    must_be_logged_in()
    votes = aql('''
    for v in votes filter v.type=="thread" and v.id==@id
    sort v.t_c desc
    let user = (for i in users filter i.uid==v.uid return i)[0]
    return merge(v, {user})
    ''', id=pid, silent=True)
    votes = [' '.join([str(v['vote']), v['t_c'], v['user']['name']]) for v in votes]
    votes = '\n'.join(votes)
    return make_text_response(votes)
@app.route('/t/<int:tid>/code')
def getthreadcode(tid):
    """Raw source of a thread body, restricted to users with the view_code permission."""
    if not can_do_to(g.current_user, 'view_code', -1):
        abort(404, 'forbidden')
    thread = aql('for p in threads filter p.tid==@k return p', k=tid, silent=True)[0]
    return make_text_response(thread['content'])
@app.route('/c/<int:cid>')
@app.route('/c/<string:cid>')
def _get_category_threads(cid):
    """Thin route wrapper: delegate to get_category_threads (int cid or bigcat name)."""
    return get_category_threads(cid)
@app.route('/')
def _get_main_threads():
    """Front page: the 'main' aggregate category listing."""
    return get_category_threads('main')
def get_category_threads(cid):
    """Paginated thread listing for a category.

    ``cid`` is either a numeric category id or the string name of a "bigcat"
    (an aggregate category such as 'main' or 'water'). On the first page,
    pinned threads are prepended ahead of the regular listing.
    """
    bigcatism = False
    if is_integer(cid):
        catobj = aql('for c in categories filter c.cid==@cid return c', cid=cid, silent=True)
        if len(catobj) < 1:
            abort(404, 'category not exist')
        visitor_error_if_hidden(cid)
        catobj = catobj[0]
    elif is_string(cid):
        # bigcat descriptors live in a single counters document
        bigcats = aql('''
        let c = document('counters/bigcats').briefs
        return c
        ''', silent=True)[0]
        if cid in bigcats:
            catobj = bigcats[cid]
        else:
            abort(404, 'bigcat not exist')
        bigcatism = bigcats
    # NOTE(review): if cid were neither integer nor string, catobj stays
    # unbound and the render below raises NameError -- presumably unreachable
    # given the route converters; confirm.
    # the 'water' category (cid 4) uses its own listing defaults
    tlds = iif(
        cid != 4 and cid != 'water',
        thread_list_defaults,
        thread_list_defaults_water,
    )
    pagenumber = rai('page') or tlds['pagenumber']
    pagesize = rai('pagesize') or tlds['pagesize']
    order = ras('order') or tlds['order']
    sortby = ras('sortby') or tlds['sortby']
    rpath = request.path
    threadlist, pagination = pgnt.get_thread_list(
        by='category', category=cid,
        sortby=sortby,
        order=order,
        pagenumber=pagenumber, pagesize=pagesize,
        path=rpath)
    # pinned threads only decorate the first page
    if pagenumber == 1:
        if is_integer(cid):
            pinned = aql('''
            for i in threads
            filter i.cid==@cid and i.pinned==true
            sort i.t_manual desc
            return i''',
            cid=cid, silent=True)
        else:
            pinned = aql('''
            for i in threads
            filter @cid in i.bigcats and i.pinned==true
            sort i.t_manual desc
            return i
            ''', cid=cid, silent=True)
        if pinned:
            # fetch fully-populated pinned threads, then drop duplicates
            # from the regular listing
            tids = [p['tid'] for p in pinned]
            pinned_threads = pgnt.get_thread_list_uncached(
                by='ids',
                ids=[p['_id'] for p in pinned])
            threadlist = pinned_threads + [t for t in threadlist if t['tid'] not in tids]
    if cid == 'main':
        threadlist = remove_hidden_from_visitor(threadlist)
    return render_template_g('threadlist.html.jinja',
                             page_title=catobj['name'],
                             page_subheader=(catobj['brief'] or '').replace('\\', ''),
                             threadlist=threadlist,
                             pagination=pagination,
                             categories=get_categories_info(),
                             category=catobj,
                             bigcatism=bigcatism,
                             )
@app.route('/tag/<string:tag>')
def get_tag_threads(tag):
    """Paginated list of threads carrying a given tag."""
    defaults = thread_list_defaults
    page = rai('page') or defaults['pagenumber']
    size = rai('pagesize') or defaults['pagesize']
    sort_order = ras('order') or defaults['order']
    sort_key = ras('sortby') or defaults['sortby']
    threads, pagination = pgnt.get_thread_list(
        by='tag', tagname=tag,
        sortby=sort_key,
        order=sort_order,
        pagenumber=page, pagesize=size,
        path=request.path)
    return render_template_g('threadlist.html.jinja',
                             page_title='标签 - ' + tag,
                             threadlist=threads,
                             pagination=pagination,
                             categories=get_categories_info(),
                             )
@app.route('/u/<int:uid>/t')
def userthreads(uid):
    """Paginated list of threads started by one user."""
    matches = aql('''
    for u in users filter u.uid==@uid
    return u
    ''', uid=uid, silent=True)
    if len(matches) != 1:
        abort(404, 'user not exist')
    uobj = matches[0]
    defaults = user_thread_list_defaults
    page = rai('page') or defaults['pagenumber']
    size = rai('pagesize') or defaults['pagesize']
    sort_order = ras('order') or defaults['order']
    sort_key = ras('sortby') or defaults['sortby']
    threads, pagination = pgnt.get_thread_list(
        by='user', uid=uid,
        sortby=sort_key,
        order=sort_order,
        pagenumber=page, pagesize=size,
        path=request.path)
    return render_template_g('threadlist.html.jinja',
                             page_title='帖子 - ' + uobj['name'],
                             threadlist=threads,
                             pagination=pagination,
                             )
def get_thread_full(tid, selfuid=-1):
    """Fetch one fully-populated thread object by tid, or False when absent.

    ``selfuid`` is accepted for call-site compatibility but unused here.
    """
    ids = aql('for i in threads filter i.tid==@tid return i._id', tid=tid, silent=True)
    if not ids:
        return False
    return pgnt.get_thread_list_uncached(by='ids', ids=[ids[0]])[0]
def remove_duplicate_brief(postlist):
    """Blank repeated user briefs/personal titles within one page (in place).

    The first post showing a given (name, text) pair keeps it; later posts
    with the identical pair get the field emptied so the template does not
    repeat it. Entries without a dict 'user' are skipped.
    """
    seen = set()
    for post in postlist:
        if not (isinstance(post, dict) and 'user' in post):
            continue
        user = post['user'] or {}
        if 'brief' in user:
            tag = ('brief', user['name'], user['brief'])
            if tag in seen:
                user['brief'] = ''
            else:
                seen.add(tag)
        if 'personal_title' in user:
            tag = ('pt', user['name'], user['personal_title'])
            if tag in seen:
                user['personal_title'] = ''
            else:
                seen.add(tag)
# thread, list of posts
@app.route('/t/<int:tid>')
def get_thread(tid):
    """Thread page: the thread header plus a paginated list of its posts."""
    selfuid = g.selfuid
    thobj = get_thread_full(tid, selfuid)
    if not thobj:
        abort(404, 'thread not exist')
    # attach the category document for the template header
    catobj = aql('''
    for c in categories filter c.cid==@cid return c
    ''', cid=thobj['cid'], silent=True)[0]
    thobj['category'] = catobj
    visitor_error_if_hidden(thobj['cid'])
    # question-mode threads use their own post-list defaults
    if 'mode' in thobj and thobj['mode'] == 'question':
        mode = 'question'
        pld = post_list_defaults_q
    else:
        mode = ''
        pld = post_list_defaults
    pagenumber = rai('page') or pld['pagenumber']
    pagesize = rai('pagesize') or pld['pagesize']
    sortby = ras('sortby') or pld['sortby']
    order = ras('order') or pld['get_default_order'](sortby)
    rpath = request.path
    postlist, pagination = pgnt.get_post_list(
        by='thread',
        tid=tid,
        sortby=sortby,
        order=order,
        pagenumber=pagenumber, pagesize=pagesize,
        path=rpath, mode=mode)
    # remove duplicate brief string within a page (currently disabled)
    # remove_duplicate_brief(postlist)
    user_is_self = selfuid == thobj['uid']
    return render_template_g('postlist.html.jinja',
                             page_title=thobj['title'],
                             postlist=postlist,
                             pagination=pagination,
                             pagenumber=pagenumber,
                             t=thobj,
                             # presumably drives a view counter; empty for the author -- confirm
                             viewed_target='thread/' + str(tid) if not user_is_self else '',
                             )
def mark_blacklisted(postlist):
    """Flag posts authored by blacklisted users (mutates postlist in place).

    Sets post['blacklist'] = True for every post whose uid appears in the
    current blacklist.
    """
    # NOTE(review): get_blacklist() appears to return a custom sequence type
    # with a .map() method (plain lists have none) -- confirm.
    bl = get_blacklist().map(lambda k: k['to_uid'])
    if 0 == len(bl):
        return
    for idx, i in enumerate(postlist):
        if i['uid'] in bl:
            postlist[idx]['blacklist'] = True
def sink_deleted(postlist):
    """Stable-partition posts: visible entries first, blacklisted/deleted sunk to the end."""
    visible, sunk = [], []
    for post in postlist:
        bucket = sunk if (key(post, 'blacklist') or key(post, 'delete')) else visible
        bucket.append(post)
    return visible + sunk
# list of user posts.
@app.route('/u/<int:uid>/p')
def uposts(uid):
    """Paginated list of one user's posts (replies)."""
    uobj = get_user_by_id(uid)
    if not uobj:
        # abort() raises, so nothing may follow it (an unreachable
        # ``return make_response(...)`` used to sit after this line)
        abort(404, 'user not exist')
    upld = user_post_list_defaults
    pagenumber = rai('page') or upld['pagenumber']
    pagesize = rai('pagesize') or upld['pagesize']
    sortby = ras('sortby') or upld['sortby']
    order = ras('order') or upld['get_default_order'](sortby)
    rpath = request.path
    postlist, pagination = pgnt.get_post_list(
        by='user',
        uid=uid,
        sortby=sortby,
        order=order,
        pagenumber=pagenumber, pagesize=pagesize,
        path=rpath,
        apply_origin=True,
    )
    return render_template_g('postlist_userposts.html.jinja',
                             page_title='回复 - ' + uobj['name'],
                             postlist=postlist,
                             pagination=pagination,
                             u=uobj,
                             )
# list of followed/follower
@app.route('/u/<int:uid>/fo')
def ufollowing(uid):
    """List of users that user ``uid`` follows, newest follow first."""
    uobj = get_user_by_id(uid)
    if not uobj:
        # abort() raises; the unreachable ``return make_response(...)`` that
        # used to follow it has been removed
        abort(404, 'user not exist')
    ul = aql('''for i in followings filter i.uid==@uid
    sort i.t_c desc
    let user = (for u in users filter u.uid==i.to_uid return u)[0]
    return merge(user, {t_c: i.t_c})
    ''', uid=uid, silent=True)
    return render_template_g('userlist.html.jinja',
                             page_title=uobj['name'] + ' 关注的人',
                             userlist=ul,
                             )
@app.route('/u/<int:uid>/fr')
def ufollower(uid):
    """List of users following user ``uid``, newest follow first."""
    uobj = get_user_by_id(uid)
    if not uobj:
        # abort() raises; the unreachable ``return make_response(...)`` that
        # used to follow it has been removed
        abort(404, 'user not exist')
    ul = aql('''for i in followings filter i.to_uid==@uid
    sort i.t_c desc
    let user = (for u in users filter u.uid==i.uid return u)[0]
    return merge(user, {t_c: i.t_c})
    ''', uid=uid, silent=True)
    return render_template_g('userlist.html.jinja',
                             page_title=uobj['name'] + ' 的关注者',
                             userlist=ul,
                             )
@app.route('/p/all')
def get_all_posts():
    """Site-wide paginated list of all posts."""
    defaults = all_post_list_defaults
    page = rai('page') or defaults['pagenumber']
    size = rai('pagesize') or defaults['pagesize']
    sort_key = ras('sortby') or defaults['sortby']
    sort_order = ras('order') or defaults['get_default_order'](sort_key)
    postlist, pagination = pgnt.get_post_list(
        by='all',
        sortby=sort_key,
        order=sort_order,
        pagenumber=page, pagesize=size,
        path=request.path,
        apply_origin=True,
    )
    # blank consecutive duplicate thread titles so each heading prints once
    previous_title = ''
    for post in postlist:
        if post and post['t'] and post['t']['title']:
            if post['t']['title'] == previous_title:
                post['t']['title'] = ''
            else:
                previous_title = post['t']['title']
    return render_template_g('postlist_userposts.html.jinja',
                             page_title='所有评论',
                             postlist=postlist,
                             pagination=pagination,
                             )
@app.route('/editor')
def editor_handler():
    """Render the editor page, pre-filled when editing existing content.

    The ``target`` query parameter encodes what is being written or edited,
    e.g. 'edit_post/<key>', 'edit_thread/<tid>', 'thread/<tid>', 'user/<uid>'.
    """
    details = dict()
    details['has_title'] = True
    target = ras('target')
    target_type, _id = parse_target(target, force_int=False)
    if target_type not in [
        'user', 'username', 'edit_post', 'edit_thread', 'category', 'thread'
    ]:
        raise Exception('unsupported target_type')
    if target_type == 'edit_post':
        # posts (replies) carry no title of their own
        details['has_title'] = False
        post_original = aqlc.from_filter('posts', 'i._key==@_id', _id=str(_id))[0]
        details['content'] = post_original['content']
    if target_type == 'edit_thread':
        _id = int(_id)
        thread_original = aqlc.from_filter('threads', 'i.tid==@id', id=_id)[0]
        details['content'] = thread_original['content']
        details['title'] = thread_original['title']
        details['mode'] = thread_original['mode'] if 'mode' in thread_original else None
    if target_type == 'user':
        _id = int(_id)
    if 'user' in target_type:
        # covers both 'user' and 'username' targets
        details['has_title'] = False
    # '发表' = compose, '编辑' = edit
    page_title = '{} - {}'.format(
        '发表' if 'edit' not in target_type else '编辑',
        target)
    return render_template_g('editor.html.jinja',
                             page_title=page_title,
                             target=target,
                             details=details,
                             )
def userfill(u):
    """Backfill placeholder fields on incomplete legacy user records (in place).

    Records restored from an external backup may lack a creation time; give
    them a sentinel timestamp and an explanatory brief.
    """
    if 't_c' in u:
        return
    u['t_c'] = '1989-06-04T00:00:00'
    u['brief'] = '此用户的数据由于各种可能的原因,在github上2049bbs.xyz的备份中找不到,所以就只能像现在这样处理了'
@app.route('/u/<int:uid>')
def userpage(uid):
    """User profile page by numeric uid; delegates to _userpage."""
    return _userpage(uid)
def get_alias_user_by_name(uname):
    """Resolve an alias to its original user document.

    Looks up ``uname`` in the aliases collection, then returns the user whose
    name matches the alias's canonical name (the query yields null when no
    alias or user matches, so the result may be None).
    """
    return aql('''let oname = (
    for i in aliases filter i.is==@uname return i
    )[0].name
    return (for i in users filter i.name==oname return i)[0]
    ''', uname=uname, silent=True,
    )[0]
@app.route('/member/<string:name>')
def userpage_byname(name):
    """User profile looked up by name; on miss, a 404 page with outbound search links."""
    # check if user exists
    res = get_user_by_name(name)
    if not res:
        # NOTE(review): flask.escape is deprecated (removed in Flask 2.3) --
        # markupsafe.escape is the drop-in replacement.
        name = flask.escape(name)
        return make_response(convert_markdown(
            f'''
找不到用户: {name}
- 你可以试试: [{name} (XsDen)](https://xsden.info/user/{name})
- 或者试试: [{name} (品葱)](https://pincong.rocks/people/{name})
- 或者试试: [{name} (膜乎)](https://mohu.rocks/people/{name})
'''
        ), 404)
    u = res
    return _userpage(u['uid'])
def _userpage(uid):
uobj = get_user_by_id_admin(int(uid))
if not uobj:
abort(404, 'user not exist')
| |
<gh_stars>1-10
import ujson as json
import pickle as pkl
import numpy as np
import logging
import os
import argparse
import random
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
import rec_model
from rec_preprocess import run_prepare
from rec_util import new_train_epoch, new_valid_epoch, load_pkl, AMDataset, my_fn
from pytorch_transformers import WarmupCosineSchedule
from apex import amp
from apex.parallel import DistributedDataParallel
def parse_args():
    """
    Parses command line arguments.

    :return: an :class:`argparse.Namespace` with run-mode flags, training
        hyper-parameters, model sizes and data/output paths.
    """
    parser = argparse.ArgumentParser('Rec')
    parser.add_argument('--prepare', action='store_true',
                        help='create the directories, prepare the vocabulary and embeddings')
    parser.add_argument('--train', action='store_true',
                        help='train and valid the model')
    parser.add_argument('--test', action='store_true',
                        help='evaluate the model on test set')
    parser.add_argument('--gpu', type=int, default=0,
                        help='specify gpu device')
    # NOTE(review): type=bool is a known argparse pitfall -- bool('False') is
    # True, so any non-empty value enables these flags. Left unchanged to keep
    # existing '--is_distributed True' invocations working; consider a str2bool.
    parser.add_argument('--is_distributed', type=bool, default=False,
                        help='distributed training')
    parser.add_argument('--local_rank', type=int, default=-1,
                        help='node rank for distributed training')
    parser.add_argument('--seed', type=int, default=23333,
                        help='random seed (default: 23333)')

    train_settings = parser.add_argument_group('train settings')
    train_settings.add_argument('--disable_cuda', action='store_true',
                                help='Disable CUDA')
    train_settings.add_argument('--lr', type=float, default=0.0001,
                                help='learning rate')
    train_settings.add_argument('--clip', type=float, default=0.35,
                                help='gradient clip, -1 means no clip (default: 0.35)')
    train_settings.add_argument('--weight_decay', type=float, default=0.0003,
                                help='weight decay')
    train_settings.add_argument('--emb_dropout', type=float, default=0.5,
                                help='dropout keep rate')
    train_settings.add_argument('--layer_dropout', type=float, default=0.5,
                                help='dropout keep rate')
    train_settings.add_argument('--batch_train', type=int, default=32,
                                help='train batch size')
    train_settings.add_argument('--batch_eval', type=int, default=32,
                                help='dev batch size')
    train_settings.add_argument('--epochs', type=int, default=10,
                                help='train epochs')
    train_settings.add_argument('--optim', default='AdamW',
                                help='optimizer type')
    train_settings.add_argument('--warmup', type=float, default=0.5)
    train_settings.add_argument('--patience', type=int, default=2,
                                help='num of epochs for train patients')
    train_settings.add_argument('--loss_batch', type=int, default=50,
                                help='period to save batch loss')
    train_settings.add_argument('--num_threads', type=int, default=8,
                                help='Number of threads in input pipeline')

    model_settings = parser.add_argument_group('model settings')
    model_settings.add_argument('--P', type=int, default=4,
                                help='length of feature period')
    model_settings.add_argument('--T', type=int, default=36,
                                help='length of the year sequence')
    model_settings.add_argument('--NU', type=int, default=26889,
                                help='num of users')
    model_settings.add_argument('--NI', type=int, default=14020,
                                help='num of items')
    model_settings.add_argument('--NF', type=int, default=128,
                                help='num of factors')
    model_settings.add_argument('--n_hidden', type=int, default=128,
                                help='size of LSTM hidden units')
    model_settings.add_argument('--n_layer', type=int, default=2,
                                help='num of layers')
    model_settings.add_argument('--is_atten', type=bool, default=False,
                                help='whether to use self attention')
    model_settings.add_argument('--n_block', type=int, default=4,
                                help='attention block size (default: 2)')
    model_settings.add_argument('--n_head', type=int, default=4,
                                help='attention head size (default: 2)')
    model_settings.add_argument('--is_pos', type=bool, default=False,
                                help='whether to use position embedding')
    model_settings.add_argument('--is_sinusoid', type=bool, default=True,
                                help='whether to use sinusoid position embedding')
    model_settings.add_argument('--n_kernel', type=int, default=3,
                                help='kernel size (default: 3)')
    # fixed: nargs='+' is required for a list-valued option -- without it the
    # default [2, 3, 4] could never be overridden from the command line
    # (a single int would replace the list and break downstream iteration)
    model_settings.add_argument('--n_kernels', type=int, nargs='+', default=[2, 3, 4],
                                help='kernels size (default: 2, 3, 4)')
    model_settings.add_argument('--n_level', type=int, default=6,
                                help='# of levels (default: 10)')
    model_settings.add_argument('--n_filter', type=int, default=50,
                                help='number of hidden units per layer (default: 256)')
    model_settings.add_argument('--n_class', type=int, default=2,
                                help='class size (default: 2)')
    model_settings.add_argument('--kmax_pooling', type=int, default=2,
                                help='top-K max pooling')
    model_settings.add_argument('--dynamic', action='store_true',
                                help='if use dynamic embedding')
    model_settings.add_argument('--period', action='store_true',
                                help='if use period embedding')

    path_settings = parser.add_argument_group('path settings')
    path_settings.add_argument('--task', default='AM_Office',
                               help='the task name')
    path_settings.add_argument('--model', default='Dynamic_COTEMP',
                               help='the model name')
    path_settings.add_argument('--user_record_file', default='user_record.json',
                               help='the record file name')
    path_settings.add_argument('--item_record_file', default='item_record.json',
                               help='the record file name')
    path_settings.add_argument('--train_file', default='train.csv',
                               help='the train file name')
    path_settings.add_argument('--valid_file', default='dev.csv',
                               help='the valid file name')
    path_settings.add_argument('--test_file', default='test.csv',
                               help='the test file name')
    path_settings.add_argument('--raw_dir', default='data/raw_data/',
                               help='the dir to store raw data')
    path_settings.add_argument('--processed_dir', default='data/processed_data/',
                               help='the dir to store prepared data')
    path_settings.add_argument('--outputs_dir', default='outputs/',
                               help='the dir for outputs')
    path_settings.add_argument('--model_dir', default='models/',
                               help='the dir to store models')
    path_settings.add_argument('--result_dir', default='results/',
                               help='the dir to store the results')
    path_settings.add_argument('--pics_dir', default='pics/',
                               help='the dir to store the pictures')
    path_settings.add_argument('--summary_dir', default='summary/',
                               help='the dir to write tensorboard summary')
    return parser.parse_args()
def func_train(args, file_paths, gpu, ngpus_per_node):
    """Train and validate the recommender in one (distributed) worker process.

    Loads pickled interaction records, builds distributed data loaders,
    initializes embeddings and the model, wraps them with apex AMP/DDP, then
    runs the epoch loop, checkpointing whenever validation MSE improves.

    :param args: parsed command-line namespace (see parse_args); also expected
        to carry ``state`` and ``device`` set elsewhere -- confirm, neither is
        defined in parse_args.
    :param file_paths: object exposing the preprocessed record/split file paths
    :param gpu: local GPU index this process should use
    :param ngpus_per_node: GPUs per node, used to scale the per-process batch
    """
    torch.cuda.set_device(gpu)
    logger = logging.getLogger('Rec')
    logger.info('Loading record file...')
    # pickled per-user / per-item histories and their sequence lengths
    user_record_file = load_pkl(file_paths.user_record_file)
    item_record_file = load_pkl(file_paths.item_record_file)
    user_length_file = load_pkl(file_paths.user_length_file)
    item_length_file = load_pkl(file_paths.item_length_file)
    train_set = AMDataset(file_paths.train_file, user_record_file, item_record_file, user_length_file, item_length_file,
                          logger, 'train')
    valid_set = AMDataset(file_paths.valid_file, user_record_file, item_record_file, user_length_file, item_length_file,
                          logger, 'valid')
    # split the global batch size across this node's GPUs
    args.batch_train = int(args.batch_train / ngpus_per_node)
    train_sampler = DistributedSampler(train_set)
    # NOTE(review): train_sampler is always set, so shuffle is always False
    # (DistributedSampler shuffles internally); train_sampler.set_epoch(ep) is
    # never called below, so the shard order repeats every epoch -- confirm.
    train_loader = DataLoader(train_set, batch_size=args.batch_train, shuffle=(train_sampler is None), num_workers=4,
                              collate_fn=my_fn, pin_memory=True, sampler=train_sampler)
    valid_loader = DataLoader(valid_set, batch_size=args.batch_train, shuffle=False, num_workers=4, collate_fn=my_fn)
    train_num = len(train_set.labels)
    valid_num = len(valid_set.labels)
    logger.info('Num of train data {} valid data {}'.format(train_num, valid_num))
    user_num = len(user_record_file)
    args.NU = user_num
    item_num = len(item_record_file)
    args.NI = item_num
    logger.info('Num of users {} items {}'.format(user_num, item_num))
    logger.info('Initialize the model...')
    # Embedding matrices: one row per (time-step, entity) in dynamic/period
    # mode, otherwise one row per entity; row 0 is zeroed (likely padding).
    if args.dynamic:
        UEM = np.random.normal(0., 0.01, (args.T * args.NU + 1, args.NF))
        IEM = np.random.normal(0., 0.01, (args.T * args.NI + 1, args.NF))
    elif args.period:
        UEM = np.random.normal(0., 0.01, (args.P * args.NU + 1, args.NF))
        IEM = np.random.normal(0., 0.01, (args.P * args.NI + 1, args.NF))
    else:
        UEM = np.random.normal(0., 0.01, (args.NU + 1, args.NF))
        IEM = np.random.normal(0., 0.01, (args.NI + 1, args.NF))
    UEM[0] = 0.
    IEM[0] = 0.
    dropout = {'emb': args.emb_dropout, 'layer': args.layer_dropout}
    model = getattr(rec_model, args.model)(UEM, IEM, args.state, args.T, args.P, args.NU, args.NI, args.NF,
                                           args.n_class, args.n_hidden,
                                           args.n_layer, dropout, logger).to(args.device)
    # if args.is_distributed:
    #     model = torch.nn.parallel.DistributedDataParallel(
    #         model, device_ids=[args.local_rank], output_device=args.local_rank,
    #         this should be removed if we update BatchNorm stats
    #         broadcast_buffers=False)
    # model = torch.nn.DataParallel(model)
    optimizer = getattr(optim, args.optim)(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    # O0: AMP disabled (fp32 passthrough); DDP here is apex's wrapper
    model, optimizer = amp.initialize(model, optimizer, opt_level="O0")
    model = DistributedDataParallel(model)
    # scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', 0.5, patience=args.patience, verbose=True)
    # NOTE(review): t_total is computed in optimizer *steps* (batches) but
    # scheduler.step() is only called once per epoch below -- confirm intended.
    scheduler = WarmupCosineSchedule(optimizer, args.warmup, (train_num // args.batch_train + 1) * args.epochs)
    min_loss, min_epoch = 1e10, 0
    for ep in range(1, args.epochs + 1):
        logger.info('Training the model for epoch {}'.format(ep))
        train_loss = new_train_epoch(model, optimizer, train_loader, args, logger)
        logger.info('Epoch {} MSE {}'.format(ep, train_loss))
        scheduler.step()
        logger.info('Evaluating the model for epoch {}'.format(ep))
        valid_loss = new_valid_epoch(model, valid_loader, args)
        logger.info('Valid MSE - {}'.format(valid_loss))
        # checkpoint on best validation loss
        if valid_loss < min_loss:
            min_loss = valid_loss
            min_epoch = ep
            torch.save(model.state_dict(), os.path.join(args.model_dir, 'model.bin'))
    logger.info('Min MSE - {}'.format(min_loss))
    logger.info('Min Epoch - {}'.format(min_epoch))
def func_test(args, file_paths, gpu, ngpus_per_node):
    """Evaluate the trained recommender on the test split (one GPU worker).

    Loads the pickled user/item interaction records, rebuilds the model with
    the same embedding layout used during training, restores the checkpoint
    saved by the training loop ('model.bin' in args.model_dir) and logs the
    test MSE.

    Args:
        args: parsed argument namespace. Mutated in place: batch_eval is
            divided by ngpus_per_node, and NU/NI are set to the user/item
            counts so the model constructor sees them.
        file_paths: object exposing the record/length pickle paths and the
            test-set file path.
        gpu: CUDA device ordinal this worker should use.
        ngpus_per_node: number of GPUs per node; used to split batch_eval.
    """
    torch.cuda.set_device(gpu)
    logger = logging.getLogger('Rec')
    logger.info('Loading record file...')
    user_record_file = load_pkl(file_paths.user_record_file)
    item_record_file = load_pkl(file_paths.item_record_file)
    user_length_file = load_pkl(file_paths.user_length_file)
    item_length_file = load_pkl(file_paths.item_length_file)
    test_set = AMDataset(file_paths.test_file, user_record_file, item_record_file, user_length_file, item_length_file,
                         logger, 'test')
    # Per-process batch size: the global eval batch is split across GPUs
    args.batch_eval = int(args.batch_eval / ngpus_per_node)
    test_loader = DataLoader(test_set, args.batch_eval, num_workers=4, collate_fn=my_fn)
    test_num = len(test_set.labels)
    logger.info('Num of test data {}'.format(test_num))
    user_num = len(user_record_file)
    args.NU = user_num
    item_num = len(item_record_file)
    args.NI = item_num
    logger.info('Num of users {} items {}'.format(user_num, item_num))
    logger.info('Initialize the model...')
    # Embedding tables; the +1 row reserves index 0 as a zero (padding) entry.
    # NOTE(review): table sizes must match those used at training time or
    # load_state_dict below will fail — presumably args is identical; confirm.
    if args.dynamic:
        UEM = np.random.normal(0., 0.01, (args.T * args.NU + 1, args.NF))
        IEM = np.random.normal(0., 0.01, (args.T * args.NI + 1, args.NF))
    elif args.period:
        UEM = np.random.normal(0., 0.01, (args.P * args.NU + 1, args.NF))
        IEM = np.random.normal(0., 0.01, (args.P * args.NI + 1, args.NF))
    else:
        UEM = np.random.normal(0., 0.01, (args.NU + 1, args.NF))
        IEM = np.random.normal(0., 0.01, (args.NI + 1, args.NF))
    # Zero out the padding row
    UEM[0] = 0.
    IEM[0] = 0.
    dropout = {'emb': args.emb_dropout, 'layer': args.layer_dropout}
    # Look the model class up by name in rec_model and instantiate it
    model = getattr(rec_model, args.model)(UEM, IEM, args.state, args.T, args.P, args.NU, args.NI, args.NF,
                                           args.n_class, args.n_hidden,
                                           args.n_layer, dropout, logger).to(args.device)
    # if args.is_distributed:
    #     model = torch.nn.parallel.DistributedDataParallel(
    #         model, device_ids=[args.local_rank], output_device=args.local_rank,
    #         this should be removed if we update BatchNorm stats
    #         broadcast_buffers=False)
    # model = torch.nn.DataParallel(model)
    # optimizer = getattr(optim, args.optim)(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    # model, optimizer = amp.initialize(model, optimizer, opt_level="O0")
    # Wrap before loading: the checkpoint was saved from a DDP-wrapped model,
    # so its keys carry the 'module.' prefix — TODO confirm against training code
    model = DistributedDataParallel(model)
    logger.info(args.model_dir)
    model.load_state_dict(torch.load(os.path.join(args.model_dir, 'model.bin')))
    # eval_metrics, fpr, tpr, precision, recall = valid_batch(model, test_num, args.batch_eval, test_file,
    #                                                         user_record_file, item_record_file, args.device,
    #                                                         'test', logger)
    # test_loss = valid_batch(model, test_num, args.batch_eval, test_file, user_record_file, item_record_file,
    #                         args.device)
    test_loss = new_valid_epoch(model, test_loader, args)
    logger.info('Test MSE - {}'.format(test_loss))
    # logger.info('Test Acc - {}'.format(eval_metrics['acc']))
    # logger.info('Test Precision - {}'.format(eval_metrics['precision']))
    # logger.info('Test Recall - {}'.format(eval_metrics['recall']))
    # logger.info('Test F1 - {}'.format(eval_metrics['f1']))
    # logger.info('Test AUCROC - {}'.format(eval_metrics['auc_roc']))
| |
import sys
import time
from PyQt5 import QtCore
from dvg_qdeviceio import QDeviceIO, DAQ_TRIGGER
from dvg_debug_functions import dprint, tprint, ANSI
# Show extra debug info in terminal?
DEBUG = True
global cnt_DAQ_updated, cnt_jobs_updated, cnt_DAQ_paused
@QtCore.pyqtSlot()
def process_DAQ_updated():
    """Count received DAQ_updated signals; a real GUI would redraw here."""
    global cnt_DAQ_updated
    tprint("---> received: DAQ_updated")
    cnt_DAQ_updated += 1
@QtCore.pyqtSlot()
def process_DAQ_paused():
    """Count received DAQ_paused signals; a real GUI would redraw here."""
    global cnt_DAQ_paused
    tprint("---> received: DAQ_paused")
    cnt_DAQ_paused += 1
@QtCore.pyqtSlot()
def process_jobs_updated():
    """Count received jobs_updated signals; a real GUI would redraw here."""
    global cnt_jobs_updated
    tprint("---> received: jobs_updated")
    cnt_jobs_updated += 1
class FakeDevice:
    """Stand-in for an I/O device.

    While `is_alive` is True every send echoes its payload back; once the
    device is 'dead' each send blocks briefly and returns an error string,
    letting the tests exercise the not-alive handling of QDeviceIO.
    """

    def __init__(self, start_alive=True):
        self.name = "FakeDev"
        self.is_alive = start_alive
        # Counters inspected by the test assertions
        self.count_commands = 0
        self.count_replies = 0

    def _send(self, payload):
        """Echo `payload` when alive; simulate a slow I/O failure otherwise."""
        if not self.is_alive:
            # Simulate device failure
            time.sleep(0.1)
            tprint_tab("SIMULATED I/O ERROR")
            return "SIMULATED I/O ERROR"
        # Simulate successful device output
        self.count_replies += 1
        tprint_tab(payload)
        return payload

    def fake_query_1(self):
        self.count_commands += 1
        return self._send("-> reply 0101")

    def fake_query_2(self):
        self.count_commands += 1
        return self._send("-> reply ~~~~")

    def fake_command_with_argument(self, val: int):
        tprint_tab("-> command(arg=%i)" % val)
        self.count_commands += 1
def create_QApplication():
    """Build a QCoreApplication for the test and reset the signal counters."""
    # Name the main thread so it shows up in DEBUG printouts
    QtCore.QThread.currentThread().setObjectName("MAIN")
    app = 0  # Work-around for kernel crash when using Spyder IDE
    # QtWidgets are not needed for pytest and will fail standard Travis test
    # app = QtWidgets.QApplication(sys.argv)
    app = QtCore.QCoreApplication(sys.argv)  # Use QCoreApplication instead
    # Fresh counters for every test
    global cnt_DAQ_updated, cnt_DAQ_paused, cnt_jobs_updated
    cnt_DAQ_updated = cnt_DAQ_paused = cnt_jobs_updated = 0
    return app
def print_title(title):
    """Print a purple section header followed by a separator rule."""
    dprint(f"\n{title}", ANSI.PURPLE)
    dprint("-" * 50, ANSI.PURPLE)
def tprint_tab(str_msg, ANSI_color=None):
    """Timestamped debug print, indented 60 columns to form a second column."""
    dprint(f"{' ' * 60}{time.perf_counter():.4f} {str_msg}", ANSI_color)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def test_Worker_DAQ___INTERNAL_TIMER(start_alive=True):
    """Worker_DAQ in INTERNAL_TIMER mode.

    The worker polls the fake device every 100 ms. The Qt event loop is
    pumped for up to ~1 s (or until 3 commands went out), then the test
    checks that >= 3 commands/replies and >= 2 DAQ_updated signals were
    seen. With start_alive=False the worker must refuse to start and the
    count assertions are skipped.
    """
    print_title(
        "Worker_DAQ - INTERNAL_TIMER" + ("" if start_alive else " - start dead")
    )
    def DAQ_function():
        # Must return True when successful, False otherwise
        # NOTE: closes over 'dev', which is bound below before the worker runs
        reply = dev.fake_query_1()
        return reply[-4:] == "0101"
    app = create_QApplication()
    dev = FakeDevice(start_alive=start_alive)
    qdev = QDeviceIO(dev)
    # fmt: off
    qdev.create_worker_DAQ(
        DAQ_trigger = DAQ_TRIGGER.INTERNAL_TIMER,
        DAQ_function = DAQ_function,
        DAQ_interval_ms = 100,
        critical_not_alive_count = 10,
        debug = DEBUG)
    # fmt: on
    qdev.signal_DAQ_updated.connect(process_DAQ_updated)
    assert qdev.start() == start_alive
    # Simulate device runtime
    start_time = time.perf_counter()
    while time.perf_counter() - start_time < 1:
        app.processEvents()
        if dev.count_commands == 3:
            break
        time.sleep(0.001)  # Do not hog the CPU
    tprint("About to quit")
    app.processEvents()
    assert qdev.quit() == True
    app.quit()
    if start_alive:
        assert dev.count_commands >= 3
        assert dev.count_replies >= 3
        assert (
            cnt_DAQ_updated >= 2
        )  # Last signal is not always received before thread is quit
def test_Worker_DAQ___INTERNAL_TIMER__start_dead():
    """Re-run the INTERNAL_TIMER test with a device that is dead from the start."""
    test_Worker_DAQ___INTERNAL_TIMER(start_alive=False)
def test_Worker_DAQ___SINGLE_SHOT_WAKE_UP(start_alive=True):
    """Worker_DAQ in SINGLE_SHOT_WAKE_UP mode.

    The worker only polls when wake_up_DAQ() is called. Three wake-ups are
    issued (immediately, at 300 ms, at 600 ms), so exactly 3 commands,
    3 replies and 3 DAQ_updated signals are expected after ~1 s.
    """
    print_title(
        "Worker_DAQ - SINGLE_SHOT_WAKE_UP"
        + ("" if start_alive else " - start dead")
    )
    def DAQ_function():
        # Must return True when successful, False otherwise
        reply = dev.fake_query_1()
        return reply[-4:] == "0101"
    app = create_QApplication()
    dev = FakeDevice(start_alive=start_alive)
    qdev = QDeviceIO(dev)
    # fmt: off
    qdev.create_worker_DAQ(
        DAQ_trigger = DAQ_TRIGGER.SINGLE_SHOT_WAKE_UP,
        DAQ_function = DAQ_function,
        critical_not_alive_count = 1,
        debug = DEBUG)
    # fmt: on
    qdev.signal_DAQ_updated.connect(process_DAQ_updated)
    assert qdev.start() == start_alive
    # Immediately fire a call to test if the worker is ready for it
    qdev.wake_up_DAQ()
    # Simulate device runtime
    start_time = time.perf_counter()
    QtCore.QTimer.singleShot(300, qdev.wake_up_DAQ)
    QtCore.QTimer.singleShot(600, qdev.wake_up_DAQ)
    while time.perf_counter() - start_time < 1:
        app.processEvents()
        time.sleep(0.001)  # Do not hog the CPU
    tprint("About to quit")
    app.processEvents()
    assert qdev.quit() == True
    app.quit()
    if start_alive:
        assert dev.count_commands == 3
        assert dev.count_replies == 3
        assert cnt_DAQ_updated == 3
def test_Worker_DAQ___SINGLE_SHOT_WAKE_UP__start_dead():
    """Re-run the SINGLE_SHOT_WAKE_UP test with a device that is dead from the start."""
    test_Worker_DAQ___SINGLE_SHOT_WAKE_UP(start_alive=False)
def test_Worker_DAQ___CONTINUOUS(start_alive=True):
    """Worker_DAQ in CONTINUOUS mode with pause/unpause cycling.

    Each DAQ iteration takes ~0.1 s (simulated blocking time). The worker
    is unpaused immediately, then paused/unpaused at 300/600/900/1200 ms,
    so after ~1.6 s roughly 10+ commands and exactly 3 DAQ_paused signals
    are expected.
    """
    print_title(
        "Worker_DAQ - CONTINUOUS" + ("" if start_alive else " - start dead")
    )
    def DAQ_function():
        # Must return True when successful, False otherwise
        time.sleep(0.1)  # Simulate blocking processing time on the device
        reply = dev.fake_query_1()
        return reply[-4:] == "0101"
    app = create_QApplication()
    dev = FakeDevice(start_alive=start_alive)
    qdev = QDeviceIO(dev)
    # fmt: off
    qdev.create_worker_DAQ(
        DAQ_trigger = DAQ_TRIGGER.CONTINUOUS,
        DAQ_function = DAQ_function,
        critical_not_alive_count = 1,
        debug = DEBUG,
    )
    # fmt: on
    qdev.signal_DAQ_updated.connect(process_DAQ_updated)
    qdev.signal_DAQ_paused.connect(process_DAQ_paused)
    assert qdev.start() == start_alive
    # Immediately fire a call to test if the worker is ready for it
    qdev.unpause_DAQ()
    # Simulate device runtime
    start_time = time.perf_counter()
    QtCore.QTimer.singleShot(300, qdev.pause_DAQ)
    QtCore.QTimer.singleShot(600, qdev.unpause_DAQ)
    QtCore.QTimer.singleShot(900, qdev.pause_DAQ)
    QtCore.QTimer.singleShot(1200, qdev.unpause_DAQ)
    while time.perf_counter() - start_time < 1.6:
        app.processEvents()
        if dev.count_commands == 12:
            break
        time.sleep(0.001)  # Do not hog the CPU
    tprint("About to quit")
    app.processEvents()
    assert qdev.quit() == True
    app.quit()
    if start_alive:
        assert dev.count_commands >= 10
        assert dev.count_replies >= 10
        assert (
            cnt_DAQ_updated >= 9
        )  # Last signal is not always received before thread is quit
        assert cnt_DAQ_paused == 3
def test_Worker_DAQ___CONTINUOUS__start_dead():
    """Re-run the CONTINUOUS test with a device that is dead from the start."""
    test_Worker_DAQ___CONTINUOUS(start_alive=False)
def test_Worker_jobs(start_alive=True):
    """Worker_jobs with default job handling.

    Queues/sends a mix of queries and argument-carrying commands on timers,
    including one deliberately illegal job ("trigger_illegal_function_call_
    error"), then checks the exact command/reply/signal counts.
    """
    print_title("Worker_jobs" + ("" if start_alive else " - start dead"))
    app = create_QApplication()
    dev = FakeDevice(start_alive=start_alive)
    qdev = QDeviceIO(dev)
    qdev.create_worker_jobs(debug=DEBUG)
    qdev.signal_jobs_updated.connect(process_jobs_updated)
    assert qdev.start() == start_alive
    # Immediately fire a call to test if the worker is ready for it
    qdev.add_to_jobs_queue(dev.fake_query_2)
    # fmt: off
    # Simulate device runtime
    start_time = time.perf_counter()
    QtCore.QTimer.singleShot(100, qdev.process_jobs_queue)
    QtCore.QTimer.singleShot(200, lambda: qdev.send(dev.fake_query_2))
    QtCore.QTimer.singleShot(300, lambda: qdev.add_to_jobs_queue(dev.fake_command_with_argument, 0))
    QtCore.QTimer.singleShot(400, lambda: qdev.add_to_jobs_queue(dev.fake_command_with_argument, 0))
    QtCore.QTimer.singleShot(500, lambda: qdev.add_to_jobs_queue(dev.fake_command_with_argument, 0))
    QtCore.QTimer.singleShot(600, qdev.process_jobs_queue)
    QtCore.QTimer.singleShot(700, lambda: qdev.send("trigger_illegal_function_call_error"))
    # fmt: on
    while time.perf_counter() - start_time < 1:
        app.processEvents()
        time.sleep(0.001)  # Do not hog the CPU
    tprint("About to quit")
    app.processEvents()
    assert qdev.quit() == True
    app.quit()
    if start_alive:
        assert dev.count_commands == 5
        assert dev.count_replies == 2
        assert cnt_jobs_updated == 4
def test_Worker_jobs__start_dead():
    """Re-run the Worker_jobs test with a device that is dead from the start."""
    test_Worker_jobs(start_alive=False)
def test_Worker_jobs__jobs_function():
    """Worker_jobs with a user-supplied jobs_function.

    The custom handler intercepts the "special command" string and maps it
    to fake_query_2; anything else gets the default func(*args) treatment.
    """
    print_title("Worker_jobs - jobs_function")
    def jobs_function(func, args):
        if func == "special command":
            dev.fake_query_2()
        else:
            # Default job handling where, e.g.
            # func = self.dev.write
            # args = ("toggle LED",)
            func(*args)
    app = create_QApplication()
    dev = FakeDevice()
    qdev = QDeviceIO(dev)
    qdev.create_worker_jobs(
        jobs_function=jobs_function, debug=DEBUG,
    )
    qdev.signal_jobs_updated.connect(process_jobs_updated)
    assert qdev.start() == True
    # Immediately fire a call to test if the worker is ready for it
    qdev.send(dev.fake_query_2)
    # fmt: off
    # Simulate device runtime
    start_time = time.perf_counter()
    QtCore.QTimer.singleShot(100, lambda: qdev.send("special command"))
    QtCore.QTimer.singleShot(200, lambda: qdev.send(dev.fake_command_with_argument, 0))
    # fmt: on
    while time.perf_counter() - start_time < 0.5:
        app.processEvents()
        time.sleep(0.001)  # Do not hog the CPU
    tprint("About to quit")
    app.processEvents()
    assert qdev.quit() == True
    app.quit()
    assert dev.count_commands == 3
    assert dev.count_replies == 2
    assert cnt_jobs_updated == 3
def test_attach_device_twice():
    """Attaching a second device must terminate with exit code 22."""
    print_title("Attach device twice")
    import pytest

    qdev = QDeviceIO(FakeDevice())
    with pytest.raises(SystemExit) as excinfo:
        qdev.attach_device(FakeDevice())

    assert excinfo.type == SystemExit
    dprint("Exit code: %i" % excinfo.value.code)
    assert excinfo.value.code == 22
def test_Worker_DAQ___no_device_attached():
    """create_worker_DAQ without an attached device must exit with code 99."""
    print_title("Worker_DAQ - no device attached")
    import pytest

    qdev = QDeviceIO()
    with pytest.raises(SystemExit) as excinfo:
        qdev.create_worker_DAQ()

    assert excinfo.type == SystemExit
    dprint("Exit code: %i" % excinfo.value.code)
    assert excinfo.value.code == 99
def test_Worker_jobs__no_device_attached():
    """create_worker_jobs without an attached device must exit with code 99."""
    print_title("Worker_jobs - no device attached")
    import pytest

    qdev = QDeviceIO()
    with pytest.raises(SystemExit) as excinfo:
        qdev.create_worker_jobs()

    assert excinfo.type == SystemExit
    dprint("Exit code: %i" % excinfo.value.code)
    assert excinfo.value.code == 99
def test_Worker_DAQ___start_without_create():
    """start_worker_DAQ before create_worker_DAQ must exit with code 404."""
    print_title("Worker_DAQ - start without create")
    import pytest

    qdev = QDeviceIO(FakeDevice())
    with pytest.raises(SystemExit) as excinfo:
        qdev.start_worker_DAQ()

    assert excinfo.type == SystemExit
    dprint("Exit code: %i" % excinfo.value.code)
    assert excinfo.value.code == 404
def test_Worker_jobs__start_without_create():
    """start_worker_jobs before create_worker_jobs must exit with code 404."""
    print_title("Worker_jobs - start without create")
    import pytest

    qdev = QDeviceIO(FakeDevice())
    with pytest.raises(SystemExit) as excinfo:
        qdev.start_worker_jobs()

    assert excinfo.type == SystemExit
    dprint("Exit code: %i" % excinfo.value.code)
    assert excinfo.value.code == 404
def test_Worker_DAQ___quit_without_start():
    """quit() must succeed when Worker_DAQ was created but never started."""
    print_title("Worker_DAQ - quit without start")
    app = create_QApplication()
    qdevio = QDeviceIO(FakeDevice())
    qdevio.create_worker_DAQ()

    tprint("About to quit")
    app.processEvents()
    assert qdevio.quit() == True
    app.quit()
def test_Worker_jobs__quit_without_start():
    """quit() must succeed when Worker_jobs was created but never started."""
    print_title("Worker_jobs - quit without start")
    app = create_QApplication()
    qdevio = QDeviceIO(FakeDevice())
    qdevio.create_worker_jobs()

    tprint("About to quit")
    app.processEvents()
    assert qdevio.quit() == True
    app.quit()
def test_Worker_DAQ___rate():
    """Worker_DAQ in INTERNAL_TIMER mode at a 10 ms interval.

    After ~1.5 s of free running, the measured DAQ interval must be
    10 +/- 1 ms and the obtained rate 100 +/- 1 Hz.
    """
    print_title("Worker_DAQ - INTERNAL_TIMER - DAQ rate")
    def DAQ_function():
        # Must return True when successful, False otherwise
        reply = dev.fake_query_1()
        dprint(" " * 50 + "%.1f Hz" % qdev.obtained_DAQ_rate_Hz)
        return reply[-4:] == "0101"
    app = create_QApplication()
    dev = FakeDevice()
    qdev = QDeviceIO(dev)
    # fmt: off
    qdev.create_worker_DAQ(
        DAQ_trigger = DAQ_TRIGGER.INTERNAL_TIMER,
        DAQ_function = DAQ_function,
        DAQ_interval_ms = 10,
        critical_not_alive_count = 1,
        debug = DEBUG)
    # fmt: on
    assert qdev.start() == True
    # Simulate device runtime
    start_time = time.perf_counter()
    while time.perf_counter() - start_time < 1.51:
        app.processEvents()
        time.sleep(0.001)  # Do not hog the CPU
    tprint("About to quit")
    app.processEvents()
    assert qdev.quit() == True
    app.quit()
    assert 9 <= qdev.obtained_DAQ_interval_ms <= 11
    assert 99 <= qdev.obtained_DAQ_rate_Hz <= 101
def test_Worker_DAQ___lose_connection():
print_title("Worker_DAQ - INTERNAL_TIMER - lose connection")
def DAQ_function():
# Must return True when successful, False otherwise
if qdev.update_counter_DAQ == 10:
dev.is_alive = False
reply = dev.fake_query_1()
return reply[-4:] == "0101"
# NOTE: The global 'go' mechanism used here is a quick and dirty way to
# pytest. In production, it should be implemented by an boolean external
# class member.
global go
go = True
@QtCore.pyqtSlot()
def process_connection_lost():
tprint("---> received: connection_lost")
global go
go | |
maximum of 5 roads in a row, so choose something else
r = random()
if r < 0.6:
row_class, index = Grass, randint(0,6)
elif r < 0.9:
row_class, index = Rail, 0
else:
row_class, index = Pavement, 0
# Create an object of the chosen row class
return row_class(self, index, self.y - ROW_HEIGHT)
class Pavement(Row):
    """A sidewalk row.

    Pavement rows chain in runs of up to three (indices 0-2); after the
    third one the sequence continues with a Road row.
    """

    def __init__(self, predecessor, index, y):
        super().__init__("side", index, y)

    def play_sound(self):
        game.play_sound("sidewalk", 1)

    def next(self):
        # At most three pavements in a row, then switch to road
        if self.index < 2:
            return Pavement(self, self.index + 1, self.y - ROW_HEIGHT)
        return Road(self, 0, self.y - ROW_HEIGHT)
# Note that Rail does not inherit from ActiveRow
class Rail(Row):
    """A railway row.

    Trains are not managed by ActiveRow; instead, rows with index 1 spawn
    a Train themselves in update(), which is why Rail derives directly
    from Row. The predecessor (the row below) is kept so an index-2 row
    can check collisions against the trains living on its index-1 row.
    """
    def __init__(self, predecessor, index, y):
        super().__init__("rail", index, y)
        self.predecessor = predecessor
    def update(self):
        super().update()
        # Only Rail rows with index 1 have trains on them
        if self.index == 1:
            # Recreate the children list, excluding any which are too far off the edge of the screen to be visible
            self.children = [c for c in self.children if c.x > -1000 and c.x < WIDTH + 1000]
            # If on-screen, and there is currently no train, and with a 1% chance every frame, create a train
            if self.y < game.scroll_pos+HEIGHT and len(self.children) == 0 and random() < 0.01:
                # Randomly choose a direction for trains to move. This can be different for each train created
                dx = choice([-20, 20])
                self.children.append(Train(dx, (WIDTH + 1000 if dx < 0 else -1000, -13)))
                game.play_sound("bell")
                game.play_sound("train", 2)
    def check_collision(self, x):
        # Index-2 rows delegate to the index-1 predecessor, where trains live
        if self.index == 2 and self.predecessor.collide(x):
            game.play_sound("splat", 1)
            return PlayerState.SPLAT, 8 # For the meaning of the second return value, see comments in Bunner.update
        else:
            return PlayerState.ALIVE, 0
    def play_sound(self):
        # NOTE(review): rail rows reuse the "grass" footstep sound —
        # presumably intentional (same ground texture); confirm
        game.play_sound("grass", 1)
    def next(self):
        # Runs of up to four rail rows (indices 0-3), then road or water
        if self.index < 3:
            row_class, index = Rail, self.index + 1
        else:
            item = choice( ((Road, 0), (Water, 0)) )
            row_class, index = item[0], item[1]
        # Create an object of the chosen row class
        return row_class(self, index, self.y - ROW_HEIGHT)
class Game:
def __init__(self, bunner=None):
self.bunner = bunner
self.looped_sounds = {}
try:
if bunner:
music.set_volume(0.4)
else:
music.play("theme")
music.set_volume(1)
except:
pass
self.eagle = None
self.frame = 0
# First (bottom) row is always grass
self.rows = [Grass(None, 0, 0)]
self.scroll_pos = -HEIGHT
def update(self):
if self.bunner:
# Scroll faster if the player is close to the top of the screen. Limit scroll speed to
# between 1 and 3 pixels per frame.
self.scroll_pos -= max(1, min(3, float(self.scroll_pos + HEIGHT - self.bunner.y) / (HEIGHT // 4)))
else:
self.scroll_pos -= 1
# Recreate the list of rows, excluding any which have scrolled off the bottom of the screen
self.rows = [row for row in self.rows if row.y < int(self.scroll_pos) + HEIGHT + ROW_HEIGHT * 2]
# In Python, a negative index into a list gives you items in reverse order, e.g. my_list[-1] gives you the
# last element of a list. Here, we look at the last row in the list - which is the top row - and check to see
# if it has scrolled sufficiently far down that we need to add a new row above it. This may need to be done
# multiple times - particularly when the game starts, as only one row is added to begin with.
while self.rows[-1].y > int(self.scroll_pos)+ROW_HEIGHT:
new_row = self.rows[-1].next()
self.rows.append(new_row)
# Update all rows, and the player and eagle (if present)
for obj in self.rows + [self.bunner, self.eagle]:
if obj:
obj.update()
# Play river and traffic sound effects, and adjust volume each frame based on the player's proximity to rows
# of the appropriate types. For each such row, a number is generated representing how much the row should
# contribute to the volume of the sound effect. These numbers are added together by Python's sum function.
# On the following line we ensure that the volume can never be above 40% of the maximum possible volume.
if self.bunner:
for name, count, row_class in [("river", 2, Water), ("traffic", 3, Road)]:
# The first line uses a list comprehension to get each row of the appropriate type, e.g. Water rows
# if we're currently updating the "river" sound effect.
volume = sum([16.0 / max(16.0, abs(r.y - self.bunner.y)) for r in self.rows if isinstance(r, row_class)]) - 0.2
volume = min(0.4, volume)
self.loop_sound(name, count, volume)
return self
def draw(self):
# Create a list of all objects which need to be drawn. This includes all rows, plus the player
# Using list(s.rows) means we're creating a copy of that list to use - we don't want to create a reference
# to it as that would mean we're modifying the original list's contents
all_objs = list(self.rows)
if self.bunner:
all_objs.append(self.bunner)
# We want to draw objects in order based on their Y position. In general, objects further down the screen should be drawn
# after (and therefore in front of) objects higher up the screen. We can use Python's built-in sort function
# to put the items in the desired order, before we draw the The following function specifies the criteria
# used to decide how the objects are sorted.
def sort_key(obj):
# Adding 39 and then doing an integer divide by 40 (the height of each row) deals with the situation where
# the player sprite would otherwise be drawn underneath the row below. This could happen when the player
# is moving up or down. If you assume that it occupies a 40x40 box which can be at an arbitrary y offset,
# it generates the row number of the bottom row that that box overlaps. If the player happens to be
# perfectly aligned to a row, adding 39 and dividing by 40 has no effect on the result. If it isn't, even
# by a single pixel, the +39 causes it to be drawn one row later.
return (obj.y + 39) // ROW_HEIGHT
# Sort list using the above function to determine order
all_objs.sort(key=sort_key)
# Always draw eagle on top of everything
all_objs.append(self.eagle)
for obj in all_objs:
if obj:
# Draw the object, taking the scroll position into account
obj.draw(0, -int(self.scroll_pos))
if DEBUG_SHOW_ROW_BOUNDARIES:
for obj in all_objs:
if obj and isinstance(obj, Row):
pygame.draw.rect(screen.surface, (255, 255, 255), pygame.Rect(obj.x, obj.y - int(self.scroll_pos), screen.surface.get_width(), ROW_HEIGHT), 1)
screen.draw.text(str(obj.index), (obj.x, obj.y - int(self.scroll_pos) - ROW_HEIGHT))
def score(self):
return int(-320 - game.bunner.min_y) // 40
def play_sound(self, name, count=1):
try:
# Some sounds have multiple varieties. If count > 1, we'll randomly choose one from those
# We don't play any sounds if there is no player (e.g. if we're on the menu)
if self.bunner:
# Pygame Zero allows you to write things like 'sounds.explosion.play()'
# This automatically loads and plays a file named 'explosion.wav' (or .ogg) from the sounds folder (if
# such a file exists)
# But what if you have files named 'explosion0.ogg' to 'explosion5.ogg' and want to randomly choose
# one of them to play? You can generate a string such as 'explosion3', but to use such a string
# to access an attribute of Pygame Zero's sounds object, we must use Python's built-in function getattr
sound = getattr(sounds, name + str(randint(0, count - 1)))
sound.play()
except:
# If a sound fails to play, ignore the error
pass
def loop_sound(self, name, count, volume):
try:
# Similar to play_sound above, but for looped sounds we need to keep a reference to the sound so that we can
# later modify its volume or turn it off. We use the dictionary self.looped_sounds for this - the sound
# effect name is the key, and the value is the corresponding sound reference.
if volume > 0 and not name in self.looped_sounds:
full_name = name + str(randint(0, count - 1))
sound = getattr(sounds, full_name) # see play_sound method above for explanation
sound.play(-1) # -1 means sound will loop indefinitely
self.looped_sounds[name] = | |
# -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
---------------> do not modify this file for your submission <---------------
This file is not considered to be part of your submission.
It is only provided for you to benchmark your code.
The local_qasm_cpp_simulator that is used requires QISKit 0.43 or later.
"""
import numpy as np
import time
import copy
import qiskit
import sys, os, traceback
GLOBAL_TIMEOUT = 3600
ERROR_LIMIT = 1e-10
from qiskit import QuantumProgram
from qiskit.unroll import Unroller, DAGBackend
from qiskit._openquantumcompiler import dag2json
from multiprocessing import Pool
from qiskit.mapper._mappererror import MapperError
from qiskit.tools.qi.qi import state_fidelity
def score(depth, cmap, qnum, compiler_function=None, backend='local_qiskit_simulator'):
    """Score a compiler function on randomly generated benchmark circuits.

    Selects a coupling-map family by qubit count, pairs `ex_nr` random QASM
    circuits of the requested depth (from the 'circuits' folder) with that
    coupling map, and evaluates `compiler_function` against the QISKit
    reference compiler via `evaluate`.

    Args:
        depth (int): circuit depth encoded in the benchmark file names.
        cmap (int): index into the coupling-map family
            (0=circle, 1=linear, 2=neighbour, 3=center).
        qnum (int): number of qubits; families for 3-16 qubits exist.
        compiler_function (function): reference to user compiler function.
        backend (str): simulator backend forwarded to `evaluate`.

    Returns:
        list: [(cost_score, speed_score), res] where each score is the
        inverse mean of the user/reference cost and time ratios, and `res`
        is the raw per-circuit result dict from `evaluate`.

    Raises:
        IndexError: if `qnum` has no coupling-map family (outside 3-16).
    """
    # Coupling-map families keyed by qubit count; the four entries per
    # family correspond to cmap indices 0..3. The previous if/elif chain
    # only dispatched qnum 9-15 even though families for 3-16 were defined,
    # so e.g. qnum=8 crashed despite its maps existing. All names follow
    # the same "<shape>_<n>" pattern (e.g. "circle_3" ... "center_16").
    maps_by_qnum = {
        n: ["circle_%d" % n, "linear_%d" % n, "neighbour_%d" % n, "center_%d" % n]
        for n in range(3, 17)
    }
    ex_nr = 10  # examples to add per qubit number; maximum is 10
    mapss = maps_by_qnum.get(qnum, [])
    # All ex_nr example circuits target the same coupling map, so resolve
    # it once instead of once per circuit.
    coupling_map = load_coupling(mapss[cmap])["coupling_map"]
    test_circuit_filenames = {}
    for ii in range(ex_nr):
        test_circuit_filenames['circuits/random%d_n%d_d%d.qasm' % (ii, qnum, depth)] = coupling_map
    # Load example circuits and pair each with its coupling map
    test_circuits = {}
    for filename, coupling in test_circuit_filenames.items():
        with open(filename, 'r') as infile:
            qasm = infile.read()
        test_circuits[filename] = {"qasm": qasm, "coupling_map": coupling}
    # Setting verbose to False would skip running IBM's reference compiler.
    res = evaluate(compiler_function, test_circuits, verbose=True, backend=backend)
    res_scores = []
    for k in res.keys():
        print(res[k])
    for name in res:
        if (res[name]["optimizer_time"] > 0) and res[name]["coupling_correct_optimized"]:
            # only add the score if the QISKit reference compiler worked well
            if (res[name]["reference_time"] > 0) and res[name]["coupling_correct_reference"]:
                # both user and reference compiler gave the correct result without error
                res_scores.append([res[name]["cost_optimized"] / res[name]["cost_reference"],
                                   res[name]["optimizer_time"] / res[name]["reference_time"]])
            else:
                # the reference compiler had an error or did not produce the right
                # quantum state; this records a value half as good as the reference
                res_scores.append([2, 2])
    # Invert the mean ratios so that higher is better
    return [(1. / np.mean([ii[0] for ii in res_scores]),
             1. / np.mean([ii[1] for ii in res_scores])), res]
def evaluate(compiler_function=None, test_circuits=None, verbose=False, backend = 'local_qiskit_simulator'):
"""
Evaluates the given complier_function with the circuits in test_circuits
and compares the output circuit and quantum state with the original and
a reference obtained with the qiskit compiler.
Args:
compiler_function (function): reference to user compiler function
test_circuits (dict): named dict of circuits for which the compiler performance is evaluated
test_circuits: {
"name": {
"qasm": 'qasm_str',
"coupling_map": 'target_coupling_map
}
}
verbose (bool): specifies if performance of basic QISKit unroler and mapper circuit is shown for each circuit
backend (string): backend to use. For Windows Systems you should specify 'local_qasm_simulator' until
'local_qiskit_simulator' is available.
Returns:
dict
{
"name": circuit name
{
"optimizer_time": time taken by user compiler,
"reference_time": reference time taken by qiskit circuit mapper/unroler (if verbose),
"cost_original": original circuit cost function value (if verbose),
"cost_reference": reference circuit cost function value (if verbose),
"cost_optimized": optimized circuit cost function value,
"coupling_correct_original": (bool) does original circuit
satisfy the coupling map (if verbose),
"coupling_correct_reference": (bool) does circuit produced
by the qiskit mapper/unroler
satisfy the coupling map (if verbose),
"coupling_correct_optimized": (bool) does optimized circuit
satisfy the coupling map,
"state_correct_optimized": (bool) does optimized circuit
return correct state
}
}
"""
# Initial Setup
basis_gates = 'u1,u2,u3,cx,id' # or use "U,CX?"
gate_costs = {'id': 0, 'u1': 0, 'measure': 0, 'reset': 0, 'barrier': 0,
'u2': 1, 'u3': 1, 'U': 1,
'cx': 10, 'CX': 10}
# Results data structure
results = {}
# Load QASM files and extract DAG circuits
for name, circuit in test_circuits.items():
qp = QuantumProgram()
qp.load_qasm_text(
circuit["qasm"], name, basis_gates=basis_gates)
circuit["dag_original"] = qasm_to_dag_circuit(circuit["qasm"], basis_gates=basis_gates)
test_circuits[name] = circuit
results[name] = {} # build empty result dict to be filled later
# Only return results if a valid compiler function is provided
if compiler_function is not None:
# step through all the test circuits using multiprocessing
compile_jobs = [[name,circuit,0,compiler_function,gate_costs] for name, circuit in test_circuits.items()]
with Pool(len(compile_jobs)) as job:
res_values_opt = job.map(_compile_circuits, compile_jobs)
# stash the results in the respective dicts
for job in range(len(compile_jobs)):
name = res_values_opt[job].pop("name")
test_circuits[name] = res_values_opt[job].pop("circuit") # remove the circuit from the results and store it
results[name] = res_values_opt[job]
# do the same for the reference compiler in qiskit if verbose == True
if verbose:
compile_jobs = [[name, circuit, 1, _qiskit_compiler, gate_costs] for name, circuit in
test_circuits.items()]
with Pool(len(compile_jobs)) as job:
res_values = job.map(_compile_circuits, compile_jobs)
# also stash this but use update so we don't overwrite anything
for job in range(len(compile_jobs)):
name = res_values[job].pop("name")
test_circuits[name].update(res_values[job].pop("circuit")) # remove the circuit from the results and store it
results[name].update(res_values[job])
# determine the final permutation of the qubits
# this is done by analyzing the measurements on the qubits
compile_jobs = [[name, circuit, verbose] for name, circuit in test_circuits.items()]
with Pool(len(compile_jobs)) as job:
res_values = job.map(_prep_sim, compile_jobs)
for job in range(len(compile_jobs)):
name = res_values[job].pop("name")
test_circuits[name].update(res_values[job].pop("circuit")) # remove the circuit from the results and store it
results[name].update(res_values[job])
# Compose qobj for simulation
config = {
'data': ['quantum_state'],
}
# generate qobj for original circuit
qobj_original = _compose_qobj("original", test_circuits,
backend=backend,
config=config,
basis_gates=basis_gates,
shots=1,
seed=None)
# Compute original cost and check original coupling map
for circuit in qobj_original["circuits"]:
name = circuit["name"]
coupling_map = test_circuits[name].get("coupling_map", None)
coupling_map_passes = True
cost = 0
for op in circuit["compiled_circuit"]["operations"]:
cost += gate_costs.get(op["name"]) # compute cost
if op["name"] in ["cx", "CX"] \
and coupling_map is not None: # check coupling map
coupling_map_passes &= (
op["qubits"][0] in coupling_map)
if op["qubits"][0] in coupling_map:
coupling_map_passes &= (
op["qubits"][1] in coupling_map[op["qubits"][0]]
)
if verbose:
results[name]["cost_original"] = cost
results[name]["coupling_correct_original"] = coupling_map_passes
# Run simulation
time_start = time.process_time()
#print(qobj_original['config'])
res_original = qp.run(qobj_original, timeout=GLOBAL_TIMEOUT)
results[name]["sim_time_orig"] = time.process_time() - time_start
# Generate qobj for optimized circuit
qobj_optimized = _compose_qobj("optimized", test_circuits,
backend=backend,
config=config,
basis_gates=basis_gates,
| |
tdl.losses.AddNLosses(reg))
else:
raise AttributeError(
'None of the Layers has a regularizer defined')
return reg
    def __init__(self, input_shape, n_filters, filter_sizes, pool_sizes,
                 name='MultiConv2D'):
        '''All variables corresponding to the weights of the network are
        defined here.

        Args:
            input_shape: [input_height, input_width, input_maps].
            n_filters: list with the number of filters per conv layer; the
                last entry is the number of output maps.
            filter_sizes: list of [height, width] kernel sizes per layer.
            pool_sizes: list of [height, width] pooling factors per layer.
            name: name for the model.
        '''
        assert len(input_shape) == 3, \
            'input_shape must have 3 elements: '\
            'input_shape=[input_height, input_widht, input_maps]'
        input_size = input_shape[:2]
        n_input_maps = input_shape[2]
        self._n_inputs = n_input_maps
        self._n_outputs = n_filters[-1]
        self._input_shape = input_shape
        self._n_filters = n_filters
        self._filter_sizes = filter_sizes
        self._pool_sizes = pool_sizes
        super(MultiLayer2DConvolution, self).__init__(name=name)
        # Get size after convolution phase: each layer shrinks the map by
        # (kernel - 1) -- consistent with a VALID convolution -- and then
        # divides by the pooling factor (integer division).
        final_size = [input_size[0], input_size[1]]
        for i in range(len(filter_sizes)):
            final_size[0] = (final_size[0] -
                             (filter_sizes[i][0] - 1)) // pool_sizes[i][0]
            final_size[1] = (final_size[1] -
                             (filter_sizes[i][1] - 1)) // pool_sizes[i][1]
        # Clamp each spatial dimension to at least one pixel.
        if final_size[0] == 0:
            final_size[0] = 1
        if final_size[1] == 0:
            final_size[1] = 1
        self._output_shape = final_size + [self.n_filters[-1]]
        # print("Shape of the maps after convolution stage:", self.out_conv_shape)

    class Output(tdl.core.TdlModel):
        '''Evaluation of a MultiLayer2DConvolution on a concrete input.'''
        @tdl.core.InputModel
        def model(self, value):
            # The MultiLayer2DConvolution being evaluated.
            return value
        @tdl.core.InferenceInput
        def inputs(self, value):
            # Input tensor; a placeholder is created when none is given.
            tdl.core.assert_initialized(self, 'inputs', ['model'])
            if value is None:
                value = self.setup_inputs(None, self.input_shape)
            return value
        @property
        def weights(self):
            return self.model.weights
        @property
        def hidden(self):
            # Per-layer evaluation results, populated by `value`.
            return self._hidden
        @property
        def input_shape(self):
            return self.model.input_shape
        @property
        def y(self):
            # Output of the last convolutional layer, populated by `value`.
            return self._y
        def setup_inputs(self, batch_size, input_shape):
            # batch_size None gives a variable-size batch dimension.
            inputs = tf.placeholder(tf.float32,
                                    shape=[batch_size] + input_shape,
                                    name='inputs')
            return inputs
        def setup_conv_layers(self, inputs, layers):
            # Chain the layers, keeping every intermediate result.
            out = inputs
            hidden = list()
            for layer in layers:
                h = layer.evaluate(out)
                hidden.append(h)
                out = h.y
            return out, hidden
        @tdl.core.OutputValue
        def value(self, _):
            self._y, self._hidden = self.setup_conv_layers(
                self.inputs, self.model.layers)
            return self.y
        def __init__(self, model, inputs=None, batch_size=None,
                     options=None, name='MultiConv2D'):
            super(MultiLayer2DConvolution.Output, self).__init__(
                model=model,
                inputs=(inputs if inputs is not None
                        else self.setup_inputs(batch_size, model.input_shape)),
                options=options, name=name)

    def setup(self, inputs=None, batch_size=None, options=None, name=None):
        '''Build and return an Output that evaluates this network.'''
        return MultiLayer2DConvolution.Output(model=self,
                                              inputs=inputs,
                                              batch_size=batch_size,
                                              options=options,
                                              name=name)
class AlexNet(tdl.core.TdlModel):
    '''Convolutional network: conv feature extractor followed by an MLP.'''
    _submodels = ['conv', 'mlp']
    @property
    def input_shape(self):
        return self.conv.input_shape
    @property
    def n_outputs(self):
        return self.mlp.n_outputs
    @property
    def weights(self):
        # Weights of both stages, conv stage first.
        return self.conv.weights + self.mlp.weights
    @tdl.core.Submodel
    def conv(self, value):
        # Accepts either a spec dict or an already-built network.
        if isinstance(value, dict):
            conv = MultiLayer2DConvolution(
                input_shape=value['input_shape'],
                n_filters=value['n_filters'],
                filter_sizes=value['filter_sizes'],
                pool_sizes=value['pool_sizes'],
                name='conv')
        elif isinstance(value, MultiLayer2DConvolution):
            conv = value
        else:
            raise ValueError('Provided network is not a '
                             'MultiLayer2DConvolution')
        return conv
    @tdl.core.Submodel
    def mlp(self, value):
        # The MLP consumes the flattened conv output
        # (product of the conv output-shape dimensions).
        n_inputs = functools.reduce(lambda x, y: x * y,
                                    self.conv.output_shape)
        if isinstance(value, dict):
            net = MlpNet(n_inputs=n_inputs,
                         n_outputs=value['n_outputs'],
                         n_hidden=value['n_hidden'],
                         afunction=tf.nn.relu,
                         output_function=value['output_function'],
                         name='mlp')
        elif isinstance(value, MlpNet):
            net = value
        else:
            raise ValueError('Provided network is not an MlpNet')
        return net
    @tdl.core.Regularizer
    def regularizer(self, scale=None):
        # Sum of the conv and mlp regularizers, each initialized lazily
        # with `scale` on first use.
        conv_reg = (self.conv.regularizer.value if self.conv.regularizer.is_set
                    else self.conv.regularizer.init(scale))
        mlp_reg = (self.mlp.regularizer.value if self.mlp.regularizer.is_set
                   else self.mlp.regularizer.init(scale))
        return tdl.losses.AddNLosses([conv_reg, mlp_reg])
    def __init__(self, input_shape, n_outputs, n_filters, filter_sizes,
                 pool_sizes, n_hidden, output_function=None, name='AlexNet'):
        super(AlexNet, self).__init__(
            conv={'input_shape': input_shape, 'n_filters': n_filters,
                  'filter_sizes': filter_sizes, 'pool_sizes': pool_sizes},
            mlp={'n_outputs': n_outputs, 'n_hidden': n_hidden,
                 'output_function': output_function},
            name=name)
    class AlexNetSetup(tdl.core.TdlModel):
        '''Inference-graph construction for an AlexNet model.'''
        _submodels = ['conv', 'mlp']
        @property
        def weights(self):
            return self.model.weights
        @tdl.core.InputArgument
        def inputs(self, value):
            # Create a placeholder with variable batch size when no input
            # tensor is supplied.
            if value is None:
                value = tf.placeholder(tdl.core.global_options.float.tftype,
                                       shape=[None] + self.input_shape,
                                       name='inputs')
            return value
        @tdl.core.Submodel
        def conv(self, _):
            conv = self.model.conv.setup(self.inputs)
            return conv
        @tdl.core.Submodel
        def mlp(self, _):
            # Flatten the conv activations before feeding the MLP.
            inputs = tf.reshape(self.conv.y, [-1, self.model.mlp.n_inputs])
            mlp = self.model.mlp.evaluate(inputs=inputs)
            return mlp
        @tdl.core.OutputValue
        def output(self, _):
            return self.mlp.output
        @property
        def value(self):
            return self.output.value
        @property
        def input_shape(self):
            return self.model.input_shape
        @tdl.core.OptionalProperty
        def loss(self, alpha=1e-5):
            # L2 empirical loss plus alpha-weighted model regularizer.
            empirical = L2Loss(self.value)
            regularizer = (self.model.regularizer.value
                           if self.model.regularizer.is_set
                           else self.model.regularizer.init())
            loss = EmpiricalWithRegularization(
                empirical=empirical,
                regularizer=regularizer,
                alpha=alpha)
            return loss
        def __init__(self, model, inputs=None, batch_size=None,
                     options=None, name='AlexNet'):
            # NOTE(review): batch_size is accepted but never used here.
            self.model = model
            super(AlexNet.AlexNetSetup, self)\
                .__init__(inputs=inputs, options=options, name=name)
    def evaluate(self, inputs=None, options=None, name=None):
        '''Build the inference graph for this model.'''
        # NOTE(review): AlexNetOutput is only defined on AlexNetClassifier
        # in the visible code -- confirm this attribute exists on AlexNet.
        return AlexNet.AlexNetOutput(model=self, inputs=inputs,
                                     options=options, name=name)
class AlexNetClassifier(AlexNet):
    '''AlexNet with a classification (MlpClassifier) head.'''
    @property
    def n_classes(self):
        return self._n_classes
    @tdl.core.Submodel
    def mlp(self, value):
        # Override of AlexNet.mlp: build an MlpClassifier over the
        # flattened conv output.
        n_inputs = functools.reduce(lambda x, y: x * y,
                                    self.conv.output_shape)
        if isinstance(value, dict):
            net = MlpClassifier(
                n_inputs=n_inputs,
                n_classes=self.n_classes,
                n_hidden=value['n_hidden'],
                afunction=tf.nn.relu,
                name='mlp')
        elif isinstance(value, MlpNet):
            net = value
        else:
            raise ValueError('Provided network is not an MlpNet')
        return net
    def __init__(self, input_shape, n_classes, n_filters, filter_sizes,
                 pool_sizes, n_hidden, name='AlexNetClassifier'):
        self._n_classes = n_classes
        # Binary classification uses a single (sigmoid) output unit.
        n_outputs = (1 if n_classes == 2
                     else n_classes)
        super(AlexNetClassifier, self).__init__(input_shape=input_shape,
                                                n_outputs=n_outputs,
                                                n_filters=n_filters,
                                                filter_sizes=filter_sizes,
                                                pool_sizes=pool_sizes,
                                                n_hidden=n_hidden,
                                                name=name)
    class AlexNetOutput(AlexNet.AlexNetSetup):
        '''Classifier inference graph; exposes logits and a loss.'''
        @property
        def logits(self):
            return self.mlp.logits
        @tdl.core.OptionalProperty
        def loss(self, alpha=1e-5):
            # Classification loss on logits plus alpha-weighted regularizer.
            empirical = ClassificationLoss(self.logits)
            regularizer = (self.model.regularizer.value
                           if self.model.regularizer.is_set
                           else self.model.regularizer.init())
            loss = EmpiricalWithRegularization(
                empirical=empirical,
                regularizer=regularizer,
                alpha=alpha)
            return loss
    def evaluate(self, inputs=None, options=None, name=None):
        '''Build the classifier inference graph.'''
        return AlexNetClassifier\
            .AlexNetOutput(model=self, inputs=inputs, options=options,
                           name=name)
class LinearClassifier(tdl.core.TdlModel):
    '''Linear classifier: a single affine layer with sigmoid/softmax output.

    For a binary problem (n_classes == 2) a single logit is produced and
    passed through a sigmoid; otherwise one logit per class is produced
    and passed through a softmax.
    '''
    @property
    def n_inputs(self):
        # Number of input features.
        return self._n_inputs
    @property
    def n_outputs(self):
        # Binary classification uses a single output unit.
        return (1 if self.n_classes == 2
                else self.n_classes)
    @property
    def n_classes(self):
        return self._n_classes
    @tdl.core.Submodel
    def linear_layer(self, value):
        # Default submodel: a single affine (fully connected) layer.
        if value is None:
            value = AffineLayer(input_shape=self.n_inputs,
                                units=self.n_outputs)
        return value
    @tdl.core.Regularizer
    def regularizer(self, scale):
        # Delegate regularization to the affine layer, initializing it
        # with `scale` on first access.
        reg = (self.linear_layer.regularizer.value
               if self.linear_layer.regularizer.is_set
               else self.linear_layer.regularizer.init(scale))
        return reg
    @property
    def weights(self):
        return [self.linear_layer.weights]
    def __init__(self, n_inputs, n_classes, name='linear_classifier',
                 **kargs):
        self._n_inputs = n_inputs
        self._n_classes = n_classes
        super(LinearClassifier, self).__init__(name=name, **kargs)
    class LinearClassifierSetup(tdl.core.OutputModel):
        '''Output wrapper produced when the classifier is evaluated.'''
        @property
        def n_inputs(self):
            return self.model.n_inputs
        @property
        def n_outputs(self):
            return self.model.n_outputs
        @property
        def labels(self):
            # NOTE(review): self._labels is not assigned in the visible
            # code -- presumably set by tdl.ModelMethod or a caller;
            # confirm before relying on this property.
            return self._labels
        @tdl.core.OptionalProperty
        def loss(self, alpha):
            # Classification loss on logits plus alpha-weighted regularizer.
            empirical = ClassificationLoss(self.logits)
            loss = EmpiricalWithRegularization(
                empirical=empirical,
                regularizer=self.model.regularizer.value,
                alpha=alpha)
            return loss
        @property
        def weights(self):
            return self.model.weights
    @tdl.ModelMethod(['logits', 'value', 'inputs'], ['inputs'],
                     LinearClassifierSetup)
    def evaluate(self, object, inputs=None):
        '''Build the inference graph; returns (logits, probabilities, inputs).'''
        if inputs is None:
            inputs = tf.placeholder(tf.float32,
                                    shape=(None, self.n_inputs),
                                    name='inputs')
        logits = self.linear_layer(inputs)
        # Sigmoid for a single binary output unit, softmax otherwise.
        if self.n_outputs == 1:
            output = tf.nn.sigmoid(logits)
        else:
            output = tf.nn.softmax(logits)
        return logits, output, inputs
class StridedDeconvNet(object):
    ''' Creates a Deconvolutional neural network using upsampling
    TODO: implement this using new format
    It performs a 'deconvolutional' neural network similar to the one used
    in "UNSUPERVISED REPRESENTATION LEARNING WITH DEEP CONVOLUTIONAL
    GENERATIVE ADVERSARIAL NETWORKS"
    (http://arxiv.org/pdf/1511.06434v2.pdf)
    The network maps a vector of size n_inputs to a 2d map with several channels.
    First a linear mapping is performed, then a reshape to form an initial
    tensor of 2d maps with channels, then a series of upscaling and convolutions
    are performed.
    n_inputs: size of the input vectors
    input_size: size of the maps after linear stage: [size_dim0, size_dim1]
    n_input_maps: number of maps after linear stage
    n_filters: list with the number of filters for each layer
    filter_size: list with the size of the kernel for each layer,
        the format for the size of each layer is:
        [filter_size_dim0 , filter_size_dim1]
    upsampling: list with the size for the upsampling in each deconv layer:
        [upsampling_dim0, upsampling_dim1]
    in_layer: input layer, a linear layer for mapping the inputs to the desired
        output
    '''
    def __init__(self, n_inputs, input_size, n_input_maps,
                 n_filters, filter_size,
                 upsampling,
                 name=''):
        ''' All variables corresponding to the weights of the network are
        defined '''
        self.n_inputs = n_inputs
        self.input_size = input_size
        self.n_input_maps = n_input_maps
        self.n_filters = n_filters
        self.filter_size = filter_size
        self.upsampling = upsampling
        # 1. Create the linear layer
        self.in_layer = LinearLayer(n_inputs,
                                    n_input_maps *
                                    input_size[0] * input_size[1],
                                    name=name + '/lin')
        # 2. Create the convolutional layers:
        self.conv_layers = list()
        self.conv_layers.append(
            ConvTransposeLayer(filter_size[0],
                               [n_input_maps, n_filters[0]],
                               upsampling[0],
                               name=name + '/conv_0'))
        for l in range(1, len(n_filters) - 1):
            self.conv_layers.append(
                ConvTransposeLayer(filter_size[l],
                                   [n_filters[l - 1], n_filters[l]],
                                   upsampling[l],
                                   name=name + '/conv_' + str(l)))
        # last conv layer has tanh activation function
        self.conv_layers.append(
            ConvTransposeLayer(filter_size[-1],
                               [n_filters[-2], n_filters[-1]],
                               upsampling[-1],
                               afunction=tf.tanh,
                               name=name + '/conv_' + str(len(n_filters) - 1)))
        # 4. Define the saver for the weights of the network
        # (NOTE: the linear layer's weights are not added to the saver here.)
        saver_dict = dict()
        for l in range(len(self.conv_layers)):
            saver_dict.update(self.conv_layers[l].saver_dict)
        self.saver = tf.train.Saver(saver_dict)
    def setup(self, batch_size, drop_prob=None):
        ''' Defines the computation graph of the neural network for a specific
        batch size
        drop_prob: placeholder used for specify the probability for dropout. If
            this coefficient is set, then dropout regularization is added
            between all fully connected layers(TODO: allow to choose which
            layers)
        '''
        inputs = tf.placeholder(tf.float32,
                                shape=(batch_size, self.n_inputs))
        # 1. linear stage
        out = self.in_layer.evaluate(inputs)
        # 1.1 reshape the flat linear output into [batch, h, w, maps]
        shape = out.get_shape().as_list()
        out = tf.reshape(out, [shape[0],
                               self.input_size[0],
                               self.input_size[1],
                               self.n_input_maps]
                         )
        # 2. convolution stage: all transposed convolutions use SAME padding
        for layer in self.conv_layers:
            out = layer.evaluate(out, 'SAME')
        return NetConf(inputs, None, out, None)
class StackedModel(tdl.TdlModel):
    '''Sequential container that stacks layers and applies them in order.'''
    _submodels = ['layers']
    @tdl.Submodel
    def layers(self, value):
        # Default to an empty list of layers; layers are appended via add().
        if value is None:
            value = list()
        return value
    @tdl.core.InputArgument
    def return_layers(self, value):
        '''True if the return value of the stacked model is the layers'''
        if value is None:
            warnings.warn('On the future, the return value of StackedModel '
                          'will be the output layer by default. To keep '
                          'current behavior use return_layers=True')
            value = True
        if value not in (True, False):
            raise ValueError('return_layers must be either True or False')
        return value
    @tdl.core.LazzyProperty
    def _layer_names(self):
        ''' name of the layers '''
        # Lazily-created mapping used to keep layer names unique.
        return dict()
    def __getitem__(self, item):
        # Index directly into the stacked layers.
        return self.layers[item]
    def __len__(self):
        return len(self.layers)
def add(self, layer, name=None):
assert callable(layer), \
'Model {} is not callable. StackedModel only | |
# manuscript/python_generators/local_imports/PeptideBuilder.py
'''This module is part of the PeptideBuilder library,
written by <NAME>, <NAME>,
<NAME>, and <NAME>.
The PeptideBuilder module contains code to generate 3D
structures of peptides. It requires the Geometry module
(also part of the PeptideBuilder library), which contains
default bond lengths and angles for all amino acids.
This module also requires the Bio.PDB module from
Biopython, for structure manipulation.
This file is provided to you under the GNU General Public
License, version 2.0 or later.'''
from Bio.PDB import *
from Bio.PDB.Atom import *
from Bio.PDB.Residue import *
from Bio.PDB.Chain import *
from Bio.PDB.Model import *
from Bio.PDB.Structure import *
from Bio.PDB.Vector import *
from Bio.PDB.Entity import*
from Geometry import *
import math, warnings,numpy
def get_prop(atm):
    '''Print every inspectable property of a Bio.PDB Atom to stdout.

    Debug helper: dumps the atom's name, coordinates, vector, B-factor,
    anisotropic B-factor, occupancy, altloc, full name, serial number,
    parent, id, full id and level. Returns None.
    '''
    # print(x) with a single argument behaves identically under Python 2
    # (parentheses around one expression are a no-op) and Python 3; the
    # original Python-2-only `print x` statements were a syntax error
    # under Python 3.
    print(atm.get_name())
    print(atm.get_coord())
    print(atm.get_vector())
    print(atm.get_bfactor())
    print(atm.get_anisou())
    print(atm.get_occupancy())
    print(atm.get_altloc())
    print(atm.get_fullname())
    print(atm.get_serial_number())
    print(atm.get_parent())
    print(atm.get_id())
    print(atm.get_full_id())
    print(atm.get_level())
def calculateCoordinates(refA, refB, refC, L, ang, di):
    '''Place a new atom D from three reference atoms and internal coordinates.

    D is positioned so that (as enforced by the construction below):
      - the refA-refB-refC-D dihedral equals `di` degrees (the final
        rotation about the refB-refC axis sets this exactly).
    The X/Y/Z algebra solves for a point at distance L from refC whose
    refB-refC-D angle is `ang` degrees (F encodes the bond-angle dot
    product; the `const`/`denom` expressions are the closed-form solution).

    Args:
        refA, refB, refC: Bio.PDB Atom objects (only get_vector() is used).
        L: bond length |D - refC|.
        ang: bond angle refB-refC-D, in degrees.
        di: dihedral refA-refB-refC-D, in degrees.
    Returns:
        numpy array with the 3D coordinates of D.
    '''
    AV=refA.get_vector()
    BV=refB.get_vector()
    CV=refC.get_vector()
    # Reference vectors expressed relative to refC.
    CA=AV-CV
    CB=BV-CV
    ##CA vector
    AX=CA[0]
    AY=CA[1]
    AZ=CA[2]
    ##CB vector
    BX=CB[0]
    BY=CB[1]
    BZ=CB[2]
    ##Plane Parameters
    # (A, B, G) = CA x CB: normal of the plane containing the references.
    A=(AY*BZ)-(AZ*BY)
    B=(AZ*BX)-(AX*BZ)
    G=(AX*BY)-(AY*BX)
    ##Dot Product Constant
    # F = CB . D_local, fixed by the requested bond angle `ang`.
    F= math.sqrt(BX*BX + BY*BY + BZ*BZ) * L * math.cos(ang*(math.pi/180.0))
    ##Constants
    const=math.sqrt( math.pow((B*BZ-BY*G),2) *(-(F*F)*(A*A+B*B+G*G)+(B*B*(BX*BX+BZ*BZ) + A*A*(BY*BY+BZ*BZ)- (2*A*BX*BZ*G) + (BX*BX+ BY*BY)*G*G - (2*B*BY)*(A*BX+BZ*G))*L*L))
    denom= (B*B)*(BX*BX+BZ*BZ)+ (A*A)*(BY*BY+BZ*BZ) - (2*A*BX*BZ*G) + (BX*BX+BY*BY)*(G*G) - (2*B*BY)*(A*BX+BZ*G)
    X= ((B*B*BX*F)-(A*B*BY*F)+(F*G)*(-A*BZ+BX*G)+const)/denom
    # Degenerate branch: closed form used when the general Y/Z solution
    # would divide by (B*BZ - BY*G) == 0; a previous reviewer proposed the
    # stricter condition kept commented out below.
    #if(G!=0.0 or ((B==0.0 or BZ==0.0) and (BY==0.0 or G==0.0))): # rvm: added G!=0.0
    if((B==0.0 or BZ==0.0) and (BY==0.0 or G==0.0)):
        #print "refA",refA.get_coord(), "refB",refB.get_coord(), "refC",refC.get_coord()
        #print "\tB",B,"\tBZ",BZ,"\tBY",BY,"\tG",G
        #raw_input()
        const1=math.sqrt( G*G*(-A*A*X*X+(B*B+G*G)*(L-X)*(L+X)))
        Y= ((-A*B*X)+const1)/(B*B+G*G)
        Z= -(A*G*G*X+B*const1)/(G*(B*B+G*G))
    else:
        Y= ((A*A*BY*F)*(B*BZ-BY*G)+ G*( -F*math.pow(B*BZ-BY*G,2) + BX*const) - A*( B*B*BX*BZ*F- B*BX*BY*F*G + BZ*const)) / ((B*BZ-BY*G)*denom)
        Z= ((A*A*BZ*F)*(B*BZ-BY*G) + (B*F)*math.pow(B*BZ-BY*G,2) + (A*BX*F*G)*(-B*BZ+BY*G) - B*BX*const + A*BY*const) / ((B*BZ-BY*G)*denom)
    #GET THE NEW VECTOR from the orgin
    D=Vector(X, Y, Z) + CV
    with warnings.catch_warnings():
        # ignore inconsequential warning
        warnings.simplefilter("ignore")
        temp=calc_dihedral(AV, BV, CV, D)*(180.0/math.pi)
    # Rotate D about the refB-refC axis so the dihedral matches `di`.
    di=di-temp
    rot= rotaxis(math.pi*(di/180.0), CV-BV)
    D=(D-BV).left_multiply(rot)+BV
    # Return plain numpy coordinates rather than a Bio.PDB Vector.
    D = numpy.array(list(D))
    #print [numpy.array(D)]
    return D
# <NAME>
def calculateCoordinatesTEST(refA, refB, refC, L, ang, di):
    '''Variant of calculateCoordinates kept for testing.

    Same geometric construction as calculateCoordinates (atom D at bond
    length L from refC, bond angle `ang` for refB-refC-D, dihedral `di`
    for refA-refB-refC-D), but it returns a Bio.PDB Vector instead of a
    numpy array. The degeneracy test uses integer literals (==0) where the
    main routine uses float literals (==0.0); the comparisons are
    numerically equivalent.
    '''
    AV=refA.get_vector()
    BV=refB.get_vector()
    CV=refC.get_vector()
    # Reference vectors relative to refC.
    CA=AV-CV
    CB=BV-CV
    ##CA vector
    AX=CA[0]
    AY=CA[1]
    AZ=CA[2]
    ##CB vector
    BX=CB[0]
    BY=CB[1]
    BZ=CB[2]
    ##Plane Parameters
    # (A, B, G) = CA x CB: normal of the reference plane.
    A=(AY*BZ)-(AZ*BY)
    B=(AZ*BX)-(AX*BZ)
    G=(AX*BY)-(AY*BX)
    ##Dot Product Constant
    F= math.sqrt(BX*BX + BY*BY + BZ*BZ) * L * math.cos(ang*(math.pi/180.0))
    ##Constants
    const=math.sqrt( math.pow((B*BZ-BY*G),2) *(-(F*F)*(A*A+B*B+G*G)+(B*B*(BX*BX+BZ*BZ) + A*A*(BY*BY+BZ*BZ)- (2*A*BX*BZ*G) + (BX*BX+ BY*BY)*G*G - (2*B*BY)*(A*BX+BZ*G))*L*L))
    denom= (B*B)*(BX*BX+BZ*BZ)+ (A*A)*(BY*BY+BZ*BZ) - (2*A*BX*BZ*G) + (BX*BX+BY*BY)*(G*G) - (2*B*BY)*(A*BX+BZ*G)
    X= ((B*B*BX*F)-(A*B*BY*F)+(F*G)*(-A*BZ+BX*G)+const)/denom
    # Degenerate branch, as in calculateCoordinates.
    if((B==0 or BZ==0) and (BY==0 or G==0)):
        const1=math.sqrt( G*G*(-A*A*X*X+(B*B+G*G)*(L-X)*(L+X)))
        Y= ((-A*B*X)+const1)/(B*B+G*G)
        Z= -(A*G*G*X+B*const1)/(G*(B*B+G*G))
    else:
        Y= ((A*A*BY*F)*(B*BZ-BY*G)+ G*( -F*math.pow(B*BZ-BY*G,2) + BX*const) - A*( B*B*BX*BZ*F- B*BX*BY*F*G + BZ*const)) / ((B*BZ-BY*G)*denom)
        Z= ((A*A*BZ*F)*(B*BZ-BY*G) + (B*F)*math.pow(B*BZ-BY*G,2) + (A*BX*F*G)*(-B*BZ+BY*G) - B*BX*const + A*BY*const) / ((B*BZ-BY*G)*denom)
    #GET THE NEW VECTOR from the orgin
    D=Vector(X, Y, Z) + CV
    with warnings.catch_warnings():
        # ignore inconsequential warning
        warnings.simplefilter("ignore")
        temp=calc_dihedral(AV, BV, CV, D)*(180.0/math.pi)
    # Rotate about the refB-refC axis to set the requested dihedral.
    di=di-temp
    rot= rotaxis(math.pi*(di/180.0), CV-BV)
    D=(D-BV).left_multiply(rot)+BV
    return D
def makeGly(segID, N, CA, C, O, geo):
    '''Creates a Glycine residue'''
    # Glycine has no side chain beyond the backbone; `geo` is unused but
    # kept so all make* builders share the same signature.
    res = Residue((' ', segID, ' '), "GLY", ' ')
    for atom in (N, CA, C, O):
        res.add(atom)
    return res
def makeAla(segID, N, CA, C, O, geo):
    '''Creates an Alanine residue'''
    # Side chain: a single beta carbon placed from the backbone geometry.
    cb_coord = calculateCoordinates(N, C, CA, geo.CA_CB_length,
                                    geo.C_CA_CB_angle, geo.N_C_CA_CB_diangle)
    CB = Atom("CB", cb_coord, 0.0, 1.0, " ", " CB", 0, "C")
    res = Residue((' ', segID, ' '), "ALA", ' ')
    for atom in (N, CA, C, O, CB):
        res.add(atom)
    return res
# Adds a sarcosine (methyl on the N and hydrogens on the CA)
# Added by <NAME> (Feb 2015)
def makeSar(segID, N, CA, C, O, geo):
    '''Creates a Sarcosine residue (N-methyl glycine: methyl group on N).

    A virtual previous-residue carbonyl carbon (Cm) is first placed from
    phi so that the N-methyl carbon (CB) can be positioned trans to it.
    '''
    ##R-Group
    CA_N_CB_angle=geo.CA_N_CB_angle
    # NOTE(review): C_CA_N_CB_diangle is read but only used in the
    # commented-out alternatives below.
    C_CA_N_CB_diangle=geo.C_CA_N_CB_diangle
    N_C_length=geo.N_C_length # amide bond for for C_minus_one
    N_CB_length=geo.N_CB_length
    C_CA_N_C_minus_one_diangle=geo.phi
    CA_N_C_minus_one_angle = geo.CA_N_C_minus_one_angle
    # NOTE(review): also only used in commented-out code below.
    C_minus_one_CA_N_CB_diangle = geo.C_minus_one_CA_N_CB_diangle
    # Place the virtual previous carbonyl carbon using the phi dihedral.
    #                                               1  2   3  3           4                        2  3  4            1  2  3  4
    C_minus_one_position = calculateCoordinates(C, CA, N, N_C_length, CA_N_C_minus_one_angle, C_CA_N_C_minus_one_diangle)
    C_minus_one = Atom("Cm", C_minus_one_position, 0.0 , 1.0, " "," Cm", 0,"C")
    # WAS: carbon_b= calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle)
    #                                  1  2   3  3           4                 2  3  4          1  2  3  4
    #carbon_b = calculateCoordinates(C_minus_one, CA, N, N_CB_length, CA_N_CB_angle, C_minus_one_CA_N_CB_diangle)
    # Place the N-methyl carbon trans (dihedral 180) to the virtual Cm.
    carbon_b = calculateCoordinates(CA, C_minus_one,N, N_CB_length, CA_N_CB_angle, 180.0)
    #carbon_b = calculateCoordinates(C, CA, N, N_CB_length, CA_N_CB_angle, C_CA_N_CB_diangle)#C_minus_one_CA_N_CB_diangle)
    """
    if segID == 1:
        # This is required because we do not have a legitimate phi_1 (C_minus_one_CA_N_CB_diangle)
        # and so we need to just make do with setting the CB to cis with respect to C_CA_N_CB_diangle
        #                                1  2   3  3           4              2  3  4        1  2  3  4
        carbon_b = calculateCoordinates(C, CA, N, N_CB_length, CA_N_CB_angle, C_CA_N_CB_diangle)
    """
    #
    CB = Atom("CB", carbon_b, 0.0 , 1.0, " "," CB", 0,"C")
    ##Create Residue Data Structure
    # NOTE(review): the virtual C_minus_one atom is intentionally NOT added
    # to the residue; it only anchors the CB placement.
    res = Residue((' ', segID, ' '), "SAR", ' ')
    res.add(N)
    res.add(CA)
    res.add(C)
    res.add(O)
    res.add(CB)
    return res
def makeSer(segID, N, CA, C, O, geo):
    '''Creates a Serine residue'''
    # Side chain: CB bonded to CA, then the hydroxyl oxygen OG on CB.
    cb_coord = calculateCoordinates(N, C, CA, geo.CA_CB_length,
                                    geo.C_CA_CB_angle, geo.N_C_CA_CB_diangle)
    CB = Atom("CB", cb_coord, 0.0, 1.0, " ", " CB", 0, "C")
    og_coord = calculateCoordinates(N, CA, CB, geo.CB_OG_length,
                                    geo.CA_CB_OG_angle, geo.N_CA_CB_OG_diangle)
    OG = Atom("OG", og_coord, 0.0, 1.0, " ", " OG", 0, "O")
    res = Residue((' ', segID, ' '), "SER", ' ')
    for atom in (N, CA, C, O, CB, OG):
        res.add(atom)
    return res
def makeCys(segID, N, CA, C, O, geo):
    '''Creates a Cysteine residue'''
    # Side chain: CB bonded to CA, then the thiol sulfur SG on CB.
    cb_coord = calculateCoordinates(N, C, CA, geo.CA_CB_length,
                                    geo.C_CA_CB_angle, geo.N_C_CA_CB_diangle)
    CB = Atom("CB", cb_coord, 0.0, 1.0, " ", " CB", 0, "C")
    sg_coord = calculateCoordinates(N, CA, CB, geo.CB_SG_length,
                                    geo.CA_CB_SG_angle, geo.N_CA_CB_SG_diangle)
    SG = Atom("SG", sg_coord, 0.0, 1.0, " ", " SG", 0, "S")
    res = Residue((' ', segID, ' '), "CYS", ' ')
    for atom in (N, CA, C, O, CB, SG):
        res.add(atom)
    return res
def makeVal(segID, N, CA, C, O, geo):
    '''Creates a Valine residue'''
    def side_atom(name, coord, element):
        # All side-chain atoms share B-factor 0.0, occupancy 1.0,
        # blank altloc and serial number 0.
        return Atom(name, coord, 0.0, 1.0, " ", " " + name, 0, element)

    # Branched side chain: CB carries the two methyl carbons CG1 and CG2.
    CB = side_atom("CB", calculateCoordinates(
        N, C, CA, geo.CA_CB_length, geo.C_CA_CB_angle,
        geo.N_C_CA_CB_diangle), "C")
    CG1 = side_atom("CG1", calculateCoordinates(
        N, CA, CB, geo.CB_CG1_length, geo.CA_CB_CG1_angle,
        geo.N_CA_CB_CG1_diangle), "C")
    CG2 = side_atom("CG2", calculateCoordinates(
        N, CA, CB, geo.CB_CG2_length, geo.CA_CB_CG2_angle,
        geo.N_CA_CB_CG2_diangle), "C")
    res = Residue((' ', segID, ' '), "VAL", ' ')
    for atom in (N, CA, C, O, CB, CG1, CG2):
        res.add(atom)
    return res
def makeIle(segID, N, CA, C, O, geo):
    '''Creates an Isoleucine residue'''
    def side_atom(name, coord, element):
        # Shared Atom defaults: B-factor 0.0, occupancy 1.0, blank altloc,
        # serial number 0.
        return Atom(name, coord, 0.0, 1.0, " ", " " + name, 0, element)

    # Side chain: CB carries CG1 and CG2; CD1 extends from CG1.
    CB = side_atom("CB", calculateCoordinates(
        N, C, CA, geo.CA_CB_length, geo.C_CA_CB_angle,
        geo.N_C_CA_CB_diangle), "C")
    CG1 = side_atom("CG1", calculateCoordinates(
        N, CA, CB, geo.CB_CG1_length, geo.CA_CB_CG1_angle,
        geo.N_CA_CB_CG1_diangle), "C")
    CG2 = side_atom("CG2", calculateCoordinates(
        N, CA, CB, geo.CB_CG2_length, geo.CA_CB_CG2_angle,
        geo.N_CA_CB_CG2_diangle), "C")
    CD1 = side_atom("CD1", calculateCoordinates(
        CA, CB, CG1, geo.CG1_CD1_length, geo.CB_CG1_CD1_angle,
        geo.CA_CB_CG1_CD1_diangle), "C")
    res = Residue((' ', segID, ' '), "ILE", ' ')
    for atom in (N, CA, C, O, CB, CG1, CG2, CD1):
        res.add(atom)
    return res
def makeLeu(segID, N, CA, C, O, geo):
    '''Creates a Leucine residue'''
    def side_atom(name, coord, element):
        # Shared Atom defaults: B-factor 0.0, occupancy 1.0, blank altloc,
        # serial number 0.
        return Atom(name, coord, 0.0, 1.0, " ", " " + name, 0, element)

    # Side chain: CB -> CG, with the two terminal methyls CD1 and CD2 on CG.
    CB = side_atom("CB", calculateCoordinates(
        N, C, CA, geo.CA_CB_length, geo.C_CA_CB_angle,
        geo.N_C_CA_CB_diangle), "C")
    CG = side_atom("CG", calculateCoordinates(
        N, CA, CB, geo.CB_CG_length, geo.CA_CB_CG_angle,
        geo.N_CA_CB_CG_diangle), "C")
    CD1 = side_atom("CD1", calculateCoordinates(
        CA, CB, CG, geo.CG_CD1_length, geo.CB_CG_CD1_angle,
        geo.CA_CB_CG_CD1_diangle), "C")
    CD2 = side_atom("CD2", calculateCoordinates(
        CA, CB, CG, geo.CG_CD2_length, geo.CB_CG_CD2_angle,
        geo.CA_CB_CG_CD2_diangle), "C")
    res = Residue((' ', segID, ' '), "LEU", ' ')
    for atom in (N, CA, C, O, CB, CG, CD1, CD2):
        res.add(atom)
    return res
def makeThr(segID, N, CA, C, O, geo):
    '''Creates a Threonine residue'''
    def side_atom(name, coord, element):
        # Shared Atom defaults: B-factor 0.0, occupancy 1.0, blank altloc,
        # serial number 0.
        return Atom(name, coord, 0.0, 1.0, " ", " " + name, 0, element)

    # Side chain: CB carries the hydroxyl oxygen OG1 and the methyl CG2.
    CB = side_atom("CB", calculateCoordinates(
        N, C, CA, geo.CA_CB_length, geo.C_CA_CB_angle,
        geo.N_C_CA_CB_diangle), "C")
    OG1 = side_atom("OG1", calculateCoordinates(
        N, CA, CB, geo.CB_OG1_length, geo.CA_CB_OG1_angle,
        geo.N_CA_CB_OG1_diangle), "O")
    CG2 = side_atom("CG2", calculateCoordinates(
        N, CA, CB, geo.CB_CG2_length, geo.CA_CB_CG2_angle,
        geo.N_CA_CB_CG2_diangle), "C")
    res = Residue((' ', segID, ' '), "THR", ' ')
    for atom in (N, CA, C, O, CB, OG1, CG2):
        res.add(atom)
    return res
def makeArg(segID, N, CA, C, O, geo):
    '''Creates an Arginine residue'''
    def side_atom(name, coord, element):
        # Shared Atom defaults: B-factor 0.0, occupancy 1.0, blank altloc,
        # serial number 0.
        return Atom(name, coord, 0.0, 1.0, " ", " " + name, 0, element)

    # Build the side chain outward from CA:
    # CB -> CG -> CD -> NE -> CZ -> (NH1, NH2 of the guanidinium group).
    CB = side_atom("CB", calculateCoordinates(
        N, C, CA, geo.CA_CB_length, geo.C_CA_CB_angle,
        geo.N_C_CA_CB_diangle), "C")
    CG = side_atom("CG", calculateCoordinates(
        N, CA, CB, geo.CB_CG_length, geo.CA_CB_CG_angle,
        geo.N_CA_CB_CG_diangle), "C")
    CD = side_atom("CD", calculateCoordinates(
        CA, CB, CG, geo.CG_CD_length, geo.CB_CG_CD_angle,
        geo.CA_CB_CG_CD_diangle), "C")
    NE = side_atom("NE", calculateCoordinates(
        CB, CG, CD, geo.CD_NE_length, geo.CG_CD_NE_angle,
        geo.CB_CG_CD_NE_diangle), "N")
    CZ = side_atom("CZ", calculateCoordinates(
        CG, CD, NE, geo.NE_CZ_length, geo.CD_NE_CZ_angle,
        geo.CG_CD_NE_CZ_diangle), "C")
    NH1 = side_atom("NH1", calculateCoordinates(
        CD, NE, CZ, geo.CZ_NH1_length, geo.NE_CZ_NH1_angle,
        geo.CD_NE_CZ_NH1_diangle), "N")
    NH2 = side_atom("NH2", calculateCoordinates(
        CD, NE, CZ, geo.CZ_NH2_length, geo.NE_CZ_NH2_angle,
        geo.CD_NE_CZ_NH2_diangle), "N")
    res = Residue((' ', segID, ' '), "ARG", ' ')
    for atom in (N, CA, C, O, CB, CG, CD, NE, CZ, NH1, NH2):
        res.add(atom)
    return res
def makeLys(segID, N, CA, C, O, geo):
    '''Creates a Lysine residue'''
    # Place the side-chain atoms one at a time.  Each new atom's position
    # is computed from three previously placed atoms plus the bond length,
    # bond angle and dihedral angle taken from the geometry object.
    CB = Atom("CB",
              calculateCoordinates(N, C, CA, geo.CA_CB_length,
                                   geo.C_CA_CB_angle, geo.N_C_CA_CB_diangle),
              0.0, 1.0, " ", " CB", 0, "C")
    CG = Atom("CG",
              calculateCoordinates(N, CA, CB, geo.CB_CG_length,
                                   geo.CA_CB_CG_angle, geo.N_CA_CB_CG_diangle),
              0.0, 1.0, " ", " CG", 0, "C")
    CD = Atom("CD",
              calculateCoordinates(CA, CB, CG, geo.CG_CD_length,
                                   geo.CB_CG_CD_angle, geo.CA_CB_CG_CD_diangle),
              0.0, 1.0, " ", " CD", 0, "C")
    CE = Atom("CE",
              calculateCoordinates(CB, CG, CD, geo.CD_CE_length,
                                   geo.CG_CD_CE_angle, geo.CB_CG_CD_CE_diangle),
              0.0, 1.0, " ", " CE", 0, "C")
    NZ = Atom("NZ",
              calculateCoordinates(CG, CD, CE, geo.CE_NZ_length,
                                   geo.CD_CE_NZ_angle, geo.CG_CD_CE_NZ_diangle),
              0.0, 1.0, " ", " NZ", 0, "N")
    # Assemble the residue: backbone atoms first, then the side chain.
    res = Residue((' ', segID, ' '), "LYS", ' ')
    for atom in (N, CA, C, O, CB, CG, CD, CE, NZ):
        res.add(atom)
    return res
def makeAsp(segID, N, CA, C, O, geo):
'''Creates an Aspartic Acid residue'''
##R-Group
CA_CB_length=geo.CA_CB_length
C_CA_CB_angle=geo.C_CA_CB_angle
N_C_CA_CB_diangle=geo.N_C_CA_CB_diangle
CB_CG_length=geo.CB_CG_length
CA_CB_CG_angle=geo.CA_CB_CG_angle
N_CA_CB_CG_diangle=geo.N_CA_CB_CG_diangle
CG_OD1_length=geo.CG_OD1_length
CB_CG_OD1_angle=geo.CB_CG_OD1_angle
CA_CB_CG_OD1_diangle=geo.CA_CB_CG_OD1_diangle
CG_OD2_length=geo.CG_OD2_length
CB_CG_OD2_angle=geo.CB_CG_OD2_angle
CA_CB_CG_OD2_diangle=geo.CA_CB_CG_OD2_diangle
carbon_b= calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle)
CB= Atom("CB", carbon_b, 0.0 , 1.0, " "," CB", 0,"C")
carbon_g= calculateCoordinates(N, | |
<reponame>biocore/gneiss
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, gneiss development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import abc
from collections import namedtuple
from skbio import TreeNode
import pandas as pd
import numpy
class Dendrogram(TreeNode):
    """ Stores data to be plotted as a dendrogram.

    A `Dendrogram` object represents a tree in addition to the
    key information required to create a tree layout prior to
    visualization. No layouts are specified within this class,
    since this serves as a super class for different tree layouts.

    Parameters
    ----------
    use_lengths: bool
        Specifies if the branch lengths should be included in the
        resulting visualization (default True).

    Attributes
    ----------
    length
    leafcount
    height
    depth

    Notes
    -----
    `length` refers to the branch length of a node to its parent.
    `leafcount` is the number of tips within a subtree. `height` refers
    to the longest path from root to the deepest leaf in that subtree.
    `depth` is the number of nodes found in the longest path.
    """
    def __init__(self, use_lengths=True, **kwargs):
        """ Constructs a Dendrogram object for visualization.
        """
        # NOTE(review): `use_lengths` is accepted but never stored here;
        # callers pass it to `update_geometry` instead — confirm intended.
        super().__init__(**kwargs)

    def _cache_ntips(self):
        """ Counts the number of leaves under each subtree."""
        # Postorder traversal guarantees every child's leafcount is set
        # before its parent's is computed.
        for n in self.postorder():
            if n.is_tip():
                n.leafcount = 1
            else:
                n.leafcount = sum(c.leafcount for c in n.children)

    def update_geometry(self, use_lengths, depth=None):
        """Calculate tree node attributes such as height and depth.

        Parameters
        ----------
        use_lengths: bool
            Specify if the branch length should be incorporated into
            the geometry calculations for visualization.
        depth: int
            The number of nodes in the longest path from root to leaf.
            This is agnostic to scale and orientation.
        """
        # A missing branch length (or use_lengths=False) falls back to a
        # unit length, except at the root call (depth is None) which gets 0.
        if self.length is None or not use_lengths:
            if depth is None:
                self.length = 0
            else:
                self.length = 1
        self.depth = (depth or 0) + self.length

        children = self.children
        if children:
            # Recurse first so children's height/leafcount are available
            # when aggregating for this node.
            for c in children:
                c.update_geometry(use_lengths, self.depth)
            self.height = max([c.height for c in children]) + self.length
            self.leafcount = sum([c.leafcount for c in children])
        else:
            # A tip contributes its own length as height and counts as one
            # leaf (and one edge).
            self.height = self.length
            self.leafcount = self.edgecount = 1

    def coords(self, height, width):
        """ Returns coordinates of nodes to be rendered in plot.

        Parameters
        ----------
        height : int
            The height of the canvas.
        width : int
            The width of the canvas.

        Returns
        -------
        pd.DataFrame
            index : str
                Name of node.
            x : float
                x-coordinate of node.
            y : float
                y-coordinate of node.
            child(i) : str
                Name of ith child node in that specific node.
                in the tree.
            is_tip : str
                Specifies if the node is a tip in the tree.
        """
        # Delegate the actual layout to the subclass, which stores the
        # positions on each node as x2/y2.
        self.rescale(width, height)
        result = {}
        for node in self.postorder():
            children = {'child%d' % i: n.name
                        for i, n in enumerate(node.children)}
            coords = {'x': node.x2, 'y': node.y2}
            is_tip = {'is_tip': node.is_tip()}
            result[node.name] = {**coords, **children, **is_tip}
        result = pd.DataFrame(result).T

        # reorder so that x and y are first
        cols = ['x', 'y'] + sorted(list(set(result.columns) - set(['x', 'y'])))
        return result.loc[:, cols]

    @abc.abstractmethod
    def rescale(self, width, height):
        # Subclasses compute layout-specific (x, y) node positions here.
        pass
class UnrootedDendrogram(Dendrogram):
    """ Stores data to be plotted as an unrooted dendrogram.

    An `UnrootedDendrogram` object represents a tree in addition to the
    key information required to create a radial tree layout prior to
    visualization.

    Parameters
    ----------
    use_lengths: bool
        Specifies if the branch lengths should be included in the
        resulting visualization (default True).

    Attributes
    ----------
    length
    leafcount
    height
    depth
    """
    def __init__(self, **kwargs):
        """ Constructs a UnrootedDendrogram object for visualization.

        Parameters
        ----------
        use_lengths: bool
            Specifies if the branch lengths should be included in the
            resulting visualization (default True).
        """
        super().__init__(**kwargs)

    @classmethod
    def from_tree(cls, tree, use_lengths=True):
        """ Creates an UnrootedDendrogram object from a skbio tree.

        Parameters
        ----------
        tree : skbio.TreeNode
            Input skbio tree

        Returns
        -------
        UnrootedDendrogram
        """
        # Re-class every node in place rather than copying the tree, then
        # precompute depth/height/leafcount for the layout.
        for n in tree.postorder():
            n.__class__ = UnrootedDendrogram
        tree.update_geometry(use_lengths)
        return tree

    def rescale(self, width, height):
        """ Find best scaling factor for fitting the tree in the figure.

        This method will find the best orientation and scaling possible
        to fit the tree within the dimensions specified by width and height.

        Parameters
        ----------
        width : float
            width of the canvas
        height : float
            height of the canvas

        Returns
        -------
        best_scaling : float
            largest scaling factor in which the tree can fit in the canvas.

        Notes
        -----
        """
        # Each tip is allotted an equal slice of the full circle.
        angle = (2 * numpy.pi) / self.leafcount
        # this loop is a horrible brute force hack
        # there are better (but complex) ways to find
        # the best rotation of the tree to fit the display.
        best_scale = 0
        # Try 60 evenly spaced rotations over half a turn and keep the one
        # that admits the largest scale.
        # NOTE(review): if no iteration produces scale > 0 (e.g. degenerate
        # coordinates), `best_args` would be unbound below — confirm inputs
        # always yield a positive scale.
        for i in range(60):
            direction = i / 60.0 * numpy.pi
            # TODO:
            # This function has a little bit of recursion.  This will
            # need to be refactored to remove the recursion.

            points = self.update_coordinates(1.0, 0, 0, direction, angle)
            xs, ys = zip(*points)
            # double check that the tree fits within the margins
            scale = min(float(width) / (max(xs) - min(xs)),
                        float(height) / (max(ys) - min(ys)))
            # TODO: This margin seems a bit arbitrary.
            # will need to investigate.
            scale *= 0.95  # extra margin for labels
            if scale > best_scale:
                best_scale = scale
                # Center the bounding box of the layout on the canvas.
                mid_x = width / 2 - ((max(xs) + min(xs)) / 2) * scale
                mid_y = height / 2 - ((max(ys) + min(ys)) / 2) * scale
                best_args = (scale, mid_x, mid_y, direction, angle)

        # Re-run the winning configuration so node x2/y2 reflect it.
        self.update_coordinates(*best_args)
        return best_scale

    def update_coordinates(self, s, x1, y1, a, da):
        """ Update x, y coordinates of tree nodes in canvas.

        `update_coordinates` will recursively updating the
        plotting parameters for all of the nodes within the tree.
        This can be applied when the tree becomes modified (i.e. pruning
        or collapsing) and the resulting coordinates need to be modified
        to reflect the changes to the tree structure.

        Parameters
        ----------
        s : float
            scaling
        x1 : float
            x midpoint
        y1 : float
            y midpoint
        a : float
            angle (degrees)
        da : float
            angle resolution (degrees)

        Returns
        -------
        points : list of tuple
            2D coordinates of all of the nodes.

        Notes
        -----
        This function has a little bit of recursion.  This will
        need to be refactored to remove the recursion.
        """
        # Constant angle algorithm.  Should add maximum daylight step.
        # This node's position is one scaled branch length away from the
        # parent position (x1, y1) in direction `a`.
        x2 = x1 + self.length * s * numpy.sin(a)
        y2 = y1 + self.length * s * numpy.cos(a)
        (self.x1, self.y1, self.x2, self.y2, self.angle) = (x1, y1, x2, y2, a)
        # TODO: Add functionality that allows for collapsing of nodes

        # Sweep back to the start of the angular wedge reserved for this
        # subtree, then hand each child a sub-wedge proportional to its
        # leaf count.
        a = a - self.leafcount * da / 2
        if self.is_tip():
            points = [(x2, y2)]
        else:
            points = []

            # TODO:
            # This function has a little bit of recursion.  This will
            # need to be refactored to remove the recursion.
            for child in self.children:
                # calculate the arc that covers the subtree.
                ca = child.leafcount * da
                points += child.update_coordinates(s, x2, y2, a + ca / 2, da)
                a += ca
        return points
# Per-axis scaling factors plus the total tree height, passed through the
# rooted-layout coordinate passes.
Dimensions = namedtuple('Dimensions', ['x', 'y', 'height'])
class RootedDendrogram(Dendrogram):
""" Stores data to be plotted as an rooted dendrogram.
A `RootedDendrogram` object is represents a tree in addition to the
key information required to create a radial tree layout prior to
visualization.
Parameters
----------
use_lengths: bool
Specifies if the branch lengths should be included in the
resulting visualization (default True).
Attributes
----------
length
leafcount
height
depth
"""
    def width_required(self):
        # One unit of vertical canvas space per leaf in this subtree.
        return self.leafcount

    @abc.abstractmethod
    def xcoords(self, scale, x1):
        # Subclasses compute this node's x position from the parent's x1.
        pass

    @abc.abstractmethod
    def ycoords(self, scale, y1):
        # Subclasses compute this node's y position from the parent's y1.
        pass

    def rescale(self, width, height):
        """ Update x, y coordinates of tree nodes in canvas.

        Parameters
        ----------
        width : float
            Width of the canvas.
        height : float
            Height of the canvas.

        Returns
        -------
        float
            The horizontal scaling factor (width divided by tree height).
        """
        xscale = width / self.height
        yscale = height / self.width_required()
        scale = Dimensions(xscale, yscale, self.height)

        # y coords done postorder, x preorder, y first.
        # so it has to be done in 2 passes.
        self.update_y_coordinates(scale)
        self.update_x_coordinates(scale)
        return xscale
def update_y_coordinates(self, scale, y1=None):
"""The second pass through the tree. Y coordinates only
depend on the shape of the tree and yscale.
Parameters
----------
scale : Dimensions
Scaled dimensions of the tree
x1 : int
X-coordinate of parent
"""
if y1 is None:
y1 = self.width_required() * scale.y
child_y = y1
| |
<filename>tests/app/submitter/test_convert_payload_0_0_1.py
from app.data_model.answer_store import AnswerStore
from app.questionnaire.location import Location
from app.questionnaire.questionnaire_schema import QuestionnaireSchema
from app.submitter.convert_payload_0_0_1 import convert_answers_to_payload_0_0_1
from app.submitter.converter import convert_answers
from tests.app.submitter.schema import make_schema
from tests.app.submitter.test_converter import TestConverter, create_answer
class TestConvertPayload001(TestConverter): # pylint: disable=too-many-public-methods
    def test_convert_answers_to_payload_0_0_1_with_key_error(self):
        # Only 'DEF' exists in both the answer store and the schema below;
        # 'ABC'/'GHI' (store-only) and 'LMN'/'JKL' (schema-only) have no
        # counterpart, so the payload must contain exactly the one matched
        # answer, keyed by its q_code '002'.
        with self._app.test_request_context():
            user_answer = [create_answer('ABC', '2016-01-01', group_id='group-1', block_id='block-1'),
                           create_answer('DEF', '2016-03-30', group_id='group-1', block_id='block-1'),
                           create_answer('GHI', '2016-05-30', group_id='group-1', block_id='block-1')]
            questionnaire = make_schema('0.0.1', 'section-1', 'group-1', 'block-1', [
                {
                    'id': 'question-1',
                    'answers': [
                        {
                            'id': 'LMN',
                            'type': 'TextField',
                            'q_code': '001'
                        },
                        {
                            'id': 'DEF',
                            'type': 'TextField',
                            'q_code': '002'
                        },
                        {
                            'id': 'JKL',
                            'type': 'TextField',
                            'q_code': '003'
                        }
                    ]
                }
            ])
            routing_path = [Location(group_id='group-1', group_instance=0, block_id='block-1')]
            answer_object = (convert_answers_to_payload_0_0_1(AnswerStore(user_answer), QuestionnaireSchema(questionnaire), routing_path))
            self.assertEqual(answer_object['002'], '2016-03-30')
            self.assertEqual(len(answer_object), 1)
    def test_answer_with_zero(self):
        # Integer 0 must survive conversion as the string '0' — it must not
        # be dropped for being falsy.
        with self._app.test_request_context():
            user_answer = [create_answer('GHI', 0, group_id='group-1', block_id='block-1')]
            questionnaire = make_schema('0.0.1', 'section-1', 'group-1', 'block-1', [
                {
                    'id': 'question-2',
                    'answers': [
                        {
                            'id': 'GHI',
                            'type': 'TextField',
                            'q_code': '003'
                        }
                    ]
                }
            ])
            routing_path = [Location(group_id='group-1', group_instance=0, block_id='block-1')]
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
            # Check the converter stringified the numeric answer correctly
            self.assertEqual('0', answer_object['data']['003'])

    def test_answer_with_float(self):
        # Floats are stringified without rounding or reformatting.
        with self._app.test_request_context():
            user_answer = [create_answer('GHI', 10.02, group_id='group-1', block_id='block-1')]
            questionnaire = make_schema('0.0.1', 'section-1', 'group-1', 'block-1', [
                {
                    'id': 'question-2',
                    'answers': [
                        {
                            'id': 'GHI',
                            'type': 'TextField',
                            'q_code': '003'
                        }
                    ]
                }
            ])
            routing_path = [Location(group_id='group-1', group_instance=0, block_id='block-1')]
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
            # Check the converter preserved the float's textual form
            self.assertEqual('10.02', answer_object['data']['003'])

    def test_answer_with_string(self):
        # String answers (including punctuation) pass through unchanged.
        with self._app.test_request_context():
            user_answer = [create_answer('GHI', 'String test + !', group_id='group-1', block_id='block-1')]
            questionnaire = make_schema('0.0.1', 'section-1', 'group-1', 'block-1', [
                {
                    'id': 'question-2',
                    'answers': [
                        {
                            'id': 'GHI',
                            'type': 'TextField',
                            'q_code': '003'
                        }
                    ]
                }
            ])
            routing_path = [Location(group_id='group-1', group_instance=0, block_id='block-1')]
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
            # Check the converter passed the string through verbatim
            self.assertEqual('String test + !', answer_object['data']['003'])
    def test_answer_with_multiple_instances(self):
        # Several instances of the same answer id collapse into one q_code
        # whose value is a list of stringified instance values, in order.
        with self._app.test_request_context():
            user_answer = [create_answer('GHI', 0, group_id='group-1', block_id='block-1'),
                           create_answer('GHI', value=1, answer_instance=1, group_id='group-1', block_id='block-1'),
                           create_answer('GHI', value=2, answer_instance=2, group_id='group-1', block_id='block-1')]
            questionnaire = make_schema('0.0.1', 'section-1', 'group-1', 'block-1', [
                {
                    'id': 'question-2',
                    'answers': [
                        {
                            'id': 'GHI',
                            'type': 'TextField',
                            'q_code': '003'
                        }
                    ]
                }
            ])
            routing_path = [Location(group_id='group-1', group_instance=0, block_id='block-1')]
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
            # Check the converter grouped instances into an ordered list
            self.assertEqual(answer_object['data']['003'], ['0', '1', '2'])

    def test_answer_without_qcode(self):
        # An answer whose schema entry has no q_code is omitted from the
        # payload entirely.
        with self._app.test_request_context():
            user_answer = [create_answer('GHI', 'String test + !', group_id='group-1', block_id='block-1')]
            questionnaire = make_schema('0.0.1', 'section-1', 'group-1', 'block-1', [
                {
                    'id': 'question-2',
                    'answers': [
                        {
                            'id': 'GHI',
                            'type': 'TextField'
                        }
                    ]
                }
            ])
            routing_path = [Location(group_id='group-1', group_instance=0, block_id='block-1')]
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
            self.assertEqual(len(answer_object['data']), 0)
    def test_get_checkbox_answer_with_duplicate_detail_answer_ids(self):
        # Two stored answers share the detail-answer id
        # 'other-answer-mandatory'; the converter must refuse to pick one
        # and raise instead.
        with self._app.test_request_context():
            routing_path = [Location(group_id='favourite-food', group_instance=0, block_id='crisps')]
            answers = [create_answer('crisps-answer', [
                'Ready salted',
                'Other'
            ], group_id='favourite-food', block_id='crisps')]
            answers += [create_answer('other-answer-mandatory', 'Other', group_id='favourite-food', block_id='crisps',
                                      group_instance=1)]
            answers += [create_answer('other-answer-mandatory', 'Other', group_id='favourite-food', block_id='crisps',
                                      group_instance=1)]
            questionnaire = make_schema('0.0.1', 'section-1', 'favourite-food', 'crisps', [
                {
                    'id': 'crisps-question',
                    'answers': [
                        {
                            'id': 'crisps-answer',
                            'type': 'Checkbox',
                            'options': [
                                {
                                    'label': 'Other',
                                    'q_code': '4',
                                    'description': 'Choose any other flavour',
                                    'value': 'Other',
                                    'detail_answer': {'id': 'other-answer-mandatory'}
                                }
                            ]
                        }
                    ]
                }
            ])
            with self.assertRaises(Exception) as err:
                convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(answers), routing_path)
            self.assertEqual('Multiple answers found for {}'.format('other-answer-mandatory'), str(err.exception))
    def test_converter_checkboxes_with_q_codes(self):
        # Each ticked checkbox option is emitted under that option's own
        # q_code with the option's value; unticked options are omitted.
        with self._app.test_request_context():
            routing_path = [Location(group_id='favourite-food', group_instance=0, block_id='crisps')]
            answers = [create_answer('crisps-answer', [
                'Ready salted',
                'Sweet chilli'
            ], group_id='favourite-food', block_id='crisps')]
            questionnaire = make_schema('0.0.1', 'section-1', 'favourite-food', 'crisps', [
                {
                    'id': 'crisps-question',
                    'answers': [
                        {
                            'id': 'crisps-answer',
                            'type': 'Checkbox',
                            'options': [
                                {
                                    'label': 'Ready salted',
                                    'value': 'Ready salted',
                                    'q_code': '1'
                                },
                                {
                                    'label': 'Sweet chilli',
                                    'value': 'Sweet chilli',
                                    'q_code': '2'
                                },
                                {
                                    'label': 'Cheese and onion',
                                    'value': 'Cheese and onion',
                                    'q_code': '3'
                                },
                                {
                                    'label': 'Other',
                                    'q_code': '4',
                                    'description': 'Choose any other flavour',
                                    'value': 'Other',
                                    'detail_answer': {
                                        'mandatory': True,
                                        'id': 'other-answer-mandatory',
                                        'label': 'Please specify other',
                                        'type': 'TextField'
                                    }
                                }
                            ]
                        }
                    ]
                }
            ])
            # When
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(answers), routing_path)
            # Then
            self.assertEqual(len(answer_object['data']), 2)
            self.assertEqual(answer_object['data']['1'], 'Ready salted')
            self.assertEqual(answer_object['data']['2'], 'Sweet chilli')
    def test_converter_checkboxes_with_q_codes_and_other_value(self):
        # When 'Other' is ticked and its detail answer is filled in, the
        # detail text ('Bacon') replaces the literal 'Other' under q_code 4.
        with self._app.test_request_context():
            routing_path = [Location(group_id='favourite-food', group_instance=0, block_id='crisps')]
            answers = [create_answer('crisps-answer', [
                'Ready salted',
                'Other'
            ], group_id='favourite-food', block_id='crisps')]
            answers += [create_answer('other-answer-mandatory', 'Bacon', group_id='favourite-food', block_id='crisps')]
            questionnaire = make_schema('0.0.1', 'section-1', 'favourite-food', 'crisps', [
                {
                    'id': 'crisps-question',
                    'answers': [
                        {
                            'id': 'crisps-answer',
                            'type': 'Checkbox',
                            'options': [
                                {
                                    'label': 'Ready salted',
                                    'value': 'Ready salted',
                                    'q_code': '1'
                                },
                                {
                                    'label': 'Sweet chilli',
                                    'value': 'Sweet chilli',
                                    'q_code': '2'
                                },
                                {
                                    'label': 'Cheese and onion',
                                    'value': 'Cheese and onion',
                                    'q_code': '3'
                                },
                                {
                                    'label': 'Other',
                                    'q_code': '4',
                                    'description': 'Choose any other flavour',
                                    'value': 'Other',
                                    'detail_answer': {
                                        'mandatory': True,
                                        'id': 'other-answer-mandatory',
                                        'label': 'Please specify other',
                                        'type': 'TextField'
                                    }
                                }
                            ]
                        }
                    ]
                }
            ])
            # When
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(answers), routing_path)
            # Then
            self.assertEqual(len(answer_object['data']), 2)
            self.assertEqual(answer_object['data']['1'], 'Ready salted')
            self.assertEqual(answer_object['data']['4'], 'Bacon')
    def test_converter_checkboxes_with_q_codes_and_empty_other_value(self):
        # When 'Other' is ticked but its detail answer is an empty string,
        # the payload falls back to the literal option value 'Other'.
        with self._app.test_request_context():
            routing_path = [Location(group_id='favourite-food', group_instance=0, block_id='crisps')]
            answers = [create_answer('crisps-answer', [
                'Ready salted',
                'Other'
            ], group_id='favourite-food', block_id='crisps')]
            answers += [create_answer('other-answer-mandatory', '', group_id='favourite-food', block_id='crisps')]
            questionnaire = make_schema('0.0.1', 'section-1', 'favourite-food', 'crisps', [
                {
                    'id': 'crisps-question',
                    'answers': [
                        {
                            'id': 'crisps-answer',
                            'type': 'Checkbox',
                            'options': [
                                {
                                    'label': 'Ready salted',
                                    'value': 'Ready salted',
                                    'q_code': '1'
                                },
                                {
                                    'label': 'Sweet chilli',
                                    'value': 'Sweet chilli',
                                    'q_code': '2'
                                },
                                {
                                    'label': 'Cheese and onion',
                                    'value': 'Cheese and onion',
                                    'q_code': '3'
                                },
                                {
                                    'label': 'Other',
                                    'q_code': '4',
                                    'description': 'Choose any other flavour',
                                    'value': 'Other',
                                    'detail_answer': {
                                        'mandatory': True,
                                        'id': 'other-answer-mandatory',
                                        'label': 'Please specify other',
                                        'type': 'TextField'
                                    }
                                }
                            ]
                        }
                    ]
                }
            ])
            # When
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(answers), routing_path)
            # Then
            self.assertEqual(len(answer_object['data']), 2)
            self.assertEqual(answer_object['data']['1'], 'Ready salted')
            self.assertEqual(answer_object['data']['4'], 'Other')
    def test_converter_q_codes_for_empty_strings(self):
        # An empty-string answer is excluded from the payload; only the
        # non-empty sibling answer's q_code appears.
        with self._app.test_request_context():
            routing_path = [Location(group_id='favourite-food', group_instance=0, block_id='crisps')]
            answers = [create_answer('crisps-answer', '', group_id='favourite-food', block_id='crisps')]
            answers += [
                create_answer('other-crisps-answer', 'Ready salted', group_id='favourite-food', block_id='crisps')]
            questionnaire = make_schema('0.0.1', 'section-1', 'favourite-food', 'crisps', [
                {
                    'id': 'crisps-question',
                    'answers': [
                        {
                            'id': 'crisps-answer',
                            'type': 'TextArea',
                            'options': [],
                            'q_code': '1'
                        },
                        {
                            'id': 'other-crisps-answer',
                            'type': 'TextArea',
                            'options': [],
                            'q_code': '2'
                        }
                    ]
                }
            ])
            # When
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(answers), routing_path)
            # Then
            self.assertEqual(len(answer_object['data']), 1)
            self.assertEqual(answer_object['data']['2'], 'Ready salted')
    def test_radio_answer(self):
        # A Radio answer is emitted under the answer-level q_code with the
        # selected option's value.
        # NOTE(review): the schema is built with group 'radio-block' while
        # the routing path uses group_id 'radio-group' — confirm the
        # mismatch is intentional / harmless for this converter version.
        with self._app.test_request_context():
            routing_path = [Location(group_id='radio-group', group_instance=0, block_id='radio-block')]
            user_answer = [create_answer('radio-answer', 'Coffee', group_id='radio-group', block_id='radio-block')]
            questionnaire = make_schema('0.0.1', 'section-1', 'radio-block', 'radio-block', [
                {
                    'id': 'radio-question',
                    'answers': [
                        {
                            'type': 'Radio',
                            'id': 'radio-answer',
                            'q_code': '1',
                            'options': [
                                {
                                    'label': 'Coffee',
                                    'value': 'Coffee'
                                },
                                {
                                    'label': 'Tea',
                                    'value': 'Tea'
                                }
                            ]
                        }
                    ]
                }
            ])
            # When
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
            # Then
            self.assertEqual(len(answer_object['data']), 1)
            self.assertEqual(answer_object['data']['1'], 'Coffee')

    def test_number_answer(self):
        # Number answers are stringified exactly (no rounding of 0.9999).
        # NOTE(review): the stored answer uses group_id 'number-block' but
        # the routing path uses 'number-group' — confirm intentional.
        with self._app.test_request_context():
            routing_path = [Location(group_id='number-group', group_instance=0, block_id='number-block')]
            user_answer = [create_answer('number-answer', 0.9999, group_id='number-block', block_id='number-block')]
            questionnaire = make_schema('0.0.1', 'section-1', 'number-block', 'number-block', [
                {
                    'id': 'number-question',
                    'answers': [
                        {
                            'id': 'number-answer',
                            'type': 'Number',
                            'q_code': '1'
                        }
                    ]
                }
            ])
            # When
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
            # Then
            self.assertEqual(len(answer_object['data']), 1)
            self.assertEqual(answer_object['data']['1'], '0.9999')

    def test_percentage_answer(self):
        # Percentage answers are stringified like any other number.
        with self._app.test_request_context():
            routing_path = [Location(group_id='percentage-group', group_instance=0, block_id='percentage-block')]
            user_answer = [create_answer('percentage-answer', 100, group_id='percentage-group', block_id='percentage-block')]
            questionnaire = make_schema('0.0.1', 'section-1', 'percentage-block', 'percentage-block', [
                {
                    'id': 'percentage-question',
                    'answers': [
                        {
                            'id': 'percentage-answer',
                            'type': 'Percentage',
                            'q_code': '1'
                        }
                    ]
                }
            ])
            # When
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
            # Then
            self.assertEqual(len(answer_object['data']), 1)
            self.assertEqual(answer_object['data']['1'], '100')
    def test_textarea_answer(self):
        # TextArea answers pass through verbatim under their q_code.
        with self._app.test_request_context():
            routing_path = [Location(group_id='textarea-group', group_instance=0, block_id='textarea-block')]
            user_answer = [create_answer('textarea-answer', 'example text.', group_id='textarea-group', block_id='textarea-block')]
            questionnaire = make_schema('0.0.1', 'section-1', 'textarea-block', 'textarea-block', [
                {
                    'id': 'textarea-question',
                    'answers': [
                        {
                            'id': 'textarea-answer',
                            'q_code': '1',
                            'type': 'TextArea'
                        }
                    ]
                }
            ])
            # When
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
            # Then
            self.assertEqual(len(answer_object['data']), 1)
            self.assertEqual(answer_object['data']['1'], 'example text.')

    def test_currency_answer(self):
        # Currency answers are stringified with their decimal part intact.
        with self._app.test_request_context():
            routing_path = [Location(group_id='currency-group', group_instance=0, block_id='currency-block')]
            user_answer = [create_answer('currency-answer', 99.99, group_id='currency-group', block_id='currency-block')]
            questionnaire = make_schema('0.0.1', 'section-1', 'currency-block', 'currency-block', [
                {
                    'id': 'currency-question',
                    'answers': [
                        {
                            'id': 'currency-answer',
                            'type': 'Currency',
                            'q_code': '1'
                        }
                    ]
                }
            ])
            # When
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
            # Then
            self.assertEqual(len(answer_object['data']), 1)
            self.assertEqual(answer_object['data']['1'], '99.99')
    def test_dropdown_answer(self):
        # A Dropdown answer is emitted under the answer-level q_code with
        # the selected option's value.
        with self._app.test_request_context():
            routing_path = [Location(group_id='dropdown-group', group_instance=0, block_id='dropdown-block')]
            user_answer = [create_answer('dropdown-answer', 'Liverpool', group_id='dropdown-group', block_id='dropdown-block')]
            questionnaire = make_schema('0.0.1', 'section-1', 'dropdown-block', 'dropdown-block', [
                {
                    'id': 'dropdown-question',
                    'answers': [
                        {
                            'id': 'dropdown-answer',
                            'type': 'Dropdown',
                            'q_code': '1',
                            'options': [
                                {
                                    'label': 'Liverpool',
                                    'value': 'Liverpool'
                                },
                                {
                                    'label': 'Chelsea',
                                    'value': 'Chelsea'
                                },
                                {
                                    'label': 'Rugby is better!',
                                    'value': 'Rugby is better!'
                                }
                            ]
                        }
                    ]
                }
            ])
            # When
            answer_object = convert_answers(self.metadata, self.collection_metadata, QuestionnaireSchema(questionnaire), AnswerStore(user_answer), routing_path)
            # Then
            self.assertEqual(len(answer_object['data']), 1)
            self.assertEqual(answer_object['data']['1'], 'Liverpool')
def test_date_answer(self):
with self._app.test_request_context():
routing_path = [Location(group_id='date-group', group_instance=0, block_id='date-block')]
user_answer = [create_answer('single-date-answer', '1990-02-01', group_id='date-group', block_id='date-block'),
| |
SIGN TU': None,
'OLD PERSIAN SIGN U': None,
'OLD PERSIAN SIGN VA': None,
'OLD PERSIAN SIGN VI': None,
'OLD PERSIAN SIGN XA': None,
'OLD PERSIAN SIGN XSHAAYATHIYA': None,
'OLD PERSIAN SIGN YA': None,
'OLD PERSIAN SIGN ZA': None,
'OLD PERSIAN WORD DIVIDER': None,
'OLD SOUTH ARABIAN LETTER ALEF': None,
'OLD SOUTH ARABIAN LETTER AYN': None,
'OLD SOUTH ARABIAN LETTER BETH': None,
'OLD SOUTH ARABIAN LETTER DALETH': None,
'OLD SOUTH ARABIAN LETTER DHADHE': None,
'OLD SOUTH ARABIAN LETTER DHALETH': None,
'OLD SOUTH ARABIAN LETTER FE': None,
'OLD SOUTH ARABIAN LETTER GHAYN': None,
'OLD SOUTH ARABIAN LETTER GIMEL': None,
'OLD SOUTH ARABIAN LETTER HE': None,
'OLD SOUTH ARABIAN LETTER HETH': None,
'OLD SOUTH ARABIAN LETTER KAPH': None,
'OLD SOUTH ARABIAN LETTER KHETH': None,
'OLD SOUTH ARABIAN LETTER LAMEDH': None,
'OLD SOUTH ARABIAN LETTER MEM': None,
'OLD SOUTH ARABIAN LETTER NUN': None,
'OLD SOUTH ARABIAN LETTER QOPH': None,
'OLD SOUTH ARABIAN LETTER RESH': None,
'OLD SOUTH ARABIAN LETTER SADHE': None,
'OLD SOUTH ARABIAN LETTER SAMEKH': None,
'OLD SOUTH ARABIAN LETTER SAT': None,
'OLD SOUTH ARABIAN LETTER SHIN': None,
'OLD SOUTH ARABIAN LETTER TAW': None,
'OLD SOUTH ARABIAN LETTER TETH': None,
'OLD SOUTH ARABIAN LETTER THAW': None,
'OLD SOUTH ARABIAN LETTER THETH': None,
'OLD SOUTH ARABIAN LETTER WAW': None,
'OLD SOUTH ARABIAN LETTER YODH': None,
'OLD SOUTH ARABIAN LETTER ZAYN': None,
'OLD SOUTH ARABIAN NUMBER FIFTY': None,
'OLD SOUTH ARABIAN NUMBER ONE': None,
'OLD SOUTH ARABIAN NUMERIC INDICATOR': None,
'OLD TURKIC LETTER ORKHON A': None,
'OLD TURKIC LETTER ORKHON AB': None,
'OLD TURKIC LETTER ORKHON AD': None,
'OLD TURKIC LETTER ORKHON AEB': None,
'OLD TURKIC LETTER ORKHON AED': None,
'OLD TURKIC LETTER ORKHON AEG': None,
'OLD TURKIC LETTER ORKHON AEK': None,
'OLD TURKIC LETTER ORKHON AEL': None,
'OLD TURKIC LETTER ORKHON AEN': None,
'OLD TURKIC LETTER ORKHON AER': None,
'OLD TURKIC LETTER ORKHON AES': None,
'OLD TURKIC LETTER ORKHON AET': None,
'OLD TURKIC LETTER ORKHON AEY': None,
'OLD TURKIC LETTER ORKHON AG': None,
'OLD TURKIC LETTER ORKHON AL': None,
'OLD TURKIC LETTER ORKHON AN': None,
'OLD TURKIC LETTER ORKHON AQ': None,
'OLD TURKIC LETTER ORKHON AR': None,
'OLD TURKIC LETTER ORKHON AS': None,
'OLD TURKIC LETTER ORKHON ASH': None,
'OLD TURKIC LETTER ORKHON AT': None,
'OLD TURKIC LETTER ORKHON AY': None,
'OLD TURKIC LETTER ORKHON BASH': None,
'OLD TURKIC LETTER ORKHON EC': None,
'OLD TURKIC LETTER ORKHON ELT': None,
'OLD TURKIC LETTER ORKHON EM': None,
'OLD TURKIC LETTER ORKHON ENC': None,
'OLD TURKIC LETTER ORKHON ENG': None,
'OLD TURKIC LETTER ORKHON ENT': None,
'OLD TURKIC LETTER ORKHON ENY': None,
'OLD TURKIC LETTER ORKHON EP': None,
'OLD TURKIC LETTER ORKHON ESH': None,
'OLD TURKIC LETTER ORKHON EZ': None,
'OLD TURKIC LETTER ORKHON I': None,
'OLD TURKIC LETTER ORKHON IC': None,
'OLD TURKIC LETTER ORKHON IQ': None,
'OLD TURKIC LETTER ORKHON O': None,
'OLD TURKIC LETTER ORKHON OE': None,
'OLD TURKIC LETTER ORKHON OEK': None,
'OLD TURKIC LETTER ORKHON OP': None,
'OLD TURKIC LETTER ORKHON OQ': None,
'OLD TURKIC LETTER ORKHON OT': None,
'OLD TURKIC LETTER YENISEI A': None,
'OLD TURKIC LETTER YENISEI AB': None,
'OLD TURKIC LETTER YENISEI AD': None,
'OLD TURKIC LETTER YENISEI AE': None,
'OLD TURKIC LETTER YENISEI AEB': None,
'OLD TURKIC LETTER YENISEI AEG': None,
'OLD TURKIC LETTER YENISEI AEK': None,
'OLD TURKIC LETTER YENISEI AEN': None,
'OLD TURKIC LETTER YENISEI AENG': None,
'OLD TURKIC LETTER YENISEI AET': None,
'OLD TURKIC LETTER YENISEI AEY': None,
'OLD TURKIC LETTER YENISEI AG': None,
'OLD TURKIC LETTER YENISEI AL': None,
'OLD TURKIC LETTER YENISEI ANG': None,
'OLD TURKIC LETTER YENISEI AQ': None,
'OLD TURKIC LETTER YENISEI AR': None,
'OLD TURKIC LETTER YENISEI ASH': None,
'OLD TURKIC LETTER YENISEI AT': None,
'OLD TURKIC LETTER YENISEI AY': None,
'OLD TURKIC LETTER YENISEI E': None,
'OLD TURKIC LETTER YENISEI EC': None,
'OLD TURKIC LETTER YENISEI ENC': None,
'OLD TURKIC LETTER YENISEI ENT': None,
'OLD TURKIC LETTER YENISEI ENY': None,
'OLD TURKIC LETTER YENISEI ESH': None,
'OLD TURKIC LETTER YENISEI EZ': None,
'OLD TURKIC LETTER YENISEI I': None,
'OLD TURKIC LETTER YENISEI IQ': None,
'OLD TURKIC LETTER YENISEI OE': None,
'OLD TURKIC LETTER YENISEI OEK': None,
'OLD TURKIC LETTER YENISEI OQ': None,
'ONE DOT OVER TWO DOTS PUNCTUATION': None,
'OPEN SUBSET': None,
'OPEN SUPERSET': None,
'OR WITH DOT INSIDE': None,
'ORIYA LETTER VA': None,
'ORIYA LETTER WA': None,
'ORIYA VOWEL SIGN VOCALIC L': None,
'ORIYA VOWEL SIGN VOCALIC LL': None,
'ORIYA VOWEL SIGN VOCALIC RR': None,
'OSMANYA DIGIT EIGHT': None,
'OSMANYA DIGIT FIVE': None,
'OSMANYA DIGIT FOUR': None,
'OSMANYA DIGIT NINE': None,
'OSMANYA DIGIT ONE': None,
'OSMANYA DIGIT SEVEN': None,
'OSMANYA DIGIT SIX': None,
'OSMANYA DIGIT THREE': None,
'OSMANYA DIGIT TWO': None,
'OSMANYA DIGIT ZERO': None,
'OSMANYA LETTER A': None,
'OSMANYA LETTER AA': None,
'OSMANYA LETTER ALEF': None,
'OSMANYA LETTER BA': None,
'OSMANYA LETTER CAYN': None,
'OSMANYA LETTER DEEL': None,
'OSMANYA LETTER DHA': None,
'OSMANYA LETTER E': None,
'OSMANYA LETTER EE': None,
'OSMANYA LETTER FA': None,
'OSMANYA LETTER GA': None,
'OSMANYA LETTER HA': None,
'OSMANYA LETTER I': None,
'OSMANYA LETTER JA': None,
'OSMANYA LETTER KAAF': None,
'OSMANYA LETTER KHA': None,
'OSMANYA LETTER LAAN': None,
'OSMANYA LETTER MIIN': None,
'OSMANYA LETTER NUUN': None,
'OSMANYA LETTER O': None,
'OSMANYA LETTER OO': None,
'OSMANYA LETTER QAAF': None,
'OSMANYA LETTER RA': None,
'OSMANYA LETTER SA': None,
'OSMANYA LETTER SHIIN': None,
'OSMANYA LETTER TA': None,
'OSMANYA LETTER U': None,
'OSMANYA LETTER WAW': None,
'OSMANYA LETTER XA': None,
'OSMANYA LETTER YA': None,
'OUTLINED WHITE STAR': None,
'PALLAS': None,
'PALM BRANCH': None,
'PARAGRAPHOS': None,
'PARENTHESIZED KOREAN CHARACTER O HU': None,
'PARENTHESIZED KOREAN CHARACTER OJEON': None,
'PARENTHESIZED LATIN CAPITAL LETTER A': None,
'PARENTHESIZED LATIN CAPITAL LETTER B': None,
'PARENTHESIZED LATIN CAPITAL LETTER C': None,
'PARENTHESIZED LATIN CAPITAL LETTER D': None,
'PARENTHESIZED LATIN CAPITAL LETTER E': None,
'PARENTHESIZED LATIN CAPITAL LETTER F': None,
'PARENTHESIZED LATIN CAPITAL LETTER G': None,
'PARENTHESIZED LATIN CAPITAL LETTER H': None,
'PARENTHESIZED LATIN CAPITAL LETTER I': None,
'PARENTHESIZED LATIN CAPITAL LETTER J': None,
'PARENTHESIZED LATIN CAPITAL LETTER K': None,
'PARENTHESIZED LATIN CAPITAL LETTER L': None,
'PARENTHESIZED LATIN CAPITAL LETTER M': None,
'PARENTHESIZED LATIN CAPITAL LETTER N': None,
'PARENTHESIZED LATIN CAPITAL LETTER O': None,
'PARENTHESIZED LATIN CAPITAL LETTER P': None,
'PARENTHESIZED LATIN CAPITAL LETTER Q': None,
'PARENTHESIZED LATIN CAPITAL LETTER R': None,
'PARENTHESIZED LATIN CAPITAL LETTER S': None,
'PARENTHESIZED LATIN CAPITAL LETTER T': None,
'PARENTHESIZED LATIN CAPITAL LETTER U': None,
'PARENTHESIZED LATIN CAPITAL LETTER V': None,
'PARENTHESIZED LATIN CAPITAL LETTER W': None,
'PARENTHESIZED LATIN CAPITAL LETTER X': None,
'PARENTHESIZED LATIN CAPITAL LETTER Y': None,
'PARENTHESIZED LATIN CAPITAL LETTER Z': None,
'PARTNERSHIP SIGN': None,
'PER SIGN': None,
'PERMANENT PAPER SIGN': None,
'PERPENDICULAR': None,
'PERSON WITH BALL': None,
'PHAGS-PA DOUBLE HEAD MARK': None,
'PHAGS-PA LETTER A': None,
'PHAGS-PA LETTER ALTERNATE YA': None,
'PHAGS-PA LETTER ASPIRATED FA': None,
'PHAGS-PA LETTER BA': None,
'PHAGS-PA LETTER CA': None,
'PHAGS-PA LETTER CANDRABINDU': None,
'PHAGS-PA LETTER CHA': None,
'PHAGS-PA LETTER DA': None,
'PHAGS-PA LETTER DDA': None,
'PHAGS-PA LETTER DZA': None,
'PHAGS-PA LETTER E': None,
'PHAGS-PA LETTER EE': None,
'PHAGS-PA LETTER FA': None,
'PHAGS-PA LETTER GA': None,
'PHAGS-PA LETTER GGA': None,
'PHAGS-PA LETTER HA': None,
'PHAGS-PA LETTER I': None,
'PHAGS-PA LETTER JA': None,
'PHAGS-PA LETTER KA': None,
'PHAGS-PA LETTER KHA': None,
'PHAGS-PA LETTER LA': None,
'PHAGS-PA LETTER MA': None,
'PHAGS-PA LETTER NA': None,
'PHAGS-PA LETTER NGA': None,
'PHAGS-PA LETTER NNA': None,
'PHAGS-PA LETTER NYA': None,
'PHAGS-PA LETTER O': None,
'PHAGS-PA LETTER PA': None,
'PHAGS-PA LETTER PHA': None,
'PHAGS-PA LETTER QA': None,
'PHAGS-PA LETTER RA': None,
'PHAGS-PA LETTER SA': None,
'PHAGS-PA LETTER SHA': None,
'PHAGS-PA LETTER SMALL A': None,
'PHAGS-PA LETTER TA': None,
'PHAGS-PA LETTER THA': None,
'PHAGS-PA LETTER TSA': None,
'PHAGS-PA LETTER TSHA': None,
'PHAGS-PA LETTER TTA': None,
'PHAGS-PA LETTER TTHA': None,
'PHAGS-PA LETTER U': None,
'PHAGS-PA LETTER VOICED HA': None,
'PHAGS-PA LETTER VOICELESS SHA': None,
'PHAGS-PA LETTER WA': None,
'PHAGS-PA LETTER XA': None,
'PHAGS-PA LETTER YA': None,
'PHAGS-PA LETTER ZA': None,
'PHAGS-PA LETTER ZHA': None,
'PHAGS-PA MARK DOUBLE SHAD': None,
'PHAGS-PA MARK SHAD': None,
'PHAGS-PA SINGLE HEAD MARK': None,
'PHAGS-PA SUBJOINED LETTER RA': None,
'PHAGS-PA SUBJOINED LETTER WA': None,
'PHAGS-PA SUBJOINED LETTER YA': None,
'PHAGS-PA SUPERFIXED LETTER RA': None,
'PHAISTOS DISC SIGN ARROW': None,
'PHAISTOS DISC SIGN BEE': None,
'PHAISTOS DISC SIGN BEEHIVE': None,
'PHAISTOS DISC SIGN BOOMERANG': None,
'PHAISTOS DISC SIGN BOW': None,
'PHAISTOS DISC SIGN BULLS LEG': None,
'PHAISTOS DISC SIGN CAPTIVE': None,
'PHAISTOS DISC SIGN CARPENTRY PLANE': None,
'PHAISTOS DISC SIGN CAT': None,
'PHAISTOS DISC SIGN CHILD': None,
'PHAISTOS DISC SIGN CLUB': None,
'PHAISTOS DISC SIGN COLUMN': None,
'PHAISTOS DISC SIGN COMB': None,
'PHAISTOS DISC SIGN COMBINING OBLIQUE STROKE': None,
'PHAISTOS DISC SIGN DOLIUM': None,
'PHAISTOS DISC SIGN DOVE': None,
'PHAISTOS DISC SIGN EAGLE': None,
'PHAISTOS DISC SIGN FLUTE': None,
'PHAISTOS DISC SIGN GAUNTLET': None,
'PHAISTOS DISC SIGN GRATER': None,
'PHAISTOS DISC SIGN HELMET': None,
'PHAISTOS DISC SIGN HIDE': None,
'PHAISTOS DISC SIGN HORN': None,
'PHAISTOS DISC SIGN LID': None,
'PHAISTOS DISC SIGN LILY': None,
'PHAISTOS DISC SIGN MANACLES': None,
'PHAISTOS DISC SIGN MATTOCK': None,
'PHAISTOS DISC SIGN OX BACK': None,
'PHAISTOS DISC SIGN PAPYRUS': None,
'PHAISTOS DISC SIGN PEDESTRIAN': None,
'PHAISTOS DISC SIGN PLANE TREE': None,
'PHAISTOS DISC SIGN PLUMED HEAD': None,
'PHAISTOS DISC SIGN RAM': None,
'PHAISTOS DISC SIGN ROSETTE': None,
'PHAISTOS DISC SIGN SAW': None,
'PHAISTOS DISC SIGN SHIELD': None,
'PHAISTOS DISC SIGN SHIP': None,
'PHAISTOS DISC SIGN SLING': None,
'PHAISTOS DISC SIGN SMALL AXE': None,
'PHAISTOS DISC SIGN STRAINER': None,
'PHAISTOS DISC SIGN TATTOOED HEAD': None,
'PHAISTOS DISC SIGN TIARA': None,
'PHAISTOS DISC SIGN TUNNY': None,
'PHAISTOS DISC SIGN VINE': None,
'PHAISTOS DISC SIGN WAVY BAND': None,
'PHAISTOS DISC SIGN WOMAN': None,
'PHOENICIAN LETTER AIN': None,
'PHOENICIAN LETTER ALF': None,
'PHOENICIAN LETTER BET': None,
'PHOENICIAN LETTER DELT': None,
'PHOENICIAN LETTER GAML': None,
'PHOENICIAN LETTER HE': None,
'PHOENICIAN LETTER HET': None,
'PHOENICIAN LETTER KAF': None,
'PHOENICIAN LETTER LAMD': None,
'PHOENICIAN LETTER MEM': None,
'PHOENICIAN LETTER NUN': None,
'PHOENICIAN LETTER | |
import re
from collections import namedtuple
from copy import copy
from difflib import SequenceMatcher
from pprint import pformat
from bs4 import BeautifulSoup
from bs4 import NavigableString
from bs4 import Tag
# Module-level logger; None until restore_refs() injects one via `logger_`.
logger = None
def restore_refs(old_content: str,
                 new_content: str,
                 resolved_ids: list,
                 logger_,
                 resolve_changed: bool = False):
    '''
    Carry the inline comments found in `old_content` over into `new_content`
    and return the resulting HTML string.

    When `resolve_changed` is False, comments are only restored in text that
    was not changed between the two versions.
    '''
    # Make the injected logger available to every helper in this module.
    global logger
    logger = logger_

    old_tree = BeautifulSoup(old_content, 'html.parser')
    new_tree = BeautifulSoup(new_content, 'html.parser')

    if is_empty(new_tree):
        logger.debug('New content is empty, all inline comments will be omitted.')
        return new_content

    # Nested (resolved) comments are stripped before collecting comment info.
    remove_outline_resolved(old_tree)
    ref_dict = generate_ref_dict(old_tree)

    old_strings = [s for s in old_tree.strings if s.strip()]
    new_strings = [s for s in new_tree.strings if s.strip()]

    # Match old commented strings to their locations in the new content,
    # drop locations inside macros, and split into unchanged/changed groups.
    places = find_place2(old_strings, new_strings, ref_dict)
    correct_places(places, new_strings)
    equal, not_equal = divide_places(places)

    restore_equal_refs(equal, new_strings)
    if not resolve_changed:
        insert_unequal_refs(not_equal, new_strings, resolved_ids)

    return str(new_tree)
def is_empty(soup):
    '''Return True if `soup` contains no visible text (whitespace-only counts as empty).'''
    # any() short-circuits on the first non-whitespace string, exactly like
    # the previous explicit loop, but in one idiomatic expression.
    return not any(s.strip() for s in soup.strings)
def remove_outline_resolved(bs: BeautifulSoup):
    """
    Remove from bs object all inline comments which have nested comments inside
    them. These may be only resolved comments, and they cause a lot of trouble.
    In place.
    """
    logger.debug('remove_outline_resolved START')
    # basic_unwrap() mutates the tree, which invalidates the `comments` list,
    # so after every removal the scan restarts from a fresh find_all().
    while True:
        restart = False
        comments = bs.find_all(re.compile('ac:inline-comment-marker'))
        for comment in comments:
            for child in comment.children:
                if child.name == 'ac:inline-comment-marker':
                    # A comment marker wrapping another marker: unwrap the
                    # outer one and start the whole scan over.
                    logger.debug(f'Comment has nested comments, removing: \n{comment}')
                    basic_unwrap(comment)
                    restart = True
                    break
            if restart:
                break
        else:
            # A full pass found no nested comments: the tree is clean.
            logger.debug('remove_outline_resolved END')
            return
def basic_unwrap(element):
    """
    Unwrap element from its tag in place. Concatenate adjacent NavigableStrings
    which may have appeared after unwrapping:
    <b>'One '<to_unwrap>' Two '</to_unwrap>' Three'</b>
    <b>'One '' Two '' Three'</b>
    <b>'One Two Three'</b>
    """
    parent = element.parent
    element.unwrap()
    # Collect runs of adjacent NavigableStrings among the parent's children;
    # each sublist of `groupped` is one run of consecutive strings.
    groupped = []
    accumulate = False
    for el in parent.contents:
        if isinstance(el, NavigableString):
            if accumulate:
                groupped[-1].append(el)
            else:
                groupped.append([el])
                accumulate = True
        else:
            # A tag breaks the run.
            accumulate = False
    # Only runs of two or more strings need merging.
    groupped = [g for g in groupped if len(g) > 1]
    for g in groupped:
        # Replace the first string of the run with the full concatenation,
        # then remove the now-redundant remainder from the tree.
        g[0].replace_with(''.join(g))
        g.pop(0)
        for i in g:
            i.extract()
def generate_ref_dict(bs: BeautifulSoup) -> dict:
    '''
    Collect information about every inline comment in the given tree.

    Returns a dictionary keyed by the python id() of the NavigableString that
    now holds the commented text (one of BeautifulSoup.strings). Each value is
    an {info_dict}:

    {
        'full':    the full unwrapped NavigableString (part of the tree),
        'before':  NavigableString preceding the comment up to the next tag /
                   parent edge, OR a nested {info_dict} when several comments
                   shared one paragraph,
        'comment': the unwrapped inline-comment tag, commented text included,
        'after':   NavigableString following the comment up to the next tag /
                   parent edge,
        'ref_id':  the comment id from the 'ac:ref' attribute,
    }
    '''
    logger.debug('generate_ref_dict START')
    logger.debug('Collecting comments from the old article (remote)')
    result = {}
    for ref in bs.find_all(re.compile('ac:inline-comment-marker')):
        # Read the id before unwrapping mutates the tree.
        ref_id = ref.attrs['ac:ref']
        try:
            full, (before, comment, after) = unwrap(ref)
        except RuntimeError:
            logger.debug("Inline comment tag has other tags inside. We can't"
                         f" process such yet, skipping:\n{ref}")
            continue
        info = {
            'full': full,
            'ref_id': ref_id,
            'before': before,
            'comment': comment,
            'after': after,
        }
        # If the 'before' string was itself a commented string collected
        # earlier, absorb its info dict instead of keeping the raw string.
        if info['before'] and id(info['before']) in result:
            info['before'] = result.pop(id(info['before']))
        result[id(info['full'])] = info
    logger.debug(f'Collected comments:\n\n{pformat(result)}')
    logger.debug('generate_ref_dict END')
    return result
def unwrap(element):
    '''
    Unwrap an element from a tag in place. The tag must only contain one string inside.
    The string will be connected to text before and after tag.
    Function returns two elements:
    full_string, (before, element, after)
    - full_string — a full NavigableString, which replaced the tag and the text before/after;
    - A tuple of three elements:
      - before — original NavigableString, that was before the tag or None if there wasn't any.
      - element — original tag itself.
      - after — original NavigableString, that was after the tag or None if there wasn't any.

    Raises RuntimeError when the tag wraps more than one child or a non-string child.
    '''
    before = after = None
    children = list(element.children)
    # Only tags wrapping at most a single NavigableString are supported.
    if len(children) > 1:
        raise RuntimeError('Tag should wrap just one string')
    if len(children) == 1 and not isinstance(children[0], NavigableString):
        raise RuntimeError('Tag should include only string')
    content = element.text
    # Pull the plain-text siblings out of the tree (extract) and fold them
    # into the replacement string, keeping the originals for the caller.
    if isinstance(element.previous_sibling, NavigableString):
        before = element.previous_sibling.extract()
        content = before + content
    if isinstance(element.next_sibling, NavigableString):
        after = element.next_sibling.extract()
        content = content + after
    # Swap the tag (and its absorbed neighbours) for one flat string.
    ns = NavigableString(content)
    element.replace_with(ns)
    return ns, (before, element, after)
def find_place2(old_strings, new_strings: list, ref_dict: dict) -> dict:
    '''
    Compare `old_strings` and `new_strings`.
    For each element of ref_dict: Find strings in `new_strings` which correspond
    to the commented string, described by `ref_dict` element. This string is one
    of the `old_strings`.
    Return a list of tuples, each containing three elements:
    [(info_dict, indeces, equal)]
    - info_dict — an {info_dict} of the inline comment.
    - indeces — a list of indeces of the `new_strings` which correspond to the
      inline comment in the old text.
    - equal — a boolean value which tells whether the commented paragraph was changed
      or not. True — unchanged, False — changed.
    '''
    logger.debug('find_place2 START')
    result = []
    # strip all strings from indentations and formatting for comparison
    s_old_strings = [s.strip() for s in old_strings]
    s_new_strings = [s.strip() for s in new_strings]
    sm = SequenceMatcher(None, s_old_strings, s_new_strings)
    sm.ratio()
    Opcode = namedtuple('opcode', ('tag', 'a_s', 'a_e', 'b_s', 'b_e'))
    opcodes = [Opcode(*opc) for opc in sm.get_opcodes()]
    logger.debug(f'Opcodes after matching: {sm.get_opcodes()}')
    # We use IDs to determine the correct string because the tree may contain
    # strings with equal values, but located in different parts of the tree. ID
    # allows to determine the correct string precisely.
    old_string_ids = [id(s) for s in old_strings]
    for cs_id in ref_dict:
        equal = False
        ind = old_string_ids.index(cs_id)
        # Locate the opcode whose old-side range covers the commented string.
        for i in range(len(opcodes)):
            if opcodes[i].a_s <= ind < opcodes[i].a_e:
                break
        else:
            i = None
        if i is None:
            # No opcode covers this string; nowhere to restore the comment.
            continue
        if opcodes[i].tag == 'equal':
            # Unchanged text: the new index is the same offset into the block.
            indeces = [opcodes[i].b_s + (ind - opcodes[i].a_s)]
            equal = True
        elif opcodes[i].tag == 'replace':
            # Changed text: every replacement string is a candidate.
            indeces = list(range(opcodes[i].b_s, opcodes[i].b_e))
        elif opcodes[i].tag == 'delete':
            # Deleted text: fall back to neighbouring 'insert' blocks, or,
            # failing that, to the strings adjacent to the deletion point.
            indeces = []
            if i and opcodes[i - 1].tag == 'insert':
                indeces.extend(range(opcodes[i - 1].b_s, opcodes[i - 1].b_e))
            if i + 2 <= len(opcodes) and opcodes[i + 1].tag == 'insert':
                indeces.extend(range(opcodes[i + 1].b_s, opcodes[i + 1].b_e))
            if not indeces:
                indeces.append(opcodes[i].b_s - 1 if opcodes[i].b_s else 0)
                indeces.append(opcodes[i].b_e if opcodes[i].b_e + 1 <= len(new_strings) else opcodes[i].b_e - 1)
        result.append((ref_dict[cs_id], indeces, equal))
    logger.debug(f'List of found places:\n\n{pformat(result)}')
    logger.debug('find_place2 END')
    return result
def add_unique(a: list, b: list, at_beginning: bool = True) -> None:
    '''
    Append to `a`, in place, every element of `b` that `a` does not already
    contain.

    If `at_beginning` is True each new element is inserted at index 0 (so a
    run of new elements ends up in reverse order at the front); otherwise new
    elements are appended at the end.
    '''
    for item in b:
        if item in a:
            continue
        if at_beginning:
            a.insert(0, item)
        else:
            a.append(item)
def correct_places(places: list, strings: list):
    '''
    Drop from every place any string index whose string sits inside a
    Confluence macro (an <ac:...> tag) — inline comments cannot be added
    there. In place.

    :param places: list of tuples from find_place2: [(info_dict, indeces, equal)]
    :param strings: NavigableStrings of the new content (part of the tree).
    '''
    logger.debug('correct_places START')
    for place in places:
        doomed = []
        for i, index in enumerate(place[1]):
            # Climb from the string to the tree root, looking for an
            # ancestor whose tag name marks a Confluence macro.
            cur = strings[index]
            while cur:
                if cur.name and cur.name.startswith('ac:'):
                    logger.debug(f"string '{strings[index]}' is inside macro {cur.name}"
                                 " and will be removed")
                    doomed.append(i)
                    break
                cur = cur.parent
        # Pop from the back so earlier positions stay valid.
        for i in reversed(doomed):
            s = place[1].pop(i)
            logger.debug(f"Removed string [{s}]: '{strings[s]}'")
    logger.debug('correct_places END')
def divide_places(places: list) -> dict:
'''
Takes a list of tuples, got from find_place2 function:
[(info_dict, indeces, equal)]
Looks for the places with equal == True and gathers them into a separate list.
Removes all indeces which were mentioned in `equal` places from other places.
Gathers references in the correct order from the remaining places and saves them
in a dictionary with key = string index, value = list of ref_ids, which point
to this | |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements loss functions for dual encoder training with a cache."""
import abc
import collections
from typing import Callable, Dict, Iterable, List, Optional, Tuple
import tensorflow.compat.v2 as tf
from negative_cache import negative_cache
from negative_cache import retrieval_fns
from negative_cache import util
# Result bundle produced by every CacheLoss implementation:
#   training_loss: the loss to differentiate.
#   interpretable_loss: human-readable proxy loss for summaries.
#   updated_item_data / updated_item_indices / updated_item_mask: keyed by
#     data source; the recomputed embeddings for retrieved cache items,
#     where to write them back, and which rows are valid for that source.
#   staleness: error between cached and freshly computed embeddings.
CacheLossReturn = collections.namedtuple('CacheLossReturn', [
    'training_loss',
    'interpretable_loss',
    'updated_item_data',
    'updated_item_indices',
    'updated_item_mask',
    'staleness',
])
class CacheLoss(object, metaclass=abc.ABCMeta):
    """Interface for dual-encoder losses computed against a negative cache.

    Implementations are callables returning a CacheLossReturn for a batch of
    query/positive-document embedding pairs.
    """

    @abc.abstractmethod
    def __call__(
            self, doc_network,
            query_embeddings, pos_doc_embeddings,
            cache):
        # doc_network: embeds document data retrieved from the cache.
        # cache: maps data-source name to a cache of document data+embeddings.
        pass
# Internal result of a cache retrieval: the retrieved items' data, the score
# matrix used for selection, the selected indices into the concatenated
# cache, and the (possibly stale) cached embeddings of the selected items.
_RetrievalReturn = collections.namedtuple('_RetrievalReturn', [
    'retrieved_data', 'scores', 'retrieved_indices',
    'retrieved_cache_embeddings'
])
def _score_documents(query_embeddings,
                     doc_embeddings,
                     score_transform=None,
                     all_pairs=False):
    """Calculates the dot product of query, document embedding pairs.

    Args:
        query_embeddings: batch of query embeddings.
        doc_embeddings: batch of document embeddings.
        score_transform: optional function applied to the raw scores.
        all_pairs: if True, score every query against every document
            (a full score matrix); otherwise score row-wise pairs.

    Returns:
        The (optionally transformed) dot-product scores.
    """
    if all_pairs:
        raw_scores = tf.matmul(query_embeddings, doc_embeddings, transpose_b=True)
    else:
        raw_scores = tf.reduce_sum(query_embeddings * doc_embeddings, axis=1)
    if score_transform is None:
        return raw_scores
    return score_transform(raw_scores)
def _batch_concat_with_no_op(tensors):
"""If there is only one tensor to concatenate, this is a no-op."""
if len(tensors) == 1:
return tensors[0]
else:
return tf.concat(tensors, axis=0)
def _retrieve_from_caches(query_embeddings,
                          cache,
                          retrieval_fn,
                          embedding_key,
                          data_keys,
                          sorted_data_sources,
                          score_transform=None,
                          top_k=None):
    """Retrieve elements from a cache with the given retrieval function.

    Args:
        query_embeddings: embeddings of the current batch of queries.
        cache: dict mapping data-source name to a cache whose .data holds
            document tensors, including the embeddings under `embedding_key`.
        retrieval_fn: maps a score matrix to retrieved column indices.
        embedding_key: key of the embedding tensor in each cache's data.
        data_keys: keys of the document data tensors to gather.
        sorted_data_sources: data-source names in a fixed order, so indices
            into the concatenated bank can be mapped back per source.
        score_transform: optional transform applied to scores before retrieval.
        top_k: if set, retrieval is restricted to the approximate top-k
            scoring cache items per query.

    Returns:
        A _RetrievalReturn. NOTE: when `top_k` is set, the returned `scores`
        are the top-k-restricted scores, not the full score matrix.
    """
    # Concatenate every data source into one bank; order is fixed by
    # sorted_data_sources so callers can recover per-source indices later.
    all_embeddings = _batch_concat_with_no_op([
        cache[data_source].data[embedding_key]
        for data_source in sorted_data_sources
    ])
    all_data = {}
    for key in data_keys:
        all_data[key] = _batch_concat_with_no_op(
            [cache[data_source].data[key] for data_source in sorted_data_sources])
    scores = _score_documents(
        query_embeddings,
        all_embeddings,
        score_transform=score_transform,
        all_pairs=True)
    if top_k:
        # Restrict to the approximate top-k items, then translate the
        # retrieval_fn's positions (within the top-k slice) back into
        # positions in the full concatenated bank via gather_nd.
        scores, top_k_indices = util.approximate_top_k_with_indices(scores, top_k)
        top_k_indices = tf.cast(top_k_indices, dtype=tf.int64)
        retrieved_indices = retrieval_fn(scores)
        batch_index = tf.expand_dims(
            tf.range(tf.shape(retrieved_indices)[0], dtype=tf.int64), axis=1)
        retrieved_indices_with_batch_index = tf.concat(
            [batch_index, retrieved_indices], axis=1)
        retrieved_indices = tf.gather_nd(top_k_indices,
                                         retrieved_indices_with_batch_index)
        retrieved_indices = tf.expand_dims(retrieved_indices, axis=1)
    else:
        retrieved_indices = retrieval_fn(scores)
    # Selection is not differentiated through.
    retrieved_indices = tf.stop_gradient(retrieved_indices)
    retrieved_data = {
        k: tf.gather_nd(v, retrieved_indices) for k, v in all_data.items()
    }
    retrieved_cache_embeddings = tf.gather_nd(all_embeddings, retrieved_indices)
    return _RetrievalReturn(retrieved_data, scores, retrieved_indices,
                            retrieved_cache_embeddings)
# NOTE(review): "sorce" is a typo for "source"; the name is kept because the
# function is called elsewhere in this module.
def _get_data_sorce_start_position_and_cache_sizes(
        cache, embedding_key,
        sorted_data_sources
):
    """Gets the first index and size per data sources in the concatenated data.

    Returns two dicts keyed by data source: the offset of that source's first
    row in the concatenated bank, and the number of rows it contributes.
    """
    curr_position = tf.constant(0, dtype=tf.int64)
    start_positions = {}
    cache_sizes = {}
    # Accumulate offsets in the same order used to build the concatenation.
    for data_source in sorted_data_sources:
        start_positions[data_source] = curr_position
        cache_sizes[data_source] = tf.shape(
            cache[data_source].data[embedding_key], out_type=tf.int64)[0]
        curr_position = curr_position + cache_sizes[data_source]
    return start_positions, cache_sizes
def _get_retrieved_embedding_updates(
        cache, embedding_key,
        sorted_data_sources, retrieved_indices,
        retrieved_embeddings
):
    """Gets the updates for the retrieved data.

    Translates indices into the concatenated bank back into per-data-source
    indices, pairing each with the freshly computed embeddings. The mask marks
    which retrieved rows actually belong to each data source.
    """
    updated_item_indices = {}
    updated_item_data = {}
    updated_item_mask = {}
    start_positions, cache_sizes = _get_data_sorce_start_position_and_cache_sizes(
        cache, embedding_key, sorted_data_sources)
    for data_source in sorted_data_sources:
        # Shift global bank indices into this source's local index space.
        updated_item_indices[
            data_source] = retrieved_indices - start_positions[data_source]
        updated_item_data[data_source] = {embedding_key: retrieved_embeddings}
        # A row belongs to this source iff its global index falls inside the
        # source's [start, start + size) slice of the concatenated bank.
        updated_item_mask[data_source] = (
            retrieved_indices >= start_positions[data_source]) & (
                retrieved_indices <
                start_positions[data_source] + cache_sizes[data_source])
        # Drop the singleton retrieval axis.
        updated_item_indices[data_source] = tf.squeeze(
            updated_item_indices[data_source], axis=1)
        updated_item_mask[data_source] = tf.squeeze(
            updated_item_mask[data_source], axis=1)
    return updated_item_data, updated_item_indices, updated_item_mask
def _get_staleness(cache_embeddings,
                   updated_embeddings):
    """Per-row squared error between cached and fresh embeddings, normalized
    by the squared norm of the fresh embeddings."""
    diff = cache_embeddings - updated_embeddings
    squared_error = tf.reduce_sum(diff**2, axis=1)
    denom = tf.reduce_sum(updated_embeddings**2, axis=1)
    return squared_error / denom
# Internal bundle produced by _calculate_training_loss_and_summaries: the two
# losses, the staleness metric, the raw retrieval result, and the freshly
# recomputed embeddings of the retrieved negatives.
_LossCalculationReturn = collections.namedtuple('_LossCalculationReturn', [
    'training_loss', 'interpretable_loss', 'staleness', 'retrieval_return',
    'retrieved_negative_embeddings'
])
class AbstractCacheClassificationLoss(CacheLoss, metaclass=abc.ABCMeta):
    """Abstract method for cache classification losses.

    Inherit from this object and override `_retrieve_from_cache` and
    `_score_documents` to implement a cache classification loss based on the
    specified retrieval and scoring approaches.
    """

    @abc.abstractmethod
    def _retrieve_from_cache(self, query_embeddings, cache):
        # Returns a _RetrievalReturn of negatives drawn from the cache.
        pass

    @abc.abstractmethod
    def _score_documents(self, query_embeddings, doc_embeddings):
        # Returns similarity scores for query/document embedding pairs.
        pass

    def _calculate_training_loss_and_summaries(
            self,
            doc_network,
            query_embeddings,
            pos_doc_embeddings,
            cache,
            reducer=tf.math.reduce_mean):
        """Calculates the cache classification loss and associated summaries.

        Args:
            doc_network: network that embeds retrieved document data.
            query_embeddings: embeddings of the batch's queries.
            pos_doc_embeddings: embeddings of the matching positive documents.
            cache: dict of data-source name to negative cache.
            reducer: reduction applied to the per-example losses; if None the
                elementwise values are returned.

        Returns:
            A _LossCalculationReturn.
        """
        positive_scores = self._score_documents(query_embeddings,
                                                pos_doc_embeddings)
        retrieval_return = self._retrieve_from_cache(query_embeddings, cache)
        # Re-embed only the retrieved negatives with the current model; this
        # is what keeps the cost O(batch) instead of O(cache).
        retrieved_negative_embeddings = doc_network(retrieval_return.retrieved_data)
        retrieved_negative_scores = self._score_documents(
            query_embeddings, retrieved_negative_embeddings)
        # Softmax over [positive | cached negatives]; column 0 is the positive.
        cache_and_pos_scores = tf.concat(
            [tf.expand_dims(positive_scores, axis=1), retrieval_return.scores],
            axis=1)
        prob_pos = tf.nn.softmax(cache_and_pos_scores, axis=1)[:, 0]
        prob_pos = tf.stop_gradient(prob_pos)
        # Surrogate loss: per the class docstring of CacheClassificationLoss,
        # its gradient approximates the full softmax classification loss in
        # expectation, with prob_pos treated as a constant weight.
        training_loss = (1.0 - prob_pos) * (
            retrieved_negative_scores - positive_scores)
        # -log P(positive): the human-readable (non-differentiable) proxy,
        # computed from cached — possibly stale — scores.
        interpretable_loss = -tf.math.log(prob_pos)
        staleness = _get_staleness(retrieval_return.retrieved_cache_embeddings,
                                   retrieved_negative_embeddings)
        if reducer is not None:
            training_loss = reducer(training_loss)
            interpretable_loss = reducer(interpretable_loss)
            staleness = reducer(staleness)
        return _LossCalculationReturn(
            training_loss=training_loss,
            interpretable_loss=interpretable_loss,
            staleness=staleness,
            retrieval_return=retrieval_return,
            retrieved_negative_embeddings=retrieved_negative_embeddings)
class CacheClassificationLoss(AbstractCacheClassificationLoss):
    """Implements an efficient way to train with a cache classification loss.

    The cache classification loss is the negative log probability of the positive
    document when the distribution is the softmax of all documents. This object
    allows calculating:
    (1) An efficient stochastic loss function whose gradient is approximately
        the same as the cache classification loss in expectation. This gradient
        can be calculated by feeding only O(batch_size) documents through the
        document network, rather than O(cache_size) for the standard
        implementation.
    (2) An approximation of the value cache classification loss using the cached
        embeddings. The loss described above is not interpretable. This loss is
        a direct approximation of the cache classification loss, however we
        cannot calculate a gradient of this loss.

    Calling the CacheClassificationLoss return a CacheLossReturn object, which
    has the following fields:
    training_loss: Use this to calculate gradients.
    interpretable_loss: An interpretable number for the CacheClassificationLoss
        to use as a Tensorboard summary.
    updated_item_data, updated_item_indices, updated_item_mask: Use these in
        the negative cache updates. These describe the cache elements that were
        retrieved and current embedding calculated.
    staleness: This is the square error between the retrieved cache embeddings
        and the retrieved embeddings as defined by the current state of the
        model. Create a summary of this value as a proxy for the error due to
        cache staleness.
    """

    def __init__(self,
                 embedding_key,
                 data_keys,
                 score_transform=None,
                 top_k=None,
                 reducer=tf.math.reduce_mean):
        """Initializes the CacheClassificationLoss object.

        Args:
            embedding_key: The key containing the embedding in the cache.
            data_keys: The keys containing the document data in the cache.
            score_transform: Scores are transformed by this function before use.
                Specifically we have scores(i, j) = score_transform(dot(query_embed_i,
                doc_embed_j))
            top_k: If set, the top k scoring negative elements will be mined and the
                rest of the elements masked before calculating the loss.
            reducer: Function that reduces the losses to a single scaler. If None,
                then the elementwise losses are returned.
        """
        self.embedding_key = embedding_key
        self.data_keys = data_keys
        self.score_transform = score_transform
        self.top_k = top_k
        self.reducer = reducer
        # Stochastic retrieval: sampling via Gumbel-max over the scores.
        self._retrieval_fn = retrieval_fns.GumbelMaxRetrievalFn()

    def _retrieve_from_cache(
            self, query_embeddings,
            cache):
        # Data sources are iterated in sorted order so bank offsets are stable.
        sorted_data_sources = sorted(cache.keys())
        return _retrieve_from_caches(query_embeddings, cache, self._retrieval_fn,
                                     self.embedding_key, self.data_keys,
                                     sorted_data_sources, self.score_transform,
                                     self.top_k)

    def _score_documents(self, query_embeddings,
                         doc_embeddings):
        # Row-wise (paired) scoring with the configured transform.
        return _score_documents(
            query_embeddings, doc_embeddings, score_transform=self.score_transform)

    def __call__(
            self, doc_network,
            query_embeddings, pos_doc_embeddings,
            cache):
        """Calculates the cache classification losses.

        Args:
            doc_network: The network that embeds the document data.
            query_embeddings: Embeddings for the queries.
            pos_doc_embeddings: Embeddings for the documents that are positive for the
                given queries.
            cache: The cache of document data and embeddings.

        Returns:
            A CacheLossReturn object with the training loss, interpretable loss, and
            data needed to update the cache element embeddings that were retrieved and
            recalculated.
        """
        loss_calculation_return = self._calculate_training_loss_and_summaries(
            doc_network, query_embeddings, pos_doc_embeddings, cache, self.reducer)
        training_loss = loss_calculation_return.training_loss
        interpretable_loss = loss_calculation_return.interpretable_loss
        staleness = loss_calculation_return.staleness
        retrieval_return = loss_calculation_return.retrieval_return
        retrieved_negative_embeddings = loss_calculation_return.retrieved_negative_embeddings
        sorted_data_sources = sorted(cache.keys())
        # Map the retrieved bank indices back to per-source cache updates.
        updated_item_data, updated_item_indices, updated_item_mask = _get_retrieved_embedding_updates(
            cache, self.embedding_key, sorted_data_sources,
            retrieval_return.retrieved_indices, retrieved_negative_embeddings)
        return CacheLossReturn(
            training_loss=training_loss,
            interpretable_loss=interpretable_loss,
            updated_item_data=updated_item_data,
            updated_item_indices=updated_item_indices,
            updated_item_mask=updated_item_mask,
            staleness=staleness)
def _get_local_elements_global_data(all_elements_local_data, num_replicas):
  """Runs a single AllToAll exchange over all `num_replicas` replicas.

  The input is expanded with a new axis 1, then split along dimension 0 and
  concatenated along dimension 1 across one replica group containing every
  replica.
  """
  expanded = tf.expand_dims(all_elements_local_data, axis=1)
  single_group = [list(range(num_replicas))]
  return tf.raw_ops.AllToAll(
      input=expanded,
      group_assignment=single_group,
      concat_dimension=1,
      split_dimension=0,
      split_count=num_replicas)
class DistributedCacheClassificationLoss(AbstractCacheClassificationLoss):
"""Implements a cache classification loss with a sharded cache.
This object implements a cache classification loss when the cache is sharded
onto multiple replicas. This code calculates the loss treating the sharded
cache as one unit, so all queries are affected by all cache elements in every
replica.
Currently, the updated_item_* fields (i.e., the embedding updates for items
already in the cache) in the CacheLossReturn are empty. This does not affect
new items introduced to the cache.
"""
def __init__(self,
embedding_key,
data_keys,
score_transform = None,
top_k = None,
reducer = tf.math.reduce_mean):
self.embedding_key = embedding_key
self.data_keys = data_keys
self.score_transform = score_transform
self.top_k = top_k
self.reducer = reducer
self._retrieval_fn = retrieval_fns.GumbelMaxRetrievalFn()
def _score_documents(self, query_embeddings,
doc_embeddings):
return _score_documents(
query_embeddings, doc_embeddings, score_transform=self.score_transform)
def _retrieve_from_cache(
self, query_embeddings,
cache):
sorted_data_sources = sorted(cache.keys())
all_query_embeddings = util.cross_replica_concat(query_embeddings, axis=0)
num_replicas = tf.distribute.get_replica_context().num_replicas_in_sync
# Performs approximate top k across replicas.
if self.top_k:
top_k_per_replica = self.top_k // num_replicas
else:
top_k_per_replica = self.top_k
retrieval_return = _retrieve_from_caches(all_query_embeddings, cache,
self._retrieval_fn,
self.embedding_key, self.data_keys,
sorted_data_sources,
self.score_transform,
top_k_per_replica)
# We transfer all queries to all replica and retrieve from every shard.
all_queries_local_weight = tf.math.reduce_logsumexp(
retrieval_return.scores, axis=1)
local_queries_global_weights = _get_local_elements_global_data(
all_queries_local_weight, num_replicas)
local_queries_all_retrieved_data = {}
for key in retrieval_return.retrieved_data:
local_queries_all_retrieved_data[key] = _get_local_elements_global_data(
retrieval_return.retrieved_data[key], num_replicas)
local_queries_all_retrieved_embeddings = _get_local_elements_global_data(
retrieval_return.retrieved_cache_embeddings, num_replicas)
# We then sample | |
-7.975908114097E-22, -8.350115450764E-22, -4.488982302467E-22,
1.385029207141E-22, 7.049456954407E-22, 7.049456954407E-22, 7.049456954407E-22,
7.049456954407E-22, 7.049456954407E-22, 7.049456954407E-22, 7.049456954407E-22,
7.049456954407E-22, 7.049456954407E-22, 7.049456954407E-22, 1.385029207141E-22,
1.385029207141E-22, 1.385029207141E-22, 1.385029207141E-22, 1.385029207141E-22,
1.385029207141E-22, -4.488982302467E-22, -4.488982302467E-22, -4.488982302467E-22,
-4.488982302467E-22, -4.488982302467E-22, -4.488982302467E-22, -8.350115450764E-22,
-8.350115450764E-22, -8.350115450764E-22, -8.350115450764E-22, -8.350115450764E-22,
-8.350115450764E-22, -7.975908114097E-22, -7.975908114097E-22, -7.975908114097E-22,
-7.975908114097E-22, -7.975908114097E-22, -7.975908114097E-22, -5.651707329729E-22,
-5.651707329729E-22, -5.651707329729E-22, -5.651707329729E-22, -5.651707329729E-22,
-5.651707329729E-22, -4.876973734940E-22, -4.876973734940E-22, -4.876973734940E-22,
-4.876973734940E-22, -4.876973734940E-22, -4.876973734940E-22, -5.651707329729E-22,
-5.651707329729E-22, -5.651707329729E-22, -5.651707329729E-22, -5.651707329729E-22,
-5.651707329729E-22, -7.975908114097E-22, -7.975908114097E-22, -7.975908114097E-22,
-7.975908114097E-22, -7.975908114097E-22, -7.975908114097E-22, -8.350115450764E-22,
-8.350115450764E-22, -8.350115450764E-22, -8.350115450764E-22, -8.350115450764E-22,
-8.350115450764E-22, -4.488982302467E-22, -4.488982302467E-22, -4.488982302467E-22,
-4.488982302467E-22, -4.488982302467E-22, -4.488982302467E-22, 1.385029207141E-22,
1.385029207141E-22, 1.385029207141E-22, 1.385029207141E-22, 1.385029207141E-22,
1.385029207141E-22, 7.049456954407E-22, 7.049456954407E-22, 7.049456954407E-22,
7.049456954407E-22, 7.049456954407E-22, 7.049456954407E-22, 7.049456954407E-22,
7.049456954407E-22, 7.049456954407E-22, 7.049456954407E-22, 1.385029207141E-22,
-4.488982302467E-22, -8.350115450764E-22, -7.975908114097E-22, -5.651707329729E-22,
-4.876973734940E-22, -5.651707329729E-22, -7.975908114097E-22, -8.350115450764E-22,
-4.488982302467E-22, 1.385029207141E-22, 7.049456954407E-22, 7.049456954407E-22,
7.049456954407E-22, 7.049456954407E-22, 7.049456954407E-22, 7.049456954407E-22,
7.049456954407E-22, 7.049456954407E-22, 1.385029207141E-22, -4.488982302467E-22,
-8.350115450764E-22, -7.975908114097E-22, -5.651707329729E-22, -4.876973734940E-22,
-5.651707329729E-22, -7.975908114097E-22, -8.350115450764E-22, -4.488982302467E-22,
1.385029207141E-22, 7.049456954407E-22, 7.049456954407E-22, 7.049456954407E-22,
7.049456954407E-22, 7.049456954407E-22, 7.049456954407E-22, 7.049456954407E-22,
7.049456954407E-22, 1.385029207141E-22, -4.488982302467E-22, -8.350115450764E-22,
-7.975908114097E-22, -5.651707329729E-22, -4.876973734940E-22, -5.651707329729E-22,
-7.975908114097E-22, -8.350115450764E-22, -4.488982302467E-22, 1.385029207141E-22,
7.049456954407E-22, 7.049456954407E-22, 7.049456954407E-22, 7.049456954407E-22], dtype=np.float64
)
#: array holding precalculated linear extrapolation data
self.precalc_extrapolation_linear: np.array = np.array(
[5.663043205226E-21, 6.276115134204E-21, 6.889187063182E-21, 7.502258992160E-21,
7.655526974405E-21, -3.424867714738E-22, -1.065498256289E-20, -1.744516071724E-20,
-1.668065798131E-20, -1.642582373601E-20, -1.668065798131E-20, -1.744516071724E-20,
-1.065498256289E-20, -3.424867714738E-22, 7.655526974405E-21, 7.502258992160E-21,
6.889187063182E-21, 6.276115134204E-21, 5.663043205226E-21, 6.276115134204E-21,
5.929572720554E-21, 5.583030306904E-21, 5.236487893254E-21, 5.149852289841E-21,
-3.779572577314E-22, -7.381658890282E-21, -1.189597074863E-20, -1.130882889853E-20,
-1.111311494850E-20, -1.130882889853E-20, -1.189597074863E-20, -7.381658890282E-21,
-3.779572577314E-22, 5.149852289841E-21, 5.236487893254E-21, 5.583030306904E-21,
5.929572720554E-21, 6.276115134204E-21, 6.889187063182E-21, 5.583030306904E-21,
4.276873550626E-21, 2.970716794347E-21, 2.644177605278E-21, -4.134277439891E-22,
-4.108335217679E-21, -6.346780780020E-21, -5.936999815753E-21, -5.800406160998E-21,
-5.936999815753E-21, -6.346780780020E-21, -4.108335217679E-21, -4.134277439891E-22,
2.644177605278E-21, 2.970716794347E-21, 4.276873550626E-21, 5.583030306904E-21,
6.889187063182E-21, 7.502258992160E-21, 5.236487893254E-21, 2.970716794347E-21,
2.970716794347E-21, 5.236487893254E-21, 7.502258992160E-21, 7.655526974405E-21,
5.149852289841E-21, 2.644177605278E-21, 2.644177605278E-21, 5.149852289841E-21,
7.655526974405E-21, -3.424867714738E-22, -3.779572577314E-22, -4.134277439891E-22,
-4.134277439891E-22, -3.779572577314E-22, -3.424867714738E-22, -1.065498256289E-20,
-7.381658890282E-21, -4.108335217679E-21, -4.108335217679E-21, -7.381658890282E-21,
-1.065498256289E-20, -1.744516071724E-20, -1.189597074863E-20, -6.346780780020E-21,
-6.346780780020E-21, -1.189597074863E-20, -1.744516071724E-20, -1.668065798131E-20,
-1.130882889853E-20, -5.936999815753E-21, -5.936999815753E-21, -1.130882889853E-20,
-1.668065798131E-20, -1.642582373601E-20, -1.111311494850E-20, -5.800406160998E-21,
-5.800406160998E-21, -1.111311494850E-20, -1.642582373601E-20, -1.668065798131E-20,
-1.130882889853E-20, -5.936999815753E-21, -5.936999815753E-21, -1.130882889853E-20,
-1.668065798131E-20, -1.744516071724E-20, -1.189597074863E-20, -6.346780780020E-21,
-6.346780780020E-21, -1.189597074863E-20, -1.744516071724E-20, -1.065498256289E-20,
-7.381658890282E-21, -4.108335217679E-21, -4.108335217679E-21, -7.381658890282E-21,
-1.065498256289E-20, -3.424867714738E-22, -3.779572577314E-22, -4.134277439891E-22,
-4.134277439891E-22, -3.779572577314E-22, -3.424867714738E-22, 7.655526974405E-21,
5.149852289841E-21, 2.644177605278E-21, 2.644177605278E-21, 5.149852289841E-21,
7.655526974405E-21, 7.502258992160E-21, 5.236487893254E-21, 2.970716794347E-21,
2.970716794347E-21, 5.236487893254E-21, 7.502258992160E-21, 6.889187063182E-21,
5.583030306904E-21, 4.276873550626E-21, 2.970716794347E-21, 2.644177605278E-21,
-4.134277439891E-22, -4.108335217679E-21, -6.346780780020E-21, -5.936999815753E-21,
-5.800406160998E-21, -5.936999815753E-21, -6.346780780020E-21, -4.108335217679E-21,
-4.134277439891E-22, 2.644177605278E-21, 2.970716794347E-21, 4.276873550626E-21,
5.583030306904E-21, 6.889187063182E-21, 6.276115134204E-21, 5.929572720554E-21,
5.583030306904E-21, 5.236487893254E-21, 5.149852289841E-21, -3.779572577314E-22,
-7.381658890282E-21, -1.189597074863E-20, -1.130882889853E-20, -1.111311494850E-20,
-1.130882889853E-20, -1.189597074863E-20, -7.381658890282E-21, -3.779572577314E-22,
5.149852289841E-21, 5.236487893254E-21, 5.583030306904E-21, 5.929572720554E-21,
6.276115134204E-21, 5.663043205226E-21, 6.276115134204E-21, 6.889187063182E-21,
7.502258992160E-21, 7.655526974405E-21, -3.424867714738E-22, -1.065498256289E-20,
-1.744516071724E-20, -1.668065798131E-20, -1.642582373601E-20, -1.668065798131E-20,
-1.744516071724E-20, -1.065498256289E-20, -3.424867714738E-22, 7.655526974405E-21,
7.502258992160E-21, 6.889187063182E-21, 6.276115134204E-21, 5.663043205226E-21], dtype=np.float64
)
class TestInterpolatorLoadNormalValuesUneven(TestInterpolatorLoadValues):
"""
Loading normal sized values for a 2D sinc function test.
For description of data storage, see TestInterpolatorLoadValues.
"""
    def __init__(self):
        """Populates the 19x19 sample array used by the uneven-grid tests.

        Only `self.data` is set here; the precalculated interpolation and
        extrapolation arrays are filled in by the setup_* methods.
        """
        super().__init__()
        #: data array from a function sampled on self.x. dtype should be np.float64
        self.data: np.ndarray = np.array(
            [[7.049456954407E-02, 5.750613283457E-02, -5.031133752816E-03, -3.957902354963E-02,
              -8.474851229653E-02, -9.121180789640E-02, -7.975908114097E-02, -6.835527608142E-02,
              -4.876973734940E-02, -4.418624359019E-02, -4.876973734940E-02, -5.728794999748E-02,
              -7.975908114097E-02, -8.851381966774E-02, -8.474851229653E-02, -6.734331948907E-02,
              -5.031133752816E-03, 2.994648599639E-02, 7.049456954407E-02],
             [5.750613283457E-02, 2.707313596327E-02, -4.947098908050E-02, -7.716977009236E-02,
              -8.851381966774E-02, -7.454434661366E-02, -2.923247487940E-02, -6.867195016855E-03,
              2.378357578524E-02, 3.010595454905E-02, 2.378357578524E-02, 1.127556015714E-02,
              -2.923247487940E-02, -5.313921590225E-02, -8.851381966774E-02, -9.034807299146E-02,
              -4.947098908050E-02, -1.193366977476E-02, 5.750613283457E-02],
             [-5.031133752816E-03, -4.947098908050E-02, -9.121921863446E-02, -7.727418937786E-02,
              -9.251264987298E-04, 4.278478892878E-02, 1.052139178127E-01, 1.201126954831E-01,
              1.283205555674E-01, 1.282705054889E-01, 1.283205555674E-01, 1.266409993718E-01,
              1.052139178127E-01, 7.942173990430E-02, -9.251264987298E-04, -4.418624359019E-02,
              -9.121921863446E-02, -8.127030064783E-02, -5.031133752816E-03],
             [-3.957902354963E-02, -7.716977009236E-02, -7.727418937786E-02, -3.939635588637E-02,
              6.155045982467E-02, 1.005907311298E-01, 1.282705054889E-01, 1.215171020957E-01,
              9.674890958243E-02, 8.947967120127E-02, 9.674890958243E-02, 1.089467585825E-01,
              1.282705054889E-01, 1.227830098222E-01, 6.155045982467E-02, 1.127556015714E-02,
              -7.727418937786E-02, -9.132515608025E-02, -3.957902354963E-02],
             [-8.474851229653E-02, -8.851381966774E-02, -9.251264987298E-04, 6.155045982467E-02,
              1.283205555674E-01, 1.138884597353E-01, 1.734970013481E-02, -3.806455343631E-02,
              -1.140407180451E-01, -1.289967784984E-01, -1.140407180451E-01, -8.347707818621E-02,
              1.734970013481E-02, 7.259972187812E-02, 1.283205555674E-01, 1.095087852132E-01,
              -9.251264987298E-04, -5.728794999748E-02, -8.474851229653E-02],
             [-9.121180789640E-02, -7.454434661366E-02, 4.278478892878E-02, 1.005907311298E-01,
              1.138884597353E-01, 6.300331483592E-02, -8.347707818621E-02, -1.435151388671E-01,
              -2.022780434934E-01, -2.095142217650E-01, -2.022780434934E-01, -1.824711128590E-01,
              -8.347707818621E-02, -9.351916346701E-03, 1.138884597353E-01, 1.277678821031E-01,
              4.278478892878E-02, -2.387837583277E-02, -9.121180789640E-02],
             [-7.975908114097E-02, -2.923247487940E-02, 1.052139178127E-01, 1.282705054889E-01,
              1.734970013481E-02, -8.347707818621E-02, -2.145503300375E-01, -2.052264171648E-01,
              -9.241435356589E-02, -5.067031374727E-02, -9.241435356589E-02, -1.547945550760E-01,
              -2.145503300375E-01, -1.704692811750E-01, 1.734970013480E-02, 9.674890958243E-02,
              1.052139178127E-01, 4.278478892878E-02, -7.975908114097E-02],
             [-6.835527608142E-02, -6.867195016855E-03, 1.201126954831E-01, 1.215171020957E-01,
              -3.806455343631E-02, -1.435151388671E-01, -2.052264171648E-01, -1.268933271435E-01,
              1.257727427008E-01, 2.044658799484E-01, 1.257727427008E-01, -9.251369377679E-04,
              -2.052264171648E-01, -2.095142217650E-01, -3.806455343631E-02, 6.300331483592E-02,
              1.201126954831E-01, 6.764157650466E-02, -6.835527608142E-02],
             [-4.876973734940E-02, 2.378357578524E-02, 1.283205555674E-01, 9.674890958243E-02,
              -1.140407180451E-01, -2.022780434934E-01, -9.241435356589E-02, 1.257727427008E-01,
              6.446759109720E-01, 7.917136926885E-01, 6.446759109720E-01, 3.973381423908E-01,
              -9.241435356589E-02, -2.052264171648E-01, -1.140407180451E-01, 4.282861041084E-03,
              1.283205555674E-01, 9.566990623455E-02, -4.876973734940E-02],
             [-4.418624359019E-02, 3.010595454905E-02, 1.282705054889E-01, 8.947967120127E-02,
              -1.289967784984E-01, -2.095142217650E-01, -5.067031374727E-02, 2.044658799484E-01,
              7.917136926885E-01, 9.560513212622E-01, 7.917136926885E-01, 5.136281978234E-01,
              -5.067031374727E-02, -1.933844713753E-01, -1.289967784984E-01, -9.351916346701E-03,
              1.282705054889E-01, 1.005907311298E-01, -4.418624359019E-02],
             [-4.876973734940E-02, 2.378357578524E-02, 1.283205555674E-01, 9.674890958243E-02,
              -1.140407180451E-01, -2.022780434934E-01, -9.241435356589E-02, 1.257727427008E-01,
              6.446759109720E-01, 7.917136926885E-01, 6.446759109720E-01, 3.973381423908E-01,
              -9.241435356589E-02, -2.052264171648E-01, -1.140407180451E-01, 4.282861041084E-03,
              1.283205555674E-01, 9.566990623455E-02, -4.876973734940E-02],
             [-5.728794999748E-02, 1.127556015714E-02, 1.266409993718E-01, 1.089467585825E-01,
              -8.347707818621E-02, -1.824711128590E-01, -1.547945550760E-01, -9.251369377679E-04,
              3.973381423908E-01, 5.136281978234E-01, 3.973381423908E-01, 2.044658799484E-01,
              -1.547945550760E-01, -2.165899038868E-01, -8.347707818621E-02, 2.979548967146E-02,
              1.266409993718E-01, 8.505592925358E-02, -5.728794999748E-02],
             [-7.975908114097E-02, -2.923247487940E-02, 1.052139178127E-01, 1.282705054889E-01,
              1.734970013481E-02, -8.347707818621E-02, -2.145503300375E-01, -2.052264171648E-01,
              -9.241435356589E-02, -5.067031374727E-02, -9.241435356589E-02, -1.547945550760E-01,
              -2.145503300375E-01, -1.704692811750E-01, 1.734970013480E-02, 9.674890958243E-02,
              1.052139178127E-01, 4.278478892878E-02, -7.975908114097E-02],
             [-8.851381966774E-02, -5.313921590225E-02, 7.942173990430E-02, 1.227830098222E-01,
              7.259972187812E-02, -9.351916346701E-03, -1.704692811750E-01, -2.095142217650E-01,
              -2.052264171648E-01, -1.933844713753E-01, -2.052264171648E-01, -2.165899038868E-01,
              -1.704692811750E-01, -9.881655954565E-02, 7.259972187812E-02, 1.215171020957E-01,
              7.942173990430E-02, 1.127556015714E-02, -8.851381966774E-02],
             [-8.474851229653E-02, -8.851381966774E-02, -9.251264987298E-04, 6.155045982467E-02,
              1.283205555674E-01, 1.138884597353E-01, 1.734970013480E-02, -3.806455343631E-02,
              -1.140407180451E-01, -1.289967784984E-01, -1.140407180451E-01, -8.347707818621E-02,
              1.734970013480E-02, 7.259972187812E-02, 1.283205555674E-01, 1.095087852132E-01,
              -9.251264987296E-04, -5.728794999748E-02, -8.474851229653E-02],
             [-6.734331948907E-02, -9.034807299146E-02, -4.418624359019E-02, 1.127556015714E-02,
              1.095087852132E-01, 1.277678821031E-01, 9.674890958243E-02, 6.300331483592E-02,
              4.282861041084E-03, -9.351916346701E-03, 4.282861041084E-03, 2.979548967146E-02,
              9.674890958243E-02, 1.215171020957E-01, 1.095087852132E-01, 6.764157650466E-02,
              -4.418624359019E-02, -8.199851501756E-02, -6.734331948907E-02],
             [-5.031133752816E-03, -4.947098908050E-02, -9.121921863446E-02, -7.727418937786E-02,
              -9.251264987298E-04, 4.278478892878E-02, 1.052139178127E-01, 1.201126954831E-01,
              1.283205555674E-01, 1.282705054889E-01, 1.283205555674E-01, 1.266409993718E-01,
              1.052139178127E-01, 7.942173990430E-02, -9.251264987296E-04, -4.418624359019E-02,
              -9.121921863446E-02, -8.127030064783E-02, -5.031133752816E-03],
             [2.994648599639E-02, -1.193366977476E-02, -8.127030064783E-02, -9.132515608025E-02,
              -5.728794999748E-02, -2.387837583277E-02, 4.278478892878E-02, 6.764157650466E-02,
              9.566990623455E-02, 1.005907311298E-01, 9.566990623455E-02, 8.505592925358E-02,
              4.278478892878E-02, 1.127556015714E-02, -5.728794999748E-02, -8.199851501756E-02,
              -8.127030064783E-02, -5.265231727816E-02, 2.994648599639E-02],
             [7.049456954407E-02, 5.750613283457E-02, -5.031133752816E-03, -3.957902354963E-02,
              -8.474851229653E-02, -9.121180789640E-02, -7.975908114097E-02, -6.835527608142E-02,
              -4.876973734940E-02, -4.418624359019E-02, -4.876973734940E-02, -5.728794999748E-02,
              -7.975908114097E-02, -8.851381966774E-02, -8.474851229653E-02, -6.734331948907E-02,
              -5.031133752816E-03, 2.994648599639E-02, 7.049456954407E-02]],
            dtype=np.float64
        )
def setup_cubic(self):
self.precalc_interpolation = np.array(
[[7.049456954407E-02, 2.120303154043E-02, -5.412207605158E-02, -9.041927189809E-02,
-7.975908114097E-02, -5.490808448662E-02, -4.365385529969E-02, -5.493373238953E-02,
-7.975908114097E-02, -9.043881788286E-02, -5.458241224645E-02, 2.183159991917E-02,
7.049456954407E-02],
[2.120303154043E-02, -6.840828181977E-02, -8.291758075355E-02, -1.514915773345E-02,
6.146617965847E-02, 1.016705772106E-01, 1.117191819513E-01, 1.019683374662E-01,
6.146617965847E-02, -1.567520827568E-02, -8.211550352574E-02, -6.795064436020E-02,
2.120303154043E-02],
[-5.412207605158E-02, -8.291758075355E-02, 9.666098759062E-03, 1.115403574145E-01,
1.180976383194E-01, 7.071918180842E-02, 4.552636512940E-02, 7.066851884792E-02,
1.180976383194E-01, 1.094887240678E-01, 1.271424487456E-02, -8.489926255180E-02,
-5.412207605158E-02],
[-9.041927189809E-02, -1.514915773345E-02, 1.115403574145E-01, 9.058021229999E-02,
-5.921575988005E-02, -1.697171170804E-01, -1.986325220649E-01, -1.708674849009E-01,
-5.921575988005E-02, 9.071011939526E-02, 1.130155601111E-01, -1.774015779353E-02,
-9.041927189809E-02],
[-7.975908114097E-02, 6.146617965847E-02, 1.180976383194E-01, -5.921575988005E-02,
-2.145503300375E-01, -1.373726429185E-01, -4.677155115289E-02, -1.400744853311E-01,
-2.145503300375E-01, -5.608764908063E-02, 1.171252709523E-01, 5.937941570370E-02,
-7.975908114097E-02],
[-5.490808448662E-02, 1.016705772106E-01, 7.071918180842E-02, -1.697171170804E-01,
-1.373726429185E-01, 3.180255899274E-01, 6.051776838087E-01, 3.123175894536E-01,
-1.373726429185E-01, -1.640188016429E-01, 6.586737199675E-02, 1.009003074435E-01,
-5.490808448662E-02],
[-4.365385529969E-02, 1.117191819513E-01, 4.552636512940E-02, -1.986325220649E-01,
-4.677155115289E-02, 6.051776838087E-01, 9.899192445544E-01, 5.987956201910E-01,
-4.677155115289E-02, -1.913782291956E-01, 3.931492909576E-02, 1.117308113990E-01,
-4.365385529969E-02],
[-5.493373238953E-02, 1.019683374662E-01, 7.066851884792E-02, -1.708674849009E-01,
-1.400744853311E-01, 3.123175894536E-01, 5.987956201910E-01, 3.081035634172E-01,
-1.400744853311E-01, -1.646011917477E-01, 6.660176847751E-02, 1.011817145737E-01,
-5.493373238953E-02],
[-7.975908114097E-02, 6.146617965847E-02, 1.180976383194E-01, -5.921575988005E-02,
-2.145503300375E-01, -1.373726429185E-01, -4.677155115289E-02, -1.400744853311E-01,
-2.145503300375E-01, -5.608764908063E-02, 1.171252709523E-01, 5.937941570370E-02,
-7.975908114097E-02],
[-9.043881788286E-02, -1.567520827568E-02, 1.094887240678E-01, 9.071011939526E-02,
-5.608764908063E-02, -1.640188016429E-01, -1.913782291956E-01, -1.646011917477E-01,
-5.608764908063E-02, 9.061889238735E-02, 1.119056766335E-01, -1.844151449703E-02,
-9.043881788286E-02],
[-5.458241224645E-02, -8.211550352574E-02, 1.271424487456E-02, 1.130155601111E-01,
1.171252709523E-01, 6.586737199675E-02, 3.931492909576E-02, 6.660176847751E-02,
1.171252709523E-01, 1.119056766335E-01, 1.568973617012E-02, -8.405046373540E-02,
-5.458241224645E-02],
[2.183159991917E-02, -6.795064436020E-02, -8.489926255180E-02, -1.774015779353E-02,
5.937941570370E-02, 1.009003074435E-01, 1.117308113990E-01, 1.011817145737E-01,
5.937941570370E-02, -1.844151449703E-02, -8.405046373540E-02, -6.845929971507E-02,
2.183159991917E-02],
[7.049456954407E-02, 2.120303154043E-02, -5.412207605158E-02, -9.041927189809E-02,
-7.975908114097E-02, -5.490808448662E-02, -4.365385529969E-02, -5.493373238953E-02,
-7.975908114097E-02, -9.043881788286E-02, -5.458241224645E-02, 2.183159991917E-02,
7.049456954407E-02]], dtype=np.float64
)
#: array holding precalculated nearest neighbour extrapolation data
self.precalc_extrapolation_nearest: np.array = np.array(
[7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02,
2.120303154043E-02, -5.412207605158E-02, -9.041927189809E-02, -7.975908114097E-02,
-5.490808448662E-02, -4.365385529969E-02, -5.493373238953E-02, -7.975908114097E-02,
-9.043881788286E-02, -5.458241224645E-02, 2.183159991917E-02, 7.049456954407E-02,
7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02,
7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 2.120303154043E-02,
-5.412207605158E-02, -9.041927189809E-02, -7.975908114097E-02, -5.490808448662E-02,
-4.365385529969E-02, -5.493373238953E-02, -7.975908114097E-02, -9.043881788286E-02,
-5.458241224645E-02, 2.183159991917E-02, 7.049456954407E-02, 7.049456954407E-02,
7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02,
7.049456954407E-02, 7.049456954407E-02, 2.120303154043E-02, -5.412207605158E-02,
-9.041927189809E-02, -7.975908114097E-02, -5.490808448662E-02, -4.365385529969E-02,
-5.493373238953E-02, -7.975908114097E-02, -9.043881788286E-02, -5.458241224645E-02,
2.183159991917E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02,
7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02,
7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 2.120303154043E-02,
2.120303154043E-02, 2.120303154043E-02, 2.120303154043E-02, 2.120303154043E-02,
2.120303154043E-02, -5.412207605158E-02, -5.412207605158E-02, -5.412207605158E-02,
-5.412207605158E-02, -5.412207605158E-02, -5.412207605158E-02, -9.041927189809E-02,
-9.041927189809E-02, -9.041927189809E-02, -9.041927189809E-02, -9.041927189809E-02,
-9.041927189809E-02, -7.975908114097E-02, -7.975908114097E-02, -7.975908114097E-02,
-7.975908114097E-02, -7.975908114097E-02, -7.975908114097E-02, -5.490808448662E-02,
-5.490808448662E-02, -5.490808448662E-02, -5.490808448662E-02, -5.490808448662E-02,
-5.490808448662E-02, -4.365385529969E-02, -4.365385529969E-02, -4.365385529969E-02,
-4.365385529969E-02, -4.365385529969E-02, -4.365385529969E-02, -5.493373238953E-02,
-5.493373238953E-02, -5.493373238953E-02, -5.493373238953E-02, -5.493373238953E-02,
-5.493373238953E-02, -7.975908114097E-02, -7.975908114097E-02, -7.975908114097E-02,
-7.975908114097E-02, -7.975908114097E-02, -7.975908114097E-02, -9.043881788286E-02,
-9.043881788286E-02, -9.043881788286E-02, -9.043881788286E-02, -9.043881788286E-02,
-9.043881788286E-02, -5.458241224645E-02, -5.458241224645E-02, -5.458241224645E-02,
-5.458241224645E-02, -5.458241224645E-02, -5.458241224645E-02, 2.183159991917E-02,
2.183159991917E-02, 2.183159991917E-02, 2.183159991917E-02, 2.183159991917E-02,
2.183159991917E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02,
7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02,
7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 2.120303154043E-02,
-5.412207605158E-02, -9.041927189809E-02, -7.975908114097E-02, -5.490808448662E-02,
-4.365385529969E-02, -5.493373238953E-02, -7.975908114097E-02, -9.043881788286E-02,
-5.458241224645E-02, 2.183159991917E-02, 7.049456954407E-02, 7.049456954407E-02,
7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02,
7.049456954407E-02, 7.049456954407E-02, 2.120303154043E-02, -5.412207605158E-02,
-9.041927189809E-02, -7.975908114097E-02, -5.490808448662E-02, -4.365385529969E-02,
-5.493373238953E-02, -7.975908114097E-02, -9.043881788286E-02, -5.458241224645E-02,
2.183159991917E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02,
7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02,
7.049456954407E-02, 2.120303154043E-02, -5.412207605158E-02, -9.041927189809E-02,
-7.975908114097E-02, -5.490808448662E-02, -4.365385529969E-02, -5.493373238953E-02,
-7.975908114097E-02, -9.043881788286E-02, -5.458241224645E-02, 2.183159991917E-02,
7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02, 7.049456954407E-02], dtype=np.float64
)
#: array holding precalculated linear extrapolation data
self.precalc_extrapolation_linear: np.array = np.array(
[-1.194521420610E+01, -7.823082017163E+00, -3.700949828231E+00, 4.211823607006E-01,
1.132303125654E+00, 7.609499224352E-01, -4.084169620081E-01, -1.443977450203E+00,
-1.920403690376E+00, -2.047040864433E+00, -1.948441248529E+00, -1.443977450203E+00,
-3.911849985106E-01, 7.440065969941E-01, 1.211163300265E+00, 4.211823607006E-01,
-2.906695129325E+00, -6.234572619351E+00, -9.562450109377E+00, -7.823082017163E+00,
-5.113959201337E+00, -2.404836385511E+00, 3.042864303151E-01, 7.619364276159E-01,
4.892592562729E-01, -3.024177319714E-01, -9.892379938492E-01, -1.298571821746E+00,
-1.379245194722E+00, -1.317272076482E+00, -9.892379938492E-01, -2.909362716347E-01,
4.778102605806E-01, 8.147194001498E-01, 3.042864303151E-01, -1.853476437714E+00,
-4.011239305743E+00, -6.169002173772E+00, -3.700949828231E+00, -2.404836385511E+00,
-1.108722942791E+00, 1.873904999296E-01, 3.915697295782E-01, 2.175685901107E-01,
-1.964185019348E-01, -5.344985374951E-01, -6.767399531163E-01, -7.114495250108E-01,
-6.861029044360E-01, -5.344985374951E-01, -1.906875447588E-01, 2.116139241671E-01,
4.182755000345E-01, 1.873904999296E-01, -8.002577461027E-01, -1.787905992135E+00,
-2.775554238167E+00, 4.211823607006E-01, 3.042864303151E-01, 1.873904999296E-01,
2.529609455086E-01, 4.354273214732E-01, 6.178936974377E-01, 1.132303125654E+00,
7.619364276159E-01, 3.915697295782E-01, 3.717844299417E-01, 7.223658283430E-01,
1.072947226744E+00, 7.609499224352E-01, 4.892592562729E-01, 2.175685901107E-01,
9.604055355141E-02, 2.462031831544E-01, 3.963658127574E-01, -4.084169620081E-01,
-3.024177319714E-01, -1.964185019348E-01, -3.537002512839E-01, -6.169812306696E-01,
-8.802622100554E-01, -1.443977450203E+00, -9.892379938492E-01, -5.344985374951E-01,
-6.312064964548E-01, -1.182653911769E+00, -1.734101327083E+00, -1.920403690376E+00,
-1.298571821746E+00, -6.767399531163E-01, -6.947802448856E-01, -1.334652405285E+00,
-1.974524565683E+00, -2.047040864433E+00, -1.379245194722E+00, -7.114495250108E-01,
-6.952552096349E-01, -1.346856563970E+00, -1.998457918305E+00, -1.948441248529E+00,
-1.317272076482E+00, -6.861029044360E-01, -7.014023360164E-01, -1.347870939643E+00,
-1.994339543270E+00, -1.443977450203E+00, -9.892379938492E-01, -5.344985374951E-01,
-6.312064964548E-01, -1.182653911769E+00, -1.734101327083E+00, -3.911849985106E-01,
-2.909362716347E-01, -1.906875447588E-01, -3.422173851071E-01, -5.939959523313E-01,
-8.457745195555E-01, 7.440065969941E-01, 4.778102605806E-01, 2.116139241671E-01,
8.886095091330E-02, 2.323043140730E-01, 3.757476772328E-01, 1.211163300265E+00,
8.147194001498E-01, 4.182755000345E-01, 4.013604681385E-01, 7.808893363578E-01,
1.160418204577E+00, 4.211823607006E-01, 3.042864303151E-01, 1.873904999296E-01,
2.529609455086E-01, 4.354273214732E-01, | |
None:
fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
taskname = self.rqdata.runq_task[task]
if task in self.rq.scenequeue_covered:
logger.debug(2, "Setscene covered task %s (%s)", task,
self.rqdata.get_user_idstring(task))
self.task_skip(task, "covered")
return True
if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
logger.debug(2, "Stamp current task %s (%s)", task,
self.rqdata.get_user_idstring(task))
self.task_skip(task, "existing")
return True
taskdep = self.rqdata.dataCache.task_deps[fn]
if 'noexec' in taskdep and taskname in taskdep['noexec']:
startevent = runQueueTaskStarted(task, self.stats, self.rq,
noexec=True)
bb.event.fire(startevent, self.cfgData)
self.runq_running[task] = 1
self.stats.taskActive()
if not self.cooker.configuration.dry_run:
bb.build.make_stamp(taskname, self.rqdata.dataCache, fn)
self.task_complete(task)
return True
else:
startevent = runQueueTaskStarted(task, self.stats, self.rq)
bb.event.fire(startevent, self.cfgData)
taskdepdata = self.build_taskdepdata(task)
taskdep = self.rqdata.dataCache.task_deps[fn]
if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
if not self.rq.fakeworker:
self.rq.start_fakeworker(self)
self.rq.fakeworker.stdin.write("<runtask>" + pickle.dumps((fn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata)) + "</runtask>")
self.rq.fakeworker.stdin.flush()
else:
self.rq.worker.stdin.write("<runtask>" + pickle.dumps((fn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata)) + "</runtask>")
self.rq.worker.stdin.flush()
self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
self.build_stamps2.append(self.build_stamps[task])
self.runq_running[task] = 1
self.stats.taskActive()
if self.stats.active < self.number_tasks:
return True
if self.stats.active > 0:
self.rq.read_workers()
return self.rq.active_fds()
if len(self.failed_fnids) != 0:
self.rq.state = runQueueFailed
return True
# Sanity Checks
for task in xrange(self.stats.total):
if self.runq_buildable[task] == 0:
logger.error("Task %s never buildable!", task)
if self.runq_running[task] == 0:
logger.error("Task %s never ran!", task)
if self.runq_complete[task] == 0:
logger.error("Task %s never completed!", task)
self.rq.state = runQueueComplete
return True
def build_taskdepdata(self, task):
taskdepdata = {}
next = self.rqdata.runq_depends[task]
next.add(task)
while next:
additional = []
for revdep in next:
fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[revdep]]
pn = self.rqdata.dataCache.pkg_fn[fn]
taskname = self.rqdata.runq_task[revdep]
deps = self.rqdata.runq_depends[revdep]
taskdepdata[revdep] = [pn, taskname, fn, deps]
for revdep2 in deps:
if revdep2 not in taskdepdata:
additional.append(revdep2)
next = additional
#bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
return taskdepdata
class RunQueueExecuteScenequeue(RunQueueExecute):
def __init__(self, rq):
    """Build the setscene ("scenequeue") execution state for runqueue *rq*.

    Collapses the full runqueue dependency graph down to a graph containing
    only the setscene tasks, computes which setscene tasks are unskippable,
    wires up explicit inter-setscene dependencies, and (optionally) runs the
    configured hash validation to decide which setscene tasks to attempt.
    """
    RunQueueExecute.__init__(self, rq)

    # Outcome sets, indexed by setscene-task position (not runqueue id).
    self.scenequeue_covered = set()
    self.scenequeue_notcovered = set()
    self.scenequeue_notneeded = set()

    # If we don't have any setscene functions, skip this step
    if len(self.rqdata.runq_setscene) == 0:
        rq.scenequeue_covered = set()
        rq.state = runQueueRunInit
        return

    self.stats = RunQueueStats(len(self.rqdata.runq_setscene))

    sq_revdeps = []
    sq_revdeps_new = []
    sq_revdeps_squash = []
    self.sq_harddeps = {}

    # We need to construct a dependency graph for the setscene functions. Intermediate
    # dependencies between the setscene tasks only complicate the code. This code
    # therefore aims to collapse the huge runqueue dependency tree into a smaller one
    # only containing the setscene functions.

    for task in xrange(self.stats.total):
        self.runq_running.append(0)
        self.runq_complete.append(0)
        self.runq_buildable.append(0)

    # First process the chains up to the first setscene task.
    # Endpoints are tasks with no reverse dependencies (nothing depends on them).
    endpoints = {}
    for task in xrange(len(self.rqdata.runq_fnid)):
        sq_revdeps.append(copy.copy(self.rqdata.runq_revdeps[task]))
        sq_revdeps_new.append(set())
        if (len(self.rqdata.runq_revdeps[task]) == 0) and task not in self.rqdata.runq_setscene:
            endpoints[task] = set()

    # Secondly process the chains between setscene tasks.
    for task in self.rqdata.runq_setscene:
        for dep in self.rqdata.runq_depends[task]:
            if dep not in endpoints:
                endpoints[dep] = set()
            endpoints[dep].add(task)

    def process_endpoints(endpoints):
        # Walk backwards from the endpoints, accumulating in sq_revdeps_new
        # the set of setscene tasks that (transitively) depend on each task.
        newendpoints = {}
        for point, task in endpoints.items():
            tasks = set()
            if task:
                tasks |= task
            if sq_revdeps_new[point]:
                tasks |= sq_revdeps_new[point]
            sq_revdeps_new[point] = set()
            if point in self.rqdata.runq_setscene:
                sq_revdeps_new[point] = tasks
            for dep in self.rqdata.runq_depends[point]:
                if point in sq_revdeps[dep]:
                    sq_revdeps[dep].remove(point)
                if tasks:
                    sq_revdeps_new[dep] |= tasks
                # A dep becomes a new endpoint once all its revdeps are processed.
                if (len(sq_revdeps[dep]) == 0 or len(sq_revdeps_new[dep]) != 0) and dep not in self.rqdata.runq_setscene:
                    newendpoints[dep] = task
        if len(newendpoints) != 0:
            process_endpoints(newendpoints)

    process_endpoints(endpoints)

    # Build a list of setscene tasks which are "unskippable"
    # These are direct endpoints referenced by the build
    endpoints2 = {}
    sq_revdeps2 = []
    sq_revdeps_new2 = []
    def process_endpoints2(endpoints):
        # Same walk as process_endpoints, but each point also seeds itself
        # into the accumulated set (tasks = set([point])), so a setscene
        # task that is itself an endpoint ends up non-empty -> unskippable.
        newendpoints = {}
        for point, task in endpoints.items():
            tasks = set([point])
            if task:
                tasks |= task
            if sq_revdeps_new2[point]:
                tasks |= sq_revdeps_new2[point]
            sq_revdeps_new2[point] = set()
            if point in self.rqdata.runq_setscene:
                sq_revdeps_new2[point] = tasks
            for dep in self.rqdata.runq_depends[point]:
                if point in sq_revdeps2[dep]:
                    sq_revdeps2[dep].remove(point)
                if tasks:
                    sq_revdeps_new2[dep] |= tasks
                if (len(sq_revdeps2[dep]) == 0 or len(sq_revdeps_new2[dep]) != 0) and dep not in self.rqdata.runq_setscene:
                    newendpoints[dep] = tasks
        if len(newendpoints) != 0:
            process_endpoints2(newendpoints)
    for task in xrange(len(self.rqdata.runq_fnid)):
        sq_revdeps2.append(copy.copy(self.rqdata.runq_revdeps[task]))
        sq_revdeps_new2.append(set())
        if (len(self.rqdata.runq_revdeps[task]) == 0) and task not in self.rqdata.runq_setscene:
            endpoints2[task] = set()
    process_endpoints2(endpoints2)

    # Unskippable tasks are stored by setscene index, not runqueue id.
    self.unskippable = []
    for task in self.rqdata.runq_setscene:
        if sq_revdeps_new2[task]:
            self.unskippable.append(self.rqdata.runq_setscene.index(task))

    # Squash the collapsed graph into setscene-index space.
    for task in xrange(len(self.rqdata.runq_fnid)):
        if task in self.rqdata.runq_setscene:
            deps = set()
            for dep in sq_revdeps_new[task]:
                deps.add(self.rqdata.runq_setscene.index(dep))
            sq_revdeps_squash.append(deps)
        elif len(sq_revdeps_new[task]) != 0:
            bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")

    # Resolve setscene inter-task dependencies
    # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
    # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
    for task in self.rqdata.runq_setscene:
        realid = self.rqdata.taskData.gettask_id(self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]], self.rqdata.runq_task[task] + "_setscene", False)
        idepends = self.rqdata.taskData.tasks_idepends[realid]
        for (depid, idependtask) in idepends:
            if depid not in self.rqdata.taskData.build_targets:
                continue
            depdata = self.rqdata.taskData.build_targets[depid][0]
            if depdata is None:
                continue
            dep = self.rqdata.taskData.fn_index[depdata]
            taskid = self.rqdata.get_task_id(self.rqdata.taskData.getfn_id(dep), idependtask.replace("_setscene", ""))
            if taskid is None:
                bb.msg.fatal("RunQueue", "Task %s_setscene depends upon non-existent task %s:%s" % (self.rqdata.get_user_idstring(task), dep, idependtask))
            if not self.rqdata.runq_setscene.index(taskid) in self.sq_harddeps:
                self.sq_harddeps[self.rqdata.runq_setscene.index(taskid)] = set()
            self.sq_harddeps[self.rqdata.runq_setscene.index(taskid)].add(self.rqdata.runq_setscene.index(task))
            sq_revdeps_squash[self.rqdata.runq_setscene.index(task)].add(self.rqdata.runq_setscene.index(taskid))
            # Have to zero this to avoid circular dependencies
            sq_revdeps_squash[self.rqdata.runq_setscene.index(taskid)] = set()
    # Re-add the hard-dependency edges that the zeroing above may have dropped.
    for task in self.sq_harddeps:
        for dep in self.sq_harddeps[task]:
            sq_revdeps_squash[dep].add(task)

    #for task in xrange(len(sq_revdeps_squash)):
    #    realtask = self.rqdata.runq_setscene[task]
    #    bb.warn("Task %s: %s_setscene is %s " % (task, self.rqdata.get_user_idstring(realtask) , sq_revdeps_squash[task]))

    # sq_deps is the forward-dependency mirror of sq_revdeps; sq_revdeps2 is
    # a deep copy that scenequeue_updatecounters consumes destructively.
    self.sq_deps = []
    self.sq_revdeps = sq_revdeps_squash
    self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)
    for task in xrange(len(self.sq_revdeps)):
        self.sq_deps.append(set())
    for task in xrange(len(self.sq_revdeps)):
        for dep in self.sq_revdeps[task]:
            self.sq_deps[dep].add(task)

    # Tasks with no remaining reverse deps can run immediately.
    for task in xrange(len(self.sq_revdeps)):
        if len(self.sq_revdeps[task]) == 0:
            self.runq_buildable[task] = 1

    self.outrightfail = []
    if self.rq.hashvalidate:
        sq_hash = []
        sq_hashfn = []
        sq_fn = []
        sq_taskname = []
        sq_task = []
        noexec = []
        stamppresent = []
        for task in xrange(len(self.sq_revdeps)):
            realtask = self.rqdata.runq_setscene[task]
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
            taskname = self.rqdata.runq_task[realtask]
            taskdep = self.rqdata.dataCache.task_deps[fn]

            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                # noexec tasks produce nothing: just stamp and skip.
                noexec.append(task)
                self.task_skip(task)
                bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCache, fn)
                continue

            if self.rq.check_stamp_task(realtask, taskname + "_setscene", cache=self.stampcache):
                logger.debug(2, 'Setscene stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(realtask))
                stamppresent.append(task)
                self.task_skip(task)
                continue

            if self.rq.check_stamp_task(realtask, taskname, recurse = True, cache=self.stampcache):
                logger.debug(2, 'Normal stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(realtask))
                stamppresent.append(task)
                self.task_skip(task)
                continue

            sq_fn.append(fn)
            sq_hashfn.append(self.rqdata.dataCache.hashfn[fn])
            sq_hash.append(self.rqdata.runq_hash[realtask])
            sq_taskname.append(taskname)
            sq_task.append(task)
        # Call the user-configured hash validation function; it returns the
        # indices (into the sq_* parallel lists) of tasks with valid hashes.
        call = self.rq.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
        locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
        valid = bb.utils.better_eval(call, locs)

        # NOTE: valid_new aliases stamppresent; only membership is tested
        # below, so the aliasing is harmless here.
        valid_new = stamppresent
        for v in valid:
            valid_new.append(sq_task[v])

        for task in xrange(len(self.sq_revdeps)):
            if task not in valid_new and task not in noexec:
                realtask = self.rqdata.runq_setscene[task]
                logger.debug(2, 'No package found, so skipping setscene task %s',
                             self.rqdata.get_user_idstring(realtask))
                self.outrightfail.append(task)

    logger.info('Executing SetScene Tasks')

    self.rq.state = runQueueSceneRun
def scenequeue_updatecounters(self, task, fail = False):
    """Propagate completion (or failure) of setscene *task* to dependents.

    Removes *task* from each dependent's outstanding reverse-dependency set;
    a dependent with no remaining reverse deps becomes buildable. On failure,
    dependents that hard-depend on *task* are skipped recursively instead.
    """
    for revdep in self.sq_deps[task]:
        harddep = fail and task in self.sq_harddeps and revdep in self.sq_harddeps[task]
        if harddep:
            realtask = self.rqdata.runq_setscene[task]
            realdep = self.rqdata.runq_setscene[revdep]
            logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (self.rqdata.get_user_idstring(realtask), self.rqdata.get_user_idstring(realdep)))
            self.scenequeue_updatecounters(revdep, fail)
            continue
        if task not in self.sq_revdeps2[revdep]:
            # Already removed by the hard-dependency pass above.
            continue
        self.sq_revdeps2[revdep].remove(task)
        if not self.sq_revdeps2[revdep]:
            self.runq_buildable[revdep] = 1
def task_completeoutright(self, task):
    """
    Mark a task as completed
    Look at the reverse dependencies and mark any task with
    completed dependencies as buildable
    """
    realtask = self.rqdata.runq_setscene[task]
    logger.debug(1, 'Found task %s which could be accelerated',
        self.rqdata.get_user_idstring(realtask))
    self.scenequeue_covered.add(task)
    self.scenequeue_updatecounters(task)
def task_complete(self, task):
    """Handle successful completion of setscene *task*: update the stats,
    notify listeners via a sceneQueueTaskCompleted event, then record the
    task as covered."""
    self.stats.taskCompleted()
    bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
    self.task_completeoutright(task)
def task_fail(self, task, result):
    """Handle failure of setscene *task* (*result* is the failure detail
    passed through to the sceneQueueTaskFailed event)."""
    self.stats.taskFailed()
    bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
    self.scenequeue_notcovered.add(task)
    # fail=True also cascades the skip to hard-dependent setscene tasks
    self.scenequeue_updatecounters(task, True)
def task_failoutright(self, task):
    """Mark setscene *task* as failed without ever running it: flag it as
    run/buildable so the queue moves past it, count it as completed+skipped,
    record it as not covered, and cascade the failure to dependents.

    Fix: removed the unused local ``index`` lookup present in the original.
    """
    self.runq_running[task] = 1
    self.runq_buildable[task] = 1
    self.stats.taskCompleted()
    self.stats.taskSkipped()
    self.scenequeue_notcovered.add(task)
    self.scenequeue_updatecounters(task, True)
def task_skip(self, task):
    """Skip setscene *task* (its result is already in place): mark it as
    run and buildable, treat it as completed outright, and record it as
    completed+skipped in the stats."""
    self.runq_buildable[task] = 1
    self.runq_running[task] = 1
    self.task_completeoutright(task)
    self.stats.taskCompleted()
    self.stats.taskSkipped()
def execute(self):
"""
Run the tasks in a queue prepared by prepare_runqueue
"""
self.rq.read_workers()
task = None
if self.stats.active < self.number_tasks:
# Find the next setscene to run
for nexttask in xrange(self.stats.total):
if self.runq_buildable[nexttask] == 1 and self.runq_running[nexttask] != 1:
if nexttask in self.unskippable:
logger.debug(2, "Setscene task %s is unskippable" % self.rqdata.get_user_idstring(self.rqdata.runq_setscene[nexttask]))
if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
realtask = self.rqdata.runq_setscene[nexttask]
fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
foundtarget = False
for target in self.rqdata.target_pairs:
if target[0] == fn and target[1] == self.rqdata.runq_task[realtask]:
foundtarget = True
break
if not foundtarget:
logger.debug(2, "Skipping setscene for task %s" % self.rqdata.get_user_idstring(self.rqdata.runq_setscene[nexttask]))
self.task_skip(nexttask)
self.scenequeue_notneeded.add(nexttask)
return True
if nexttask in self.outrightfail:
self.task_failoutright(nexttask)
return True
task = nexttask
break
if task is not None:
realtask = self.rqdata.runq_setscene[task]
fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
taskname = self.rqdata.runq_task[realtask] | |
True)
return False
if dbfunctions.get_playlist(self.db_session, text): # playlist exists
answer = self.create_dialog_ok_or_close(_("Overwrite Playlist?"), _("A playlist by the name '%s' already exists, overwrite?" % text))
if answer != "ok":
return False
dbfunctions.set_playlist(self.db_session, text, self.audio_engine.get_playlist())
self.destroy_playlist()
elif type == 'Export': # Export Playlist
if iter == None: # nothing selected
return True
if playlist_id == -1: # placeholders
return True
if playlist_id != -2: # not local playlists
self.create_dialog_alert("error", _("Only exporting of local playlists is supported."), True)
return False
else: # ready to export
list = dbfunctions.get_playlist(self.db_session, playlist_name)
if not list:
self.create_dialog_alert("error", _("Cannot export empty playlist."), True)
return False
chooser = gtk.FileChooserDialog(title="Save as...",action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_SAVE,gtk.RESPONSE_OK))
chooser.set_current_folder(os.path.expanduser("~"))
response = chooser.run()
if response == gtk.RESPONSE_OK:
filename = chooser.get_filename()
if os.path.isfile(filename):
self.create_dialog_alert("error", _("File already exists."), True)
chooser.destroy()
return False
f = open(filename, 'w')
f.write("Playlist '%s' export from Viridian - %s\n\n" % (playlist_name, time.strftime("%A, %B %d, %Y at %I:%M%p")))
for song_id in list:
f.write(helperfunctions.convert_html_to_string(self.ampache_conn.get_song_url(song_id).rpartition('.')[0]))
f.write("\n")
f.close()
print("Exported playlist to %s" % (filename))
self.create_dialog_alert("info", _("Playlist %s written to %s." % (playlist_name, filename)), True)
chooser.destroy()
self.destroy_playlist()
def button_delete_playlist_clicked(self, widget, selection, type):
    """Delete the selected playlist (only local playlists can be deleted)."""
    model, row = selection.get_selected()
    if row == None: # nothing selected
        return False
    playlist_id = model[row][4]
    playlist_name = model[row][0]
    if playlist_id == -2:
        # Local playlist: confirm, then remove from the local database.
        answer = self.create_dialog_ok_or_close(_("Delete Playlist?"), _("Are you sure you want to delete the playlist '%s'?" % playlist_name))
        if answer != "ok":
            return False
        dbfunctions.remove_playlist(self.db_session, playlist_name)
        self.update_playlist_select(type, model)
    elif playlist_id != -1:
        # Anything else (except placeholders, id == -1) lives on the server.
        self.create_dialog_alert('error', _("Cannot delete playlists that are on the Ampache server from Viridian."))
#############
# Cache
#############
def button_clear_cached_artist_info_clicked(self, widget=None, data=None):
    """Clear the locally cached catalog, then reauthenticate and re-pull artists.

    A simple re-entrancy flag prevents the (slow) refresh from being started
    twice. Fix: the original probed the flag with a bare ``try/except: pass``;
    ``getattr`` with a default handles the first call cleanly.
    """
    if getattr(self, 'button_clear_cache_locked', False):
        print("Already Running")
        return False
    self.button_clear_cache_locked = True
    print("Clearing cached catalog -- will reauthenticate and pull artists")
    self.stop_all_threads()
    dbfunctions.clear_cached_catalog(self.db_session)
    # Preserve the current playlist across the cache wipe.
    self.db_session.variable_set('current_playlist', self.audio_engine.get_playlist())
    self.login_and_get_artists()
    self.button_clear_cache_locked = False
def button_clear_album_art_clicked(self, widget=None, data=None):
    """Delete the locally cached album art and report it in the status bar."""
    self.clear_album_art()
    self.update_statusbar(_("Album Art Cleared"))
def button_reset_everything_clicked(self, widget=None, data=None):
    """After confirmation, wipe all personal data Viridian stores and quit."""
    answer = self.create_dialog_ok_or_close(_("Reset Viridian"), _("Are you sure you want to delete all personal information stored with Viridian?"))
    if answer != "ok":
        return
    self.reset_everything()
    gtk.main_quit()
def button_pre_cache_info_clicked(self, widget=None, data=None):
    """Confirm and schedule a background pre-cache of all artist/album/song
    metadata (not the audio itself).

    Fixes: replaces the bare ``try/except: pass`` attribute probe with
    ``getattr`` (the lock attribute does not exist before the first run) and
    the un-idiomatic ``== False`` authentication test.
    """
    if not self.ampache_conn.is_authenticated():
        self.create_dialog_alert("warn", _("Not Authenticated"), True)
        return False
    if getattr(self, 'button_pre_cache_locked', False):
        print("Already Running")
        self.create_dialog_alert("info", _("Pre-Cache already in progress."))
        return False
    answer = self.create_dialog_ok_or_close(_("Pre-Cache"), _("This will cache all of the artist, album, and song information (not the songs themselves) locally to make Viridian respond faster.\n\nThis process can take a long time depending on the size of your catalog. Proceed?"))
    if answer != "ok":
        return False
    self.button_pre_cache_locked = True
    # Run in the GTK idle loop so the UI stays responsive.
    gobject.idle_add(self.__button_pre_cache_info_clicked)
def __button_pre_cache_info_clicked(self, widget=None, data=None):
    """Worker for the pre-cache button: pull every album per artist, then
    every song per album, updating the status bar as it goes. Honors
    self.pre_cache_continue so the run can be cancelled mid-way."""
    self.pre_cache_continue = True # this will be set to false if this function should stop
    try:
        start_time = int(time.time())
        artists = dbfunctions.get_artist_ids(self.db_session)
        num_artists = len(artists)
        for position, artist_id in enumerate(artists, 1):
            if self.pre_cache_continue == False: # cancelled
                self.button_pre_cache_locked = False
                return False
            self.check_and_populate_albums(artist_id)
            self.update_statusbar(_("Pulling all albums from artists: %d/%d" % (position, num_artists)))
        self.update_statusbar(_("Finished pulling albums"))
        albums = dbfunctions.get_album_ids(self.db_session)
        num_albums = len(albums)
        for position, album_id in enumerate(albums, 1):
            if self.pre_cache_continue == False: # cancelled
                self.button_pre_cache_locked = False
                return False
            self.check_and_populate_songs(album_id)
            self.update_statusbar(_("Pulling all songs from albums: %d/%d" % (position, num_albums)))
        time_taken = helperfunctions.convert_seconds_to_human_readable(int(time.time()) - start_time)
        self.update_statusbar(_("Finished Pre Cache -- Time Taken: " + str(time_taken)))
        print("Finished Pre Cache -- Time Taken: " + str(time_taken))
    except Exception as detail:
        print("Error with pre-cache!", detail)
        self.update_statusbar(_("Error with pre-cache!"))
        self.button_pre_cache_locked = False
        self.create_dialog_alert("error", _("Error with pre-cache!\n\n"+str(detail) ) )
        return False
    self.button_pre_cache_locked = False
    return False
def button_album_art_clicked(self, widget, event=None, data=None):
    """Dispatch clicks on the album-art event box: button 3 (right click)
    opens the context menu, anything else refreshes the art."""
    handler = (self.__button_album_art_right_clicked if event.button == 3
               else self.__button_album_art_left_clicked)
    handler(widget, event, data)
def __button_album_art_left_clicked(self, widget, event, data):
    """Left click on album art: re-download the art for the current album."""
    self.__re_fetch_album_art()
def __button_album_art_right_clicked(self, widget, event, data):
    """Right click on album art: pop up an Open Image / Refresh context menu."""
    menu = gtk.Menu()
    open_item = gtk.MenuItem(_("Open Image"))
    open_item.connect('activate', lambda x: self.gnome_open(self.current_album_art_file))
    menu.append(open_item)
    refresh_item = gtk.MenuItem(_("Refresh Album Art"))
    refresh_item.connect('activate', self.__re_fetch_album_art)
    menu.append(refresh_item)
    menu.show_all()
    menu.popup(None, None, None, event.button, event.time, None)
    return False
##################
# XML Server Buttons
##################
def button_xml_server_clicked(self, widget, action, label, image, port):
    """Start, stop or restart the XML server, then refresh its status row."""
    running = self.xml_server.is_running
    # Ignore no-op requests: starting a running server / stopping a stopped one.
    if running and action == "start":
        return False
    if not running and action == "stop":
        return False
    # Persist the chosen port (digits only) before acting on it.
    if port.get_text().isdigit():
        self.db_session.variable_set('xmlrpc_port', int(port.get_text()))
    if action == "start":
        self.start_xml_server()
    elif action == "stop":
        self.stop_xml_server()
    elif action == "restart":
        self.restart_xml_server()
    # Reflect the new state in the GUI.
    if self.xml_server.is_running:
        image.set_from_stock(gtk.STOCK_YES,gtk.ICON_SIZE_SMALL_TOOLBAR)
        label.set_text(_("Running. (port %d)" % self.xml_server.port))
    else:
        image.set_from_stock(gtk.STOCK_NO,gtk.ICON_SIZE_SMALL_TOOLBAR)
        label.set_text(_("Not Running."))
#######################################
# Dialogs
#######################################
def create_dialog_alert(self, dialog_type, message, ok=False):
    """Show a modal alert of the given type ('warn', 'error', 'info' or
    'question') with a Close button (or OK when ok is True). Returns False
    for an unrecognised type."""
    type_map = {
        "warn": gtk.MESSAGE_WARNING,
        "error": gtk.MESSAGE_ERROR,
        "info": gtk.MESSAGE_INFO,
        "question": gtk.MESSAGE_QUESTION,
    }
    if dialog_type not in type_map:
        return False
    buttons = gtk.BUTTONS_OK if ok == True else gtk.BUTTONS_CLOSE
    md = gtk.MessageDialog(self.window, gtk.DIALOG_DESTROY_WITH_PARENT, type_map[dialog_type], buttons, message)
    md.set_title('Viridian')
    md.set_icon(self.images_pixbuf_viridian_simple)
    md.run()
    md.destroy()
def create_dialog_ok_or_close(self, title, message):
    """Show a modal OK/Cancel dialog; return "ok" or "cancel"."""
    dialog = gtk.Dialog(str(title), self.window, gtk.DIALOG_DESTROY_WITH_PARENT, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OK, gtk.RESPONSE_OK))
    body = gtk.Label(message)
    body.set_line_wrap(True)
    dialog.get_child().pack_start(body)
    dialog.get_child().set_border_width(10)
    dialog.set_border_width(3)
    dialog.set_resizable(False)
    dialog.set_icon(self.images_pixbuf_viridian_simple)
    dialog.show_all()
    response = dialog.run()
    dialog.destroy()
    return "ok" if response == gtk.RESPONSE_OK else "cancel"
def create_about_dialog(self, widget, data=None):
    """Show the About dialog for this application."""
    about = gtk.AboutDialog()
    about.set_name("Viridian")
    about.set_icon(self.images_pixbuf_viridian_simple)
    about.set_version(VERSION_NUMBER)
    about.set_copyright("(c) <NAME> <<EMAIL>>")
    about.set_comments(_("Viridian is a front-end for an Ampache Server (see http://ampache.org)"))
    about.set_website("http://viridian.daveeddy.com")
    about.set_authors(["Author:", "<NAME> <<EMAIL>>", "http://www.daveeddy.com", "", "AudioEngine by:", "<NAME> <<EMAIL>>", "http://conquerthesound.com"])
    about.set_artists(["<NAME> <<EMAIL>>", "http://www.skyeillustration.com", "", "Media Icons by:", "http://mysitemyway.com", "http://ampache.org"])
    try:
        # The logo may be missing; the dialog is still usable without it.
        about.set_logo(self.images_pixbuf_viridian_app)
    except:
        pass
    try:
        # Read the bundled BSD license text; fall back to a link on failure.
        license_text = open(os.path.join(SCRIPT_PATH, os.path.join('doc', 'bsd.txt'))).read()
    except:
        license_text = "BSD 3-Clause <http://www.opensource.org/licenses/BSD-3-Clause>"
    about.set_license(license_text)
    about.run()
    about.destroy()
def create_catalog_updated_dialog(self):
    """Ask whether the local cache should be refreshed because the server
    catalog is newer; return True if the user agreed."""
    answer = self.create_dialog_ok_or_close(_("Ampache Catalog Updated"), _("The Ampache catalog on the server is newer than the locally cached catalog on this computer.\nWould you like to update the local catalog by clearing the local cache?\n\n(You can also do this at anytime by going to File -> Clear Local Cache)."))
    return answer == "ok"
#######################################
# Audio Engine Callback
#######################################
def audioengine_song_changed(self, song_id):
    """AudioEngine callback: a new song (or None when playback stopped) is
    current. Caches the song's metadata -- preferring the local database and
    falling back to the Ampache server -- then hands the GUI update to a
    worker thread.

    Fix: use ``is not None`` instead of ``!= None`` for the None test.
    """
    if song_id is not None:
        if dbfunctions.song_has_info(self.db_session, song_id):
            self.current_song_info = dbfunctions.get_single_song_dict(self.db_session, song_id)
        else:
            self.current_song_info = self.ampache_conn.get_song_info(song_id)
    _thread.start_new_thread(self.__audioengine_song_changed, (song_id,))
def __audioengine_song_changed(self, song_id):
"""The function that gets called when the AudioEngine changes songs."""
THREAD_LOCK_B.acquire()
self.update_playlist_window()
#self.refresh_gui()
if song_id == None: # nothing playing
self.current_song_info = None
self.play_pause_image.set_from_pixbuf(self.images_pixbuf_play)
self.set_tray_tooltip('Viridian')
self.window.set_title("Viridian")
self.set_tray_icon(None)
THREAD_LOCK_B.release()
return False
self.play_pause_image.set_from_pixbuf(self.images_pixbuf_pause)
print(self.current_song_info) # DEBUG
song_time = self.current_song_info['song_time']
self.time_elapsed_slider.set_range(0, song_time)
self.time_total_label.set_text(helperfunctions.convert_seconds_to_human_readable(song_time))
song_title = self.current_song_info['song_title']
artist_name = self.current_song_info['artist_name']
album_name = self.current_song_info['album_name']
song_title_html = helperfunctions.convert_string_to_html(song_title)
artist_name_html = helperfunctions.convert_string_to_html(artist_name)
album_name_html = helperfunctions.convert_string_to_html(album_name)
### Update EVERYTHING to say the current artist, album, and song
# make font size smaller if the title is long
length = len(song_title_html)
if length > 40:
self.current_song_label.set_markup('<span size="9000"><b>'+song_title_html+'</b></span>')
elif length > 20:
self.current_song_label.set_markup('<span size="11000"><b>'+song_title_html+'</b></span>')
else:
self.current_song_label.set_markup('<span size="13000"><b>'+song_title_html+'</b></span>')
self.current_artist_label.set_markup( '<span size="10000">'+artist_name_html+'</span>' )
self.current_album_label.set_markup( '<span size="10000">'+album_name_html+'</span>' )
### Update the statusbar and tray icon ###
self.set_tray_tooltip("Viridian :: " + song_title + ' - ' + artist_name + ' - ' + album_name)
self.window.set_title("Viridian :: " + song_title + ' - ' + artist_name + ' - ' + album_name)
self.update_statusbar(song_title + ' - ' + artist_name + ' - ' + album_name)
### Get the album Art ###
album_id = self.current_song_info['album_id']
if not os.path.exists(ALBUM_ART_DIR):
os.mkdir(ALBUM_ART_DIR)
self.current_album_art_file = os.path.join(ALBUM_ART_DIR, str(album_id))
if os.path.isfile(self.current_album_art_file):
print("Album art exists locally")
else:
print("Fetching album art... ", end=' ')
album_art = self.ampache_conn.get_album_art(album_id)
response = urllib.request.urlopen(album_art)
f | |
import unittest
class Empty(Exception):
    """Error attempting to access an element from an empty container."""
    pass
class _DoublyLinkedBase(object):
"""A base class providing a doubly linked list representation."""
class _Node:
"""Lightweight, nonpublic class for storing a doubly linked node."""
__slots__ = '_element', '_prev', '_next' # streamline memory
def __init__(self, element, prev, next): # initialize node's fields
self._element = element # user's element
self._prev = prev # previous node reference
self._next = next # next node reference
def __init__(self):
"""Create an empty list."""
self._header = self._Node(None, None, None)
self._trailer = self._Node(None, None, None)
self._header._next = self._trailer # trailer is after header
self._trailer._prev = self._header # header is before trailer
self._size = 0
def __len__(self):
"""Return the number of elements in the list."""
return self._size
def is_empty(self):
"""Return True if list is empty."""
return self._size == 0
def _insert_between(self, e, predecessor, successor):
"""Add element e between two existing nodes and return new node."""
newest = self._Node(e, predecessor, successor) # linked to neighbors
predecessor._next = newest
successor._prev = newest
self._size += 1
return newest
def _delete_node(self, node):
"""Delete nonsentinel node from the list and return its element."""
predecessor = node._prev
successor = node._next
predecessor._next = successor
successor._prev = predecessor
self._size -= 1
element = node._element # record deleted element
node._prev = node._next = node._element = None # deprecate node
return element # return deleted element
class LinkedDeque(_DoublyLinkedBase):
    """Double-ended queue implementation based on a doubly linked list."""

    def _require_nonempty(self):
        """Raise Empty if the deque currently holds no elements."""
        if self.is_empty():
            raise Empty('Deque is empty')

    def first(self):
        """Return (but do not remove) the element at the front of the deque."""
        self._require_nonempty()
        return self._header._next._element # real item just after header

    def last(self):
        """Return (but do not remove) the element at the back of the deque."""
        self._require_nonempty()
        return self._trailer._prev._element # real item just before trailer

    def insert_first(self, e):
        """Add element e to the front of the deque."""
        self._insert_between(e, self._header, self._header._next)

    def insert_last(self, e):
        """Add element e to the back of the deque."""
        self._insert_between(e, self._trailer._prev, self._trailer)

    def delete_first(self):
        """Remove and return the front element; raise Empty if deque is empty."""
        self._require_nonempty()
        return self._delete_node(self._header._next)

    def delete_last(self):
        """Remove and return the back element; raise Empty if deque is empty."""
        self._require_nonempty()
        return self._delete_node(self._trailer._prev)
class PositionalList(_DoublyLinkedBase):
    """A sequential container of elements allowing positional access."""

    class Position:
        """An abstraction representing the location of a single element."""

        def __init__(self, container, node):
            """Constructor should not be invoked by user."""
            self._container = container # the list owning this position
            self._node = node           # the underlying _Node

        def element(self):
            """Return the element stored at this Position."""
            return self._node._element

        def __eq__(self, other):
            """Return True if other is a Position for the same node."""
            return type(other) is type(self) and other._node is self._node

        def __ne__(self, other):
            """Return True if other does not represent the same location."""
            return not (self == other) # opposite of __eq__

    # ---- nonpublic utilities ----
    def _validate(self, p):
        """Return the node behind Position p, or raise if p is invalid."""
        if not isinstance(p, self.Position):
            raise TypeError('p must be proper Position type')
        if p._container is not self:
            raise ValueError('p does not belong to this container')
        if p._node._next is None: # convention for deprecated nodes
            raise ValueError('p is no longer valid')
        return p._node

    def _make_position(self, node):
        """Wrap node in a Position (or return None for a sentinel)."""
        if node is self._header or node is self._trailer:
            return None # boundary violation
        return self.Position(self, node)

    # ---- accessors ----
    def first(self):
        """Return the first Position (or None if the list is empty)."""
        return self._make_position(self._header._next)

    def last(self):
        """Return the last Position (or None if the list is empty)."""
        return self._make_position(self._trailer._prev)

    def before(self, p):
        """Return the Position just before p (or None if p is first)."""
        return self._make_position(self._validate(p)._prev)

    def after(self, p):
        """Return the Position just after p (or None if p is last)."""
        return self._make_position(self._validate(p)._next)

    def __iter__(self):
        """Generate a forward iteration of the elements of the list."""
        cursor = self.first()
        while cursor is not None:
            yield cursor.element()
            cursor = self.after(cursor)

    # ---- mutators ----
    def _insert_between(self, e, predecessor, successor):
        """Override: insert e and return a Position rather than a node."""
        node = super(PositionalList, self)._insert_between(e, predecessor, successor)
        return self._make_position(node)

    def add_first(self, e):
        """Insert element e at the front and return its new Position."""
        return self._insert_between(e, self._header, self._header._next)

    def add_last(self, e):
        """Insert element e at the back and return its new Position."""
        return self._insert_between(e, self._trailer._prev, self._trailer)

    def add_before(self, p, e):
        """Insert element e just before Position p; return its new Position."""
        node = self._validate(p)
        return self._insert_between(e, node._prev, node)

    def add_after(self, p, e):
        """Insert element e just after Position p; return its new Position."""
        node = self._validate(p)
        return self._insert_between(e, node, node._next)

    def delete(self, p):
        """Remove and return the element at Position p."""
        return self._delete_node(self._validate(p))

    def replace(self, p, e):
        """Store e at Position p and return the element formerly there."""
        node = self._validate(p)
        displaced = node._element
        node._element = e
        return displaced
def insertion_sort(L):
    """Sort a PositionalList of comparable elements into nondecreasing order."""
    if len(L) <= 1:
        return # zero or one element: already sorted
    marker = L.first()
    while marker != L.last():
        pivot = L.after(marker) # next item to place
        value = pivot.element()
        if value > marker.element():
            marker = pivot # already in order; pivot becomes new marker
        else:
            # Walk left to the leftmost position holding something > value.
            walk = marker
            while walk != L.first() and L.before(walk).element() > value:
                walk = L.before(walk)
            L.delete(pivot)
            L.add_before(walk, value) # reinsert value before walk
class FavoritesList:
    """List of elements ordered from most frequently accessed to least."""

    class _Item:
        """Nonpublic wrapper pairing an element with its access count."""
        __slots__ = '_value', '_count' # streamline memory usage

        def __init__(self, e):
            self._value = e # the user's element
            self._count = 0 # accesses recorded so far

    def __init__(self):
        """Create an empty list of favorites."""
        self._data = PositionalList() # holds _Item instances

    def __len__(self):
        """Return the number of entries on the favorites list."""
        return len(self._data)

    def is_empty(self):
        """Return True if the list is empty."""
        return len(self._data) == 0

    def access(self, e):
        """Access element e, increasing its count (adding it if new)."""
        pos = self._find_position(e)
        if pos is None:
            pos = self._data.add_last(self._Item(e)) # new element goes last
        pos.element()._count += 1
        self._move_up(pos) # it may now outrank earlier entries

    def remove(self, e):
        """Remove element e from the list of favorites, if present."""
        pos = self._find_position(e)
        if pos is not None:
            self._data.delete(pos)

    def top(self, k):
        """Generate the k most frequently accessed elements, in order."""
        if not 1 <= k <= len(self):
            raise ValueError('Illegal value for k')
        cursor = self._data.first()
        for _ in range(k):
            yield cursor.element()._value
            cursor = self._data.after(cursor)

    # ---- nonpublic utilities ----
    def _find_position(self, e):
        """Return the Position holding element e, or None if absent."""
        cursor = self._data.first()
        while cursor is not None and cursor.element()._value != e:
            cursor = self._data.after(cursor)
        return cursor

    def _move_up(self, p):
        """Shift the item at Position p forward past lower-count items."""
        if p != self._data.first():
            count = p.element()._count
            ahead = self._data.before(p)
            if count > ahead.element()._count: # must shift forward
                while (ahead != self._data.first() and
                       count > self._data.before(ahead).element()._count):
                    ahead = self._data.before(ahead)
                self._data.add_before(ahead, self._data.delete(p)) # delete/reinsert
class FavoritesListMTF(FavoritesList):
"""List of elements ordered with move-to-front heuristic."""
# we override _move_up to provide move-to-front semantics
def _move_up(self, p):
"""Move accessed item at Position p to front of list."""
if p != self._data.first():
self._data.add_first(self._data.delete(p)) # delete/reinsert
# we override top because list is no longer sorted
def top(self, k):
| |
import logging
import os
import shutil
import socket
import time
import pytest
import salt.cache
import salt.loader
from salt.exceptions import SaltCacheError
from saltfactories.utils import random_string
from saltfactories.utils.ports import get_unused_localhost_port
from tests.support.mock import MagicMock, patch
# Skip this whole module when the docker SDK cannot be imported.
docker = pytest.importorskip("docker")

log = logging.getLogger(__name__)

# Module-wide marks: every test here is slow and needs a local docker daemon.
pytestmark = [
    pytest.mark.slow_test,
    pytest.mark.skip_if_binaries_missing("dockerd"),
]
# TODO: add out-of-band (i.e. not via the API) additions to the cache -<NAME>, 2021-09-28
# TODO: in PR request opinion: is it better to double serialize the data, e.g.
# store -> __context__['serial'].dumps({"timestamp": tstamp, "value": __context__['serial'].dumps(value)})
# or is the existing approach of storing timestamp as a secondary key a good one???
# ??? Is one slower than the other?
# TODO: Is there a better approach for waiting until the container is fully running? -<NAME>, 2021-07-27
class Timer:
    """Simple wall-clock countdown used to bound the polling loops below."""

    def __init__(self, timeout=20):
        # Remember the moment of creation; `expired` measures against it.
        self.start = time.time()
        self.timeout = timeout

    @property
    def expired(self):
        """True once more than `timeout` seconds have elapsed since creation."""
        return time.time() > self.start + self.timeout
# One ephemeral localhost port per backing service, allocated once per module
# so each container fixture below can bind without colliding with the others.
@pytest.fixture(scope="module")
def etcd_port():
    return get_unused_localhost_port()
@pytest.fixture(scope="module")
def redis_port():
    return get_unused_localhost_port()
@pytest.fixture(scope="module")
def consul_port():
    return get_unused_localhost_port()
# GIVE ME FIXTURES ON FIXTURES NOW
@pytest.fixture(scope="module")
def mysql_5_6_port():
    return get_unused_localhost_port()
@pytest.fixture(scope="module")
def mysql_5_7_port():
    return get_unused_localhost_port()
@pytest.fixture(scope="module")
def mysql_8_0_port():
    return get_unused_localhost_port()
@pytest.fixture(scope="module")
def mariadb_10_1_port():
    return get_unused_localhost_port()
@pytest.fixture(scope="module")
def mariadb_10_2_port():
    return get_unused_localhost_port()
@pytest.fixture(scope="module")
def mariadb_10_3_port():
    return get_unused_localhost_port()
@pytest.fixture(scope="module")
def mariadb_10_4_port():
    return get_unused_localhost_port()
@pytest.fixture(scope="module")
def mariadb_10_5_port():
    return get_unused_localhost_port()
@pytest.fixture(scope="module")
def percona_5_5_port():
    return get_unused_localhost_port()
@pytest.fixture(scope="module")
def percona_5_6_port():
    return get_unused_localhost_port()
@pytest.fixture(scope="module")
def percona_5_7_port():
    return get_unused_localhost_port()
@pytest.fixture(scope="module")
def percona_8_0_port():
    return get_unused_localhost_port()
# TODO: We should probably be building our own etcd docker image - fine to base it off of this one (or... others) -<NAME>, 2021-07-27
@pytest.fixture(scope="module")
def etcd_apiv2_container(salt_factories, docker_client, etcd_port):
    """Run an etcd (v2 API) server container and yield the running factory."""
    container = salt_factories.get_container(
        random_string("etcd-server-"),
        image_name="elcolio/etcd",
        docker_client=docker_client,
        check_ports=[etcd_port],
        container_run_kwargs={
            # etcd refuses anonymous clients unless this env flag is set
            "environment": {"ALLOW_NONE_AUTHENTICATION": "yes"},
            "ports": {"2379/tcp": etcd_port},
        },
    )
    with container.started() as factory:
        yield factory
@pytest.fixture(scope="module")
def redis_container(salt_factories, docker_client, redis_port, docker_redis_image):
    """Run a redis server container and yield the running factory."""
    container = salt_factories.get_container(
        random_string("redis-server-"),
        image_name=docker_redis_image,
        docker_client=docker_client,
        check_ports=[redis_port],
        container_run_kwargs={"ports": {"6379/tcp": redis_port}},
    )
    with container.started() as factory:
        yield factory
# Pytest does not have the ability to parametrize fixtures with parametrized
# fixtures, which is super annoying. In other words, in order to have a `cache`
# test fixture that takes different versions of the cache that depend on
# different docker images, I've gotta make up fixtures for each
# image+container. When https://github.com/pytest-dev/pytest/issues/349 is
# actually fixed then we can go ahead and refactor all of these mysql
# containers, caches, and their images into a single parametrized fixture.
def start_mysql_container(
    salt_factories, docker_client, mysql_port, docker_mysql_image
):
    """Launch a MySQL-flavored server container.

    Returns the container's ``started()`` context manager so each caller
    fixture can enter/exit it itself (this is a plain helper, not a fixture).
    """
    run_kwargs = {
        "environment": {
            "MYSQL_ROOT_PASSWORD": "<PASSWORD>",
            # allow root logins from outside the container
            "MYSQL_ROOT_HOST": "%",
        },
        "ports": {"3306/tcp": mysql_port},
    }
    container = salt_factories.get_container(
        random_string("mysql-server-"),
        image_name=docker_mysql_image,
        docker_client=docker_client,
        check_ports=[mysql_port],
        container_run_kwargs=run_kwargs,
    )
    return container.started()
# One module-scoped container fixture per MySQL-flavored image; each simply
# enters the context manager returned by start_mysql_container above.
@pytest.fixture(scope="module")
def mysql_5_6_container(
    salt_factories, docker_client, mysql_5_6_port, docker_mysql_5_6_image
):
    with start_mysql_container(
        salt_factories, docker_client, mysql_5_6_port, docker_mysql_5_6_image
    ) as factory:
        yield factory
@pytest.fixture(scope="module")
def mysql_5_7_container(
    salt_factories, docker_client, mysql_5_7_port, docker_mysql_5_7_image
):
    with start_mysql_container(
        salt_factories, docker_client, mysql_5_7_port, docker_mysql_5_7_image
    ) as factory:
        yield factory
@pytest.fixture(scope="module")
def mysql_8_0_container(
    salt_factories, docker_client, mysql_8_0_port, docker_mysql_8_0_image
):
    with start_mysql_container(
        salt_factories, docker_client, mysql_8_0_port, docker_mysql_8_0_image
    ) as factory:
        yield factory
@pytest.fixture(scope="module")
def mariadb_10_1_container(
    salt_factories, docker_client, mariadb_10_1_port, docker_mariadb_10_1_image
):
    with start_mysql_container(
        salt_factories, docker_client, mariadb_10_1_port, docker_mariadb_10_1_image
    ) as factory:
        yield factory
@pytest.fixture(scope="module")
def mariadb_10_2_container(
    salt_factories, docker_client, mariadb_10_2_port, docker_mariadb_10_2_image
):
    with start_mysql_container(
        salt_factories, docker_client, mariadb_10_2_port, docker_mariadb_10_2_image
    ) as factory:
        yield factory
@pytest.fixture(scope="module")
def mariadb_10_3_container(
    salt_factories, docker_client, mariadb_10_3_port, docker_mariadb_10_3_image
):
    with start_mysql_container(
        salt_factories, docker_client, mariadb_10_3_port, docker_mariadb_10_3_image
    ) as factory:
        yield factory
@pytest.fixture(scope="module")
def mariadb_10_4_container(
    salt_factories, docker_client, mariadb_10_4_port, docker_mariadb_10_4_image
):
    with start_mysql_container(
        salt_factories, docker_client, mariadb_10_4_port, docker_mariadb_10_4_image
    ) as factory:
        yield factory
@pytest.fixture(scope="module")
def mariadb_10_5_container(
    salt_factories, docker_client, mariadb_10_5_port, docker_mariadb_10_5_image
):
    with start_mysql_container(
        salt_factories, docker_client, mariadb_10_5_port, docker_mariadb_10_5_image
    ) as factory:
        yield factory
@pytest.fixture(scope="module")
def percona_5_5_container(
    salt_factories, docker_client, percona_5_5_port, docker_percona_5_5_image
):
    with start_mysql_container(
        salt_factories, docker_client, percona_5_5_port, docker_percona_5_5_image
    ) as factory:
        yield factory
@pytest.fixture(scope="module")
def percona_5_6_container(
    salt_factories, docker_client, percona_5_6_port, docker_percona_5_6_image
):
    with start_mysql_container(
        salt_factories, docker_client, percona_5_6_port, docker_percona_5_6_image
    ) as factory:
        yield factory
@pytest.fixture(scope="module")
def percona_5_7_container(
    salt_factories, docker_client, percona_5_7_port, docker_percona_5_7_image
):
    with start_mysql_container(
        salt_factories, docker_client, percona_5_7_port, docker_percona_5_7_image
    ) as factory:
        yield factory
@pytest.fixture(scope="module")
def percona_8_0_container(
    salt_factories, docker_client, percona_8_0_port, docker_percona_8_0_image
):
    with start_mysql_container(
        salt_factories, docker_client, percona_8_0_port, docker_percona_8_0_image
    ) as factory:
        yield factory
@pytest.fixture(scope="module")
def consul_container(salt_factories, docker_client, consul_port, docker_consul_image):
    """Run a consul server container; yield the factory once consul answers."""
    container = salt_factories.get_container(
        random_string("consul-server-"),
        image_name=docker_consul_image,
        docker_client=docker_client,
        check_ports=[consul_port],
        container_run_kwargs={"ports": {"8500/tcp": consul_port}},
    )
    with container.started() as factory:
        # TODO: May want to do the same thing for redis to ensure that service is up & running
        # TODO: THIS IS HORRIBLE. THERE ARE BETTER WAYS TO DETECT SERVICE IS UP -<NAME>, 2021-10-12
        # Poll with a raw HTTP-over-socket request, doubling the sleep each
        # retry, until consul answers or the timer runs out.
        timer = Timer(timeout=10)
        sleeptime = 0.1
        while not timer.expired:
            try:
                with socket.create_connection(
                    ("localhost", consul_port), timeout=1
                ) as cli:
                    cli.send(b"GET /v1/kv/fnord HTTP/1.1\n\n")
                    cli.recv(2048)
                    break
            except ConnectionResetError as e:
                # NOTE(review): only errno 104 (connection reset by peer)
                # triggers the backoff sleep; any other errno is silently
                # swallowed and the loop just retries - confirm intended.
                if e.errno == 104:
                    time.sleep(sleeptime)
                    sleeptime += sleeptime  # exponential backoff
        else:
            # while/else: reached only when the loop exhausted without `break`
            assert False, "Timer expired before connecting to consul"
        yield factory
@pytest.fixture
def redis_cache(minion_opts, redis_port, redis_container):
    """Yield a redis-backed salt cache pointed at the redis container."""
    opts = minion_opts.copy()
    opts["cache"] = "redis"
    opts["cache.redis.host"] = "127.0.0.1"
    opts["cache.redis.port"] = redis_port
    # NOTE: If you would like to ensure that alternate prefixes are properly
    # tested, simply change these values and re-run the tests.
    opts["cache.redis.bank_prefix"] = "#BANKY_BANK"
    opts["cache.redis.bank_keys_prefix"] = "#WHO_HAS_MY_KEYS"
    opts["cache.redis.key_prefix"] = "#LPL"
    opts["cache.redis.timestamp_prefix"] = "%TICK_TOCK"
    opts["cache.redis.separator"] = "\N{SNAKE}"
    cache = salt.cache.factory(opts)
    yield cache
# autouse must be a bool; the previous string "true" only worked because any
# non-empty string is truthy (e.g. "false" would ALSO have enabled autouse).
@pytest.fixture(scope="module", autouse=True)
def ensure_deps(states):
    """Install the python client library for every cache backend under test.

    Runs automatically once per module; fails fast with pip's output when the
    requirements cannot be installed.
    """
    installation_result = states.pip.installed(
        name="fnord",
        pkgs=["python-etcd", "redis", "redis-py-cluster", "python-consul", "pymysql"],
    )
    assert (
        installation_result.result is True
    ), "unable to pip install requirements {}".format(installation_result.comment)
@pytest.fixture
def etcd_cache(minion_opts, etcd_port, etcd_apiv2_container):
    """Yield an etcd-backed salt cache pointed at the etcd container."""
    opts = minion_opts.copy()
    opts["cache"] = "etcd"
    opts["etcd.host"] = "127.0.0.1"
    opts["etcd.port"] = etcd_port
    opts["etcd.protocol"] = "http"
    # NOTE: If you would like to ensure that alternate suffixes are properly
    # tested, simply change this value and re-run the tests.
    opts["etcd.timestamp_suffix"] = ".frobnosticate"
    cache = salt.cache.factory(opts)
    yield cache
@pytest.fixture
def localfs_cache(minion_opts):
    """Yield a localfs cache; its on-disk cache dir is removed at teardown."""
    opts = minion_opts.copy()
    opts["cache"] = "localfs"
    cache = salt.cache.factory(opts)
    yield cache
    # teardown: drop everything the test wrote to disk
    shutil.rmtree(opts["cachedir"], ignore_errors=True)
@pytest.fixture
def consul_cache(minion_opts, consul_port, consul_container):
    """Yield a consul-backed salt cache pointed at the consul container."""
    opts = minion_opts.copy()
    opts["cache"] = "consul"
    opts["consul.host"] = "127.0.0.1"
    opts["consul.port"] = consul_port
    # NOTE: If you would like to ensure that alternate suffixes are properly
    # tested, simply change this value and re-run the tests.
    opts["consul.timestamp_suffix"] = ".frobnosticate"
    cache = salt.cache.factory(opts)
    yield cache
def fixy(minion_opts, mysql_port, mysql_container):
    """Return a ready-to-use mysql-backed salt cache against *mysql_container*.

    Waits for mysqld inside the container, creates the ``salt_cache``
    database, builds the cache via ``salt.cache.factory``, waits until the
    database is reachable from outside the container, then forces the
    schema migration (``last_update`` column) to be exercised.
    """
    # We're doing a late import because we need access to the exception
    import salt.cache.mysql_cache
    # The container can be available before mysql actually is
    mysql_container.container.exec_run(
        [
            "/bin/sh",
            "-c",
            'while ! mysql -u root -pfnord -e "SELECT 1;" >/dev/null; do sleep 1; done',
        ],
    )
    # Gotta make the db we're going to use. The result was previously bound to
    # an unused `res` local; it is intentionally ignored.
    mysql_container.container.exec_run(
        [
            "/bin/sh",
            "-c",
            'echo "create database salt_cache;" | mysql -u root -pfnord ',
        ],
    )
    opts = minion_opts.copy()
    opts["cache"] = "mysql"
    opts["mysql.host"] = "127.0.0.1"
    opts["mysql.port"] = mysql_port
    opts["mysql.user"] = "root"
    opts["mysql.password"] = "<PASSWORD>"
    opts["mysql.database"] = "salt_cache"
    opts["mysql.table_name"] = "cache"
    cache = salt.cache.factory(opts)
    # For some reason even though mysql is available in the container, we
    # can't reliably connect outside the container. Wait for access - but we
    # may need a new cache...
    timer = Timer(timeout=15)
    while not timer.expired:
        try:
            # Doesn't matter what. We just have to execute so that we spin
            # here until we can actually connect to the db instance.
            cache.modules["mysql.list"]("salt_cache")
        except salt.cache.mysql_cache.MySQLdb.DatabaseError:
            # We don't really care what MySQL error is happening -
            pass
        else:
            break
    else:
        # while/else: the db never became reachable before the timer expired
        if os.environ.get("CI_RUN"):
            pytest.skip('Timer expired before "select 1;" worked')
        else:
            assert False, 'Timer expired before "select 1;" worked'
    # This ensures that we will correctly alter any existing mysql tables for
    # current mysql cache users. Without completely altering the mysql_cache
    # implementation there's no real other reasonable way to reset the client
    # and force the alter_table to be called. Resetting the client to `None` is
    # what triggers the implementation to allow the ALTER TABLE to add the
    # last_update column
    run_query = cache.modules["mysql.run_query"]
    run_query(
        conn=None,
        query="ALTER TABLE salt_cache.cache DROP COLUMN last_update",
    )[0].fetchone()
    cache.modules["mysql.force_reconnect"]()
    return cache
# See container comment above >:(
# One module-scoped cache fixture per MySQL-flavored container, each built by
# the shared `fixy` helper.
@pytest.fixture(scope="module")
def mysql_5_6_cache(minion_opts, mysql_5_6_port, mysql_5_6_container):
    yield fixy(minion_opts, mysql_5_6_port, mysql_5_6_container)
@pytest.fixture(scope="module")
def mysql_5_7_cache(minion_opts, mysql_5_7_port, mysql_5_7_container):
    yield fixy(minion_opts, mysql_5_7_port, mysql_5_7_container)
@pytest.fixture(scope="module")
def mysql_8_0_cache(minion_opts, mysql_8_0_port, mysql_8_0_container):
    yield fixy(minion_opts, mysql_8_0_port, mysql_8_0_container)
@pytest.fixture(scope="module")
def mariadb_10_1_cache(minion_opts, mariadb_10_1_port, mariadb_10_1_container):
    yield fixy(minion_opts, mariadb_10_1_port, mariadb_10_1_container)
@pytest.fixture(scope="module")
def mariadb_10_2_cache(minion_opts, mariadb_10_2_port, mariadb_10_2_container):
    yield fixy(minion_opts, mariadb_10_2_port, mariadb_10_2_container)
@pytest.fixture(scope="module")
def mariadb_10_3_cache(minion_opts, mariadb_10_3_port, mariadb_10_3_container):
    yield fixy(minion_opts, mariadb_10_3_port, mariadb_10_3_container)
@pytest.fixture(scope="module")
def mariadb_10_4_cache(minion_opts, mariadb_10_4_port, mariadb_10_4_container):
    yield fixy(minion_opts, mariadb_10_4_port, mariadb_10_4_container)
@pytest.fixture(scope="module")
def mariadb_10_5_cache(minion_opts, mariadb_10_5_port, mariadb_10_5_container):
    yield fixy(minion_opts, mariadb_10_5_port, mariadb_10_5_container)
@pytest.fixture(scope="module")
def percona_5_5_cache(minion_opts, percona_5_5_port, percona_5_5_container):
    yield fixy(minion_opts, percona_5_5_port, percona_5_5_container)
@pytest.fixture(scope="module")
def percona_5_6_cache(minion_opts, percona_5_6_port, percona_5_6_container):
    yield fixy(minion_opts, percona_5_6_port, percona_5_6_container)
@pytest.fixture(scope="module")
def percona_5_7_cache(minion_opts, percona_5_7_port, percona_5_7_container):
    yield fixy(minion_opts, percona_5_7_port, percona_5_7_container)
@pytest.fixture(scope="module")
def percona_8_0_cache(minion_opts, percona_8_0_port, percona_8_0_container):
    yield fixy(minion_opts, percona_8_0_port, percona_8_0_container)
# TODO: Figure out how to parametrize this in combo with the getfixturevalue process -<NAME>, 2021-10-28
@pytest.fixture
def memcache_cache(minion_opts):
    """Yield a cache with in-memory expiry enabled."""
    opts = minion_opts.copy()
    # NOTE(review): no opts["cache"] is set here, so the default backend is
    # used; presumably a truthy memcache_expire_seconds makes the factory wrap
    # it in the memcache layer - verify against salt.cache.factory.
    opts["memcache_expire_seconds"] = 42
    cache = salt.cache.factory(opts)
    yield cache
@pytest.fixture(
params=[
"localfs_cache",
"redis_cache",
"etcd_cache",
"consul_cache",
"mysql_5_6_cache",
"mysql_5_7_cache",
"mysql_8_0_cache",
"mariadb_10_1_cache",
"mariadb_10_2_cache",
"mariadb_10_3_cache",
"mariadb_10_4_cache",
"mariadb_10_5_cache",
"percona_5_5_cache",
"percona_5_6_cache",
"percona_5_7_cache",
"percona_8_0_cache",
"memcache_cache", # Memcache actually delegates some behavior to the backing cache which alters the API somewhat.
]
)
def cache(request):
# This is not an ideal way to get the particular cache type but
# it's currently what we have available. It behaves | |
value=0.0, tags=tags101 + ['port:eth101/1/8'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/9'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/10'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/11'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/12'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/13'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/14'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/15'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/16'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/17'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/18'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/19'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/20'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/21'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/22'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/23'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/24'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/25'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/26'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/27'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/28'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/29'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/30'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/31'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/32'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/33'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/34'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/35'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/36'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/37'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/38'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/39'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/40'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/41'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/42'], hostname=hn101)
aggregator.assert_metric(metric_name, value=1.0, tags=tags102 + ['port:eth1/33'], hostname=hn102)
aggregator.assert_metric(metric_name, value=6.0, tags=tags102 + ['port:eth1/48'], hostname=hn102)
aggregator.assert_metric(metric_name, value=2475.0, tags=tags102 + ['port:eth1/49'], hostname=hn102)
aggregator.assert_metric(metric_name, value=2224.0, tags=tags102 + ['port:eth1/50'], hostname=hn102)
aggregator.assert_metric(metric_name, value=4491.0, tags=tags102 + ['port:eth1/1'], hostname=hn102)
aggregator.assert_metric(metric_name, value=3611.0, tags=tags102 + ['port:eth1/2'], hostname=hn102)
aggregator.assert_metric(metric_name, value=1.0, tags=tags102 + ['port:eth1/3'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/4'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/15'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/17'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/5'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/6'], hostname=hn102)
aggregator.assert_metric(metric_name, value=42.0, tags=tags102 + ['port:eth1/7'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/8'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/9'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/10'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/11'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/12'], hostname=hn102)
aggregator.assert_metric(metric_name, value=2990.0, tags=tags202 + ['port:eth1/1'], hostname=hn202)
aggregator.assert_metric(metric_name, value=900.0, tags=tags202 + ['port:eth1/2'], hostname=hn202)
aggregator.assert_metric(metric_name, value=2516.0, tags=tags201 + ['port:eth1/1'], hostname=hn201)
aggregator.assert_metric(metric_name, value=4159.0, tags=tags201 + ['port:eth1/2'], hostname=hn201)
metric_name = 'cisco_aci.fabric.node.mem.max'
aggregator.assert_metric(metric_name, value=10570520.0, tags=tagsleaf101, hostname=hn101)
aggregator.assert_metric(metric_name, value=10509680.0, tags=tagsleaf102, hostname=hn102)
aggregator.assert_metric(metric_name, value=10755916.0, tags=tagsspine202, hostname=hn202)
aggregator.assert_metric(
metric_name,
value=37901052.0,
tags=[
'apic_role:controller',
'node_id:3',
'fabric_state:unknown',
'fabric_pod_id:1',
'cisco',
'project:cisco_aci',
],
hostname='pod-1-node-3',
)
aggregator.assert_metric(
metric_name,
value=43199760.0,
tags=[
'apic_role:controller',
'node_id:1',
'fabric_state:unknown',
'fabric_pod_id:1',
'cisco',
'project:cisco_aci',
],
hostname='pod-1-node-1',
)
aggregator.assert_metric(metric_name, value=10823444.0, tags=tagsspine201, hostname=hn201)
aggregator.assert_metric(
metric_name,
value=34637280.0,
tags=[
'apic_role:controller',
'node_id:2',
'fabric_state:unknown',
'fabric_pod_id:1',
'cisco',
'project:cisco_aci',
],
hostname='pod-1-node-2',
)
metric_name = 'cisco_aci.fabric.port.egr_bytes.flood'
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/43'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/44'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/45'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/46'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/47'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/48'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/1'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/2'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/3'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/4'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/5'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/6'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/7'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/9'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/8'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/10'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/11'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/12'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/15'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/19'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/33'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/48'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/49'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/50'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/1'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/2'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/3'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/4'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/5'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/6'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/7'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/8'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/9'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/10'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/11'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/12'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/13'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/14'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/15'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/16'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/17'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/18'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/19'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/20'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/21'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/22'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/23'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/24'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/25'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/26'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/27'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/28'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/29'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/30'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/31'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/32'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/33'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/34'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/35'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/36'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/37'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/38'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/39'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/40'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/41'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/42'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/33'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/48'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/49'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/50'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/1'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/2'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/3'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/4'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/15'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/17'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/5'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/6'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/7'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/8'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/9'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/10'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/11'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/12'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/1'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/2'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/1'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/2'], hostname=hn201)
metric_name = 'cisco_aci.fabric.port.egr_drop_pkts.buffer'
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/43'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/44'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/45'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/46'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/47'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/48'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/1'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/2'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/3'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/4'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/5'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/6'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/7'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/9'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/8'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/10'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/11'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/12'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/15'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/19'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/33'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/48'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/49'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/50'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/1'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/2'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/3'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/4'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/5'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/6'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/7'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/8'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/9'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/10'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/11'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/12'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/13'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/14'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/15'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/16'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/17'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/18'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/19'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/20'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/21'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/22'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/23'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/24'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/25'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/26'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/27'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/28'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/29'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/30'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/31'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/32'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/33'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/34'], | |
#-*- coding: utf-8 -*-
#!/usr/bin/env python
from telegram.ext import Updater
from telegram.ext import CommandHandler
from telegram.ext import MessageHandler, Filters
import telegram
import sys
import logging
import random
from string import maketrans
import time
from ip import facecount
# Route all log records (INFO and above) to stdout with a timestamped format.
root = logging.getLogger()
root.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
# Handler accepts DEBUG, but the root logger's INFO level filters first.
ch.setLevel(logging.DEBUG)
formatter = \
    logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
# Module-level logger for this bot script.
logger = logging.getLogger(__name__)
class Oguz:
    """Stateful reply picker for the chat bot.

    Keeps a short history of emitted replies (``self.replies``) so the same
    canned message is not repeated back-to-back; falls through a chain of
    reply categories when the preferred one was used recently.
    """

    def __init__(self):
        # History of sent replies / sentinel markers, newest last.
        self.replies = []
        # Generic fallback one-liners.
        self.default = ["","mutluluk parayla ölçülmüyor","Nerden buluyonuz boyle haberleri arkadas","Kimle konusuyorsunuz necdet bey", "Bi dalga gecmeyin ya","Nyucelsiz abnin ne keyfi olcak be","Neden hep ben suclu oluyorum","Belgesel felan izleyin sayin yucel", "Buyuk ikramiye cikmamis bize :(","cok iyi ya", "vay arkadas",
                        "sorun diil ya", "dolar artmış", "kral dairesi kaana", "Her lafi kopyalayinca aynisi olunmuyor", "Serdar kaça kadar izin var", "Şifayı bir sahil bendedi olarak tanımlayabiliriz"]
        self.kripton = ["@nyucel"]
        self.gezgin = [u"gizlice geziyormuş", u"in yeri ayrı ya"]
        self.evli = ["@someone"]
        self.bekar = ["@otherone"]

    def sendMsg(self, bot, update, msg):
        """Send *msg* unless it was used in the last 5 replies; otherwise fall
        back to a generic, a "gezgin", or a "seriousBusiness" canned reply,
        each guarded by its own recent-history window."""
        alt_msg = random.choice(self.default)
        gezgin = random.choice(self.gezgin)
        if msg and msg not in self.replies[-5:]:
            bot.sendMessage(update.message.chat_id, text=msg,
                            parse_mode=telegram.ParseMode.MARKDOWN)
            self.replies.append(msg)
        elif "alt_msg" not in self.replies[-4:]:
            bot.sendMessage(update.message.chat_id, text=alt_msg,
                            parse_mode=telegram.ParseMode.MARKDOWN)
            self.replies.append("alt_msg")
        elif "gezgin" not in self.replies[-20:]:
            bot.sendMessage(update.message.chat_id, text=random.choice(
                self.kripton) + " " + gezgin, parse_mode=telegram.ParseMode.MARKDOWN)
            self.replies.append("gezgin")
        elif "seriousBusiness" not in self.replies[-20:]:
            # getattr replaces the previous eval("self." + ...): same attribute
            # lookup, without interpreting a constructed string as code.
            who = random.choice(
                getattr(self, random.choice(["evli", "bekar"])))
            if who in self.evli:
                bot.sendMessage(update.message.chat_id, text="cocuk ne zaman? " +
                                who, parse_mode=telegram.ParseMode.MARKDOWN)
            else:
                bot.sendMessage(update.message.chat_id, text="evlilik ne zaman? " +
                                who, parse_mode=telegram.ParseMode.MARKDOWN)
            self.replies.append("seriousBusiness")
# Single module-level bot brain shared by all handlers.
o = Oguz()
def readable(sentence):
    """Transliterate Turkish-specific letters to their ASCII lookalikes.

    Args:
        sentence: unicode string, possibly containing Turkish letters.

    Returns:
        The string with C/G/I/O/S/U (and lowercase) substitutions applied.
    """
    cevir = {u'Ç': 'C', u'Ğ': 'G', u'İ': 'I', u'Ö': 'O', u'Ş': 'S', u'Ü': 'U',
             u'ç': 'c', u'ğ': 'g', u'ı': 'i', u'ö': 'o', u'ş': 's', u'ü': 'u'}
    # .items() works on both Python 2 and 3; .iteritems() is Python-2-only
    # and raises AttributeError under Python 3.
    for turkish_char, ascii_char in cevir.items():
        sentence = sentence.replace(turkish_char, ascii_char)
    return sentence
def grammarNazi(word):
    """Return a correction hint for a known Turkish typo, else False.

    Args:
        word: a single lower-cased, ASCII-transliterated token.

    Returns:
        A u"<correct form> olacak ama" string when *word* is a known typo,
        otherwise False.
    """
    typo = {
        'hersey': u'her şey',
        'yada': u'ya da',
        'hicbirsey': u'hiçbir şey',
        'rasgele': u'rastgele',
        'sabahdan': u'sabahtan',
        'bugunlerde': u'bu günlerde',
        'pekcok': u'pek çok',
    }
    # Look the word up directly.  The previous substring scan
    # (any(k in word for k in typo)) claimed a match for words that merely
    # *contain* a typo key (e.g. "yadaa") and then crashed with KeyError on
    # typo[word].
    if word in typo:
        return u"%s olacak ama" % typo[word]
    return False
def start(bot, update):
    """Handle /start: greet the chat."""
    greeting = 'Selam genc'
    bot.sendMessage(update.message.chat_id, text=greeting)
def help(bot, update):
    """Handle /help: send the stock reply."""
    reply = 'Noldu genc'
    bot.sendMessage(update.message.chat_id, text=reply)
def konus(bot, update):
    """Relay a /konusx message into the hard-coded group chat."""
    relayed = update.message.text.replace("/konusx ", "")
    bot.sendMessage(-583652, text=relayed)
def echo(bot, update):
global o
r = readable(update.message.text).lower()
for w in r.split():
if grammarNazi(w):
o.sendMsg(bot, update, grammarNazi(w))
if update.message.sticker:
texts = [u"Grubun admini yok munya",u"Gercek mi bu",u"Nyucele lazim hadi bot muhabbet etmek iatiyor receple", u"bir sitikir insanı olarak nyucel",u"bu emojileri nerden şaapıyoruz",u"BİZİM ÇOK VAKTİMİZ OLMADIĞINDAN BU TÜR EMOJİLERİ ARATIP KULLANAMIYORUZ DA SAYIN YÜCEL"]
o.sendMsg(bot, update, random.choice(texts))
if update.message.chat_id == "169359665":
bot.sendMessage(update.message.chat_id, text='Hmm, upgrades?')
elif "?" == r[-1]:
texts = [u"sayın yücel bi basın açıklaması yapmıcak mı bu konuda, halkı aydınlatmıcak mı",u"ben mi cevap vereyim recep mi versin?", u"Hangi dilde bu kaan"]
o.sendMsg(bot, update, random.choice(texts))
elif "pebble" in r:
o.sendMsg(bot, update, u"benim bi pebble vardı")
elif "mesaj" in r:
o.sendMsg(bot, update, u"1 mesaj artı n yıllık tecrübe o")
elif "off" in r:
o.sendMsg(bot, update, u"off derken sanki ben kurdum cümleyi ya")
elif ("grup" in r) or ("grub" in r):
o.sendMsg(bot, update, u"kaanla iki kişilik özel grup kurmuşunuz diyorlar")
o.sendMsg(bot, update, u"2 kişi grup olmadığından recebi de eklemişsiniz kalabalık olsun die")
elif "basamak" in r:
o.sendMsg(bot, update, u"Kim napcak o kadar buyuk sayiyi")
elif ("apple" in r) or ("iphone" in r) or ("android" in r) or ("macbook" in r) or ("ayfon" in r):
o.sendMsg(bot, update, u"apple fan boy diilim bi kere")
o.sendMsg(bot, update, u"şirket verdi diyorum")
o.sendMsg(bot, update, u"diğer teli servise yollucam")
elif "problem" in r:
o.sendMsg(bot, update, u"sorsan aynstana da öle demişlerdir kesin")
elif "motor" in r:
texts=[u"Tvlerde reklami var", u"Motor dedigi araba kadar ama"]
o.sendMsg(bot, update, random.choice(texts))
elif "aciliyor" in r:
o.sendMsg(bot, update, u"Herkes ayni anda yuklenmesin ya")
elif "alir misin" in r:
o.sendMsg(bot, update, u"şirket veriyosa bakarız")
elif ("akademik" in r) or ("abye" in r) or ("ab" in r.split()):
o.sendMsg(bot, update, u"Herkes ayni anda yuklenmesin ya")
elif ("tombik" in r) or ("kilo" in r) or ("sisman" in r.split()):
o.sendMsg(bot, update, u"ben hafif hissediyorum kaana göre")
elif ("storage" in r) or ("disk" in r.split()) or ("gb" in r.split()):
o.sendMsg(bot, update, u"aynen")
o.sendMsg(bot, update, u"storage işinde para var")
elif ("olimpiyat" in r) or ("mac" in r.split()) or ("kazan" in r):
o.sendMsg(bot, update, u"öğlen arası eurosporta denk geldim")
o.sendMsg(bot, update, u"anlık enstantaneler (bu lafı sırf ingilizcem gelişsin die kullandım :p) gösteriyorlar, baya keyifli şeyler varmış ya")
elif ("ediyorum" in r.split()) or ("yaptim" in r) or ("bence" in r):
o.sendMsg(bot, update, u"nyucelim meşhur bi lafını söylim sana")
o.sendMsg(bot, update, u"tebriks")
elif update.message.from_user["id"] == 169359665:
texts = [u"Bot olan kim oguz mu felan karisiyo", u"aynı şey değil ama düşünürseniz kaan bey", u"Algi yonetimi de deniyor", u"Fotodan ayriliyor. Nyucel fotolu olan insan",u"Recebi de dovcem",u"Ona dur diyon bi sefer de recep cikiyor",u"Recebi sessize alsak",u"recebin akıllısı lazım bence",u"Sorumlusu kimse ciksin",u"bak misal recebin eksik olan kısmı yaratıcılığı",u"şu recebe gıcık olmaya başladım","tam oturtamamisin kaan", "bu olmadi bak", "oo recep geri donmus", u"ya bu deneme işlerini burada ı yapıyoruz sayın yücel?", "recep ne zaman tetikleniyo",
"fotoy abakıyorsun yakışıklı olan gerçek olan ben", "baska bot ekleyebiliyoz mu", "kanaat önderi misiniz"]
if random.randint(1, 10) < 5:
o.sendMsg(bot, update, random.choice(texts))
elif random.randint(1, 2) == 2:
bot.sendMessage(update.message.chat_id, text=".")
bot.sendMessage(update.message.chat_id, text="ee")
else:
pass
elif "oguz" in r.split() and (("nerde" in r) or ("gelmiyo" in r) or ("gelecek" in r) or ("gel" in r.split()) or ("gelir" in r.split())):
o.sendMsg(bot, update, u"nası gelim cocuk var")
elif ("kaan" in r.split() or ("kaaan" in r) or ("kaaaan" in r)):
texts = [u"işte bi gün birilerinin kaan diceni biliyodum",u"staj vesayet başkanlığı ya kaaan", u"Sen daha iyi bilirsin bu siyasi isleri gerci sayin ozdincer", u"Kaaanla ayni ortamda olmak ikinci planda olmak demek ama neyse",u"kaaan için dert diil bunlar nasılsa hocaylan sık görüşüp kapatır açığı", u"kaanmışcasına ilgi görmek",
u"bir kaan gibi özlenmek", u"bir kaan gibi seslenilmek", u"Kaan lutfen", u"Olcez gitcez su dunyadan kaan kadar kiymetimiz olmicak", ]
o.sendMsg(bot, update, random.choice(texts))
elif ("+1" in r) or ("bence de" in r) or ("bencede" in r.split()):
texts = [u"Arada kafamiz dagilsin flean da diyor olabilirsiniz tabi. Recep.yazsin eglenek gulek", u"Bu bilginin kaynagi var mi",u"hep bole bi bozaci siraci durumlari", u"Bak misal burada bozaci siraci lafi varmis gibi yapabiliriz"]
o.sendMsg(bot, update, random.choice(texts))
elif ("akiyoruz" in r.split()) or ("geziyoz" in r.split()) or ("gezdik" in r.split()) or ("geziyoruz" in r.split()):
o.sendMsg(bot, update, u"bensiz çok mutlusunuz zaten")
elif ("nice yillara" in r) or ("mutlu yillar" in r) or ("iyi ki dogdun" in r):
texts = [u"mutlu yıllar", u"pasta masta bişi var mı"]
o.sendMsg(bot, update, random.choice(texts))
elif ("kickstarter" in r) or ("oyuncak" in r):
texts = [u"benden 20 lira calisir", u"remix mini vardı"]
o.sendMsg(bot, update, random.choice(texts))
elif ("gidiyoruz" in r) or ("gidelim" in r) or ("gezer" in r):
o.sendMsg(bot, update, u"vaay gezin tabi ya")
elif ("basvuru" in r) or ("basvurmak" in r) or ("konferans" in r) or ("konusma" in r.split()):
o.sendMsg(bot, update, u"ben başvurmadım diyorum ya")
elif ("yemek" in r.split()) or ("yiyoz" in r.split()) or ("iskender" in r.split()) or ("lahmacun" in r) or ("yesek" in r) or ("yemeksepeti" in r):
texts = [u"ekmek kemiriyoz",
u"ekmek arası yiyoz", u"işci şeyleri yiyoz"]
o.sendMsg(bot, update, random.choice(texts))
elif ("bulusma" in r) or ("toplandik" in r):
o.sendMsg(bot, update, u"gizlice")
elif ("gizli" in r):
o.sendMsg(bot, update, u"çok iyi yaa")
elif ("whatsapp" in r) or ("telegram" in r):
texts = [u"Telegram yerine signale mi gecseydik"]
o.sendMsg(bot, update, random.choice(texts))
o.sendMsg(bot,update,u'Snowden de signali oneriyormus')
elif ("oneri" in r) or ("nereye" in r):
texts = [u"önerim arada eve uğrayın"]
o.sendMsg(bot, update, random.choice(texts))
o.sendMsg(bot,update,u'haa gezilcek yer')
o.sendMsg(bot,update,u'ben küçük yerleri seviyorum')
o.sendMsg(bot,update,u'bi de koşturmaca gezmektense oturayım bi yerde modundayım sana çok uymayan bi durum bu')
elif ("oguz" in r.split()) and (("selam" in r) or ("naber") in r):
o.sendMsg(bot, update, u"selam genç")
elif ("oguz" in r.split()) and (("naber") in r):
o.sendMsg(bot, update, u"iyidir genç")
elif ("oguz" in r.split()) and (("rocks") in r):
o.sendMsg(bot, update, u"sağol genç")
elif (("bot" in r.split()) or ("botu" in r.split()) or ("bota" in r.split()) or ("botla" in r.split()) or ("recep" in r) or ("receb" in r)):
texts = [u"komplo var bence",u"grup yöneticisi olduğundandır kaaan",u"Recep insan olsa eminim mutlu olurdu",u"Kendin yaziyon ne diceni sonra ay yuzeyselsin",u"Recep dedigin yuzeysel.olur",u"taklitlerimden sakininiz", u"botu | |
self.UserInput[-1].SetRect((pad + 165 + 15, offset, 100, 22))
self.UserInput[-1].Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
offset += 30
idx += 1
offset += 10
width, height = self.parent.GetClientSize()
self.DefaultButton = Button(self, label=u'Default')
self.DefaultButton.SetRect((15, height - 24 - 15, 75, 24))
self.DefaultButton.Bind(wx.EVT_BUTTON, self.OnDefaultButton)
def OnSize(self, event):
width, height = self.parent.parent.GetClientSize()
self.SetSize((width - 16, height - 78))
def OnClose(self, event):
keymap_preset = list()
default_keymap = self.GetDefaultKeymap()
for i in range(len(self.UserInput)):
namespace = default_keymap[i][0]
string = self.UserInput[i].GetValue()
baseflag = self.String2BaseRawKeyFlag(self.UserInput[i].GetValue())
if u'Ctrl + ' in string:
ctrl = True
else:
ctrl = False
if u'Shift + ' in string:
shift = True
else:
shift = False
keymap_preset += [(namespace, string, baseflag, ctrl, shift)]
SetPreference('keymap_preset', keymap_preset)
self.parent.parent.parent.SetKeymapPreset(keymap_preset)
self.Destroy()
def OnRemoveValue(self, event):
event.Skip()
idx = event.GetId() - 1
self.UserInput[idx].SetValue('')
def OnDefaultButton(self, event):
default_keymap = self.GetDefaultKeymap()
for i in range(len(self.UserInput)):
value = default_keymap[i][1]
self.UserInput[i].SetValue(value)
def OnKeyDown(self, event):
idx = event.GetId() - 1
# keymap = self.GetKeymap()
keyflag = event.GetRawKeyFlags()
value = self.RawKeyFlag2String(keyflag)
if value is None:
return
shift = event.ShiftDown()
ctrl = event.CmdDown() or event.ControlDown()
if ctrl:
value = ' + '.join(('Ctrl', value))
if shift:
value = ' + '.join(('Shift', value))
value = value.replace(' ', ' ')
if value in ('Ctrl + A'):
return
self.UserInput[idx].SetValue(value)
for i in range(len(self.UserInput)):
if i == idx:
continue
if value == self.UserInput[i].GetValue():
self.UserInput[i].SetValue('')
class GauranteeAlwaysOnTopThread(threading.Thread):
    """One-shot worker that re-asserts the always-on-top window style.

    Starts itself on construction; after a short delay it toggles the
    parent's always-on-top flag off and back on so the style actually takes
    effect (workaround for the style not sticking right after startup).
    """

    def __init__(self, parent):
        threading.Thread.__init__(self)
        # Object exposing SetAlwaysOnTopOn/SetAlwaysOnTopOff (the main frame).
        self.parent = parent
        # Fire-and-forget: the thread launches from the constructor.
        self.start()

    def run(self):
        # Give the UI a moment to finish initializing before touching styles.
        time.sleep(1)
        if GetPreference('always_on_top'):
            # Toggle off then on; the brief gap forces the window manager to
            # re-apply the STAY_ON_TOP style.
            self.parent.SetAlwaysOnTopOff()
            time.sleep(0.1)
            self.parent.SetAlwaysOnTopOn()
class MacroBoxPreference():
    """Mixin carrying window-level preference state and persistence.

    Intended to be mixed into the main wx frame: it calls wx.Window methods
    (GetWindowStyle, SetSize, ...) on ``self`` and reads/writes the
    application preference store via GetPreference/SetPreference(s).
    """

    def __init__(self):
        self.listtab_show = False
        self.playbox_only = False
        self.playbox_top_show = False
        self.playbox_side_show = False
        # (was assigned twice; the duplicate line is removed)
        self.playbox_title_format = None
        color_scheme = GetPreference('color_scheme')
        if color_scheme is None:
            color_scheme = 'Dark Red'
        self.st = macroboxstyle.load(color_scheme, rgbstyle='dec')

    def SetAlwaysOnTopOn(self):
        """Force the STAY_ON_TOP style on and remember the choice."""
        style = self.GetWindowStyle()
        # Toggle the bit, then OR it to guarantee it ends up set.
        self.SetWindowStyle(style ^ wx.STAY_ON_TOP)
        self.SetWindowStyle(style | wx.STAY_ON_TOP)
        SetPreference('always_on_top', True)

    def SetAlwaysOnTopOff(self):
        """Drop the STAY_ON_TOP style and remember the choice."""
        style = self.GetWindowStyle()
        self.SetWindowStyle(style ^ wx.STAY_ON_TOP)
        SetPreference('always_on_top', False)

    def GauranteeAlwaysOnTop(self):
        # Self-starting worker thread; see GauranteeAlwaysOnTopThread.
        GauranteeAlwaysOnTopThread(self)

    def SetPlayerSideShowOn(self):
        self.playbox_side_show = True
        self.MainPanel.PlayBox.OnSize()

    def SetPlayerSideShowOff(self):
        self.playbox_side_show = False
        self.MainPanel.PlayBox.OnSize()

    def IsPlayerSideShowOn(self):
        return self.playbox_side_show

    def SetListTabShowOn(self):
        self.listtab_show = True
        self.MainPanel.OnSize()

    def SetListTabShowOff(self):
        self.listtab_show = False
        self.MainPanel.OnSize()

    def IsListTabShowOn(self):
        return self.listtab_show

    def SetPlayerTopShowOn(self):
        """Show the top player strip; resize the frame in player-only mode."""
        self.playbox_top_show = True
        if self.IsPlayerOnlyModeOn():
            height = 242
            w, _ = self.GetSize()
            self.SetMaxSize((-1, height))
            self.SetMinSize((550, height))
            self.SetSize((w, height))
            self.OnSize()

    def SetPlayerTopShowOff(self):
        """Hide the top player strip; resize the frame in player-only mode."""
        self.playbox_top_show = False
        if self.IsPlayerOnlyModeOn():
            height = 186
            w, _ = self.GetSize()
            self.SetMinSize((550, height))
            # Was `self.SetMaxSize((-1, height)) + 10`: SetMaxSize returns
            # None, so the "+ 10" raised TypeError whenever this branch ran.
            self.SetMaxSize((-1, height))
            self.SetSize((w, height))
            self.OnSize()

    def IsPlayerTopShowOn(self):
        return self.playbox_top_show

    def SetPlayerOnlyModeOn(self):
        """Collapse the frame to a fixed-height, chrome-less player window."""
        self.playbox_only = True
        style = self.GetWindowStyle()
        # Toggle off the window chrome (menu, min/max/close, caption).
        self.SetWindowStyle(style ^ wx.SYSTEM_MENU ^
                            wx.MINIMIZE_BOX ^ wx.MAXIMIZE_BOX ^ wx.CLOSE_BOX ^ wx.CAPTION)
        w, h = self.GetSize()
        # Remembered so SetPlayerOnlyModeOff can restore the old height.
        self.last_height = h
        if self.IsPlayerTopShowOn():
            height = 242
        else:
            height = 186
        self.SetMinSize((550, height))
        self.SetMaxSize((-1, height))
        self.SetSize((w, height))
        self.OnSize()

    def SetPlayerOnlyModeOff(self):
        """Restore the full window chrome and the pre-collapse height."""
        self.playbox_only = False
        style = self.GetWindowStyle()
        self.SetWindowStyle(style ^ wx.SYSTEM_MENU ^ wx.MINIMIZE_BOX ^
                            wx.MAXIMIZE_BOX ^ wx.CLOSE_BOX ^ wx.CAPTION)
        self.SetWindowStyle(style | wx.SYSTEM_MENU | wx.MINIMIZE_BOX |
                            wx.MAXIMIZE_BOX | wx.CLOSE_BOX | wx.CAPTION)
        self.MenuBar.Show()
        self.SetMaxSize((-1, -1))
        self.SetMinSize((550, 450))
        w, _ = self.GetSize()
        self.SetSize((w, self.last_height))
        self.OnSize()

    def IsPlayerOnlyModeOn(self):
        return self.playbox_only

    def SetMainFrameIcon(self):
        """Load the window icon, handling both source and frozen layouts."""
        icon_path = os.path.join('assets', 'icon', 'icon.ico')
        icon = wx.Icon()
        import sys
        if hasattr(sys, '_MEIPASS'):
            # Running from a PyInstaller bundle: assets unpack under _MEIPASS.
            icon_path = os.path.join(sys._MEIPASS, icon_path)
        else:
            cwd = os.path.dirname(get_current_real_cwq())
            icon_path = os.path.join(cwd, icon_path)
        icon.CopyFromBitmap(wx.Bitmap(icon_path, wx.BITMAP_TYPE_ANY))
        self.SetIcon(icon)

    def RestoreMainFrameRect(self):
        """Apply the saved window rect, seeding a default on first run."""
        rect = GetPreference('rect')
        if rect is None:
            # First run: store a default; the frame keeps its initial rect.
            SetPreference('rect', wx.Rect(0, 0, 800, 600))
        else:
            self.SetRect(rect)

    def SavePreferences(self):
        """Stop workers/playback and persist the whole UI state in one batch."""
        procpath = self.MainPanel.MFEATS.GetProcPath()
        taskpath = self.MainPanel.MFEATS.GetTaskPath()
        self.MainPanel.Event.StopNotify()
        self.MainPanel.MFEATS.StopNotify()
        # Capture the play cue before stopping, so it can be restored later.
        playcue = self.MainPanel.PlayBox.cue
        self.MainPanel.PlayBox.OnStop()
        self.MainPanel.PlayBox.AudioControl.Quit()
        mfeats_scheduler = Struct(
            taskpath=taskpath, procpath=procpath,
            procs_limit=self.MainPanel.MFEATS.procs_limit,
            auto_analyzer_on=self.MainPanel.MFEATS.auto_analyzer_on)
        self.MainPanel.ListBox.SetFilterOffAll()
        playbox_show = Struct(top=self.IsPlayerTopShowOn(), side=self.IsPlayerSideShowOn())
        listtab_show = self.IsListTabShowOn()
        SetPreferences((('rect', self.GetRect()), ('playcue', playcue),
                        ('highlight_duration_type', self.MainPanel.PlayBox.GetHighlightDurationTypeId()),
                        ('playbox_information', self.MainPanel.PlayBox.Info.information),
                        ('query_columnKey', self.MainPanel.ListSearch.query_columnKey),
                        ('listbox_fontinfo', self.MainPanel.ListBox.GetFontInfo()),
                        ('selectedlist', self.MainPanel.ListBox.selectedList),
                        ('innerlist', self.MainPanel.ListBox.innerList),
                        ('mfeats_scheduler', mfeats_scheduler),
                        ('listtab_show', listtab_show),
                        ('playbox_show', playbox_show)))

    def LoadPreferences(self):
        """Restore persisted UI state, falling back to defaults on first run."""
        query_columnKey = GetPreference('query_columnKey')
        if query_columnKey is not None:
            self.MainPanel.ListSearch.query_columnKey = query_columnKey
        playbox_show = GetPreference('playbox_show')
        if playbox_show is None:
            self.SetPlayerTopShowOn()
            self.MenuBar.itemPlayerTopShow.Check()
        else:
            if playbox_show.top:
                self.SetPlayerTopShowOn()
                self.MenuBar.itemPlayerTopShow.Check()
            else:
                self.SetPlayerTopShowOff()
            if playbox_show.side:
                self.SetPlayerSideShowOn()
                self.MenuBar.itemPlayerSideShow.Check()
            else:
                self.SetPlayerSideShowOff()
        listtab_show = GetPreference('listtab_show')
        if listtab_show is None:
            self.SetListTabShowOn()
            self.MenuBar.itemListTabShow.Check()
        else:
            if listtab_show:
                self.SetListTabShowOn()
                self.MenuBar.itemListTabShow.Check()
            else:
                self.SetListTabShowOff()
        if GetPreference('always_on_top'):
            self.MenuBar.itemAlwaysOnTop.Check()
            self.SetAlwaysOnTopOn()
        innerList = GetPreference('innerlist')
        if innerList is not None:
            self.MainPanel.ListBox.innerList = innerList
        selectedList = GetPreference('selectedlist')
        if selectedList is not None:
            self.MainPanel.ListBox.selectedList = selectedList
        mfeats = GetPreference('mfeats_scheduler')
        if mfeats is not None:
            self.MainPanel.MFEATS.procs_limit = mfeats.procs_limit
            self.MainPanel.MFEATS.auto_analyzer_on = mfeats.auto_analyzer_on
            self.LimitCoreNumber(mfeats.procs_limit)
            if mfeats.auto_analyzer_on:
                self.MainPanel.MFEATS.AutoAnalyzer()
                self.MenuBar.itemAutoAnalyze.Check()
            for path in mfeats.procpath:
                self.MainPanel.ListBox.CheckItemConsistencyByPathAll(path)
        else:
            # No saved scheduler state: enable auto-analysis by default.
            self.MainPanel.MFEATS.auto_analyzer_on = True
            self.MenuBar.itemAutoAnalyze.Check()
        highlight_duration_type = GetPreference('highlight_duration_type')
        if highlight_duration_type is None:
            highlight_duration_type = 2
        for i in range(len(self.MenuBar.highlightDurationItems)):
            # Check exactly the active duration item, uncheck the rest.
            self.MenuBar.itemHighlightDurationMenu.MenuItems[i].Check(
                i == highlight_duration_type)
        # NOTE: 'tutotial_show' (sic) is the key used by the preference
        # store; keep the spelling.
        tutotial_show = GetPreference('tutotial_show')
        if tutotial_show is None:
            SetPreference('tutotial_show', True)
        self.MainPanel.OnSize()
        self.MainPanel.ListBox.SetListUnLockAll()
        auto_check_update = GetPreference('auto_check_update')
        if auto_check_update is None or auto_check_update:
            # Default is opt-in: check for updates unless explicitly disabled.
            self.OnAutoCheckUpdate(None)
        self.PlayBoxTitleFormat = PlayBoxTitleFormat(self)
        self.WebLinkPreset = WebLinkPreset()
class PlayBoxTitleFormat():
    """Holds and persists the three-field player title format."""

    def __init__(self, parent):
        self.parent = parent
        # Field choices offered by the appearance panel; '' means "empty".
        self.choices = ['', 'Filename', 'Album', 'Artist', 'Title']
        stored = GetPreference('playbox_title_format')
        self.preset = [u'Filename', u'Artist', u'Title'] if stored is None else stored

    def _persist(self):
        # Save the current preset and ask the title widget to repaint.
        SetPreference('playbox_title_format', self.preset)
        self.parent.MainPanel.PlayBox.Title.reInitBuffer = True

    def SetPreset(self, preset):
        self.preset = preset
        self._persist()

    def SetPresetByIdx(self, idx, value):
        self.preset[idx] = value
        self._persist()

    def GetChoices(self):
        return self.choices

    def GetPreset(self):
        return self.preset
class AppearancePanel(wx.Panel):
    def __init__(self, parent, pos=(0, 0)):
        """Build the appearance-settings panel: title format, color scheme,
        contrast, font, line space, and scrollbar size controls.

        Rows are laid out manually with absolute rects; `offset` tracks the
        running y position and `pad` a common x shift.
        """
        wx.Panel.__init__(self, parent, wx.ID_ANY,
                          style=wx.CLIP_CHILDREN | wx.NO_FULL_REPAINT_ON_RESIZE | wx.TAB_TRAVERSAL)
        # self.parent.parent.parent reaches the main frame throughout.
        self.parent = parent
        self.SetDoubleBuffered(True)
        self.SetBackgroundColour((255, 255, 255))
        # st = self.parent.parent.parent.MainPanel.ListBox.st
        font = wx.Font(0, wx.MODERN, wx.NORMAL, wx.FONTWEIGHT_LIGHT)
        font.SetPixelSize((6, 50))
        font.SetFaceName(FONT_ITEM)
        offset = 20
        pad = 0
        # --- Player title format: three combo boxes, ids 1..3 ---
        label = u'Player Title Format'
        text = StaticText(self, label=label, style=wx.ALIGN_RIGHT)
        text.SetRect((20, offset + 3, 180, -1))
        preset = self.parent.parent.parent.PlayBoxTitleFormat.GetPreset()
        choices = self.parent.parent.parent.PlayBoxTitleFormat.GetChoices()
        self.PlayerTitleFormat = list()
        self.PlayerTitleFormat += [ComboBox(
            self, id=1, choices=choices, style=wx.CB_READONLY)]
        self.PlayerTitleFormat[-1].SetValue(preset[0])
        self.PlayerTitleFormat[-1].SetRect((200 + 15 + 1 + 85 * 0, offset, 80, 24))
        self.PlayerTitleFormat[-1].Bind(wx.EVT_COMBOBOX, self.OnPlayerTitleFormat)
        self.PlayerTitleFormat += [ComboBox(
            self, id=2, choices=choices, style=wx.CB_READONLY)]
        self.PlayerTitleFormat[-1].SetValue(preset[1])
        self.PlayerTitleFormat[-1].SetRect((200 + 15 + 1 + 85 * 1, offset, 80, 24))
        self.PlayerTitleFormat[-1].Bind(wx.EVT_COMBOBOX, self.OnPlayerTitleFormat)
        self.PlayerTitleFormat += [ComboBox(
            self, id=3, choices=choices, style=wx.CB_READONLY)]
        self.PlayerTitleFormat[-1].SetRect((200 + 15 + 1 + 85 * 2, offset, 80, 24))
        self.PlayerTitleFormat[-1].SetValue(preset[2])
        self.PlayerTitleFormat[-1].Bind(wx.EVT_COMBOBOX, self.OnPlayerTitleFormat)
        offset += 10
        offset += 30
        # --- Color scheme selector ---
        label = u'Color Scheme'
        text = StaticText(self, label=label, style=wx.ALIGN_RIGHT)
        text.SetRect((pad + 20, offset + 3, 180, -1))
        # NOTE(review): bare expression below has no effect — likely leftover.
        macroboxstyle.STYLE_NAMES
        choices = macroboxstyle.STYLE_NAMES
        self.ColorScheme = ComboBox(self, choices=choices, style=wx.CB_READONLY)
        self.ColorScheme.SetRect((pad + 200 + 15 + 1, offset, 165, 24))
        self.ColorScheme.Bind(wx.EVT_COMBOBOX, self.OnColorScheme)
        color_scheme = GetPreference('color_scheme')
        if color_scheme is None:
            color_scheme = 'Dark Red'
        self.ColorScheme.SetValue(color_scheme)
        offset += 30
        # --- Track list line contrast spinner ---
        label = u'Contrast'
        text = StaticText(self, label=label, style=wx.ALIGN_RIGHT)
        text.SetRect((pad + 20, offset + 3, 180, -1))
        self.TracklistLineContrast = SpinCtrl(self, value='0')
        self.TracklistLineContrast.SetRect((pad + 200 + 15 + 1, offset + 1, 60, 22))
        self.TracklistLineContrast.SetRange(-10, 10)
        self.TracklistLineContrast.Bind(wx.EVT_SPINCTRL, self.OnTracklistLineContrast)
        value = self.parent.parent.parent.MainPanel.ListBox.line_contrast
        self.TracklistLineContrast.SetValue(value)
        offset += 30
        # --- Track list font picker ---
        label = u'Font'
        text = StaticText(self, label=label, style=wx.ALIGN_RIGHT)
        text.SetRect((pad + 20, offset + 3, 180, -1))
        self.TracklistFont = wx.FontPickerCtrl(
            self, style=wx.FNTP_FONTDESC_AS_LABEL | wx.ALIGN_LEFT)
        self.TracklistFont.SetRect((pad + 200 + 15, offset, 165 + 2, 24))
        self.Bind(wx.EVT_FONTPICKER_CHANGED, self.OnTracklistFont, self.TracklistFont)
        offset += 30
        # --- Track list line space spinner ---
        label = u'Line Space'
        text = StaticText(self, label=label, style=wx.ALIGN_RIGHT)
        text.SetRect((pad + 20, offset + 3, 180, -1))
        self.TracklistLineSpace = SpinCtrl(self)
        self.TracklistLineSpace.SetRect((pad + 200 + 15 + 1, offset + 1, 60, 22))
        self.TracklistLineSpace.SetRange(20, 30)
        self.TracklistLineSpace.Bind(wx.EVT_SPINCTRL, self.OnTracklistLineSpace)
        value = self.parent.parent.parent.MainPanel.ListBox.line_space
        self.TracklistLineSpace.SetValue(value)
        offset += 30
        # --- Scrollbar size spinner ---
        label = u'Scrollbar Size'
        text = StaticText(self, label=label, style=wx.ALIGN_RIGHT)
        text.SetRect((pad + 20, offset + 3, 180, -1))
        self.ScrollbarSize = SpinCtrl(self)
        self.ScrollbarSize.SetRect((pad + 200 + 15 + 1, offset + 1, 60, 22))
        self.ScrollbarSize.SetRange(2, 10)
        self.ScrollbarSize.Bind(wx.EVT_SPINCTRL, self.OnScrollbarSize)
        value = self.parent.parent.parent.MainPanel.ListBox.scrollbar_size
        self.ScrollbarSize.SetValue(value)
        # offset += 30
        # label = u'Always show scrollbar'
        # text = StaticText(self, label=label, style=wx.ALIGN_RIGHT)
        # text.SetRect((pad+20, offset+3, 180, -1))
        # self.AlwaysShowScrollbar = CheckBox(self)
        # self.AlwaysShowScrollbar.SetPosition((pad+200+15, offset+4))
        # self.AlwaysShowScrollbar.Bind(wx.EVT_CHECKBOX, self.OnAlwaysShowScrollbar)
        # value = self.parent.parent.parent.MainPanel.ListBox.always_show_slider
        # # value = self.parent.parent.parent.MainPanel.ListBox.st.SCROLLBAR_ALWAYS
        # self.AlwaysShowScrollbar.SetValue(value)
        self.SetCurrentValues()
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.OnSize(None)
def OnScrollbarSize(self, event):
value = event.GetInt()
self.parent.parent.parent.MainPanel.ListBox.scrollbar_size = value
self.parent.parent.parent.MainPanel.ListBox.reInitBuffer = True
self.ApplyToScriptEditorBox()
def OnAlwaysShowScrollbar(self, event):
self.parent.parent.parent.MainPanel\
.ListBox.always_show_slider = event.IsChecked()
self.ApplyToScriptEditorBox()
def SetCurrentValues(self):
font = self.parent.parent.parent.MainPanel.ListBox.font
self.TracklistFont.SetSelectedFont(font)
def OnColorScheme(self, event):
name = event.GetString()
import macroboxstyle
style = macroboxstyle.load(name, rgbstyle='dec')
self.parent.parent.parent.st = style
self.ApplyToMainFrame()
self.ApplyToScriptEditorBox()
self.SetCurrentValues()
def ApplyToScriptEditorBox(self):
parent = self.parent.parent.parent
if hasattr(parent, 'ScriptEditorBox') is False:
return
if hasattr(parent.ScriptEditorBox, 'EditorPanel') is False:
return
parent.ScriptEditorBox.OnSize(None)
parent.ScriptEditorBox.EditorPanel.OnSize(None)
parent.ScriptEditorBox.PreviewPanel.OnSize(None)
parent.ScriptEditorBox.EditorPanel.OnColor(None)
parent.ScriptEditorBox.EditorPanel.TextCtrl.SetUpEditor()
parent.ScriptEditorBox.EditorPanel.TextCtrl.OnUpdateUI(None)
parent.ScriptEditorBox.PreviewPanel.OnColor(None)
parent.ScriptEditorBox.PreviewPanel.TextCtrl.SetUpPreview()
parent.ScriptEditorBox.PreviewPanel.TextCtrl.OnUpdateUI(None)
def ApplyToMainFrame(self):
| |
import matplotlib.pyplot as plt
import numpy as np
import pytest
from matplotlib.backends.backend_pdf import PdfPages
import landlab
from landlab.plot.imshow import _guess_location_from_name, _guess_location_from_size
@pytest.mark.slow
def test_imshow_grid():
    """Smoke-test imshow_grid at nodes and cells, saving each figure to a PDF."""
    grid = landlab.RasterModelGrid((4, 5))
    with PdfPages("test.pdf") as pdf:
        node_values = np.arange(grid.number_of_nodes)
        landlab.plot.imshow_grid(grid, node_values, at="node", limits=(0, 20))
        pdf.savefig()
        plt.clf()
        grid.status_at_node[7] = grid.BC_NODE_IS_CLOSED
        cell_values = np.arange(grid.number_of_cells)
        landlab.plot.imshow_grid(grid, cell_values, at="cell", symmetric_cbar=True)
        pdf.savefig()
def test_imshow_grid_input():
    """imshow_grid rejects a value array whose length mismatches the grid."""
    grid = landlab.RasterModelGrid((4, 5))
    bad_values = np.arange(grid.number_of_nodes - 1)
    with pytest.raises(ValueError):
        landlab.plot.imshow_grid(grid, bad_values, at="node", limits=(0, 20))
def test_imshowhs_grid_input():
    """imshowhs_grid rejects a value array whose length mismatches the grid."""
    grid = landlab.RasterModelGrid((4, 5))
    bad_values = np.arange(grid.number_of_nodes - 1)
    with pytest.raises(ValueError):
        landlab.plot.imshowhs_grid(grid, bad_values, at="node", limits=(0, 20))
def test_imshowhs_grid_input_Layer1():
    """A mis-sized drape1 array raises ValueError in Drape1 mode."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    bad_drape = np.arange(grid.number_of_nodes - 1)
    opts = dict(
        plot_type="Drape1",
        var_name="Soil",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        limits=(0, 2),
    )
    with pytest.raises(ValueError):
        landlab.plot.imshowhs_grid(
            grid, "topographic__elevation", drape1=bad_drape, **opts
        )
def test_imshowhs_grid_input_Layer2():
    """A mis-sized drape2 array raises ValueError in Drape2 mode."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    good_drape = np.arange(grid.number_of_nodes)
    bad_drape = np.arange(grid.number_of_nodes - 1)
    opts = dict(
        plot_type="Drape2",
        var_name="Soil",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        limits=(0, 2),
    )
    with pytest.raises(ValueError):
        landlab.plot.imshowhs_grid(
            grid, "topographic__elevation", drape1=good_drape, drape2=bad_drape,
            **opts
        )
def test_imshowhs_grid_1():
    """DEM draped over shaded relief with a symmetric colorbar and limits."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    opts = dict(
        var_name="Topo",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        symmetric_cbar=True,
        limits=(0, 10),
    )
    landlab.plot.imshowhs_grid(grid, "topographic__elevation", **opts)
def test_imshowhs_grid_2():
    """DEM over shaded relief with vertical exaggeration and km ticks."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    opts = dict(
        var_name="Topo",
        var_units=r"m",
        grid_units=("m", "m"),
        vertical_exa=2,
        ticks_km=True,
        symmetric_cbar=True,
        vmin=0,
        vmax=10,
    )
    landlab.plot.imshowhs_grid(grid, "topographic__elevation", **opts)
def test_imshowhs_grid_3():
    """Pure hillshade plot with contour lines."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    opts = dict(
        plot_type="Hillshade",
        var_name="Topo",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        plt_contour=True,
        vmax=10,
        vmin=0,
    )
    landlab.plot.imshowhs_grid(grid, "topographic__elevation", **opts)
def _soil_grid():
    """Return a 4x5 raster grid with zeroed topography and soil depth fields."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    grid.add_zeros("soil__depth", at="node")
    return grid
def test_imshowhs_grid_4a():
    """Soil depth (Drape1) over shaded relief with explicit limits."""
    grid = _soil_grid()
    landlab.plot.imshowhs_grid(
        grid,
        "topographic__elevation",
        drape1=grid.at_node["soil__depth"],
        plot_type="Drape1",
        var_name="Soil",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        limits=(0, 2),
    )
def test_imshowhs_grid_4b():
    """Soil depth (Drape1) over shaded relief with vmin/vmax and contours."""
    grid = _soil_grid()
    landlab.plot.imshowhs_grid(
        grid,
        "topographic__elevation",
        drape1=grid.at_node["soil__depth"],
        plot_type="Drape1",
        var_name="Soil",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        vmin=0,
        vmax=2,
        plt_contour=True,
    )
def test_imshowhs_grid_4c():
    """Soil depth (Drape1) over shaded relief with a symmetric colorbar."""
    grid = _soil_grid()
    landlab.plot.imshowhs_grid(
        grid,
        "topographic__elevation",
        drape1=grid.at_node["soil__depth"],
        plot_type="Drape1",
        var_name="Soil",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        symmetric_cbar=True,
    )
def test_imshowhs_grid_5():
    """A zero-valued Layer_1 drape with a transparency threshold and label box."""
    grid = _soil_grid()
    grid.add_zeros("Layer_1", at="node")
    landlab.plot.imshowhs_grid(
        grid,
        "topographic__elevation",
        drape1=grid.at_node["Layer_1"],
        plot_type="Drape1",
        var_name="Layer 1",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        limits=(0, 2),
        colorbar_label_y=-55,
        add_label_bbox=True,
        thres_drape1=0.001,
    )
def _grid_with_two_layers():
    """Return a 4x5 raster grid with Layer_1 = 10 and Layer_2 = 100 everywhere."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    grid.add_zeros("soil__depth", at="node")
    layer1 = grid.add_zeros("Layer_1", at="node")
    layer2 = grid.add_zeros("Layer_2", at="node")
    layer1 += 10
    layer2 += 100
    return grid
def _plot_two_layers(grid, **extra):
    """Render Layer_1/Layer_2 as a Drape2 plot with the shared test options."""
    opts = dict(
        drape1=grid.at_node["Layer_1"],
        drape2=grid.at_node["Layer_2"],
        plot_type="Drape2",
        var_name="Layer 1",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        colorbar_label_y=-55,
        add_label_bbox=True,
        thres_drape1=0.001,
        color_for_closed="red",
    )
    opts.update(extra)
    landlab.plot.imshowhs_grid(grid, "topographic__elevation", **opts)
def test_imshowhs_grid_6a():
    """Two layers over shaded relief with fixed limits."""
    _plot_two_layers(_grid_with_two_layers(), limits=(0, 200))
def test_imshowhs_grid_6b():
    """Two layers over shaded relief, vmax < 10."""
    _plot_two_layers(_grid_with_two_layers(), vmin=0, vmax=9)
def test_imshowhs_grid_6c():
    """Two layers over shaded relief, vmax < 100."""
    _plot_two_layers(_grid_with_two_layers(), vmin=0, vmax=99)
def test_imshowhs_grid_6d():
    """Two layers over shaded relief, vmax < 1000."""
    _plot_two_layers(_grid_with_two_layers(), vmin=0, vmax=999)
def test_imshowhs_grid_6e():
    """Two layers over shaded relief with a double colorbar and large vmax."""
    _plot_two_layers(
        _grid_with_two_layers(), add_double_colorbar=True, vmin=0, vmax=99999
    )
# %%
def _zeroed_layer_grid():
    """Return a 4x5 raster grid with all four test fields zeroed at nodes."""
    grid = landlab.RasterModelGrid((4, 5))
    for field in ("topographic__elevation", "soil__depth", "Layer_1", "Layer_2"):
        grid.add_zeros(field, at="node")
    return grid
def _plot_named_drapes(grid, **extra):
    """Drape2 plot addressing drapes by field name, with shared test options."""
    opts = dict(
        drape1="topographic__elevation",
        drape2="soil__depth",
        plot_type="Drape2",
        var_name="Layer 1",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        limits=(0, 2),
        colorbar_label_y=-55,
        add_label_bbox=True,
        thres_drape1=0.001,
        color_for_closed="red",
        thres_drape2=1,
        cmap2=None,
        add_double_colorbar=True,
    )
    opts.update(extra)
    landlab.plot.imshowhs_grid(grid, "topographic__elevation", **opts)
def test_imshowhs_grid_7():
    """Field-name drapes over shaded relief (no explicit vmin/vmax)."""
    _plot_named_drapes(_zeroed_layer_grid())
def test_imshowhs_grid_8():
    """Field-name drapes over shaded relief, 10 <= vmax < 100."""
    grid = _zeroed_layer_grid()
    grid.at_node["Layer_1"] += 10
    grid.at_node["Layer_2"] += 100
    _plot_named_drapes(grid, vmin=0, vmax=99)
def test_imshowhs_grid_9():
    """Field-name drapes with vmax > 100; an invalid plot_type must raise."""
    grid = _zeroed_layer_grid()
    _plot_named_drapes(grid, vmin=0, vmax=99999)
    with pytest.raises(ValueError):
        landlab.plot.imshowhs_grid(
            grid,
            "topographic__elevation",
            plot_type="Oops",
        )
def test_imshowhs_grid_10():
    """Drape2 mode without a drape2 layer raises ValueError."""
    grid = landlab.RasterModelGrid((4, 5))
    for field in ("topographic__elevation", "soil__depth", "Layer_1", "Layer_2"):
        grid.add_zeros(field, at="node")
    with pytest.raises(ValueError):
        landlab.plot.imshowhs_grid(
            grid,
            "topographic__elevation",
            drape1=grid.at_node["Layer_1"],
            plot_type="Drape2",
            var_name="Layer 1",
            var_units=r"m",
            grid_units=("m", "m"),
            cmap="terrain",
            ticks_km=False,
            limits=(0, 2),
            colorbar_label_y=-55,
            add_label_bbox=True,
            thres_drape1=0.001,
        )
def test_imshowhs_grid_11():
    """Drape1 mode without a drape1 layer raises ValueError."""
    grid = landlab.RasterModelGrid((4, 5))
    for field in ("topographic__elevation", "soil__depth", "Layer_1", "Layer_2"):
        grid.add_zeros(field, at="node")
    with pytest.raises(ValueError):
        landlab.plot.imshowhs_grid(
            grid,
            "topographic__elevation",
            plot_type="Drape1",
            var_name="Layer 1",
            var_units=r"m",
            grid_units=("m", "m"),
            cmap="terrain",
            ticks_km=False,
            limits=(0, 2),
            colorbar_label_y=-55,
            add_label_bbox=True,
            thres_drape1=0.001,
        )
def test_imshowhs_grid_12():
    """imshowhs_grid works given only the field name, with all defaults."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    landlab.plot.imshowhs_grid(grid, "topographic__elevation")
def test_hex_mfd():
    """Hex grids are not yet supported by imshowhs_grid."""
    grid = landlab.HexModelGrid((5, 3))
    grid.add_field("topographic__elevation", grid.node_x + grid.node_y, at="node")
    with pytest.raises(NotImplementedError):
        landlab.plot.imshowhs_grid(grid, "topographic__elevation")
# %%
def test_at_cell():
    """Cell-located values are not yet supported by imshowhs_grid."""
    grid = landlab.HexModelGrid((5, 3))
    grid.add_field("topographic__elevation", np.zeros((7,)), at="cell")
    with pytest.raises(NotImplementedError):
        landlab.plot.imshowhs_grid(grid, "topographic__elevation", at="cell")
def test_at_other():
    """Locations other than node/cell (e.g. corner) raise TypeError."""
    grid = landlab.HexModelGrid((5, 3))
    grid.add_field("topographic__elevation", np.zeros((24,)), at="corner")
    with pytest.raises(TypeError):
        landlab.plot.imshowhs_grid(grid, "topographic__elevation", at="corner")
@pytest.mark.parametrize("at", ["node", "cell"])
def test_imshow_grid_guess_from_name(at):
    """imshow_grid infers the location from a field name defined at 'at'."""
    grid = landlab.RasterModelGrid((3, 4))
    grid.add_zeros("z", at=at)
    landlab.plot.imshow_grid(grid, "z")
@pytest.mark.parametrize("at", ["node", "cell"])
def test_imshow_grid_guess_from_size(at):
    """imshow_grid infers the location from the value array's length."""
    grid = landlab.RasterModelGrid((3, 4))
    landlab.plot.imshow_grid(grid, grid.zeros(at=at))
def test_imshow_grid_unknown_location():
expected = "unable to determine location of values, use 'at' keyword"
grid = landlab.RasterModelGrid((5, 3))
values = np.empty(grid.number_of_links + 1)
with pytest.raises(TypeError, match=expected):
landlab.plot.imshow_grid(grid, values)
with pytest.raises(TypeError, match=expected):
| |
'''
@author <NAME>, <NAME>, <NAME>, <NAME>
@date 30.03.2015
@brief module for generating & matching particle distributions
'''
import numpy as np
from scipy.constants import e, c
from PyHEADTAIL.general.element import Printing
from PyHEADTAIL.particles.particles import Particles
from PyHEADTAIL.particles.rfbucket_matching import RFBucketMatcher
from PyHEADTAIL.particles.rfbucket_matching import ThermalDistribution
# backwards compatibility:
StationaryExponential = ThermalDistribution
def generate_Gaussian6DTwiss(
        macroparticlenumber, intensity, charge, mass,
        circumference, gamma,
        alpha_x, alpha_y, beta_x, beta_y, beta_z,
        epsn_x, epsn_y, epsn_z,
        dispersion_x=None, dispersion_y=None,
        limit_n_rms_x=None, limit_n_rms_y=None, limit_n_rms_z=None,
        ):
    """Generate a 6D Gaussian phase space distribution of macro-particles.

    Args:
        macroparticlenumber: number of macro-particles in the beam
        intensity: number of represented beam particles
        charge: charge per particle [SI unit Coul]
        mass: mass per particle [SI unit kg]
        circumference: ring circumference (needed for effective models)
        gamma: relativistic Lorentz factor
        alpha_[x,y], beta_[x,y]: Twiss parameters the corresponding
            transverse phase space is matched to
        beta_z: longitudinal Twiss parameter, amounting to
            |eta| * circumference / (2 * pi * Qs)
        epsn_x, epsn_y: normalised RMS emittances [m.rad]
        epsn_z: longitudinal 90% emittance (4x the RMS emittance) [eV.s]
    Optional args:
        dispersion_x, dispersion_y: optics dispersion values for matching
        limit_n_rms_[x,y]: number of RMS amplitudes at which to cut the
            transverse distributions
        limit_n_rms_z: longitudinal cut in RMS amplitudes (remember that
            epsn_z is already 4x the RMS value, i.e. 2 amplitudes)

    Returns a Particles instance with the phase space matched to the
    arguments.
    """
    beta = np.sqrt(1. - gamma**-2)
    p0 = np.sqrt(gamma**2 - 1) * mass * c
    # geometric emittances derived from the normalised ones
    eps_geo_x = epsn_x / (beta * gamma)
    eps_geo_y = epsn_y / (beta * gamma)
    eps_geo_z = epsn_z * e / (4. * np.pi * p0)
    # ParticleGenerator has no epsn_z parameter, so invent an (eta, Qs)
    # pair satisfying beta_z = |eta| * circumference / (2 * pi * Qs):
    # with Qs = 1/(2 pi) this reduces to eta = beta_z / circumference.
    # Kept for backwards compatibility; has no other side effects.
    Qs = 1. / (2 * np.pi)
    eta = beta_z / circumference
    # build the three 2D Gaussian distributions, cutting each one at the
    # requested number of RMS amplitudes if a limit was given
    matched_distributions = []
    for eps_geo, limit_n_rms in ((eps_geo_x, limit_n_rms_x),
                                 (eps_geo_y, limit_n_rms_y),
                                 (eps_geo_z, limit_n_rms_z)):
        dist = gaussian2D(eps_geo)
        if limit_n_rms:
            dist = cut_distribution(
                distribution=dist,
                is_accepted=make_is_accepted_within_n_sigma(
                    eps_geo, limit_n_rms)
            )
        matched_distributions.append(dist)
    distribution_x, distribution_y, distribution_z = matched_distributions
    return ParticleGenerator(
        macroparticlenumber, intensity, charge, mass, circumference, gamma,
        distribution_x, alpha_x, beta_x, dispersion_x,
        distribution_y, alpha_y, beta_y, dispersion_y,
        distribution_z, Qs, eta
    ).generate()
def transverse_linear_matcher(alpha, beta, dispersion=None):
    '''Return a transverse matcher closure with the desired parameters.
    Args:
        alpha: Twiss parameter
        beta: Twiss parameter
        dispersion: (optional) only use in combination with a longitudinal
            phase space providing beam.dp
    Returns: Matcher (closure) taking two parameters: beam and direction
    '''
    # if dispersion and alpha:
    #     raise NotImplementedError('Transverse phase space matching: for '
    #                               'alpha != 0 we need to match including the '
    #                               'D\' (dispersion derivative). This is '
    #                               'currently not implemented.')
    # build the M matrix: only depends on twiss parameters for the
    # special case of alpha0=0, beta0=1 and phi = 0 (=2pi)
    # Wiedemann, Particle Accelerator Physics 3rd edition, p. 170
    M = np.zeros(shape=(2, 2))
    M[0, 0] = np.sqrt(beta)
    M[0, 1] = 0
    M[1, 0] = -alpha / np.sqrt(beta)
    M[1, 1] = np.sqrt(1. / beta)
    def _transverse_linear_matcher(beam, direction):
        '''Match the coords specified by the direction list.
        Args:
            beam: a Particles instance
            direction: list (len>=2) specifying which coordinates to match;
                the first element corresponds to space, the second to
                the momentum coordinate, e.g. ['x', 'xp']
        Returns:
            Nothing, transforms the beam coordinates in place
        '''
        space_coords = getattr(beam, direction[0])
        space_coords_copy = space_coords.copy()
        momentum_coords = getattr(beam, direction[1])
        # plain assignment (not +=/*=) so the beam's arrays are not mutated
        # before both new coordinates have been computed
        space_coords = (M[0, 0]*space_coords +
                        M[0, 1]*momentum_coords)
        momentum_coords = (M[1, 0]*space_coords_copy +
                           M[1, 1]*momentum_coords)
        # add dispersion effects; a missing longitudinal plane surfaces as
        # AttributeError from the attribute access (getattr raises
        # AttributeError, not KeyError, for missing attributes)
        if dispersion:
            try:
                space_coords += dispersion * beam.dp
            except AttributeError:
                print(('Dispersion in the transverse phase space depends on '
                       'dp, however no longitudinal phase space was specified. '
                       'No matching performed'))
        setattr(beam, direction[0], space_coords)
        setattr(beam, direction[1], momentum_coords)
    return _transverse_linear_matcher
def longitudinal_linear_matcher(Qs, eta, C):
    r'''Return a simple longitudinal matcher.
    Internally delegates to the transverse linear matcher with
    alpha = 0 and beta = beta_z, where
        beta_z = |eta| * C / (2 * pi * Qs)
    Args:
        Qs: synchrotron tune
        eta: slippage factor (zeroth order),
            i.e. \alpha_c - gamma^{-2} (\alpha_c = momentum compaction
            factor)
        C: circumference
    Returns:
        A matcher (closure) with the specified Qs, eta
    '''
    beta_z = C * np.abs(eta) / (2. * np.pi * Qs)
    matched_z_dp = transverse_linear_matcher(alpha=0., beta=beta_z)
    def _longitudinal_linear_matcher(beam, *args, **kwargs):
        '''Match beam.z and beam.dp to the enclosed Qs, eta and
        circumference.
        Args:
            beam: provides beam.z and beam.dp
        Returns:
            nothing, modifies the beam in place
        '''
        matched_z_dp(beam, direction=['z', 'dp'])
    return _longitudinal_linear_matcher
def RF_bucket_distribution(rfbucket, sigma_z=None, epsn_z=None,
                           margin=0, distribution_type=ThermalDistribution,
                           *args, **kwargs):
    '''Return a distribution function generating particles matched to the
    given RF bucket and a target emittance or bunch length.
    Specify exactly one of sigma_z, epsn_z.
    Args:
        rfbucket: An object of type RFBucket
        sigma_z: target std
        epsn_z: target normalized emittance in z-direction
        margin: relative margin from the separatrix towards the
            inner stable fix point in which particles are avoided
        distribution_type: longitudinal distribution type from
            rfbucket_matching (default ThermalDistribution, which
            produces a Gaussian-like matched Boltzmann distribution)
    Returns:
        A matcher (closure) with the specified bucket properties
    Raises:
        ValueError: If neither or both of sigma_z, epsn_z are specified
    '''
    bucket_matcher = RFBucketMatcher(rfbucket, distribution_type,
                                     sigma_z=sigma_z, epsn_z=epsn_z,
                                     *args, **kwargs)
    def _RF_bucket_dist(n_particles):
        '''Generate n_particles (z, dp) pairs matched to the bucket.'''
        z, dp, _, _ = bucket_matcher.generate(n_particles, margin)
        return [z, dp]
    return _RF_bucket_dist
def cut_distribution(distribution, is_accepted):
    """Generate coordinates according to some distribution inside the
    region where the function is_accepted returns True.
    (Wrapper for distributions, based on RF_cut.)
    Args:
        distribution: a function which takes n_particles as a
                      parameter and returns a list-like object
                      containing a 2D phase space. result[0] should
                      stand for the spatial, result[1] for the momentum
                      coordinate
        is_accepted: function taking two parameters (z, dp)
                     [vectorised as arrays] and returning a boolean
                     array specifying whether each coordinate pair lies
                     inside the desired phase space volume. A possible
                     source of such an is_accepted function is
                     RFBucket.make_is_accepted or
                     generators.make_is_accepted_within_n_sigma .
    Returns:
        A matcher (closure) restricted to the accepted region
    """
    def _cut_distribution(n_particles):
        '''Generate n_particles coordinate pairs, regenerating all
        particles which fall outside the accepted phase space region
        until every particle has valid coordinates and momenta.
        '''
        # draw the initial sample directly (no need to pre-allocate:
        # the distribution output is used and refined in place)
        new_coords = distribution(n_particles)
        z = new_coords[0]
        dp = new_coords[1]
        mask_out = ~is_accepted(z, dp)
        while mask_out.any():
            # redraw only the rejected particles
            n_gen = np.sum(mask_out)
            new_coords = distribution(n_gen)
            z[mask_out] = new_coords[0]
            dp[mask_out] = new_coords[1]
            mask_out = ~is_accepted(z, dp)
        return [z, dp]
    return _cut_distribution
def make_is_accepted_within_n_sigma(rms_amplitude=None, limit_n_rms=None,
epsn_rms=None):
'''Closure creating an is_accepted function (e.g. for
cut_distribution). The is_accepted function will return whether
the canonical coordinate and momentum pair lies within the phase
space region limited by the action value
limit_n_rms * rms_amplitude.
The closure acts on normalised Floquet space, i.e. do apply this
function to the particles before matching to the optics values.
Coordinate u and momentum up are squared to give the action
amplitude
J = u^2 + up^2 .
The amplitude is required to be below the limit to be accepted,
J < limit_n_rms * rms_amplitude.
The usual use case will be generating u and up in normalised Floquet
space (i.e. before the normalised phase space coordinates
get matched to the optics or longitudinal eta and Qs).
Consequently, the 1 sigma RMS reference value
epsn_rms corresponds to the normalised 1 sigma RMS emittance
(i.e. amounting to beam.epsn_x() and beam.epsn_y() in the transverse
plane, and beam.epsn_z()/4 in the longitudinal plane).
'''
if epsn_rms:
# backwards compatibility (it was bad naming):
limit_n_rms *= limit_n_rms
assert rms_amplitude == None, \
("epsn_rms is for backwards compatibility, it has been "
"replaced by its sqrt-value rms_amplitude. Please do not "
"use both at the same time!")
rms_amplitude = epsn_rms**2
threshold_amplitude = limit_n_rms * rms_amplitude
def is_accepted(u, up):
Jsq = u**2 + up**2
return | |
<filename>randomimagedownload.py<gh_stars>0
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Finds and downloads random images using the Google Custom Search API.
"""
__author__ = '<NAME> <$(echo nqnz.enshfr#tznvy.pbz | tr a-z# n-za-m@)>'
import os
import sys
import random
import datetime
import time
import argparse
import threading
import signal
import urlparse
import fcntl, termios, struct
import requests
# Configurable API prerequisites (https://developers.google.com/custom-search/json-api/v1/overview)
API_KEY = 'YOUR_API_KEY_HERE'
SEARCH_ENGINE_ID = 'YOUR_SEARCH_ENGINE_ID_HERE'
# Application defaults
NUM_IMAGES = 30 # Number of images to fetch
MAX_IMAGE_YEARS = 10 # For Android phones, oldest images to search for
MAX_IMAGE_FILE = 200 # For other devices, highest image number to search for
IMAGE_SIZE = 'medium' # Desired image size
IMAGE_FORMAT = 'jpg' # Desired image file format
QUERY_BATCH_SIZE = 10 # Number of query results to fetch at once (max 10)
QUERY_RETRIES = 3 # Number of query retry attempts before giving up on errors
NUM_THREADS = 4 # Number of worker threads to spawn
DOWNLOAD_CHUNK_SIZE = 1024 # Streaming download chunk size
# File name templates for common cameras and phones
FILENAME_SIGS = {
'img' : 'IMG_{:04d}', # iPhone, Canon G2
'imgd': 'IMG_{:%Y%m%d_%H%M%S}', # Android
'dsc' : 'DSC_{:04d}', # Nikon D1
'sbcs': 'SBCS{:04d}', # Canon EOS
'dscn': 'DSCN{:04d}', # Nikon
'dcp' : 'DCP_{:04d}', # Kodak DC
'dscf': 'DSCF{:04d}', # Fuji Finepix
'pict': 'PICT{:04d}', # Minolta
'mvc' : 'MVC-{:04d}' # Mavica
}
# Base URL of the Custom Search API
SEARCH_API_BASE = 'https://www.googleapis.com/customsearch/v1'
def main(output, num_images, image_size, image_format, batch_size, workers):
    """
    Locates random images and schedules them for download into 'output' using
    a pool of 'workers' downloader threads. Runs until 'num_images' images
    have been scheduled or an unrecoverable query error occurs; CTRL+C shuts
    down cleanly, letting in-flight downloads finish.
    """
    random_images = RandomImageLocator(size=image_size, format=image_format)
    downloaders = []
    # Create download workers
    for i in xrange(workers):
        downloaders.append(Downloader(str(i), output))
        downloaders[i].start()
    # Start status printer worker
    printer = ThreadStatusPrinter(downloaders)
    printer.start()
    # Allow user interrupt (CTRL+C) to shut down cleanly
    def shutdown_handler(*args, **kwargs):
        printer.footer = "(Got user interrupt, closing ...)"
        for downloader in downloaders:
            downloader.stop()
            downloader.join() # Let downloads finish before stopping the printer worker
        printer.stop()
        sys.exit(0)
    signal.signal(signal.SIGINT, shutdown_handler)
    # Download random images
    try:
        for url in random_images.generate_urls(num_images, batch_size):
            # Get sizes of all worker thread queues
            queue_sizes = []
            for i, downloader in enumerate(downloaders):
                queue_sizes.append(len(downloader.urls))
            # Schedule based on min queue size
            i = queue_sizes.index(min(queue_sizes))
            downloaders[i].urls.append(url)
            downloaders[i].resume()
    except IOError as e:
        # str() the exception explicitly: concatenating str + exception
        # object raises TypeError and masks the original error
        print('IOError: {}'.format(e))
    # Signal threads to stop
    for downloader in downloaders:
        downloader.stop()
    printer.stop()
class ThreadStatusPrinter(threading.Thread):
    """
    Handles printing of output and status refreshes for multiple threads.
    """
    def __init__(self, thread_objects, *args, **kwargs):
        threading.Thread.__init__(self, *args, **kwargs)
        self.thread_objects = thread_objects  # objects whose statuses get printed
        self.footer = None  # optional line rendered below all status lines
        self._stopping = False  # set by stop(); run() exits its loop when True
    def run(self):
        """
        Continually prints the statuses of passed thread objects to stdout. If an object contains a
        'completed_statuses' (str list) property the first elements are popped off and printed once. If an object
        contains a 'status_line' (str) property it is continually refreshed in-place at the bottom of the output. An
        optional 'self.footer' (str) is finally printed at the bottom of the output.
        """
        printed_lines = 0
        while not (self._stopping):
            # NOTE(review): terminal_size() returns (width, height), so 'rows'
            # actually holds the terminal *width*; it is only used below to
            # right-pad each line with spaces so stale characters are erased.
            rows, _ = self.terminal_size()
            printed_lines = 0
            # Print any completed statuses first
            for i, obj in enumerate(self.thread_objects):
                while (obj.completed_statuses):
                    string = 'Thread {}: {}'.format(i, obj.completed_statuses.pop(0))
                    sys.stdout.write('{}{}\n'.format(string, ' ' * (rows - len(string))))
            # Refresh current statuses
            for i, obj in enumerate(self.thread_objects):
                if (obj.status_line is not None):
                    string = 'Thread {}: {}'.format(i, obj.status_line)
                    sys.stdout.write('{}{}\n'.format(string, ' ' * (rows - len(string))))
                    printed_lines += 1
            # Print footer if it exists
            if (self.footer):
                sys.stdout.write(self.footer + '\n')
                printed_lines += 1
            # Back cursor over previous lines for refresh (ANSI CPL escape)
            sys.stdout.write('\033[{}F'.format(printed_lines))
            time.sleep(0.5) # TODO: Make this configurable
            sys.stdout.flush()
        # Return the cursor to where it should be at the end
        sys.stdout.write('\n' * printed_lines)
    def terminal_size(self):
        """
        Portable magic to get the current size of the terminal window in characters. Returns a tuple of (width,
        height).
        http://stackoverflow.com/a/3010495
        """
        # TIOCGWINSZ fills a struct winsize: (rows, cols, xpixel, ypixel),
        # hence rows unpack first even though (width, height) is returned.
        height, width, _, _ = struct.unpack('HHHH',
            fcntl.ioctl(0, termios.TIOCGWINSZ,
            struct.pack('HHHH', 0, 0, 0, 0)))
        return width, height
    def stop(self):
        # Ask run() to finish after its current refresh cycle.
        self._stopping = True
class Downloader(threading.Thread):
    """
    Multithreaded streaming downloader.
    """
    def __init__(self, name, directory, *args, **kwargs):
        threading.Thread.__init__(self, name=name, *args, **kwargs)
        self.urls = []  # work queue; producer appends, run() pops from the front
        self.name = name
        self.directory = directory  # destination directory for downloaded files
        self.status_line = 'Downloading ...'  # current status shown by the printer
        self.completed_statuses = []  # finished (done/failed) statuses, printed once
        self._stopping = False  # set by stop(); run() exits its loop when True
        self._waiting = False  # True while the queue is empty; cleared by resume()
    def run(self):
        """
        Keeps pulling URLs out of self.urls and downloads them to self.directory. Pauses if the URL queue is empty
        until self.resume() is called. Keeps track of the current download status including the transfer rate in
        self.status_line. Adds any completed statuses (done or failed) to self.completed_statuses. Continues until
        self.stop() is called, which will allow the last download to finish. Calling self.interrupt() will cancel any
        download in progress.
        """
        # NOTE(review): this is a busy-wait loop while _waiting/_stopping are
        # polled; there is no sleep between iterations when the queue is empty.
        while not (self._stopping):
            if not (self._waiting):
                try:
                    # EAFP: pop() raising IndexError signals an empty queue
                    url = self.urls.pop(0)
                    # Derive the local file name from the URL path's last segment
                    filename = urlparse.urlsplit(url).path.split('/')[-1]
                    filepath = os.path.join(self.directory, filename)
                    self.status_line = 'Downloading (... KB/S) {} ...'.format(filepath)
                    try:
                        # Stream the body so large files are written chunk by chunk
                        response = requests.get(url, stream=True)
                        with open(filepath, 'wb') as f: # TODO: Check for existing files
                            start_time = time.time()
                            current_bytes = 0
                            kbps = 0
                            for chunk in response.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE):
                                time_delta = time.time() - start_time
                                # Refresh the live status with the running transfer rate
                                self.status_line = 'Downloading ({} KB/s) {} ...'.format(kbps, filepath)
                                if chunk: # Ignore keep-alive chunks
                                    current_bytes += len(chunk)
                                    kbps = int((float(current_bytes) / (time_delta)) / 1024)
                                    f.write(chunk)
                        # Download completed
                        self.status_line += ' Done'
                        self.completed_statuses.append(self.status_line)
                    except requests.ConnectionError:
                        # Requests has given up retrying the download
                        self.status_line += ' FAILED'
                        self.completed_statuses.append(self.status_line)
                except IndexError:
                    # Empty queue, wait for manual resume to avoid raising more IndexErrors
                    self._waiting = True
                    self.status_line += ' Done'
    def stop(self):
        # Request shutdown; clearing the queue lets the current item finish
        # while preventing any further downloads from starting.
        self._stopping = True
        self.urls = []
    def is_stopping(self):
        return self._stopping
    def resume(self):
        # Wake the worker after new URLs have been appended to self.urls.
        self._waiting = False
class RandomImageLocator(object):
    """
    Locates random images using the Google Custom Search API.
    """

    def __init__(self, *args, **kwargs):
        # 'size' / 'format' kwargs override the module-level defaults.
        self._image_size = kwargs.pop('size', IMAGE_SIZE)
        self._image_format = kwargs.pop('format', IMAGE_FORMAT)
        # Hosts an image has already been taken from; used to enforce
        # one URL per source in get_url_batch().
        self._sources = []

    def query_images(self, term, number, start):
        """
        Queries 'number' images using the Google Custom Search API for 'term', and returns results beginning at
        'start'. Returns the raw requests response object.
        """
        parameters = {
            'q': term,
            'cx': SEARCH_ENGINE_ID,
            'fileType': self._image_format,
            'imgSize': self._image_size,
            'number': number,
            'searchType': 'image',
            'start': start,
            'fields': 'items(displayLink,link)',
            'prettyPrint': 'false',
            'key': API_KEY,
        }
        return requests.get(SEARCH_API_BASE, params=parameters)

    def get_url_batch(self, number):
        """
        Get a batch of 'number' random image URLs.

        Returns a dict with 'status' (HTTP status code), 'message' and a
        (possibly empty) list of 'urls' from not-yet-seen sources.
        """
        result = {
            'status': 200,
            'message': 'OK',
            'urls': []
        }
        # The Custom Search API returns only the first 100 results, so
        # pick a random window within them.
        offset = random.randint(1, 100 - number)
        response = self.query_images(self.random_camera_filename(), number, offset)
        if response.status_code == 200:
            try:
                for item in response.json()['items']:
                    # Only use URLs from a unique source.
                    source = item['displayLink']
                    if source not in self._sources:
                        self._sources.append(source)
                        result['urls'].append(item['link'])
            except KeyError:
                # Query executed ok but no images were found.
                pass
        else:
            result['status'] = response.status_code
            result['message'] = response.text
        return result

    def generate_urls(self, number, batch_size):
        """
        Yields up to 'number' image URLs from unique sources. Retries transient errors with exponential backoff.
        Raises IOError if generation cannot continue due to an error.
        """
        total = 0
        retries = 0
        while total < number:
            response = self.get_url_batch(batch_size)
            status = response['status']
            if 500 <= status <= 599 or status in (400, 408, 429):
                # Retriable error: back off exponentially, give up after
                # QUERY_RETRIES consecutive failures.
                retries += 1
                if retries > QUERY_RETRIES:
                    raise IOError("{}, {}".format(status, response['message']))
                time.sleep(2 ** retries)
            elif status == 200:
                # Success; reset the failure streak.
                retries = 0
                for url in response['urls']:
                    yield url
                    total += 1
                    if total >= number:
                        # Honor the "up to 'number'" contract even when a
                        # batch contains extras (the original overshot).
                        break
            else:
                # Unretriable error.
                raise IOError("{}, {}".format(status, response['message']))

    @classmethod
    def random_camera_filename(cls):
        """
        Returns a random filename which may exist for an image taken by a number of common cameras / phones.
        """
        # list() is required on Python 3, where dict.keys() is a view
        # that random.choice() cannot index.
        key = random.choice(list(FILENAME_SIGS.keys()))
        if key == 'imgd':
            # Date-based signature: format with a random past datetime.
            return FILENAME_SIGS[key].format(cls.random_past_datetime(MAX_IMAGE_YEARS)) + '.' + IMAGE_FORMAT
        return FILENAME_SIGS[key].format(random.randint(1, MAX_IMAGE_FILE)) + '.' + IMAGE_FORMAT

    @classmethod
    def random_past_datetime(cls, years):
        """
        Returns a random datetime between now and 'years' years ago.
        """
        now = datetime.datetime.utcnow()
        try:
            then = now.replace(year=now.year - years)
        except ValueError:
            # 'now' is Feb 29 and the target year is not a leap year.
            then = now.replace(year=now.year - years, day=28)
        delta = (now - then).total_seconds()
        # int() keeps random.randint() happy on Python 3, which rejects
        # non-integer bounds.
        return datetime.datetime.fromtimestamp(
            time.mktime(then.timetuple()) + random.randint(1, int(delta)))
if __name__ == '__main__':
argParser = argparse.ArgumentParser(description="Downloads random images from the internet.")
argParser.add_argument('output', help="Output directory to save downloaded images to.")
argParser.add_argument('-n', '--number', type=int,
help="Number of images to download (default {}).".format(NUM_IMAGES))
argParser.add_argument('-s', '--size',
help="Desired | |
<filename>lib/check/__init__.py
# Copyright © 2012-2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
checks
'''
import abc
import collections
import difflib
import email.utils
import heapq
import os
import re
import types
import urllib.parse
import polib
from lib import domains
from lib import encodings as encinfo
from lib import gettext
from lib import ling
from lib import misc
from lib import polib4us
from lib import tags
from lib import xml
from lib.check.msgformat import c as msgformat_c
from lib.check.msgformat import perlbrace as msgformat_perlbrace
from lib.check.msgformat import pybrace as msgformat_pybrace
from lib.check.msgformat import python as msgformat_python
from lib.check.msgrepr import message_repr
class EnvironmentNotPatched(RuntimeError):
    """Raised when a Checker is created before Checker.patch_environment()."""
    pass
class EnvironmentAlreadyPatched(RuntimeError):
    """Raised when Checker.patch_environment() is called more than once."""
    pass
# Return every "unusual" character in a string: control characters
# (with the legitimate exceptions noted per-line), characters that are
# usually artifacts of broken text processing (BOM, REPLACEMENT
# CHARACTER, non-characters), and INVERTED QUESTION MARK directly after
# a word character.
find_unusual_characters = re.compile(
    r'[\x00-\x08\x0B-\x1A\x1C-\x1F]'  # C0 except TAB, LF, ESC
    r'|\x1B(?!\[)'  # ESC, except when followed by [
    r'|\x7F'  # DEL
    r'|[\x80-\x9F]'  # C1
    '|\uFEFF'  # ZERO WIDTH NO-BREAK SPACE
    '|\uFFFD'  # REPLACEMENT CHARACTER
    '|[\uFFFE\uFFFF]'  # non-characters
    r'|(?<=\w)\xBF'  # INVERTED QUESTION MARK but only directly after a letter
).findall
# Registry of header fields for which a dedicated check exists.
header_fields_with_dedicated_checks = set()

def checks_header_fields(*fields):
    """Mark the decorated check as the dedicated handler for *fields*.

    Records the field names in header_fields_with_dedicated_checks and
    returns the decorated object unchanged.
    """
    header_fields_with_dedicated_checks.update(fields)

    def identity(x):
        return x
    return identity
class Checker(metaclass=abc.ABCMeta):
_patched_environment = None
@classmethod
def patch_environment(cls):
    """Install the process-wide patches checkers rely on.

    Registers the extra encodings and the polib patches.  Must be
    called before instantiating any checker; raises
    EnvironmentAlreadyPatched on a second call.
    """
    if cls._patched_environment is not None:
        raise EnvironmentAlreadyPatched
    encinfo.install_extra_encodings()
    polib4us.install_patches()
    cls._patched_environment = True
def __init__(self, path, *, options):
    """Create a checker for the file at *path*.

    *options* must at least provide a ``fake_root`` attribute (None, or
    a ``(real_root, fake_root)`` prefix pair used to rewrite reported
    paths).  Raises EnvironmentNotPatched when patch_environment() has
    not been called yet.
    """
    if self._patched_environment is not True:
        raise EnvironmentNotPatched
    self.path = path
    # fake_path is what gets *reported*: the fake root prefix is
    # substituted for the real one when path lies under the real root.
    self.fake_path = path
    if options.fake_root is not None:
        (real_root, fake_root) = options.fake_root
        # Both roots must be directory prefixes (end with the path
        # separator) for the textual substitution below to be safe.
        if not real_root.endswith(os.sep):
            raise ValueError
        if not fake_root.endswith(os.sep):
            raise ValueError
        if path.startswith(real_root):
            self.fake_path = fake_root + path[len(real_root):]
    self.options = options
    # One message-format checker per supported format.
    self._message_format_checkers = {
        'c': msgformat_c.Checker(self),
        'perl-brace': msgformat_perlbrace.Checker(self),
        'python': msgformat_python.Checker(self),
        'python-brace': msgformat_pybrace.Checker(self),
    }
@abc.abstractmethod
def tag(self, tagname, *extra):
    """Report a check result identified by *tagname* with optional
    extra context values; implemented by concrete subclasses."""
    pass
def check(self):
    """Run all checks against the PO/MO file at self.path.

    Determines the file type from the extension (or the file_type
    option), parses it with polib, reports parse/encoding errors as
    tags, then dispatches to the per-aspect check_*() methods with a
    shared context namespace.
    """
    # If a file passed to polib doesn't exist, it will “helpfully” treat it
    # as PO/MO file _contents_. This is definitely not what we want. To
    # prevent such disaster, fail early if the file doesn't exist.
    try:
        os.stat(self.path)
    except OSError as exc:
        self.tag('os-error', tags.safestr(exc.strerror))
        return
    if self.options.file_type is None:
        extension = os.path.splitext(self.path)[-1]
    else:
        extension = '.' + self.options.file_type
    is_template = False
    is_binary = False
    if extension == '.po':
        constructor = polib.pofile
    elif extension == '.pot':
        constructor = polib.pofile
        is_template = True
    elif extension in {'.mo', '.gmo'}:
        constructor = polib.mofile
        is_binary = True
    else:
        self.tag('unknown-file-type')
        return
    broken_encoding = False
    try:
        try:
            file = constructor(self.path)
        except UnicodeDecodeError as exc:
            # Retry with a lenient single-byte encoding so the remaining
            # checks can still run; the original error is reported in
            # the finally block below.
            broken_encoding = exc
            file = constructor(self.path, encoding='ISO-8859-1')
    except polib4us.moparser.SyntaxError as exc:
        self.tag('invalid-mo-file', tags.safestr(exc))
        return
    except OSError as exc:
        message = str(exc)
        if exc.errno is not None:
            self.tag('os-error', tags.safestr(exc.strerror))
            return
        elif message.startswith('Syntax error in po file '):
            # Strip polib's message prefix (24 characters), then the
            # pathname and line number, down to a concise tag payload.
            message = message[24:]
            message_parts = []
            if message.startswith(self.path + ' '):
                message = message[len(self.path)+1:]
                match = re.match(r'^\(line ([0-9]+)\)(?:: (.+))?$', message)
                if match is not None:
                    lineno_part = 'line {}'.format(match.group(1))
                    message = match.group(2)
                    if message is not None:
                        lineno_part += ':'
                        if re.match(r'^[a-z]+( [a-z]+)*$', message):
                            message = tags.safestr(message)
                    message_parts += [tags.safestr(lineno_part)]
            if message is not None:
                message_parts += [message]
            self.tag('syntax-error-in-po-file', *message_parts)
            return
        raise
    finally:
        if broken_encoding:
            # Show a window of bytes around the first undecodable one.
            # pylint: disable=no-member
            s = broken_encoding.object
            assert isinstance(s, bytes)
            begin = max(broken_encoding.start - 40, 0)
            end = broken_encoding.start + 40
            s = s[begin:end]
            self.tag('broken-encoding',
                s,
                tags.safestr('cannot be decoded as'),
                broken_encoding.encoding.upper(),
            )
            # pylint: enable=no-member
            broken_encoding = True
    ctx = types.SimpleNamespace()
    ctx.file = file
    ctx.is_template = is_template
    ctx.is_binary = is_binary
    self.check_comments(ctx)
    self.check_headers(ctx)
    self.check_language(ctx)
    self.check_plurals(ctx)
    self.check_mime(ctx)
    if broken_encoding:
        # The declared encoding is unreliable; don't let later checks
        # trust it.
        ctx.encoding = None
    self.check_dates(ctx)
    self.check_project(ctx)
    self.check_translator(ctx)
    self.check_messages(ctx)
def check_comments(self, ctx):
    """Flag template boilerplate left in the file's initial comments."""
    patterns = [
        r'\bPACKAGE package\b',
        r'\bCopyright \S+ YEAR\b',
        r"\bTHE PACKAGE'S COPYRIGHT HOLDER\b",
    ]
    if not ctx.is_template:
        # These placeholders are fine in a template but should have
        # been filled in for a real translation.
        patterns += [
            r'\bFIRST AUTHOR\b',
            r'<EMAIL@ADDRESS>',
            r'(?<=>), YEAR\b',
        ]
    boilerplate = re.compile('|'.join(patterns))
    for line in ctx.file.header.splitlines():
        if boilerplate.search(line) is not None:
            self.tag('boilerplate-in-initial-comments', line)
@checks_header_fields('Language', 'X-Poedit-Language', 'X-Poedit-Country')
def check_language(self, ctx):
    """Determine the translation language and validate language headers.

    Sets ctx.language (or leaves it None if undeterminable),
    cross-checking the command-line override, the pathname, the
    Language header field and the X-Poedit-* fields, and tagging any
    inconsistencies found along the way.
    """
    ctx.language = None
    duplicate_meta_language = False
    meta_languages = ctx.metadata['Language']
    if len(meta_languages) > 1:
        self.tag('duplicate-header-field-language')
        meta_languages = sorted(set(meta_languages))
        if len(meta_languages) > 1:
            duplicate_meta_language = True
    if len(meta_languages) == 1:
        [meta_language] = meta_languages
    else:
        meta_language = None
    orig_meta_language = meta_language
    if ctx.is_template:
        if meta_language is None:
            self.tag('no-language-header-field')
        return
    # A language passed on the command line overrides everything else.
    language = self.options.language
    language_source = 'command-line'
    language_source_quality = 1
    if language is None:
        # Try to deduce the language from a .../<lang>/LC_MESSAGES/... path.
        path_components = os.path.normpath(self.path).split('/')
        try:
            i = path_components.index('LC_MESSAGES')
        except ValueError:
            i = 0
        if i > 0:
            language = path_components[i - 1]
            try:
                language = ling.parse_language(language)
                language.fix_codes()
                language.remove_encoding()
                language.remove_nonlinguistic_modifier()
            except ling.LanguageError:
                # It's not our job to report possible errors in _pathnames_.
                language = None
            else:
                language_source = 'pathname'
        del path_components, i
    if language is None and self.path.endswith('.po'):
        # Fall back to deducing the language from the PO basename.
        language, ext = os.path.splitext(os.path.basename(self.path))
        assert ext == '.po'
        try:
            language = ling.parse_language(language)
            if language.encoding is not None:
                # It's very likely that something else has been confused
                # for the apparent encoding.
                raise ling.LanguageError
            language.fix_codes()
            language.remove_nonlinguistic_modifier()
        except ling.LanguageError:
            # It's not our job to report possible errors in _pathnames_.
            language = None
        else:
            language_source = 'pathname'
            language_source_quality = 0
    if meta_language:
        try:
            meta_language = ling.parse_language(meta_language)
        except ling.LanguageError:
            # Not a parseable language code; maybe it is a plain
            # language *name* instead.
            try:
                new_meta_language = ling.get_language_for_name(meta_language)
            except LookupError:
                new_meta_language = None
            if new_meta_language:
                self.tag('invalid-language', orig_meta_language, '=>', new_meta_language)
            else:
                self.tag('invalid-language', orig_meta_language)
            meta_language = new_meta_language
    if meta_language:
        if meta_language.remove_encoding():
            self.tag('encoding-in-language-header-field', orig_meta_language)
        if meta_language.remove_nonlinguistic_modifier():
            self.tag('language-variant-does-not-affect-translation', orig_meta_language)
        try:
            if meta_language.fix_codes():
                self.tag('invalid-language', orig_meta_language, '=>', meta_language)
        except ling.LanguageError:
            self.tag('invalid-language', orig_meta_language)
            meta_language = None
    if language_source_quality <= 0 and (
        '/{lang}/'.format(lang=meta_language) in self.path or
        '/{lang}/'.format(lang=str(meta_language).replace('_', '-')) in self.path
    ):
        # For LibreOffice, PO basename does not designate translation
        # language, but one of the path components does.
        # For example,
        #   translations/source/da/dictionaries/pl_PL.po
        # is a Danish translation.
        language = None
    if meta_language:
        if language is None:
            language = meta_language
            language_source = 'Language header field'
        elif language != meta_language:
            self.tag('language-disparity',
                language, tags.safestr('({})'.format(language_source)),
                '!=',
                meta_language, tags.safestr('(Language header field)')
            )
    poedit_languages = ctx.metadata['X-Poedit-Language']
    if len(poedit_languages) > 1:
        self.tag('duplicate-header-field-x-poedit', 'X-Poedit-Language')
        poedit_languages = sorted(set(poedit_languages))
    poedit_countries = ctx.metadata['X-Poedit-Country']
    if len(poedit_countries) > 1:
        self.tag('duplicate-header-field-x-poedit', 'X-Poedit-Country')
        poedit_countries = sorted(set(poedit_countries))
    if len(poedit_languages) == 1 and len(poedit_countries) <= 1:
        [poedit_language] = poedit_languages
        # FIXME: This should take also X-Poedit-Country into account.
        try:
            poedit_language = ling.get_language_for_name(poedit_language)
        except LookupError:
            self.tag('unknown-poedit-language', poedit_language)
        else:
            if language is None:
                language = poedit_language
                language_source = 'X-Poedit-Language header field'
            elif language.language_code != poedit_language.language_code:
                self.tag('language-disparity',
                    language, tags.safestr('({})'.format(language_source)),
                    '!=',
                    poedit_language, tags.safestr('(X-Poedit-Language header field)')
                )
    if language is None:
        if not orig_meta_language and not duplicate_meta_language:
            self.tag('no-language-header-field')
        self.tag('unable-to-determine-language')
        return
    if not orig_meta_language and not duplicate_meta_language:
        # Suggest the deduced language for the missing header field.
        self.tag('no-language-header-field', tags.safestr('Language:'), language)
    ctx.language = language
@checks_header_fields('Plural-Forms')
def check_plurals(self, ctx):
ctx.plural_preimage = None
plural_forms = ctx.metadata['Plural-Forms']
if len(plural_forms) > 1:
self.tag('duplicate-header-field-plural-forms')
plural_forms = sorted(set(plural_forms))
if len(plural_forms) > 1:
return
if len(plural_forms) == 1:
[plural_forms] = plural_forms
else:
assert len(plural_forms) == 0
plural_forms = None
correct_plural_forms = None
if ctx.language is not None:
correct_plural_forms = ctx.language.get_plural_forms()
has_plurals = False # messages with plural forms (translated or not)?
expected_nplurals = {} # number of plurals in _translated_ messages
for message in ctx.file:
if message.obsolete:
continue
if message.msgid_plural is not None:
has_plurals = True
if not message.translated():
continue
expected_nplurals[len(message.msgstr_plural)] = message
if len(expected_nplurals) > 1:
break
if len(expected_nplurals) > 1:
args = []
for n, message in sorted(expected_nplurals.items()):
args += [n, message_repr(message, template='({})'), '!=']
self.tag('inconsistent-number-of-plural-forms', *args[:-1])
if ctx.is_template:
plural_forms_hint = 'nplurals=INTEGER; plural=EXPRESSION;'
elif correct_plural_forms:
plural_forms_hint = tags.safe_format(
' or '.join('{}' for s in correct_plural_forms),
*correct_plural_forms
)
else:
plural_forms_hint = 'nplurals=<n>; plural=<expression>'
if plural_forms is None:
if has_plurals:
if expected_nplurals:
self.tag('no-required-plural-forms-header-field', plural_forms_hint)
else:
self.tag('no-plural-forms-header-field', plural_forms_hint)
return
if ctx.is_template:
return
try:
(n, expr, ljunk, rjunk) = gettext.parse_plural_forms(plural_forms, strict=False)
except gettext.PluralFormsSyntaxError:
if has_plurals:
self.tag('syntax-error-in-plural-forms', plural_forms, '=>', plural_forms_hint)
else:
self.tag('syntax-error-in-unused-plural-forms', plural_forms, | |
<filename>VL-BERT/common/backbone/resnet/resnet.py<gh_stars>10-100
"""
Modified from torchvision, but exposes features from different stages
"""
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch
import warnings
# Public API of this module.
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
           'resnet152']
# Torchvision pretrained checkpoint URLs, keyed by architecture name.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
# Residual-block counts for the four stages of each architecture.
model_layers = {
    'resnet18': [2, 2, 2, 2],
    'resnet34': [3, 4, 6, 3],
    'resnet50': [3, 4, 6, 3],
    'resnet101': [3, 4, 23, 3],
    'resnet152': [3, 8, 36, 3],
}
def conv3x3(in_planes, out_planes, stride=1, dilation=1, padding=1):
    """3x3 convolution with padding (bias-free, as BN follows)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, dilation=dilation,
                     padding=padding, bias=False)


class BasicBlock(nn.Module):
    """Two 3x3 convs with BN/ReLU and an additive residual connection.

    `downsample`, if given, is applied to the block input to make its
    shape match the residual sum.  Extra keyword args (e.g.
    `stride_in_1x1`, which only applies to Bottleneck) are accepted and
    ignored so both block types share a construction interface.
    """
    expansion = 1  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1, **kwargs):
        super(BasicBlock, self).__init__()
        # padding == dilation keeps the 3x3 conv size-preserving for any
        # dilation (generalizes the old hard-coded 1-or-2 special cases
        # that previously lived here as commented-out code).
        self.conv1 = conv3x3(inplanes, planes, stride, dilation, padding=dilation)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Return relu(bn(conv(relu(bn(conv(x))))) + residual)."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50/101/152).

    `stride_in_1x1` selects where the spatial stride is applied: in the
    first 1x1 conv (Caffe/Detectron convention) or in the 3x3 conv
    (torchvision convention, the default).  `downsample`, if given, is
    applied to the block input to match the residual sum's shape.
    """
    expansion = 4  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1, stride_in_1x1=False):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1,
                               stride=stride if stride_in_1x1 else 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # padding == dilation keeps the 3x3 conv size-preserving for any
        # dilation (generalizes the old hard-coded 1-or-2 special cases
        # that previously lived here as commented-out code).
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1 if stride_in_1x1 else stride,
                               dilation=dilation, padding=dilation, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Return relu(bottleneck(x) + residual)."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet trunk that can expose intermediate stage features.

    Stage numbering: stage 1 is the conv1/maxpool stem, stages 2..5 are
    layer1..layer4, and stage 6 is the avgpool + fc classifier head.
    forward() returns a dict mapping 'body{i}' to stage i's feature map
    for each exposed stage, plus 'cls_score' when stage 6 is exposed.

    Args:
        block: residual block class (BasicBlock or Bottleneck).
        layers: blocks per residual stage; its length (1..4) sets how
            many residual stages are built.
        num_classes: fc output size; needed only when stage 6 is exposed.
        expose_stages: stages to expose.  If None, the final residual
            stage is exposed (the previous implementation raised
            TypeError for the default arguments).
        dilations: per-stage dilations; a dilation of 2 replaces that
            stage's stride-2 downsampling.
        stride_in_1x1: forwarded to the block (Caffe-style striding for
            Bottleneck).
    """

    def __init__(self, block, layers, num_classes=None, expose_stages=None, dilations=None, stride_in_1x1=False):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        layers_planes = [64, 128, 256, 512]
        layers_strides = [1, 2, 2, 2]
        layers_dilations = dilations if dilations is not None else [1, 1, 1, 1]
        # A dilated stage keeps full resolution: trade its stride for dilation.
        for i, dilation in enumerate(layers_dilations):
            if dilation == 2:
                layers_strides[i] = 1
        layers_planes = layers_planes[:len(layers)]
        layers_strides = layers_strides[:len(layers)]
        layers_dilations = layers_dilations[:len(layers)]
        for i, (planes, blocks, stride, dilation) in enumerate(
                zip(layers_planes, layers, layers_strides, layers_dilations)):
            layer = self._make_layer(block, planes, blocks, stride=stride,
                                     dilation=dilation, stride_in_1x1=stride_in_1x1)
            self.__setattr__('layer{}'.format(i + 1), layer)
        self.num_layers = i + 1
        # Work on a private copy so the caller's list is never mutated
        # by the remove(6) below; None defaults to the final residual
        # stage instead of raising TypeError.
        if expose_stages is None:
            expose_stages = [self.num_layers + 1]
        else:
            expose_stages = list(expose_stages)
        self.has_fc_head = 6 in expose_stages
        self.expose_stages = expose_stages
        if self.has_fc_head:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.fc = nn.Linear(512 * block.expansion, num_classes)
            self.expose_stages.remove(6)
        # He initialization for convs; identity-like init for BN.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, stride_in_1x1=False):
        """Stack `blocks` residual blocks; only the first may stride/project."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 projection so residual shapes match across the stage entry.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, dilation, stride_in_1x1=stride_in_1x1))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Run the trunk and return {'body{i}': feat, ...} (+ 'cls_score')."""
        expose_feats = {}
        feats = {}
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        feats['body1'] = x
        for i in range(self.num_layers):
            x = self.__getattr__("layer{}".format(i + 1))(x)
            feats['body{}'.format(i + 2)] = x
        if self.has_fc_head:
            x = self.avgpool(x)
            x = x.view(x.size(0), -1)
            x = self.fc(x)
            expose_feats['cls_score'] = x
        if self.expose_stages is not None:
            for expose_stage in self.expose_stages:
                feat_name = 'body{}'.format(expose_stage)
                expose_feats[feat_name] = feats[feat_name]
        return expose_feats

    def load_pretrained_state_dict(self, state_dict):
        """Load state dict of pretrained model, ignoring missing keys.

        Args:
            state_dict (dict): state dict to load; keys absent from this
                model are skipped, keys absent from *state_dict* keep
                their current values and are reported via a warning.
        """
        new_state_dict = self.state_dict()
        miss_keys = []
        for k in new_state_dict.keys():
            if k in state_dict.keys():
                new_state_dict[k] = state_dict[k]
            else:
                miss_keys.append(k)
        if len(miss_keys) > 0:
            warnings.warn('miss keys: {}'.format(miss_keys))
        self.load_state_dict(new_state_dict)

    def frozen_parameters(self, frozen_stages=None, frozen_bn=False):
        """Disable gradients for the given stages and/or all BN layers."""
        if frozen_bn:
            for module in self.modules():
                if isinstance(module, nn.BatchNorm2d):
                    for param in module.parameters():
                        param.requires_grad = False
        if frozen_stages is not None:
            for stage in frozen_stages:
                assert (stage >= 1) and (stage <= 6)
                if stage == 1:
                    # Stage 1 is the stem (conv1 + bn1).
                    for param in self.conv1.parameters():
                        param.requires_grad = False
                    for param in self.bn1.parameters():
                        param.requires_grad = False
                elif stage < 6:
                    # Stages 2..5 map to layer1..layer4.
                    for param in self.__getattr__("layer{}".format(stage - 1)).parameters():
                        param.requires_grad = False
                else:
                    for param in self.fc.parameters():
                        param.requires_grad = False

    def bn_eval(self):
        """Put every BatchNorm layer into eval mode (freeze running stats)."""
        for module in self.modules():
            if isinstance(module, nn.BatchNorm2d):
                module.eval()
def resnet18(pretrained=False, pretrained_model_path=None, num_classes=None, expose_stages=None, dilations=None, **kwargs):
    """Build a ResNet-18.

    Args:
        pretrained (bool): if True, load pretrained weights.
        pretrained_model_path (str, optional): checkpoint to load when
            pretrained is True; falls back to the model zoo when omitted.
        num_classes (int): size of the fc output (classifier stage 6).
        expose_stages (list, optional): stages whose outputs to expose;
            defaults to [6], i.e. only the classifier output.
        dilations (list, optional): per-stage dilations.
    """
    if num_classes is None:
        assert expose_stages is not None, "num_class and expose_stages is both None"
        assert 6 not in expose_stages, "can't expose the 6th stage for num_classes is None"
    if expose_stages is None:
        expose_stages = [6]
    end_stage = max(expose_stages)
    assert end_stage <= 6, "the max expose_stage is out of range"
    model = ResNet(
        block=BasicBlock,
        layers=model_layers['resnet18'][:end_stage - 1],
        num_classes=num_classes,
        expose_stages=expose_stages,
        dilations=dilations,
    )
    if not pretrained:
        return model
    if pretrained_model_path is not None:
        state_dict = torch.load(pretrained_model_path,
                                map_location=lambda storage, loc: storage)
    else:
        state_dict = model_zoo.load_url(model_urls['resnet18'])
    model.load_pretrained_state_dict(state_dict)
    return model
def resnet34(pretrained=False, pretrained_model_path=None, num_classes=None, expose_stages=None, dilations=None, **kwargs):
    """Build a ResNet-34.

    Args:
        pretrained (bool): if True, load pretrained weights.
        pretrained_model_path (str, optional): checkpoint to load when
            pretrained is True; falls back to the model zoo when omitted.
        num_classes (int): size of the fc output (classifier stage 6).
        expose_stages (list, optional): stages whose outputs to expose;
            defaults to [6], i.e. only the classifier output.
        dilations (list, optional): per-stage dilations.
    """
    if num_classes is None:
        assert expose_stages is not None, "num_class and expose_stages is both None"
        assert 6 not in expose_stages, "can't expose the 6th stage for num_classes is None"
    if expose_stages is None:
        expose_stages = [6]
    end_stage = max(expose_stages)
    assert end_stage <= 6, "the max expose_stage is out of range"
    model = ResNet(
        block=BasicBlock,
        layers=model_layers['resnet34'][:end_stage - 1],
        num_classes=num_classes,
        expose_stages=expose_stages,
        dilations=dilations,
    )
    if not pretrained:
        return model
    if pretrained_model_path is not None:
        state_dict = torch.load(pretrained_model_path,
                                map_location=lambda storage, loc: storage)
    else:
        state_dict = model_zoo.load_url(model_urls['resnet34'])
    model.load_pretrained_state_dict(state_dict)
    return model
def resnet50(pretrained=False, pretrained_model_path=None, num_classes=None, expose_stages=None, dilations=None, stride_in_1x1=False):
    """Build a ResNet-50.

    Args:
        pretrained (bool): if True, load pretrained weights.
        pretrained_model_path (str, optional): checkpoint to load when
            pretrained is True; falls back to the model zoo when omitted.
        num_classes (int): size of the fc output (classifier stage 6).
        expose_stages (list, optional): stages whose outputs to expose;
            defaults to [6], i.e. only the classifier output.
        dilations (list, optional): per-stage dilations.
        stride_in_1x1 (bool): place stage strides in the bottleneck's
            first 1x1 conv (Caffe convention).
    """
    if num_classes is None:
        assert expose_stages is not None, "num_class and expose_stages is both None"
        assert 6 not in expose_stages, "can't expose the 6th stage for num_classes is None"
    if expose_stages is None:
        expose_stages = [6]
    end_stage = max(expose_stages)
    assert end_stage <= 6, "the max expose_stage is out of range"
    model = ResNet(
        block=Bottleneck,
        layers=model_layers['resnet50'][:end_stage - 1],
        num_classes=num_classes,
        expose_stages=expose_stages,
        dilations=dilations,
        stride_in_1x1=stride_in_1x1,
    )
    if not pretrained:
        return model
    if pretrained_model_path is not None:
        state_dict = torch.load(pretrained_model_path,
                                map_location=lambda storage, loc: storage)
    else:
        state_dict = model_zoo.load_url(model_urls['resnet50'])
    model.load_pretrained_state_dict(state_dict)
    return model
def resnet101(pretrained=False, pretrained_model_path=None, num_classes=None, expose_stages=None, dilations=None, stride_in_1x1=False):
"""Constructs a ResNet-101 model
Args:
pretrained (bool): if True, load pretrained model. Default: False
pretrained_model_path (str, optional): only effective when pretrained=True,
if not specified, use pretrained model from model_zoo.
num_classes (int): number of classes for the fc output score.
expose_stages (list, optional): list of expose stages, e.g. [4, | |
= [
p for p in playlists if p not in [
'Music', 'Movies', 'TV Shows', 'Podcasts', 'iTunes\xa0U',
'Books', 'Genius', 'iTunes DJ', 'Music Videos',
'Home Videos', 'Voice Memos', 'Audiobooks'
]
]
playlists.sort(key=lambda x: x.lower())
except Exception as exc:
print('Error getting iTunes playlists:', exc)
playlists = []
_ba.pushcall(Call(target, playlists), from_other_thread=True)
def _handle_play_command(self, target: Optional[str]) -> None:
    """Switch the current playlist to *target* (None means stop).

    In either case, any playlist we started is stopped first and the
    user's original Music.app volume is restored.  The previous
    implementation duplicated that stop sequence byte-for-byte in both
    branches; it is hoisted here.
    """
    # If we've got something playing with positive volume, stop it and
    # put the user's original volume back.
    if self._current_playlist is not None and self._volume > 0:
        try:
            assert self._orig_volume is not None
            _ba.mac_music_app_stop()
            _ba.mac_music_app_set_volume(self._orig_volume)
        except Exception as exc:
            print('Error stopping iTunes music:', exc)
    if target is None:
        self._current_playlist = None
    else:
        # Set our playlist and play it if our volume is up.
        self._current_playlist = target
        if self._volume > 0:
            # Remember the user's volume so it can be restored later.
            self._orig_volume = _ba.mac_music_app_get_volume()
            self._update_mac_music_app_volume()
            self._play_current_playlist()
def _handle_die_command(self) -> None:
    """Stop playback and restore the user's volume on shutdown."""
    # Only stop if we've actually played something
    # (we don't want to kill music the user has playing).
    if self._current_playlist is not None and self._volume > 0:
        try:
            assert self._orig_volume is not None
            _ba.mac_music_app_stop()
            _ba.mac_music_app_set_volume(self._orig_volume)
        except Exception as exc:
            print('Error stopping iTunes music:', exc)
def _play_current_playlist(self) -> None:
    """Ask Music.app to play self._current_playlist.

    If the playlist can't be found, pushes a screen-message to the
    game thread instead.  All errors are logged, never raised.
    """
    try:
        from ba import _lang
        from ba._general import Call
        assert self._current_playlist is not None
        # Previously written as `if play(...): pass else: ...`;
        # inverted to drop the empty branch.
        if not _ba.mac_music_app_play_playlist(self._current_playlist):
            # Playlist not found; report it from the game thread.
            _ba.pushcall(Call(
                _ba.screenmessage,
                _lang.get_resource('playlistNotFoundText') + ': \'' +
                self._current_playlist + '\'', (1, 0, 0)),
                from_other_thread=True)
    except Exception:
        from ba import _error
        _error.print_exception(
            f"error playing playlist {self._current_playlist}")
def _update_mac_music_app_volume(self) -> None:
    """Push our 0.0-1.0 volume to Music.app's 0-100 scale, clamped."""
    _ba.mac_music_app_set_volume(
        max(0, min(100, int(100.0 * self._volume))))
class MacMusicAppMusicPlayer(MusicPlayer):
    """A music-player that utilizes iTunes/Music.app for playback.

    Allows selecting playlists as entries.  All Music.app interaction
    is delegated to a dedicated MacMusicAppThread.
    """

    def __init__(self) -> None:
        super().__init__()
        self._thread = MacMusicAppThread()
        self._thread.start()

    def on_select_entry(self, callback: Callable[[Any], None],
                        current_entry: Any, selection_target_name: str) -> Any:
        """Show the entry-type selection UI; returns the created window."""
        # pylint: disable=cyclic-import
        from bastd.ui.soundtrack import entrytypeselect as etsel
        return etsel.SoundtrackEntryTypeSelectWindow(callback, current_entry,
                                                     selection_target_name)

    def on_set_volume(self, volume: float) -> None:
        # Forwarded to the worker thread.
        self._thread.set_volume(volume)

    def get_playlists(self, callback: Callable) -> None:
        """Asynchronously fetch the list of available iTunes playlists."""
        self._thread.get_playlists(callback)

    def on_play(self, entry: Any) -> None:
        # Only iTunesPlaylist entries are playable by this player.
        entry_type = get_soundtrack_entry_type(entry)
        if entry_type == 'iTunesPlaylist':
            self._thread.play_playlist(get_soundtrack_entry_name(entry))
        else:
            print('MacMusicAppMusicPlayer passed unrecognized entry type:',
                  entry_type)

    def on_stop(self) -> None:
        # A playlist of None tells the thread to stop playback.
        self._thread.play_playlist(None)

    def on_shutdown(self) -> None:
        self._thread.shutdown()
def have_music_player() -> bool:
    """Returns whether a music player is present."""
    # A non-None music_player_type indicates a player implementation is
    # available for this platform.
    return _ba.app.music_player_type is not None
def get_music_player() -> MusicPlayer:
    """Returns the system music player, instantiating if necessary.

    Raises:
        RuntimeError: if no music player type is set for this platform.
    """
    app = _ba.app
    if app.music_player is None:
        if app.music_player_type is None:
            # RuntimeError instead of bare Exception; still caught by
            # any existing `except Exception` handler.
            raise RuntimeError("no music player type set")
        app.music_player = app.music_player_type()
    return app.music_player
def music_volume_changed(val: float) -> None:
    """Should be called when changing the music volume."""
    app = _ba.app
    # Only forward if a player has been instantiated; we don't create
    # one just to set a volume.
    if app.music_player is not None:
        app.music_player.set_volume(val)
def set_music_play_mode(mode: MusicPlayMode,
                        force_restart: bool = False) -> None:
    """Sets music play mode; used for soundtrack testing/etc."""
    app = _ba.app
    old_mode = app.music_mode
    app.music_mode = mode
    if old_mode != app.music_mode or force_restart:
        # If we're switching into test mode we don't
        # actually play anything until it's requested.
        # If we're switching *out* of test mode though
        # we want to go back to whatever the normal song was.
        if mode is MusicPlayMode.REGULAR:
            mtype = app.music_types[MusicPlayMode.REGULAR]
            do_play_music(None if mtype is None else mtype.value)
def supports_soundtrack_entry_type(entry_type: str) -> bool:
    """Return whether the provided soundtrack entry type is supported here."""
    uas = _ba.app.user_agent_string
    # 'default' is always available; the rest are platform-specific.
    if entry_type == 'default':
        return True
    if entry_type == 'iTunesPlaylist':
        return 'Mac' in uas
    if entry_type in ('musicFile', 'musicFolder'):
        return ('android' in uas
                and _ba.android_get_external_storage_path() is not None)
    return False
def get_soundtrack_entry_type(entry: Any) -> str:
    """Given a soundtrack entry, returns its type, taking into
    account what is supported locally.

    Falls back to 'default' (with a printed diagnostic) on any
    invalid or locally-unsupported entry.
    """
    try:
        if entry is None:
            etype = 'default'
        elif isinstance(entry, str):
            # A bare string is a legacy iTunesPlaylist entry.
            etype = 'iTunesPlaylist'
        elif (isinstance(entry, dict)
              and isinstance(entry.get('type'), str)
              and isinstance(entry.get('name'), str)):
            # Modern entries are dicts with 'type' and 'name' strings.
            etype = entry['type']
        else:
            raise Exception("invalid soundtrack entry: " + str(entry) +
                            " (type " + str(type(entry)) + ")")
        if not supports_soundtrack_entry_type(etype):
            raise Exception("invalid soundtrack entry:" + str(entry))
        return etype
    except Exception as exc:
        print('EXC on get_soundtrack_entry_type', exc)
        return 'default'
def get_soundtrack_entry_name(entry: Any) -> str:
    """Given a soundtrack entry, returns its name.

    Returns 'default' (after logging) when the entry is malformed.
    """
    try:
        if entry is None:
            raise Exception('entry is None')
        # A bare string is a legacy iTunesPlaylist-name entry.
        if isinstance(entry, str):
            return entry
        # Modern entries are dicts carrying 'type' and 'name' strings.
        if (isinstance(entry, dict)
                and isinstance(entry.get('type'), str)
                and isinstance(entry.get('name'), str)):
            return entry['name']
        raise Exception("invalid soundtrack entry:" + str(entry))
    except Exception:
        from ba import _error
        _error.print_exception()
        return 'default'
def setmusic(musictype: Optional[MusicType], continuous: bool = False) -> None:
    """Set or stop the current music based on a string musictype.

    category: Gameplay Functions

    This function will handle loading and playing sound media as necessary,
    and also supports custom user soundtracks on specific platforms so the
    user can override particular game music with their own.

    Pass None to stop music.

    if 'continuous' is True the musictype passed is the same as what is already
    playing, the playing track will not be restarted.
    """
    from ba import _gameutils

    # Playback is driven purely through attrs on the current globals node;
    # the foreground globals' playing music later feeds do_play_music().
    # Replays thus pick up custom soundtracks for free, since what gets
    # replayed is an attr-value set, not an actual sound-node create.
    gnode = _gameutils.sharedobj('globals')
    gnode.music_continuous = continuous
    gnode.music = musictype.value if musictype is not None else ''
    gnode.music_count += 1
def handle_app_resume() -> None:
    """Should be run when the app resumes from a suspended state."""
    # If the OS reports its own music playing, silence ours
    # (do_play_music(None) results in nothing playing).
    if _ba.is_os_playing_music():
        do_play_music(None)
def do_play_music(musictype: Union[MusicType, str, None],
                  continuous: bool = False,
                  mode: MusicPlayMode = MusicPlayMode.REGULAR,
                  testsoundtrack: Optional[Dict[str, Any]] = None) -> None:
    """Plays the requested music type/mode.

    For most cases setmusic() is the proper call to use, which itself calls
    this. Certain cases, however, such as soundtrack testing, may require
    calling this directly.

    Args:
        musictype: the music to play (a MusicType, its string value, or
            None to play nothing).
        continuous: if True and the requested music is already playing in
            this mode, leave it alone instead of restarting it.
        mode: which play-mode slot this request applies to.
        testsoundtrack: optional soundtrack dict to use instead of the
            user's configured one (used by soundtrack testing UIs).
    """
    # We can be passed a MusicType or the string value of one;
    # normalize to a MusicType (invalid values become None).
    if musictype is not None:
        try:
            musictype = MusicType(musictype)
        except ValueError:
            print(f"Invalid music type: '{musictype}'")
            musictype = None

    app = _ba.app
    with _ba.Context('ui'):

        # If they don't want to restart music and we're already
        # playing what's requested, we're done.
        if continuous and app.music_types[mode] is musictype:
            return
        app.music_types[mode] = musictype

        # If the OS tells us there's currently music playing,
        # all our operations default to playing nothing.
        if _ba.is_os_playing_music():
            musictype = None

        # If we're not in the mode this music is being set for,
        # don't actually change what's playing.
        if mode != app.music_mode:
            return

        # Some platforms have a special music-player for things like iTunes
        # soundtracks, mp3s, etc. If this is the case, attempt to grab an
        # entry for this music-type, and if we have one, have the
        # music-player play it. If not, we'll play game music ourself.
        if musictype is not None and app.music_player_type is not None:
            soundtrack = (testsoundtrack if testsoundtrack is not None
                          else _get_user_soundtrack())
            entry = soundtrack.get(musictype.value)
        else:
            entry = None

        # Go through music-player.
        if entry is not None:
            _play_music_player_music(entry)
        # Handle via internal music.
        else:
            _play_internal_music(musictype)
def _get_user_soundtrack() -> Dict[str, Any]:
    """Return current user soundtrack or empty dict otherwise."""
    cfg = _ba.app.config
    name = cfg.get('Soundtrack')
    # No selection (or the built-in default) means no custom soundtrack.
    if name is None or name == '__default__':
        return {}
    try:
        return cfg.get('Soundtracks', {})[name]
    except Exception as exc:
        # Best-effort: a missing/corrupt entry just reverts to defaults.
        print(f"Error looking up user soundtrack: {exc}")
        return {}
def _play_music_player_music(entry: Any) -> None:
    """Hand a soundtrack entry to the platform music-player.

    Internal game music and the external player are mutually exclusive,
    so any internal music node is torn down first.
    """
    app = _ba.app
    music = app.music
    if music is not None:
        music.delete()
        app.music = None
    get_music_player().play(entry)
def _play_internal_music(musictype: Optional[MusicType]) -> None:
app = _ba.app
# Stop any existing music-player playback.
if app.music_player is not None:
app.music_player.stop()
# Stop any existing internal music.
| |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 24 15:55:28 2016
@author: sasha
"""
import os
from .init import QTVer
if QTVer == 4:
from PyQt4 import QtGui, QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
if QTVer == 5:
from PyQt5 import QtWidgets as QtGui
from PyQt5 import QtCore
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.widgets import SpanSelector
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.optimize import least_squares, curve_fit
from .xaesa_ft import FT, BFTWindow, BFT, GETPHASE
from .xaesa_constants_formulas import windowGauss10
#from tkinter.filedialog import askopenfilename, asksaveasfilename
#from tkinter import Tk
import gc
def exafsfit_lsq(x, k, exafs, amp, pha, parametrs, var_par, nr_shells, kpow):
    """Residual function for scipy least_squares EXAFS fitting.

    Interleaves the free values in `x` with the fixed values in
    `parametrs` according to `var_par` (1 = fitted, 0 = fixed), evaluates
    the multi-shell EXAFS model with cumulant corrections and returns
    model*k**kpow - exafs.
    """
    # Merge free variables and fixed parameters into one flat array of
    # 7 values per shell: N, R, sigma^2, C3, C4, C5, C6.
    full = np.zeros(len(var_par))
    it_var = iter(x)
    it_par = iter(parametrs)
    for idx, flag in enumerate(var_par):
        full[idx] = next(it_var) if flag == 1 else next(it_par)

    model = np.zeros(len(k))
    for s in range(nr_shells):
        n_coord, dist, sig2, c3, c4, c5, c6 = full[7 * s:7 * s + 7]
        # Even cumulants (sigma^2, C4, C6) damp the amplitude...
        damping = np.exp(-2 * sig2 * k * k + (2 / 3) * c4 * k**4
                         - (4 / 45) * c6 * k**6)
        # ...odd cumulants (C3, C5) shift the phase.
        phase = (2 * k * dist - (4 / 3) * c3 * k**3
                 + (4 / 15) * c5 * k**5 + pha[s])
        model = model + (n_coord / (k * dist**2)) * amp[s] * damping \
            * np.sin(phase)

    return model * k**kpow - exafs
def exafsfit(x, N, R, sigma2):
    """Single-shell EXAFS model for curve_fit.

    `x` packs the fixed inputs (k grid, amplitude, phase, S0^2);
    N, R and sigma2 are the fitted parameters. Returns chi(k)*k^2.
    """
    k, amp, pha, so2 = x[0], x[1], x[2], x[3]
    envelope = so2 * (N / (k * R * R)) * amp * np.exp(-2 * sigma2 * k * k)
    return envelope * np.sin(2 * k * R + pha) * k * k
class FitWindow(QtGui.QDialog):
def __init__(self):
    """Create the fit dialog and initialize empty fit state."""
    super(FitWindow, self).__init__()
    # BFT signal to fit and its k grid (presumably assigned by the
    # caller before the dialog is used — TODO confirm against caller).
    self.bft = []
    self.k = []
    # Per-shell amplitude/phase tables: k grids and raw values.
    self.kamp = [[]]
    self.kpha = [[]]
    self.amp_orig = [[]]
    self.pha_orig = [[]]
    # Most recent fitted model curve (filled by Fit_leastsquares).
    self.fit_result = []
    self.initUI()
def initUI(self):
    """Build all widgets and layouts for the fit dialog (one shell tab)."""
    # Fit state: a single shell initially, nothing fitted yet.
    self.shellnr = 1
    self.savedshellnr = 1
    self.isfitted = 0

    # Matplotlib figure: BFT signal (left) and its FT (right).
    self.fig = plt.figure(3, figsize=(12, 6))
    self.ax_bft = plt.subplot2grid((1, 2), (0, 0))
    self.ax_bftft = plt.subplot2grid((1, 2), (0, 1))
    self.canv = FigureCanvas(self.fig)
    self.tbar = NavigationToolbar(self.canv, self)
    self.fig.tight_layout()

    # self.lblNrShells = QtGui.QLabel("Number of shells")
    # self.edtNrShells = QtGui.QLineEdit("1")

    # k-grid controls used to resample data before fitting.
    self.lblkmin = QtGui.QLabel("K min")
    self.lblkmax = QtGui.QLabel("K max")
    self.lbldk = QtGui.QLabel("dK")
    self.edtkmin = QtGui.QLineEdit("0.5")
    self.edtkmax = QtGui.QLineEdit("15")
    self.edtdk = QtGui.QLineEdit("0.05")

    self.lblMaxiterations = QtGui.QLabel("Max number of iterations")
    self.edtMaxiterations = QtGui.QLineEdit("1000")

    # One tab per shell; tab 0 is built here, more are added by addshell().
    self.tabShells = QtGui.QTabWidget()
    self.tabs = []
    self.tabs.append(QtGui.QFrame())
    self.tabShells.addTab(self.tabs[0], "Shell 1")

    # Per-shell widget lists, indexed by shell number.
    self.ltShell = []
    self.shellN = []
    self.shellR = []
    self.shellSigma = []
    self.shellC3 = []
    self.shellC4 = []
    self.shellC5 = []
    self.shellC6 = []
    # self.shellE0 = []
    self.shellAmp = []
    self.shellPha = []

    lblN = QtGui.QLabel("N")
    lblR = QtGui.QLabel("R")
    lblSigma = QtGui.QLabel("Sigma")
    lblC3 = QtGui.QLabel("C3")
    lblC4 = QtGui.QLabel("C4")
    lblC5 = QtGui.QLabel("C5")
    lblC6 = QtGui.QLabel("C6")
    # lblE0 = QtGui.QLabel("E0")
    lblAmp = QtGui.QLabel("Ampl")
    lblPha = QtGui.QLabel("Phase")

    self.ltShell.append(QtGui.QGridLayout())
    # Each parameter row: [value, min limit, max limit, "vary" checkbox].
    self.shellN.append([QtGui.QLineEdit("4"), QtGui.QLineEdit("0"), QtGui.QLineEdit("8"), QtGui.QCheckBox()])
    self.shellR.append([QtGui.QLineEdit("2"), QtGui.QLineEdit("0"), QtGui.QLineEdit("4"), QtGui.QCheckBox()])
    self.shellSigma.append([QtGui.QLineEdit("0.001"), QtGui.QLineEdit("0"), QtGui.QLineEdit("1"), QtGui.QCheckBox()])
    self.shellC3.append([QtGui.QLineEdit("0"), QtGui.QLineEdit("-0.1"), QtGui.QLineEdit("0.1"), QtGui.QCheckBox()])
    self.shellC4.append([QtGui.QLineEdit("0"), QtGui.QLineEdit("-0.1"), QtGui.QLineEdit("0.1"), QtGui.QCheckBox()])
    self.shellC5.append([QtGui.QLineEdit("0"), QtGui.QLineEdit("-0.1"), QtGui.QLineEdit("0.1"), QtGui.QCheckBox()])
    self.shellC6.append([QtGui.QLineEdit("0"), QtGui.QLineEdit("-0.1"), QtGui.QLineEdit("0.1"), QtGui.QCheckBox()])
    # self.shellE0.append([QtGui.QLineEdit("0"), QtGui.QLineEdit("0"), QtGui.QLineEdit("0"), QtGui.QLineEdit("0.0001"), QtGui.QCheckBox()])
    self.shellAmp.append(QtGui.QComboBox())
    self.shellPha.append(QtGui.QComboBox())
    self.shellAmp[-1].addItem("")
    self.shellPha[-1].addItem("")
    self.shellAmp[-1].currentIndexChanged.connect(self.AmpChanged)
    self.shellPha[-1].currentIndexChanged.connect(self.PhaChanged)

    # N, R and Sigma vary by default; the cumulants stay fixed.
    self.shellN[len(self.shellN) - 1][3].setChecked(True)
    self.shellR[len(self.shellR) - 1][3].setChecked(True)
    self.shellSigma[len(self.shellSigma) - 1][3].setChecked(True)

    # Row labels in column 0.
    self.ltShell[0].addWidget(lblN, 0, 0)
    self.ltShell[0].addWidget(lblR, 1, 0)
    self.ltShell[0].addWidget(lblSigma, 2, 0)
    self.ltShell[0].addWidget(lblC3, 3, 0)
    self.ltShell[0].addWidget(lblC4, 4, 0)
    self.ltShell[0].addWidget(lblC5, 5, 0)
    self.ltShell[0].addWidget(lblC6, 6, 0)
    # self.ltShell[0].addWidget(lblE0, 7, 0)
    self.ltShell[0].addWidget(lblAmp, 7, 0)
    self.ltShell[0].addWidget(lblPha, 8, 0)
    # value/min/max/checkbox widgets land in odd columns 1/3/5/7.
    for i in range(4):
        self.ltShell[0].addWidget(self.shellN[0][i], 0, 2 * i + 1)
        self.ltShell[0].addWidget(self.shellR[0][i], 1, 2 * i + 1)
        self.ltShell[0].addWidget(self.shellSigma[0][i], 2, 2 * i + 1)
        self.ltShell[0].addWidget(self.shellC3[0][i], 3, 2 * i + 1)
        self.ltShell[0].addWidget(self.shellC4[0][i], 4, 2 * i + 1)
        self.ltShell[0].addWidget(self.shellC5[0][i], 5, 2 * i + 1)
        self.ltShell[0].addWidget(self.shellC6[0][i], 6, 2 * i + 1)
        # self.ltShell[0].addWidget(self.shellE0[0][i], 7, 2*i+1)
    self.ltShell[0].addWidget(self.shellAmp[0], 7, 1, 1, 7)
    self.ltShell[0].addWidget(self.shellPha[0], 8, 1, 1, 7)
    # self.shellAmp[0].addItem("E:/work/development/xaslib/fit/amp0001.dat")
    # self.shellPha[0].addItem("E:/work/development/xaslib/fit/pha0001.dat")
    # Limit captions in the even columns between the edit boxes.
    for j in range(7):
        self.ltShell[0].addWidget(QtGui.QLabel("Min. limit"), j, 2)
        self.ltShell[0].addWidget(QtGui.QLabel("Max. limit"), j, 4)
        # self.ltShell[0].addWidget(QtGui.QLabel("Accuracy"), j, 6)
    self.tabs[0].setLayout(self.ltShell[0])

    # Read-only fit diagnostics (filled in after a fit).
    self.lblFuncEval = QtGui.QLabel("Number of function evaluations done")
    self.edtFuncEval = QtGui.QLineEdit()
    self.lblFitMessage = QtGui.QLabel("Termination reason")
    self.edtFitMessage = QtGui.QLineEdit()
    self.lblOptimality = QtGui.QLabel("Cost function")
    self.edtOptimality = QtGui.QLineEdit()
    lfit = QtGui.QGridLayout()
    lfit.addWidget(self.lblFitMessage, 0, 0)
    lfit.addWidget(self.edtFitMessage, 0, 1)
    lfit.addWidget(self.lblFuncEval, 1, 0)
    lfit.addWidget(self.edtFuncEval, 1, 1)
    lfit.addWidget(self.lblOptimality, 2, 0)
    lfit.addWidget(self.edtOptimality, 2, 1)

    # Action buttons and their handlers.
    self.btnFit = QtGui.QPushButton('Fit')
    self.btnFit.clicked.connect(self.Fit_leastsquares)
    self.btnSaveFit = QtGui.QPushButton('Save Fit results')
    self.btnSaveFit.clicked.connect(self.saveFit)
    self.btnApply = QtGui.QPushButton('Apply')
    self.btnApply.clicked.connect(self.apply)
    self.btnCancel = QtGui.QPushButton('Cancel')
    self.btnCancel.clicked.connect(self.cancel)
    self.btnAddShell = QtGui.QPushButton('Add shell')
    self.btnAddShell.clicked.connect(self.addshell)
    self.btnRemoveShell = QtGui.QPushButton('Remove shell')
    self.btnRemoveShell.clicked.connect(self.removeshell)
    self.btnOpenAmp = QtGui.QPushButton('Open amplitude file(s) ...')
    self.btnOpenAmp.clicked.connect(self.openamp)
    self.btnOpenPha = QtGui.QPushButton('Open phase file(s) ...')
    self.btnOpenPha.clicked.connect(self.openpha)
    self.btnOpenFeff = QtGui.QPushButton('Open feff file(s) ...')
    self.btnOpenFeff.clicked.connect(self.openfeff)
    self.btnSaveFitResults = QtGui.QPushButton('Save fit Results ...')
    # self.btnSaveFitResults.clicked.connect(self.saveFitResults)

    lb = QtGui.QGridLayout()
    lb.addWidget(self.btnOpenAmp, 0, 0)
    lb.addWidget(self.btnOpenPha, 0, 1)
    lb.addWidget(self.btnOpenFeff, 1, 0)
    lb.addWidget(self.btnAddShell, 2, 0)
    lb.addWidget(self.btnRemoveShell, 2, 1)

    # Overall dialog layout: plots/buttons on the left, controls right.
    lfig = QtGui.QGridLayout()
    lfig.addWidget(self.tbar, 0, 0)
    lfig.addWidget(self.canv, 1, 0, 2, 1)
    lfig.addLayout(lfit, 3, 0)
    lfig.addWidget(self.btnFit, 4, 0)
    lfig.addWidget(self.btnSaveFit, 5, 0)
    lfig.addWidget(self.btnApply, 6, 0)
    lfig.addWidget(self.btnCancel, 7, 0)
    lfig.addWidget(self.lblkmin, 0, 1)
    lfig.addWidget(self.edtkmin, 0, 2)
    lfig.addWidget(self.lblkmax, 0, 3)
    lfig.addWidget(self.edtkmax, 0, 4)
    lfig.addWidget(self.lbldk, 0, 5)
    lfig.addWidget(self.edtdk, 0, 6)
    lfig.addWidget(self.lblMaxiterations, 1, 1)
    lfig.addWidget(self.edtMaxiterations, 1, 2, 1, 4)
    lfig.addWidget(self.tabShells, 2, 1, 2, 6)
    lfig.addLayout(lb, 4, 1, 2, 6)
    self.setLayout(lfig)
def updateUI(self):
    """Restore saved shell count, k-settings and (if fitted) fit results.

    Expects self.savedshellnr, self.ksettings and self.isfitted (plus,
    when fitted, self.costfunction / self.fit_params / self.fit_amps /
    self.fit_phases) to have been assigned by the caller beforehand.
    """
    # Recreate extra shell tabs so widget lists match the saved count.
    if self.savedshellnr > 1:
        for i in range(0, self.savedshellnr - 1):
            self.addshell()
    self.edtkmin.setText("{:.2f}".format(self.ksettings[0][0]))
    self.edtkmax.setText("{:.2f}".format(self.ksettings[0][1]))
    self.edtdk.setText("{:.3f}".format(self.ksettings[0][2]))
    if self.isfitted == 1:  # fill with saved fitting params
        self.edtOptimality.setText("{:E}".format(self.costfunction))
        # fit_params[i] rows: 0=N, 1=R, 2=Sigma, 3..6=C3..C6;
        # columns: 0=value, 1=min limit, 2=max limit, 3=vary flag.
        for i in range(self.shellnr):
            self.shellN[i][0].setText("{:.4f}".format(self.fit_params[i][0][0]))
            self.shellN[i][1].setText("{:.4f}".format(self.fit_params[i][0][1]))
            self.shellN[i][2].setText("{:.4f}".format(self.fit_params[i][0][2]))
            self.shellN[i][3].setChecked(bool(self.fit_params[i][0][3]))
            self.shellR[i][0].setText("{:.4f}".format(self.fit_params[i][1][0]))
            self.shellR[i][1].setText("{:.4f}".format(self.fit_params[i][1][1]))
            self.shellR[i][2].setText("{:.4f}".format(self.fit_params[i][1][2]))
            self.shellR[i][3].setChecked(bool(self.fit_params[i][1][3]))
            self.shellSigma[i][0].setText("{:.4f}".format(self.fit_params[i][2][0]))
            self.shellSigma[i][1].setText("{:.4f}".format(self.fit_params[i][2][1]))
            self.shellSigma[i][2].setText("{:.4f}".format(self.fit_params[i][2][2]))
            self.shellSigma[i][3].setChecked(bool(self.fit_params[i][2][3]))
            self.shellC3[i][0].setText("{:.4E}".format(self.fit_params[i][3][0]))
            self.shellC3[i][1].setText("{:.4f}".format(self.fit_params[i][3][1]))
            self.shellC3[i][2].setText("{:.4f}".format(self.fit_params[i][3][2]))
            self.shellC3[i][3].setChecked(bool(self.fit_params[i][3][3]))
            self.shellC4[i][0].setText("{:.4E}".format(self.fit_params[i][4][0]))
            self.shellC4[i][1].setText("{:.4f}".format(self.fit_params[i][4][1]))
            self.shellC4[i][2].setText("{:.4f}".format(self.fit_params[i][4][2]))
            self.shellC4[i][3].setChecked(bool(self.fit_params[i][4][3]))
            self.shellC5[i][0].setText("{:.4E}".format(self.fit_params[i][5][0]))
            self.shellC5[i][1].setText("{:.4f}".format(self.fit_params[i][5][1]))
            self.shellC5[i][2].setText("{:.4f}".format(self.fit_params[i][5][2]))
            self.shellC5[i][3].setChecked(bool(self.fit_params[i][5][3]))
            self.shellC6[i][0].setText("{:.4E}".format(self.fit_params[i][6][0]))
            self.shellC6[i][1].setText("{:.4f}".format(self.fit_params[i][6][1]))
            self.shellC6[i][2].setText("{:.4f}".format(self.fit_params[i][6][2]))
            self.shellC6[i][3].setChecked(bool(self.fit_params[i][6][3]))
            # for i in range(int(len(self.fit_amps)/2)):
            # NOTE(review): these reuse the shell loop index i; fit_amps /
            # fit_phases appear to interleave [k-grid, values] pairs per
            # shell — confirm layout against the save/load code.
            self.kamp[i] = self.fit_amps[2 * i]
            self.amp_orig[i] = self.fit_amps[2 * i + 1]
            self.kpha[i] = self.fit_phases[2 * i]
            self.pha_orig[i] = self.fit_phases[2 * i + 1]
    # print(self.fit_amps)
    pass
def Fit_curvefit(self):
    """Single-shell fit of the BFT signal via scipy.optimize.curve_fit.

    NOTE(review): looks like a development/debug variant — amplitude and
    phase are loaded from hard-coded local paths and S0^2 is fixed to 1;
    the Fit button is wired to Fit_leastsquares instead.
    """
    kstart = float(self.edtkmin.text())
    kend = float(self.edtkmax.text())
    dk = float(self.edtdk.text())
    common_k = np.arange(kstart, kend, dk)
    # Initial guess: N, R, sigma^2 from the first shell's value fields.
    guess = [0, 0, 0]
    guess[0] = float(self.shellN[0][0].text())
    guess[1] = float(self.shellR[0][0].text())
    guess[2] = float(self.shellSigma[0][0].text())
    # curve_fit bounds: (lower-limits tuple, upper-limits tuple).
    varbounds = []
    varbounds.append((float(self.shellN[0][1].text()), float(self.shellR[0][1].text()), float(self.shellSigma[0][1].text())))
    varbounds.append((float(self.shellN[0][2].text()), float(self.shellR[0][2].text()), float(self.shellSigma[0][2].text())))
    # NOTE(review): hard-coded developer paths; this fails on any other
    # machine — should come from the amplitude/phase combo boxes.
    kamp, amp_orig = np.genfromtxt("E:/work/development/xaslib/fit/amp0001.dat", usecols=(1, 0), unpack=True)
    kpha, pha_orig = np.genfromtxt("E:/work/development/xaslib/fit/pha0001.dat", usecols=(1, 0), unpack=True)
    # Resample amplitude, phase and the BFT signal onto the common k grid.
    splamp = InterpolatedUnivariateSpline(kamp, amp_orig)
    splpha = InterpolatedUnivariateSpline(kpha, pha_orig)
    splbft = InterpolatedUnivariateSpline(self.k, self.bft)
    amp = splamp(common_k)
    pha = splpha(common_k)
    common_bft = splbft(common_k)
    # lsq_result = least_squares(exafsfit, np.array(X), \
    #                            method = 'lm',
    #                            bounds = varbounds,
    #                            args=(self.k, self.bft, amp, pha, 1))
    # print(lsq_result.x)
    # Fixed model inputs packed into x; the trailing 1 fixes S0^2 = 1.
    x = []
    x.append(common_k)
    x.append(amp)
    x.append(pha)
    x.append(1)
    popt, pcov = curve_fit(exafsfit, x, common_bft,
                           # method = 'lm',
                           bounds=varbounds,
                           p0=guess)
    self.ax_bft.clear()
    self.ax_bftft.clear()
    self.ax_bft.plot(self.k, self.bft)
    # self.ax_bft.plot(self.k, exafsfit(lsq_result.x, self.k, self.bft, amp, pha, 1)+self.bft)
    # self.ax_bft.plot(self.k, exafsfit(X, self.k, self.bft, amp, pha, 1)+self.bft)
    self.ax_bft.plot(common_k, exafsfit(x, popt[0], popt[1], popt[2]))
    print(popt)
    print(pcov)
    self.canv.draw()
def Fit_leastsquares(self):
for i in range(self.shellnr):
if(self.kamp[i]==[]):
QtGui.QMessageBox.information(self,"Load Amplitude", "Amplitude in shell {:d} not loaded".format(i+1))
return
if(self.kpha[i]==[]):
QtGui.QMessageBox.information(self,"Load Phase", "Phase in shell {:d} not loaded".format(i+1))
return
kstart = float(self.edtkmin.text())
kend = float(self.edtkmax.text())
dk = float(self.edtdk.text())
self.common_k = np.arange(kstart, kend, dk)
maxiterations = int(self.edtMaxiterations.text())
#prepare variable and parameter array
splbft = InterpolatedUnivariateSpline(self.k, self.bft)
self.common_bft = splbft(self.common_k)
varbounds = [[],[]]
par = []
var_par = []
X = []
edtVarBoxes = []
amp = []
pha = []
for i in range(self.shellnr):
if self.shellN[i][3].isChecked():
X.append(float(self.shellN[i][0].text()))
varbounds[0].append(float(self.shellN[i][1].text()))
varbounds[1].append(float(self.shellN[i][2].text()))
var_par.append(1)
edtVarBoxes.append(self.shellN[i][0])
else:
par.append(float(self.shellN[i][0].text()))
var_par.append(0)
if self.shellR[i][3].isChecked():
X.append(float(self.shellR[i][0].text()))
varbounds[0].append(float(self.shellR[i][1].text()))
varbounds[1].append(float(self.shellR[i][2].text()))
var_par.append(1)
edtVarBoxes.append(self.shellR[i][0])
else:
par.append(float(self.shellR[i][0].text()))
var_par.append(0)
if self.shellSigma[i][3].isChecked():
X.append(float(self.shellSigma[i][0].text()))
varbounds[0].append(float(self.shellSigma[i][1].text()))
varbounds[1].append(float(self.shellSigma[i][2].text()))
var_par.append(1)
edtVarBoxes.append(self.shellSigma[i][0])
else:
par.append(float(self.shellSigma[i][0].text()))
var_par.append(0)
if self.shellC3[i][3].isChecked():
X.append(float(self.shellC3[i][0].text()))
varbounds[0].append(float(self.shellC3[i][1].text()))
varbounds[1].append(float(self.shellC3[i][2].text()))
var_par.append(1)
edtVarBoxes.append(self.shellC3[i][0])
else:
par.append(float(self.shellC3[i][0].text()))
var_par.append(0)
if self.shellC4[i][3].isChecked():
X.append(float(self.shellC4[i][0].text()))
varbounds[0].append(float(self.shellC4[i][1].text()))
varbounds[1].append(float(self.shellC4[i][2].text()))
var_par.append(1)
edtVarBoxes.append(self.shellC4[i][0])
else:
par.append(float(self.shellC4[i][0].text()))
var_par.append(0)
if self.shellC5[i][3].isChecked():
X.append(float(self.shellC5[i][0].text()))
varbounds[0].append(float(self.shellC5[i][1].text()))
varbounds[1].append(float(self.shellC5[i][2].text()))
var_par.append(1)
edtVarBoxes.append(self.shellC5[i][0])
else:
par.append(float(self.shellC5[i][0].text()))
var_par.append(0)
if self.shellC6[i][3].isChecked():
X.append(float(self.shellC6[i][0].text()))
varbounds[0].append(float(self.shellC6[i][1].text()))
varbounds[1].append(float(self.shellC6[i][2].text()))
var_par.append(1)
edtVarBoxes.append(self.shellC6[i][0])
else:
par.append(float(self.shellC6[i][0].text()))
var_par.append(0)
splamp = InterpolatedUnivariateSpline(self.kamp[i], self.amp_orig[i])
splpha = InterpolatedUnivariateSpline(self.kpha[i], self.pha_orig[i])
amp.append(splamp(self.common_k))
pha.append(splpha(self.common_k))
varbounds[0] = tuple(varbounds[0])
varbounds[1] = tuple(varbounds[1])
lsq_result = least_squares(exafsfit_lsq, np.array(X), \
# method = 'dogbox',
ftol = 1e-12,
max_nfev = maxiterations,
bounds = varbounds,
# tr_solver = 'lsmr',
# jac = '3-point',
# loss='soft_l1',
# f_scale=0.1,
verbose = 0,
# x_scale = [1,1,0.001],
args=(self.common_k, self.common_bft, amp, pha, par, var_par, self.shellnr, 2))
self.edtFuncEval.setText("{:d}".format(lsq_result.nfev))
self.edtOptimality.setText("{:e}".format(lsq_result.cost))
self.edtFitMessage.setText(lsq_result.message)
for i in range(len(lsq_result.x)):
if i in [0,1,2]:
edtVarBoxes[i].setText("{:.5f}".format(lsq_result.x[i]))
else:
edtVarBoxes[i].setText("{:.2E}".format(lsq_result.x[i]))
self.window = windowGauss10(self.common_k, kstart, kend)
self.bftw = self.common_bft * self.window
self.r, self.fr, self.fi = FT(self.common_k, self.bftw, 0, 4, 0.02)
self.efr = np.sqrt(self.fr*self.fr + self.fi*self.fi)
self.efi = self.fi * (-1)
self.fit_result = exafsfit_lsq(lsq_result.x, self.common_k, self.common_bft, amp, pha, par, var_par, self.shellnr, 2)+self.common_bft
fit_result_w = self.fit_result * self.window
self.res_r, res_fr, | |
= _messages.IntegerField(27, variant=_messages.Variant.INT32)
class BackupConfig(_messages.Message):
    r"""BackupConfig defines the configuration of Backups created via this
    BackupPlan.

    Fields:
      allNamespaces: If True, include all namespaced resources
      encryptionKey: This defines a customer managed encryption key that will
        be used to encrypt the Backup artifacts for Backups created via this
        BackupPlan.
      includeSecrets: This flag specifies whether Kubernetes Secret resources
        should be included when they fall into the scope of Backups. Default:
        False
      includeVolumeData: This flag specifies whether volume data should be
        backed up when PVCs are included in the scope of a Backup. Default:
        False
      selectedApplications: If set, include just the resources referenced by
        the listed ProtectedApplications.
      selectedNamespaces: If set, include just the resources in the listed
        namespaces
    """

    # NOTE(review): apitools-style generated message; the numeric field tags
    # below are part of the API encoding and must not be renumbered.
    allNamespaces = _messages.BooleanField(1)
    encryptionKey = _messages.MessageField('EncryptionKey', 2)
    includeSecrets = _messages.BooleanField(3)
    includeVolumeData = _messages.BooleanField(4)
    selectedApplications = _messages.MessageField('NamespacedNames', 5)
    selectedNamespaces = _messages.MessageField('Namespaces', 6)
class BackupPlan(_messages.Message):
    r"""Defines the configuration and scheduling for a "line" of Backups. Next
    id: 13

    Messages:
      LabelsValue: A set of custom labels supplied by user.

    Fields:
      backupConfig: Defines the configuration of Backups created via this
        BackupPlan.
      backupSchedule: Defines a schedule for automatic Backup creation via
        this BackupPlan.
      cluster: Required. Immutable. The source cluster from which Backups will
        be created via this BackupPlan. Possible formats: 1.
        projects/*/locations/*/clusters/* 2. projects/*/zones/*/clusters/*
      createTime: Output only. [Output Only] The timestamp when this
        BackupPlan resource was created - can be converted to and from
        [RFC3339](https://www.ietf.org/rfc/rfc3339.txt)
      deactivated: This flag indicates whether this BackupPlan has been
        deactivated. Setting this field to True locks the BackupPlan such that
        no further updates will be allowed, including the deactivated field.
        It also prevents any new Backups from being created via this
        BackupPlan (including scheduled Backups). Default: False
      description: User specified descriptive string for this BackupPlan.
      etag: Output only. `etag` is used for optimistic concurrency control as
        a way to help prevent simultaneous updates of a backup plan from
        overwriting each other. It is strongly suggested that systems make use
        of the 'etag' in the read-modify-write cycle to perform BackupPlan
        updates in order to avoid race conditions: An `etag` is returned in
        the response to `GetBackupPlan`, and systems are expected to put that
        etag in the request to `UpdateBackupPlan` to ensure that their change
        will be applied to the same version.
      labels: A set of custom labels supplied by user.
      name: Output only. [Output Only] The full name of the BackupPlan
        resource. Format: projects/*/locations/*/backupPlans/*
      retentionPolicy: RetentionPolicy governs lifecycle of Backups created
        under this plan.
      uid: Output only. [Output Only] Server generated global unique
        identifier of
        [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier)
        format.
      updateTime: Output only. [Output Only] The timestamp when this
        BackupPlan resource was last updated - can be converted to and from
        [RFC3339](https://www.ietf.org/rfc/rfc3339.txt)
    """

    @encoding.MapUnrecognizedFields('additionalProperties')
    class LabelsValue(_messages.Message):
        r"""A set of custom labels supplied by user.

        Messages:
          AdditionalProperty: An additional property for a LabelsValue object.

        Fields:
          additionalProperties: Additional properties of type LabelsValue
        """

        class AdditionalProperty(_messages.Message):
            r"""An additional property for a LabelsValue object.

            Fields:
              key: Name of the additional property.
              value: A string attribute.
            """

            key = _messages.StringField(1)
            value = _messages.StringField(2)

        additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

    # NOTE(review): apitools-style generated message; field tags encode the
    # API wire format and must stay stable.
    backupConfig = _messages.MessageField('BackupConfig', 1)
    backupSchedule = _messages.MessageField('Schedule', 2)
    cluster = _messages.StringField(3)
    createTime = _messages.StringField(4)
    deactivated = _messages.BooleanField(5)
    description = _messages.StringField(6)
    etag = _messages.StringField(7)
    labels = _messages.MessageField('LabelsValue', 8)
    name = _messages.StringField(9)
    retentionPolicy = _messages.MessageField('RetentionPolicy', 10)
    uid = _messages.StringField(11)
    updateTime = _messages.StringField(12)
class Binding(_messages.Message):
    r"""Associates `members` with a `role`.

    Fields:
      condition: The condition that is associated with this binding. If the
        condition evaluates to `true`, then this binding applies to the
        current request. If the condition evaluates to `false`, then this
        binding does not apply to the current request. However, a different
        role binding might grant the same role to one or more of the members
        in this binding. To learn which resources support conditions in their
        IAM policies, see the [IAM
        documentation](https://cloud.google.com/iam/help/conditions/resource-
        policies).
      members: Specifies the identities requesting access for a Cloud Platform
        resource. `members` can have the following values: * `allUsers`: A
        special identifier that represents anyone who is on the internet; with
        or without a Google account. * `allAuthenticatedUsers`: A special
        identifier that represents anyone who is authenticated with a Google
        account or a service account. * `user:{emailid}`: An email address
        that represents a specific Google account. For example,
        `<EMAIL>` . * `serviceAccount:{emailid}`: An email address that
        represents a service account. For example, `<EMAIL>`. *
        `group:{emailid}`: An email address that represents a Google group.
        For example, `<EMAIL>`. *
        `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
        identifier) representing a user that has been recently deleted. For
        example, `<EMAIL>?uid=123456789012345678901`. If the user is
        recovered, this value reverts to `user:{emailid}` and the recovered
        user retains the role in the binding. *
        `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address
        (plus unique identifier) representing a service account that has been
        recently deleted. For example, `my-other-
        <EMAIL>?uid=123456789012345678901`. If the service account is
        undeleted, this value reverts to `serviceAccount:{emailid}` and the
        undeleted service account retains the role in the binding. *
        `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus
        unique identifier) representing a Google group that has been recently
        deleted. For example, `<EMAIL>?uid=123456789012345678901`. If
        the group is recovered, this value reverts to `group:{emailid}` and
        the recovered group retains the role in the binding. *
        `domain:{domain}`: The G Suite domain (primary) that represents all
        the users of that domain. For example, `google.com` or `example.com`.
      role: Role that is assigned to `members`. For example, `roles/viewer`,
        `roles/editor`, or `roles/owner`.
    """

    # NOTE(review): apitools-style generated IAM message; tags are fixed.
    condition = _messages.MessageField('Expr', 1)
    members = _messages.StringField(2, repeated=True)
    role = _messages.StringField(3)
class ClusterMetadata(_messages.Message):
    r"""Information about the GKE cluster from which this Backup was created.

    Messages:
      BackupCrdVersionsValue: A list of the Backup for GKE CRD versions found
        in the cluster.

    Fields:
      anthosVersion: Anthos version
      backupCrdVersions: A list of the Backup for GKE CRD versions found in
        the cluster.
      cluster: The source cluster from which this Backup was created. Possible
        formats: 1. projects/*/locations/*/clusters/* 2.
        projects/*/zones/*/clusters/* This will be the same value as the
        parent BackupPlan's cluster field.
      gkeVersion: GKE version
      k8sVersion: The Kubernetes server version of the source cluster.
    """

    @encoding.MapUnrecognizedFields('additionalProperties')
    class BackupCrdVersionsValue(_messages.Message):
        r"""A list of the Backup for GKE CRD versions found in the cluster.

        Messages:
          AdditionalProperty: An additional property for a
            BackupCrdVersionsValue object.

        Fields:
          additionalProperties: Additional properties of type
            BackupCrdVersionsValue
        """

        class AdditionalProperty(_messages.Message):
            r"""An additional property for a BackupCrdVersionsValue object.

            Fields:
              key: Name of the additional property.
              value: A string attribute.
            """

            key = _messages.StringField(1)
            value = _messages.StringField(2)

        additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

    # NOTE(review): apitools-style generated message; tags are fixed.
    anthosVersion = _messages.StringField(1)
    backupCrdVersions = _messages.MessageField('BackupCrdVersionsValue', 2)
    cluster = _messages.StringField(3)
    gkeVersion = _messages.StringField(4)
    k8sVersion = _messages.StringField(5)
class ClusterResourceRestoreScope(_messages.Message):
    r"""Identifies the cluster-scoped resources to restore from the Backup.

    Fields:
      selectedGroupKinds: A list of "types" of cluster-scoped resources to be
        restored from the Backup. An empty list means that NO cluster-scoped
        resources will be restored. Note that Namespaces and PersistentVolume
        restoration is handled separately and is not governed by this field.
    """

    # NOTE(review): apitools-style generated message; tag is fixed.
    selectedGroupKinds = _messages.MessageField('GroupKind', 1, repeated=True)
class Empty(_messages.Message):
  r"""A generic empty message that you can re-use to avoid defining duplicated
  empty messages in your APIs. A typical example is to use it as the request
  or the response type of an API method. For instance: service Foo { rpc
  Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON
  representation for `Empty` is empty JSON object `{}`.
  """
class EncryptionKey(_messages.Message):
  r"""Defined a customer managed encryption key that will be used to encrypt
  Backup artifacts.

  Fields:
    gcpKmsEncryptionKey: Google Cloud KMS encryption key. Format:
      projects//locations//keyRings//cryptoKeys/
  """

  # Fully-qualified Cloud KMS key resource name used for CMEK encryption.
  gcpKmsEncryptionKey = _messages.StringField(1)
class Expr(_messages.Message):
r"""Represents a textual expression in the Common Expression Language (CEL)
syntax. CEL is a C-like expression language. The syntax and semantics of CEL
are documented at https://github.com/google/cel-spec. Example (Comparison):
title: "Summary size limit" description: "Determines if a summary is less
than 100 chars" expression: "document.summary.size() < 100" Example
(Equality): title: "Requestor is owner" description: "Determines if
requestor is the document owner" expression: "document.owner ==
request.auth.claims.email" Example (Logic): title: "Public documents"
description: "Determine whether the document should be publicly visible"
expression: "document.type != 'private' && document.type != 'internal'"
Example (Data Manipulation): title: "Notification string" description:
| |
import torch
import utils
from utils.hparams import hparams
from .diff.net import DiffNet
from .diff.shallow_diffusion_tts import GaussianDiffusion, OfflineGaussianDiffusion
from .diffspeech_task import DiffSpeechTask
from vocoders.base_vocoder import get_vocoder_cls, BaseVocoder
from modules.fastspeech.pe import PitchExtractor
from modules.fastspeech.fs2 import FastSpeech2
from modules.diffsinger_midi.fs2 import FastSpeech2MIDI
from modules.fastspeech.tts_modules import mel2ph_to_dur
from usr.diff.candidate_decoder import FFT
from utils.pitch_utils import denorm_f0
from tasks.tts.fs2_utils import FastSpeechDataset
from tasks.tts.fs2 import FastSpeech2Task
import numpy as np
import os
import torch.nn.functional as F
def _wavenet_decoder(hp):
    """Build the WaveNet-style denoiser sized by the mel-bin count."""
    return DiffNet(hp['audio_num_mel_bins'])


def _fft_decoder(hp):
    """Build the feed-forward-Transformer denoiser from decoder hparams."""
    return FFT(
        hp['hidden_size'], hp['dec_layers'], hp['dec_ffn_kernel_size'], hp['num_heads'])


# Registry mapping hparams['diff_decoder_type'] to a denoiser factory.
DIFF_DECODERS = {
    'wavenet': _wavenet_decoder,
    'fft': _fft_decoder,
}
class DiffSingerTask(DiffSpeechTask):
    # DiffSinger training task: a GaussianDiffusion decoder built on top of a
    # FastSpeech2 front-end that is loaded from a checkpoint and frozen. An
    # optional pretrained PitchExtractor ("pe") recovers f0 from mels for
    # validation plotting.
    def __init__(self):
        super(DiffSingerTask, self).__init__()
        self.dataset_cls = FastSpeechDataset
        # Vocoder is used for audio rendering, not for the training loss.
        self.vocoder: BaseVocoder = get_vocoder_cls(hparams)()
        if hparams.get('pe_enable') is not None and hparams['pe_enable']:
            self.pe = PitchExtractor().cuda()
            utils.load_ckpt(self.pe, hparams['pe_ckpt'], 'model', strict=True)
            self.pe.eval()  # frozen; inference only
    def build_tts_model(self):
        # One-off scan (kept for reference) of the training mels' per-bin
        # min/max, used to choose spec_min/spec_max:
        # import torch
        # from tqdm import tqdm
        # v_min = torch.ones([80]) * 100
        # v_max = torch.ones([80]) * -100
        # for i, ds in enumerate(tqdm(self.dataset_cls('train'))):
        #     v_max = torch.max(torch.max(ds['mel'].reshape(-1, 80), 0)[0], v_max)
        #     v_min = torch.min(torch.min(ds['mel'].reshape(-1, 80), 0)[0], v_min)
        #     if i % 100 == 0:
        #         print(i, v_min, v_max)
        # print('final', v_min, v_max)
        mel_bins = hparams['audio_num_mel_bins']
        self.model = GaussianDiffusion(
            phone_encoder=self.phone_encoder,
            out_dims=mel_bins, denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams),
            timesteps=hparams['timesteps'],
            K_step=hparams['K_step'],
            loss_type=hparams['diff_loss_type'],
            spec_min=hparams['spec_min'], spec_max=hparams['spec_max'],
        )
        if hparams['fs2_ckpt'] != '':
            # Warm-start from a pretrained FastSpeech2 and freeze all its weights.
            utils.load_ckpt(self.model.fs2, hparams['fs2_ckpt'], 'model', strict=True)
            # self.model.fs2.decoder = None
            for k, v in self.model.fs2.named_parameters():
                v.requires_grad = False
    def validation_step(self, sample, batch_idx):
        # Computes validation losses; for the first `num_valid_plots` batches
        # it also runs full inference and plots GT vs predicted mel and f0.
        outputs = {}
        txt_tokens = sample['txt_tokens']  # [B, T_t]
        target = sample['mels']  # [B, T_s, 80]
        energy = sample['energy']
        # fs2_mel = sample['fs2_mels']
        spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
        mel2ph = sample['mel2ph']
        f0 = sample['f0']
        uv = sample['uv']
        outputs['losses'] = {}
        outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False)
        outputs['total_loss'] = sum(outputs['losses'].values())
        outputs['nsamples'] = sample['nsamples']
        outputs = utils.tensors_to_scalars(outputs)
        if batch_idx < hparams['num_valid_plots']:
            model_out = self.model(
                txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, energy=energy, ref_mels=None, infer=True)
            if hparams.get('pe_enable') is not None and hparams['pe_enable']:
                gt_f0 = self.pe(sample['mels'])['f0_denorm_pred']  # pe predicts from GT mel
                pred_f0 = self.pe(model_out['mel_out'])['f0_denorm_pred']  # pe predicts from predicted mel
            else:
                gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams)
                pred_f0 = model_out.get('f0_denorm')
            self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0)
            self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'], name=f'diffmel_{batch_idx}')
            self.plot_mel(batch_idx, sample['mels'], model_out['fs2_mel'], name=f'fs2mel_{batch_idx}')
        return outputs
class ShallowDiffusionOfflineDataset(FastSpeechDataset):
    """Dataset that, outside training, attaches precomputed FFT-singer
    (FastSpeech2) mel predictions loaded from the fs2 checkpoint directory
    as 'fs2_mel', for offline shallow-diffusion."""

    def __getitem__(self, index):
        sample = super(ShallowDiffusionOfflineDataset, self).__getitem__(index)
        item = self._get_item(index)
        if self.prefix != 'train' and hparams['fs2_ckpt'] != '':
            ckpt_dir = os.path.dirname(hparams['fs2_ckpt'])
            mel_path = f"{ckpt_dir}/P_mels_npy/{item['item_name']}.npy"
            # ~M: coarse mel generated by the FFT-singer front-end.
            sample['fs2_mel'] = torch.Tensor(np.load(mel_path))
        return sample

    def collater(self, samples):
        batch = super(ShallowDiffusionOfflineDataset, self).collater(samples)
        if self.prefix != 'train' and hparams['fs2_ckpt'] != '':
            mels = [s['fs2_mel'] for s in samples]
            batch['fs2_mels'] = utils.collate_2d(mels, 0.0)
        return batch
class DiffSingerOfflineTask(DiffSingerTask):
    # Offline shallow-diffusion variant: the diffusion model refines mels that
    # a separately trained FFT-singer (FastSpeech2) produced ahead of time and
    # that the dataset loads from disk as 'fs2_mels'.
    def __init__(self):
        super(DiffSingerOfflineTask, self).__init__()
        self.dataset_cls = ShallowDiffusionOfflineDataset
    def build_tts_model(self):
        mel_bins = hparams['audio_num_mel_bins']
        self.model = OfflineGaussianDiffusion(
            phone_encoder=self.phone_encoder,
            out_dims=mel_bins, denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams),
            timesteps=hparams['timesteps'],
            K_step=hparams['K_step'],
            loss_type=hparams['diff_loss_type'],
            spec_min=hparams['spec_min'], spec_max=hparams['spec_max'],
        )
        # if hparams['fs2_ckpt'] != '':
        #     utils.load_ckpt(self.model.fs2, hparams['fs2_ckpt'], 'model', strict=True)
        # self.model.fs2.decoder = None
    def run_model(self, model, sample, return_output=False, infer=False):
        # Forward pass for train/validation. ref_mels packs [GT mel, offline
        # fs2 mel]; the offline mel is None here because only inference paths
        # consume it (see validation_step/test_step).
        txt_tokens = sample['txt_tokens']  # [B, T_t]
        target = sample['mels']  # [B, T_s, 80]
        mel2ph = sample['mel2ph']  # [B, T_s]
        f0 = sample['f0']
        uv = sample['uv']
        energy = sample['energy']
        fs2_mel = None  # sample['fs2_mels']
        spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
        if hparams['pitch_type'] == 'cwt':
            # Reconstruct frame-level f0 from the CWT spectrum representation.
            cwt_spec = sample[f'cwt_spec']
            f0_mean = sample['f0_mean']
            f0_std = sample['f0_std']
            sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph)
        output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed,
                       ref_mels=[target, fs2_mel], f0=f0, uv=uv, energy=energy, infer=infer)
        losses = {}
        if 'diff_loss' in output:
            losses['mel'] = output['diff_loss']
        # self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses)
        # if hparams['use_pitch_embed']:
        #     self.add_pitch_loss(output, sample, losses)
        if hparams['use_energy_embed']:
            self.add_energy_loss(output['energy_pred'], energy, losses)
        if not return_output:
            return losses
        else:
            return losses, output
    def validation_step(self, sample, batch_idx):
        # Same as DiffSingerTask.validation_step, but inference conditions on
        # the precomputed fs2 mel and the fs2 plot uses that loaded mel.
        outputs = {}
        txt_tokens = sample['txt_tokens']  # [B, T_t]
        target = sample['mels']  # [B, T_s, 80]
        energy = sample['energy']
        # fs2_mel = sample['fs2_mels']
        spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
        mel2ph = sample['mel2ph']
        f0 = sample['f0']
        uv = sample['uv']
        outputs['losses'] = {}
        outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False)
        outputs['total_loss'] = sum(outputs['losses'].values())
        outputs['nsamples'] = sample['nsamples']
        outputs = utils.tensors_to_scalars(outputs)
        if batch_idx < hparams['num_valid_plots']:
            fs2_mel = sample['fs2_mels']
            model_out = self.model(
                txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, energy=energy,
                ref_mels=[None, fs2_mel], infer=True)
            if hparams.get('pe_enable') is not None and hparams['pe_enable']:
                gt_f0 = self.pe(sample['mels'])['f0_denorm_pred']  # pe predicts from GT mel
                pred_f0 = self.pe(model_out['mel_out'])['f0_denorm_pred']  # pe predicts from predicted mel
            else:
                gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams)
                pred_f0 = model_out.get('f0_denorm')
            self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0)
            self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'], name=f'diffmel_{batch_idx}')
            self.plot_mel(batch_idx, sample['mels'], fs2_mel, name=f'fs2mel_{batch_idx}')
        return outputs
    def test_step(self, sample, batch_idx):
        # Test-set inference: starts shallow diffusion from the precomputed
        # fs2 mel; optionally conditions on GT duration/f0 per hparams.
        spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
        txt_tokens = sample['txt_tokens']
        energy = sample['energy']
        if hparams['profile_infer']:
            pass
        else:
            mel2ph, uv, f0 = None, None, None
            if hparams['use_gt_dur']:
                mel2ph = sample['mel2ph']
            if hparams['use_gt_f0']:
                f0 = sample['f0']
                uv = sample['uv']
            fs2_mel = sample['fs2_mels']
            outputs = self.model(
                txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, ref_mels=[None, fs2_mel], energy=energy,
                infer=True)
            sample['outputs'] = self.model.out2mel(outputs['mel_out'])
            sample['mel2ph_pred'] = outputs['mel2ph']
            if hparams.get('pe_enable') is not None and hparams['pe_enable']:
                sample['f0'] = self.pe(sample['mels'])['f0_denorm_pred']  # pe predicts from GT mel
                sample['f0_pred'] = self.pe(sample['outputs'])['f0_denorm_pred']  # pe predicts from predicted mel
            else:
                sample['f0'] = denorm_f0(sample['f0'], sample['uv'], hparams)
                sample['f0_pred'] = outputs.get('f0_denorm')
            return self.after_infer(sample)
class MIDIDataset(FastSpeechDataset):
    """FastSpeech dataset variant that adds MIDI-derived f0 and pitch ids."""

    def __getitem__(self, index):
        sample = super(MIDIDataset, self).__getitem__(index)
        raw = self._get_item(index)
        sample['f0_midi'] = torch.FloatTensor(raw['f0_midi'])
        # NOTE(review): pitch_midi is clipped to max_frames but f0_midi is
        # not — confirm that is intentional.
        sample['pitch_midi'] = torch.LongTensor(raw['pitch_midi'])[:hparams['max_frames']]
        return sample

    def collater(self, samples):
        batch = super(MIDIDataset, self).collater(samples)
        for key, pad in (('f0_midi', 0.0), ('pitch_midi', 0)):
            batch[key] = utils.collate_1d([s[key] for s in samples], pad)
        return batch
class OpencpopDataset(FastSpeechDataset):
    """Dataset for the Opencpop singing corpus: adds MIDI pitch ids, note
    durations, slur flags and word boundaries, all clipped to max_frames."""

    def __getitem__(self, index):
        sample = super(OpencpopDataset, self).__getitem__(index)
        raw = self._get_item(index)
        limit = hparams['max_frames']
        sample['pitch_midi'] = torch.LongTensor(raw['pitch_midi'])[:limit]
        sample['midi_dur'] = torch.FloatTensor(raw['midi_dur'])[:limit]
        sample['is_slur'] = torch.LongTensor(raw['is_slur'])[:limit]
        sample['word_boundary'] = torch.LongTensor(raw['word_boundary'])[:limit]
        return sample

    def collater(self, samples):
        batch = super(OpencpopDataset, self).collater(samples)
        for key in ('pitch_midi', 'midi_dur', 'is_slur', 'word_boundary'):
            batch[key] = utils.collate_1d([s[key] for s in samples], 0)
        return batch
class DiffSingerMIDITask(DiffSingerTask):
    def __init__(self):
        # Uses the Opencpop (MIDI-annotated) dataset rather than the generic
        # MIDIDataset variant.
        super(DiffSingerMIDITask, self).__init__()
        # self.dataset_cls = MIDIDataset
        self.dataset_cls = OpencpopDataset
    def run_model(self, model, sample, return_output=False, infer=False):
        # Forward pass with MIDI conditioning (pitch_midi / midi_dur / is_slur).
        # After `switch_midi2f0_step`, ground-truth f0/uv are withheld so the
        # model must infer pitch from the MIDI inputs alone.
        txt_tokens = sample['txt_tokens']  # [B, T_t]
        target = sample['mels']  # [B, T_s, 80]
        # mel2ph = sample['mel2ph'] if hparams['use_gt_dur'] else None # [B, T_s]
        mel2ph = sample['mel2ph']
        if hparams.get('switch_midi2f0_step') is not None and self.global_step > hparams['switch_midi2f0_step']:
            f0 = None
            uv = None
        else:
            f0 = sample['f0']
            uv = sample['uv']
        energy = sample['energy']
        spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
        if hparams['pitch_type'] == 'cwt':
            # Reconstruct frame-level f0 from the CWT spectrum representation.
            cwt_spec = sample[f'cwt_spec']
            f0_mean = sample['f0_mean']
            f0_std = sample['f0_std']
            sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph)
        output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed,
                       ref_mels=target, f0=f0, uv=uv, energy=energy, infer=infer, pitch_midi=sample['pitch_midi'],
                       midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur'))
        losses = {}
        if 'diff_loss' in output:
            losses['mel'] = output['diff_loss']
        # Duration loss additionally uses word boundaries (see add_dur_loss).
        self.add_dur_loss(output['dur'], mel2ph, txt_tokens, sample['word_boundary'], losses=losses)
        if hparams['use_pitch_embed']:
            self.add_pitch_loss(output, sample, losses)
        if hparams['use_energy_embed']:
            self.add_energy_loss(output['energy_pred'], energy, losses)
        if not return_output:
            return losses
        else:
            return losses, output
    def validation_step(self, sample, batch_idx):
        # Validation losses plus, for the first `num_valid_plots` batches,
        # full inference (f0/uv withheld: pitch comes from MIDI) and plots.
        outputs = {}
        txt_tokens = sample['txt_tokens']  # [B, T_t]
        target = sample['mels']  # [B, T_s, 80]
        energy = sample['energy']
        # fs2_mel = sample['fs2_mels']
        spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
        mel2ph = sample['mel2ph']
        outputs['losses'] = {}
        outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False)
        outputs['total_loss'] = sum(outputs['losses'].values())
        outputs['nsamples'] = sample['nsamples']
        outputs = utils.tensors_to_scalars(outputs)
        if batch_idx < hparams['num_valid_plots']:
            model_out = self.model(
                txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=None, uv=None, energy=energy, ref_mels=None, infer=True,
                pitch_midi=sample['pitch_midi'], midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur'))
            if hparams.get('pe_enable') is not None and hparams['pe_enable']:
                gt_f0 = self.pe(sample['mels'])['f0_denorm_pred']  # pe predicts from GT mel
                pred_f0 = self.pe(model_out['mel_out'])['f0_denorm_pred']  # pe predicts from predicted mel
            else:
                gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams)
                pred_f0 = model_out.get('f0_denorm')
            self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0)
            self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'], name=f'diffmel_{batch_idx}')
            self.plot_mel(batch_idx, sample['mels'], model_out['fs2_mel'], name=f'fs2mel_{batch_idx}')
            if hparams['use_pitch_embed']:
                self.plot_pitch(batch_idx, sample, model_out)
        return outputs
def add_dur_loss(self, dur_pred, mel2ph, txt_tokens, wdb, losses=None):
"""
:param dur_pred: [B, T], float, log scale
:param mel2ph: [B, T]
:param txt_tokens: [B, T]
:param losses:
:return:
"""
B, T = txt_tokens.shape
nonpadding = (txt_tokens != 0).float()
dur_gt = mel2ph_to_dur(mel2ph, T).float() * nonpadding
is_sil = torch.zeros_like(txt_tokens).bool()
for p in self.sil_ph:
is_sil = is_sil | (txt_tokens == self.phone_encoder.encode(p)[0])
is_sil = is_sil.float() # [B, T_txt]
# phone duration loss
if hparams['dur_loss'] == 'mse':
losses['pdur'] = F.mse_loss(dur_pred, (dur_gt + 1).log(), reduction='none')
losses['pdur'] = (losses['pdur'] * nonpadding).sum() / nonpadding.sum()
dur_pred = (dur_pred.exp() - 1).clamp(min=0)
else:
raise NotImplementedError
# use linear scale for sent and word duration
if hparams['lambda_word_dur'] > 0:
idx = F.pad(wdb.cumsum(axis=1), (1, 0))[:, :-1]
# word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_(1, idx, midi_dur) # midi_dur can be implied by add gt-ph_dur
word_dur_p = dur_pred.new_zeros([B, | |
<reponame>Vinomo4/Cryptography
# **BLOCKCHAIN**
# LIBRARIES
import sympy
import math
import random
import hashlib
import pickle
import time
import numpy as np
from prettytable import PrettyTable
import csv
#-------------------------------------------------------------------------------
# Filepaths (relative to the working directory; folders must already exist).
generated_path = "./Chains/"  # Folder where the generated blockchains will be stored.
test_path = "./Test/"  # Folder where the files provided for testing the code are stored.
time_path = "./Time comparisons/"  # Folder where the timing tables will be stored.
#-------------------------------------------------------------------------------
class rsa_key:
    def __init__(self, bits_modulo=2048, e=2**16+1):
        '''
        Generate an RSA key (2048-bit modulus and public exponent 2**16+1 by default).
        '''
        # Each prime factor carries half of the modulus bits.
        self.num_bits = bits_modulo//2
        self.publicExponent = e  # e
        self.primeP = self.find_prime(first=False)  # p
        self.primeQ = self.find_prime(first=True)   # q (guaranteed distinct from p)
        self.modulus = (self.primeP) * (self.primeQ)  # n = p*q
        self.Phimodulus = (self.primeP-1) * (self.primeQ-1)  # phi(n)
        # Private exponent d = e^-1 mod phi(n), obtained from Bezout's identity
        # via the extended Euclidean algorithm; normalize into [0, phi(n)).
        self.privateExponent = int(sympy.gcdex(self.publicExponent, self.Phimodulus)[0])  # d
        if self.privateExponent < 0:
            self.privateExponent += self.Phimodulus
        self.privateExponentModulusPhiP = self.privateExponent % (self.primeP-1)  # d mod (p-1)
        self.privateExponentModulusPhiQ = self.privateExponent % (self.primeQ-1)  # d mod (q-1)
        # q' = q^-1 mod p. BUGFIX: gcdex may return a negative coefficient
        # (only d was normalized before); bring q' into [0, p) so the CRT
        # recombination in sign() is well defined.
        self.inverseQModulusP = int(sympy.gcdex(self.primeQ, self.primeP)[0])  # q'
        if self.inverseQModulusP < 0:
            self.inverseQModulusP += self.primeP
        self.q_qapos = self.primeQ * self.inverseQModulusP  # q*q' (== 1 mod p, == 0 mod q)
    def find_prime(self, first):
        '''
        Objective:
            - Find a random prime p of `num_bits` bits such that gcd(p-1, e) = 1.
        Input:
            - first: Boolean indicating whether the number we are searching is the
              second of the two prime factors used by RSA (it must then differ
              from the already chosen primeP).
        Output:
            - The prime itself.
        '''
        # Search limits so the prime has exactly num_bits bits.
        upp_lim = 2**self.num_bits-1
        low_lim = 2**(self.num_bits-1)+1
        while True:
            aux_p = sympy.randprime(low_lim, upp_lim)
            # p-1 must be coprime with e for d = e^-1 mod phi(n) to exist.
            if math.gcd(aux_p-1, self.publicExponent) == 1:
                if (first and aux_p != self.primeP) or not first:
                    return aux_p
    def sign(self, message):
        '''
        Return an integer: the RSA signature of `message`, computed with the CRT
        (Garner) speed-up:
            m^d mod n = a*q*q' + b*(1 - q*q')  (mod n)
        where a = m^(d mod p-1) mod p and b = m^(d mod q-1) mod q.
        '''
        message_p = message % self.primeP
        message_q = message % self.primeQ
        first_term = pow(message_p, self.privateExponentModulusPhiP, self.primeP)
        second_term = pow(message_q, self.privateExponentModulusPhiQ, self.primeQ)
        # BUGFIX: reduce modulo n so the signature is the canonical
        # representative in [0, n) (the old code could return values outside
        # that range, including negatives).
        return (first_term * self.q_qapos + second_term * (1 - self.q_qapos)) % self.modulus
    def sign_slow(self, message):
        '''
        Return an integer: the RSA signature of `message` without the CRT speed-up.
        '''
        return pow(message, self.privateExponent, self.modulus)
class rsa_public_key:
    def __init__(self, rsa_key):
        '''
        Build the RSA public key (e, n) associated with the private key `rsa_key`.
        '''
        self.publicExponent = rsa_key.publicExponent
        self.modulus = rsa_key.modulus
    def verify(self, message, signature):
        '''
        Return the boolean True if `signature` corresponds to a signature of
        `message` made with the RSA key associated with this public key;
        return False in any other case.
        '''
        recovered = pow(signature, self.publicExponent, self.modulus)
        return recovered == message
class transaction:
    """A signed message: stores the message, its RSA signature and the
    signer's public key."""
    def __init__(self, message, RSAkey):
        '''
        Create a transaction by signing `message` with the key `RSAkey`.
        '''
        self.message = message
        self.signature = RSAkey.sign(message)
        self.public_key = rsa_public_key(RSAkey)
    def verify(self):
        '''
        Return True if the stored signature corresponds to the stored message
        under the stored public key; False in any other case.
        '''
        return self.public_key.verify(self.message, self.signature)
def check_hash(hash):
    '''
    Proof-of-work test: a hash is acceptable when it is strictly below 2**240.
    '''
    limit = 2 ** 240
    return hash < limit
def compute_hash(block, seed):
    '''
    Return a candidate hash (as an int) for `block`: the SHA-256 of the
    concatenation of the previous hash, the transaction fields and `seed`.
    The result may not satisfy the proof-of-work condition.
    '''
    tx = block.transaction
    parts = (
        str(block.previous_block_hash),
        str(tx.public_key.publicExponent),
        str(tx.public_key.modulus),
        str(tx.message),
        str(tx.signature),
        str(seed),
    )
    digest = hashlib.sha256("".join(parts).encode()).hexdigest()
    return int(digest, 16)
def find_valid_hash(block):
    '''
    Mine: draw random seeds until compute_hash(block, seed) satisfies the
    proof-of-work condition; return the (seed, hash) pair found.
    '''
    while True:
        candidate_seed = random.randint(0, 2 ** 256)
        candidate_hash = compute_hash(block, candidate_seed)
        if check_hash(candidate_hash):
            return candidate_seed, candidate_hash
class block:
    def __init__(self):
        '''
        Create an empty block (not necessarily valid until mined).
        '''
        self.block_hash = None
        self.previous_block_hash = None
        self.transaction = None
        self.seed = None
    def genesis(self, transaction):
        '''
        Turn this block into the first (genesis) block of a chain carrying
        `transaction`; it is characterized by previous_block_hash == 0 and by
        being valid. Returns self.
        '''
        assert transaction.verify() == True
        self.transaction = transaction
        self.previous_block_hash = 0
        # Mining fills in both the seed and the valid block hash.
        self.seed, self.block_hash = find_valid_hash(self)
        return self
    def next_block(self, transaction):
        '''
        Mine and return the next valid block carrying `transaction`.
        '''
        assert transaction.verify() == True
        successor = block()
        successor.previous_block_hash = self.block_hash
        successor.transaction = transaction
        successor.seed, successor.block_hash = find_valid_hash(successor)
        return successor
    def verify_block(self):
        '''
        Check whether this block is valid:
        - the transaction is valid,
        - the stored hash matches the recomputed one and meets proof-of-work,
        - the previous block's hash meets proof-of-work.
        Returns True when all checks pass; prints the reason and returns False
        otherwise.
        '''
        if not self.transaction.verify():
            print("Invalid transaction")
            return False
        if compute_hash(self, self.seed) != self.block_hash or not check_hash(self.block_hash):
            print("Invalid seed")
            return False
        if not check_hash(self.previous_block_hash):
            print("Hash doesn't fulfill the 'proof of work' condition")
            return False
        return True
class block_chain:
    def __init__(self, transaction):
        '''
        Create a chain of blocks (a list of blocks) whose first element is a
        genesis block generated from `transaction`.
        '''
        self.list_of_blocks = [block().genesis(transaction)]
    def add_block(self, transaction):
        '''
        Append to the list a new valid block generated from `transaction`.
        '''
        # Renamed local (was `block`, shadowing the class name).
        new_block = self.list_of_blocks[-1].next_block(transaction)
        self.list_of_blocks.append(new_block)
    def verify(self):
        '''
        Check whether the whole chain is valid:
        - every block is valid,
        - the first block is a genesis block,
        - every block correctly links to its predecessor.
        Returns (True, None) when valid; otherwise (False, i) where i is the
        index of the first invalid block.
        '''
        gen = self.list_of_blocks[0]
        if gen.previous_block_hash != 0 or not gen.verify_block():
            print("Genesis rejected")
            return False, 0
        for i in range(1, len(self.list_of_blocks)):
            b = self.list_of_blocks[i]
            if not b.verify_block():
                print("Verification rejected")
                return False, i
            # BUGFIX: compare each block to its PREDECESSOR. The old code
            # compared block i to block i+1 (skipping the last index), so the
            # link between the genesis block and block 1 was never checked and
            # a chain broken at that link verified as valid.
            if b.previous_block_hash != self.list_of_blocks[i - 1].block_hash:
                print("Chain assertion rejected")
                return False, i
        return True, None
def generate_block_chain(n_blocks, file):
    '''
    Generate a random blockchain with the specified number of blocks and
    pickle the object to `file` + ".block".
    Input:
        - n_blocks: total number of blocks in the chain.
        - file: path (including base name) where the chain will be stored.
    '''
    key = rsa_key()
    chain = block_chain(transaction(random.randint(0, 2 ** 256), key))
    for idx in range(n_blocks - 1):
        print(idx + 1)
        key = rsa_key()
        chain.add_block(transaction(random.randint(0, 2 ** 256), key))
    # Ensure the generated blockchain is correct before persisting it.
    ver, _ = chain.verify()
    assert ver == True
    out_path = file + ".block"
    with open(out_path, 'wb') as fh:
        pickle.dump(chain, fh)
def read_blockchain(filename):
    '''
    Unpickle and return the blockchain stored in `filename`.

    NOTE(security): pickle.load can execute arbitrary code from the file; only
    read files generated by this program itself, never untrusted input.
    '''
    with open(filename, 'rb') as fh:
        return pickle.load(fh)
def generate_time_table(filename):
'''
Function that evaluates the time difference between using slow vs. fast signature in RSA.
'''
header = ["Key length (bits)","Without CRT (s)","With CRT (s)"] # Header of the table.
rows = [] # List where the execution times will be stored.
for i in range(4):
bits = 512*(2**i) # Number of bits of the modulo.
rsa = rsa_key(bits_modulo = bits)
# Generating the messages.
messages = [random.getrandbits(256) for _ in range(100)]
for j in range(10):
aux_time_slow = []
aux_time_fast = []
time_i = time.time()
for message in messages:
rsa.sign_slow(message)
time_f = time.time()
for message in messages:
rsa.sign(message)
aux_time_fast.append(time.time()-time_f)
aux_time_slow.append(time_f - time_i)
rows.append([str(bits),round(np.mean(aux_time_slow),4),round(np.mean(aux_time_fast),4)])
# | |
"""****************************************************************************
@author: damv_ Hecho con amor, no olvides eso
Equilibrio para proyecto Calculos Equilibrio
Funciones y algoritmos necesarios para calculo de equilibrio. Libreria personal.
VALORES CONSTANTE R
R = 83.14472 Bar cm3 / mol K
R = 8.314472 MPa cm3 / mol K
****************************************************************************"""
from scipy.special import gamma
import numpy as np
"""****************************************************************************
Distribución gamma, alfa para sistemas polimericos = 3.5, probado y aprobado
zpseud = fracción de interés, Mprom = M.molar de fracción, Mmin = Masa molar mínima,
n = número de pseudo componentes, alfa = Parámetro de forma para distribución Gamma
****************************************************************************"""
#PseudoComp parte un pseudocomponente en varias fracciones siguiendo el metodo de la funcion
#Gamma reportada por Whitson. Mmin y alfa fijadas estan fijas para asfaltenos.
def PseudoComp(zpseud, Mprom, n, Mmin=1800, alfa=3.5):
    """Split a pseudo-component into `n` cuts with Whitson's gamma
    distribution, evaluated by Gauss-Laguerre quadrature.

    zpseud: fraction of the lump; Mprom: its average molar mass;
    Mmin: minimum molar mass (default tuned for asphaltenes);
    alfa: gamma shape parameter (3.5 for polymeric systems, tested).
    Returns np.array([z, M]), corrected so that sum(z) == zpseud and
    sum(z*M)/sum(z) == Mprom.
    """
    X, W = np.polynomial.laguerre.laggauss(n)  # quadrature nodes and weights
    beta = (2.5 * Mprom - Mmin) / X[-1]
    exs = alfa * beta / (Mprom - Mmin)
    delta = np.exp(exs - 1)
    # Gamma density evaluated at every node (vectorized over the nodes).
    dens = (X ** (alfa - 1)) * (exs ** alfa) / (gamma(alfa) * delta ** X)
    z = zpseud * (W * dens)
    M = Mmin + beta * X
    z = z * (zpseud / sum(z))               # renormalize the fractions
    M = M * (Mprom * sum(z) / sum(M * z))   # re-center the average molar mass
    return np.array([z, M])
#Recomponer, en caso de que se reporte la fraccion de solidos (arcilla, arena, etc.)
def Recomponer(w):
    """Recompose reported fractions when solids (clay, sand, etc.) are present:
    keep only the first four (SARA) entries and renormalize them to sum 1.
    Arrays with four or fewer entries are returned unchanged."""
    if w.size > 4:
        head = w[:4]
        w = head / sum(head)
    return w
# Blend: FUSIONA COMPONENTES SAR, se usa con propiedades promedio preferible para el
# modelo de solucion regular "SR"
def Blend_SR(w_1, w_2, Porcentaje_masa_crudo_1):
    """Blend two oils for the regular-solution ("SR") model, merging the SAR
    components.

    The first three entries (saturates/aromatics/resins) are mass-averaged
    with weight `Porcentaje_masa_crudo_1`; the remaining cuts of each oil are
    kept separate (reversed order) and appended; the whole vector is then
    renormalized. Inputs are np.arrays, the percentage is a scalar.
    """
    frac = Porcentaje_masa_crudo_1
    sar = frac * w_1[:3] + (1 - frac) * w_2[:3]
    tail_1 = np.delete(np.flip(w_1, 0), [-1, -2, -3])
    tail_2 = np.delete(np.flip(w_2, 0), [-1, -2, -3])
    blended = np.append(np.append(sar, tail_1), tail_2)
    return blended / sum(blended)
#NO USADO
def Blend_Oil(w_1, w_2, Fraccion_masa_crudo_1):
    """(Unused) Blend the SAR fractions of two oils and append each oil's
    last cut scaled by its mass share; no final renormalization."""
    f1 = Fraccion_masa_crudo_1
    mixed = f1 * w_1[0:3] + (1 - f1) * w_2[0:3]
    mixed = np.append(mixed, f1 * w_1[-1])
    return np.append(mixed, (1 - f1) * w_2[-1])
# Blend_VL: NO FUSIONA COMPONENTES SAR
def Blend_VL(w_1, w_2, Porcentaje_masa_crudo_1):
    """Blend without merging SAR components: scale each oil's fractions by its
    mass share, concatenate, and renormalize to sum 1."""
    scaled_1 = Porcentaje_masa_crudo_1 * w_1
    scaled_2 = (1 - Porcentaje_masa_crudo_1) * w_2
    combined = np.append(scaled_1, scaled_2)
    return combined / sum(combined)
"""
#Método de prueba para saturados, queda pendiente de probar
def Propcrit_S(M): #Saturados y aromaticos, método de
g=-0.567944+(M-66)**0.0590526 #gravedad especifica, Soreide
Tb=1928.3-1.695e5*(M**-0.03522)*(g**3.266)*np.exp(-4.922e-3*M-4.7685*g+3.462e-3*M*g) #°R, Soreide
Tc=341.7+811*g+(0.4244+0.1174*g)*Tb+(0.4669-3.2623*g)*1e5/Tb #°R, Kesler - Lee
Pc=8.3634-0.0566/g-(0.24244+2.2898/g+0.11857/(g**2))*1e-3*Tb+(1.4685+3.648/g+0.47227/(g**2))*1e-7*Tb**2-(0.42019+1.6977/(g**2))*1e-10*Tb**3 #lnPc, Kesler - Lee
Pc=np.exp(Pc) #Psia
Kw=(Tb**(1/3))/g #Factor K de Watson
A=[-5.92714,6.09648,1.28862,-0.16934,15.2518,-15.6875,-13.4721,0.43577] #Parametros de correlacion de Lee - Kesler
Tbr=Tb/Tc
if Tbr <0.8: #Correlacion de Lee Kesler para el factor acentrico
w=(-np.log(Pc/14.7)+A[0]+A[1]/Tbr+A[2]*np.log(Tbr)+A[3]*Tbr**6)/(A[4]+A[5]/Tbr+A[6]*np.log(Tbr)+A[7]*Tbr**6)
else:
w=-7.904+0.1352*Kw-0.007465*Kw**2+8.359*Tbr+(1.408-0.01063*Kw)/Tbr
Pc=Pc/14.5038 #Bar
Tc=((Tc-491.67)/1.8)+273.15 #Kelvin
return{"Pc":Pc,"Tc":Tc,"w":w}
"""
#Propiedades criticas
#presion critica SARA
def Pc_SARA(M):
    """Critical pressure (bar) for SARA fractions from molar masses M.

    Index 0: saturates (Riazi correlation); indices 1+ (aromatics, resins and
    asphaltene cuts) all use the same power-law correlation.
    """
    Pc = np.zeros(M.size)
    Pc[0] = np.exp(4.65757 - 0.13426 * M[0] ** 0.5)   # saturates, Riazi
    Pc[1:] = 1891.4 * M[1:] ** -0.7975                # aromatics/resins/asphaltenes
    return Pc
#Presion critica
def Pc_Asf(M, n):
    """Critical pressure (bar) of `n` asphaltene cuts, read from M starting at
    offset 3 (use when nC-7 is included)."""
    cuts = np.asarray(M[3:3 + n])
    return 1891.4 * cuts ** -0.7975
#Temperatura critica SARA
def Tc_SARA(M):
    """Critical temperature (K) for SARA fractions from molar masses M.

    Index 0: saturates (Riazi boiling-point correlation); indices 1+
    (aromatics, resins, asphaltene cuts) share one power-law correlation.
    """
    Tc = np.zeros(M.size)
    # Saturates: Riazi correlations for Tb and reduced Tb.
    Tb = 1070 - np.exp(6.98291 - 0.02013 * M[0] ** (2 / 3))
    Tbr = 1.15 - np.exp(-0.41966 - 0.02436 * M[0] ** 0.58)
    Tc[0] = Tb / Tbr
    Tc[1:] = 77.856 * M[1:] ** 0.4708
    return Tc
#Temperatura critica asfaltenos
def Tc_Asf(M, n):
    """Critical temperature (K) of `n` asphaltene cuts, read from M starting
    at offset 3 (use when nC-7 is included)."""
    cuts = np.asarray(M[3:3 + n])
    return 77.856 * cuts ** 0.4708
#Volumen molar de fracciones SARA - NO USADO
def Vc_SARA(M):
    """(Unused) Critical molar volume (cm3/mol) for SARA fractions.

    Index 0: saturates via a critical-density correlation (g/cm3);
    indices 1+ share one linear correlation in molar mass.
    """
    Vc = np.zeros(M.size)
    densidad_crit = 0.26 - np.exp(-3.50532 - 1.5e-6 * M[0] ** 2.38)  # g/cm3
    Vc[0] = M[0] / densidad_crit
    Vc[1:] = 2.4988 * M[1:] + 116.8879
    return Vc
#Factor acentrico "w" para fracciones SARA
def Omega_SARA(M):
    """Acentric factor for SARA fractions.

    Saturates use their own correlation; aromatics, resins and asphaltenes
    share the form cf * (0.5837*ln(M) - 2.5389) with family-specific cf.
    """
    omega = np.zeros(M.size)
    omega[0] = np.exp(-3.06826 + 1.04987 * M[0] ** 0.2) - 0.3         # saturates
    omega[1] = 0.8098 * (0.5837 * np.log(M[1]) - 2.5389)              # aromatics
    omega[2] = 0.7910 * (0.5837 * np.log(M[2]) - 2.5389)              # resins
    omega[3:] = 0.7940 * (0.5837 * np.log(M[3:]) - 2.5389)            # asphaltene cuts
    return omega
#Factor acentrico "w"
def Omega_Asf(M, n):
    """Acentric factor of `n` asphaltene cuts, read from M starting at
    offset 3 (use when nC-7 is included)."""
    cuts = np.asarray(M[3:3 + n])
    return 0.7940 * (0.5837 * np.log(cuts) - 2.5389)
#Matriz de parametros de interaccion binaria Kij - NO USADO
def Kij(v):
    """(Unused) Symmetric binary-interaction matrix from critical volumes
    (zero diagonal); v is an np.array."""
    n = v.size
    K = np.zeros((n, n))
    for i in range(n - 1):
        for j in range(i + 1, n):
            kij = 1 - 8 * ((v[i] * v[j]) ** .5) / ((v[i] ** (1 / 3)) + (v[j] ** (1 / 3))) ** 3
            K[i, j] = K[j, i] = kij
    return K
"""
Correlaciones para volumen molar, usar np.array para Tc, Pc,vc, w y fracciones x
"""
#Correlacion de Costald, volumen molar en cm3/mol
#Reglas de mezclado Costald
def V_a(Tc, Pc, w):
    """COSTALD characteristic volume v* (cm3/mol) per SARA fraction:

        v* = R*Tc/Pc * (alfa + beta*w + gamma*w**2)

    with constants fitted per family (index 0 saturates, 1 aromatics,
    2 resins, 3+ asphaltene cuts all share the last set).
    R = 83.14472 bar cm3 / (mol K). Inputs are np.arrays.
    """
    alf = np.array([0.332482, 1.45477, -0.151344, 1.04739])
    bet = np.array([-0.163673, -2.7688, 0.660325, -0.835364])
    gam = np.array([0.0494277, 1.56843, -0.294554, 0.205254])
    m = Tc.size
    va = np.zeros(m)
    for i in range(m):
        c = min(i, 3)  # every asphaltene cut uses the last coefficient set
        va[i] = (83.14472 * Tc[i] / Pc[i]) * (alf[c] + bet[c] * w[i] + gam[c] * w[i] ** 2)
    return va
def V_a_asf(Tc, Pc, w, n):
    """COSTALD characteristic volume v* for the n asphaltene
    sub-fractions, taken from indices 3..3+n-1 of Tc, Pc and w."""
    Tc3 = np.asarray(Tc[3:3 + n], dtype=float)
    Pc3 = np.asarray(Pc[3:3 + n], dtype=float)
    w3 = np.asarray(w[3:3 + n], dtype=float)
    return (83.14472 * Tc3 / Pc3) * (1.04739 - 0.835364 * w3 + 0.205254 * w3 ** 2)
def V_a_solv(Tc, Pc, M, w):
    """COSTALD characteristic volume v* of the solvent.

    NOTE(review): M is unused but kept to preserve the call signature.
    """
    a0, a1, a2 = 0.2905331, -0.08057958, 0.02276965
    return (83.14472 * Tc / Pc) * (a0 + a1 * w + a2 * w ** 2)
# COSTALD mixing rules (use np.array everywhere)
def Vcm_Costald(x, vc):  # use with np.array
    """COSTALD mixing rule for the mixture characteristic volume."""
    s1 = np.dot(x, vc)
    s23 = np.dot(x, vc ** (2 / 3))
    s13 = np.dot(x, vc ** (1 / 3))
    return 0.25 * (s1 + 3.0 * s23 * s13)
def Tcm_Costald(x, vc, Tc, vm):  # vm scalar; x, vc, Tc np.array
    """COSTALD mixing rule for the mixture critical temperature."""
    return np.dot(x, np.sqrt(vc * Tc)) ** 2 / vm
def w_m(x, w):  # all inputs are np.array
    """Mole-fraction-weighted mixture acentric factor."""
    return (np.asarray(x) * np.asarray(w)).sum()
"""
Las propiedades de mezcla se determinan con Vcm, Tcm y wm.
V_Costald cm3/mol, sirve para mezclas o componentes simples.
va es vaster o v* por como aparece en la correlación
"""
# Saturated-liquid molar volume - COSTALD correlation for one component
def V_Costald(T, va, w, Tc):  # one or several components; mixtures via the mixing rules
    """Saturated molar volume [cm3/mol] by the COSTALD correlation.

    V = v* * V0(tau) * (1 - w * Vdelta(Tr)), tau = 1 - Tr.
    Works elementwise if va/w/Tc are arrays (T scalar).
    """
    a = (-1.52816, 1.43907, -0.81446, 0.190454,
         -0.296123, 0.386914, -0.0427258, -0.0480645)
    Tr = T / Tc
    tau = 1 - Tr
    v0 = 1 + a[0] * tau ** (1 / 3) + a[1] * tau ** (2 / 3) + a[2] * tau + a[3] * tau ** (4 / 3)
    # The 1.00001 shift keeps the denominator finite as Tr -> 1
    vdel = (a[4] + a[5] * Tr + a[6] * Tr ** 2 + a[7] * Tr ** 3) / (Tr - 1.00001)
    return va * v0 * (1 - w * vdel)
# List of saturation volumes using COSTALD
def Vs_Costald(T, va, w, Tc):  # only T is scalar, the rest are np.array
    """Saturated molar volume [cm3/mol] of every component at T."""
    return np.array([V_Costald(T, vai, wi, Tci)
                     for vai, wi, Tci in zip(va, w, Tc)])
#Volumen molar de una mezcla usando la correlacion COSTALD
def Vm_Costald( T, Va, Tc, w, x ): #Solo T es escalar, | |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/012_data.external.ipynb (unless otherwise specified).
__all__ = ['decompress_from_url', 'download_data', 'get_UCR_univariate_list', 'UTSC_datasets', 'UCR_univariate_list',
'get_UCR_multivariate_list', 'MTSC_datasets', 'UCR_multivariate_list', 'UCR_list', 'classification_list',
'TSC_datasets', 'get_UCR_data', 'get_classification_data', 'check_data', 'get_Monash_regression_list',
'Monash_regression_list', 'regression_list', 'TSR_datasets', 'get_Monash_regression_data',
'get_regression_data', 'get_forecasting_list', 'forecasting_time_series', 'get_forecasting_time_series',
'Monash_forecasting_list', 'forecasting_list', 'convert_tsf_to_dataframe', 'get_Monash_forecasting_data',
'get_forecasting_data']
# Cell
from tqdm import tqdm
import zipfile
import tempfile
try: from urllib import urlretrieve
except ImportError: from urllib.request import urlretrieve
import shutil
import distutils
from ..imports import *
from ..utils import *
from .validation import *
# Cell
# This code was adapted from https://github.com/ChangWeiTan/TSRegression.
# It's used to load time series examples to demonstrate tsai's functionality.
# Copyright for above source is below.
# GNU GENERAL PUBLIC LICENSE
# Version 3, 29 June 2007
# Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
# Everyone is permitted to copy and distribute verbatim copies
# of this license document, but changing it is not allowed.
# Preamble
# The GNU General Public License is a free, copyleft license for
# software and other kinds of works.
# The licenses for most software and other practical works are designed
# to take away your freedom to share and change the works. By contrast,
# the GNU General Public License is intended to guarantee your freedom to
# share and change all versions of a program--to make sure it remains free
# software for all its users. We, the Free Software Foundation, use the
# GNU General Public License for most of our software; it applies also to
# any other work released this way by its authors. You can apply it to
# your programs, too.
# When we speak of free software, we are referring to freedom, not
# price. Our General Public Licenses are designed to make sure that you
# have the freedom to distribute copies of free software (and charge for
# them if you wish), that you receive source code or can get it if you
# want it, that you can change the software or use pieces of it in new
# free programs, and that you know you can do these things.
# To protect your rights, we need to prevent others from denying you
# these rights or asking you to surrender the rights. Therefore, you have
# certain responsibilities if you distribute copies of the software, or if
# you modify it: responsibilities to respect the freedom of others.
# For example, if you distribute copies of such a program, whether
# gratis or for a fee, you must pass on to the recipients the same
# freedoms that you received. You must make sure that they, too, receive
# or can get the source code. And you must show them these terms so they
# know their rights.
# Developers that use the GNU GPL protect your rights with two steps:
# (1) assert copyright on the software, and (2) offer you this License
# giving you legal permission to copy, distribute and/or modify it.
# For the developers' and authors' protection, the GPL clearly explains
# that there is no warranty for this free software. For both users' and
# authors' sake, the GPL requires that modified versions be marked as
# changed, so that their problems will not be attributed erroneously to
# authors of previous versions.
# Some devices are designed to deny users access to install or run
# modified versions of the software inside them, although the manufacturer
# can do so. This is fundamentally incompatible with the aim of
# protecting users' freedom to change the software. The systematic
# pattern of such abuse occurs in the area of products for individuals to
# use, which is precisely where it is most unacceptable. Therefore, we
# have designed this version of the GPL to prohibit the practice for those
# products. If such problems arise substantially in other domains, we
# stand ready to extend this provision to those domains in future versions
# of the GPL, as needed to protect the freedom of users.
# Finally, every program is threatened constantly by software patents.
# States should not allow patents to restrict development and use of
# software on general-purpose computers, but in those that do, we wish to
# avoid the special danger that patents applied to a free program could
# make it effectively proprietary. To prevent this, the GPL assures that
# patents cannot be used to render the program non-free.
# The precise terms and conditions for copying, distribution and
# modification follow.
# TERMS AND CONDITIONS
# 0. Definitions.
# "This License" refers to version 3 of the GNU General Public License.
# "Copyright" also means copyright-like laws that apply to other kinds of
# works, such as semiconductor masks.
# "The Program" refers to any copyrightable work licensed under this
# License. Each licensee is addressed as "you". "Licensees" and
# "recipients" may be individuals or organizations.
# To "modify" a work means to copy from or adapt all or part of the work
# in a fashion requiring copyright permission, other than the making of an
# exact copy. The resulting work is called a "modified version" of the
# earlier work or a work "based on" the earlier work.
# A "covered work" means either the unmodified Program or a work based
# on the Program.
# To "propagate" a work means to do anything with it that, without
# permission, would make you directly or secondarily liable for
# infringement under applicable copyright law, except executing it on a
# computer or modifying a private copy. Propagation includes copying,
# distribution (with or without modification), making available to the
# public, and in some countries other activities as well.
# To "convey" a work means any kind of propagation that enables other
# parties to make or receive copies. Mere interaction with a user through
# a computer network, with no transfer of a copy, is not conveying.
# An interactive user interface displays "Appropriate Legal Notices"
# to the extent that it includes a convenient and prominently visible
# feature that (1) displays an appropriate copyright notice, and (2)
# tells the user that there is no warranty for the work (except to the
# extent that warranties are provided), that licensees may convey the
# work under this License, and how to view a copy of this License. If
# the interface presents a list of user commands or options, such as a
# menu, a prominent item in the list meets this criterion.
# 1. Source Code.
# The "source code" for a work means the preferred form of the work
# for making modifications to it. "Object code" means any non-source
# form of a work.
# A "Standard Interface" means an interface that either is an official
# standard defined by a recognized standards body, or, in the case of
# interfaces specified for a particular programming language, one that
# is widely used among developers working in that language.
# The "System Libraries" of an executable work include anything, other
# than the work as a whole, that (a) is included in the normal form of
# packaging a Major Component, but which is not part of that Major
# Component, and (b) serves only to enable use of the work with that
# Major Component, or to implement a Standard Interface for which an
# implementation is available to the public in source code form. A
# "Major Component", in this context, means a major essential component
# (kernel, window system, and so on) of the specific operating system
# (if any) on which the executable work runs, or a compiler used to
# produce the work, or an object code interpreter used to run it.
# The "Corresponding Source" for a work in object code form means all
# the source code needed to generate, install, and (for an executable
# work) run the object code and to modify the work, including scripts to
# control those activities. However, it does not include the work's
# System Libraries, or general-purpose tools or generally available free
# programs which are used unmodified in performing those activities but
# which are not part of the work. For example, Corresponding Source
# includes interface definition files associated with source files for
# the work, and the source code for shared libraries and dynamically
# linked subprograms that the work | |
# -*- coding: utf-8 -*-
"""
Liouville pathways and their analysis
"""
import numpy
from ..utils.types import Integer
#from ..core.units import cm2int
from ..core.managers import UnitsManaged
import quantarhei as qr
class liouville_pathway(UnitsManaged):
order = Integer("order")
nint = Integer("nint")
def __init__(self, ptype, sinit, aggregate=False,
order=3, pname="",relax_order=0, popt_band=0):
"""Liouville pathway through a molecular aggregate
Liouville pathway is represented as a sequence of transitions
on the right and left hand sides of a double sided Feynman diagram
Parameters
----------
ptype : str {"R", "NR", "DC"}
type of the pathway
sinit
starting state (all pathways start from diagonal density matrix)
aggregate
specifies the aggregate object in which the Liouville pathway is taken
order
order of the pathway. Default value is 3. This is also currently
the only value allowed.
"""
if not aggregate:
raise Exception("aggregate has to be specified")
# initial state of the pathway
self.sinit = numpy.zeros(2,dtype=numpy.int16)
self.sinit[0] = sinit
self.sinit[1] = sinit
# order of the pathway
self.order = order
# order of the pathways in terms of relaxation events
self.relax_order = relax_order
# list events associated with relaxations and light interations
self.event = [None]*(1+order+relax_order)
# type of the pathway (rephasing, non-rephasing, double-coherence)
self.pathway_type = ptype
# pathway name (one of the standard names)
self.pathway_name = pname
# aggregate for which the pathway is made
self.aggregate = aggregate
# current state of the pathway (during building)
self.current = numpy.zeros(2,dtype=numpy.int16)
self.current[0] = sinit
self.current[0] = sinit
# index counting the number of interactions with light
self.nint = 0
# index counting relaxations
self.nrel = 0
# index of events in the diagram (interactions and relaxations)
self.ne = 0
# light induced transitions
self.transitions = numpy.zeros((order+1,2), dtype=numpy.int)
# relaxation induced transitions
self.relaxations = [None]*relax_order
# sides from which the transitions occurred
self.sides = numpy.zeros(order+1,dtype=numpy.int16)
self.states = numpy.zeros((1+order+relax_order,2),dtype=numpy.int)
# transition dipole moments associated with the transition
self.dmoments = numpy.zeros((order+1,3))
# FIXME: we probably do not need the energy property at all
# energy of the transition
self.energy = numpy.zeros(order+1)
# frequency of the transition
self.frequency = numpy.zeros(1+order+relax_order)
# orientational prefactor (negative means that it is not initialized)
self.pref = -1.0
# factor from evolution (super)operator
self.evolfac = 1.0
# transition widths
self.widths = None
# transition dephasings
self.dephs = None
# band through which the pathway travels at population time
self.popt_band = popt_band
# quantity used to evaluate orientational averaging
self.F4n = numpy.zeros(3)
# was the pathway already built?
self.built = False
def _chars(self, lx, rx, char=" "):
nsp = 10
if isinstance(lx,str):
ln = 0
else:
lxs = "%i"%lx
rxs = "%i"%rx
ln = len(lxs)+len(rxs)
spc = char
for scount in range(nsp-ln):
spc += char
return spc
    def __str__(self):
        """String representation of the Liouville pathway

        Renders the double-sided Feynman diagram as text. Events are
        visited in time order and each finished row is *prepended* to
        the accumulated string, so the earliest event ends up at the
        bottom of the diagram, with a header added on top at the end.
        """
        k = 3  # NOTE(review): decremented below but never read - apparently dead
        # output string - starts empty
        out = ""
        # number of events (light interactions + relaxations)
        # equals to the number of horizotal lines in the diagram
        noe = 1+self.order+self.relax_order
        ii = 0 # index of light interactions
        rr = 0 # index of relaxations
        # iterate over horizoltal lines
        for ee in range(noe):
            # what is the event type
            if self.event[ee] == "I":
                # side on which the interaction occurs
                sd = self.sides[ii]
                # first we start with both sides from self.sinit
                if ii == 0:
                    lx = self.sinit[0]
                    rx = self.sinit[0]
                    spc = self._chars(lx, rx)
                    out = (" |%i%s%i| \n") % (lx, spc, rx)
                # now it depends if the interaction is from left or right
                if sd == 1: # interaction from left
                    # NOTE(review): the hard-coded 3 below assumes order == 3
                    if ii != 3: # if this is not the last interaction print
                        # also the frequency
                        spc = self._chars(self.transitions[ii,0], rx)
                        ene = self.convert_energy_2_current_u(self.frequency[ee])
                        outr = (" |%i%s%i| %r\n" %
                                (self.transitions[ii,0],
                                 spc, rx,
                                 numpy.round(ene)))
                    else: # last interaction
                        spc = self._chars(self.transitions[ii,0], rx)
                        outr = (" |%i%s%i| \n" %
                                (self.transitions[ii,0],
                                 spc, rx))
                    # output the arrow
                    spc = self._chars("", "", char="-")
                    outr += ("--->|%s| \n" % spc)
                    # and an empty space
                    spc = self._chars("", "", char=" ")
                    outr += (" |%s| \n" % spc)
                    # all this is appended at the end
                    out = outr+out
                    lx = self.transitions[ii,0]
                else: # interaction from the right
                    if ii != 3: # if this is not the last interaction
                        # print also the frequency
                        spc = self._chars(lx, self.transitions[ii,0])
                        ene = self.convert_energy_2_current_u(self.frequency[ee])
                        outl = (" |%i%s%i| %r\n" %
                                (lx, spc, self.transitions[ii,0],
                                 numpy.round(ene)))
                    # actually, iteraction from the right as last does not
                    # occur by convention
                    # output the arrow
                    spc = self._chars("", "", char="-")
                    outl += (" |%s|<--- \n") % spc #, self.frequency[ee])
                    # and an empty space
                    spc = self._chars("", "", char=" ")
                    outl += (" |%s| \n") % spc
                    # append everything at the end
                    out = outl+out
                    rx = self.transitions[ii,0]
                ii += 1
            elif self.event[ee] == "R":
                # relaxation event: both sides jump to the relaxed states
                lf = self.relaxations[rr][0][0]
                rf = self.relaxations[rr][0][1]
                ene = self.convert_energy_2_current_u(self.frequency[ee])
                spc = self._chars(lf, rf, char=" ")
                outR = " |%i%s%i| %r\n" % (lf, spc, rf,
                                           numpy.round(ene))
                spc = self._chars("", "", char="*")
                outR += " >>|%s|<< \n" % spc
                spc = self._chars("", "", char=" ")
                outR += " |%s| \n" % spc
                out = outR+out
                lx = lf
                rx = rf
                rr += 1
            else:
                raise Exception("Unknown event type")
            k -= 1
        # header (name, type, weighting prefactor) goes on top
        outd = ("\n\nLiouville Pathway %s (type = %s) \n" %
                (self.pathway_name, self.pathway_type))
        outd += ("Weighting prefactor: %r \n\n" % self.pref)
        out = outd+out
        return out
def add_transition(self, transition, side,
interval=0, width=-1.0, deph=-1.0):
""" Adds a transition to the Liouville pathway.
Parameters
----------
transition
Tuple such as (8,2) specifying the states in the Hamiltonian
between which the transition proceeds. The order of the initial
and final states is from right to left. So in this case the
starting state is 2 and the final state is 8.
side
Takes values of +1 and -1 one denoting the side of the diagram
where the interaction occurs. +1 stands for the left,
-1 for the right. The sign of the pathway is given by the number
of interaction on the right.
"""
# final state of the transition
nf = transition[0]
# initial state of the transition
ni = transition[1]
# which side is interacted on? Left or right?
sd = (abs(side)-side)//2
text = ["right","left"]
# check if the transition start is consistent with current state
# in the diagram
if (self.current[sd] != ni):
raise Exception(("Transition on the {} hand"+
"side of the diagram has to start from state {}").format(text[sd],
self.current[sd]))
# save the transition associated with this interaction
self.transitions[self.nint,:] = transition
# save the side on which the transition occurs
self.sides[self.nint] = side
if interval > 0:
if self.widths is None:
self.widths = numpy.zeros(4, qr.REAL)
self.widths[:] = -1.0
self.dephs = numpy.zeros(4, qr.REAL)
self.dephs[:] = -1.0
# transition width
self.widths[interval] = width
# transition dephasing
self.dephs[interval] = deph
# save the current
self.current[sd] = nf
self.states[self.ne,0] = self.current[0]
self.states[self.ne,1] = self.current[1]
"""
Some values can be stored locally - they are also
in the aggregate object
"""
#self.energy[self.nint] = \
# (self.aggregate.HH[nf,nf]/cm2int
# -self.aggregate.HH[ni,ni]/cm2int)
self.energy[self.nint] = \
(self.aggregate.HH[nf,nf]
-self.aggregate.HH[ni,ni])
self.dmoments[self.nint,:] = \
self.aggregate.DD[nf,ni,:]
# check if the number of interactions is not larger than the order
# of the pathway
if self.nint < self.order:
nl = self.current[0]
np = self.current[1]
el = self.aggregate.HH[nl,nl]
ep = self.aggregate.HH[np,np]
self.frequency[self.ne] = el - ep
elif self.nint > self.order:
etext = ("Number of interactions larger than the order"
+" of the pathway.")
raise Exception(etext)
| |
3)]
differentiator = 'enr'
class ConsentCompoundId(DatasetCompoundId):
    """
    The compound id for a consent record.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'consent']
    containerIds = DatasetCompoundId.containerIds + [('consent_id', 3)]
    differentiator = 'con'
class DiagnosisCompoundId(DatasetCompoundId):
    """
    The compound id for a diagnosis.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'diagnosis']
    containerIds = DatasetCompoundId.containerIds + [('diagnosis_id', 3)]
    differentiator = 'dia'
class SampleCompoundId(DatasetCompoundId):
    """
    The compound id for a sample.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'sample']
    containerIds = DatasetCompoundId.containerIds + [('sample_id', 3)]
    differentiator = 'sam'
class TreatmentCompoundId(DatasetCompoundId):
    """
    The compound id for a treatment.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'treatment']
    containerIds = DatasetCompoundId.containerIds + [('treatment_id', 3)]
    differentiator = 'tre'
class OutcomeCompoundId(DatasetCompoundId):
    """
    The compound id for an outcome record.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'outcome']
    containerIds = DatasetCompoundId.containerIds + [('outcome_id', 3)]
    differentiator = 'out'
class ComplicationCompoundId(DatasetCompoundId):
    """
    The compound id for a complication.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'complication']
    containerIds = DatasetCompoundId.containerIds + [('complication_id', 3)]
    differentiator = 'com'
class TumourboardCompoundId(DatasetCompoundId):
    """
    The compound id for a tumourboard record.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'tumourboard']
    containerIds = DatasetCompoundId.containerIds + [('tumourboard_id', 3)]
    differentiator = 'tum'
class ChemotherapyCompoundId(DatasetCompoundId):
    """
    The compound id for a chemotherapy record.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'chemotherapy']
    containerIds = DatasetCompoundId.containerIds + [('chemotherapy_id', 3)]
    differentiator = 'che'
class RadiotherapyCompoundId(DatasetCompoundId):
    """
    The compound id for a radiotherapy record.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'radiotherapy']
    containerIds = DatasetCompoundId.containerIds + [('radiotherapy_id', 3)]
    differentiator = 'rad'
class SurgeryCompoundId(DatasetCompoundId):
    """
    The compound id for a surgery record.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'surgery']
    containerIds = DatasetCompoundId.containerIds + [('surgery_id', 3)]
    differentiator = 'sur'
class ImmunotherapyCompoundId(DatasetCompoundId):
    """
    The compound id for an immunotherapy record.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'immunotherapy']
    containerIds = DatasetCompoundId.containerIds + [('immunotherapy_id', 3)]
    differentiator = 'imm'
class CelltransplantCompoundId(DatasetCompoundId):
    """
    The compound id for a cell transplant record.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'celltransplant']
    containerIds = DatasetCompoundId.containerIds + [('celltransplant_id', 3)]
    differentiator = 'cel'
class SlideCompoundId(DatasetCompoundId):
    """
    The compound id for a slide.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'slide']
    containerIds = DatasetCompoundId.containerIds + [('slide_id', 3)]
    differentiator = 'sli'
class StudyCompoundId(DatasetCompoundId):
    """
    The compound id for a study.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'study']
    containerIds = DatasetCompoundId.containerIds + [('study_id', 3)]
    differentiator = 'stu'
class LabtestCompoundId(DatasetCompoundId):
    """
    The compound id for a lab test.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'labtest']
    containerIds = DatasetCompoundId.containerIds + [('labtest_id', 3)]
    differentiator = 'lab'
class ExtractionCompoundId(DatasetCompoundId):
    """
    The compound id for extraction metadata.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'extraction']
    containerIds = DatasetCompoundId.containerIds + [('extraction_id', 3)]
    differentiator = 'ext'
class SequencingCompoundId(DatasetCompoundId):
    """
    The compound id for sequencing metadata.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'sequencing']
    containerIds = DatasetCompoundId.containerIds + [('sequencing_id', 3)]
    differentiator = 'seq'
class AlignmentCompoundId(DatasetCompoundId):
    """
    The compound id for alignment tool metadata.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'alignment']
    containerIds = DatasetCompoundId.containerIds + [('alignment_id', 3)]
    differentiator = 'aln'
class VariantCallingCompoundId(DatasetCompoundId):
    """
    The compound id for variant calling metadata.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'variant_calling']
    containerIds = DatasetCompoundId.containerIds + [('variant_calling_id', 3)]
    differentiator = 'vac'
class FusionDetectionCompoundId(DatasetCompoundId):
    """
    The compound id for fusion detection metadata.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'fusion_detection']
    containerIds = DatasetCompoundId.containerIds + [('fusion_detection_id', 3)]
    differentiator = 'fdn'
class ExpressionAnalysisCompoundId(DatasetCompoundId):
    """
    The compound id for expression analysis metadata.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'expression_analysis']
    containerIds = DatasetCompoundId.containerIds + [('expression_analysis_id', 3)]
    differentiator = 'exa'
class BiosampleCompoundId(DatasetCompoundId):
    """
    The compound id for a biosample.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'biosample']
    containerIds = DatasetCompoundId.containerIds + [('biosample_id', 2)]
    differentiator = 'b'
class ExperimentCompoundId(CompoundId):
    """
    The compound id for an experiment.

    NOTE: unlike most ids in this module, experiment ids are top level
    (derived directly from CompoundId) rather than scoped to a dataset.
    """
    fields = ['experiment']
    containerIds = [('experiment_id', 0)]
class AnalysisCompoundId(CompoundId):
    """
    The compound id for an analysis.

    NOTE: unlike most ids in this module, analysis ids are top level
    (derived directly from CompoundId) rather than scoped to a dataset.
    """
    fields = ['analysis']
    containerIds = [('analysis_id', 0)]
class VariantAnnotationSetCompoundId(VariantSetCompoundId):
    """
    The compound id for a variant annotation set.
    """
    fields = VariantSetCompoundId.fields + ['variant_annotation_set']
    containerIds = VariantSetCompoundId.containerIds + [
        ('variant_annotation_set_id', 3)]
class VariantSetMetadataCompoundId(VariantSetCompoundId):
    """
    The compound id for a variant set metadata entry, keyed by `key`.
    """
    fields = VariantSetCompoundId.fields + ['key']
    containerIds = VariantSetCompoundId.containerIds + [
        ('variant_set_metadata_id', 2)]
class VariantCompoundId(VariantSetCompoundId):
    """
    The compound id for a variant, identified within its variant set by
    reference name, start position and md5 digest.
    """
    fields = VariantSetCompoundId.fields + ['reference_name', 'start', 'md5']
class VariantAnnotationCompoundId(VariantAnnotationSetCompoundId):
    """
    The compound id for a variant annotation.
    """
    fields = VariantAnnotationSetCompoundId.fields + [
        'reference_name', 'start', 'md5']
class VariantAnnotationSetAnalysisCompoundId(VariantAnnotationSetCompoundId):
    """
    The compound id for a variant annotation set's Analysis.
    """
    fields = VariantAnnotationSetCompoundId.fields + ['analysis']
class CallSetCompoundId(VariantSetCompoundId):
    """
    The compound id for a callset, identified within its variant set by name.
    """
    fields = VariantSetCompoundId.fields + ['name']
class FeatureSetCompoundId(DatasetCompoundId):
    """
    The compound id for a feature set.
    """
    fields = DatasetCompoundId.fields + ['feature_set']
    containerIds = DatasetCompoundId.containerIds + [('feature_set_id', 1)]
class FeatureCompoundId(FeatureSetCompoundId):
    """
    The compound id class for a feature.
    """
    fields = FeatureSetCompoundId.fields + ['featureId']
class ContinuousSetCompoundId(DatasetCompoundId):
    """
    The compound id for a continuous set.
    """
    fields = DatasetCompoundId.fields + ['continuous_set']
    containerIds = DatasetCompoundId.containerIds + [('continuous_set_id', 1)]
class ReadGroupSetCompoundId(DatasetCompoundId):
    """
    The compound id for a read group set.
    """
    fields = DatasetCompoundId.fields + [
        CompoundId.differentiatorFieldName, 'read_group_set']
    containerIds = DatasetCompoundId.containerIds + [('read_group_set_id', 2)]
    differentiator = 'rgs'
class ReadGroupCompoundId(ReadGroupSetCompoundId):
    """
    The compound id for a read group.
    """
    fields = ReadGroupSetCompoundId.fields + ['read_group']
    containerIds = ReadGroupSetCompoundId.containerIds + [('read_group_id', 3)]
class ReadAlignmentCompoundId(ReadGroupSetCompoundId):
    """
    The compound id for a read alignment.
    """
    fields = ReadGroupSetCompoundId.fields + ['read_alignment']
    containerIds = ReadGroupSetCompoundId.containerIds + \
        [('read_alignment_id', 2)]
class RnaQuantificationSetCompoundId(DatasetCompoundId):
    """
    The compound id for an RNA quantification set.
    """
    fields = DatasetCompoundId.fields + ['rna_quantification_set']
    container = [('rna_quantification_set_id', 1)]
    containerIds = DatasetCompoundId.containerIds + container
class RnaQuantificationCompoundId(RnaQuantificationSetCompoundId):
    """
    The compound id for an RNA quantification.
    """
    fields = RnaQuantificationSetCompoundId.fields + ['rna_quantification']
    container = [('rna_quantification_id', 2)]
    containerIds = RnaQuantificationSetCompoundId.containerIds + container
class ExpressionLevelCompoundId(RnaQuantificationCompoundId):
    """
    The compound id for an expression level.
    """
    fields = RnaQuantificationCompoundId.fields + ['expression_level_id']
class DatamodelObject(object):
"""
Superclass of all datamodel types. A datamodel object is a concrete
representation of some data, either a single observation (such as a
read) or an aggregated set of related observations (such as a dataset).
Every datamodel object has an ID and a localId. The ID is an identifier
which uniquely idenfifies the object within a server instance. The
localId is a name that identifies the object with a given its
parent container.
"""
compoundIdClass = None
""" The class for compoundIds. Must be set in concrete subclasses. """
    def __init__(self, parentContainer, localId):
        """Create the object and derive its compound id.

        `parentContainer` may be None for top-level objects; otherwise
        the parent's compound id becomes the prefix of this object's id.
        `localId` is this object's name within its parent.
        """
        self._parentContainer = parentContainer
        self._localId = localId
        parentId = None
        if parentContainer is not None:
            parentId = parentContainer.getCompoundId()
        # compoundIdClass must be set by the concrete subclass;
        # the base class leaves it as None.
        self._compoundId = self.compoundIdClass(parentId, localId)
        # attributes message (dict) and cached attribute objects
        self._attributes = {}
        self._objectAttr = {}
def getId(self):
"""
Returns the string identifying this DatamodelObject within the
server.
"""
return str(self._compoundId)
def getCompoundId(self):
"""
Returns the CompoundId instance that identifies this object
within the server.
"""
return self._compoundId
def getLocalId(self):
"""
Returns the localId of this DatamodelObject. The localId of a
DatamodelObject is a name that identifies it within its parent
container.
"""
return self._localId
def getParentContainer(self):
"""
Returns the parent container for this DatamodelObject. This the
object that is one-level above this object in the data hierarchy.
For example, for a Variant this is the VariantSet that it belongs
to.
"""
return self._parentContainer
def setAttributes(self, attributes):
"""
Sets the attributes message to the provided value.
"""
self._attributes = attributes
def setAttributesJson(self, attributesJson):
"""
Sets the attributes dictionary from a JSON string.
"""
if attributesJson is not None:
self._attributes = json.loads(attributesJson)
else:
self._attributes = {}
def validateAttribute(self, attribute_name, attributes, tier=0):
"""
Return True if the access level is higher than the required, False otherwise.
"""
if attribute_name.endswith("Tier"):
return False
else:
attrTierObj = attributes.get(attribute_name + 'Tier', None)
if attrTierObj is not None:
attrTier = attrTierObj[0].get('int32Value', None)
if attrTierObj is None or attrTier is None or tier < attrTier:
return False
else:
return True
def serializeMetadataAttributes(self, msg, tier=0):
"""
Sets the attrbutes of a message for metadata during serialization.
"""
attributes = self.getAttributes()
for attribute_name in attributes:
if self.validateAttribute(attribute_name, attributes, tier) is True:
values = []
for dictionary in attributes[attribute_name]:
for key in dictionary:
values.append(dictionary[key])
| |
<reponame>sn0b4ll/Incident-Playbook
# -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
Provides parsing helper functions.
"""
# standard library
from copy import deepcopy
import logging
import os
import re
# third party
from bson import ObjectId
from bs4 import BeautifulSoup
LOGGER = logging.getLogger(__name__)
def html_to_text(html):
    """Strip HTML tags from a string.

    Parameters
    ----------
    html : |str|
        Text containing HTML tags.

    Returns
    -------
    |str|
        Text without HTML tags. The string is indented according to
        where tags were nested, but blank lines are removed.

    Example
    -------
    >>> html = ('<html><head></head><body><p>Hi!<br>Here is the '
    >>> '<a href="https://www.python.org">link</a> you wanted.</p></html>')
    >>> html_to_text(html)
    Hi!
    Here is the
    link
    you wanted.
    """
    parser = 'html5lib'
    # Prettify first so nested tags land on their own indented lines, then
    # extract the text from the re-parsed pretty markup.
    prettified = BeautifulSoup(html, parser).prettify()
    text = BeautifulSoup(prettified, parser).get_text()
    # Keep only lines that contain something other than whitespace.
    nonblank_lines = [line for line in text.splitlines() if line.strip()]
    return os.linesep.join(nonblank_lines)
def get_dict_value(field_name, doc):
    """Return the value of a dictionary item.

    Parameters
    ----------
    field_name : |str|
        The key to look up. For a nested dictionary, use dot notation
        (e.g., 'parentkey.childkey'). You may also reference an array
        value by its index (e.g., 'tags[0]' or 'a.b[0][1]').

    doc : |dict|
        A data dictionary.

    Returns
    -------
    any type
        The value associated with the given `field_name` from the `doc`,
        or None when `field_name` is not a string or does not match the
        document's structure.

    Examples
    --------
    >>> get_dict_value('a.b.c', {'a': {'b': {'c': 100}}})
    100

    >>> doc = {'a': {'b': [{'c': [100, [15, 20]]}, {'d': 40}], 'e': 10}}
    >>> get_dict_value('a.b[0].c[1][1]', doc)
    20
    """
    def get_array_value(doc, key):
        """
        Takes a dictionary (doc) and a string (key) representing a dictionary
        key and one or more array indexes, e.g. "location[0][1]". Returns the
        corresponding value from the dictionary.
        """
        # split key at the start of array values, e.g. 'loc[0]' -> ['loc', '0]']
        key_parts = key.split('[')
        # the first part is the name of the parent field, e.g. "loc"
        key = key_parts.pop(0)
        # the dictionary value is the outermost array
        array = doc[key]
        while len(key_parts) > 0:
            # strip the trailing bracket and use the index as an integer
            index = int(key_parts.pop(0).replace(']', ''))
            value = array[index]
            # step into the next (nested) array
            array = value
        return value

    if isinstance(field_name, str):
        doc_copy = deepcopy(doc)
        # parse the string into a list of keys
        keys = field_name.split('.')
        value = ''
        try:
            while len(keys) > 0:
                if isinstance(doc_copy, dict):
                    # Bug fix: the stripped key was previously discarded
                    # (`key.strip()` without assignment), so names with
                    # surrounding whitespace never matched.
                    key = keys.pop(0).strip()
                    # check if the key includes an array index (e.g., 'location[0]')
                    if '[' in key:
                        value = get_array_value(doc_copy, key)
                    else:
                        value = doc_copy[key]
                    doc_copy = value
                else:
                    return doc_copy
        except (KeyError, IndexError):
            # Missing key or out-of-range array index: the field_name does
            # not match this document, so signal "not found" with None.
            return None
        return value
def merge_dict(target, addition):
    """Merge additional keys into a target dictionary.

    Mutates `target` in place: keys whose values are dicts in both
    `target` and `addition` are merged recursively; every other key from
    `addition` overwrites (or creates) the corresponding key in `target`.

    Parameters
    ----------
    target : dict
        The dict to mutate.

    addition : dict
        The dict containing keys to merge into the `target` dict.

    Returns
    -------
    dict
        The mutated `target` dict. (Bug fix: the docstring promised a
        dict but the function previously returned None.)
    """
    for key in addition:
        if key in target and isinstance(target[key], dict) \
                and isinstance(addition[key], dict):
            # Both sides hold dicts for this key: merge them recursively.
            merge_dict(target[key], addition[key])
        else:
            target[key] = addition[key]
    return target
def abridge_dict(schema, data):
    """Abridge a data document to only include specified fields.

    Parameters
    ----------
    schema : |list| of |DataFields|
        The fields that should be included in the abridged dict. Each
        field's `field_name` may use dot notation for nested values.

    data : dict
        The dictionary to be abridged.

    Returns
    -------
    dict
        A new dict containing only the schema fields present in `data`.
    """
    abridged_dict = {}
    for field in schema:
        value = get_dict_value(field.field_name, data)
        # Bug fix: only missing values (None) are skipped; falsy values
        # such as 0, False, or '' are legitimate data and are kept.
        if value is not None:
            # Rebuild the nesting implied by the dotted field name, from
            # the innermost key outward.
            keys = field.field_name.split('.')
            val = {keys.pop(-1): value}
            while len(keys):
                val = {keys.pop(-1): val}
            merge_dict(abridged_dict, val)
    return abridged_dict
def divide_into_groups(items, max_group_size):
    """Divide a list of items into a list of smaller lists.

    Parameters
    ----------
    items : |list|
        The items to divide into groups.

    max_group_size : |int|
        The maximum number of items per group. Must be greater than 0.

    Returns
    -------
    |list| of |list|
        A list of lists dividing the items into groups not exceeding the
        maximum group size.

    Raises
    ------
    ValueError
        If `max_group_size` is not greater than 0. (Bug fix: a previous
        docstring example incorrectly showed `[[]]` for a group size of
        0, contradicting the ValueError the code raises.)

    Examples
    --------
    >>> divide_into_groups(['a', 'b', 'c', 'd', 'e'], 2)
    [['a', 'b'], ['c', 'd'], ['e']]

    >>> divide_into_groups(['a', 'b', 'c', 'd', 'e'], 6)
    [['a', 'b', 'c', 'd', 'e']]
    """
    if max_group_size <= 0:
        raise ValueError('maximum group size must be greater than 0')
    # Copy so mutable items in the returned groups are independent of the
    # caller's list, matching the previous implementation's behavior.
    items_copy = deepcopy(items)
    return [items_copy[start:start + max_group_size]
            for start in range(0, len(items_copy), max_group_size)]
def extract_substring(string, left, right, right_to_left=False):
"""Return a substring from a string.
Parameters
----------
string : |str|
A string to be parsed.
left : |str|
A character representing the left bound of a target substring.
right : |str|
A character representing the right bound of a target substring.
right_to_left : |bool|, optional
Whether the `string` should be searched from right to left.
Returns
-------
|str|
The substring between the specified bounds.
Examples
--------
>>> extract_substring('dup key : { "123" }', ':', '}')
' { "123" '
>>> extract_substring('$_id_1', '$', '_', True)
'_id'
"""
if right_to_left:
l_index = string.rfind(left) + len(left)
r_index = string.rfind(right)
else:
l_index = string.find(left) + len(left)
r_index = string.find(right)
return string[l_index:r_index]
def string_to_bool(value):
    """Recast a str representation of a Boolean to bool.

    Parameters
    ----------
    value : |str|
        The value to convert (e.g., 'false').

    Returns
    -------
    |bool|
        The Boolean value of `value`.

    Notes
    -----
    If `value` is not 'true', 'True', 'false', or 'False', the function
    simply converts the value to its Boolean truthiness.

    Examples
    --------
    >>> string_to_bool('true')
    True

    >>> string_to_bool('False')
    False

    >>> string_to_bool('0')
    True

    >>> string_to_bool(0)
    False
    """
    # Guard clauses for the two recognized spellings; anything else falls
    # through to plain truthiness.
    if value in ('true', 'True'):
        return True
    if value in ('false', 'False'):
        return False
    return bool(value)
def restore_type(field_type, value):
    """Recast a string value as a given type.

    Parameters
    ----------
    field_type : {'BooleanField', 'CharField', 'FloatField', 'IntegerField'}
        A model field type.

    value : |str|
        The value to recast.

    Returns
    -------
    |bool|, |float|, |int|, or |str|
        The value recast to the specified type. An unrecognized
        `field_type` returns the value unchanged.

    Examples
    --------
    >>> restore_type('BooleanField', 'false')
    False

    >>> restore_type('CharField', '0')
    '0'

    >>> restore_type('FloatField', '1.2')
    1.2

    >>> restore_type('IntegerField', '1')
    1
    """
    if field_type == 'BooleanField':
        return string_to_bool(value)
    if field_type == 'CharField':
        return str(value)
    if field_type == 'FloatField':
        return float(value)
    if field_type == 'IntegerField':
        return int(value)
    # Unknown field types pass the value through unchanged.
    return value
def restore_type_from_str(string):
    """Restore a value to its inferred type.

    Parameters
    ----------
    string : str
        A |str| representation of a string, ObjectId, integer, or null
        value.

    Returns
    -------
    |str|, |ObjectId|, |int|, or |None|

    Examples
    --------
    >>> restore_type_from_str('"123"')
    '123'

    >>> restore_type_from_str("ObjectId('123')")
    ObjectId('123')

    >>> restore_type_from_str('123')
    123

    >>> restore_type_from_str('_123')
    '_123'

    >>> restore_type_from_str('null')
    None
    """
    # Bug fix: startswith() is safe on an empty string, whereas the
    # previous string[0] lookup raised IndexError for ''.
    if string.startswith('"'):
        return string.replace('"', '')
    elif string.startswith("'"):
        return string.replace("'", '')
    elif string.startswith('ObjectId'):
        match = re.match(r'ObjectId\(\'(?P<id>.*)\'\)', string)
        return ObjectId(match.group('id'))
    elif string == 'null':
        return None
    else:
        try:
            return int(string)
        except ValueError:
            # Not an integer: keep it as a plain string.
            return string
def get_dup_key_val(errmsg):
    """Return the duplicate key referenced in an error message.

    Parameters
    ----------
    errmsg : |str|
        A pymongo `DuplicateKeyError` message.

    Returns
    -------
    |dict|
        The key(s) and value(s) of the duplicate key.

    Example
    -------
    >>> errmsg = ('insertDocument :: caused by :: 11000 E11000 duplicate '
    >>>           'key error collection: cyphon.posts index: '
    >>>           '_platform_1_doc_id_1 dup key: { : twitter", : '
    >>>           '"ObjectId(\'5543769ef861c942838c7ee9\') }')
    >>> get_dup_key_val(errmsg)
    {'_platform': 'twitter', '_doc_id': ObjectId('5543769ef861c942838c7ee9')}
    """
    # Split the message into the index description and the value portion.
    msg = errmsg.split(' dup key: { ')
    # NOTE(review): 'right' is passed where the boolean right_to_left flag
    # is expected; any non-empty string is truthy, so this searches
    # right-to-left -- confirm this was intentional.
    key = extract_substring(msg[0], 'index: ', '_', 'right').strip()
    val = extract_substring(msg[1], ':', '}').strip()
    # parse compound indexes
    keys = re.split(r'_[0-9]+_', key)
    # NOTE(review): the function body is truncated at this point in the
    # visible source; 'values' below is an incomplete statement -- confirm
    # against the full source.
    values
<gh_stars>0
# -*- coding:utf-8 -*-
"""
.. module:: ETFL
:platform: Unix, Windows
:synopsis: flux balance models accounting for expression, thermodynamics, and resource allocation constraints
.. moduleauthor:: ETFL team
Core for the ME-part
"""
from typing import Generic
import numpy as np
import optlang
import pandas as pd
import sympy
from cobra import Model, Reaction, Gene
from cobra.core import Solution, DictList
from collections import defaultdict, OrderedDict
from Bio.SeqUtils import molecular_weight
from tqdm import tqdm
from .ion import Ion
from .carbohydrate import Carbohydrate
from .lipid import Lipid
from ..utils.parsing import parse_gpr
from ..utils.utils import replace_by_enzymatic_reaction, replace_by_me_gene, \
replace_by_coding_gene
from .genes import ExpressedGene, CodingGene
from .dna import DNA
from .rna import mRNA,rRNA, tRNA
from .enzyme import Enzyme, Peptide
from .reactions import EnzymaticReaction, ProteinComplexation, \
TranslationReaction, TranscriptionReaction, DegradationReaction, DNAFormation
from .expression import build_trna_charging, enzymes_to_gpr_no_stoichiometry, \
make_stoich_from_aa_sequence, make_stoich_from_nt_sequence, \
degrade_peptide, degrade_mrna, _extract_trna_from_reaction
from ..optim.constraints import CatalyticConstraint, ForwardCatalyticConstraint,\
BackwardCatalyticConstraint, EnzymeMassBalance, \
rRNAMassBalance, mRNAMassBalance, tRNAMassBalance, DNAMassBalance, \
GrowthCoupling, TotalCapacity, ExpressionCoupling, EnzymeRatio, \
GrowthChoice, EnzymeDegradation, mRNADegradation,\
LinearizationConstraint, SynthesisConstraint, SOS1Constraint,\
InterpolationConstraint, RNAPAllocation, LipidMassBalance, IonMassBalance,\
CarbohydrateMassBalance, MinimalCoupling, MinimalAllocation
from ..optim.variables import ModelVariable, GrowthActivation, \
EnzymeVariable, LinearizationVariable, RibosomeUsage, RNAPUsage, NonAIDRNAPUsage, \
FreeEnzyme, BinaryActivator, InterpolationVariable, DNAVariable, \
GrowthRate, GenericVariable
from .allocation import add_dummy_expression,add_dummy_mrna,add_dummy_peptide,\
add_dummy_protein, add_interpolation_variables,\
MRNA_WEIGHT_CONS_ID, PROT_WEIGHT_CONS_ID, \
MRNA_WEIGHT_VAR_ID, PROT_WEIGHT_VAR_ID, \
DNA_WEIGHT_CONS_ID, DNA_WEIGHT_VAR_ID, DNA_FORMATION_RXN_ID, \
define_prot_weight_constraint, define_mrna_weight_constraint, \
define_dna_weight_constraint, get_dna_synthesis_mets
from pytfa.core.model import LCSBModel
from pytfa.optim.reformulation import petersen_linearization
from pytfa.optim.utils import chunk_sum, symbol_sum
from pytfa.utils.logger import get_bistream_logger
from pytfa.utils.str import camel2underscores
from pytfa.optim.utils import copy_solver_configuration
def id_maker_rib_rnap(the_set):
    """Concatenate a collection of ribosome / RNAP ids into one id string.

    Note: iteration order of a plain ``set`` is arbitrary, so the result
    is only deterministic for ordered iterables.
    """
    return ''.join(the_set)
class MEModel(LCSBModel, Model):
def __init__(self, model=Model(), name = None,
growth_reaction='',
mu_range = None, n_mu_bins = 1,
big_M = 1000,
*args, **kwargs):
"""
:param model: The input model
:type model: cobra.Model
:param mu: (Facultative) Mean growth rate to constraint the model
:param mu_error: (Facultative) Absolute error on mu to constraint the model
:type mu_error: float > 0
:param mu_range: (Facultative) Min-Max growth rate to constraint the model
:type mu_range: tuple (l,u)
:param n_mu_bins: (Facultative) In how many intervals to separate the
growth rate for the linearization
:param args:
:param kwargs:
"""
name = 'ETFL_' + name if name is not None else 'ETFL_model'
LCSBModel.__init__(self, model, name)
self.logger = get_bistream_logger('ME model' + str(self.name))
self.parent = model
if model is not None:
self.sanitize_varnames()
self.init_etfl(big_M, growth_reaction, mu_range,
n_mu_bins, name)
    def init_etfl(self, big_M, growth_reaction, mu_range, n_mu_bins, name):
        """
        Initialize the ETFL-specific state of the model.

        :param big_M: big-M constant used by indicator-style constraints
        :param growth_reaction: id of the growth (biomass) reaction
        :param mu_range: (l, u) growth-rate bounds, or None for an empty model
        :param n_mu_bins: number of intervals for the growth-rate linearization
        :param name: model name (used for logging only)
        """
        self.big_M = big_M
        # Lookup tables for optimization variables and constraints.
        self._var_dict = dict()
        self._cons_dict = dict()
        self.logger.info('# ETFL Model {} initialized'.format(name))
        self._growth_reaction_id = growth_reaction
        self._mu_range = mu_range
        self._n_mu_bins = n_mu_bins
        if mu_range is not None:
            # Growth-rate variable, bounded by the supplied range; must be
            # created before init_mu_variables() discretizes it.
            self._mu = self.add_variable(kind=GrowthRate,
                                        hook=self,
                                        id_='total', # Will read MU_total
                                        lb=mu_range[0],
                                        ub=mu_range[1])
            self.init_mu_variables()
        else:
            # message = """ You need to supply mu_range."""
            message = "Empty model initialized"
            # raise ValueError(message)
            self.logger.info(message)
        # Expression-related lookup tables, filled by later build steps.
        self.aa_dict = dict()
        self.rna_nucleotides = dict()
        self.trna_dict = dict()
        self.dilution_terms = dict()
        # Collections of expression machinery species and reactions.
        self.enzymes = DictList()
        self.mrnas = DictList()
        self.rrnas = DictList()
        self.trnas = DictList()
        self.peptides = DictList()
        self.transcription_reactions = DictList()
        self.translation_reactions = DictList()
        self.complexation_reactions = DictList()
        self.degradation_reactions = DictList()
        self.dna = None
        self.ribosome = OrderedDict()
        self.rnap = OrderedDict()
        self.coupling_dict = dict()
    @property
    def mu(self):
        # The GrowthRate variable object; only set when a mu_range was
        # supplied at construction time.
        return self._mu
    @property
    def mu_max(self):
        # Upper bound of the growth-rate range supplied at construction.
        return self._mu_range[-1]
# @mu.setter
# def mu(self, val, epsilon = None):
# if epsilon is None:
# epsilon = self.solver.configuration.tolerances.feasibility
#
# self._mu.lb = val-epsilon
# self._mu.ub = val+epsilon
def make_mu_bins(self):
from numpy import linspace
bounds = linspace(self.mu.variable.lb, self.mu.variable.ub, self._n_mu_bins+1)
bins = zip(bounds[:-1], bounds[1:])
self.mu_bins = tuple(((x[0]+x[1])/2, x) for x in bins)
    @property
    def n_mu_bins(self):
        # Number of growth-rate bins currently stored in mu_bins.
        return len(self.mu_bins)
def init_mu_variables(self):
"""
Necessary for the zeroth order approximation of mu:
.. math::
mu \in [0.1, 0.9] , nbins = 8
=> mu = 0.15 OR mu = 0.25 OR ... OR mu = 0.85
Using binary expansion of the bins instead of a list of 0-1s
described `here <https://orinanobworld.blogspot.ch/2013/07/integer-variables-and-quadratic-terms.html>`_
:return:
"""
self.make_mu_bins()
ga = list()
N = self.n_mu_bins
n_vars = np.int(np.ceil(np.log2(N)))
for e in range(n_vars):
ga.append(self.add_variable(kind=GrowthActivation,
hook=self,
id_=str(2 ** e)))
# Force that only one growth range can be chosen:
# b0*2^0 + b1*2^1 + b2*2^2 + ... + bn*2^n <= n_bins
choice_expr = sum(ga)
self.add_constraint(kind=GrowthChoice,
hook=self,
expr=choice_expr,
id_='growth',
ub=self.n_mu_bins,
lb=0)
# Couple growth
v_fwd = self.growth_reaction.forward_variable
v_bwd = self.growth_reaction.reverse_variable
# |v_net - mu| <= bin_width
bin_half_width = max([(x[1] - x[0]) / 2 for _, x in self.mu_bins])
the_integer = symbol_sum([(2 ** i) * ga_i for i, ga_i in enumerate(ga)])
binarized_mu = self.mu.variable.lb + the_integer * self.mu_approx_resolution
growth_coupling_expr = v_fwd - v_bwd - binarized_mu
self.add_constraint(kind=GrowthCoupling,
hook=self.growth_reaction,
expr=growth_coupling_expr,
ub=bin_half_width,
lb=-1 * bin_half_width)
# So that the solver spends less time looking for an ub on the objective
# when optimizing for growth
self.growth_reaction.upper_bound = self.mu.variable.ub + self.mu_approx_resolution
# Update the variable indices
self.regenerate_variables()
self.regenerate_constraints()
    @property
    def mu_approx_resolution(self):
        # Width of one growth-rate bin: the linearization cannot resolve
        # mu more finely than this.
        return (self.mu.variable.ub - self.mu.variable.lb) / self.n_mu_bins
@property
def growth_reaction(self):
"""
Returns the growth reaction of the model. Useful because tied to the
growth variable
:return:
"""
if self._growth_reaction_id:
return self.reactions.get_by_id(self._growth_reaction_id)
else:
return None
@growth_reaction.setter
def growth_reaction(self, reaction_id):
"""
The growth_reaction is set by supplying the id of the candidate reaction
:param reaction_id: an id within the model
:type reaction_id: str
:return:
"""
rxn = self.reactions.get_by_id(reaction_id)
self._growth_reaction_id = rxn.id
def add_nucleotide_sequences(self, sequences):
"""
:param sequences:
:return:
"""
for gene_id, seq in sequences.items():
if gene_id in self.genes:
new = replace_by_me_gene(self, gene_id, seq)
else:
self.logger.warning('Model has no gene {}, Adding it'.format(gene_id))
new = CodingGene(id= gene_id, name = gene_id, sequence=seq)
self.add_genes([new])
self._make_peptide_from_gene(gene_id)
def add_transcription_by(self, transcription_dict):
for gene_id, transcribed_by in transcription_dict.items():
# transcribed_by is a list of rnap(s)
try:
self.genes.get_by_id(gene_id).transcribed_by = transcribed_by
except KeyError:
# the gene is not in the model
continue
def add_translation_by(self, translation_dict):
for gene_id, translated_by in translation_dict.items():
# translated_by is a list of rnap(s)
try:
self.genes.get_by_id(gene_id).translated_by = translated_by
except KeyError:
# the gene is not in the model
continue
def add_min_tcpt_activity(self, min_act_dict):
for gene_id, min_tcpt_activity in min_act_dict.items():
# min_tcpt_activity is the fraction of maximal capacity that should be forced
try:
self.genes.get_by_id(gene_id).min_tcpt_activity = min_tcpt_activity
except KeyError:
# the gene is not in the model
continue
def add_min_tnsl_activity(self, min_act_dict):
for gene_id, min_tnsl_activity in min_act_dict.items():
# min_tnsl_activity is the fraction of maximal capacity that should be forced
try:
self.genes.get_by_id(gene_id).min_tnsl_activity = min_tnsl_activity
except KeyError:
# the gene is not in the model
continue
def _make_peptide_from_gene(self, gene_id):
free_pep = Peptide(id=gene_id,
name='Peptide, {}'.format(gene_id),
gene_id=gene_id)
free_pep._model = self
self.peptides += [free_pep]
def add_peptide_sequences(self, aa_sequences):
for pep_id, seq in aa_sequences.items():
if pep_id in self.peptides:
self.peptides.get_by_id(pep_id).peptide = seq
else:
self.logger.warning('Model has no peptide {}'.format(pep_id))
continue
    def add_dummies(self, nt_ratios, mrna_kdeg, mrna_length, aa_ratios,
                    enzyme_kdeg, peptide_length, transcribed_by=None,
                    translated_by=None):
        """
        Create dummy peptide and mrna to enforce mrna and peptide production.
        Can be used to account for the missing data for all mrnas and proteins.

        :param nt_ratios: nucleotide-letter composition of the dummy mRNA
            (presumably fractions summing to 1 -- confirm against callers)
        :param mrna_kdeg: degradation rate constant for the dummy mRNA
        :param mrna_length: length (in nucleotides) of the dummy mRNA
        :param aa_ratios: amino-acid-letter composition of the dummy peptide
            (presumably fractions summing to 1 -- confirm against callers)
        :param enzyme_kdeg: degradation rate constant for the dummy protein
        :param peptide_length: length (in amino acids) of the dummy peptide
        :param transcribed_by: RNAP(s) assigned to the dummy gene (optional)
        :param translated_by: ribosome(s) assigned to the dummy gene (optional)
        :return:
        """
        add_interpolation_variables(self)
        # Create a dummy gene and override the sequences with input data
        dummy_sequence = 'N'*mrna_length
        dummy_gene = CodingGene(id='dummy_gene',
                                name='Dummy Gene',
                                sequence=dummy_sequence)
        dummy_gene._rna = dummy_sequence
        dummy_gene._peptide = 'X'*peptide_length
        dummy_gene.transcribed_by = transcribed_by
        dummy_gene.translated_by = translated_by
        self.add_genes([dummy_gene])
        # Build the dummy mRNA, peptide, and protein, then wire up their
        # expression reactions.
        dummy_mrna = add_dummy_mrna(self, dummy_gene, mrna_kdeg, mrna_length, nt_ratios)
        dummy_peptide = add_dummy_peptide(self, aa_ratios, dummy_gene, peptide_length)
        dummy_protein = add_dummy_protein(self, dummy_peptide, enzyme_kdeg)
        add_dummy_expression(self, aa_ratios, dummy_gene, dummy_peptide, dummy_protein,
                             peptide_length)
    def add_essentials(self, essentials, aa_dict, rna_nucleotides,
                       rna_nucleotides_mp):
        """
        Marks important metabolites for expression.

        :param essentials: A dictionary of important metabolites to met id

        **Example :**

        .. code-block:: python

            essentials = {
                        'atp': 'atp_c',
                        'adp': 'adp_c',
                        'amp': 'amp_c',
                        ...
                        'h2o': 'h2o_c',
                        'h': 'h_c'}
                        }

        :param aa_dict: A dictionary of aminoacid letter to amicoacid met id

        **Example :**

        .. code-block:: python

            aa_dict = {
                        'A':'ala__L_c',
                        'R':'arg__L_c',
                        ...
                    }

        :param rna_nucleotides: A dictionary of RNA nucleotide triphosphate
            letter to nucleotideTP met id

        **Example :**

        .. code-block:: python

            rna_nucleotides = {
                        'A':'atp_c',
                        'U':'utp_c',
                        ...
                    }

        :param rna_nucleotides_mp: A dictionary of RNA nucleotide monophosphate
            letter to nucleotideMP met id

        **Example :**

        .. code-block:: python

            rna_nucleotides_mp = {
                        'A':'amp_c',
                        'U':'ump_c',
                        ...
                    }

        :return:
        """
        # Stored verbatim; consumed later by the expression-building steps.
        self.essentials = essentials
        self.aa_dict = aa_dict
        self.rna_nucleotides = rna_nucleotides
        self.rna_nucleotides_mp = rna_nucleotides_mp
def build_expression(self):
"""
Given a dictionary from amino acids nucleotides to metabolite names,
goes through the list of genes in the model that have sequence
| |
"""
Endpoints file , for api code
"""
import logging
import os
import json
import redis
from celery import Celery
from app import Resource, request
from models import CustomActionsModel, ProjectsModel, \
CopyProjectModel, DomainsModel, ConversationsModel
from models import RefreshDbModel, IntentsModel, \
IntentDetailModel, ResponseModel, ResponseDetailModel
from models import StoryModel, StoryDetailModel, \
EntityModel, ExportProjectModel, ImportProjectModel, \
ValidateData
from export import Export
from rasa.run import create_agent
import asyncio
# Set logger
logger = logging.getLogger('flask.app')
logging.basicConfig(level=logging.DEBUG)
# Init model classes
# NOTE(review): each assignment below rebinds the class name to a singleton
# instance of that class, shadowing the imported class. It works, but makes
# further instantiation impossible in this module.
CustomActionsModel = CustomActionsModel()
ProjectsModel = ProjectsModel()
CopyProjectModel = CopyProjectModel()
DomainsModel = DomainsModel()
ConversationsModel = ConversationsModel()
RefreshDbModel = RefreshDbModel()
IntentsModel = IntentsModel()
IntentDetailModel = IntentDetailModel()
ResponseModel = ResponseModel()
ResponseDetailModel = ResponseDetailModel()
StoryDetailModel = StoryDetailModel()
StoryModel = StoryModel()
EntityModel = EntityModel()
ExportProjectModel = ExportProjectModel()
ImportProjectModel = ImportProjectModel()
Export = Export()
# Setting Expiry for redis cache (seconds)
GLOBAL_EXPIRY = 60
# Initiate redis
try:
    # NOTE(review): '<PASSWORD>' looks like a redacted placeholder for the
    # real env-var name (presumably 'REDIS_PASS') -- confirm before deploy.
    r = redis.Redis(host=os.environ['REDIS_URL'],
                    port=os.environ['REDIS_PORT'],
                    charset="utf-8", decode_responses=True,
                    password=os.environ['<PASSWORD>'])
    logger.info("Trying to connect to Redis Docker container ")
except KeyError:
    # Any missing env var above lands here: fall back to a local Redis.
    logger.debug("Local run connecting to Redis ")
    r = redis.Redis(host='localhost', port=6379, charset="utf-8", decode_responses=True)
# Connect to celery task Queue
# NOTE(review): this os.environ['REDIS_PASS'] access is NOT guarded, so a
# local run without that variable raises KeyError at import time -- confirm
# whether local runs are expected to set it.
redis_url = 'redis://:' + os.environ['REDIS_PASS'] + '@redis:6379/0'
trainer_app = Celery('simple_worker', broker=redis_url, backend=redis_url)
# noinspection PyMethodMayBeStatic
class CustomActionsAPI(Resource):
    """CRUD endpoints for custom actions, with a read-through Redis cache."""

    def get(self):
        # Serve from cache when possible; otherwise query the DB and
        # repopulate the cache entry.
        cached = r.get("all_custom_actions")
        if cached is not None:
            return json.loads(cached)
        logging.debug('getting Data from DB')
        result = CustomActionsModel.get_all_custom_actions()
        r.set("all_custom_actions", json.dumps(result), ex=GLOBAL_EXPIRY)
        return result

    def post(self):
        payload = request.get_json(force=True)
        result = CustomActionsModel.create_action(payload)
        # Invalidate the cached listing.
        r.delete("all_custom_actions")
        return result

    def put(self):
        # Updating record
        payload = request.get_json(force=True)
        result = CustomActionsModel.update_action(payload)
        r.delete("all_custom_actions")
        return result

    def delete(self):
        # Deleting record
        payload = request.get_json()
        result = CustomActionsModel.delete_action(payload)
        r.delete("all_custom_actions")
        return result
# noinspection PyMethodMayBeStatic
class Projects(Resource):
    """CRUD endpoints for projects, with a read-through Redis cache."""

    def get(self):
        # Serve from cache when possible; otherwise query the DB and
        # repopulate the cache entry.
        cached = r.get("all_projects")
        if cached is not None:
            return json.loads(cached)
        logging.debug('getting Data from DB')
        result = ProjectsModel.get_all_projects()
        r.set("all_projects", json.dumps(result), ex=GLOBAL_EXPIRY)
        return result

    def post(self):
        payload = request.get_json(force=True)
        result = ProjectsModel.create_projects(payload)
        # Invalidate the cached listing.
        r.delete("all_projects")
        return result

    def put(self):
        # Updating record
        payload = request.get_json(force=True)
        result = ProjectsModel.update_project(payload)
        r.delete("all_projects")
        return result

    def delete(self):
        # Deleting record
        object_id = request.get_json()
        result = ProjectsModel.delete_project(object_id)
        r.delete("all_projects")
        return result
# noinspection PyMethodMayBeStatic
class CopyProject(Resource):
    """Endpoint that clones an existing project."""

    def post(self):
        payload = request.get_json(force=True)
        result = CopyProjectModel.copy_project(payload)
        # The project listing changed; drop its cached copy.
        r.delete("all_projects")
        return result
# noinspection PyMethodMayBeStatic
class Domains(Resource):
    """CRUD endpoints for a project's domains, cached per project."""

    def get(self, project_id):
        cache_key = "all_domains_" + str(project_id)
        cached = r.get(cache_key)
        if cached is not None:
            return json.loads(cached)
        logging.debug('getting Data from DB')
        result = DomainsModel.get_all_domains(project_id)
        r.set(cache_key, json.dumps(result), ex=GLOBAL_EXPIRY)
        return result

    def post(self, project_id):
        payload = request.get_json(force=True)
        result = DomainsModel.create_domain(project_id, payload)
        # Invalidate this project's cached domain listing.
        r.delete("all_domains_" + str(project_id))
        return result

    def put(self, project_id):
        # Updating record
        payload = request.get_json(force=True)
        result = DomainsModel.update_domain(project_id, payload)
        r.delete("all_domains_" + str(project_id))
        return result

    def delete(self, project_id):
        # Deleting record
        object_id = request.get_json()
        result = DomainsModel.delete_domain(project_id, object_id)
        r.delete("all_domains_" + str(project_id))
        return result
# noinspection PyMethodMayBeStatic
class Intents(Resource):
    """CRUD endpoints for intents, cached per (project, domain) pair."""

    def get(self):
        project_id = request.args.getlist('project_id')[0]
        domain_id = request.args.getlist('domain_id')[0]
        cache_key = "intents_" + str(project_id) + "_" + str(domain_id)
        cached = r.get(cache_key)
        if cached is not None:
            return json.loads(cached)
        logging.debug('getting Data from DB')
        result = IntentsModel.get_intents(project_id, domain_id)
        r.set(cache_key, json.dumps(result), ex=GLOBAL_EXPIRY)
        return result

    def post(self):
        payload = request.get_json(force=True)
        result = IntentsModel.create_intent(payload)
        # Invalidate the cached listing for this project/domain.
        r.delete("intents_" + str(payload['project_id']) + "_" + str(payload['domain_id']))
        return result

    def put(self):
        # Updating record
        payload = request.get_json(force=True)
        result = IntentsModel.update_intent(payload)
        r.delete("intents_" + str(payload['project_id']) + "_" + str(payload['domain_id']))
        return result

    def delete(self):
        # Deleting record
        payload = request.get_json(force=True)
        result = IntentsModel.delete_intent(payload)
        r.delete("intents_" + str(payload['project_id']) + "_" + str(payload['domain_id']))
        return result
# noinspection PyMethodMayBeStatic
class IntentDetails(Resource):
    """CRUD endpoints for the details of a single intent, cached per id."""

    def get(self, intent_id):
        cache_key = "intent_" + str(intent_id)
        cached = r.get(cache_key)
        if cached is not None:
            return json.loads(cached)
        logging.debug('getting Data from DB')
        result = IntentDetailModel.get_intent_details(intent_id)
        r.set(cache_key, json.dumps(result), ex=GLOBAL_EXPIRY)
        return result

    def post(self, intent_id):
        payload = request.get_json(force=True)
        result = IntentDetailModel.insert_intent_detail(payload)
        # Invalidate this intent's cached details.
        r.delete("intent_" + str(intent_id))
        return result

    def put(self, intent_id):
        # Updating record
        payload = request.get_json(force=True)
        result = IntentDetailModel.update_intent_detail(payload)
        r.delete("intent_" + str(intent_id))
        return result

    def delete(self, intent_id):
        # Deleting record
        payload = request.get_json(force=True)
        result = IntentDetailModel.delete_intent_detail(payload)
        r.delete("intent_" + str(intent_id))
        return result
# noinspection PyMethodMayBeStatic
class Responses(Resource):
    """CRUD endpoints for responses, cached per (project, domain) pair."""

    def get(self):
        project_id = request.args.getlist('project_id')[0]
        domain_id = request.args.getlist('domain_id')[0]
        cache_key = "responses_" + str(project_id) + "_" + str(domain_id)
        cached = r.get(cache_key)
        if cached is not None:
            return json.loads(cached)
        logging.debug('getting Data from DB')
        result = ResponseModel.get_responses(project_id, domain_id)
        r.set(cache_key, json.dumps(result), ex=GLOBAL_EXPIRY)
        return result

    def post(self):
        payload = request.get_json(force=True)
        result = ResponseModel.create_response(payload)
        # Invalidate the cached listing for this project/domain.
        r.delete("responses_" + str(payload['project_id']) + "_" + str(payload['domain_id']))
        return result

    def put(self):
        # Updating record
        payload = request.get_json(force=True)
        result = ResponseModel.update_response(payload)
        r.delete("responses_" + str(payload['project_id']) + "_" + str(payload['domain_id']))
        return result

    def delete(self):
        # Deleting record
        payload = request.get_json(force=True)
        result = ResponseModel.delete_response(payload)
        r.delete("responses_" + str(payload['project_id']) + "_" + str(payload['domain_id']))
        return result
# noinspection PyMethodMayBeStatic
class ResponseDetails(Resource):
    """CRUD endpoints for the details of a single response, cached per id."""

    def get(self, response_id):
        cache_key = "response_" + str(response_id)
        cached = r.get(cache_key)
        if cached is not None:
            return json.loads(cached)
        logging.debug('getting Data from DB')
        result = ResponseDetailModel.get_response_details(response_id)
        r.set(cache_key, json.dumps(result), ex=GLOBAL_EXPIRY)
        return result

    def post(self, response_id):
        payload = request.get_json(force=True)
        result = ResponseDetailModel.insert_response_detail(payload)
        # Invalidate this response's cached details.
        r.delete("response_" + str(response_id))
        return result

    # NOTE: a put/update endpoint was stubbed out here (commented) and
    # never implemented.

    def delete(self, response_id):
        # Deleting record
        payload = request.get_json(force=True)
        result = ResponseDetailModel.delete_response_detail(payload)
        r.delete("response_" + str(response_id))
        return result
# noinspection PyMethodMayBeStatic
class Story(Resource):
    """CRUD endpoints for stories, cached per (project, domain) pair."""

    def get(self):
        project_id = request.args.getlist('project_id')[0]
        domain_id = request.args.getlist('domain_id')[0]
        cache_key = "stories_" + str(project_id) + "_" + str(domain_id)
        cached = r.get(cache_key)
        if cached is not None:
            return json.loads(cached)
        logging.debug('getting Data from DB')
        result = StoryModel.get_stories(project_id, domain_id)
        r.set(cache_key, json.dumps(result), ex=GLOBAL_EXPIRY)
        return result

    def post(self):
        payload = request.get_json(force=True)
        result = StoryModel.create_story(payload)
        # Invalidate the cached listing for this project/domain.
        r.delete("stories_" + str(payload['project_id']) + "_" + str(payload['domain_id']))
        return result

    def put(self):
        # Updating record
        payload = request.get_json(force=True)
        result = StoryModel.update_story(payload)
        r.delete("stories_" + str(payload['project_id']) + "_" + str(payload['domain_id']))
        return result

    def delete(self):
        # Deleting record
        payload = request.get_json(force=True)
        result = StoryModel.delete_story(payload)
        r.delete("stories_" + str(payload['project_id']) + "_" + str(payload['domain_id']))
        return result
# noinspection PyMethodMayBeStatic
class StoryDetails(Resource):
    """CRUD endpoints for the details of a single story.

    BUG FIX: the original cached under ``response_<story_id>`` — the same
    namespace ResponseDetails uses for ``response_<response_id>`` — so a
    story id that collided with a response id could serve stale/wrong data
    or evict the other endpoint's entry. Story details now use their own
    ``story_detail_<story_id>`` namespace.
    """

    # Prefix for this resource's Redis keys (must not collide with others).
    CACHE_PREFIX = "story_detail_"

    def get(self, story_id):
        key = self.CACHE_PREFIX + str(story_id)
        if r.exists(key):
            # Cache hit: serve the stored JSON payload.
            return json.loads(r.get(key))
        # Cache miss: read from the DB and repopulate the cache.
        logging.debug('getting Data from DB')
        result = StoryDetailModel.get_story_details(story_id)
        r.set(key, json.dumps(result), ex=GLOBAL_EXPIRY)
        return result

    def post(self, story_id):
        json_data = request.get_json(force=True)
        result = StoryDetailModel.insert_story_details(json_data)
        # Clear redis cache
        r.delete(self.CACHE_PREFIX + str(story_id))
        return result

    def put(self, story_id):
        # Updating record
        json_data = request.get_json(force=True)
        result = StoryDetailModel.update_story_detail(json_data)
        # Clear redis cache
        r.delete(self.CACHE_PREFIX + str(story_id))
        return result

    def delete(self, story_id):
        # Deleting record
        json_data = request.get_json(force=True)
        result = StoryDetailModel.delete_story_detail(json_data)
        # Clear redis cache
        r.delete(self.CACHE_PREFIX + str(story_id))
        return result
# noinspection PyMethodMayBeStatic
class Entities(Resource):
    """CRUD endpoints for a project's entities, cached per project under
    ``entity_<project_id>``; every write invalidates the entry."""

    def get(self, project_id):
        key = "entity_" + str(project_id)
        if r.exists(key):
            # Cache hit: serve the stored JSON payload.
            return json.loads(r.get(key))
        # Cache miss: read from the DB and repopulate the cache.
        logging.debug('getting Data from DB')
        result = EntityModel.get_entities(project_id)
        r.set(key, json.dumps(result), ex=GLOBAL_EXPIRY)
        return result

    def post(self, project_id):
        payload = request.get_json(force=True)
        result = EntityModel.create_entity(payload)
        # Invalidate the per-project cache entry.
        r.delete("entity_" + str(project_id))
        return result

    def put(self, project_id):
        payload = request.get_json(force=True)
        result = EntityModel.update_entity(payload)
        r.delete("entity_" + str(project_id))
        return result

    def delete(self, project_id):
        payload = request.get_json(force=True)
        result = EntityModel.delete_entity(payload)
        r.delete("entity_" + str(project_id))
        return result
# noinspection PyMethodMayBeStatic
class AllConversations(Resource):
    """Paged listing of all conversations, read straight from the DB."""

    def get(self, page_number, page_size):
        logging.debug('getting Data from DB')
        result = ConversationsModel.get_all_conversations(page_number, page_size)
        # NOTE(review): this key is written but never read here — the cache
        # lookup was disabled when pagination was added, and a single
        # "conversations" key cannot distinguish pages anyway. Confirm
        # whether another consumer still reads it before removing.
        r.set("conversations", json.dumps(result), ex=GLOBAL_EXPIRY)
        return result
# noinspection PyMethodMayBeStatic
class Conversations(Resource):
    """Fetch a single conversation by id (no caching)."""

    def get(self, conversation_id):
        return ConversationsModel.get_conversations(conversation_id)
# noinspection PyMethodMayBeStatic
class RefreshDb(Resource):
def get(self):
result = RefreshDbModel.refresh_db()
r.delete("all_projects")
| |
<filename>python_code/Quadrotor/Quadrotor.py
import math
import numpy as np
import random
import sys
sys.path.append('../../')
from Agent import Agent
# Unit-conversion factors between radians and degrees.
radianToDegree = 180/math.pi
degreeToRadian = math.pi/180
# Rotation Matrix -> QTGM (Quadrotor To Global Matrix)
'''
phi =
theta =
psi = + -> Counter-clockwise (rotate quad coordinate to global coordinate in counter-clockwise in this degree)
'''
# Rotation Matrix -> QTGM (Quadrotor To Global Matrix) Only from Yaw (Psi)
# Rotation Matrix -> QTGM (Quadrotor To Global Matrix) Only from Yaw (Psi)
def QTGMRotationMatrix(psi):
    """Yaw-only rotation matrix taking body-frame vectors to the global frame."""
    c, s = math.cos(psi), math.sin(psi)
    return np.array([
        [c, -s, 0],
        [s, c, 0],
        [0, 0, 1]])


def QTGM(angles, vector):
    """Rotate `vector` from the quadrotor frame to the global frame using
    only the yaw component ``angles[2]``; result rounded to 2 decimals."""
    rotation = QTGMRotationMatrix(angles[2])
    return np.round(rotation.dot(vector), 2)


# Global to Quadrotor Coordinate
def GTQM(angles, vector):
    """Inverse of QTGM: rotate a global-frame vector into the quadrotor
    frame (transpose of the yaw rotation); result rounded to 2 decimals."""
    rotation = QTGMRotationMatrix(angles[2]).T
    return np.round(rotation.dot(vector), 2)
class Quadrotor(object):
def __init__(self, index, name, specs, initialState, initialInput, attitudeControllerPID, positionControllerPID):
    """Simulated quadrotor with PID attitude and position controllers.

    Parameters
    ----------
    index : identifier of this quadrotor within the swarm.
    name : human-readable name.
    specs : dict with physical constants ``mass`` (kg), ``inertia``
        ([Ixx, Iyy, Izz], kg m^2) and ``armLength`` (m).
    initialState : [position (m), velocity (m/s), angles (rad),
        angular rates (rad/s)].
    initialInput : [thrust, moment_x, moment_y, moment_z] initial
        actuator values.
    attitudeControllerPID : rows of [KP, KI, KD] for the phi, theta,
        psi and z_dot loops (in that order).
    positionControllerPID : rows of [KP, KI, KD] for the x, y and z loops.
    """
    self.index = index
    self.name = name
    # Simulation clock and fixed integration step.
    self.t = 0  # s
    self.dt = 1/15  # s
    self.mass = specs["mass"]  # kg
    self.inertia = specs["inertia"]  # [Ixx, Iyy, Izz] # kg m^2
    self.armLength = specs["armLength"]  # , m
    # Quadrotor Initial State
    self.position = np.array(initialState[0], dtype=float)  # m
    self.position_dot = np.array(initialState[1], dtype=float)  # m/s
    # [phi theta psi] # rad
    self.angles = np.array(initialState[2], dtype=float)
    self.angles_dot = np.array(initialState[3], dtype=float)  # rad / s
    # Full state derivative; overwritten each step by updateState().
    self.state_dot = np.array([[0, 0, 0],  # position_dot
                               [0, 0, 0],  # position_dot_dot
                               [0, 0, 0],  # angles_dot
                               [0, 0, 0]], dtype=float)  # angles_dot_dot
    # Initiate Controlled Variable
    self.thrust = initialInput[0]  # kg m rad^2/s^2
    self.moments = [initialInput[1], initialInput[2],
                    initialInput[3]]  # kg m^2 rad^2/s^2
    # Actuator saturation limits.
    self.thrust_max = 60
    self.thrust_min = 0
    self.moment_max = 100
    self.moment_min = -100
    # Attitude Controller with PID: gains per axis.
    self.KP_phi = attitudeControllerPID[0][0]
    self.KI_phi = attitudeControllerPID[0][1]
    self.KD_phi = attitudeControllerPID[0][2]
    self.KP_theta = attitudeControllerPID[1][0]
    self.KI_theta = attitudeControllerPID[1][1]
    self.KD_theta = attitudeControllerPID[1][2]
    self.KP_psi = attitudeControllerPID[2][0]
    self.KI_psi = attitudeControllerPID[2][1]
    self.KD_psi = attitudeControllerPID[2][2]
    # PID error memory (current, previous, running integral) per axis.
    self.phi_err = 0
    self.phi_err_prev = 0
    self.phi_err_sum = 0
    self.theta_err = 0
    self.theta_err_prev = 0
    self.theta_err_sum = 0
    self.psi_err = 0
    self.psi_err_prev = 0
    self.psi_err_sum = 0
    # Vertical-rate (z_dot) loop gains and error memory.
    self.KP_zdot = attitudeControllerPID[3][0]
    self.KI_zdot = attitudeControllerPID[3][1]
    self.KD_zdot = attitudeControllerPID[3][2]
    self.z_dot_err = 0
    self.z_dot_err_prev = 0
    self.z_dot_err_sum = 0
    # Position Controller with PID
    self.x_des_dot_prev = 0
    self.y_des_dot_prev = 0
    self.z_des_dot_prev = 0
    self.x_dot_prev = 0
    self.y_dot_prev = 0
    self.z_dot_prev = 0
    self.KP_x = positionControllerPID[0][0]
    self.KI_x = positionControllerPID[0][1]
    self.KD_x = positionControllerPID[0][2]
    self.KP_y = positionControllerPID[1][0]
    self.KI_y = positionControllerPID[1][1]
    self.KD_y = positionControllerPID[1][2]
    self.KP_z = positionControllerPID[2][0]
    self.KI_z = positionControllerPID[2][1]
    self.KD_z = positionControllerPID[2][2]
    self.x_err = 0
    self.x_err_prev = 0
    self.x_err_sum = 0
    self.y_err = 0
    self.y_err_prev = 0
    self.y_err_sum = 0
    self.z_err = 0
    self.z_err_prev = 0
    self.z_err_sum = 0
    self.x_dot_err = 0
    self.x_dot_err_sum = 0
    self.y_dot_err = 0
    self.y_dot_err_sum = 0
    self.phi_pos_err = 0
    self.phi_pos_err_sum = 0
    self.theta_pos_err = 0
    self.theta_pos_err_sum = 0
    self.z_pos_err = 0
    self.z_pos_err_sum = 0
    # Swarm Control
    self.targetPosition = []
    # AR max angles
    # self.max_phi_theta_psi = 30*degreeToRadian #degree
    # For TA
    # Cap on commanded roll/pitch and on the per-step yaw change (rad).
    self.max_phi_theta_psi = 9*degreeToRadian
    # Yaw Control
    self.yaw_target = 0
def calculateFrictionForce(self, velocity):
    """Quadratic drag force opposing `velocity` (coefficient 10)."""
    # -10*v*|v| equals -10*v**2 for v > 0 and +10*v**2 otherwise,
    # i.e. the force always acts against the direction of motion.
    return -10 * velocity * abs(velocity)
def generateNoise(self):
self.noise = [random.gauss(0, 5), random.gauss(
0, 5), random.gauss(0, 5)]
def getState(self):
    # Snapshot of [position (m), velocity (m/s), angles (deg),
    # angular rates (deg/s)] — angles converted from internal radians.
    return [self.position, self.position_dot, self.angles*radianToDegree, self.angles_dot*radianToDegree]
def getBodyPosition(self):
    """World-frame rotor positions as [front_rotor_xyz, [xs, ys, zs] of
    the other three rotors] (object-dtype array for plotting)."""
    arm = self.armLength
    # Body-frame rotor offsets rotated into the world frame:
    # rotor1 = +x, rotor2 = -x, rotor3 = +y (front), rotor4 = -y.
    rotor1 = self.position + QTGM(self.angles, np.array([arm, 0, 0]))
    rotor2 = self.position + QTGM(self.angles, np.array([-arm, 0, 0]))
    rotor3 = self.position + QTGM(self.angles, np.array([0, arm, 0]))
    rotor4 = self.position + QTGM(self.angles, np.array([0, -arm, 0]))
    others = [rotor1, rotor2, rotor4]
    return np.array([
        [rotor3[0], rotor3[1], rotor3[2]],
        [[rt[0] for rt in others],
         [rt[1] for rt in others],
         [rt[2] for rt in others]],
    ], dtype='object')
def updateState(self):
self.t = round(self.t + self.dt, 3)
# Evaluate Equation of Motions
phi = self.angles[0]
theta = self.angles[1]
psi = self.angles[2]
if self.thrust > self.thrust_max:
self.thrust = self.thrust_max
if self.thrust < self.thrust_min:
self.thrust = self.thrust_min
for moment in self.moments:
if moment > self.moment_max:
moment = self.moment_max
if moment < self.moment_min:
moment = self.moment_min
# Translational Motion
try:
self.state_dot[0] = self.position_dot
self.state_dot[1][0] = 1/self.mass*((math.sin(psi)*math.sin(
phi)+math.cos(psi)*math.sin(theta)*math.cos(phi))*self.thrust + self.calculateFrictionForce(self.position_dot[0]))
self.state_dot[1][1] = -1/self.mass*((-math.cos(psi)*math.sin(
phi)+math.sin(psi)*math.sin(theta)*math.cos(phi))*self.thrust - self.calculateFrictionForce(self.position_dot[1]))
self.state_dot[1][2] = 1/self.mass * \
(math.cos(theta)*math.cos(phi))*self.thrust - 9.81 + \
self.calculateFrictionForce(self.position_dot[2])/self.mass
except:
print('ERROR in calculate Translational Motion')
# print('acceleration', self.state_dot[1])
# Rotational Motion
p = self.angles_dot[0]
q = self.angles_dot[1]
r = self.angles_dot[2]
# try:
# print('ANGLES', np.round(self.angles*radianToDegree, 2))
self.state_dot[2][0] = p + \
math.sin(psi)*math.tan(theta)*q+math.cos(phi)*math.tan(theta)*r
self.state_dot[2][1] = math.cos(phi)*q-math.sin(phi)*r
self.state_dot[2][2] = math.sin(
phi)/math.cos(theta)*q+math.cos(phi)/math.cos(theta)*r
# except:
# print("ERROR in calculate Rotational Motion, Quadrotor index:", self.index)
# print('ANGLES', self.angles)
Ixx = self.inertia[0]
Iyy = self.inertia[1]
Izz = self.inertia[2]
self.state_dot[3][0] = (Iyy-Izz)/Ixx*q*r+self.moments[0]/Ixx
self.state_dot[3][1] = (Izz-Ixx)/Iyy*r*p+self.moments[1]/Iyy
self.state_dot[3][2] = (Ixx-Iyy)/Izz*p*q+self.moments[2]/Izz
# print("WTF", self.index ,self.state_dot[2][2])
# print("HMM", phi, math.sin(
# phi))
# print("CAUSE", self.state_dot[3][2])
# if r*radianToDegree > 0 :
# sys.exit('WTFF')
# Update the state
self.position = self.position + self.state_dot[0] * self.dt
self.position_dot = self.position_dot + self.state_dot[1]*self.dt
self.angles = self.angles + self.state_dot[2] * self.dt
self.angles_dot = self.angles_dot + self.state_dot[3] * self.dt
def controlAttitude(self, attitudeTarget):
    """One PID step tracking attitudeTarget = [phi (deg), theta (deg),
    psi (deg), z_dot (m/s)]; writes self.moments and self.thrust.
    """
    # Add sensor noise (with white noise) with random.gauss(mu, sigma)
    self.generateNoise()
    # Errors in rad; the generated noise terms are currently disabled
    # (the `+ self.noise[i]` additions are commented out).
    # + self.noise[0]
    self.phi_err = attitudeTarget[0]*degreeToRadian - self.angles[0]
    # + self.noise[1]
    self.theta_err = attitudeTarget[1]*degreeToRadian - self.angles[1]
    # + self.noise[2]
    self.psi_err = attitudeTarget[2]*degreeToRadian - self.angles[2]
    # Calculate output for controlled variable
    '''
    self.moments[0] = phi
    self.moments[1] = theta
    self.moments[2] = psi
    self.thrust = position -> z_dot
    '''
    self.moments[0] = self.KP_phi * self.phi_err + self.KI_phi*self.phi_err_sum + \
        self.KD_phi * (self.phi_err - self.phi_err_prev)/self.dt
    self.moments[1] = self.KP_theta * self.theta_err + self.KI_theta*self.theta_err_sum + \
        self.KD_theta * (self.theta_err - self.theta_err_prev)/self.dt
    self.moments[2] = self.KP_psi * self.psi_err + self.KI_psi*self.psi_err_sum + \
        self.KD_psi * (self.psi_err - self.psi_err_prev)/self.dt
    # Integral/previous-error memory is updated AFTER the outputs are
    # computed (rectangular integration) — keep this ordering.
    self.phi_err_prev = self.phi_err
    self.phi_err_sum = self.phi_err_sum + self.phi_err
    self.theta_err_prev = self.theta_err
    self.theta_err_sum = self.theta_err_sum + self.theta_err
    self.psi_err_prev = self.psi_err
    self.psi_err_sum = self.psi_err_sum + self.psi_err
    # Vertical-rate loop: PID on z_dot drives the total thrust.
    self.z_dot_err = attitudeTarget[3] - self.position_dot[2]
    self.thrust = (self.KP_zdot * self.z_dot_err + self.KI_zdot *
                   self.z_dot_err_sum + self.KD_zdot*(self.z_dot_err - self.z_dot_err_prev)/self.dt)
    self.z_dot_err_prev = self.z_dot_err
    self.z_dot_err_sum = self.z_dot_err_sum + self.z_dot_err
def controlPositionYaw(self, positionTarget, yawTarget):
    """Steer toward `positionTarget` (m, global frame) while holding the
    heading `yawTarget` (deg).

    The x/y position loops only engage once the heading error is below
    5 degrees; commanded roll/pitch are saturated to
    ``max_phi_theta_psi`` and the yaw command is rate-limited to the same
    bound per step. Final tracking is delegated to controlAttitude().
    """
    # Target expressed in the quadrotor frame (used by the altitude loop).
    quadPosQC = GTQM(self.angles, self.position)
    targetPosQC = GTQM(self.angles, positionTarget)
    distanceVectorQC = targetPosQC - quadPosQC
    attitudeTarget = [0.0, 0.0, 0.0, 0.0]  # [phi, theta, psi (rad), z_dot]
    if abs(self.angles[2]*radianToDegree - yawTarget) < 5:
        # Heading is close enough: run the x/y position PID loops on the
        # target expressed in the (un-yawed) quadrotor frame.
        quadPosQC = GTQM(-self.angles, self.position)
        targetPosQC = GTQM(-self.angles, positionTarget)
        distanceVectorQC = targetPosQC - quadPosQC
        self.x_err = distanceVectorQC[0]
        self.y_err = distanceVectorQC[1]
        # BUG FIX: in the original, the KI and KD terms were standalone
        # lines beginning with `+` (missing line continuations), i.e.
        # no-op unary-plus statements — only the proportional term ever
        # reached attitudeTarget. The full PID expression is now a single
        # parenthesized expression (compare controlAttitude, which used
        # explicit `\` continuations correctly).
        attitudeTarget[0] = (self.KP_y * self.y_err
                             + self.KI_y * self.y_err_sum
                             + self.KD_y * (self.y_err - self.y_err_prev) / self.dt)
        self.y_err_prev = self.y_err
        self.y_err_sum = self.y_err_sum + self.y_err
        attitudeTarget[1] = (self.KP_x * self.x_err
                             + self.KI_x * self.x_err_sum
                             + self.KD_x * (self.x_err - self.x_err_prev) / self.dt)
        self.x_err_prev = self.x_err
        self.x_err_sum = self.x_err_sum + self.x_err
    # Saturate the commanded roll and pitch.
    if attitudeTarget[0] > self.max_phi_theta_psi:
        attitudeTarget[0] = self.max_phi_theta_psi
    if attitudeTarget[0] < -self.max_phi_theta_psi:
        attitudeTarget[0] = -self.max_phi_theta_psi
    if attitudeTarget[1] > self.max_phi_theta_psi:
        attitudeTarget[1] = self.max_phi_theta_psi
    if attitudeTarget[1] < -self.max_phi_theta_psi:
        attitudeTarget[1] = -self.max_phi_theta_psi
    # Rate-limit the yaw command toward yawTarget.
    attitudeTarget[2] = yawTarget*degreeToRadian
    if attitudeTarget[2] - self.angles[2] > self.max_phi_theta_psi:
        attitudeTarget[2] = self.angles[2] + self.max_phi_theta_psi
    if attitudeTarget[2] - self.angles[2] < -self.max_phi_theta_psi:
        attitudeTarget[2] = self.angles[2] - self.max_phi_theta_psi
    # Altitude loop: PID on the body-frame z offset gives a climb rate.
    self.z_err = distanceVectorQC[2]
    # BUG FIX: same missing-continuation defect as above for KI_z/KD_z.
    attitudeTarget[3] = (self.KP_z * self.z_err
                         + self.KI_z * self.z_err_sum
                         + self.KD_z * (self.z_err - self.z_err_prev) / self.dt)
    self.z_err_prev = self.z_err
    self.z_err_sum = self.z_err_sum + self.z_err
    print('attitude target', attitudeTarget)
    self.controlAttitude(np.array([attitudeTarget[0]*radianToDegree, attitudeTarget[1]*radianToDegree, attitudeTarget[2]*radianToDegree, attitudeTarget[3]]))
def controlPosition(self, positionTarget):
attitudeTarget = np.array([0.0,0.0,0.0,0.0]) # psi theta phi zdot (degree and m/s)
# print('------------------------------')
# print('---- INITIAL ATTITUDE TARGET QUADROTOR- ', self.index, ' ----')
# Yaw Control
distanceVector = positionTarget - self.position
distanceVal = math.sqrt(distanceVector[0]**2 + distanceVector[1]**2)
yaw_target = np.arctan2([distanceVector[1]], [distanceVector[0]])[0]
# yaw_target = math.atan(distanceVector[1]/distanceVector[0])
psi_err = yaw_target - self.angles[2]
### Quad Coordinate (QC)
quadPosQC = GTQM(self.angles, self.position)
targetPosQC = GTQM(self.angles, positionTarget)
distanceVectorQC = targetPosQC - quadPosQC
# print('---- POSITION CONTROLLER QUADROTOR- ', self.index, ' | |
import os
import xml.etree.ElementTree as et
import pickle
# Suffix appended to the results filename by shrink_results() when the
# shrink is not done in place.
SHRUNK_RES_SUFFIX = '_shrunk'
# XML namespace used by Adams .res files; namespaced tags are written as
# '{namespace}Tag' in ElementTree.
XML_REF = 'http://www.mscsoftware.com/:xrf10'
STEPMAP_TAG = 'StepMap'
ENTITY_TAG = 'Entity'
COMPONENT_TAG = 'Component'
STEP_TAG = '{' + XML_REF + '}' + 'Step'
def get_results(result_file, reqs_to_get=None, t_min=None, t_max=None, return_units=False, overwrite_pickle=True, use_iterparse=False):
    """Gets results from an Adams results (.res) file.

    Example
    -------
    >>> result_file = 'example.res'
    >>> reqs_to_get = {'MSE': ['Instantaneous_Bottom_MSE', 'Filtered_Surface_MSE'],
    ...                'ROP_controls': ['Command_ROP', 'True_WOB']}
    >>> requests, units = get_results(result_file, reqs_to_get, 70, 80, return_units=True)

    Note
    ----
    This function only works with Requests (not Result Sets) and only with
    xml results files.

    Parameters
    ----------
    result_file : str
        Filename of an Adams results (.res) file
    reqs_to_get : dict, optional
        Dictionary of requests to extract (default None, which gets all)
    t_min : float, optional
        Minimum time for which to extract results (default None)
    t_max : float, optional
        Maximum time for which to extract results (default None)
    return_units : bool, optional
        Also return a units dictionary (default False)
    overwrite_pickle : bool, optional
        If False, reuse an existing pickle cache next to `result_file`
    use_iterparse : bool, optional
        Stream the file with ElementTree.iterparse instead of building
        the full DOM (lower peak memory for large files)

    Returns
    -------
    dict
        Dictionary of request data (with a 'time' key)
    dict
        Units per request component. Only returned if `return_units=True`
    """
    # Hidden cache files stored next to the results file.
    pickle_filename = os.path.join(os.path.dirname(result_file), '.' + os.path.splitext(os.path.split(result_file)[-1])[0] + '.pkl')
    units_pickle_filename = os.path.join(os.path.dirname(result_file), '.' + os.path.splitext(os.path.split(result_file)[-1])[0] + '_units.pkl')

    if overwrite_pickle is False and os.path.isfile(pickle_filename) and (os.path.isfile(units_pickle_filename) or return_units is False):
        # Load from a pickle file (cache already contains the 'time' key).
        with open(pickle_filename, 'rb') as fid:
            reqs_to_return = pickle.load(fid)
        if return_units is True:
            with open(units_pickle_filename, 'rb') as fid:
                units = pickle.load(fid)
    else:
        if use_iterparse is False:
            # Parse the whole results file into a DOM.
            res_tree = et.parse(result_file)
            # Pick out the requested entities; get their units and xml ids.
            units, req_ids, reqs_to_get = _get_units_and_ids(res_tree, reqs_to_get)
            # Initialize the output requests dictionary
            reqs_to_return = {req: {req_comp: [] for req_comp in reqs_to_get[req]} for req in reqs_to_get}
            time = []
            # Make a list of all the data nodes that represent dynamic sims
            dyn_data_nodes = [node for node in res_tree.iter('{'+XML_REF+'}Data') if 'dynamic_' in node.attrib.get('name')]
            del res_tree
            for data_node in dyn_data_nodes:
                _process_data_node(data_node, reqs_to_return, time, req_ids, t_max, t_min)
        else:
            # Streaming variant: a first pass builds the id/units maps,
            # then iterparse walks the file node by node.
            units, req_ids, reqs_to_get = _get_units_and_ids(et.parse(result_file), reqs_to_get)
            reqs_to_return = {req: {req_comp: [] for req_comp in reqs_to_get[req]} for req in reqs_to_get}
            time = []
            for _event, data_node in et.iterparse(result_file):
                if data_node.tag == '{'+XML_REF+'}Data' and 'dynamic_' in data_node.attrib.get('name'):
                    _process_data_node(data_node, reqs_to_return, time, req_ids, t_max, t_min)
        # BUG FIX / restructure: adding the time list and writing the pickle
        # cache now happens for BOTH parse paths and is skipped on the
        # cache-load path, where `time` is undefined and the cache is
        # already on disk (the original placed these lines so that one of
        # those paths either crashed with NameError or never cached).
        reqs_to_return['time'] = time
        with open(pickle_filename, 'wb') as fid:
            pickle.dump(reqs_to_return, fid)
        if return_units is True:
            with open(units_pickle_filename, 'wb') as fid:
                pickle.dump(units, fid)

    if return_units:
        return reqs_to_return, units
    else:
        return reqs_to_return
def _process_data_node(data_node, reqs_to_return : dict, time : list, req_ids : dict, t_max : float, t_min : float):
for step_node in data_node:
# For each step (only one) in the model input data, put the
# old step data into a list
step_data = step_node.text.replace('\n',' ').split(' ')
t_step = round(float(step_data[1]),3)
if (t_min is None or t_step >= t_min) and (t_max is None or t_step <= t_max):
# If the time falls within the desired time range, add to the time list
time.append(t_step)
# Add to the req_comp list
for req in reqs_to_return:
for req_comp in reqs_to_return[req]:
req_id = int(req_ids[req][req_comp])
reqs_to_return[req][req_comp].append(float(step_data[req_id]))
elif t_max is not None and t_step > t_max:
break
def shrink_results(result_file, reqs_to_keep=None, t_min=None, t_max=None, new_result_file=None, in_place=False):
    """Shrinks a results file by eliminating unwanted data.

    Builds a brand-new results tree containing only the requested
    request components and only the steps inside [t_min, t_max], then
    writes it out.

    Example
    -------
    >>> result_file = 'example.res'
    >>> t_min = 70
    >>> t_max = 80
    >>> reqs_to_keep = {}
    >>> reqs_to_keep['MSE'] = ['Instantaneous_Bottom_MSE', 'Filtered_Surface_MSE']
    >>> reqs_to_keep['ROP_controls'] = ['Command_ROP', 'True_WOB']
    >>> shrink_results(result_file, reqs_to_keep, t_min, t_max)

    Note
    ----
    This function only works with Requests. It does not work with Result Sets.

    Parameters
    ----------
    result_file : str
        Filename of the results file
    reqs_to_keep : dict
        Dictionary of requests and request components that should be saved.
    t_min : float, optional
        Start time of period of interest (default is None which uses the first time step)
    t_max : float, optional
        End time of period of interest (default is None which uses the last time step)
    new_result_file : str
        Name that will be given to the new results file. (Default is None which adds a '_shrunk' suffix to `result_file` if in_place=False or uses `result_file` if in_place=True.)
    in_place : bool, optional
        If True, existing file will be deleted.
    """
    input_tree = et.parse(result_file)
    # First (and only expected) Analysis node; its attributes are copied.
    input_ans_node = [ans for ans in input_tree.iter('{'+XML_REF+'}Analysis')][0]
    # Create `units` and `old_ids` dictionaries
    units, old_ids, _reqs_to_keep = _get_units_and_ids(input_tree, reqs_to_keep)
    # If `reqs_to_keep` is None, set it equal to all requests
    if reqs_to_keep is None:
        reqs_to_keep = _reqs_to_keep
    # Create New XML tree mirroring the Results/Analysis/StepMap/Data layout.
    root_res = et.Element('Results', xmlns=XML_REF)
    root_ans = et.SubElement(root_res, 'Analysis', name=input_ans_node.attrib['name'], executionDate=input_ans_node.attrib['executionDate'], Solver=input_ans_node.attrib['Solver'], script=input_ans_node.attrib['script'])
    root_map = et.SubElement(root_ans, 'StepMap', name='map_001')
    ent_time = et.SubElement(root_map, 'Entity', name='time')
    root_data = et.SubElement(root_ans, 'Data', name='dynamic_001', id='1')
    # Create the time component (id 1 is reserved for TIME).
    et.SubElement(ent_time, 'Component', name='TIME', unitsValue='sec', id='1')
    # Create StepMap: re-number the kept components sequentially from 2.
    new_id = 2
    for request in reqs_to_keep:
        ent_map = et.SubElement(root_map, 'Entity', name=request, entity=request, entType='Request', objectId='0')
        for req_comp in reqs_to_keep[request]:
            et.SubElement(ent_map, 'Component', name=req_comp, plotLabel=units[request][req_comp], id=str(new_id))
            new_id += 1
    # Make a list of all the data nodes that represent dynamic sims
    dyn_data_nodes = [node for node in input_tree.iter('{'+XML_REF+'}Data') if 'dynamic_' in node.attrib.get('name')]
    # Loop through the dynamic sim data nodes
    for data_node in dyn_data_nodes:
        for step_node in data_node:
            # For each step (only one) in the model input data, put the
            # old step data into a list
            old_step_data = step_node.text.replace('\n',' ').split(' ')
            t_step = round(float(old_step_data[1]),3)
            if (t_min is None or t_step >= t_min) and (t_max is None or t_step <= t_max):
                # If the time falls within the desired time range add to `new_step_data`
                new_step_data = _get_new_steplist(reqs_to_keep, old_step_data, old_ids)
                # Create a string containing the model input data
                new_dynamic_text = _convert_steplist_to_string(new_step_data)
                # Rewrite the SubElement text
                xml_step = et.SubElement(root_data, 'Step', type='dynamic')
                xml_step.text = new_dynamic_text
    # Determine the output file if it wasn't given or if the one that was
    # provided is equal to the original ('.tmp' is used for in-place swaps).
    if new_result_file is None or new_result_file == result_file:
        new_result_file = result_file + '.tmp' if in_place is True else result_file.replace('.res', '') + f'{SHRUNK_RES_SUFFIX}.res'
    # Write the output file
    output_tree = et.ElementTree(root_res)
    _xml_format(root_res)
    output_tree.write(new_result_file, encoding='utf-8', xml_declaration=True)
    # If in_place is set to true, remove the original results file
    if in_place is True:
        os.remove(result_file)
    # If a .tmp extension was used, rename over the (now removed) original.
    if new_result_file.endswith('.tmp'):
        os.rename(new_result_file, new_result_file.replace('.tmp',''))
def _get_new_steplist(reqs_to_keep, old_step_data, req_ids):
"""Returns a list similar to `old_step_data` but with unwanted requests removed.
Uses the requests and request components in `old_step_data` and the entitiy ids in `req_ids` to determine which elements in `old_step_data` to keep.
Parameters
----------
reqs_to_keep : dict
Dictionary of requests and request components to keep
old_step_data : list
List of all the step data in the results file
req_ids : dict
Dictionary of entity ids for the entire results file
Returns
-------
list
List of just the step data to keep
"""
# Start a new list with just the time element from `old_step_data`
new_step_data = [old_step_data[1]]
# Loop through the desired requests and components to pull elements
# from `old_step_data` into `new_step_data`
for request in reqs_to_keep:
# For each desired request
for req_comp in reqs_to_keep[request]:
# For each desired request component, add that components
# step data to `new_step_data`
req_id = int(req_ids[request][req_comp])
new_step_data.append(old_step_data[req_id])
return new_step_data
def _convert_steplist_to_string(step_data):
"""Converts list of step data into a single string.
Parameters
----------
step_data : list
List of step data
Returns
-------
str
A space delimited string where every 6th value is followed by a newline.
"""
text = ''
for i, datum in enumerate(step_data):
if i == 0:
text += f'\n{datum}\n'
else:
if i%6 == 0:
text += f'{datum}\n'
else:
text += f'{datum} '
return text
def _xml_format(element, level=0):
    """Makes the root text look prettier.

    Recursively sets each element's `text`/`tail` to newline-plus-indent
    strings so that the serialized XML is indented by nesting depth.

    Parameters
    ----------
    element : Element
        :class:`Element` whose text will be formatted.
    level : int, optional
        Tree level of `element` (the default is 0)
    """
    i = '\n' + level*" "
    if len(element):
        # Branch node: open its content one level deeper.
        if not element.text or not element.text.strip():
            element.text = i + " "
        if not element.tail or not element.tail.strip():
            element.tail = i
        for elem in element:
            _xml_format(elem, level+1)
        # NOTE(review): the canonical pretty-print recipe fixes the LAST
        # child's tail here (elem.tail); this re-checks element.tail, which
        # is a no-op after the assignment above — confirm intended.
        if not element.tail or not element.tail.strip():
            element.tail = i
    else:
        # Leaf node: only the tail needs adjusting (never for the root).
        if level and (not element.tail or not element.tail.strip()):
            element.tail = i
def _get_units_and_ids(tree, reqs=None):
"""Loops over all the *Entity* nodes in `tree` and picks out the ones requested in `reqs`. Returns dictionaries of their units and original ids.
Parameters
----------
tree : ElementTree
Root xml tree for an Adams results file
reqs : dict, optional
Dictionary of requests and request components to keep. (Default is None, which gets all the requests)
Returns
-------
dict
Two level dictionary indicating the units associated with the request components
dict
Two level dictionary indicating the xml ids associated with the request components
dict
Two level dictionary indicating the requests and request components NOTE: This is identical to the `reqs` argument unless the reqs argument is None.)
"""
units = {}
req_ids = {}
requests = {}
# Make a list of the request entities in `tree`
req_ents = [ent for ent in tree.iter('{'+XML_REF+'}Entity') | |
<filename>MetaScreener/external_sw/mgltools/lib/python2.7/site-packages/openbabel.py<gh_stars>1-10
# On Linux, switch dlopen to RTLD_GLOBAL before loading the C extension so
# OpenBabel's shared libraries can resolve symbols across plugin modules.
import sys
if sys.platform.find('linux') != -1:
    try:
        import dl
    except ImportError:
        # Python builds without the `dl` module expose the flags via DLFCN.
        import DLFCN as dl
    sys.setdlopenflags(sys.getdlopenflags() | dl.RTLD_GLOBAL)
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
# Locate and load the SWIG-generated C extension `_openbabel` in a way that
# works across Python 2 and Python 3.
from sys import version_info
if version_info >= (3,0,0):
    # Python 3: plain functions already act as methods on the class.
    new_instancemethod = lambda func, inst, cls: _openbabel.SWIG_PyInstanceMethod_New(func)
else:
    from new import instancemethod as new_instancemethod
if version_info >= (2,6,0):
    def swig_import_helper():
        # Prefer the _openbabel next to this module so a same-named module
        # elsewhere on sys.path cannot shadow it.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_openbabel', [dirname(__file__)])
        except ImportError:
            # Not found beside the package: fall back to a normal import.
            import _openbabel
            return _openbabel
        if fp is not None:
            try:
                _mod = imp.load_module('_openbabel', fp, pathname, description)
            finally:
                fp.close()
            return _mod
    _openbabel = swig_import_helper()
    del swig_import_helper
else:
    import _openbabel
del version_info
# Alias `property` for use in the generated proxy classes below.
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # Attribute writes on SWIG proxies: `thisown`/`this` manage the wrapped
    # C++ object's ownership/pointer; other names go through the class's
    # registered setter when one exists, else fall back to the instance
    # dict — unless `static`, in which case new attributes are rejected.
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Non-static variant: unknown names are stored on the instance dict.
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    # `thisown` reflects ownership of the wrapped C++ object; other reads
    # go through the class's registered SWIG getters.
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)
def _swig_repr(self):
    # Show which C++ object the proxy wraps; fall back to an empty tag when
    # the underlying pointer is unavailable.
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# New-style/old-style class compatibility shim (pre-Python 2.2 lacked
# `object`); `_newclass` records which flavor is in use.
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
def _swig_setattr_nondynamic_method(set):
    # Wrap a setter so only already-existing attributes (plus the special
    # `thisown`/`this`) may be assigned; new names raise AttributeError.
    def set_attr(self,name,value):
        if (name == "thisown"): return self.this.own(value)
        if hasattr(self,name) or (name == "this"):
            set(self,name,value)
        else:
            raise AttributeError("You cannot add attributes to %s" % self)
    return set_attr
class SwigPyIterator(object):
    # Abstract base proxy for SWIG's C++ iterators; the concrete iteration
    # operations are attached below via new_instancemethod.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _openbabel.delete_SwigPyIterator
    def __iter__(self): return self
# Attach the C-implemented iterator operations to SwigPyIterator and
# register the proxy type with the extension module.
SwigPyIterator.value = new_instancemethod(_openbabel.SwigPyIterator_value,None,SwigPyIterator)
SwigPyIterator.incr = new_instancemethod(_openbabel.SwigPyIterator_incr,None,SwigPyIterator)
SwigPyIterator.decr = new_instancemethod(_openbabel.SwigPyIterator_decr,None,SwigPyIterator)
SwigPyIterator.distance = new_instancemethod(_openbabel.SwigPyIterator_distance,None,SwigPyIterator)
SwigPyIterator.equal = new_instancemethod(_openbabel.SwigPyIterator_equal,None,SwigPyIterator)
SwigPyIterator.copy = new_instancemethod(_openbabel.SwigPyIterator_copy,None,SwigPyIterator)
SwigPyIterator.next = new_instancemethod(_openbabel.SwigPyIterator_next,None,SwigPyIterator)
SwigPyIterator.__next__ = new_instancemethod(_openbabel.SwigPyIterator___next__,None,SwigPyIterator)
SwigPyIterator.previous = new_instancemethod(_openbabel.SwigPyIterator_previous,None,SwigPyIterator)
SwigPyIterator.advance = new_instancemethod(_openbabel.SwigPyIterator_advance,None,SwigPyIterator)
SwigPyIterator.__eq__ = new_instancemethod(_openbabel.SwigPyIterator___eq__,None,SwigPyIterator)
SwigPyIterator.__ne__ = new_instancemethod(_openbabel.SwigPyIterator___ne__,None,SwigPyIterator)
SwigPyIterator.__iadd__ = new_instancemethod(_openbabel.SwigPyIterator___iadd__,None,SwigPyIterator)
SwigPyIterator.__isub__ = new_instancemethod(_openbabel.SwigPyIterator___isub__,None,SwigPyIterator)
SwigPyIterator.__add__ = new_instancemethod(_openbabel.SwigPyIterator___add__,None,SwigPyIterator)
SwigPyIterator.__sub__ = new_instancemethod(_openbabel.SwigPyIterator___sub__,None,SwigPyIterator)
SwigPyIterator_swigregister = _openbabel.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
# SWIG-generated proxy for a C++ vector of ints (per the generated name).
# The list protocol (__len__, slicing, item access) is bound below via
# new_instancemethod; the class is then registered with the C extension.
class vectorInt(object):
    # True when this proxy owns (and will delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Iteration delegates to the SWIG iterator wrapper.
    def __iter__(self): return self.iterator()
    def __init__(self, *args):
        _openbabel.vectorInt_swiginit(self,_openbabel.new_vectorInt(*args))
    __swig_destroy__ = _openbabel.delete_vectorInt
vectorInt.iterator = new_instancemethod(_openbabel.vectorInt_iterator,None,vectorInt)
vectorInt.__nonzero__ = new_instancemethod(_openbabel.vectorInt___nonzero__,None,vectorInt)
vectorInt.__bool__ = new_instancemethod(_openbabel.vectorInt___bool__,None,vectorInt)
vectorInt.__len__ = new_instancemethod(_openbabel.vectorInt___len__,None,vectorInt)
vectorInt.__getslice__ = new_instancemethod(_openbabel.vectorInt___getslice__,None,vectorInt)
vectorInt.__setslice__ = new_instancemethod(_openbabel.vectorInt___setslice__,None,vectorInt)
vectorInt.__delslice__ = new_instancemethod(_openbabel.vectorInt___delslice__,None,vectorInt)
vectorInt.__delitem__ = new_instancemethod(_openbabel.vectorInt___delitem__,None,vectorInt)
vectorInt.__getitem__ = new_instancemethod(_openbabel.vectorInt___getitem__,None,vectorInt)
vectorInt.__setitem__ = new_instancemethod(_openbabel.vectorInt___setitem__,None,vectorInt)
vectorInt_swigregister = _openbabel.vectorInt_swigregister
vectorInt_swigregister(vectorInt)
# SWIG-generated proxy for a C++ vector of unsigned ints (per the generated
# name).  Same layout as the other container proxies in this module.
class vectorUnsignedInt(object):
    # True when this proxy owns (and will delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __iter__(self): return self.iterator()
    def __init__(self, *args):
        _openbabel.vectorUnsignedInt_swiginit(self,_openbabel.new_vectorUnsignedInt(*args))
    __swig_destroy__ = _openbabel.delete_vectorUnsignedInt
vectorUnsignedInt.iterator = new_instancemethod(_openbabel.vectorUnsignedInt_iterator,None,vectorUnsignedInt)
vectorUnsignedInt.__nonzero__ = new_instancemethod(_openbabel.vectorUnsignedInt___nonzero__,None,vectorUnsignedInt)
vectorUnsignedInt.__bool__ = new_instancemethod(_openbabel.vectorUnsignedInt___bool__,None,vectorUnsignedInt)
vectorUnsignedInt.__len__ = new_instancemethod(_openbabel.vectorUnsignedInt___len__,None,vectorUnsignedInt)
vectorUnsignedInt.__getslice__ = new_instancemethod(_openbabel.vectorUnsignedInt___getslice__,None,vectorUnsignedInt)
vectorUnsignedInt.__setslice__ = new_instancemethod(_openbabel.vectorUnsignedInt___setslice__,None,vectorUnsignedInt)
vectorUnsignedInt.__delslice__ = new_instancemethod(_openbabel.vectorUnsignedInt___delslice__,None,vectorUnsignedInt)
vectorUnsignedInt.__delitem__ = new_instancemethod(_openbabel.vectorUnsignedInt___delitem__,None,vectorUnsignedInt)
vectorUnsignedInt.__getitem__ = new_instancemethod(_openbabel.vectorUnsignedInt___getitem__,None,vectorUnsignedInt)
vectorUnsignedInt.__setitem__ = new_instancemethod(_openbabel.vectorUnsignedInt___setitem__,None,vectorUnsignedInt)
vectorUnsignedInt_swigregister = _openbabel.vectorUnsignedInt_swigregister
vectorUnsignedInt_swigregister(vectorUnsignedInt)
# SWIG-generated proxy for a nested C++ vector-of-vector-of-int (per the
# generated name).  Same layout as the other container proxies in this module.
class vectorvInt(object):
    # True when this proxy owns (and will delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __iter__(self): return self.iterator()
    def __init__(self, *args):
        _openbabel.vectorvInt_swiginit(self,_openbabel.new_vectorvInt(*args))
    __swig_destroy__ = _openbabel.delete_vectorvInt
vectorvInt.iterator = new_instancemethod(_openbabel.vectorvInt_iterator,None,vectorvInt)
vectorvInt.__nonzero__ = new_instancemethod(_openbabel.vectorvInt___nonzero__,None,vectorvInt)
vectorvInt.__bool__ = new_instancemethod(_openbabel.vectorvInt___bool__,None,vectorvInt)
vectorvInt.__len__ = new_instancemethod(_openbabel.vectorvInt___len__,None,vectorvInt)
vectorvInt.__getslice__ = new_instancemethod(_openbabel.vectorvInt___getslice__,None,vectorvInt)
vectorvInt.__setslice__ = new_instancemethod(_openbabel.vectorvInt___setslice__,None,vectorvInt)
vectorvInt.__delslice__ = new_instancemethod(_openbabel.vectorvInt___delslice__,None,vectorvInt)
vectorvInt.__delitem__ = new_instancemethod(_openbabel.vectorvInt___delitem__,None,vectorvInt)
vectorvInt.__getitem__ = new_instancemethod(_openbabel.vectorvInt___getitem__,None,vectorvInt)
vectorvInt.__setitem__ = new_instancemethod(_openbabel.vectorvInt___setitem__,None,vectorvInt)
vectorvInt_swigregister = _openbabel.vectorvInt_swigregister
vectorvInt_swigregister(vectorvInt)
# SWIG-generated proxy for a C++ vector of doubles (per the generated name).
# Same layout as the other container proxies in this module.
class vectorDouble(object):
    # True when this proxy owns (and will delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __iter__(self): return self.iterator()
    def __init__(self, *args):
        _openbabel.vectorDouble_swiginit(self,_openbabel.new_vectorDouble(*args))
    __swig_destroy__ = _openbabel.delete_vectorDouble
vectorDouble.iterator = new_instancemethod(_openbabel.vectorDouble_iterator,None,vectorDouble)
vectorDouble.__nonzero__ = new_instancemethod(_openbabel.vectorDouble___nonzero__,None,vectorDouble)
vectorDouble.__bool__ = new_instancemethod(_openbabel.vectorDouble___bool__,None,vectorDouble)
vectorDouble.__len__ = new_instancemethod(_openbabel.vectorDouble___len__,None,vectorDouble)
vectorDouble.__getslice__ = new_instancemethod(_openbabel.vectorDouble___getslice__,None,vectorDouble)
vectorDouble.__setslice__ = new_instancemethod(_openbabel.vectorDouble___setslice__,None,vectorDouble)
vectorDouble.__delslice__ = new_instancemethod(_openbabel.vectorDouble___delslice__,None,vectorDouble)
vectorDouble.__delitem__ = new_instancemethod(_openbabel.vectorDouble___delitem__,None,vectorDouble)
vectorDouble.__getitem__ = new_instancemethod(_openbabel.vectorDouble___getitem__,None,vectorDouble)
vectorDouble.__setitem__ = new_instancemethod(_openbabel.vectorDouble___setitem__,None,vectorDouble)
vectorDouble_swigregister = _openbabel.vectorDouble_swigregister
vectorDouble_swigregister(vectorDouble)
# SWIG-generated proxy for a C++ vector of unsigned longs (per the generated
# name).  Same layout as the other container proxies in this module.
class vectorULong(object):
    # True when this proxy owns (and will delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __iter__(self): return self.iterator()
    def __init__(self, *args):
        _openbabel.vectorULong_swiginit(self,_openbabel.new_vectorULong(*args))
    __swig_destroy__ = _openbabel.delete_vectorULong
vectorULong.iterator = new_instancemethod(_openbabel.vectorULong_iterator,None,vectorULong)
vectorULong.__nonzero__ = new_instancemethod(_openbabel.vectorULong___nonzero__,None,vectorULong)
vectorULong.__bool__ = new_instancemethod(_openbabel.vectorULong___bool__,None,vectorULong)
vectorULong.__len__ = new_instancemethod(_openbabel.vectorULong___len__,None,vectorULong)
vectorULong.__getslice__ = new_instancemethod(_openbabel.vectorULong___getslice__,None,vectorULong)
vectorULong.__setslice__ = new_instancemethod(_openbabel.vectorULong___setslice__,None,vectorULong)
vectorULong.__delslice__ = new_instancemethod(_openbabel.vectorULong___delslice__,None,vectorULong)
vectorULong.__delitem__ = new_instancemethod(_openbabel.vectorULong___delitem__,None,vectorULong)
vectorULong.__getitem__ = new_instancemethod(_openbabel.vectorULong___getitem__,None,vectorULong)
vectorULong.__setitem__ = new_instancemethod(_openbabel.vectorULong___setitem__,None,vectorULong)
vectorULong_swigregister = _openbabel.vectorULong_swigregister
vectorULong_swigregister(vectorULong)
# SWIG-generated proxy for a C++ vector of strings (per the generated name).
# Same layout as the other container proxies in this module.
class vectorString(object):
    # True when this proxy owns (and will delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __iter__(self): return self.iterator()
    def __init__(self, *args):
        _openbabel.vectorString_swiginit(self,_openbabel.new_vectorString(*args))
    __swig_destroy__ = _openbabel.delete_vectorString
vectorString.iterator = new_instancemethod(_openbabel.vectorString_iterator,None,vectorString)
vectorString.__nonzero__ = new_instancemethod(_openbabel.vectorString___nonzero__,None,vectorString)
vectorString.__bool__ = new_instancemethod(_openbabel.vectorString___bool__,None,vectorString)
vectorString.__len__ = new_instancemethod(_openbabel.vectorString___len__,None,vectorString)
vectorString.__getslice__ = new_instancemethod(_openbabel.vectorString___getslice__,None,vectorString)
vectorString.__setslice__ = new_instancemethod(_openbabel.vectorString___setslice__,None,vectorString)
vectorString.__delslice__ = new_instancemethod(_openbabel.vectorString___delslice__,None,vectorString)
vectorString.__delitem__ = new_instancemethod(_openbabel.vectorString___delitem__,None,vectorString)
vectorString.__getitem__ = new_instancemethod(_openbabel.vectorString___getitem__,None,vectorString)
vectorString.__setitem__ = new_instancemethod(_openbabel.vectorString___setitem__,None,vectorString)
vectorString_swigregister = _openbabel.vectorString_swigregister
vectorString_swigregister(vectorString)
# SWIG-generated proxy for a C++ vector of vector3 objects (per the generated
# name).  Same layout as the other container proxies in this module.
class vectorVector3(object):
    # True when this proxy owns (and will delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __iter__(self): return self.iterator()
    def __init__(self, *args):
        _openbabel.vectorVector3_swiginit(self,_openbabel.new_vectorVector3(*args))
    __swig_destroy__ = _openbabel.delete_vectorVector3
vectorVector3.iterator = new_instancemethod(_openbabel.vectorVector3_iterator,None,vectorVector3)
vectorVector3.__nonzero__ = new_instancemethod(_openbabel.vectorVector3___nonzero__,None,vectorVector3)
vectorVector3.__bool__ = new_instancemethod(_openbabel.vectorVector3___bool__,None,vectorVector3)
vectorVector3.__len__ = new_instancemethod(_openbabel.vectorVector3___len__,None,vectorVector3)
vectorVector3.__getslice__ = new_instancemethod(_openbabel.vectorVector3___getslice__,None,vectorVector3)
vectorVector3.__setslice__ = new_instancemethod(_openbabel.vectorVector3___setslice__,None,vectorVector3)
vectorVector3.__delslice__ = new_instancemethod(_openbabel.vectorVector3___delslice__,None,vectorVector3)
vectorVector3.__delitem__ = new_instancemethod(_openbabel.vectorVector3___delitem__,None,vectorVector3)
vectorVector3.__getitem__ = new_instancemethod(_openbabel.vectorVector3___getitem__,None,vectorVector3)
vectorVector3.__setitem__ = new_instancemethod(_openbabel.vectorVector3___setitem__,None,vectorVector3)
vectorVector3_swigregister = _openbabel.vectorVector3_swigregister
vectorVector3_swigregister(vectorVector3)
# SWIG-generated proxy for a nested C++ vector-of-vector-of-vector3 (per the
# generated name).  Same layout as the other container proxies in this module.
class vectorvVector3(object):
    # True when this proxy owns (and will delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __iter__(self): return self.iterator()
    def __init__(self, *args):
        _openbabel.vectorvVector3_swiginit(self,_openbabel.new_vectorvVector3(*args))
    __swig_destroy__ = _openbabel.delete_vectorvVector3
vectorvVector3.iterator = new_instancemethod(_openbabel.vectorvVector3_iterator,None,vectorvVector3)
vectorvVector3.__nonzero__ = new_instancemethod(_openbabel.vectorvVector3___nonzero__,None,vectorvVector3)
vectorvVector3.__bool__ = new_instancemethod(_openbabel.vectorvVector3___bool__,None,vectorvVector3)
vectorvVector3.__len__ = new_instancemethod(_openbabel.vectorvVector3___len__,None,vectorvVector3)
vectorvVector3.__getslice__ = new_instancemethod(_openbabel.vectorvVector3___getslice__,None,vectorvVector3)
vectorvVector3.__setslice__ = new_instancemethod(_openbabel.vectorvVector3___setslice__,None,vectorvVector3)
vectorvVector3.__delslice__ = new_instancemethod(_openbabel.vectorvVector3___delslice__,None,vectorvVector3)
vectorvVector3.__delitem__ = new_instancemethod(_openbabel.vectorvVector3___delitem__,None,vectorvVector3)
vectorvVector3.__getitem__ = new_instancemethod(_openbabel.vectorvVector3___getitem__,None,vectorvVector3)
vectorvVector3.__setitem__ = new_instancemethod(_openbabel.vectorvVector3___setitem__,None,vectorvVector3)
vectorvVector3_swigregister = _openbabel.vectorvVector3_swigregister
vectorvVector3_swigregister(vectorvVector3)
# SWIG-generated proxy for a C++ vector of OBMol objects (per the generated
# name).  Same layout as the other container proxies in this module.
class vectorOBMol(object):
    # True when this proxy owns (and will delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __iter__(self): return self.iterator()
    def __init__(self, *args):
        _openbabel.vectorOBMol_swiginit(self,_openbabel.new_vectorOBMol(*args))
    __swig_destroy__ = _openbabel.delete_vectorOBMol
vectorOBMol.iterator = new_instancemethod(_openbabel.vectorOBMol_iterator,None,vectorOBMol)
vectorOBMol.__nonzero__ = new_instancemethod(_openbabel.vectorOBMol___nonzero__,None,vectorOBMol)
vectorOBMol.__bool__ = new_instancemethod(_openbabel.vectorOBMol___bool__,None,vectorOBMol)
vectorOBMol.__len__ = new_instancemethod(_openbabel.vectorOBMol___len__,None,vectorOBMol)
vectorOBMol.__getslice__ = new_instancemethod(_openbabel.vectorOBMol___getslice__,None,vectorOBMol)
vectorOBMol.__setslice__ = new_instancemethod(_openbabel.vectorOBMol___setslice__,None,vectorOBMol)
vectorOBMol.__delslice__ = new_instancemethod(_openbabel.vectorOBMol___delslice__,None,vectorOBMol)
vectorOBMol.__delitem__ = new_instancemethod(_openbabel.vectorOBMol___delitem__,None,vectorOBMol)
vectorOBMol.__getitem__ = new_instancemethod(_openbabel.vectorOBMol___getitem__,None,vectorOBMol)
vectorOBMol.__setitem__ = new_instancemethod(_openbabel.vectorOBMol___setitem__,None,vectorOBMol)
vectorOBMol_swigregister = _openbabel.vectorOBMol_swigregister
vectorOBMol_swigregister(vectorOBMol)
# SWIG-generated proxy for a C++ vector of OBBond objects (per the generated
# name).  Same layout as the other container proxies in this module.
class vectorOBBond(object):
    # True when this proxy owns (and will delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __iter__(self): return self.iterator()
    def __init__(self, *args):
        _openbabel.vectorOBBond_swiginit(self,_openbabel.new_vectorOBBond(*args))
    __swig_destroy__ = _openbabel.delete_vectorOBBond
vectorOBBond.iterator = new_instancemethod(_openbabel.vectorOBBond_iterator,None,vectorOBBond)
vectorOBBond.__nonzero__ = new_instancemethod(_openbabel.vectorOBBond___nonzero__,None,vectorOBBond)
vectorOBBond.__bool__ = new_instancemethod(_openbabel.vectorOBBond___bool__,None,vectorOBBond)
vectorOBBond.__len__ = new_instancemethod(_openbabel.vectorOBBond___len__,None,vectorOBBond)
vectorOBBond.__getslice__ = new_instancemethod(_openbabel.vectorOBBond___getslice__,None,vectorOBBond)
vectorOBBond.__setslice__ = new_instancemethod(_openbabel.vectorOBBond___setslice__,None,vectorOBBond)
vectorOBBond.__delslice__ = new_instancemethod(_openbabel.vectorOBBond___delslice__,None,vectorOBBond)
vectorOBBond.__delitem__ = new_instancemethod(_openbabel.vectorOBBond___delitem__,None,vectorOBBond)
vectorOBBond.__getitem__ = new_instancemethod(_openbabel.vectorOBBond___getitem__,None,vectorOBBond)
vectorOBBond.__setitem__ = new_instancemethod(_openbabel.vectorOBBond___setitem__,None,vectorOBBond)
vectorOBBond_swigregister = _openbabel.vectorOBBond_swigregister
vectorOBBond_swigregister(vectorOBBond)
# SWIG-generated proxy for a C++ vector of OBResidue objects (per the
# generated name).  Same layout as the other container proxies in this module.
class vectorOBResidue(object):
    # True when this proxy owns (and will delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __iter__(self): return self.iterator()
    def __init__(self, *args):
        _openbabel.vectorOBResidue_swiginit(self,_openbabel.new_vectorOBResidue(*args))
    __swig_destroy__ = _openbabel.delete_vectorOBResidue
vectorOBResidue.iterator = new_instancemethod(_openbabel.vectorOBResidue_iterator,None,vectorOBResidue)
vectorOBResidue.__nonzero__ = new_instancemethod(_openbabel.vectorOBResidue___nonzero__,None,vectorOBResidue)
vectorOBResidue.__bool__ = new_instancemethod(_openbabel.vectorOBResidue___bool__,None,vectorOBResidue)
vectorOBResidue.__len__ = new_instancemethod(_openbabel.vectorOBResidue___len__,None,vectorOBResidue)
vectorOBResidue.__getslice__ = new_instancemethod(_openbabel.vectorOBResidue___getslice__,None,vectorOBResidue)
vectorOBResidue.__setslice__ = new_instancemethod(_openbabel.vectorOBResidue___setslice__,None,vectorOBResidue)
vectorOBResidue.__delslice__ = new_instancemethod(_openbabel.vectorOBResidue___delslice__,None,vectorOBResidue)
vectorOBResidue.__delitem__ = new_instancemethod(_openbabel.vectorOBResidue___delitem__,None,vectorOBResidue)
vectorOBResidue.__getitem__ = new_instancemethod(_openbabel.vectorOBResidue___getitem__,None,vectorOBResidue)
vectorOBResidue.__setitem__ = new_instancemethod(_openbabel.vectorOBResidue___setitem__,None,vectorOBResidue)
vectorOBResidue_swigregister = _openbabel.vectorOBResidue_swigregister
vectorOBResidue_swigregister(vectorOBResidue)
# SWIG-generated proxy for a C++ vector of OBRing objects (per the generated
# name).  Same layout as the other container proxies in this module.
class vectorOBRing(object):
    # True when this proxy owns (and will delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __iter__(self): return self.iterator()
    def __init__(self, *args):
        _openbabel.vectorOBRing_swiginit(self,_openbabel.new_vectorOBRing(*args))
    __swig_destroy__ = _openbabel.delete_vectorOBRing
vectorOBRing.iterator = new_instancemethod(_openbabel.vectorOBRing_iterator,None,vectorOBRing)
vectorOBRing.__nonzero__ = new_instancemethod(_openbabel.vectorOBRing___nonzero__,None,vectorOBRing)
vectorOBRing.__bool__ = new_instancemethod(_openbabel.vectorOBRing___bool__,None,vectorOBRing)
vectorOBRing.__len__ = new_instancemethod(_openbabel.vectorOBRing___len__,None,vectorOBRing)
vectorOBRing.__getslice__ = new_instancemethod(_openbabel.vectorOBRing___getslice__,None,vectorOBRing)
vectorOBRing.__setslice__ = new_instancemethod(_openbabel.vectorOBRing___setslice__,None,vectorOBRing)
vectorOBRing.__delslice__ = new_instancemethod(_openbabel.vectorOBRing___delslice__,None,vectorOBRing)
vectorOBRing.__delitem__ = new_instancemethod(_openbabel.vectorOBRing___delitem__,None,vectorOBRing)
vectorOBRing.__getitem__ = new_instancemethod(_openbabel.vectorOBRing___getitem__,None,vectorOBRing)
vectorOBRing.__setitem__ = new_instancemethod(_openbabel.vectorOBRing___setitem__,None,vectorOBRing)
vectorOBRing_swigregister = _openbabel.vectorOBRing_swigregister
vectorOBRing_swigregister(vectorOBRing)
# SWIG-generated proxy for a C++ vector of OBRing pointers (the 'p' in the
# generated name).  Same layout as the other container proxies in this module.
class vectorpOBRing(object):
    # True when this proxy owns (and will delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __iter__(self): return self.iterator()
    def __init__(self, *args):
        _openbabel.vectorpOBRing_swiginit(self,_openbabel.new_vectorpOBRing(*args))
    __swig_destroy__ = _openbabel.delete_vectorpOBRing
vectorpOBRing.iterator = new_instancemethod(_openbabel.vectorpOBRing_iterator,None,vectorpOBRing)
vectorpOBRing.__nonzero__ = new_instancemethod(_openbabel.vectorpOBRing___nonzero__,None,vectorpOBRing)
vectorpOBRing.__bool__ = new_instancemethod(_openbabel.vectorpOBRing___bool__,None,vectorpOBRing)
vectorpOBRing.__len__ = new_instancemethod(_openbabel.vectorpOBRing___len__,None,vectorpOBRing)
vectorpOBRing.__getslice__ = new_instancemethod(_openbabel.vectorpOBRing___getslice__,None,vectorpOBRing)
vectorpOBRing.__setslice__ = new_instancemethod(_openbabel.vectorpOBRing___setslice__,None,vectorpOBRing)
vectorpOBRing.__delslice__ = new_instancemethod(_openbabel.vectorpOBRing___delslice__,None,vectorpOBRing)
vectorpOBRing.__delitem__ = new_instancemethod(_openbabel.vectorpOBRing___delitem__,None,vectorpOBRing)
vectorpOBRing.__getitem__ = new_instancemethod(_openbabel.vectorpOBRing___getitem__,None,vectorpOBRing)
vectorpOBRing.__setitem__ = new_instancemethod(_openbabel.vectorpOBRing___setitem__,None,vectorpOBRing)
vectorpOBRing_swigregister = _openbabel.vectorpOBRing_swigregister
vectorpOBRing_swigregister(vectorpOBRing)
# SWIG-generated proxy for a C++ vector of OBGenericData pointers (the 'p' in
# the generated name).  Same layout as the other container proxies here.
class vectorpOBGenericData(object):
    # True when this proxy owns (and will delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __iter__(self): return self.iterator()
    def __init__(self, *args):
        _openbabel.vectorpOBGenericData_swiginit(self,_openbabel.new_vectorpOBGenericData(*args))
    __swig_destroy__ = _openbabel.delete_vectorpOBGenericData
vectorpOBGenericData.iterator = new_instancemethod(_openbabel.vectorpOBGenericData_iterator,None,vectorpOBGenericData)
vectorpOBGenericData.__nonzero__ = new_instancemethod(_openbabel.vectorpOBGenericData___nonzero__,None,vectorpOBGenericData)
vectorpOBGenericData.__bool__ = new_instancemethod(_openbabel.vectorpOBGenericData___bool__,None,vectorpOBGenericData)
vectorpOBGenericData.__len__ = new_instancemethod(_openbabel.vectorpOBGenericData___len__,None,vectorpOBGenericData)
vectorpOBGenericData.__getslice__ = new_instancemethod(_openbabel.vectorpOBGenericData___getslice__,None,vectorpOBGenericData)
vectorpOBGenericData.__setslice__ = new_instancemethod(_openbabel.vectorpOBGenericData___setslice__,None,vectorpOBGenericData)
vectorpOBGenericData.__delslice__ = new_instancemethod(_openbabel.vectorpOBGenericData___delslice__,None,vectorpOBGenericData)
vectorpOBGenericData.__delitem__ = new_instancemethod(_openbabel.vectorpOBGenericData___delitem__,None,vectorpOBGenericData)
vectorpOBGenericData.__getitem__ = new_instancemethod(_openbabel.vectorpOBGenericData___getitem__,None,vectorpOBGenericData)
vectorpOBGenericData.__setitem__ = new_instancemethod(_openbabel.vectorpOBGenericData___setitem__,None,vectorpOBGenericData)
vectorpOBGenericData_swigregister = _openbabel.vectorpOBGenericData_swigregister
vectorpOBGenericData_swigregister(vectorpOBGenericData)
# SWIG-generated proxy for a C++ vector of OBInternalCoord pointers (the 'p'
# in the generated name).  Same layout as the other container proxies here.
class vectorpOBInternalCoord(object):
    # True when this proxy owns (and will delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __iter__(self): return self.iterator()
    def __init__(self, *args):
        _openbabel.vectorpOBInternalCoord_swiginit(self,_openbabel.new_vectorpOBInternalCoord(*args))
    __swig_destroy__ = _openbabel.delete_vectorpOBInternalCoord
vectorpOBInternalCoord.iterator = new_instancemethod(_openbabel.vectorpOBInternalCoord_iterator,None,vectorpOBInternalCoord)
vectorpOBInternalCoord.__nonzero__ = new_instancemethod(_openbabel.vectorpOBInternalCoord___nonzero__,None,vectorpOBInternalCoord)
vectorpOBInternalCoord.__bool__ = new_instancemethod(_openbabel.vectorpOBInternalCoord___bool__,None,vectorpOBInternalCoord)
vectorpOBInternalCoord.__len__ = new_instancemethod(_openbabel.vectorpOBInternalCoord___len__,None,vectorpOBInternalCoord)
vectorpOBInternalCoord.__getslice__ = new_instancemethod(_openbabel.vectorpOBInternalCoord___getslice__,None,vectorpOBInternalCoord)
vectorpOBInternalCoord.__setslice__ = new_instancemethod(_openbabel.vectorpOBInternalCoord___setslice__,None,vectorpOBInternalCoord)
vectorpOBInternalCoord.__delslice__ = new_instancemethod(_openbabel.vectorpOBInternalCoord___delslice__,None,vectorpOBInternalCoord)
vectorpOBInternalCoord.__delitem__ = new_instancemethod(_openbabel.vectorpOBInternalCoord___delitem__,None,vectorpOBInternalCoord)
vectorpOBInternalCoord.__getitem__ = new_instancemethod(_openbabel.vectorpOBInternalCoord___getitem__,None,vectorpOBInternalCoord)
vectorpOBInternalCoord.__setitem__ = new_instancemethod(_openbabel.vectorpOBInternalCoord___setitem__,None,vectorpOBInternalCoord)
vectorpOBInternalCoord_swigregister = _openbabel.vectorpOBInternalCoord_swigregister
vectorpOBInternalCoord_swigregister(vectorpOBInternalCoord)
# SWIG-generated proxy for a C++ pair of unsigned ints (per the generated
# name).  Exposed to Python as a 2-element sequence: .first / .second are
# SWIG properties over the C++ members, and indexing selects by parity.
class pairUIntUInt(object):
    # True when this proxy owns (and will delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr  # immediately shadowed by the def __repr__ below
    def __init__(self, *args):
        _openbabel.pairUIntUInt_swiginit(self,_openbabel.new_pairUIntUInt(*args))
    first = _swig_property(_openbabel.pairUIntUInt_first_get, _openbabel.pairUIntUInt_first_set)
    second = _swig_property(_openbabel.pairUIntUInt_second_get, _openbabel.pairUIntUInt_second_set)
    def __len__(self): return 2
    def __repr__(self): return str((self.first, self.second))
    def __getitem__(self, index):
        # Parity test (not a bounds check): any even index returns .first,
        # any odd index returns .second.
        if not (index % 2):
            return self.first
        else:
            return self.second
    def __setitem__(self, index, val):
        # Same parity rule as __getitem__.
        if not (index % 2):
            self.first = val
        else:
            self.second = val
    __swig_destroy__ = _openbabel.delete_pairUIntUInt
pairUIntUInt_swigregister = _openbabel.pairUIntUInt_swigregister
pairUIntUInt_swigregister(pairUIntUInt)
# SWIG-generated proxy for a C++ vector of pairUIntUInt elements (per the
# generated name).  Same layout as the other container proxies here.
class vpairUIntUInt(object):
    # True when this proxy owns (and will delete) the underlying C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __iter__(self): return self.iterator()
    def __init__(self, *args):
        _openbabel.vpairUIntUInt_swiginit(self,_openbabel.new_vpairUIntUInt(*args))
    __swig_destroy__ = _openbabel.delete_vpairUIntUInt
vpairUIntUInt.iterator = new_instancemethod(_openbabel.vpairUIntUInt_iterator,None,vpairUIntUInt)
vpairUIntUInt.__nonzero__ = new_instancemethod(_openbabel.vpairUIntUInt___nonzero__,None,vpairUIntUInt)
vpairUIntUInt.__bool__ = new_instancemethod(_openbabel.vpairUIntUInt___bool__,None,vpairUIntUInt)
vpairUIntUInt.__len__ = new_instancemethod(_openbabel.vpairUIntUInt___len__,None,vpairUIntUInt)
vpairUIntUInt.__getslice__ = new_instancemethod(_openbabel.vpairUIntUInt___getslice__,None,vpairUIntUInt)
vpairUIntUInt.__setslice__ = new_instancemethod(_openbabel.vpairUIntUInt___setslice__,None,vpairUIntUInt)
vpairUIntUInt.__delslice__ = new_instancemethod(_openbabel.vpairUIntUInt___delslice__,None,vpairUIntUInt)
vpairUIntUInt.__delitem__ = new_instancemethod(_openbabel.vpairUIntUInt___delitem__,None,vpairUIntUInt)
vpairUIntUInt.__getitem__ = new_instancemethod(_openbabel.vpairUIntUInt___getitem__,None,vpairUIntUInt)
vpairUIntUInt.__setitem__ = new_instancemethod(_openbabel.vpairUIntUInt___setitem__,None,vpairUIntUInt)
vpairUIntUInt_swigregister = _openbabel.vpairUIntUInt_swigregister
vpairUIntUInt_swigregister(vpairUIntUInt)
class vvpairUIntUInt(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __iter__(self): return self.iterator()
def __init__(self, *args):
_openbabel.vvpairUIntUInt_swiginit(self,_openbabel.new_vvpairUIntUInt(*args))
__swig_destroy__ = _openbabel.delete_vvpairUIntUInt
vvpairUIntUInt.iterator = new_instancemethod(_openbabel.vvpairUIntUInt_iterator,None,vvpairUIntUInt)
vvpairUIntUInt.__nonzero__ = new_instancemethod(_openbabel.vvpairUIntUInt___nonzero__,None,vvpairUIntUInt)
vvpairUIntUInt.__bool__ = new_instancemethod(_openbabel.vvpairUIntUInt___bool__,None,vvpairUIntUInt)
vvpairUIntUInt.__len__ = new_instancemethod(_openbabel.vvpairUIntUInt___len__,None,vvpairUIntUInt)
vvpairUIntUInt.pop = new_instancemethod(_openbabel.vvpairUIntUInt_pop,None,vvpairUIntUInt)
vvpairUIntUInt.__getslice__ = new_instancemethod(_openbabel.vvpairUIntUInt___getslice__,None,vvpairUIntUInt)
vvpairUIntUInt.__setslice__ = new_instancemethod(_openbabel.vvpairUIntUInt___setslice__,None,vvpairUIntUInt)
vvpairUIntUInt.__delslice__ = new_instancemethod(_openbabel.vvpairUIntUInt___delslice__,None,vvpairUIntUInt)
vvpairUIntUInt.__delitem__ = new_instancemethod(_openbabel.vvpairUIntUInt___delitem__,None,vvpairUIntUInt)
vvpairUIntUInt.__getitem__ = new_instancemethod(_openbabel.vvpairUIntUInt___getitem__,None,vvpairUIntUInt)
vvpairUIntUInt.__setitem__ = new_instancemethod(_openbabel.vvpairUIntUInt___setitem__,None,vvpairUIntUInt)
vvpairUIntUInt.append = new_instancemethod(_openbabel.vvpairUIntUInt_append,None,vvpairUIntUInt)
vvpairUIntUInt.empty = new_instancemethod(_openbabel.vvpairUIntUInt_empty,None,vvpairUIntUInt)
vvpairUIntUInt.size = new_instancemethod(_openbabel.vvpairUIntUInt_size,None,vvpairUIntUInt)
vvpairUIntUInt.clear = new_instancemethod(_openbabel.vvpairUIntUInt_clear,None,vvpairUIntUInt)
vvpairUIntUInt.swap = new_instancemethod(_openbabel.vvpairUIntUInt_swap,None,vvpairUIntUInt)
vvpairUIntUInt.get_allocator = new_instancemethod(_openbabel.vvpairUIntUInt_get_allocator,None,vvpairUIntUInt)
vvpairUIntUInt.begin = new_instancemethod(_openbabel.vvpairUIntUInt_begin,None,vvpairUIntUInt)
vvpairUIntUInt.end = new_instancemethod(_openbabel.vvpairUIntUInt_end,None,vvpairUIntUInt)
vvpairUIntUInt.rbegin = new_instancemethod(_openbabel.vvpairUIntUInt_rbegin,None,vvpairUIntUInt)
vvpairUIntUInt.rend = new_instancemethod(_openbabel.vvpairUIntUInt_rend,None,vvpairUIntUInt)
vvpairUIntUInt.pop_back = new_instancemethod(_openbabel.vvpairUIntUInt_pop_back,None,vvpairUIntUInt)
vvpairUIntUInt.erase = new_instancemethod(_openbabel.vvpairUIntUInt_erase,None,vvpairUIntUInt)
vvpairUIntUInt.push_back = new_instancemethod(_openbabel.vvpairUIntUInt_push_back,None,vvpairUIntUInt)
vvpairUIntUInt.front = new_instancemethod(_openbabel.vvpairUIntUInt_front,None,vvpairUIntUInt)
vvpairUIntUInt.back = new_instancemethod(_openbabel.vvpairUIntUInt_back,None,vvpairUIntUInt)
vvpairUIntUInt.assign = new_instancemethod(_openbabel.vvpairUIntUInt_assign,None,vvpairUIntUInt)
vvpairUIntUInt.resize = new_instancemethod(_openbabel.vvpairUIntUInt_resize,None,vvpairUIntUInt)
vvpairUIntUInt.insert | |
from __future__ import unicode_literals
import collections
import logging
import pytz
import re
from django.db import transaction
from django.db.models import Q
from django.utils import timezone
from django.utils.translation import ugettext, ugettext_lazy as _
from rest_framework import serializers
from rest_framework.reverse import reverse
from waldur_core.core import (serializers as core_serializers,
utils as core_utils,
signals as core_signals)
from waldur_core.structure import serializers as structure_serializers
from waldur_openstack.openstack import serializers as openstack_serializers
from waldur_openstack.openstack_base.backend import OpenStackBackendError
from . import models, fields
logger = logging.getLogger(__name__)
class ServiceSerializer(core_serializers.ExtraFieldOptionsMixin,
                        core_serializers.RequiredFieldsMixin,
                        structure_serializers.BaseServiceSerializer):
    """Serializer for OpenStackTenant service settings.

    SERVICE_ACCOUNT_FIELDS / SERVICE_ACCOUNT_EXTRA_FIELDS provide the
    help texts shown for the service's credential and option fields.
    """

    SERVICE_ACCOUNT_FIELDS = {
        'backend_url': _('Keystone auth URL (e.g. http://keystone.example.com:5000/v3)'),
        'domain': _('Tenant domain'),
        'username': _('Tenant user username'),
        # Fix: the help text had been replaced by a '<PASSWORD>' redaction
        # placeholder; restored to a real description, parallel to 'username'.
        'password': _('Tenant user password'),
    }
    SERVICE_ACCOUNT_EXTRA_FIELDS = {
        'tenant_id': _('Tenant ID in OpenStack'),
        'availability_zone': _('Default availability zone for provisioned instances'),
        'flavor_exclude_regex': _('Flavors matching this regex expression will not be pulled from the backend.'),
        'external_network_id': _('It is used to automatically assign floating IP to your virtual machine.'),
    }

    class Meta(structure_serializers.BaseServiceSerializer.Meta):
        model = models.OpenStackTenantService
        required_fields = ('backend_url', 'username', 'password', 'tenant_id')
        # Per-field UI options consumed by ExtraFieldOptionsMixin.
        extra_field_options = {
            'backend_url': {
                'label': 'API URL',
                'default_value': 'http://keystone.example.com:5000/v3',
            },
            'tenant_id': {
                'label': 'Tenant ID',
            },
            'availability_zone': {
                'placeholder': 'default',
            },
            'external_network_id': {
                'required': True,
            }
        }
class ServiceProjectLinkSerializer(structure_serializers.BaseServiceProjectLinkSerializer):
    """Serializer for the link between an OpenStackTenant service and a project."""

    class Meta(structure_serializers.BaseServiceProjectLinkSerializer.Meta):
        model = models.OpenStackTenantServiceProjectLink
        extra_kwargs = {
            'service': {'lookup_field': 'uuid', 'view_name': 'openstacktenant-detail'},
        }
class ImageSerializer(structure_serializers.BasePropertySerializer):
    """Serializer for OpenStack image properties (min_disk/min_ram constraints included)."""

    class Meta(structure_serializers.BasePropertySerializer.Meta):
        model = models.Image
        fields = ('url', 'uuid', 'name', 'settings', 'min_disk', 'min_ram',)
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
            'settings': {'lookup_field': 'uuid'},
        }
class FlavorSerializer(structure_serializers.BasePropertySerializer):
    """Serializer for OpenStack flavor properties (cores/ram/disk sizing)."""

    class Meta(structure_serializers.BasePropertySerializer.Meta):
        model = models.Flavor
        fields = ('url', 'uuid', 'name', 'settings', 'cores', 'ram', 'disk',)
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
            'settings': {'lookup_field': 'uuid'},
        }
class UsageStatsSerializer(serializers.Serializer):
    """Plain (non-model) serializer for usage-statistics query parameters."""

    shared = serializers.BooleanField()
    service_provider = serializers.ListField(child=serializers.CharField())
class NetworkSerializer(structure_serializers.BasePropertySerializer):
    """Serializer for OpenStack network properties, with links to their subnets."""

    class Meta(structure_serializers.BasePropertySerializer.Meta):
        model = models.Network
        fields = ('url', 'uuid', 'name',
                  'type', 'is_external', 'segmentation_id', 'subnets')
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
            'settings': {'lookup_field': 'uuid'},
            'subnets': {'lookup_field': 'uuid', 'view_name': 'openstacktenant-subnet-detail'}
        }
class SubNetSerializer(structure_serializers.BasePropertySerializer):
    """Serializer for OpenStack subnet properties."""

    # JSON-typed columns exposed verbatim; read-only for API clients.
    dns_nameservers = serializers.JSONField(read_only=True)
    allocation_pools = serializers.JSONField(read_only=True)

    class Meta(structure_serializers.BasePropertySerializer.Meta):
        model = models.SubNet
        fields = ('url', 'uuid', 'name',
                  'cidr', 'gateway_ip', 'allocation_pools', 'ip_version', 'enable_dhcp', 'dns_nameservers', 'network')
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
            'settings': {'lookup_field': 'uuid'},
            'network': {'lookup_field': 'uuid', 'view_name': 'openstacktenant-network-detail'},
        }
class FloatingIPSerializer(structure_serializers.BasePropertySerializer):
    """Read-only serializer for floating IPs.

    NOTE(review): unlike sibling property serializers, 'name' is not
    exposed here — confirm that is intentional.
    """

    class Meta(structure_serializers.BasePropertySerializer.Meta):
        model = models.FloatingIP
        fields = ('url', 'uuid', 'settings', 'address', 'runtime_state', 'is_booked',)
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
            'settings': {'lookup_field': 'uuid'},
        }
class SecurityGroupSerializer(structure_serializers.BasePropertySerializer):
    """Read-only serializer for security groups, including their rules."""

    rules = serializers.SerializerMethodField()

    class Meta(structure_serializers.BasePropertySerializer.Meta):
        model = models.SecurityGroup
        fields = ('url', 'uuid', 'name', 'settings', 'description', 'rules')
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
            'settings': {'lookup_field': 'uuid'},
        }

    def get_rules(self, security_group):
        """Return the group's rules as a list of plain dictionaries."""
        return [
            {
                'protocol': rule.protocol,
                'from_port': rule.from_port,
                'to_port': rule.to_port,
                'cidr': rule.cidr,
            }
            for rule in security_group.rules.all()
        ]
class VolumeImportableSerializer(core_serializers.AugmentedSerializerMixin,
                                 serializers.HyperlinkedModelSerializer):
    """Base serializer for listing backend volumes available for import."""

    # Chosen by the caller; never echoed back in responses.
    service_project_link = serializers.HyperlinkedRelatedField(
        view_name='openstacktenant-spl-detail',
        queryset=models.OpenStackTenantServiceProjectLink.objects.all(),
        write_only=True)
    instance_name = serializers.ReadOnlyField(source='instance.name')
    instance_uuid = serializers.ReadOnlyField(source='instance.uuid')

    def get_filtered_field_names(self):
        # Hook for permission-based field filtering (trailing comma = 1-tuple).
        return 'service_project_link',

    class Meta(object):
        model = models.Volume
        model_fields = ('name', 'description', 'size', 'bootable', 'type', 'device',
                        'runtime_state', 'instance_name', 'instance_uuid')
        fields = ('service_project_link', 'backend_id') + model_fields
        read_only_fields = model_fields + ('backend_id',)
class VolumeImportSerializer(VolumeImportableSerializer):
    """Serializer that performs the actual import of a backend volume."""

    class Meta(VolumeImportableSerializer.Meta):
        fields = VolumeImportableSerializer.Meta.fields + ('url', 'uuid', 'created')
        read_only_fields = VolumeImportableSerializer.Meta.model_fields
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }

    @transaction.atomic
    def create(self, validated_data):
        """Import the volume identified by ``backend_id`` through the SPL backend."""
        spl = validated_data['service_project_link']
        backend_id = validated_data['backend_id']

        # Refuse to import the same backend volume twice for one settings scope.
        already_imported = models.Volume.objects.filter(
            service_project_link__service__settings=spl.service.settings,
            backend_id=backend_id,
        ).exists()
        if already_imported:
            raise serializers.ValidationError({
                'backend_id': _('Volume has been imported already.')
            })

        try:
            volume = spl.get_backend().import_volume(
                backend_id, save=True, service_project_link=spl)
        except OpenStackBackendError:
            raise serializers.ValidationError({
                'backend_id': _("Can't import volume with ID %s") % validated_data['backend_id']
            })
        return volume
class VolumeSerializer(structure_serializers.BaseResourceSerializer):
    """CRUD serializer for tenant volumes.

    On create either an image+size or a snapshot must be provided; most
    runtime attributes are read-only and synced from the backend.
    """

    service = serializers.HyperlinkedRelatedField(
        source='service_project_link.service',
        view_name='openstacktenant-detail',
        read_only=True,
        lookup_field='uuid')
    service_project_link = serializers.HyperlinkedRelatedField(
        view_name='openstacktenant-spl-detail',
        queryset=models.OpenStackTenantServiceProjectLink.objects.all(),
        allow_null=True,
        required=False,
    )
    action_details = serializers.JSONField(read_only=True)
    metadata = serializers.JSONField(read_only=True)
    instance_name = serializers.SerializerMethodField()

    class Meta(structure_serializers.BaseResourceSerializer.Meta):
        model = models.Volume
        fields = structure_serializers.BaseResourceSerializer.Meta.fields + (
            'source_snapshot', 'size', 'bootable', 'metadata',
            'image', 'image_metadata', 'image_name', 'type', 'runtime_state',
            'device', 'action', 'action_details', 'instance', 'instance_name',
        )
        read_only_fields = structure_serializers.BaseResourceSerializer.Meta.read_only_fields + (
            'image_metadata', 'image_name', 'bootable', 'source_snapshot', 'runtime_state', 'device', 'metadata',
            'action', 'instance', 'type',
        )
        # size and image may be set on create but never changed afterwards.
        protected_fields = structure_serializers.BaseResourceSerializer.Meta.protected_fields + (
            'size', 'image',
        )
        extra_kwargs = dict(
            instance={'lookup_field': 'uuid', 'view_name': 'openstacktenant-instance-detail'},
            image={'lookup_field': 'uuid', 'view_name': 'openstacktenant-image-detail'},
            source_snapshot={'lookup_field': 'uuid', 'view_name': 'openstacktenant-snapshot-detail'},
            size={'required': False, 'allow_null': True},
            **structure_serializers.BaseResourceSerializer.Meta.extra_kwargs
        )

    def get_instance_name(self, volume):
        # Implicitly returns None for detached volumes.
        if volume.instance:
            return volume.instance.name

    def validate(self, attrs):
        """Cross-field validation; only applied on create (self.instance is None)."""
        attrs = super(VolumeSerializer, self).validate(attrs)
        if self.instance is None:
            # image validation
            image = attrs.get('image')
            spl = attrs['service_project_link']
            if image and image.settings != spl.service.settings:
                raise serializers.ValidationError({'image': _('Image must belong to the same service settings')})
            # snapshot & size validation
            # NOTE(review): the declared (read-only) field is 'source_snapshot',
            # yet this reads attrs.get('snapshot'), which appears to always be
            # None here — confirm whether snapshot-based creation actually works.
            size = attrs.get('size')
            snapshot = attrs.get('snapshot')
            if not size and not snapshot:
                raise serializers.ValidationError(_('Snapshot or size should be defined'))
            if size and snapshot:
                raise serializers.ValidationError(_('It is impossible to define both snapshot and size'))
            # image & size validation
            size = size or snapshot.size
            if image and image.min_disk > size:
                raise serializers.ValidationError({
                    'size': _('Volume size should be equal or greater than %s for selected image') % image.min_disk
                })
        return attrs

    def create(self, validated_data):
        if not validated_data.get('size'):
            # NOTE(review): KeyErrors here if neither size nor 'snapshot'
            # made it into validated_data — see the validate() note above.
            validated_data['size'] = validated_data['snapshot'].size
        if validated_data.get('image'):
            # Cache the image name so it survives image deletion on the backend.
            validated_data['image_name'] = validated_data['image'].name
        return super(VolumeSerializer, self).create(validated_data)
class VolumeExtendSerializer(serializers.Serializer):
    """Validates and applies a volume size increase.

    NOTE(review): the fixed +1024 step suggests sizes are tracked in MiB
    (minimum growth of 1 GiB) — confirm against the model.
    """

    disk_size = serializers.IntegerField(min_value=1, label='Disk size')

    def validate_disk_size(self, disk_size):
        """Reject any new size smaller than current size + 1024."""
        minimum_allowed = self.instance.size + 1024
        if disk_size < minimum_allowed:
            raise serializers.ValidationError(
                _('Disk size should be greater or equal to %s') % (self.instance.size + 1024))
        return disk_size

    @transaction.atomic
    def update(self, instance, validated_data):
        """Charge the extra storage against both quota holders, then persist."""
        new_size = validated_data.get('disk_size')
        delta = new_size - instance.size
        spl = instance.service_project_link
        settings = spl.service.settings
        for quota_holder in (settings, spl):
            quota_holder.add_quota_usage(quota_holder.Quotas.storage, delta, validate=True)
        instance.size = new_size
        instance.save(update_fields=['size'])
        return instance
class VolumeAttachSerializer(structure_serializers.PermissionFieldFilteringMixin,
                             serializers.HyperlinkedModelSerializer):
    """Validates attaching the current volume (``self.instance``) to an instance."""

    class Meta(object):
        model = models.Volume
        fields = ('instance', 'device')
        extra_kwargs = dict(
            instance={
                'required': True,
                'allow_null': False,
                'view_name': 'openstacktenant-instance-detail',
                'lookup_field': 'uuid',
            }
        )

    def get_fields(self):
        fields = super(VolumeAttachSerializer, self).get_fields()
        volume = self.instance
        if volume:
            # Narrow the instance choices to the volume's own project/service.
            fields['instance'].display_name_field = 'name'
            fields['instance'].query_params = {
                'project_uuid': volume.service_project_link.project.uuid.hex,
                'service_uuid': volume.service_project_link.service.uuid.hex,
            }
        return fields

    def get_filtered_field_names(self):
        # PermissionFieldFilteringMixin hook.
        return ('instance',)

    def validate_instance(self, instance):
        """Target instance must be OK and shutoff/active, and share the volume's SPL."""
        States, RuntimeStates = models.Instance.States, models.Instance.RuntimeStates
        if instance.state != States.OK or instance.runtime_state not in (RuntimeStates.SHUTOFF, RuntimeStates.ACTIVE):
            raise serializers.ValidationError(
                _('Volume can be attached only to shutoff or active instance in OK state.'))
        volume = self.instance
        if instance.service_project_link != volume.service_project_link:
            raise serializers.ValidationError(_('Volume and instance should belong to the same service and project.'))
        return instance

    def validate(self, attrs):
        instance = attrs['instance']
        device = attrs.get('device')
        # Reject a device path already occupied on the target instance.
        if device and instance.volumes.filter(device=device).exists():
            raise serializers.ValidationError({'device': _('The supplied device path (%s) is in use.') % device})
        return attrs
class SnapshotRestorationSerializer(core_serializers.AugmentedSerializerMixin, serializers.HyperlinkedModelSerializer):
    """Creates a new volume restored from a snapshot (taken from the view context)."""

    name = serializers.CharField(write_only=True, help_text=_('New volume name.'))
    description = serializers.CharField(required=False, help_text=_('New volume description.'))
    volume_state = serializers.CharField(source='volume.human_readable_state', read_only=True)

    class Meta(object):
        model = models.SnapshotRestoration
        fields = ('uuid', 'created', 'name', 'description',
                  'volume', 'volume_name', 'volume_state', 'volume_runtime_state', 'volume_size', 'volume_device')
        read_only_fields = ('uuid', 'created', 'volume')
        related_paths = {
            'volume': ('name', 'state', 'runtime_state', 'size', 'device')
        }
        extra_kwargs = dict(
            volume={'lookup_field': 'uuid', 'view_name': 'openstacktenant-volume-detail'},
        )

    @transaction.atomic
    def create(self, validated_data):
        """Build and save the restored volume, then create the restoration record."""
        # The snapshot being restored comes from the detail view, not the payload.
        snapshot = self.context['view'].get_object()
        validated_data['snapshot'] = snapshot
        description = validated_data.pop('description', None) or 'Restored from snapshot %s' % snapshot.name

        volume = models.Volume(
            source_snapshot=snapshot,
            service_project_link=snapshot.service_project_link,
            name=validated_data.pop('name'),
            description=description,
            size=snapshot.size,
        )

        # Carry the original volume's image metadata over, when recorded.
        if 'source_volume_image_metadata' in snapshot.metadata:
            volume.image_metadata = snapshot.metadata['source_volume_image_metadata']

        volume.save()
        volume.increase_backend_quotas_usage()
        validated_data['volume'] = volume

        return super(SnapshotRestorationSerializer, self).create(validated_data)
class SnapshotSerializer(structure_serializers.BaseResourceSerializer):
    """Serializer for volume snapshots; create derives everything from the source volume."""

    service = serializers.HyperlinkedRelatedField(
        source='service_project_link.service',
        view_name='openstacktenant-detail',
        read_only=True,
        lookup_field='uuid')
    service_project_link = serializers.HyperlinkedRelatedField(
        view_name='openstacktenant-spl-detail',
        read_only=True)
    source_volume_name = serializers.ReadOnlyField(source='source_volume.name')
    action_details = serializers.JSONField(read_only=True)
    metadata = serializers.JSONField(required=False)
    restorations = SnapshotRestorationSerializer(many=True, read_only=True)
    snapshot_schedule_uuid = serializers.ReadOnlyField(source='snapshot_schedule.uuid')

    class Meta(structure_serializers.BaseResourceSerializer.Meta):
        model = models.Snapshot
        fields = structure_serializers.BaseResourceSerializer.Meta.fields + (
            'source_volume', 'size', 'metadata', 'runtime_state', 'source_volume_name', 'action', 'action_details',
            'restorations', 'kept_until', 'snapshot_schedule', 'snapshot_schedule_uuid'
        )
        read_only_fields = structure_serializers.BaseResourceSerializer.Meta.read_only_fields + (
            'size', 'source_volume', 'metadata', 'runtime_state', 'action', 'snapshot_schedule',
        )
        extra_kwargs = dict(
            source_volume={'lookup_field': 'uuid', 'view_name': 'openstacktenant-volume-detail'},
            snapshot_schedule={'lookup_field': 'uuid', 'view_name': 'openstacktenant-snapshot-schedule-detail'},
            **structure_serializers.BaseResourceSerializer.Meta.extra_kwargs
        )

    def validate(self, attrs):
        # Skip validation on update
        if self.instance:
            return attrs

        # On create, the source volume comes from the detail view, and size,
        # SPL and metadata are derived from it rather than from the payload.
        attrs['source_volume'] = source_volume = self.context['view'].get_object()
        attrs['service_project_link'] = source_volume.service_project_link
        attrs['size'] = source_volume.size
        attrs['metadata'] = self.get_snapshot_metadata(source_volume)
        return super(SnapshotSerializer, self).validate(attrs)

    @staticmethod
    def get_snapshot_metadata(volume):
        """Record enough of the source volume to restore it meaningfully later."""
        return {
            'source_volume_name': volume.name,
            'source_volume_description': volume.description,
            'source_volume_image_metadata': volume.image_metadata,
        }
class SnapshotImportableSerializer(core_serializers.AugmentedSerializerMixin,
                                   serializers.HyperlinkedModelSerializer):
    """Base serializer for listing backend snapshots available for import."""

    # Chosen by the caller; never echoed back in responses.
    service_project_link = serializers.HyperlinkedRelatedField(
        view_name='openstacktenant-spl-detail',
        queryset=models.OpenStackTenantServiceProjectLink.objects.all(),
        write_only=True)
    source_volume_name = serializers.ReadOnlyField(source='source_volume.name')

    def get_filtered_field_names(self):
        # Hook for permission-based field filtering (trailing comma = 1-tuple).
        return 'service_project_link',

    class Meta(object):
        model = models.Snapshot
        # FIX: 'source_volume_name' was listed twice; the duplicate is removed.
        model_fields = ('name', 'description', 'size', 'action', 'action_details',
                        'metadata', 'runtime_state', 'state', 'source_volume_name')
        fields = ('service_project_link', 'backend_id') + model_fields
        read_only_fields = model_fields + ('backend_id',)
        extra_kwargs = dict(
            source_volume={'lookup_field': 'uuid', 'view_name': 'openstacktenant-volume-detail'},
        )
class SnapshotImportSerializer(SnapshotImportableSerializer):
    """Serializer that performs the actual import of a backend snapshot."""

    class Meta(SnapshotImportableSerializer.Meta):
        fields = SnapshotImportableSerializer.Meta.fields + ('url', 'uuid', 'created')
        read_only_fields = SnapshotImportableSerializer.Meta.model_fields
        extra_kwargs = {
            'url': {'lookup_field': 'uuid'},
        }

    @transaction.atomic
    def create(self, validated_data):
        """Import the snapshot identified by ``backend_id`` through the SPL backend."""
        spl = validated_data['service_project_link']
        backend_id = validated_data['backend_id']

        # Refuse to import the same backend snapshot twice for one settings scope.
        already_imported = models.Snapshot.objects.filter(
            service_project_link__service__settings=spl.service.settings,
            backend_id=backend_id,
        ).exists()
        if already_imported:
            raise serializers.ValidationError({
                'backend_id': _('Snapshot has been imported already.')
            })

        try:
            snapshot = spl.get_backend().import_snapshot(
                backend_id, save=True, service_project_link=spl)
        except OpenStackBackendError:
            raise serializers.ValidationError({
                'backend_id': _("Can't import snapshot with ID %s") % validated_data['backend_id']
            })
        return snapshot
class NestedVolumeSerializer(core_serializers.AugmentedSerializerMixin,
serializers.HyperlinkedModelSerializer,
structure_serializers.BasicResourceSerializer):
state = serializers.ReadOnlyField(source='get_state_display')
class Meta:
model = models.Volume
fields = 'url', 'uuid', 'name', 'image_name', 'state', 'bootable', 'size', 'device', 'resource_type'
extra_kwargs = {
'url': | |
# Repository marker left by dataset extraction: panosmdma/SlackOnly-SlackBuilds
from __future__ import print_function, absolute_import
from distutils import sysconfig
from distutils import version
from distutils.core import Extension
import glob
import io
import multiprocessing
import os
import re
import subprocess
import sys
import warnings
from textwrap import fill
import versioneer
# True on any Python 3.x interpreter.
PY3min = (sys.version_info[0] >= 3)
# True on Python >= 3.2; precedence is ((PY3min and minor >= 2) or major > 3).
PY32min = (PY3min and sys.version_info[1] >= 2 or sys.version_info[0] > 3)
# Prefer the stdlib implementation; fall back to a backport on Python 2.6.
try:
    from subprocess import check_output
except ImportError:
    # check_output is not available in Python 2.6
    def check_output(*popenargs, **kwargs):
        """
        Run command with arguments and return its output as a byte
        string.

        Backported from Python 2.7 as it's implemented as pure python
        on stdlib.
        """
        process = subprocess.Popen(
            stdout=subprocess.PIPE, *popenargs, **kwargs)
        output, unused_err = process.communicate()
        retcode = process.poll()
        if retcode:
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            # Attach the captured output, matching the 2.7 behaviour.
            error = subprocess.CalledProcessError(retcode, cmd)
            error.output = output
            raise error
        return output
# getstatusoutput lives in `commands` on Python 2 and `subprocess` on Python 3;
# it is POSIX-only, hence the win32 guard.
if sys.platform != 'win32':
    if not PY3min:
        from commands import getstatusoutput
    else:
        from subprocess import getstatusoutput

# The configparser module was renamed between Python 2 and 3.
if PY3min:
    import configparser
else:
    import ConfigParser as configparser
# matplotlib build options, which can be altered using setup.cfg
options = {
    'display_status': True,
    'verbose': False,
    'backend': None,
    'basedirlist': None
    }

setup_cfg = os.environ.get('MPLSETUPCFG', 'setup.cfg')
if os.path.exists(setup_cfg):
    if PY32min:
        config = configparser.ConfigParser()
    else:
        # SafeConfigParser was the pre-3.2 name.
        config = configparser.SafeConfigParser()
    config.read(setup_cfg)

    # Every option is optional in setup.cfg.  Fall back silently when a
    # section/option is missing or a value cannot be parsed, but catch only
    # those errors: the original bare `except:` also swallowed
    # KeyboardInterrupt/SystemExit and hid real bugs.
    try:
        options['display_status'] = not config.getboolean("status", "suppress")
    except (configparser.Error, ValueError):
        pass

    try:
        options['backend'] = config.get("rc_options", "backend")
    except configparser.Error:
        pass

    try:
        options['basedirlist'] = [
            x.strip() for x in
            config.get("directories", "basedirlist").split(',')]
    except configparser.Error:
        pass
else:
    config = None
def get_win32_compiler():
    """
    Determine the compiler being used on win32.
    """
    # Crude heuristic: assume mingw32 iff it is mentioned anywhere on the
    # command line; otherwise default to MSVC.
    return 'mingw32' if any('mingw32' in arg for arg in sys.argv) else 'msvc'

win32_compiler = get_win32_compiler()
def extract_versions():
    """
    Extracts version values from the main matplotlib __init__.py and
    returns them as a dictionary.
    """
    # NOTE(review): this relies on exec() writing into the function's local
    # namespace and then returning locals() — which also leaks `fd` and
    # `line` into the result, and is known to behave differently under
    # Python 3 (exec cannot rebind function locals).  Confirm before reuse.
    with open('lib/matplotlib/__init__.py') as fd:
        for line in fd.readlines():
            # Only lines like `__version__numpy__ = '...'` are evaluated.
            if (line.startswith('__version__numpy__')):
                exec(line.strip())
    return locals()
def has_include_file(include_dirs, filename):
    """
    Returns `True` if `filename` can be found in one of the
    directories in `include_dirs`.
    """
    if sys.platform == 'win32':
        # FIX: build a new list here.  The original `include_dirs += ...`
        # extended the *caller's* list in place on every win32 call.
        include_dirs = list(include_dirs) + os.environ.get('INCLUDE', '.').split(';')
    for dir in include_dirs:
        if os.path.exists(os.path.join(dir, filename)):
            return True
    return False
def check_include_file(include_dirs, filename, package):
    """
    Raises an exception if the given include file can not be found.
    """
    # Early return on success; raise only when the header is missing.
    if has_include_file(include_dirs, filename):
        return
    raise CheckFailed(
        "The C/C++ header for %s (%s) could not be found. You "
        "may need to install the development package." %
        (package, filename))
def get_base_dirs():
    """
    Returns a list of standard base directories on this platform.
    """
    # An explicit basedirlist from setup.cfg always wins over the defaults.
    if options['basedirlist']:
        return options['basedirlist']

    defaults = {
        'win32': ['win32_static', ],
        'darwin': ['/usr/local/', '/usr', '/usr/X11',
                   '/opt/X11', '/opt/local'],
        'sunos5': [os.getenv('MPLIB_BASE') or '/usr/local', ],
        'gnu0': ['/usr'],
        'aix5': ['/usr/local'],
    }
    return defaults.get(sys.platform, ['/usr/local', '/usr'])
def get_include_dirs():
    """
    Returns a list of standard include directories on this platform.
    """
    dirs = [os.path.join(base, 'include') for base in get_base_dirs()]
    # Honour the compiler's include-path environment variable as well.
    dirs.extend(os.environ.get('CPLUS_INCLUDE_PATH', '').split(os.pathsep))
    return dirs
def is_min_version(found, minversion):
    """
    Returns `True` if `found` is at least as high a version as
    `minversion`.
    """
    return version.LooseVersion(found) >= version.LooseVersion(minversion)
# Define the display functions only if display_status is True.
if options['display_status']:
    def print_line(char='='):
        # 76-column horizontal rule.
        print(char * 76)

    def print_status(package, status):
        # Right-aligned package name, wrapped status text.
        initial_indent = "%22s: " % package
        indent = ' ' * 24
        print(fill(str(status), width=76,
                   initial_indent=initial_indent,
                   subsequent_indent=indent))

    def print_message(message):
        # Bulleted message, aligned with the status column.
        indent = ' ' * 24 + "* "
        print(fill(str(message), width=76,
                   initial_indent=indent,
                   subsequent_indent=indent))

    def print_raw(section):
        print(section)
else:
    # Quiet mode: all four display helpers become no-ops.
    def print_line(*args, **kwargs):
        pass
    print_status = print_message = print_raw = print_line
# Remove the -Wstrict-prototypes option, as it's not valid for C++
customize_compiler = sysconfig.customize_compiler


def my_customize_compiler(compiler):
    """Wrap distutils' customize_compiler and strip -Wstrict-prototypes."""
    retval = customize_compiler(compiler)
    try:
        compiler.compiler_so.remove('-Wstrict-prototypes')
    except (ValueError, AttributeError):
        # Flag absent, or compiler has no compiler_so (e.g. MSVC).
        pass
    return retval

# Monkey-patch so every compiler distutils configures goes through the wrapper.
sysconfig.customize_compiler = my_customize_compiler
def make_extension(name, files, *args, **kwargs):
    """
    Make a new extension.  Automatically sets include_dirs and
    library_dirs to the base directories appropriate for this
    platform.

    `name` is the name of the extension.

    `files` is a list of source files.

    Any additional arguments are passed to the
    `distutils.core.Extension` constructor.
    """
    ext = DelayedExtension(name, files, *args, **kwargs)
    for base in get_base_dirs():
        candidate_include = os.path.join(base, 'include')
        if os.path.exists(candidate_include):
            ext.include_dirs.append(candidate_include)
        for libdir_name in ('lib', 'lib64'):
            candidate_lib = os.path.join(base, libdir_name)
            if os.path.exists(candidate_lib):
                ext.library_dirs.append(candidate_lib)
    # The source tree itself is always searched for headers.
    ext.include_dirs.append('.')

    return ext
class PkgConfig(object):
    """
    This is a class for communicating with pkg-config.
    """
    def __init__(self):
        """
        Determines whether pkg-config exists on this machine.
        """
        # pkg-config is assumed unavailable on Windows.
        if sys.platform == 'win32':
            self.has_pkgconfig = False
        else:
            # Allow overriding the executable via the PKG_CONFIG env var.
            try:
                self.pkg_config = os.environ['PKG_CONFIG']
            except KeyError:
                self.pkg_config = 'pkg-config'

            self.set_pkgconfig_path()
            # Probe by running `pkg-config --help`; exit status 0 means usable.
            status, output = getstatusoutput(self.pkg_config + " --help")
            self.has_pkgconfig = (status == 0)
            if not self.has_pkgconfig:
                print("IMPORTANT WARNING:")
                print(
                    " pkg-config is not installed.\n"
                    " matplotlib may not be able to find some of its dependencies")

    def set_pkgconfig_path(self):
        # Append <LIBDIR>/pkgconfig to PKG_CONFIG_PATH when it exists, so
        # .pc files installed alongside this interpreter are found.
        pkgconfig_path = sysconfig.get_config_var('LIBDIR')
        if pkgconfig_path is None:
            return

        pkgconfig_path = os.path.join(pkgconfig_path, 'pkgconfig')
        if not os.path.isdir(pkgconfig_path):
            return

        try:
            os.environ['PKG_CONFIG_PATH'] += ':' + pkgconfig_path
        except KeyError:
            os.environ['PKG_CONFIG_PATH'] = pkgconfig_path

    # NOTE(review): the mutable-default arguments below are shared across
    # calls; safe only as long as callers never mutate them.
    def setup_extension(self, ext, package, default_include_dirs=[],
                        default_library_dirs=[], default_libraries=[],
                        alt_exec=None):
        """
        Add parameters to the given `ext` for the given `package`.

        Returns True when the defaults were applied, False when pkg-config
        supplied the flags.
        """
        # Map pkg-config flag prefixes onto Extension attributes.
        flag_map = {
            '-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}

        executable = alt_exec
        if self.has_pkgconfig:
            executable = (self.pkg_config + ' {0}').format(package)

        use_defaults = True

        if executable is not None:
            command = "{0} --libs --cflags ".format(executable)

            try:
                output = check_output(command, shell=True,
                                      stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError:
                # Query failed: fall through and use the defaults.
                pass
            else:
                output = output.decode(sys.getfilesystemencoding())
                use_defaults = False

                # Prepend each flag's payload to the matching attribute.
                for token in output.split():
                    attr = flag_map.get(token[:2])
                    if attr is not None:
                        getattr(ext, attr).insert(0, token[2:])

        if use_defaults:
            basedirs = get_base_dirs()
            for base in basedirs:
                for include in default_include_dirs:
                    dir = os.path.join(base, include)
                    if os.path.exists(dir):
                        ext.include_dirs.append(dir)
                for lib in default_library_dirs:
                    dir = os.path.join(base, lib)
                    if os.path.exists(dir):
                        ext.library_dirs.append(dir)
            ext.libraries.extend(default_libraries)
            return True

        return False

    def get_version(self, package):
        """
        Get the version of the package from pkg-config.
        """
        if not self.has_pkgconfig:
            return None

        status, output = getstatusoutput(
            self.pkg_config + " %s --modversion" % (package))
        if status == 0:
            return output

        # Package unknown to pkg-config.
        return None
# The PkgConfig class should be used through this singleton
# (instantiated once at import time so the pkg-config probe runs only once).
pkg_config = PkgConfig()
class CheckFailed(Exception):
    """Raised when a `SetupPackage.check` method cannot satisfy a dependency."""
class SetupPackage(object):
optional = False
    def check(self):
        """
        Checks whether the dependencies are met.  Should raise a
        `CheckFailed` exception if the dependency could not be met,
        otherwise return a string indicating a version number or some
        other message indicating what was found.
        """
        # Base implementation: nothing to check.
        pass
    def get_packages(self):
        """
        Get a list of package names to add to the configuration.
        These are added to the `packages` list passed to
        `distutils.setup`.
        """
        # Base implementation: contributes no packages.
        return []
    def get_namespace_packages(self):
        """
        Get a list of namespace package names to add to the configuration.
        These are added to the `namespace_packages` list passed to
        `distutils.setup`.
        """
        # Base implementation: contributes no namespace packages.
        return []
    def get_py_modules(self):
        """
        Get a list of top-level modules to add to the configuration.
        These are added to the `py_modules` list passed to
        `distutils.setup`.
        """
        # Base implementation: contributes no top-level modules.
        return []
    def get_package_data(self):
        """
        Get a package data dictionary to add to the configuration.
        These are merged into to the `package_data` list passed to
        `distutils.setup`.
        """
        # Base implementation: contributes no package data.
        return {}
    def get_extension(self):
        """
        Get a list of C extensions (`distutils.core.Extension`
        objects) to add to the configuration.  These are added to the
        `extensions` list passed to `distutils.setup`.
        """
        # Base implementation: no C extension to build.
        return None
    def get_install_requires(self):
        """
        Get a list of Python packages that we require.
        pip/easy_install will attempt to download and install this
        package if it is not installed.
        """
        # Base implementation: no runtime requirements.
        return []
    def get_setup_requires(self):
        """
        Get a list of Python packages that we require at build time.
        pip/easy_install will attempt to download and install this
        package if it is not installed.
        """
        # Base implementation: no build-time requirements.
        return []
def _check_for_pkg_config(self, package, include_file, min_version=None,
version=None):
"""
A convenience function for writing checks for a
pkg_config-defined dependency.
`package` is the pkg_config package name.
`include_file` is a top-level include file we expect to find.
`min_version` is the minimum version required.
`version` will override the found version if this package
requires an alternate method for that. Set version='unknown'
if the version is not known but you still want to disabled
pkg_config version check.
"""
if version is None:
version = pkg_config.get_version(package)
if version is None:
raise CheckFailed(
"pkg-config information for '%s' could not be found." %
package)
if min_version == 'PATCH':
raise CheckFailed(
"Requires patches that have not been merged upstream.")
if min_version and version != 'unknown':
if (not is_min_version(version, min_version)):
raise CheckFailed(
"Requires %s %s or later. Found %s." %
(package, min_version, version))
ext = | |
u'bn_lid_l_u_07', u'bn_lid_l_d_04', u'bn_lid_l_d_05', u'bn_lid_l_u_08', u'bn_lid_l_u_03',
u'bn_br_l_08', u'bn_br_l_09', u'bn_lid_l_u_01', u'bn_lid_l_u_02', u'bn_br_l_04',
u'bn_br_l_05', u'bn_br_l_06', u'bn_br_l_07', u'bn_br_l_01', u'bn_br_l_02', u'bn_br_l_03',
u'bn_nose_l', u'bn_mouth_l_01', u'bn_cheek_l_04', u'bn_cheek_l_02', u'bn_cheek_l_03',
u'bn_cheek_l_05', u'bn_mouth_l_02', u'bn_mouth_l_03', u'bn_cheek_l_06', u'bn_cheek_l_07',
u'bn_mouth_l_04', u'bn_lip_l_u_02', u'bn_lip_l', u'bn_lip_l_u_04', u'bn_lip_l_u_05',
u'bn_lip_c_u_02', u'bn_lip_l_u_01', u'bn_lip_c_u', u'bn_lip_c_u_01', u'bn_lip_l_u_03',
u'bn_cheek_l_01', u'bn_cheek_r_06', u'bn_lip_r', u'bn_lip_r_u_04', u'bn_mouth_r_02',
u'bn_cheek_r_04', u'bn_cheek_r_07', u'bn_lip_r_u_01', u'bn_lip_r_u_05', u'bn_br_r_01',
u'bn_lid_r_d_05', u'bn_lid_r_d_03', u'bn_lid_r_d_02', u'bn_lid_r_u_02', u'bn_lid_r_d_04',
u'bn_lid_r_u_08', u'bn_lid_r_u_07', u'bn_lid_r_u_03', u'bn_br_r_02', u'bn_cheek_r_03',
u'bn_cheek_r_02', u'bn_mouth_r_01', u'bn_lid_r_u_01', u'bn_br_r_05', u'bn_br_r_04',
u'bn_br_r_07', u'bn_cheek_r_05', u'bn_nose_r', u'bn_lid_r_u_06', u'bn_lid_r_u_04',
u'bn_lid_r_u_05', u'bn_br_r_09', u'bn_br_r_08', u'bn_br_r_06', u'bn_br_r_03',
u'bn_lip_r_u_03', u'bn_mouth_r_03', u'bn_mouth_r_04', u'bn_lip_r_u_02', u'bn_cheek_r_01',
u'bn_lid_l_d_01', u'bn_nose_c', u'bn_br_c', u'bn_chin_l', u'bn_chin_r', u'bn_chin_c',
u'bn_lip_c_d_02', u'bn_lip_r_d_05', u'bn_lip_l_d_05', u'bn_lip_c_d_01', u'bn_lip_r_d_03',
u'bn_lip_l_d_03', u'bn_lip_l_d_04', u'bn_lip_r_d_04', u'bn_lip_r_d_01', u'bn_lip_l_d_01',
u'bn_lip_c_d', u'bn_lip_r_d_02', u'bn_lip_l_d_02', u'bn_lid_r_d_01', u'bn_lip_r_u_06',
u'bn_lip_l_u_06']
sk = cmds.skinCluster(tmp_bones, head, tsb=True)
cmds.setAttr("%s.envelope" % sk[0], 0)
return sk
def get_joints(invert=False):
    '''Gets all the child joints from the 'Bip01'

    Args:
        invert (bool): when True, return the selected non-joint nodes
            from the hierarchy instead of the joints.

    Returns:
        list: all joints in hierarchy (or the non-joints when `invert`);
        None when 'Bip01' does not exist.
    '''
    root = 'Bip01'
    if not cmds.objExists(root):
        return
    cmds.select(root, hierarchy=True)
    selection = cmds.ls(sl=True, fl=True)
    # NOTE(review): this matches '*Nub*' joints scene-wide, not only under
    # Bip01 — confirm that deselecting all nubs is intended.
    nubs = cmds.ls('*Nub*', type='joint')
    if nubs:
        cmds.select(nubs, d=True)
    jnts = cmds.ls(type='joint', selection=True)
    # Leave the scene with an empty selection.
    cmds.select(clear=True)
    if invert:
        return [o for o in selection if o not in jnts]
    else:
        return jnts
def get_meshes():
    '''Gets all the transform meshes node names from a scene

    Returns:
        list: all meshes in scene (None when the scene has no meshes)
    '''
    shapes = cmds.ls('*', type='mesh')
    transforms = cmds.listRelatives(shapes, parent=True)
    if not transforms:
        return
    # De-duplicate: several shapes can share one transform parent.
    return list(set(transforms))
def unlock_attributes(o):
    '''Unlocks every locked attribute on node `o`.'''
    # listAttr returns None when nothing is locked; treat that as empty.
    for atr in cmds.listAttr(o, l=True) or []:
        cmds.setAttr('%s.%s' % (o, atr), lock=0)
def reset(objects):
    '''Deletes all connections and history from a given objects.
    And freezes transform.

    Args:
        objects (list): string list of objects

    Returns:
        None:
    '''
    axis = ['scaleX', 'scaleY', 'scaleZ', 'rotateX', 'rotateY',
            'rotateZ', 'translateX', 'translateY', 'translateZ']
    # Evaluate at frame 0 so deletions happen in the rest pose.
    cmds.currentTime(0)
    for o in objects:
        # Drop construction history, constraints and animation curves.
        cmds.delete(o, ch=True, cn=True, tac=True, e=True)
        unlock_attributes(o)
        # Remove any skin cluster and its stored bind pose.
        sk = cmds.listConnections(o, type='skinCluster')
        if sk:
            cmds.delete(sk)
        bp = cmds.listConnections(o, type='dagPose')
        if bp:
            cmds.delete(bp)
        # Break remaining incoming connections on each transform channel.
        for a in axis:
            conn = cmds.listConnections(o + "." + a, s=True, p=True)
            if conn:
                cmds.disconnectAttr(conn[0], o + "." + a)
    cmds.delete(objects, c=True)
    cmds.delete(objects, ch=True)
    # Freeze transforms so the current pose becomes identity.
    cmds.makeIdentity(objects, apply=True)
def add_zeroes(num, digits=2):
'''Gerenerates zeroes in front of the given digit.
Args:
num (int): input digit that is processed.
digits (int, optional): quantity of digits.
Returns:
str:
Examples:
>>> add_zeroes(2, digits=2)
'02'
>>> add_zeroes(2, digits=3)
'002'
'''
if isinstance(num, int) and isinstance(digits, int):
num = str(num)
zeroes_quantity = digits - len(num)
if zeroes_quantity > 0:
zeroes = (str(0) * zeroes_quantity)
return zeroes + num
elif zeroes_quantity == 0:
return num
else:
print 'digits', digits, 'less than', num, 'returning', num
return num
else:
exit_message = str(
['"update_number()" accepts "int" only, got', type(num), type(digits)])
sys.exit(exit_message)
def create_bip_blendshapes(start=1, end=19, head=''):
    '''Duplicates the head mesh once per frame in [start, end).

    Each duplicate is renamed '<head>_<NN>' with a two-digit frame suffix.
    NOTE(review): `end` is exclusive (range), so the defaults cover frames
    1-18 — confirm that is intended.
    '''
    if not head:
        head = get_head()  # sibling helper defined elsewhere in this module
    for key in range(start, end):
        cmds.currentTime(key, edit=True)
        cmds.select(head)
        new_name = cmds.duplicate()
        cmds.rename(new_name[0], head + '_' + add_zeroes(key, digits=2))
        # Strip construction history from the duplicate.
        cmds.delete(ch=True)
def prepare_buttons(path):
    '''
    Take .py or .pyc path to script
    Return dictionary button label:function

    The script is scanned for lines of the form `def name(*args):`; each
    becomes an entry {'name with spaces': 'c.name'}.
    '''
    path = path.replace('.pyc', '.py')
    # FIX: open read-only ('r+' requested write access for no reason) and do
    # not name the handle `cmds`, which shadowed the maya.cmds module inside
    # this function.
    with open(path, 'r') as script_file:
        commands = script_file.read().splitlines()
    defs = [d for d in commands if 'def ' in d]
    to_del = ['def ', '(*args):']
    buttons = []
    for d in defs:
        for i in to_del:
            d = d.replace(i, '')
        buttons.append(d)
    labeled_buttons = {}
    for b in buttons:
        labeled_buttons[b.replace('_', ' ')] = 'c.' + b
    return labeled_buttons
def get_blendshape_node(mesh):
    '''Returns the blendShape node connected to `mesh`.

    Args:
        mesh (str): transform mesh name.

    Returns:
        str: blendshape node name if connected, otherwise None.
    '''
    # Walk every blendShape object set and look for the mesh among its
    # connections.
    bls_set = cmds.ls('blendShape*', type='objectSet')
    for bl_set in bls_set:
        conns = cmds.listConnections(bl_set)
        if mesh in conns:
            bl = cmds.ls(conns, type='blendShape')
            return bl[0]
    # Falls through (implicitly returns None) when nothing is connected.
    print 'No blendshape connected to', mesh
def duplicate_blendshapes(bl=''):
    '''Duplicates all blendshapes of a node by calling them one by one.

    Number prefix of duplicated mesh is taken from a blendshape name if present.
    Otherwise, Maya takes care of a prefix by itself.

    Args:
        bl (str, optional): blendShape node to use; resolved from the
            current selection when empty.

    Returns:
        list: duplicated meshes

    Examples:
        >>> duplicate_blendshapes()
        ['yermak_head_01', 'yermak_head_02']
    '''
    # Operates on the first currently-selected mesh.
    mesh = cmds.ls(sl=True, flatten=True)[0]
    if not bl:
        bl = get_blendshape_node(mesh)
    cmds.currentTime(0)
    targets = cmds.blendShape(bl, t=True, q=True)
    # If blendshape meshes were deleted from scene
    if not targets:
        targets = cmds.listAttr(bl + '.w', multi=True)
    # Generate dict for set each blendshapes in range to 1 infl per call
    weights_bls = {}
    renamed = []
    group_name = '%s_bls_duplicated' % mesh
    cmds.group(name=group_name, empty=True)
    # Get index mapping for blendshape targets.
    # Example: {0:'yermak_19', 1:'yermak_20'}
    for t in range(0, len(targets)):
        # One-hot weight list: only target `t` is enabled.
        weight = [(i, 0) for i in range(0, len(targets))]
        weight[t] = (t, 1)
        weights_bls[targets[t]] = weight
    for bl_mesh, bl_weight in weights_bls.iteritems():
        # Apply the one-hot weights, duplicate the deformed mesh, and name
        # the duplicate after its blendshape target.
        cmds.blendShape(bl, w=bl_weight, edit=True)
        d_name = cmds.duplicate(mesh)
        cmds.parent(d_name, group_name)
        new_name = cmds.rename(d_name[0], bl_mesh)
        renamed.append(new_name)
    return renamed
def skin_eyes():
'''
Prepare eye for export to 4A engine
Add the skin and the proper weights
'''
# Add check for skinscuster
eyes = cmds.ls('*l_eye', '*r_eye')
if not eyes:
sys.exit('No match for the pattern *l_eye *r_eye')
elif len(eyes) != 2:
sys.exit('More or less than 2 objects match the pattern *l_eye *r_eye')
reset(eyes)
# Center pivot
cmds.xform(eyes, cp=True, p=True)
l_prefix = '_l'
r_prefix = '_r'
jnts_list = ['bn_eye_r', 'bn_eye_l', 'Bip01_Head', 'Bip01']
for jnt in jnts_list:
if not cmds.objExists(jnt):
sys.exit('joint not found', jnt)
for eye in eyes:
jnts_hi = get_joints()
if l_prefix in eye:
eye_jnt = 'bn_eye_l'
elif r_prefix in eye:
eye_jnt = 'bn_eye_r'
else:
sys.exit('No prefix match')
# Align bone to eyes
# Should add a check tor keys and connections on bones
# To prevent jumping thile grabbing timeline
p_constr = cmds.pointConstraint(eye, eye_jnt)
cmds.delete(p_constr)
skin_cluster = cmds.skinCluster(jnts_hi, eye, tsb=True)
# skin_cluster = mel.eval('findRelatedSkinCluster("%s")' % object)
cmds.skinPercent(skin_cluster[0],
eye + '.vtx[*]',
transformValue=[(eye_jnt, 0.99),
(jnts_list[2], 0.01)])
print 'Prepared', eye
def dict_io(path, dict=None, get=False, set=False):
    '''Load or save a pickled dictionary.

    Exactly one of `get` / `set` must be requested; otherwise the
    interpreter exits with a message.

    Args:
        path (str): file path to read from or write to
        dict (dict): dictionary to save when set=True. The parameter name
            shadows the builtin but is kept for caller compatibility.
            Default is now None instead of a shared mutable {} literal.
        get (bool): load and return the dictionary stored at `path`
        set (bool): pickle `dict` to `path`

    Returns:
        dict: the loaded dictionary when get=True, otherwise None
    '''
    if get:
        with open(path, 'rb') as dict_path:
            dict = pickle.loads(dict_path.read())
        print('# Loaded dictionary from %s' % path)
        return dict
    elif set and dict:
        # An empty/None dict falls through to the error exit, matching the
        # previous behavior with the {} default.
        with open(path, 'wb') as dict_path:
            pickle.dump(dict, dict_path)
        print('# Saved dictionary to %s' % path)
    else:
        sys.exit('Command not specified')
def add_bones_to_skin_cluster(mesh, skin_cluster):
    '''Add every skeleton joint missing from `skin_cluster` with zero weight.

    `mesh` is accepted for signature compatibility but not used; the skin
    cluster node alone identifies what to edit.
    '''
    current_influences = set(cmds.skinCluster(skin_cluster, query=True, inf=True))
    missing = [jnt for jnt in get_joints() if jnt not in current_influences]
    if missing:
        # ai=addInfluence, wt=0 keeps existing weighting untouched.
        cmds.skinCluster(skin_cluster, edit=True, ai=missing, wt=0)
def get_info_from_xml(path):
    '''Parse a Maya deformerWeights .xml export.

    Args:
        path (str): path to an .xml file written by cmds.deformerWeights

    Returns:
        dict: {skin cluster name: [joint names]} built from the `deformer`
        and `source` attributes of the <weights> elements. All <weights>
        entries share one joint list, so with a single deformer (the normal
        case for these per-mesh exports) the mapping is complete.
    '''
    root = xml.etree.ElementTree.parse(path).getroot()
    # The previous version also read headerInfo/fileName into a local that
    # was never used; that dead code has been removed.
    info = {}
    jnts = []
    for atype in root.findall('weights'):
        jnts.append(atype.get('source'))
        skin_cluster = atype.get('deformer')
        info[skin_cluster] = jnts
    return info
def get_skin_cluster(mesh):
    '''Return the skin cluster driving `mesh`, or None when it has none.

    Asks MEL's findRelatedSkinCluster first; falls back to scanning the
    construction history for a skinCluster node.
    '''
    found = mel.eval('findRelatedSkinCluster "%s"' % mesh)
    if found:
        return found
    history_clusters = cmds.ls(cmds.listHistory(mesh), type='skinCluster')
    if history_clusters:
        return history_clusters[0]
    return None
def set_skin_cluster(mesh):
    '''Recreate the skin cluster saved for `mesh` from its .xml export.

    Reads <xml_folder>/<mesh>.xml, verifies every saved joint exists and
    that the saved skin cluster name is not already taken, then binds.

    Args:
        mesh (str): transform object to bind

    Returns:
        str or None: the saved skin cluster name on success, None otherwise
    '''
    global xml_folder
    mesh_xml = os.path.join(xml_folder, mesh + '.xml')
    if not os.path.exists(mesh_xml):
        print 'No saved skin cluster found for %s' % mesh
        return None
    else:
        info = get_info_from_xml(mesh_xml)
        # No check for skin cluster name match in scene yet.
        # Python 2: dict.values()/keys() return lists, so [0] works; the
        # export writes one deformer per file, hence the single entry.
        jnts = info.values()[0]
        skin_cluster = info.keys()[0]
        if not exists(jnts):
            print 'Not enough joints to apply saved skin to'
            return None
        if exists(skin_cluster):
            print 'Skin cluster already exists with a given name'
            return None
        cmds.skinCluster(mesh, jnts, name=skin_cluster, mi=4)
        return skin_cluster
def exists(objects):
'''
Takes strings and lists
Return True or False
'''
if isinstance(objects, str) or isinstance(objects, unicode):
if cmds.objExists(objects):
return True
elif isinstance(objects, list):
true_false = []
for o in objects:
if cmds.objExists(o):
true_false.append(True)
else:
true_false.append(False)
if False in true_false:
return False
else:
return True
else:
print 'Types "str", "list" are accepted only'
return False
def fix_skin_cluster_name(mesh, skin_cluster):
    '''Renames skin cluster with a mesh name + prefix "_sc"
    Args:
        mesh (str): transform object with a skin cluster
        skin_cluster (str): skin cluster node name
    Returns:
        str: updated mesh skin cluster name
    '''
    expected = mesh + '_sc'
    # Already conforms to the <mesh>_sc convention - nothing to rename.
    if skin_cluster == expected:
        return skin_cluster
    return cmds.rename(skin_cluster, expected)
def export_weights():
    '''Export skin weights of the selected transforms to per-mesh .xml files.

    For each selected transform with a skin cluster: renames the cluster to
    the <mesh>_sc convention, pads it with any missing skeleton joints
    (zero weight), then writes <mesh>.xml into the global xml_folder via
    cmds.deformerWeights.
    '''
    global xml_folder
    # Evaluate at frame 0 before export.
    # NOTE(review): assumes frame 0 is the bind pose - confirm with the rig.
    cmds.currentTime(0)
    objects = cmds.ls(selection=True, transforms=True, flatten=True)
    if not objects:
        print 'Nothing is selected to save weights from'
        return
    object_and_skin_cluster = {}
    for o in objects:
        skin_cluster = get_skin_cluster(o)
        if skin_cluster:
            skin_cluster = fix_skin_cluster_name(o, skin_cluster)
            object_and_skin_cluster[o] = skin_cluster
    if not object_and_skin_cluster:
        print 'No skin cluster was found on selected'
        return
    # Python 2 dict iteration; one xml file per skinned mesh.
    for o, skin_cluster in object_and_skin_cluster.iteritems():
        add_bones_to_skin_cluster(o, skin_cluster)
        cmds.deformerWeights(o + '.xml',
                             path=xml_folder,
                             ex=True,
                             deformer=skin_cluster,
                             method='index')
def import_weights():
global xml_folder
cmds.currentTime(0)
objects = cmds.ls(selection=True, transforms=True, flatten=True)
if not objects:
print 'Nothing is selected to import weights from'
return
for o in objects:
| |
<gh_stars>1-10
import django
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'exotic_bay_project.settings')
django.setup()
from exotic_bay.models import Pet
import datetime
def populate():
pets = {'Mexican Red Knee':
{'scientificName': '<NAME> (ex smithi)',
'price': 35,
'type': 'Inverts',
'stock': 1000,
'description': 'Mexican Red Knee is a popular choice for beginners and enthusiasts alike. Like most '
'tarantulas, it has a long lifespan.',
'careDetails': 'This spider needs to be kept in a terrestial style plastic faunarium or suitable '
'glass enclosure.',
'orders': 0,
'dateAdded': datetime.date(2020, 2, 4),
'image': 'pet_images/mexican-red-knee.jpg'},
'Panther Chameleon':
{'scientificName': '<NAME>',
'price': '40',
'type': 'Reptiles',
'stock': 439,
'description': 'The Panther Chameleon, being one of the easiest species of Chameleon to own makes '
'them a favourite among lizard owners.',
'careDetails': 'This breed of Chameleon is incredibly territorial and usually should be housed '
'individually in a mesh enclosure. ',
'orders': 0,
'dateAdded': datetime.date(2020, 3, 9),
'image': 'pet_images/panther-chameleon.jpg'},
'Bearded Dragon':
{'scientificName': '<NAME>',
'price': '50',
'type': 'Reptiles',
'stock': 6000,
'description': 'Bearded Dragons are the most popular pet lizard, naturally found only throughout '
'Australia. They are a large species growing to a total length of 15-24 inches when '
'adult.',
'careDetails': 'As a native of Australia, bearded dragon lizards need to be kept in a hot, '
'dry environment. Bearded dragons like to spend part of their day '
'basking'' at high temperatures to warm their body. Once up to temperature, they will '
'often move to '
'cooler areas. Whilst basking, bearded dragons also absorb strong UVB from the sun '
'which enables them to produce vitamin D in their body which is essential for '
'utilising calcium. '
'\nThe bearded dragon diet is omnivorous which means that they eat both animals and '
'vegetation. Bearded dragons are particularly fond of insects and worms, '
'but can tackle larger prey if they wish! As adults, bearded dragons are 80% '
'vegetarian.',
'orders': 0,
'dateAdded': datetime.date(2020, 3, 15),
'image': 'pet_images/bearded-dragon.jpg'},
'Axolotl':
{'scientificName': 'Ambystoma mexicanum',
'price': '40',
'type': 'Amphibians',
'stock': 50,
'description': 'Axolotls are large aquatic salamanders only found in parts of Mexico. They are easy '
'to keep and grow to an impressive 30cm, making the Axolotl a popular exotic pet.',
'careDetails': 'A good set up for one Axolotl would consist of an aquarium of 60 x38 x30cm (24 x 15 '
'x 12in). The water should be around 10-20 C (50-68 F) and shallow, as deep as the '
'Axolotl is long. Decorate the aquarium with a mixture of plastic plants and '
'oxygenating plant, with maybe a couple of large pebbles. \n\nHowever, do not over crowd '
'your set up - make sure your Axolotl has plenty of space. Keep your set-up out of '
'direct light as Axolotls do not have eyelids and are sensitive to too much light. ',
'orders': 0,
'dateAdded': datetime.date(2020, 1, 5),
'image': 'pet_images/axolotl.jpg'},
'Sugar Glider':
{'scientificName': '<NAME>',
'price': '150',
'type': 'Marsupials',
'stock': 150,
'description': 'Sugar Gliders are small marsupials in the same general family as a Kangaroo or '
'Koala Bear. They are originally from the rainforests of Australia and Indonesia, '
'and have been domestically bred as household pets for the last 12-15 years.',
'careDetails': 'Sugar gliders should be housed in as large a cage as possible to enable them to '
'jump, leap, and glide around. Minimum size cage requirements for a single glider are '
'3’ x 2’ x 3’. Securely locked, metal cages with bar spacing no more than 0.5” apart '
'are best, as sugar gliders are notorious escape artists. They should be allowed out '
'of their cages daily for exercise but only when closely supervised, as their curious '
'nature tends to get them into trouble.',
'orders': 0,
'dateAdded': datetime.date(2020, 3, 16),
'image': 'pet_images/sugar-glider.jpg'},
'Fennec Fox':
{'scientificName': '<NAME>',
'price': '1500',
'type': 'Canidae',
'stock': 10,
'description': 'The fennec fox, also called fennec or desert fox, is a small crepuscular fox native '
'to the Sahara Desert, the Sinai Peninsula, Arava desert and the Arabian desert. Its '
'most distinctive feature is its unusually large ears, which also serve to dissipate '
'heat.',
'careDetails': 'Fennecs are very active and need an outlet for their energy. They are curious and '
'will get into anything and everything. They are also known for their digging. '
'Outdoor enclosures must be designed to prevent them from digging under or climbing '
'over the fence, both of which they will do quite readily. '
'\n\nBurying a significant portion of the fence and turning the fence in at the top (or '
'completely covering the enclosure) should prevent escape. Don''t skimp on materials, '
'though, because these foxes can dig holes up to (or down to) 20 feet deep if they''re feeling motivated.',
'orders': 0,
'dateAdded': datetime.date(2020, 1, 1),
'image': 'pet_images/fennec-fox.jpg'},
'Blue Poison Dart Frog':
{'scientificName': '<NAME>',
'price': '95',
'type': 'Amphibians',
'stock': 420,
'description': 'Bright blue animals are a rarity in nature and this species is unquestionably one of '
'the most spectacular. Few dart frog collections exist without Blue Poison Dart Frogs '
'being present; a must for any serious keeper.',
'careDetails': 'House this species singularly, in sexed pairs or sexed trios of (2.1). Upon '
'maturity, females can be aggressive towards one another, so it usually best to house '
'one female per enclosure. However, if enough space is provided they will often form '
'their own territories. Provide a glass terrarium of at least 45 x 45 x 45cm (18 x 18 '
'x 18”) to house an adult sexed pair, and larger, if there are more of them. Young '
'frogs can be reared in plastic terrariums or smaller glass enclosures.',
'orders': 0,
'dateAdded': datetime.date(2020, 3, 20),
'image': 'pet_images/blue-poison-dart-frog.jpg'},
'Green & Black Poison Dart Frog':
{'scientificName': '<NAME>',
'price': '80',
'type': 'Amphibians',
'stock': 50,
'description': 'This is a popular species of arrow frog amongst hobbyists. It is '
'found in Central America and '
'northwestern parts of South America. '
'There are many different colour forms available.',
'careDetails': 'House this species singularly, in sexed pairs or sexed trios of (2.1). Upon '
'maturity, females can be aggressive towards one another, so it usually best to house '
'one female per enclosure. However, if enough space is provided they will often form '
'their own territories. Provide a glass terrarium of at least 45 x 45 x 45cm (18 x 18 '
'x 18”) to house an adult sexed pair, and larger, if there are more of them. Young '
'frogs can be reared in plastic terrariums or smaller glass enclosures.',
'orders': 0,
'dateAdded': datetime.date(2020, 3, 19),
'image': 'pet_images/Green-Black-Poison-Dart-Frogs.jpg'},
'Corn Snake':
{'scientificName': '<NAME>',
'price': '38',
'type': 'Reptiles',
'stock': 69,
'description': 'Corn snakes are popular option for beginner reptile enthusiasts, with their variety '
'of colours and calm temperament making them reluctant to bite or construct when '
'placed under mild stress.',
'careDetails': 'Since corn snakes typically average a size of 40 inches when fully grown, this snake '
'does not need to be stored in a small to medium sized vivarium. Mice should be the most '
'common food for the snake, ensuring it is fully defrosted and no larger than 1.5x the '
'widest part of the snake. ',
'orders': 0,
'dateAdded': datetime.date(2020, 3, 17),
'image': 'pet_images/corn-snake.jpg'},
'Spotted Python':
{'scientificName': '<NAME>',
'price': '150',
'type': 'Reptiles',
'stock': 10,
'description': 'The spotted python, with its beautiful and distinguished pattern, is one '
'one of the most sought after species of Australian python. Its renowned for its '
'calm nature and relatively small size.',
'careDetails': 'At a young age, Spotted Pythons are best housed within a small and flat surfaced '
'plastic tank since placing the snake inside a larger environment too early can cause '
'them stress. Once Adults, they should be moved into an enclosure of at least 3ft. The '
'enclosure should be maintained at a | |
<filename>tests/test__default.py
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import mock
import pytest # type: ignore
from google.auth import _default
from google.auth import app_engine
from google.auth import aws
from google.auth import compute_engine
from google.auth import credentials
from google.auth import environment_vars
from google.auth import exceptions
from google.auth import external_account
from google.auth import identity_pool
from google.auth import impersonated_credentials
from google.oauth2 import service_account
import google.oauth2.credentials
# Fixture files and credential configuration dicts shared by the tests below.
DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
AUTHORIZED_USER_FILE = os.path.join(DATA_DIR, "authorized_user.json")
with open(AUTHORIZED_USER_FILE) as fh:
    AUTHORIZED_USER_FILE_DATA = json.load(fh)
AUTHORIZED_USER_CLOUD_SDK_FILE = os.path.join(
    DATA_DIR, "authorized_user_cloud_sdk.json"
)
AUTHORIZED_USER_CLOUD_SDK_WITH_QUOTA_PROJECT_ID_FILE = os.path.join(
    DATA_DIR, "authorized_user_cloud_sdk_with_quota_project_id.json"
)
SERVICE_ACCOUNT_FILE = os.path.join(DATA_DIR, "service_account.json")
CLIENT_SECRETS_FILE = os.path.join(DATA_DIR, "client_secrets.json")
with open(SERVICE_ACCOUNT_FILE) as fh:
    SERVICE_ACCOUNT_FILE_DATA = json.load(fh)
# External account (workload / workforce identity federation) fixtures.
SUBJECT_TOKEN_TEXT_FILE = os.path.join(DATA_DIR, "external_subject_token.txt")
TOKEN_URL = "https://sts.googleapis.com/v1/token"
AUDIENCE = "//iam.googleapis.com/projects/123456/locations/global/workloadIdentityPools/POOL_ID/providers/PROVIDER_ID"
WORKFORCE_AUDIENCE = (
    "//iam.googleapis.com/locations/global/workforcePools/POOL_ID/providers/PROVIDER_ID"
)
WORKFORCE_POOL_USER_PROJECT = "WORKFORCE_POOL_USER_PROJECT_NUMBER"
# AWS metadata-server style endpoints used by the aws credential source.
REGION_URL = "http://169.254.169.254/latest/meta-data/placement/availability-zone"
SECURITY_CREDS_URL = "http://169.254.169.254/latest/meta-data/iam/security-credentials"
CRED_VERIFICATION_URL = (
    "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15"
)
IDENTITY_POOL_DATA = {
    "type": "external_account",
    "audience": AUDIENCE,
    "subject_token_type": "urn:ietf:params:oauth:token-type:jwt",
    "token_url": TOKEN_URL,
    "credential_source": {"file": SUBJECT_TOKEN_TEXT_FILE},
}
AWS_DATA = {
    "type": "external_account",
    "audience": AUDIENCE,
    "subject_token_type": "urn:ietf:params:aws:token-type:aws4_request",
    "token_url": TOKEN_URL,
    "credential_source": {
        "environment_id": "aws1",
        "region_url": REGION_URL,
        "url": SECURITY_CREDS_URL,
        "regional_cred_verification_url": CRED_VERIFICATION_URL,
    },
}
SERVICE_ACCOUNT_EMAIL = "<EMAIL>"
SERVICE_ACCOUNT_IMPERSONATION_URL = (
    "https://us-east1-iamcredentials.googleapis.com/v1/projects/-"
    + "/serviceAccounts/{}:generateAccessToken".format(SERVICE_ACCOUNT_EMAIL)
)
# Same configs as above, with service account impersonation enabled.
IMPERSONATED_IDENTITY_POOL_DATA = {
    "type": "external_account",
    "audience": AUDIENCE,
    "subject_token_type": "urn:ietf:params:oauth:token-type:jwt",
    "token_url": TOKEN_URL,
    "credential_source": {"file": SUBJECT_TOKEN_TEXT_FILE},
    "service_account_impersonation_url": SERVICE_ACCOUNT_IMPERSONATION_URL,
}
IMPERSONATED_AWS_DATA = {
    "type": "external_account",
    "audience": AUDIENCE,
    "subject_token_type": "urn:ietf:params:aws:token-type:aws4_request",
    "token_url": TOKEN_URL,
    "credential_source": {
        "environment_id": "aws1",
        "region_url": REGION_URL,
        "url": SECURITY_CREDS_URL,
        "regional_cred_verification_url": CRED_VERIFICATION_URL,
    },
    "service_account_impersonation_url": SERVICE_ACCOUNT_IMPERSONATION_URL,
}
IDENTITY_POOL_WORKFORCE_DATA = {
    "type": "external_account",
    "audience": WORKFORCE_AUDIENCE,
    "subject_token_type": "urn:ietf:params:oauth:token-type:id_token",
    "token_url": TOKEN_URL,
    "credential_source": {"file": SUBJECT_TOKEN_TEXT_FILE},
    "workforce_pool_user_project": WORKFORCE_POOL_USER_PROJECT,
}
IMPERSONATED_IDENTITY_POOL_WORKFORCE_DATA = {
    "type": "external_account",
    "audience": WORKFORCE_AUDIENCE,
    "subject_token_type": "urn:ietf:params:oauth:token-type:id_token",
    "token_url": TOKEN_URL,
    "credential_source": {"file": SUBJECT_TOKEN_TEXT_FILE},
    "service_account_impersonation_url": SERVICE_ACCOUNT_IMPERSONATION_URL,
    "workforce_pool_user_project": WORKFORCE_POOL_USER_PROJECT,
}
# Impersonated service account fixture files.
IMPERSONATED_SERVICE_ACCOUNT_AUTHORIZED_USER_SOURCE_FILE = os.path.join(
    DATA_DIR, "impersonated_service_account_authorized_user_source.json"
)
IMPERSONATED_SERVICE_ACCOUNT_WITH_QUOTA_PROJECT_FILE = os.path.join(
    DATA_DIR, "impersonated_service_account_with_quota_project.json"
)
IMPERSONATED_SERVICE_ACCOUNT_SERVICE_ACCOUNT_SOURCE_FILE = os.path.join(
    DATA_DIR, "impersonated_service_account_service_account_source.json"
)
# Shared mock credentials; with_quota_project returns the mock itself so
# chained calls in _default keep operating on the same object.
MOCK_CREDENTIALS = mock.Mock(spec=credentials.CredentialsWithQuotaProject)
MOCK_CREDENTIALS.with_quota_project.return_value = MOCK_CREDENTIALS
def get_project_id_side_effect(self, request=None):
    """Stand-in for external_account.Credentials.get_project_id.

    Mirrors the real behavior at the level these tests need: without scopes
    the project ID cannot be determined (None); with scopes a sentinel
    project ID is returned.
    """
    return mock.sentinel.project_id if self.scopes else None
# Patches load_credentials_from_file to hand back the shared mock credentials
# and a sentinel project ID.
LOAD_FILE_PATCH = mock.patch(
    "google.auth._default.load_credentials_from_file",
    return_value=(MOCK_CREDENTIALS, mock.sentinel.project_id),
    autospec=True,
)
# Routes external_account.Credentials.get_project_id through the fake above.
EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH = mock.patch.object(
    external_account.Credentials,
    "get_project_id",
    side_effect=get_project_id_side_effect,
    autospec=True,
)
def test_load_credentials_from_missing_file():
    """A nonexistent credentials path raises DefaultCredentialsError."""
    with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
        _default.load_credentials_from_file("")
    assert excinfo.match(r"not found")
def test_load_credentials_from_file_invalid_json(tmpdir):
    """Malformed JSON in the credentials file raises DefaultCredentialsError."""
    jsonfile = tmpdir.join("invalid.json")
    jsonfile.write("{")
    with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
        _default.load_credentials_from_file(str(jsonfile))
    assert excinfo.match(r"not a valid json file")
def test_load_credentials_from_file_invalid_type(tmpdir):
    """An unrecognized credentials type raises DefaultCredentialsError."""
    jsonfile = tmpdir.join("invalid.json")
    jsonfile.write(json.dumps({"type": "not-a-real-type"}))
    with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
        _default.load_credentials_from_file(str(jsonfile))
    assert excinfo.match(r"does not have a valid type")
def test_load_credentials_from_file_authorized_user():
    """Authorized-user JSON loads as oauth2 user credentials; no project ID."""
    credentials, project_id = _default.load_credentials_from_file(AUTHORIZED_USER_FILE)
    assert isinstance(credentials, google.oauth2.credentials.Credentials)
    assert project_id is None
def test_load_credentials_from_file_no_type(tmpdir):
    """Valid JSON without a type key is rejected with 'Type is None'."""
    # use the client_secrets.json, which is valid json but not a
    # loadable credentials type
    with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
        _default.load_credentials_from_file(CLIENT_SECRETS_FILE)
    assert excinfo.match(r"does not have a valid type")
    assert excinfo.match(r"Type is None")
def test_load_credentials_from_file_authorized_user_bad_format(tmpdir):
    """An authorized_user file missing required fields is rejected."""
    filename = tmpdir.join("authorized_user_bad.json")
    filename.write(json.dumps({"type": "authorized_user"}))
    with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
        _default.load_credentials_from_file(str(filename))
    assert excinfo.match(r"Failed to load authorized user")
    assert excinfo.match(r"missing fields")
def test_load_credentials_from_file_authorized_user_cloud_sdk():
    """Cloud SDK user creds load but warn unless a quota project is present."""
    with pytest.warns(UserWarning, match="Cloud SDK"):
        credentials, project_id = _default.load_credentials_from_file(
            AUTHORIZED_USER_CLOUD_SDK_FILE
        )
    assert isinstance(credentials, google.oauth2.credentials.Credentials)
    assert project_id is None
    # No warning if the json file has quota project id.
    credentials, project_id = _default.load_credentials_from_file(
        AUTHORIZED_USER_CLOUD_SDK_WITH_QUOTA_PROJECT_ID_FILE
    )
    assert isinstance(credentials, google.oauth2.credentials.Credentials)
    assert project_id is None
def test_load_credentials_from_file_authorized_user_cloud_sdk_with_scopes():
    """Scopes passed at load time are applied to Cloud SDK user credentials."""
    with pytest.warns(UserWarning, match="Cloud SDK"):
        credentials, project_id = _default.load_credentials_from_file(
            AUTHORIZED_USER_CLOUD_SDK_FILE,
            scopes=["https://www.google.com/calendar/feeds"],
        )
    assert isinstance(credentials, google.oauth2.credentials.Credentials)
    assert project_id is None
    assert credentials.scopes == ["https://www.google.com/calendar/feeds"]
def test_load_credentials_from_file_authorized_user_cloud_sdk_with_quota_project():
    """An explicit quota project is applied to the loaded user credentials."""
    credentials, project_id = _default.load_credentials_from_file(
        AUTHORIZED_USER_CLOUD_SDK_FILE, quota_project_id="project-foo"
    )
    assert isinstance(credentials, google.oauth2.credentials.Credentials)
    assert project_id is None
    assert credentials.quota_project_id == "project-foo"
def test_load_credentials_from_file_service_account():
    """Service-account JSON loads with its embedded project ID."""
    credentials, project_id = _default.load_credentials_from_file(SERVICE_ACCOUNT_FILE)
    assert isinstance(credentials, service_account.Credentials)
    assert project_id == SERVICE_ACCOUNT_FILE_DATA["project_id"]
def test_load_credentials_from_file_service_account_with_scopes():
    """Scopes passed at load time are applied to service-account creds."""
    credentials, project_id = _default.load_credentials_from_file(
        SERVICE_ACCOUNT_FILE, scopes=["https://www.google.com/calendar/feeds"]
    )
    assert isinstance(credentials, service_account.Credentials)
    assert project_id == SERVICE_ACCOUNT_FILE_DATA["project_id"]
    assert credentials.scopes == ["https://www.google.com/calendar/feeds"]
def test_load_credentials_from_file_service_account_with_quota_project():
    """An explicit quota project is applied to service-account creds."""
    credentials, project_id = _default.load_credentials_from_file(
        SERVICE_ACCOUNT_FILE, quota_project_id="project-foo"
    )
    assert isinstance(credentials, service_account.Credentials)
    assert project_id == SERVICE_ACCOUNT_FILE_DATA["project_id"]
    assert credentials.quota_project_id == "project-foo"
def test_load_credentials_from_file_service_account_bad_format(tmpdir):
    """A service_account file missing required fields is rejected."""
    filename = tmpdir.join("serivce_account_bad.json")
    filename.write(json.dumps({"type": "service_account"}))
    with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
        _default.load_credentials_from_file(str(filename))
    assert excinfo.match(r"Failed to load service account")
    assert excinfo.match(r"missing fields")
def test_load_credentials_from_file_impersonated_with_authorized_user_source():
    """Impersonated SA config with a user source loads impersonated creds."""
    credentials, project_id = _default.load_credentials_from_file(
        IMPERSONATED_SERVICE_ACCOUNT_AUTHORIZED_USER_SOURCE_FILE
    )
    assert isinstance(credentials, impersonated_credentials.Credentials)
    assert isinstance(
        credentials._source_credentials, google.oauth2.credentials.Credentials
    )
    assert credentials.service_account_email == "<EMAIL>"
    assert credentials._delegates == ["<EMAIL>"]
    assert not credentials._quota_project_id
    assert not credentials._target_scopes
    assert project_id is None
def test_load_credentials_from_file_impersonated_with_quota_project():
    """The quota project from the config file lands on the credentials."""
    credentials, _ = _default.load_credentials_from_file(
        IMPERSONATED_SERVICE_ACCOUNT_WITH_QUOTA_PROJECT_FILE
    )
    assert isinstance(credentials, impersonated_credentials.Credentials)
    assert credentials._quota_project_id == "quota_project"
def test_load_credentials_from_file_impersonated_with_service_account_source():
    """A service-account source yields service_account source credentials."""
    credentials, _ = _default.load_credentials_from_file(
        IMPERSONATED_SERVICE_ACCOUNT_SERVICE_ACCOUNT_SOURCE_FILE
    )
    assert isinstance(credentials, impersonated_credentials.Credentials)
    assert isinstance(credentials._source_credentials, service_account.Credentials)
    assert not credentials._quota_project_id
def test_load_credentials_from_file_impersonated_passing_quota_project():
    """An explicitly passed quota project overrides the file's value."""
    credentials, _ = _default.load_credentials_from_file(
        IMPERSONATED_SERVICE_ACCOUNT_SERVICE_ACCOUNT_SOURCE_FILE,
        quota_project_id="new_quota_project",
    )
    assert credentials._quota_project_id == "new_quota_project"
def test_load_credentials_from_file_impersonated_passing_scopes():
    """Explicitly passed scopes become the impersonated target scopes."""
    credentials, _ = _default.load_credentials_from_file(
        IMPERSONATED_SERVICE_ACCOUNT_SERVICE_ACCOUNT_SOURCE_FILE,
        scopes=["scope1", "scope2"],
    )
    assert credentials._target_scopes == ["scope1", "scope2"]
def test_load_credentials_from_file_impersonated_wrong_target_principal(tmpdir):
    """A malformed impersonation URL (no target principal) is rejected."""
    with open(IMPERSONATED_SERVICE_ACCOUNT_AUTHORIZED_USER_SOURCE_FILE) as fh:
        impersonated_credentials_info = json.load(fh)
    impersonated_credentials_info[
        "service_account_impersonation_url"
    ] = "something_wrong"
    jsonfile = tmpdir.join("invalid.json")
    jsonfile.write(json.dumps(impersonated_credentials_info))
    with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
        _default.load_credentials_from_file(str(jsonfile))
    assert excinfo.match(r"Cannot extract target principal")
def test_load_credentials_from_file_impersonated_wrong_source_type(tmpdir):
    """An unsupported source credential type is rejected."""
    with open(IMPERSONATED_SERVICE_ACCOUNT_AUTHORIZED_USER_SOURCE_FILE) as fh:
        impersonated_credentials_info = json.load(fh)
    impersonated_credentials_info["source_credentials"]["type"] = "external_account"
    jsonfile = tmpdir.join("invalid.json")
    jsonfile.write(json.dumps(impersonated_credentials_info))
    with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
        _default.load_credentials_from_file(str(jsonfile))
    assert excinfo.match(r"source credential of type external_account is not supported")
@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
def test_load_credentials_from_file_external_account_identity_pool(
    get_project_id, tmpdir
):
    """Identity-pool external-account JSON loads as identity_pool creds."""
    config_file = tmpdir.join("config.json")
    config_file.write(json.dumps(IDENTITY_POOL_DATA))
    credentials, project_id = _default.load_credentials_from_file(str(config_file))
    assert isinstance(credentials, identity_pool.Credentials)
    # Since no scopes are specified, the project ID cannot be determined.
    assert project_id is None
    assert get_project_id.called
@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
def test_load_credentials_from_file_external_account_aws(get_project_id, tmpdir):
    """AWS external-account JSON loads as aws credentials."""
    config_file = tmpdir.join("config.json")
    config_file.write(json.dumps(AWS_DATA))
    credentials, project_id = _default.load_credentials_from_file(str(config_file))
    assert isinstance(credentials, aws.Credentials)
    # Since no scopes are specified, the project ID cannot be determined.
    assert project_id is None
    assert get_project_id.called
@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
def test_load_credentials_from_file_external_account_identity_pool_impersonated(
    get_project_id, tmpdir
):
    """Impersonated identity-pool creds are neither user nor workforce."""
    config_file = tmpdir.join("config.json")
    config_file.write(json.dumps(IMPERSONATED_IDENTITY_POOL_DATA))
    credentials, project_id = _default.load_credentials_from_file(str(config_file))
    assert isinstance(credentials, identity_pool.Credentials)
    assert not credentials.is_user
    assert not credentials.is_workforce_pool
    # Since no scopes are specified, the project ID cannot be determined.
    assert project_id is None
    assert get_project_id.called
@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
def test_load_credentials_from_file_external_account_aws_impersonated(
    get_project_id, tmpdir
):
    """Impersonated AWS creds are neither user nor workforce."""
    config_file = tmpdir.join("config.json")
    config_file.write(json.dumps(IMPERSONATED_AWS_DATA))
    credentials, project_id = _default.load_credentials_from_file(str(config_file))
    assert isinstance(credentials, aws.Credentials)
    assert not credentials.is_user
    assert not credentials.is_workforce_pool
    # Since no scopes are specified, the project ID cannot be determined.
    assert project_id is None
    assert get_project_id.called
@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
def test_load_credentials_from_file_external_account_workforce(get_project_id, tmpdir):
    """Workforce-pool config loads as user workforce credentials."""
    config_file = tmpdir.join("config.json")
    config_file.write(json.dumps(IDENTITY_POOL_WORKFORCE_DATA))
    credentials, project_id = _default.load_credentials_from_file(str(config_file))
    assert isinstance(credentials, identity_pool.Credentials)
    assert credentials.is_user
    assert credentials.is_workforce_pool
    # Since no scopes are specified, the project ID cannot be determined.
    assert project_id is None
    assert get_project_id.called
@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
def test_load_credentials_from_file_external_account_workforce_impersonated(
    get_project_id, tmpdir
):
    """With impersonation the workforce creds stop counting as user creds."""
    config_file = tmpdir.join("config.json")
    config_file.write(json.dumps(IMPERSONATED_IDENTITY_POOL_WORKFORCE_DATA))
    credentials, project_id = _default.load_credentials_from_file(str(config_file))
    assert isinstance(credentials, identity_pool.Credentials)
    assert not credentials.is_user
    assert credentials.is_workforce_pool
    # Since no scopes are specified, the project ID cannot be determined.
    assert project_id is None
    assert get_project_id.called
@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
def test_load_credentials_from_file_external_account_with_user_and_default_scopes(
    get_project_id, tmpdir
):
    """User and default scopes both apply, making the project ID resolvable."""
    config_file = tmpdir.join("config.json")
    config_file.write(json.dumps(IDENTITY_POOL_DATA))
    credentials, project_id = _default.load_credentials_from_file(
        str(config_file),
        scopes=["https://www.google.com/calendar/feeds"],
        default_scopes=["https://www.googleapis.com/auth/cloud-platform"],
    )
    assert isinstance(credentials, identity_pool.Credentials)
    # Since scopes are specified, the project ID can be determined.
    assert project_id is mock.sentinel.project_id
    assert credentials.scopes == ["https://www.google.com/calendar/feeds"]
    assert credentials.default_scopes == [
        "https://www.googleapis.com/auth/cloud-platform"
    ]
@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
def test_load_credentials_from_file_external_account_with_quota_project(
    get_project_id, tmpdir
):
    """An explicit quota project is applied to external-account creds."""
    config_file = tmpdir.join("config.json")
    config_file.write(json.dumps(IDENTITY_POOL_DATA))
    credentials, project_id = _default.load_credentials_from_file(
        str(config_file), quota_project_id="project-foo"
    )
    assert isinstance(credentials, identity_pool.Credentials)
    # Since no scopes are specified, the project ID cannot be determined.
    assert project_id is None
    assert credentials.quota_project_id == "project-foo"
def test_load_credentials_from_file_external_account_bad_format(tmpdir):
    """An external_account file missing required fields is rejected."""
    filename = tmpdir.join("external_account_bad.json")
    filename.write(json.dumps({"type": "external_account"}))
    with pytest.raises(exceptions.DefaultCredentialsError) as excinfo:
        _default.load_credentials_from_file(str(filename))
    assert excinfo.match(
        "Failed to load external account credentials from {}".format(str(filename))
    )
@EXTERNAL_ACCOUNT_GET_PROJECT_ID_PATCH
def test_load_credentials_from_file_external_account_explicit_request(
    get_project_id, tmpdir
):
    """An explicit request object is forwarded to get_project_id."""
    config_file = tmpdir.join("config.json")
    config_file.write(json.dumps(IDENTITY_POOL_DATA))
    credentials, project_id = _default.load_credentials_from_file(
        str(config_file),
        request=mock.sentinel.request,
        scopes=["https://www.googleapis.com/auth/cloud-platform"],
    )
    assert isinstance(credentials, identity_pool.Credentials)
    # Since scopes are specified, the project ID can be determined.
    assert project_id is mock.sentinel.project_id
    get_project_id.assert_called_with(credentials, request=mock.sentinel.request)
@mock.patch.dict(os.environ, {}, clear=True)
def test__get_explicit_environ_credentials_no_env():
    """Without the credentials env var set, nothing is returned."""
    assert _default._get_explicit_environ_credentials() == (None, None)
@pytest.mark.parametrize("quota_project_id", [None, "project-foo"])
@LOAD_FILE_PATCH
def test__get_explicit_environ_credentials(load, quota_project_id, monkeypatch):
    """The env-var file path is loaded with the given quota project."""
    monkeypatch.setenv(environment_vars.CREDENTIALS, "filename")
    credentials, project_id = _default._get_explicit_environ_credentials(
        quota_project_id=quota_project_id
    )
    assert credentials is MOCK_CREDENTIALS
    assert project_id is mock.sentinel.project_id
    load.assert_called_with("filename", quota_project_id=quota_project_id)
@LOAD_FILE_PATCH
def test__get_explicit_environ_credentials_no_project_id(load, monkeypatch):
    """A file without a project ID yields credentials and a None project."""
    load.return_value = MOCK_CREDENTIALS, None
    monkeypatch.setenv(environment_vars.CREDENTIALS, "filename")
    credentials, project_id = _default._get_explicit_environ_credentials()
    assert credentials is MOCK_CREDENTIALS
    assert project_id is None
@pytest.mark.parametrize("quota_project_id", [None, "project-foo"])
@mock.patch(
    "google.auth._cloud_sdk.get_application_default_credentials_path", autospec=True
)
@mock.patch("google.auth._default._get_gcloud_sdk_credentials", autospec=True)
def test__get_explicit_environ_credentials_fallback_to_gcloud(
    get_gcloud_creds, get_adc_path, quota_project_id, monkeypatch
):
    """When the env var points at the Cloud SDK ADC path, the gcloud flow is
    used instead of plain file loading."""
    # Set explicit credentials path to cloud sdk credentials path.
    get_adc_path.return_value = "filename"
    monkeypatch.setenv(environment_vars.CREDENTIALS, "filename")
    _default._get_explicit_environ_credentials(quota_project_id=quota_project_id)
    # Check we fall back to cloud sdk flow since explicit credentials path is
    # cloud sdk credentials path
    get_gcloud_creds.assert_called_with(quota_project_id=quota_project_id)
@pytest.mark.parametrize("quota_project_id", [None, "project-foo"])
@LOAD_FILE_PATCH
@mock.patch(
    "google.auth._cloud_sdk.get_application_default_credentials_path", autospec=True
)
def test__get_gcloud_sdk_credentials(get_adc_path, load, quota_project_id):
    """Cloud SDK ADC file is loaded via load_credentials_from_file with the
    quota project forwarded."""
    get_adc_path.return_value = SERVICE_ACCOUNT_FILE
    credentials, project_id = _default._get_gcloud_sdk_credentials(
        quota_project_id=quota_project_id
    )
    assert credentials is MOCK_CREDENTIALS
    assert project_id is mock.sentinel.project_id
    load.assert_called_with(SERVICE_ACCOUNT_FILE, quota_project_id=quota_project_id)
@mock.patch(
    "google.auth._cloud_sdk.get_application_default_credentials_path", autospec=True
)
def test__get_gcloud_sdk_credentials_non_existent(get_adc_path, tmpdir):
    """A missing ADC file yields (None, None) instead of raising."""
    non_existent = tmpdir.join("non-existent")
    get_adc_path.return_value = str(non_existent)
    credentials, project_id = _default._get_gcloud_sdk_credentials()
    assert credentials is None
    assert project_id is None
@mock.patch(
    "google.auth._cloud_sdk.get_project_id",
    return_value=mock.sentinel.project_id,
    autospec=True,
)
@mock.patch("os.path.isfile", return_value=True, autospec=True)
@LOAD_FILE_PATCH
def test__get_gcloud_sdk_credentials_project_id(load, unused_isfile, get_project_id):
    """If the loaded file carries no project ID, the Cloud SDK project is used."""
    # Don't return a project ID from load file, make the function check
    # the Cloud SDK project.
    load.return_value = MOCK_CREDENTIALS, None
    credentials, project_id = _default._get_gcloud_sdk_credentials()
    assert credentials == MOCK_CREDENTIALS
    assert project_id == mock.sentinel.project_id
    assert get_project_id.called
@mock.patch("google.auth._cloud_sdk.get_project_id", return_value=None, autospec=True)
@mock.patch("os.path.isfile", return_value=True)
@LOAD_FILE_PATCH
def | |
import random
import time
from typing import (
Dict,
Iterable,
Sequence,
Tuple,
)
from cytoolz import (
pipe,
)
from eth_typing import (
BLSPubkey,
BLSSignature,
Hash32,
)
from eth_utils import (
to_tuple,
)
from eth.constants import (
ZERO_HASH32,
)
from py_ecc import bls
from eth2._utils.bitfield import (
get_empty_bitfield,
set_voted,
)
from eth2.configs import (
CommitteeConfig,
Eth2Config,
)
from eth2.beacon.constants import (
ZERO_TIMESTAMP,
)
from eth2.beacon.enums import (
SignatureDomain,
)
from eth2.beacon.committee_helpers import (
get_beacon_proposer_index,
get_crosslink_committees_at_slot,
)
from eth2.beacon.exceptions import (
NoCommitteeAssignment,
)
from eth2.beacon.helpers import (
get_block_root,
get_domain,
get_epoch_start_slot,
slot_to_epoch,
)
from eth2.beacon.types.attestations import Attestation
from eth2.beacon.types.attestation_data import AttestationData
from eth2.beacon.types.attestation_data_and_custody_bits import (
AttestationDataAndCustodyBit,
)
from eth2.beacon.types.attester_slashings import AttesterSlashing
from eth2.beacon.types.blocks import BeaconBlockHeader
from eth2.beacon.types.deposit_data import DepositData
from eth2.beacon.types.deposit_input import DepositInput
from eth2.beacon.types.forks import Fork
from eth2.beacon.types.proposer_slashings import ProposerSlashing
from eth2.beacon.types.slashable_attestations import SlashableAttestation
from eth2.beacon.types.states import BeaconState
from eth2.beacon.types.voluntary_exits import VoluntaryExit
from eth2.beacon.typing import (
Bitfield,
CommitteeIndex,
Epoch,
Gwei,
Shard,
Slot,
Timestamp,
ValidatorIndex,
)
from eth2.beacon.state_machines.base import (
BaseBeaconStateMachine,
)
from eth2.beacon.validation import (
validate_epoch_within_previous_and_next,
)
from .committee_assignment import (
CommitteeAssignment,
)
#
# Aggregation
#
def verify_votes(
        message_hash: Hash32,
        votes: Iterable[Tuple[CommitteeIndex, BLSSignature, BLSPubkey]],
        domain: SignatureDomain
) -> Tuple[Tuple[BLSSignature, ...], Tuple[CommitteeIndex, ...]]:
    """
    Filter ``votes`` down to those whose BLS signature over ``message_hash``
    verifies, returning the valid signatures and the committee indices that
    produced them.
    """
    valid_pairs = []
    for committee_index, sig, pubkey in votes:
        is_valid = bls.verify(
            message_hash=message_hash,
            pubkey=pubkey,
            signature=sig,
            domain=domain,
        )
        if is_valid:
            valid_pairs.append((sig, committee_index))
    if valid_pairs:
        sigs, committee_indices = zip(*valid_pairs)
    else:
        # zip(*<empty>) cannot be unpacked; an empty vote set yields two
        # empty tuples.
        sigs, committee_indices = tuple(), tuple()
    return sigs, committee_indices
def aggregate_votes(
        bitfield: Bitfield,
        sigs: Sequence[BLSSignature],
        voting_sigs: Sequence[BLSSignature],
        voting_committee_indices: Sequence[CommitteeIndex]
) -> Tuple[Bitfield, BLSSignature]:
    """
    Merge ``voting_sigs`` into ``sigs``, mark each index in
    ``voting_committee_indices`` as having voted, and return the updated
    bitfield with the aggregate signature.
    """
    combined_sigs = tuple(sigs) + tuple(voting_sigs)
    # ``set_voted(index=i)`` yields a bitfield updater; thread the bitfield
    # through one updater per voting index.
    updaters = tuple(
        set_voted(index=committee_index)
        for committee_index in voting_committee_indices
    )
    updated_bitfield = pipe(bitfield, *updaters)
    return updated_bitfield, bls.aggregate_signatures(combined_sigs)
#
# Signer
#
def sign_proof_of_possession(deposit_input: DepositInput,
                             privkey: int,
                             fork: Fork,
                             slot: Slot,
                             slots_per_epoch: int) -> BLSSignature:
    """Sign ``deposit_input.signing_root`` under the DEPOSIT signature domain."""
    epoch = slot_to_epoch(slot, slots_per_epoch)
    domain = get_domain(fork, epoch, SignatureDomain.DOMAIN_DEPOSIT)
    return bls.sign(
        message_hash=deposit_input.signing_root,
        privkey=privkey,
        domain=domain,
    )
def sign_transaction(*,
                     message_hash: Hash32,
                     privkey: int,
                     fork: Fork,
                     slot: Slot,
                     signature_domain: SignatureDomain,
                     slots_per_epoch: int) -> BLSSignature:
    """Sign ``message_hash`` with ``privkey`` under ``signature_domain`` for
    the epoch containing ``slot``."""
    epoch = slot_to_epoch(slot, slots_per_epoch)
    domain = get_domain(fork, epoch, signature_domain)
    return bls.sign(
        message_hash=message_hash,
        privkey=privkey,
        domain=domain,
    )
# Placeholder roots used as defaults by create_block_header_with_signature
# when the caller does not care about the actual root values (tests only).
SAMPLE_HASH_1 = Hash32(b'\x11' * 32)
SAMPLE_HASH_2 = Hash32(b'\x22' * 32)
def create_block_header_with_signature(
        state: BeaconState,
        block_body_root: Hash32,
        privkey: int,
        slots_per_epoch: int,
        previous_block_root: Hash32=SAMPLE_HASH_1,
        state_root: Hash32=SAMPLE_HASH_2)-> BeaconBlockHeader:
    """Build a ``BeaconBlockHeader`` at the state's slot and attach the
    signature produced by ``privkey`` under the BEACON_BLOCK domain."""
    unsigned_header = BeaconBlockHeader(
        slot=state.slot,
        previous_block_root=previous_block_root,
        state_root=state_root,
        block_body_root=block_body_root,
    )
    signature = sign_transaction(
        message_hash=unsigned_header.signing_root,
        privkey=privkey,
        fork=state.fork,
        slot=unsigned_header.slot,
        signature_domain=SignatureDomain.DOMAIN_BEACON_BLOCK,
        slots_per_epoch=slots_per_epoch,
    )
    return unsigned_header.copy(signature=signature)
#
#
# Only for test/simulation
#
#
#
# ProposerSlashing
#
def create_mock_proposer_slashing_at_block(
        state: BeaconState,
        config: Eth2Config,
        keymap: Dict[BLSPubkey, int],
        block_root_1: Hash32,
        block_root_2: Hash32,
        proposer_index: ValidatorIndex) -> ProposerSlashing:
    """
    Return a `ProposerSlashing` derived from the given block roots.
    If the header roots do not match, the `ProposerSlashing` is valid.
    If the header roots do match, the `ProposerSlashing` is not valid.
    """
    proposer_privkey = keymap[state.validator_registry[proposer_index].pubkey]
    slots_per_epoch = config.SLOTS_PER_EPOCH
    # Two signed headers over the two roots; both from the same proposer.
    header_1, header_2 = (
        create_block_header_with_signature(
            state,
            block_root,
            proposer_privkey,
            slots_per_epoch,
        )
        for block_root in (block_root_1, block_root_2)
    )
    return ProposerSlashing(
        proposer_index=proposer_index,
        header_1=header_1,
        header_2=header_2,
    )
#
# AttesterSlashing
#
def create_mock_slashable_attestation(state: BeaconState,
                                      config: Eth2Config,
                                      keymap: Dict[BLSPubkey, int],
                                      attestation_slot: Slot) -> SlashableAttestation:
    """
    Create `SlashableAttestation` that is signed by one attester.
    """
    # Single-member committee: validator 0 on shard 0 (test fixture only).
    attester_index = ValidatorIndex(0)
    committee = (attester_index,)
    shard = Shard(0)
    # Use genesis block root as `beacon_block_root`, only for tests.
    beacon_block_root = get_block_root(
        state,
        config.GENESIS_SLOT,
        config.SLOTS_PER_HISTORICAL_ROOT,
    )
    # Get `target_root`
    target_root = _get_target_root(state, config, beacon_block_root)
    # Get `source_root`: block root at the start of the justified epoch.
    source_root = get_block_root(
        state,
        get_epoch_start_slot(state.current_justified_epoch, config.SLOTS_PER_EPOCH),
        config.SLOTS_PER_HISTORICAL_ROOT,
    )
    previous_crosslink = state.latest_crosslinks[shard]
    attestation_data = AttestationData(
        slot=attestation_slot,
        beacon_block_root=beacon_block_root,
        source_epoch=state.current_justified_epoch,
        source_root=source_root,
        target_root=target_root,
        shard=shard,
        previous_crosslink=previous_crosslink,
        crosslink_data_root=ZERO_HASH32,
    )
    # With num_voted_attesters=1 on a one-member committee, exactly one
    # committee index is returned.
    message_hash, voting_committee_indices = _get_mock_message_and_voting_committee_indices(
        attestation_data,
        committee,
        num_voted_attesters=1,
    )
    signature = sign_transaction(
        message_hash=message_hash,
        privkey=keymap[
            state.validator_registry[
                voting_committee_indices[0]
            ].pubkey
        ],
        fork=state.fork,
        slot=attestation_slot,
        signature_domain=SignatureDomain.DOMAIN_ATTESTATION,
        slots_per_epoch=config.SLOTS_PER_EPOCH,
    )
    # Map committee positions back to validator indices.
    validator_indices = tuple(committee[i] for i in voting_committee_indices)
    return SlashableAttestation(
        validator_indices=sorted(validator_indices),
        data=attestation_data,
        custody_bitfield=get_empty_bitfield(len(voting_committee_indices)),
        aggregate_signature=signature,
    )
def create_mock_attester_slashing_is_double_vote(
        state: BeaconState,
        config: Eth2Config,
        keymap: Dict[BLSPubkey, int],
        attestation_epoch: Epoch) -> AttesterSlashing:
    """Return an ``AttesterSlashing`` built from two attestations at adjacent
    slots within the same epoch (a double vote)."""
    first_slot = get_epoch_start_slot(attestation_epoch, config.SLOTS_PER_EPOCH)
    second_slot = Slot(first_slot + 1)
    attestation_1, attestation_2 = (
        create_mock_slashable_attestation(
            state,
            config,
            keymap,
            slot,
        )
        for slot in (first_slot, second_slot)
    )
    return AttesterSlashing(
        slashable_attestation_1=attestation_1,
        slashable_attestation_2=attestation_2,
    )
def create_mock_attester_slashing_is_surround_vote(
        state: BeaconState,
        config: Eth2Config,
        keymap: Dict[BLSPubkey, int],
        attestation_epoch: Epoch) -> AttesterSlashing:
    """
    Return an ``AttesterSlashing`` where attestation 1 surrounds attestation 2:
    its source epoch is earlier and its target slot is later.
    """
    # target_epoch_2 < target_epoch_1
    attestation_slot_2 = get_epoch_start_slot(attestation_epoch, config.SLOTS_PER_EPOCH)
    attestation_slot_1 = Slot(attestation_slot_2 + config.SLOTS_PER_EPOCH)
    # Both attestations are made from a state advanced to slot 1, with the
    # justified epoch tweaked to produce the surround relationship.
    slashable_attestation_1 = create_mock_slashable_attestation(
        state.copy(
            slot=attestation_slot_1,
            current_justified_epoch=config.GENESIS_EPOCH,
        ),
        config,
        keymap,
        attestation_slot_1,
    )
    slashable_attestation_2 = create_mock_slashable_attestation(
        state.copy(
            slot=attestation_slot_1,
            current_justified_epoch=config.GENESIS_EPOCH + 1,  # source_epoch_1 < source_epoch_2
        ),
        config,
        keymap,
        attestation_slot_2,
    )
    return AttesterSlashing(
        slashable_attestation_1=slashable_attestation_1,
        slashable_attestation_2=slashable_attestation_2,
    )
#
# Attestation
#
def _get_target_root(state: BeaconState,
                     config: Eth2Config,
                     beacon_block_root: Hash32) -> Hash32:
    """Return the attestation target root: the block root at the start of the
    state's current epoch, or ``beacon_block_root`` when the state is already
    sitting at that slot."""
    current_epoch = slot_to_epoch(state.slot, config.SLOTS_PER_EPOCH)
    epoch_start_slot = get_epoch_start_slot(current_epoch, config.SLOTS_PER_EPOCH)
    if epoch_start_slot != state.slot:
        return get_block_root(
            state,
            epoch_start_slot,
            config.SLOTS_PER_HISTORICAL_ROOT,
        )
    return beacon_block_root
def _get_mock_message_and_voting_committee_indices(
        attestation_data: AttestationData,
        committee: Sequence[ValidatorIndex],
        num_voted_attesters: int) -> Tuple[Hash32, Tuple[CommitteeIndex, ...]]:
    """
    Return the message hash for ``attestation_data`` (custody bit False) and
    ``num_voted_attesters`` randomly sampled positions within ``committee``.
    """
    message_hash = AttestationDataAndCustodyBit(
        data=attestation_data,
        custody_bit=False
    ).root
    committee_size = len(committee)
    assert num_voted_attesters <= committee_size
    # Sample positions *within* the committee, not validator indices.
    sampled_positions = random.sample(range(committee_size), num_voted_attesters)
    voting_committee_indices = tuple(
        CommitteeIndex(position) for position in sampled_positions
    )
    return message_hash, voting_committee_indices
def create_mock_signed_attestation(state: BeaconState,
                                   attestation_data: AttestationData,
                                   committee: Sequence[ValidatorIndex],
                                   num_voted_attesters: int,
                                   keymap: Dict[BLSPubkey, int],
                                   slots_per_epoch: int) -> Attestation:
    """
    Create a mocking attestation of the given ``attestation_data`` slot with ``keymap``.
    """
    message_hash, voting_committee_indices = _get_mock_message_and_voting_committee_indices(
        attestation_data,
        committee,
        num_voted_attesters,
    )
    # Use privkeys to sign the attestation
    signatures = [
        sign_transaction(
            message_hash=message_hash,
            privkey=keymap[
                state.validator_registry[
                    committee[committee_index]
                ].pubkey
            ],
            fork=state.fork,
            slot=attestation_data.slot,
            signature_domain=SignatureDomain.DOMAIN_ATTESTATION,
            slots_per_epoch=slots_per_epoch,
        )
        for committee_index in voting_committee_indices
    ]
    # aggregate signatures and construct participant bitfield
    aggregation_bitfield, aggregate_signature = aggregate_votes(
        bitfield=get_empty_bitfield(len(committee)),
        sigs=(),
        voting_sigs=signatures,
        voting_committee_indices=voting_committee_indices,
    )
    # create attestation from attestation_data, participant bitfield, and signature
    return Attestation(
        aggregation_bitfield=aggregation_bitfield,
        data=attestation_data,
        # Custody bits are all zero: one zero byte per byte of the
        # aggregation bitfield.
        custody_bitfield=Bitfield(b'\x00' * len(aggregation_bitfield)),
        aggregate_signature=aggregate_signature,
    )
@to_tuple
def create_mock_signed_attestations_at_slot(
        state: BeaconState,
        config: Eth2Config,
        state_machine: BaseBeaconStateMachine,
        attestation_slot: Slot,
        beacon_block_root: Hash32,
        keymap: Dict[BLSPubkey, int],
        voted_attesters_ratio: float=1.0) -> Iterable[Attestation]:
    """
    Create the mocking attestations of the given ``attestation_slot`` slot with ``keymap``.

    Yields one attestation per crosslink committee at the slot; ``@to_tuple``
    collects them into a tuple for the caller.
    """
    state_transition = state_machine.state_transition
    # Advance the state (without a block) to the attestation slot so the
    # committee assignment matches the slot being attested.
    state = state_transition.apply_state_transition_without_block(
        state,
        attestation_slot,
    )
    crosslink_committees_at_slot = get_crosslink_committees_at_slot(
        state,
        attestation_slot,
        CommitteeConfig(config),
    )
    # Get `target_root`
    target_root = _get_target_root(state, config, beacon_block_root)
    for crosslink_committee in crosslink_committees_at_slot:
        committee, shard = crosslink_committee
        previous_crosslink = state.latest_crosslinks[shard]
        attestation_data = AttestationData(
            slot=attestation_slot,
            beacon_block_root=beacon_block_root,
            source_epoch=state.current_justified_epoch,
            source_root=state.current_justified_root,
            target_root=target_root,
            shard=shard,
            previous_crosslink=previous_crosslink,
            crosslink_data_root=ZERO_HASH32,
        )
        # int() truncates toward zero, so a ratio < 1 can yield zero voters
        # for small committees.
        num_voted_attesters = int(len(committee) * voted_attesters_ratio)
        yield create_mock_signed_attestation(
            state,
            attestation_data,
            committee,
            num_voted_attesters,
            keymap,
            config.SLOTS_PER_EPOCH,
        )
def create_signed_attestation_at_slot(
        state: BeaconState,
        config: Eth2Config,
        state_machine: BaseBeaconStateMachine,
        attestation_slot: Slot,
        beacon_block_root: Hash32,
        validator_privkeys: Dict[ValidatorIndex, int],
        committee: Tuple[ValidatorIndex, ...],
        shard: Shard) -> Attestation:
    """
    Create the attestations of the given ``attestation_slot`` slot with ``validator_privkeys``.
    """
    state_transition = state_machine.state_transition
    # Advance the state (without a block) to the attestation slot.
    state = state_transition.apply_state_transition_without_block(
        state,
        attestation_slot,
    )
    # Get `target_root`
    target_root = _get_target_root(state, config, beacon_block_root)
    previous_crosslink = state.latest_crosslinks[shard]
    attestation_data = AttestationData(
        slot=attestation_slot,
        beacon_block_root=beacon_block_root,
        source_epoch=state.current_justified_epoch,
        source_root=state.current_justified_root,
        target_root=target_root,
        shard=shard,
        previous_crosslink=previous_crosslink,
        crosslink_data_root=ZERO_HASH32,
    )
    # All signers sign the same message: attestation data with custody bit
    # False.
    message_hash = AttestationDataAndCustodyBit(
        data=attestation_data,
        custody_bit=False
    ).root
    signatures = [
        sign_transaction(
            message_hash=message_hash,
            privkey=privkey,
            fork=state.fork,
            slot=attestation_data.slot,
            signature_domain=SignatureDomain.DOMAIN_ATTESTATION,
            slots_per_epoch=config.SLOTS_PER_EPOCH,
        )
        for _, privkey in validator_privkeys.items()
    ]
    # Positions of the signing validators within the committee.
    voting_committee_indices = [
        CommitteeIndex(committee.index(validator_index))
        for validator_index in validator_privkeys
    ]
    # aggregate signatures and construct participant bitfield
    aggregation_bitfield, aggregate_signature = aggregate_votes(
        bitfield=get_empty_bitfield(len(committee)),
        sigs=(),
        voting_sigs=signatures,
        voting_committee_indices=voting_committee_indices,
    )
    # create attestation from attestation_data, participant bitfield, and signature
    return Attestation(
        aggregation_bitfield=aggregation_bitfield,
        data=attestation_data,
        # NOTE(review): this wraps get_empty_bitfield(...) while
        # create_mock_signed_attestation uses b'\x00' * len(...) — confirm the
        # two custody-bitfield representations are equivalent.
        custody_bitfield=Bitfield(get_empty_bitfield(len(aggregation_bitfield))),
        aggregate_signature=aggregate_signature,
    )
#
# VoluntaryExit
#
def create_mock_voluntary_exit(state: BeaconState,
                               config: Eth2Config,
                               keymap: Dict[BLSPubkey, int],
                               validator_index: ValidatorIndex,
                               exit_epoch: Epoch=None) -> VoluntaryExit:
    """
    Return a signed ``VoluntaryExit`` for ``validator_index``.

    The exit targets ``exit_epoch`` when given, otherwise the state's current
    epoch.  The signature is always made at the start slot of the current
    epoch under the VOLUNTARY_EXIT domain.
    """
    current_epoch = state.current_epoch(config.SLOTS_PER_EPOCH)
    # Reuse the cached ``current_epoch`` instead of recomputing
    # ``state.current_epoch(...)`` a second time (same value, less work).
    voluntary_exit = VoluntaryExit(
        epoch=current_epoch if exit_epoch is None else exit_epoch,
        validator_index=validator_index,
    )
    return voluntary_exit.copy(
        signature=sign_transaction(
            message_hash=voluntary_exit.signing_root,
            privkey=keymap[state.validator_registry[validator_index].pubkey],
            fork=state.fork,
            slot=get_epoch_start_slot(current_epoch, config.SLOTS_PER_EPOCH),
            signature_domain=SignatureDomain.DOMAIN_VOLUNTARY_EXIT,
            slots_per_epoch=config.SLOTS_PER_EPOCH,
        )
    )
#
# Deposit
#
def create_deposit_data(*,
                        config: Eth2Config,
                        pubkey: BLSPubkey,
                        privkey: int,
                        withdrawal_credentials: Hash32,
                        fork: Fork,
                        deposit_timestamp: Timestamp,
                        amount: Gwei=None) -> DepositData:
    """
    Return ``DepositData`` whose inner ``DepositInput`` carries a proof of
    possession signed by ``privkey``.

    ``amount`` defaults to ``config.MAX_DEPOSIT_AMOUNT``.
    """
    if amount is None:
        amount = config.MAX_DEPOSIT_AMOUNT
    # The proof of possession is signed over the *unsigned* deposit input;
    # the original constructed this object twice inline.
    unsigned_deposit_input = DepositInput(
        pubkey=pubkey,
        withdrawal_credentials=withdrawal_credentials,
    )
    proof_of_possession = sign_proof_of_possession(
        deposit_input=unsigned_deposit_input,
        privkey=privkey,
        fork=fork,
        slot=config.GENESIS_SLOT,
        slots_per_epoch=config.SLOTS_PER_EPOCH,
    )
    return DepositData(
        deposit_input=DepositInput(
            pubkey=pubkey,
            withdrawal_credentials=withdrawal_credentials,
            signature=proof_of_possession,
        ),
        amount=amount,
        timestamp=deposit_timestamp,
    )
def create_mock_deposit_data(*,
                             config: Eth2Config,
                             pubkeys: Sequence[BLSPubkey],
                             keymap: Dict[BLSPubkey, int],
                             validator_index: ValidatorIndex,
                             withdrawal_credentials: Hash32,
                             fork: Fork,
                             deposit_timestamp: Timestamp=ZERO_TIMESTAMP) -> DepositData:
    """
    Build ``DepositData`` for ``validator_index`` using its key from ``keymap``.
    """
    # NOTE(review): with the declared default of ZERO_TIMESTAMP this branch
    # only fires when a caller explicitly passes deposit_timestamp=None;
    # confirm whether the default was meant to be None (i.e. "use now").
    if deposit_timestamp is None:
        deposit_timestamp = Timestamp(int(time.time()))
    return create_deposit_data(
        config=config,
        pubkey=pubkeys[validator_index],
        privkey=keymap[pubkeys[validator_index]],
        withdrawal_credentials=withdrawal_credentials,
        fork=fork,
        deposit_timestamp=deposit_timestamp,
    )
#
#
# Validator guide
#
#
#
# Lookahead
#
def get_committee_assignment(
state: BeaconState,
config: Eth2Config,
epoch: Epoch,
validator_index: ValidatorIndex,
registry_change: bool=False
) -> CommitteeAssignment:
"""
Return the ``CommitteeAssignment`` in the ``epoch`` for ``validator_index``
and ``registry_change``.
``CommitteeAssignment.committee`` is the tuple array of validators in the committee
``CommitteeAssignment.shard`` is the shard to which the committee is assigned
``CommitteeAssignment.slot`` is the slot at which the committee is assigned
``CommitteeAssignment.is_proposer`` is a bool signalling if the validator is expected to
propose a beacon block at the assigned slot.
"""
current_epoch = state.current_epoch(config.SLOTS_PER_EPOCH)
previous_epoch = state.previous_epoch(config.SLOTS_PER_EPOCH)
next_epoch = Epoch(current_epoch + 1)
validate_epoch_within_previous_and_next(epoch, previous_epoch, next_epoch)
epoch_start_slot = get_epoch_start_slot(epoch, config.SLOTS_PER_EPOCH)
committee_config = CommitteeConfig(config)
for slot in range(epoch_start_slot, epoch_start_slot + config.SLOTS_PER_EPOCH):
crosslink_committees = get_crosslink_committees_at_slot(
state,
slot,
committee_config,
registry_change=registry_change,
)
selected_committees = [
committee
for committee in crosslink_committees
if validator_index in committee[0]
]
if len(selected_committees) > 0:
validators = selected_committees[0][0]
shard = selected_committees[0][1]
is_proposer = validator_index == get_beacon_proposer_index(
state,
Slot(slot),
committee_config,
| |
# Source repo: Janspiry/A-Demo-for-Image-Inpainting-by-React (scraper artifact kept as a comment)
import torch.nn as nn
import torch
import numpy as np
import torch.nn.functional as F
from einops import rearrange, repeat
class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention mechanism."""
    def __init__(self, attention_dropout=0.0):
        super(ScaledDotProductAttention, self).__init__()
        self.dropout = nn.Dropout(attention_dropout)
        self.softmax = nn.Softmax(dim=2)
    def forward(self, q, k, v, scale=None, attn_mask=None):
        """Compute scaled dot-product attention.

        Args:
            q: queries of shape [B, L_q, D_q]
            k: keys of shape [B, L_k, D_k]
            v: values of shape [B, L_v, D_v] (usually the same tensor as k)
            scale: optional scaling factor, a float scalar
            attn_mask: optional mask of shape [B, L_q, L_k]; nonzero/True
                entries are masked out
        Returns:
            (context, attention) tensors
        """
        attention = torch.bmm(q, k.transpose(1, 2))
        if scale:
            attention = attention * scale
        if attn_mask is not None:
            # Fill masked positions with -inf so softmax assigns them zero
            # weight.  Bug fix: move the mask to the scores' device instead of
            # the original unconditional ``attn_mask.cuda()``, which crashed
            # on CPU-only machines.
            attention = attention.masked_fill_(
                attn_mask.to(attention.device), float("-inf")
            )
        # Softmax over the key dimension, then attention dropout.
        attention = self.softmax(attention)
        attention = self.dropout(attention)
        # Weighted sum of the values.
        context = torch.bmm(attention, v)
        return context, attention
class MultiHeadAttention(nn.Module):
    """Multi-head attention: project q/k/v per head, attend, merge, then
    apply residual connection and LayerNorm."""
    def __init__(self, model_dim=512, num_heads=8, dropout=0.0):
        super(MultiHeadAttention, self).__init__()
        self.dim_per_head = model_dim // num_heads
        self.num_heads = num_heads
        # Submodule creation order kept stable for reproducible seeded init.
        self.linear_k = nn.Linear(model_dim, self.dim_per_head * num_heads)
        self.linear_v = nn.Linear(model_dim, self.dim_per_head * num_heads)
        self.linear_q = nn.Linear(model_dim, self.dim_per_head * num_heads)
        self.dot_product_attention = ScaledDotProductAttention(dropout)
        self.linear_final = nn.Linear(model_dim, model_dim)
        self.dropout = nn.Dropout(dropout)
        # LayerNorm applied after the residual connection.
        self.layer_norm = nn.LayerNorm(model_dim)
    def forward(self, key, value, query, attn_mask=None):
        # Save the query for the residual connection.
        residual = query
        heads = self.num_heads
        head_dim = self.dim_per_head
        batch_size = key.size(0)
        # Linear projections, then split heads by folding the head count into
        # the batch dimension.
        key = self.linear_k(key).view(batch_size * heads, -1, head_dim)
        value = self.linear_v(value).view(batch_size * heads, -1, head_dim)
        query = self.linear_q(query).view(batch_size * heads, -1, head_dim)
        if attn_mask is not None:
            attn_mask = attn_mask.repeat(heads, 1, 1)
        # Scaled dot-product attention with 1/sqrt(d) scaling.
        scale = (key.size(-1)) ** -0.5
        context, attention = self.dot_product_attention(
            query, key, value, scale, attn_mask)
        # Undo the head split before the final projection.
        context = context.view(batch_size, -1, head_dim * heads)
        output = self.dropout(self.linear_final(context))
        # Residual + LayerNorm.
        output = self.layer_norm(residual + output)
        return output, attention
def padding_mask(seq_k, seq_q):
    """Build a [B, L_q, L_k] mask that is True wherever ``seq_k`` holds the
    PAD token (id 0), broadcast across every query position."""
    query_len = seq_q.size(1)
    # `PAD` is 0 on the key side.
    mask = seq_k.eq(0).unsqueeze(1)
    return mask.expand(-1, query_len, -1)
def sequence_mask(seq):
    """Strictly-upper-triangular mask [B, L, L] for decoder self-attention:
    entry (i, j) is 1 when position j lies in the future of position i."""
    batch_size, seq_len, _ = seq.size()
    future = torch.triu(
        torch.ones((seq_len, seq_len), dtype=torch.uint8), diagonal=1)
    return future.unsqueeze(0).expand(batch_size, -1, -1)
class PositionalEncoding(nn.Module):
def __init__(self, d_model, max_seq_len):
"""初始化。
Args:
d_model: 一个标量。模型的维度,论文默认是512
max_seq_len: 一个标量。文本序列的最大长度
"""
super(PositionalEncoding, self).__init__()
# 根据论文给的公式,构造出PE矩阵
position_encoding = np.array([
[pos / np.power(10000, 2.0 * (j // 2) / d_model) for j in range(d_model)]
for pos in range(max_seq_len)])
# 偶数列使用sin,奇数列使用cos
position_encoding[:, 0::2] = np.sin(position_encoding[:, 0::2])
position_encoding[:, 1::2] = np.cos(position_encoding[:, 1::2])
# 在PE矩阵的第一行,加上一行全是0的向量,代表这`PAD`的positional encoding
# 在word embedding中也经常会加上`UNK`,代表位置单词的word embedding,两者十分类似
# 那么为什么需要这个额外的PAD的编码呢?很简单,因为文本序列的长度不一,我们需要对齐,
# 短的序列我们使用0在结尾补全,我们也需要这些补全位置的编码,也就是`PAD`对应的位置编码
pad_row = torch.zeros([1, d_model])
position_encoding = torch.cat((pad_row, position_encoding))
# 嵌入操作,+1是因为增加了`PAD`这个补全位置的编码,
# Word embedding中如果词典增加`UNK`,我们也需要+1。看吧,两者十分相似
self.position_encoding = nn.Embedding(max_seq_len + 1, d_model)
self.position_encoding.weight = nn.Parameter(position_encoding,
requires_grad=False)
def forward(self, input_len):
"""神经网络的前向传播。
Args:
input_len: 一个张量,形状为[BATCH_SIZE, 1]。每一个张量的值代表这一批文本序列中对应的长度。
Returns:
返回这一批序列的位置编码,进行了对齐。
"""
# 找出这一批序列的最大长度
max_len = torch.max(input_len)
tensor = torch.cuda.LongTensor if input_len.is_cuda else torch.LongTensor
# 对每一个序列的位置进行对齐,在原序列位置的后面补上0
# 这里range从1开始也是因为要避开PAD(0)的位置
input_pos = tensor(
[list(range(1, int(len) + 1)) + [0] * (max_len - len) for len in input_len])
return self.position_encoding(input_pos)
class PositionalWiseFeedForward(nn.Module):
    """Position-wise feed-forward block implemented as two 1x1 convolutions,
    followed by a residual connection and LayerNorm."""
    def __init__(self, model_dim=512, ffn_dim=2048, dropout=0.0):
        super(PositionalWiseFeedForward, self).__init__()
        # Submodule creation order kept stable for reproducible seeded init.
        self.w1 = nn.Conv1d(model_dim, ffn_dim, 1)
        self.w2 = nn.Conv1d(ffn_dim, model_dim, 1)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(model_dim)
    def forward(self, x):
        # Conv1d expects [B, C, L]; swap channel and length axes around the
        # two convolutions.
        hidden = self.w2(F.relu(self.w1(x.transpose(1, 2))))
        projected = self.dropout(hidden.transpose(1, 2))
        # Residual + LayerNorm.
        return self.layer_norm(x + projected)
class EncoderLayer(nn.Module):
    """One encoder layer: self-attention followed by a feed-forward block."""
    def __init__(self, model_dim=512, num_heads=8, ffn_dim=2048, dropout=0.0):
        super(EncoderLayer, self).__init__()
        self.attention = MultiHeadAttention(model_dim, num_heads, dropout)
        self.feed_forward = PositionalWiseFeedForward(model_dim, ffn_dim, dropout)
    def forward(self, inputs, attn_mask=None):
        # Self-attention: key, value and query are all the layer input.
        context, attention = self.attention(inputs, inputs, inputs, attn_mask)
        # Position-wise feed-forward network.
        output = self.feed_forward(context)
        return output, attention
class Encoder(nn.Module):
    """A stack of ``EncoderLayer`` modules applied sequentially."""
    def __init__(self,
                 num_layers=6,
                 model_dim=512,
                 num_heads=8,
                 ffn_dim=2048,
                 dropout=0.0):
        super(Encoder, self).__init__()
        self.encoder_layers = nn.ModuleList(
            [EncoderLayer(model_dim, num_heads, ffn_dim, dropout) for _ in
             range(num_layers)])
    def forward(self, inputs):
        # Masking disabled in the original:
        # self_attention_mask = padding_mask(inputs, inputs)
        attentions = []
        output = inputs
        for layer in self.encoder_layers:
            output, attn = layer(output)
            attentions.append(attn)
        return output, attentions
class DecoderLayer(nn.Module):
    """One decoder layer: self-attention, then encoder-decoder ("context")
    attention, then a feed-forward block.

    NOTE: a single ``MultiHeadAttention`` module is shared between the
    self-attention and the context-attention steps, so their projection
    weights are tied (as in the original code).
    """
    def __init__(self, model_dim, num_heads=8, ffn_dim=2048, dropout=0.0):
        super(DecoderLayer, self).__init__()
        self.attention = MultiHeadAttention(model_dim, num_heads, dropout)
        self.feed_forward = PositionalWiseFeedForward(model_dim, ffn_dim, dropout)
    def forward(self,
                dec_inputs,
                enc_outputs,
                self_attn_mask=None,
                context_attn_mask=None):
        # Self-attention over the decoder inputs only.
        dec_output, self_attention = self.attention(
            dec_inputs, dec_inputs, dec_inputs, self_attn_mask)
        # Context attention: key/value come from the encoder outputs, the
        # query is the decoder's self-attention output.
        dec_output, context_attention = self.attention(
            enc_outputs, enc_outputs, dec_output, context_attn_mask)
        # Position-wise feed-forward network.
        dec_output = self.feed_forward(dec_output)
        return dec_output, self_attention, context_attention
class Decoder(nn.Module):
    """A stack of ``DecoderLayer`` modules applied sequentially."""
    def __init__(self,
                 num_layers=6,
                 model_dim=512,
                 num_heads=8,
                 ffn_dim=2048,
                 dropout=0.0):
        super(Decoder, self).__init__()
        self.num_layers = num_layers
        self.decoder_layers = nn.ModuleList(
            [DecoderLayer(model_dim, num_heads, ffn_dim, dropout) for _ in
             range(num_layers)])
    def forward(self, inputs, enc_output, context_attn_mask=None):
        """Run the decoder stack.

        Self-attention masking is currently disabled: the original computed a
        ``sequence_mask`` and immediately overwrote it with ``None``.  The
        dead computation has been removed; re-enable masking with:
            self_attn_mask = torch.gt(sequence_mask(inputs), 0)
        """
        self_attn_mask = None
        self_attentions = []
        context_attentions = []
        output = inputs
        for decoder in self.decoder_layers:
            output, self_attn, context_attn = decoder(
                output, enc_output, self_attn_mask, context_attn_mask)
            self_attentions.append(self_attn)
            context_attentions.append(context_attn)
        return output, self_attentions, context_attentions
class Transformer(nn.Module):
    """Encoder-decoder transformer over N patch embeddings of size ``dim``,
    with learned positional embeddings and an optional ``condition`` input
    that feeds the encoder."""
    def __init__(self,
                 N,
                 dim,
                 model_dim=512,
                 num_layers=6,
                 num_heads=8,
                 ffn_dim=2048,
                 dropout=0.2,
                 with_attn=False,
                 condition=False
                 ):
        super(Transformer, self).__init__()
        self.N = N
        self.dim = dim
        self.with_attn = with_attn
        self.condition = condition
        self.patch_to_embedding = nn.Linear(dim, model_dim)
        self.pos_embedding = nn.Parameter(torch.randn(1, N, model_dim)) # 1, N, dim
        if self.condition:
            # NOTE(review): original comment said "dim+dim: channel expansion
            # for upsampling", yet this layer takes `dim` inputs; the dim*2
            # concat variant only exists in the commented-out branch of
            # forward() below — confirm which was intended.
            self.condition_to_embedding = nn.Linear(dim, model_dim)
            self.condition_pos_embedding = nn.Parameter(torch.randn(1, N, model_dim)) # 1, N, dim
        self.encoder = Encoder(num_layers, model_dim, num_heads, ffn_dim, dropout)
        self.decoder = Decoder(num_layers, model_dim, num_heads, ffn_dim, dropout)
        self.linear = nn.Linear(model_dim, dim, bias=False)
    def forward(self, x, condition=None):
        b, N, C = x.size()
        # print('x.size()',x.size())
        # NOTE(review): how `condition` should best be incorporated is an open
        # question (translated from the original comment).
        if condition is None:
            # Unconditional path: encode x and self-decode it.
            x = self.patch_to_embedding(x) # b, N, dim
            x += self.pos_embedding
            x, enc_self_attn = self.encoder(x)
            output, dec_self_attn, ctx_attn = self.decoder(inputs=x, enc_output=x)
        elif self.condition:
            '''考虑condition'''
            # Conditional path: the condition is encoded and x attends to it.
            x = self.patch_to_embedding(x) # b, N, dim
            x += self.pos_embedding
            condition = self.condition_to_embedding(condition)
            condition += self.condition_pos_embedding
            condition, enc_self_attn = self.encoder(condition)
            output, dec_self_attn, ctx_attn = self.decoder(inputs=x, enc_output=condition)
        # Alternative (disabled): concatenate x and condition channel-wise.
        # elif self.condition:
        #     x = torch.cat([x, condition], dim=2)
        #     x = self.condition_to_embedding(x) # b, N, dim
        #     x += self.pos_embedding
        #     x, enc_self_attn = self.encoder(x)
        #     output, dec_self_attn, ctx_attn = self.decoder(inputs=x, enc_output=x)
        output = self.linear(output)
        if self.with_attn:
            return output, enc_self_attn, dec_self_attn, ctx_attn
        else:
            return output
class TransformerLayer(nn.Module):
    def __init__(self, size, patch_size, channel_transform=False, transpose_transform=False, condition=False, MiniTransFormer=None, with_mask=False):
        """Wrap ``Transformer`` modules that operate over image patches.

        Args:
            size: input feature-map shape as [c, h, w]
            patch_size: side length of each square patch
            channel_transform: additionally run a transformer across channels
            transpose_transform: additionally run a transformer on the
                H/W-transposed input
            condition: forward a conditioning input to the transformers
            MiniTransFormer: optional (model_dim, num_layers, num_heads,
                ffn_dim) override of the default transformer size
            with_mask: unused here — TODO confirm whether the truncated
                forward() consumes it
        """
        super(TransformerLayer, self).__init__()
        # size: [c, h, w]
        self.input_dim, self.H, self.W = size
        self.p = patch_size # patch_size
        self.channel_transform = channel_transform
        self.transpose_transform = transpose_transform
        # Each patch flattens to input_dim * p^2 features.
        self.patch_dim = self.input_dim*(self.p*self.p)
        # Number of patches tiling the H x W map.
        self.patch_num = (self.H//self.p)*(self.W//self.p)
        model_dim = 256
        num_layers = 6
        num_heads = 8
        ffn_dim = 512
        if MiniTransFormer is not None:
            model_dim, num_layers, num_heads, ffn_dim = MiniTransFormer
        self.condition = condition
        # Main transformer over the patch sequence.
        self.transformer_h = Transformer(
            N=self.patch_num,
            dim=self.patch_dim,
            model_dim=model_dim,
            num_layers=num_layers,
            num_heads=num_heads,
            ffn_dim=ffn_dim,
            condition=self.condition
        )
        if self.transpose_transform:
            # Same configuration, applied to the H/W-transposed input.
            self.transformer_w = Transformer(
                N=self.patch_num,
                dim=self.patch_dim,
                model_dim=model_dim,
                num_layers=num_layers,
                num_heads=num_heads,
                ffn_dim=ffn_dim,
                condition=self.condition
            )
        if self.channel_transform:
            # N and dim swapped: attends across channels instead of patches.
            self.transformer_channel = Transformer(
                N=self.patch_dim,
                dim=self.patch_num,
                model_dim=model_dim,
                num_layers=num_layers,
                num_heads=num_heads,
                ffn_dim=ffn_dim,
                condition=self.condition
            )
        # One channel group per enabled branch; project back to input_dim if
        # more than one branch is active.
        self.out_channel = self.input_dim*(1+self.channel_transform+self.transpose_transform)
        self.to_input_dim = None
        if self.out_channel != self.input_dim:
            self.to_input_dim = nn.Conv2d(self.out_channel, self.input_dim, kernel_size=1)
def forward(self, x, condition=None):
b, c, h, w = x.size()
p = self.p
if self.transpose_transform:
x_trans = x.transpose(2, 3)
condition_trans = None
if condition is not None:
condition_trans = condition_trans.transpose(2, 3)
# P = 8, patch_size 大小, 32 * 32
x = rearrange(x, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p) # b, N, p^2*C -> b, 1024, 320
if condition is not None:
condition = rearrange(condition, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
'''channel 并行'''
if self.channel_transform:
x_channel = x.permute(0, 2, 1)
condition_channel = None
if condition is not None:
condition_channel = condition.permute(0, 2, 1)
x_channel = self.transformer_channel(x_channel, condition_channel)
x_channel = x_channel.permute(0, 2, 1)
'''结束'''
x = self.transformer_h(x, condition)
x = rearrange(x, ' b (h w) (p1 p2 c) -> b c (h p1) (w p2)', h = h//p, w = w//p, p1 = p, p2 = p) # b, c, h, w
if self.transpose_transform:
x_trans = rearrange(x_trans, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p) # b, N, p^2*C
if condition is not None:
condition_trans = rearrange(condition_trans, 'b c (h p1) (w p2) -> b (h w) (p1 p2 | |
# from .global_variables import *
from PyPATools.field import Field
from PyPATools.pusher import ParticlePusher
import numpy as np
# import multiprocessing as mp
# import time
def track(si, r_start=None, v_start=None, nsteps=10000, dt=1e-12, omit_b=False, omit_e=False):
    """Step-by-step particle tracking through the inflector fields.

    Integrates a single particle with the Boris pusher, storing the full
    position and velocity history.

    :param si: spiral inflector object providing parameters, field
        interpolators and track-variable storage.
    :param r_start: initial position (3-vector); required for now.
    :param v_start: initial velocity (3-vector); required for now.
    :param nsteps: number of integration steps.
    :param dt: time step in seconds.
    :param omit_b: replace the magnetic field with a zero field.
    :param omit_e: replace the electric field with a zero field.
    :return: (r, v) arrays of shape (nsteps + 1, 3), or 1 if no E-field
        interpolator has been generated yet.
    """
    # TODO: For now break if r_start or v_start are not given, later get from class properties?
    assert (r_start is not None and v_start is not None), "Have to specify r_start and v_start for now!"

    params = si.analytic_parameters
    num_vars = si.numerical_variables
    trk_vars = si.track_variables

    if num_vars["ef_itp"] is None:
        print("No E-Field has been generated. Cannot track!")
        return 1

    pusher = ParticlePusher(params["ion"].species, "boris")  # Note: leapfrog is inaccurate above dt = 1e-12

    # Swap in zero fields when requested, otherwise use the interpolators.
    efield = Field(dim=0, field={"x": 0.0, "y": 0.0, "z": 0.0}) if omit_e else num_vars["ef_itp"]  # type: Field
    bfield = Field(dim=0, field={"x": 0.0, "y": 0.0, "z": 0.0}) if omit_b else params["bf_itp"]  # type: Field

    positions = np.zeros([nsteps + 1, 3])
    velocities = np.zeros([nsteps + 1, 3])
    positions[0, :] = r_start[:]
    velocities[0, :] = v_start[:]

    # initialize the velocity half a step back (leapfrog-style staggering):
    _, velocities[0] = pusher.push(positions[0], velocities[0],
                                   efield(positions[0]), bfield(positions[0]), -0.5 * dt)

    # Advance the particle for nsteps, re-evaluating both fields at each position.
    for step in range(nsteps):
        positions[step + 1], velocities[step + 1] = pusher.push(positions[step], velocities[step],
                                                                efield(positions[step]), bfield(positions[step]),
                                                                dt)

    trk_vars["trj_tracker"] = positions
    si.track_variables = trk_vars

    return positions, velocities
def fast_track(si, r_start=None, v_start=None, nsteps=10000, dt=1e-12, omit_b=False, omit_e=False):
    """Particle tracking delegated to the pusher's own tracking loop.

    Same contract as track(), but registers the fields with the
    ParticlePusher and lets its track() method run the integration.

    :return: (r, v) arrays from the pusher, or 1 if no E-field
        interpolator has been generated yet.
    """
    # TODO: For now break if r_start or v_start are not given, later get from class properties?
    assert (r_start is not None and v_start is not None), "Have to specify r_start and v_start for now!"

    params = si.analytic_parameters
    num_vars = si.numerical_variables
    trk_vars = si.track_variables

    if num_vars["ef_itp"] is None:
        print("No E-Field has been generated. Cannot track!")
        return 1

    pusher = ParticlePusher(params["ion"].species, "boris")  # Note: leapfrog is inaccurate above dt = 1e-12

    # Swap in zero fields when requested, otherwise use the interpolators.
    if omit_e:
        pusher.set_efield(Field(dim=0, field={"x": 0.0, "y": 0.0, "z": 0.0}))
    else:
        pusher.set_efield(num_vars["ef_itp"])

    if omit_b:
        pusher.set_bfield(Field(dim=0, field={"x": 0.0, "y": 0.0, "z": 0.0}))
    else:
        pusher.set_bfield(params["bf_itp"])

    r, v = pusher.track(r_start, v_start, nsteps, dt)

    trk_vars["trj_tracker"] = r
    si.track_variables = trk_vars

    return r, v
def fast_track_with_termination(si, r_start=None, v_start=None,
                                nsteps=10000, dt=1e-12,
                                omit_b=False, omit_e=False):
    """Pusher-driven tracking with electrode-boundary termination.

    Identical to fast_track(), except the electrode assembly is handed to
    the pusher so it can stop particles that intersect an electrode.

    :return: (r, v) arrays from the pusher, or 1 if no E-field
        interpolator has been generated yet.
    """
    # TODO: For now break if r_start or v_start are not given, later get from class properties?
    assert (r_start is not None and v_start is not None), "Have to specify r_start and v_start for now!"

    params = si.analytic_parameters
    num_vars = si.numerical_variables
    trk_vars = si.track_variables

    if num_vars["ef_itp"] is None:
        print("No E-Field has been generated. Cannot track!")
        return 1

    pusher = ParticlePusher(params["ion"].species, "boris")  # Note: leapfrog is inaccurate above dt = 1e-12

    # Swap in zero fields when requested, otherwise use the interpolators.
    if omit_e:
        pusher.set_efield(Field(dim=0, field={"x": 0.0, "y": 0.0, "z": 0.0}))
    else:
        pusher.set_efield(num_vars["ef_itp"])

    if omit_b:
        pusher.set_bfield(Field(dim=0, field={"x": 0.0, "y": 0.0, "z": 0.0}))
    else:
        pusher.set_bfield(params["bf_itp"])

    pusher.set_bds(si.bempp_variables["objects"])  # 'objects' is now a PyElectrodeAssembly

    r, v = pusher.track(r_start, v_start, nsteps, dt)

    trk_vars["trj_tracker"] = r
    si.track_variables = trk_vars

    return r, v
# def deflection_job(si, particle, j):
# z_axis = Vector([0.0, 0.0, 1.0])
# print("Starting new particle process ({})...".format(j))
# ts = time.time()
# r, v = si.track(r_start=particle.get_position(),
# v_start=particle.get_velocity(),
# nsteps=11000,
# dt=1e-11)
# print("Particle {}: Tracking took {:.4f} s.".format(j, time.time() - ts))
#
# trj_dir = Vector(r[-1] - r[-100])
# deviation = 90.0 - np.rad2deg(trj_dir.angle_with(z_axis))
#
# print("Particle {}: Deviation from xy-plane: {:.4f} degrees".format(j, deviation))
#
#
# def deflection_angle_analysis(si, bunch):
#
# jobs = []
#
# i = 0
# for particle in bunch:
# i += 1
# p = mp.Process(target=deflection_job, args=(si, particle, i,))
# jobs.append(p)
# p.start()
def generate_analytical_trajectory(si):
    """Compute the analytical design trajectory of the spiral inflector.

    Evaluates closed-form x/y/z coordinates from the tilt parameter kp and the
    combined parameter k, flips the orbit direction depending on field/charge
    sign, computes the orbit center, and applies an optional rotation.

    :param si: spiral inflector object; initialized on demand. Reads
        analytic_parameters / analytic_variables and writes both back.
    :return: the (ns, 3) design-trajectory array stored in
        analytic_vars["trj_design"].
    """
    if not si.initialized:
        si.initialize()

    analytic_params = si.analytic_parameters
    analytic_vars = si.analytic_variables

    print("Calculating Design Trajectory... ", end="")

    h = analytic_vars["height"]
    tilt = analytic_params["tilt"]  # type: float

    analytic_vars["kp"] = np.tan(np.deg2rad(tilt))  # Tilt parameter
    # k combines the height-to-cyclotron-radius ratio with the tilt parameter.
    analytic_vars["k"] = ((h / analytic_vars["r_cyc"]) + analytic_vars["kp"]) / 2.0

    # Shorthand coefficients c+ = 2k + 1 and c- = -(2k - 1), also stored on si.
    cplus = analytic_vars["c+"] = (2.0 * analytic_vars["k"] + 1.0)
    cminus = analytic_vars["c-"] = -(2.0 * analytic_vars["k"] - 1.0)

    # --- Trajectory coordinates --- #
    # NOTE(review): analytic_vars["b"] is presumably the array of angle values
    # along the trajectory (set during initialization) — confirm upstream.
    _x = +0.5 * h * ((2.0 / (1.0 - (4.0 * (analytic_vars["k"] ** 2.0)))) -
                     (np.cos(cplus * analytic_vars["b"]) / cplus) - np.cos(
                -cminus * analytic_vars["b"]) / cminus)

    _y = -0.5 * h * (np.sin(cplus * analytic_vars["b"]) / cplus +
                     np.sin(-cminus * analytic_vars["b"]) / cminus)

    _z = - h * (1.0 - np.sin(analytic_vars["b"]))

    # Stack per-axis arrays into an (ns, 3) trajectory.
    analytic_vars["trj_design"] = np.array([_x, _y, _z]).T

    # Rotation/flip: mirror y when B-field sign and charge sign do NOT differ
    # (XOR of the two sign tests), i.e. the cyclotron motion goes the other way.
    if not ((analytic_vars["bf_design"] < 0.0) ^ (analytic_params["ion"].species.q < 0.0)):
        if si.debug:
            print("Flipping direction of cyclotron motion...", end="")
        analytic_vars["trj_design"][:, 1] = -analytic_vars["trj_design"][:, 1]

    # Orbit center calculation
    xc, yc = calculate_orbit_center(analytic_vars["k"], analytic_vars["kp"], analytic_vars["height"])
    analytic_vars["orbit_center"] = (xc, yc)

    # If there is a known shift, apply it now...
    # TODO: Commented this out due to possible shifting error -PW
    # if si._variables_track["shift"] is not None:
    #     analytic_vars["trj_design"] += si._variables_track["shift"]

    # TODO: This is a work in progress
    # if si._variables_optimization["x_rot"] is not None and si._params_exp["y_opt"]:
    #     xrot = np.array([[1.0, 0.0, 0.0],
    #                      [0.0, np.cos(si._variables_optimization["x_rot"]),
    #                       -np.sin(si._variables_optimization["x_rot"])],
    #                      [0.0, np.sin(si._variables_optimization["x_rot"]),
    #                       np.cos(si._variables_optimization["x_rot"])]])
    #     for i in range(analytic_params["ns"]):
    #         analytic_vars["trj_design"][i, :] = np.matmul(xrot, analytic_vars["trj_design"][i, :])

    # Apply the configured rotation matrix point by point, if any.
    if analytic_params["rotation"] != 0.0:
        for i in range(analytic_params["ns"]):
            analytic_vars["trj_design"][i, :] = np.matmul(analytic_vars["rot"],
                                                          analytic_vars["trj_design"][i, :])

    print("Done!")

    if si.debug:
        print("Design Trajectory:")
        print(analytic_vars["trj_design"])
        print("")

    # Write the (possibly updated) dicts back to the inflector object.
    si.analytic_parameters = analytic_params
    si.analytic_variables = analytic_vars

    return analytic_vars["trj_design"]
def generate_numerical_trajectory(si, bf=None, nsteps=100000, dt=1e-12):
# TODO: Make sure the nsteps and dt are being consistent throughout the code
analytic_params = si.analytic_parameters
analytic_vars = si.analytic_variables
track_params = si.track_parameters
if "nsteps" in track_params:
nsteps = track_params["nsteps"]
if "dt" in track_params:
dt = track_params["dt"]
pusher = ParticlePusher(analytic_params["ion"].species, "boris") # Note: leapfrog is inaccurate above dt = 1e-12
tilt = analytic_params["tilt"] # type: float
analytic_vars["kp"] = np.tan(np.deg2rad(tilt)) # Tilt parameter
analytic_vars["k"] = ((analytic_vars["height"] / analytic_vars["r_cyc"]) + analytic_vars["kp"]) / 2.0
r_start = np.array([0.0, 0.0, -analytic_vars["height"]])
v_start = np.array([0.0, 0.0, analytic_params["ion"].v_mean_m_per_s])
_r = np.zeros([nsteps + 1, 3])
_v = np.zeros([nsteps + 1, 3])
_b = np.zeros([nsteps + 1]) # Store the "b" angle for geometry generation
_r[0, :] = r_start[:]
_v[0, :] = v_start[:]
# Create a new electric field, which will be repeatedly re-defined
field_val = analytic_vars["ef_design"] * np.sign(analytic_params["ion"].species.q)
efield1 = Field(dim=0, field={"x": field_val, "y": 0.0, "z": 0.0})
if bf is not None:
bfield1 = bf
else:
bfield1 = analytic_params["bf_itp"]
# initialize the velocity half a step back:
ef = efield1(_r[0])
bf = bfield1(_r[0])
_, _v[0] = pusher.push(_r[0], _v[0], ef, bf, -0.5 * dt)
# Track for n steps
# for i in range(nsteps):
# ef = efield1(_r[i])
# bf = bfield1(_r[i])
#
# _r[i + 1], _v[i + 1] = pusher.push(_r[i], _v[i], ef, bf, dt)
#
# vx, vy, vz = _v[i + 1]
# vo = np.sqrt(vx ** 2.0 + vy ** 2.0 + vz ** 2.0)
# _b[i + 1] = i * dt * vo / analytic_vars["height"]
#
# # Toprek theory with surgery
# Eh = field_val * analytic_vars["kp"] * np.sin(_b[i + 1])
# Ehx = -Eh * vy / (np.sqrt(vo ** 2.0 - vz ** 2.0))
# Ehy = Eh * vx / (np.sqrt(vo ** 2.0 - vz ** 2.0))
#
# ex = field_val * vx * np.abs(vz) / (vo * np.sqrt(vo ** 2.0 - vz ** 2.0)) + Ehx
# ey = field_val * vy * np.abs(vz) / (vo * np.sqrt(vo ** 2.0 - vz ** 2.0)) + Ehy
# ez = -field_val * (vo ** 2.0 - vz ** 2.0) / (vo * np.sqrt(vo ** 2.0 - vz ** 2.0))
# efield1 = Field(dim=0, field={"x": ex, "y": ey, "z": ez})
# if vz < 0: # Stop when the z-component of the velocity is zero
# if si._debug:
# print(_r[i + 1, :]) # Print the final position
# break
i = 0
while i < nsteps:
ef = efield1(_r[i])
bf = bfield1(_r[i])
if si.debug and i == 0:
print("v0 =", _v[i])
print("r0 =", _r[i])
print("Fields", ef, bf)
_r[i + 1], _v[i + 1] = pusher.push(_r[i], _v[i], ef, bf, dt)
if si.debug and i == 0:
print("v1 =", _v[i + 1])
print("r1 =", _r[i + 1])
vx, vy, vz = _v[i + 1]
| |
True},
}
#here, ga ada requested predicate
})
alice['job_application_proof'] = \
await anoncreds.prover_create_proof(alice['wallet'], alice['registration_req_proof_request'],
alice['registration_req_requested_creds'], alice['master_secret_id'],
alice['schemas'], alice['cred_defs'], alice['revoc_states'])
print("\"Alice\" -> Authcrypt \"Registration-Requirement\" Proof for KS-Telecom")
alice['authcrypted_registration_req_proof'] = \
await crypto.auth_crypt(alice['wallet'], alice['key_for_ks'], alice['ks_key_for_alice'],
alice['registration_req_proof'].encode('utf-8'))
print("\"Alice\" -> Send authcrypted \"Registration-Requirement\" Proof to KS-Telecom")
ks['authcrypted_registration_req_proof'] = alice['authcrypted_registration_req_proof']
print("\"Acme\" -> Authdecrypted \"Registration-Requirement\" Proof from Alice")
_, ks['registration_req_proof'], decrypted_registration_req_proof = \
await auth_decrypt(ks['wallet'], ks['key_for_alice'], ks['authcrypted_registration_req_proof'])
ks['schemas'], ks['cred_defs'], ks['revoc_ref_defs'], ks['revoc_regs'] = \
await verifier_get_entities_from_ledger(ks['pool'], ks['did'],
decrypted_registration_req_proof['identifiers'], ks['name'])
#br smp sini
print("\"KS-Telecom\" -> Verify \"Registration-Requirement\" Proof from Alice")
assert 'Alice' == \
decrypted_registration_req_proof['requested_proof']['revealed_attrs']['attr1_referent']['raw']
assert 'Garcia' == \
decrypted_registration_req_proof['requested_proof']['revealed_attrs']['attr2_referent']['raw']
assert '123-45-6789' == \
decrypted_registration_req_proof['requested_proof']['revealed_attrs']['attr4_referent']['raw']
#here ada self-attested attribute, tapi kita ga ada, jadi ga kepake
"""
assert 'Alice' == decrypted_registration_req_proof['requested_proof']['self_attested_attrs']['attr1_referent']
assert 'Garcia' == decrypted_registration_req_proof['requested_proof']['self_attested_attrs']['attr2_referent']
assert '123-45-6789' == decrypted_registration_req_proof['requested_proof']['self_attested_attrs']['attr4_referent']
"""
assert await anoncreds.verifier_verify_proof(ks['registration_req_proof_request'], ks['registration_req_proof'],
ks['schemas'], ks['cred_defs'], ks['revoc_ref_defs'],
ks['revoc_regs'])
print("==============================")
print("== Alice Getting TRC Credential from KS-Telecom==")
print("------------------------------")
print("\"KS-Telecom\" -> Create \"TRC-Certificate\" Credential Offer for Alice")
ks['trc_certificate_cred_offer'] = \
await anoncreds.issuer_create_credential_offer(ks['wallet'], ks['trc_certificate_cred_def_id'])
print("\"KS-Telecom\" -> Get key for Alice did")
ks['alice_key_for_ks'] = \
await did.key_for_did(ks['pool'], ks['wallet'], ks['alice_connection_response']['did'])
print("\"KS-Telecom\" -> Authcrypt \"TRC-Certificate\" Credential Offer for Alice")
ks['authcrypted_job_certificate_cred_offer'] = \
await crypto.auth_crypt(ks['wallet'], ks['key_for_alice'], ks['alice_key_for_ks'],
ks['trc_certificate_cred_offer'].encode('utf-8'))
print("\"KS-Telecom\" -> Send authcrypted \"TRC-Certificate\" Credential Offer to Alice")
alice['authcrypted_trc_certificate_cred_offer'] = ks['authcrypted_trc_certificate_cred_offer']
print("\"Alice\" -> Authdecrypted \"TRC-Certificate\" Credential Offer from KS-Telecom")
alice['ks_key_for_alice_alice'], alice['trc_certificate_cred_offer'], job_certificate_cred_offer = \
await auth_decrypt(alice['wallet'], alice['key_for_ks'], alice['authcrypted_trc_certificate_cred_offer'])
print("\"Alice\" -> Get \"TRC-Certificate\" Credential Definition from Ledger")
(alice['ks_trc_certificate_cred_def_id'], alice['ks_trc_certificate_cred_def']) = \
await get_cred_def(alice['pool'], alice['did_for_ks'], trc_certificate_cred_offer['cred_def_id'])
print("\"Alice\" -> Create and store in Wallet \"TRC-Certificate\" Credential Request for KS-Telecom")
(alice['trc_certificate_cred_request'], alice['trc_certificate_cred_request_metadata']) = \
await anoncreds.prover_create_credential_req(alice['wallet'], alice['did_for_ks'],
alice['trc_certificate_cred_offer'],
alice['ks_trc_certificate_cred_def'], alice['master_secret_id'])
print("\"Alice\" -> Authcrypt \"TRC-Certificate\" Credential Request for KS-Telecom")
alice['authcrypted_job_certificate_cred_request'] = \
await crypto.auth_crypt(alice['wallet'], alice['key_for_ks'], alice['ks_key_for_alice'],
alice['trc_certificate_cred_request'].encode('utf-8'))
print("\"Alice\" -> Send authcrypted \"TRC-Certificate\" Credential Request to KS-Telecom")
alice['trc_certificate_cred_values'] = json.dumps({
#encoded belum diganti
"first_name": {"raw": "Alice", "encoded": "245712572474217942457235975012103335"},
"last_name": {"raw": "Garcia", "encoded": "312643218496194691632153761283356127"},
"phone_no": {"raw": "010-8877-8877", "encoded": "2143135425425143112321314321"},
"ssn": {"raw": "123-45-6789", "encoded": "3124141231422543541"},
"date_of_registration": {"raw": "20190520", "encoded": "20190520"}
})
ks['authcrypted_trc_certificate_cred_request'] = alice['authcrypted_trc_certificate_cred_request']
ks['trc_certificate_cred_values'] = alice['trc_certificate_cred_values']
print("\"KS-Telecom\" -> Authdecrypt \"TRC-Certificate\" Credential Request from Alice")
ks['alice_key_for_ks'], ks['trc_certificate_cred_request'], _ = \
await auth_decrypt(ks['wallet'], ks['key_for_alice'], ks['authcrypted_trc_certificate_cred_request'])
print("\"KS-Telecom\" -> Create \"TRC-Certificate\" Credential for Alice")
ks['trc_certificate_cred'], _, _ = \
await anoncreds.issuer_create_credential(ks['wallet'], ks['trc_certificate_cred_offer'],
ks['trc_certificate_cred_request'],
ks['trc_certificate_cred_values'], None, None)
print("\"KS-Telecom\" -> Authcrypt \"TRC-Certificate\" Credential for Alice")
ks['authcrypted_trc_certificate_cred'] = \
await crypto.auth_crypt(ks['wallet'], ks['key_for_alice'], ks['alice_key_for_ks'],
ks['trc_certificate_cred'].encode('utf-8'))
print("\"KS-Telecom\" -> Send authcrypted \"TRC-Certificate\" Credential to Alice")
alice['authcrypted_trc_certificate_cred'] = ks['authcrypted_trc_certificate_cred']
print("\"Alice\" -> Authdecrypted \"TRC-Certificate\" Credential from KS-Telecom")
_, alice['trc_certificate_cred'], _ = \
await auth_decrypt(alice['wallet'], alice['key_for_ks'], alice['authcrypted_trc_certificate_cred'])
print("\"Alice\" -> Store \"TRC-Certificate\" Credential")
await anoncreds.prover_store_credential(alice['wallet'], None, alice['trc_certificate_cred_request_metadata'],
alice['trc_certificate_cred'],
alice['ks_trc_certificate_cred_def'], None)
print("==============================")
print("=== Apply for a Discount at GS-50 ==")
print("==============================")
print("== Apply for a Discount at GS-50 - Onboarding ==")
print("------------------------------")
gs['did_for_alice'], gs['key_for_alice'], alice['did_for_gs'], alice['key_for_gs'], \
gs['alice_connection_response'] = await onboarding(gs, alice)
print("==============================")
print("== Apply for a Discount at GS-50 - KS-Telecom Subscription Proving ==")
print("------------------------------")
print("\"GS-50\" -> Create \"Membership\" Proof Request")
gs['apply_membership_proof_request'] = json.dumps({
'nonce': '123432421212',
'name': 'KS-Telecom Membership',
'version': '0.1',
#kita gaada requested attribute, cuma predicate
"""
'requested_attributes': {
'attr1_referent': {
'name': 'status',
'restrictions': [{'cred_def_id': ks['trc_certificate_cred_def_id']}]
}
},
"""
'requested_predicates': {
'predicate1_referent': {
'name': 'status',
'p_type': '=',
'p_value': active,
'restrictions': [{'cred_def_id': ks['crt_certificate_cred_def_id']}]
}
}
})
print("\"GS-50\" -> Get key for Alice did")
gs['alice_key_for_gs'] = \
await did.key_for_did(gs['pool'], gs['wallet'], gs['alice_connection_response']['did'])
print("\"GS-50\" -> Authcrypt \"Membership\" Proof Request for Alice")
gs['authcrypted_apply_membership_proof_request'] = \
await crypto.auth_crypt(gs['wallet'], gs['key_for_alice'], gs['alice_key_for_gs'],
gs['apply_membership_proof_request'].encode('utf-8'))
print("\"GS-50\" -> Send authcrypted \"Membership\" Proof Request to Alice")
alice['authcrypted_apply_membership_proof_request'] = gs['authcrypted_apply_membership_proof_request']
print("\"Alice\" -> Authdecrypt \"Membership\" Proof Request from GS-50")
alice['gs_key_for_alice'], alice['apply_membership_proof_request'], _ = \
await auth_decrypt(alice['wallet'], alice['key_for_gs'], alice['authcrypted_apply_membership_proof_request'])
print("\"Alice\" -> Get credentials for \"Membership\" Proof Request")
search_for_apply_membership_proof_request = \
await anoncreds.prover_search_credentials_for_proof_req(alice['wallet'],
alice['apply_membership_proof_request'], None)
cred_for_predicate1 = await get_credential_for_referent(search_for_apply_membership_proof_request, 'predicate1_referent')
await anoncreds.prover_close_credentials_search_for_proof_req(search_for_apply_membership_proof_request)
alice['creds_for_apply_membership_proof'] = {cred_for_predicate1['referent']: cred_for_predicate1}
alice['schemas'], alice['cred_defs'], alice['revoc_states'] = \
await prover_get_entities_from_ledger(alice['pool'], alice['did_for_gs'],
alice['creds_for_apply_membership_proof'],
alice['name'])
print("\"Alice\" -> Create \"Membership\" Proof")
alice['apply_membership_requested_creds'] = json.dumps({
'self_attested_attributes': {},
'requested_attributes': {},
'requested_predicates': {
'predicate1_referent': {'cred_id': cred_for_predicate1['referent']} #gak ada revealed true ky contoh yg requested attribute, gapapa or gimans?
}
})
alice['apply_membership_proof'] = \
await anoncreds.prover_create_proof(alice['wallet'], alice['apply_membership_proof_request'],
alice['apply_membership_requested_creds'], alice['master_secret_id'],
alice['schemas'], alice['cred_defs'], alice['revoc_states'])
print("\"Alice\" -> Authcrypt \"Membership\" Proof for GS-50")
alice['authcrypted_alice_apply_membership_proof'] = \
await crypto.auth_crypt(alice['wallet'], alice['key_for_gs'], alice['gs_key_for_alice'],
alice['apply_membership_proof'].encode('utf-8'))
print("\"Alice\" -> Send authcrypted \"Membership\" Proof to GS-50")
gs['authcrypted_alice_apply_membership_proof'] = alice['authcrypted_alice_apply_membership_proof']
print("\"GS-50\" -> Authdecrypted \"Membership\" Proof from Alice")
_, gs['alice_apply_membership_proof'], authdecrypted_alice_apply_membership_proof = \
await auth_decrypt(gs['wallet'], gs['key_for_alice'], gs['authcrypted_alice_apply_membership_proof'])
print("\"GS-50\" -> Get Schemas, Credential Definitions and Revocation Registries from Ledger"
" required for Proof verifying")
gs['schemas'], gs['cred_defs'], gs['revoc_defs'], gs['revoc_regs'] = \
await verifier_get_entities_from_ledger(gs['pool'], gs['did'],
authdecrypted_alice_apply_membership_proof['identifiers'], gs['name'])
print("\"GS-50\" -> Verify \"Membership\" Proof from Alice")
assert 'active' == \
authdecrypted_alice_apply_loan_proof['requested_proof']['revealed_attrs']['attr1_referent']['raw'] #in our case ini predicate, bukan attribute. Gimanas?
assert await anoncreds.verifier_verify_proof(gs['apply_membership_proof_request'], gs['alice_apply_membership_proof'],
gs['schemas'], gs['cred_defs'], gs['revoc_defs'],
gs['revoc_regs'])
""" Kita gaada fase KYC sih, buang ae?
print("==============================")
print("==============================")
print("== Apply for the loan with GS-50 - Transcript and Job-Certificate proving ==")
print("------------------------------")
print("\"GS-50\" -> Create \"Loan-Application-KYC\" Proof Request")
gs['apply_loan_kyc_proof_request'] = json.dumps({
'nonce': '123432421212',
'name': 'Loan-Application-KYC',
'version': '0.1',
'requested_attributes': {
'attr1_referent': {'name': 'first_name'},
'attr2_referent': {'name': 'last_name'},
'attr3_referent': {'name': 'ssn'}
},
'requested_predicates': {}
})
print("\"GS-50\" -> Get key for Alice did")
gs['alice_key_for_gs'] = await did.key_for_did(gs['pool'], gs['wallet'],
gs['alice_connection_response']['did'])
print("\"GS-50\" -> Authcrypt \"Loan-Application-KYC\" Proof Request for Alice")
gs['authcrypted_apply_loan_kyc_proof_request'] = \
await crypto.auth_crypt(gs['wallet'], gs['key_for_alice'], gs['alice_key_for_gs'],
gs['apply_loan_kyc_proof_request'].encode('utf-8'))
print("\"GS-50\" -> Send authcrypted \"Loan-Application-KYC\" Proof Request to Alice")
alice['authcrypted_apply_loan_kyc_proof_request'] = gs['authcrypted_apply_loan_kyc_proof_request']
print("\"Alice\" -> Authdecrypt \"Loan-Application-KYC\" Proof Request from GS-50")
alice['gs_key_for_alice'], alice['apply_loan_kyc_proof_request'], _ = \
await auth_decrypt(alice['wallet'], alice['key_for_gs'], alice['authcrypted_apply_loan_kyc_proof_request'])
print("\"Alice\" -> Get credentials for \"Loan-Application-KYC\" Proof Request")
search_for_apply_loan_kyc_proof_request = \
await anoncreds.prover_search_credentials_for_proof_req(alice['wallet'],
alice['apply_loan_kyc_proof_request'], None)
cred_for_attr1 = await get_credential_for_referent(search_for_apply_loan_kyc_proof_request, 'attr1_referent')
cred_for_attr2 = await get_credential_for_referent(search_for_apply_loan_kyc_proof_request, 'attr2_referent')
cred_for_attr3 = await get_credential_for_referent(search_for_apply_loan_kyc_proof_request, 'attr3_referent')
await anoncreds.prover_close_credentials_search_for_proof_req(search_for_apply_loan_kyc_proof_request)
alice['creds_for_apply_loan_kyc_proof'] = {cred_for_attr1['referent']: cred_for_attr1,
cred_for_attr2['referent']: cred_for_attr2,
cred_for_attr3['referent']: cred_for_attr3}
alice['schemas'], alice['cred_defs'], alice['revoc_states'] = \
await prover_get_entities_from_ledger(alice['pool'], alice['did_for_gs'],
alice['creds_for_apply_loan_kyc_proof'], 'Alice')
print("\"Alice\" -> Create \"Loan-Application-KYC\" Proof")
alice['apply_loan_kyc_requested_creds'] = json.dumps({
'self_attested_attributes': {},
'requested_attributes': {
'attr1_referent': {'cred_id': cred_for_attr1['referent'], 'revealed': True},
'attr2_referent': {'cred_id': cred_for_attr2['referent'], 'revealed': True},
'attr3_referent': {'cred_id': cred_for_attr3['referent'], 'revealed': True}
},
'requested_predicates': {}
})
alice['apply_loan_kyc_proof'] = \
await anoncreds.prover_create_proof(alice['wallet'], alice['apply_loan_kyc_proof_request'],
alice['apply_loan_kyc_requested_creds'], alice['master_secret_id'],
alice['schemas'], alice['cred_defs'], alice['revoc_states'])
print("\"Alice\" -> Authcrypt \"Loan-Application-KYC\" Proof for GS-50")
alice['authcrypted_alice_apply_loan_kyc_proof'] = \
await crypto.auth_crypt(alice['wallet'], alice['key_for_gs'], alice['gs_key_for_alice'],
alice['apply_loan_kyc_proof'].encode('utf-8'))
print("\"Alice\" -> Send authcrypted \"Loan-Application-KYC\" Proof to GS-50")
gs['authcrypted_alice_apply_loan_kyc_proof'] = alice['authcrypted_alice_apply_loan_kyc_proof']
print("\"GS-50\" -> Authdecrypted \"Loan-Application-KYC\" Proof from Alice")
_, gs['alice_apply_loan_kyc_proof'], alice_apply_loan_kyc_proof = \
await auth_decrypt(gs['wallet'], gs['key_for_alice'], gs['authcrypted_alice_apply_loan_kyc_proof'])
print("\"GS-50\" -> Get Schemas, Credential Definitions and Revocation Registries from Ledger"
" required for Proof verifying")
gs['schemas'], gs['cred_defs'], gs['revoc_defs'], gs['revoc_regs'] = \
await verifier_get_entities_from_ledger(gs['pool'], gs['did'],
alice_apply_loan_kyc_proof['identifiers'], 'GS-50')
print("\"GS-50\" -> Verify \"Loan-Application-KYC\" Proof from Alice")
assert 'Alice' == \
alice_apply_loan_kyc_proof['requested_proof']['revealed_attrs']['attr1_referent']['raw']
assert 'Garcia' == \
alice_apply_loan_kyc_proof['requested_proof']['revealed_attrs']['attr2_referent']['raw']
assert '123-45-6789' == \
alice_apply_loan_kyc_proof['requested_proof']['revealed_attrs']['attr3_referent']['raw']
assert await anoncreds.verifier_verify_proof(gs['apply_loan_kyc_proof_request'],
gs['alice_apply_loan_kyc_proof'],
gs['schemas'], gs['cred_defs'], gs['revoc_defs'],
gs['revoc_regs'])
"""
print("==============================")
print(" \"Sovrin Steward\" -> Close and Delete wallet")
await wallet.close_wallet(steward['wallet'])
await wallet.delete_wallet(steward['wallet_config'], steward['wallet_credentials'])
print("\"Government\" -> Close and Delete wallet")
await wallet.close_wallet(government['wallet'])
await wallet.delete_wallet(government['wallet_config'], government['wallet_credentials'])
print("\"Faber\" -> Close and Delete wallet")
await wallet.close_wallet(faber['wallet'])
await wallet.delete_wallet(faber['wallet_config'], faber['wallet_credentials'])
print("\"Acme\" -> Close and Delete wallet")
await wallet.close_wallet(ks['wallet'])
await wallet.delete_wallet(ks['wallet_config'], ks['wallet_credentials'])
print("\"GS-50\" -> Close and Delete wallet")
await wallet.close_wallet(gs['wallet'])
await wallet.delete_wallet(gs['wallet_config'], gs['wallet_credentials'])
print("\"Alice\" -> Close and Delete wallet")
await wallet.close_wallet(alice['wallet'])
await wallet.delete_wallet(alice['wallet_config'], alice['wallet_credentials'])
print("Close and Delete pool")
await pool.close_pool_ledger(pool_['handle'])
await pool.delete_pool_ledger_config(pool_['name'])
print("Getting started -> done")
async def onboarding(_from, to):
print("\"{}\" -> Create and store in Wallet \"{} {}\" DID".format(_from['name'], _from['name'], to['name']))
(from_to_did, from_to_key) = await did.create_and_store_my_did(_from['wallet'], "{}")
print("\"{}\" -> Send Nym to Ledger for \"{} {}\" DID".format(_from['name'], _from['name'], to['name']))
await send_nym(_from['pool'], _from['wallet'], _from['did'], from_to_did, from_to_key, None)
print("\"{}\" -> Send connection request to {} with \"{} {}\" DID and nonce"
.format(_from['name'], to['name'], _from['name'], to['name']))
connection_request = {
'did': from_to_did,
'nonce': 123456789
}
if 'wallet' not in to:
print("\"{}\" -> Create wallet".format(to['name']))
try:
await wallet.create_wallet(to['wallet_config'], to['wallet_credentials'])
except IndyError as ex:
if ex.error_code == ErrorCode.PoolLedgerConfigAlreadyExistsError:
pass
to['wallet'] = await wallet.open_wallet(to['wallet_config'], to['wallet_credentials'])
print("\"{}\" -> Create and store in Wallet \"{} {}\" DID".format(to['name'], to['name'], _from['name']))
(to_from_did, to_from_key) = await did.create_and_store_my_did(to['wallet'], "{}")
print("\"{}\" -> Get key for did from \"{}\" connection request".format(to['name'], _from['name']))
from_to_verkey = await did.key_for_did(_from['pool'], to['wallet'], connection_request['did'])
print("\"{}\" -> Anoncrypt connection response for \"{}\" with \"{} {}\" DID, verkey and nonce"
.format(to['name'], _from['name'], to['name'], _from['name']))
to['connection_response'] = json.dumps({
'did': to_from_did,
'verkey': to_from_key,
'nonce': connection_request['nonce']
})
to['anoncrypted_connection_response'] = \
await crypto.anon_crypt(from_to_verkey, to['connection_response'].encode('utf-8'))
print("\"{}\" -> Send anoncrypted connection | |
"""
Code adapted from the Mathis Lab
MIT License Copyright (c) 2022 <NAME>
DataJoint Schema for DeepLabCut 2.x, Supports 2D and 3D DLC via triangulation.
"""
import datajoint as dj
import os
import inspect
import importlib
import numpy as np
from pathlib import Path
from datetime import datetime
import yaml
import cv2
from element_interface.utils import find_full_path, find_root_directory
schema = dj.schema()
_linking_module = None
def activate(
    dlc_schema_name, *, create_schema=True, create_tables=True, linking_module=None
):
    """Activate the `deeplabcut` element schema on the database server.

    :param dlc_schema_name: schema name on the database server to activate the
        `deeplabcut` element.
    :param create_schema: when True (default), create the schema in the
        database if it does not yet exist.
    :param create_tables: when True (default), create the tables in the
        database if they do not yet exist.
    :param linking_module: a module (or module name) containing the required
        dependencies to activate the `session` element:
        Upstream tables:
            + Session: parent table to VideoRecording, identifying a recording session
            + Equipment: parent table to VideoRecording, identifying recording device
        Functions:
            + get_dlc_root_data_dir() -> list
                Retrieve the root data director(y/ies) with behavioral
                recordings for all subject/sessions.
                :return: a string for full path to the root data directory
            + get_dlc_processed_data_dir(session_key: dict) -> str
                Optional function to retrieve the desired output directory for
                DeepLabCut files for a given session. If unspecified, output is
                stored in the session video folder, per DLC default.
                :return: a string for the absolute path of output directory
    """
    # Accept a module name as well as an already-imported module object.
    if isinstance(linking_module, str):
        linking_module = importlib.import_module(linking_module)
    assert inspect.ismodule(
        linking_module
    ), "The argument 'dependency' must be a module's name or a module"
    assert hasattr(
        linking_module, "get_dlc_root_data_dir"
    ), "The linking module must specify a lookup funtion for a root data directory"

    # Keep a module-level reference so the helper functions and table methods
    # below can reach the user-supplied dependencies.
    global _linking_module
    _linking_module = linking_module

    # activate; add_objects exposes the linking module's names (Session,
    # Equipment, ...) to the table definitions in this schema.
    schema.activate(
        dlc_schema_name,
        create_schema=create_schema,
        create_tables=create_tables,
        add_objects=_linking_module.__dict__,
    )
# -------------- Functions required by element-deeplabcut ---------------
def get_dlc_root_data_dir() -> list:
    """Return the candidate root directories for raw behavioral data.

    Delegates to the user-configured linking module and normalizes the result
    to a list. If a processed-data directory is configured and is not already
    among the roots, it is appended so files stored there can be resolved too.

    :return: list of root data directories
    """
    roots = _linking_module.get_dlc_root_data_dir()
    if isinstance(roots, (str, Path)):
        roots = [roots]

    if hasattr(_linking_module, "get_dlc_processed_data_dir"):
        if get_dlc_processed_data_dir() not in roots:
            roots.append(_linking_module.get_dlc_processed_data_dir())

    return roots
def get_dlc_processed_data_dir() -> str:
    """Return the output directory for processed DeepLabCut files.

    Uses the linking module's ``get_dlc_processed_data_dir`` when that
    function is provided; otherwise falls back to the first root data
    directory (DeepLabCut's default behavior of storing output alongside
    the session videos then applies).
    """
    user_fn = getattr(_linking_module, "get_dlc_processed_data_dir", None)
    if user_fn is not None:
        return user_fn()
    return get_dlc_root_data_dir()[0]
# ----------------------------- Table declarations ----------------------
@schema
class VideoRecording(dj.Manual):
    """Manually entered behavioral video recording for a session.

    Each recording belongs to one Session and records the Equipment
    (camera) used; the actual files live in the File part table.
    """

    definition = """
    -> Session
    recording_id: int
    ---
    -> Equipment
    """

    class File(dj.Part):
        """One or more video files belonging to a single recording."""

        definition = """
        -> master
        file_id: int
        ---
        file_path: varchar(255) # filepath of video, relative to root data directory
        """
@schema
class RecordingInfo(dj.Imported):
    """Automatically ingested video metadata: dimensions, frame count, fps.

    Populated by reading each registered video file with OpenCV.
    """

    definition = """
    -> VideoRecording
    ---
    px_height : smallint # height in pixels
    px_width : smallint # width in pixels
    nframes : smallint # number of frames
    fps = NULL : int # (Hz) frames per second
    recording_datetime = NULL : datetime # Datetime for the start of the recording
    recording_duration : float # video duration (s) from nframes / fps
    """

    @property
    def key_source(self):
        # Only process recordings that have at least one file registered.
        return VideoRecording & VideoRecording.File

    def make(self, key):
        """Aggregate metadata across all files of one recording.

        All files must share identical (height, width, fps) — enforced by
        the assert below; frame counts are summed across files.
        """
        file_paths = (VideoRecording.File & key).fetch("file_path")
        nframes = 0
        px_height, px_width, fps = None, None, None
        for file_path in file_paths:
            file_path = (find_full_path(get_dlc_root_data_dir(), file_path)).as_posix()
            cap = cv2.VideoCapture(file_path)
            info = (
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
                int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FPS)),
            )
            if px_height is not None:
                # every file after the first must match the first file's specs
                assert (px_height, px_width, fps) == info
            px_height, px_width, fps = info
            nframes += int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            cap.release()
        # NOTE(review): with zero files fps stays None (TypeError below), and
        # fps == 0 from an unreadable file would raise ZeroDivisionError — confirm
        # upstream guarantees via key_source.
        self.insert1(
            {
                **key,
                "px_height": px_height,
                "px_width": px_width,
                "nframes": nframes,
                "fps": fps,
                "recording_duration": nframes / fps,
            }
        )
@schema
class BodyPart(dj.Lookup):
    """Lookup of tracked body parts with optional free-text descriptions."""

    definition = """
    body_part : varchar(32)
    ---
    body_part_description='' : varchar(1000)
    """

    @classmethod
    def extract_new_body_parts(cls, dlc_config: dict, verbose=True):
        """Print a list of new body parts from a dlc config,
        to examine before generating descriptions
        :param dlc_config: path to a config.y*ml, or dict including contents thereof
        :param verbose: default True. Print existing/new items to console
        :return: numpy array of body parts present in the config but not yet
            in this table
        """
        # Accept either a parsed dict or a path to a YAML config file.
        if not isinstance(dlc_config, dict):
            dlc_config_fp = find_full_path(get_dlc_root_data_dir(), Path(dlc_config))
            assert dlc_config_fp.exists() and dlc_config_fp.suffix in (
                ".yml",
                ".yaml",
            ), f"dlc_config is neither dict nor filepath\n Check: {dlc_config_fp}"
            if dlc_config_fp.suffix in (".yml", ".yaml"):
                with open(dlc_config_fp, "rb") as f:
                    dlc_config = yaml.safe_load(f)
        # -- Check and insert new BodyPart --
        assert "bodyparts" in dlc_config, f"Found no bodyparts section in {dlc_config}"
        tracked_body_parts = cls.fetch("body_part")
        # body parts named in the config but absent from the table
        new_body_parts = np.setdiff1d(dlc_config["bodyparts"], tracked_body_parts)
        if verbose:  # Added to silence duplicate prompt during `insert_new_model`
            print(f"Existing body parts: {tracked_body_parts}")
            print(f"New body parts: {new_body_parts}")
        return new_body_parts

    @classmethod
    def insert_from_config(
        cls, dlc_config: dict, descriptions: list = None, prompt=True
    ):
        """Insert all body parts from a config file
        :param dlc_config: path to a config.y*ml, or dict including contents thereof
        :param descriptions: optional list describing new body parts
        :param prompt: default True; ask for confirmation before inserting
        """
        # handle dlc_config being a yaml file
        new_body_parts = cls.extract_new_body_parts(dlc_config)
        if new_body_parts is not None:  # Required bc np.array is ambiguous as bool
            if descriptions:
                assert len(descriptions) == len(new_body_parts), (
                    "Descriptions list does not match "
                    + " the number of new_body_parts"
                )
                print(f"New descriptions: {descriptions}")
            if descriptions is None:
                # default to empty descriptions, one per new body part
                descriptions = ["" for x in range(len(new_body_parts))]
            if (
                prompt
                and dj.utils.user_choice(
                    f"Insert {len(new_body_parts)} new body " + "part(s)?"
                )
                != "yes"
            ):
                print("Canceled insert.")
                return
            cls.insert(
                [
                    {"body_part": b, "body_part_description": d}
                    for b, d in zip(new_body_parts, descriptions)
                ]
            )
@schema
class Model(dj.Manual):
definition = """
model_name : varchar(64) # user-friendly model name
---
task : varchar(32) # task in the config yaml
date : varchar(16) # date in the config yaml
iteration : int # iteration/version of this model
snapshotindex : int # which snapshot for prediction (if -1, latest)
shuffle : int # which shuffle of the training dataset
trainingsetindex : int # which training set fraction to generate model
unique index (task, date, iteration, shuffle, snapshotindex, trainingsetindex)
scorer : varchar(64) # scorer/network name - DLC's GetScorerName()
config_template : longblob # dictionary of the config for analyze_videos()
project_path : varchar(255) # DLC's project_path in config relative to root
model_prefix='' : varchar(32)
model_description='' : varchar(1000)
-> [nullable] train.TrainingParamSet
"""
# project_path is the only item required downstream in the pose schema
# what happens if TrainingParamSet isn't in the namespace?
    class BodyPart(dj.Part):
        """Body parts tracked by this model (links Model to BodyPart)."""

        definition = """
        -> master
        -> BodyPart
        """
@classmethod
def insert_new_model(
cls,
model_name: str,
dlc_config: dict,
*,
shuffle: int,
trainingsetindex,
model_description="",
model_prefix="",
paramset_idx: int = None,
prompt=True,
):
"""Insert new model into the dlc.Model table
:param model_name: User-friendly name for this model
:param dlc_config: path to a config.y*ml, or dict including contents thereof
:param shuffle: integer, shuffle number
:param trainingsetindex: index of training fraction list in config.yaml
:param model_description: Description of this model
:param model_prefix: Filename prefix used across DLC project
:param body_part_descriptions: optional list for new items in BodyParts table
:param paramset_idx: optional index from the TrainingParamSet table
"""
from deeplabcut.utils.auxiliaryfunctions import GetScorerName
from deeplabcut import __version__ as dlc_version
from packaging import version
from distutils.util import strtobool
# handle dlc_config being a yaml file
if not isinstance(dlc_config, dict):
dlc_config_fp = find_full_path(get_dlc_root_data_dir(), Path(dlc_config))
assert dlc_config_fp.exists(), (
"dlc_config is neither dict nor filepath" + f"\n Check: {dlc_config_fp}"
)
if dlc_config_fp.suffix in (".yml", ".yaml"):
with open(dlc_config_fp, "rb") as f:
dlc_config = yaml.safe_load(f)
# ---- Get and resolve project path ----
project_path = find_full_path(
get_dlc_root_data_dir(), dlc_config["project_path"]
)
root_dir = find_root_directory(get_dlc_root_data_dir(), project_path)
# ---- Build config ----
template_attributes = [
"Task",
"date",
"TrainingFraction",
"iteration",
"snapshotindex",
"batch_size",
"cropping",
"x1",
"x2",
"y1",
"y2",
"project_path",
]
config_template = {
k: v for k, v in dlc_config.items() if k in template_attributes
}
# ---- Get scorer name ----
# "or 'f'" below covers case where config returns None. StrToBool handles else
scorer_legacy = (
1
if (
strtobool(dlc_config.get("scorer_legacy") or "f")
or version.parse(dlc_version) < version.parse("2.1")
)
else 0
) # if old version, or | |
in foreign_graph[part]:
if f not in graph[part]:
graph[part].append(f)
# graph[part] += foreign_graph[part]
continue
# print "get FK: {0}".format(field)
if not ForeignModel:
ForeignModel = cls.get_fk_model(field)
if not ForeignModel:
rest_helpers.log_print("no foreignkey: {0}".format(field))
continue
# print ForeignModel
# print graph["recurse_into"]
# print graph["recurse_into"]
if field not in graph["recurse_into"]:
graph["recurse_into"].append(field)
# print ForeignModel
# if not hasattr(ForeignModel, "getGraph"):
# foreign_graph = {}
# foreign_graph["fields"] = []
# for f in ForeignModel._meta.fields:
# if f.name not in RestModel.__RestMeta__.NO_SHOW_FIELDS:
# foreign_graph["fields"].append(f.name)
# print ForeignModel
# print foreign_graph["fields"]
# else:
if not hasattr(ForeignModel, "getGraph"):
# print "NO getGraph"
continue
# print "getting graph: {0} for {1}".format(gname, field)
foreign_graph = ForeignModel.getGraph(gname)
# print foreign_graph
for part in ["fields", "recurse_into", "extra", "exclude"]:
if part not in foreign_graph:
continue
graph_part = foreign_graph[part]
if part not in graph:
graph[part] = []
root_part = graph[part]
for f in graph_part:
if type(f) is tuple:
f1, f2 = f
nfname = ("{0}.{1}".format(field, f1), f2)
elif graph["no_uscore"] and '_' in f:
f1, f2 = f, f.replace('_', '').split('.')[-1]
# print field
# print f2
nfname = ("{0}.{1}".format(field, f1), f2)
else:
nfname = "{0}.{1}".format(field, f)
if nfname not in root_part:
root_part.append(nfname)
del graph["graphs"]
if "fields" not in graph:
if graph["no_uscore"]:
graph["fields"] = []
for f in field_names:
if "_" in f:
f1, f2 = f, f.lower().replace('_', '')
# print "noscore"
# print f1
# print f2
graph["fields"].append((f1, f2))
else:
graph["fields"].append(f)
else:
graph["fields"] = field_names
if "no_uscore" in graph:
del graph["no_uscore"]
return graph
@classmethod
def ro_objects(cls):
using = getattr(cls.RestMeta, "RO_DATABASE", None)
if using is None:
using = getattr(cls.RestMeta, "DATABASE", None)
# if using is None:
# if settings.DATABASES.get("readonly", None) != None:
# using = "readonly"
if using:
return cls.objects.using(using)
return cls.objects
@classmethod
def rw_objects(cls):
using = getattr(cls.RestMeta, "DATABASE", None)
if using:
return cls.objects.using(using)
return cls.objects
def safeSave(self, **kwargs):
using = getattr(self.RestMeta, "DATABASE", None)
if using:
return self.save(using=using, **kwargs)
return self.save(**kwargs)
@classmethod
def getGraph(cls, name):
graph_key = "_graph_{0}__".format(name)
if hasattr(cls, graph_key):
return getattr(cls, graph_key)
if not hasattr(cls, "_lock__"):
cls._lock__ = threading.RLock()
# cls._lock__.acquire()
# try:
graph = cls.buildGraph(name)
# print "-" * 80
# print "SETTING GRAPH {0} FOR {1}".format(name, cls.__name__)
# print graph
setattr(cls, graph_key, graph)
# print "." * 80
# except:
# pass
# cls._lock__.release()
return graph
def toGraph(self, request=None, graph="basic"):
RestModel._setupGraphHelpers()
if not request:
request = GRAPH_HELPERS.get_request()
return GRAPH_HELPERS.restGet(request, self, return_httpresponse=False, **self.getGraph(graph))
@classmethod
def getActiveLogger(cls):
return rest_helpers.getLogger(cls.getActiveRequest())
@classmethod
def getActiveMember(cls):
request = cls.getActiveRequest()
if request:
return request.member
return None
@classmethod
def getActiveRequest(cls):
if not GRAPH_HELPERS.get_request:
mw = importlib.import_module("rest.middleware")
GRAPH_HELPERS.get_request = mw.get_request
return GRAPH_HELPERS.get_request()
@classmethod
def getFromRequest(cls, request):
key = cls.__name__.lower()
key_p = "{0}_id".format(key)
lookup_fields = [key, key_p]
using = getattr(cls.RestMeta, "DATABASE", None)
for field in lookup_fields:
value = request.DATA.get(field)
if value:
if not using:
obj = cls.objects.filter(pk=value).first()
else:
obj = cls.objects.using(using).filter(pk=value).first()
if obj:
return obj
lookup_fields = getattr(cls.RestMeta, "UNIQUE_LOOKUP", [])
for field in lookup_fields:
value = request.DATA.get(field)
if value:
q = {}
q[field] = value
if not using:
obj = cls.objects.filter(**q).first()
else:
obj = cls.objects.using(using).filter(**q).first()
if obj:
return obj
return None
value = request.DATA.get(key_p)
if not value:
value = request.DATA.get(key)
if not value:
return None
if using:
return cls.objects.using(using).filter(pk=value).first()
return cls.objects.filter(pk=value).first()
@classmethod
def getFromPK(cls, pk):
using = getattr(cls.RestMeta, "DATABASE", None)
if using:
return cls.objects.using(using).filter(pk=pk).first()
return cls.objects.filter(pk=pk).first()
@classmethod
def restEncrypt(cls, data):
if ENCRYPTER:
return ENCRYPTER.encrypt(data)
return data
@staticmethod
def restGetModel(app_name, model_name):
return apps.get_model(app_name, model_name)
@staticmethod
def getModel(app_name, model_name):
return apps.get_model(app_name, model_name)
def restGetGenericModel(self, field):
# called by the rest module to magically parse
# a component that is marked genericrelation in a graph
if not hasattr(self, field):
rest_helpers.log_print("model has no field: {0}".format(field))
return None
name = getattr(self, field)
if not name or "." not in name:
return None
a_name, m_name = name.split(".")
model = RestModel.getModel(a_name, m_name)
if not model:
rest_helpers.log_print("GENERIC MODEL DOES NOT EXIST: {0}".format(name))
return model
def restGetGenericRelation(self, field):
# called by the rest module to magically parse
# a component that is marked genericrelation in a graph
GenericModel = self.restGetGenericModel(field)
if not GenericModel:
return None
key = getattr(self, "{0}_id".format(field))
return GenericModel.rw_objects().filter(pk=key).first()
@staticmethod
def restGetModelDB(Model, default=None):
if hasattr(Model, "RestMeta"):
return getattr(Model.RestMeta, "DATABASE", default)
return default
@property
def has_model_changed(self):
if hasattr(self, "_changed__"):
return len(self._changed__) > 0
return False
def saveFields(self, allow_null=True, **kwargs):
"""
Helper method to save a list of fields
"""
self._changed__ = UberDict()
for key, value in list(kwargs.items()):
if value is None and not allow_null:
continue
self.restSaveField(key, value)
if len(self._changed__):
self.save()
    def restSaveField(self, fieldname, value, has_fields=False, has_no_fields=False, using=None):
        """Assign one field on this instance from an untrusted REST payload.

        Handles, in order: blocked fields (global and RestMeta NO_SAVE_FIELDS,
        SAVE_FIELDS whitelist), "_id"-suffix normalization, custom
        ``set_<field>`` setters, foreign keys supplied as dicts or pk values,
        MediaItem uploads, and scalar coercion (datetime/date/int/float/
        currency/bool). Changed fields are recorded in ``self._changed__``.

        :param fieldname: field name from the payload
        :param value: raw incoming value
        :param has_fields: True when RestMeta.SAVE_FIELDS whitelist exists
        :param has_no_fields: True when RestMeta.NO_SAVE_FIELDS exists
        :param using: optional DB alias used when saving nested FK dicts
        """
        if not hasattr(self, "_changed__"):
            self._changed__ = UberDict()
        # never accept private/underscore-prefixed names from a payload
        if fieldname.startswith("_"):
            return
        if not hasattr(self, "_field_names__"):
            self._field_names__ = [f.name for f in self._meta.get_fields()]
        # globally blocked fields first, then per-model block/allow lists
        if fieldname in RestModel.__RestMeta__.NO_SAVE_FIELDS:
            return
        if has_no_fields and fieldname in self.RestMeta.NO_SAVE_FIELDS:
            return
        if has_fields and fieldname not in self.RestMeta.SAVE_FIELDS:
            return
        if fieldname.endswith("_id") and not self.get_field_type(fieldname):
            # django will have ForeignKeys with _id, we don't want that, on_delete=models.CASCADE
            fieldname = fieldname[:-3]
        # a set_<field> method on the model takes full control of assignment
        setter = "set_{0}".format(fieldname)
        if hasattr(self, setter):
            getattr(self, setter)(value)
            return
        if fieldname in self._field_names__:
            # TODO check if it is a function
            if isinstance(value, models.Model):
                setattr(self, fieldname, value)
                self._changed__[fieldname] = True
                return
            ForeignModel = self.get_fk_model(fieldname)
            if ForeignModel and isinstance(value, dict):
                # nested dict: create or update the related object in place
                obj = getattr(self, fieldname, None)
                if obj is None:
                    obj = ForeignModel()
                if using is None:
                    using = self.restGetModelDB(self)
                obj.saveFromDict(None, value, using=using)
                setattr(self, fieldname, obj)
                self._changed__[fieldname] = True
                return
            elif ForeignModel and value and (type(value) is int or value.isdigit()):
                # FK given as a pk; resolve on the related model's database
                value = int(value)
                using = RestModel.restGetModelDB(ForeignModel)
                if using:
                    value = ForeignModel.objects.using(using).filter(pk=value).first()
                else:
                    value = ForeignModel.objects.filter(pk=value).first()
            elif ForeignModel and "MediaItem" in ForeignModel.__name__:
                # media uploads go through the dedicated save path
                if value:
                    self.saveMediaFile(value, fieldname, None, True)
                return
            elif ForeignModel and not value:
                # falsy value clears the FK
                value = None
            # maybe we could look for to_python here to make sure we have proper conversion
            # thinking mainly around datetimes from epoch values
            if not ForeignModel:
                field_model = self._meta.get_field(fieldname)
                # hack to handle save datetime fields correctly from floats
                try:
                    if field_model and value != None:
                        field_model_name = field_model.__class__.__name__
                        if field_model_name == "DateTimeField":
                            value = rest_helpers.parseDateTime(value)
                        elif field_model_name == "DateField":
                            value = rest_helpers.parseDate(value, as_date=True)
                        elif field_model_name == "IntegerField":
                            value = int(value)
                        elif field_model_name == "FloatField":
                            value = float(value)
                        elif field_model_name == "CurrencyField":
                            value = Decimal(value).quantize(TWOPLACES)
                        elif field_model_name == "BooleanField":
                            if value in [True, 1, 'True', 'true', '1', 't', 'y', 'yes']:
                                value = True
                            else:
                                value = False
                except Exception:
                    # NOTE(review): coercion failures silently drop the field
                    return
            # record the previous value only when the field actually changes
            if hasattr(self, fieldname) and getattr(self, fieldname) != value:
                self._changed__[fieldname] = getattr(self, fieldname)
                setattr(self, fieldname, value)
def saveFromRequest(self, request, **kwargs):
if "files" not in kwargs:
kwargs["files"] = request.FILES
return self.saveFromDict(request, request.DATA, **kwargs)
def _recordRestChange(self, fieldname, old_value):
if not hasattr(self, "_changed__"):
self._changed__ = UberDict()
if "." in fieldname:
fields = fieldname.split('.')
root = self._changed__
for f in fields[:-1]:
if f not in root:
root[f] = UberDict()
root = root[f]
root[fields[-1]] = old_value
else:
self._changed__[fieldname] = old_value
def saveFromDict(self, request, data, files=None, **kwargs):
can_save = getattr(self.RestMeta, "CAN_SAVE", True)
if not can_save:
return self.restStatus(request, False, error="saving not allowed via rest for this model.")
# check check for save permissions
if request is None:
request = RestModel.getActiveRequest()
if request is None:
request = UberDict(member=None, FILES=[])
if hasattr(self, "onRestCanSave"):
# this should throw an error
self.onRestCanSave(request)
is_new = self.id is None
has_fields = hasattr(self.RestMeta, "SAVE_FIELDS") and len(self.RestMeta.SAVE_FIELDS)
has_no_fields = hasattr(self.RestMeta, "NO_SAVE_FIELDS") and len(self.RestMeta.NO_SAVE_FIELDS)
self._field_names__ = [f.name for f in self._meta.get_fields()]
# fix for multidatabase support and using readonly db for get
self._state.db = kwargs.get("using", self.restGetModelDB(self, "default"))
auto_save_fields = getattr(self.RestMeta, "AUTO_SAVE", None)
if auto_save_fields:
rest_helpers.log_print(auto_save_fields)
for field in auto_save_fields:
rest_helpers.log_print(field)
if isinstance(field, tuple):
m_field, req_field = field
else:
m_field = field
req_field = field
req_value = getattr(request, req_field, None)
if request and req_value:
data[m_field] = req_value
rest_helpers.log_print(data)
self._changed__ = UberDict()
if hasattr(self.RestMeta, "POST_SAVE_FIELDS"):
post_save_fields = self.RestMeta.POST_SAVE_FIELDS
else:
post_save_fields = []
using = kwargs.get("using", | |
payload = json.loads(r.text)
except Exception as e:
print("Failed to query DN. Exception: {}".format(e))
status = 666
return (status, payload)
def query_class(self, query_class, query_filter=''):
s = requests.Session()
try:
r = s.get('https://{}/api/node/class/{}.json{}'.format(self.apic,
query_class, query_filter), cookies=self.cookies,
verify=False)
status = r.status_code
payload = json.loads(r.text)
except Exception as e:
print("Failed to query Class. Exception: {}".format(e))
status = 666
return (status, payload)
# Method must be called with the following kwargs.
# url: the url of the objectquery, for example /api/mo/...
# Returns status code and json payload of query
def query_url(self, url):
s = requests.Session()
try:
r = s.get('https://'+str(self.apic)+url,
cookies=self.cookies, verify=False)
status = r.status_code
payload = json.loads(r.text)
except Exception as e:
print("Failed to query Class. Exception: {}".format(e))
status = 666
return (status, payload)
    # Queries the fabric to retrieve information about the ports,
    # and returns them in a dictionary of dictionaries, structured as follows:
#
# node_data[node_id]['ports'][intf]['intSel'] = intSel (1 value)
# node_data[node_id]['ports'][intf]['intProf'] = intProf intf selector's father
# node_data[node_id]['ports'][intf]['polGrp'] = polGrp (1 value)
# node_data[node_id]['ports'][intf]['type'] = port_type 'access' or 'bundle'
# node_data[node_id]['ports'][intf]['descr']
# node_data[node_id]['swProf'][switchProf][swSel] contains all swSelectors
# node_data[node_id]['intProf'][intProf] --> father's switch profile
    def query_ports (self):
        """Build a per-node dictionary describing every configured leaf port.

        See the comment block above this method for the exact layout of the
        returned ``node_data`` structure. Returns None as soon as any APIC
        query fails.
        """
        node_data = {}
        query = '/api/node/class/infraNodeP.json?query-target=subtree&target-subtree-class=infraNodeBlk'
        [status, payload] = self.query_url(query)
        if status != 200:
            return None
        json_data = payload['imdata']
        for obj in json_data:
            dn = obj['infraNodeBlk']['attributes']['dn']
            # We obtain the relationship between switch profiles and switch selector profiles,
            # from the switch selector profile we obtain the real node IDs. Potentially there
            # could be multiple switch selectors, not just one.
            #
            # uni/infra/nprof-<Leaf_Prof>/leaves-<Switch_Selector>-typ-range/nodeblk-1fd76fa26065f27f
            reg = re.search('nprof-(.*?)\/leaves-(.*?)-typ-range\/nodeblk', dn)
            switchProf = reg.group(1)
            swSel = reg.group(2)
            nodeFrom = (int)(obj['infraNodeBlk']['attributes']['from_'])
            nodeTo = (int)(obj['infraNodeBlk']['attributes']['to_'])
            # a node block is an inclusive range of node IDs
            for node_id in range(nodeFrom, nodeTo+1):
                if not node_id in node_data:
                    node_data[node_id] = {}
                    node_data[node_id]['swProf'] = {}
                    node_data[node_id]['intProf'] = {}
                    node_data[node_id]['ports'] = {}
                if not switchProf in node_data[node_id]['swProf']:
                    node_data[node_id]['swProf'][switchProf] = {}
                node_data[node_id]['swProf'][switchProf][swSel] = 1
        query = '/api/node/class/infraNodeP.json?query-target=subtree&target-subtree-class=infraRsAccPortP'
        [status, payload] = self.query_url(query)
        if status != 200:
            return None
        json_data = payload['imdata']
        for obj in json_data:
            dn = obj['infraRsAccPortP']['attributes']['dn']
            # here we obtain the relationship between the switch profile and the interface profile
            #
            # uni/infra/nprof-<Leaf_Prof>/rsaccPortP-[uni/infra/accportprof-<if_Prof>]
            reg = re.search('nprof-(.*?)\/.*\[uni/infra/accportprof-(.*)\]', dn)
            switchProf = reg.group(1)
            intProf = reg.group(2)
            for node_id in node_data:
                if switchProf in node_data[node_id]['swProf']:
                    node_data[node_id]['intProf'][intProf] = switchProf
        # From this query, you get the port ranges for all the interface selectors, the dn
        # of the object contains also the interface profile to which the selectors belong to.
        #
        # uni/infra/accportprof-<if_Prof>/hports-<if_Selector>-typ-range/portblk-4e72096af1945b11
        [status, payload] = self.query_class('infraPortBlk')
        if status != 200:
            return None
        json_data = payload['imdata']
        for obj in json_data:
            dn = obj['infraPortBlk']['attributes']['dn']
            module = (int)(obj['infraPortBlk']['attributes']['fromCard'])
            fromPort = (int)(obj['infraPortBlk']['attributes']['fromPort'])
            toPort = (int)(obj['infraPortBlk']['attributes']['toPort'])
            descr = obj['infraPortBlk']['attributes']['descr']
            reg = re.search('accportprof-(.*?)\/hports-(.*?)-typ-range\/portblk', dn)
            intProf = reg.group(1)
            intSel = reg.group(2)
            # we now cycle on the nodes that have that intSelection profile, and add all the ports
            for node_id in node_data:
                for prof in node_data[node_id]['intProf']:
                    if intProf == prof:
                        # here all intProf should be there
                        for port_id in range(fromPort,toPort+1):
                            port = str(module)+'/'+str(port_id)
                            if not port in node_data[node_id]['ports']:
                                node_data[node_id]['ports'][port] = {}
                            node_data[node_id]['ports'][port]['intSel'] = intSel
                            node_data[node_id]['ports'][port]['descr'] = descr
                            node_data[node_id]['ports'][port]['intProf'] = intProf
        # for every intSelector, we have the sum of the range of ports PLUS the policy group
        #
        # uni/infra/accportprof-<if_Prof>/hports-<if_Selector>-typ-range/rsaccBaseGrp
        [status, payload] = self.query_class('infraRsAccBaseGrp')
        if status != 200:
            return None
        json_data = payload['imdata']
        for obj in json_data:
            dn = obj['infraRsAccBaseGrp']['attributes']['dn']
            reg = re.search('accportprof-(.*?)\/hports-(.*?)-typ-range', dn)
            intProf = reg.group(1)
            intSel = reg.group(2)
            # uni/infra/funcprof/accbundle-<pol_Grp>
            polGrp_dn = obj['infraRsAccBaseGrp']['attributes']['tDn']
            reg = re.search('acc(bundle|portgrp)-(.*)', polGrp_dn)
            polGrp = reg.group(2).strip()
            # 'accbundle-' prefixes port-channels/vPCs; 'accportgrp-' plain access ports
            if reg.group(1) == 'bundle':
                port_type = 'bundle'
            else:
                port_type = 'access'
            for node_id in node_data:
                for intf in node_data[node_id]['ports']:
                    if node_data[node_id]['ports'][intf]['intSel'] == intSel:
                        node_data[node_id]['ports'][intf]['polGrp'] = polGrp
                        node_data[node_id]['ports'][intf]['type'] = port_type
        # Debug output: prints all retrieved data. Despite the original
        # "uncomment to print" note, this loop is active.
        # NOTE(review): assumes every port received 'polGrp'/'type' above; a
        # selector without an access base group would raise KeyError — confirm.
        for node_id in sorted(node_data):
            for intf in sorted(node_data[node_id]['ports']):
                intProf = node_data[node_id]['ports'][intf]['intProf']
                intSel = node_data[node_id]['ports'][intf]['intSel']
                polGrp = node_data[node_id]['ports'][intf]['polGrp']
                descr = node_data[node_id]['ports'][intf]['descr']
                port_type = node_data[node_id]['ports'][intf]['type']
                swProf = node_data[node_id]['intProf'][intProf]
                print('Node '+str(node_id)+' interface "'+intf+'":')
                print(' ---> selected by "'+intSel+'" is used by "'+intProf+'"')
                print(' ---> "'+intProf+'\" is used by "'+swProf+'"')
                print(' ---> "'+swProf+'" swSel sons are "'+(','.join(node_data[node_id]['swProf'][swProf]))+'"')
                print(' ---> attached polGrp "'+polGrp+'" description "'+descr+'" mode "'+port_type+'"\n')
        return node_data
# This function queries all the tenants information regarding vrf, bd,
# subnets, application profiles, epg and stores all the most important data
# (i.e. not all the parameters of every object) in a dictionary of dictionaries.
#
# Queries to the apic are time expensive, for this reason it is usually more
# efficient to perform less queries, retrieve more data and process it locally.
#
# apic_data[ten_name]['vrf_list'][vrf_name][bd_name]['ip'] = [], list of subnets
# apic_data[ten_name]['anp_list'][ap_name][epg] = {}
# apic_data[ten_name]['bd_list'][bd_name]['ip'] = [], list of subnets
# apic_data[ten_name]['bd_list'][bd_name]['vrf'] = vrf
#
# the third and fourth row are used to easily get the vrf to which a certain
# BD is associated, without searching on the data tree built in the first row.
    def query_all_tenants(self):
        """Collect tenant/VRF/BD/subnet/ANP/EPG topology into nested dicts.

        See the comment block above this method for the exact layout of the
        returned ``apic_data`` structure. Returns None as soon as any APIC
        query fails.
        """
        apic_data = {}
        # TENANTS and VRF
        [status, payload] = self.query_class('fvCtx')
        if (status != 200):
            return None
        json_data = payload['imdata']
        for obj in json_data:
            dn = obj['fvCtx']['attributes']['dn']
            reg = re.search('\/tn-(.*?)\/ctx-(.*)', dn)
            ten_name = reg.group(1)
            vrf = reg.group(2).strip()
            # first sighting of a tenant initializes its three sub-dicts
            if not ten_name in apic_data:
                apic_data[ten_name] = {}
                apic_data[ten_name]['vrf_list'] = {}
                apic_data[ten_name]['anp_list'] = {}
                apic_data[ten_name]['bd_list'] = {}
            apic_data[ten_name]['vrf_list'][vrf]={}
        # APPLICATION PROFILES
        # NOTE(review): the loops below index apic_data[ten_name] directly and
        # so assume every tenant was already seen via fvCtx; a tenant with no
        # VRF would raise KeyError — confirm.
        [status, payload] = self.query_class('fvAp')
        if (status != 200):
            return None
        json_data = payload['imdata']
        for obj in json_data:
            dn = obj['fvAp']['attributes']['dn']
            reg = re.search('uni\/tn-(.*?)\/ap-(.*)', dn)
            ten_name = reg.group(1)
            app = reg.group(2).strip()
            apic_data[ten_name]['anp_list'][app]={}
        # BRIDGE DOMAINS, we query all bridge domains for which a vrf has been configured
        [status, payload] = self.query_class('fvRsCtx')
        if (status != 200):
            return None
        json_data = payload['imdata']
        for obj in json_data:
            dn = obj['fvRsCtx']['attributes']['dn']
            reg = re.search('\/tn-(.*?)\/BD-(.*?)\/rsctx', dn)
            ten_name = reg.group(1)
            bd_name = reg.group(2)
            tdn = obj['fvRsCtx']['attributes']['tDn']
            vrf = re.search('uni\/tn-(.*?)\/ctx-(.*)', tdn).group(2).strip()
            apic_data[ten_name]['vrf_list'][vrf][bd_name]={}
            # there can be multiple ip subnets associated to a BD
            apic_data[ten_name]['vrf_list'][vrf][bd_name]['ip'] = []
            apic_data[ten_name]['bd_list'][bd_name]={}
            apic_data[ten_name]['bd_list'][bd_name]['vrf'] = vrf
            # there can be multiple ip subnets associated to a BD
            apic_data[ten_name]['bd_list'][bd_name]['ip'] = []
        # BRIDGE DOMAIN SUBNETS
        [status, payload] = self.query_class('fvSubnet')
        if (status != 200):
            return None
        json_data = payload['imdata']
        for obj in json_data:
            dn = obj['fvSubnet']['attributes']['dn']
            # uni/tn-<tn_name>/BD-<bd_name>/subnet-[<subnet>]
            # there are also the following objects, we skip them
            # uni/tn-<tn_name>/ap-<anp_name>/epg-<epg_name>/subnet-[<subnet>]
            reg = re.search('\/tn-(.*?)\/BD-(.*?)\/subnet-\[(.*)\]', dn)
            if reg == None:
                continue
            ten_name = reg.group(1)
            bd_name = reg.group(2)
            ip = reg.group(3)
            # here we easily retrieve the vrf associated to the bd
            vrf = apic_data[ten_name]['bd_list'][bd_name]['vrf']
            apic_data[ten_name]['vrf_list'][vrf][bd_name]['ip'].append(ip)
            apic_data[ten_name]['bd_list'][bd_name]['ip'].append(ip)
        # EPG
        [status, payload] = self.query_class('fvAEPg')
        if (status != 200):
            return None
        json_data = payload['imdata']
        for obj in json_data:
            dn = obj['fvAEPg']['attributes']['dn']
            # uni/tn-<tn_name>/ap-<anp_name>/epg-<epg_name>
            reg = re.search('\/tn-(.*?)\/ap-(.*?)\/epg-(.*)', dn)
            ten_name = reg.group(1)
            anp_prof = reg.group(2)
            epg = reg.group(3).strip()
            apic_data[ten_name]['anp_list'][anp_prof][epg]={}
        # disabled debug dump of the collected structure
        '''
        for ten_name in apic_data:
            for vrf_name in apic_data[ten_name]['vrf_list']:
                for bd in apic_data[ten_name]['vrf_list'][vrf_name]:
                    print ('TENANT: "{}", vrf: "{}", BD: "{}", subnets: {}'\
                        .format(ten_name,vrf_name,bd,', '\
                        .join(apic_data[ten_name]['vrf_list'][vrf_name][bd]['ip'])))
        for ten_name in apic_data:
            for app_name in apic_data[ten_name]['anp_list']:
                for epg in apic_data[ten_name]['anp_list'][app_name]:
                    print ('TENANT: "{}", ANP: "{}", BD: "{}"'.format(ten_name,app_name,epg))
        '''
        return apic_data
    # This function queries the fabric for the configured vPCs and
    # port-channels, returning a hash whose key is the policy group applied
    # to the channel/vPC and whose value is its DN.
def query_vpc (self):
[status, payload] = self.query_url('/api/class/fabricProtPathEpCont.json')
if status != 200:
return None
json_data = payload['imdata']
vpc_dn = {}
for res in json_data:
dn = res['fabricProtPathEpCont']['attributes']['dn']
[status, payload] = self.query_url('/api/mo/'+dn+'.json?query-target=children')
if status != 200:
return None
vpc_data = payload['imdata']
for elem in vpc_data:
dn = elem['fabricPathEp']['attributes']['dn']
vpc_dn[re.search("pathep-\[(.*)\]",dn).group(1)] = dn
return vpc_dn
# Class must be instantiated with APIC IP address and cookies
class FabCfgMgmt(object):
def __init__(self, apic, cookies):
self.apic = apic
self.cookies = cookies
self.templateLoader = jinja2.FileSystemLoader(
searchpath=(json_path + 'FabCfgMgmt/'))
self.templateEnv = jinja2.Environment(loader=self.templateLoader)
# Method must be called with the following kwargs. Note only supports
# SCP at this time (could easily add SFTP or FTP if needed though)
# name = | |
If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
SuccessResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['finger_print'] = \
finger_print
kwargs['session_token'] = \
session_token
kwargs['cert_attributes'] = \
cert_attributes
return self.call_with_http_info(**kwargs)
self.v1_companycert_finger_print_update_post = _Endpoint(
settings={
'response_type': (SuccessResponse,),
'auth': [],
'endpoint_path': '/v1/companycert/{fingerPrint}/update',
'operation_id': 'v1_companycert_finger_print_update_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'finger_print',
'session_token',
'cert_attributes',
],
'required': [
'finger_print',
'session_token',
'cert_attributes',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'finger_print':
(str,),
'session_token':
(str,),
'cert_attributes':
(CompanyCertAttributes,),
},
'attribute_map': {
'finger_print': 'fingerPrint',
'session_token': 'sessionToken',
},
'location_map': {
'finger_print': 'path',
'session_token': 'header',
'cert_attributes': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__v1_companycert_finger_print_update_post
)
def __v1_companycert_list_get(
self,
session_token,
**kwargs
):
"""List all trusted certs # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = pod_api.v1_companycert_list_get(session_token, async_req=True)
>>> result = thread.get()
Args:
session_token (str): Session authentication token.
Keyword Args:
skip (int): Pagination start. [optional]
limit (int): Row limit. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
CompanyCertInfoList
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['session_token'] = \
session_token
return self.call_with_http_info(**kwargs)
self.v1_companycert_list_get = _Endpoint(
settings={
'response_type': (CompanyCertInfoList,),
'auth': [],
'endpoint_path': '/v1/companycert/list',
'operation_id': 'v1_companycert_list_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'session_token',
'skip',
'limit',
],
'required': [
'session_token',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'session_token':
(str,),
'skip':
(int,),
'limit':
(int,),
},
'attribute_map': {
'session_token': 'sessionToken',
'skip': 'skip',
'limit': 'limit',
},
'location_map': {
'session_token': 'header',
'skip': 'query',
'limit': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__v1_companycert_list_get
)
def __v1_companycert_podmanaged_list_get(
self,
session_token,
**kwargs
):
"""List all trusted certs # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = pod_api.v1_companycert_podmanaged_list_get(session_token, async_req=True)
>>> result = thread.get()
Args:
session_token (str): Session authentication token.
Keyword Args:
skip (int): Pagination start. [optional]
limit (int): Row limit. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
CompanyCertInfoList
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['session_token'] = \
session_token
return self.call_with_http_info(**kwargs)
self.v1_companycert_podmanaged_list_get = _Endpoint(
settings={
'response_type': (CompanyCertInfoList,),
'auth': [],
'endpoint_path': '/v1/companycert/podmanaged/list',
'operation_id': 'v1_companycert_podmanaged_list_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'session_token',
'skip',
'limit',
],
'required': [
'session_token',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'session_token':
(str,),
'skip':
(int,),
'limit':
(int,),
},
'attribute_map': {
'session_token': 'sessionToken',
'skip': 'skip',
'limit': 'limit',
},
'location_map': {
'session_token': 'header',
'skip': 'query',
'limit': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__v1_companycert_podmanaged_list_get
)
def __v1_companycert_type_list_post(
self,
session_token,
type_id_list,
**kwargs
):
"""List all certs of the given types # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = pod_api.v1_companycert_type_list_post(session_token, type_id_list, async_req=True)
>>> result = thread.get()
Args:
session_token (str): Session authentication token.
type_id_list (CompanyCertTypeList): Certificate type list
Keyword Args:
skip (int): Pagination start. [optional]
limit (int): Row limit. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
CompanyCertInfoList
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['session_token'] = \
session_token
kwargs['type_id_list'] = \
type_id_list
return self.call_with_http_info(**kwargs)
self.v1_companycert_type_list_post = _Endpoint(
settings={
'response_type': (CompanyCertInfoList,),
'auth': [],
'endpoint_path': '/v1/companycert/type/list',
'operation_id': 'v1_companycert_type_list_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'session_token',
'type_id_list',
'skip',
'limit',
],
'required': [
'session_token',
'type_id_list',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'session_token':
(str,),
'type_id_list':
(CompanyCertTypeList,),
'skip':
(int,),
'limit':
(int,),
},
'attribute_map': {
'session_token': 'sessionToken',
'skip': 'skip',
'limit': 'limit',
},
'location_map': {
'session_token': 'header',
'type_id_list': 'body',
'skip': 'query',
'limit': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__v1_companycert_type_list_post
)
def __v2_companycert_create_post(
self,
session_token,
cert,
**kwargs
):
"""Create a company trusted or untrusted certificate. Different from V1 in that we reject expired certificates. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.