repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
cloud-custodian/cel-python | https://github.com/cloud-custodian/cel-python/blob/3a134c10394058c73a6bbe0e4ca7e862ea9707b3/tests/test_main.py | tests/test_main.py | # SPDX-Copyright: Copyright (c) Capital One Services, LLC
# SPDX-License-Identifier: Apache-2.0
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
"""
Pure Python implementation of CEL.
Test the main CLI.
Python >= 3.9 preserves order of arguments defined in :mod:`argparse`.
Python < 3.9 alphabetizes the arguments. This makes string comparisons
challenging in expected results.
"""
import argparse
import datetime
import io
import stat as os_stat
from pathlib import Path
import sys
from unittest.mock import Mock, call, sentinel, ANY
import pytest
import celpy
import celpy.__main__
from celpy import celtypes
@pytest.fixture
def mock_os_environ(monkeypatch):
    # Seed a known variable into the os.environ seen by celpy.__main__ so
    # arg_type_value() can resolve bare names (no "=value") from the environment.
    monkeypatch.setitem(celpy.__main__.os.environ, "OS_ENV_VAR", "3.14")
def test_arg_type_value(mock_os_environ):
    """GIVEN arg values; WHEN parsing; THEN correct interpretation."""
    # Explicit name:type=value form.
    assert celpy.__main__.arg_type_value("name:int=42") == (
        "name",
        celtypes.IntType,
        42,
    )
    # Bare name: value resolved from os.environ (seeded by mock_os_environ);
    # the default type is string.
    assert celpy.__main__.arg_type_value("OS_ENV_VAR") == (
        "OS_ENV_VAR",
        celtypes.StringType,
        "3.14",
    )
    # name:type with no "=value": environment value converted to the named type.
    assert celpy.__main__.arg_type_value("OS_ENV_VAR:double") == (
        "OS_ENV_VAR",
        celtypes.DoubleType,
        3.14,
    )
    # Malformed spec (extra ":" section) must be rejected.
    with pytest.raises(argparse.ArgumentTypeError):
        celpy.__main__.arg_type_value("name:type:value")
def test_get_options():
    """GIVEN verbose settings; WHEN parsing; THEN correct interpretation."""
    # --arg plus -n (null input) with an expression; default verbosity is 0.
    options = celpy.__main__.get_options(["--arg", "name:int=42", "-n", "355./113."])
    assert options.arg == [("name", celtypes.IntType, 42)]
    assert options.null_input
    assert options.expr == "355./113."
    assert options.verbose == 0
    # A single -v bumps verbosity to 1.
    options = celpy.__main__.get_options(["-v", "-n", '"hello world"'])
    assert options.null_input
    assert options.expr == '"hello world"'
    assert options.verbose == 1
    # -vv without -n: documents come from stdin; verbosity 2.
    options = celpy.__main__.get_options(["-vv", ".doc.field * 42"])
    assert not options.null_input
    assert options.expr == ".doc.field * 42"
    assert options.verbose == 2
def test_arg_type_bad(capsys, monkeypatch):
    """GIVEN invalid arg values; WHEN parsing; THEN correct interpretation."""
    # Pin the terminal width so argparse wraps the usage text predictably.
    monkeypatch.setenv("COLUMNS", "80")
    with pytest.raises(SystemExit) as exc_info:
        # No binding needed: get_options() never returns here -- argparse
        # raises SystemExit for an unknown type name.
        celpy.__main__.get_options(
            ["--arg", "name:nope=42", "-n", "355./113."]
        )
    # argparse exits with status 2 on a usage error.
    assert exc_info.value.args == (2,)
    out, err = capsys.readouterr()
    assert err.splitlines() == [
        "usage: celpy [-h] [-v] [-a ARG] [-n] [-s] [-i] [--json-package NAME]",
        "             [--json-document NAME] [-b] [-f FORMAT]",
        "             [expr]",
        "celpy: error: argument -a/--arg: arg name:nope=42 type name not in ['int', "
        "'uint', 'double', 'bool', 'string', 'bytes', 'list', 'map', 'null_type', "
        "'single_duration', 'single_timestamp', 'int64_value', 'uint64_value', "
        "'double_value', 'bool_value', 'string_value', 'bytes_value', 'number_value', "
        "'null_value']",
    ]
def test_arg_value_bad(capsys, monkeypatch):
    """GIVEN invalid arg values; WHEN parsing; THEN correct interpretation."""
    # Pin the terminal width so argparse wraps the usage text predictably.
    monkeypatch.setenv("COLUMNS", "80")
    with pytest.raises(SystemExit) as exc_info:
        # No binding needed: get_options() never returns here -- argparse
        # raises SystemExit when the value cannot be converted to the type.
        celpy.__main__.get_options(
            ["--arg", "name:int=nope", "-n", "355./113."]
        )
    # argparse exits with status 2 on a usage error.
    assert exc_info.value.args == (2,)
    out, err = capsys.readouterr()
    assert err.splitlines() == [
        "usage: celpy [-h] [-v] [-a ARG] [-n] [-s] [-i] [--json-package NAME]",
        "             [--json-document NAME] [-b] [-f FORMAT]",
        "             [expr]",
        "celpy: error: argument -a/--arg: arg name:int=nope value invalid for the supplied type",
    ]
def test_arg_combo_bad(capsys, monkeypatch):
    """GIVEN invalid arg combinations; WHEN parsing; THEN correct interpretation."""
    # Pin the terminal width so argparse wraps the usage text predictably.
    monkeypatch.setenv("COLUMNS", "80")
    # Usage lines shared by all three error cases below.
    error_prefix = [
        "usage: celpy [-h] [-v] [-a ARG] [-n] [-s] [-i] [--json-package NAME]",
        "             [--json-document NAME] [-b] [-f FORMAT]",
        "             [expr]",
    ]
    # Case 1: -i (interactive) conflicts with a supplied expression.
    # (No binding of the result: SystemExit is raised before returning.)
    with pytest.raises(SystemExit) as exc_info:
        celpy.__main__.get_options(
            ["-i", "-n", "355./113."]
        )
    assert exc_info.value.args == (2,)
    out, err = capsys.readouterr()
    assert err.splitlines() == error_prefix + [
        "celpy: error: Interactive mode and an expression provided",
    ]
    # Case 2: -n (null input) without any expression.
    with pytest.raises(SystemExit) as exc_info:
        celpy.__main__.get_options(
            ["-n"]
        )
    assert exc_info.value.args == (2,)
    out, err = capsys.readouterr()
    assert err.splitlines() == error_prefix + [
        "celpy: error: No expression provided",
    ]
    # Case 3: --json-document and --json-package are mutually exclusive.
    with pytest.raises(SystemExit) as exc_info:
        celpy.__main__.get_options(
            ["-n", "--json-document=_", "--json-package=_"]
        )
    assert exc_info.value.args == (2,)
    out, err = capsys.readouterr()
    assert err.splitlines() == error_prefix + [
        "celpy: error: Either use --json-package or --json-document, not both",
    ]
@pytest.fixture
def mock_cel_environment(monkeypatch):
    # Replace celpy.__main__.Environment with a Mock class whose runner's
    # evaluate() returns the string "sentinel.OUTPUT".  Tests can then assert
    # the exact compile/program/evaluate call sequence without running CEL.
    mock_runner = Mock(evaluate=Mock(return_value=str(sentinel.OUTPUT)))
    mock_env = Mock(
        compile=Mock(return_value=sentinel.AST), program=Mock(return_value=mock_runner)
    )
    mock_env_class = Mock(return_value=mock_env)
    monkeypatch.setattr(celpy.__main__, "Environment", mock_env_class)
    return mock_env_class
def test_main_0(mock_cel_environment, caplog, capsys):
    """GIVEN null-input AND expression; WHEN eval; THEN correct internal object use."""
    argv = ["--null-input", '"Hello world! I\'m " + name + "."']
    status = celpy.__main__.main(argv)
    assert status == 0
    # Environment built with no package and only the stat() annotation.
    assert mock_cel_environment.mock_calls == [
        call(package=None, annotations={"stat": celpy.celtypes.FunctionType})
    ]
    env = mock_cel_environment.return_value
    assert env.compile.mock_calls == [call('"Hello world! I\'m " + name + "."')]
    assert env.program.mock_calls == [call(sentinel.AST, functions={"stat": ANY})]
    prgm = env.program.return_value
    # Null input: evaluated exactly once with an empty activation.
    assert prgm.evaluate.mock_calls == [call({})]
    assert caplog.messages == []
    out, err = capsys.readouterr()
    # Output is the JSON-serialized mock result.
    assert out == '"sentinel.OUTPUT"\n'
    assert err == ""
def test_main_1(mock_cel_environment, caplog, capsys):
    """GIVEN null-input AND arg AND expression; WHEN eval; THEN correct internal object use."""
    argv = [
        "--arg",
        "name:string=CEL",
        "--null-input",
        '"Hello world! I\'m " + name + "."',
    ]
    status = celpy.__main__.main(argv)
    assert status == 0
    # The --arg name is annotated in the Environment next to stat().
    assert mock_cel_environment.mock_calls == [
        call(package=None, annotations={"name": celtypes.StringType, "stat": celpy.celtypes.FunctionType})
    ]
    env = mock_cel_environment.return_value
    assert env.compile.mock_calls == [call('"Hello world! I\'m " + name + "."')]
    assert env.program.mock_calls == [call(sentinel.AST, functions={"stat": ANY})]
    prgm = env.program.return_value
    # The --arg value is supplied through the activation.
    assert prgm.evaluate.mock_calls == [call({"name": "CEL"})]
    assert caplog.messages == []
    out, err = capsys.readouterr()
    assert out == '"sentinel.OUTPUT"\n'
    assert err == ""
def test_main_pipe(mock_cel_environment, caplog, capsys):
    """GIVEN JSON AND expression; WHEN eval; THEN correct internal object use."""
    argv = ['"Hello world! I\'m " + name + "."']
    # Patch stdin with one NDJSON document.  Restore it in a finally block
    # so a failure inside main() cannot leak the patched stdin into later
    # tests (the original restored it only on the success path).
    sys.stdin = io.StringIO('{"name": "CEL"}\n')
    try:
        status = celpy.__main__.main(argv)
    finally:
        sys.stdin = sys.__stdin__
    assert status == 0
    # Piped input uses the default "jq" package.
    assert mock_cel_environment.mock_calls == [
        call(package="jq", annotations={"stat": celpy.celtypes.FunctionType})
    ]
    env = mock_cel_environment.return_value
    assert env.compile.mock_calls == [call('"Hello world! I\'m " + name + "."')]
    assert env.program.mock_calls == [call(sentinel.AST, functions={'stat': ANY})]
    prgm = env.program.return_value
    # The piped JSON document is converted to CEL types and bound to "jq".
    assert prgm.evaluate.mock_calls == [
        call(
            {
                "jq": celtypes.MapType(
                    {celtypes.StringType("name"): celtypes.StringType("CEL")}
                )
            }
        )
    ]
    assert caplog.messages == []
    out, err = capsys.readouterr()
    assert out == '"sentinel.OUTPUT"\n'
    assert err == ""
def test_main_0_non_boolean(mock_cel_environment, caplog, capsys):
    """
    GIVEN null-input AND boolean option and AND non-bool expr
    WHEN eval
    THEN correct internal object use.
    """
    argv = ["-bn", '"Hello world! I\'m " + name + "."']
    status = celpy.__main__.main(argv)
    # -b demands a boolean result; the mock returns a str, so status is 2.
    assert status == 2
    assert mock_cel_environment.mock_calls == [
        call(package=None, annotations={"stat": celpy.celtypes.FunctionType})
    ]
    env = mock_cel_environment.return_value
    assert env.compile.mock_calls == [call('"Hello world! I\'m " + name + "."')]
    assert env.program.mock_calls == [call(sentinel.AST, functions={'stat': ANY})]
    prgm = env.program.return_value
    assert prgm.evaluate.mock_calls == [call({})]
    # The type mismatch is logged; nothing is printed.
    assert caplog.messages == [
        "Expected celtypes.BoolType, got <class 'str'> = 'sentinel.OUTPUT'"
    ]
    out, err = capsys.readouterr()
    assert out == ""
    assert err == ""
@pytest.fixture
def mock_cel_environment_false(monkeypatch):
    # Like mock_cel_environment, but evaluate() yields BoolType(False) to
    # exercise the -b (boolean exit-status) path.
    mock_runner = Mock(evaluate=Mock(return_value=celtypes.BoolType(False)))
    mock_env = Mock(
        compile=Mock(return_value=sentinel.AST), program=Mock(return_value=mock_runner)
    )
    mock_env_class = Mock(return_value=mock_env)
    monkeypatch.setattr(celpy.__main__, "Environment", mock_env_class)
    return mock_env_class
def test_main_0_boolean(mock_cel_environment_false, caplog, capsys):
    """
    GIVEN null-input AND boolean option AND false expr
    WHEN eval
    THEN correct internal object use.
    """
    argv = ["-bn", "2 == 1"]
    status = celpy.__main__.main(argv)
    # The fixture makes evaluate() return BoolType(False); with -b that
    # maps to exit status 1 and no printed output.
    assert status == 1
    assert mock_cel_environment_false.mock_calls == [
        call(package=None, annotations={"stat": celpy.celtypes.FunctionType})
    ]
    env = mock_cel_environment_false.return_value
    assert env.compile.mock_calls == [call("2 == 1")]
    assert env.program.mock_calls == [call(sentinel.AST, functions={'stat': ANY})]
    prgm = env.program.return_value
    assert prgm.evaluate.mock_calls == [call({})]
    assert caplog.messages == []
    out, err = capsys.readouterr()
    assert out == ""
    assert err == ""
@pytest.fixture
def mock_cel_environment_integer(monkeypatch):
    # evaluate() yields IntType(3735928559) == 0xdeadbeef, giving the
    # -f "#8x" formatting tests a recognizable expected value.
    mock_runner = Mock(evaluate=Mock(return_value=celtypes.IntType(3735928559)))
    mock_env = Mock(
        compile=Mock(return_value=sentinel.AST), program=Mock(return_value=mock_runner)
    )
    mock_env_class = Mock(return_value=mock_env)
    monkeypatch.setattr(celpy.__main__, "Environment", mock_env_class)
    return mock_env_class
def test_main_slurp_int_format(mock_cel_environment_integer, caplog, capsys):
    """
    GIVEN JSON AND slurp option AND formatted output AND int expr
    WHEN eval
    THEN correct internal object use.
    """
    # -s slurps all of stdin as one document; -f applies a format() spec.
    argv = ["-s", "-f", "#8x", "339629869*11"]
    sys.stdin = io.StringIO('{"name": "CEL"}\n')
    status = celpy.__main__.main(argv)
    sys.stdin = sys.__stdin__
    assert status == 0
    assert mock_cel_environment_integer.mock_calls == [
        call(package='jq', annotations={"stat": celpy.celtypes.FunctionType})
    ]
    env = mock_cel_environment_integer.return_value
    assert env.compile.mock_calls == [call("339629869*11")]
    assert env.program.mock_calls == [call(sentinel.AST, functions={'stat': ANY})]
    prgm = env.program.return_value
    # The slurped JSON document is bound to the "jq" package name.
    assert prgm.evaluate.mock_calls == [
        call({'jq': celtypes.MapType({celtypes.StringType('name'): celtypes.StringType('CEL')})})
    ]
    assert caplog.messages == []
    out, err = capsys.readouterr()
    # 3735928559 rendered through the "#8x" format spec.
    assert out == "0xdeadbeef\n"
    assert err == ""
@pytest.fixture
def mock_cel_environment_bool(monkeypatch):
    # evaluate() yields BoolType(False) for the slurp + -b exit-status test.
    mock_runner = Mock(evaluate=Mock(return_value=celtypes.BoolType(False)))
    mock_env = Mock(
        compile=Mock(return_value=sentinel.AST), program=Mock(return_value=mock_runner)
    )
    mock_env_class = Mock(return_value=mock_env)
    monkeypatch.setattr(celpy.__main__, "Environment", mock_env_class)
    return mock_env_class
def test_main_slurp_bool_status(mock_cel_environment_bool, caplog, capsys):
    """
    GIVEN JSON AND slurp option AND formatted output AND int expr
    WHEN eval
    THEN correct internal object use.
    """
    # -s slurps stdin; -b turns the (mocked False) result into exit status 1
    # while still printing the value.
    argv = ["-s", "-b", '.name == "not CEL"']
    sys.stdin = io.StringIO('{"name": "CEL"}\n')
    status = celpy.__main__.main(argv)
    sys.stdin = sys.__stdin__
    assert status == 1
    assert mock_cel_environment_bool.mock_calls == [
        call(package='jq', annotations={"stat": celpy.celtypes.FunctionType})
    ]
    env = mock_cel_environment_bool.return_value
    assert env.compile.mock_calls == [call('.name == "not CEL"')]
    assert env.program.mock_calls == [call(sentinel.AST, functions={'stat': ANY})]
    prgm = env.program.return_value
    assert prgm.evaluate.mock_calls == [
        call({'jq': celtypes.MapType({celtypes.StringType('name'): celtypes.StringType('CEL')})})
    ]
    assert caplog.messages == []
    out, err = capsys.readouterr()
    assert out == "false\n"
    assert err == ""
def test_main_0_int_format(mock_cel_environment_integer, caplog, capsys):
    """
    GIVEN slurp option AND formatted output AND int expr
    WHEN eval
    THEN correct internal object use.
    """
    # -n (null input) with a format spec: no stdin is consumed.
    argv = ["-n", "-f", "#8x", "339629869*11"]
    status = celpy.__main__.main(argv)
    assert status == 0
    assert mock_cel_environment_integer.mock_calls == [
        call(package=None, annotations={'stat': celpy.celtypes.FunctionType})
    ]
    env = mock_cel_environment_integer.return_value
    assert env.compile.mock_calls == [call("339629869*11")]
    assert env.program.mock_calls == [
        call(sentinel.AST, functions={"stat": ANY})
    ]
    prgm = env.program.return_value
    # Null input: a single evaluation with an empty activation.
    assert prgm.evaluate.mock_calls == [call({})]
    assert caplog.messages == []
    out, err = capsys.readouterr()
    # 3735928559 rendered through the "#8x" format spec.
    assert out == "0xdeadbeef\n"
    assert err == ""
def test_main_verbose(mock_cel_environment, caplog, capsys):
    """GIVEN verbose AND expression; WHEN eval; THEN correct log output."""
    argv = ["-v", "[2, 4, 5].map(x, x/2)"]
    status = celpy.__main__.main(argv)
    assert status == 0
    assert mock_cel_environment.mock_calls == [
        call(package="jq", annotations={'stat': celpy.celtypes.FunctionType})
    ]
    # -v logs only the expression being evaluated.
    assert caplog.messages == ["Expr: '[2, 4, 5].map(x, x/2)'"]
    out, err = capsys.readouterr()
    assert out == ""
    assert err == ""
def test_main_very_verbose(mock_cel_environment, caplog, capsys):
    """GIVEN very verbose AND expression; WHEN eval; THEN correct log output."""
    argv = ["-vv", "[2, 4, 5].map(x, x/2)"]
    status = celpy.__main__.main(argv)
    assert status == 0
    assert mock_cel_environment.mock_calls == [
        call(package="jq", annotations={'stat': celpy.celtypes.FunctionType})
    ]
    # -vv additionally logs the full parsed options namespace before the
    # expression.  (Requires Python >= 3.9 for stable argparse field order;
    # see the module docstring.)
    expected_namespace = argparse.Namespace(
        verbose=2, arg=None, null_input=False, slurp=False, interactive=False,
        package='jq', document=None,
        boolean=False, format=None,
        expr='[2, 4, 5].map(x, x/2)'
    )
    assert caplog.messages == [
        str(expected_namespace),
        "Expr: '[2, 4, 5].map(x, x/2)'",
    ]
    out, err = capsys.readouterr()
    assert out == ""
    assert err == ""
@pytest.fixture
def mock_cel_environment_syntax_error(monkeypatch):
    # compile() raises CELParseError; cel_parser.error_text() supplies the
    # formatted message that main() is expected to print on stderr.
    mock_runner = Mock(evaluate=Mock(return_value=str(sentinel.OUTPUT)))
    mock_env = Mock(
        compile=Mock(side_effect=celpy.CELParseError((sentinel.arg0, sentinel.arg1))),
        cel_parser=Mock(error_text=Mock(return_value=sentinel.Formatted_Error)),
    )
    mock_env_class = Mock(return_value=mock_env)
    monkeypatch.setattr(celpy.__main__, "Environment", mock_env_class)
    return mock_env_class
def test_main_parse_error(mock_cel_environment_syntax_error, caplog, capsys):
    """GIVEN syntax error; WHEN eval; THEN correct stderr output."""
    argv = ["-n", "[nope++]"]
    status = celpy.__main__.main(argv)
    # A parse error exits with status 1.
    assert status == 1
    assert mock_cel_environment_syntax_error.mock_calls == [
        call(package=None, annotations={'stat': celpy.celtypes.FunctionType})
    ]
    expected_namespace = argparse.Namespace(
        verbose=0, arg=None, null_input=True, slurp=False, interactive=False,
        package='jq', document=None,
        boolean=False, format=None,
        expr='[nope++]'
    )
    assert caplog.messages == [
        str(expected_namespace),
        "Expr: '[nope++]'",
    ]
    out, err = capsys.readouterr()
    assert out == ""
    # The parser's formatted error text goes to stderr.
    assert err == "sentinel.Formatted_Error\n"
@pytest.fixture
def mock_cel_environment_eval_error(monkeypatch):
    # evaluate() raises CELEvalError; cel_parser.error_text() supplies the
    # formatted message for the null-input error path.
    mock_runner = Mock(
        evaluate=Mock(side_effect=celpy.CELEvalError((sentinel.arg0, sentinel.arg1)))
    )
    mock_env = Mock(
        compile=Mock(return_value=sentinel.AST),
        program=Mock(return_value=mock_runner),
        cel_parser=Mock(error_text=Mock(return_value=sentinel.Formatted_Error)),
    )
    mock_env_class = Mock(return_value=mock_env)
    monkeypatch.setattr(celpy.__main__, "Environment", mock_env_class)
    return mock_env_class
def test_main_0_eval_error(mock_cel_environment_eval_error, caplog, capsys):
    """GIVEN null input AND bad expression; WHEN eval; THEN correct stderr output."""
    argv = ["-n", "2 / 0"]
    status = celpy.__main__.main(argv)
    # With null input, an evaluation error exits with status 2.
    assert status == 2
    assert mock_cel_environment_eval_error.mock_calls == [
        call(package=None, annotations={'stat': celpy.celtypes.FunctionType})
    ]
    expected_namespace = argparse.Namespace(
        verbose=0, arg=None, null_input=True, slurp=False, interactive=False,
        package='jq', document=None,
        boolean=False, format=None,
        expr='2 / 0'
    )
    assert caplog.messages == [
        str(expected_namespace),
        "Expr: '2 / 0'",
    ]
    out, err = capsys.readouterr()
    assert out == ""
    # The formatted evaluation error goes to stderr.
    assert err == "sentinel.Formatted_Error\n"
def test_main_pipe_eval_error(mock_cel_environment_eval_error, caplog, capsys):
    """GIVEN piped input AND bad expression; WHEN eval; THEN correct stderr output."""
    argv = [".json.field / 0"]
    sys.stdin = io.StringIO('{"name": "CEL"}\n')
    status = celpy.__main__.main(argv)
    sys.stdin = sys.__stdin__
    # Per-document evaluation errors do not fail the run: status stays 0,
    # the error is logged, and "null" is emitted for the document.
    assert status == 0
    assert mock_cel_environment_eval_error.mock_calls == [
        call(package='jq', annotations={'stat': celpy.celtypes.FunctionType})
    ]
    expected_namespace = argparse.Namespace(
        verbose=0, arg=None, null_input=False, slurp=False, interactive=False,
        package='jq', document=None,
        boolean=False, format=None,
        expr='.json.field / 0'
    )
    assert caplog.messages == [
        str(expected_namespace),
        "Expr: '.json.field / 0'",
        "Encountered (sentinel.arg0, sentinel.arg1) on document '{\"name\": \"CEL\"}\\n'",
    ]
    out, err = capsys.readouterr()
    assert out == "null\n"
    assert err == ""
def test_main_pipe_json_error(mock_cel_environment_eval_error, caplog, capsys):
    """GIVEN piped input AND bad expression; WHEN eval; THEN correct stderr output."""
    argv = [".json.field / 0"]
    # stdin holds text that is not JSON at all.
    sys.stdin = io.StringIO('nope, not json\n')
    status = celpy.__main__.main(argv)
    sys.stdin = sys.__stdin__
    # Undecodable input exits with status 3; the JSON decode error is logged
    # and nothing is printed for the document.
    assert status == 3
    assert mock_cel_environment_eval_error.mock_calls == [
        call(package='jq', annotations={'stat': celpy.celtypes.FunctionType})
    ]
    expected_namespace = argparse.Namespace(
        verbose=0, arg=None, null_input=False, slurp=False, interactive=False,
        package='jq', document=None,
        boolean=False, format=None,
        expr='.json.field / 0'
    )
    assert caplog.messages == [
        str(expected_namespace),
        "Expr: '.json.field / 0'",
        "Expecting value: line 1 column 1 (char 0) on document 'nope, not json\\n'",
    ]
    out, err = capsys.readouterr()
    assert out == ""
    assert err == ""
def test_main_repl(monkeypatch, capsys):
    # -i instantiates CEL_REPL once and runs its cmdloop(); exit status 0.
    mock_repl = Mock()
    mock_repl_class = Mock(return_value=mock_repl)
    monkeypatch.setattr(celpy.__main__, 'CEL_REPL', mock_repl_class)
    argv = ["-i"]
    status = celpy.__main__.main(argv)
    assert status == 0
    assert mock_repl_class.mock_calls == [
        call()
    ]
    assert mock_repl.cmdloop.mock_calls == [
        call()
    ]
def test_repl_class_good_interaction(capsys):
    """
    If any print() is added for debugging, this test will break.
    """
    c = celpy.__main__.CEL_REPL()
    c.preloop()
    # preloop() starts with an empty variable state.
    assert c.state == {}
    # "set name expr" evaluates expr and stores it; onecmd() returns a
    # falsy value for commands that keep the loop running.
    r_0 = c.onecmd("set pi 355./113.")
    assert not r_0
    r_1 = c.onecmd("show")
    assert not r_1
    # A bare expression is evaluated against the accumulated state.
    r_2 = c.onecmd("pi * 2.")
    assert not r_2
    # "quit" returns a truthy value, ending cmdloop().
    r_2 = c.onecmd("quit")
    assert r_2
    out, err = capsys.readouterr()
    print(out)  # Needed to reveal debugging print() output.
    lines = out.splitlines()
    # pi value, the state display, then pi * 2.
    assert lines[0].startswith("3.14159")
    assert lines[1].startswith("{'pi': DoubleType(3.14159")
    assert lines[2].startswith("6.28318")
    assert c.state == {"pi": celpy.celtypes.DoubleType(355./113.)}
def test_repl_class_bad_interaction(capsys):
    """Invalid REPL input reports a parse error on stderr and leaves state unchanged."""
    c = celpy.__main__.CEL_REPL()
    c.preloop()
    # The expression after "set a" is not valid CEL.
    c.onecmd("set a pi ++ nope | not & proper \\ CEL")
    c.onecmd("this! isn't! valid!!")
    out, err = capsys.readouterr()
    lines = err.splitlines()
    # Fix: the expected string used the invalid escape sequence "\ "
    # (a SyntaxWarning on modern Python); "\\ " is the identical runtime
    # value, written legally.
    assert (
        lines[0] ==
        "ERROR: <input>:1:5 pi ++ nope | not & proper \\ CEL"
    )
    assert (
        lines[4] ==
        " | ....^"
    )
    # Neither bad command may create any state.
    assert c.state == {}
def test_stat_good():
    # stat() on the current working directory: timestamps come back as
    # TimestampType, inode/size/link counts as IntType, and the mode bits
    # as BoolType flags.
    cwd = Path.cwd()
    doc = celpy.__main__.stat(str(cwd))
    assert doc['st_atime'] == celtypes.TimestampType(
        datetime.datetime.fromtimestamp(
            cwd.stat().st_atime))
    assert doc['st_ctime'] == celtypes.TimestampType(
        datetime.datetime.fromtimestamp(
            cwd.stat().st_ctime))
    assert doc['st_mtime'] == celtypes.TimestampType(
        datetime.datetime.fromtimestamp(
            cwd.stat().st_mtime))
    # Not on all versions of Python.
    # assert doc['st_birthtime'] == celtypes.TimestampType(
    #     datetime.datetime.fromtimestamp(
    #         cwd.stat().st_birthtime))
    assert doc['st_ino'] == celtypes.IntType(cwd.stat().st_ino)
    assert doc['st_size'] == celtypes.IntType(cwd.stat().st_size)
    assert doc['st_nlink'] == celtypes.IntType(cwd.stat().st_nlink)
    # cwd is a directory, so kind is 'd'.
    assert doc['kind'] == 'd'
    assert doc['setuid'] == celtypes.BoolType(os_stat.S_ISUID & cwd.stat().st_mode != 0)
    assert doc['setgid'] == celtypes.BoolType(os_stat.S_ISGID & cwd.stat().st_mode != 0)
    assert doc['sticky'] == celtypes.BoolType(os_stat.S_ISVTX & cwd.stat().st_mode != 0)
def test_stat_does_not_exist():
    """stat() yields None for a path that does not exist."""
    missing = Path.cwd() / "does_not_exist.tmp"
    assert celpy.__main__.stat(str(missing)) is None
| python | Apache-2.0 | 3a134c10394058c73a6bbe0e4ca7e862ea9707b3 | 2026-01-05T07:13:01.631050Z | false |
cloud-custodian/cel-python | https://github.com/cloud-custodian/cel-python/blob/3a134c10394058c73a6bbe0e4ca7e862ea9707b3/tests/test_c7nlib.py | tests/test_c7nlib.py | # SPDX-Copyright: Copyright (c) Capital One Services, LLC
# SPDX-License-Identifier: Apache-2.0
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
"""
Test the functions for C7N Compatibility.
There are three collections of tests.
- Isolated Unit Tests without a dependency on the ``CELFilter`` class.
- Integration Tests which depend on the ``celfilter_instance`` fixture.
These tests are essential for making sure we have C7N compatibility.
"""
import datetime
import io
import zlib
from types import SimpleNamespace
from unittest.mock import Mock, call, sentinel
import pytest
import celpy
import celpy.adapter
import celpy.c7nlib
import celpy.celtypes
def test_key():
    """key() returns the Value of the first tag whose Key matches, else None."""
    tag_list = [
        {"Key": "Target", "Value": "First"},
        {"Key": "Target", "Value": "Second"},
    ]
    tags = celpy.json_to_cel(tag_list)
    # Two tags share the same Key; the first one wins.
    assert celpy.c7nlib.key(tags, celpy.celtypes.StringType("Target")) == "First"
    # An absent Key yields None rather than raising.
    assert celpy.c7nlib.key(tags, celpy.celtypes.StringType("NotFound")) is None
def test_glob():
    """glob() matches a filename against a shell-style wildcard pattern."""
    cases = [("*.py", True), ("*.pyc", False)]
    for pattern, expected in cases:
        assert bool(celpy.c7nlib.glob("c7nlib.py", pattern)) == expected
def test_difference():
    """difference() is truthy iff the left list has members missing from the right."""
    left_has_extra = celpy.c7nlib.difference(["a", "b"], ["b", "c"])
    assert left_has_extra
    fully_covered = celpy.c7nlib.difference(["b"], ["b", "c"])
    assert not fully_covered
def test_intersect():
    """intersect() is truthy iff the two lists share at least one member."""
    overlapping = celpy.c7nlib.intersect(["a", "b"], ["b", "c"])
    assert overlapping
    disjoint = celpy.c7nlib.intersect(["a", "b"], ["c"])
    assert not disjoint
def test_normalize():
    """normalize() lower-cases and strips surrounding whitespace."""
    raw = " HeLlO WoRlD "
    normalized = celpy.c7nlib.normalize(raw)
    assert normalized == "hello world"
    assert normalized != "HeLlO WoRlD"
def test_unique_size():
    """unique_size() counts distinct members, ignoring duplicates."""
    with_duplicate = ["a", "b", "b", "c"]
    assert celpy.c7nlib.unique_size(with_duplicate) == 3
def test_parse_cidr():
    """parse_cidr() builds network/address objects; invalid text yields None."""
    # A /22 network exposes 2**10 - 2 usable host addresses.
    assert len(list(celpy.c7nlib.parse_cidr("192.168.100.0/22").hosts())) == 1022
    # A bare address is contained in its enclosing network.
    assert celpy.c7nlib.parse_cidr("192.168.100.0") in celpy.c7nlib.parse_cidr("192.168.100.0/22")
    assert celpy.c7nlib.parse_cidr("192.168.100.0").packed == bytes([192, 168, 100, 0])
    # Non-CIDR text parses to None ...
    assert celpy.c7nlib.parse_cidr("localhost") is None
    # ... and that None is never contained in a network.
    # Fix: "not X in Y" rewritten as the idiomatic (and equivalent) "X not in Y".
    assert celpy.c7nlib.parse_cidr("localhost") not in celpy.c7nlib.parse_cidr("192.168.100.0/22")
    # A network contains itself.
    assert (
        celpy.c7nlib.parse_cidr("192.168.100.0/22") in celpy.c7nlib.parse_cidr("192.168.100.0/22")
    )
def test_size_parse_cidr():
    """size_parse_cidr() yields the prefix length, or None for non-CIDR text."""
    prefix_length = celpy.c7nlib.size_parse_cidr("192.168.100.0/22")
    assert prefix_length == 22
    assert celpy.c7nlib.size_parse_cidr("localhost") is None
def test_version():
    # version() builds comparable objects from dotted version strings;
    # ordering is numeric per component (2.7.18 < 2.8, 2.6 < 2.7.18).
    assert celpy.c7nlib.version("2.7.18") < celpy.c7nlib.version("2.8")
    assert celpy.c7nlib.version("2.6") < celpy.c7nlib.version("2.7.18")
    assert celpy.c7nlib.version("2.7") == celpy.c7nlib.version("2.7")
    # A version object does not compare equal to a plain requirement string.
    assert not (celpy.c7nlib.version("2.7") == ">=2.6")
# Parametrization rows for the value_from() tests.  Each row is
# (file suffix, Content-Encoding, raw response body, expected CEL value);
# the mock_urllib_request fixture wires the first three into a canned
# urllib response and the test compares value_from()'s result to the fourth.
value_from_examples = [
    # Plain text: one line -> a list holding one string.
    (
        ".txt", "text", b"data\n",
        celpy.celtypes.ListType(
            [celpy.celtypes.StringType('data')]
        )
    ),
    # Same text, but the body is zlib-compressed and labeled "gzip".
    (
        ".txt", "gzip", zlib.compress(b"data\n"),
        celpy.celtypes.ListType(
            [celpy.celtypes.StringType('data')]
        )
    ),
    # A JSON document -> a CEL map.
    (
        ".json", "text", b'{"key": "data"}\n',
        celpy.celtypes.MapType(
            {
                celpy.celtypes.StringType('key'): celpy.celtypes.StringType('data')
            }
        )
    ),
    # Newline-delimited JSON -> one CEL map per input line.
    (
        ".ldjson", "text", b'{"row": 1}\n{"row": 2}\n',
        celpy.celtypes.ListType(
            [
                celpy.celtypes.MapType(
                    {
                        celpy.celtypes.StringType('row'): celpy.celtypes.IntType(1)
                    }
                ),
                celpy.celtypes.MapType(
                    {
                        celpy.celtypes.StringType('row'): celpy.celtypes.IntType(2)
                    }
                )
            ]
        )
    ),
    # CSV -> list of rows, each row a list of strings (header row included).
    (
        ".csv", "text", (b"row,value\r\n1,42\r\n"),
        celpy.celtypes.ListType(
            [
                celpy.celtypes.ListType(
                    [
                        celpy.celtypes.StringType('row'),
                        celpy.celtypes.StringType('value'),
                    ]
                ),
                celpy.celtypes.ListType(
                    [
                        celpy.celtypes.StringType('1'),
                        celpy.celtypes.StringType('42'),
                    ]
                ),
            ]
        )
    ),
    # csv2dict -> list of maps keyed by the header row.
    (
        ".csv2dict", "text", (b"row,value\r\n1,42\r\n"),
        celpy.celtypes.ListType(
            [
                celpy.celtypes.MapType(
                    {
                        celpy.celtypes.StringType('row'): celpy.celtypes.StringType('1'),
                        celpy.celtypes.StringType('value'): celpy.celtypes.StringType('42'),
                    }
                ),
            ]
        )
    ),
]
@pytest.fixture(params=value_from_examples)
def mock_urllib_request(monkeypatch, request):
    # Patch urllib.request inside c7nlib so urlopen() returns a canned
    # response carrying the row's encoding (via info().get) and raw bytes
    # (via read()), and patch os.path.splitext so the URL appears to have
    # the row's file suffix.
    suffix, encoding, raw_bytes, expected = request.param
    urllib_request = Mock(
        Request=Mock(return_value=Mock()),
        urlopen=Mock(return_value=Mock(
            info=Mock(return_value=Mock(get=Mock(return_value=encoding))),
            read=Mock(return_value=raw_bytes)
        ))
    )
    monkeypatch.setattr(celpy.c7nlib.urllib, 'request', urllib_request)
    mock_os = Mock(
        splitext=Mock(
            return_value=("path", suffix)
        )
    )
    monkeypatch.setattr(celpy.c7nlib.os, 'path', mock_os)
    return urllib_request, expected
def test_value_from(mock_urllib_request):
    # value_from() must request gzip encoding and decode the canned
    # response into the row's expected CEL value.
    urllib_request, expected = mock_urllib_request
    data = celpy.c7nlib.value_from(sentinel.URL)
    assert urllib_request.Request.mock_calls == [
        call(sentinel.URL, headers={'Accept-Encoding': 'gzip'})
    ]
    assert expected == data
def test_value_from_bad_format():
    """An unknown format name is rejected with a ValueError."""
    bad_format = "nope"
    with pytest.raises(ValueError):
        celpy.c7nlib.value_from(sentinel.URL, format=bad_format)
# Rows of (source document, JMESPath expression, expected result) shared by
# the jmes_path() and jmes_path_map() fixtures below.
jmes_path_examples = [
    ({"foo": {"bar": "baz"}}, "foo.bar", "baz"),
    ({"foo": {"bar": ["one", "two"]}}, "foo.bar[0]", "one"),
    ({"foo": {"bar": [{"name": "one"}, {"name": "two"}]}}, "foo.bar[*].name", ["one", "two"]),
]
@pytest.fixture(params=jmes_path_examples)
def doc_path_expected(request):
    # Convert each example row to CEL types: the document via json_to_cel(),
    # the path as StringType.  The expected value stays a plain Python value.
    json_doc, path, expected = request.param
    return (
        celpy.adapter.json_to_cel(json_doc),
        celpy.celtypes.StringType(path),
        expected
    )
def test_jmes_path(doc_path_expected):
    """jmes_path() applies a JMESPath expression to a single CEL document."""
    document, path_expr, expected = doc_path_expected
    result = celpy.c7nlib.jmes_path(document, path_expr)
    assert result == expected
@pytest.fixture(params=jmes_path_examples)
def doclist_path_expected(request):
    # Same rows as doc_path_expected, but the document and the expected
    # result are each wrapped in a one-element list for jmes_path_map().
    json_doc, path, expected = request.param
    return (
        celpy.celtypes.ListType([celpy.adapter.json_to_cel(json_doc)]),
        celpy.celtypes.StringType(path),
        [expected]
    )
def test_jmes_path_map(doclist_path_expected):
    """jmes_path_map() applies a JMESPath expression to each document in a list."""
    documents, path_expr, expected_values = doclist_path_expected
    results = celpy.c7nlib.jmes_path_map(documents, path_expr)
    assert results == expected_values
def test_present():
    """present() is truthy for a non-empty value, falsy for empty or None."""
    non_empty = celpy.c7nlib.present(celpy.celtypes.StringType("yes"))
    assert non_empty
    empty = celpy.c7nlib.present(celpy.celtypes.StringType(""))
    assert not empty
    assert not celpy.c7nlib.present(None)
def test_absent():
    """absent() is the inverse of present(): truthy for empty or None."""
    non_empty = celpy.c7nlib.absent(celpy.celtypes.StringType("no"))
    assert not non_empty
    empty = celpy.c7nlib.absent(celpy.celtypes.StringType(""))
    assert empty
    assert celpy.c7nlib.absent(None)
def test_marked_key_good():
    # A tag whose Value matches "message:action@date" is decomposed into a
    # map of message / action / action_date.
    tags_good = celpy.celtypes.ListType(
        [
            celpy.celtypes.MapType(
                {
                    celpy.celtypes.StringType("Key"):
                        celpy.celtypes.StringType("c7n-tag-compliance"),
                    celpy.celtypes.StringType("Value"):
                        celpy.celtypes.StringType("hello:stop@2020-09-10"),
                }
            ),
        ]
    )
    doc = celpy.c7nlib.marked_key(
        tags_good,
        celpy.celtypes.StringType("c7n-tag-compliance")
    )
    assert doc.get(celpy.celtypes.StringType("message")) == celpy.celtypes.StringType("hello")
    assert doc.get(celpy.celtypes.StringType("action")) == celpy.celtypes.StringType("stop")
    # The "@date" suffix becomes a CEL timestamp.
    assert doc.get(celpy.celtypes.StringType("action_date")) == celpy.celtypes.TimestampType("2020-09-10")
def test_marked_key_missing():
    # No tag has the requested Key ("ASSET" != "c7n-tag-compliance"),
    # so marked_key() yields None.
    tags_good = celpy.celtypes.ListType(
        [
            celpy.celtypes.MapType(
                {
                    celpy.celtypes.StringType("Key"):
                        celpy.celtypes.StringType("ASSET"),
                    celpy.celtypes.StringType("Value"):
                        celpy.celtypes.StringType("hello:stop@2020-09-10"),
                }
            ),
        ]
    )
    doc = celpy.c7nlib.marked_key(
        tags_good,
        celpy.celtypes.StringType("c7n-tag-compliance")
    )
    assert doc is None
def test_marked_key_wrong_format():
    # The Key matches but the Value ("nope:") does not have the
    # "message:action@date" layout, so marked_key() yields None.
    tags_good = celpy.celtypes.ListType(
        [
            celpy.celtypes.MapType(
                {
                    celpy.celtypes.StringType("Key"):
                        celpy.celtypes.StringType("c7n-tag-compliance"),
                    celpy.celtypes.StringType("Value"):
                        celpy.celtypes.StringType("nope:"),
                }
            ),
        ]
    )
    doc = celpy.c7nlib.marked_key(
        tags_good,
        celpy.celtypes.StringType("c7n-tag-compliance")
    )
    assert doc is None
def test_arn_split():
    """arn_split() extracts named fields from all three ARN layouts."""
    # (ARN, {field name: expected value}) for the plain, "/"-separated,
    # and ":"-separated resource-type layouts.
    cases = [
        (
            "arn:partition-1:service-1:region-1:account-id-1:resource-id-1",
            {
                "partition": "partition-1",
                "service": "service-1",
                "region": "region-1",
                "account-id": "account-id-1",
                "resource-id": "resource-id-1",
            },
        ),
        (
            "arn:partition-2:service-2:region-2:account-id-2:resource-type-2/resource-id-2",
            {
                "partition": "partition-2",
                "service": "service-2",
                "region": "region-2",
                "account-id": "account-id-2",
                # With a "/" separator the resource-id keeps the type prefix.
                "resource-id": "resource-type-2/resource-id-2",
            },
        ),
        (
            "arn:partition-3:service-3:region-3:account-id-3:resource-type-3:resource-id-3",
            {
                "partition": "partition-3",
                "service": "service-3",
                "region": "region-3",
                "account-id": "account-id-3",
                # With a ":" separator type and id are separate fields.
                "resource-type": "resource-type-3",
                "resource-id": "resource-id-3",
            },
        ),
    ]
    for arn, fields in cases:
        for field_name, expected in fields.items():
            assert celpy.c7nlib.arn_split(arn, field_name) == expected
    # A URL is not an ARN.
    with pytest.raises(ValueError):
        celpy.c7nlib.arn_split("http://server.name:port/path/to/resource", "partition")
@pytest.fixture
def mock_manager():
datapoints = [
{"Average": str(sentinel.average)}
]
health_events = [
{
"category": "issue",
"code": "AWS_EC2_SYSTEM_MAINTENANCE_EVENT",
"service": "EC2"
}
]
cloudwatch_client = Mock(
name="cloudwatch_client",
get_metric_statistics=Mock(
return_value={"Datapoints": datapoints}
)
)
ec2_client = Mock(
name="ec2_client",
describe_flow_logs=Mock(
return_value={"FlowLogs": [{"ResourceId": "i-123456789"}]}
),
describe_snapshot_attribute=Mock(
return_value=[str(sentinel.snashot_permission)]
),
)
elb_client = Mock(
name="elb_client",
describe_load_balancer_attributes=Mock(
return_value={
'LoadBalancerAttributes': [
{"Enabled": True}]
}
)
)
elbv2_client = Mock(
name="elbv2_client",
describe_load_balancer_attributes=Mock(
return_value={
"Attributes": [
{"Key": "access_logs.s3.enabled", "Value": "true"},
{"Key": "boolean", "Value": "false"},
{"Key": "integer", "Value": "42"},
{"Key": "string", "Value": "other"},
]
}
)
)
health_client = Mock(
name="health_client",
describe_events=Mock(
return_value={"events": health_events}
)
)
kms_client = Mock(
name="kms_client",
get_key_policy=Mock(
return_value={"Policy": str(sentinel.policy)}),
)
logs_client = Mock(
name="logs_cient",
describe_subscription_filters=Mock(
return_value={"subscriptionFilters": [str(sentinel.subscription_filter)]}
)
)
shield_client = Mock(
name="shield_client",
describe_subscription=Mock(
return_value={"Subscription": str(sentinel.shield)}
)
)
clients = {
"ec2": ec2_client,
"cloudwatch": cloudwatch_client,
"elb": elb_client,
"elbv2": elbv2_client,
"health": health_client,
"kms": kms_client,
"logs": logs_client,
"shield": shield_client,
}
mock_session = Mock(
name="mock_session instance",
client=Mock(side_effect=lambda name, region_name=None: clients.get(name))
)
asg_resource_manager = Mock(
name="asg_resource_manager",
resources=Mock(
return_value=[
{
"LaunchConfigurationName": str(sentinel.asg_launch_config_name),
"AutoScalingGroupName": str(sentinel.asg_name),
},
]
)
)
rds_resource_manager = Mock(
name="rds_resource_manager",
resources=Mock(
return_value=[
{
"DBSubnetGroupName": str(sentinel.rds_subnet_group_name),
"DBInstanceIdentifier": str(sentinel.rds_instance_identifier),
},
]
)
)
waf_resource_manager = Mock(
name="waf_resource_manager",
resources=Mock(
return_value=[
{"Name": str(sentinel.waf_name), "WebACLId": str(sentinel.waf_acl_id)}
]
)
)
resource_managers = {
"asg": asg_resource_manager,
"rds": rds_resource_manager,
"waf": waf_resource_manager,
}
manager = Mock(
name="mock_manager",
session_factory=Mock(return_value=mock_session),
get_model=Mock(return_value=Mock(dimension="InstanceId", id="InstanceId", service="ec2")),
get_resource_manager=Mock(side_effect=lambda name: resource_managers.get(name)),
retry=Mock(side_effect=lambda f, **kwargs: f(**kwargs)),
resource_type="ec2",
config = Mock(
account_id="123456789012",
region="us-east-1",
),
data={"resource": "ec2"}
)
return locals()
@pytest.fixture
def mock_filter_class():
"""
This CELFilter class demonstrates *all* the features required for the refactored C7N.
"""
mock_parser = Mock(
name="Mock c7n.filters.offhours.ScheduleParser instance",
parse=Mock(
return_value={
"off": [
{"days": [1, 2, 3, 4, 5], "hour": 21},
{"days": [0], "hour": 18}
],
"on": [
{"days": [1, 2, 3, 4, 5], "hour": 6},
{"days": [0], "hour": 10}
],
"tz": "pt"
}
)
)
def get_related_results(resources):
result = []
for r in resources:
if r.get("ResourceType") == "ec2":
result.append(str(sentinel.sg_id))
elif r.get("ResourceType") == "ebs":
result.append({"AliasName": str(sentinel.alias_name)})
else:
raise NotImplementedError(f"No get_related() for {resources}")
return result
# Class foundation from C7n.
class Filter:
"""Mock of c7n.filters.core.Filter"""
def __init__(self, data, manager):
self.data = data
self.manager = manager
# Mixins from C7N.
class InstanceImageMixin:
get_instance_image = Mock(
return_value={"CreationDate": "2020-09-10T11:12:13Z", "Name": str(sentinel.name)}
)
class RelatedResourceMixin:
get_related_ids = Mock(return_value=[str(sentinel.sg_id)])
get_related_sgs = Mock(return_value=[str(sentinel.sg)])
get_related_subnets = Mock(return_value=[str(sentinel.subnet)])
get_related_nat_gateways = Mock(return_value=[str(sentinel.nat_gateway)])
get_related_igws = Mock(return_value=[str(sentinel.igw)])
get_related_security_configs = Mock(return_value=[str(sentinel.sec_config)])
get_related_vpc = Mock(return_value=[str(sentinel.vpc)])
get_related_kms_keys = Mock(return_value=[str(sentinel.kms_key)])
get_related = Mock(side_effect=get_related_results)
class CredentialReportMixin:
get_credential_report = Mock(return_value=str(sentinel.credential))
class ResourceKmsKeyAliasMixin:
get_matching_aliases = Mock(return_value=[str(sentinel.kms_alias)])
class CrossAccountAccessMixin:
get_accounts = Mock(return_value=[str(sentinel.account)])
get_vpcs = Mock(return_value=[str(sentinel.vpc)])
get_vpces=Mock(return_value=[str(sentinel.vpce)])
get_orgids=Mock(return_value=[str(sentinel.orgid)])
get_resource_policy = Mock(return_value=[str(sentinel.policy)])
class SNSCrossAccountMixin:
get_endpoints = Mock(return_value=[str(sentinel.endpoint)])
get_protocols = Mock(return_value=[str(sentinel.protocol)])
class ImagesUnusedMixin:
_pull_ec2_images = Mock(return_value=set([str(sentinel.ec2_image_id)]))
_pull_asg_images = Mock(return_value=set())
class SnapshotUnusedMixin:
_pull_asg_snapshots = Mock(return_value=set([str(sentinel.asg_snapshot_id)]))
_pull_ami_snapshots = Mock(return_value=set())
class IamRoleUsageMixin:
service_role_usage = Mock(return_value=[str(sentinel.iam_role)])
instance_profile_usage = Mock(return_value=[str(sentinel.iam_profile)])
class SGUsageMixin:
scan_groups = Mock(return_value=[str(sentinel.scan_group)])
class IsShieldProtectedMixin:
get_type_protections = Mock(return_value=[{"ResourceArn": str(sentinel.shield)}])
class ShieldEnabledMixin:
account_shield_subscriptions = Mock(return_value=[str(sentinel.shield)])
class CELFilter(
InstanceImageMixin, RelatedResourceMixin, CredentialReportMixin,
ResourceKmsKeyAliasMixin, CrossAccountAccessMixin, SNSCrossAccountMixin,
ImagesUnusedMixin, SnapshotUnusedMixin, IamRoleUsageMixin, SGUsageMixin,
IsShieldProtectedMixin, ShieldEnabledMixin,
Filter,
):
"""Mocked subclass of c7n.filters.core.Filter with Mocked mixins."""
def __init__(self, data, manager):
super().__init__(data, manager)
assert self.data["type"].lower() == "cel"
self.expr = self.data["expr"]
self.parser = mock_parser
return CELFilter
@pytest.fixture
def celfilter_instance(mock_filter_class, mock_manager):
"""
The mocked CELFilter instance for all of the c7nlib integration tests.
"""
# A place-holder used only for initialization.
mock_policy_filter_source = {"type": "cel", "expr": "1+1==2"}
# The mock for the ``CELFilter`` instance C7N must provide.
the_filter = mock_filter_class(mock_policy_filter_source, mock_manager['manager'])
return locals()
def test_image_age_good(celfilter_instance):
mock_filter = celfilter_instance['the_filter']
resource = celpy.celtypes.MapType({})
with celpy.c7nlib.C7NContext(filter=mock_filter):
doc = celpy.c7nlib.image(
resource
)
assert doc.get(celpy.celtypes.StringType('CreationDate')) == celpy.celtypes.TimestampType("2020-09-10T11:12:13Z")
def test_image_age_missing(celfilter_instance, mock_manager):
mock_filter = celfilter_instance['the_filter']
mock_filter.get_instance_image=Mock(return_value=None)
resource = celpy.celtypes.MapType({})
with celpy.c7nlib.C7NContext(filter=mock_filter):
doc = celpy.c7nlib.image(
resource
)
assert doc.get(celpy.celtypes.StringType('CreationDate')) == celpy.celtypes.TimestampType("2000-01-01T01:01:01.000Z")
def test_get_raw_metrics(celfilter_instance, mock_manager):
mock_filter = celfilter_instance['the_filter']
datapoints = mock_manager['datapoints']
now = celpy.celtypes.TimestampType("2000-01-01T01:01:01.000Z")
resource = celpy.celtypes.MapType({})
request = celpy.celtypes.MapType(
{
celpy.celtypes.StringType("Namespace"): celpy.celtypes.StringType("AWS/EC2"),
celpy.celtypes.StringType("MetricName"): celpy.celtypes.StringType("CPUUtilization"),
celpy.celtypes.StringType("Dimensions"): celpy.celtypes.MapType(
{
celpy.celtypes.StringType("Name"): celpy.celtypes.StringType("InstanceId"),
celpy.celtypes.StringType("Value"): celpy.celtypes.StringType("i-1234567890abcdef0"),
}
),
celpy.celtypes.StringType("Statistics"): celpy.celtypes.ListType(
[
celpy.celtypes.StringType("Average")
]
),
celpy.celtypes.StringType("StartTime"): now - celpy.celtypes.DurationType("4d"),
celpy.celtypes.StringType("EndTime"): now,
celpy.celtypes.StringType("Period"): celpy.celtypes.DurationType("86400s")
}
)
with celpy.c7nlib.C7NContext(filter=mock_filter):
doc = celpy.c7nlib.get_raw_metrics(request)
assert doc == celpy.json_to_cel(datapoints)
def test_get_metrics(celfilter_instance, mock_manager):
"""
Two approaches possible. (1) mock :func:`get_raw_metrics`. (2) provide mocks to support
:func:`get_raw_metrics`. We use approach 2 in case the implmentation of `get_metrics`
is changed.
"""
mock_filter = celfilter_instance['the_filter']
datapoints = mock_manager['datapoints']
now = celpy.celtypes.TimestampType("2000-01-01T01:01:01.000Z")
resource = celpy.celtypes.MapType({"InstanceId": "i-123456789012"})
request = celpy.celtypes.MapType(
{
celpy.celtypes.StringType("MetricName"): celpy.celtypes.StringType("CPUUtilization"),
celpy.celtypes.StringType("Statistic"): celpy.celtypes.StringType("Average"),
celpy.celtypes.StringType("StartTime"): now - celpy.celtypes.DurationType("4d"),
celpy.celtypes.StringType("EndTime"): now,
celpy.celtypes.StringType("Period"): celpy.celtypes.DurationType("86400s")
}
)
with celpy.c7nlib.C7NContext(filter=mock_filter):
doc = celpy.c7nlib.get_metrics(
resource,
request
)
assert doc == celpy.celtypes.ListType(
[celpy.celtypes.StringType('sentinel.average')]
)
def test_get_related_ids(celfilter_instance):
mock_filter = celfilter_instance['the_filter']
resource = celpy.celtypes.MapType({})
with celpy.c7nlib.C7NContext(filter=mock_filter):
doc = celpy.c7nlib.get_related_ids(
resource
)
assert doc == celpy.celtypes.ListType(
[
celpy.celtypes.StringType("sentinel.sg_id"),
]
)
def test_security_group(celfilter_instance):
mock_sg_1 = dict(
SecurityGroupId="sg-12345678",
SecurityGroupName="SomeName",
)
mock_sg_2 = dict(
SecurityGroupId="sg-23456789",
SecurityGroupName="AnotherName",
)
mock_filter = celfilter_instance['the_filter']
mock_filter.get_related = Mock(return_value=[mock_sg_1, mock_sg_2])
resource = celpy.celtypes.MapType(
{celpy.celtypes.StringType("ResourceType"): celpy.celtypes.StringType("sg")}
)
with celpy.c7nlib.C7NContext(filter=mock_filter):
doc = celpy.c7nlib.security_group(
resource
)
assert doc == celpy.celtypes.ListType(
[
celpy.celtypes.MapType({
celpy.celtypes.StringType("SecurityGroupId"):
celpy.celtypes.StringType("sg-12345678"),
celpy.celtypes.StringType("SecurityGroupName"):
celpy.celtypes.StringType("SomeName"),
}),
celpy.celtypes.MapType({
celpy.celtypes.StringType("SecurityGroupId"):
celpy.celtypes.StringType("sg-23456789"),
celpy.celtypes.StringType("SecurityGroupName"):
celpy.celtypes.StringType("AnotherName"),
}),
]
)
def test_subnet(celfilter_instance):
mock_subnet = dict(
SubnetID="subnet-12345678",
SubnetArn="arn:aws:asg:us-east-1:123456789012:subnet-12345678",
)
mock_filter = celfilter_instance['the_filter']
mock_filter.get_related=Mock(return_value=mock_subnet)
resource = celpy.celtypes.MapType({})
with celpy.c7nlib.C7NContext(filter=mock_filter):
doc = celpy.c7nlib.subnet(
resource
)
assert doc.get(celpy.celtypes.StringType("SubnetID")) == celpy.celtypes.StringType("subnet-12345678")
def test_flow_logs(celfilter_instance):
"""
Test :func:`c7nlib.flow_logs`.
.. todo:: Refactor :func:`c7nlib.flow_logs` -- it exposes too much implementation detail.
"""
mock_filter = celfilter_instance['the_filter']
resource_1 = celpy.celtypes.MapType(
{celpy.celtypes.StringType("InstanceId"): celpy.celtypes.StringType("i-123456789")})
with celpy.c7nlib.C7NContext(filter=mock_filter):
doc_1 = celpy.c7nlib.flow_logs(
resource_1
)
assert doc_1 == [{"ResourceId": "i-123456789"}]
resource_2 = celpy.celtypes.MapType(
{celpy.celtypes.StringType("InstanceId"): celpy.celtypes.StringType("i-111111111")})
with celpy.c7nlib.C7NContext(filter=mock_filter):
doc_2 = celpy.c7nlib.flow_logs(
resource_2
)
assert doc_2 == []
def test_vpc(celfilter_instance):
vpc_1 = {"ResourceId": "vpc-123456789", "More": "Details"}
mock_filter = celfilter_instance['the_filter']
mock_filter.get_related = Mock(return_value=vpc_1)
resource_1 = celpy.celtypes.MapType(
{celpy.celtypes.StringType("InstanceId"): celpy.celtypes.StringType("vpc-123456789")})
with celpy.c7nlib.C7NContext(filter=mock_filter):
doc_1 = celpy.c7nlib.vpc(
resource_1
)
assert doc_1 == vpc_1
def test_subst(celfilter_instance):
mock_filter = celfilter_instance['the_filter']
with celpy.c7nlib.C7NContext(filter=mock_filter):
assert celpy.c7nlib.subst("this") == "this"
assert celpy.c7nlib.subst("this {account_id}") == "this 123456789012"
def test_credentials(celfilter_instance):
mock_filter = celfilter_instance['the_filter']
resource_1 = celpy.celtypes.MapType(
{celpy.celtypes.StringType("ResourceType"): celpy.celtypes.StringType("iam-user")})
with celpy.c7nlib.C7NContext(filter=mock_filter):
doc_1 = celpy.c7nlib.credentials(
resource_1
)
assert doc_1 == celpy.adapter.json_to_cel(str(sentinel.credential))
def test_kms_alias(celfilter_instance):
mock_filter = celfilter_instance['the_filter']
resource_1 = celpy.celtypes.MapType(
{celpy.celtypes.StringType("ResourceType"): celpy.celtypes.StringType("rds")})
with celpy.c7nlib.C7NContext(filter=mock_filter):
doc_1 = celpy.c7nlib.kms_alias(
resource_1
)
assert doc_1 == celpy.adapter.json_to_cel([str(sentinel.kms_alias)])
def test_kms_key(celfilter_instance):
mock_filter = celfilter_instance['the_filter']
resource_1 = celpy.celtypes.MapType(
{celpy.celtypes.StringType("ResourceType"): celpy.celtypes.StringType("ebs")})
with celpy.c7nlib.C7NContext(filter=mock_filter):
doc_1 = celpy.c7nlib.kms_key(
resource_1
)
assert doc_1 == celpy.adapter.json_to_cel([{"AliasName": str(sentinel.alias_name)}])
def test_C7N_resource_schedule(celfilter_instance):
mock_filter = celfilter_instance['the_filter']
ec2_doc = {
"ResourceType": "ec2",
"Tags": [
{
"key": "maid_offhours",
"value": "off=[(M-F,21),(U,18)];on=[(M-F,6),(U,10)];tz=pt"
}
]
}
with celpy.c7nlib.C7NContext(filter=mock_filter):
other_tz_names = {
'et': 'US/Eastern',
'pt': 'US/Pacific',
}
celpy.celtypes.TimestampType.TZ_ALIASES.update(other_tz_names)
schedule = celpy.c7nlib.resource_schedule(ec2_doc)
assert schedule == {
celpy.celtypes.StringType('off'): celpy.celtypes.ListType([
celpy.celtypes.MapType({
celpy.celtypes.StringType('days'): celpy.celtypes.ListType([
celpy.celtypes.IntType(1), celpy.celtypes.IntType(2),
celpy.celtypes.IntType(3), celpy.celtypes.IntType(4),
celpy.celtypes.IntType(5)
]),
celpy.celtypes.StringType('hour'): celpy.celtypes.IntType(21),
celpy.celtypes.StringType('tz'): celpy.celtypes.StringType('pt'),
}),
celpy.celtypes.MapType({
celpy.celtypes.StringType('days'): celpy.celtypes.ListType([
celpy.celtypes.IntType(0)
]),
celpy.celtypes.StringType('hour'): celpy.celtypes.IntType(18),
celpy.celtypes.StringType('tz'): celpy.celtypes.StringType('pt'),
})
]),
celpy.celtypes.StringType('on'): celpy.celtypes.ListType([
celpy.celtypes.MapType({
celpy.celtypes.StringType('days'): celpy.celtypes.ListType([
celpy.celtypes.IntType(1), celpy.celtypes.IntType(2),
celpy.celtypes.IntType(3), celpy.celtypes.IntType(4),
celpy.celtypes.IntType(5)
]),
celpy.celtypes.StringType('hour'): celpy.celtypes.IntType(6),
celpy.celtypes.StringType('tz'): celpy.celtypes.StringType('pt'),
}),
celpy.celtypes.MapType({
celpy.celtypes.StringType('days'): celpy.celtypes.ListType([
celpy.celtypes.IntType(0)
]),
celpy.celtypes.StringType('hour'): celpy.celtypes.IntType(10),
celpy.celtypes.StringType('tz'): celpy.celtypes.StringType('pt'),
})
]),
}
def test_get_accounts(celfilter_instance):
mock_filter = celfilter_instance['the_filter']
ami_doc = {"ResourceType": "ami"}
with celpy.c7nlib.C7NContext(filter=mock_filter):
accounts = celpy.c7nlib.get_accounts(ami_doc)
assert accounts == [str(sentinel.account)]
def test_get_vpcs(celfilter_instance):
mock_filter = celfilter_instance['the_filter']
ami_doc = {"ResourceType": "ami"}
with celpy.c7nlib.C7NContext(filter=mock_filter):
vpcs = celpy.c7nlib.get_vpcs(ami_doc)
assert vpcs == [str(sentinel.vpc)]
def test_get_vpces(celfilter_instance):
mock_filter = celfilter_instance['the_filter']
ami_doc = {"ResourceType": "ami"}
with celpy.c7nlib.C7NContext(filter=mock_filter):
vpces = celpy.c7nlib.get_vpces(ami_doc)
assert vpces == [str(sentinel.vpce)]
def test_get_orgids(celfilter_instance):
mock_filter = celfilter_instance['the_filter']
ami_doc = {"ResourceType": "ami"}
with celpy.c7nlib.C7NContext(filter=mock_filter):
orgids = celpy.c7nlib.get_orgids(ami_doc)
assert orgids == [str(sentinel.orgid)]
def test_get_endpoints(celfilter_instance):
mock_filter = celfilter_instance['the_filter']
ami_doc = {"ResourceType": "sns"}
with celpy.c7nlib.C7NContext(filter=mock_filter):
| python | Apache-2.0 | 3a134c10394058c73a6bbe0e4ca7e862ea9707b3 | 2026-01-05T07:13:01.631050Z | true |
cloud-custodian/cel-python | https://github.com/cloud-custodian/cel-python/blob/3a134c10394058c73a6bbe0e4ca7e862ea9707b3/tests/test_transpilation.py | tests/test_transpilation.py | # SPDX-Copyright: Copyright (c) Capital One Services, LLC
# SPDX-License-Identifier: Apache-2.0
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
"""
Test all the transpilation methods.
A large number of tests use :py:meth:`Transpiler.transpile`.
The approach used here may not be ideal from a unit testing perspective.
This approach tends to test the superclass :py:meth:`lark.visitors.Visitor_Recursive.visit`,
which involves re-testing a fair amount of Lark code.
Further, we don't follow the path used by the :py:mod:`test_evaluation` test suite.
This module does not synthesize parse trees to disentangle itself from the parser.
Instead, it parses CEL expressions directly.
This does not precisely parallel :py:mod:`test_evaluation`.
The :py:mod:`test_evaluation` module tests a number of elements of evaluation
that are outside the Evaluator, including `Activation`, `Referent`, `NameContainer`.
This module cherry-picks tests of CEL expressions separate from the evaluation mechanics.
"""
import ast
from textwrap import dedent
from types import SimpleNamespace
from unittest.mock import Mock, MagicMock, sentinel
import lark
import pytest
import celpy.evaluation # Expose the name for monkeypatching
from celpy import celparser, celtypes
from celpy.evaluation import *
@pytest.fixture
def mock_protobuf():
"""Used for a few test cases."""
def get_method(name, default=None):
if name == "field":
return 42
else:
raise KeyError(name)
protobuf_message = Mock(name="protobuf_message", spec=celtypes.MessageType, get=Mock(side_effect=get_method))
protobuf_message_class = Mock(name="protobuf_message class", return_value=protobuf_message)
return protobuf_message_class
# This may be slightly better for isolating Activation implementation.
# @pytest.fixture
# def mock_activation():
# activation = Mock(
# resolve_name=Mock(return_value=lambda name: {"name2": celtypes.IntType}.get())
# )
# return activation
@pytest.fixture
def mock_functions():
return {"no_arg_function": no_arg_function}
@pytest.fixture
def mock_activation(mock_protobuf, mock_functions):
"""
See :py:class:`NameContainer`, specifically :py:meth:`NameContainer.load_annotations`
"""
return Activation(
annotations={
"name1.name2": celtypes.IntType,
"protobuf_message": mock_protobuf,
"a.b.c": celtypes.StringType,
},
functions=mock_functions,
vars={
"duration": celtypes.DurationType(seconds=123, nanos=123456789),
"a.b.c": celtypes.StringType("yeah"),
}
)
def no_arg_function():
return celpy.celtypes.IntType(42)
@pytest.fixture
def mock_globals(mock_activation, mock_protobuf):
# Works, but feels sketchy...
# We're tweaking one function's understanding of globals.
global_vars = celpy.evaluation.result.__globals__
global_vars["the_activation"] = mock_activation
global_vars["protobuf_message"] = mock_protobuf
global_vars["test_transpilation"] = SimpleNamespace(no_arg_function=no_arg_function)
# Seems to make more sense, but doesn't actually work!
# global the_activation
# the_activation = mock_activation
# global_vars = globals().copy()
return global_vars
def test_result_builder(mock_globals, mock_activation):
def mock_operation(a, b):
if isinstance(a, Exception):
raise a
else:
return sentinel.A_OP_B
expr_1 = lambda activation: mock_operation(TypeError(sentinel.type_error_message), sentinel.VALUE)
result_1 = celpy.evaluation.result(mock_activation, expr_1)
assert isinstance(result_1, CELEvalError)
assert result_1.args == ('no such overload', TypeError, (sentinel.type_error_message,))
assert result_1.__cause__.__class__ == TypeError
with pytest.raises(IOError) as exc_info:
expr_2 = lambda activation: mock_operation(IOError(sentinel.io_error_message), sentinel.VALUE)
result_2 = celpy.evaluation.result(mock_activation, expr_2)
assert not isinstance(exc_info.value, CELEvalError)
assert exc_info.value.args == (sentinel.io_error_message,)
result_3 = mock_operation(sentinel.A, sentinel.B)
assert result_3 == sentinel.A_OP_B
literals = [
('3.14',
"CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.celtypes.DoubleType(3.14))",
celpy.celtypes.DoubleType(3.14),
"literal"),
('42',
"CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.celtypes.IntType(42))",
celpy.celtypes.IntType(42),
"literal"),
('42u',
"CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.celtypes.UintType(42))",
celpy.celtypes.UintType(42),
"literal"),
('b"B"',
"CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.celtypes.BytesType(b'B'))",
celpy.celtypes.BytesType(b'B'),
"literal"),
('"String"',
"CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.celtypes.StringType('String'))",
celpy.celtypes.StringType("String"),
"literal"),
('true',
"CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.celtypes.BoolType(True))",
celpy.celtypes.BoolType(True),
"literal"),
('null',
"CEL = celpy.evaluation.result(base_activation, lambda activation: None)",
None, # celpy.celtypes.NullType(),
"literal"),
('[]',
"CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.celtypes.ListType([]))",
celpy.celtypes.ListType([]),
"literal"),
('{}',
"CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.celtypes.MapType([]))",
celpy.celtypes.MapType({}),
"literal"),
('bool',
"CEL = celpy.evaluation.result(base_activation, lambda activation: activation.bool)",
celpy.celtypes.BoolType,
"literal"),
]
function_params = [
("size([42, 6, 7])",
"CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.function_size(celpy.celtypes.ListType([celpy.celtypes.IntType(42), celpy.celtypes.IntType(6), celpy.celtypes.IntType(7)])))",
celpy.celtypes.IntType(3),
"IDENT(_)"),
("size(3.14)",
"CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.function_size(celpy.celtypes.DoubleType(3.14)))",
CELEvalError,
"IDENT(_)"),
('"hello".size()',
"CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.function_size(celpy.celtypes.StringType('hello')))",
celpy.celtypes.IntType(5),
"_.IDENT()"),
]
method_params = [
("[42, 6, 7].size()",
"CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.function_size(celpy.celtypes.ListType([celpy.celtypes.IntType(42), celpy.celtypes.IntType(6), celpy.celtypes.IntType(7)])))",
celpy.celtypes.IntType(3),
"_.size()"),
('timestamp("2009-02-13T23:31:30Z").getMonth()',
"CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.function_getMonth(celpy.celtypes.TimestampType(celpy.celtypes.StringType('2009-02-13T23:31:30Z'))))",
celtypes.IntType(1),
"_._())"),
('["hello", "world"].contains("hello")',
"CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.function_contains(celpy.celtypes.ListType([celpy.celtypes.StringType('hello'), celpy.celtypes.StringType('world')]), celpy.celtypes.StringType('hello')))",
celtypes.BoolType(True),
"_._(_)"),
]
macro_has_params = [
('has({"n": 355, "d": 113}.n)',
dedent("""\
# ident_arg has:
ex_9_h = lambda activation: celpy.celtypes.MapType([(celpy.celtypes.StringType('n'), celpy.celtypes.IntType(355)), (celpy.celtypes.StringType('d'), celpy.celtypes.IntType(113))]).get('n')
ex_9 = lambda activation: not isinstance(celpy.evaluation.result(activation, ex_9_h), CELEvalError)
CEL = celpy.evaluation.result(base_activation, ex_9)"""),
celtypes.BoolType(True),
"has(_._)"),
('has({"n": 355, "d": 113}.nope)',
dedent("""\
# ident_arg has:
ex_9_h = lambda activation: celpy.celtypes.MapType([(celpy.celtypes.StringType('n'), celpy.celtypes.IntType(355)), (celpy.celtypes.StringType('d'), celpy.celtypes.IntType(113))]).get('nope')
ex_9 = lambda activation: not isinstance(celpy.evaluation.result(activation, ex_9_h), CELEvalError)
CEL = celpy.evaluation.result(base_activation, ex_9)"""
),
celtypes.BoolType(False),
"has(_._)"),
('dyn(6) * 7',
"CEL = celpy.evaluation.result(base_activation, lambda activation: operator.mul(celpy.celtypes.IntType(6), celpy.celtypes.IntType(7)))",
celtypes.IntType(42),
"dyn(_)"),
("type(dyn([1, 'one']))",
"CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.celtypes.TypeType(celpy.celtypes.ListType([celpy.celtypes.IntType(1), celpy.celtypes.StringType('one')])))",
celtypes.ListType,
"dyn(_)"),
]
unary_operator_params = [
("! true", "CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.celtypes.logical_not(celpy.celtypes.BoolType(True)))", celtypes.BoolType(False), "!_"),
("- 42", "CEL = celpy.evaluation.result(base_activation, lambda activation: operator.neg(celpy.celtypes.IntType(42)))", celtypes.IntType(-42), "-_"),
("- -9223372036854775808", "CEL = celpy.evaluation.result(base_activation, lambda activation: operator.neg(celpy.celtypes.IntType(-9223372036854775808)))", CELEvalError, "-_"),
]
binary_operator_params = [
("6 < 7", "CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.bool_lt(celpy.celtypes.IntType(6), celpy.celtypes.IntType(7)))", celtypes.BoolType(True), "_<_"),
("6 <= 7", "CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.bool_le(celpy.celtypes.IntType(6), celpy.celtypes.IntType(7)))", celtypes.BoolType(True), "_<=_"),
("6 > 7", "CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.bool_gt(celpy.celtypes.IntType(6), celpy.celtypes.IntType(7)))", celtypes.BoolType(False), "_>_"),
("6 >= 7", "CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.bool_ge(celpy.celtypes.IntType(6), celpy.celtypes.IntType(7)))", celtypes.BoolType(False), "_>=_"),
("42 == 42", "CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.bool_eq(celpy.celtypes.IntType(42), celpy.celtypes.IntType(42)))", celtypes.BoolType(True), "_==_"),
("[] == []", "CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.bool_eq(celpy.celtypes.ListType([]), celpy.celtypes.ListType([])))", celtypes.BoolType(True), "_==_"),
("42 != 42", "CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.bool_ne(celpy.celtypes.IntType(42), celpy.celtypes.IntType(42)))", celtypes.BoolType(False), "_!=_"),
('"b" in ["a", "b", "c"]',
"CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.operator_in(celpy.celtypes.StringType('b'), celpy.celtypes.ListType([celpy.celtypes.StringType('a'), celpy.celtypes.StringType('b'), celpy.celtypes.StringType('c')])))",
celtypes.BoolType(True),
"_in_"),
("40 + 2", "CEL = celpy.evaluation.result(base_activation, lambda activation: operator.add(celpy.celtypes.IntType(40), celpy.celtypes.IntType(2)))", celtypes.IntType(42), "_+_"),
("44 - 2", "CEL = celpy.evaluation.result(base_activation, lambda activation: operator.sub(celpy.celtypes.IntType(44), celpy.celtypes.IntType(2)))", celtypes.IntType(42), "_-_"),
("6 * 7", "CEL = celpy.evaluation.result(base_activation, lambda activation: operator.mul(celpy.celtypes.IntType(6), celpy.celtypes.IntType(7)))", celtypes.IntType(42), "_*_"),
("84 / 2", "CEL = celpy.evaluation.result(base_activation, lambda activation: operator.truediv(celpy.celtypes.IntType(84), celpy.celtypes.IntType(2)))", celtypes.IntType(42), "_/_"),
("85 % 43", "CEL = celpy.evaluation.result(base_activation, lambda activation: operator.mod(celpy.celtypes.IntType(85), celpy.celtypes.IntType(43)))", celtypes.IntType(42), "_%_"),
# A few error examples
('42 in ["a", "b", "c"]',
"CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.operator_in(celpy.celtypes.IntType(42), celpy.celtypes.ListType([celpy.celtypes.StringType('a'), celpy.celtypes.StringType('b'), celpy.celtypes.StringType('c')])))",
CELEvalError,
"_in_"),
("9223372036854775807 + 1", "CEL = celpy.evaluation.result(base_activation, lambda activation: operator.add(celpy.celtypes.IntType(9223372036854775807), celpy.celtypes.IntType(1)))", CELEvalError, "_+_"),
("9223372036854775807 * 2", "CEL = celpy.evaluation.result(base_activation, lambda activation: operator.mul(celpy.celtypes.IntType(9223372036854775807), celpy.celtypes.IntType(2)))", CELEvalError, "_*_"),
("84 / 0", "CEL = celpy.evaluation.result(base_activation, lambda activation: operator.truediv(celpy.celtypes.IntType(84), celpy.celtypes.IntType(0)))", CELEvalError, "_/_"),
]
short_circuit_params = [
("true || (3 / 0 != 0)",
dedent("""\
# conditionalor:
ex_1_l = lambda activation: celpy.celtypes.BoolType(True)
ex_1_r = lambda activation: celpy.evaluation.bool_ne(operator.truediv(celpy.celtypes.IntType(3), celpy.celtypes.IntType(0)), celpy.celtypes.IntType(0))
ex_1 = lambda activation: celpy.celtypes.logical_or(celpy.evaluation.result(activation, ex_1_l), celpy.evaluation.result(activation, ex_1_r))
CEL = celpy.evaluation.result(base_activation, ex_1)"""),
celtypes.BoolType(True), "_||_"),
("(3 / 0 != 0) || true",
dedent("""\
# conditionalor:
ex_1_l = lambda activation: celpy.evaluation.bool_ne(operator.truediv(celpy.celtypes.IntType(3), celpy.celtypes.IntType(0)), celpy.celtypes.IntType(0))
ex_1_r = lambda activation: celpy.celtypes.BoolType(True)
ex_1 = lambda activation: celpy.celtypes.logical_or(celpy.evaluation.result(activation, ex_1_l), celpy.evaluation.result(activation, ex_1_r))
CEL = celpy.evaluation.result(base_activation, ex_1)"""),
celtypes.BoolType(True), "_||_"),
("false || (3 / 0 != 0)",
dedent("""\
# conditionalor:
ex_1_l = lambda activation: celpy.celtypes.BoolType(False)
ex_1_r = lambda activation: celpy.evaluation.bool_ne(operator.truediv(celpy.celtypes.IntType(3), celpy.celtypes.IntType(0)), celpy.celtypes.IntType(0))
ex_1 = lambda activation: celpy.celtypes.logical_or(celpy.evaluation.result(activation, ex_1_l), celpy.evaluation.result(activation, ex_1_r))
CEL = celpy.evaluation.result(base_activation, ex_1)"""),
CELEvalError, "_||_"),
("(3 / 0 != 0) || false",
dedent("""\
# conditionalor:
ex_1_l = lambda activation: celpy.evaluation.bool_ne(operator.truediv(celpy.celtypes.IntType(3), celpy.celtypes.IntType(0)), celpy.celtypes.IntType(0))
ex_1_r = lambda activation: celpy.celtypes.BoolType(False)
ex_1 = lambda activation: celpy.celtypes.logical_or(celpy.evaluation.result(activation, ex_1_l), celpy.evaluation.result(activation, ex_1_r))
CEL = celpy.evaluation.result(base_activation, ex_1)"""),
CELEvalError, "_||_"),
("true && 3 / 0",
dedent("""\
# conditionaland:
ex_2_l = lambda activation: celpy.celtypes.BoolType(True)
ex_2_r = lambda activation: operator.truediv(celpy.celtypes.IntType(3), celpy.celtypes.IntType(0))
ex_2 = lambda activation: celpy.celtypes.logical_and(celpy.evaluation.result(activation, ex_2_l), celpy.evaluation.result(activation, ex_2_r))
CEL = celpy.evaluation.result(base_activation, ex_2)"""),
CELEvalError, "_&&_"),
("false && 3 / 0",
dedent("""\
# conditionaland:
ex_2_l = lambda activation: celpy.celtypes.BoolType(False)
ex_2_r = lambda activation: operator.truediv(celpy.celtypes.IntType(3), celpy.celtypes.IntType(0))
ex_2 = lambda activation: celpy.celtypes.logical_and(celpy.evaluation.result(activation, ex_2_l), celpy.evaluation.result(activation, ex_2_r))
CEL = celpy.evaluation.result(base_activation, ex_2)"""),
celpy.celtypes.BoolType(False), "_&&_"),
("3 / 0 && true",
dedent("""\
# conditionaland:
ex_2_l = lambda activation: operator.truediv(celpy.celtypes.IntType(3), celpy.celtypes.IntType(0))
ex_2_r = lambda activation: celpy.celtypes.BoolType(True)
ex_2 = lambda activation: celpy.celtypes.logical_and(celpy.evaluation.result(activation, ex_2_l), celpy.evaluation.result(activation, ex_2_r))
CEL = celpy.evaluation.result(base_activation, ex_2)"""),
CELEvalError, "_&&_"),
("3 / 0 && false",
dedent("""\
# conditionaland:
ex_2_l = lambda activation: operator.truediv(celpy.celtypes.IntType(3), celpy.celtypes.IntType(0))
ex_2_r = lambda activation: celpy.celtypes.BoolType(False)
ex_2 = lambda activation: celpy.celtypes.logical_and(celpy.evaluation.result(activation, ex_2_l), celpy.evaluation.result(activation, ex_2_r))
CEL = celpy.evaluation.result(base_activation, ex_2)"""),
celpy.celtypes.BoolType(False), "_&&_"),
("(13 % 2 != 0) ? (13 * 3 + 1) : (13 / 0)",
dedent("""\
# expr:
ex_0_c = lambda activation: celpy.evaluation.bool_ne(operator.mod(celpy.celtypes.IntType(13), celpy.celtypes.IntType(2)), celpy.celtypes.IntType(0))
ex_0_l = lambda activation: operator.add(operator.mul(celpy.celtypes.IntType(13), celpy.celtypes.IntType(3)), celpy.celtypes.IntType(1))
ex_0_r = lambda activation: operator.truediv(celpy.celtypes.IntType(13), celpy.celtypes.IntType(0))
ex_0 = lambda activation: celpy.celtypes.logical_condition(celpy.evaluation.result(activation, ex_0_c), celpy.evaluation.result(activation, ex_0_l), celpy.evaluation.result(activation, ex_0_r))
CEL = celpy.evaluation.result(base_activation, ex_0)"""),
celtypes.IntType(40),
"_?_:_"),
("(12 % 2 != 0) ? (12 / 0) : (12 / 2)",
dedent("""\
# expr:
ex_0_c = lambda activation: celpy.evaluation.bool_ne(operator.mod(celpy.celtypes.IntType(12), celpy.celtypes.IntType(2)), celpy.celtypes.IntType(0))
ex_0_l = lambda activation: operator.truediv(celpy.celtypes.IntType(12), celpy.celtypes.IntType(0))
ex_0_r = lambda activation: operator.truediv(celpy.celtypes.IntType(12), celpy.celtypes.IntType(2))
ex_0 = lambda activation: celpy.celtypes.logical_condition(celpy.evaluation.result(activation, ex_0_c), celpy.evaluation.result(activation, ex_0_l), celpy.evaluation.result(activation, ex_0_r))
CEL = celpy.evaluation.result(base_activation, ex_0)"""),
celtypes.IntType(6),
"_?_:_"),
("(14 % 0 != 0) ? (14 * 3 + 1) : (14 / 2)",
dedent("""\
# expr:
ex_0_c = lambda activation: celpy.evaluation.bool_ne(operator.mod(celpy.celtypes.IntType(14), celpy.celtypes.IntType(0)), celpy.celtypes.IntType(0))
ex_0_l = lambda activation: operator.add(operator.mul(celpy.celtypes.IntType(14), celpy.celtypes.IntType(3)), celpy.celtypes.IntType(1))
ex_0_r = lambda activation: operator.truediv(celpy.celtypes.IntType(14), celpy.celtypes.IntType(2))
ex_0 = lambda activation: celpy.celtypes.logical_condition(celpy.evaluation.result(activation, ex_0_c), celpy.evaluation.result(activation, ex_0_l), celpy.evaluation.result(activation, ex_0_r))
CEL = celpy.evaluation.result(base_activation, ex_0)"""),
CELEvalError,
"_?_:_"),
]
member_dot_params = [
('{"field": 42}.field',
dedent("""\
CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.celtypes.MapType([(celpy.celtypes.StringType('field'), celpy.celtypes.IntType(42))]).get('field'))"""),
celtypes.IntType(42),
"_._"),
# Must match the mock_protobuf message
('protobuf_message{field: 42}.field',
dedent("""\
CEL = celpy.evaluation.result(base_activation, lambda activation: activation.protobuf_message([('field', celpy.celtypes.IntType(42))]).get('field'))"""),
celtypes.IntType(42),
"_._"),
# Must NOT match the mock_protobuf message
('protobuf_message{field: 42}.not_the_name',
dedent("""\
CEL = celpy.evaluation.result(base_activation, lambda activation: activation.protobuf_message([('field', celpy.celtypes.IntType(42))]).get('not_the_name'))"""),
CELEvalError,
"_._"),
# Requires mock_activation with {"name1.name2": celtypes.IntType}
('name1.name2',
dedent("""\
CEL = celpy.evaluation.result(base_activation, lambda activation: activation.name1.get('name2'))"""),
celtypes.IntType,
"_._"),
('a.b.c',
dedent("""\
CEL = celpy.evaluation.result(base_activation, lambda activation: activation.a.get('b').get('c'))"""),
celtypes.StringType("yeah"),
"_._"),
]
# Indexing (`_.[_]`) cases. Each tuple is
# (CEL source, expected transpiled Python, expected result or error class, operator label).
# Bad indices (out of range, wrong type, missing key) evaluate to CELEvalError.
member_item_params = [
    ('["hello", "world"][0]',
     "CEL = celpy.evaluation.result(base_activation, lambda activation: operator.getitem(celpy.celtypes.ListType([celpy.celtypes.StringType('hello'), celpy.celtypes.StringType('world')]), celpy.celtypes.IntType(0)))",
     celtypes.StringType("hello"),
     "_.[_]"),
    # Index out of range.
    ('["hello", "world"][42]',
     "CEL = celpy.evaluation.result(base_activation, lambda activation: operator.getitem(celpy.celtypes.ListType([celpy.celtypes.StringType('hello'), celpy.celtypes.StringType('world')]), celpy.celtypes.IntType(42)))",
     CELEvalError,
     "_.[_]"),
    # Non-integer index on a list.
    ('["hello", "world"][3.14]',
     "CEL = celpy.evaluation.result(base_activation, lambda activation: operator.getitem(celpy.celtypes.ListType([celpy.celtypes.StringType('hello'), celpy.celtypes.StringType('world')]), celpy.celtypes.DoubleType(3.14)))",
     CELEvalError,
     "_.[_]"),
    ('{"hello": "world"}["hello"]',
     "CEL = celpy.evaluation.result(base_activation, lambda activation: operator.getitem(celpy.celtypes.MapType([(celpy.celtypes.StringType('hello'), celpy.celtypes.StringType('world'))]), celpy.celtypes.StringType('hello')))",
     celtypes.StringType("world"),
     "_.[_]"),
    # Missing key.
    ('{"hello": "world"}["world"]',
     "CEL = celpy.evaluation.result(base_activation, lambda activation: operator.getitem(celpy.celtypes.MapType([(celpy.celtypes.StringType('hello'), celpy.celtypes.StringType('world'))]), celpy.celtypes.StringType('world')))",
     CELEvalError,
     "_.[_]"),
]
# A protobuf message class, `protobuf_message`, is required in the activation.
# Message construction (`_.{_}`) cases: (CEL source, expected transpiled Python,
# expected result, operator label). Both cases return the sentinel the mocked
# protobuf_message class produces.
member_object_params = [
    # Must match the mock_protobuf fixture
    ('protobuf_message{field: 42}',
     "CEL = celpy.evaluation.result(base_activation, lambda activation: activation.protobuf_message([('field', celpy.celtypes.IntType(42))]))",
     sentinel.MESSAGE,
     "_.{_}"),
    # Empty field list — construction with no fields.
    ('protobuf_message{}',
     "CEL = celpy.evaluation.result(base_activation, lambda activation: activation.protobuf_message([]))",
     sentinel.MESSAGE,
     "_.{_}"),
]
member_dot_arg_method = [
('timestamp("2009-02-13T23:31:30Z").getMonth()',
dedent("""\
CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.function_getMonth(celpy.celtypes.TimestampType(celpy.celtypes.StringType('2009-02-13T23:31:30Z'))))"""),
celtypes.IntType(1),
"_._(_)"),
('timestamp("2009-02-13T23:31:30Z").getDate()',
dedent("""\
CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.function_getDate(celpy.celtypes.TimestampType(celpy.celtypes.StringType('2009-02-13T23:31:30Z'))))"""),
celtypes.IntType(13),
"_._(_)"),
('timestamp("2009-02-13T23:31:30Z").getDayOfMonth()',
dedent("""\
CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.function_getDayOfMonth(celpy.celtypes.TimestampType(celpy.celtypes.StringType('2009-02-13T23:31:30Z'))))"""),
celtypes.IntType(12),
"_._(_)"),
('timestamp("2009-02-13T23:31:30Z").getDayOfWeek()',
dedent("""\
CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.function_getDayOfWeek(celpy.celtypes.TimestampType(celpy.celtypes.StringType('2009-02-13T23:31:30Z'))))"""),
celtypes.IntType(5),
"_._(_)"),
('timestamp("2009-02-13T23:31:30Z").getDayOfYear()',
dedent("""\
CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.function_getDayOfYear(celpy.celtypes.TimestampType(celpy.celtypes.StringType('2009-02-13T23:31:30Z'))))"""),
celtypes.IntType(43),
"_._(_)"),
('timestamp("2009-02-13T23:31:30Z").getFullYear()',
dedent("""\
CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.function_getFullYear(celpy.celtypes.TimestampType(celpy.celtypes.StringType('2009-02-13T23:31:30Z'))))"""),
celtypes.IntType(2009),
"_._(_)"),
('timestamp("2009-02-13T23:31:30Z").getHours()',
dedent("""\
CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.function_getHours(celpy.celtypes.TimestampType(celpy.celtypes.StringType('2009-02-13T23:31:30Z'))))"""),
celtypes.IntType(23),
"_._(_)"),
('timestamp("2009-02-13T23:31:30Z").getMilliseconds()',
dedent("""\
CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.function_getMilliseconds(celpy.celtypes.TimestampType(celpy.celtypes.StringType('2009-02-13T23:31:30Z'))))"""),
celtypes.IntType(0),
"_._(_)"),
('timestamp("2009-02-13T23:31:30Z").getMinutes()',
dedent("""\
CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.function_getMinutes(celpy.celtypes.TimestampType(celpy.celtypes.StringType('2009-02-13T23:31:30Z'))))"""),
celtypes.IntType(31),
"_._(_)"),
('timestamp("2009-02-13T23:31:30Z").getSeconds()',
dedent("""\
CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.function_getSeconds(celpy.celtypes.TimestampType(celpy.celtypes.StringType('2009-02-13T23:31:30Z'))))"""),
celtypes.IntType(30),
"_._(_)"),
('["hello", "world"].contains("hello")',
dedent("""\
CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.function_contains(celpy.celtypes.ListType([celpy.celtypes.StringType('hello'), celpy.celtypes.StringType('world')]), celpy.celtypes.StringType('hello')))"""),
celtypes.BoolType(True),
"_._(_)"),
('"hello".startsWith("h")',
dedent("""\
CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.function_startsWith(celpy.celtypes.StringType('hello'), celpy.celtypes.StringType('h')))"""),
celtypes.BoolType(True),
"_._(_)"),
('"hello".endsWith("o")',
dedent("""\
CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.function_endsWith(celpy.celtypes.StringType('hello'), celpy.celtypes.StringType('o')))"""),
celtypes.BoolType(True),
"_._(_)"),
]
member_dot_arg_method_macro = [
('["hello", "world"].map(x, x) == ["hello", "world"]',
dedent("""\
# member_dot_arg map:
ex_10_l = lambda activation: celpy.celtypes.ListType([celpy.celtypes.StringType('hello'), celpy.celtypes.StringType('world')])
ex_10_x = lambda activation: activation.x
ex_10 = lambda activation: celpy.evaluation.macro_map(activation, 'x', ex_10_x, ex_10_l)
CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.bool_eq(ex_10(activation), celpy.celtypes.ListType([celpy.celtypes.StringType('hello'), celpy.celtypes.StringType('world')])))"""),
celtypes.BoolType(True),
"_._(_)"),
('[true, false].filter(x, x) == [true]',
dedent("""\
# member_dot_arg filter:
ex_10_l = lambda activation: celpy.celtypes.ListType([celpy.celtypes.BoolType(True), celpy.celtypes.BoolType(False)])
ex_10_x = lambda activation: activation.x
ex_10 = lambda activation: celpy.evaluation.macro_filter(activation, 'x', ex_10_x, ex_10_l)
CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.bool_eq(ex_10(activation), celpy.celtypes.ListType([celpy.celtypes.BoolType(True)])))"""),
celtypes.BoolType(True),
"_._(_)"),
('[42, 0].filter(x, 2 / x > 0) == [42]',
dedent("""\
# member_dot_arg filter:
ex_10_l = lambda activation: celpy.celtypes.ListType([celpy.celtypes.IntType(42), celpy.celtypes.IntType(0)])
ex_10_x = lambda activation: celpy.evaluation.bool_gt(operator.truediv(celpy.celtypes.IntType(2), activation.x), celpy.celtypes.IntType(0))
ex_10 = lambda activation: celpy.evaluation.macro_filter(activation, 'x', ex_10_x, ex_10_l)
CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.bool_eq(ex_10(activation), celpy.celtypes.ListType([celpy.celtypes.IntType(42)])))"""),
CELEvalError,
"_._(_)"),
('[true, false].exists_one(x, x)',
dedent("""\
# member_dot_arg exists_one:
ex_8_l = lambda activation: celpy.celtypes.ListType([celpy.celtypes.BoolType(True), celpy.celtypes.BoolType(False)])
ex_8_x = lambda activation: activation.x
ex_8 = lambda activation: celpy.evaluation.macro_exists_one(activation, 'x', ex_8_x, ex_8_l)
CEL = celpy.evaluation.result(base_activation, ex_8)"""),
celtypes.BoolType(True),
"_._(_)"),
('[42, 0].exists_one(x, 2 / x > 0) == true',
dedent("""\
# member_dot_arg exists_one:
ex_10_l = lambda activation: celpy.celtypes.ListType([celpy.celtypes.IntType(42), celpy.celtypes.IntType(0)])
ex_10_x = lambda activation: celpy.evaluation.bool_gt(operator.truediv(celpy.celtypes.IntType(2), activation.x), celpy.celtypes.IntType(0))
ex_10 = lambda activation: celpy.evaluation.macro_exists_one(activation, 'x', ex_10_x, ex_10_l)
CEL = celpy.evaluation.result(base_activation, lambda activation: celpy.evaluation.bool_eq(ex_10(activation), celpy.celtypes.BoolType(True)))"""),
CELEvalError,
"_._(_)"),
('[true, false].exists(x, x)',
dedent("""\
# member_dot_arg exists:
ex_8_l = lambda activation: celpy.celtypes.ListType([celpy.celtypes.BoolType(True), celpy.celtypes.BoolType(False)])
ex_8_x = lambda activation: activation.x
ex_8 = lambda activation: celpy.evaluation.macro_exists(activation, 'x', ex_8_x, ex_8_l)
CEL = celpy.evaluation.result(base_activation, ex_8)"""),
celtypes.BoolType(True),
"_._(_)"),
('[true, false].all(x, x)',
dedent("""\
# member_dot_arg all:
ex_8_l = lambda activation: celpy.celtypes.ListType([celpy.celtypes.BoolType(True), celpy.celtypes.BoolType(False)])
ex_8_x = lambda activation: activation.x
ex_8 = lambda activation: celpy.evaluation.macro_all(activation, 'x', ex_8_x, ex_8_l)
CEL = celpy.evaluation.result(base_activation, ex_8)"""),
celtypes.BoolType(False),
"_._(_)"),
# Some difficult cases from the acceptance test suite, repeated here to make debugging easier.
("[1, 'foo', 3].exists(e, e != '1')",
dedent("""\
# member_dot_arg exists:
ex_8_l = lambda activation: celpy.celtypes.ListType([celpy.celtypes.IntType(1), celpy.celtypes.StringType('foo'), celpy.celtypes.IntType(3)])
ex_8_x = lambda activation: celpy.evaluation.bool_ne(activation.e, celpy.celtypes.StringType('1'))
ex_8 = lambda activation: celpy.evaluation.macro_exists(activation, 'e', ex_8_x, ex_8_l)
CEL = celpy.evaluation.result(base_activation, ex_8)
"""),
celtypes.BoolType(True),
"_._(_)"),
("['foal', 'foo', 'four'].exists_one(n, n.startsWith('fo'))",
dedent("""
# member_dot_arg exists_one:
| python | Apache-2.0 | 3a134c10394058c73a6bbe0e4ca7e862ea9707b3 | 2026-01-05T07:13:01.631050Z | true |
cloud-custodian/cel-python | https://github.com/cloud-custodian/cel-python/blob/3a134c10394058c73a6bbe0e4ca7e862ea9707b3/tests/test_c7n_to_cel.py | tests/test_c7n_to_cel.py | # SPDX-Copyright: Copyright (c) Capital One Services, LLC
# SPDX-License-Identifier: Apache-2.0
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
"""
C7N Integration Translation Test Cases.
"""
from unittest.mock import Mock, call, sentinel
from pytest import *
from xlate.c7n_to_cel import C7N_Rewriter
def test_q():
    """C7N_Rewriter.q() wraps a value in quotes, escaping embedded quotes."""
    cases = [
        (("hello",), {}, '"hello"'),
        (("world",), {"quote": "'"}, "'world'"),
        ((None,), {}, '""'),  # None becomes an empty quoted string
        (('Say, "hello"',), {}, r'"Say, \"hello\""'),
    ]
    for args, kwargs, expected in cases:
        assert C7N_Rewriter.q(*args, **kwargs) == expected
def test_age_to_duration():
    """Fractional-day ages are rendered as quoted CEL duration strings."""
    expectations = {
        1: '"1d"',
        .5: '"12h"',
        .084: '"2h57s"',
        .011: '"15m50s"',
    }
    for age, duration in expectations.items():
        assert C7N_Rewriter.age_to_duration(age) == duration
@fixture
def mock_logical_connector(monkeypatch):
    """Patch C7N_Rewriter.logical_connector with a sentinel-returning Mock."""
    patched = Mock(return_value=sentinel.rewritten)
    monkeypatch.setattr(C7N_Rewriter, 'logical_connector', patched)
    return patched
def test_c7n_rewrite(mock_logical_connector):
    """c7n_rewrite() parses the policy YAML and delegates to logical_connector."""
    policy_text = 'name: policy\nfilters: "text"\n'
    assert C7N_Rewriter.c7n_rewrite(policy_text) == sentinel.rewritten
    # The resource (first argument) is None because the policy names no resource type.
    assert mock_logical_connector.mock_calls == [call(None, "text")]
def _patched_rewriter_method(monkeypatch, method_name):
    """Replace a C7N_Rewriter method with a Mock returning str(sentinel.rewritten)."""
    patched = Mock(return_value=str(sentinel.rewritten))
    monkeypatch.setattr(C7N_Rewriter, method_name, patched)
    return patched


@fixture
def mock_type_value_rewrite(monkeypatch):
    """Mock out the type: value rewriter."""
    return _patched_rewriter_method(monkeypatch, 'type_value_rewrite')


@fixture
def mock_type_marked_for_op_rewrite(monkeypatch):
    """Mock out the type: marked-for-op rewriter."""
    return _patched_rewriter_method(monkeypatch, 'type_marked_for_op_rewrite')


@fixture
def mock_type_image_age_rewrite(monkeypatch):
    """Mock out the type: image-age rewriter."""
    return _patched_rewriter_method(monkeypatch, 'type_image_age_rewrite')


@fixture
def mock_type_event_rewrite(monkeypatch):
    """Mock out the type: event rewriter."""
    return _patched_rewriter_method(monkeypatch, 'type_event_rewrite')
def test_logical_connector_list(mock_type_value_rewrite):
    """A bare list of clauses rewrites its single clause directly."""
    result = C7N_Rewriter.logical_connector(sentinel.resource, [{"type": "value"}])
    assert result == str(sentinel.rewritten)
    assert mock_type_value_rewrite.mock_calls == [call(sentinel.resource, {'type': 'value'})]


def test_logical_connector_and(mock_type_value_rewrite):
    """A singleton "and" collapses to the single rewritten clause."""
    result = C7N_Rewriter.logical_connector(sentinel.resource, {"and": [{"type": "value"}]})
    assert result == str(sentinel.rewritten)
    assert mock_type_value_rewrite.mock_calls == [call(sentinel.resource, {'type': 'value'})]


def test_logical_connector_or(mock_type_value_rewrite):
    """A singleton "or" — a common construct — collapses to the rewritten clause."""
    result = C7N_Rewriter.logical_connector(sentinel.resource, {"or": [{"type": "value"}]})
    assert result == str(sentinel.rewritten)
    assert mock_type_value_rewrite.mock_calls == [call(sentinel.resource, {'type': 'value'})]
def test_logical_connector_not_1(mock_type_value_rewrite):
    """A single-clause "not" wraps the rewritten clause in ``! (...)``."""
    document = {"not": [{"type": "value"}]}
    result = C7N_Rewriter.logical_connector(sentinel.resource, document)
    assert result == f"! ({str(sentinel.rewritten)})"
    assert mock_type_value_rewrite.mock_calls == [call(sentinel.resource, {'type': 'value'})]


def test_logical_connector_not_2(mock_type_value_rewrite):
    """A multi-clause "not" ANDs the rewritten clauses before negating."""
    document = {"not": [{"type": "value", "value": 1}, {"type": "value", "value": 2}]}
    result = C7N_Rewriter.logical_connector(sentinel.resource, document)
    assert result == f"! ({str(sentinel.rewritten)} && {str(sentinel.rewritten)})"
    assert mock_type_value_rewrite.mock_calls == [
        call(sentinel.resource, {'type': 'value', 'value': 1}),
        call(sentinel.resource, {'type': 'value', 'value': 2})
    ]
def test_logical_connector_errors(mock_type_value_rewrite):
    """Unknown filter types and non-document filters both raise ValueError."""
    for bad_filter in ({"type": "-not-defined-"}, "nope"):
        with raises(ValueError):
            C7N_Rewriter.logical_connector(sentinel.resource, bad_filter)
def _mock_rewriter_attr(monkeypatch, attr_name):
    """Patch a C7N_Rewriter attribute with a Mock returning str(sentinel.rewritten)."""
    mocked = Mock(return_value=str(sentinel.rewritten))
    monkeypatch.setattr(C7N_Rewriter, attr_name, mocked)
    return mocked


@fixture
def mock_key_to_cel(monkeypatch):
    """Mock out the key-to-CEL translator."""
    return _mock_rewriter_attr(monkeypatch, 'key_to_cel')


@fixture
def mock_value_to_cel(monkeypatch):
    """Mock out the value-to-CEL translator."""
    return _mock_rewriter_attr(monkeypatch, 'value_to_cel')


@fixture
def mock_value_from_to_cel(monkeypatch):
    """Mock out the value_from-to-CEL translator."""
    return _mock_rewriter_attr(monkeypatch, 'value_from_to_cel')
def test_type_value_rewrite(mock_key_to_cel, mock_value_to_cel):
    """An op/value clause flows through key_to_cel and then value_to_cel."""
    result = C7N_Rewriter.type_value_rewrite(
        sentinel.resource, {"key": "key", "op": "eq", "value": 42})
    assert result == str(sentinel.rewritten)
    assert mock_key_to_cel.mock_calls == [call("key")]
    # The trailing None is the (absent) value_type.
    assert mock_value_to_cel.mock_calls == [call(str(sentinel.rewritten), "eq", 42, None)]


def test_type_value_rewrite_present(mock_key_to_cel, mock_value_to_cel):
    """value: "present" maps to the internal __present__ pseudo-op."""
    result = C7N_Rewriter.type_value_rewrite(
        sentinel.resource, {"key": "key", "value": "present"})
    assert result == str(sentinel.rewritten)
    assert mock_key_to_cel.mock_calls == [call("key")]
    assert mock_value_to_cel.mock_calls == [call(str(sentinel.rewritten), "__present__", None)]


def test_type_value_rewrite_not_null(mock_key_to_cel, mock_value_to_cel):
    """value: "not-null" is a synonym for __present__."""
    result = C7N_Rewriter.type_value_rewrite(
        sentinel.resource, {"key": "key", "value": "not-null"})
    assert result == str(sentinel.rewritten)
    assert mock_key_to_cel.mock_calls == [call("key")]
    assert mock_value_to_cel.mock_calls == [call(str(sentinel.rewritten), "__present__", None)]


def test_type_value_rewrite_absent(mock_key_to_cel, mock_value_to_cel):
    """value: "absent" maps to the internal __absent__ pseudo-op."""
    result = C7N_Rewriter.type_value_rewrite(
        sentinel.resource, {"key": "key", "value": "absent"})
    assert result == str(sentinel.rewritten)
    assert mock_key_to_cel.mock_calls == [call("key")]
    assert mock_value_to_cel.mock_calls == [call(str(sentinel.rewritten), "__absent__", None)]


def test_type_value_rewrite_emptu(mock_key_to_cel, mock_value_to_cel):
    """value: "empty" is a synonym for __absent__.

    NOTE(review): the function name carries a historical typo ("emptu");
    it is kept to avoid renaming the test.
    """
    result = C7N_Rewriter.type_value_rewrite(
        sentinel.resource, {"key": "key", "value": "empty"})
    assert result == str(sentinel.rewritten)
    assert mock_key_to_cel.mock_calls == [call("key")]
    assert mock_value_to_cel.mock_calls == [call(str(sentinel.rewritten), "__absent__", None)]
def test_tag_absent(mock_key_to_cel, mock_value_to_cel):
    """A bare ``tag:<name>: absent`` clause maps to the __absent__ pseudo-op."""
    clause = {"tag:aws:autoscaling:groupName": "absent"}
    assert C7N_Rewriter.type_value_rewrite(sentinel.resource, clause) == str(sentinel.rewritten)
    assert mock_key_to_cel.mock_calls == [call("tag:aws:autoscaling:groupName")]
    assert mock_value_to_cel.mock_calls == [call(str(sentinel.rewritten), "__absent__", None)]


def test_primitive_value(mock_type_value_rewrite):
    """primitive() dispatches type: value clauses to type_value_rewrite."""
    assert C7N_Rewriter.primitive(sentinel.resource, {"type": "value"}) == str(sentinel.rewritten)
    assert mock_type_value_rewrite.mock_calls == [call(sentinel.resource, {'type': 'value'})]


def test_primitive_absent(mock_type_value_rewrite):
    """primitive() treats a bare tag: absent clause as a value clause."""
    clause = {"tag:aws:autoscaling:groupName": "absent"}
    assert C7N_Rewriter.primitive(sentinel.resource, clause) == str(sentinel.rewritten)
    assert mock_type_value_rewrite.mock_calls == [call(sentinel.resource, clause)]
def test_type_value_from_rewrite(mock_key_to_cel, mock_value_from_to_cel):
    """A value_from clause is routed to value_from_to_cel instead of value_to_cel."""
    clause = {"key": "key", "op": "in", "value_from": {"url": "url"}}
    result = C7N_Rewriter.type_value_rewrite(sentinel.resource, clause)
    assert result == str(sentinel.rewritten)
    assert mock_key_to_cel.mock_calls == [call("key")]
    assert mock_value_from_to_cel.mock_calls == [
        call(str(sentinel.rewritten), "in", {"url": "url"})
    ]


def test_type_value_rewrite_error(mock_key_to_cel):
    """Clauses with neither value nor value_from, or an unknown value word, raise ValueError."""
    bad_clauses = [
        {"key": "key", "op": "in", "nope": {"url": "url"}},
        {"key": "key", "value": "nope"},
    ]
    for clause in bad_clauses:
        with raises(ValueError):
            C7N_Rewriter.type_value_rewrite(sentinel.resource, clause)
def test_value_from_to_cel():
    """value_from clauses become value_from(...) CEL calls.

    Optional keys add a format argument, a jmes_path() step, or — when the
    expr contains a {substitution} — a jmes_path(subst(...)) step.
    """
    cases = [
        ({"url": "url://path"}, "in",
         'value_from("url://path").contains(key)'),
        ({"url": "url://path", "format": "json"}, "in",
         'value_from("url://path", "json").contains(key)'),
        ({"url": "url://path", "expr": "jmespath"}, "in",
         'value_from("url://path").jmes_path(\'jmespath\').contains(key)'),
        ({"url": "url://path", "expr": "jmespath{account-id}"}, None,
         'value_from("url://path").jmes_path(subst(\'jmespath{account-id}\')).contains(key)'),
    ]
    for value_from, op, expected in cases:
        assert C7N_Rewriter.value_from_to_cel("key", op, value_from) == expected
def test_value_to_cel_boolean():
    """Boolean comparisons collapse to the key itself or its negation."""
    # eq true / ne false (string or bool) simplify to the bare key.
    for op, value in (("eq", "true"), ("eq", True), ("ne", "false"), ("ne", False)):
        assert C7N_Rewriter.value_to_cel("key", op, value) == "key"
    # eq false / ne true simplify to the negated key.
    for op, value in (("eq", "false"), ("eq", False), ("ne", "true"), ("ne", True)):
        assert C7N_Rewriter.value_to_cel("key", op, value) == "! key"
    # Any other operator with a boolean value is rejected.
    with raises(ValueError):
        C7N_Rewriter.value_to_cel("key", "nope", "true")
def test_value_to_cel_non_bool():
    """Each value_type selects a distinct CEL conversion of key and value."""
    cases = [
        # (positional args), {keyword args}, expected CEL text
        (("key", "eq", "some_string"), {}, 'key == "some_string"'),
        (("key", "gt", 42), {}, 'key > 42'),
        (("key", "gt", 42), {"value_type": "age"},
         'now - duration("42d") > timestamp(key)'),
        (("key", "gt", 42), {"value_type": "integer"}, 'int(key) > 42'),
        (("key", "gt", 42), {"value_type": "expiration"},
         'timestamp(key) > now + duration("42d")'),
        (("key", "eq", "some_string"), {"value_type": "normalize"},
         'normalize(key) == "some_string"'),
        (("key", "gt", 42), {"value_type": "size"}, 'size(key) > 42'),
        (("key", "ne", "127.0.0.1/22"), {"value_type": "cidr"},
         'parse_cidr(key) != parse_cidr("127.0.0.1/22")'),
        (("key", "gt", "127.0.0.1/22"), {"value_type": "cidr_size"},
         'size_parse_cidr(key) > "127.0.0.1/22"'),
        # "swap" reverses operand order.
        (("key", "eq", "some_string"), {"value_type": "swap"},
         '"some_string" == key'),
        (("key", "gt", 42), {"value_type": "unique_size"}, 'unique_size(key) > 42'),
        (("key", "gt", 42), {"value_type": "date"},
         'timestamp(key) > timestamp(42)'),
        (("key", "gt", "3.8.5"), {"value_type": "version"},
         'version(key) > version("3.8.5")'),
    ]
    for args, kwargs, expected in cases:
        assert C7N_Rewriter.value_to_cel(*args, **kwargs) == expected
def test_key_to_cel():
    """Keys become resource lookups; length() and tag: get special handling."""
    expectations = {
        "length(key)": 'size(resource["key"])',
        "Key.Subkey": 'resource["Key"]["Subkey"]',
        "tag:TagName": 'resource["Tags"].filter(x, x["Key"] == "TagName")[0]["Value"]',
        "key": 'resource["key"]',
    }
    for source_key, cel_text in expectations.items():
        assert C7N_Rewriter.key_to_cel(source_key) == cel_text
def test_marked_for_op_rewrite(mock_key_to_cel):
    """marked-for-op clauses compare the tag action and its skewed action_date."""
    clause = {"op": "terminate", "skew": 4, "tag": "c7n-tag-compliance", "type": "marked-for-op"}
    expected = (
        'resource["Tags"].marked_key("c7n-tag-compliance").action == "terminate" '
        '&& now >= resource["Tags"].marked_key("c7n-tag-compliance").action_date '
        '- duration("4d0h")'
    )
    assert C7N_Rewriter.type_marked_for_op_rewrite(sentinel.resource, clause) == expected


def test_primitive_mark_for_op(mock_type_marked_for_op_rewrite):
    """primitive() dispatches marked-for-op clauses to type_marked_for_op_rewrite."""
    clause = {"type": "marked-for-op"}
    assert C7N_Rewriter.primitive(sentinel.resource, clause) == str(sentinel.rewritten)
    assert mock_type_marked_for_op_rewrite.mock_calls == [call(sentinel.resource, clause)]
def test_image_age_rewrite():
    """image-age clauses compare now - image CreationDate against a duration."""
    clause = {"days": 60, "op": "gt", "type": "image-age"}
    expected = 'now - resource.image().CreationDate > duration("60d")'
    assert C7N_Rewriter.type_image_age_rewrite(sentinel.resource, clause) == expected


def test_image_rewrite():
    """image clauses test attributes of the related image document."""
    clause = {"key": "Name", "op": "regex", "type": "image", "value": "(?!WIN.*)"}
    expected = 'resource.image().Name.matches("(?!WIN.*)")'
    assert C7N_Rewriter.type_image_rewrite(sentinel.resource, clause) == expected


def test_primitive_image_age(mock_type_image_age_rewrite):
    """primitive() dispatches image-age clauses to type_image_age_rewrite."""
    clause = {"type": "image-age"}
    assert C7N_Rewriter.primitive(sentinel.resource, clause) == str(sentinel.rewritten)
    assert mock_type_image_age_rewrite.mock_calls == [call(sentinel.resource, clause)]
def test_event_rewrite():
    """event clauses address the triggering event document directly."""
    clause = {
        "key": "detail.responseElements.functionName", "op": "regex", "type": "event",
        "value": "^(custodian-.*)"
    }
    expected = 'event.detail.responseElements.functionName.matches("^(custodian-.*)")'
    assert C7N_Rewriter.type_event_rewrite(sentinel.resource, clause) == expected


def test_primitive_event(mock_type_event_rewrite):
    """primitive() dispatches event clauses to type_event_rewrite."""
    clause = {"type": "event"}
    assert C7N_Rewriter.primitive(sentinel.resource, clause) == str(sentinel.rewritten)
    assert mock_type_event_rewrite.mock_calls == [call(sentinel.resource, clause)]
def test_metrics_rewrite_simple():
    """metrics clauses become get_metrics(...) calls with an exists() test."""
    clause = {
        "type": "metrics",
        "name": "CPUUtilization",
        "days": 4,
        "period": 86400,
        "value": 30,
        "op": "less-than",
    }
    # 86400 seconds is rendered as a 1-day period; statistic defaults to Average.
    query = (
        '{"MetricName": "CPUUtilization", "Statistic": "Average", '
        '"StartTime": now - duration("4d"), "EndTime": now, "Period": duration("1d")}'
    )
    expected = 'resource.get_metrics(' + query + ').exists(m, m < 30)'
    assert C7N_Rewriter.type_metrics_rewrite(sentinel.resource, clause) == expected


def test_metrics_rewrite_missing_value():
    """A missing-value clause inserts a map() replacing null samples."""
    clause = {
        "type": "metrics",
        "name": "RequestCount",
        "statistics": "Sum",
        "days": 7,
        "value": 7,
        "op": "less-than",
        "missing-value": 0,
    }
    query = (
        '{"MetricName": "RequestCount", "Statistic": "Sum", '
        '"StartTime": now - duration("7d"), "EndTime": now, "Period": duration("7d")}'
    )
    expected = (
        'resource.get_metrics(' + query + ')'
        '.map(m, m == null ? 0 : m)'
        '.exists(m, m < 7)'
    )
    assert C7N_Rewriter.type_metrics_rewrite(sentinel.resource, clause) == expected
def test_age_rewrite():
    """age clauses compare now - StartTime against the given duration."""
    clause = {"days": 21, "op": "gt", "type": "age"}
    assert (
        C7N_Rewriter.type_age_rewrite("ebs-snapshot", clause)
        == 'now - timestamp(resource.StartTime) > duration("21d")'
    )
def test_security_group_rewrite():
    """security-group clauses join through security_group() and test each sg."""
    # All variants share the same map/exists scaffolding over the resource's groups.
    prefix = 'resource.SecurityGroups.map(sg, sg.GroupId.security_group()).exists(sg, '
    cases = [
        ({"key": "GroupId", "op": "in", "type": "security-group",
          "value": ["sg-12345678", "sg-23456789", "sg-34567890"]},
         prefix + '[\'sg-12345678\', \'sg-23456789\', \'sg-34567890\'].contains(sg["GroupId"]))'),
        ({"key": "GroupName", "op": "regex", "type": "security-group",
          "value": "^Enterprise-AllInstances-SG.*$"},
         prefix + 'sg["GroupName"].matches(\'^Enterprise-AllInstances-SG.*$\'))'),
        ({"key": "tag:ASSET", "op": "eq", "type": "security-group", "value": "SPECIALASSETNAME"},
         prefix + 'sg["Tags"].filter(x, x["Key"] == "ASSET")[0]["Value"] == \'SPECIALASSETNAME\')'),
    ]
    for clause, expected in cases:
        assert C7N_Rewriter.type_security_group_rewrite("ec2", clause) == expected
def test_subnet_rewrite():
    """subnet clauses resolve the subnet and test membership in value_from data."""
    clause = {
        "key": "SubnetId", "op": "in", "type": "subnet-group",
        "value_from": {"format": "txt", "url": "s3://path-to-resource/subnets.txt"},
        "value_type": "normalize",
    }
    expected = (
        'value_from("s3://path-to-resource/subnets.txt", "txt")'
        '.map(v, normalize(v))'
        '.contains(resource.SubnetId.subnet().SubnetID)'
    )
    assert C7N_Rewriter.type_subnet_rewrite("asg", clause) == expected
def test_flow_logs_rewrite():
    """``flow-logs`` clauses become a size() guard plus exists() conditions."""
    cases = [
        # enabled False is a pure size() == 0 test.
        ({"enabled": False, "type": "flow-logs"},
         'size(resource.flow_logs()) == 0'),
        # A destination-type adds one exists() condition.
        ({"enabled": "true", "type": "flow-logs", "destination-type": "s3"},
         'size(resource.flow_logs()) != 0 && (resource.flow_logs().exists(x, x.LogDestinationType == "s3"))'),
        # set-op "or" combines several attribute checks with ||.
        ({'type': 'flow-logs', 'enabled': True,
          'set-op': 'or', 'op': 'equal', 'traffic-type': 'all', 'status': 'active',
          'log-group': 'vpc-logs'},
         'size(resource.flow_logs()) != 0 && (resource.flow_logs().exists(x, x.LogGroupName == "vpc-logs") || resource.flow_logs().exists(x, x.TrafficType == "ALL") || resource.flow_logs().exists(x, x.FlowLogStatus == "active"))'),
        # log-format / destination / deliver-status map to their own attributes.
        ({'type': 'flow-logs', 'enabled': True,
          "log-format": "this", "destination": "that", "deliver-status": "the-other-thing"},
         'size(resource.flow_logs()) != 0 && (resource.flow_logs().exists(x, x.LogFormat == "this") || resource.flow_logs().exists(x, x.LogDestination == "that") || resource.flow_logs().exists(x, x.DeliverLogsStatus == "the-other-thing"))'),
    ]
    for clause, expected_cel in cases:
        assert C7N_Rewriter.type_flow_log_rewrite("vpc", clause) == expected_cel
def test_tag_count_rewrite():
    """A ``tag-count`` clause becomes a size() comparison over non-AWS tags."""
    clause_0 = {
        "type": "tag-count", "op": "gte", "count": 8
    }
    expected = 'size(resource["Tags"].filter(x, ! matches(x.Key, "^aws:.*"))) >= 8'
    assert C7N_Rewriter.type_tag_count_rewrite("elb", clause_0) == expected
    # NOTE(review): clause_1 is byte-identical to clause_0, so this second
    # case adds no coverage -- presumably a different op/count was intended;
    # confirm before extending.
    clause_1 = {
        "type": "tag-count", "op": "gte", "count": 8
    }
    expected = 'size(resource["Tags"].filter(x, ! matches(x.Key, "^aws:.*"))) >= 8'
    assert C7N_Rewriter.type_tag_count_rewrite("elb", clause_1) == expected
def test_type_vpc_rewrite():
    """``vpc`` clauses: a value_from not-in list, and a simple not-equal."""
    list_clause = {
        "key": "VpcId", "op": "not-in", "type": "vpc",
        "value_from": {
            "url": "s3://c7n-resources/some_list.json",
            "format": "json",
            # Same text the original built with str.format(account_id=...).
            "expr": 'not_null(offhours_exceptions."123456789012".account, "[]")',
        },
    }
    assert C7N_Rewriter.type_vpc_rewrite("elb", list_clause) == (
        '! value_from("s3://c7n-resources/some_list.json", "json").jmes_path(\'not_null(offhours_exceptions."123456789012".account, \"[]\")\').contains(resource.VPCId)'
    )
    scalar_clause = {
        "key": "VpcId", "op": "not-equal", "type": "vpc", "value": "vpc-12ab34de"
    }
    assert C7N_Rewriter.type_vpc_rewrite("elb", scalar_clause) == (
        'resource.VPCId != "vpc-12ab34de"'
    )
def test_type_credential_rewrite():
    """A ``credential`` age clause compares now - duration to the rotation time."""
    cred_clause = {
        "key": "access_keys.last_rotated",
        "op": "gte",
        "type": "credential",
        "value": 55,
        "value_type": "age",
    }
    cel_text = C7N_Rewriter.type_credential_rewrite("elb", cred_clause)
    assert cel_text == (
        'now - duration("55d") >= timestamp(resource.credentials().access_keys.last_rotated)'
    )
def test_type_kms_alias_rewrite():
    """A ``kms-alias`` regex clause becomes .matches() on the alias name."""
    alias_clause = {
        "key": "AliasName", "op": "regex", "type": "kms-alias", "value": "^(alias/aws/)"
    }
    cel_text = C7N_Rewriter.type_kms_alias_rewrite("elb", alias_clause)
    assert cel_text == 'resource.kms_alias().AliasName.matches("^(alias/aws/)")'
def test_type_kms_key_rewrite():
    """``kms-key`` clauses: the ``c7n:AliasName`` synthetic key vs. a plain key."""
    synthetic_clause = {
        "key": "c7n:AliasName", "op": "regex", "type": "kms-key",
        "value": "^(alias/enterprise/sns/encrypted)"
    }
    assert C7N_Rewriter.type_kms_key_rewrite("efs", synthetic_clause) == (
        'resource.KmsKeyId.kms_key()["Aliases"][0]["AliasName"].matches("^(alias/enterprise/sns/encrypted)")'
    )
    plain_clause = {
        "key": "AliasName", "op": "regex", "type": "kms-key", "value": "^(alias/aws/)"
    }
    assert C7N_Rewriter.type_kms_key_rewrite("efs", plain_clause) == (
        'resource.KmsKeyId.kms_key()["AliasName"].matches("^(alias/aws/)")'
    )
def test_onhour_rewrite():
    """``onhour`` clauses: an opt-out default schedule, and a skip-days list
    combined with a schedule tag."""
    clause_0 = {
        "default_tz": "et",
        "onhour": 7,
        "opt-out": True,
        "type": "onhour"
    }
    # Opt-out: the presence of a maid_offhours tag disables the filter.
    expected_0 = 'resource.Tags.exists(x, x.key=="maid_offhours") ? false : (now.getDayOfWeek("et") in [0, 1, 2, 3, 4] && now.getHours("et") == 7)'
    assert C7N_Rewriter.onhour_rewrite("efs", clause_0) == expected_0
    clause_1 = {
        "default_tz": "et",
        "onhour": 7,
        "skip-days": ['2019-11-11', '2019-11-28', '2019-12-25', '2020-01-01'],
        "tag": "custodian_downtime",
        "type": "onhour"
    }
    # Skip-days guard first, then the tag-driven resource_schedule() lookup,
    # falling back to the default weekday/hour schedule.
    expected_1 = '! getDate(now) in ["2019-11-11", "2019-11-28", "2019-12-25", "2020-01-01"].map(d, getDate(timestamp(d))) && resource.Tags.exists(x, x.key=="custodian_downtime") ? resource.Tags.key("custodian_downtime").resource_schedule().on.exists(s, now.getDayOfWeek(s.tz) in s.days && now.getHours(s.tz) == s.hour) || (now.getDayOfWeek("et") in [0, 1, 2, 3, 4] && now.getHours("et") == 7) : false'
    assert C7N_Rewriter.onhour_rewrite("efs", clause_1) == expected_1
def test_offhour_rewrite():
    """An opt-out ``offhour`` clause with weekends disabled and a custom tag."""
    offhour_clause = {
        "type": "offhour", "weekends": False, "default_tz": "pt",
        "tag": "datetime", "opt-out": True, "offhour": 20
    }
    cel_text = C7N_Rewriter.offhour_rewrite("efs", offhour_clause)
    assert cel_text == (
        'resource.Tags.exists(x, x.key=="datetime") ? false : (now.getDayOfWeek("pt") in [0, 1, 2, 3, 4, 5, 6] && now.getHours("pt") == 20)'
    )
def test_cross_account_rewrite():
    """``cross-account`` clauses become size() tests over the vault policy,
    optionally filtered by account and org-id whitelists.

    NOTE(review): the expected strings contain an extra closing paren
    (``..."Policy"])) > 0``) -- presumably these mirror the rewriter's actual
    output; confirm against C7N_Rewriter.cross_account_rewrite.
    """
    clause_0 = {
        "type": "cross-account",
    }
    expected_0 = 'size(resource.map(r, r["VaultName"])["policy"]["Policy"])) > 0'
    assert C7N_Rewriter.cross_account_rewrite("glacier", clause_0) == expected_0
    # A literal whitelist adds a filter() over account ids.
    clause_1 = {
        "type": "cross-account",
        "whitelist": ["permitted-account-01", "permitted-account-02"]
    }
    expected_1 = 'size(resource.map(r, r["VaultName"])["policy"]["Policy"]).filter(acct, ! acct in ["permitted-account-01", "permitted-account-02"])) > 0'
    assert C7N_Rewriter.cross_account_rewrite("glacier", clause_1) == expected_1
    # whitelist_from fetches the permitted accounts via json_from()/jmes_path().
    clause_2 = {
        "type": "cross-account",
        "whitelist_from": {
            "expr": "accounts.*.accountNumber",
            "url": "http://server/path/to/data.json"
        }
    }
    expected_2 = 'size(resource.map(r, r["VaultName"])["policy"]["Policy"]).filter(acct, ! acct in json_from("http://server/path/to/data.json", "json").jmes_path("accounts.*.accountNumber"))) > 0'
    assert C7N_Rewriter.cross_account_rewrite("glacier", clause_2) == expected_2
    # whitelist_orgids adds a second filter() over organization ids.
    clause_3 = {
        "type": "cross-account",
        "whitelist_from": {
            "expr": "accounts.*.account",
            "url": "http://server/path/to/data.json"
        },
        "whitelist_orgids": ["o-rhymjmbbe"]
    }
    expected_3 = 'size(resource.map(r, r["VaultName"])["policy"]["Policy"]).filter(acct, ! acct in json_from("http://server/path/to/data.json", "json").jmes_path("accounts.*.account")).filter(p, ! p.attr in ["o-rhymjmbbe"])) > 0'
    assert C7N_Rewriter.cross_account_rewrite("glacier", clause_3) == expected_3
def test_used_rewrite():
    """A ``used`` clause checks membership in all_launch_configuration_names()."""
    expected = 'resource["LaunchConfigurationName"] in all_launch_configuration_names()'
    assert C7N_Rewriter.used_rewrite("asg", {"type": "used"}) == expected
    # The bare-string shorthand goes through the generic primitive() dispatcher.
    assert C7N_Rewriter.primitive("asg", "used") == expected
def test_unused_rewrite():
    """An ``unused`` clause is the negation of the ``used`` membership test."""
    expected = '! resource["LaunchConfigurationName"] in all_launch_configuration_names()'
    assert C7N_Rewriter.unused_rewrite("asg", {"type": "unused"}) == expected
    # The bare-string shorthand goes through the generic primitive() dispatcher.
    assert C7N_Rewriter.primitive("asg", "unused") == expected
def test_is_logging_rewrite():
    """``is-logging`` maps to resource-type-specific access-log checks."""
    clause_0 = {"type": "is-logging"}
    expected_0 = 'resource.get_access_log().exists(a, a["Enabled"])'
    assert C7N_Rewriter.is_logging_rewrite("elb", clause_0) == expected_0
    # The bare-string shorthand dispatches through primitive().
    clause_1 = "is-logging"
    assert C7N_Rewriter.primitive("elb", clause_1) == expected_0
    # app-elb uses the v2 load-balancer attribute instead of the access log.
    clause_2 = {"type": "is-logging"}
    expected_2 = 'resource.get_load_balancer().get("access_logs.s3.enabled")'
    assert C7N_Rewriter.is_logging_rewrite("app-elb", clause_2) == expected_2
    # Unknown resource types are rejected.
    # NOTE(review): bare ``raises`` here (vs. ``pytest.raises`` used in other
    # files) presumably relies on ``from pytest import raises`` earlier in
    # this file -- confirm.
    with raises(ValueError):
        C7N_Rewriter.is_logging_rewrite("nope", clause_2)
def test_is_not_logging_rewrite():
    """An ``is-not-logging`` clause negates the elb access-log check."""
    expected = '! resource.get_access_log().exists(a, a["Enabled"])'
    assert C7N_Rewriter.is_not_logging_rewrite("elb", {"type": "is-not-logging"}) == expected
    # The bare-string shorthand dispatches through primitive().
    assert C7N_Rewriter.primitive("elb", "is-not-logging") == expected
def test_health_event_rewrite():
    """A ``health-event`` clause becomes a size() test over get_health_events()."""
    expected = 'size(resource.get_health_events(["upcoming", "open"])) > 0'
    event_clause = {"type": "health-event", "statuses": ["upcoming", "open"]}
    assert C7N_Rewriter.health_event_rewrite("directory", event_clause) == expected
    # The bare-string shorthand dispatches through primitive().
    assert C7N_Rewriter.primitive("directory", "health-event") == expected
def test_shield_enabled_rewrite():
    """``shield-enabled`` maps to shield_protection(); accounts use shield_subscription()."""
    shield_clause = {"type": "shield-enabled", "state": False}
    assert C7N_Rewriter.shield_enabled_rewrite("elb", shield_clause) == (
        '! resource.shield_protection()'
    )
    assert C7N_Rewriter.primitive("account", shield_clause) == (
        '! resource.shield_subscription()'
    )
def test_waf_enabled_rewrite():
    """A ``waf-enabled`` clause with state False negates web_acls().contains()."""
    waf_clause = {
        "type": "waf-enabled", "state": False,
        "web-acl": "WebACL to allow or restrict by IP",
    }
    assert C7N_Rewriter.waf_enabled_rewrite("distribution", waf_clause) == (
        '! resource.web_acls().contains("WebACL to allow or restrict by IP")'
    )
def test_network_location_rewrite():
    """``network-location`` compares a key across the resource and its
    security-group (or subnet), with optional ignore lists and a
    max-cardinality bound."""
    clause_0 = {
        'compare': ['resource', 'security-group'],
        'ignore': [
            {'Description': 'New VPC Enterprise All Instances SG 2016'},
            {'Description': 'Enterprise All Instances Security Group'},
            {'Description': 'CoreServicesAccess-SG'},
            {'tag:Asset': 'SomeAssetTag'}],
        'key': 'tag:Asset',
        'max-cardinality': 1,
        'missing-ok': False,
        'type': 'network-location'
    }
    # Ignore list first, then the key comparison, then the cardinality test.
    expected_0 = (
        '! (["New VPC Enterprise All Instances SG 2016", "Enterprise All Instances Security Group", "CoreServicesAccess-SG"].contains(resource.Description) || ["SomeAssetTag"].contains(resource.Tags["Asset"])) '
        '&& (resource.SecurityGroupId.security_group().Tags["Asset"] == resource.Tags["Asset"]) '
        '&& (size(resource.SecurityGroupId.security_group()) == 1)'
    )
    assert C7N_Rewriter.network_location_rewrite("ec2", clause_0) == expected_0
    # Without an ignore list, only the comparison and cardinality remain.
    clause_1 = {
        'compare': ['resource', 'subnet'],
        'key': 'tag:Asset',
        'max-cardinality': 1,
        'missing-ok': False,
        'type': 'network-location'
    }
    expected_1 = (
        '(resource.SubnetId.subnet().Tags["Asset"] == resource.Tags["Asset"]) '
        '&& (size(resource.SubnetId.subnet()) == 1)'
    )
    assert C7N_Rewriter.network_location_rewrite("ec2", clause_1) == expected_1
| python | Apache-2.0 | 3a134c10394058c73a6bbe0e4ca7e862ea9707b3 | 2026-01-05T07:13:01.631050Z | false |
cloud-custodian/cel-python | https://github.com/cloud-custodian/cel-python/blob/3a134c10394058c73a6bbe0e4ca7e862ea9707b3/tests/test_package.py | tests/test_package.py | # SPDX-Copyright: Copyright (c) Capital One Services, LLC
# SPDX-License-Identifier: Apache-2.0
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
"""
Test celpy package as a whole. Mostly, this means testing the ``__init__.py`` module
that defines the package.
"""
import json
from unittest.mock import Mock, call, sentinel
import pytest
import lark
import celpy
def test_json_to_cel():
    """GIVEN JSON doc; WHEN json_to_cel(); THEN expected conversions applied."""
    ct = celpy.celtypes
    source = [
        {"bool": True},
        {"numbers": [2.71828, 42]},
        {"null": None},
        {"string": 'embedded "quote"'},
    ]
    converted = celpy.json_to_cel(source)
    assert converted == ct.ListType([
        ct.MapType({ct.StringType("bool"): ct.BoolType(True)}),
        ct.MapType({
            ct.StringType("numbers"): ct.ListType(
                [ct.DoubleType(2.71828), ct.IntType(42)]
            )
        }),
        ct.MapType({ct.StringType("null"): None}),
        ct.MapType({ct.StringType("string"): ct.StringType('embedded "quote"')}),
    ])
def test_json_to_cel_unexpected():
    """GIVEN JSON doc with invalid type; WHEN json_to_cel(); THEN exception raised."""
    doc = {"bytes": b"Ynl0ZXM="}
    with pytest.raises(ValueError):
        # The return value is irrelevant; only the raise matters, so the
        # previously unused ``actual`` binding is dropped.
        celpy.json_to_cel(doc)
def test_encoder():
    """Serialize a CEL MapType with CELJSONEncoder and check the JSON text."""
    ct = celpy.celtypes
    cel_doc = ct.MapType({
        ct.StringType("bool"): ct.BoolType(True),
        ct.StringType("numbers"): ct.ListType(
            [ct.DoubleType(2.71828), ct.UintType(42)]
        ),
        ct.StringType("null"): None,
        ct.StringType("string"): ct.StringType('embedded "quote"'),
        ct.StringType("bytes"): ct.BytesType(bytes([0x62, 0x79, 0x74, 0x65, 0x73])),
        ct.StringType("timestamp"): ct.TimestampType('2009-02-13T23:31:30Z'),
        ct.StringType("duration"): ct.DurationType('42s'),
    })
    serialized = json.dumps(cel_doc, cls=celpy.CELJSONEncoder)
    assert serialized == (
        '{"bool": true, "numbers": [2.71828, 42], "null": null, '
        '"string": "embedded \\"quote\\"", "bytes": "Ynl0ZXM=", '
        '"timestamp": "2009-02-13T23:31:30Z", "duration": "42s"}'
    )
def test_encoder_unknown():
    """An object with no JSON serialization must raise TypeError."""
    with pytest.raises(TypeError):
        # Previously the result was bound to an unused ``json_text`` local.
        json.dumps(sentinel.no_json, cls=celpy.CELJSONEncoder)
def test_decoder():
    """CELJSONDecoder maps JSON values onto CEL types; strings stay strings."""
    ct = celpy.celtypes
    json_text = (
        '{"bool": 1, "numbers": [2.71828, 42], "null": null, '
        '"string": "embedded \\"quote\\"", "bytes": "Ynl0ZXM=", '
        '"timestamp": "2009-02-13T23:31:30Z", "duration": "42s"}'
    )
    decoded = json.loads(json_text, cls=celpy.CELJSONDecoder)
    assert decoded == ct.MapType({
        ct.StringType('bool'): ct.IntType(1),
        ct.StringType('bytes'): ct.StringType('Ynl0ZXM='),
        ct.StringType('duration'): ct.StringType('42s'),
        ct.StringType('null'): None,
        ct.StringType('numbers'): ct.ListType(
            [ct.DoubleType(2.71828), ct.IntType(42)]
        ),
        ct.StringType('string'): ct.StringType('embedded "quote"'),
        ct.StringType('timestamp'): ct.StringType('2009-02-13T23:31:30Z'),
    })
@pytest.fixture
def mock_environment(monkeypatch):
    """A stand-in Environment with a sentinel package and empty annotations.

    NOTE(review): the ``monkeypatch`` argument is never used -- presumably a
    leftover from an earlier version; confirm before removing.
    """
    environment = Mock(
        package=sentinel.Package,
        annotations={},
    )
    return environment
def test_interp_runner(mock_environment):
    """
    GIVEN Environment and AST and mocked Evaluator
    WHEN InterpretedRunner created and evaluated
    THEN Runner uses Environment, AST, and the mocked Evaluator
    """
    def a_function():
        return None
    # A minimal lark Tree stand-in for the literal expression ``true``.
    tree = Mock(
        spec=lark.Tree,
        children=[lark.Token(type_="BOOL_LIT", value="true")],
        data="literal",
    )
    runner = celpy.InterpretedRunner(mock_environment, tree, [a_function])
    outcome = runner.evaluate({"variable": sentinel.variable})
    assert outcome == celpy.celtypes.BoolType(True)
@pytest.fixture
def mock_ast():
    """A real parse tree for the expression ``true``, built with the
    transpiler's tree class.

    Clearing the class-level CEL_PARSER cache forces the parser to be rebuilt
    with ``TranspilerTree`` instead of reusing one from a previous test.
    """
    # Reset the ClassVar CEL_PARSER.
    celpy.CELParser.CEL_PARSER = None
    parser = celpy.CELParser(tree_class=celpy.evaluation.TranspilerTree)
    source = "true"
    tree = parser.parse(source)
    return tree
def test_compiled_runner(mock_environment, mock_ast):
    """
    GIVEN Environment and AST and mocked Evaluator
    WHEN InterpretedRunner created and evaluated
    THEN Runner uses Environment, AST, and the mocked Evaluator
    """
    def a_function():
        return None
    runner = celpy.CompiledRunner(mock_environment, mock_ast, [a_function])
    # The transpiler emits a single assignment to CEL wrapping the literal.
    assert runner.tp.source_text.strip() == (
        "CEL = celpy.evaluation.result(base_activation, "
        "lambda activation: celpy.celtypes.BoolType(True))"
    )
    outcome = runner.evaluate({"variable": sentinel.variable})
    assert outcome == celpy.celtypes.BoolType(True)
@pytest.fixture
def mock_parser(monkeypatch):
    """Replace celpy.CELParser with a mock class whose parse() yields sentinel.AST."""
    parser_class = Mock(return_value=Mock(parse=Mock(return_value=sentinel.AST)))
    monkeypatch.setattr(celpy, "CELParser", parser_class)
    return parser_class
@pytest.fixture
def mock_runner(monkeypatch):
    """Replace celpy.InterpretedRunner with a mock class."""
    runner_class = Mock(return_value=Mock())
    monkeypatch.setattr(celpy, "InterpretedRunner", runner_class)
    return runner_class
@pytest.fixture
def mock_activation(monkeypatch):
    """Replace celpy.Activation with a mock class."""
    activation_class = Mock(return_value=Mock())
    monkeypatch.setattr(celpy, "Activation", activation_class)
    return activation_class
def test_environment(mock_parser, mock_runner, mock_activation):
    """Environment.compile() delegates to CELParser; program() builds a Runner
    and the annotations mapping is preserved."""
    env = celpy.Environment(sentinel.package, {sentinel.variable: celpy.celtypes.UintType})
    tree = env.compile(sentinel.Source)
    assert tree == sentinel.AST
    assert mock_parser.return_value.parse.mock_calls == [call(sentinel.Source)]
    runner = env.program(tree, functions=[sentinel.Function])
    assert runner == mock_runner.return_value
    assert mock_runner.mock_calls == [call(env, sentinel.AST, [sentinel.Function])]
    assert env.annotations[sentinel.variable] == celpy.celtypes.UintType
    # The old-design assertions about Environment.activation() were removed;
    # Activation construction is covered by its own tests.
| python | Apache-2.0 | 3a134c10394058c73a6bbe0e4ca7e862ea9707b3 | 2026-01-05T07:13:01.631050Z | false |
cloud-custodian/cel-python | https://github.com/cloud-custodian/cel-python/blob/3a134c10394058c73a6bbe0e4ca7e862ea9707b3/benches/complex_expression.py | benches/complex_expression.py | """
https://github.com/cloud-custodian/cel-python/issues/68
Performance of the given expression is perfectly awful.
What can be done to make it better?
"""
import timeit
import cProfile
import pstats
from textwrap import dedent
import celpy
import celpy.celtypes
CEL_EXPRESSION_ORIGINAL = """
(
(
!has(class_a.property_a) ?
false : ("Linux" == class_a.property_a)
) &&
(
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("os:/o:centos:centos:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("x-os:/o:centos:centos:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("os:/a:centos:centos:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("x-os:/a:centos:centos:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("p-os:/a:centos:centos:")
)
)
) ?
optional.of("Linux Team") :
(
(
(
!has(class_a.property_a) ?
false : ("Linux" == class_a.property_a)
) &&
(
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("os:/o:debian:debian_linux:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("x-os:/o:debian:debian_linux:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("os:/a:debian:debian:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("x-os:/a:debian:debian:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("p-os:/a:debian:debian_linux:")
)
)
) ?
optional.of("Linux Team") :
(
(
(
!has(class_a.property_a) ?
false : ("Linux" == class_a.property_a)
) &&
(
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("os:/o:fedoraproject:fedora:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("x-os:/o:fedoraproject:fedora:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("os:/a:fedoraproject:fedora:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("x-os:/a:fedoraproject:fedora:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("p-os:/a:fedoraproject:fedora:")
)
)
) ?
optional.of("Linux Team") :
(
(
(
!has(class_a.property_a) ?
false : ("Linux" == class_a.property_a)
) &&
(
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("os:/o:oracle:linux:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("x-os:/o:oracle:linux:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("os:/a:oracle:linux:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("x-os:/a:oracle:linux:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("p-os:/a:oracle:linux:")
)
)
) ?
optional.of("Linux Team") :
(
(
(
!has(class_a.property_a) ?
false : ("Linux" == class_a.property_a)
) &&
(
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("os:/o:redhat:enterprise_linux:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("x-os:/o:redhat:enterprise_linux:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("os:/a:redhat:enterprise_linux:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("x-os:/a:redhat:enterprise_linux:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("p-os:/a:redhat:enterprise_linux:")
)
)
) ?
optional.of("Linux Team") :
(
(
(
!has(class_a.property_a) ?
false : ("Linux" == class_a.property_a)
) && (
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("os:/o:novell:suse_linux:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("x-os:/o:novell:suse_linux:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("os:/a:novell:suse_linux:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("x-os:/a:novell:suse_linux:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("p-os:/a:novell:suse_linux:")
)
)
) ?
optional.of("Linux Team") :
(
(
(
!has(class_a.property_a) ?
false : ("Linux" == class_a.property_a)
) &&
(
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("os:/o:canonical:ubuntu_linux:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("x-os:/o:canonical:ubuntu_linux:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("os:/a:canonical:ubuntu_linux:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("x-os:/a:canonical:ubuntu_linux:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("p-os:/a:canonical:ubuntu_linux:")
)
)
) ?
optional.of("Linux Team") :
(
(
(
!has(class_a.property_a) ?
false : ("Linux" == class_a.property_a)
) &&
(
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("os:/h:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("x-os:/h:")
)
)
) ?
optional.of("Linux Team") :
(
(
(
!has(class_a.property_a) ?
false : ("Windows Server" == class_a.property_a)
) &&
(
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("os:/o:microsoft:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("x-os:/o:microsoft:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("os:/a:microsoft:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("x-os:/a:microsoft:")
)
)
) ?
optional.of("Windows Team") :
(
(
(
!has(class_a.property_a) ?
false : ("Windows Server" == class_a.property_a)
) &&
(
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("os:/h:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("x-os:/h:")
)
)
) ?
optional.of("Windows Team") :
(
(
(
!has(class_a.property_a) ?
false : ("Linux" == class_a.property_a)
) ||
(
!has(class_a.property_a) ?
false : ("Windows Server" == class_a.property_a)
)
) ?
optional.of("Unassigned") :
(
(
(
!has(class_a.property_a) ?
false : ("Windows Workstation" == class_a.property_a)
) &&
(
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("os:/o:microsoft:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("x-os:/o:microsoft:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("os:/a:microsoft:")
) ||
(
!has(class_b.property_b) ?
false : class_b.property_b.contains("x-os:/a:microsoft:")
)
)
) ?
optional.of("Solutions Team") :
(
(
!has(class_a.property_a) ?
false : ("Windows Workstation" == class_a.property_a)
) ?
optional.of("End User") : optional.of("Unknown")))))))))))))
"""
# Trivial string-literal expression: a lower bound for timing overhead.
CEL_EXPRESSION_SHORT = """
"bla bla"
"""
# One has()-guarded ternary, representative of a single rewrite clause.
CEL_EXPRESSION_MEDIUM = """
(!has(class_b.integration_info.type) ?
false:("some value" == class_b.integration_info.type)) ?
optional.of("some value") : optional.of("some other value")
"""
CEL_EXPRESSION_ORIGINAL_NO_OPTIONAL = """
((!has(class_a.property_a) ? false : ("Linux" == class_a.property_a)) && ((!has(class_b.property_b) ? false : class_b.property_b.contains("os:/o:centos:centos:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("x-os:/o:centos:centos:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("os:/a:centos:centos:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("x-os:/a:centos:centos:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("p-os:/a:centos:centos:")))) ? "Linux Team" : (((!has(class_a.property_a) ? false : ("Linux" == class_a.property_a)) && ((!has(class_b.property_b) ? false : class_b.property_b.contains("os:/o:debian:debian_linux:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("x-os:/o:debian:debian_linux:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("os:/a:debian:debian:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("x-os:/a:debian:debian:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("p-os:/a:debian:debian_linux:")))) ? "Linux Team" : (((!has(class_a.property_a) ? false : ("Linux" == class_a.property_a)) && ((!has(class_b.property_b) ? false : class_b.property_b.contains("os:/o:fedoraproject:fedora:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("x-os:/o:fedoraproject:fedora:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("os:/a:fedoraproject:fedora:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("x-os:/a:fedoraproject:fedora:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("p-os:/a:fedoraproject:fedora:")))) ? "Linux Team" : (((!has(class_a.property_a) ? false : ("Linux" == class_a.property_a)) && ((!has(class_b.property_b) ? false : class_b.property_b.contains("os:/o:oracle:linux:")) || (!has(class_b.property_b) ? 
false : class_b.property_b.contains("x-os:/o:oracle:linux:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("os:/a:oracle:linux:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("x-os:/a:oracle:linux:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("p-os:/a:oracle:linux:")))) ? "Linux Team" : (((!has(class_a.property_a) ? false : ("Linux" == class_a.property_a)) && ((!has(class_b.property_b) ? false : class_b.property_b.contains("os:/o:redhat:enterprise_linux:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("x-os:/o:redhat:enterprise_linux:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("os:/a:redhat:enterprise_linux:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("x-os:/a:redhat:enterprise_linux:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("p-os:/a:redhat:enterprise_linux:")))) ? "Linux Team" : (((!has(class_a.property_a) ? false : ("Linux" == class_a.property_a)) && ((!has(class_b.property_b) ? false : class_b.property_b.contains("os:/o:novell:suse_linux:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("x-os:/o:novell:suse_linux:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("os:/a:novell:suse_linux:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("x-os:/a:novell:suse_linux:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("p-os:/a:novell:suse_linux:")))) ? "Linux Team" : (((!has(class_a.property_a) ? false : ("Linux" == class_a.property_a)) && ((!has(class_b.property_b) ? false : class_b.property_b.contains("os:/o:canonical:ubuntu_linux:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("x-os:/o:canonical:ubuntu_linux:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("os:/a:canonical:ubuntu_linux:")) || (!has(class_b.property_b) ? 
false : class_b.property_b.contains("x-os:/a:canonical:ubuntu_linux:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("p-os:/a:canonical:ubuntu_linux:")))) ? "Linux Team" : (((!has(class_a.property_a) ? false : ("Linux" == class_a.property_a)) && ((!has(class_b.property_b) ? false : class_b.property_b.contains("os:/h:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("x-os:/h:")))) ? "Linux Team" : (((!has(class_a.property_a) ? false : ("Windows Server" == class_a.property_a)) && ((!has(class_b.property_b) ? false : class_b.property_b.contains("os:/o:microsoft:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("x-os:/o:microsoft:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("os:/a:microsoft:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("x-os:/a:microsoft:")))) ? "Windows Team" : (((!has(class_a.property_a) ? false : ("Windows Server" == class_a.property_a)) && ((!has(class_b.property_b) ? false : class_b.property_b.contains("os:/h:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("x-os:/h:")))) ? "Windows Team" : (((!has(class_a.property_a) ? false : ("Linux" == class_a.property_a)) || (!has(class_a.property_a) ? false : ("Windows Server" == class_a.property_a))) ? "Unassigned" : (((!has(class_a.property_a) ? false : ("Windows Workstation" == class_a.property_a)) && ((!has(class_b.property_b) ? false : class_b.property_b.contains("os:/o:microsoft:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("x-os:/o:microsoft:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("os:/a:microsoft:")) || (!has(class_b.property_b) ? false : class_b.property_b.contains("x-os:/a:microsoft:")))) ? "Solutions Team" : ((!has(class_a.property_a) ? false : ("Windows Workstation" == class_a.property_a)) ? "End User" : "unknown"))))))))))))
"""
# Stand-ins for the two ``optional`` methods used in the CEL expressions:
# ``optional.of(x)`` simply unwraps to x, and ``optional.none()`` yields None.
# (The first lambda parameter receives the ``optional`` object itself.)
functions = {
    "of": lambda optional, value: value,
    "none": lambda optional, : None
}
def simple_performance(runner_class: type[celpy.Runner] | None = None) -> None:
    """Time the compile, prepare, convert, and evaluate phases; print mean ms.

    :param runner_class: optional ``celpy.Runner`` subclass; ``None`` uses
        the Environment's default runner.

    The original inlined four near-identical timeit calls and bound the
    compile timing to a local named ``compile`` (shadowing the builtin);
    both are fixed by the ``report`` helper below.
    """
    def report(label: str, stmt: str, bindings: dict, number: int) -> None:
        # Run ``stmt`` ``number`` times under ``bindings``; print mean ms.
        elapsed = timeit.timeit(stmt=dedent(stmt), globals=bindings, number=number)
        print(f"{label}: {1_000 * elapsed / number:9.4f} ms")

    env = celpy.Environment(runner_class=runner_class)

    # Phase 1: parse/compile the big expression.
    report(
        "Compile",
        """\
        env.compile(CEL_EXPRESSION_ORIGINAL_NO_OPTIONAL)
        """,
        {
            "env": env,
            "CEL_EXPRESSION_ORIGINAL_NO_OPTIONAL": CEL_EXPRESSION_ORIGINAL_NO_OPTIONAL,
        },
        number=100,
    )

    # Phase 2: build a program from a pre-compiled AST.
    ast = env.compile(CEL_EXPRESSION_ORIGINAL_NO_OPTIONAL)
    report(
        "Prepare",
        """\
        env.program(ast,functions=functions)
        """,
        {"env": env, "ast": ast, "functions": functions},
        number=1_000,
    )

    # Phase 3: convert the JSON activation data to CEL types.
    program = env.program(ast, functions=functions)
    report(
        "Convert",
        """
        {
            "class_a": celpy.json_to_cel({"property_a": "something"}),
            "class_b": celpy.json_to_cel(
                {"title": "something else", "property_b": "some var",
                 "integration_info": {"type": "GitHub"}}),
            "optional": celpy.json_to_cel({})
        }
        """,
        {"celpy": celpy},
        number=1_000,
    )

    # Phase 4: evaluate the prepared program against fixed context data.
    cel_context = {
        "class_a": celpy.json_to_cel({"property_a": "something"}),
        "class_b": celpy.json_to_cel(
            {"title": "something else", "property_b": "some var",
             "integration_info": {"type": "GitHub"}}),
        "optional": celpy.json_to_cel({})
    }
    report(
        "Evaluate",
        """
        program.evaluate(cel_context)
        """,
        {"program": program, "cel_context": cel_context},
        number=100,
    )
    print()
def process(program: celpy.Runner, number: int = 100) -> None:
    """A processing loop that prepares data and evaluates the CEL program.

    :param program: a prepared runner. Typed as the base ``celpy.Runner``
        because ``detailed_profile()`` passes the Environment's default
        runner's program, not necessarily a ``CompiledRunner``.
    :param number: how many prepare/evaluate passes to run.
    """
    for _ in range(number):  # loop index was unused; renamed to ``_``
        cel_context = {
            "class_a": celpy.json_to_cel({"property_a": "something"}),
            "class_b": celpy.json_to_cel(
                {"title": "something else", "property_b": "some var",
                 "integration_info": {"type": "GitHub"}}),
            "optional": celpy.json_to_cel({}),
        }
        result = program.evaluate(cel_context)
        assert result == "unknown"
def detailed_profile():
    """Profile the processing loop with cProfile and print stats sorted by own time."""
    env = celpy.Environment()
    ast = env.compile(CEL_EXPRESSION_ORIGINAL_NO_OPTIONAL)
    program = env.program(ast, functions=functions)
    profiler = cProfile.Profile()
    profiler.enable()
    process(program)
    profiler.disable()
    stats = pstats.Stats(profiler)
    stats.sort_stats(pstats.SortKey.TIME)
    stats.print_stats()
def main():
    """Print a benchmark section for each runner class, then a detailed profile."""
    print("# Performance")
    print()
    sections = (
        ("## Interpreter", celpy.InterpretedRunner),
        ("## Transpiler", celpy.CompiledRunner),
    )
    for title, runner in sections:
        print(title)
        print()
        simple_performance(runner)
        print()
    print("# Profile")
    print()
    detailed_profile()
if __name__ == "__main__":
    # Script entry point: run the benchmark report when executed directly.
    main()
| python | Apache-2.0 | 3a134c10394058c73a6bbe0e4ca7e862ea9707b3 | 2026-01-05T07:13:01.631050Z | false |
cloud-custodian/cel-python | https://github.com/cloud-custodian/cel-python/blob/3a134c10394058c73a6bbe0e4ca7e862ea9707b3/benches/large_resource_set.py | benches/large_resource_set.py | # SPDX-Copyright: Copyright (c) Capital One Services, LLC
# SPDX-License-Identifier: Apache-2.0
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
"""
Evaluate filters of varying complexity on large sets of resources.
See https://github.com/cloud-custodian/cel-python/issues/7
given large cardinality resource sets, it would be good to evaluate the performance of
cel-python against ~1000-~10000 and look for profile based optimization opportunities
as well to have a general sense of the relative performance.
the type casting has me a bit concerned that we may end up with significant overhead.
Each :py:class:`Benchmark` subclass combines several elements.
- The "example" which is either a C7N PolicyCase or a CEL FilterCase
- The "resources" which is a generator to build synthetic resource instances
or a Query to read real cloud resources, or a loader to read a file of recorded cloud resources.
- A "text_from" function which will implement all "text_from" and "value_from" queries.
This can read S3 values, manage a cache, read HTTPS values, or simulate these reads
by reading values from the local filesystem or creating synthetic values.
This can be used for a variety of things.
1. Convert a Policy filter to CEL. Create a :py:class:`Benchmark` class definition.
Use the ``--cel`` option to see the resulting expression.
In some cases, the naive version must be rewritten to improve performance or resiliency
in the face of bad data.
2. Test a CEL expression against actual and synthetic data.
Use the various logging and error limit options to see what is going on.
3. Collect performance profile information for the CEL evaluator.
Use the ``--profile`` option to see where in :py:mod:`celpy` time is being spent.
.. todo:: Cleanup profile output.
    Remove all functions called once; they tend to be uninformative.
Remove all functions with a cumulative time < 0.001 sec.
Remove all calls to functions outside ``celpy``.
"""
import argparse
import collections
import cProfile
import logging
import pstats
import random
import statistics
import sys
import textwrap
import time
from typing import (Any, Callable, Counter, Dict, Iterable, List, Optional,
Union)
import yaml
import celpy
import celpy.c7nlib
import celpy.celtypes
from xlate.c7n_to_cel import C7N_Rewriter
# A JSON document: an object, an array, or a scalar. Object keys are ``str``
# (the previous alias said ``Dict[Dict, Any]``, but a dict is not a hashable
# key type -- JSON member names are strings).
JSON = Union[Dict[str, Any], List[Any], None, bool, str, int, float]
logger = logging.getLogger("Benchmark")
class FilterCase:
    """
    A filter expression in CEL.

    Subclasses override ``filter_expr`` with the CEL source text to benchmark.
    """
    # Intentionally empty in the base class; dedent leaves a single newline.
    filter_expr = textwrap.dedent("""
    """)
class PolicyCase(FilterCase):
    """
    A C7N Policy Document with a ``filters:`` clause.

    The CEL filter expression is built from the policy using the :mod:`xlate` package.
    Subclasses override ``policy_doc`` with the YAML policy text; the translated
    CEL is stored on the instance as ``filter_expr``.
    """
    # Intentionally empty in the base class; subclasses supply the YAML.
    policy_doc = textwrap.dedent("""
    """)

    def __init__(self) -> None:
        # Translate the C7N policy YAML into an equivalent CEL expression.
        self.filter_expr = C7N_Rewriter.c7n_rewrite(self.policy_doc)
class TagAssetPolicy(PolicyCase):
    """EC2 reserved-role compliance policy: notify when an instance uses a
    Custodian reserved IAM role but lacks the required ``ASSET`` tag."""
    policy_doc = textwrap.dedent("""
    name: enterprise-ec2-cloud-custodian-reserved-role-compliance
    resource: ec2
    comment: 'Notify janitorial services about ec2 instances when instances use custodian
      reserved roles but don''t have Custodian''s ASSET Tag.
      '
    actions:
    - cc:
      - janitorialservices@enterprise.com
      cc_from:
        expr: accounts."{account_id}".contacts[?role == `custodian`].email
        format: json
        url: s3://c7n-resources/accounts_aws.json
      from: noreply@enterprise.com
      subject: '[custodian {{ account }}] reserved role improperly used - {{ region }}'
      template: controls-default.html
      to:
      - resource-owner
      - CloudCustodian@enterprise.com
      to_from:
        expr: accounts."{account_id}".contacts[?role == `custodian-support`].email
        format: json
        url: s3://c7n-resources/accounts_aws.json
      transport:
        topic: arn:aws:sns:{region}:123456789012:c7n-notifications
        type: sns
      type: notify
      violation_desc: The following EC2 instance(s) are using a c7n reserved role without
        having the c7n ASSET Tag
    filters:
    - and:
      - key: IamInstanceProfile.Arn
        op: regex
        type: value
        value: (.*)(?=Enterprise-Reserved-CloudCustodian.*)
      - and:
        - key: tag:ASSET
          op: ne
          type: value
          value: CLOUDCUSTODIAN
        - key: tag:ASSET
          op: ne
          type: value
          value: CLOUDCORESERVICES
        - key: tag:ASSET
          type: value
          value: present
    """)
class Mock_EC2:
    """Generator for synthetic EC2 resources."""

    def generate(self, n: int = 1000) -> Iterable[JSON]:
        """
        Yield *n* synthetic EC2 instance documents in describe-instances shape.

        Two fields vary randomly so filters have something to discriminate on:
        the instance profile ARN and the value of the ``ASSET`` tag.

        :param n: number of instances to generate. (Annotation corrected from
            ``Optional[int]``: ``range(None)`` raises, so None was never valid.)
        """
        for i in range(n):
            yield {
                "IamInstanceProfile": {
                    # Randomly matches / misses the policy's reserved-role regex.
                    "Arn": random.choice(["prefix-Enterprise-Reserved-CloudCustodian", "other"]),
                },
                "AmiLaunchIndex": 0,
                "ImageId": "ami-0abcdef1234567890",
                "InstanceId": "i-1234567890abcdef0",
                "InstanceType": "t2.micro",
                "KeyName": "MyKeyPair",
                "LaunchTime": "2018-05-10T08:05:20.000Z",
                "Monitoring": {
                    "State": "disabled"
                },
                "Placement": {
                    "AvailabilityZone": "us-east-2a",
                    "GroupName": "",
                    "Tenancy": "default"
                },
                "PrivateDnsName": "ip-10-0-0-157.us-east-2.compute.internal",
                "PrivateIpAddress": "10.0.0.157",
                "ProductCodes": [],
                "PublicDnsName": "",
                "State": {
                    "Code": 0,
                    "Name": "pending"
                },
                "StateTransitionReason": "",
                "SubnetId": "subnet-04a636d18e83cfacb",
                "VpcId": "vpc-1234567890abcdef0",
                "Architecture": "x86_64",
                "BlockDeviceMappings": [],
                "ClientToken": "",
                "EbsOptimized": False,
                "Hypervisor": "xen",
                "NetworkInterfaces": [
                    {
                        "Attachment": {
                            "AttachTime": "2018-05-10T08:05:20.000Z",
                            "AttachmentId": "eni-attach-0e325c07e928a0405",
                            "DeleteOnTermination": True,
                            "DeviceIndex": 0,
                            "Status": "attaching"
                        },
                        "Description": "",
                        "Groups": [
                            {
                                "GroupName": "MySecurityGroup",
                                "GroupId": "sg-0598c7d356eba48d7"
                            }
                        ],
                        "Ipv6Addresses": [],
                        "MacAddress": "0a:ab:58:e0:67:e2",
                        "NetworkInterfaceId": "eni-0c0a29997760baee7",
                        "OwnerId": "123456789012",
                        "PrivateDnsName": "ip-10-0-0-157.us-east-2.compute.internal",
                        "PrivateIpAddress": "10.0.0.157",
                        "PrivateIpAddresses": [
                            {
                                "Primary": True,
                                "PrivateDnsName": "ip-10-0-0-157.us-east-2.compute.internal",
                                "PrivateIpAddress": "10.0.0.157"
                            }
                        ],
                        "SourceDestCheck": True,
                        "Status": "in-use",
                        "SubnetId": "subnet-04a636d18e83cfacb",
                        "VpcId": "vpc-1234567890abcdef0",
                        "InterfaceType": "interface"
                    }
                ],
                "RootDeviceName": "/dev/xvda",
                "RootDeviceType": "ebs",
                "SecurityGroups": [
                    {
                        "GroupName": "MySecurityGroup",
                        "GroupId": "sg-0598c7d356eba48d7"
                    }
                ],
                "SourceDestCheck": True,
                "StateReason": {
                    "Code": "pending",
                    "Message": "pending"
                },
                "Tags": [
                    {
                        "Key": "ASSET",
                        # Randomly matches / misses the policy's tag tests.
                        "Value":
                            random.choice(["CLOUDCUSTODIAN", "CLOUDCORESERVICES", None, "OTHER"])
                    },
                ],
                "VirtualizationType": "hvm",
                "CpuOptions": {
                    "CoreCount": 1,
                    "ThreadsPerCore": 1
                },
                "CapacityReservationSpecification": {
                    "CapacityReservationPreference": "open"
                },
                "MetadataOptions": {
                    "State": "pending",
                    "HttpTokens": "optional",
                    "HttpPutResponseHopLimit": 1,
                    "HttpEndpoint": "enabled"
                }
            }
class Benchmark:
    """
    Define a benchmark performance test.

    In effect, it's this::

        GIVEN a FilterCase with CEL (or a PolicyCase with the C7N policy version of the CEL)
        AND a collection of resources (either actual or mocked)
        AND an implementation of text_from() to fetch value_from: data (either actual or mocked)
        WHEN CEL expression evaluated for all resources
        THEN we have some benchmark metrics
        AND we can have profiling data if that's useful

    Each subclass provides the following class-level objects.

    The ``example`` value must be an instance of FilterCase or its subclass PolicyCase.

    The ``resources`` must be a generator.
    For example::

        resources = yaml.load_all("path/to/resources", Loader=yaml.SafeLoader)

    Or::

        resources = Mock_EC2().generate(n=1000)

    The ``text_from`` is an optional Callable that's used to replace the c7nlib
    function to provide values for this benchmark performance test.
    """
    example: FilterCase          # the CEL (or translated C7N) filter under test
    resources: Iterable[JSON]    # resource documents to evaluate the filter against
    text_from: Optional[Callable[..., celpy.celtypes.Value]] = None  # optional c7nlib.text_from replacement

    def run(self, error_limit: Optional[int] = None) -> None:
        """
        Compile the example's CEL, evaluate it once per resource, and collect
        per-resource timings, result frequencies, and evaluation errors.

        :param error_limit: if given, re-raise after this many CELEvalErrors;
            ``None`` tolerates any number of errors.
        """
        self.run_times: List[float] = []         # ms per successful evaluation
        self.exception_times: List[float] = []   # ms per failed evaluation
        # Keys are repr() strings of the exceptions (annotation corrected from
        # Counter[Exception]; see the ``repr(ex)`` increment below).
        self.errors: Counter[str] = collections.Counter()
        self.results: Counter[celpy.celtypes.Value] = collections.Counter()
        decls = {"resource": celpy.celtypes.MapType}
        decls.update(celpy.c7nlib.DECLARATIONS)
        cel_env = celpy.Environment(annotations=decls)
        ast = cel_env.compile(self.example.filter_expr)
        program = cel_env.program(ast, functions=celpy.c7nlib.FUNCTIONS)
        if self.text_from:
            # Inject this benchmark's text_from replacement into c7nlib.
            celpy.c7nlib.__dict__['text_from'] = self.text_from
        overall_start = time.perf_counter()
        for resource in self.resources:
            start = time.perf_counter()
            activation = {
                "resource": celpy.json_to_cel(resource)
            }
            try:
                result = program.evaluate(activation)
                end = time.perf_counter()
                self.run_times.append((end-start)*1000)
                self.results[result] += 1
            except celpy.CELEvalError as ex:
                end = time.perf_counter()
                self.exception_times.append((end-start)*1000)
                self.errors[repr(ex)] += 1
                logger.debug(repr(ex))
                logger.debug(resource)
                if error_limit:
                    # Count down; re-raise when the tolerated error budget runs out.
                    error_limit -= 1
                    if error_limit == 0:
                        raise
        overall_end = time.perf_counter()
        self.overall_run = (overall_end-overall_start)*1000  # total wall time, ms
        self.volume = len(self.run_times) + len(self.exception_times)

    def report(self):
        """Print a summary: timings, result frequencies, and any exceptions."""
        print(f"Filter : {self.example.filter_expr}")
        print(f"Resources : {self.volume:,d}")
        print(f"Total Time: {self.overall_run:,.1f} ms")
        # NOTE(review): min()/max()/mean() raise if every evaluation failed
        # (run_times empty) -- confirm that's acceptable for this harness.
        print(f"Range : {min(self.run_times):.1f} ms - {max(self.run_times):.1f} ms")
        print(f"Mean : {statistics.mean(self.run_times):.2f} ms")
        print(f"Median: {statistics.median(self.run_times):.2f} ms")
        print()
        print("Results")
        for result, freq in self.results.most_common():
            print(f" {freq:6,d}: {result}")
        if self.errors:
            print()
            print("Exceptions")
            for ex, freq in self.errors.most_common():
                print(f" {freq:6,d}: {ex}")
def get_options(benchmarks: List[str], argv: Optional[List[str]] = None) -> argparse.Namespace:
    """Parse the command-line options for the benchmark runner.

    :param benchmarks: names of the defined Benchmark subclasses; these are
        the valid choices for the positional arguments.
    :param argv: argument list to parse; defaults to ``sys.argv[1:]`` at call
        time. (The previous default evaluated ``sys.argv[1:]`` once at import
        time, silently capturing a possibly stale copy.)
    :returns: the parsed :class:`argparse.Namespace`.
    """
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cel", "-c", action='store_true', default=False,
        help="Show Cel Expression")
    parser.add_argument(
        "--debug", "-d", action='store_true', default=False,
        help="Show benchmark debugging")
    parser.add_argument(
        "--error_limit", "-e", action='store', type=int, default=None,
        help="Upper bound of number of errors to tolerate, -e1 stops on the first error")
    parser.add_argument(
        "--profile", "-p", action="store_true", default=False,
        help="Collect profiling for all benchmarks"
    )
    parser.add_argument("benchmarks", nargs="*", choices=benchmarks)
    return parser.parse_args(argv)
class TagAssetBenchmark(Benchmark):
    """
    This uses a version of the enterprise-ec2-cloud-custodian-reserved-role-compliance policy.
    It supplies a pool of 1,000 synthetic EC2 instances.
    """
    example = TagAssetPolicy()

    def __init__(self) -> None:
        # A generator stored as a class attribute would be shared by every
        # instance and exhausted after the first run(); build a fresh stream
        # of synthetic resources per instance instead.
        self.resources = Mock_EC2().generate(n=1000)
if __name__ == "__main__":
    logging.basicConfig()
    # Benchmarks are discovered by subclassing; CLI names must match class names.
    benchmark_classes = {c.__name__: c for c in Benchmark.__subclasses__()}
    options = get_options(list(benchmark_classes))
    if options.debug:
        logger.setLevel(logging.DEBUG)
    if options.profile:
        pr = cProfile.Profile()
        pr.enable()
    for benchmark in options.benchmarks:
        # Instantiate the benchmark that was actually requested
        # (previously this always ran TagAssetBenchmark regardless of the name).
        b = benchmark_classes[benchmark]()
        if options.cel:
            # Show the policy name (from the YAML source) and the translated CEL.
            # Fixed: ``b.example.policy`` never existed; the YAML text is
            # ``policy_doc`` and must be parsed to get its ``name``.
            if isinstance(b.example, PolicyCase):
                policy = yaml.safe_load(b.example.policy_doc)
                print(f"Policy {policy['name']}")
            multiline = '\n&& '.join(b.example.filter_expr.split('&&'))
            print(f"{multiline}")
        else:
            b.run(error_limit=options.error_limit)
            b.report()
    if options.profile:
        pr.disable()
        stats = pstats.Stats(pr).strip_dirs()
        stats.sort_stats(pstats.SortKey.TIME).print_stats(0.20)
| python | Apache-2.0 | 3a134c10394058c73a6bbe0e4ca7e862ea9707b3 | 2026-01-05T07:13:01.631050Z | false |
cloud-custodian/cel-python | https://github.com/cloud-custodian/cel-python/blob/3a134c10394058c73a6bbe0e4ca7e862ea9707b3/features/environment.py | features/environment.py | """
Environment definition for Behave acceptance test suite.
"""
from functools import partial
import os
from types import SimpleNamespace
from unittest.mock import Mock, patch
import celpy.c7nlib
def mock_text_from(context, url):
    """
    Stand-in for :py:func:`celpy.c7nlib.text_from`: instead of fetching the
    URL, look the text up in the scenario's ``value_from_data`` mapping.
    Returns ``None`` when the URL was never registered by a GIVEN step.
    """
    registered = context.value_from_data
    return registered.get(url)
def before_scenario(context, scenario):
    """
    Be sure there's a place to store test scenario files.

    Also inject an implementation of the low-level :py:func:`celpy.c7nlib.text_from`
    function that reads from data provided here.

    Check for command-line or environment option to pick the Runner to be used.
    Use ``-D runner=interpreted`` or ``compiled``
    Or set environment variable ``CEL_RUNNER=interpreted`` or ``compiled``
    """
    # context.data used by the CEL conformance test suite converted from textproto.
    context.data = {}
    context.data['disable_check'] = False
    context.data['type_env'] = {}  # name: type association
    context.data['bindings'] = {}  # name: value association
    context.data['container'] = ""  # If set, can associate a type binding from local proto files.
    context.data['json'] = []
    RUNNERS = {"interpreted": celpy.InterpretedRunner, "compiled": celpy.CompiledRunner}
    # The environment variable is read first; the -D option below overrides it.
    try:
        context.data['runner'] = RUNNERS[os.environ.get("CEL_RUNNER", "interpreted")]
    except KeyError:
        print(f"CEL_RUNNER= must be from {RUNNERS.keys()}")
        raise
    if "runner" in context.config.userdata:
        try:
            context.data['runner'] = RUNNERS[context.config.userdata["runner"]]
        except KeyError:
            print(f"-D runner= must be from {RUNNERS.keys()}")
            raise
    # context.cel used by the integration test suite.
    context.cel = {}
    # Variables to be provided to CEL
    context.cel['activation'] = {
        "resource": None,
        "now": None,
        # "C7N": None, A namespace with the current filter.
    }
    context.cel['filter'] = Mock(name="mock filter", manager=Mock(config=Mock()))
    # A mapping from URL to text used by :py:func:`mock_text_from`.
    context.value_from_data = {}
    # Mock used by the integration test suite.
    text_from = partial(mock_text_from, context)
    text_from.__name__ = "text_from"  # some callers introspect the function name
    # Save the real implementation so after_scenario() can restore it.
    context.saved_function = celpy.c7nlib.text_from
    celpy.c7nlib.__dict__['text_from'] = text_from
def after_scenario(context, scenario):
    """Restore the original ``text_from`` that ``before_scenario`` replaced with a mock."""
    setattr(celpy.c7nlib, 'text_from', context.saved_function)
| python | Apache-2.0 | 3a134c10394058c73a6bbe0e4ca7e862ea9707b3 | 2026-01-05T07:13:01.631050Z | false |
cloud-custodian/cel-python | https://github.com/cloud-custodian/cel-python/blob/3a134c10394058c73a6bbe0e4ca7e862ea9707b3/features/steps/c7n_integration.py | features/steps/c7n_integration.py | # SPDX-Copyright: Copyright (c) Capital One Services, LLC
# SPDX-License-Identifier: Apache-2.0
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
"""
C7N Integration Bindings for Behave testing.
These step definitions create C7N-like CEL expressions from the source policy YAML and then evaluate
those CEL expressions against a given document.
This builds the global objects expected in an activation
- ``resource`` is a CEL representation of the cloud resource
- ``now`` is the current time
- ``event`` is the activation event, if needed.
When the CEL is evaluated, the C7NContext manager is used to provide a filter instance.
This also uses the :py:class:`celpy.c7nlib.C7N_Interpreted_Runner` class to provide
access to C7N caches to the c7nlib functions.
"""
import json
from ast import literal_eval
from types import SimpleNamespace
from unittest.mock import Mock
from behave import *
# from dateutil.parser import parse as parse_date
from pendulum import parse as parse_date
import celpy
import celpy.c7nlib
import celpy.celtypes
from xlate.c7n_to_cel import C7N_Rewriter
@given(u'policy text')
def step_impl(context):
    """Store the scenario's C7N policy YAML (the step's text block) for later translation."""
    context.cel['policy'] = context.text


@given(u'celtypes.TimestampType configured with TZ_ALIASES {alias_dict}')
def step_impl(context, alias_dict):
    """Record timezone aliases (a Python-literal dict) to install before evaluation."""
    aliases = literal_eval(alias_dict)
    context.cel["TZ_ALIASES"] = aliases


@given(u'resource value {value}')
def step_impl(context, value):
    """Parse the JSON resource document and store it in the activation as ``resource``."""
    resource = json.loads(value)
    context.cel['activation']["resource"] = celpy.json_to_cel(resource)


@given(u'now value {timestamp}')
def step_impl(context, timestamp):
    """Parse the timestamp and store it in the activation as ``now``."""
    context.cel['activation']["now"] = celpy.celtypes.TimestampType(parse_date(timestamp))


@given(u'event value {value}')
def step_impl(context, value):
    """Parse the JSON event document and store it in the activation as ``event``."""
    resource = json.loads(value)
    context.cel['activation']["event"] = celpy.json_to_cel(resource)


@given(u'url {url} has text')
def step_impl(context, url):
    """Register the step's text block as the body served for *url* by the text_from mock."""
    context.value_from_data[url] = context.text
def build_mock_resources(context):
    """
    Examine the values cached by GIVEN steps and build the corresponding mocks.

    Currently handles ``get_instance_image``: if GIVEN steps supplied fields,
    attach a mock returning a complete image description with those fields
    folded in; anything unspecified keeps a fixed default.
    """
    if context.cel.get('get_instance_image'):
        # Fields captured by GIVEN steps, with defaults for the rest.
        timestamp = context.cel['get_instance_image'].get("CreateDate", "2020-01-18T19:20:21Z")
        name = context.cel['get_instance_image'].get("Name", "RHEL-8.0.0_HVM-20190618-x86_64-1-Hourly2-GP2")
        instance_image={
            "VirtualizationType": "hvm",
            "Description": "Provided by Red Hat, Inc.",
            "PlatformDetails": "Red Hat Enterprise Linux",
            "EnaSupport": True,
            "Hypervisor": "xen",
            "State": "available",
            "SriovNetSupport": "simple",
            "ImageId": "ami-1234567890EXAMPLE",
            "UsageOperation": "RunInstances:0010",
            "BlockDeviceMappings": [
                {
                    "DeviceName": "/dev/sda1",
                    "Ebs": {
                        "SnapshotId": "snap-111222333444aaabb",
                        "DeleteOnTermination": True,
                        "VolumeType": "gp2",
                        "VolumeSize": 10,
                        "Encrypted": False
                    }
                }
            ],
            "Architecture": "x86_64",
            "ImageLocation": "123456789012/RHEL-8.0.0_HVM-20190618-x86_64-1-Hourly2-GP2",
            "RootDeviceType": "ebs",
            "OwnerId": "123456789012",
            "RootDeviceName": "/dev/sda1",
            "CreationDate": timestamp,
            "Public": True,
            "ImageType": "machine",
            "Name": name
        }
        context.cel['filter'].get_instance_image = Mock(
            return_value=instance_image
        )
@given(u'C7N.filter has get_instance_image result with {field} of {value}')
def step_impl(context, field, value):
    """
    Assemble the values to build the filter's :py:meth:`get_instance_image` response.
    That method returns the relevant image description.
    """
    # Save the values for later mock object assembly (see build_mock_resources()).
    # There are two variants: CreateDate and Name
    context.cel.setdefault('get_instance_image', {})[field] = value
@given(u'C7N.filter has get_metric_statistics result with {statistics}')
def step_impl(context, statistics):
    """
    The C7N filter ``get_metric_statistics()`` response.

    Two APIs are reflected here: an old-style one that may work with current C7N,
    and the preferred one after CELFilter is refactored.
    """
    # Current API: the code reaches through session_factory().client().get_metric_statistics().
    context.cel['filter'].manager.session_factory = Mock(
        name="mock filter session_factory()",
        return_value=Mock(
            name="mock filter session_factory instance",
            client=Mock(
                name="mock filter session_factory().client()",
                return_value=Mock(
                    name="mock filter client instance",
                    get_metric_statistics=Mock(
                        name="mock filter client get_metric_statistics()",
                        return_value=json.loads(statistics)
                    )
                )
            )
        )
    )
    # Preferred API: a direct get_resource_statistics() helper on the filter.
    context.cel['filter'].get_resource_statistics = Mock(
        return_value=json.loads(statistics)["Datapoints"]
    )
@given(u'C7N.filter manager has get_model result of {model}')
def step_impl(context, model):
    """Mock the resource model so its ``dimension`` attribute is *model*."""
    context.cel['filter'].manager.get_model = Mock(
        name="mock filter.manager.get_model()",
        return_value=Mock(
            name="mock filter.manager.model",
            dimension=model
        )
    )


@given(u'C7N.filter manager has config with {name} = {value}')
def step_impl(context, name, value):
    """Set a single attribute on the mocked filter manager's config."""
    setattr(context.cel['filter'].manager.config, name, value)


@given(u'C7N.filter has resource type of {resource_type}')
def step_impl(context, resource_type):
    """Record the resource-type string on the mocked filter manager."""
    context.cel['filter'].manager.resource_type = resource_type


@given(u'C7N.filter has get_related result with {sg_document}')
def step_impl(context, sg_document):
    """Mock ``get_related()`` to return the given JSON document (e.g. a security group)."""
    context.cel['filter'].get_related = Mock(
        name="mock filter.get_related()",
        return_value=json.loads(sg_document),
    )
@given(u'C7N.filter has flow_logs result with {flow_logs}')
def step_impl(context, flow_logs):
    """
    Mock the flow-log lookup. Both API styles are provided: the current
    client-based ``describe_flow_logs()`` and the preferred ``get_flow_logs()``.
    """
    # Current API: reach through session_factory().client().describe_flow_logs().
    context.cel['filter'].manager.session_factory = Mock(
        name="mock filter session_factory()",
        return_value=Mock(
            name="mock filter session_factory instance",
            client=Mock(
                name="mock filter session_factory().client()",
                return_value=Mock(
                    name="mock filter client instance",
                    describe_flow_logs=Mock(
                        name="mock filter client describe_flow_logs()",
                        return_value={"FlowLogs": json.loads(flow_logs)}
                    )
                )
            )
        )
    )
    # Preferred API.
    context.cel['filter'].get_flow_logs=Mock(
        return_value={"FlowLogs": json.loads(flow_logs)}
    )


@given(u'C7N.filter has get_credential_report result with {credential_report}')
def step_impl(context, credential_report):
    """Mock ``get_credential_report()`` to return the given JSON document."""
    context.cel['filter'].get_credential_report=Mock(
        return_value=json.loads(credential_report)
    )


@given(u'C7N.filter has get_matching_aliases result with {alias_detail}')
def step_impl(context, alias_detail):
    """Mock ``get_matching_aliases()`` to return the given JSON document."""
    context.cel['filter'].get_matching_aliases=Mock(
        return_value=json.loads(alias_detail)
    )
def evaluate(context):
    """
    Compile and run the CEL expression stored in ``context.cel['source']``.

    This does not use the :py:class:`celpy.c7nlib.C7NContext`.
    Instead, it provides the context and filter as arguments to :meth:`evaluate`.

    The outcome lands in ``context.cel['result']``: either the evaluated value
    or the :exc:`celpy.CELEvalError`, so THEN steps can assert on both cases.
    """
    decls = {
        "resource": celpy.celtypes.MapType,
        "now": celpy.celtypes.TimestampType,
    }
    decls.update(celpy.c7nlib.DECLARATIONS)
    context.cel['env'] = celpy.Environment(
        annotations=decls,
        runner_class=celpy.c7nlib.C7N_Interpreted_Runner
    )
    context.cel['ast'] = context.cel['env'].compile(context.cel['source'])
    context.cel['prgm'] = context.cel['env'].program(context.cel['ast'], functions=celpy.c7nlib.FUNCTIONS)
    # Build any mocks requested by earlier GIVEN steps (e.g. get_instance_image).
    build_mock_resources(context)
    if "TZ_ALIASES" in context.cel:
        # NOTE(review): this mutates a class-level mapping; aliases may persist
        # into later scenarios -- confirm that's intended.
        celpy.celtypes.TimestampType.TZ_ALIASES.update(context.cel["TZ_ALIASES"])
    try:
        context.cel['result'] = context.cel['prgm'].evaluate(
            context=context.cel['activation'],
            filter=context.cel['filter'])
    except celpy.CELEvalError as ex:
        context.cel['result'] = ex
@when(u'CEL filter is built and evaluated')
def step_impl(context):
    """Translate the stored C7N policy to CEL, then compile and evaluate it."""
    context.cel['source'] = C7N_Rewriter.c7n_rewrite(context.cel['policy'])
    print(f"\nCEL: {context.cel['source']}\n")
    evaluate(context)


@when(u'CEL filter {cel_text} is evaluated')
def step_impl(context, cel_text):
    """Compile and evaluate the CEL expression given directly in the step."""
    context.cel['source'] = cel_text
    evaluate(context)
@then(u'result is {result}')
def step_impl(context, result):
    """
    Check the evaluation outcome recorded by :func:`evaluate`.

    :param result: ``True``, ``False``, or ``CELEvalError``.
    :raises Exception: if the step names any other expected result.
    """
    error_message = f"{context.cel['source']} evaluated with {context.cel['activation']} is {context.cel['result']}, expected {result!r}"
    if result in ("True", "False"):
        expected = result == "True"
        assert context.cel['result'] == expected, error_message
    elif result == "CELEvalError":
        # Fix: attach the diagnostic message here too; previously this assert
        # produced a bare AssertionError with no context.
        assert isinstance(context.cel['result'], celpy.CELEvalError), error_message
    else:
        raise Exception(f"Invalid THEN step 'result is {result}'")
@then(u'CEL text is {translation}')
def step_impl(context, translation):
    """Assert that the translated CEL source matches the expected text exactly."""
    assert context.cel['source'] == translation, f"{context.cel['source']!r} != {translation!r}"
| python | Apache-2.0 | 3a134c10394058c73a6bbe0e4ca7e862ea9707b3 | 2026-01-05T07:13:01.631050Z | false |
cloud-custodian/cel-python | https://github.com/cloud-custodian/cel-python/blob/3a134c10394058c73a6bbe0e4ca7e862ea9707b3/features/steps/integration_binding.py | features/steps/integration_binding.py | # SPDX-Copyright: Copyright (c) Capital One Services, LLC
# SPDX-License-Identifier: Apache-2.0
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
"""
CELPY Integration Bindings for Behave testing.
These step definitions import and execute ``celpy`` features directly.
This is used by the feature files created from the ``.textproto`` source.
We use an intermediate form of the textproto definition of each test case.
Most simple objects are represented as ``Value(value_type='type_name', source='value')"``.
The type name is mapped to a ``celtypes`` type or a native Python type.
The value text is then supplied to create the expected object.
This means interpreting textproto escape rules for string and bytes values.
These are not the same as native Python escapes.
Map and List values are aggregates that work well in this schema.
Protobuf objects can be quite complex, a separate tool creates
the intermediate form used by these step definitions.
Error Matching
===============
We have an error matching problem.
1. Errors are not named consistently in the tests or the specification.
2. It may be that the exact error doesn't actually matter.
Consider two cases, where the error identified appears inconsistent.
- error_case: ``"2 / 0 > 4 ? 'baz' : 'quux'"`` --> "division by zero"
- error_right: ``"true && 1/0 != 0"`` --> "no matching overload"
Sometimes (``2 / 0 > 4``) the first exception is preserved.
Other times (``1/0 != 0``) the second exception is preserved.
This may mean the error detail doesn't matter, as long as an error was spotted.
This can explain "return error for overflow" as a vague-looking error response.
Use ``-D match=exact`` to do exact error matching. The default is "any error will do."
"""
import logging
import sys
from enum import Enum, auto
from pathlib import Path
try:
    from types import NoneType
except ImportError:
    # Python < 3.10: ``types.NoneType`` doesn't exist; synthesize it.
    NoneType = type(None)
from typing import (Any, Callable, Dict, List, NamedTuple, Optional, Tuple,
Type, Union, cast)
from behave import *
import celpy.celtypes
import celpy.evaluation
from celpy import CELEvalError, Environment
from celpy.celparser import CELParseError
from celpy.celtypes import *
logger = logging.getLogger(__name__)
class Bindings(NamedTuple):
    """Variable bindings for a test case: a list of name/value binding dictionaries."""
    bindings: List[Dict[str, Any]]
class TestAllTypes(celpy.celtypes.MessageType):
"""
An example of a (hyper-complex) protobuf MessageType class.
https://github.com/google/cel-spec/blob/master/proto/test/v1/proto3/test_all_types.proto
There are (up to) 62 different kinds of fields, each with a distinct
default value.
Note the VARIETY of contexts.
- "TestAllTypes{list_value: [1.0, 'one']}" -> an ObjectValue wrapping a ListType
- "TestAllTypes{list_value: []}" -> an ObjectValue wrapping a ListType instance
- "TestAllTypes{list_value: [1.0, 'one']}.list_value" -> the ListType instance
- "TestAllTypes{list_value: []}.list_value" -> the ListType instance
- "TestAllTypes{}.list_value" -> the ListType
Also note that range checks are part of the acceptance test suite.
- ``single_float_wrapper`` -- 1e−126 <= x < 1e+127
- ``single_int32_wrapper`` -- -2**32 <= x < 2**31
- ``single_uint32_wrapper`` -- 0 <= x < 2**32
TODO: Refactor into an external module and apply as a type environment Annotation.
The complete list of simple attributes in Python and protobuf notation:
- single_int32: int = field(default=0) # int32 single_int32 = 1;
- single_int64: int = field(default=0) # int64 single_int64 = 2;
- single_uint32: int = field(default=0) # uint32 single_uint32 = 3;
- single_uint64: int = field(default=0) # uint64 single_uint64 = 4;
- single_sint32: int = field(default=0) # sint32 single_sint32 = 5;
- single_sint64: int = field(default=0) # sint64 single_sint64 = 6;
- single_fixed32: int = field(default=0) # fixed32 single_fixed32 = 7;
- single_fixed64: int = field(default=0) # fixed64 single_fixed64 = 8;
- single_sfixed32: int = field(default=0) # sfixed32 single_sfixed32 = 9;
- single_sfixed64: int = field(default=0) # sfixed64 single_sfixed64 = 10;
- single_float: float = field(default=0) # float single_float = 11;
- single_double: float = field(default=0) # double single_double = 12;
- single_bool: bool = field(default=0) # bool single_bool = 13;
- single_string: str = field(default="") # string single_string = 14;
- single_bytes: bytes = field(default=b"") # bytes single_bytes = 15;
- single_any: Any = field(default=None) # google.protobuf.Any single_any = 100;
- single_duration: DurationType = field(default=None) # google.protobuf.Duration single_duration = 101;
- single_timestamp: TimestampType = field(default=None) # google.protobuf.Timestamp single_timestamp = 102;
- single_struct: MessageType = field(default=None) # google.protobuf.Struct single_struct = 103;
- single_value: Any = field(default=None) # google.protobuf.Value single_value = 104;
- single_int64_wrapper: IntType = field(default=IntType(0)) # google.protobuf.Int64Value single_int64_wrapper = 105;
- single_int32_wrapper: IntType = field(default=IntType(0)) # google.protobuf.Int32Value single_int32_wrapper = 106;
- single_double_wrapper: DoubleType = field(default=DoubleType(0)) # google.protobuf.DoubleValue single_double_wrapper = 107;
- single_float_wrapper: DoubleType = field(default=DoubleType(0)) # google.protobuf.FloatValue single_float_wrapper = 108;
- single_uint64_wrapper: UintType = field(default=UintType(0)) # google.protobuf.UInt64Value single_uint64_wrapper = 109;
- single_uint32_wrapper: UintType = field(default=UintType(0)) # google.protobuf.UInt32Value single_uint32_wrapper = 110;
- single_string_wrapper: StringType = field(default=StringType("")) # google.protobuf.StringValue single_string_wrapper = 111;
- single_bool_wrapper: BoolType = field(default=BoolType(False)) # google.protobuf.BoolValue single_bool_wrapper = 112;
- single_bytes_wrapper: BytesType = field(default=BytesType(b"")) # google.protobuf.BytesValue single_bytes_wrapper = 113;
- list_value: ListType = field(default=ListType([])) # google.protobuf.ListValue list_value = 114;
- repeated int32 repeated_int32 = 31;
- repeated int64 repeated_int64 = 32;
- repeated uint32 repeated_uint32 = 33;
- repeated uint64 repeated_uint64 = 34;
- repeated sint32 repeated_sint32 = 35;
- repeated sint64 repeated_sint64 = 36;
- repeated fixed32 repeated_fixed32 = 37;
- repeated fixed64 repeated_fixed64 = 38;
- repeated sfixed32 repeated_sfixed32 = 39;
- repeated sfixed64 repeated_sfixed64 = 40;
- repeated float repeated_float = 41;
- repeated double repeated_double = 42;
- repeated bool repeated_bool = 43;
- repeated string repeated_string = 44;
- repeated bytes repeated_bytes = 45;
- repeated NestedMessage repeated_nested_message = 51;
- repeated NestedEnum repeated_nested_enum = 52;
- repeated string repeated_string_piece = 53 [ctype = STRING_PIECE];
- repeated string repeated_cord = 54 [ctype = CORD];
- repeated NestedMessage repeated_lazy_message = 55 [lazy = true];
Some more complex attributes
- NestedMessage single_nested_message = 21;
- NestedEnum single_nested_enum = 22;
- NestedMessage standalone_message = 23;
- NestedEnum standalone_enum = 24;
Many others
- map<string, string> map_string_string = 61;
- map<int64, NestedTestAllTypes> map_int64_nested_type = 62;
"""
range_check = {
"single_float_wrapper": lambda x: -1e+127 <= x < 1e+127,
"single_int32_wrapper": lambda x: -(2**32) <= x < 2**31,
"single_uint32_wrapper": lambda x: 0 <= x < 2**32,
}
def __new__(cls, source=None, *args, **kwargs) -> 'TestAllTypes':
    """
    Construct a TestAllTypes message.

    :param source: ``None`` for an empty message, an existing
        :class:`celpy.celtypes.MessageType` to copy, or another initializer.
    :param kwargs: field values; validated against ``range_check``.
    :raises ValueError: when a field value is outside its allowed range.
    """
    logger.debug(f"TestAllTypes(source={source}, *{args}, **{kwargs})")
    if source is None:
        return cast(TestAllTypes, super().__new__(cls))  # type: ignore[call-arg]
    elif isinstance(source, celpy.celtypes.MessageType):
        # Copy-construction: every field already present in the source is range-checked.
        for field in source:
            # Fields without a registered validator are accepted unconditionally.
            valid_range = cls.range_check.get(field, lambda x: True)
            if not valid_range(source[field]):
                raise ValueError(f"TestAllTypes {field} value {source[field]} invalid")
        return cast(TestAllTypes, super().__new__(cls, source))
    else:
        # Should validate the huge list of internal fields and their ranges!
        for field in kwargs:
            valid_range = cls.range_check.get(field, lambda x: True)
            if not valid_range(kwargs[field]):
                raise ValueError(f"TestAllTypes {field} value {kwargs[field]} invalid")
        return cast(TestAllTypes, super().__new__(cls, source))  # type: ignore[call-arg]
def get(self, field: Any, default: Optional[Value] = None) -> Value:
    """
    Provides default values for the defined fields.

    Mirrors protobuf semantics: an unset field reads as its type's zero value
    (wrappers and message-valued singletons read as ``None``).
    :raises KeyError: for names that are not TestAllTypes fields.
    """
    logger.info(f"TestAllTypes.get({field!r}, {default!r})")
    default_attribute_value: Optional[Value] = None
    if field in ("NestedEnum",):
        # Accessing the enum "type" yields its name -> value mapping.
        default_attribute_value = celpy.celtypes.MessageType(
            {
                "FOO": celpy.celtypes.IntType(0),
                "BAR": celpy.celtypes.IntType(1),
                "BAZ": celpy.celtypes.IntType(2),
            }
        )
    elif field in ("NestedMessage",):
        default_attribute_value = NestedMessage({"bb": 1})
    elif field in ("map_string_string", "map_int64_nested_type",):
        # Map fields always read as a fresh empty map (early return bypasses super().get()).
        return celpy.celtypes.MapType()
    elif field in (
        "single_uint64_wrapper", "single_uint32_wrapper",
        "single_int64_wrapper", "single_int32_wrapper",
        "single_float_wrapper", "single_double_wrapper",
        "single_string_wrapper", "single_bool_wrapper", "single_bytes_wrapper",
    ):
        # Wrapper types default to null, not the wrapped type's zero.
        default_attribute_value = None
    elif field in ("single_int32", "single_sint32", "single_int64", "single_sint64", "repeated_int32", "repeated_int64", "repeated_sint32", "repeated_sint64"):
        default_attribute_value = celpy.celtypes.IntType(0)
    elif field in ("single_fixed32", "single_fixed64", "single_sfixed32", "single_sfixed64", "repeated_fixed32", "repeated_fixed64", "repeated_sfixed32", "repeated_sfixed64"):
        default_attribute_value = celpy.celtypes.IntType(0)
    elif field in ("single_uint32", "single_uint64", "repeated_uint32", "repeated_uint64"):
        default_attribute_value = celpy.celtypes.UintType(0)
    elif field in ("single_float", "single_double", "repeated_float", "repeated_double"):
        default_attribute_value = celpy.celtypes.DoubleType(0)
    elif field in ("single_bool", "repeated_bool"):
        default_attribute_value = celpy.celtypes.BoolType(False)
    elif field in ("single_string", "repeated_string"):
        default_attribute_value = celpy.celtypes.StringType("")
    elif field in ("single_bytes", "repeated_bytes"):
        default_attribute_value = celpy.celtypes.BytesType(b"")
    elif field in ("list_value",):
        default_attribute_value = celpy.celtypes.ListType([])
    elif field in ("single_struct",):
        default_attribute_value = celpy.celtypes.MessageType({})
    elif field in ("single_any", "single_value",):
        default_attribute_value = None
    elif field in ("single_duration", "single_timestamp",):
        default_attribute_value = None
    elif field in ("standalone_message", "single_nested_message", "repeated_nested_message", "repeated_lazy_message"):
        default_attribute_value = celpy.celtypes.MessageType()
    elif field in ("standalone_enum", "single_nested_enum", "repeated_nested_enum"):
        # Enum fields fall through with a None default.
        pass
    elif field in ("repeated_cord",):
        # NOTE(review): fixed values 1/2 below look like conformance-test stubs — confirm.
        return celpy.celtypes.IntType(1)
    elif field in ("repeated_string_piece",):
        return celpy.celtypes.IntType(2)
    else:
        err = f"no such member in {self.__class__.__name__}: {field!r}"
        raise KeyError(err)
    # An explicit caller-supplied default wins over the per-field default.
    return super().get(field, default if default is not None else default_attribute_value)
def __eq__(self, other: Any) -> bool:
    """
    Compare only the fields present on both sides: for protobuf testing,
    expected values may lack a complete set of CELType conversions on the
    defaults, so fields missing from either operand are ignored.
    """
    if not isinstance(other, TestAllTypes):
        return False
    shared_fields = set(self.keys()) & set(other.keys())
    for name in shared_fields:
        if self.get(name) != other.get(name):
            return False
    return True
class NestedTestAllTypes(celpy.celtypes.MessageType):
    """
    An example of a protobuf MessageType class.
    https://github.com/google/cel-spec/blob/master/proto/test/v1/proto3/test_all_types.proto
    ::
        // This proto includes a recursively nested message.
        message NestedTestAllTypes {
            NestedTestAllTypes child = 1;
            TestAllTypes payload = 2;
        }
    TODO: Refactor into an external module and apply as a type environment Annotation.
    """
    def __new__(cls, source=None, *args, **kwargs) -> 'NestedTestAllTypes':
        """Create empty, copy another MessageType, or build from another initializer."""
        logger.debug(f"NestedTestAllTypes(source={source}, *{args}, **{kwargs})")
        if source is None:
            return cast(NestedTestAllTypes, super().__new__(cls))  # type: ignore[call-arg]
        elif isinstance(source, celpy.celtypes.MessageType):
            return cast(NestedTestAllTypes, super().__new__(cls, source))
        else:
            # Should validate the fields are in "child", "payload"
            return cast(NestedTestAllTypes, super().__new__(cls, source))  # type: ignore[call-arg]

    def get(self, field: Any, default: Optional[Value] = None) -> Value:
        """
        Provides default values for the defined fields.

        Unset ``child``/``payload`` read as fresh empty messages, matching
        protobuf's implicit-default behavior.
        :raises KeyError: for any other field when no default is supplied.
        """
        logger.info(f"NestedTestAllTypes.get({field!r}, {default!r})")
        if field in self:
            return self[field]
        elif field == "child":
            return NestedTestAllTypes()
        elif field == "payload":
            return TestAllTypes()
        elif default is not None:
            return default
        else:
            err = f"no such member in mapping: {field!r}"
            raise KeyError(err)
        # return super().get(field, default if default is not None else default_class())
class NestedMessage(celpy.celtypes.MessageType):
    """
    An example of a protobuf MessageType class.
    https://github.com/google/cel-spec/blob/master/proto/test/v1/proto3/test_all_types.proto
    ::
        message NestedMessage {
            // The field name "b" fails to compile in proto1 because it conflicts with
            // a local variable named "b" in one of the generated methods.
            // This file needs to compile in proto1 to test backwards-compatibility.
            int32 bb = 1;
        }
    TODO: Refactor into an external module and apply as a type environment Annotation.
    """
    # Behaves exactly like MessageType; exists only to give the message a named type.
    pass
@given(u'disable_check parameter is {disable_check}')
def step_impl(context, disable_check):
    """Record the disable_check flag (feature text is a Python literal, e.g. True)."""
    context.data['disable_check'] = eval(disable_check)
@given(u'type_env parameter {name} is {type_env}')
def step_impl(context, name, type_env):
    """Add one annotation to the type environment; both name and value are eval'd literals."""
    context.data['type_env'][eval(name)] = eval(type_env)
@given(u'bindings parameter {name} is {binding}')
def step_impl(context, name, binding):
    """Add one activation binding; both name and value are eval'd Python literals."""
    context.data['bindings'][eval(name)] = eval(binding)
@given(u'container is {container}')
def step_impl(context, container):
    """Record the CEL container (package) name, eval'd from a Python literal."""
    context.data['container'] = eval(container)
def cel(context):
    """
    Run the CEL expression.
    TODO: include disable_macros and disable_check in environment.
    For the parse feature, force in the TestAllTypes and NestedTestAllTypes protobuf types.

    Outcomes land in ``context.data``: ``result`` on success; otherwise
    ``exc_info``/``error`` hold the parse or evaluation failure for the
    Then-steps to assert on.
    """
    # Some tests seem to assume this binding. Others have it in their environment definition.
    if context.data['container']:
        container = context.data['container']
        context.data['type_env'][f"{container}.TestAllTypes"] = TestAllTypes
        context.data['type_env'][f"{container}.NestedTestAllTypes"] = NestedTestAllTypes
        context.data['type_env'][f"{container}.NestedMessage"] = NestedMessage
        context.data['test_all_types'] = TestAllTypes
        context.data['nested_test_all_types'] = NestedTestAllTypes
    env = Environment(
        package=context.data['container'],
        annotations=context.data['type_env'],
        runner_class=context.data['runner'])
    try:
        ast = env.compile(context.data['expr'])
        prgm = env.program(ast)
    except CELParseError as ex:
        # Parse failures are recorded rather than raised; the scenario asserts on them.
        context.data['exc_info'] = sys.exc_info()
        context.data['error'] = ex.args[0]
        return None
    activation = context.data['bindings']
    print(f"GIVEN activation={activation!r}")
    try:
        result = prgm.evaluate(activation)
        context.data['result'] = result
        context.data['exc_info'] = None
        context.data['error'] = None
    except CELEvalError as ex:
        # No 'result' to distinguish from an expected None value.
        context.data['exc_info'] = sys.exc_info()
        context.data['error'] = ex.args[0]
@when(u'CEL expression {expr} is evaluated')
def step_impl(context, expr):
    """Store the expression (eval'd from a Python string literal) and run it."""
    context.data['expr'] = eval(expr)
    cel(context)
@then(u'value is {value}')
def step_impl(context, value):
    """
    The ``value`` **must** be the ``repr()`` string for a CEL object.
    This includes types and protobuf messages.
    """
    try:
        expected = eval(value)
    except TypeError as ex:
        # Surface which scenario supplied the bad repr before re-raising.
        print(f"Could not eval({value!r}) in {context.scenario}")
        raise
    context.data['expected'] = expected
    # 'result' missing means evaluation raised; an expected None result still sets it.
    if 'result' not in context.data:
        print("Unexpected exception:", context.data['exc_info'])
        raise AssertionError(f"Error {context.data['error']!r} unexpected")
    result = context.data['result']
    if expected is not None:
        assert result == expected, \
            f"{result!r} != {expected!r} in {context.data}"
    else:
        assert result is None, f"{result!r} is not None in {context.data}"
class ErrorCategory(Enum):
    """
    Broad buckets for CEL error messages; expected and actual errors are
    compared by category rather than exact message text.
    """
    divide_by_zero = auto()
    does_not_support = auto()
    integer_overflow = auto()
    invalid = auto()
    invalid_argument = auto()
    modulus_by_zero = auto()
    no_such_key = auto()
    no_such_member = auto()
    no_such_overload = auto()
    range_error = auto()
    repeated_key = auto()
    unbound_function = auto()
    undeclared_reference = auto()
    unknown_variable = auto()
    # Catch-all for messages no rule in error_category() recognizes.
    other = auto()
# Exact-match aliases: known error-message phrasings (from conformance
# TextProto files and various CEL implementations) mapped onto one
# canonical ErrorCategory each.
ERROR_ALIASES = {
    "division by zero": ErrorCategory.divide_by_zero,
    "divide by zero": ErrorCategory.divide_by_zero,
    "invalid UTF-8": ErrorCategory.invalid,
    "modulus by zero": ErrorCategory.modulus_by_zero,
    "modulus or divide by zero": ErrorCategory.modulus_by_zero,
    "no such key": ErrorCategory.no_such_key,
    "no such member": ErrorCategory.no_such_member,
    "no such overload": ErrorCategory.no_such_overload,
    "no matching overload": ErrorCategory.no_such_overload,
    "range": ErrorCategory.range_error,
    "range error": ErrorCategory.range_error,
    "repeated key": ErrorCategory.repeated_key,
    "Failed with repeated key": ErrorCategory.repeated_key,
    "return error for overflow": ErrorCategory.integer_overflow,
    "unknown variable": ErrorCategory.unknown_variable,
    "unknown varaible": ErrorCategory.unknown_variable,  # spelling error in TextProto
    "unbound function": ErrorCategory.unbound_function,
    "unsupported key type": ErrorCategory.does_not_support,
}
def error_category(text: str) -> ErrorCategory:
    """Summarize errors into broad ErrorCategory groupings."""
    # Exact matches first: a category name itself, then known aliases.
    if text in ErrorCategory.__members__:
        return ErrorCategory[text]
    if text in ERROR_ALIASES:
        return ERROR_ALIASES[text]
    # Harder cases: classify by message prefix.
    prefix_rules = (
        ("undeclared reference to", ErrorCategory.undeclared_reference),
        ("found no matching overload for", ErrorCategory.no_such_overload),
        ("no such key", ErrorCategory.no_such_key),
        ("no such member", ErrorCategory.no_such_member),
    )
    for prefix, category in prefix_rules:
        if text.startswith(prefix):
            return category
    if "does not support" in text:
        return ErrorCategory.does_not_support
    print(f"***No error category for {text!r}***")
    return ErrorCategory.other
@then(u"eval_error is {error_text}")
def step_impl(context, error_text):
    """Assert the recorded error matches the expected error (by broad category)."""
    error = eval(error_text)
    if error is None:
        assert context.data['error'] is None, f"error not None in {context.data}"
    else:
        print(f"*** Analyzing context.data = {context.data!r}***")
        expected_ec = error_category(error)
        actual_ec = error_category(context.data['error'] or "")
        # -D match=exact requires matching categories; default only requires *some* error.
        if context.config.userdata.get("match", "any") == "exact":
            assert expected_ec == actual_ec, f"{expected_ec} != {actual_ec} in {context.data}"
        else:
            if expected_ec != actual_ec:
                print(f"{expected_ec} != {actual_ec} in {context.data}", file=sys.stderr)
            assert context.data['error'] is not None, f"error None in {context.data}"
| python | Apache-2.0 | 3a134c10394058c73a6bbe0e4ca7e862ea9707b3 | 2026-01-05T07:13:01.631050Z | false |
cloud-custodian/cel-python | https://github.com/cloud-custodian/cel-python/blob/3a134c10394058c73a6bbe0e4ca7e862ea9707b3/features/steps/cli_binding.py | features/steps/cli_binding.py | # SPDX-Copyright: Copyright (c) Capital One Services, LLC
# SPDX-License-Identifier: Apache-2.0
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
"""
CLI Bindings for Behave testing.
These step definitions use ``subprocess.run()`` to run the ``celpy`` app as a separate process.
The command-line parameters these step-bindings use.
* ``-D PYTHONPATH=p`` sets the ``PYTHONPATH`` environment variable for this run.
* ``-D env=name`` sets the virtual environment name, creates a .test/name directory for temporary files
* ``-D debug=True`` produces additional debugging output
"""
import re
import shlex
import subprocess
import sys
from pathlib import Path
import parse
from behave import *
@given(u'JSON document \'{json}\'')
def step_impl(context, json):
    """Accumulate one JSON document line to pipe into celpy later."""
    context.data['json'].append(json)
@given(u'OS environment sets {name} to {value}')
def step_impl(context, name, value):
    """Record an environment variable to pass to the celpy subprocess."""
    context.data['bindings'][name] = str(value)
@when(u'echo document | celpy {arguments} is run')
def step_impl(context, arguments):
    """
    Pipe the accumulated JSON documents into ``celpy`` as stdin and capture
    status/stdout/stderr in ``context.data``.

    The documents are written to a temporary file under ``.test/<env>/``;
    the file and directory are removed even if the subprocess fails.
    """
    if "PYTHONPATH" in context.config.userdata:
        environment = {"PYTHONPATH": context.config.userdata["PYTHONPATH"]}
    else:
        environment = {}
    environment.update(context.data['bindings'])
    extra = {'text': True}
    context.data['arguments'] = shlex.split(arguments)
    env = context.config.userdata['env']
    test_dir = Path.cwd() / ".test" / env
    test_dir.mkdir(exist_ok=True, parents=True)
    temp = test_dir / "test.json"
    temp.write_text("\n".join(context.data['json']) + "\n")
    try:
        # Renamed from `input` to avoid shadowing the builtin.
        with temp.open() as input_file:
            result = subprocess.run(
                [sys.executable, '-m', 'celpy'] + context.data['arguments'],
                env=environment,
                stdin=input_file,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                **extra
            )
    finally:
        # BUG FIX: cleanup used to be skipped when subprocess.run() raised,
        # leaking .test/<env>/test.json into subsequent scenarios.
        temp.unlink()
        test_dir.rmdir()
    context.data['status'] = result.returncode
    context.data['stdout'] = result.stdout
    context.data['stderr'] = result.stderr
    if "debug" in context.config.userdata:
        for line in context.data['stdout'].splitlines():
            print(f"OUT: {line}", file=sys.stderr)
        for line in context.data['stderr'].splitlines():
            print(f"ERR: {line}", file=sys.stderr)
@when(u'celpy {arguments} is run')
def step_impl(context, arguments):
    """
    This definition forces in a ``--null-input`` option to be sure that celpy doesn't hang
    waiting for input missing from a scenario.

    Captured ``status``/``stdout``/``stderr`` are stored in ``context.data``.
    (Dead commented-out Python<=3.6 compatibility code removed; ``text=True``
    requires Python 3.7+.)
    """
    if "PYTHONPATH" in context.config.userdata:
        environment = {"PYTHONPATH": context.config.userdata["PYTHONPATH"]}
    else:
        environment = {}
    environment.update(context.data['bindings'])
    extra = {'text': True}
    context.data['arguments'] = shlex.split(arguments)
    result = subprocess.run(
        [sys.executable, '-m', 'celpy', '--null-input'] + context.data['arguments'],
        env=environment,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        **extra
    )
    context.data['status'] = result.returncode
    context.data['stdout'] = result.stdout
    context.data['stderr'] = result.stderr
    if "debug" in context.config.userdata:
        for line in context.data['stdout'].splitlines():
            print(f"OUT: {line}", file=sys.stderr)
        for line in context.data['stderr'].splitlines():
            print(f"ERR: {line}", file=sys.stderr)
@then(u'stdout matches \'{regex}\'')
def step_impl(context, regex):
    """Assert captured stdout matches *regex* anchored at the start."""
    pattern = re.compile(regex)
    # re.match() anchors at the beginning only; the pattern need not cover all output.
    assert pattern.match(context.data['stdout']), f"{context.data}"
@then(u'stdout is \'{text}\'')
def step_impl(context, text):
    """Assert stdout equals *text* exactly; literal ``\\n`` in the feature becomes a newline."""
    clean_text = text.replace(r"\n", "\n")
    assert clean_text == context.data['stdout'], f"{text!r} != {context.data!r}['stdout']"
@then(u'stdout is \'\'')
def step_impl(context):
    """Assert stdout is empty (ignoring a trailing newline)."""
    assert context.data['stdout'].rstrip() == "", f"'' != {context.data!r}['stdout']"
@then(u'stderr contains \'{text}\'')
def step_impl(context, text):
    """Assert *text* appears somewhere in captured stderr."""
    assert text in context.data['stderr'].rstrip(), f"{text} not in {context.data!r}['stderr']"
@then(u'stderr is \'\'')
def step_impl(context):
    """Assert stderr is empty (ignoring a trailing newline)."""
    assert context.data['stderr'].rstrip() == "", f"'' != {context.data!r}['stderr']"
@then(u'exit status is {status:d}')
def step_impl(context, status):
    """Assert the celpy subprocess exited with the expected return code."""
    assert context.data['status'] == status, f"{status} != {context.data['status']}"
| python | Apache-2.0 | 3a134c10394058c73a6bbe0e4ca7e862ea9707b3 | 2026-01-05T07:13:01.631050Z | false |
cloud-custodian/cel-python | https://github.com/cloud-custodian/cel-python/blob/3a134c10394058c73a6bbe0e4ca7e862ea9707b3/docs/source/conf.py | docs/source/conf.py | # Copyright 2020 The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use pathlib.Path.absolute() to make it absolute.
import sys
from pathlib import Path
sys.path.insert(0, str(Path('../../tools').absolute()))
# -- Project information -----------------------------------------------------

project = 'CEL in Python'
# 'copyright' shadows the builtin, but the name is required by Sphinx.
copyright = '2020, CapitalOne'
author = 'CapitalOne'

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.viewcode',
    'sphinx.ext.todo',
    'sphinxcontrib.plantuml',
    'sphinxcontrib.programoutput'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns: list[str] = []

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path: list[str] = []

# -- Options for Autodoc -----------------------------------------------------

autodoc_default_options = {
    'members': True,
    'member-order': 'bysource',
    'undoc-members': True,
    'special-members': True,
    'exclude-members': '__weakref__,__module__,__dict__,__annotations__,__slots__'
}

# -- Options for PlantUML
# Command sphinxcontrib.plantuml runs to render diagrams; the jar lives
# in the parent docs/ directory.
DOCS = Path.cwd().parent
plantuml = f'java -jar {DOCS/"plantuml-asl-1.2025.3.jar"!s}'
| python | Apache-2.0 | 3a134c10394058c73a6bbe0e4ca7e862ea9707b3 | 2026-01-05T07:13:01.631050Z | false |
cloud-custodian/cel-python | https://github.com/cloud-custodian/cel-python/blob/3a134c10394058c73a6bbe0e4ca7e862ea9707b3/demo/celdemo.py | demo/celdemo.py | # SPDX-Copyright: Copyright (c) Capital One Services, LLC
# SPDX-License-Identifier: Apache-2.0
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
"""
CEL Test Harness.
Use this to provide a CEL expression and a file of resources in NDJSON or YAML format.
Optionally, the c7nlib can be included. The Full C7N capability, however, isn't readily
available without provide a more sophisticated mocking capability.
CEL must either be installed or available on :envvar:`PYTHONPATH`. We suggest running
as follows:
% PYTHONPATH=src python demo/celdemo.py --cel '355./113.'
3.1415929203539825
% PYTHONPATH=src python demo/celdemo.py --cel 'now+duration("1h")' --now "2020-09-10T11:12:13Z"
2020-09-10T12:12:13Z
"""
import argparse
import datetime
import json
import logging
import sys
from pathlib import Path
from typing import Any, Dict, Iterable, List
import yaml
import celpy
logger = logging.getLogger("celdemo")
def cel_compile(text: str) -> celpy.Runner:
    """
    Compile CEL source *text* into a runnable program.

    The demo environment declares two bindings: ``resource`` (a map) and
    ``now`` (a timestamp).
    """
    annotations: Dict[str, celpy.Annotation] = {
        "resource": celpy.celtypes.MapType,
        "now": celpy.celtypes.TimestampType,
    }
    environment = celpy.Environment(annotations=annotations)
    compiled = environment.compile(text)
    return environment.program(compiled)
def run_cel_resource(cel: str, now: str, resource_iter: Iterable[Any]) -> None:
    """
    Evaluate the *cel* expression once per resource document, printing either
    the result or the exception for each.
    """
    program = cel_compile(cel)
    for document in resource_iter:
        logger.debug(f"INPUT: {document!r}\n")
        bindings = {
            "resource": celpy.adapter.json_to_cel(document),
            "now": celpy.celtypes.TimestampType(now),
        }
        try:
            result = program.evaluate(bindings)
        except Exception as ex:
            # Demo harness: report the failure and keep going.
            print(f"{ex!r} from now {now!r}, resource {document}")
        else:
            print(f"{result!r} from now {now!r}, resource {document}")
def get_options(argv: List[str] = sys.argv[1:]) -> argparse.Namespace:
    """
    Parse command-line options.

    :param argv: argument list; defaults to ``sys.argv[1:]`` (snapshotted at import).
    :return: namespace with ``cel``, ``now``, ``format``, ``v``, and ``resources``.
    """
    now = datetime.datetime.utcnow().isoformat()
    parser = argparse.ArgumentParser()
    parser.add_argument("--cel", action="store", required=True)
    parser.add_argument("-n", "--now", action="store", default=now)
    parser.add_argument(
        "-f",
        "--format",
        action="store",
        # BUG FIX: "ndjaon" was a typo for "ndjson"; main() dispatches on these names.
        choices=("json", "ndjson", "jsonnl", "yaml"),
        help="Format when stdin is read",
        default=None,
    )
    parser.add_argument("-v", action="count", default=0)
    parser.add_argument("resources", nargs="*", type=argparse.FileType("r"))
    options = parser.parse_args(argv)
    return options
def main() -> None:
    """
    Demo entry point: parse options, then evaluate the CEL expression against
    each resource document read from named files, stdin, or — with no
    resources at all — a single ``None`` resource.
    """
    options = get_options()
    if options.v == 1:
        logging.getLogger().setLevel(logging.INFO)
    elif options.v == 2:
        logging.getLogger().setLevel(logging.DEBUG)
    if options.resources:
        logger.debug(f"Reading {options.resources}")
        for input_file in options.resources:
            if input_file is sys.stdin:
                doc_iter: Iterable[Any]
                logger.debug(f"Reading stdin")
                # BUG FIX: --format values have no leading dot (see get_options);
                # the old comparison against {".ndjson", ".jsonnl"} never matched.
                if options.format in {"ndjson", "jsonnl"}:
                    doc_iter = (json.loads(line) for line in sys.stdin)
                elif options.format == "json":
                    doc_iter = iter([json.load(sys.stdin)])
                elif options.format == "yaml":
                    doc_iter = yaml.load_all(sys.stdin, Loader=yaml.SafeLoader)
                else:
                    logger.error(f"Unknown --format {options.format!r}")
                    doc_iter = iter([])
                run_cel_resource(options.cel, options.now, doc_iter)
            elif Path(input_file.name).suffix in {".ndjson", ".jsonnl"}:
                doc_iter = (json.loads(line) for line in input_file)
                run_cel_resource(options.cel, options.now, doc_iter)
            elif Path(input_file.name).suffix == ".json":
                doc_iter = iter([json.load(input_file)])
                run_cel_resource(options.cel, options.now, doc_iter)
            elif Path(input_file.name).suffix in {".yaml", ".yml"}:
                doc_iter = yaml.load_all(input_file, Loader=yaml.SafeLoader)
                run_cel_resource(options.cel, options.now, doc_iter)
            else:
                logger.error(f"Unknown suffix on {input_file.name!r}")
    else:
        # No resource files: evaluate the expression once with resource=None.
        doc_iter = [None]
        run_cel_resource(options.cel, options.now, doc_iter)
if __name__ == "__main__":
    # Quiet by default; -v/-vv raise the level inside main().
    logging.basicConfig(level=logging.WARNING)
    main()
    logging.shutdown()
| python | Apache-2.0 | 3a134c10394058c73a6bbe0e4ca7e862ea9707b3 | 2026-01-05T07:13:01.631050Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/run.py | run.py | from application import create_app
# from dotenv import load_dotenv
# load_dotenv(dotenv_path='.flaskenv')

# Module-level WSGI application, built by the application factory.
app = create_app()
def main():
    """Entry point: run the Flask development server."""
    app.run()

if __name__ == '__main__':
    main()
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/application.py | application.py | from flask import Flask
from extensions.routes_extension import register_routes
from extensions.injector_extension import register_dependency_injection
from extensions.exception_extension import register_exception_handler
def create_app():
    """Application factory: build the Flask app and wire up its extensions."""
    flask_app = Flask(__name__)
    # will move to register_config soon
    flask_app.config['ERROR_404_HELP'] = False
    # Registration order preserved: routes, then error handling, then DI.
    register_routes(flask_app)
    register_exception_handler(flask_app)
    register_dependency_injection(flask_app)
    return flask_app
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/__init__.py | apps/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/account_v1.py | apps/api/account_v1.py | from flask import Blueprint
from flask_restplus import Api
from werkzeug.exceptions import HTTPException
from .account.controllers.security import api as security_api
# Blueprint mounting all v1 account endpoints under /account/v1.
blueprint = Blueprint('account_api', __name__, url_prefix='/account/v1')
api = Api(blueprint,
    doc='/doc/',  # Swagger UI path relative to the blueprint prefix
    title='Resource API - Account',
    version='1.0',
    description='A description'
)
api.add_namespace(security_api)
@api.errorhandler(HTTPException)
def handle_error(error: HTTPException):
    """Render any HTTPException raised in this blueprint as a JSON body + status code."""
    return (
        {
            'error': error.__class__.__name__,
            'message': error.description,
        },
        error.code,
    )
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/profile_v1.py | apps/api/profile_v1.py | from flask import Blueprint
from flask_restplus import Api
from werkzeug.exceptions import HTTPException
from .profile.controllers.avatar import api as avatar_api
from .profile.controllers.member import api as member_api
from .profile.controllers.search import api as search_api
# Blueprint mounting all v1 profile endpoints under /profile/v1.
blueprint = Blueprint('profile_api', __name__, url_prefix='/profile/v1')
api = Api(blueprint,
    doc='/doc/',  # Swagger UI path relative to the blueprint prefix
    title='Resource API - Profile',
    version='1.0',
    description='A description'
)
api.add_namespace(avatar_api)
api.add_namespace(search_api)
api.add_namespace(member_api)
@api.errorhandler(HTTPException)
def handle_error(error: HTTPException):
    """Render any HTTPException raised in this blueprint as a JSON body + status code."""
    payload = {'error': error.__class__.__name__, 'message': error.description}
    return payload, error.code
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/__init__.py | apps/api/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/account/__init__.py | apps/api/account/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/account/controllers/security.py | apps/api/account/controllers/security.py | from flask_restplus import Namespace, Resource
from extensions.log_extension import get_logger
# Namespace collecting the security endpoints; module-scoped logger.
api = Namespace('security', description='Security Endpoints')
logger = get_logger(__name__)
@api.route('/authorize')
class Authorize(Resource):
    """GET /authorize -- placeholder returning a static payload."""
    @api.doc('Authorize Requests')
    def get(self):
        logger.info('authorize')
        return ['authorize']
@api.route('/logout')
class Logout(Resource):
    """GET /logout -- placeholder returning a static payload."""
    @api.doc('Logout Endpoint')
    def get(self):
        # BUG FIX: Logger.warn() is a deprecated alias; use warning().
        logger.warning('logout')
        return {'logout': True}
@api.route('/inquiry')
class Inquiry(Resource):
    """GET /inquiry -- placeholder account query."""
    @api.doc('Query Account')
    def get(self):
        logger.debug('inquiry')
        return ['Query']
@api.route('/token')
class Token(Resource):
    """GET /token -- placeholder token exchange."""
    @api.doc('Exchange Tokens')
    def get(self):
        logger.info('token')
        return ['Token']
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/account/controllers/__init__.py | apps/api/account/controllers/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/account/adapters/__init__.py | apps/api/account/adapters/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/account/adapters/presenters/__init__.py | apps/api/account/adapters/presenters/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/account/adapters/validators/__init__.py | apps/api/account/adapters/validators/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/account/adapters/response/__init__.py | apps/api/account/adapters/response/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/account/adapters/request/__init__.py | apps/api/account/adapters/request/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/profile/__init__.py | apps/api/profile/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/profile/controllers/search.py | apps/api/profile/controllers/search.py | from flask_restplus import Resource
from apps.api.profile.controllers import search_api as api
@api.route('/member')
class MemberSearch(Resource):
    """POST /search/member -- placeholder member search."""
    @api.doc('Search Member')
    def post(self):
        return ['search member']
@api.route('/newcomer')
class NewcomerSearch(Resource):
    """POST /search/newcomer -- placeholder newcomer search."""
    @api.doc('Search Newcomer')
    def post(self):
        return ['search newcomer']
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/profile/controllers/member.py | apps/api/profile/controllers/member.py | from flask_restplus import Resource
from injector import inject
from apps.api.profile.adapters.presenters.profile_presenter import GetUserProfilePresenter
from apps.api.profile.adapters.response.profile_response import member_profile
from apps.api.profile.controllers import member_api as api
from apps.shared.global_exception import CustomError, BadRequestError, NotFoundError
from core.domain.profile.use_case.get_user_profile import GetUserProfileUseCase, GetUserProfileRequest
@api.route('/')
class MemberList(Resource):
@api.doc('Create Member')
@api.response(400, 'Bad Request')
def post(self):
return ['create member']
@api.route('/<int:member_id>/')
class Member(Resource):
@inject
def __init__(self, uc_get_profile: GetUserProfileUseCase, *args, **kwargs):
super().__init__(*args, **kwargs)
self._uc_get_profile = uc_get_profile
@api.doc('Get Member')
@api.response(404, 'User Not Found')
@api.marshal_with(member_profile)
def get(self, member_id: int):
uc_request = GetUserProfileRequest(member_id, "member")
presenter = GetUserProfilePresenter()
self._uc_get_profile.execute(uc_request, presenter)
return presenter.content_result
@api.route('/<int:member_id>/basic')
class MemberBasic(Resource):
@api.doc('Get Member Basic info')
@api.response(404, 'User Not Found')
def put(self, member_id: int):
# return ['put member basic']
raise CustomError('Bad key123')
@api.route('/<int:member_id>/extra')
class MemberExtra(Resource):
@api.doc('Get Member Extra Info')
@api.response(404, 'User Not Found')
def put(self, member_id: int):
# return ['put member extra']
raise BadRequestError('wrong parameter abc')
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/profile/controllers/__init__.py | apps/api/profile/controllers/__init__.py | from flask_restplus import Namespace
avatar_api = Namespace('avatar', description='Avatar Image Endpoints')
member_api = Namespace('member', description='Member Resource Endpoints')
search_api = Namespace('search', description='Profile Search Endpoints')
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/profile/controllers/avatar.py | apps/api/profile/controllers/avatar.py | from flask_restplus import Resource
from apps.api.profile.controllers import avatar_api as api
@api.route('/')
class AvatarList(Resource):
@api.doc('Upload Avatar Image')
def post(self):
return ['avatar_post']
@api.route('/<string:avatar_id>')
class Avatar(Resource):
@api.doc('Get Avatar Image')
def get(self, avatar_id: str):
return [avatar_id]
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/profile/adapters/__init__.py | apps/api/profile/adapters/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/profile/adapters/presenters/profile_presenter.py | apps/api/profile/adapters/presenters/profile_presenter.py | from apps.shared.global_exception import NotFoundError
from core.domain.profile.use_case.get_user_profile import GetUserProfileResponse
from core.kernel.exception import BaseNotFoundException
from core.kernel.port import JsonContentResult
from core.kernel.use_case import UseCaseOutputPort
class GetUserProfilePresenter(UseCaseOutputPort[GetUserProfileResponse], JsonContentResult):
def handle(self, response: GetUserProfileResponse) -> None:
if not response.is_succeeded:
if isinstance(response.error, BaseNotFoundException):
raise NotFoundError(response.error.message)
self.content_result = response
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/profile/adapters/presenters/__init__.py | apps/api/profile/adapters/presenters/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/profile/adapters/validators/__init__.py | apps/api/profile/adapters/validators/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/profile/adapters/response/profile_response.py | apps/api/profile/adapters/response/profile_response.py | from flask_restplus import fields
from apps.api.profile.controllers import member_api
entity = member_api.model('Entity', {
'id': fields.Integer
})
basic_profile = member_api.model('BasicProfile', {
'real_name': fields.String,
'gender': fields.String,
# 'birthday': fields.String
})
extra_profile = member_api.model('ExtraProfile', {
'profile_category': fields.String,
'*': fields.Wildcard(fields.String)
})
user_profile = member_api.clone('UserProfile', entity, {
'user_type': fields.String,
'user_name': fields.String,
'user_status': fields.String,
})
member_profile = member_api.clone('MemberProfile', user_profile, {
'basic_profile': fields.Nested(basic_profile),
'extra_profile': fields.List(fields.Nested(extra_profile))
})
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/profile/adapters/response/__init__.py | apps/api/profile/adapters/response/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/profile/adapters/request/profile_request.py | apps/api/profile/adapters/request/profile_request.py | from flask_restplus import fields
from apps.api.profile.controllers import member_api
member_id_request = member_api.model('MemberIdRequest', {
'id': fields.Integer
})
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/api/profile/adapters/request/__init__.py | apps/api/profile/adapters/request/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/shared/global_exception.py | apps/shared/global_exception.py | from werkzeug.exceptions import (
HTTPException,
BadRequest,
NotFound
)
class CustomError(HTTPException):
""" Custom Error Exception """
code = 409
description = "custom error"
class BadRequestError(BadRequest):
""" Wrap BadRequest Exception """
class NotFoundError(NotFound):
""" Wrap NotFound Exception """
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/apps/shared/__init__.py | apps/shared/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/extensions/routes_extension.py | extensions/routes_extension.py | from apps.api.account_v1 import blueprint as account_api
from apps.api.profile_v1 import blueprint as profile_api
def register_routes(app):
"""
Register routes with blueprint and namespace
"""
app.register_blueprint(account_api)
app.register_blueprint(profile_api)
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/extensions/config_extension.py | extensions/config_extension.py | from config import configurations
def register_config(app):
pass
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/extensions/hooks_extension.py | extensions/hooks_extension.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/extensions/log_extension.py | extensions/log_extension.py | import logging
FORMAT = '%(asctime)-15s %(name)s %(levelname)-8s %(message)s'
logging.basicConfig(format=FORMAT)
def get_logger(name):
logger = logging.getLogger(name)
return logger
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/extensions/injector_extension.py | extensions/injector_extension.py | from flask_injector import FlaskInjector
from injector import singleton, Binder
from core.domain.profile.repository.profile_repository import ProfileRepository
from core.domain.profile.use_case.get_user_profile import GetUserProfileUseCase
from infra.sql.profile.repository.sql_profile_repository import SqlProfileRepository
# from infra.mock.repository import MockProfileRepository
def configure_binding(binder: Binder) -> Binder:
binder.bind(GetUserProfileUseCase, to=GetUserProfileUseCase, scope=singleton)
# binder.bind(ProfileRepository, to=MockProfileRepository, scope=singleton)
binder.bind(ProfileRepository, to=SqlProfileRepository, scope=singleton)
return binder
def register_dependency_injection(app):
FlaskInjector(app=app, modules=[configure_binding])
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/extensions/oidc_extension.py | extensions/oidc_extension.py | from authlib.integrations.flask_oauth2 import AuthorizationServer, ResourceProtector
from authlib.oauth2.rfc6749.grants import AuthorizationCodeGrant as _AuthorizationCodeGrant
from authlib.oidc.core import UserInfo
from authlib.oidc.core.grants import (
OpenIDCode as _OpenIDCode,
OpenIDImplicitGrant as _OpenIDImplicitGrant,
OpenIDHybridGrant as _OpenIDHybridGrant
)
from werkzeug.security import gen_salt
DUMMY_JWT_CONFIG = {
'key': 'secret-key',
'alg': 'HS256',
'iss': 'https://authlib.org',
'exp': 3600,
}
def exists_nonce(nonce, req):
# exists = OAuth2AuthorizationCode.query.filter_by(
# client_id=req.client_id, nonce=nonce
# ).first()
# return bool(exists)
return True
def generate_user_info(user, scope):
return UserInfo(sub=str(user.id), name=user.username)
def create_authorization_code(client, grant_user, request):
code = gen_salt(48)
nonce = request.data.get('nonce')
# item = OAuth2AuthorizationCode(
# code=code,
# client_id=client.client_id,
# redirect_uri=request.redirect_uri,
# scope=request.scope,
# user_id=grant_user.id,
# nonce=nonce,
# )
# db.session.add(item)
# db.session.commit()
return code
class AuthorizationCodeGrant(_AuthorizationCodeGrant):
def create_authorization_code(self, client, grant_user, request):
return create_authorization_code(client, grant_user, request)
def parse_authorization_code(self, code, client):
item = None
# item = OAuth2AuthorizationCode.query.filter_by(
# code=code, client_id=client.client_id).first()
if item and not item.is_expired():
return item
def delete_authorization_code(self, authorization_code):
# db.session.delete(authorization_code)
# db.session.commit()
pass
def authenticate_user(self, authorization_code):
# return User.query.get(authorization_code.user_id)
pass
class OpenIDCode(_OpenIDCode):
def exists_nonce(self, nonce, request):
return exists_nonce(nonce, request)
def get_jwt_config(self, grant):
return DUMMY_JWT_CONFIG
def generate_user_info(self, user, scope):
return generate_user_info(user, scope)
class ImplicitGrant(_OpenIDImplicitGrant):
def exists_nonce(self, nonce, request):
return exists_nonce(nonce, request)
def get_jwt_config(self, grant):
return DUMMY_JWT_CONFIG
def generate_user_info(self, user, scope):
return generate_user_info(user, scope)
class HybridGrant(_OpenIDHybridGrant):
def create_authorization_code(self, client, grant_user, request):
return create_authorization_code(client, grant_user, request)
def exists_nonce(self, nonce, request):
return exists_nonce(nonce, request)
def get_jwt_config(self):
return DUMMY_JWT_CONFIG
def generate_user_info(self, user, scope):
return generate_user_info(user, scope)
authorization = AuthorizationServer()
require_oauth = ResourceProtector()
def query_client():
pass
def save_token():
pass
def bearer_cls():
pass
def config_oauth(app):
# query_client = create_query_client_func(db.session, OAuth2Client)
# save_token = create_save_token_func(db.session, OAuth2Token)
authorization.init_app(
app,
query_client=query_client,
save_token=save_token
)
# support all openid grants
authorization.register_grant(AuthorizationCodeGrant, [
OpenIDCode(require_nonce=True),
])
authorization.register_grant(ImplicitGrant)
authorization.register_grant(HybridGrant)
# protect resource
# bearer_cls = create_bearer_token_validator(db.session, OAuth2Token)
require_oauth.register_token_validator(bearer_cls())
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/extensions/__init__.py | extensions/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/extensions/database_extension.py | extensions/database_extension.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/extensions/exception_extension.py | extensions/exception_extension.py | from werkzeug.exceptions import HTTPException
def handle_global_error(error: HTTPException):
""" Make JSON Error Response instead of Web Page """
response = {
'error': error.__class__.__name__,
'message': error.description,
}
return response, error.code
def register_exception_handler(app):
app.register_error_handler(HTTPException, handle_global_error)
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/tests/__init__.py | tests/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/infra/__init__.py | infra/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/infra/mock/__init__.py | infra/mock/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/infra/mock/repository.py | infra/mock/repository.py | from typing import List
from core.domain.profile.entity.profile import (
UserBasicProfile, EducationExtraProfile, CareerExtraProfile, UserExtraProfile
)
from core.domain.profile.entity.user import User, Member, Newcomer
from core.domain.profile.exception import UserNotFound
from core.domain.profile.repository.profile_repository import ProfileRepository
class MockProfileRepository(ProfileRepository):
_users: List[User] = []
def __init__(self):
education_profile = EducationExtraProfile(school="College", department="Art")
career_profile = CareerExtraProfile(career="Developer", job_title="Sr. Developer II")
member = Member(
id=111,
user_name="Mock",
user_status="enabled",
basic_profile=UserBasicProfile(real_name="Hello Mock", gender="Male", birthday=""),
extra_profile=[education_profile, career_profile]
)
newcomer = Newcomer(
id=112,
user_name="Nancy",
user_status="enabled",
basic_profile=UserBasicProfile(real_name="Hello Nancy", gender="Female", birthday=""),
extra_profile=[career_profile]
)
self._users.append(member)
self._users.append(newcomer)
def get_user(self, user_type: str, user_id: int) -> User:
user = next((x for x in self._users if x.id == user_id and x.user_type == user_type), None)
return user
def create_user(self, user: User) -> None:
self._users.append(user)
def update_user_basic(self, user_type: str, user_id: int, basic_profile: UserBasicProfile) -> None:
user = next((x for x in self._users if x.id == user_id and x.user_type == user_type), None)
if user:
user.basic_profile = basic_profile
else:
raise UserNotFound()
def update_user_extra(self, user_type: str, user_id: int, extra_profiles: List[UserExtraProfile]) -> None:
user = next((x for x in self._users if x.id == user_id and x.user_type == user_type), None)
if user:
user.extra_profile = extra_profiles
else:
raise UserNotFound()
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/infra/sql/__init__.py | infra/sql/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/infra/sql/account/orm.py | infra/sql/account/orm.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/infra/sql/account/__init__.py | infra/sql/account/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/infra/sql/account/repository/__init__.py | infra/sql/account/repository/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/infra/sql/profile/orm.py | infra/sql/profile/orm.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/infra/sql/profile/__init__.py | infra/sql/profile/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/infra/sql/profile/repository/sql_profile_repository.py | infra/sql/profile/repository/sql_profile_repository.py | from typing import List
from core.domain.profile.entity.profile import (
UserBasicProfile, EducationExtraProfile, CareerExtraProfile, UserExtraProfile
)
from core.domain.profile.entity.user import User, Member, Newcomer
from core.domain.profile.exception import UserNotFound
from core.domain.profile.repository.profile_repository import ProfileRepository
class SqlProfileRepository(ProfileRepository):
_users: List[User] = []
def __init__(self):
education_profile = EducationExtraProfile(school="College", department="Art")
career_profile = CareerExtraProfile(career="Developer", job_title="Sr. Developer II")
member = Member(
id=111,
user_name="Mike",
user_status="enabled",
basic_profile=UserBasicProfile(real_name="Hello Mike", gender="Male", birthday=""),
extra_profile=[education_profile, career_profile]
)
newcomer = Newcomer(
id=112,
user_name="Nicole",
user_status="enabled",
basic_profile=UserBasicProfile(real_name="Hello Nicole", gender="Female", birthday=""),
extra_profile=[career_profile]
)
self._users.append(member)
self._users.append(newcomer)
def get_user(self, user_type: str, user_id: int) -> User:
user = next((x for x in self._users if x.id == user_id and x.user_type == user_type), None)
return user
def create_user(self, user: User) -> None:
self._users.append(user)
def update_user_basic(self, user_type: str, user_id: int, basic_profile: UserBasicProfile) -> None:
user = next((x for x in self._users if x.id == user_id and x.user_type == user_type), None)
if user:
user.basic_profile = basic_profile
else:
raise UserNotFound()
def update_user_extra(self, user_type: str, user_id: int, extra_profiles: List[UserExtraProfile]) -> None:
user = next((x for x in self._users if x.id == user_id and x.user_type == user_type), None)
if user:
user.extra_profile = extra_profiles
else:
raise UserNotFound()
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/infra/sql/profile/repository/__init__.py | infra/sql/profile/repository/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/__init__.py | core/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/kernel/exception.py | core/kernel/exception.py | import attr
@attr.s(auto_attribs=True)
class UseCaseException(Exception):
""" Base UseCase Error """
message: str = None
@attr.s(auto_attribs=True)
class BaseNotFoundException(UseCaseException):
""" Base Not Found Exception Abstraction """
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/kernel/port.py | core/kernel/port.py | from abc import ABC, abstractmethod
from typing import TypeVar, Generic, Dict, Any
import attr
import cattr
from .exception import UseCaseException
T = TypeVar('T')
@attr.s(auto_attribs=True)
class UseCaseRequest(ABC):
""" Base UseCase Request """
@attr.s(auto_attribs=True)
class UseCaseResponse(object):
result: Any = None
error: UseCaseException = None
@property
def is_succeeded(self):
return self.error is None or self.result is not None
class UseCaseOutputPort(Generic[T]):
def __str__(self):
return f'{__class__.__name__} with Type: {T}'
@abstractmethod
def handle(self, response: T) -> None:
return NotImplemented
class JsonContentResult(object):
__content_result: Dict = {}
def __init__(self, content: UseCaseResponse = None) -> None:
if content and content.is_succeeded:
self.__content_result = cattr.unstructure(content.result)
@property
def content_result(self) -> Dict:
return self.__content_result
@content_result.setter
def content_result(self, content: UseCaseResponse) -> None:
if content and content.is_succeeded:
self.__content_result = cattr.unstructure(content.result)
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/kernel/__init__.py | core/kernel/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/kernel/entity.py | core/kernel/entity.py | from uuid import uuid4, UUID
import attr
@attr.s(auto_attribs=True)
class Entity(object):
""" Int ID Entity """
id: int = None
@attr.s(auto_attribs=True)
class UuidEntity(object):
""" UUID Entity """
id: UUID = uuid4()
@attr.s(auto_attribs=True)
class ValueObject(object):
""" Value Object """
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/kernel/use_case.py | core/kernel/use_case.py | from abc import ABC, abstractmethod
from .port import UseCaseRequest, UseCaseResponse, UseCaseOutputPort
class UseCase(ABC):
@abstractmethod
def execute(self, uc_request: UseCaseRequest, uc_output_port: UseCaseOutputPort[UseCaseResponse]) -> None:
return NotImplemented
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/kernel/repository.py | core/kernel/repository.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/domain/__init__.py | core/domain/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/domain/account/__init__.py | core/domain/account/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/domain/account/repository/authorize_repository.py | core/domain/account/repository/authorize_repository.py | from abc import ABC, abstractmethod
class AuthorizeRepository(ABC):
@abstractmethod
def get_user(self):
return NotImplemented
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/domain/account/repository/__init__.py | core/domain/account/repository/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/domain/account/entity/__init__.py | core/domain/account/entity/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/domain/account/use_case/authorize_user.py | core/domain/account/use_case/authorize_user.py | from core.kernel.use_case import UseCase
from core.kernel.port import UseCaseRequest, UseCasePresenter
from core.domain.account.repository.authorize_repository import AuthorizeRepository
class AuthorizeUser(UseCase):
_auth_repo = None
def __init__(self, auth_repo: AuthorizeRepository):
self._auth_repo = auth_repo
def execute(self, uc_request: UseCaseRequest, uc_presenter: UseCasePresenter) -> None:
pass
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/domain/account/use_case/__init__.py | core/domain/account/use_case/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/domain/profile/exception.py | core/domain/profile/exception.py | import attr
from core.kernel.exception import BaseNotFoundException
@attr.s(auto_attribs=True)
class UserNotFound(BaseNotFoundException):
""" User Not Found Exception """
message: str = "User Not Found"
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/domain/profile/__init__.py | core/domain/profile/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/domain/profile/repository/__init__.py | core/domain/profile/repository/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/domain/profile/repository/profile_repository.py | core/domain/profile/repository/profile_repository.py | from abc import ABC, abstractmethod
from typing import List
from core.domain.profile.entity.user import User, UserBasicProfile, UserExtraProfile
class ProfileRepository(ABC):
    """Abstract data-access port for user profiles (clean-architecture boundary).

    Concrete implementations live outside the domain layer; use cases depend
    only on this interface.
    """

    @abstractmethod
    def get_user(self, user_type: str, user_id: int) -> User:
        """Fetch a user by type and id; implementations return a falsy value when absent
        (see GetUserProfileUseCase, which checks ``if not user``)."""
        # Fix: `return NotImplemented` is the binary-operator sentinel, not an
        # abstract-method convention; raise NotImplementedError instead.
        raise NotImplementedError

    @abstractmethod
    def create_user(self, user: User) -> None:
        """Persist a new user aggregate."""
        raise NotImplementedError

    @abstractmethod
    def update_user_basic(self, user_type: str, user_id: int, basic_profile: UserBasicProfile) -> None:
        """Replace the user's basic profile."""
        raise NotImplementedError

    @abstractmethod
    def update_user_extra(self, user_type: str, user_id: int, extra_profiles: List[UserExtraProfile]) -> None:
        """Replace the user's list of extra profiles."""
        raise NotImplementedError
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/domain/profile/entity/profile.py | core/domain/profile/entity/profile.py | import attr
from datetime import date
from core.kernel.entity import ValueObject
@attr.s(auto_attribs=True)
class UserBasicProfile(ValueObject):
    """Immutable-style value object holding a user's basic profile fields."""
    # NOTE(review): fields are annotated `str` but default to None, i.e.
    # effectively Optional[str] -- confirm whether None is a legal stored value.
    real_name: str = None
    gender: str = None
    birthday: str = None  # kept as a string; `date` is imported in this module but unused -- TODO confirm format
@attr.s(auto_attribs=True)
class UserExtraProfile(ValueObject):
    """Base class for a user's extra-profile value objects."""
    # Discriminator naming the concrete profile kind; subclasses pin it with
    # attr.ib(default=..., init=False) so callers cannot override it.
    profile_category: str = None
@attr.s(auto_attribs=True)
class EducationExtraProfile(UserExtraProfile):
    """Extra profile describing one education record."""
    # Category is fixed and excluded from __init__ so it always reads "education".
    profile_category: str = attr.ib(default="education", init=False)
    school: str = None
    department: str = None
@attr.s(auto_attribs=True)
class CareerExtraProfile(UserExtraProfile):
    """Extra profile describing one career/work record."""
    # Category is fixed and excluded from __init__ so it always reads "career".
    profile_category: str = attr.ib(default="career", init=False)
    career: str = None
    job_title: str = None
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/domain/profile/entity/user.py | core/domain/profile/entity/user.py | import attr
from typing import List
from core.kernel.entity import Entity
from .profile import UserBasicProfile, UserExtraProfile
@attr.s(auto_attribs=True)
class User(Entity):
    """Base user entity; Member/Newcomer specialize `user_type`."""
    user_type: str = None          # discriminator; fixed by subclasses below
    user_name: str = None
    user_status: str = "enabled"   # default lifecycle state
    basic_profile: UserBasicProfile = None
    # Fix: a plain class-level `= []` is a shared mutable default; use an
    # explicit attrs factory so every instance gets its own fresh list.
    extra_profile: List[UserExtraProfile] = attr.Factory(list)
@attr.s(auto_attribs=True)
class Member(User):
    """User specialization with user_type permanently set to "member"."""
    user_type: str = attr.ib(default="member", init=False)  # not settable by callers
@attr.s(auto_attribs=True)
class Newcomer(User):
    """User specialization with user_type permanently set to "newcomer"."""
    user_type: str = attr.ib(default="newcomer", init=False)  # not settable by callers
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/domain/profile/entity/__init__.py | core/domain/profile/entity/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/domain/profile/use_case/get_user_profile.py | core/domain/profile/use_case/get_user_profile.py | import attr
from injector import inject
from core.domain.profile.exception import UserNotFound
from core.domain.profile.repository.profile_repository import ProfileRepository
from core.kernel.port import UseCaseRequest, UseCaseResponse, UseCaseOutputPort
from core.kernel.use_case import UseCase
@attr.s(auto_attribs=True)
class GetUserProfileRequest(UseCaseRequest):
    """Input DTO for GetUserProfileUseCase."""
    user_id: int = None
    user_type: str = None  # presumably "member"/"newcomer" (the user entity defaults) -- confirm with callers
@attr.s(auto_attribs=True)
class GetUserProfileResponse(UseCaseResponse):
    """Output DTO for GetUserProfileUseCase; the use case fills either
    `result` (the found user) or `error` (UserNotFound), fields inherited
    from UseCaseResponse."""
class GetUserProfileUseCase(UseCase):
    """Use case: look up one user's profile through the ProfileRepository port."""

    _profile_repo = None

    @inject
    def __init__(self, profile_repo: ProfileRepository):
        # Concrete repository is supplied by the dependency-injection container.
        self._profile_repo = profile_repo

    def execute(self, uc_request: GetUserProfileRequest,
                uc_output_port: UseCaseOutputPort[GetUserProfileResponse]) -> None:
        uc_response = GetUserProfileResponse()
        found = self._profile_repo.get_user(uc_request.user_type, uc_request.user_id)
        if found:
            uc_response.result = found
        else:
            uc_response.error = UserNotFound("This user does not exist")
        uc_output_port.handle(uc_response)
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/core/domain/profile/use_case/__init__.py | core/domain/profile/use_case/__init__.py | python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false | |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/config/base_config.py | config/base_config.py | """Flask config class."""
import os
class BaseConfig:
    """Shared Flask settings; each value is read from the environment
    at import time (None when the variable is unset)."""
    SECRET_KEY = os.getenv('SECRET_KEY')
    SESSION_COOKIE_NAME = os.getenv('SESSION_COOKIE_NAME')
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/config/prod_config.py | config/prod_config.py | """Flask config class."""
import os
from .base_config import BaseConfig
class ProductionConfig(BaseConfig):
    """Production settings: debug and testing disabled, prod database URI."""
    DEBUG = False
    TESTING = False
    DATABASE_URI = os.getenv('PROD_DATABASE_URI')
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/config/__init__.py | config/__init__.py | from .prod_config import ProductionConfig
from .dev_config import DevelopmentConfig
# Registry mapping an environment name to its Flask config class.
configurations = dict(
    production=ProductionConfig,
    development=DevelopmentConfig,
    default=DevelopmentConfig,
)
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
chonhan/flask_restapi_clean_architecture | https://github.com/chonhan/flask_restapi_clean_architecture/blob/539994825013d45712817da72dd96e0c6f8ee56b/config/dev_config.py | config/dev_config.py | """Flask config class."""
import os
from .base_config import BaseConfig
class DevelopmentConfig(BaseConfig):
    """Development settings.

    Bug fix: this module (dev_config.py) previously re-declared
    ProductionConfig — a copy-paste of prod_config.py — so
    ``from .dev_config import DevelopmentConfig`` in config/__init__.py
    raised ImportError. Development mode enables debugging/testing and
    reads its own database URI.
    """
    DEBUG = True
    TESTING = True
    DATABASE_URI = os.environ.get('DEV_DATABASE_URI')
| python | MIT | 539994825013d45712817da72dd96e0c6f8ee56b | 2026-01-05T07:13:07.327713Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/setup.py | setup.py | from setuptools import setup, find_packages
# setuptools build/metadata for the safekit package.
setup(name='safekit',
      version='1.0',  # setuptools expects a PEP 440 version string, not a float
      description='Neural Network Anomaly Detection for Multivariate Sequences',
      url='http://aarontuor.site',
      author='Aaron Tuor, Ryan Baerwolf, Robin Cosbey, Nick Knowles',
      author_email='aaron.tuor@pnnl.gov',
      license='MIT',
      packages=find_packages(),  # or list of package paths from this directory
      zip_safe=False,
      # Fix: 'sklearn' is a deprecated dummy distribution on PyPI; the real
      # distribution name for scikit-learn is 'scikit-learn'.
      install_requires=['tensorflow', 'scipy', 'scikit-learn', 'numpy', 'matplotlib'],
      classifiers=['Programming Language :: Python'],
      keywords=['Deep Learning', 'Anomaly Detection', 'LSTM', 'RNN'])
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/safekit/graph_training_utils.py | safekit/graph_training_utils.py | """
Utilities for training the parameters of tensorflow computational graphs.
"""
import tensorflow as tf
import sys
import math
OPTIMIZERS = {'grad': tf.train.GradientDescentOptimizer, 'adam': tf.train.AdamOptimizer}
class EarlyStop:
    """
    Callable early-stopping criterion for a training while-loop.

    Stop conditions: the data stream is exhausted, the loss diverges
    (nan/inf), or the loss has failed to improve for `badlimit`
    consecutive steps.

    Return codes (kept for caller compatibility):
        1  -> keep training
        0  -> end of data
        -1 -> divergence or bad-count limit reached
    """

    def __init__(self, badlimit=20):
        """
        :param badlimit: (int) Number of consecutive non-improving steps
                         tolerated before requesting a stop.
        """
        self.badlimit = badlimit
        self.badcount = 0
        # Seed with float max so the first real loss always counts as an improvement.
        self.current_loss = sys.float_info.max

    def __call__(self, mat, loss):
        """
        Decide whether training should continue.

        :param mat: Current batch of features (None signals end of data stream).
        :param loss: Current training loss.
        :return: (int) 1 to continue; 0 on end of data; -1 on divergence or
                 exceeded bad count. NOTE: -1 is truthy, so callers must test
                 the value (e.g. ``cond == -1``), not just truthiness.
        """
        if mat is None:
            sys.stderr.write('Done Training. End of data stream.')
            cond = 0
        elif math.isnan(loss) or math.isinf(loss):
            sys.stderr.write('Exiting due to divergence: %s\n\n' % loss)  # fixed message typo
            cond = -1
        elif loss > self.current_loss:
            self.badcount += 1
            if self.badcount >= self.badlimit:
                sys.stderr.write('Exiting. Exceeded max bad count.')
                cond = -1
            else:
                cond = 1
        else:
            # Improvement (or tie): reset the bad streak.
            self.badcount = 0
            cond = 1  # was `True`; use 1 for a consistent int return type
        self.current_loss = loss
        return cond
class ModelRunner:
    """
    A class for gradient descent training of tensorflow (TF1) models.

    Wraps a TF1 graph: builds the train op for the given loss, owns the
    Session, and feeds numpy mini-batches through the placeholder dict.
    """
    def __init__(self, loss, ph_dict, learnrate=0.01, opt='adam', debug=False, decay=True,
                 decay_rate=0.99, decay_steps=20):
        """
        :param loss: The objective function for optimization strategy.
        :param ph_dict: A dictionary of names (str) to tensorflow placeholders.
        :param learnrate: The step size for gradient descent.
        :param opt: Optimization algorithm, 'adam' or 'grad' (keys of OPTIMIZERS).
        :param debug: Whether or not to print debugging info.
        :param decay: (boolean) Whether or not to use a learn rate with exponential decay.
        :param decay_rate: The rate parameter for exponential decay of learn rate.
        :param decay_steps: The number of training steps to decay learn rate.
        """
        self.loss = loss
        self.ph_dict = ph_dict
        self.debug = debug
        if decay:
            # global_step is incremented by minimize() and drives the decay schedule.
            self.global_step = tf.Variable(0, trainable=False)
            learnrate = tf.train.exponential_decay(learnrate, self.global_step,
                                                   decay_steps, decay_rate, staircase=True)
        else:
            self.global_step = None
        self.train_op = OPTIMIZERS[opt](learnrate).minimize(loss, global_step=self.global_step)
        # Session is created and all variables initialized eagerly at construction.
        self.init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(self.init)
    def train_step(self, datadict, eval_tensors=[], update=True):
        """
        Performs a training step of gradient descent with given optimization strategy.

        :param datadict: A dictionary of names (str) matching names in ph_dict to numpy matrices for this mini-batch.
        :param eval_tensors: (list of Tensors) Tensors to evaluate along with train_op.
            NOTE(review): mutable default is safe here only because the list is never mutated.
        :param update: (boolean) Whether to perform a gradient update this train step.
        :return: A list of numpy arrays for eval_tensors. First element is None
            (the train op's result) when update=True; when update=False the first
            slot holds a duplicate evaluation of eval_tensors[0].
        """
        if update:
            train_op = [self.train_op]
        else:
            # No gradient step: evaluate the first tensor in place of the train op
            # so the returned list keeps the same length/offsets for callers.
            train_op = eval_tensors[0:1]
        return self.sess.run(train_op + eval_tensors,
                             feed_dict=get_feed_dict(datadict, self.ph_dict, debug=self.debug))
    def eval(self, datadict, eval_tensors):
        """
        Evaluates tensors without affecting parameters of model (feeds train=0 so
        dropout keep-prob is 1.0 and batch-norm uses inference statistics).

        :param datadict: A dictionary of names (str) matching names in ph_dict to numpy matrices for this mini-batch.
        :param eval_tensors: Tensors from computational graph to evaluate as numpy matrices.
        :return: A list of evaluated tensors as numpy matrices.
        """
        return self.sess.run(eval_tensors,
                             feed_dict=get_feed_dict(datadict, self.ph_dict, train=0, debug=self.debug))
def get_feed_dict(datadict, ph_dict, train=1, debug=False):
    """
    Pairs placeholders of a tensorflow computational graph with numpy arrays.

    :param datadict: A dictionary with keys matching keys in ph_dict, and values are numpy arrays.
    :param ph_dict: A dictionary where the keys match keys in datadict and values are placeholder tensors
                    (or lists of placeholders paired element-wise with lists of arrays).
    :param train: {1,0}. Different values get fed to placeholders for dropout probability, and batch norm
                  statistics depending on if model is training or evaluating.
    :param debug: (boolean) Whether or not to print dimensions of contents of placeholderdict, and datadict.
    :return: A feed dictionary with keys of placeholder tensors and values of numpy matrices.
    """
    fd = {}
    # Fix: dict.iteritems() is Python-2-only; .items() works on both 2 and 3.
    for k, v in ph_dict.items():
        if type(v) is not list:
            fd[v] = datadict[k]
        else:
            for tensor, matrix in zip(v, datadict[k]):
                fd[tensor] = matrix
    # Graph-collection placeholders: dropout keep-probabilities and batch-norm
    # train/eval deciders registered elsewhere in the model code.
    dropouts = tf.get_collection('dropout_prob')
    bn_deciders = tf.get_collection('bn_deciders')
    if dropouts:
        for prob in dropouts:
            if train == 1:
                fd[prob[0]] = prob[1]   # training: use the configured keep-prob
            else:
                fd[prob[0]] = 1.0       # inference: keep everything
    if bn_deciders:
        fd.update({decider: [train] for decider in bn_deciders})
    if debug:
        for desc in ph_dict:
            if type(ph_dict[desc]) is not list:
                print('%s\n\tph: %s\t%s\tdt: %s\t%s' % (desc,
                                                        ph_dict[desc].get_shape().as_list(),
                                                        ph_dict[desc].dtype,
                                                        datadict[desc].shape,
                                                        datadict[desc].dtype))
        print(fd.keys())
    return fd
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/safekit/util.py | safekit/util.py | """
Python and numpy functions.
"""
from tf_ops import softmax_dist_loss, diag_mvn_loss, full_mvn_loss
import numpy as np
import argparse
def make_feature_spec(dataspec):
    """
    Makes lists of all the continuous and categorical features to be used as
    input features of a neural network.

    :param dataspec: (dict) From a json specification of the purpose of fields in the csv input file
                     (See docs for formatting). The 'num_features' key is metadata and is skipped.
    :return: (dict) features {'categorical': [categorical_feature_1, ...],
                              'continuous': [continuous_feature_1, ...]}
    """
    # Fix: dict.iteritems() is Python-2-only; .items() works on both 2 and 3.
    spec = {k: v for k, v in dataspec.items() if k != 'num_features'}
    feature_spec = {'categorical': [], 'continuous': []}
    for key, field in spec.items():
        if field['feature']:
            # num_classes == 0 marks a continuous field; > 0 marks a categorical one.
            if field['num_classes'] == 0:
                feature_spec['continuous'].append(key)
            elif field['num_classes'] > 0:
                feature_spec['categorical'].append(key)
    return feature_spec
def make_loss_spec(dataspec, mvn):
    """
    Makes a list of tuples for each target to be used in training a multiple output neural network
    modeling a mixed joint distribution of discrete and continuous variables.

    :param dataspec: (dict) From a json specification of the purpose of fields in the csv input file
                     (See docs for formatting). The 'num_features' key is metadata and is skipped.
    :param mvn: Tensorflow function for calculating type of multivariate loss for continuous target vectors.
                Can be tf_ops.diag_mvn_loss, tf_ops.full_mvn_loss, tf_ops.eyed_mvn_loss
    :return: A list of tuples of the form (target_name, loss_function, dimension) where dimension
             is the number of classes for categorical targets and the length of the target vector
             for continuous targets.
    """
    # Fix: dict.iteritems() is Python-2-only; .items() works on both 2 and 3.
    spec = {k: v for k, v in dataspec.items() if k != 'num_features'}
    loss_spec = []
    for key, field in spec.items():
        if field['target']:
            if field['num_classes'] == 0:
                # Continuous target: dimension is the width of its index slice.
                loss_spec.append((key, mvn, len(field['index'])))
            elif field['num_classes'] > 0:
                # Categorical target: softmax loss over num_classes classes.
                loss_spec.append((key, softmax_dist_loss, field['num_classes']))
    return loss_spec
def get_multivariate_loss_names(loss_spec):
    """
    For use in conjunction with `tf_ops.multivariate_loss`. Gives the names of all
    contributors (columns) of the loss matrix.

    :param loss_spec: A list of 3-tuples (input_name, loss_function, dimension) where
        input_name matches a target in datadict, loss_function takes (target, prediction),
        and dimension is the size of the target.
    :return: List of loss-column names; per-dimension names first, then any
        log-determinant columns appended at the end.
    """
    per_dim_names = []
    logdet_names = []
    for target_name, loss_fn, dim in loss_spec:
        if loss_fn is softmax_dist_loss:
            # Discrete target contributes a single loss column.
            per_dim_names.append("loss_%s" % target_name)
            continue
        # Continuous target: one column per vector dimension, plus a logdet
        # column for losses that carry a covariance term.
        if loss_fn is diag_mvn_loss or loss_fn is full_mvn_loss:
            logdet_names.append("loss_%s.logdet" % target_name)
        per_dim_names.extend("loss_%s.%d" % (target_name, d) for d in range(dim))
    return per_dim_names + logdet_names
def get_mask(lens, num_tokens):
    """
    Build a normalization mask for jagged sequences so gradient updates from
    lm_rnn average correctly per sequence.

    Warning: a sequence length of 0 produces nan in its row, so avoid that.

    :param lens: Numpy vector of sequence lengths.
    :param num_tokens: (int) Number of predicted tokens in sentence.
    :return: Numpy array of shape (len(lens), num_tokens) where row i holds
             lens[i] entries of 1/lens[i] followed by zeros.
    """
    positions = np.tile(np.arange(num_tokens), (lens.shape[0], 1))
    lens_col = lens.reshape([-1, 1])
    return (positions < lens_col).astype(float) / lens_col.astype(float)
class RunningMean:
    """
    Incrementally computes the running mean over rows, columns, or all values
    of matrices supplied batch by batch.
    """

    def __init__(self, axis=0):
        """
        :param axis: Axis to average over; None averages over the whole array.
        """
        self.n = 0.0    # total sample count seen so far
        self.avg = 0.0  # current running average
        self.axis = axis

    def __call__(self, samples):
        """
        Fold a new batch into the running mean.

        :param samples: Matrix of new samples.
        :return: The updated running average over the configured axis.
        """
        if self.axis is None:
            new_count = np.prod(np.array(samples.shape))
        else:
            new_count = float(samples.shape[self.axis])
        self.n += new_count
        # Weighted blend of the old average and the new batch's contribution.
        weight_old = (self.n - new_count) / self.n
        self.avg = weight_old * self.avg + np.sum(samples, axis=self.axis) / self.n
        return self.avg
class ExponentialRunningMean:
    """
    Exponentially-weighted running mean of row vectors, updated batchwise
    from a sequence of matrices.
    """

    def __init__(self, alpha=1.0):
        """
        :param alpha: (float) Higher alpha discounts older observations faster;
                      smaller alpha weighs the past more heavily.
        """
        self.mean = None  # (num_rows_of_last_batch, dim) trail of means
        self.alpha = alpha

    def __call__(self, samples):
        """
        Fold a batch of row vectors into the exponential mean.

        :param samples: Matrix whose rows are incorporated one at a time.
        :return: Matrix of the per-row updated means (one row per sample row
                 after the first call; a single averaged row on the first call).
        """
        if self.mean is None:
            # First batch: seed with the plain column mean as a 1 x dim row.
            self.mean = np.mean(samples, axis=0).reshape([1, -1])
            return self.mean
        prev = self.mean[-1, :]
        trail = []
        for row in samples:
            prev = (1 - self.alpha) * prev + self.alpha * row
            trail.append(prev)
        self.mean = np.vstack(trail) if trail else np.empty((0, samples.shape[1]))
        return self.mean
class Parser(argparse.ArgumentParser):
    """
    Hack for Sphinx documentation of scripts to work correctly.

    Overrides ArgumentParser's private _get_option_tuples (abbreviation /
    prefix matching of option strings); the body is adapted from the stdlib
    argparse implementation, so its statement order is kept intact.
    """
    def _get_option_tuples(self, option_string):
        # Collect (action, option_string, explicit_arg) candidates that the
        # possibly-abbreviated option_string could refer to.
        result = []
        # option strings starting with two prefix characters are only
        # split at the '='
        chars = self.prefix_chars
        if option_string[0] in chars and option_string[1] in chars:
            if '=' in option_string:
                option_prefix, explicit_arg = option_string.split('=', 1)
            else:
                option_prefix = option_string
                explicit_arg = None
            # Exact-match lookup against all registered long options.
            for option_string in self._option_string_actions:
                if option_string == option_prefix:
                    action = self._option_string_actions[option_string]
                    tup = action, option_string, explicit_arg
                    result.append(tup)
        # single character options can be concatenated with their arguments
        # but multiple character options always have to have their argument
        # separate
        elif option_string[0] in chars and option_string[1] not in chars:
            option_prefix = option_string
            explicit_arg = None
            # e.g. "-xVALUE": "-x" plus the glued argument "VALUE".
            short_option_prefix = option_string[:2]
            short_explicit_arg = option_string[2:]
            for option_string in self._option_string_actions:
                if option_string == short_option_prefix:
                    action = self._option_string_actions[option_string]
                    tup = action, option_string, short_explicit_arg
                    result.append(tup)
                elif option_string == option_prefix:
                    action = self._option_string_actions[option_string]
                    tup = action, option_string, explicit_arg
                    result.append(tup)
        # shouldn't ever get here
        else:
            # NOTE(review): `_` is argparse's gettext alias and is not defined in
            # this module -- this branch would raise NameError if ever reached;
            # it is believed unreachable (copied verbatim from argparse).
            self.error(_('unexpected option string: %s') % option_string)
        # return the collected option tuples
        return result
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/safekit/__init__.py | safekit/__init__.py | python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false | |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/safekit/batch.py | safekit/batch.py | """
Module for mini-batching data.
"""
# TODO: Make skipping header argument consistent (numpy style skiprows) for all batchers.
# TODO: Make arguments for all batchers as consistent as possible.
# TODO: Look at Replay batcher and try to fix behavior of replay DNN. If fixed combine replay batcher with OnlineBatcher.
# TODO: StateTrackingBatcher - needs additional checking and commenting
import numpy as np
import random
import math
from collections import deque
class DayBatcher:
    """
    Yields one day's worth of rows at a time from a csv file whose first field
    is the day stamp. Rows must already be sorted by day (single pass, no
    rewinding). Intended for data too large to fit in memory.
    """

    def __init__(self, datafile, skiprow=0, delimiter=','):
        """
        :param datafile: (str) File to read lines from.
        :param skiprow: (int) Number of leading lines to discard (e.g. a header).
        :param delimiter: (str) The delimiter for the csv file.
        """
        self.f = open(datafile, 'r')
        self.delimiter = delimiter
        for _ in range(skiprow):
            self.f.readline()
        # One line of lookahead so a day boundary isn't lost between calls.
        self.current_line = self.f.readline()
        self.current_day = -1

    def next_batch(self):
        """
        :return: (np.array) 2D array of consecutive rows sharing one day stamp,
                 or None once the file is exhausted (single-pass batcher).
        """
        if self.current_line == '':
            return None

        def parse(line):
            return np.array([float(tok) for tok in line.strip().split(self.delimiter)])

        row = parse(self.current_line)
        self.current_day = row[0]
        rows = []
        while row[0] == self.current_day:
            self.current_day = row[0]
            rows.append(row)
            self.current_line = self.f.readline()
            if self.current_line == '':
                break
            row = parse(self.current_line)
        return np.array(rows)
class OnlineBatcher:
    """
    Gives batches from a csv file.
    For batching data too large to fit into memory. Written for one pass on data!!!

    Optionally size-checks rows (skipping corrupt lines) and normalizes the
    real-valued tail of each row with exponential running statistics.
    """
    def __init__(self, datafile, batch_size,
                 skipheader=False, delimiter=',',
                 alpha=0.5, size_check=None,
                 datastart_index=3, norm=False):
        """
        :param datafile: (str) File to read lines from.
        :param batch_size: (int) Mini-batch size.
        :param skipheader: (bool) Whether or not to skip first line of file.
        :param delimiter: (str) Delimiter of csv file.
        :param alpha: (float) For exponential running mean and variance.
                      Lower alpha discounts older observations faster.
                      The higher the alpha, the further you take into consideration the past.
        :param size_check: (int) Expected number of fields from csv file. Used to check for data corruption.
        :param datastart_index: (int) The csv field where real valued features to be normalized begins.
                                Assumed that all features beginning at datastart_index till end of line
                                are real valued.
        :param norm: (bool) Whether or not to normalize the real valued data features.
        """
        self.alpha = alpha
        self.f = open(datafile, 'r')
        self.batch_size = batch_size
        self.index = 0  # running count of rows requested; 0 marks "first batch" for the norm stats
        self.delimiter = delimiter
        self.size_check = size_check
        if skipheader:
            self.header = self.f.readline()
        self.datastart_index = datastart_index
        self.norm = norm
        self.replay = False  # flag for parity with StateTrackingBatcher's replay property
    def next_batch(self):
        """
        :return: (np.array) until end of datafile, each time called,
                 returns mini-batch number of lines from csv file
                 as a numpy array. Returns shorter than mini-batch
                 end of contents as a smaller than batch size array.
                 Returns None when no more data is available(one pass batcher!!).
        """
        matlist = []
        l = self.f.readline()
        if l == '':
            return None
        rowtext = np.array([float(k) for k in l.strip().split(self.delimiter)])
        if self.size_check is not None:
            # Skip corrupt rows until one has the expected field count.
            while len(rowtext) != self.size_check:
                l = self.f.readline()
                if l == '':
                    return None
                rowtext = np.array([float(k) for k in l.strip().split(self.delimiter)])
        matlist.append(rowtext)
        for i in range(self.batch_size - 1):
            l = self.f.readline()
            if l == '':
                break
            rowtext = np.array([float(k) for k in l.strip().split(self.delimiter)])
            if self.size_check is not None:
                while len(rowtext) != self.size_check:
                    l = self.f.readline()
                    if l == '':
                        # NOTE(review): EOF reached while scanning past corrupt
                        # rows discards the partial batch accumulated so far
                        # (plain EOF above `break`s and returns the partial
                        # batch instead) -- confirm this asymmetry is intended.
                        return None
                    rowtext = np.array([float(k) for k in l.strip().split(self.delimiter)])
            matlist.append(rowtext)
        data = np.array(matlist)
        if self.norm:
            # Exponential running mean/variance over the real-valued columns.
            batchmean, batchvariance = data[:,self.datastart_index:].mean(axis=0), data[:, self.datastart_index:].var(axis=0)
            if self.index == 0:
                self.mean, self.variance = batchmean, batchvariance
            else:
                self.mean = self.alpha * self.mean + (1 - self.alpha) * batchmean
                self.variance = self.alpha * self.variance + (1 - self.alpha) * batchvariance
            # NOTE(review): divides by variance (+ epsilon), not standard
            # deviation -- unusual for standardization; confirm intended.
            data[:, self.datastart_index:] = (data[:, self.datastart_index:] - self.mean)/(self.variance + 1e-10)
        self.index += self.batch_size
        return data
def split_batch(batch, spec):
    """
    Splits a numpy matrix into separate data fields according to a spec dictionary.

    :param batch: (np.array) shape=(batch_size, num_features) of data collected from stream.
    :param spec: (dict) Maps feature names to index info. Continuous features list their
                 column indices; categorical features give {'index': [i], 'num_classes': c}.
                 The 'num_features' key is metadata used only for the sanity check.
    :return: (dict) feature name -> numpy array sliced from the 2d batch.
    """
    assert spec['num_features'] == batch.shape[1], "Wrong number of features: spec/%s\tbatch/%s" % (spec['num_features'], batch.shape[1])
    datadict = {}
    # Fix: dict.iteritems() is Python-2-only; .items() works on both 2 and 3.
    for dataname, value in spec.items():
        if dataname != 'num_features':
            if value['num_classes'] > 0:
                # Categorical: a single column of integer class ids.
                datadict[dataname] = batch[:, value['index'][0]].astype(int)
            else:
                # Continuous: slice all listed columns.
                datadict[dataname] = batch[:, value['index']]
    return datadict
class StateTrackingBatcher:
"""
Aggregate RNN batcher. Reads line by line from a file or pipe being fed csv format features by a feature extractor.
Keeps track of window of user history for truncated backpropagation through time with a shifting set of users.
"""
def __init__(self, pipe_name,
specs,
batchsize=21,
num_steps=3,
layers=(10),
replay_ratio=(1, 0),
next_step=False,
warm_start_state=None,
delimiter=',',
skipheader=False,
alpha=0.5,
datastart_index=3,
standardize=True):
"""
:param pipe_name: (str) Name of file or pipe to read from.
:param specs: (dict) From a json specification of the purpose of fields in the csv input file (See docs for formatting)
:param batchsize: (int) The maximum number of events in a mini-batch
:param num_steps: (int) The maximum number of events for any given user per mini-batch (window size sort of)
:param layers: (list) A list of the sizes of hidden layers for the stacked lstm.
:param replay_ratio: (tuple) (num_new_batches, num_replay_batches) Describes the ratio of new batches to replay batches.
:param next_step: (boolean) False (0) if autoencoding, True (1) if next time step prediction
:param warm_start_state: (tuple) Tuple of numpy arrays for warm_starting state of all users of RNN.
:param delimiter: (str) Delimiter of csv file.
:param skipheader: (bool) Whether or not to skip first line of csv file.
:param alpha: (float) For exponential running mean and variance.
Lower alpha discounts older observations faster.
The higher the alpha, the further you take into consideration the past.
:param datastart_index: (int) The csv field where real valued features to be normalized begins.
Assumed that all features beginnning at datastart_index till end of line
are real valued.
:param standardize: (bool) Whether or not to standardize the data using running mean and variance.
"""
self.specs = specs
self.batch_limit = batchsize
self.num_steps = num_steps
self.pipe_name = pipe_name
self.pipein = open(pipe_name, 'r')
self.pipein.readline()
self.event_map = {}
self.state_map = {}
self.new_event_count = {}
self.layers = layers
self.event_number = 0
self.event_deque_size = int(next_step) + self.num_steps
self.next_step = next_step
self.finished = False
self.num_features = self.specs.pop('num_features', None)
self.user_index = self.specs['user']['index'][0]
self.count_start = self.specs['counts']['index'][0]
self.warm_start_state = warm_start_state
self.period = sum(replay_ratio)
self.batch_function = [self.new_batch if i < replay_ratio[0] else self.replay_batch for i in range(self.period)]
self.replay_indicator = [False if i < replay_ratio[0] else True for i in range(self.period)]
self.delimiter = delimiter
self.mod = 0
self.day = 0
self.alpha = alpha
self.index = 0
self.datastart_index = datastart_index
self.standardize = standardize
if skipheader:
self.header = self.pipein.readline()
@property
def replay(self):
"""
Whether or not a replay batch was just processed.
"""
return self.replay_indicator[self.mod]
def next_batch(self):
"""
:return: (dict) A dictionary of numpy arrays from splitting a
3d (numsteps X mb_size X num_csv_fields) array into subarrays with keys pertaining to use in training.
"""
if self.day < 20:
return self.new_batch()
else:
batch = self.batch_function[self.mod]()
self.mod = (self.mod + 1) % self.period
return batch
def package_data(self, batch):
"""
:param batch: (np.array) An assembled 3 way array of data collected from the stream with shape (num_time_steps, num_users, num_features)
:return: (dict) A dictionary of numpy arrays of the diced 3way feature array.
"""
datadict = {}
for dataname, value in self.specs.iteritems():
if value['num_classes'] != 0: # type(value) is dict:
datadict[dataname] = batch[:, :, value['index'][0]].astype(int)
else:
datadict[dataname] = batch[:, :, value['index']]
return datadict
def blank_slate(self):
"""
Creates and returns a zero state for one time step for 1 user
:return: (list) A list of 1 X state_size numpy arrays the number of layers long
"""
if self.warm_start_state is not None:
return self.warm_start_state
return np.stack([np.stack([np.random.normal(scale=0.1, size=(1, units)),
np.random.normal(scale=0.1, size=(1, units))]) for units in self.layers])
def avg_state(self):
"""
:return: (list) The average of all the most recent states for each batch entity.
"""
avg = self.blank_state
for user, dec in self.state_map.iteritems():
avg = [[a[0] + b[0], a[1] + b[1]] for a, b in zip(dec[-1], avg)]
return [[a[0] / float(len(self.state_map)), a[1] / float(len(self.state_map))] for a in avg]
def event_padding_random(self, rowtext):
"""
Creates and returns a 'non-event' with random entries for event history padding of newly encountered user.
:param rowtext: (int) A log line for the user
:return: (np.array) A random event with user meta data attached.
"""
meta = rowtext[:self.count_start]
num_zeros = (len(rowtext) / 4) * 3
zeros = np.zeros(num_zeros)
vals = np.random.randint(1, high=30, size=len(rowtext) - num_zeros - self.count_start)
zero_vals = np.concatenate([zeros, vals]) # len(rowtext) - self.count_start
np.random.shuffle(zero_vals) # len(rowtext) - self.count_start
return np.concatenate((meta, zero_vals)) # len(rowtext)
def get_new_events(self):
"""
To get new events when not replaying old events.
:returns: (int) 1 if not EOF 0 if EOF
"""
if self.finished:
return 0
max_user_event_count = 0
event_count = 0
self.new_event_count = {}
matlist = []
while (event_count < self.batch_limit and
max_user_event_count < self.num_steps):
rowtext = self.pipein.readline()[:-1].strip().split(self.delimiter)
if rowtext[-1] == '':
self.finished = True
break
assert len(rowtext) == self.num_features, 'Discrepancy in number of features of event %s. \n ' \
'Expected %s, got %s. \nFields: %r' % (self.event_number,
self.num_features,
len(rowtext),
rowtext)
event_count += 1
user = int(float(rowtext[self.user_index]))
self.day = float(rowtext[0])
if user not in self.new_event_count:
self.new_event_count[user] = 1
else:
self.new_event_count[user] += 1
max_user_event_count = max(self.new_event_count[user], max_user_event_count)
try:
rowtext = [float(entry) for entry in rowtext]
except ValueError:
raise ValueError('Non numeric string found in event %s' % self.event_number)
matlist.append(rowtext)
data = np.array(matlist)
if self.standardize:
batchmean, batchvariance = data[:, self.datastart_index:].mean(axis=0), data[:, self.datastart_index:].var(
axis=0)
if self.index == 0:
self.mean, self.variance = batchmean, batchvariance
else:
self.mean = self.alpha * self.mean + (1 - self.alpha) * batchmean
self.variance = self.alpha * self.variance + (1 - self.alpha) * batchvariance
self.index += data.shape[0]
data[:, self.datastart_index:] = (data[:, self.datastart_index:] - self.mean) / (self.variance + 1e-10)
for rowtext in data:
user = int(float(rowtext[self.user_index]))
if user not in self.event_map:
self.event_map[user] = deque(self.event_padding_random(rowtext)
for i in range(self.event_deque_size))
self.state_map[user] = deque(self.blank_slate()
for i in range(self.num_steps + 1))
self.event_map[user].append(rowtext)
self.event_map[user].popleft()
self.state_map[user].popleft()
self.event_number += 1
return 1
def get_states(self):
    """
    Fetches the saved RNN states of users in current mini-batch.

    :return: (list) One (num_users X layer_size) array per LSTM state slot
        (two slots — cell and hidden — per layer).
    """
    per_user = [self.state_map[user][self.next_step] for user in self.new_event_count]
    stacked = np.concatenate(per_user, axis=2)
    depth = len(self.layers) * 2
    stacked = stacked.reshape(depth, len(self.new_event_count.keys()), self.layers[0])
    return [stacked[slot, :, :] for slot in range(depth)]
def get_events(self):
    """
    :return: (np.array) 3 way array of shape (num_time_steps, num_users, num_features)
    """
    per_user_events = [list(self.event_map[user]) for user in self.new_event_count]
    time_major = []
    for step in range(self.event_deque_size):
        time_major.append([events[step] for events in per_user_events])
    return np.array(time_major)
def make_key_map(self):
    """
    Map each user id in the current mini-batch to its position in the batch.

    :return: (dict) For use in get_eval_indices.
    """
    return {user: position for position, user in enumerate(self.new_event_count)}
def get_eval_indices(self, key_map):
    """
    Build, per time step, the batch positions of users with enough new events
    to be evaluated at that step.

    :param key_map: (dict) user id -> position in mini-batch.
    :return: (list) Data structure which keeps track of where to evaluate RNN.
    """
    # Reverse the order of eval_indices to lookup correct hidden state in rnn output
    index_lists = []
    for threshold in reversed(range(self.num_steps)):
        selected = [key_map[user] for user, count in self.new_event_count.items()
                    if count > threshold]
        index_lists.append(np.array(selected))
    return index_lists
def new_batch(self):
    """
    Assemble the next training mini-batch from freshly read events.

    :return: (dict or None) None at EOF; otherwise a dict with keys to match to
        placeholders and values of numpy matrices:

        - **initial_state** A structured list of numpy arrays to feed as initial state for next round of training
        - **eval_indices** A num_time_steps long list of numpy vectors which contain the indices of hidden state outputs to evaluate on for each time step in this batch of training.
        - **Other entries** are split from the events matrix using the specs dictionary which describes indices of matrices to extract.
    """
    if self.get_new_events() == 0:
        return None
    events = self.get_events()
    eval_indices = self.get_eval_indices(self.make_key_map())
    if self.next_step:
        # When predicting the next step, the first time step has no target.
        eval_indices = eval_indices[1:]
    batch = self.package_data(events)
    batch['eval_indices'] = eval_indices
    batch['initial_state'] = self.get_states()
    return batch
def replay_batch(self):
    """
    Assemble a mini-batch by replaying already-seen events for a random subset
    of known users (each user contributes num_steps events).

    :return: (dict) A dictionary with keys to match to placeholders and values
        of numpy matrices:

        - **initial_state** A structured list of numpy arrays to feed as initial state for next round of training
        - **eval_indices** A num_time_steps long list of numpy vectors which contain the indices of hidden state outputs to evaluate on for each time step in this batch of training.
        - **Other entries** are split from the events matrix using the specs dictionary which describes indices of matrices to extract.
    """
    replay_users = list(self.event_map.keys())
    random.shuffle(replay_users)
    # num_users * num_steps ~= batch_limit
    quota = int(math.ceil(float(self.batch_limit) / float(self.num_steps)))
    replay_users = replay_users[:quota]
    self.new_event_count = {user: self.num_steps for user in replay_users}
    events = self.get_events()
    eval_indices = self.get_eval_indices(self.make_key_map())
    if self.next_step:
        eval_indices = eval_indices[1:]
    batch = self.package_data(events)
    batch['eval_indices'] = eval_indices
    batch['initial_state'] = self.get_states()
    return batch
def update_states(self, states):
    """
    For updating the deque of lstm states for each user after a minibatch of training.

    :param states: (list) The unstructured list of state matrices evaluated after a train step.
        Assumed to reshape to (num_steps, num_layers, 2, num_users, layer_size) — TODO confirm
        against the graph's state output ordering.
    """
    # states handed to last batch that we want to preserve for popleft rule to work
    last_states = np.concatenate([self.state_map[user][0] for user in self.new_event_count], axis=2)
    new_states = np.array(states).reshape(
        [self.num_steps, len(self.layers), 2, last_states.shape[2], self.layers[0]])
    # Prepend the preserved state so each user keeps num_steps + 1 entries:
    # result is (num_steps + 1) X layers X 2 X user_mb X units.
    new_states = np.concatenate([np.expand_dims(last_states, axis=0), new_states],
                                axis=0)
    # Split along the user axis (axis 3) into one array per user in the batch.
    new_states = np.split(new_states, new_states.shape[3], axis=3)
    # Replace each user's state deque with the per-time-step slices; iteration
    # order of new_event_count matches the order used to build last_states.
    for idx, user in enumerate(self.new_event_count):
        self.state_map[user] = deque([new_states[idx][t, :, :, :, :] for t in range(self.num_steps + 1)])
class OnlineLMBatcher:
    """
    For use with tiered_lm.py. Batcher keeps track of user states in upper tier RNN.

    Log lines are buffered per user; once a user has ``num_steps`` lines they
    form one user-batch (a num_steps X num_fields matrix). A mini-batch holds at
    most one user-batch per user — extra matrices for high-frequency users wait
    in ``user_batch_overflow``. After EOF, buffered data is drained in two flush
    stages (full overflow matrices first, then single leftover log lines).
    """

    def __init__(self, datafile, initial_state_triple,
                 batch_size=100, num_steps=5, delimiter=" ",
                 skiprows=0):
        """
        :param datafile: (str) CSV file to read data from.
        :param initial_state_triple: (tuple) Initial state for users in lstm.
        :param batch_size: (int) How many users in a mini-batch.
        :param num_steps: (int) How many log lines to get for each user.
        :param delimiter: (str) delimiter for csv file.
        :param skiprows: (int) How many rows to skip at beginning of csv file.
        """
        self.user_count = 15000  # number of users in population (fixed upper bound on user ids)
        self.delimiter = delimiter  # delimiter for input file
        self.mb_size = batch_size  # the number of users in a batch
        self.num_steps = num_steps  # The number of log lines for each user in a batch
        self.user_logs = [deque() for i in range(self.user_count)]  # list of lists of loglines for each user.
        # An individual user log line list has length between 0 and
        # self.num_steps - 1. When a user log line list reaches self.num_steps
        # it is transformed into np.array and moved to either
        # the current batch, or self.user_batch_overflow (see new_batch()).
        self.user_batch_overflow = []  # To store num_steps matrices of log lines for high frequency users
        self.state_triples = [initial_state_triple] * self.user_count  # lstm state for each user for top tier language model
        self.data = open(datafile, 'r')
        self.batch_user_list = []  # A record of all the users in a batch for retrieving and updating states
        for i in range(skiprows):
            garbage = self.data.readline()
        self.line_num = 1  # The line number of the file to be read next
        self.flush = False  # used by next_batch() to decide whether to call flush_batch()

    def update_state_triples(self, new_triples):
        """
        Called after training step of RNN to save current states of users.

        :param new_triples: (3-tuple) context_list = np.array shape=(users X context_rnn_hidden_size)
                            state_list = list of np.arrays of shape=(users X context_rnn_hidden_size)
                            hidden_list = Same type as state list
        """
        context_list = np.split(new_triples[0], new_triples[0].shape[0], axis=0)  # split on user dimension
        state_list = np.split(np.array(new_triples[1]), len(context_list), axis=1)  # split on user dimension
        hidden_list = np.split(np.array(new_triples[2]), len(context_list), axis=1)  # split on user dimension
        # batch_user_list preserves the order users were packed into the batch,
        # so index idx in each split list belongs to that user.
        for idx, user in enumerate(self.batch_user_list):
            self.state_triples[int(user)] = (context_list[idx], state_list[idx], hidden_list[idx])

    def get_state_triples(self):
        """
        :return: (dict) Current states of users for all users in this mini-batch,
            keyed by the placeholder names the model expects.
        """
        context_list = [None] * len(self.batch_user_list)
        state_list = [None] * len(self.batch_user_list)
        hidden_list = [None] * len(self.batch_user_list)
        for idx, user in enumerate(self.batch_user_list):
            context_list[idx], state_list[idx], hidden_list[idx] = self.state_triples[int(user)]
        state_matrix = np.concatenate(state_list, axis=1)
        hidden_matrix = np.concatenate(hidden_list, axis=1)
        return {'context_vector': np.concatenate(context_list, axis=0),  # users X context_rnn_hidden_size
                'c_state_init': [state_matrix[layer, :, :] for layer in range(state_matrix.shape[0])],
                # list of users X context_rnn_hidden_size
                'h_state_init': [hidden_matrix[layer, :, :] for layer in range(hidden_matrix.shape[0])]
                # list of users X context_rnn_hidden_size
                }

    def next_batch(self):
        """
        :return: (tuple) (batch, state_triples) Where batch is a three way array and state_triples contains current user
                 states for upper tier lstm. At beginning of file batch will be shape (batch_size X num_steps X num_feats).
                 At end of file during first stage of flushing batch will be shape (num_unique_users X num_steps X num_feats).
                 At end of file during second stage of flushing batch will be
                 shape (min(batch_size X num_steps, num_unique_users) X num_feats).
        """
        if self.flush:
            return self.flush_batch()
        else:
            return self.new_batch()

    def flush_batch(self):
        """
        Called when EOF is encountered. Returns either first stage flush batch or second stage flush batch.
        """
        print("flushing overflow matrices")
        self.batch_user_list = []
        batch, batch_user_set = self.get_batch_from_overflow()
        if len(batch) == 0:
            # Overflow exhausted: drain partially filled per-user deques.
            return self.collect_stragglers()
        else:
            return np.array(batch), self.get_state_triples()

    def collect_stragglers(self):
        """
        Second stage flushing: emit one leftover log line per user who still has
        buffered lines. Returns (None, None) when everything has been drained.
        """
        # Field 3 of a log line is taken as the user id — matches rowtext[3] in new_batch().
        self.batch_user_list = [deq[0][3] for deq in self.user_logs if len(deq) > 0]
        if len(self.batch_user_list) == 0:
            return None, None
        else:
            straggler_mb_size = min(len(self.batch_user_list), self.mb_size * self.num_steps)
            # NOTE(review): each user contributes a single log line here (popleft),
            # so the batch is 2-D (users X num_feats), unlike the 3-D regular batches.
            batch = [self.user_logs[int(i)].popleft() for i in self.batch_user_list[:straggler_mb_size]]
            self.batch_user_list = self.batch_user_list[:straggler_mb_size]
            return np.array(batch), self.get_state_triples()

    def get_batch_from_overflow(self):
        """
        Called at beginning of each new batch to see if users have any premade matrix of events ready.
        Pulls at most one matrix per user (mini-batches never contain duplicate users).
        """
        batch = []
        batch_user_set = set()
        idx = 0
        while len(batch) < self.mb_size and idx < len(self.user_batch_overflow):
            matrix = self.user_batch_overflow[idx]
            user = matrix[0, 3]  # user id field of the first log line in the matrix
            if user not in batch_user_set:
                batch.append(matrix)
                batch_user_set.add(user)
                self.batch_user_list.append(user)
                self.user_batch_overflow.pop(idx)
            else:
                # Skip: this user already has a matrix in the batch; leave the
                # extra matrix in overflow for a later mini-batch.
                idx += 1
        return batch, batch_user_set

    def new_batch(self):
        """
        First checks user_batch_overflow to see if there are user batches ready for the new mini-batch.
        Iterates over the file, adding user's loglines to user_logs. When a user gets
        num_steps loglines, those num_steps loglines are added to the batch or if the user is already present
        in the batch to the user_batch_overflow. Now, when we have minibatch number of user batches, we return
        those as a batch. At most one user-batch for each user is allowed in a mini-batch
        """
        self.batch_user_list = []
        # First check overflow buffer for num_steps X sentence_length matrices for minibatch
        batch, batch_user_set = self.get_batch_from_overflow()
        # Now get more log lines from log file to make more num_steps X sentence_length matrices for minibatch
        while len(batch) < self.mb_size:
            l = self.data.readline()
            self.line_num += 1
            if l == '':  # EOF: switch to flush mode for subsequent calls
                self.flush = True
                if len(self.batch_user_list) > 0:
                    return np.array(batch), self.get_state_triples()  # batch is mb(user) X numsteps X sentence_length
                else:
                    return self.flush_batch()
            rowtext = [float(k) for k in l.strip().split(self.delimiter)]
            user = int(rowtext[3])  # field 3 is the user id
            self.user_logs[user].append(rowtext)
            if len(self.user_logs[user]) == self.num_steps:
                if user in batch_user_set:
                    # User already represented in this batch; park the matrix.
                    self.user_batch_overflow.append(np.array(self.user_logs[user]))
                else:
                    batch.append(np.array(self.user_logs[user]))
                    batch_user_set.add(user)
                    self.batch_user_list.append(user)
                self.user_logs[user] = deque()
        return np.array(batch), self.get_state_triples()  # batch is mb(user) X numsteps X sentence_length
class NormalizingReplayOnlineBatcher:
"""
For replay batching on aggregate DNN model.
For batching data too large to fit into memory. Written for one pass on data!!!
"""
def __init__(self, datafile, batch_size, skipheader=False,
delimiter=',', size_check=None, refresh_ratio=.5,
ratio=(1, 0), pool_size=5, alpha=0.5, datastart_index=3):
"""
:param datafile: File to read data from
:param batch_size: For mini-batching
:param skipheader: Use if there is a header on the data file
:param delimiter: Typically ' ' or ',' which delimits columns in data file
:param size_check: Ignore this
:param refresh_ratio: The proportion of the new mini-batch to use in refreshing the pool.
:param ratio: (tuple) (num_new, num_replay) The batcher will provide num_new new batches of data points
and then num_replay batches of old data points from the pool.
:param pool_size: The scale of the pool. The pool will be pool_size * batchsize data points.
:param alpha: (float) For exponential running mean and variance.
Lower alpha discounts older observations faster.
The higher the alpha, the further you take into consideration the past.
:param datastart_index: The csv field where real valued features to be normalized begins.
Assumed that all features beginnning at datastart_index till end of line
are real valued.
"""
assert ratio[0] > 0 and ratio[1] > 0, "Ratio values must be greater than zero."
assert pool_size >= batch_size, "Pool size must be larger than batch size."
assert refresh_ratio <= 1.0 and refresh_ratio > 0.0, "Refresh ratio must be between 1 an 0. This is the percentage of the minibatch to put into the replay pool."
self.pool_size = pool_size
self.index = 0
self.mod = 0
self.period = sum(ratio)
self.batch_function = [self.new_batch if i < ratio[0] else self.replay_batch for i in range(self.period)]
self.batch_size = batch_size
self.delimiter = delimiter
self.size_check = size_check
self.num_new = int(refresh_ratio*batch_size)
# initialize replay pool
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | true |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/safekit/tf_ops.py | safekit/tf_ops.py | """
Functions for building tensorflow computational graph models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import nn
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import rnn
from tensorflow.python.ops import variable_scope as vs
# So this will run without safekit installed
cyberpath = '/'.join(os.path.realpath(__file__).split('/')[:-3])
sys.path.insert(0, cyberpath)
# TODO: Look at fixing magic number in full covariance loss
def fan_scale(initrange, activation, tensor_in):
    """
    Creates a scaling factor for weight initialization according to best practices.

    :param initrange: Scaling in addition to fan_in scale.
    :param activation: A tensorflow non-linear activation function
    :param tensor_in: Input tensor to layer of network to scale weights for.
    :return: (float) scaling factor for weight initialization.
    """
    fan_in = float(tensor_in.get_shape().as_list()[1])
    if activation == tf.nn.relu:
        # He initialization scale for relu.
        return initrange * np.sqrt(2.0 / fan_in)
    # Xavier-style 1/sqrt(fan_in) scale otherwise.
    return initrange * (1.0 / np.sqrt(fan_in))
def ident(tensor_in):
    """
    The identity function: returns its input unchanged.

    :param tensor_in: Input to operation.
    :return: tensor_in
    """
    return tensor_in
def weights(distribution, shape, dtype=tf.float32, initrange=1e-5,
            seed=None, l2=0.0, name='weights'):
    """
    Wrapper parameterizing common constructions of tf.Variables.

    :param distribution: A string identifying the distribution: 'norm' for random normal,
        'tnorm' for truncated normal, 'uniform' for uniform, 'constant' for constant.
    :param shape: Shape of weight tensor.
    :param dtype: dtype for weights
    :param initrange: Scales standard normal and truncated normal, value of constant dist., and range of uniform dist. [-initrange, initrange].
    :param seed: For reproducible results.
    :param l2: Floating point number determining degree of of l2 regularization for these weights in gradient descent update.
    :param name: For variable scope.
    :return: A tf.Variable.
    :raises ValueError: If *distribution* is not one of the four supported names.
    """
    with tf.variable_scope(name):
        if distribution == 'norm':
            wghts = tf.Variable(initrange * tf.random_normal(shape, 0, 1, dtype, seed))
        elif distribution == 'tnorm':
            wghts = tf.Variable(initrange * tf.truncated_normal(shape, 0, 1, dtype, seed))
        elif distribution == 'uniform':
            wghts = tf.Variable(tf.random_uniform(shape, -initrange, initrange, dtype, seed))
        elif distribution == 'constant':
            wghts = tf.Variable(tf.constant(initrange, dtype=dtype, shape=shape))
        else:
            # Fixed message: balanced quotes, and the listed names now match
            # the values the code actually accepts ('norm', not 'rnorm').
            raise ValueError("Argument 'distribution' takes values 'norm', 'tnorm', 'uniform', 'constant'. "
                             "Received %s" % distribution)
        if l2 != 0.0:
            # Register an l2 penalty so training code can sum the 'losses' collection.
            tf.add_to_collection('losses', tf.multiply(tf.nn.l2_loss(wghts), l2, name=name + 'weight_loss'))
    return wghts
def batch_normalize(tensor_in, epsilon=1e-5, decay=0.999):
    """
    Batch Normalization:
    `Batch Normalization Accelerating Deep Network Training by Reducing Internal Covariate Shift`_

    An exponential moving average of means and variances is calculated to estimate sample mean
    and sample variance for evaluations. For testing pair placeholder is_training
    with [0] in feed_dict. For training pair placeholder is_training
    with [1] in feed_dict. Example:

    Let **train = 1** for training and **train = 0** for evaluation

    .. code-block:: python

        bn_deciders = {decider:[train] for decider in tf.get_collection('bn_deciders')}
        feed_dict.update(bn_deciders)

    During training the running statistics are updated, and batch statistics are used for normalization.
    During testing the running statistics are not updated, and running statistics are used for normalization.

    :param tensor_in: (tf.Tensor) Input Tensor.
    :param epsilon: (float) A float number to avoid being divided by 0.
    :param decay: (float) For exponential decay estimate of running mean and variance.
    :return: (tf.Tensor) Tensor with variance bounded by a unit and mean of zero according to the batch.
    """
    # [1] or [0]. Using a placeholder to decide which statistics to use for
    # normalization allows either the running stats or the batch stats to
    # be used without rebuilding the graph.
    is_training = tf.placeholder(tf.int32, shape=[None])
    tf.add_to_collection('bn_deciders', is_training)
    # Non-trainable running estimates of the population mean and variance.
    pop_mean = tf.Variable(tf.zeros([tensor_in.get_shape()[-1]]), trainable=False)
    pop_var = tf.Variable(tf.ones([tensor_in.get_shape()[-1]]), trainable=False)
    # calculate batch mean/var and running mean/var
    batch_mean, batch_variance = tf.nn.moments(tensor_in, [0])
    # The running mean/variance is updated only when is_training == 1
    # (the update term collapses to the old value when is_training == 0).
    running_mean = tf.assign(pop_mean,
                             pop_mean * (decay + (1.0 - decay)*(1.0 - tf.to_float(is_training))) +
                             batch_mean * (1.0 - decay) * tf.to_float(is_training))
    running_var = tf.assign(pop_var,
                            pop_var * (decay + (1.0 - decay)*(1.0 - tf.to_float(is_training))) +
                            batch_variance * (1.0 - decay) * tf.to_float(is_training))
    # Choose statistic: index 0 selects the running stats (eval), index 1 the batch stats (train).
    mean = tf.nn.embedding_lookup(tf.stack([running_mean, batch_mean]), is_training)
    variance = tf.nn.embedding_lookup(tf.stack([running_var, batch_variance]), is_training)
    shape = tensor_in.get_shape().as_list()
    # NOTE(review): names are swapped relative to the usual BN convention —
    # here 'gamma' (init 0.0) is the learned shift and 'beta' (init 1.0) the
    # learned scale in the transform below; the math is still standard BN.
    gamma = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[shape[1]], name='gamma'))
    beta = tf.Variable(tf.constant(1.0, dtype=tf.float32, shape=[shape[1]], name='beta'))
    # Batch Norm Transform: scale * (x - mean) / sqrt(var + eps) + shift
    inv = tf.rsqrt(epsilon + variance)
    tensor_in = beta * (tensor_in - mean) * inv + gamma
    return tensor_in
def dropout(tensor_in, prob):
    """
    Adds dropout node.
    `Dropout A Simple Way to Prevent Neural Networks from Overfitting`_

    :param tensor_in: Input tensor.
    :param prob: The percent of units to keep. When a float is given, a new
        keep-probability placeholder is created and registered in the
        'dropout_prob' collection (so callers can feed 1.0 at eval time);
        otherwise *prob* is assumed to already be a keep-probability
        tensor/placeholder and is used directly.
    :return: Tensor of the same shape of *tensor_in*.
    """
    if isinstance(prob, float):
        keep_prob = tf.placeholder(tf.float32)
        tf.add_to_collection('dropout_prob', (keep_prob, prob))
    else:
        # BUG FIX: previously a non-float ``prob`` fell through to an unbound
        # ``keep_prob`` and raised NameError at graph-build time.
        keep_prob = prob
    return tf.nn.dropout(tensor_in, keep_prob)
def layer_norm(h, epsilon=0.0):
    """
    Layer normalization over the feature dimension with learned gain and bias.

    :param h: (tensor) Hidden layer of neural network, shape (batch, dim).
    :param epsilon: (float) Optional variance floor added under the square root
        to guard against division by zero for constant rows. Defaults to 0.0,
        which preserves the original (unguarded) behavior exactly.
    :return: (tensor) Hidden layer after layer_norm transform
    """
    dim = h.get_shape().as_list()
    bias = tf.Variable(tf.zeros([1, dim[1]], dtype=tf.float32))
    gain = tf.Variable(tf.ones([1, dim[1]], dtype=tf.float32))
    # Per-example mean and variance across the feature axis.
    mu, variance = tf.nn.moments(h, [1], keep_dims=True)
    return (gain/tf.sqrt(variance + epsilon))*(h - mu) + bias
def dnn(x, layers=(100, 408), act=tf.nn.relu, scale_range=1.0, norm=None, keep_prob=None, name='nnet'):
    """
    An arbitrarily deep neural network. Output has non-linear activation.

    :param x: (tf.tensor) Input to the network.
    :param layers: Sequence of integer sizes of network layers (default is an
        immutable tuple — fixes the mutable-default-argument anti-pattern; the
        values are unchanged).
    :param act: Activation function to produce hidden layers of neural network.
    :param scale_range: (float or list) Scaling factor for initial range of weights (Set to 1/sqrt(fan_in) for tanh, sqrt(2/fan_in) for relu.
    :param norm: Normalization function. Could be layer_norm or other function that retains shape of tensor.
    :param keep_prob: (float) The percent of nodes to keep in dropout layers.
    :param name: (str) For naming and variable scope.
    :return: (tf.Tensor) Output of neural net. This will be just following a non linear transform, so that final activation has not been applied.
    """
    # A scalar scale_range is broadcast to one entry per layer.
    if not isinstance(scale_range, list):
        scale_range = [scale_range] * len(layers)
    assert len(layers) == len(scale_range)
    for ind, hidden_size in enumerate(layers):
        with tf.variable_scope('layer_%s' % ind):
            fan_in = x.get_shape().as_list()[1]
            W = tf.Variable(fan_scale(scale_range[ind], act, x) * tf.truncated_normal([fan_in, hidden_size],
                                                                                      mean=0.0, stddev=1.0,
                                                                                      dtype=tf.float32, seed=None,
                                                                                      name='W'))
            tf.add_to_collection(name + '_weights', W)
            # NOTE(review): the +0.1 relu offset is applied *outside* tf.Variable,
            # so the trainable variable is initialized to zeros and 0.1 acts as a
            # constant shift; presumably a 0.1 bias *initialization* was intended —
            # left unchanged to preserve existing model behavior.
            b = tf.Variable(tf.zeros([hidden_size])) + 0.1*(float(act == tf.nn.relu))
            tf.add_to_collection(name + '_bias', b)
            x = tf.matmul(x, W) + b
            if norm is not None:
                x = norm(x)
            x = act(x, name='h' + str(ind))  # The hidden layer
            tf.add_to_collection(name + '_activation', x)
            if keep_prob:
                x = dropout(x, keep_prob)
    return x
def bidir_lm_rnn(x, t, token_embed, layers, seq_len=None, context_vector=None, cell=tf.nn.rnn_cell.BasicLSTMCell):
    """
    Token level bidirectional LSTM language model that uses a sentence level context vector.

    :param x: Input to rnn
    :param t: Targets for language model predictions (typically next token in sequence)
    :param token_embed: (tensor) MB X ALPHABET_SIZE.
    :param layers: A list of hidden layer sizes for stacked lstm
    :param seq_len: A 1D tensor of mini-batch size for variable length sequences
    :param context_vector: (tensor) MB X 2*CONTEXT_LSTM_OUTPUT_DIM. Optional context to append to each token embedding
    :param cell: (class) A tensorflow RNNCell sub-class
    :return: (tensor) tuple-token_losses , (list of tensors) hidden_states, (tensor) final_hidden
    """
    token_set_size = token_embed.get_shape().as_list()[0]
    # Separate variable scopes keep forward and backward stack weights distinct.
    with tf.variable_scope('forward'):
        fw_cells = [cell(num_units) for num_units in layers]
        fw_cell = tf.nn.rnn_cell.MultiRNNCell(fw_cells, state_is_tuple=True)
    with tf.variable_scope('backward'):
        bw_cells = [cell(num_units) for num_units in layers]
        bw_cell = tf.nn.rnn_cell.MultiRNNCell(bw_cells, state_is_tuple=True)
    x_lookup = tf.nn.embedding_lookup(token_embed, x)
    # List of mb X embedding_size tensors
    input_features = tf.unstack(x_lookup, axis=1)
    if context_vector is not None:
        # Append the sentence-level context to every token embedding.
        input_features = [tf.concat([embedding, context_vector], 1) for embedding in input_features]
    # input_features: list of sentence long tensors (mb X embedding_size)
    hidden_states, fw_cell_state, bw_cell_state = tf.nn.static_bidirectional_rnn(fw_cell, bw_cell, input_features,
                                                                                 dtype=tf.float32,
                                                                                 sequence_length=seq_len,
                                                                                 scope='language_model')
    # Final hidden state of the top layer in each direction, concatenated.
    final_hidden = tf.concat((fw_cell_state[-1].h, bw_cell_state[-1].h), 1)
    f_hidden_states, b_hidden_states = tf.split(tf.stack(hidden_states), 2, axis=2)  # 2 sen_len X num_users X hidden_size tensors
    # truncate forward and backward output to align for prediction:
    # forward output at position i and backward output at position i+2 both
    # surround (and exclude) the token at position i+1 being predicted.
    f_hidden_states = tf.stack(tf.unstack(f_hidden_states)[:-2])  # sen_len-2 X num_users X hidden_size tensor
    b_hidden_states = tf.stack(tf.unstack(b_hidden_states)[2:])  # sen_len-2 X num_users X hidden_size tensor
    # concatenate forward and backward output for prediction
    prediction_states = tf.unstack(tf.concat((f_hidden_states, b_hidden_states), 2))  # sen_len-2 long list of num_users X 2*hidden_size tensors
    token_losses = batch_softmax_dist_loss(t, prediction_states, token_set_size)
    return token_losses, hidden_states, final_hidden
def lm_rnn(x, t, token_embed, layers, seq_len=None, context_vector=None, cell=tf.nn.rnn_cell.BasicLSTMCell):
    """
    Token level LSTM language model that uses a sentence level context vector.

    :param x: (tensor) Input to rnn
    :param t: (tensor) Targets for language model predictions (typically next token in sequence)
    :param token_embed: (tensor) MB X ALPHABET_SIZE.
    :param layers: A list of hidden layer sizes for stacked lstm
    :param seq_len: A 1D tensor of mini-batch size for variable length sequences
    :param context_vector: (tensor) MB X 2*CONTEXT_LSTM_OUTPUT_DIM. Optional context to append to each token embedding
    :param cell: (class) A tensorflow RNNCell sub-class
    :return: (tuple) token_losses (tensor), hidden_states (list of tensors), final_hidden (tensor)
    """
    token_set_size = token_embed.get_shape().as_list()[0]
    cells = [cell(num_units) for num_units in layers]
    cell = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)
    # mb X sentence_length X embedding_size
    x_lookup = tf.nn.embedding_lookup(token_embed, x)
    # List of mb X embedding_size tensors
    input_features = tf.unstack(x_lookup, axis=1)
    # input_features: list max_length of sentence long tensors (mb X embedding_size+context_size)
    if context_vector is not None:
        # Append the sentence-level context to every token embedding.
        input_features = [tf.concat([embedding, context_vector], 1) for embedding in input_features]
    # hidden_states: sentence length long list of tensors (mb X final_layer_size)
    # cell_state: data structure that contains the cell state for each hidden layer for a mini-batch (complicated)
    hidden_states, cell_state = tf.nn.static_rnn(cell, input_features,
                                                 initial_state=None,
                                                 dtype=tf.float32,
                                                 sequence_length=seq_len,
                                                 scope='language_model')
    # batch_size X sequence_length (see tf_ops for def)
    token_losses = batch_softmax_dist_loss(t, hidden_states, token_set_size)
    # Hidden state of the top LSTM layer after the last time step.
    final_hidden = cell_state[-1].h
    return token_losses, hidden_states, final_hidden
def join_multivariate_inputs(feature_spec, specs, embedding_ratio, max_embedding, min_embedding):
    """
    Makes placeholders for all input data, performs a lookup on an embedding matrix for each categorical feature,
    and concatenates the resulting real-valued vectors from individual features into a single vector for each data point in the batch.

    :param feature_spec: A dict {categorical: [c1, c2, ..., cp], continuous:[f1, f2, ...,fk]
                         which lists which features to use as categorical and continuous inputs to the model.
                         c1, ..., cp, f1, ...,fk should match a key in specs.
    :param specs: A python dict containing information about which indices in the incoming data point correspond to which features.
                  Entries for continuous features list the indices for the feature, while entries for categorical features
                  contain a dictionary- {'index': i, 'num_classes': c}, where i and c are the index into the datapoint, and number of distinct
                  categories for the category in question.
    :param embedding_ratio: Determines size of embedding vectors for each categorical feature: num_classes*embedding_ratio (within limits below)
    :param max_embedding: A limit on how large an embedding vector can be.
    :param min_embedding: A limit on how small an embedding vector can be.
    :return: A tuple (x, placeholderdict):
             (tensor with shape [None, Sum_of_lengths_of_all_continuous_feature_vecs_and_embedding_vecs],
             dict to store tf placeholders to pair with data, )
    """
    placeholderdict, embeddings, continuous_features = {}, {}, {}
    # Make placeholders for all input data and select embeddings for categorical data
    for dataname in feature_spec['categorical']:
        # Embedding width is proportional to the category count, clamped to
        # [min_embedding, max_embedding].
        embedding_size = math.ceil(embedding_ratio * specs[dataname]['num_classes'])
        embedding_size = int(max(min(max_embedding, embedding_size), min_embedding))
        with tf.variable_scope(dataname):
            placeholderdict[dataname] = tf.placeholder(tf.int32, [None])
            embedding_matrix = tf.Variable(1e-5*tf.truncated_normal((specs[dataname]['num_classes'], embedding_size), dtype=tf.float32))
            embeddings[dataname] = tf.nn.embedding_lookup(embedding_matrix, placeholderdict[dataname])
    for dataname in feature_spec['continuous']:
        placeholderdict[dataname] = tf.placeholder(tf.float32, [None, len(specs[dataname]['index'])])
        continuous_features[dataname] = placeholderdict[dataname]
    # concatenate all features.
    # BUG FIX: in Python 3, dict views cannot be added with '+'; materialize
    # them as lists before concatenating.
    feature_list = list(continuous_features.values()) + list(embeddings.values())
    return tf.concat(feature_list, 1, name='features'), placeholderdict
# ============================================================
# ================ LOSS FUNCTIONS ============================
# ============================================================
def softmax_dist_loss(truth, h, dimension, scale_range=1.0, U=None):
    """
    Per-example cross-entropy for categorical predictions; paired with a
    tensorflow optimizer this is multinomial logistic regression.

    :param truth: A tensorflow vector tensor of integer class labels.
    :param h: A placeholder if doing simple multinomial logistic regression, or the output of some neural network.
    :param dimension: Number of classes in output distribution.
    :param scale_range: For scaling the weight matrices (by default weights are initialized to 1/sqrt(fan_in) for tanh activation and sqrt(2/fan_in) for relu activation).
    :param U: Optional weight tensor (if not provided, a new weight tensor is made).
    :return: (Tensor[MB X 1]) Cross-entropy of true distribution vs. predicted distribution.
    """
    fan_in = h.get_shape().as_list()[1]
    if U is None:
        init = fan_scale(scale_range, tf.tanh, h) * tf.truncated_normal([fan_in, dimension],
                                                                        dtype=tf.float32,
                                                                        name='W')
        U = tf.Variable(init)
    b = tf.Variable(tf.zeros([dimension]))
    logits = tf.matmul(h, U) + b
    per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=truth)
    # Return as a column vector so downstream code can concatenate losses.
    return tf.reshape(per_example, [-1, 1])
def batch_softmax_dist_loss(truth, h, dimension, scale_range=1.0):
    """
    Sequence version of softmax cross-entropy; paired with a tensorflow
    optimizer this is multinomial logistic regression over every time step.
    It is designed for categorical predictions.

    Side effects: registers the softmax weights in the 'logit_weights'
    collection and the softmax probabilities in 'true_probabilities'.

    :param truth: (tf.Tensor) A tensorflow vector tensor of integer class labels.
    :param h: (tf.Tensor) A placeholder if doing simple multinomial logistic regression, or the output of some neural network.
    :param dimension: (int) Number of classes in output distribution.
    :param scale_range: (float) For scaling the weight matrices (by default weights are initialized to 1/sqrt(fan_in) for tanh activation and sqrt(2/fan_in) for relu activation).
    :return: (tf.Tensor, shape = [MB, Sequence_length]) Cross-entropy of true distribution vs. predicted distribution.
    """
    fan_in = h[0].get_shape().as_list()[1]
    initializer = fan_scale(scale_range, tf.tanh, h[0]) * tf.truncated_normal([fan_in, dimension],
                                                                              dtype=tf.float32,
                                                                              name='W')
    # Shared output projection, reused across all time steps via get_variable.
    U = tf.get_variable('softmax_weights', initializer=initializer)
    hidden_tensor = tf.stack(h)  # sequence_length X batch_size X final_hidden_size
    tf.add_to_collection('logit_weights', U)
    b = tf.get_variable('softmax_bias', initializer=tf.zeros([dimension]))
    # Tile U along the time axis so a single batched matmul covers every step.
    ustack = tf.stack([U]*len(h))  # sequence_length X final_hidden_size X dimension
    logits = tf.matmul(hidden_tensor, ustack) + b  # sequence_length X batch_size X dimension
    logits = tf.transpose(logits, perm=[1, 0, 2])  # batch_size X sequence_length X dimension
    tf.add_to_collection("true_probabilities", tf.nn.softmax(logits))  # added to store probabilities of true logline
    loss_matrix = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=truth)  # batch_size X sequence_length
    return loss_matrix
def eyed_mvn_loss(truth, h, scale_range=1.0):
    """
    Affine-transforms ``h`` and returns the elementwise squared error between
    the result and the target (identity-covariance Gaussian loss).

    :param truth: A tensor of target vectors.
    :param h: The output of a neural network post activation.
    :param scale_range: For scaling the weight matrices (by default weights are initialized to 1/sqrt(fan_in)) for
                        tanh activation and sqrt(2/fan_in) for relu activation.
    :return: (tf.Tensor[MB X D], None) squared_error, None
    """
    fan_in = h.get_shape().as_list()[1]
    dim = truth.get_shape().as_list()[1]
    init = fan_scale(scale_range, tf.tanh, h) * tf.truncated_normal([fan_in, dim],
                                                                    dtype=tf.float32, name='U')
    U = tf.Variable(init)
    b = tf.Variable(tf.zeros([dim]))
    prediction = tf.matmul(h, U) + b
    # Second element of the tuple (log-determinant) is None for the identity covariance.
    return tf.square(prediction - truth), None
def diag_mvn_loss(truth, h, scale_range=1.0, variance_floor=0.1):
    """
    Score targets under a diagonal-covariance Gaussian whose mean and
    per-dimension variance are affine functions of the network output.

    Returns the per-component Mahalanobis distances together with the log
    determinant of each covariance matrix; the log-determinant must be added
    to the loss so the model cannot trivially inflate the variance.

    :param truth: (tf.Tensor) The targets for this minibatch, shape [MB, D].
    :param h: (tf.Tensor) Post-activation output of the dnn.
    :param scale_range: (float) Multiplier for the weight initialization
        (roughly 1/sqrt(fan_in) scaling for tanh, sqrt(2/fan_in) for relu).
    :param variance_floor: (float, positive) Lower bound on each variance,
        guarding against a degenerate optimum.
    :return: (tf.Tensor shape=[MB X D], tf.Tensor shape=[MB X 1]) Loss matrix,
        log_of_determinants of covariance matrices.
    """
    input_width = h.get_shape().as_list()[1]
    target_dim = truth.get_shape().as_list()[1]
    weights = tf.Variable(
        fan_scale(scale_range, tf.tanh, h) * tf.truncated_normal([input_width, 2 * target_dim],
                                                                 dtype=tf.float32,
                                                                 name='U'))
    bias = tf.Variable(tf.zeros([2 * target_dim]))
    affine = tf.matmul(h, weights) + bias
    # First half of the columns parametrizes the mean, second half the raw variance.
    mean, raw_var = tf.split(affine, 2, axis=1)
    # Exponentiate for positivity, then clamp from below at the variance floor.
    variance = tf.maximum(tf.exp(raw_var), variance_floor)
    log_determinant = tf.reduce_sum(tf.log(variance), 1)  # MB x 1
    mahalanobis = tf.square(truth - mean) / variance  # MB x D
    return mahalanobis, tf.reshape(log_determinant, [-1, 1])
# TODO: Look at fixing magic number in full covariance loss
def full_mvn_loss(truth, h):
    """
    Takes the output of a neural network after its last activation and performs an affine transform.
    It returns the mahalonobis distances between the targets and the result of the affine transformation,
    according to a parametrized Normal distribution with full covariance. The log of the determinant
    of the parametrized covariance matrix is meant to be minimized to avoid a trivial optimization.

    :param truth: Actual datapoints to compare against learned distribution, shape [MB, D].
    :param h: Output of neural network (after last non-linear transform), shape [MB, fan_in].
    :return: (tf.Tensor[MB X 1], tf.Tensor[MB X 1]) Loss matrix, log_of_determinants of covariance matrices.

    .. note:: The constants 100, 0.0001 and +1 below are unexplained magic numbers
       (see the TODO above) that appear to condition the optimization -- confirm before changing.
    """
    fan_in = h.get_shape().as_list()[1]
    dimension = truth.get_shape().as_list()[1]
    # A single affine map produces both the mean (first `dimension` columns)
    # and the flattened covariance factor (remaining dimension**2 columns).
    U = 100*tf.Variable(tf.truncated_normal([fan_in, dimension + dimension**2],
                                            dtype=tf.float32,
                                            name='U'))
    b = tf.Variable(tf.zeros([dimension + dimension**2]))
    y = tf.matmul(h, U) + b
    mu = tf.slice(y, [0, 0], [-1, dimension])  # is MB x dimension
    var = tf.slice(y, [0, dimension], [-1, -1])*0.0001  # is MB x dimension^2 # WARNING WARNING TODO FIX THIS MAGIC NUMBER
    var = tf.reshape(var, [-1, dimension, dimension])  # make it a MB x D x D tensor (var is a superset of the lower triangular part of a Cholesky decomp)
    # Exponentiating the diagonal keeps the Cholesky factor positive-definite; +1 is another magic offset.
    var_diag = tf.exp(tf.matrix_diag_part(var)) + 1  # WARNING: FIX THIS MAGIC NUMBER
    var = tf.matrix_set_diag(var,var_diag)
    # Zero everything above the diagonal so var is a valid lower-triangular Cholesky factor L.
    var = tf.matrix_band_part(var, -1, 0)
    # Solve L z = (truth - mu); then ||z||^2 is the squared Mahalanobis distance.
    z = tf.squeeze(tf.matrix_triangular_solve(var, tf.reshape(truth - mu, [-1, dimension, 1]), lower=True, adjoint=False))  # z should be MB x D
    inner_prods = tf.reduce_sum(tf.square(z), 1)  # take row-wise inner products of z, leaving MB x 1 vector
    logdet = tf.reduce_sum(tf.log(tf.square(tf.matrix_diag_part(var))), 1)  # diag_part converts MB x D x D to MB x D, square and log preserve, then sum makes MB x 1
    loss_column = inner_prods  # is MB x 1 ... hard to track of individual features' contributions due to correlations
    tf.add_to_collection('full', var_diag)
    tf.add_to_collection('full', var)
    return tf.reshape(loss_column, [-1, 1]), tf.reshape(logdet, [-1, 1])
def multivariate_loss(h, loss_spec, placeholder_dict, variance_floor=0.01):
    """
    Computes a multivariate loss according to loss_spec.

    Builds one placeholder per target in loss_spec (registered in
    placeholder_dict under the key "target_<input_name>"), evaluates each
    contributor's loss against the shared hidden representation h, then
    concatenates the per-contributor losses -- followed by any log-determinant
    terms produced by the mvn losses -- into a single matrix.

    :param h: Final hidden layer of dnn or rnn. (Post-activation)
    :param loss_spec: A tuple of 3-tuples of the form (input_name, loss_function, dimension) where
                      input_name is the same as a target in datadict,
                      loss_function takes two parameters, a target and prediction,
                      and dimension is the dimension of the target.
    :param placeholder_dict: A dictionary to store placeholder tensors for target values.
    :param variance_floor: (float) Parameter for diag_mvn_loss.
    :return loss_matrix: (MB X concatenated_feature_size Tensor) Contains loss for all contributors for each data point.
    """
    # Unused name lists (log_det_names, loss_names) and the unused enumerate
    # index from the original were removed.
    log_det_list, loss_list = [], []
    for input_name, loss_func, dimension in loss_spec:
        with tf.variable_scope(input_name):
            # This input is a (classification or regression) target, so it needs a placeholder.
            if loss_func == softmax_dist_loss:
                x = tf.placeholder(tf.int32, [None])  # class-id targets
            else:
                x = tf.placeholder(tf.float32, [None, dimension])
            placeholder_dict["target_%s" % input_name] = x
            # Predict this input from the current hidden state.
            if loss_func == softmax_dist_loss:  # discrete
                component_wise_point_loss = loss_func(x, h, dimension)  # MB X 1
            elif loss_func == diag_mvn_loss:  # continuous
                component_wise_point_loss, logdet = loss_func(x, h, variance_floor=variance_floor)  # MB X DIM_MULTIVARIATE, MB X 1
                if logdet is not None:
                    log_det_list.append(logdet)
            else:  # continuous
                component_wise_point_loss, logdet = loss_func(x, h)  # MB X DIM_MULTIVARIATE, MB X 1
                if logdet is not None:
                    log_det_list.append(logdet)
            loss_list.append(component_wise_point_loss)
    # Log-determinant terms go after all point losses so the contributor
    # columns stay contiguous in the output matrix.
    loss_list.extend(log_det_list)
    loss_matrix = tf.concat(loss_list, 1)  # is MB x (total num contributors)
    return loss_matrix
def layer_norm_rnn(inputs,
                   initial_state=None,
                   layers=(10,),
                   sequence_lengths=None,
                   state_index=-1):
    """
    Stacked recurrence whose first layer is a BasicLSTM cell and whose
    remaining layers are layer-normalized LSTM cells.

    :param inputs: A list, one entry per time step of the longest sequence in
        the batch, of matrices shaped [num_sequences X feature_dimension].
    :param initial_state: Initialized first hidden states: a tuple of
        len(layers) tuples of cell and hidden state tensors.
    :param layers: Number of nodes in each of the stacked lstm layers.
    :param sequence_lengths: A vector of sequence lengths of size batch_size.
    :param state_index: -1 returns the last state, None returns all states,
        1 returns the second state.
    :return: hidden_states, current_state
    """
    cell_stack = [nn.rnn_cell.BasicLSTMCell(layers[0], state_is_tuple=True)]
    cell_stack += [tf.contrib.rnn.LayerNormBasicLSTMCell(width) for width in layers[1:]]
    multi_cell = nn.rnn_cell.MultiRNNCell(cell_stack, state_is_tuple=True)
    return true_bptt_rnn(multi_cell,
                         inputs,
                         initial_state=initial_state,
                         dtype=tf.float32,
                         sequence_length=sequence_lengths,
                         state_index=state_index)
def swapping_rnn(inputs,
                 initial_state=None,
                 layers=(10,),
                 sequence_lengths=None,
                 state_index=-1):
    """
    Stacked BasicLSTM recurrence over a batch of (possibly jagged) sequences.

    :param inputs: A list, one entry per time step of the longest sequence in
        the batch, of matrices shaped [num_sequences X feature_dimension].
    :param initial_state: Initialized first hidden states: a tuple of
        len(layers) tuples of cell and hidden state tensors.
    :param layers: Number of nodes in each of the stacked lstm layers.
    :param sequence_lengths: A vector of sequence lengths of size batch_size.
    :param state_index: -1 returns the last state, None returns all states,
        1 returns the second state.
    :return: hidden_states, current_state
    """
    lstm_layers = [nn.rnn_cell.BasicLSTMCell(width, state_is_tuple=True) for width in layers]
    stacked_cell = nn.rnn_cell.MultiRNNCell(lstm_layers, state_is_tuple=True)
    return true_bptt_rnn(stacked_cell,
                         inputs,
                         initial_state=initial_state,
                         dtype=tf.float32,
                         sequence_length=sequence_lengths,
                         state_index=state_index)
# ==================================================================================
# =======================Adapted From Tensorflow====================================
# ==================================================================================
def _state_size_with_prefix(state_size, prefix=None):
    """Helper function that enables int or TensorShape shape specification.

    Converts a size specification (an integer or a TensorShape) into a list
    of integers, optionally prepending additional dimensions.

    :param state_size: TensorShape or int that specifies the size of a tensor.
        prefix: optional additional list of dimensions to prepend.
    :return: result_state_size: list of dimensions the resulting tensor size.
    :raises TypeError: if prefix is given but is not a list.
    """
    dims = tensor_shape.as_shape(state_size).as_list()
    if prefix is None:
        return dims
    if not isinstance(prefix, list):
        raise TypeError("prefix of _state_size_with_prefix should be a list.")
    return prefix + dims
def true_bptt_rnn(cell, inputs, initial_state=None, dtype=None,
sequence_length=None, scope=None, state_index=1): ### Adapted From Tensorflow
"""
Creates a recurrent neural network specified by RNNCell `cell`.
The simplest form of RNN network generated is:
.. code:: python
state = cell.zero_state(...)
outputs = []
for input_ in inputs:
output, state = cell(input_, state)
outputs.append(output)
return (outputs, state)
However, a few other options are available:
An initial state can be provided.
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | true |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/safekit/models/simple_lm.py | safekit/models/simple_lm.py | """
Simple language model code for network modeling.
File name convention:
---------------------
- lr: learnrate for gradient descent
- nl: number of stacked lstm layers
- hs: size of hidden layers (presumes all layers have same number of hidden units)
- mb: Size of mini-batch
- bc: max bad count for early stopping
- em: Size of token embeddings
- rs: Random seed for reproducible results
stdout
------
For each mini-batch the following is printed to standard output ::
batchsize line_number second status filename index current_loss
Where:
- batchsize: The size of the mini-batch
- line_number: Line number from original auth.txt file (may be off by 1)
- second: The second of the first event in the mini-batch
- status: Whether the model is updating or merely forward propagating
- filename: The current file being processed
- index: The number of samples processed to date
- current_loss: The average loss over the mini-batch
File output:
------------
::
batch_num line second day user red loss
Where:
- batch_num: The mini-batch this event was a part of
- line: Line number from original auth.txt file (may be off by 1)
- second: The second which the event occurred on
- day: The day the event occurred on
- user: The user who performed the event
- red: Whether this event was a labeled red team activity (1 for red team activity 0 otherwise)
- loss: The anomaly score for this event
Example calls
-------------
**Simple character based language model.** ::
python safekit/models/simple_lm.py results/ safekit/features/specs/lm/lanl_char_config.json data_examples/lanl/lm_feats/raw_day_split/ -test -skipsos -jagged
.. Note :: The output results will be printed to /tmp/lanl_result/ and then moved to results/ upon completion
to avoid experiment slowdown of constant network traffic when using a distributed file system.
Input Data
----------
The format of the input makes the following assumptions:
- Input files are together in datafolder, one file for each day.
- Input files are plain text files with one line of integers per log line representing meta data and the tokens from log text.
- Input format for fixed length sequences ::
line_nums second day user red logtokenid1 .... logtokenid_SentenceLen
- Zero padded Input format for jagged sequences ::
line_nums second day user red SentenceLen logtokenid1 .... logtokenid_SentenceLen 0 0 .... 0
"""
import os
import sys
# So we can run this code on arbitrary environment which has tensorflow but not safekit installed
cyberpath = '/'.join(os.path.realpath(__file__).split('/')[:-3])
sys.path.insert(0, cyberpath)
import tensorflow as tf
import json
from safekit.batch import OnlineBatcher
from safekit.graph_training_utils import EarlyStop, ModelRunner
from safekit.tf_ops import lm_rnn, bidir_lm_rnn
from safekit.util import get_mask, Parser
import time
import numpy as np
def return_parser():
    """
    Defines and returns argparse ArgumentParser object.

    :return: ArgumentParser
    """
    cli = Parser("Simple token based rnn for network modeling.")
    cli.add_argument('results_folder', type=str,
                     help='The folder to print results to.')
    cli.add_argument('config', type=str,
                     help='The data spec.')
    cli.add_argument("datafolder", type=str,
                     help='The folder where the data is stored.')
    cli.add_argument('-learnrate', type=float, default=0.001,
                     help='Step size for gradient descent.')
    cli.add_argument("-lm_layers", nargs='+', type=int, default=[10],
                     help="A list of hidden layer sizes.")
    cli.add_argument("-context_layers", nargs='+', type=int, default=[10],
                     help="decoy arg.")
    cli.add_argument("-numsteps", type=int, default=10,
                     help="decoy arg.")
    cli.add_argument('-mb', type=int, default=128,
                     help='The mini batch size for stochastic gradient descent.')
    cli.add_argument('-debug', action='store_true',
                     help='Use this flag to print feed dictionary contents and dimensions.')
    # NOTE(review): type=str with an int default looks inconsistent -- confirm
    # how EarlyStop consumes maxbadcount before changing it.
    cli.add_argument('-maxbadcount', type=str, default=20,
                     help='Threshold for early stopping.')
    cli.add_argument('-em', type=int, default=20,
                     help='Size of embeddings for categorical features.')
    cli.add_argument('-encoding', type=str, default=None,
                     help='Can be "oct", "raw" or "word"')
    cli.add_argument('-random_seed', type=int, default=5,
                     help='Random seed for reproducible experiments.')
    cli.add_argument('-jagged', action='store_true',
                     help='Whether using sequences of variable length (Input should'
                          'be zero-padded to max_sequence_length.')
    cli.add_argument('-skipsos', action='store_true',
                     help='Whether to skip a start of sentence token.')
    cli.add_argument('-bidir', action='store_true',
                     help='Whether to use bidirectional lstm for lower tier.')
    cli.add_argument('-test', action='store_true',
                     help='Whether to run on a subset of the data (5000 lines from days 1,2,3) or the entire set.')
    cli.add_argument('-verbose', type=int, default=1, help='Whether to print loss during training.')
    cli.add_argument('-delimiter', type=str, default=',',
                     help="Delimiter for input text file. You should be using ' ' for the dayshuffled cert.")
    cli.add_argument('-cell_type', type=str, default='lstm',
                     help='Can be either "lstm", "ident_ran", or "ran"')
    return cli
def write_results(datadict, pointloss, outfile, batch):
    """
    Writes loss for each datapoint, along with meta-data, to file.

    :param datadict: Dictionary of data names (str) keys to numpy matrix values for this mini-batch.
    :param pointloss: MB X 1 numpy array of per-event losses.
    :param outfile: Open file object to write results to.
    :param batch: The mini-batch number for these events.
    :return: None
    """
    columns = [datadict[key].flatten().tolist()
               for key in ('line', 'second', 'day', 'user', 'red')]
    columns.append(pointloss.flatten().tolist())
    for line, sec, day, usr, red, loss in zip(*columns):
        outfile.write('%s %s %s %s %s %s %r\n' % (batch, line, sec, day, usr, red, loss))
# Lookup from the -cell_type / -upper_cell_type CLI option value to the rnn
# cell constructor (also imported by tiered_lm.py).
CELL = {'lstm': tf.nn.rnn_cell.BasicLSTMCell}
if __name__ == '__main__':
    # Fix: the original parsed the command line twice and seeded tf/numpy
    # twice; both are now done exactly once (behavior is unchanged since the
    # duplicates were idempotent).
    args = return_parser().parse_args()
    direction = ('fwd', 'bidir')[args.bidir]
    # Results file name encodes the hyperparameter configuration (see the
    # module docstring for the abbreviation key).
    outfile_name = "simple_%s_%s_%s_%s_lr_%s_nl_%s_hs_%s_mb_%s_bc_%s_em_%s_rs_%s" % (direction,
                                                                                     args.encoding,
                                                                                     args.cell_type,
                                                                                     time.ctime(time.time()).replace(' ', '-'),
                                                                                     args.learnrate,
                                                                                     len(args.lm_layers),
                                                                                     args.lm_layers[0],
                                                                                     args.mb,
                                                                                     args.maxbadcount,
                                                                                     args.em,
                                                                                     args.random_seed)
    conf = json.load(open(args.config, 'r'))
    if not args.results_folder.endswith('/'):
        args.results_folder += '/'
    if not args.datafolder.endswith('/'):
        args.datafolder += '/'
    # Seed tensorflow and numpy once for reproducible experiments.
    tf.set_random_seed(args.random_seed)
    np.random.seed(args.random_seed)
    # Stage results locally to avoid constant network traffic on distributed
    # file systems; moved to args.results_folder on completion.
    if "lanl_result" not in os.listdir("/tmp"):
        os.system("mkdir /tmp/lanl_result; chmod o+rwx /tmp/lanl_result")
    if not args.bidir:
        language_model = lm_rnn
    else:
        language_model = bidir_lm_rnn
    outfile = open('/tmp/lanl_result/' + outfile_name, 'w')
    # Effective tokens per line: drop the meta/sos tokens; the bidirectional
    # model keeps one extra position.
    sentence_length = conf["sentence_length"] - 1 - int(args.skipsos) + int(args.bidir)
    token_set_size = conf["token_set_size"]
    x = tf.placeholder(tf.int32, [None, sentence_length])  # input token ids
    t = tf.placeholder(tf.int32, [None, sentence_length - 2 * args.bidir])  # target token ids
    ph_dict = {'x': x, 't': t}
    if args.jagged:
        seq_len = tf.placeholder(tf.int32, [None])
        ph_dict['lengths'] = seq_len
    else:
        seq_len = None
    token_embed = tf.Variable(tf.truncated_normal([token_set_size, args.em]))  # Initial embeddings vocab X embedding size
    token_losses, hidden_states, final_hidden = language_model(x, t, token_embed,
                                                               args.lm_layers, seq_len=seq_len,
                                                               cell=CELL[args.cell_type])
    if args.jagged:
        # Mask out the loss contributed by zero-padding before aggregating.
        ph_dict['mask'] = tf.placeholder(tf.float32, [None, sentence_length - 2 * args.bidir])
        token_losses *= ph_dict['mask']
        line_losses = tf.reduce_sum(token_losses, axis=1)  # batch_size X 1
    else:
        line_losses = tf.reduce_mean(token_losses, axis=1)  # batch_size X 1
    avgloss = tf.reduce_mean(line_losses)  # scalar
    model = ModelRunner(avgloss, ph_dict, learnrate=args.learnrate, debug=args.debug,
                        decay=True,
                        decay_rate=0.99, decay_steps=20)
    # training loop
    start_time = time.time()
    jag = int(args.jagged)
    skipsos = int(args.skipsos)

    def trainday(is_training, f):
        """Stream one day's file through the model, updating weights iff is_training."""
        batch_num = 0
        # NOTE(review): delimiter is hard-coded to ' ' here, so args.delimiter
        # is unused -- presumably intentional for this data format; confirm.
        data = OnlineBatcher(args.datafolder + f, args.mb, delimiter=' ')
        raw_batch = data.next_batch()
        current_loss = sys.float_info.max
        not_early_stop = EarlyStop(args.maxbadcount)
        endx = raw_batch.shape[1] - int(not args.bidir)
        endt = raw_batch.shape[1] - int(args.bidir)
        continue_training = not_early_stop(raw_batch, current_loss)
        while continue_training:  # mat is not None and self.badcount < self.badlimit and loss != inf, nan:
            # Columns 0-4 are meta data; the remainder are (optionally length-
            # prefixed, optionally sos-prefixed) token ids.
            datadict = {'line': raw_batch[:, 0],
                        'second': raw_batch[:, 1],
                        'day': raw_batch[:, 2],
                        'user': raw_batch[:, 3],
                        'red': raw_batch[:, 4],
                        'x': raw_batch[:, (5 + jag + skipsos):endx],
                        't': raw_batch[:, (6 + jag + skipsos):endt]}
            if args.jagged:
                datadict['lengths'] = raw_batch[:, 5]
                datadict['mask'] = get_mask(datadict['lengths'] - 2 * args.bidir - args.skipsos, sentence_length - 2 * args.bidir)
                assert np.all(datadict['lengths'] <= x.get_shape().as_list()[1]), 'Sequence found greater than num_tokens_predicted'
                assert np.nonzero(datadict['lengths'])[0].shape[0] == datadict['lengths'].shape[0], \
                    'Sequence lengths must be greater than zero.' \
                    'Found zero length sequence in datadict["lengths"]: %s' % datadict['lengths']
            eval_tensors = [avgloss, line_losses]
            _, current_loss, pointloss = model.train_step(datadict, eval_tensors,
                                                          update=is_training)
            if not is_training:
                write_results(datadict, pointloss, outfile, batch_num)
            batch_num += 1
            if args.verbose:
                print('%s %s %s %s %s %s %r' % (raw_batch.shape[0],
                                                datadict['line'][0],
                                                datadict['second'][0],
                                                ('fixed', 'update')[is_training],
                                                f,
                                                data.index,
                                                current_loss))
            raw_batch = data.next_batch()
            continue_training = not_early_stop(raw_batch, current_loss)
            if continue_training < 0:
                exit(0)

    weekend_days = conf["weekend_days"]
    if args.test:
        files = conf["test_files"]  # 5000 lines from each of day 0, day 1 and day 2
    else:
        files = [str(i) + '.txt' for i in range(conf["num_days"]) if i not in weekend_days]
    outfile.write("batch line second day user red loss\n")
    # Train on day k, then score day k+1 with fixed weights.
    for idx, f in enumerate(files[:-1]):
        trainday(True, f)
        trainday(False, files[idx + 1])
    outfile.close()
    total_time = time.time() - start_time
    print('elapsed time: %s' % total_time)
    # Move results from the local staging directory to the requested folder.
    os.system("mv /tmp/lanl_result/%s %s" % (outfile_name, args.results_folder + outfile_name))
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/safekit/models/tiered_lm.py | safekit/models/tiered_lm.py | #!/usr/bin/env python
"""
This is a two tiered language model for anomaly detection, where the second tier LSTM (log line level)
takes the concatenation of the average sentence vector and final hidden state
from the lower tier (token level) LSTM as input, creating a new context vector and hidden state
for the given user.
Example Command for running a model configuration
-------------------------------------------------
**Raw (character token) tiered model** (The jagged parameter lets the model know there are variable length sequences) ::
python safekit/models/tiered_lm.py results/ safekit/features/specs/lm/lanl_char_config.json data_examples/lanl/lm_feats/raw_day_split/ -test -skipsos -jagged
.. Note ::
The output results will be printed to /tmp/lanl_result/ and then moved to results/ upon completion
to avoid experiment slowdown of constant network traffic.
File name convention:
---------------------
- em: embedding size for token embedding
- ns: number of loglines per user per mini-batch for truncated back propagation through time
- mb: Minibatch size (mini-batch over users)
- lr: learnrate (step size for gradient descent)
- cl: context layers (number of hidden layers for top level (log line level) context rnn)
- lml: language model layers (number of hidden layers for the bottom level, token level, rnn)
- rs: random seed for reproducible results
stdout
------
For each mini-batch the following is printed to standard output ::
batchsize line_number second status filename index current_loss
Where:
- batchsize: The size of the mini-batch
- line_number: Line number from original auth.txt file (may be off by 1)
- second: The second of the first event in the mini-batch
- status: Whether the model is updating or merely forward propagating
- filename: The current file being processed
- index: The number of samples processed to date
- current_loss: The average loss over the mini-batch
File output
-----------
::
batch_num line second day user red loss
Where:
- batch_num: The mini-batch this event was a part of
- line: Line number from original auth.txt file (may be off by 1)
- second: The second which the event occurred on
- day: The day the event occurred on
- user: The user who performed the event
- red: Whether this event was a labeled red team activity (1 for red team activity 0 otherwise)
- loss: The anomaly score for this event
.. Note ::
The runtime of the experiment is also printed to a file called runtimes.txt at the end of training
Input Data
----------
The format of the input makes the following assumptions:
- Input files are together in datafolder, one file for each day.
- Input files are plain text files with one line of integers per log line representing meta data and the tokens from log text.
- Input format for fixed length sequences ::
line_nums second day user red logtokenid1 .... logtokenid_SentenceLen
- Zero padded Input format for jagged sequences ::
line_nums second day user red SentenceLen logtokenid1 .... logtokenid_SentenceLen 0 0 .... 0
"""
import os
import sys
# So we can run this code on arbitrary environment which has tensorflow but not safekit installed
cyberpath = '/'.join(os.path.realpath(__file__).split('/')[:-3])
sys.path.insert(0, cyberpath)
import tensorflow as tf
import numpy as np
import time
from safekit.batch import OnlineLMBatcher
from simple_lm import write_results, CELL
from safekit.tf_ops import lm_rnn, bidir_lm_rnn
from safekit.graph_training_utils import ModelRunner
from safekit.util import get_mask, Parser
import json
import math
def return_parser():
    """
    Defines and returns argparse ArgumentParser object for the tiered model.

    :return: ArgumentParser
    """
    parser = Parser()
    parser.add_argument('results_folder', type=str,
                        help='The folder to print results to.')
    parser.add_argument('config', type=str,
                        help='The data spec.')
    parser.add_argument("datafolder", type=str,
                        help="File with token features")
    parser.add_argument('-encoding', type=str, default=None,
                        help='Can be "oct", "raw" or "word"')
    parser.add_argument("-em", type=int, default=5,
                        help="Dimension of token embeddings")
    parser.add_argument("-numsteps", type=int, default=3,
                        help="length of unrolled context_rnn, number of log lines per user per train step")
    parser.add_argument('-mb', type=int, default=64,
                        help='Number of users in mini-batch.')
    parser.add_argument('-learnrate', type=float, default=0.001,
                        help='Step size for gradient descent.')
    parser.add_argument("-context_layers", type=int, nargs='+', default=[10],
                        help='List of hidden layer sizes for context lstm.')
    parser.add_argument('-lm_layers', type=int, nargs='+', default=[5],
                        help='List of hidden layer sizes for token lstm.')
    parser.add_argument('-debug', action='store_true',
                        help='Use this flag to print feed dictionary contents and dimensions.')
    parser.add_argument('-random_seed', type=int, default=5,
                        help='Random seed for reproducible experiments.')
    parser.add_argument('-jagged', action='store_true',
                        help='Whether using sequences of variable length (Input should'
                             'be zero-padded to max_sequence_length.')
    parser.add_argument('-skipsos', action='store_true',
                        help='Whether to skip a start of sentence token.')
    parser.add_argument('-bidir', action='store_true',
                        help='Whether to use bidirectional lstm for lower tier.')
    parser.add_argument('-test', action='store_true',
                        help='Whether to run on a subset of the data (5000 lines from days 1,2,3) or the entire set.')
    parser.add_argument('-verbose', type=int, default=1,
                        help='Whether to print loss during training.')
    parser.add_argument('-delimiter', type=str, default=',',
                        help="Delimiter for input text file")
    parser.add_argument('-cell_type', type=str, default='lstm',
                        help='Can be either "lstm", "ident_ran", or "ran"')
    parser.add_argument('-upper_cell_type', type=str, default='lstm',
                        help='Can be either "lstm", "ident_ran", or "ran"')
    return parser
class ContextRNN:
    """
    Upper-tier (log-line level) stacked LSTM that carries its own lstm state
    tuple forward between calls.
    """

    def __init__(self, layers, initial_state,
                 cell=tf.nn.rnn_cell.LSTMCell):
        """
        :param layers: List of hidden layer sizes.
        :param initial_state: List of numlayers lists of tensors (cell_state, hidden_state),
            or list of lstm state tuples (named tuples of tensors
            (c=cell_state, h=hidden_state)).
        :param cell: Type of rnn cell to use.
        """
        self.cell_type = cell
        self.layers = layers
        self.state = initial_state
        self.cell_stack = tf.nn.rnn_cell.MultiRNNCell([self.cell_type(width) for width in layers])

    def __call__(self, lower_outputs, final_hidden, seq_len):
        """
        Advance the context LSTM one time step and remember the new state.

        :param lower_outputs: List of output tensors from the token-level rnn.
        :param final_hidden: Final hidden state from the token-level rnn.
        :param seq_len: A 1D tensor of size mb giving lengths of sequences in mb for this time step.
        :return: (tensor, LSTMStateTuple) output, state
        """
        step_input = ContextRNN._create_input(lower_outputs, final_hidden, seq_len)
        output, self.state = self.cell_stack(step_input, self.state)
        return output, self.state

    @staticmethod
    def _create_input(lower_outputs, final_hidden, seq_len):
        """
        Build the context rnn input: mean token-level hidden state
        concatenated with the final token-level hidden state.

        :param lower_outputs: The list of output Tensors from the token level rnn.
        :param final_hidden: The final hidden state from the token level rnn.
        :param seq_len: A 1D tensor of size mb of token sequence lengths, or
            None for fixed-length input (plain mean is used instead).
        :return: Tensor used as input to the context rnn.
        """
        stacked = tf.stack(lower_outputs, axis=0)
        if seq_len is None:
            mean_hidden = tf.reduce_mean(stacked, axis=0)
        else:
            mean_hidden = tf.reduce_sum(stacked, axis=0)/seq_len
        return tf.concat([mean_hidden, final_hidden], 1)
def tiered_lm(token_set_size, embedding_size, ph_dict, context_layers, lm_layers,
              numsteps, bidir=False, jagged=False):
    """
    Build the two-tier language-model graph: a token-level rnn per log line,
    feeding a log-line level (context) rnn unrolled for numsteps steps.

    :param token_set_size: (int) Number of unique tokens in token set
    :param embedding_size: (int) Dimensionality of token embeddings
    :param ph_dict: dictionary of tensorflow placeholders and lists of tensorflow placeholders
    :param context_layers: List of hidden layer sizes for stacked context LSTM
    :param lm_layers: list of hidden layer sizes for stacked sentence LSTM
    :param numsteps: How many steps (log lines) to unroll the upper tier RNN
    :param bidir: Whether to use bidirectional LSTM for lower tier model
    :param jagged: Whether or not variable length sequences are used
    :return: total_loss (scalar tensor),
             context_vector (tensor),
             line_loss_matrix (tensor), Losses for each line in mini-batch
             context_state (LSTMStateTuple) Final state of upper tier model

    .. note:: NOTE(review): this function reads ``args``, ``ctxt_size`` and
       ``sentence_length`` from module scope (set in the __main__ block), so it
       can only be called after argument parsing -- confirm before reusing.
    """
    if bidir:
        language_model = bidir_lm_rnn
    else:
        language_model = lm_rnn
    # =========================================================
    # ========== initialize token level lstm variables ========
    # =========================================================
    if jagged:
        ph_dict['lens'] = []
        ph_dict['masks'] = []
    # Context vector fed into the lower tier for the first unrolled step.
    context_vector = tf.placeholder(tf.float32, [None, ctxt_size], name="context_vector")
    ph_dict['context_vector'] = context_vector
    tf.add_to_collection('context_vector', ph_dict['context_vector'])
    token_embed = tf.Variable(tf.truncated_normal([token_set_size, embedding_size])) # Initial embeddings vocab X embedding size
    total_loss = 0.0
    # =========================================================
    # ======= initialize log line level (context) lstm ========
    # =========================================================
    ph_dict['c_state_init'] = [tf.placeholder(tf.float32, [None, c_size]) for c_size in context_layers]
    ph_dict['h_state_init'] = [tf.placeholder(tf.float32, [None, h_size]) for h_size in context_layers]
    context_init = [tf.nn.rnn_cell.LSTMStateTuple(ph_dict['c_state_init'][i],
                                                  ph_dict['h_state_init'][i])
                    for i in range(len(context_layers))]
    ctxt_rnn = ContextRNN(context_layers, context_init, cell=CELL[args.upper_cell_type])
    # =========================================================
    # ======= initiate loop that ties together tiered lstm ====
    # =========================================================
    # Each iteration builds one unrolled upper-tier step; lower-tier weights
    # are shared across steps via reuse_variables() at the end of each pass.
    with tf.variable_scope("reuse_scope") as vscope:
        for i in range(numsteps):
            x = tf.placeholder(tf.int64, [None, sentence_length])
            t = tf.placeholder(tf.int64, [None, sentence_length-2*bidir])
            ph_dict['x'].append(x)
            ph_dict['t'].append(t)
            if jagged:
                seq_len = tf.placeholder(tf.int32, [None])
                ph_dict['lens'].append(seq_len)
            else:
                seq_len = None
            token_losses, hidden_states, final_hidden = language_model(x, t, token_embed, lm_layers,
                                                                       seq_len=seq_len,
                                                                       context_vector=context_vector,
                                                                       cell=CELL[args.cell_type])
            if jagged:
                # Mask padding positions out of the loss before aggregating.
                ph_dict['masks'].append(tf.placeholder(tf.float32, [None, sentence_length-2*bidir]))
                token_losses *= ph_dict['masks'][-1]
                line_losses = tf.reduce_sum(token_losses, axis=1) # batch_size X 1
                sequence_lengths = tf.reshape(tf.cast(ph_dict['lens'][-1], tf.float32), (-1, 1))
            else:
                line_losses = tf.reduce_mean(token_losses, axis=1) # batch_size X 1
                sequence_lengths = None
            avgloss = tf.reduce_mean(line_losses) # scalar
            total_loss += avgloss
            if i == 0:
                line_loss_matrix = tf.reshape(line_losses, [1, -1])
                tf.add_to_collection('first_line_loss_matrix', line_loss_matrix)
            else:
                # Accumulate one row of per-line losses per unrolled step.
                line_loss_matrix = tf.concat((line_loss_matrix, tf.reshape(line_losses, [1, -1])), 0)
            # Advance the upper-tier rnn; its output becomes the next step's context.
            context_vector, context_state = ctxt_rnn(hidden_states,
                                                     final_hidden,
                                                     sequence_lengths)
            tf.add_to_collection('context_vector', context_vector)
            tf.add_to_collection('context_state', context_state)
            # Share lower-tier variables across all subsequent unrolled steps.
            tf.get_variable_scope().reuse_variables()
    total_loss /= float(numsteps)
    return total_loss, context_vector, line_loss_matrix, context_state
if __name__ == "__main__":
    # ===========================================================================
    # =========================PARSE ARGUMENTS===================================
    # ===========================================================================
    args = return_parser().parse_args()
    conf = json.load(open(args.config, 'r'))
    # ContextRNN state plumbing assumes one common context layer width.
    assert all(x == args.context_layers[0] for x in args.context_layers), 'Different sized context layers not supported.'
    assert args.numsteps > 1, 'Must have at least two upper tier time steps to build graph for tiered lstm.'
    if not args.results_folder.endswith('/'):
        args.results_folder += '/'
    # Seed tensorflow and numpy for reproducible experiments.
    tf.set_random_seed(args.random_seed)
    np.random.seed(args.random_seed)
    # Effective tokens per line: drop the meta/sos tokens; the bidirectional
    # model keeps one extra position.
    sentence_length = (conf['sentence_length'] - 1) - int(args.skipsos) + int(args.bidir)
    token_set_size = conf['token_set_size']
    ctxt_size = args.context_layers[0]
    direction = ('fwd', 'bidir')[args.bidir]
    # Results file name encodes the hyperparameter configuration (see the
    # module docstring for the abbreviation key).
    results_file = 'tier_%s_%s_%s_%s__em_%s__ns_%s__mb_%s__lr_%s__cl_%s__lml_%s__rs_%s' % (direction,
                                                                                           args.encoding,
                                                                                           args.cell_type,
                                                                                           time.ctime(time.time()).replace(' ', '-'),
                                                                                           args.em,
                                                                                           args.numsteps,
                                                                                           args.mb,
                                                                                           args.learnrate,
                                                                                           args.context_layers[0],
                                                                                           args.lm_layers[0],
                                                                                           args.random_seed)
    # if the -test flag passed, store predictions in a temporary file
    if "lanl_results" not in os.listdir("/tmp"):
        os.system("mkdir /tmp/lanl_results; chmod g+rwx /tmp/lanl_results")
    outfile = open("/tmp/lanl_results/" + results_file, 'w')
    outfile.write("batch line second day user red loss\n")
    mode = ('fixed', 'update')
    jag = int(args.jagged)
    skipsos = int(args.skipsos)
    # ===========================================================================
    # =========================BUILD GRAPH=======================================
    # ===========================================================================
    ph_dict = {'x': [], 't': []}
    dummy_loss = tf.constant(1)
    total_loss, context_vector, line_loss_matrix, context_state = tiered_lm(token_set_size, args.em,
                                                                            ph_dict,
                                                                            args.context_layers,
                                                                            args.lm_layers,
                                                                            args.numsteps,
                                                                            bidir=args.bidir,
                                                                            jagged=args.jagged)
    tiered_network_model = ModelRunner(total_loss, ph_dict, learnrate=args.learnrate,
                                       debug=args.debug, decay=True,
                                       decay_rate=0.99, decay_steps=20)
    # ===========================================================================
    # =========================TRAINING LOOP=====================================
    # ===========================================================================
    # Zeroed (context_vector, cell states, hidden states) used as the starting
    # upper-tier state for each new user.
    init_triple = (np.zeros([1, ctxt_size], np.float32), # context
                   [np.zeros([1, c_size], np.float32) for c_size in args.context_layers], # state
                   [np.zeros([1, h_size], np.float32) for h_size in args.context_layers]) # hidden
    start_time = time.time()
def trainday(is_training, f, states, logs):
    """
    Stream one day's log file through the tiered language model.

    :param is_training: (bool) If True, apply gradient updates while streaming
                        this file (updates are suspended for flush batches).
    :param f: (str) Day file name, appended to args.datafolder.
    :param states: Per-user state triples carried over from a previous day,
                   or None to start every user from init_triple.
    :param logs: Unused in this function body; presumably kept so the
                 train/eval call sites share one signature -- TODO confirm.
    :return: (state_triples, user_logs, num_processed) from the batcher, where
             num_processed counts evaluated log lines (stays 0 while training).
    """
    num_processed = 0
    data = OnlineLMBatcher(args.datafolder + f, init_triple,
                           batch_size=args.mb, num_steps=args.numsteps, skiprows=0)
    do_update = is_training
    if states is not None:
        data.state_triples = states  # resume per-user context from the previous day
    batch, state_triple = data.next_batch()
    batch_num = 0
    stragglers = False
    while batch is not None:
        if data.flush:
            # End-of-day flush batches are scored but never trained on.
            do_update = False
        if len(batch.shape) == 2:  # Straggler log lines that don't fit into num_steps by end of day are run in large batches one step at a time
            stragglers = True
            batch = batch.reshape((1, batch.shape[0], batch.shape[1]))
            # x (inputs) and t (targets) windows are offset by one token; which
            # end gets trimmed depends on the direction (bidir or not).
            endx = batch.shape[2] - int(not args.bidir)
            endt = batch.shape[2] - int(args.bidir)
            # Batch field columns as unpacked here: 0=line, 1=second, 2=day,
            # 3=user, 4=red, then an optional length column (jagged) and the
            # token ids.
            datadict = {'line': batch[:, :, 0],
                        'second': batch[:, :, 1],
                        'day': batch[:, :, 2],
                        'user': batch[:, :, 3],
                        'red': batch[:, :, 4],
                        'x': [batch[0, :, 5 + jag + skipsos:endx]] * args.numsteps,
                        't': [batch[0, :, 6 + jag + skipsos:endt]] * args.numsteps,
                        'context_vector': state_triple['context_vector'],
                        'c_state_init': state_triple['c_state_init'],
                        'h_state_init': state_triple['h_state_init']}
            if args.jagged:
                datadict['lens'] = [batch[0, :, 5] - skipsos] * args.numsteps
                datadict['masks'] = [get_mask(seq_length - 2 * args.bidir, sentence_length - 2 * args.bidir) for
                                     seq_length in datadict['lens']]
                for i in range(len(datadict['x'])):
                    assert np.all(datadict['lens'][i] <= datadict['x'][i].shape[1]), \
                        'Sequence found greater than num_tokens_predicted'
                    assert np.nonzero(datadict['lens'][i])[0].shape[0] == datadict['lens'][i].shape[0], \
                        'Sequence lengths must be greater than zero.' \
                        'Found zero length sequence in datadict["lengths"]: %s' % datadict['lens']
            # Stragglers run one step at a time, so fetch the tensors that
            # correspond to a single output step from the graph collections.
            first_output_context_state = tf.get_collection('context_state')[0]
            eval_tensors = ([total_loss,
                             tf.get_collection('context_vector')[1],
                             tf.get_collection('first_line_loss_matrix')[0]] +
                            [state_tuple.c for state_tuple in first_output_context_state] +
                            [state_tuple.h for state_tuple in first_output_context_state])
        else:  # Ordinary batching and matrix flush batching
            batch = np.transpose(batch, axes=(1, 0, 2))  # -> (num_steps, batch, fields)
            endx = batch.shape[2] - int(not args.bidir)
            endt = batch.shape[2] - int(args.bidir)
            datadict = {'line': batch[:, :, 0],
                        'second': batch[:, :, 1],
                        'day': batch[:, :, 2],
                        'user': batch[:, :, 3],
                        'red': batch[:, :, 4],
                        'x': [batch[i, :, 5 + jag + skipsos:endx] for i in range(args.numsteps)],
                        't': [batch[i, :, 6 + jag + skipsos:endt] for i in range(args.numsteps)],
                        'context_vector': state_triple['context_vector'],
                        'c_state_init': state_triple['c_state_init'],
                        'h_state_init': state_triple['h_state_init']}
            if args.jagged:
                datadict['lens'] = [batch[i, :, 5] - skipsos for i in range(args.numsteps)]
                datadict['masks'] = [get_mask(seq_length-args.bidir-args.skipsos,
                                              sentence_length-2*args.bidir) for seq_length in datadict['lens']]
                for i in range(len(datadict['x'])):
                    assert np.all(datadict['lens'][i] <= datadict['x'][i].shape[1]), \
                        'Sequence found greater than num_tokens_predicted'
                    assert np.nonzero(datadict['lens'][i])[0].shape[0] == datadict['lens'][i].shape[0], \
                        'Sequence lengths must be greater than zero.' \
                        'Found zero length sequence in datadict["lengths"]: %s' % datadict['lens']
            eval_tensors = ([total_loss, context_vector, line_loss_matrix] +
                            [state_tuple.c for state_tuple in context_state] +
                            [state_tuple.h for state_tuple in context_state])
        # output dims: 0: Nothing, 1 (total_loss): scalar, 2 (context_vector): num_users X hidden_size,
        # 3 (line_loss_matrix): num_users X num_steps
        output = tiered_network_model.train_step(datadict, eval_tensors=eval_tensors,
                                                 update=do_update)
        loss, context, loss_matrix = output[1], output[2], output[3]
        # Slots 4..4+L hold the per-layer context-RNN cell states; the next L
        # hold the hidden states (L = len(args.context_layers)).
        current_context_state = output[4:4 + len(args.context_layers)]
        current_context_hidden = output[4 + len(args.context_layers):4 + 2*len(args.context_layers)]
        data.update_state_triples([context, current_context_state, current_context_hidden])
        if args.verbose:
            print('%s %s %s %s %s %s %r' % (datadict['day'].shape[1],
                                            datadict['line'][0][0],
                                            datadict['second'][0][0],
                                            mode[do_update],
                                            f,
                                            data.line_num, loss))
        if math.isnan(loss) or math.isinf(loss):
            print('Exiting due to divergence!')
            exit(1)
        if not is_training:
            # Anomaly scores are only written out on evaluation days.
            num_processed += batch.shape[0] * batch.shape[1]
            if not stragglers:
                assert loss_matrix.shape[0] * loss_matrix.shape[1] == batch.shape[0] * batch.shape[1], 'Batch size %s is different from output size %s. May be losing datapoints.' % (batch.shape, loss_matrix.shape)
                write_results(datadict, loss_matrix, outfile, batch_num)
            else:
                assert loss_matrix[0, :].shape[0] == batch.shape[0] * batch.shape[1], 'Batch size is different from output size. May be losing datapoints.'
                write_results(datadict, loss_matrix[0, :], outfile, batch_num)
        batch, state_triple = data.next_batch()
        batch_num += 1
    return data.state_triples, data.user_logs, num_processed
# Weekend days are excluded from the day-file schedule.
weekend_days = conf["weekend_days"]
if args.test:
    files = conf["test_files"]  # 5000 lines from each of day 0, day 1 and day 2
else:
    files = [str(i) + '.txt' for i in range(conf["num_days"]) if i not in weekend_days]
states1 = None
logs1 = None
number_processed = 0
# Sliding train/evaluate schedule: train on day i, then score day i+1 using
# the per-user states produced by the training pass.
for idx, f in enumerate(files[:-1]):
    states1, logs1, num_processed = trainday(True, f, states1, logs1)
    # NOTE(review): the evaluation pass's states/logs (states2/logs2) are not
    # fed forward -- only training-day state is carried over. Looks
    # intentional (eval should not perturb training state), but confirm.
    states2, logs2, num_processed = trainday(False, files[idx + 1], states1, logs1)
    number_processed += num_processed
outfile.close()
total_time = time.time() - start_time
print('elapsed time: %s' % total_time)
# Move results from the /tmp scratch area to the configured results folder.
os.system("mv /tmp/lanl_results/%s %s" % (results_file, args.results_folder + results_file))
print('number processed', number_processed)
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/safekit/models/pca_autoencoder.py | safekit/models/pca_autoencoder.py | """Principal Components Analysis autoencoder baseline"""
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
import argparse
class Batcher:
    """
    Single-pass reader that groups consecutive CSV rows by the value in the
    first column (the day).

    Written for exactly one pass over data too large to fit in memory; it
    cannot rewind or re-batch.
    """

    def __init__(self, datafile):
        """
        :param datafile: Path of the CSV file to read; the first line is a
                         header and is discarded.
        """
        self.f = open(datafile, 'r')
        self.f.readline()  # discard the header row
        self.current_line = self.f.readline()
        self.current_day = -1

    def _parse_current(self):
        """Parse self.current_line into a 1-D float array."""
        return np.array([float(tok) for tok in self.current_line.strip().split(',')])

    def next_batch(self):
        """
        Return a 2-D numpy array containing every remaining consecutive row
        that shares the next row's day value (column 0), or None once the
        file is exhausted (one-pass batcher).
        """
        if self.current_line == '':
            return None
        rows = []
        row = self._parse_current()
        self.current_day = row[0]
        while row[0] == self.current_day:
            self.current_day = row[0]
            rows.append(row)
            self.current_line = self.f.readline()
            if self.current_line == '':
                break
            row = self._parse_current()
        return np.array(rows)
def train(train_data, outfile, n_components=1):
    """
    Score each day's batch by its PCA reconstruction error.

    For every batch the features are standardized, a PCA basis is fit, and the
    per-row mean squared reconstruction error is written out as the anomaly
    score.

    :param train_data: A Batcher object that delivers batches of train data.
    :param outfile: (file obj) Where to print results ('day user red loss' rows).
    :param n_components: (int) Number of principal components used in the
                         reconstruction. Defaults to 1, matching the previous
                         hard-coded behavior.
    """
    outfile.write('day user red loss\n')
    mat = train_data.next_batch()
    while mat is not None:
        datadict = {'features': mat[:, 3:], 'red': mat[:, 2], 'user': mat[:, 1], 'day': mat[:, 0]}
        batch = scale(datadict['features'])
        # BUG FIX: n_components was hard-coded to 1, silently ignoring the
        # -components CLI flag; it is now a parameter.
        pca = PCA(n_components=n_components)
        pca.fit(batch)
        data_reduced = np.dot(batch, pca.components_.T)  # pca transform
        data_original = np.dot(data_reduced, pca.components_)  # inverse_transform
        # Per-row mean squared reconstruction error = anomaly score.
        pointloss = np.mean(np.square(batch - data_original), axis=1)
        loss = np.mean(pointloss)
        for d, u, t, l in zip(datadict['day'].tolist(), datadict['user'].tolist(),
                              datadict['red'].tolist(), pointloss.flatten().tolist()):
            outfile.write('%s %s %s %s\n' % (d, u, t, l))
        print('loss: %.4f' % loss)
        mat = train_data.next_batch()


if __name__ == '__main__':
    parser = argparse.ArgumentParser("PCA autoencoder")
    parser.add_argument('datafile', type=str, help='Input data for anomaly detection')
    parser.add_argument('results', type=str, help='Where to print results.')
    parser.add_argument('-components', type=int, default=1,
                        help='Number of principal components to use in reconstruction.')
    args = parser.parse_args()
    with open(args.results, 'w') as outfile:
        data = Batcher(args.datafile)
        # Pass the flag through (previously parsed but never used).
        train(data, outfile, n_components=args.components)
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
pnnl/safekit | https://github.com/pnnl/safekit/blob/92c004bc72f1480a4f9b26d304a900cbc8dea48d/safekit/models/svm.py | safekit/models/svm.py | """One class support vector machine baseline
"""
import sys
import os
cyberpath = '/'.join(os.path.realpath(__file__).split('/')[:-3])
sys.path.insert(0, cyberpath)
import argparse
from sklearn.svm import OneClassSVM
from safekit.batch import DayBatcher
import time
from safekit.util import apr
def return_parser():
    """
    Build the command-line parser for the One Class SVM anomaly detector.

    :return: argparse.ArgumentParser with positional datafile/result_path and
             optional -loss_fn, -kern, -nu, -deg, -shrink flags.
    """
    parser = argparse.ArgumentParser("Run anomaly detection with One Class Support Vector Machine.")
    # Positional arguments: (name, help)
    for arg_name, arg_help in (
            ('datafile', 'Input data for anomaly detection.'),
            ('result_path', 'Results dir.')):
        parser.add_argument(arg_name, type=str, help=arg_help)
    # Optional arguments: (flag, type, default, help)
    optional_specs = (
        ('-loss_fn', str, '/tmp/' + str(time.time()),
         'Loss file param for spearmint'),
        ('-kern', str, 'sigmoid',
         'Specifies the kernel type to be used in the algorithm. It must be one of linear, '
         'poly, rbf, sigmoid, or a callable. If none is given, sigmoid will be used.'),
        ('-nu', float, 0.5,
         'An upper bound on the fraction of training errors and a lower bound of the fraction '
         'of support vectors. Should be in the interval (0, 1]. By default 0.5 will be taken.'),
        ('-deg', int, 3,
         'Degree of the polynomial kernel function (poly). '
         'Ignored by all other kernels. Default is 3.'),
        ('-shrink', str, 'False',
         'Whether to use the shrinking heuristic.Default is False.'),
    )
    for flag, arg_type, default, arg_help in optional_specs:
        parser.add_argument(flag, type=arg_type, default=default, help=arg_help)
    return parser
def sample_hyps_svm(kern, nu_, deg, shrink):
    """
    Build a OneClassSVM anomaly detector from the given hyperparameters.

    :param kern: (str) Kernel type ('linear', 'poly', 'rbf', 'sigmoid', ...).
    :param nu_: (float) Upper bound on the training-error fraction, in (0, 1].
    :param deg: (int) Polynomial kernel degree (ignored by other kernels).
    :param shrink: (bool) Whether to use the shrinking heuristic.
    :return: A configured sklearn OneClassSVM object.
    """
    gamma = 'auto'  # sklearn default: uses 1/n_features
    coef0 = 0.0  # sklearn default; no suggested values given in documentation
    # BUG FIX: the original passed shrinking=True unconditionally, ignoring
    # the `shrink` argument, and also leaked an open results-file handle that
    # was never written to, returned, or closed (the caller builds its own
    # results path anyway).
    model = OneClassSVM(kernel=kern, nu=nu_,
                        degree=deg, gamma=gamma,
                        coef0=coef0, shrinking=shrink)
    return model
def train_model(model, batcher, res_file):
    """
    Fit the model on each day's batch and record per-event anomaly scores.

    :param model: An sklearn-style anomaly detection model exposing fit() and
                  decision_function().
    :param batcher: A batcher whose next_batch() yields 2-D arrays with columns
                    [day, user, red, feature...] and None when exhausted.
    :param res_file: (str) Path of the results file to write
                     ('day user red loss' rows).
    """
    # BUG FIX: the original opened the results file and never closed it; the
    # context manager guarantees flush/close even if scoring raises.
    with open(res_file, 'w') as resultsfile:
        resultsfile.write('day user red loss\n')
        mat = batcher.next_batch()
        batch_num = 0
        while mat is not None:
            datadict = {'features': mat[:, 3:], 'red': mat[:, 2], 'user': mat[:, 1], 'day': mat[:, 0]}
            model.fit(datadict['features'])  # one-class model: fit on the day's own data
            anomaly_scores = model.decision_function(datadict['features'])
            for day, user, red, score in zip(datadict['day'], datadict['user'], datadict['red'], anomaly_scores):
                resultsfile.write(str(day) + ' ' + str(user) + ' ' + str(red) + ' ' + str(score[0]) + '\n')
            batch_num += 1
            print('finished batch num: ' + str(batch_num))
            mat = batcher.next_batch()
if __name__ == '__main__':
    args = return_parser().parse_args()
    day_batcher = DayBatcher(args.datafile, skiprow=1)
    if not args.result_path.endswith('/'):
        args.result_path += '/'
    # Results path encodes the run's hyperparameters.
    resultsfile = (args.result_path + str(time.time()) + '_OneClassSVM' +
                   '__kernel_' + str(args.kern) +
                   '__nu_' + str(args.nu) +
                   '__degree_' + str(args.deg) +
                   '__coef0_' + '0.0' +
                   '__shrinking_' + str(args.shrink))
    # BUG FIX: bool(args.shrink) was True for ANY non-empty string, including
    # the default 'False', so shrinking was always enabled. Parse the flag
    # text explicitly instead.
    shrink = args.shrink.strip().lower() in ('true', '1', 'yes')
    model = sample_hyps_svm(args.kern, args.nu, args.deg, shrink)
    start_time = time.time()
    train_model(model, day_batcher, resultsfile)
    # Record area-under-PR (apr) for spearmint hyperparameter search.
    with open(args.loss_fn, 'w') as lf:
        lf.write(str(apr(resultsfile, [0, 12], inverse=True)))
    # Append elapsed time to the results file name.
    os.system('mv %s %s' % (resultsfile, resultsfile + '__' + str(time.time() - start_time)))
| python | MIT | 92c004bc72f1480a4f9b26d304a900cbc8dea48d | 2026-01-05T07:13:08.019988Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.