hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eb04ec59e94e9c904e0a1ea728fff83f455f16f7
| 42
|
py
|
Python
|
src/007-10001st-prime/python/solve.py
|
xfbs/ProjectEulerRust
|
e26768c56ff87b029cb2a02f56dc5cd32e1f7c87
|
[
"MIT"
] | 1
|
2018-01-26T21:18:12.000Z
|
2018-01-26T21:18:12.000Z
|
src/007-10001st-prime/python/solve.py
|
xfbs/ProjectEulerRust
|
e26768c56ff87b029cb2a02f56dc5cd32e1f7c87
|
[
"MIT"
] | 3
|
2017-12-09T14:49:30.000Z
|
2017-12-09T14:59:39.000Z
|
src/007-10001st-prime/python/solve.py
|
xfbs/ProjectEulerRust
|
e26768c56ff87b029cb2a02f56dc5cd32e1f7c87
|
[
"MIT"
] | null | null | null |
import solver
print(solver.solve(10001))
| 10.5
| 26
| 0.785714
| 6
| 42
| 5.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 0.095238
| 42
| 3
| 27
| 14
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
eb2a59bb14bae8ba6438d62016fc262757e425e1
| 20
|
py
|
Python
|
tests/Dispatcher/__init__.py
|
ramgopal99/centipede
|
0b1dc1f17b025f6b37c9a3cf5753a46cbbcd36ba
|
[
"MIT"
] | 3
|
2018-05-28T20:56:19.000Z
|
2018-06-02T15:58:10.000Z
|
tests/Dispatcher/__init__.py
|
ramgopal99/centipede
|
0b1dc1f17b025f6b37c9a3cf5753a46cbbcd36ba
|
[
"MIT"
] | 20
|
2019-02-16T04:21:13.000Z
|
2019-03-09T21:21:21.000Z
|
tests/Dispatcher/__init__.py
|
ramgopal99/centipede
|
0b1dc1f17b025f6b37c9a3cf5753a46cbbcd36ba
|
[
"MIT"
] | 3
|
2018-07-10T14:51:13.000Z
|
2022-03-17T00:39:58.000Z
|
from . import Local
| 10
| 19
| 0.75
| 3
| 20
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 20
| 1
| 20
| 20
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
de443ebf2a55c618bec50e3ba60555f6f898df91
| 46
|
py
|
Python
|
vnpy/gateway/sopttest/__init__.py
|
funrunskypalace/vnpy
|
2d87aede685fa46278d8d3392432cc127b797926
|
[
"MIT"
] | 19,529
|
2015-03-02T12:17:35.000Z
|
2022-03-31T17:18:27.000Z
|
vnpy/gateway/sopttest/__init__.py
|
funrunskypalace/vnpy
|
2d87aede685fa46278d8d3392432cc127b797926
|
[
"MIT"
] | 2,186
|
2015-03-04T23:16:33.000Z
|
2022-03-31T03:44:01.000Z
|
vnpy/gateway/sopttest/__init__.py
|
funrunskypalace/vnpy
|
2d87aede685fa46278d8d3392432cc127b797926
|
[
"MIT"
] | 8,276
|
2015-03-02T05:21:04.000Z
|
2022-03-31T13:13:13.000Z
|
from .sopttest_gateway import SopttestGateway
| 23
| 45
| 0.891304
| 5
| 46
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 1
| 46
| 46
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
de8b063459c6ea30362d2d7e42196bead2702a39
| 127
|
py
|
Python
|
src/BribeNet/gui/apps/temporal/wizard/generation.py
|
RobMurray98/BribeNet
|
09ddd8f15d9ab5fac44ae516ed92c6ba5e5119bc
|
[
"MIT"
] | null | null | null |
src/BribeNet/gui/apps/temporal/wizard/generation.py
|
RobMurray98/BribeNet
|
09ddd8f15d9ab5fac44ae516ed92c6ba5e5119bc
|
[
"MIT"
] | null | null | null |
src/BribeNet/gui/apps/temporal/wizard/generation.py
|
RobMurray98/BribeNet
|
09ddd8f15d9ab5fac44ae516ed92c6ba5e5119bc
|
[
"MIT"
] | null | null | null |
from BribeNet.gui.apps.static.wizard.generation import StaticGeneration
class TemporalGeneration(StaticGeneration):
pass
| 21.166667
| 71
| 0.834646
| 13
| 127
| 8.153846
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102362
| 127
| 5
| 72
| 25.4
| 0.929825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
deaac683d56c3065a46f3da13c4b31d25198a141
| 26
|
py
|
Python
|
lps/loham/__init__.py
|
arup-group/london-pop-synth
|
38e56230d440d49ddb2e2841d46a5cbaab260c35
|
[
"MIT"
] | 1
|
2020-11-25T06:56:43.000Z
|
2020-11-25T06:56:43.000Z
|
lps/loham/__init__.py
|
arup-group/london-pop-synth
|
38e56230d440d49ddb2e2841d46a5cbaab260c35
|
[
"MIT"
] | null | null | null |
lps/loham/__init__.py
|
arup-group/london-pop-synth
|
38e56230d440d49ddb2e2841d46a5cbaab260c35
|
[
"MIT"
] | null | null | null |
from .loham import Demand
| 13
| 25
| 0.807692
| 4
| 26
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 1
| 26
| 26
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
dec4ec79dd116db12cf227bc0cd64c0834d820c5
| 58
|
py
|
Python
|
src/__init__.py
|
quosi/CleanCode
|
545afdfdaefeeaed739b48edd3170de1f1197201
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
quosi/CleanCode
|
545afdfdaefeeaed739b48edd3170de1f1197201
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
quosi/CleanCode
|
545afdfdaefeeaed739b48edd3170de1f1197201
|
[
"MIT"
] | 1
|
2022-02-05T03:20:36.000Z
|
2022-02-05T03:20:36.000Z
|
from . import IqaLoggingProcessor
from . import IqaPlotter
| 29
| 33
| 0.844828
| 6
| 58
| 8.166667
| 0.666667
| 0.408163
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12069
| 58
| 2
| 34
| 29
| 0.960784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
dee4ed5bf89005f05ead56d9f03bafff21c17285
| 6,887
|
py
|
Python
|
tests/test_test_suite.py
|
nihilistkitten/aga
|
d48baab1118a091e8cf3a9736f9d80597ffdc543
|
[
"MIT"
] | 4
|
2022-01-01T07:17:31.000Z
|
2022-02-28T10:48:54.000Z
|
tests/test_test_suite.py
|
nihilistkitten/aga
|
d48baab1118a091e8cf3a9736f9d80597ffdc543
|
[
"MIT"
] | 24
|
2021-09-26T23:25:47.000Z
|
2022-03-21T08:55:04.000Z
|
tests/test_test_suite.py
|
nihilistkitten/aga
|
d48baab1118a091e8cf3a9736f9d80597ffdc543
|
[
"MIT"
] | null | null | null |
"""Tests for the `_AgaTestCase` class."""
from typing import List, Tuple
from unittest import TestCase
import pytest
from aga import problem, test_case
from aga.core import Problem
def square_wrong(x: int) -> int:
"""Square x, incorrectly."""
return x + 1
def square_right(x: int) -> int:
"""Square x, correctly."""
return x**2
@test_case(2)
@problem()
def square_one_tc(x: int) -> int:
"""Square x.
This problem has only one test case to make inspecting the specific error message
easier.
"""
return x * x
@test_case(x=2)
@problem()
def square_one_tc_kwd(x: int = 0) -> int:
"""Square x.
This problem has only one test case to make inspecting the specific error message
easier. It also uses a kewyork argument to allow testing that case.
"""
return x * x
@test_case(2, 1)
@problem()
def diff_one_tc(x: int, y: int) -> int:
"""Compute x - y.
This problem has only one test case to make inspecting the specific error message
easier.
"""
return x - y
@test_case(2, y=1)
@problem()
def diff_one_tc_kwd(x: int, y: int = 0) -> int:
"""Compute x - y.
This problem has only one test case to make inspecting the specific error message
easier. It also uses a keyword argument to allow testing combining positional and
keyword args.
"""
return x - y
def diff_wrong(x: int, y: int) -> int:
"""Compute x - y, incorrectly."""
return x + y
def test_square_wrong(square: Problem[int]) -> None:
"""Test that the tests fail for the incorrect implementation."""
suite = square.generate_test_suite(square_wrong)
result = suite.run(TestCase().defaultTestResult())
assert not result.wasSuccessful()
def test_square_right(square: Problem[int]) -> None:
"""Test that the tests succeed for the correct implementation."""
suite = square.generate_test_suite(square_right)
result = suite.run(TestCase().defaultTestResult())
assert result.wasSuccessful()
@pytest.fixture(name="square_failure")
def fixture_square_failure() -> List[Tuple[TestCase, str]]:
"""Generate a list of failures for the single tc square problem."""
suite = square_one_tc.generate_test_suite(square_wrong)
result = suite.run(TestCase().defaultTestResult())
return result.failures
def test_one_failure(square_failure: List[Tuple[TestCase, str]]) -> None:
"""Test that the one-tc problem only has one failure."""
assert len(square_failure) == 1
def test_failure_message(square_failure: List[Tuple[TestCase, str]]) -> None:
"""Test that the one-tc problem's failure message is correct."""
message = square_failure[0][1]
assert "Checked with 2. Expected 4. Got 3 instead." in message
def test_failure_description(square_failure: List[Tuple[TestCase, str]]) -> None:
"""Test that the one-tc problem's test case description is correct."""
message = square_failure[0][0].shortDescription()
assert message == "Test 2"
@pytest.fixture(name="diff_failure")
def fixture_diff_failure() -> List[Tuple[TestCase, str]]:
"""Generate a list of failures for the single tc diff problem."""
suite = diff_one_tc.generate_test_suite(diff_wrong)
result = suite.run(TestCase().defaultTestResult())
return result.failures
def test_one_failure_diff(diff_failure: List[Tuple[TestCase, str]]) -> None:
"""Test that the one-tc problem only has one failure."""
assert len(diff_failure) == 1
def test_failure_message_multiple_args(
diff_failure: List[Tuple[TestCase, str]]
) -> None:
"""Test that the one-tc diff problem's failure message is correct.
This test is interesting because diff has two arguments, and we do formatting for
tuples in `_TestInputs`.
"""
message = diff_failure[0][1]
assert "Checked with 2,1. Expected 1. Got 3 instead." in message
def test_failure_description_multiple_args(
diff_failure: List[Tuple[TestCase, str]]
) -> None:
"""Test that the one-tc diff problem's test case description is correct.
This test is interesting because diff has two arguments, and we do formatting for
tuples in `_TestInputs`.
"""
message = diff_failure[0][0].shortDescription()
assert message == "Test 2,1"
@pytest.fixture(name="square_kwd_failure")
def fixture_square_kwd_failure() -> List[Tuple[TestCase, str]]:
"""Generate a list of failures for the single tc square kwd problem."""
suite = square_one_tc_kwd.generate_test_suite(square_wrong)
result = suite.run(TestCase().defaultTestResult())
return result.failures
def test_one_failure_square_kwd(square_kwd_failure: List[Tuple[TestCase, str]]) -> None:
"""Test that the one-tc problem only has one failure."""
assert len(square_kwd_failure) == 1
def test_failure_message_kwdargs(
square_kwd_failure: List[Tuple[TestCase, str]]
) -> None:
"""Test that the one-tc square_kwd problem's failure message is correct.
This test is interesting because square_kwd has a kewyord argument, and we do
formatting for kwdargs in `_TestInputs`.
"""
message = square_kwd_failure[0][1]
assert "Checked with x=2. Expected 4. Got 3 instead." in message
def test_failure_description_kwdargs(
square_kwd_failure: List[Tuple[TestCase, str]]
) -> None:
"""Test that the one-tc square_kwd problem's test case description is correct.
This test is interesting because square_kwd has a kewyord argument, and we do
formatting for kwdargs in `_TestInputs`.
"""
message = square_kwd_failure[0][0].shortDescription()
assert message == "Test x=2"
@pytest.fixture(name="diff_kwd_failure")
def fixture_diff_kwd_failure() -> List[Tuple[TestCase, str]]:
"""Generate a list of failures for the single tc diff kwd problem."""
suite = diff_one_tc_kwd.generate_test_suite(diff_wrong)
result = suite.run(TestCase().defaultTestResult())
return result.failures
def test_one_failure_diff_kwd(diff_kwd_failure: List[Tuple[TestCase, str]]) -> None:
"""Test that the one-tc problem only has one failure."""
assert len(diff_kwd_failure) == 1
def test_failure_message_pos_and_kwdargs(
diff_kwd_failure: List[Tuple[TestCase, str]]
) -> None:
"""Test that the one-tc diff_kwd problem's failure message is correct.
This test is interesting because diff_kwd has a kewyord argument and a positional
argument.
"""
message = diff_kwd_failure[0][1]
assert "Checked with 2,y=1. Expected 1. Got 3 instead." in message
def test_failure_description_pos_and_kwdargs(
diff_kwd_failure: List[Tuple[TestCase, str]]
) -> None:
"""Test that the one-tc diff_kwd problem's test case description is correct.
This test is interesting because diff_kwd has a kewyord argument and a positional
argument.
"""
message = diff_kwd_failure[0][0].shortDescription()
assert message == "Test 2,y=1"
| 30.745536
| 88
| 0.708291
| 1,003
| 6,887
| 4.707876
| 0.10668
| 0.021177
| 0.054214
| 0.081321
| 0.866582
| 0.825921
| 0.769166
| 0.71474
| 0.662431
| 0.662431
| 0
| 0.008902
| 0.184405
| 6,887
| 223
| 89
| 30.883408
| 0.831761
| 0.353129
| 0
| 0.30303
| 0
| 0
| 0.063962
| 0
| 0
| 0
| 0
| 0
| 0.141414
| 1
| 0.252525
| false
| 0
| 0.050505
| 0
| 0.414141
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
720bd2b1acb5d635f24a79aa2ddb890b5d2d825e
| 368
|
py
|
Python
|
stable_baselines/td3/__init__.py
|
iDurugkar/adversarial-intrinsic-motivation
|
e0ece991fe9b8278596c0ad9c68ccfc98a71e1e2
|
[
"MIT"
] | 2
|
2022-03-11T15:26:00.000Z
|
2022-03-15T12:20:57.000Z
|
stable_baselines/td3/__init__.py
|
iDurugkar/adversarial-intrinsic-motivation
|
e0ece991fe9b8278596c0ad9c68ccfc98a71e1e2
|
[
"MIT"
] | null | null | null |
stable_baselines/td3/__init__.py
|
iDurugkar/adversarial-intrinsic-motivation
|
e0ece991fe9b8278596c0ad9c68ccfc98a71e1e2
|
[
"MIT"
] | null | null | null |
from stable_baselines.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise
from stable_baselines.td3.rnd import RND
from stable_baselines.td3.td3 import TD3
from stable_baselines.td3.dist_predictor import Predictor
from stable_baselines.td3.ddl_td3 import DDLTD3
from stable_baselines.td3.policies import MlpPolicy, CnnPolicy, LnMlpPolicy, LnCnnPolicy
| 46
| 89
| 0.877717
| 48
| 368
| 6.5625
| 0.395833
| 0.190476
| 0.361905
| 0.349206
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026549
| 0.078804
| 368
| 7
| 90
| 52.571429
| 0.902655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
721b14ed626218ea37a73aedc2648c66ed25b9ad
| 99
|
py
|
Python
|
VideoSearchEngine/ObjectDetection/__init__.py
|
AkshatSh/VideoSearchEngine
|
57f64b241b8a7bbc377ce7826e1206f679f41def
|
[
"MIT"
] | 49
|
2018-05-22T09:06:18.000Z
|
2022-02-26T10:03:43.000Z
|
VideoSearchEngine/ObjectDetection/__init__.py
|
AkshatSh/VideoSearchEngine
|
57f64b241b8a7bbc377ce7826e1206f679f41def
|
[
"MIT"
] | 17
|
2018-05-18T21:14:36.000Z
|
2019-06-06T09:17:18.000Z
|
VideoSearchEngine/ObjectDetection/__init__.py
|
AkshatSh/VideoSearchEngine
|
57f64b241b8a7bbc377ce7826e1206f679f41def
|
[
"MIT"
] | 18
|
2018-06-06T22:14:26.000Z
|
2021-11-23T08:59:31.000Z
|
# from . import bbox_detector
from . import DarknetModels
from . import TinyYolo
from . import Yolo
| 24.75
| 29
| 0.787879
| 13
| 99
| 5.923077
| 0.538462
| 0.519481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161616
| 99
| 4
| 30
| 24.75
| 0.927711
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
723b4087c1005833507e087c35a8842a2d7f2551
| 102
|
py
|
Python
|
utils.py
|
Tchepga/bidding
|
ea99b791629033402df01e20dcbf75ab11471491
|
[
"MIT"
] | null | null | null |
utils.py
|
Tchepga/bidding
|
ea99b791629033402df01e20dcbf75ab11471491
|
[
"MIT"
] | null | null | null |
utils.py
|
Tchepga/bidding
|
ea99b791629033402df01e20dcbf75ab11471491
|
[
"MIT"
] | null | null | null |
import time
from time import strftime
def get_log_time():
return strftime('[%Y-%b-%d %H:%M:%S]')
| 17
| 42
| 0.656863
| 18
| 102
| 3.611111
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156863
| 102
| 6
| 42
| 17
| 0.755814
| 0
| 0
| 0
| 0
| 0
| 0.184466
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
|
0
| 6
|
a0fe6e833992588798a814aa86eef87d1875996c
| 2,629
|
py
|
Python
|
flipboard/cli.py
|
chris48s/flipboard
|
98fa22bcd7b3c20688a793b9406695c13f16008c
|
[
"MIT"
] | 1
|
2021-07-06T11:10:59.000Z
|
2021-07-06T11:10:59.000Z
|
flipboard/cli.py
|
chris48s/flipboard
|
98fa22bcd7b3c20688a793b9406695c13f16008c
|
[
"MIT"
] | 2
|
2021-08-12T17:10:03.000Z
|
2022-02-21T00:56:31.000Z
|
flipboard/cli.py
|
chris48s/flipboard
|
98fa22bcd7b3c20688a793b9406695c13f16008c
|
[
"MIT"
] | null | null | null |
import json
from base64 import b64encode, b64decode
from urllib.parse import quote, unquote
import click
import pyperclip
import xmlformatter
import xml.parsers.expat
@click.group()
def cli():
pass
@cli.command()
@click.argument('encoding', type=click.Choice(['base64', 'url']), required=True)
def encode(encoding):
input_ = pyperclip.paste()
if encoding == 'url':
return pyperclip.copy(quote(input_))
if encoding == 'base64':
tmp = input_.encode('ascii')
tmp = b64encode(tmp)
tmp = tmp.decode('ascii')
return pyperclip.copy(tmp)
raise NotImplementedError()
@cli.command()
@click.argument('encoding', type=click.Choice(['base64', 'url']), required=True)
def decode(encoding):
input_ = pyperclip.paste()
if encoding == 'url':
return pyperclip.copy(unquote(input_))
if encoding == 'base64':
tmp = input_.encode('ascii')
tmp = b64decode(tmp)
tmp = tmp.decode('ascii')
return pyperclip.copy(tmp)
raise NotImplementedError()
@cli.command()
@click.argument('language', type=click.Choice(['json', 'xml']), required=True)
def pprint(language):
input_ = pyperclip.paste()
if language == 'json':
try:
return pyperclip.copy(json.dumps(json.loads(input_), indent=2))
except json.decoder.JSONDecodeError:
return
if language == 'xml':
if '<' not in input_ or '>' not in input_:
return
formatter = xmlformatter.Formatter(indent="2", indent_char=" ")
try:
return pyperclip.copy(
formatter.format_string(input_).decode(formatter.encoding_effective)
)
except xml.parsers.expat.ExpatError:
return
raise NotImplementedError()
@cli.command()
@click.argument('language', type=click.Choice(['json', 'xml']), required=True)
def minify(language):
input_ = pyperclip.paste()
if language == 'json':
try:
return pyperclip.copy(json.dumps(json.loads(input_)))
except json.decoder.JSONDecodeError:
return
if language == 'xml':
if '<' not in input_ or '>' not in input_:
return
formatter = xmlformatter.Formatter(compress=True, indent_char=" ")
try:
return pyperclip.copy(
formatter.format_string(input_).decode(formatter.encoding_effective)
)
except xml.parsers.expat.ExpatError:
return
raise NotImplementedError()
@cli.command()
def trim():
input_ = pyperclip.paste()
return pyperclip.copy(input_.strip())
| 25.524272
| 84
| 0.623431
| 282
| 2,629
| 5.72695
| 0.219858
| 0.083591
| 0.105882
| 0.056966
| 0.816099
| 0.816099
| 0.816099
| 0.816099
| 0.816099
| 0.762848
| 0
| 0.010111
| 0.247623
| 2,629
| 102
| 85
| 25.77451
| 0.80637
| 0
| 0
| 0.666667
| 0
| 0
| 0.046786
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0.012821
| 0.089744
| 0
| 0.358974
| 0.012821
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9d003c100cc6aa5cdeb3c1dc30aa1e103a8085b5
| 213
|
py
|
Python
|
capstone/cite/templatetags/redaction.py
|
rachelaus/capstone
|
2affa02706f9b1a99d032c66f258a7421c40a35e
|
[
"MIT"
] | 134
|
2017-07-12T17:03:06.000Z
|
2022-03-27T06:38:29.000Z
|
capstone/cite/templatetags/redaction.py
|
rachelaus/capstone
|
2affa02706f9b1a99d032c66f258a7421c40a35e
|
[
"MIT"
] | 1,362
|
2017-06-22T17:42:49.000Z
|
2022-03-31T15:28:00.000Z
|
capstone/cite/templatetags/redaction.py
|
rachelaus/capstone
|
2affa02706f9b1a99d032c66f258a7421c40a35e
|
[
"MIT"
] | 38
|
2017-06-22T14:46:23.000Z
|
2022-03-16T05:32:54.000Z
|
from django import template
register = template.Library()
@register.filter()
def redact(text, case):
return case.redact_obj(text)
@register.filter()
def elide(text, case):
return case.elide_obj(text)
| 15.214286
| 32
| 0.7277
| 29
| 213
| 5.275862
| 0.482759
| 0.183007
| 0.222222
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150235
| 213
| 14
| 33
| 15.214286
| 0.845304
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.125
| 0.25
| 0.625
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
9d40331569547fc1607a561511cd4d68911fdd77
| 8,646
|
py
|
Python
|
demo/tests/test_post_save_callbacks.py
|
jayvdb/django-formidable
|
df8bcd0c882990d72d302be47aeb4fb11915b1fa
|
[
"MIT"
] | 11
|
2018-02-14T08:15:23.000Z
|
2021-09-10T02:16:38.000Z
|
demo/tests/test_post_save_callbacks.py
|
jayvdb/django-formidable
|
df8bcd0c882990d72d302be47aeb4fb11915b1fa
|
[
"MIT"
] | 61
|
2017-11-27T10:15:43.000Z
|
2021-06-28T14:17:25.000Z
|
demo/tests/test_post_save_callbacks.py
|
jayvdb/django-formidable
|
df8bcd0c882990d72d302be47aeb4fb11915b1fa
|
[
"MIT"
] | 2
|
2019-04-06T11:17:05.000Z
|
2020-10-10T08:36:27.000Z
|
from copy import deepcopy
from django.core.exceptions import ImproperlyConfigured
from django.urls import reverse
from django.conf import settings
from django.test import TestCase, override_settings
from rest_framework.test import APITestCase
from formidable.models import Formidable
from formidable.views import check_callback_configuration
from . import form_data, form_data_items
from unittest.mock import patch
CALLBACK = 'demo.callback_save'
CALLBACK_EXCEPTION = 'demo.callback_exception'
class CreateFormTestCase(APITestCase):
@override_settings(
FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS=CALLBACK,
FORMIDABLE_POST_CREATE_CALLBACK_FAIL=CALLBACK
)
def test_do_no_call_on_get(self):
with patch(CALLBACK) as patched_callback:
res = self.client.get(
reverse('formidable:form_create')
)
self.assertEqual(res.status_code, 405)
# No call on GET
self.assertEqual(patched_callback.call_count, 0)
@override_settings(FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS=CALLBACK)
def test_create_no_error_post(self):
with patch(CALLBACK) as patched_callback:
res = self.client.post(
reverse('formidable:form_create'), form_data, format='json'
)
self.assertEqual(res.status_code, 201)
self.assertEqual(patched_callback.call_count, 1)
@override_settings(FORMIDABLE_POST_CREATE_CALLBACK_FAIL=CALLBACK)
def test_create_error_post(self):
with patch(CALLBACK) as patched_callback:
form_data_without_items = deepcopy(form_data_items)
form_data_without_items['fields'][0].pop('items')
res = self.client.post(
reverse('formidable:form_create'), form_data_without_items,
format='json'
)
self.assertEquals(res.status_code, 422)
self.assertEqual(patched_callback.call_count, 1)
@override_settings(
FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS=CALLBACK_EXCEPTION
)
def test_create_exception(self):
# The called function raises an error, but the treatment proceeds
# as if nothing has happened
res = self.client.post(
reverse('formidable:form_create'), form_data, format='json'
)
self.assertEqual(res.status_code, 201)
@override_settings(
FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS=CALLBACK_EXCEPTION
)
def test_create_exception_logger(self):
# The called function raises an error, but the treatment proceeds
# as if nothing has happened
with patch('formidable.views.logger.error') as logger_error:
res = self.client.post(
reverse('formidable:form_create'), form_data, format='json'
)
self.assertEqual(res.status_code, 201)
self.assertEqual(logger_error.call_count, 1)
@override_settings(FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS='non.existent')
def test_create_callback_is_non_existent(self):
# A non-existing module is treated separately.
with patch('formidable.views.logger.error') as logger_error:
res = self.client.post(
reverse('formidable:form_create'), form_data, format='json'
)
self.assertEqual(res.status_code, 201)
self.assertEqual(logger_error.call_count, 1)
class UpdateFormTestCase(APITestCase):
def setUp(self):
super().setUp()
self.form = Formidable.objects.create(
label='test', description='test'
)
@override_settings(
FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS=CALLBACK,
FORMIDABLE_POST_UPDATE_CALLBACK_FAIL=CALLBACK
)
def test_do_no_call_on_get(self):
with patch(CALLBACK) as patched_callback:
res = self.client.get(
reverse('formidable:form_detail', args=[self.form.id])
)
self.assertEqual(res.status_code, 200)
# No call on GET
self.assertEqual(patched_callback.call_count, 0)
@override_settings(FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS=CALLBACK)
def test_update_no_error_post(self):
with patch(CALLBACK) as patched_callback:
res = self.client.put(
reverse('formidable:form_detail', args=[self.form.id]),
form_data, format='json'
)
self.assertEqual(res.status_code, 200)
self.assertEqual(patched_callback.call_count, 1)
@override_settings(FORMIDABLE_POST_UPDATE_CALLBACK_FAIL=CALLBACK)
def test_update_error_post(self):
with patch(CALLBACK) as patched_callback:
form_data_without_items = deepcopy(form_data_items)
form_data_without_items['fields'][0].pop('items')
res = self.client.put(
reverse('formidable:form_detail', args=[self.form.id]),
form_data_without_items, format='json'
)
self.assertEquals(res.status_code, 422)
self.assertEqual(patched_callback.call_count, 1)
@override_settings(
FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS=CALLBACK_EXCEPTION
)
def test_update_exception(self):
# The called function raises an error, but the treatment proceeds
# as if nothing has happened
res = self.client.put(
reverse('formidable:form_detail', args=[self.form.id]),
form_data, format='json'
)
self.assertEqual(res.status_code, 200)
@override_settings(
FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS=CALLBACK_EXCEPTION
)
def test_update_exception_logger(self):
# The called function raises an error, but the treatment proceeds
# as if nothing has happened
with patch('formidable.views.logger.error') as logger_error:
res = self.client.put(
reverse('formidable:form_detail', args=[self.form.id]),
form_data, format='json'
)
self.assertEqual(res.status_code, 200)
self.assertEqual(logger_error.call_count, 1)
@override_settings(FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS='non.existent')
def test_update_callback_is_non_existent(self):
# A non-existing module is treated separately.
with patch('formidable.views.logger.error') as logger_error:
res = self.client.put(
reverse('formidable:form_detail', args=[self.form.id]),
form_data, format='json'
)
self.assertEqual(res.status_code, 200)
self.assertEqual(logger_error.call_count, 1)
class ConfigurationLoadingTestCases(TestCase):
@override_settings()
def test_all_deleted(self):
del settings.FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS
del settings.FORMIDABLE_POST_UPDATE_CALLBACK_FAIL
del settings.FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS
del settings.FORMIDABLE_POST_CREATE_CALLBACK_FAIL
self.assertTrue(check_callback_configuration())
@override_settings(
FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS=None,
FORMIDABLE_POST_UPDATE_CALLBACK_FAIL=None,
FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS=None,
FORMIDABLE_POST_CREATE_CALLBACK_FAIL=None
)
def test_all_none(self):
self.assertTrue(check_callback_configuration())
@override_settings(
FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS='',
FORMIDABLE_POST_UPDATE_CALLBACK_FAIL='',
FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS='',
FORMIDABLE_POST_CREATE_CALLBACK_FAIL=''
)
def test_all_empty(self):
self.assertTrue(check_callback_configuration())
@override_settings(
FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS='non.existing',
)
def test_update_success_unknown(self):
with self.assertRaises(ImproperlyConfigured):
check_callback_configuration()
@override_settings(
FORMIDABLE_POST_UPDATE_CALLBACK_FAIL='non.existing',
)
def test_update_fail_unknown(self):
with self.assertRaises(ImproperlyConfigured):
check_callback_configuration()
@override_settings(
FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS='non.existing',
)
def test_create_success_unknown(self):
with self.assertRaises(ImproperlyConfigured):
check_callback_configuration()
@override_settings(
FORMIDABLE_POST_CREATE_CALLBACK_FAIL='non.existing',
)
def test_create_fail_unknown(self):
with self.assertRaises(ImproperlyConfigured):
check_callback_configuration()
| 37.755459
| 78
| 0.683669
| 971
| 8,646
| 5.765191
| 0.112255
| 0.075027
| 0.086459
| 0.096463
| 0.873526
| 0.824223
| 0.769739
| 0.76831
| 0.714184
| 0.711683
| 0
| 0.007277
| 0.237104
| 8,646
| 228
| 79
| 37.921053
| 0.841419
| 0.055864
| 0
| 0.486339
| 0
| 0
| 0.06908
| 0.049448
| 0
| 0
| 0
| 0
| 0.15847
| 1
| 0.10929
| false
| 0
| 0.054645
| 0
| 0.180328
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
19b5b02976acae87d157248b84d140e4bc9959f2
| 41
|
py
|
Python
|
package/code/gtfs_harvester/extractor/__init__.py
|
highered-esricanada/Parallel-GTFS-Workflow
|
5386ca58708cfcf3e9aa901b02e273b98dfe2fcb
|
[
"MIT"
] | null | null | null |
package/code/gtfs_harvester/extractor/__init__.py
|
highered-esricanada/Parallel-GTFS-Workflow
|
5386ca58708cfcf3e9aa901b02e273b98dfe2fcb
|
[
"MIT"
] | null | null | null |
package/code/gtfs_harvester/extractor/__init__.py
|
highered-esricanada/Parallel-GTFS-Workflow
|
5386ca58708cfcf3e9aa901b02e273b98dfe2fcb
|
[
"MIT"
] | null | null | null |
from .gtfs_converter import ExtractGTFSRT
| 41
| 41
| 0.902439
| 5
| 41
| 7.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 41
| 1
| 41
| 41
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
19c6fa1a5b098a72965b7a70367b2ecf319f54aa
| 135
|
py
|
Python
|
scripts/npc/autogen_BonfireMinigameEntranceNPC.py
|
hsienjan/SideQuest-Server
|
3e88debaf45615b759d999255908f99a15283695
|
[
"MIT"
] | null | null | null |
scripts/npc/autogen_BonfireMinigameEntranceNPC.py
|
hsienjan/SideQuest-Server
|
3e88debaf45615b759d999255908f99a15283695
|
[
"MIT"
] | null | null | null |
scripts/npc/autogen_BonfireMinigameEntranceNPC.py
|
hsienjan/SideQuest-Server
|
3e88debaf45615b759d999255908f99a15283695
|
[
"MIT"
] | null | null | null |
# Character field ID when accessed: 820000000
# ParentID: 9201476
# ObjectID: 1000037
# Object Position Y: 37
# Object Position X: 260
| 22.5
| 45
| 0.755556
| 18
| 135
| 5.666667
| 0.888889
| 0.27451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.17037
| 135
| 5
| 46
| 27
| 0.660714
| 0.918519
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
19ed76e37cc84851e118c2c6eeedc8acbb43bda0
| 24
|
py
|
Python
|
jiebazhc/__init__.py
|
jack139/tongjian
|
5827ae9ddbde744474f3058675c16a7749378507
|
[
"BSD-3-Clause"
] | 32
|
2016-04-10T10:43:31.000Z
|
2022-01-26T08:00:25.000Z
|
jiebazhc/__init__.py
|
jack139/tongjian
|
5827ae9ddbde744474f3058675c16a7749378507
|
[
"BSD-3-Clause"
] | 3
|
2016-09-17T05:09:29.000Z
|
2020-02-04T15:50:52.000Z
|
jiebazhc/__init__.py
|
jack139/tongjian
|
5827ae9ddbde744474f3058675c16a7749378507
|
[
"BSD-3-Clause"
] | 8
|
2016-04-11T14:57:03.000Z
|
2021-06-12T01:56:53.000Z
|
from .jiebazhc import *
| 12
| 23
| 0.75
| 3
| 24
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
19f529fa8ba13623e22868c511501dae074e301e
| 45
|
py
|
Python
|
incomplete/rasterizer/rasterizer/examples/__init__.py
|
adlerliu/500lines
|
9100aaa8cf510439460ab8a1fad3311926a94d90
|
[
"CC-BY-3.0"
] | 26,185
|
2015-01-01T04:59:51.000Z
|
2022-03-31T10:20:14.000Z
|
incomplete/rasterizer/rasterizer/examples/__init__.py
|
fsxchen/500lines
|
3f2cd407ebedaf0a3cfa6858c4cf94543067433d
|
[
"CC-BY-3.0"
] | 160
|
2015-01-05T12:20:21.000Z
|
2021-10-03T07:25:43.000Z
|
incomplete/rasterizer/rasterizer/examples/__init__.py
|
fsxchen/500lines
|
3f2cd407ebedaf0a3cfa6858c4cf94543067433d
|
[
"CC-BY-3.0"
] | 6,572
|
2015-01-01T01:31:00.000Z
|
2022-03-31T07:31:22.000Z
|
import e1
import e2
import e3
import destijl
| 9
| 14
| 0.822222
| 8
| 45
| 4.625
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 0.177778
| 45
| 4
| 15
| 11.25
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
dfe5be80c84676c693ac6218a14169594ad243a7
| 31
|
py
|
Python
|
ruleex/anndt/__init__.py
|
rohancode/ruleex_modified
|
ec974e7811fafc0c06d4d2c53b4e2898dd6b7305
|
[
"Apache-2.0"
] | 18
|
2019-09-19T09:50:52.000Z
|
2022-03-20T13:59:20.000Z
|
ruleex/anndt/__init__.py
|
rohancode/ruleex_modified
|
ec974e7811fafc0c06d4d2c53b4e2898dd6b7305
|
[
"Apache-2.0"
] | 3
|
2020-10-31T05:15:32.000Z
|
2022-02-10T00:34:05.000Z
|
ruleex/anndt/__init__.py
|
rohancode/ruleex_modified
|
ec974e7811fafc0c06d4d2c53b4e2898dd6b7305
|
[
"Apache-2.0"
] | 7
|
2020-12-06T20:55:50.000Z
|
2021-12-11T18:14:51.000Z
|
from ruleex.anndt.core import *
| 31
| 31
| 0.806452
| 5
| 31
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 31
| 1
| 31
| 31
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a0498478fa095bff24afe8c59b2b294e2b6634e0
| 296
|
py
|
Python
|
dataship/beam/__init__.py
|
dataship/python-dataship
|
2ac8595cdf061b10cdc33f5cb68f23f97afc3eed
|
[
"MIT"
] | 6
|
2017-12-29T17:06:50.000Z
|
2020-04-12T23:30:19.000Z
|
dataship/beam/__init__.py
|
dataship/python-dataship
|
2ac8595cdf061b10cdc33f5cb68f23f97afc3eed
|
[
"MIT"
] | null | null | null |
dataship/beam/__init__.py
|
dataship/python-dataship
|
2ac8595cdf061b10cdc33f5cb68f23f97afc3eed
|
[
"MIT"
] | null | null | null |
from .beam import load
from .beam import read
from .beam import write
from .beam import to_dataframe
from .beam import from_dataframe
from .beam import write_column
from .beam import read_column
__all__ = ['load', 'read', 'write', 'to_dataframe', 'from_dataframe', 'write_column', 'read_column']
| 32.888889
| 100
| 0.773649
| 44
| 296
| 4.931818
| 0.227273
| 0.258065
| 0.451613
| 0.165899
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 296
| 8
| 101
| 37
| 0.837838
| 0
| 0
| 0
| 0
| 0
| 0.209459
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.875
| 0
| 0.875
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a0682868c5f534cb29d71b2033ce841402165595
| 1,126
|
py
|
Python
|
test/test_distance.py
|
DavidWalz/diversipy
|
bbc9b6b650529f7cb739cf981dddb3eaad2f2613
|
[
"BSD-3-Clause"
] | 3
|
2021-01-06T13:35:00.000Z
|
2021-08-12T08:22:04.000Z
|
test/test_distance.py
|
DavidWalz/diversipy
|
bbc9b6b650529f7cb739cf981dddb3eaad2f2613
|
[
"BSD-3-Clause"
] | 1
|
2020-02-20T10:11:38.000Z
|
2020-02-29T22:52:42.000Z
|
test/test_distance.py
|
DavidWalz/diversipy
|
bbc9b6b650529f7cb739cf981dddb3eaad2f2613
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import diversipy
def test_distance_to_boundary():
points = np.array([[0.1, 0.2], [0.3, 0.9]])
np.testing.assert_almost_equal(
diversipy.distance.distance_to_boundary(points), np.array([0.1, 0.1])
)
np.testing.assert_almost_equal(
diversipy.distance.distance_to_boundary(points, cuboid=((-1, -1), (2, 2))),
np.array([1.1, 1.1]),
)
def test_distance_matrix():
points1 = np.array([[0.1, 0.2], [0.3, 0.9], [0.6, 0.1]])
points2 = np.array([[0.2, 0.2]])
# test L1 distance
np.testing.assert_almost_equal(
diversipy.distance.distance_matrix(points1, points2, norm=1),
[[0.1], [0.1 + 0.7], [0.4 + 0.1]],
)
# test L2 distance
np.testing.assert_almost_equal(
diversipy.distance.distance_matrix(points1, points2, norm=2),
[[0.1], [(0.1 ** 2 + 0.7 ** 2) ** 0.5], [(0.4 ** 2 + 0.1 ** 2) ** 0.5]],
)
# test toridal L1 distance
np.testing.assert_almost_equal(
diversipy.distance.distance_matrix(points1, points2, norm=1, max_dist=[1, 1]),
[[0.1], [0.1 + (1 - 0.7)], [0.4 + 0.1]],
)
| 33.117647
| 86
| 0.581705
| 179
| 1,126
| 3.530726
| 0.195531
| 0.044304
| 0.033228
| 0.166139
| 0.751582
| 0.738924
| 0.738924
| 0.716772
| 0.716772
| 0.662975
| 0
| 0.103527
| 0.219361
| 1,126
| 33
| 87
| 34.121212
| 0.615472
| 0.05151
| 0
| 0.192308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192308
| 1
| 0.076923
| false
| 0
| 0.076923
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
262037f5ffb432dcd1cb9fb5c6454b7d089c69af
| 27
|
py
|
Python
|
gyakujinton/Window/__init__.py
|
mamerisawesome/gyakujinton
|
835ffe8ddf61b638db50a6ff15f764bee19917bd
|
[
"MIT"
] | null | null | null |
gyakujinton/Window/__init__.py
|
mamerisawesome/gyakujinton
|
835ffe8ddf61b638db50a6ff15f764bee19917bd
|
[
"MIT"
] | null | null | null |
gyakujinton/Window/__init__.py
|
mamerisawesome/gyakujinton
|
835ffe8ddf61b638db50a6ff15f764bee19917bd
|
[
"MIT"
] | null | null | null |
from .Window import Window
| 13.5
| 26
| 0.814815
| 4
| 27
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cd05c09eba77d1b820ef2e6d102de816ed15e01e
| 103
|
py
|
Python
|
inselect/gui/views/boxes/__init__.py
|
NaturalHistoryMuseum/inselect
|
196a3ae2a0ed4e2c7cb667aaba9a6be1bcd90ca6
|
[
"BSD-3-Clause"
] | 128
|
2015-03-06T00:17:51.000Z
|
2021-09-15T07:59:01.000Z
|
inselect/gui/views/boxes/__init__.py
|
NaturalHistoryMuseum/inselect
|
196a3ae2a0ed4e2c7cb667aaba9a6be1bcd90ca6
|
[
"BSD-3-Clause"
] | 346
|
2015-01-22T10:07:52.000Z
|
2020-02-25T21:24:56.000Z
|
inselect/gui/views/boxes/__init__.py
|
NaturalHistoryMuseum/inselect
|
196a3ae2a0ed4e2c7cb667aaba9a6be1bcd90ca6
|
[
"BSD-3-Clause"
] | 15
|
2015-02-26T21:31:18.000Z
|
2020-12-29T17:18:47.000Z
|
from .boxes_view import BoxesView # noqa
from .graphics_item_view import GraphicsItemView # noqa
| 34.333333
| 58
| 0.786408
| 13
| 103
| 6
| 0.692308
| 0.25641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174757
| 103
| 2
| 59
| 51.5
| 0.917647
| 0.087379
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
26f2e28345c7f8b3c0cb765ecb88bc1a2203d70d
| 10,298
|
py
|
Python
|
fireant/tests/slicer/test_execution.py
|
vladaspasic/fireant
|
2dbae6a97a927ef62fdcd5f37fcb51a7d6d55334
|
[
"Apache-2.0"
] | null | null | null |
fireant/tests/slicer/test_execution.py
|
vladaspasic/fireant
|
2dbae6a97a927ef62fdcd5f37fcb51a7d6d55334
|
[
"Apache-2.0"
] | null | null | null |
fireant/tests/slicer/test_execution.py
|
vladaspasic/fireant
|
2dbae6a97a927ef62fdcd5f37fcb51a7d6d55334
|
[
"Apache-2.0"
] | null | null | null |
from unittest import (
TestCase,
skip,
)
import numpy as np
import pandas as pd
import pandas.testing
from fireant.slicer.queries.execution import reduce_result_set
from fireant.slicer.totals import get_totals_marker_for_dtype
from .mocks import (
cat_dim_df,
cat_dim_totals_df,
cat_uni_dim_df,
cont_cat_dim_all_totals_df,
cont_cat_dim_df,
cont_cat_dim_totals_df,
cont_cat_uni_dim_all_totals_df,
cont_cat_uni_dim_df,
cont_dim_df,
single_metric_df,
slicer,
)
pd.set_option('display.expand_frame_repr', False)
def replace_totals(data_frame):
index_names = data_frame.index.names
raw = data_frame.reset_index()
for name in index_names:
marker = get_totals_marker_for_dtype(raw[name].dtype)
raw[name].replace(marker, np.nan, inplace=True)
return raw
class ReduceResultSetsTests(TestCase):
def test_reduce_single_result_set_no_dimensions(self):
expected = single_metric_df
raw_df = expected
dimensions = ()
result = reduce_result_set([raw_df], (), dimensions, ())
pandas.testing.assert_frame_equal(expected, result)
def test_reduce_single_result_set_with_cont_dimension(self):
expected = cont_dim_df
raw_df = replace_totals(expected)
dimensions = (slicer.dimensions.timestamp,)
result = reduce_result_set([raw_df], (), dimensions, ())
pandas.testing.assert_frame_equal(expected, result)
def test_reduce_single_result_set_with_cat_dimension(self):
expected = cat_dim_df
raw_df = replace_totals(expected)
dimensions = (slicer.dimensions.political_party,)
result = reduce_result_set([raw_df], (), dimensions, ())
pandas.testing.assert_frame_equal(expected, result)
def test_reduce_single_result_set_with_cont_cat_dimensions(self):
expected = cont_cat_dim_df
raw_df = replace_totals(expected)
dimensions = (slicer.dimensions.timestamp, slicer.dimensions.political_party)
result = reduce_result_set([raw_df], (), dimensions, ())
pandas.testing.assert_frame_equal(expected, result)
def test_reduce_single_result_set_with_cat_uni_dimensions(self):
expected = cat_uni_dim_df.sort_index()
raw_df = replace_totals(expected)
dimensions = (slicer.dimensions.political_party, slicer.dimensions.candidate)
result = reduce_result_set([raw_df], (), dimensions, ())
pandas.testing.assert_frame_equal(expected, result)
def test_reduce_single_result_set_with_cont_cat_uni_dimensions(self):
expected = cont_cat_uni_dim_df
raw_df = replace_totals(expected)
dimensions = (slicer.dimensions.timestamp, slicer.dimensions.political_party, slicer.dimensions.state)
result = reduce_result_set([raw_df], (), dimensions, ())
pandas.testing.assert_frame_equal(expected, result)
class ReduceResultSetsWithTotalsTests(TestCase):
def test_reduce_single_result_set_with_cat_dimension(self):
expected = cat_dim_totals_df
raw_df = replace_totals(cat_dim_df)
totals_df = pd.merge(pd.DataFrame([None], columns=['$d$political_party']),
pd.DataFrame([raw_df[['$m$votes', '$m$wins']].sum(axis=0)]),
how='outer',
left_index=True,
right_index=True)
dimensions = (slicer.dimensions.political_party.rollup(),)
result = reduce_result_set([raw_df, totals_df], (), dimensions, ())
pandas.testing.assert_frame_equal(expected, result)
def test_reduce_single_result_set_with_cont_cat_dimensions_cont_totals(self):
expected = cont_cat_dim_all_totals_df.loc[(slice(None), slice('d', 'r')), :] \
.append(cont_cat_dim_all_totals_df.iloc[-1])
raw_df = replace_totals(cont_cat_dim_df)
totals_df = pd.merge(pd.DataFrame([[None, None]], columns=['$d$timestamp', '$d$political_party']),
pd.DataFrame([raw_df[['$m$votes', '$m$wins']].sum(axis=0)]),
how='outer',
left_index=True,
right_index=True)
dimensions = (slicer.dimensions.timestamp.rollup(), slicer.dimensions.political_party)
result = reduce_result_set([raw_df, totals_df], (), dimensions, ())
pandas.testing.assert_frame_equal(expected, result)
def test_reduce_single_result_set_with_cont_cat_dimensions_cat_totals(self):
expected = cont_cat_dim_totals_df
raw_df = replace_totals(cont_cat_dim_df)
totals_df = raw_df.groupby('$d$timestamp').sum().reset_index()
totals_df['$d$political_party'] = None
totals_df = totals_df[['$d$timestamp', '$d$political_party', '$m$votes', '$m$wins']]
dimensions = (slicer.dimensions.timestamp, slicer.dimensions.political_party.rollup())
result = reduce_result_set([raw_df, totals_df], (), dimensions, ())
pandas.testing.assert_frame_equal(expected, result)
def test_reduce_single_result_set_with_cont_cat_uni_dimensions_cont_totals(self):
expected = cont_cat_uni_dim_all_totals_df.loc[(slice(None), slice('d', 'r'), slice('1', '2')), :] \
.append(cont_cat_uni_dim_all_totals_df.iloc[-1])
raw_df = replace_totals(cont_cat_uni_dim_df)
totals_df = pd.merge(pd.DataFrame([[None, None, None, None]],
columns=['$d$timestamp', '$d$political_party',
'$d$state', '$d$state_display']),
pd.DataFrame([raw_df[['$m$votes', '$m$wins']].sum(axis=0)]),
how='outer',
left_index=True,
right_index=True)
totals_df = totals_df[['$d$timestamp', '$d$political_party', '$d$state', '$d$state_display',
'$m$votes', '$m$wins']]
dimensions = (slicer.dimensions.timestamp.rollup(), slicer.dimensions.political_party, slicer.dimensions.state)
result = reduce_result_set([raw_df, totals_df], (), dimensions, ())
pandas.testing.assert_frame_equal(expected, result)
def test_reduce_single_result_set_with_cont_cat_uni_dimensions_cat_totals(self):
expected = cont_cat_uni_dim_all_totals_df.loc[(slice(None), slice(None), slice('1', '2')), :] \
.append(cont_cat_uni_dim_all_totals_df.loc[(slice(None), '~~totals'), :].iloc[:-1]) \
.sort_index()
raw_df = replace_totals(cont_cat_uni_dim_df)
totals_df = raw_df.groupby('$d$timestamp').sum().reset_index()
totals_df['$d$political_party'] = None
totals_df['$d$state'] = None
totals_df['$d$state_display'] = None
totals_df = totals_df[['$d$timestamp', '$d$political_party', '$d$state', '$d$state_display',
'$m$votes', '$m$wins']]
dimensions = (slicer.dimensions.timestamp, slicer.dimensions.political_party.rollup(), slicer.dimensions.state)
result = reduce_result_set([raw_df, totals_df], (), dimensions, ())
pandas.testing.assert_frame_equal(expected, result)
def test_reduce_single_result_set_with_cont_cat_uni_dimensions_uni_totals(self):
expected = cont_cat_uni_dim_all_totals_df.loc[(slice(None), slice('d', 'r')), :]
raw_df = replace_totals(cont_cat_uni_dim_df)
totals_df = raw_df.groupby(['$d$timestamp', '$d$political_party']).sum().reset_index()
totals_df['$d$state'] = None
totals_df['$d$state_display'] = None
totals_df = totals_df[['$d$timestamp', '$d$political_party', '$d$state', '$d$state_display',
'$m$votes', '$m$wins']]
dimensions = (slicer.dimensions.timestamp, slicer.dimensions.political_party, slicer.dimensions.state.rollup())
result = reduce_result_set([raw_df, totals_df], (), dimensions, ())
pandas.testing.assert_frame_equal(expected, result)
@skip('BAN-2594')
def test_reduce_single_result_set_with_cont_cat_uni_dimensions_cat_totals_with_null_in_cont_dim(self):
index_names = list(cont_cat_uni_dim_all_totals_df.index.names)
nulls = pd.DataFrame([[np.nan, 'd', '1', 'Texas', 5, 0], [np.nan, 'd', '2', 'California', 2, 0],
[np.nan, 'i', '1', 'Texas', 5, 0], [np.nan, 'i', '2', 'California', 7, 0],
[np.nan, 'r', '1', 'Texas', 11, 0], [np.nan, 'r', '2', 'California', 3, 0]],
columns=index_names + list(cont_cat_uni_dim_all_totals_df.columns))
nulls_totals = pd.DataFrame([nulls[['$m$votes', '$m$wins']].sum()])
nulls_totals[index_names[0]] = np.nan
nulls_totals[index_names[1]] = '~~totals'
nulls_totals[index_names[2]] = '~~totals'
expected = cont_cat_uni_dim_all_totals_df.loc[(slice(None), slice(None), slice('1', '2')), :] \
.append(cont_cat_uni_dim_all_totals_df.loc[(slice(None), '~~totals'), :].iloc[:-1]) \
.append(nulls.set_index(index_names)) \
.append(nulls_totals.set_index(index_names)) \
.sort_index()
raw_df = replace_totals(cont_cat_uni_dim_df)
raw_df = nulls \
.append(raw_df) \
.sort_values(['$d$timestamp', '$d$political_party', '$d$state'])
totals_df = raw_df.groupby('$d$timestamp').sum().reset_index()
null_totals_df = pd.DataFrame([raw_df[raw_df['$d$timestamp'].isnull()]
[['$m$votes', '$m$wins']].sum()])
null_totals_df['$d$timestamp'] = None
totals_df = totals_df.append(null_totals_df)
totals_df['$d$political_party'] = None
totals_df['$d$state'] = None
totals_df['$d$state_display'] = None
totals_df = totals_df[['$d$timestamp', '$d$political_party', '$d$state', '$d$state_display',
'$m$votes', '$m$wins']]
dimensions = (slicer.dimensions.timestamp, slicer.dimensions.political_party.rollup(), slicer.dimensions.state)
result = reduce_result_set([raw_df, totals_df], (), dimensions, ())
pandas.testing.assert_frame_equal(expected, result)
| 47.022831
| 119
| 0.639736
| 1,299
| 10,298
| 4.69592
| 0.082371
| 0.072131
| 0.034426
| 0.034098
| 0.83082
| 0.799672
| 0.778033
| 0.749836
| 0.738033
| 0.73459
| 0
| 0.004895
| 0.226258
| 10,298
| 218
| 120
| 47.238532
| 0.760668
| 0
| 0
| 0.454023
| 0
| 0
| 0.087493
| 0.002428
| 0
| 0
| 0
| 0
| 0.074713
| 1
| 0.08046
| false
| 0
| 0.04023
| 0
| 0.137931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f80a25bec132661adc4b26b49e323b6cdce852f5
| 84
|
py
|
Python
|
core/components/__init__.py
|
roimpacta/exemplos
|
cbfe7c81fc14932697c02eb63bec7d7e4a2c5d5a
|
[
"Apache-2.0"
] | null | null | null |
core/components/__init__.py
|
roimpacta/exemplos
|
cbfe7c81fc14932697c02eb63bec7d7e4a2c5d5a
|
[
"Apache-2.0"
] | null | null | null |
core/components/__init__.py
|
roimpacta/exemplos
|
cbfe7c81fc14932697c02eb63bec7d7e4a2c5d5a
|
[
"Apache-2.0"
] | null | null | null |
from .GerenciadorToken import *
from .GerenciadorEmail import *
from .Token import *
| 28
| 31
| 0.797619
| 9
| 84
| 7.444444
| 0.555556
| 0.298507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130952
| 84
| 3
| 32
| 28
| 0.917808
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f860e18560765b13b68f0d8f4f71cef23aab2504
| 21,298
|
py
|
Python
|
streamselect/adaptive_learning/test_base_adaptive_learner.py
|
BenHals/streamselect
|
ca5e80f3a8a31a38ac52bccfd92528d73f387a6a
|
[
"BSD-3-Clause"
] | null | null | null |
streamselect/adaptive_learning/test_base_adaptive_learner.py
|
BenHals/streamselect
|
ca5e80f3a8a31a38ac52bccfd92528d73f387a6a
|
[
"BSD-3-Clause"
] | null | null | null |
streamselect/adaptive_learning/test_base_adaptive_learner.py
|
BenHals/streamselect
|
ca5e80f3a8a31a38ac52bccfd92528d73f387a6a
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import List, Optional
from river import synth
from river.drift import ADWIN
from river.tree import HoeffdingTreeClassifier
from streamselect.adaptive_learning import BaseAdaptiveLearner
from streamselect.adaptive_learning.reidentification_schedulers import (
DriftDetectionCheck,
DriftInfo,
DriftType,
PeriodicCheck,
)
from streamselect.concept_representations import ErrorRateRepresentation
from streamselect.repository import AbsoluteValueComparer
from streamselect.states import State
from streamselect.utils import Observation
# pylint: disable=too-many-statements, duplicate-code, R0801
def test_init() -> None:
"""Test initialization of the base class."""
al_classifier = BaseAdaptiveLearner(
classifier_constructor=HoeffdingTreeClassifier,
representation_constructor=ErrorRateRepresentation,
representation_comparer=AbsoluteValueComparer(),
drift_detector_constructor=ADWIN,
background_state_mode="drift_reset",
)
# Check initial state has been constructed
assert len(al_classifier.repository.states) == 1
assert al_classifier.active_state_id in al_classifier.repository.states
assert al_classifier.active_state_id in al_classifier.active_window_state_representations
# Assert background state was constructed
assert al_classifier.background_state
assert al_classifier.background_state_active_representation
assert al_classifier.background_state_detector
al_classifier = BaseAdaptiveLearner(
classifier_constructor=HoeffdingTreeClassifier,
representation_constructor=ErrorRateRepresentation,
representation_comparer=AbsoluteValueComparer(),
drift_detector_constructor=ADWIN,
background_state_mode="transition_reset",
)
# Assert background state was constructed
assert al_classifier.background_state
assert al_classifier.background_state_active_representation
assert not al_classifier.background_state_detector
al_classifier = BaseAdaptiveLearner(
classifier_constructor=HoeffdingTreeClassifier,
representation_constructor=ErrorRateRepresentation,
representation_comparer=AbsoluteValueComparer(),
drift_detector_constructor=ADWIN,
background_state_mode="transition_reset",
)
# Assert background state was constructed
assert al_classifier.background_state
assert al_classifier.background_state_active_representation
assert not al_classifier.background_state_detector
al_classifier = BaseAdaptiveLearner(
classifier_constructor=HoeffdingTreeClassifier,
representation_constructor=ErrorRateRepresentation,
representation_comparer=AbsoluteValueComparer(),
drift_detector_constructor=ADWIN,
background_state_mode=None,
)
# Assert background state was constructed
assert not al_classifier.background_state
assert not al_classifier.background_state_active_representation
assert not al_classifier.background_state_detector
# Test that states get the correct properties
window_size = 50
update_period = 50
al_classifier = BaseAdaptiveLearner(
classifier_constructor=HoeffdingTreeClassifier,
representation_constructor=ErrorRateRepresentation,
representation_comparer=AbsoluteValueComparer(),
drift_detector_constructor=ADWIN,
representation_window_size=window_size,
representation_update_period=update_period,
)
# Assert background state was constructed
assert isinstance(al_classifier.get_active_state().classifier, HoeffdingTreeClassifier)
assert isinstance(al_classifier.get_active_state().get_self_representation(), ErrorRateRepresentation)
assert al_classifier.get_active_state().get_self_representation().window_size == window_size
assert al_classifier.get_active_state().get_self_representation().update_period == update_period
# Check that states are correctly made as the concept mode
assert al_classifier.get_active_state().get_self_representation().mode == "concept"
def test_base_predictions() -> None:
"""Test predictions are the same as made by a base classifier."""
al_classifier = BaseAdaptiveLearner(
classifier_constructor=HoeffdingTreeClassifier,
representation_constructor=ErrorRateRepresentation,
representation_comparer=AbsoluteValueComparer(),
drift_detector_constructor=ADWIN,
background_state_mode="drift_reset",
)
baseline_state = State(
HoeffdingTreeClassifier(),
lambda state_id: ErrorRateRepresentation(al_classifier.representation_window_size, state_id),
state_id=-1,
)
baseline_active_representation = ErrorRateRepresentation(
al_classifier.representation_window_size, baseline_state.state_id
)
baseline_comparer = AbsoluteValueComparer()
baseline_detector = ADWIN()
dataset = synth.STAGGER()
for t, (x, y) in enumerate(dataset.take(50)):
p = al_classifier.predict_one(x, t)
ob = Observation(x=x, y=y, seen_at=t, active_state_id=baseline_state.state_id)
p_b = baseline_state.predict_one(ob)
baseline_active_representation.predict_one(ob)
assert p == p_b
al_classifier.learn_one(x, y, timestep=t)
baseline_state.learn_one(ob)
p_b = baseline_state.predict_one(ob)
baseline_active_representation.learn_one(ob)
in_drift, _ = baseline_detector.update(
baseline_comparer.get_state_rep_similarity(baseline_state, baseline_active_representation) # type: ignore
)
if in_drift:
break
assert not al_classifier.performance_monitor.in_drift
assert not al_classifier.performance_monitor.made_transition
def test_drift_detection() -> None:
"""Test predictions are the same as made by a base classifier, and drift detection capabilities are as well."""
al_classifier = BaseAdaptiveLearner(
classifier_constructor=HoeffdingTreeClassifier,
representation_constructor=ErrorRateRepresentation,
representation_comparer=AbsoluteValueComparer(),
drift_detector_constructor=ADWIN,
background_state_mode="drift_reset",
)
baseline_state = State(
HoeffdingTreeClassifier(),
lambda state_id: ErrorRateRepresentation(al_classifier.representation_window_size, state_id, mode="concept"),
state_id=-1,
)
baseline_active_representation = ErrorRateRepresentation(
al_classifier.representation_window_size, baseline_state.state_id
)
baseline_comparer = AbsoluteValueComparer()
baseline_detector = ADWIN()
dataset_0 = synth.STAGGER(classification_function=0, seed=0)
dataset_1 = synth.STAGGER(classification_function=1, seed=0)
found_drift = False
for t, (x, y) in enumerate(dataset_0.take(500)):
# Ensure predictions are equal
ob = Observation(x=x, y=y, seen_at=t, active_state_id=baseline_state.state_id)
p = al_classifier.predict_one(x)
p_b = baseline_state.predict_one(ob, force_train_own_representation=True)
baseline_active_representation.predict_one(ob)
# Ensure background predictions are equal, since we are using drift_reset and no drift will occur.
p_background = al_classifier.background_state.predict_one(ob) # type: ignore
assert al_classifier.background_state
assert al_classifier.background_state_active_representation
assert al_classifier.background_state_detector
assert p_b == p_background
assert p == p_b
assert (
baseline_active_representation.meta_feature_values[0]
== al_classifier.active_window_state_representations[al_classifier.active_state_id].meta_feature_values[0]
)
assert (
baseline_active_representation.meta_feature_values[0]
== al_classifier.background_state_active_representation.meta_feature_values[0]
)
# Assert learning and relevance checks are equal.
# Note: we have to use the second prediction from the baseline, as for the very
# first prediction in the stream the first prediction is None as classes haven't been
# learned. We do this automatically in the adaptive_learning class.
al_classifier.learn_one(x, y)
baseline_state.learn_one(ob, force_train_classifier=True)
baseline_active_representation.learn_one(ob)
baseline_relevance = baseline_comparer.get_state_rep_similarity(baseline_state, baseline_active_representation)
assert (
baseline_state.get_self_representation().meta_feature_values[0]
== al_classifier.get_active_state().get_self_representation().meta_feature_values[0]
)
assert (
baseline_state.get_self_representation().meta_feature_values[0]
== al_classifier.background_state.get_self_representation().meta_feature_values[0]
)
assert (
baseline_active_representation.meta_feature_values[0]
== al_classifier.active_window_state_representations[al_classifier.active_state_id].meta_feature_values[0]
)
assert (
baseline_active_representation.meta_feature_values[0]
== al_classifier.background_state_active_representation.meta_feature_values[0]
)
assert baseline_relevance == al_classifier.performance_monitor.active_state_relevance
assert baseline_relevance == al_classifier.performance_monitor.background_state_relevance
in_drift, _ = baseline_detector.update(baseline_relevance) # type: ignore
assert baseline_detector.total == al_classifier.drift_detector.total # type: ignore
# We shouldn't find a drift in stable data
assert not found_drift
assert not al_classifier.performance_monitor.in_drift
assert not al_classifier.performance_monitor.made_transition
if not found_drift:
for t, (x, y) in enumerate(dataset_1.take(500), start=500):
ob = Observation(x=x, y=y, seen_at=t, active_state_id=baseline_state.state_id)
p = al_classifier.predict_one(x)
p_b = baseline_state.predict_one(ob, force_train_own_representation=True)
baseline_active_representation.predict_one(ob)
assert p == p_b
al_classifier.learn_one(x, y)
baseline_state.learn_one(ob, force_train_classifier=True)
baseline_active_representation.learn_one(ob)
baseline_relevance = baseline_comparer.get_state_rep_similarity(
baseline_state, baseline_active_representation
)
assert baseline_relevance == al_classifier.performance_monitor.active_state_relevance
in_drift, _ = baseline_detector.update(baseline_relevance) # type: ignore
if in_drift:
found_drift = True
break
assert not al_classifier.performance_monitor.in_drift
assert not al_classifier.performance_monitor.made_transition
# We should have found a drift when the concept changed
assert al_classifier.performance_monitor.in_drift
# background should have been reset since we are using "drift_reset"
assert al_classifier.background_state is not None
assert al_classifier.background_state.seen_weight == 0.0
assert al_classifier.get_active_state().seen_weight == 0.0
assert len(al_classifier.repository.states) == 2
assert al_classifier.active_state_id == 1
def test_drift_transition() -> None:
    """Test data after a drift is handled correctly.

    Runs the adaptive learner in lockstep with hand-built baseline components
    (state, representation, comparer, detector) and checks that after a
    concept drift the learner transitions to a new state whose predictions
    match a fresh baseline trained only on the new concept, while the old
    state is retained in the repository unchanged.
    """
    al_classifier = BaseAdaptiveLearner(
        classifier_constructor=HoeffdingTreeClassifier,
        representation_constructor=ErrorRateRepresentation,
        representation_comparer=AbsoluteValueComparer(),
        drift_detector_constructor=ADWIN,
        background_state_mode="drift_reset",
    )
    # Baseline replicating the learner's initial (concept 1) state.
    baseline_c1_state = State(
        HoeffdingTreeClassifier(),
        lambda state_id: ErrorRateRepresentation(al_classifier.representation_window_size, state_id, mode="concept"),
        state_id=-1,
    )
    baseline_c1_active_representation = ErrorRateRepresentation(
        al_classifier.representation_window_size, baseline_c1_state.state_id
    )
    baseline_c1_comparer = AbsoluteValueComparer()
    baseline_c1_detector = ADWIN()
    # Two STAGGER generators with different classification functions give two
    # distinct concepts; the switch between them is the drift under test.
    dataset_1 = synth.STAGGER(classification_function=0, seed=0)
    dataset_2 = synth.STAGGER(classification_function=1, seed=0)
    found_drift = False
    drift_point = None
    # Concept 1
    for t, (x, y) in enumerate(dataset_1.take(500)):
        ob = Observation(x=x, y=y, seen_at=t, active_state_id=baseline_c1_state.state_id)
        al_classifier.predict_one(x)
        baseline_c1_state.predict_one(ob, force_train_own_representation=True)
        baseline_c1_active_representation.predict_one(ob)
        al_classifier.learn_one(x, y)
        baseline_c1_state.learn_one(ob, force_train_classifier=True)
        baseline_c1_active_representation.learn_one(ob)
        baseline_c1_relevance = baseline_c1_comparer.get_state_rep_similarity(
            baseline_c1_state, baseline_c1_active_representation
        )
        in_drift, _ = baseline_c1_detector.update(baseline_c1_relevance)  # type: ignore
        # Stable data: neither the baseline detector nor the learner should
        # have signalled a drift at any step of concept 1.
        assert not found_drift
        assert not al_classifier.performance_monitor.in_drift
        assert not al_classifier.performance_monitor.made_transition
    # Concept 2
    for t, (x, y) in enumerate(dataset_2.take(500), start=500):
        ob = Observation(x=x, y=y, seen_at=t, active_state_id=baseline_c1_state.state_id)
        al_classifier.predict_one(x)
        baseline_c1_state.predict_one(ob, force_train_own_representation=True)
        baseline_c1_active_representation.predict_one(ob)
        al_classifier.learn_one(x, y)
        baseline_c1_state.learn_one(ob, force_train_classifier=True)
        baseline_c1_active_representation.learn_one(ob)
        baseline_c1_relevance = baseline_c1_comparer.get_state_rep_similarity(
            baseline_c1_state, baseline_c1_active_representation
        )
        assert baseline_c1_relevance == al_classifier.performance_monitor.active_state_relevance
        in_drift, _ = baseline_c1_detector.update(baseline_c1_relevance)  # type: ignore
        if in_drift:
            found_drift = True
            drift_point = t
            break
        assert not al_classifier.performance_monitor.in_drift
        assert not al_classifier.performance_monitor.made_transition
    # We should have found a drift when the concept changed
    assert al_classifier.performance_monitor.in_drift
    # background should have been reset since we are using "drift_reset"
    assert al_classifier.background_state is not None
    assert al_classifier.background_state.seen_weight == 0.0
    assert al_classifier.get_active_state().seen_weight == 0.0
    assert len(al_classifier.repository.states) == 2
    assert al_classifier.active_state_id == 1
    assert drift_point
    # Test that after the transition, we are properly using the new state not the old state.
    baseline_c2_state = State(
        HoeffdingTreeClassifier(),
        lambda state_id: ErrorRateRepresentation(al_classifier.representation_window_size, state_id, mode="concept"),
        state_id=-2,
    )
    baseline_c2_active_representation = ErrorRateRepresentation(
        al_classifier.representation_window_size, baseline_c2_state.state_id
    )
    baseline_c2_comparer = AbsoluteValueComparer()
    baseline_c2_detector = ADWIN()
    # Concept 2
    for t, (x, y) in enumerate(dataset_2.take(500), start=500 + drift_point):
        ob = Observation(x=x, y=y, seen_at=t, active_state_id=baseline_c2_state.state_id)
        assert al_classifier.active_state_id == 1
        p_c2 = al_classifier.predict_one(x)
        bp_c2 = baseline_c2_state.predict_one(ob, force_train_own_representation=True)
        # the adaptive learner should give the same results as a new classifier trained on the new concept.
        assert p_c2 == bp_c2
        # The original concept 1 state should be stored, and give the same predictions as the baseline trained
        # only on that data.
        p_c1 = al_classifier.repository.states[0].predict_one(ob)
        bp_c1 = baseline_c1_state.predict_one(ob)
        assert p_c1 == bp_c1
        baseline_c2_active_representation.predict_one(ob)
        al_classifier.learn_one(x, y)
        baseline_c2_state.learn_one(ob, force_train_classifier=True)
        baseline_c2_active_representation.learn_one(ob)
        baseline_c2_relevance = baseline_c2_comparer.get_state_rep_similarity(
            baseline_c2_state, baseline_c2_active_representation
        )
        assert baseline_c2_relevance == al_classifier.performance_monitor.active_state_relevance
        in_drift, _ = baseline_c2_detector.update(baseline_c2_relevance)  # type: ignore
        if in_drift:
            found_drift = True
            drift_point = t
            break
        assert not al_classifier.performance_monitor.in_drift
        assert not al_classifier.performance_monitor.made_transition
def test_reidentification_schedule_detection() -> None:
    """Test that drifts are scheduled at the correct times using the DriftDetectionScheduler."""
    # In this case, we want to see a reidentification check performed 50 timesteps after every drift.
    check_delay = 50
    classifier = BaseAdaptiveLearner(
        classifier_constructor=HoeffdingTreeClassifier,
        representation_constructor=ErrorRateRepresentation,
        representation_comparer=AbsoluteValueComparer(),
        drift_detector_constructor=ADWIN,
        background_state_mode="drift_reset",
        reidentification_check_schedulers=[DriftDetectionCheck(check_delay)],
        representation_window_size=50,
    )
    # Three alternating STAGGER concepts, repeated, to trigger several drifts.
    dataset_0 = synth.STAGGER(classification_function=0, seed=0)
    dataset_1 = synth.STAGGER(classification_function=1, seed=0)
    dataset_2 = synth.STAGGER(classification_function=2, seed=0)
    # Record each change in active state id and each distinct drift event,
    # seeded with None so the first observation always registers a change.
    active_state_segments: List[Optional[int]] = [None]
    drift_checks: List[Optional[DriftInfo]] = [None]
    t = 0
    for dataset in [dataset_0, dataset_1, dataset_2] * 3:
        for x, y in dataset.take(500):
            _ = classifier.predict_one(x, t)
            classifier.learn_one(x, y, timestep=t)
            current_id = classifier.performance_monitor.final_active_state_id
            current_drift = classifier.performance_monitor.last_drift
            if current_id != active_state_segments[-1]:
                active_state_segments.append(current_id)
            if current_drift != drift_checks[-1]:
                drift_checks.append(current_drift)
            t += 1
    # Every scheduled check must follow a real (detector-triggered or
    # transition-causing) drift by exactly check_delay timesteps.
    for i, drift in enumerate(drift_checks):
        if drift is None:
            continue
        if drift.drift_type == DriftType.ScheduledOne:
            prev_drift = drift_checks[i - 1]
            assert prev_drift is not None
            assert prev_drift.drift_type == DriftType.DriftDetectorTriggered or prev_drift.triggered_transition
            # The off-by-one accounts for the check being scheduled on the
            # timestep after the triggering drift was recorded.
            assert prev_drift.drift_timestep == drift.drift_timestep - check_delay - 1
def test_reidentification_schedule_periodic() -> None:
    """Test that drifts are scheduled at the correct times using the PeriodicCheck."""
    # In this case, we want to see a reidentification check performed every
    # check_period (100) timesteps.
    check_period = 100
    classifier = BaseAdaptiveLearner(
        classifier_constructor=HoeffdingTreeClassifier,
        representation_constructor=ErrorRateRepresentation,
        representation_comparer=AbsoluteValueComparer(),
        drift_detector_constructor=ADWIN,
        background_state_mode="drift_reset",
        reidentification_check_schedulers=[PeriodicCheck(check_period)],
        representation_window_size=50,
    )
    # Three alternating STAGGER concepts, repeated, to trigger several drifts.
    dataset_0 = synth.STAGGER(classification_function=0, seed=0)
    dataset_1 = synth.STAGGER(classification_function=1, seed=0)
    dataset_2 = synth.STAGGER(classification_function=2, seed=0)
    # Record each change in active state id and each distinct drift event,
    # seeded with None so the first observation always registers a change.
    active_state_segments: List[Optional[int]] = [None]
    drift_checks: List[Optional[DriftInfo]] = [None]
    t = 0
    for dataset in [dataset_0, dataset_1, dataset_2] * 3:
        for x, y in dataset.take(500):
            _ = classifier.predict_one(x, t)
            classifier.learn_one(x, y, timestep=t)
            current_id = classifier.performance_monitor.final_active_state_id
            current_drift = classifier.performance_monitor.last_drift
            if current_id != active_state_segments[-1]:
                active_state_segments.append(current_id)
            if current_drift != drift_checks[-1]:
                drift_checks.append(current_drift)
            t += 1
    # Every scheduled check must follow the previous drift event by exactly
    # one period (with an extra timestep when the previous drift caused a
    # state transition, which resets the schedule one step later).
    for i, drift in enumerate(drift_checks):
        if drift is None:
            continue
        if drift.drift_type == DriftType.ScheduledOne:
            prev_drift = drift_checks[i - 1]
            assert prev_drift is not None
            if prev_drift.triggered_transition:
                assert prev_drift.drift_timestep == drift.drift_timestep - check_period - 1
            else:
                assert prev_drift.drift_timestep == drift.drift_timestep - check_period
# %%
| 45.218684
| 119
| 0.731994
| 2,476
| 21,298
| 5.953958
| 0.088449
| 0.076516
| 0.031746
| 0.042125
| 0.834283
| 0.818613
| 0.789445
| 0.775878
| 0.768824
| 0.731448
| 0
| 0.011613
| 0.203493
| 21,298
| 470
| 120
| 45.314894
| 0.857404
| 0.099211
| 0
| 0.656
| 0
| 0
| 0.006591
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.016
| false
| 0
| 0.026667
| 0
| 0.042667
| 0.002667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f86507f41b74b2236f297c019d8f962e263f7688
| 101
|
bzl
|
Python
|
cipd_deps.bzl
|
bazelembedded/rules_cipd
|
a94deb125e9611cb06101cf65eca634717d1ddfa
|
[
"MIT"
] | 1
|
2022-02-22T07:31:07.000Z
|
2022-02-22T07:31:07.000Z
|
cipd_deps.bzl
|
bazelembedded/rules_cipd
|
a94deb125e9611cb06101cf65eca634717d1ddfa
|
[
"MIT"
] | 3
|
2022-02-11T11:02:57.000Z
|
2022-02-11T11:11:23.000Z
|
cipd_deps.bzl
|
bazelembedded/rules_cipd
|
a94deb125e9611cb06101cf65eca634717d1ddfa
|
[
"MIT"
] | null | null | null |
load("//cipd/internal:cipd_client.bzl", "cipd_client_deps")
def cipd_deps():
    """Set up CIPD dependencies by registering the CIPD client repositories."""
    cipd_client_deps()
| 20.2
| 59
| 0.732673
| 15
| 101
| 4.533333
| 0.466667
| 0.441176
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09901
| 101
| 4
| 60
| 25.25
| 0.747253
| 0
| 0
| 0
| 0
| 0
| 0.465347
| 0.306931
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f86ac1eac20774749615147922719e2538ae2c34
| 200
|
py
|
Python
|
src/AuShadha/history/family_history/admin.py
|
GosthMan/AuShadha
|
3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e
|
[
"PostgreSQL"
] | 46
|
2015-03-04T14:19:47.000Z
|
2021-12-09T02:58:46.000Z
|
src/AuShadha/history/family_history/admin.py
|
aytida23/AuShadha
|
3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e
|
[
"PostgreSQL"
] | 2
|
2015-06-05T10:29:04.000Z
|
2015-12-06T16:54:10.000Z
|
src/AuShadha/history/family_history/admin.py
|
aytida23/AuShadha
|
3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e
|
[
"PostgreSQL"
] | 24
|
2015-03-23T01:38:11.000Z
|
2022-01-24T16:23:42.000Z
|
from django.contrib import admin
from history.family_history.models import FamilyHistory
class FamilyHistoryAdmin(admin.ModelAdmin):
    """Default (uncustomised) admin configuration for FamilyHistory."""

    pass


# Expose FamilyHistory in the Django admin site using the config above.
admin.site.register(FamilyHistory, FamilyHistoryAdmin)
| 22.222222
| 55
| 0.84
| 22
| 200
| 7.590909
| 0.681818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 200
| 8
| 56
| 25
| 0.927778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
3e0202b21a5816f826865bf1bf625b5ff0554955
| 26
|
py
|
Python
|
oauth2/__init__.py
|
mart-e/requests-oauth2
|
93119cafed2b2393e0c80bd16aaf7a2d2490a8b1
|
[
"BSD-3-Clause-Attribution"
] | 2
|
2015-01-13T11:19:44.000Z
|
2015-09-22T13:28:42.000Z
|
oauth2/__init__.py
|
mart-e/requests-oauth2
|
93119cafed2b2393e0c80bd16aaf7a2d2490a8b1
|
[
"BSD-3-Clause-Attribution"
] | null | null | null |
oauth2/__init__.py
|
mart-e/requests-oauth2
|
93119cafed2b2393e0c80bd16aaf7a2d2490a8b1
|
[
"BSD-3-Clause-Attribution"
] | null | null | null |
from oauth2 import OAuth2
| 13
| 25
| 0.846154
| 4
| 26
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 0.153846
| 26
| 1
| 26
| 26
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3e5f0e3cd644fce2a76b3d624cf39c32c6874bdd
| 109
|
py
|
Python
|
app/user/__init__.py
|
puzzle9/FaceApi
|
9a19babf1759a637261b1ad7d9c35ec630679527
|
[
"MIT"
] | null | null | null |
app/user/__init__.py
|
puzzle9/FaceApi
|
9a19babf1759a637261b1ad7d9c35ec630679527
|
[
"MIT"
] | null | null | null |
app/user/__init__.py
|
puzzle9/FaceApi
|
9a19babf1759a637261b1ad7d9c35ec630679527
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
blueprint = Blueprint('user', __name__, url_prefix='/user')
from . import user
| 18.166667
| 59
| 0.752294
| 14
| 109
| 5.5
| 0.571429
| 0.467532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137615
| 109
| 5
| 60
| 21.8
| 0.819149
| 0
| 0
| 0
| 0
| 0
| 0.082569
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
e410f380f1e80840e45eef86f99fc87966b89a99
| 10,836
|
py
|
Python
|
src/aggressive_ensemble/models.py
|
mpajak98/aggressive-ensemble.torch
|
fbc298b3ccf4f31fa8144c9c927b3b9b4281b4d0
|
[
"MIT"
] | null | null | null |
src/aggressive_ensemble/models.py
|
mpajak98/aggressive-ensemble.torch
|
fbc298b3ccf4f31fa8144c9c927b3b9b4281b4d0
|
[
"MIT"
] | null | null | null |
src/aggressive_ensemble/models.py
|
mpajak98/aggressive-ensemble.torch
|
fbc298b3ccf4f31fa8144c9c927b3b9b4281b4d0
|
[
"MIT"
] | null | null | null |
from torchvision import models
import pretrainedmodels
import torch
from torch import nn
__all__ = ['resnet50', 'resnet152', 'alexnet', 'vgg', 'densenet',
'inception', 'xception', 'nasnetalarge', 'nasnetamobile',
"yolov5s", "yolov5m"]
def set_parameter_requires_grad(model, feature_extracting):
    """Freeze all parameters of *model* when feature extraction is requested.

    When ``feature_extracting`` is true, every parameter's ``requires_grad``
    flag is switched off so that only layers added afterwards are trained.
    Otherwise the model is left untouched.
    """
    if not feature_extracting:
        return
    for parameter in model.parameters():
        parameter.requires_grad = False
def nasnetalarge(num_classes, feature_extract, use_pretrained=True):
    """Build a NASNet-A Large model with a sigmoid head for num_classes outputs.

    :param num_classes: number of target features (output units)
    :type num_classes: int
    :param feature_extract: whether to freeze the backbone weights
    :type feature_extract: bool
    :param use_pretrained: whether to use a pretrained model
        (NOTE(review): currently ignored - the loader always passes
        ``pretrained='imagenet'``; confirm intent)
    :type use_pretrained: bool
    :return: the model, its input size, and the suggested normalization mean
        and standard deviation
    :rtype: model, int, float, float
    """
    model = pretrainedmodels.nasnetalarge(num_classes=1000, pretrained='imagenet')
    set_parameter_requires_grad(model, feature_extract)
    # Replace the final linear layer with a fresh sigmoid head for num_classes.
    num_ftrs = model.last_linear.in_features
    model.last_linear = nn.Sequential(
        nn.Linear(num_ftrs, num_classes),
        nn.Sigmoid())
    input_size = 224
    mean, std = [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
    return model, input_size, mean, std
def nasnetamobile(num_classes, feature_extract, use_pretrained=True):
    """Build a NASNet-A Mobile model with a sigmoid head for num_classes outputs.

    :param num_classes: number of target features (output units)
    :type num_classes: int
    :param feature_extract: whether to freeze the backbone weights
    :type feature_extract: bool
    :param use_pretrained: whether to use a pretrained model
        (NOTE(review): currently ignored - the loader always passes
        ``pretrained='imagenet'``; confirm intent)
    :type use_pretrained: bool
    :return: the model, its input size, and the suggested normalization mean
        and standard deviation
    :rtype: model, int, float, float
    """
    model = pretrainedmodels.nasnetamobile(num_classes=1000, pretrained='imagenet')
    set_parameter_requires_grad(model, feature_extract)
    # Replace the final linear layer with a fresh sigmoid head for num_classes.
    num_ftrs = model.last_linear.in_features
    model.last_linear = nn.Sequential(
        nn.Linear(num_ftrs, num_classes),
        nn.Sigmoid())
    input_size = 224
    mean, std = [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
    return model, input_size, mean, std
def xception(num_classes, feature_extract, use_pretrained=True):
    """Build an Xception model with a sigmoid head for num_classes outputs.

    :param num_classes: number of target features (output units)
    :type num_classes: int
    :param feature_extract: whether to freeze the backbone weights
    :type feature_extract: bool
    :param use_pretrained: whether to use a pretrained model
        (NOTE(review): currently ignored - the loader always passes
        ``pretrained='imagenet'``; confirm intent)
    :type use_pretrained: bool
    :return: the model, its input size, and the suggested normalization mean
        and standard deviation
    :rtype: model, int, float, float
    """
    model = pretrainedmodels.xception(num_classes=1000, pretrained='imagenet')
    set_parameter_requires_grad(model, feature_extract)
    # Replace the final linear layer with a fresh sigmoid head for num_classes.
    num_ftrs = model.last_linear.in_features
    model.last_linear = nn.Sequential(
        nn.Linear(num_ftrs, num_classes),
        nn.Sigmoid())
    input_size = 299
    mean, std = [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
    return model, input_size, mean, std
def inception(num_classes, feature_extract, use_pretrained=True):
    """Build an Inception v3 model with sigmoid heads for num_classes outputs.

    :param num_classes: number of target features (output units)
    :type num_classes: int
    :param feature_extract: whether to freeze the backbone weights
    :type feature_extract: bool
    :param use_pretrained: whether to use a pretrained model
    :type use_pretrained: bool
    :return: the model, its input size, and the suggested normalization mean
        and standard deviation
    :rtype: model, int, float, float
    """
    model = models.inception_v3(pretrained=use_pretrained)
    set_parameter_requires_grad(model, feature_extract)
    # Handle the auxiliary net: Inception v3 has a second classifier head
    # (AuxLogits) used during training, which must be resized as well.
    num_ftrs = model.AuxLogits.fc.in_features
    model.AuxLogits.fc = nn.Sequential(
        nn.Linear(num_ftrs, num_classes),
        nn.Sigmoid())
    # Handle the primary net
    num_ftrs = model.fc.in_features
    model.fc = nn.Sequential(
        nn.Linear(num_ftrs, num_classes),
        nn.Sigmoid())
    input_size = 299
    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
    return model, input_size, mean, std
def densenet(num_classes, feature_extract, use_pretrained=True):
    """Build a DenseNet-121 model with a sigmoid head for num_classes outputs.

    :param num_classes: number of target features (output units)
    :type num_classes: int
    :param feature_extract: whether to freeze the backbone weights
    :type feature_extract: bool
    :param use_pretrained: whether to use a pretrained model
    :type use_pretrained: bool
    :return: the model, its input size, and the suggested normalization mean
        and standard deviation
    :rtype: model, int, float, float
    """
    model = models.densenet121(pretrained=use_pretrained)
    set_parameter_requires_grad(model, feature_extract)
    # Replace the classifier layer with a fresh sigmoid head for num_classes.
    num_ftrs = model.classifier.in_features
    model.classifier = nn.Sequential(
        nn.Linear(num_ftrs, num_classes),
        nn.Sigmoid())
    input_size = 224
    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
    return model, input_size, mean, std
def vgg(num_classes, feature_extract, use_pretrained=True):
    """Build a VGG-11 (batch-norm) model with a sigmoid head for num_classes outputs.

    :param num_classes: number of target features (output units)
    :type num_classes: int
    :param feature_extract: whether to freeze the backbone weights
    :type feature_extract: bool
    :param use_pretrained: whether to use a pretrained model
    :type use_pretrained: bool
    :return: the model, its input size, and the suggested normalization mean
        and standard deviation
    :rtype: model, int, float, float
    """
    model = models.vgg11_bn(pretrained=use_pretrained)
    set_parameter_requires_grad(model, feature_extract)
    # The last element of VGG's classifier Sequential is the output Linear.
    num_ftrs = model.classifier[6].in_features
    model.classifier[6] = nn.Sequential(
        nn.Linear(num_ftrs, num_classes),
        nn.Sigmoid())
    input_size = 224
    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
    return model, input_size, mean, std
def alexnet(num_classes, feature_extract, use_pretrained=True):
    """Build an AlexNet model with a sigmoid head for num_classes outputs.

    :param num_classes: number of target features (output units)
    :type num_classes: int
    :param feature_extract: whether to freeze the backbone weights
    :type feature_extract: bool
    :param use_pretrained: whether to use a pretrained model
    :type use_pretrained: bool
    :return: the model, its input size, and the suggested normalization mean
        and standard deviation
    :rtype: model, int, float, float
    """
    model = models.alexnet(pretrained=use_pretrained)
    set_parameter_requires_grad(model, feature_extract)
    # The last element of AlexNet's classifier Sequential is the output Linear.
    num_ftrs = model.classifier[6].in_features
    model.classifier[6] = nn.Sequential(
        nn.Linear(num_ftrs, num_classes),
        nn.Sigmoid())
    input_size = 224
    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
    return model, input_size, mean, std
def resnet50(num_classes, feature_extract, use_pretrained=True):
    """Build a ResNet-50 model with a sigmoid head for num_classes outputs.

    :param num_classes: number of target features (output units)
    :type num_classes: int
    :param feature_extract: whether to freeze the backbone weights
    :type feature_extract: bool
    :param use_pretrained: whether to use a pretrained model
    :type use_pretrained: bool
    :return: the model, its input size, and the suggested normalization mean
        and standard deviation
    :rtype: model, int, float, float
    """
    model = models.resnet50(pretrained=use_pretrained)
    set_parameter_requires_grad(model, feature_extract)
    # Replace the fully connected layer with a fresh sigmoid head.
    num_ftrs = model.fc.in_features
    model.fc = nn.Sequential(
        nn.Linear(num_ftrs, num_classes),
        nn.Sigmoid())
    input_size = 224
    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
    return model, input_size, mean, std
def resnet152(num_classes, feature_extract, use_pretrained=True):
    """Build a ResNet-152 model with a sigmoid head for num_classes outputs.

    :param num_classes: number of target features (output units)
    :type num_classes: int
    :param feature_extract: whether to freeze the backbone weights
    :type feature_extract: bool
    :param use_pretrained: whether to use a pretrained model
    :type use_pretrained: bool
    :return: the model, its input size, and the suggested normalization mean
        and standard deviation
    :rtype: model, int, float, float
    """
    model = models.resnet152(pretrained=use_pretrained)
    set_parameter_requires_grad(model, feature_extract)
    # Replace the fully connected layer with a fresh sigmoid head.
    num_ftrs = model.fc.in_features
    model.fc = nn.Sequential(
        nn.Linear(num_ftrs, num_classes),
        nn.Sigmoid())
    input_size = 224
    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
    return model, input_size, mean, std
def yolov5s(num_classes, feature_extract, use_pretrained=True):
    """Load the YOLOv5-small model from torch hub.

    :param num_classes: number of target features (passed to the hub loader
        as ``channels``; NOTE(review): in the YOLOv5 hub API ``channels`` is
        the number of *input* channels, not output classes - confirm intent)
    :type num_classes: int
    :param feature_extract: whether to freeze the loaded weights
    :type feature_extract: bool
    :param use_pretrained: whether to use a pretrained model
        (NOTE(review): currently ignored - the hub call always passes
        ``pretrained=True``)
    :type use_pretrained: bool
    :return: the model, its input size, and the suggested normalization mean
        and standard deviation
    :rtype: model, int, float, float
    """
    model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, channels=num_classes)
    set_parameter_requires_grad(model, feature_extract)
    # Bug fix: the YOLOv5 hub model has no ``fc`` attribute, so the previous
    # ``model.fc.in_features`` access raised AttributeError at runtime.
    # Mirror the working sibling ``yolov5m`` and return the model as loaded.
    input_size = 224
    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
    return model, input_size, mean, std
def yolov5m(num_classes, feature_extract, use_pretrained=True):
    """Load the YOLOv5-medium model from torch hub.

    :param num_classes: number of target features (passed to the hub loader
        as ``channels``; NOTE(review): in the YOLOv5 hub API ``channels`` is
        the number of *input* channels, not output classes - confirm intent)
    :type num_classes: int
    :param feature_extract: whether to freeze the loaded weights
    :type feature_extract: bool
    :param use_pretrained: whether to use a pretrained model
        (NOTE(review): currently ignored - the hub call always passes
        ``pretrained=True``)
    :type use_pretrained: bool
    :return: the model, its input size, and the suggested normalization mean
        and standard deviation
    :rtype: model, int, float, float
    """
    model = torch.hub.load('ultralytics/yolov5', 'yolov5m', pretrained=True, channels=num_classes)
    set_parameter_requires_grad(model, feature_extract)
    input_size = 224
    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
    return model, input_size, mean, std
| 38.425532
| 139
| 0.727206
| 1,398
| 10,836
| 5.464235
| 0.079399
| 0.064145
| 0.005891
| 0.007854
| 0.918838
| 0.918838
| 0.914125
| 0.914125
| 0.914125
| 0.914125
| 0
| 0.035524
| 0.184293
| 10,836
| 281
| 140
| 38.562278
| 0.828714
| 0.448505
| 0
| 0.694215
| 0
| 0
| 0.029935
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.099174
| false
| 0
| 0.033058
| 0
| 0.223141
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e4284915889bb4e9f69f9c3e00e74535ef41ee3b
| 29
|
py
|
Python
|
tienda/products/models/__init__.py
|
andresdavidsv/tienda-bbb
|
24a058ded19ed433b1dd03b18057bbbdd7ddc6e5
|
[
"MIT"
] | null | null | null |
tienda/products/models/__init__.py
|
andresdavidsv/tienda-bbb
|
24a058ded19ed433b1dd03b18057bbbdd7ddc6e5
|
[
"MIT"
] | null | null | null |
tienda/products/models/__init__.py
|
andresdavidsv/tienda-bbb
|
24a058ded19ed433b1dd03b18057bbbdd7ddc6e5
|
[
"MIT"
] | null | null | null |
from .products import Product
| 29
| 29
| 0.862069
| 4
| 29
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e494c856e6e268352dc436c488c469d8af50f272
| 28
|
py
|
Python
|
subm/log.py
|
tor4z/python_test
|
6b18110b4e82ad00a065b03d0ee8f7f331b2f874
|
[
"Unlicense"
] | null | null | null |
subm/log.py
|
tor4z/python_test
|
6b18110b4e82ad00a065b03d0ee8f7f331b2f874
|
[
"Unlicense"
] | null | null | null |
subm/log.py
|
tor4z/python_test
|
6b18110b4e82ad00a065b03d0ee8f7f331b2f874
|
[
"Unlicense"
] | null | null | null |
def f():
    """Print the name of the module this function is defined in."""
    module_name = __name__
    print(module_name)
| 14
| 19
| 0.607143
| 4
| 28
| 3.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 28
| 2
| 19
| 14
| 0.590909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
e4af8b24ab6e80b08f14f8700e33b22392b51f48
| 1,539
|
py
|
Python
|
src/var_learning/plot.py
|
rcmdnk/phys_learning
|
2ea0b3e133aed1f57ede03c8ab0c43487a2e0266
|
[
"Apache-2.0"
] | null | null | null |
src/var_learning/plot.py
|
rcmdnk/phys_learning
|
2ea0b3e133aed1f57ede03c8ab0c43487a2e0266
|
[
"Apache-2.0"
] | null | null | null |
src/var_learning/plot.py
|
rcmdnk/phys_learning
|
2ea0b3e133aed1f57ede03c8ab0c43487a2e0266
|
[
"Apache-2.0"
] | null | null | null |
import os
import matplotlib.pyplot as plt
def hist_one(x1, bins, range, name, xlabel, ylabel='Count'):
    """Draw a single blue histogram of *x1* and save it as plots/<name>.pdf."""
    figure, axis = plt.subplots()
    axis.hist(x1, bins=bins, range=range, color='blue')
    axis.set_xlabel(xlabel)
    axis.set_ylabel(ylabel)
    figure.show()
    # Ensure the output directory exists before writing the PDF.
    os.makedirs('plots/', exist_ok=True)
    figure.savefig('plots/' + name + '.pdf')
    plt.close(figure)
def hist_two(x1, x2, bins, range, name, xlabel, ylabel='Count',
             label1='1', label2='2'):
    """Overlay two translucent histograms (blue/red) and save as plots/<name>.pdf."""
    figure, axis = plt.subplots()
    for data, label, colour in ((x1, label1, 'blue'), (x2, label2, 'red')):
        axis.hist(data, bins=bins, label=label, range=range, color=colour,
                  alpha=0.5)
    axis.set_xlabel(xlabel)
    axis.set_ylabel(ylabel)
    axis.legend()
    figure.show()
    # Ensure the output directory exists before writing the PDF.
    os.makedirs('plots/', exist_ok=True)
    figure.savefig('plots/' + name + '.pdf')
    plt.close(figure)
def plot_one(x1, y1, name, xlabel, ylabel='Count'):
    """Plot *y1* against *x1* as a blue line and save it as plots/<name>.pdf."""
    fig, ax = plt.subplots()
    # Bug fix: the original passed ``range=range`` (the builtin) to ax.plot,
    # which is not a valid Line2D property and made the call raise.
    ax.plot(x1, y1, color='blue')
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    fig.show()
    # Ensure the output directory exists before writing the PDF.
    os.makedirs('plots/', exist_ok=True)
    fig.savefig('plots/' + name + '.pdf')
    plt.close(fig)
def plot_two(x1, y1, x2, y2, name, xlabel, ylabel='Count',
             label1='1', label2='2'):
    """Plot two labelled lines (blue/red) on one axis and save as plots/<name>.pdf."""
    figure, axis = plt.subplots()
    for xs, ys, label, colour in ((x1, y1, label1, 'blue'),
                                  (x2, y2, label2, 'red')):
        axis.plot(xs, ys, label=label, color=colour)
    axis.set_xlabel(xlabel)
    axis.set_ylabel(ylabel)
    axis.legend()
    figure.show()
    # Ensure the output directory exists before writing the PDF.
    os.makedirs('plots/', exist_ok=True)
    figure.savefig('plots/' + name + '.pdf')
    plt.close(figure)
| 29.037736
| 78
| 0.615335
| 233
| 1,539
| 3.995708
| 0.197425
| 0.042965
| 0.068743
| 0.090226
| 0.77014
| 0.77014
| 0.750806
| 0.750806
| 0.73362
| 0.619764
| 0
| 0.027597
| 0.19948
| 1,539
| 52
| 79
| 29.596154
| 0.728084
| 0
| 0
| 0.727273
| 0
| 0
| 0.071475
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.045455
| 0
| 0.136364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e4cab05bb978830b4e6ee56915ffc29d98686a7e
| 156
|
py
|
Python
|
jj.py
|
abinashstack/Flaskapp
|
4848f7dce656f7cbc08324a6af6c8f1c0facc039
|
[
"MIT"
] | null | null | null |
jj.py
|
abinashstack/Flaskapp
|
4848f7dce656f7cbc08324a6af6c8f1c0facc039
|
[
"MIT"
] | null | null | null |
jj.py
|
abinashstack/Flaskapp
|
4848f7dce656f7cbc08324a6af6c8f1c0facc039
|
[
"MIT"
] | null | null | null |
@app.route('/home')
def home():
    """Redirect /home to the about page, greeting a default name."""
    target = url_for('about', name='World')
    return redirect(target)
@app.route('/about/<name>')
def about(name):
    """Greet the visitor named in the URL path."""
    greeting = f'Hello {name}'
    return greeting
| 22.285714
| 51
| 0.634615
| 23
| 156
| 4.26087
| 0.565217
| 0.27551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128205
| 156
| 7
| 52
| 22.285714
| 0.720588
| 0
| 0
| 0
| 0
| 0
| 0.254777
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
90069c2f28f43e65b0a8454be111168e2caeb3d1
| 83
|
py
|
Python
|
models/encoder/__init__.py
|
MinkaiXu/GeoDiff
|
c6f26dc250308bff8923a19884e601e0bb0f975a
|
[
"MIT"
] | 9
|
2022-03-08T12:32:29.000Z
|
2022-03-31T10:39:45.000Z
|
models/encoder/__init__.py
|
MinkaiXu/GeoDiff
|
c6f26dc250308bff8923a19884e601e0bb0f975a
|
[
"MIT"
] | 1
|
2022-03-30T23:03:07.000Z
|
2022-03-31T00:12:07.000Z
|
models/encoder/__init__.py
|
MinkaiXu/GeoDiff
|
c6f26dc250308bff8923a19884e601e0bb0f975a
|
[
"MIT"
] | 3
|
2022-03-01T06:45:40.000Z
|
2022-03-30T13:12:20.000Z
|
from .schnet import *
from .gin import *
from .edge import *
from .coarse import *
| 16.6
| 21
| 0.710843
| 12
| 83
| 4.916667
| 0.5
| 0.508475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192771
| 83
| 4
| 22
| 20.75
| 0.880597
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5f406160957645fd9bd6e0559a1ad65fa883db46
| 22,084
|
py
|
Python
|
licornes/tests/test_views.py
|
blacherez/jioti
|
5a850feb197242688768119a184f95042229fd29
|
[
"MIT"
] | null | null | null |
licornes/tests/test_views.py
|
blacherez/jioti
|
5a850feb197242688768119a184f95042229fd29
|
[
"MIT"
] | 25
|
2019-01-01T15:37:19.000Z
|
2019-01-06T18:58:29.000Z
|
licornes/tests/test_views.py
|
blacherez/jioti
|
5a850feb197242688768119a184f95042229fd29
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
# Create your tests here.
from django.urls import reverse
from licornes.models import Licorne
from licornes.models import User
from licornes.models import Etape
from django.conf import settings
from bs4 import BeautifulSoup
import re
import os
class IndexViewTest(TestCase):
    """Tests for the licornes index view (URL, template, content, map, links)."""

    @classmethod
    def setUpTestData(cls):
        # Create a few users and give each of them several licornes.
        number_of_creators = 2
        number_of_licornes = 3
        cls.total_licornes = number_of_creators * number_of_licornes
        for user_id in range(number_of_creators):
            User.objects.create(username=f"utilisateur {user_id}")
            u = User.objects.get(username=f"utilisateur {user_id}")
            for licorne_id in range(number_of_licornes):
                Licorne.objects.create(
                    nom=f'Licorne {licorne_id} de {user_id}',
                    identifiant=f'{user_id}-{licorne_id}',
                    createur=u,
                )

    def test_view_url_exists_at_desired_location(self):
        # The index must be reachable at its hard-coded path.
        response = self.client.get('/licornes/')
        self.assertEqual(response.status_code, 200)

    def test_view_url_accessible_by_name(self):
        # The index must also resolve through its URL name.
        response = self.client.get(reverse('index'))
        self.assertEqual(response.status_code, 200)

    def test_view_uses_correct_template(self):
        response = self.client.get(reverse('index'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'licornes/index.html')

    def test_licornes_are_present(self):
        # All created licornes must appear in the view context and the page.
        response = self.client.get(reverse('index'))
        self.assertEqual(response.status_code, 200)
        self.assertTrue('meslicornes' in response.context)
        #self.assertTrue(response.context['meslicornes'] == True)
        self.assertTrue(len(response.context['meslicornes']) == self.total_licornes)
        #print(str(response.content))
        self.assertTrue("Licorne 0 de 0" in str(response.content))

    def test_licornes_ont_badge(self):
        # Every licorne heading must carry a "badge" span.
        response = self.client.get(reverse('index'))
        soup = BeautifulSoup(response.content, features="html.parser")
        h2s = soup.find_all("h2")
        badges_de_licornes = 0
        for h2 in h2s:
            if h2.span and "badge" in h2.span["class"]:
                badges_de_licornes += 1
        self.assertTrue(badges_de_licornes)
        self.assertEqual(badges_de_licornes, self.total_licornes)

    def test_titres_present(self):
        response = self.client.get(reverse('index'))
        self.assertEqual(response.status_code, 200)
        self.assertInHTML("Mes licornes", str(response.content))
        self.assertInHTML("Trajet", str(response.content))

    def test_bouton_ajouter_present(self):
        response = self.client.get(reverse('index'))
        self.assertEqual(response.status_code, 200)
        self.assertTrue("+ Ajouter une licorne" in str(response.content))

    def test_div_map_present(self):
        # The page must contain the <div id="map"> placeholder for the map.
        response = self.client.get(reverse('index'))
        soup = BeautifulSoup(response.content, features="html.parser")
        divs = soup.find_all("div")
        div_map_in_divs = False
        for d in divs:
            if d.has_attr("id") and d["id"] == "map":
                div_map_in_divs = True
        self.assertTrue(div_map_in_divs)

    def test_liens_vers_licornes_presents(self):
        # At least one anchor must link to the detail page of licorne 1.
        response = self.client.get(reverse('index'))
        soup = BeautifulSoup(response.content, features="html.parser")
        a = soup.find_all("a")
        lien_vers_1_dans_liens = False
        for l in a:
            if "licorne/1" in l["href"]:
                lien_vers_1_dans_liens = True
                break
        self.assertTrue(lien_vers_1_dans_liens)

    def test_aucune_licorne_nest_active(self):
        # On the index page no licorne link should be marked "active".
        response = self.client.get(reverse('index'))
        soup = BeautifulSoup(response.content, features="html.parser")
        a = soup.find_all("a")
        active_in_a_class = 0
        for l in a:
            if l.has_attr("class"):
                classes = l["class"]
                if "active" in classes:
                    active_in_a_class += 1
        self.assertFalse(active_in_a_class)

    def test_pas_de_polyline(self):
        # The index page should not draw any route polyline.
        response = self.client.get(reverse('index'))
        self.assertFalse("google.maps.Polyline" in str(response.content))
class AddViewTest(TestCase):
    """Tests for the 'add a licorne' form view."""

    @classmethod
    def setUpTestData(cls):
        # One user owning one licorne; a second identifier is kept free so
        # tests can create a brand-new licorne.
        cls.identifiant_existant = "777"
        cls.identifiant_inexistant = "666"
        User.objects.create(username=f"kuala")
        u = User.objects.get(username=f"kuala")
        Licorne.objects.create(
            nom=f'Licorne de {u}',
            identifiant=f'{cls.identifiant_existant}',
            createur=u,
        )
        cls.u = u
        cls.l = Licorne.objects.get(identifiant=cls.identifiant_existant)

    def test_view_url_exists_at_desired_location(self):
        response = self.client.get('/licornes/add/')
        self.assertEqual(response.status_code, 200)

    def test_view_url_accessible_by_name(self):
        response = self.client.get(reverse('add'))
        self.assertEqual(response.status_code, 200)

    def test_view_uses_correct_template(self):
        response = self.client.get(reverse('add'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'licornes/licorne_form.html')

    def test_view_titre(self):
        response = self.client.get(reverse('add'))
        self.assertEqual(response.status_code, 200)
        self.assertTrue("Ajouter une licorne" in str(response.content))

    def test_view_fields_presents(self):
        # The form exposes Nom/Identifiant/Image fields (not "Photo"), and
        # the list-page "add" button must not leak into the form page.
        response = self.client.get(reverse('add'))
        self.assertEqual(response.status_code, 200)
        self.assertTrue("Nom" in str(response.content))
        self.assertTrue("Identifiant" in str(response.content))
        self.assertFalse("Photo" in str(response.content))
        self.assertTrue("Image" in str(response.content))
        self.assertFalse("+ Ajouter une licorne" in str(response.content))

    def test_redirects_to_etape_on_success(self):
        #response = self.client.get(reverse('etape', args=[self.identifiant_existant]))
        #self.assertEqual(response.status_code, 200)
        # NOTE(review): the test image path is relative to the working
        # directory; assumes tests run from the project root.
        with open(os.path.join("licornes/tests", "image-test.jpg"), "rb") as i:
            response = self.client.post(reverse('add'), {"nom": "Bouou", "identifiant": self.identifiant_inexistant, "createur": self.u.id, "image": i})
        self.assertRedirects(response, reverse('etape', args=[self.identifiant_inexistant]))

    def test_nom_ne_peut_pas_etre_vide(self):
        response = self.client.post(reverse('add'), {"nom": "", "identifiant": self.identifiant_inexistant, "createur": self.u.id})
        self.assertEqual(response.status_code, 200)
        self.assertFormError(response, 'form', 'nom', 'Ce champ est obligatoire.')

    def test_identifiant_ne_peut_pas_etre_vide(self):
        response = self.client.post(reverse('add'), {"nom": "UIOU", "identifiant": "", "createur": self.u.id})
        self.assertEqual(response.status_code, 200)
        self.assertFormError(response, 'form', 'identifiant', 'Ce champ est obligatoire.')

    def test_champ_image_peut_etre_vide(self):
        # Image is optional: posting an empty value still succeeds.
        response = self.client.post(reverse('add'), {"nom": "Bouou", "identifiant": self.identifiant_inexistant, "createur": self.u.id, "image": ""})
        self.assertRedirects(response, reverse('etape', args=[self.identifiant_inexistant]))

    def test_champ_image_doit_etre_une_image(self):
        # A non-image upload must be rejected with the localized form error.
        with open(os.path.join("licornes/tests", "spam.txt"), "r") as i:
            response = self.client.post(reverse('add'), {"nom": "Bouou", "identifiant": self.identifiant_inexistant, "createur": self.u.id, "image": i})
        self.assertEqual(response.status_code, 200)
        self.assertFormError(response, 'form', 'image', "Téléversez une image valide. Le fichier que vous avez transféré n'est pas une image ou bien est corrompu.")
class EtapeViewTest(TestCase):
    """Tests for the 'add an etape (stage)' view, keyed by licorne identifier."""

    @classmethod
    def setUpTestData(cls):
        cls.identifiant_existant = "777"
        cls.identifiant_inexistant = "666"
        User.objects.create(username=f"kuala")
        u = User.objects.get(username=f"kuala")
        Licorne.objects.create(
            nom=f'Licorne de {u}',
            identifiant=f'{cls.identifiant_existant}',
            createur=u,
        )
        cls.u = u
        cls.l = Licorne.objects.get(identifiant=cls.identifiant_existant)

    # The argument-less version of the URL is no longer usable.
    def test_view_url_returns_404_if_no_licorne(self):
        response = self.client.get('/licornes/etape/')
        self.assertEqual(response.status_code, 404)

    def test_view_url_by_name_404_if_no_licorne(self):
        response = self.client.get(reverse('etape'))
        self.assertEqual(response.status_code, 404)

    # Version with an identifier argument.
    def test_view_url_exists_at_desired_location(self):
        response = self.client.get('/licornes/etape/%s/' % (self.identifiant_existant))
        self.assertEqual(response.status_code, 200)

    def test_view_url_accessible_by_name(self):
        response = self.client.get(reverse('etape', args=[self.identifiant_existant]))
        self.assertEqual(response.status_code, 200)

    def test_view_uses_correct_template(self):
        response = self.client.get(reverse('etape', args=[self.identifiant_existant]))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'licornes/etape_form.html')

    def test_view_titre(self):
        # The page heading names the licorne being extended.
        licorne = Licorne.objects.get(identifiant=self.identifiant_existant)
        response = self.client.get(reverse('etape', args=[self.identifiant_existant]))
        self.assertEqual(response.status_code, 200)
        soup = BeautifulSoup(response.content, features="html.parser")
        h1 = soup.h1.string
        self.assertEqual(h1, "Ajouter une étape pour %s" % (licorne))

    def test_view_fields_presents(self):
        # Expected form fields: localisation, auteur, media; 'current' must
        # not be user-editable, and the licorne is carried in a hidden input.
        response = self.client.get(reverse('etape', args=[self.identifiant_existant]))
        self.assertEqual(response.status_code, 200)
        soup = BeautifulSoup(response.content, features="html.parser")
        lbls = soup.find_all("label")
        labels = []
        for l in lbls:
            labels.append(l["for"])
        self.assertTrue("id_localisation" in labels)
        self.assertFalse("id_current" in labels)
        self.assertTrue("id_auteur" in labels)
        self.assertTrue("id_media" in labels)
        # Hidden input field for the licorne.
        inputs = soup.find_all("input")
        licorne_in_hidden_field = False
        for i in inputs:
            if i["type"] == "hidden" and i["name"] == "licorne":
                licorne_in_hidden_field = True
                break
        self.assertTrue(licorne_in_hidden_field)

    def test_view_autocomplete_present(self):
        # The localisation field relies on an autocomplete.js script tag.
        response = self.client.get(reverse('etape', args=[self.identifiant_existant]))
        self.assertEqual(response.status_code, 200)
        soup = BeautifulSoup(response.content, features="html.parser")
        scripts = soup.find_all("script")
        autocomplete_in_src = False
        #print(scripts)
        for s in scripts:
            if s.has_attr("src"):
                src = s["src"]
                if "autocomplete.js" in src:
                    autocomplete_in_src = True
                    #autocomplete_in_src = True
        self.assertTrue(autocomplete_in_src)

    def test_view_creer_si_inexistante(self):
        # If the given identifier does not match an existing licorne,
        # the view offers to create one instead.
        response = self.client.get(reverse('etape', args=[self.identifiant_inexistant]))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'licornes/creer.html')
        soup = BeautifulSoup(response.content, features="html.parser")
        t = soup.title
        # NOTE(review): ``in`` on a bs4 Tag tests membership among its direct
        # children — this passes only because the <title> holds a single text
        # node; confirm this is the intended assertion.
        self.assertTrue("J'irai où tu iras" in t)
        h1 = soup.h1.string
        self.assertTrue("Licorne inexistante" in h1)
        a = soup.find_all("a")
        add_in_href = False
        for l in a:
            if "/add" in l["href"]:
                add_in_href = True
        self.assertTrue(add_in_href)
        self.assertTrue(f"{self.identifiant_inexistant}" in str(response.content))

    def test_form_etape_valeur_initiale_licorne(self):
        # The form is pre-filled with the licorne resolved from the URL.
        response = self.client.get(reverse('etape', args=[self.identifiant_existant]))
        self.assertEqual(response.status_code, 200)
        licorne = Licorne.objects.get(identifiant=self.identifiant_existant)
        self.assertEqual(response.context['form'].initial['licorne'], licorne)

    def test_redirects_to_index_on_success(self):
        #response = self.client.get(reverse('etape', args=[self.identifiant_existant]))
        #self.assertEqual(response.status_code, 200)
        response = self.client.post(reverse('etape', args=[self.l.identifiant]), {"localisation": "Pau, France", "auteur": self.u.id, "media": "Tagalok", "licorne": self.l.id})
        self.assertRedirects(response, reverse('index'))

    def test_form_invalid_licorne(self):
        # Posting a licorne primary key that does not exist is rejected.
        wrong_id = 78787897873
        response = self.client.post(reverse('etape', args=[self.l.identifiant]), {"localisation": "Pau, France", "auteur": self.u.id, "media": "Tagalok", "licorne": wrong_id})
        self.assertEqual(response.status_code, 200)
        self.assertFormError(response, 'form', 'licorne', 'Sélectionnez un choix valide. Ce choix ne fait pas partie de ceux disponibles.')

    def test_form_invalid_localisation(self):
        response = self.client.post(reverse('etape', args=[self.l.identifiant]), {"localisation": "", "auteur": self.u.id, "media": "Tagalok", "licorne": self.l.id})
        self.assertEqual(response.status_code, 200)
        self.assertFormError(response, 'form', 'localisation', 'Ce champ est obligatoire.')

    def test_form_invalid_auteur(self):
        wrong_id = 78787897873
        response = self.client.post(reverse('etape', args=[self.l.identifiant]), {"localisation": "Pau, France", "auteur": wrong_id, "media": "Tagalok", "licorne": self.l.id})
        self.assertEqual(response.status_code, 200)
        self.assertFormError(response, 'form', 'auteur', 'Sélectionnez un choix valide. Ce choix ne fait pas partie de ceux disponibles.')

    # def test_form_invalid_renewal_date_future(self):
    #     login = self.client.login(username='testuser2', password='2HJ1vRV0Z&3iD')
    #     invalid_date_in_future = datetime.date.today() + datetime.timedelta(weeks=5)
    #     response = self.client.post(reverse('renew-book-librarian', kwargs={'pk': self.test_bookinstance1.pk}), {'renewal_date': invalid_date_in_future})
    #     self.assertEqual(response.status_code, 200)
    #     self.assertFormError(response, 'form', 'renewal_date', 'Invalid date - renewal more than 4 weeks ahead')
class LicorneViewTest(TestCase):
    """Tests for the licorne detail view (licornes/licorne.html).

    The detail view renders the whole licorne list with the selected one
    marked "active", plus the map and per-licorne badges/images.
    """

    @classmethod
    def setUpTestData(cls):
        # Create users, each owning several licornes, and remember every
        # created licorne so tests can pick one by position.
        number_of_creators = 2
        number_of_licornes = 3
        cls.total_licornes = number_of_creators * number_of_licornes
        cls.licornes_de_test = []
        for user_id in range(number_of_creators):
            User.objects.create(username=f"utilisateur {user_id}")
            u = User.objects.get(username=f"utilisateur {user_id}")
            for licorne_id in range(number_of_licornes):
                Licorne.objects.create(
                    nom=f'Licorne {licorne_id} de {user_id}',
                    identifiant=f'{user_id}-{licorne_id}',
                    createur=u,
                    image=f'{licorne_id}.png',
                )
                cls.licornes_de_test.append(Licorne.objects.latest("id"))

    def test_view_url_exists_at_desired_location(self):
        id_lic = self.licornes_de_test[3].id
        response = self.client.get(f'/licornes/licorne/{id_lic}/')
        self.assertEqual(response.status_code, 200)

    def test_view_url_redirected_if_no_trailing_slash(self):
        # Django's APPEND_SLASH behaviour: permanent redirect to the
        # canonical URL with trailing slash.
        id_lic = self.licornes_de_test[3].id
        response = self.client.get(f'/licornes/licorne/{id_lic}')
        self.assertEqual(response.status_code, 301)

    def test_view_url_accessible_by_name(self):
        id_lic = self.licornes_de_test[3].id
        response = self.client.get(reverse('licorne', args=[id_lic]))
        self.assertEqual(response.status_code, 200)

    def test_view_uses_correct_template(self):
        id_lic = self.licornes_de_test[3].id
        response = self.client.get(reverse('licorne', args=[id_lic]))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'licornes/licorne.html')

    def test_licornes_are_present(self):
        # The detail page still lists every licorne in 'meslicornes'.
        id_lic = self.licornes_de_test[3].id
        response = self.client.get(reverse('licorne', args=[id_lic]))
        self.assertEqual(response.status_code, 200)
        self.assertTrue('meslicornes' in response.context)
        self.assertTrue(len(response.context['meslicornes']) == self.total_licornes)
        self.assertTrue("Licorne 0 de 0" in str(response.content))

    def test_titres_present(self):
        id_lic = self.licornes_de_test[3].id
        response = self.client.get(reverse('licorne', args=[id_lic]))
        self.assertEqual(response.status_code, 200)
        self.assertTrue("Mes licornes" in str(response.content))
        self.assertInHTML("Trajet", str(response.content))

    def test_bouton_ajouter_present(self):
        id_lic = self.licornes_de_test[3].id
        response = self.client.get(reverse('licorne', args=[id_lic]))
        self.assertEqual(response.status_code, 200)
        self.assertTrue("+ Ajouter une licorne" in str(response.content))

    def test_div_map_present(self):
        # The page must contain the <div id="map"> placeholder for the map.
        id_lic = self.licornes_de_test[3].id
        response = self.client.get(reverse('licorne', args=[id_lic]))
        soup = BeautifulSoup(response.content, features="html.parser")
        divs = soup.find_all("div")
        div_map_in_divs = False
        for d in divs:
            if d.has_attr("id") and d["id"] == "map":
                div_map_in_divs = True
        self.assertTrue(div_map_in_divs)

    def test_liens_vers_licornes_presents(self):
        id_lic = self.licornes_de_test[3].id
        response = self.client.get(reverse('licorne', args=[id_lic]))
        soup = BeautifulSoup(response.content, features="html.parser")
        a = soup.find_all("a")
        lien_vers_1_dans_liens = False
        for l in a:
            if "licorne/1" in l["href"]:
                lien_vers_1_dans_liens = True
                break
        self.assertTrue(lien_vers_1_dans_liens)

    def test_une_licorne_est_active(self):
        # Exactly one licorne link carries the "active" class: the one
        # whose detail page is being displayed.
        id_lic = self.licornes_de_test[3].id
        response = self.client.get(reverse('licorne', args=[id_lic]))
        soup = BeautifulSoup(response.content, features="html.parser")
        a = soup.find_all("a")
        active_in_a_class = 0
        for l in a:
            if l.has_attr("class"):
                classes = l["class"]
                if "active" in classes:
                    active_in_a_class += 1
        self.assertTrue(active_in_a_class)
        self.assertEqual(active_in_a_class, 1)

    def test_licornes_ont_badge(self):
        response = self.client.get(reverse('index'))
        soup = BeautifulSoup(response.content, features="html.parser")
        h2s = soup.find_all("h2")
        badges_de_licornes = 0
        for h2 in h2s:
            if h2.span and "badge" in h2.span["class"]:
                badges_de_licornes += 1
        self.assertTrue(badges_de_licornes)
        self.assertEqual(badges_de_licornes, self.total_licornes)

    def test_licornes_ont_image(self):
        # Every list item has an <img> whose filename matches the licorne
        # number embedded in its heading ("Licorne N de M" -> "N.png").
        response = self.client.get(reverse('index'))
        soup = BeautifulSoup(response.content, features="html.parser")
        lics = soup.find_all(attrs={"class": "list-group-item"})
        lic_img = 0
        bons_noms_dimages = 0
        for l in lics:
            # BUG FIX: re.M was previously passed as re.sub's positional
            # ``count`` argument (count == re.M == 8) instead of ``flags``;
            # pass it explicitly as the keyword ``flags``.
            numero = re.sub("Licorne ([0-9]+).*", "\\1", l.h2.text, flags=re.M)[0:4].strip()
            if l.img:
                lic_img += 1
                if os.path.basename(l.img["src"]) == f'{numero}.png':
                    bons_noms_dimages += 1
        self.assertTrue(lic_img)
        self.assertTrue(bons_noms_dimages)
        self.assertEqual(lic_img, len(lics))
        self.assertEqual(bons_noms_dimages, len(lics))
class MediaViewTest(TestCase):
    """Tests for the media view, addressed by Etape id."""

    @classmethod
    def setUpTestData(cls):
        # One licorne with three etapes to look up by localisation.
        cls.identifiant_existant = "777"
        cls.identifiant_inexistant = "666"
        User.objects.create(username=f"kuala")
        u = User.objects.get(username=f"kuala")
        Licorne.objects.create(
            nom=f'Licorne de {u}',
            identifiant=f'{cls.identifiant_existant}',
            createur=u,
        )
        l = Licorne.objects.get(nom=f'Licorne de {u}')
        # NOTE(review): the explicit save() calls are redundant —
        # objects.create() already persists the row.
        e0 = Etape.objects.create(licorne=l, auteur=u, localisation="Paris, France")
        e0.save()
        e1 = Etape.objects.create(licorne=l, auteur=u, localisation="Berlin, Allemagne")
        e1.save()
        e2 = Etape.objects.create(licorne=l, auteur=u, localisation="San Francisco")
        e2.save()

    # Version with an id argument.
    def test_view_url_exists_at_desired_location(self):
        e1 = Etape.objects.get(localisation="Berlin, Allemagne")
        u = '/licornes/media/%s/' % (e1.id)
        response = self.client.get(u)
        self.assertEqual(response.status_code, 200)

    def test_view_url_accessible_by_name(self):
        e1 = Etape.objects.get(localisation="Berlin, Allemagne")
        response = self.client.get(reverse('media', args=[e1.id]))
        self.assertEqual(response.status_code, 200)

    def test_404_if_nonexistant_id(self):
        # An id that matches no Etape must yield a 404.
        response = self.client.get(reverse('media', args=[11111111]))
        self.assertEqual(response.status_code, 404)
| 44.345382
| 176
| 0.65509
| 2,756
| 22,084
| 5.057692
| 0.113933
| 0.03874
| 0.068441
| 0.064782
| 0.806801
| 0.791448
| 0.767056
| 0.739723
| 0.703279
| 0.688141
| 0
| 0.015106
| 0.223601
| 22,084
| 497
| 177
| 44.434608
| 0.797854
| 0.06104
| 0
| 0.648241
| 0
| 0.002513
| 0.119411
| 0.013279
| 0
| 0
| 0
| 0
| 0.243719
| 1
| 0.138191
| false
| 0
| 0.022613
| 0
| 0.173367
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
39798e641222dde2d6270db0d552e617d0712338
| 31
|
py
|
Python
|
lltk/corpus/internet_archive/__init__.py
|
literarylab/lltk
|
0e516d7fa0978c1a3bd2cb7636f0089772e515ec
|
[
"MIT"
] | 5
|
2021-03-15T21:05:06.000Z
|
2022-03-04T10:52:16.000Z
|
lltk/corpus/internet_archive/__init__.py
|
literarylab/lltk
|
0e516d7fa0978c1a3bd2cb7636f0089772e515ec
|
[
"MIT"
] | 1
|
2021-05-04T17:01:47.000Z
|
2021-05-10T15:14:55.000Z
|
lltk/corpus/internet_archive/__init__.py
|
literarylab/lltk
|
0e516d7fa0978c1a3bd2cb7636f0089772e515ec
|
[
"MIT"
] | null | null | null |
from .internet_archive import *
| 31
| 31
| 0.83871
| 4
| 31
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 31
| 1
| 31
| 31
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
39e7e3d16726a910820596885a270fba06ddec80
| 281
|
py
|
Python
|
challanges/fifo_animal_shelter/conftest.py
|
Patricia888/data-structures-and-algorithms
|
8963acf857b9f7069eeeea2884b41376986c3d7c
|
[
"MIT"
] | null | null | null |
challanges/fifo_animal_shelter/conftest.py
|
Patricia888/data-structures-and-algorithms
|
8963acf857b9f7069eeeea2884b41376986c3d7c
|
[
"MIT"
] | null | null | null |
challanges/fifo_animal_shelter/conftest.py
|
Patricia888/data-structures-and-algorithms
|
8963acf857b9f7069eeeea2884b41376986c3d7c
|
[
"MIT"
] | null | null | null |
import pytest
from . import AnimalShelter
@pytest.fixture
def empty_queue():
    # A shelter with no animals queued.
    return AnimalShelter()
@pytest.fixture
def short_queue():
    # A shelter seeded with alternating dogs and cats.
    return AnimalShelter(['dog', 'cat', 'dog', 'cat'])
@pytest.fixture
def ddc_queue():
    # A shelter seeded dog-dog-cat, for ordering-specific tests.
    return AnimalShelter(['dog', 'dog', 'cat'])
| 15.611111
| 54
| 0.683274
| 33
| 281
| 5.727273
| 0.393939
| 0.206349
| 0.253968
| 0.306878
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156584
| 281
| 17
| 55
| 16.529412
| 0.797468
| 0
| 0
| 0.272727
| 0
| 0
| 0.074733
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| true
| 0
| 0.181818
| 0.272727
| 0.727273
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
f2d8865dab1292d8d1e305f92362acac52856ec9
| 145
|
py
|
Python
|
mogpe/mixture_of_experts/__init__.py
|
aidanscannell/mogpe
|
25a9af473d73d6fa35bd060bee0eb2c372b995e5
|
[
"Apache-2.0"
] | 11
|
2021-04-01T02:40:21.000Z
|
2022-01-31T16:14:44.000Z
|
mogpe/mixture_of_experts/__init__.py
|
aidanscannell/mogpe
|
25a9af473d73d6fa35bd060bee0eb2c372b995e5
|
[
"Apache-2.0"
] | null | null | null |
mogpe/mixture_of_experts/__init__.py
|
aidanscannell/mogpe
|
25a9af473d73d6fa35bd060bee0eb2c372b995e5
|
[
"Apache-2.0"
] | 3
|
2021-04-04T02:45:34.000Z
|
2021-11-22T23:48:28.000Z
|
#!/usr/bin/env python3
from mogpe.mixture_of_experts.base import MixtureOfExperts
from mogpe.mixture_of_experts.svgp import MixtureOfSVGPExperts
| 36.25
| 62
| 0.868966
| 20
| 145
| 6.1
| 0.7
| 0.147541
| 0.262295
| 0.295082
| 0.409836
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007407
| 0.068966
| 145
| 3
| 63
| 48.333333
| 0.896296
| 0.144828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8406f84d465e80185ed69e2e1d6e5d6565c45336
| 131
|
py
|
Python
|
mllearn/alg_adapt/__init__.py
|
Lxinyuelxy/multi-label-learn
|
ab347e9c9ccac1503f22c7b76e0b3e9a4e8214da
|
[
"MIT"
] | 4
|
2018-11-19T13:34:53.000Z
|
2020-01-11T11:58:13.000Z
|
mllearn/alg_adapt/__init__.py
|
Lxinyuelxy/multi-label-learn
|
ab347e9c9ccac1503f22c7b76e0b3e9a4e8214da
|
[
"MIT"
] | null | null | null |
mllearn/alg_adapt/__init__.py
|
Lxinyuelxy/multi-label-learn
|
ab347e9c9ccac1503f22c7b76e0b3e9a4e8214da
|
[
"MIT"
] | 3
|
2019-04-14T18:13:33.000Z
|
2021-04-05T14:45:56.000Z
|
from mllearn.alg_adapt.mlknn import MLKNN
from mllearn.alg_adapt.mldt import MLDecisionTree
__all__ = ['MLKNN', 'MLDecisionTree']
| 32.75
| 49
| 0.80916
| 17
| 131
| 5.882353
| 0.529412
| 0.22
| 0.28
| 0.38
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.099237
| 131
| 4
| 50
| 32.75
| 0.847458
| 0
| 0
| 0
| 0
| 0
| 0.143939
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8429c53f7fd28307a11ce122a67b429fe703dee1
| 10,777
|
py
|
Python
|
src/main/python/view/plot_utils.py
|
gwdgithubnom/ox-patient
|
cddf4fe381cb4506db8e0d62803dd2044cf7ad92
|
[
"MIT"
] | null | null | null |
src/main/python/view/plot_utils.py
|
gwdgithubnom/ox-patient
|
cddf4fe381cb4506db8e0d62803dd2044cf7ad92
|
[
"MIT"
] | null | null | null |
src/main/python/view/plot_utils.py
|
gwdgithubnom/ox-patient
|
cddf4fe381cb4506db8e0d62803dd2044cf7ad92
|
[
"MIT"
] | 1
|
2021-04-14T00:45:38.000Z
|
2021-04-14T00:45:38.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from context import resource_manager
import pandas
import numpy
from tools import logger
import numpy as np
import matplotlib.pyplot as plt
log = logger.getLogger()
def plot_image_file(img):
    """Render an already-loaded image object in a matplotlib window."""
    plt.imshow(img)
    plt.show()
def plot_image(narray, w='', h=''):
    """Display an image array, optionally reshaping a flat array first.

    Args:
        narray: image data; a flat array if ``w``/``h`` are given.
        w: target width, or '' (default) to display ``narray`` as-is.
        h: target height, used together with ``w``.
    """
    log.info("plot image array:" + str(narray.shape))
    # BUG FIX: the original used ``w is not ''`` — identity comparison with a
    # string literal is implementation-dependent (SyntaxWarning on CPython
    # 3.8+); value equality is what is meant here.
    if w != '':
        narray = narray.reshape(w, h)
    plt.imshow(narray)
    plt.show()
def plot_rho_delta(rho, delta):
    """Scatter-plot the rho/delta decision graph.

    Index 0 of both lists is skipped (slices start at 1).

    Args:
        rho: rho list.
        delta: delta list.
    """
    log.info("PLOT: rho-delta plot")
    plot_scatter_diagram(0, rho[1:], delta[1:],
                         x_label='rho', y_label='delta', title='rho-delta')
# def plot_scatter_diagram(which_fig, x, y, x_label='x', y_label='y', title='title', style_list=None):
# '''
# Plot scatter diagram
#
# Args:
# which_fig : which sub plot
# x : x array
# y : y array
# x_label : label of x pixel
# y_label : label of y pixel
# title : title of the plot
# '''
# styles =
# assert len(x) == len(y)
# if style_list != None:
# assert len(x) == len(style_list) and len(styles) >= len(set(style_list))
# plt.figure(which_fig)
# plt.clf()
# if style_list == None:
# plt.plot(x, y, styles[0])
# else:
# clses = set(style_list)
# xs, ys = {}, {}
# for i in range(len(x)):
# try:
# xs[style_list[i]].append(x[i])
# ys[style_list[i]].append(y[i])
# except KeyError:
# xs[style_list[i]] = [x[i]]
# ys[style_list[i]] = [y[i]]
# added = 1
# for idx, cls in enumerate(clses):
# if cls == -1:
# style = styles[0]
# added = 0
# else:
# style = styles[idx + added]
# plt.plot(xs[cls], ys[cls], style)
# plt.title(title)
# plt.xlabel(x_label)
# plt.ylabel(y_label)
# plt.ylim(bottom=0)
# plt.show()
def plot_dataframe_scatter_diagram(which_fig, data, x_label='x', y_label='y', title='title', label=None):
    # NOTE(review): this function sets up a figure (title, labels, legend)
    # but never plots ``data``/``label``; the style tables below are built
    # and then unused. Looks unfinished — confirm intent before relying on it.
    styles = ['k.', 'g.', 'r.', 'c.', 'm.', 'y.', 'b.']
    linestyles = ['-.', '--', 'None', '-', ':']
    # Table of every matplotlib marker code, kept for reference.
    stylesMarker = markers = ['.',  # point
                              ',',  # pixel
                              'o',  # circle
                              'v',  # triangle down
                              '^',  # triangle up
                              '<',  # triangle_left
                              '>',  # triangle_right
                              '1',  # tri_down
                              '2',  # tri_up
                              '3',  # tri_left
                              '4',  # tri_right
                              '8',  # octagon
                              's',  # square
                              'p',  # pentagon
                              '*',  # star
                              'h',  # hexagon1
                              'H',  # hexagon2
                              '+',  # plus
                              'x',  # x
                              'D',  # diamond
                              'd',  # thin_diamond
                              '|',  # vline
                              ]
    # styles = []
    # NOTE(review): DataFrame.ix was removed in pandas 1.0 — this line raises
    # AttributeError on modern pandas; .iloc[:, 2] is the positional equivalent.
    stylesColors = pandas.read_csv(
        resource_manager.Properties.getDefaultDataFold() + "view" + resource_manager.getSeparator() + "color.csv").ix[:,
    2]
    plt.figure(which_fig)
    plt.clf()
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.ylim(bottom=0)
    plt.legend(loc='upper left')
    plt.show()
def plot_scatter_diagram(which_fig, x, y, x_label='x', y_label='y', title='title', label=None):
    """Plot a scatter diagram, one colour per distinct label value.

    Args:
        which_fig: which sub plot (matplotlib figure number).
        x: x array.
        y: y array (same length as ``x``).
        x_label: label of x pixel.
        y_label: label of y pixel.
        title: title of the plot.
        label: optional per-point class labels; when given, points are
            grouped by label, each group drawn with its own colour and a
            text marker showing the label value.
    """
    styles = ['k.', 'g.', 'r.', 'c.', 'm.', 'y.', 'b.']
    # Colour/marker tables come from <data>/view/style.csv (columns 3 and 2).
    # BUG FIX: DataFrame.ix was removed in pandas 1.0; .iloc is the
    # positional-indexing equivalent.
    stylesMarker = pandas.read_csv(
        resource_manager.Properties.getDefaultDataFold() + "view"
        + resource_manager.getSeparator() + "style.csv").iloc[:, 3]
    stylesColors = pandas.read_csv(
        resource_manager.Properties.getDefaultDataFold() + "view"
        + resource_manager.getSeparator() + "style.csv").iloc[:, 2]
    assert len(x) == len(y)
    if label is not None:
        assert len(x) == len(label)  # and len(stylesMarker) >= len(set(label))
    plt.figure(which_fig)
    plt.clf()
    if label is None:
        plt.plot(x, y, styles[0])
    else:
        l = len(label)
        labelSet = set(label)
        k = 0
        for i in labelSet:
            # Collect the points that belong to class ``i``.
            xs = []
            ys = []
            for j in range(l):
                if i == label[j]:
                    xs.append(x[j])
                    ys.append(y[j])
            k = k + 1
            try:
                plt.scatter(xs, ys, c=stylesColors[k].strip(),
                            marker=r"$ {} $".format(str(i)), label=i)
            except Exception:
                # BUG FIX: was a bare ``except:`` which also swallowed
                # SystemExit/KeyboardInterrupt. Log the style tables for
                # diagnosis, retry once so the traceback surfaces, then abort.
                log.fatal(stylesMarker)
                log.fatal(stylesColors)
                log.fatal(stylesMarker[k])
                log.fatal(stylesColors[k])
                plt.scatter(xs, ys, c=stylesColors[k].strip(),
                            marker=r"$ {} $".format(str(i)), label=i)
                exit()
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.ylim(bottom=0)
    # plt.legend(loc='upper left')
    plt.show()
def save_scatter_diagram(which_fig, x, y, x_label='x', y_label='y', title='title', label=None,
                         path=resource_manager.Properties.getDefaultDataFold() + "result" + resource_manager.getSeparator() + "result.png"):
    # NOTE(review): the ``path`` default is evaluated once at import time;
    # confirm that is acceptable.
    '''
    Plot scatter diagram and save it to ``path`` instead of showing it.

    Args:
        which_fig : which sub plot
        x : x array
        y : y array
        x_label : label of x pixel
        y_label : label of y pixel
        title : title of the plot
        label : optional per-point class labels
        path : output image file (saved at 900 dpi)
    '''
    styles = ['k.', 'g.', 'r.', 'c.', 'm.', 'y.', 'b.']
    linestyles = ['-.', '--', 'None', '-', ':']
    # NOTE(review): DataFrame.ix was removed in pandas 1.0; these lines fail
    # on modern pandas — .iloc[:, n] is the positional equivalent.
    stylesMarker = pandas.read_csv(
        resource_manager.Properties.getDefaultDataFold() + "view" + resource_manager.getSeparator() + "style.csv").ix[:,
    3]
    stylesColors = pandas.read_csv(
        resource_manager.Properties.getDefaultDataFold() + "view" + resource_manager.getSeparator() + "style.csv").ix[:,
    2]
    assert len(x) == len(y)
    if label != None:
        assert len(x) == len(label)  # and len(stylesMarker) >= len(set(label))
    plt.figure(which_fig)
    plt.clf()
    if label == None:
        plt.plot(x, y, styles[0])
    else:
        l = len(label)
        labelSet = set(label)
        k = 0
        for i in labelSet:
            # Collect the points belonging to class ``i``.
            xs = []
            ys = []
            for j in range(l):
                if i == label[j]:
                    xs.append(x[j])
                    ys.append(y[j])
            k = k + 1
            try:
                # First 7 classes use the CSV marker table, later ones fall
                # back to a text marker rendering the label itself.
                if k<=7:
                    plt.scatter(xs, ys, c=stylesColors[k].strip(), marker=stylesMarker[k],label=i)
                else:
                    plt.scatter(xs, ys, c=stylesColors[k%100].strip(), marker=r"$ {} $".format(str(i)),label=i)
            except:
                # NOTE(review): bare except also swallows SystemExit /
                # KeyboardInterrupt; retried once to surface the traceback.
                log.fatal(stylesMarker)
                log.fatal(stylesColors)
                log.fatal(stylesMarker[k])
                log.fatal(stylesColors[k])
                plt.scatter(xs, ys, c=stylesColors[k].strip(), marker=r"$ {} $".format(str(i)),label=i)
                exit()
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.ylim(bottom=0)
    plt.savefig(path,dpi=900)
    #plt.savefig(path)
    plt.close()
def save_all_scatter_diagram(which_fig, x, y, x_label='x', y_label='y', title='title', label=None,
                             path=resource_manager.Properties.getDefaultDataFold() + "result" + resource_manager.getSeparator() + "result.png"):
    # NOTE(review): near-duplicate of save_scatter_diagram, but every class
    # uses the text-label marker (the CSV-marker branch is commented out) and
    # ".jpg" is appended to ``path``.
    '''
    Plot scatter diagram and save it as <path>.jpg.

    Args:
        which_fig : which sub plot
        x : x array
        y : y array
        x_label : label of x pixel
        y_label : label of y pixel
        title : title of the plot
        label : optional per-point class labels
        path : output path stem; ".jpg" is appended
    '''
    styles = ['k.', 'g.', 'r.', 'c.', 'm.', 'y.', 'b.']
    linestyles = ['-.', '--', 'None', '-', ':']
    # NOTE(review): DataFrame.ix was removed in pandas 1.0 — .iloc[:, n] is
    # the positional equivalent on modern pandas.
    stylesMarker = pandas.read_csv(
        resource_manager.Properties.getDefaultDataFold() + "view" + resource_manager.getSeparator() + "style.csv").ix[:,
    3]
    stylesColors = pandas.read_csv(
        resource_manager.Properties.getDefaultDataFold() + "view" + resource_manager.getSeparator() + "style.csv").ix[:,
    2]
    assert len(x) == len(y)
    if label != None:
        assert len(x) == len(label)  # and len(stylesMarker) >= len(set(label))
    plt.figure(which_fig)
    plt.clf()
    if label == None:
        plt.plot(x, y, styles[0])
    else:
        l = len(label)
        labelSet = set(label)
        k = 0
        for i in labelSet:
            # Collect the points belonging to class ``i``.
            xs = []
            ys = []
            for j in range(l):
                if i == label[j]:
                    xs.append(x[j])
                    ys.append(y[j])
            k = k + 1
            try:
                # if k<=7:
                #     plt.scatter(xs, ys, c=stylesColors[k].strip(), marker=stylesMarker[k],label=i)
                # else:
                plt.scatter(xs, ys, c=stylesColors[k%100].strip(), marker=r"$ {} $".format(str(i)),label=i)
            except:
                # NOTE(review): bare except also swallows SystemExit /
                # KeyboardInterrupt; retried once to surface the traceback.
                log.fatal(stylesMarker)
                log.fatal(stylesColors)
                log.fatal(stylesMarker[k])
                log.fatal(stylesColors[k])
                plt.scatter(xs, ys, c=stylesColors[k].strip(), marker=r"$ {} $".format(str(i)),label=i)
                exit()
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.ylim(bottom=0)
    # plt.legend(loc='upper left')
    plt.savefig(path+".jpg",dpi=900)
    #plt.savefig(path+".jpg")
    plt.close()
if __name__ == '__main__':
    # Manual smoke test: 8 points spread over a handful of classes.
    x = np.array([1, 2, 3, 4, 5, 6, 7, 8])
    y = np.array([2, 3, 4, 5, 6, 2, 4, 8])
    cls = np.array([1, 4, 2, 3, 5, 1, 1, 7])
    plot_scatter_diagram(0, x, y, label=cls)
| 34.10443
| 141
| 0.461538
| 1,216
| 10,777
| 3.995066
| 0.133224
| 0.058666
| 0.033347
| 0.079662
| 0.771099
| 0.757308
| 0.743516
| 0.73837
| 0.733429
| 0.733429
| 0
| 0.011552
| 0.389533
| 10,777
| 315
| 142
| 34.212698
| 0.726858
| 0.240512
| 0
| 0.716495
| 0
| 0
| 0.049895
| 0
| 0
| 0
| 0
| 0
| 0.030928
| 1
| 0.036082
| false
| 0
| 0.030928
| 0
| 0.06701
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ffc07014e07d2061a2e2e288c79e9d7fd7b2fa0f
| 76
|
py
|
Python
|
kbdiffdi/utilities/__init__.py
|
subond/kbdi-ffdi
|
f0f05afbfa43ef62dedc92a5ca1f4ce2ca17b4b3
|
[
"MIT"
] | null | null | null |
kbdiffdi/utilities/__init__.py
|
subond/kbdi-ffdi
|
f0f05afbfa43ef62dedc92a5ca1f4ce2ca17b4b3
|
[
"MIT"
] | null | null | null |
kbdiffdi/utilities/__init__.py
|
subond/kbdi-ffdi
|
f0f05afbfa43ef62dedc92a5ca1f4ce2ca17b4b3
|
[
"MIT"
] | 1
|
2021-12-04T15:39:30.000Z
|
2021-12-04T15:39:30.000Z
|
from .conversion import *
from .input_output import *
from .plotter import *
| 25.333333
| 27
| 0.776316
| 10
| 76
| 5.8
| 0.6
| 0.344828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144737
| 76
| 3
| 28
| 25.333333
| 0.892308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ffe170f1b11d10909b63117569cdf732e9b54b5a
| 26
|
py
|
Python
|
src/lib/steam/__init__.py
|
ueffel/keypirinha-allmygames
|
3ef8f641cec9d2165fbafcc7224f65d3fab1089a
|
[
"MIT"
] | 9
|
2020-05-31T11:13:52.000Z
|
2021-09-23T14:26:42.000Z
|
src/lib/steam/__init__.py
|
ueffel/keypirinha-allmygames
|
3ef8f641cec9d2165fbafcc7224f65d3fab1089a
|
[
"MIT"
] | 9
|
2020-05-31T11:55:10.000Z
|
2022-01-22T11:22:55.000Z
|
src/lib/steam/__init__.py
|
ueffel/keypirinha-allmygames
|
3ef8f641cec9d2165fbafcc7224f65d3fab1089a
|
[
"MIT"
] | 1
|
2020-09-11T17:40:51.000Z
|
2020-09-11T17:40:51.000Z
|
from .steam import Steam
| 8.666667
| 24
| 0.769231
| 4
| 26
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192308
| 26
| 2
| 25
| 13
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f23761e228ab837847a5c4c000c5ec7d93553ece
| 8,309
|
py
|
Python
|
georiviere/river/tests/test_fields.py
|
georiviere/Georiviere-admin
|
4ac532f84a7a8fef3e01384fad63e8e288d397c0
|
[
"BSD-2-Clause"
] | 7
|
2021-11-05T14:52:25.000Z
|
2022-03-24T21:18:02.000Z
|
georiviere/river/tests/test_fields.py
|
georiviere/Georiviere-admin
|
4ac532f84a7a8fef3e01384fad63e8e288d397c0
|
[
"BSD-2-Clause"
] | 57
|
2021-11-02T10:27:34.000Z
|
2022-03-31T14:08:32.000Z
|
georiviere/river/tests/test_fields.py
|
georiviere/Georiviere-admin
|
4ac532f84a7a8fef3e01384fad63e8e288d397c0
|
[
"BSD-2-Clause"
] | 1
|
2021-12-05T14:55:42.000Z
|
2021-12-05T14:55:42.000Z
|
from django.test import TestCase
from django.core.exceptions import ValidationError
from django.contrib.gis.geos import GEOSGeometry, LineString, Point, Polygon
from django.conf import settings
from georiviere.river.fields import SnappedGeometryField, SnappedLineStringField
from georiviere.river.tests.factories import StreamFactory
class SnappedLineStringFieldTest(TestCase):
def setUp(self):
self.f = SnappedLineStringField()
self.wktgeom = ('LINESTRING(-0.77054223313507 -5.32573853776343,'
'-0.168053647782867 -4.66595028627023)')
self.geojson = ('{"type":"LineString","coordinates":['
' [-0.77054223313507,-5.32573853776343],'
' [-0.168053647782867,-4.66595028627023]]}')
def test_dict_with_geom_is_mandatory(self):
self.assertRaises(ValidationError, self.f.clean,
'LINESTRING(0 0, 1 0)')
self.assertRaises(ValidationError, self.f.clean,
'{"geo": "LINESTRING(0 0, 1 0)"}')
def test_snaplist_is_mandatory(self):
self.assertRaises(ValidationError, self.f.clean,
'{"geom": "LINESTRING(0 0, 1 0)"}')
def test_snaplist_must_have_same_number_of_vertices(self):
self.assertRaises(ValidationError, self.f.clean,
'{"geom": "LINESTRING(0 0, 1 0)", "snap": [null]}')
def test_geom_cannot_be_invalid_wkt(self):
self.assertRaises(ValidationError, self.f.clean,
'{"geom": "LINEPPRING(0 0, 1 0)", '
'"snap": [null, null]}')
def test_geom_can_be_geojson(self):
geojsonstr = self.geojson.replace('"', '\\"')
geom = self.f.clean('{"geom": "%s", '
' "snap": [null, null]}' % geojsonstr)
self.assertTrue(geom.equals_exact(
LineString((100000, 100000), (200000, 200000),
srid=settings.SRID), 0.1))
def test_geom_is_not_snapped_if_snap_is_null(self):
value = '{"geom": "%s", "snap": [null, null]}' % self.wktgeom
self.assertTrue(self.f.clean(value).equals_exact(
LineString((100000, 100000), (200000, 200000),
srid=settings.SRID), 0.1))
def test_geom_is_snapped_if_path_pk_is_provided(self):
geom_4326 = GEOSGeometry(self.wktgeom, srid=4326).transform(2154, clone=True)
last_coords = geom_4326[-1]
stream = StreamFactory.create()
coords_stream = [coord for coord in stream.geom.coords]
coords_stream.append(last_coords)
stream.geom = LineString(coords_stream, srid=2154)
stream.save()
value = '{"geom": "%s", "snap": [null, %s]}' % (self.wktgeom, stream.pk)
self.assertTrue(self.f.clean(value).equals_exact(
LineString((100000, 100000), (200000, 200000),
srid=settings.SRID), 0.1))
class SnappedGeometryFieldTest(TestCase):
def setUp(self):
self.f = SnappedGeometryField()
self.wktgeom_point = 'POINT(-0.77054223313507 -5.32573853776343)'
self.wktgeom_linestring = ('LINESTRING(-0.77054223313507 -5.32573853776343,'
'-0.168053647782867 -4.66595028627023)')
self.wktgeom_polygon = ('POLYGON((-0.77054223313507 -5.32573853776343, -0.57054223313507 -3.32573853776343,'
'-0.168053647782867 -4.66595028627023, -0.77054223313507 -5.32573853776343))')
self.geojson_linestring = ('{"type":"LineString","coordinates":['
' [-0.77054223313507,-5.32573853776343],'
' [-0.168053647782867,-4.66595028627023]]}')
def test_snaplist_must_have_same_number_of_vertices_linestring(self):
self.assertRaises(ValidationError, self.f.clean,
'{"geom": "LINESTRING(0 0, 1 0)", "snap": [null]}')
def test_snaplist_must_have_same_number_of_vertices_polygon(self):
self.assertRaises(ValidationError, self.f.clean,
'{"geom": "POLYGON((0 0, 1 0, 1 2, 0 0))", "snap": [null, null]}')
def test_snaplist_must_have_same_number_of_vertices_point(self):
self.assertRaises(ValidationError, self.f.clean,
'{"geom": "POINT(0 0)", "snap": []}')
def test_linestring_cannot_be_invalid_wkt(self):
self.assertRaises(ValidationError, self.f.clean,
'{"geom": "LINEPPRING(0 0, 1 0)", '
'"snap": [null, null]}')
def test_linestring_is_not_snapped_if_snap_is_null(self):
value = '{"geom": "%s", "snap": [null, null]}' % self.wktgeom_linestring
self.assertTrue(self.f.clean(value).equals_exact(
LineString((100000, 100000), (200000, 200000),
srid=settings.SRID), 0.1))
def test_polygon_is_not_snapped_if_snap_is_null(self):
value = '{"geom": "%s", "snap": [null, null, null]}' % self.wktgeom_polygon
self.assertTrue(self.f.clean(value).equals_exact(
Polygon(((100000, 100000), (145961.3334090858, 411410.4491531737),
(200000, 200000), (100000, 100000)),
srid=settings.SRID), 0.1))
def test_point_is_not_snapped_if_snap_is_null(self):
value = '{"geom": "%s", "snap": [null]}' % self.wktgeom_point
self.assertTrue(self.f.clean(value).equals_exact(
Point(100000, 100000, srid=settings.SRID), 0.1))
def test_linestring_is_snapped_if_path_pk_is_provided(self):
geom_4326 = GEOSGeometry(self.wktgeom_linestring, srid=4326).transform(2154, clone=True)
last_coords = geom_4326[-1]
stream = StreamFactory.create()
coords_stream = [coord for coord in stream.geom.coords]
coords_stream.append(last_coords)
stream.geom = LineString(coords_stream, srid=2154)
stream.save()
value = '{"geom": "%s", "snap": [null, %s]}' % (self.wktgeom_linestring, stream.pk)
self.assertTrue(self.f.clean(value).equals_exact(
LineString((100000, 100000), (200000, 200000),
srid=settings.SRID), 0.1))
def test_polygon_is_snapped_if_path_pk_is_provided(self):
"""
Stream's linestring is a random linestring
0°
+
\
+
+
|\
| \
| \
+--+
1°
+
\
+
/
+ /
|| /
| | /
| |/
+--+x
2°
+
\
+
/
+ /
|| /
| | /
| |/
+--+ snapped
"""
# 0°
geom_4326 = GEOSGeometry(self.wktgeom_polygon, srid=4326).transform(2154, clone=True)
last_coords = geom_4326.coords[0][-2]
stream = StreamFactory.create()
coords_stream = [coord for coord in stream.geom.coords]
coords_stream.append(last_coords) # 1°
stream.geom = LineString(coords_stream, srid=2154)
stream.save()
value = '{"geom": "%s", "snap": [null, null, %s]}' % (self.wktgeom_polygon, stream.pk)
# 2° Snap x on linestring
self.assertTrue(self.f.clean(value).equals_exact(
Polygon(((100000, 100000), (145961.3334090858, 411410.4491531737),
(200000, 200000), (100000, 100000)),
srid=settings.SRID), 0.1))
def test_point_is_snapped_if_path_pk_is_provided(self):
geom_4326 = GEOSGeometry(self.wktgeom_point, srid=4326).transform(2154, clone=True)
last_coords = geom_4326.coords
stream = StreamFactory.create()
coords_stream = [coord for coord in stream.geom.coords]
coords_stream.append(last_coords) # 1°
stream.geom = LineString(coords_stream, srid=2154)
stream.save()
value = '{"geom": "%s", "snap": [%s]}' % (self.wktgeom_point, stream.pk)
# 2° Snap x on linestring
self.assertTrue(self.f.clean(value).equals_exact(
Point(100000, 100000, srid=settings.SRID), 0.1))
| 42.610256
| 116
| 0.573234
| 892
| 8,309
| 5.161435
| 0.122197
| 0.02172
| 0.039096
| 0.068419
| 0.814509
| 0.769765
| 0.747176
| 0.747176
| 0.713293
| 0.677889
| 0
| 0.140085
| 0.292935
| 8,309
| 194
| 117
| 42.829897
| 0.642213
| 0.052112
| 0
| 0.554688
| 0
| 0.007813
| 0.170435
| 0.043524
| 0
| 0
| 0
| 0
| 0.140625
| 1
| 0.148438
| false
| 0
| 0.046875
| 0
| 0.210938
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f23828e13821d1c9e65df2bad561d209451fc90f
| 639
|
py
|
Python
|
examples/analyze.py
|
heinrichreimer/targer-api
|
b1d5c9369a4e65bdf94fdd81da0f1a92e2d4dff6
|
[
"MIT"
] | 1
|
2022-01-27T15:13:41.000Z
|
2022-01-27T15:13:41.000Z
|
examples/analyze.py
|
heinrichreimer/targer-api
|
b1d5c9369a4e65bdf94fdd81da0f1a92e2d4dff6
|
[
"MIT"
] | 2
|
2022-01-24T14:32:44.000Z
|
2022-01-25T11:01:55.000Z
|
examples/analyze.py
|
heinrichreimer/targer-api
|
b1d5c9369a4e65bdf94fdd81da0f1a92e2d4dff6
|
[
"MIT"
] | null | null | null |
from targer_api.api import analyze_text
arguments = analyze_text(
"Academic freedom is not absolute. "
"All major Canadian universities are now publicly funded "
"but maintain institutional autonomy, "
"with the ability to decide on admission, tuition and governance."
)
print(arguments)
arguments_per_model = analyze_text(
"Academic freedom is not absolute. "
"All major Canadian universities are now publicly funded "
"but maintain institutional autonomy, "
"with the ability to decide on admission, tuition and governance.",
{"tag-ibm-fasttext", "tag-essays-fasttext"}
)
print(arguments_per_model)
| 33.631579
| 71
| 0.748044
| 81
| 639
| 5.802469
| 0.506173
| 0.070213
| 0.080851
| 0.110638
| 0.731915
| 0.731915
| 0.731915
| 0.731915
| 0.731915
| 0.731915
| 0
| 0
| 0.178404
| 639
| 18
| 72
| 35.5
| 0.895238
| 0
| 0
| 0.375
| 0
| 0
| 0.652582
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.0625
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f247df3026d20ff012b9d9ef55547fda9639966c
| 77
|
py
|
Python
|
reactorch/__init__.py
|
WeilunQiu/reactorch
|
a7fcf375de76eff2089879706d4a9ec548f95049
|
[
"MIT"
] | null | null | null |
reactorch/__init__.py
|
WeilunQiu/reactorch
|
a7fcf375de76eff2089879706d4a9ec548f95049
|
[
"MIT"
] | null | null | null |
reactorch/__init__.py
|
WeilunQiu/reactorch
|
a7fcf375de76eff2089879706d4a9ec548f95049
|
[
"MIT"
] | null | null | null |
from . import import_kinetics
from . import kinetics
from .solution import *
| 19.25
| 29
| 0.792208
| 10
| 77
| 6
| 0.4
| 0.333333
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155844
| 77
| 3
| 30
| 25.666667
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
f2a0f0827ca31edb7a2ad346a83d68d695a30579
| 51,234
|
py
|
Python
|
tests/test_pytsmp.py
|
kithomak/pytsmp
|
15d58a39e016100fb44cdcc9f6115fa7736eb2bb
|
[
"MIT"
] | null | null | null |
tests/test_pytsmp.py
|
kithomak/pytsmp
|
15d58a39e016100fb44cdcc9f6115fa7736eb2bb
|
[
"MIT"
] | 1
|
2020-01-14T19:38:41.000Z
|
2020-01-14T19:38:41.000Z
|
tests/test_pytsmp.py
|
kithomak/pytsmp
|
15d58a39e016100fb44cdcc9f6115fa7736eb2bb
|
[
"MIT"
] | null | null | null |
import pytest
import numpy as np
from pytsmp import pytsmp
from tests import helpers
class TestMatrixProfile:
def test_MatrixProfile_init(self):
with pytest.raises(TypeError):
t = np.random.rand(1000)
mp = pytsmp.MatrixProfile(t, window_size=100, verbose=False)
class TestSTAMP:
def test_STAMP_init_incorrect_window_size1(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=0, verbose=False)
assert str(excinfo.value) == "Incorrect window size specified."
def test_STAMP_init_incorrect_window_size2(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=2.3, verbose=False)
assert str(excinfo.value) == "Incorrect window size specified."
def test_STAMP_init_incorrect_window_size3(self):
with pytest.raises(ValueError) as excinfo:
t1 = np.random.rand(1000)
t2 = np.random.rand(500)
mp = pytsmp.STAMP(t1, t2, window_size=501, verbose=False)
assert str(excinfo.value) == "Incorrect window size specified."
def test_STAMP_init_incorrect_exclusion_zone(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, exclusion_zone=-1, verbose=False)
assert str(excinfo.value) == "Exclusion zone must be non-negative."
def test_STAMP_init_incorrect_s_size1(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, s_size=0, verbose=False)
assert str(excinfo.value) == "s_size must be between 0 and 1."
def test_STAMP_init_incorrect_s_size2(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, s_size=1.2, verbose=False)
assert str(excinfo.value) == "s_size must be between 0 and 1."
def test_STAMP_is_anytime(self):
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, s_size=1, verbose=True) # for coverage purpose
is_anytime = mp.is_anytime
assert is_anytime == True, "STAMP_is_anytime: STAMP should be an anytime algorithm."
def test_STAMP_init_check_mutation(self):
t1 = np.random.rand(100)
t2 = np.random.rand(100)
w = 10
mp = pytsmp.STAMP(t1, t2, window_size=w, exclusion_zone=0, verbose=False)
t1[0] = -10
t2[0] = -10
assert t1[0] != mp.ts1[0], "STAMP_init_check_mutation: Matrix profile init should leave original array intact."
assert t2[0] != mp.ts2[0], "STAMP_init_check_mutation: Matrix profile init should leave original array intact."
def test_STAMP_get_profiles_check_length(self):
n = np.random.randint(100, 1000)
m = np.random.randint(100, 1000)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m))
mp = pytsmp.STAMP(t1, t2, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
assert len(mpro) == n - w + 1, "STAMP_get_profile_check_length: Matrix profile should have correct length"
assert len(ipro) == n - w + 1, "STAMP_get_profile_check_length: Index profile should have correct length"
def test_STAMP_get_profiles_check_mutation(self):
t = np.random.rand(1000)
w = 10
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
mpro[0] = -1
ipro[0] = -1
mpro2, ipro2 = mp.get_profiles()
assert mpro[0] != mpro2[0], "STAMP_get_profile_check_mutation: " \
"Get profile should return a copy of the matrix profile, not the internal one."
assert ipro[0] != ipro2[0], "STAMP_get_profile_check_mutation: " \
"Get profile should return a copy of the index profile, not the internal one."
def test_STAMP_compute_matrix_profile_sanity(self):
t = np.random.rand(1000)
w = 10
mp = pytsmp.STAMP(t, t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, np.zeros(len(t) - w + 1), atol=1e-5), "STAMP_compute_matrix_profile_sanity: " \
"Should compute the matrix profile correctly in the trivial case."
assert np.array_equal(ipro, np.arange(len(t) - w + 1)), "STAMP_compute_matrix_profile_sanity: " \
"Should compute the index profile correctly in the trivial case."
def test_STAMP_compute_matrix_profile_same_random_data(self):
n = np.random.randint(100, 200) # anything larger will be too time-consuming
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
mp_naive, ip_naive = helpers.naive_matrix_profile(t, window_size=w)
assert np.allclose(mpro, mp_naive), "STAMP_compute_matrix_profile_same_random_data: " \
"Should compute the matrix profile correctly."
assert np.allclose(ipro, ip_naive), "STAMP_compute_matrix_profile_same_random_data: " \
"Should compute the index profile correctly."
def test_STAMP_compute_matrix_profile_random_data(self):
n = np.random.randint(100, 200)
m = np.random.randint(100, 200)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m) // 4)
mp = pytsmp.STAMP(t1, t2, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
mp_naive, ip_naive = helpers.naive_matrix_profile(t1, t2, window_size=w)
assert np.allclose(mpro, mp_naive), "STAMP_compute_matrix_profile_random_data: " \
"Should compute the matrix profile correctly."
assert np.allclose(ipro, ip_naive), "STAMP_compute_matrix_profile_random_data: " \
"Should compute the index profile correctly."
def test_STAMP_compute_matrix_profile_data1(self):
t = np.loadtxt("./data/random_walk_data.csv")
mpro_ans = np.loadtxt("./data/random_walk_data_mpro.csv")
ipro_ans = np.loadtxt("./data/random_walk_data_ipro.csv")
w = 50
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, mpro_ans), "STAMP_compute_matrix_profile_data1: " \
"Should compute the matrix profile correctly. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
# assert np.allclose(ipro, ipro_ans), "STAMP_compute_matrix_profile_data1: " \
# "Should compute the index profile correctly."
def test_STAMP_compute_matrix_profile_data2(self):
t = np.loadtxt("./data/candy_production.csv")
mpro_ans = np.loadtxt("./data/candy_production_mpro.csv")
ipro_ans = np.loadtxt("./data/candy_production_ipro.csv")
w = 80
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, mpro_ans), "STAMP_compute_matrix_profile_data2: " \
"Should compute the matrix profile correctly. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
assert np.allclose(ipro, ipro_ans), "STAMP_compute_matrix_profile_data1: " \
"Should compute the index profile correctly."
def test_STAMP_compute_matrix_profile_data3(self):
t = np.loadtxt("./data/bitcoin_price.csv")
mpro_ans = np.loadtxt("./data/bitcoin_price_mpro.csv")
ipro_ans = np.loadtxt("./data/bitcoin_price_ipro.csv")
w = 100
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, mpro_ans), "STAMP_compute_matrix_profile_data3: " \
"Should compute the matrix profile correctly. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
assert np.allclose(ipro, ipro_ans), "STAMP_compute_matrix_profile_data3: " \
"Should compute the index profile correctly."
class TestConvFunctions:
"""
The class for tests of helper functions independent of matrix profile classes.
"""
def test_update_ts1_random_data(self):
n = np.random.randint(200, 1000)
m = np.random.randint(200, 1000)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m) // 4)
mp = pytsmp.STAMP(t1[:-1], t2, window_size=w, verbose=False)
mp.update_ts1(t1[-1])
mpro, ipro = mp.get_profiles()
mp2 = pytsmp.STAMP(t1, t2, window_size=w, verbose=False)
mpro2, ipro2 = mp2.get_profiles()
assert np.allclose(mpro, mpro2), "update_ts1_random_data: " \
"update_ts1 should update the matrix profile properly on random data. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro2)))
assert np.allclose(ipro, ipro2), "update_ts1_random_data: " \
"update_ts1 should update the index profile properly on random data."
def test_update_ts1_multiple_random_data(self):
n = np.random.randint(200, 1000)
m = np.random.randint(200, 1000)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m) // 4)
times = np.random.randint(5, 50)
mp = pytsmp.STAMP(t1[:-times], t2, window_size=w, verbose=False)
for i in range(-times, 0, 1):
mp.update_ts1(t1[i])
mpro, ipro = mp.get_profiles()
mp2 = pytsmp.STAMP(t1, t2, window_size=w, verbose=False)
mpro2, ipro2 = mp2.get_profiles()
assert np.allclose(mpro, mpro2), "update_ts1_multiple_random_data: " \
"update_ts1 should update the matrix profile multiple times properly on random data. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro2)))
assert np.allclose(ipro, ipro2), "update_ts1_random_data: " \
"update_ts1 should update the index profile multiple times properly on random data."
def test_update_ts2_random_data(self):
n = np.random.randint(200, 1000)
m = np.random.randint(200, 1000)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m) // 4)
mp = pytsmp.STAMP(t1, t2[:-1], window_size=w, verbose=False)
mp.update_ts2(t2[-1])
mpro, ipro = mp.get_profiles()
mp2 = pytsmp.STAMP(t1, t2, window_size=w, verbose=False)
mpro2, ipro2 = mp2.get_profiles()
assert np.allclose(mpro, mpro2), "update_ts2_random_data: " \
"update_ts2 should update the matrix profile properly on random data. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro2)))
assert np.allclose(ipro, ipro2), "update_ts2_random_data: " \
"update_ts2 should update the index profile properly on random data."
def test_update_ts2_multiple_random_data(self):
n = np.random.randint(200, 1000)
m = np.random.randint(200, 1000)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m) // 4)
times = np.random.randint(5, 50)
mp = pytsmp.STAMP(t1, t2[:-times], window_size=w, verbose=False)
for i in range(-times, 0, 1):
mp.update_ts2(t2[i])
mpro, ipro = mp.get_profiles()
mp2 = pytsmp.STAMP(t1, t2, window_size=w, verbose=False)
mpro2, ipro2 = mp2.get_profiles()
assert np.allclose(mpro, mpro2), "update_ts2_multiple_random_data: " \
"update_ts2 should update the matrix profile multiple times properly on random data. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro2)))
assert np.allclose(ipro, ipro2), "update_ts2_random_data: " \
"update_ts2 should update the index profile multiple times properly on random data."
def test_update_interleave_random_data(self):
n = np.random.randint(200, 1000)
m = np.random.randint(200, 1000)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m) // 4)
times = np.random.randint(5, 25)
mp = pytsmp.STAMP(t1[:-times], t2[:-times], window_size=w, verbose=False)
for i in range(-times, 0, 1):
mp.update_ts1(t1[i])
mp.update_ts2(t2[i])
mpro, ipro = mp.get_profiles()
mp2 = pytsmp.STAMP(t1, t2, window_size=w, verbose=False)
mpro2, ipro2 = mp2.get_profiles()
assert np.allclose(mpro, mpro2), "update_interleave_random_data: " \
"update_ts1 and update_ts2 should update the matrix profile multiple times " \
"properly on random data. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro2)))
assert np.allclose(ipro, ipro2), "update_interleave_random_data: " \
"update_ts1 and update_ts2 should update the index profile multiple times " \
"properly on random data."
def test_update_ts1_same_data(self):
n = np.random.randint(200, 1000)
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
mp = pytsmp.STAMP(t[:-1], window_size=w, verbose=False)
mp.update_ts1(t[-1])
mpro, ipro = mp.get_profiles()
mp2 = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro2, ipro2 = mp2.get_profiles()
assert np.allclose(mpro, mpro2), "update_ts1_same_data: " \
"update_ts1 should update the matrix profile properly when ts1 == ts2. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro2)))
assert np.allclose(ipro, ipro2), "update_ts1_same_data: " \
"update_ts1 should update the index profile properly when ts1 == ts2."
def test_update_ts1_multiple_same_data(self):
n = np.random.randint(200, 1000)
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
times = np.random.randint(5, 50)
mp = pytsmp.STAMP(t[:-times], window_size=w, verbose=False)
for i in range(-times, 0, 1):
mp.update_ts1(t[i])
mpro, ipro = mp.get_profiles()
mp2 = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro2, ipro2 = mp2.get_profiles()
assert np.allclose(mpro, mpro2), "update_ts1_multiple_same_data: " \
"update_ts1 should update the matrix profile multiple times properly when ts1 == ts2. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro2)))
assert np.allclose(ipro, ipro2), "update_ts1_multiple_same_data: " \
"update_ts1 should update the index profile multiple times properly when ts1 == ts2."
def test_update_ts2_same_data(self):
n = np.random.randint(200, 1000)
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
mp = pytsmp.STAMP(t[:-1], window_size=w, verbose=False)
mp.update_ts2(t[-1])
mpro, ipro = mp.get_profiles()
mp2 = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro2, ipro2 = mp2.get_profiles()
assert np.allclose(mpro, mpro2), "update_ts2_same_data: " \
"update_ts2 should update the matrix profile properly when ts1 == ts2. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro2)))
assert np.allclose(ipro, ipro2), "update_ts2_same_data: " \
"update_ts2 should update the index profile properly when ts1 == ts2."
def test_update_ts2_multiple_same_data(self):
n = np.random.randint(200, 1000)
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
times = np.random.randint(5, 50)
mp = pytsmp.STAMP(t[:-times], window_size=w, verbose=False)
for i in range(-times, 0, 1):
mp.update_ts2(t[i])
mpro, ipro = mp.get_profiles()
mp2 = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro2, ipro2 = mp2.get_profiles()
assert np.allclose(mpro, mpro2), "update_ts2_multiple_same_data: " \
"update_ts2 should update the matrix profile multiple times properly when ts1 == ts2. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro2)))
assert np.allclose(ipro, ipro2), "update_ts2_multiple_same_data: " \
"update_ts2 should update the index profile multiple times properly when ts1 == ts2."
def test_update_interleave_same_data(self):
n = np.random.randint(200, 1000)
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
times = np.random.randint(5, 25)
mp = pytsmp.STAMP(t[:-times], window_size=w, verbose=False)
for i in range(-times, 0, 1):
if i % 2 == 0:
mp.update_ts1(t[i])
else:
mp.update_ts2(t[i])
mpro, ipro = mp.get_profiles()
mp2 = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro2, ipro2 = mp2.get_profiles()
assert np.allclose(mpro, mpro2), "update_interleave_same_data: " \
"update_ts1 and update_ts2 should update the matrix profile multiple times " \
"properly when ts1 == ts2. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro2)))
assert np.allclose(ipro, ipro2), "update_interleave_same_data: " \
"update_ts1 and update_ts2 should update the index profile multiple times " \
"properly when ts1 == ts2."
def test_find_discords_incorrect_num_discords1(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, verbose=False)
discords = mp.find_discords(-1)
assert str(excinfo.value) == "Incorrect num_discords entered."
def test_find_discords_incorrect_num_discords2(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, verbose=False)
discords = mp.find_discords(4.2)
assert str(excinfo.value) == "Incorrect num_discords entered."
def test_find_discords_incorrect_num_discords3(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, verbose=False)
discords = mp.find_discords(0)
assert str(excinfo.value) == "Incorrect num_discords entered."
def test_find_discords_incorrect_exclusion_zone(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, verbose=False)
discords = mp.find_discords(3, exclusion_zone=-1)
assert str(excinfo.value) == "Exclusion zone must be non-negative."
def test_find_discords_sanity1(self):
n = np.random.randint(200, 1000)
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
discords = mp.find_discords(n - w + 1, exclusion_zone=0)
mp_discords = mpro[discords]
assert len(discords) == n - w + 1, "find_discords_snaity1: find_discords should return the correct number of discords."
assert (mp_discords[1:] <= mp_discords[:-1]).all(), "find_discords_sanity1: find_discords should return " \
"discords in descending order of profile values."
def test_find_discords_sanity2(self):
n = np.random.randint(200, 1000)
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
discords = mp.find_discords(n - w + 1) # exclusion_zone=None
mp_discords = mpro[discords]
assert (n - w + 1) // w <= len(discords) <= (n - w + 1) // w * 2 + 1, \
"find_discords_snaity2: find_discords should not return more than the max possible number of discords."
assert (mp_discords[1:] <= mp_discords[:-1]).all(), "find_discords_sanity2: find_discords should return " \
"discords in descending order of profile values."
def test_find_discords_sanity3(self):
n = np.random.randint(200, 1000)
t = np.random.rand(n)
w = np.random.randint(10, n // 5)
num_discords = 5
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
discords = mp.find_discords(num_discords, exclusion_zone=1/2)
mp_discords = mpro[discords]
assert len(discords) == num_discords, "find_discords_snaity3: find_discords should return the desired number of discords."
assert (mp_discords[1:] <= mp_discords[:-1]).all(), "find_discords_sanity3: find_discords should return " \
"discords in descending order of profile values."
def test_find_discords_anomaly(self):
"""
find_discords should be able to locate obvious anomaly.
"""
n = np.random.randint(200, 500)
t = np.random.rand(n)
t = np.tile(t, 4)
w = np.random.randint(10, n // 4)
ab = np.random.randint(len(t))
t[ab] += 5
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
discords = np.sort(mp.find_discords(1, exclusion_zone=1/2))
assert len(discords) == 1, "find_discords_anomaly: find_discords should return the desired number of discords."
assert np.abs(ab - discords[0]) < w, \
"find_discords_anomaly: find_discords should be able to locate obvious anomaly."
def test_find_motifs_incorrect_num_discords1(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, verbose=False)
motifs = mp.find_motifs(-1)
assert str(excinfo.value) == "Incorrect num_motifs entered."
def test_find_motifs_incorrect_num_motifs2(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, verbose=False)
motifs = mp.find_motifs(4.2)
assert str(excinfo.value) == "Incorrect num_motifs entered."
def test_find_motifs_incorrect_num_motifs3(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, verbose=False)
motifs = mp.find_motifs(0)
assert str(excinfo.value) == "Incorrect num_motifs entered."
def test_find_motifs_incorrect_exclusion_zone(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, verbose=False)
motifs = mp.find_motifs(5, exclusion_zone=-1)
assert str(excinfo.value) == "Exclusion zone must be non-negative."
def test_find_motifs_sanity1(self):
n = np.random.randint(200, 1000)
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
num_motifs = 3
motifs = mp.find_motifs(num_motifs, exclusion_zone=1/2)
mp_motifs = mpro[motifs]
assert len(motifs) == num_motifs, "find_motifs_snaity1: find_motifs should return the desired number of motifs."
assert (mp_motifs[1:, 0] >= mp_motifs[:-1, 0]).all(), "find_motifs_sanity1: find_motifs should return " \
"motifs in ascending order of profile values."
def test_find_motifs_sanity2(self):
n = np.random.randint(200, 1000)
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
motifs = mp.find_motifs(n - w + 1) # exclusion_zone=None
mp_motifs = mpro[motifs]
assert (n - w + 1) // (2 * w) <= len(motifs) <= (n - w + 1) // w * 2 + 1, \
"find_motifs_snaity2: find_motifs should not return more than the max possible number of motifs."
assert (mp_motifs[1:, 0] >= mp_motifs[:-1, 0]).all(), "find_motifs_sanity2: find_motifs should return " \
"motifs in descending order of profile values."
class TestSTOMP:
    """Tests for the STOMP matrix-profile algorithm (exact, non-anytime)."""

    def test_STOMP_is_anytime(self):
        t = np.random.rand(1000)
        mp = pytsmp.STOMP(t, window_size=10, s_size=1, verbose=True)
        is_anytime = mp.is_anytime
        assert is_anytime == False, "STOMP_is_anytime: STOMP should not be an anytime algorithm."

    def test_STOMP_get_profiles_check_length(self):
        n = np.random.randint(100, 1000)
        m = np.random.randint(100, 1000)
        t1 = np.random.rand(n)
        t2 = np.random.rand(m)
        w = np.random.randint(10, min(n, m))
        mp = pytsmp.STOMP(t1, t2, window_size=w, verbose=False)
        mpro, ipro = mp.get_profiles()
        assert len(mpro) == n - w + 1, "STOMP_get_profile_check_length: Matrix profile should have correct length"
        assert len(ipro) == n - w + 1, "STOMP_get_profile_check_length: Index profile should have correct length"

    def test_STOMP_get_profiles_check_mutation(self):
        t = np.random.rand(1000)
        w = 10
        mp = pytsmp.STOMP(t, window_size=w, verbose=False)
        mpro, ipro = mp.get_profiles()
        mpro[0] = -1
        ipro[0] = -1
        mpro2, ipro2 = mp.get_profiles()
        assert mpro[0] != mpro2[0], "STOMP_get_profile_check_mutation: " \
            "Get profile should return a copy of the matrix profile, not the internal one."
        assert ipro[0] != ipro2[0], "STOMP_get_profile_check_mutation: " \
            "Get profile should return a copy of the index profile, not the internal one."

    def test_STOMP_compute_matrix_profile_sanity(self):
        # Self-join of a series with itself: profile is all zeros, indices are the identity.
        t = np.random.rand(1000)
        w = 10
        mp = pytsmp.STOMP(t, t, window_size=w, verbose=False)
        mpro, ipro = mp.get_profiles()
        assert np.allclose(mpro, np.zeros(len(t) - w + 1), atol=1e-5), "STOMP_compute_matrix_profile_sanity: " \
            "Should compute the matrix profile correctly in the trivial case."
        assert np.array_equal(ipro, np.arange(len(t) - w + 1)), "STOMP_compute_matrix_profile_sanity: " \
            "Should compute the index profile correctly in the trivial case."

    def test_STOMP_compute_matrix_profile_same_random_data(self):
        n = np.random.randint(100, 200)  # anything larger will be too time-consuming
        t = np.random.rand(n)
        w = np.random.randint(10, n // 4)
        mp = pytsmp.STOMP(t, window_size=w, verbose=False)
        mpro, ipro = mp.get_profiles()
        mp_naive, ip_naive = helpers.naive_matrix_profile(t, window_size=w)
        assert np.allclose(mpro, mp_naive), "STOMP_compute_matrix_profile_same_random_data: " \
            "Should compute the matrix profile correctly."
        assert np.allclose(ipro, ip_naive), "STOMP_compute_matrix_profile_same_random_data: " \
            "Should compute the index profile correctly."

    def test_STOMP_compute_matrix_profile_random_data(self):
        n = np.random.randint(100, 200)
        m = np.random.randint(100, 200)
        t1 = np.random.rand(n)
        t2 = np.random.rand(m)
        w = np.random.randint(10, min(n, m) // 4)
        mp = pytsmp.STOMP(t1, t2, window_size=w, verbose=False)
        mpro, ipro = mp.get_profiles()
        mp_naive, ip_naive = helpers.naive_matrix_profile(t1, t2, window_size=w)
        assert np.allclose(mpro, mp_naive), "STOMP_compute_matrix_profile_random_data: " \
            "Should compute the matrix profile correctly."
        assert np.allclose(ipro, ip_naive), "STOMP_compute_matrix_profile_random_data: " \
            "Should compute the index profile correctly."

    def test_STOMP_compute_matrix_profile_data1(self):
        t = np.loadtxt("./data/random_walk_data.csv")
        mpro_ans = np.loadtxt("./data/random_walk_data_mpro.csv")
        ipro_ans = np.loadtxt("./data/random_walk_data_ipro.csv")
        w = 50
        mp = pytsmp.STOMP(t, window_size=w, verbose=False)
        mpro, ipro = mp.get_profiles()
        assert np.allclose(mpro, mpro_ans), "STOMP_compute_matrix_profile_data1: " \
            "Should compute the matrix profile correctly. " \
            "Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
        # assert np.allclose(ipro, ipro_ans), "STOMP_compute_matrix_profile_data1: " \
        #     "Should compute the index profile correctly."

    def test_STOMP_compute_matrix_profile_data2(self):
        t = np.loadtxt("./data/candy_production.csv")
        mpro_ans = np.loadtxt("./data/candy_production_mpro.csv")
        ipro_ans = np.loadtxt("./data/candy_production_ipro.csv")
        w = 80
        mp = pytsmp.STOMP(t, window_size=w, verbose=False)
        mpro, ipro = mp.get_profiles()
        assert np.allclose(mpro, mpro_ans), "STOMP_compute_matrix_profile_data2: " \
            "Should compute the matrix profile correctly. " \
            "Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
        # Fixed: message previously said "data1" (copy-paste error).
        assert np.allclose(ipro, ipro_ans), "STOMP_compute_matrix_profile_data2: " \
            "Should compute the index profile correctly."

    def test_STOMP_compute_matrix_profile_data3(self):
        t = np.loadtxt("./data/bitcoin_price.csv")
        mpro_ans = np.loadtxt("./data/bitcoin_price_mpro.csv")
        ipro_ans = np.loadtxt("./data/bitcoin_price_ipro.csv")
        w = 100
        mp = pytsmp.STOMP(t, window_size=w, verbose=False)
        mpro, ipro = mp.get_profiles()
        assert np.allclose(mpro, mpro_ans), "STOMP_compute_matrix_profile_data3: " \
            "Should compute the matrix profile correctly. " \
            "Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
        assert np.allclose(ipro, ipro_ans), "STOMP_compute_matrix_profile_data3: " \
            "Should compute the index profile correctly."
class TestSCRIMP:
    """Tests for the SCRIMP matrix-profile algorithm (run with pre_scrimp=0, i.e. no pre-processing)."""

    def test_SCRIMP_is_anytime(self):
        t = np.random.rand(1000)
        mp = pytsmp.SCRIMP(t, window_size=10, s_size=1, verbose=True, pre_scrimp=1)
        is_anytime = mp.is_anytime
        assert is_anytime == True, "SCRIMP_is_anytime: SCRIMP should be an anytime algorithm."

    def test_SCRIMP_get_profiles_check_length(self):
        n = np.random.randint(100, 1000)
        m = np.random.randint(100, 1000)
        t1 = np.random.rand(n)
        t2 = np.random.rand(m)
        w = np.random.randint(10, min(n, m))
        mp = pytsmp.SCRIMP(t1, t2, window_size=w, verbose=False, pre_scrimp=0)
        mpro, ipro = mp.get_profiles()
        assert len(mpro) == n - w + 1, "SCRIMP_get_profile_check_length: Matrix profile should have correct length"
        assert len(ipro) == n - w + 1, "SCRIMP_get_profile_check_length: Index profile should have correct length"

    def test_SCRIMP_get_profiles_check_mutation(self):
        t = np.random.rand(1000)
        w = 10
        mp = pytsmp.SCRIMP(t, window_size=w, verbose=False, pre_scrimp=0)
        mpro, ipro = mp.get_profiles()
        mpro[0] = -1
        ipro[0] = -1
        mpro2, ipro2 = mp.get_profiles()
        assert mpro[0] != mpro2[0], "SCRIMP_get_profile_check_mutation: " \
            "Get profile should return a copy of the matrix profile, not the internal one."
        assert ipro[0] != ipro2[0], "SCRIMP_get_profile_check_mutation: " \
            "Get profile should return a copy of the index profile, not the internal one."

    def test_SCRIMP_compute_matrix_profile_sanity(self):
        # Self-join of a series with itself: profile is all zeros, indices are the identity.
        t = np.random.rand(1000)
        w = 10
        mp = pytsmp.SCRIMP(t, t, window_size=w, verbose=False, pre_scrimp=0)
        mpro, ipro = mp.get_profiles()
        assert np.allclose(mpro, np.zeros(len(t) - w + 1), atol=1e-5), "SCRIMP_compute_matrix_profile_sanity: " \
            "Should compute the matrix profile correctly in the trivial case."
        assert np.array_equal(ipro, np.arange(len(t) - w + 1)), "SCRIMP_compute_matrix_profile_sanity: " \
            "Should compute the index profile correctly in the trivial case."

    def test_SCRIMP_compute_matrix_profile_same_random_data(self):
        n = np.random.randint(100, 200)  # anything larger will be too time-consuming
        t = np.random.rand(n)
        w = np.random.randint(10, n // 4)
        mp = pytsmp.SCRIMP(t, window_size=w, verbose=False, pre_scrimp=0)
        mpro, ipro = mp.get_profiles()
        mp_naive, ip_naive = helpers.naive_matrix_profile(t, window_size=w)
        assert np.allclose(mpro, mp_naive), "SCRIMP_compute_matrix_profile_same_random_data: " \
            "Should compute the matrix profile correctly."
        assert np.allclose(ipro, ip_naive), "SCRIMP_compute_matrix_profile_same_random_data: " \
            "Should compute the index profile correctly."

    def test_SCRIMP_compute_matrix_profile_random_data(self):
        n = np.random.randint(100, 200)
        m = np.random.randint(100, 200)
        t1 = np.random.rand(n)
        t2 = np.random.rand(m)
        w = np.random.randint(10, min(n, m) // 4)
        mp = pytsmp.SCRIMP(t1, t2, window_size=w, verbose=False, pre_scrimp=0)
        mpro, ipro = mp.get_profiles()
        mp_naive, ip_naive = helpers.naive_matrix_profile(t1, t2, window_size=w)
        assert np.allclose(mpro, mp_naive), "SCRIMP_compute_matrix_profile_random_data: " \
            "Should compute the matrix profile correctly."
        assert np.allclose(ipro, ip_naive), "SCRIMP_compute_matrix_profile_random_data: " \
            "Should compute the index profile correctly."

    def test_SCRIMP_compute_matrix_profile_data1(self):
        t = np.loadtxt("./data/random_walk_data.csv")
        mpro_ans = np.loadtxt("./data/random_walk_data_mpro.csv")
        ipro_ans = np.loadtxt("./data/random_walk_data_ipro.csv")
        w = 50
        mp = pytsmp.SCRIMP(t, window_size=w, verbose=False, pre_scrimp=0)
        mpro, ipro = mp.get_profiles()
        assert np.allclose(mpro, mpro_ans), "SCRIMP_compute_matrix_profile_data1: " \
            "Should compute the matrix profile correctly. " \
            "Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
        # assert np.allclose(ipro, ipro_ans), "SCRIMP_compute_matrix_profile_data1: " \
        #     "Should compute the index profile correctly."

    def test_SCRIMP_compute_matrix_profile_data2(self):
        t = np.loadtxt("./data/candy_production.csv")
        mpro_ans = np.loadtxt("./data/candy_production_mpro.csv")
        ipro_ans = np.loadtxt("./data/candy_production_ipro.csv")
        w = 80
        mp = pytsmp.SCRIMP(t, window_size=w, verbose=False, pre_scrimp=0)
        mpro, ipro = mp.get_profiles()
        assert np.allclose(mpro, mpro_ans), "SCRIMP_compute_matrix_profile_data2: " \
            "Should compute the matrix profile correctly. " \
            "Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
        # Fixed: message previously said "data1" (copy-paste error).
        assert np.allclose(ipro, ipro_ans), "SCRIMP_compute_matrix_profile_data2: " \
            "Should compute the index profile correctly."

    def test_SCRIMP_compute_matrix_profile_data3(self):
        t = np.loadtxt("./data/bitcoin_price.csv")
        mpro_ans = np.loadtxt("./data/bitcoin_price_mpro.csv")
        ipro_ans = np.loadtxt("./data/bitcoin_price_ipro.csv")
        w = 100
        mp = pytsmp.SCRIMP(t, window_size=w, verbose=False, pre_scrimp=0)
        mpro, ipro = mp.get_profiles()
        assert np.allclose(mpro, mpro_ans), "SCRIMP_compute_matrix_profile_data3: " \
            "Should compute the matrix profile correctly. " \
            "Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
        assert np.allclose(ipro, ipro_ans), "SCRIMP_compute_matrix_profile_data3: " \
            "Should compute the index profile correctly."
class TestPreSCRIMP:
    """Tests for PreSCRIMP, the anytime approximate matrix-profile algorithm."""

    def test_PreSCRIMP_is_anytime(self):
        series = np.random.rand(1000)
        algo = pytsmp.PreSCRIMP(series, window_size=10, s_size=1, verbose=True)
        assert algo.is_anytime == True, "PreSCRIMP_is_anytime: PreSCRIMP should be an anytime algorithm."

    def test_PreSCRIMP_init_incorrect_pre_scrimp1(self):
        # sample_rate == 0 must be rejected at construction time.
        with pytest.raises(ValueError) as excinfo:
            series = np.random.rand(1000)
            pytsmp.PreSCRIMP(series, window_size=10, verbose=False, sample_rate=0)
        assert str(excinfo.value) == "sample_rate must be positive."

    def test_PreSCRIMP_init_incorrect_pre_scrimp2(self):
        # Negative sample_rate must be rejected at construction time.
        with pytest.raises(ValueError) as excinfo:
            series = np.random.rand(1000)
            pytsmp.PreSCRIMP(series, window_size=10, verbose=False, sample_rate=-2)
        assert str(excinfo.value) == "sample_rate must be positive."

    def test_PreSCRIMP_get_profiles_check_length(self):
        len1 = np.random.randint(100, 1000)
        len2 = np.random.randint(100, 1000)
        series1 = np.random.rand(len1)
        series2 = np.random.rand(len2)
        window = np.random.randint(10, min(len1, len2))
        algo = pytsmp.PreSCRIMP(series1, series2, window_size=window, verbose=False)
        profile, indices = algo.get_profiles()
        expected_len = len1 - window + 1
        assert len(profile) == expected_len, "PreSCRIMP_get_profile_check_length: Matrix profile should have correct length"
        assert len(indices) == expected_len, "PreSCRIMP_get_profile_check_length: Index profile should have correct length"

    def test_PreSCRIMP_get_profiles_check_mutation(self):
        series = np.random.rand(1000)
        algo = pytsmp.PreSCRIMP(series, window_size=10, verbose=False)
        profile, indices = algo.get_profiles()
        # Mutate the returned arrays; the internals must remain untouched.
        profile[0] = -1
        indices[0] = -1
        profile2, indices2 = algo.get_profiles()
        assert profile[0] != profile2[0], "PreSCRIMP_get_profile_check_mutation: " \
            "Get profile should return a copy of the matrix profile, not the internal one."
        assert indices[0] != indices2[0], "PreSCRIMP_get_profile_check_mutation: " \
            "Get profile should return a copy of the index profile, not the internal one."

    def test_PreSCRIMP_compute_matrix_profile_sanity1(self):
        # Self-join: profile is all zeros and the index profile is the identity.
        series = np.random.rand(1000)
        window = 10
        algo = pytsmp.PreSCRIMP(series, series, window_size=window, verbose=False)
        profile, indices = algo.get_profiles()
        length = len(series) - window + 1
        assert np.allclose(profile, np.zeros(length), atol=1e-5), "PreSCRIMP_compute_matrix_profile_sanity1: " \
            "Should compute the matrix profile correctly in the trivial case."
        assert np.array_equal(indices, np.arange(length)), "PreSCRIMP_compute_matrix_profile_sanity1: " \
            "Should compute the index profile correctly in the trivial case."

    def test_PreSCRIMP_compute_matrix_profile_sanity2(self):
        # The PreSCRIMP approximation must upper-bound the exact SCRIMP profile.
        series = np.random.rand(1000)
        window = 50
        approx = pytsmp.PreSCRIMP(series, series, window_size=window, verbose=False)
        approx_profile, _ = approx.get_profiles()
        exact = pytsmp.SCRIMP(series, series, window_size=window, verbose=False, pre_scrimp=0)
        exact_profile, _ = exact.get_profiles()
        assert (approx_profile > exact_profile - 1e-5).all(), "PreSCRIMP_compute_matrix_profile_sanity2: PreSCRIMP should be an " \
            "upper approximation for the actual matrix profile."

    @pytest.mark.skip(reason="Randomized tests on approximate algorithms do not seem a correct thing to do.")
    def test_PreSCRIMP_compute_matrix_profile_same_random_data(self):
        length = np.random.randint(100, 200)  # anything larger will be too time-consuming
        series = np.random.rand(length)
        window = np.random.randint(10, length // 4)
        algo = pytsmp.PreSCRIMP(series, window_size=window, verbose=False)
        profile, indices = algo.get_profiles()
        expected_profile, expected_indices = helpers.naive_matrix_profile(series, window_size=window)
        assert np.allclose(profile, expected_profile), "PreSCRIMP_compute_matrix_profile_same_random_data: " \
            "Should compute the matrix profile correctly."
        assert np.allclose(indices, expected_indices), "PreSCRIMP_compute_matrix_profile_same_random_data: " \
            "Should compute the index profile correctly."

    @pytest.mark.skip(reason="Randomized tests on approximate algorithms do not seem a correct thing to do.")
    def test_PreSCRIMP_compute_matrix_profile_random_data(self):
        len1 = np.random.randint(100, 200)
        len2 = np.random.randint(100, 200)
        series1 = np.random.rand(len1)
        series2 = np.random.rand(len2)
        window = np.random.randint(10, min(len1, len2) // 4)
        algo = pytsmp.PreSCRIMP(series1, series2, window_size=window, verbose=False)
        profile, indices = algo.get_profiles()
        expected_profile, expected_indices = helpers.naive_matrix_profile(series1, series2, window_size=window)
        assert np.allclose(profile, expected_profile), "PreSCRIMP_compute_matrix_profile_random_data: " \
            "Should compute the matrix profile correctly."
        assert np.allclose(indices, expected_indices), "PreSCRIMP_compute_matrix_profile_random_data: " \
            "Should compute the index profile correctly."
class TestSCRIMP_PreSCRIMP:
    """Tests for SCRIMP combined with its PreSCRIMP pre-processing stage (pre_scrimp > 0)."""

    def test_SCRIMP_init_incorrect_pre_scrimp(self):
        with pytest.raises(ValueError) as excinfo:
            t = np.random.rand(1000)
            mp = pytsmp.SCRIMP(t, window_size=10, verbose=False, pre_scrimp=-1)
        assert str(excinfo.value) == "pre_scrimp parameter must be non-negative."

    def test_SCRIMP_init_pre_scrimp_zero(self):
        t = np.random.rand(1000)
        mp = pytsmp.SCRIMP(t, window_size=10, s_size=1, verbose=False, pre_scrimp=0)
        assert getattr(mp, "_pre_scrimp_class", None) is None, "SCRIMP_init_pre_scrimp_zero: " \
            "PreSCRIMP should not run if pre_scrimp = 0."

    def test_SCRIMP_init_pre_scrimp_nonzero(self):
        t = np.random.rand(1000)
        mp = pytsmp.SCRIMP(t, window_size=10, s_size=1, verbose=False, pre_scrimp=1/2)
        assert getattr(mp, "_pre_scrimp_class", None) is not None, "SCRIMP_init_pre_scrimp_nonzero: " \
            "PreSCRIMP should run if pre_scrimp > 0."

    def test_SCRIMP_PreSCRIMP_get_profiles_check_length(self):
        n = np.random.randint(100, 1000)
        m = np.random.randint(100, 1000)
        t1 = np.random.rand(n)
        t2 = np.random.rand(m)
        w = np.random.randint(10, min(n, m))
        mp = pytsmp.SCRIMP(t1, t2, window_size=w, verbose=False, pre_scrimp=1/4)
        mpro, ipro = mp.get_profiles()
        assert len(mpro) == n - w + 1, "SCRIMP_get_profile_check_length: Matrix profile should have correct length"
        assert len(ipro) == n - w + 1, "SCRIMP_get_profile_check_length: Index profile should have correct length"

    def test_SCRIMP_PreSCRIMP_get_profiles_check_mutation(self):
        t = np.random.rand(1000)
        w = 10
        mp = pytsmp.SCRIMP(t, window_size=w, verbose=False, pre_scrimp=1/4)
        mpro, ipro = mp.get_profiles()
        mpro[0] = -1
        ipro[0] = -1
        mpro2, ipro2 = mp.get_profiles()
        assert mpro[0] != mpro2[0], "SCRIMP_get_profile_check_mutation: " \
            "Get profile should return a copy of the matrix profile, not the internal one."
        assert ipro[0] != ipro2[0], "SCRIMP_get_profile_check_mutation: " \
            "Get profile should return a copy of the index profile, not the internal one."

    def test_SCRIMP_PreSCRIMP_compute_matrix_profile_sanity(self):
        # Self-join of a series with itself: profile is all zeros, indices are the identity.
        t = np.random.rand(1000)
        w = 10
        mp = pytsmp.SCRIMP(t, t, window_size=w, verbose=False, pre_scrimp=1/4)
        mpro, ipro = mp.get_profiles()
        assert np.allclose(mpro, np.zeros(len(t) - w + 1), atol=1e-5), "SCRIMP_compute_matrix_profile_sanity: " \
            "Should compute the matrix profile correctly in the trivial case."
        assert np.array_equal(ipro, np.arange(len(t) - w + 1)), "SCRIMP_compute_matrix_profile_sanity: " \
            "Should compute the index profile correctly in the trivial case."

    def test_SCRIMP_PreSCRIMP_compute_matrix_profile_same_random_data(self):
        n = np.random.randint(100, 200)  # anything larger will be too time-consuming
        t = np.random.rand(n)
        w = np.random.randint(10, n // 4)
        mp = pytsmp.SCRIMP(t, window_size=w, verbose=False, pre_scrimp=1/4)
        mpro, ipro = mp.get_profiles()
        mp_naive, ip_naive = helpers.naive_matrix_profile(t, window_size=w)
        assert np.allclose(mpro, mp_naive), "SCRIMP_compute_matrix_profile_same_random_data: " \
            "Should compute the matrix profile correctly."
        assert np.allclose(ipro, ip_naive), "SCRIMP_compute_matrix_profile_same_random_data: " \
            "Should compute the index profile correctly."

    def test_SCRIMP_PreSCRIMP_compute_matrix_profile_random_data(self):
        n = np.random.randint(100, 200)
        m = np.random.randint(100, 200)
        t1 = np.random.rand(n)
        t2 = np.random.rand(m)
        w = np.random.randint(10, min(n, m) // 4)
        mp = pytsmp.SCRIMP(t1, t2, window_size=w, verbose=False, pre_scrimp=1/4)
        mpro, ipro = mp.get_profiles()
        mp_naive, ip_naive = helpers.naive_matrix_profile(t1, t2, window_size=w)
        assert np.allclose(mpro, mp_naive), "SCRIMP_compute_matrix_profile_random_data: " \
            "Should compute the matrix profile correctly."
        assert np.allclose(ipro, ip_naive), "SCRIMP_compute_matrix_profile_random_data: " \
            "Should compute the index profile correctly."

    def test_SCRIMP_PreSCRIMP_compute_matrix_profile_data1(self):
        t = np.loadtxt("./data/random_walk_data.csv")
        mpro_ans = np.loadtxt("./data/random_walk_data_mpro.csv")
        ipro_ans = np.loadtxt("./data/random_walk_data_ipro.csv")
        w = 50
        mp = pytsmp.SCRIMP(t, window_size=w, verbose=False, pre_scrimp=1/4)
        mpro, ipro = mp.get_profiles()
        assert np.allclose(mpro, mpro_ans), "SCRIMP_compute_matrix_profile_data1: " \
            "Should compute the matrix profile correctly. " \
            "Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
        # assert np.allclose(ipro, ipro_ans), "SCRIMP_compute_matrix_profile_data1: " \
        #     "Should compute the index profile correctly."

    def test_SCRIMP_PreSCRIMP_compute_matrix_profile_data2(self):
        t = np.loadtxt("./data/candy_production.csv")
        mpro_ans = np.loadtxt("./data/candy_production_mpro.csv")
        ipro_ans = np.loadtxt("./data/candy_production_ipro.csv")
        w = 80
        mp = pytsmp.SCRIMP(t, window_size=w, verbose=False, pre_scrimp=1/4)
        mpro, ipro = mp.get_profiles()
        assert np.allclose(mpro, mpro_ans), "SCRIMP_compute_matrix_profile_data2: " \
            "Should compute the matrix profile correctly. " \
            "Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
        # Fixed: message previously said "data1" (copy-paste error).
        assert np.allclose(ipro, ipro_ans), "SCRIMP_compute_matrix_profile_data2: " \
            "Should compute the index profile correctly."

    def test_SCRIMP_PreSCRIMP_compute_matrix_profile_data3(self):
        t = np.loadtxt("./data/bitcoin_price.csv")
        mpro_ans = np.loadtxt("./data/bitcoin_price_mpro.csv")
        ipro_ans = np.loadtxt("./data/bitcoin_price_ipro.csv")
        w = 100
        mp = pytsmp.SCRIMP(t, window_size=w, verbose=False, pre_scrimp=1/4)
        mpro, ipro = mp.get_profiles()
        assert np.allclose(mpro, mpro_ans), "SCRIMP_compute_matrix_profile_data3: " \
            "Should compute the matrix profile correctly. " \
            "Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
        assert np.allclose(ipro, ipro_ans), "SCRIMP_compute_matrix_profile_data3: " \
            "Should compute the index profile correctly."
| 56.054705
| 130
| 0.597221
| 6,631
| 51,234
| 4.410345
| 0.034535
| 0.045957
| 0.034467
| 0.040007
| 0.951787
| 0.939887
| 0.922346
| 0.908258
| 0.895606
| 0.882441
| 0
| 0.033543
| 0.297654
| 51,234
| 913
| 131
| 56.116101
| 0.77918
| 0.020904
| 0
| 0.681874
| 0
| 0
| 0.244916
| 0.091185
| 0
| 0
| 0
| 0
| 0.15783
| 1
| 0.097411
| false
| 0
| 0.004932
| 0
| 0.110974
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4b48555c88a27a414c9288d96c41d5f217f3f4ed
| 4,275
|
py
|
Python
|
station/tests/test_quality_ratings.py
|
gut-space/svarog
|
d68020a8f104da3b30a29ad24cc0ac64cf12ef5c
|
[
"MIT"
] | 7
|
2021-09-12T17:23:35.000Z
|
2022-01-26T18:14:45.000Z
|
station/tests/test_quality_ratings.py
|
gut-space/svarog
|
ef5dda811315c183bdc2e996b015d4d1fbe57f19
|
[
"MIT"
] | 45
|
2021-05-09T11:46:34.000Z
|
2022-02-20T20:47:09.000Z
|
station/tests/test_quality_ratings.py
|
gut-space/svarog
|
d68020a8f104da3b30a29ad24cc0ac64cf12ef5c
|
[
"MIT"
] | null | null | null |
import unittest
import numpy as np
import quality_ratings
class TestQualityRatings(unittest.TestCase):
    """Sanity checks for the "analog" and "digital" image quality rating functions."""
    # NOTE(review): several test names spell "quarter" as "quater"; names are kept
    # unchanged here to preserve existing test IDs in CI reports.

    def test_list_names(self):
        names = quality_ratings.get_rate_names()
        self.assertTrue("analog" in names)
        self.assertTrue("digital" in names)

    def test_get_rate_by_name(self):
        self.assertIsNotNone(quality_ratings.get_rate_by_name("analog"))
        self.assertIsNotNone(quality_ratings.get_rate_by_name("digital"))

    def test_analog_rating_on_gaussian_noise_small_sigma(self):
        img = np.random.normal(scale=1, size=(1000, 1000))
        rate = quality_ratings.get_rate_by_name("analog")
        rating = rate(img)
        self.assertAlmostEqual(1.0, rating, 2)

    def test_analog_rating_on_gaussian_noise_big_sigma(self):
        img = np.random.normal(scale=20, size=(1000, 1000))
        rate = quality_ratings.get_rate_by_name("analog")
        rating = rate(img)
        self.assertAlmostEqual(0.0, rating, 2)

    def test_analog_rating_on_gaussian_noise_medium_sigma(self):
        img = np.random.normal(scale=12.7, size=(1000, 1000))
        rate = quality_ratings.get_rate_by_name("analog")
        rating = rate(img)
        self.assertAlmostEqual(0.5, rating, 1)

    def test_analog_rating_on_gaussian_noise_small_sigma_floating_img(self):
        img = np.random.normal(scale=1, size=(1000, 1000))
        img = img.astype(float) / 255.
        rate = quality_ratings.get_rate_by_name("analog")
        rating = rate(img)
        self.assertAlmostEqual(1.0, rating, 2)

    def test_analog_rating_on_gaussian_noise_big_sigma_floating_img(self):
        img = np.random.normal(scale=20, size=(1000, 1000))
        img = img.astype(float) / 255.
        rate = quality_ratings.get_rate_by_name("analog")
        rating = rate(img)
        self.assertAlmostEqual(0.0, rating, 2)

    def test_analog_rating_on_gaussian_noise_medium_sigma_floating_img(self):
        img = np.random.normal(scale=12.7, size=(1000, 1000))
        img = img.astype(float) / 255.
        rate = quality_ratings.get_rate_by_name("analog")
        rating = rate(img)
        self.assertAlmostEqual(0.5, rating, 1)

    def test_analog_rating_on_blank(self):
        '''Image doesn't contain any noise - good quality'''
        img = np.zeros((1000, 1000))
        rate = quality_ratings.get_rate_by_name("analog")
        rating = rate(img)
        self.assertAlmostEqual(1, rating, 2)

    def test_digital_rating_on_black(self):
        '''All pixels are black - no data - bad quality'''
        img = np.zeros((1000, 1000))
        rate = quality_ratings.get_rate_by_name("digital")
        rating = rate(img)
        self.assertAlmostEqual(0, rating, 3)

    def test_digital_rating_on_white(self):
        '''All pixels aren't black - full data - good quality'''
        img = np.ones((1000, 1000))
        rate = quality_ratings.get_rate_by_name("digital")
        rating = rate(img)
        self.assertAlmostEqual(1, rating, 3)

    def test_digital_rating_on_half_zeros(self):
        z = np.ones((1000, 500))
        o = np.zeros((1000, 500))
        img = np.hstack((z, o))
        rate = quality_ratings.get_rate_by_name("digital")
        rating = rate(img)
        self.assertAlmostEqual(0.5, rating, 3)

    def test_digital_rating_on_quater_zeros(self):
        ones = np.ones((1000, 750))
        zeros = np.zeros((1000, 250))
        img = np.hstack((zeros, ones))
        rate = quality_ratings.get_rate_by_name("digital")
        rating = rate(img)
        self.assertAlmostEqual(0.75, rating, 3)

    def test_digital_rating_on_3d_black(self):
        img = np.zeros((1000, 1000, 3))
        rate = quality_ratings.get_rate_by_name("digital")
        rating = rate(img)
        self.assertAlmostEqual(0, rating, 3)

    def test_digital_rating_on_3d_white(self):
        img = np.ones((1000, 1000, 3))
        rate = quality_ratings.get_rate_by_name("digital")
        rating = rate(img)
        self.assertAlmostEqual(1, rating, 3)

    def test_digital_rating_on_3d_quater_zeros(self):
        # Zero out the blue channel everywhere and all channels in the top quarter,
        # leaving 75% of the non-blue data intact.
        img = np.ones((1000, 1000, 3))
        img[:, :, 2] = 0
        img[:250, :, 0:2] = 0
        rate = quality_ratings.get_rate_by_name("digital")
        rating = rate(img)
        self.assertAlmostEqual(0.75, rating, 3)
| 37.5
| 77
| 0.654503
| 587
| 4,275
| 4.494037
| 0.13799
| 0.095527
| 0.109553
| 0.13533
| 0.808946
| 0.793404
| 0.793404
| 0.764215
| 0.716073
| 0.678165
| 0
| 0.060036
| 0.228538
| 4,275
| 113
| 78
| 37.831858
| 0.739842
| 0.032982
| 0
| 0.588889
| 0
| 0
| 0.028419
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.177778
| false
| 0
| 0.033333
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4b509886b57a37ed72577e9a2a00f616415818a1
| 3,197
|
py
|
Python
|
service/endpoints/inference.py
|
hasty-ai/docker-inference-example
|
f5e8bcccff8011b783c25c9795771be1fd4f732d
|
[
"MIT"
] | 1
|
2021-11-04T06:50:30.000Z
|
2021-11-04T06:50:30.000Z
|
service/endpoints/inference.py
|
hasty-ai/docker-inference-example
|
f5e8bcccff8011b783c25c9795771be1fd4f732d
|
[
"MIT"
] | null | null | null |
service/endpoints/inference.py
|
hasty-ai/docker-inference-example
|
f5e8bcccff8011b783c25c9795771be1fd4f732d
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, request, g
from .. import api
inference_api = Blueprint('inference_api', __name__)


def _bind_request_id(payload):
    """Store the optional request_id on flask.g (for request-scoped logging) and return it."""
    request_id = payload.get("request_id")
    if request_id:
        g.request_id = request_id
    return request_id


def _extract_image(payload):
    """Return the (base64, url) image sources from the request payload.

    Raises:
        ValueError: if neither a base64 string nor a url is provided.
    """
    image = payload.get("image", {})
    image_b64 = image.get("b64") if "b64" in image else None
    image_url = image.get("url") if "url" in image else None
    if not image_b64 and not image_url:
        raise ValueError("Image url or base64 should be provided")
    return image_b64, image_url


def _json_result(predictions, request_id):
    """Wrap predictions (and the request_id, when present) in a JSON response."""
    results = {"predictions": predictions}
    if request_id:
        results["request_id"] = request_id
    return api.base.get_json_response(results)


@inference_api.route('/v1/object_detection', methods=['POST'])
def get_object_detection_prediction():
    """Run object detection (with optional classifier/attributer heads) on one image."""
    payload = request.json
    confidence_thresh = payload.get('confidence_threshold', 0.5)
    # NOTE(review): the key 'attributer_threshold' looks like a typo for
    # 'attribute_threshold', but it is part of the public API — confirm with clients
    # before changing.
    attr_thresh = payload.get('attributer_threshold', 0.5)
    request_id = _bind_request_id(payload)
    image_b64, image_url = _extract_image(payload)
    model = payload.get('model', None)
    cls_model_name = payload.get('cls_model_name', None)
    attr_model_name = payload.get('attr_model_name', None)
    predictions = api.inference.get_object_detection_prediction(
        model,
        image_b64=image_b64,
        image_url=image_url,
        confidence_thresh=confidence_thresh,
        attr_thresh=attr_thresh,
        cls_model_name=cls_model_name,
        attr_model_name=attr_model_name,
    )
    return _json_result(predictions, request_id)


@inference_api.route("/v1/image_tagger", methods=["POST"])
def get_image_tagger_prediction():
    """Run the image tagger model on one image."""
    payload = request.json
    confidence_thresh = payload.get("confidence_threshold", 0.5)
    request_id = _bind_request_id(payload)
    image_b64, image_url = _extract_image(payload)
    model = payload.get("model", None)
    predictions = api.inference.get_image_tagger_prediction(
        model,
        image_b64=image_b64,
        image_url=image_url,
        confidence_thresh=confidence_thresh,
    )
    return _json_result(predictions, request_id)


@inference_api.route("/v1/semantic_segmentor", methods=["POST"])
def get_semantic_segmentor_prediction():
    """Run semantic segmentation on one image."""
    payload = request.json
    request_id = _bind_request_id(payload)
    image_b64, image_url = _extract_image(payload)
    model = payload.get("model", None)
    predictions = api.inference.get_semantic_segmentor_prediction(
        model,
        image_b64=image_b64,
        image_url=image_url,
    )
    return _json_result(predictions, request_id)
| 35.131868
| 69
| 0.680325
| 428
| 3,197
| 4.806075
| 0.121495
| 0.105007
| 0.095284
| 0.052504
| 0.807487
| 0.753525
| 0.753525
| 0.753525
| 0.753525
| 0.753525
| 0
| 0.022539
| 0.208946
| 3,197
| 90
| 70
| 35.522222
| 0.790826
| 0
| 0
| 0.670732
| 0
| 0
| 0.139193
| 0.006881
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036585
| false
| 0
| 0.02439
| 0
| 0.097561
| 0.02439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4b5a05bbf0148b95b991eb4566207295c1654be0
| 144
|
py
|
Python
|
src/ar6/metrics/__init__.py
|
IPCC-WG1/Chapter-7
|
235679fbd25e489827de605e1417ac3f27e6abab
|
[
"MIT"
] | 11
|
2021-08-18T10:15:24.000Z
|
2021-08-23T19:15:34.000Z
|
src/ar6/metrics/__init__.py
|
IPCC-WG1/Chapter-7
|
235679fbd25e489827de605e1417ac3f27e6abab
|
[
"MIT"
] | null | null | null |
src/ar6/metrics/__init__.py
|
IPCC-WG1/Chapter-7
|
235679fbd25e489827de605e1417ac3f27e6abab
|
[
"MIT"
] | 4
|
2021-08-25T00:55:11.000Z
|
2022-01-08T12:21:29.000Z
|
# import modules into namespace
from .co2 import co2_analytical
from .ch4 import ch4_analytical
from .halogen_generic import halogen_analytical
| 28.8
| 47
| 0.854167
| 20
| 144
| 5.95
| 0.5
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031496
| 0.118056
| 144
| 4
| 48
| 36
| 0.905512
| 0.201389
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4bbe3fcd4e61b2ed6247d2bc1b140f2682368d75
| 3,092
|
py
|
Python
|
tests/auth_api/queries/test_queries_companyquery.py
|
Energinet-DataHub/po-auth
|
009071018a390aeee29f2ab0da472b1338ea9f89
|
[
"Apache-2.0"
] | 1
|
2022-02-21T11:19:41.000Z
|
2022-02-21T11:19:41.000Z
|
tests/auth_api/queries/test_queries_companyquery.py
|
Energinet-DataHub/po-auth
|
009071018a390aeee29f2ab0da472b1338ea9f89
|
[
"Apache-2.0"
] | 40
|
2022-01-25T11:28:36.000Z
|
2022-03-03T08:24:26.000Z
|
tests/auth_api/queries/test_queries_companyquery.py
|
Energinet-DataHub/po-auth
|
009071018a390aeee29f2ab0da472b1338ea9f89
|
[
"Apache-2.0"
] | 9
|
2021-11-29T14:25:01.000Z
|
2022-03-16T10:57:55.000Z
|
import pytest
from auth_api.db import db
from auth_api.models import DbCompany
from auth_api.queries import CompanyQuery
from tests.auth_api.queries.query_base import (
COMPANY_LIST,
TestQueryBase,
)
class TestCompanyQueries(TestQueryBase):
    """Test company queries."""
    # Fixed: docstring previously said "Test user queries." (copy-paste from another suite).

    @pytest.mark.parametrize('company', COMPANY_LIST)
    def test__has_id__id_exists__return_correct_company(
            self,
            seeded_session: db.Session,
            company: dict,
    ):
        """
        If company with id exists return correct company.

        :param seeded_session: Mocked database session
        :param company: Current company inserted into the test
        """
        # -- Act -------------------------------------------------------------

        fetched_company: DbCompany = CompanyQuery(seeded_session) \
            .has_id(company['id']) \
            .one_or_none()

        # -- Assert ----------------------------------------------------------

        assert fetched_company is not None
        assert fetched_company.id == company['id']

    def test__has_id__id_does_not_exists__return_none(
            self,
            seeded_session: db.Session,
    ):
        """
        If company with id does not exist return none.

        :param seeded_session: Mocked database session
        """
        # -- Act -------------------------------------------------------------

        fetched_company: DbCompany = CompanyQuery(seeded_session) \
            .has_id("THIS_ID_DOES_NOT_EXIST") \
            .one_or_none()

        # -- Assert ----------------------------------------------------------

        assert fetched_company is None

    @pytest.mark.parametrize('company', COMPANY_LIST)
    def test__has_tin__tin_exists__return_correct_company(
            self,
            seeded_session: db.Session,
            company: dict,
    ):
        """
        If company with tin exists return correct company.

        :param seeded_session: Mocked database session
        :param company: Current company inserted into the test
        """
        # -- Act -------------------------------------------------------------

        fetched_company: DbCompany = CompanyQuery(seeded_session) \
            .has_tin(company['tin']) \
            .one_or_none()

        # -- Assert ----------------------------------------------------------

        assert fetched_company is not None
        assert fetched_company.tin == company['tin']
        assert fetched_company.id == company['id']

    def test__has_tin__tin_not_exists__return_none(
            self,
            seeded_session: db.Session,
    ):
        """
        If company with tin that does not exist return none.

        :param seeded_session: Mocked database session
        """
        # -- Act -------------------------------------------------------------

        fetched_company: DbCompany = CompanyQuery(seeded_session) \
            .has_tin("THIS_TIN_DOES_NOT_EXISTS") \
            .one_or_none()

        # -- Assert ----------------------------------------------------------

        assert fetched_company is None
| 30.019417
| 78
| 0.524256
| 291
| 3,092
| 5.254296
| 0.185567
| 0.102027
| 0.091563
| 0.068018
| 0.800523
| 0.773054
| 0.773054
| 0.773054
| 0.773054
| 0.608241
| 0
| 0
| 0.252911
| 3,092
| 102
| 79
| 30.313725
| 0.661905
| 0.347671
| 0
| 0.625
| 0
| 0
| 0.038627
| 0.024678
| 0
| 0
| 0
| 0
| 0.145833
| 1
| 0.083333
| false
| 0
| 0.104167
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
29a3f4c0a831cc3d5b9d0dd5b35b56e068b56291
| 11,529
|
py
|
Python
|
telnyx-python/tests/api_resources/test_call.py
|
team-telnyx/telnyx-2fa
|
6b8acb6703dc9458176c97d7fa6d1fd9c303bfbd
|
[
"MIT"
] | null | null | null |
telnyx-python/tests/api_resources/test_call.py
|
team-telnyx/telnyx-2fa
|
6b8acb6703dc9458176c97d7fa6d1fd9c303bfbd
|
[
"MIT"
] | 3
|
2020-03-24T18:09:34.000Z
|
2021-02-02T22:37:30.000Z
|
telnyx-python/tests/api_resources/test_call.py
|
mgwilliams/telnyx-2fa
|
49b794c05f42bc6d1c27f722e0d09da6654ad8d2
|
[
"MIT"
] | 1
|
2020-01-24T17:39:37.000Z
|
2020-01-24T17:39:37.000Z
|
from __future__ import absolute_import, division, print_function
import pytest
import telnyx
CALL_CONTROL_ID = "AgDIxmoRX6QMuaIj_uXRXnPAXP0QlNfXczRrZvZakpWxBlpw48KyZQ=="
def create_dial():
    """Dial a new outbound call and return the created ``telnyx.Call``."""
    dial_params = {
        "connection_id": "1111111111222222223",
        "to": "+12223334444",
        "from_": "+12223330000",
    }
    return telnyx.Call.create(**dial_params)
class TestCall(object):
    """Tests for the telnyx.Call resource and its call-control actions.

    Each action is exercised two ways: through an instance method on a
    dialed call (e.g. ``resource.reject()``) and through the class-level
    ``create_<action>`` helper (the latter tests are currently skipped).
    The previous version repeated the same request/response assertions in
    every test; the shared checks now live in two private helpers.
    """

    def _dialed_call(self):
        """Dial a call and pin its control id so action URLs are stable."""
        resource = create_dial()
        resource.call_control_id = CALL_CONTROL_ID
        return resource

    def _assert_action(self, request_mock, resource, action):
        """Assert the action endpoint was POSTed and a Call was returned."""
        # Every call-control action posts to /v2/calls/<id>/actions/<name>.
        request_mock.assert_requested(
            "post", "/v2/calls/%s/actions/%s" % (CALL_CONTROL_ID, action)
        )
        assert isinstance(resource, telnyx.Call)

    def test_is_creatable(self, request_mock):
        resource = create_dial()
        request_mock.assert_requested("post", "/v2/calls")
        assert isinstance(resource, telnyx.Call)

    def test_can_call_reject(self, request_mock):
        resource = self._dialed_call()
        resource.reject()
        self._assert_action(request_mock, resource, "reject")

    @pytest.mark.skip
    def test_can_call_calls_reject(self, request_mock):
        resource = create_dial()
        resource.create_reject(CALL_CONTROL_ID)
        self._assert_action(request_mock, resource, "reject")

    def test_can_call_answer(self, request_mock):
        resource = self._dialed_call()
        resource.answer()
        self._assert_action(request_mock, resource, "answer")

    @pytest.mark.skip
    def test_can_call_calls_answer(self, request_mock):
        resource = create_dial()
        resource.create_answer(CALL_CONTROL_ID)
        self._assert_action(request_mock, resource, "answer")

    def test_can_call_hangup(self, request_mock):
        resource = self._dialed_call()
        resource.hangup()
        self._assert_action(request_mock, resource, "hangup")

    @pytest.mark.skip
    def test_can_call_calls_hangup(self, request_mock):
        resource = create_dial()
        resource.create_hangup(CALL_CONTROL_ID)
        self._assert_action(request_mock, resource, "hangup")

    def test_can_call_bridge(self, request_mock):
        resource = self._dialed_call()
        resource.bridge(call_control_id=CALL_CONTROL_ID)
        self._assert_action(request_mock, resource, "bridge")

    @pytest.mark.skip
    def test_can_call_calls_bridge(self, request_mock):
        resource = create_dial()
        resource.create_bridge(CALL_CONTROL_ID, call_control_id=CALL_CONTROL_ID)
        self._assert_action(request_mock, resource, "bridge")

    def test_can_call_fork_start(self, request_mock):
        resource = self._dialed_call()
        resource.fork_start()
        self._assert_action(request_mock, resource, "fork_start")

    @pytest.mark.skip
    def test_can_call_calls_fork_start(self, request_mock):
        resource = create_dial()
        resource.create_fork_start(CALL_CONTROL_ID)
        self._assert_action(request_mock, resource, "fork_start")

    def test_can_call_fork_stop(self, request_mock):
        resource = self._dialed_call()
        resource.fork_stop()
        self._assert_action(request_mock, resource, "fork_stop")

    @pytest.mark.skip
    def test_can_call_calls_fork_stop(self, request_mock):
        resource = create_dial()
        resource.create_fork_stop(CALL_CONTROL_ID)
        self._assert_action(request_mock, resource, "fork_stop")

    def test_can_call_gather_using_audio(self, request_mock):
        resource = self._dialed_call()
        resource.gather_using_audio(audio_url="http://telnyx-audio.url")
        self._assert_action(request_mock, resource, "gather_using_audio")

    @pytest.mark.skip
    def test_can_call_calls_gather_using_audio(self, request_mock):
        resource = create_dial()
        resource.create_gather_using_audio(
            CALL_CONTROL_ID, audio_url="http://telnyx-audio.url"
        )
        self._assert_action(request_mock, resource, "gather_using_audio")

    def test_can_call_gather_using_speak(self, request_mock):
        resource = self._dialed_call()
        resource.gather_using_speak(
            language="en-US", voice="female", payload="Hello from the other side"
        )
        self._assert_action(request_mock, resource, "gather_using_speak")

    @pytest.mark.skip
    def test_can_call_calls_gather_using_speak(self, request_mock):
        resource = create_dial()
        resource.create_gather_using_speak(
            CALL_CONTROL_ID,
            language="en-US",
            voice="female",
            payload="Hello from the other side",
        )
        self._assert_action(request_mock, resource, "gather_using_speak")

    def test_can_call_playback_start(self, request_mock):
        resource = self._dialed_call()
        resource.playback_start(audio_url="http://telnyx-audio.url")
        self._assert_action(request_mock, resource, "playback_start")

    @pytest.mark.skip
    def test_can_call_calls_playback_start(self, request_mock):
        resource = create_dial()
        resource.create_playback_start(
            CALL_CONTROL_ID, audio_url="http://telnyx-audio.url"
        )
        self._assert_action(request_mock, resource, "playback_start")

    def test_can_call_playback_stop(self, request_mock):
        resource = self._dialed_call()
        resource.playback_stop()
        self._assert_action(request_mock, resource, "playback_stop")

    @pytest.mark.skip
    def test_can_call_calls_playback_stop(self, request_mock):
        resource = create_dial()
        resource.create_playback_stop(CALL_CONTROL_ID)
        self._assert_action(request_mock, resource, "playback_stop")

    def test_can_call_record_start(self, request_mock):
        resource = self._dialed_call()
        resource.record_start(channels="single", format="mp3")
        self._assert_action(request_mock, resource, "record_start")

    @pytest.mark.skip
    def test_can_call_calls_record_start(self, request_mock):
        resource = create_dial()
        resource.create_record_start(CALL_CONTROL_ID, channels="single", format="mp3")
        self._assert_action(request_mock, resource, "record_start")

    def test_can_call_record_stop(self, request_mock):
        resource = self._dialed_call()
        resource.record_stop()
        self._assert_action(request_mock, resource, "record_stop")

    @pytest.mark.skip
    def test_can_call_calls_record_stop(self, request_mock):
        resource = create_dial()
        resource.create_record_stop(CALL_CONTROL_ID)
        self._assert_action(request_mock, resource, "record_stop")

    def test_can_call_send_dtmf(self, request_mock):
        resource = self._dialed_call()
        resource.send_dtmf(digits="1www2WABCDw9")
        self._assert_action(request_mock, resource, "send_dtmf")

    @pytest.mark.skip
    def test_can_call_calls_send_dtmf(self, request_mock):
        resource = create_dial()
        resource.create_send_dtmf(CALL_CONTROL_ID, digits="1www2WABCDw9")
        self._assert_action(request_mock, resource, "send_dtmf")

    def test_can_call_speak(self, request_mock):
        resource = self._dialed_call()
        resource.speak(
            language="en-US", voice="female", payload="Hello from the other side"
        )
        self._assert_action(request_mock, resource, "speak")

    @pytest.mark.skip
    def test_can_call_calls_speak(self, request_mock):
        resource = create_dial()
        resource.create_speak(
            CALL_CONTROL_ID,
            language="en-US",
            voice="female",
            payload="Hello from the other side",
        )
        self._assert_action(request_mock, resource, "speak")

    def test_can_call_transfer(self, request_mock):
        resource = self._dialed_call()
        resource.transfer(to="+11111222222")
        self._assert_action(request_mock, resource, "transfer")

    @pytest.mark.skip
    def test_can_call_calls_transfer(self, request_mock):
        resource = create_dial()
        resource.create_transfer(CALL_CONTROL_ID, to="+11111222222")
        self._assert_action(request_mock, resource, "transfer")
| 37.31068
| 86
| 0.664411
| 1,373
| 11,529
| 5.225055
| 0.057538
| 0.122665
| 0.144968
| 0.099387
| 0.949122
| 0.942292
| 0.936995
| 0.928352
| 0.885838
| 0.815863
| 0
| 0.012078
| 0.238789
| 11,529
| 308
| 87
| 37.431818
| 0.805378
| 0
| 0
| 0.605166
| 0
| 0
| 0.127158
| 0.085697
| 0
| 0
| 0
| 0
| 0.228782
| 1
| 0.118081
| false
| 0
| 0.01107
| 0.00369
| 0.136531
| 0.00369
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
29c2e16add404371a3e5a4a7860aa0aacd065221
| 542
|
py
|
Python
|
gym_autoencoder/heist/envs/__init__.py
|
neuroevolution-ai/ProcgenAutoencoder
|
2dd0afd491701eff49be00774a7e63b56ff33fb9
|
[
"MIT"
] | 1
|
2021-08-02T12:42:05.000Z
|
2021-08-02T12:42:05.000Z
|
gym_autoencoder/heist/envs/__init__.py
|
neuroevolution-ai/ProcgenAutoencoder
|
2dd0afd491701eff49be00774a7e63b56ff33fb9
|
[
"MIT"
] | 1
|
2021-03-03T10:04:54.000Z
|
2021-03-03T10:04:54.000Z
|
gym_autoencoder/heist/envs/__init__.py
|
neuroevolution-ai/ProcgenAutoencoder
|
2dd0afd491701eff49be00774a7e63b56ff33fb9
|
[
"MIT"
] | null | null | null |
from gym_autoencoder.heist.envs.auto_basic_env import AutoencoderBasicEnv
from gym_autoencoder.heist.envs.auto_no_bottlneck_env import AutoencoderNoBottleneckEnv
from gym_autoencoder.heist.envs.auto_maxpool_big_env import AutoencoderMaxPoolBigEnv
from gym_autoencoder.heist.envs.auto_maxpool_env import AutoencoderMaxPoolEnv
from gym_autoencoder.heist.envs.auto_unpool_env import AutoencoderUnpoolEnv
from gym_autoencoder.heist.envs.vae_paper_env import VaritionalPaperEnv
from gym_autoencoder.heist.envs.vae_alex_env import VaritionalAlexEnv
| 77.428571
| 87
| 0.911439
| 72
| 542
| 6.541667
| 0.333333
| 0.104034
| 0.267516
| 0.341826
| 0.4862
| 0.4862
| 0.161359
| 0
| 0
| 0
| 0
| 0
| 0.049816
| 542
| 7
| 88
| 77.428571
| 0.914563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4b01c3b110543f733d9b658b6cdcf68d3dae8616
| 148
|
py
|
Python
|
pipocoin/services/pipocoin_messages/__init__.py
|
bondiolipietro/pipocoin-python
|
e4abc019c2eb704d70899a2e441ee4be23aaeb4c
|
[
"MIT"
] | 1
|
2021-08-05T23:18:35.000Z
|
2021-08-05T23:18:35.000Z
|
pipocoin/services/pipocoin_messages/__init__.py
|
bondiolipietro/pipocoin-twitter-bot-python
|
e4abc019c2eb704d70899a2e441ee4be23aaeb4c
|
[
"MIT"
] | null | null | null |
pipocoin/services/pipocoin_messages/__init__.py
|
bondiolipietro/pipocoin-twitter-bot-python
|
e4abc019c2eb704d70899a2e441ee4be23aaeb4c
|
[
"MIT"
] | null | null | null |
from . import default
from . import create
from . import delete
from . import transfer
from . import work
from . import stats
from . import balance
| 18.5
| 22
| 0.763514
| 21
| 148
| 5.380952
| 0.428571
| 0.619469
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189189
| 148
| 7
| 23
| 21.142857
| 0.941667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4b1fd10c8c879451e0d57f9624af11c4e3bc77ad
| 116
|
py
|
Python
|
db/test.py
|
Jakubsamurai/pywdbms
|
690d18bbc084962f55b573709c35b45cb78631e8
|
[
"Apache-2.0"
] | null | null | null |
db/test.py
|
Jakubsamurai/pywdbms
|
690d18bbc084962f55b573709c35b45cb78631e8
|
[
"Apache-2.0"
] | null | null | null |
db/test.py
|
Jakubsamurai/pywdbms
|
690d18bbc084962f55b573709c35b45cb78631e8
|
[
"Apache-2.0"
] | null | null | null |
from containers import Databases
from file import load_databases_from_file as load

# Populate the Databases container from the on-disk definitions, then dump
# the loaded entries for manual inspection.
load()
print(Databases.databases)
| 29
| 49
| 0.862069
| 17
| 116
| 5.705882
| 0.470588
| 0.268041
| 0.350515
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094828
| 116
| 4
| 50
| 29
| 0.92381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.25
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
d9b3d5cbaba640474e618632c3f9df7f2608033a
| 7,347
|
py
|
Python
|
WebServer/microservices/dispatcher/unittest/auth_token_test.py
|
AnneEjsing/TrafficDataAnonymisation
|
6ee5b4a46d53a656299d6a53896175b78008228a
|
[
"MIT"
] | 1
|
2020-03-12T13:27:58.000Z
|
2020-03-12T13:27:58.000Z
|
WebServer/microservices/dispatcher/unittest/auth_token_test.py
|
AnneEjsing/TrafficDataAnonymisation
|
6ee5b4a46d53a656299d6a53896175b78008228a
|
[
"MIT"
] | 7
|
2020-04-02T12:47:45.000Z
|
2022-03-02T07:35:49.000Z
|
WebServer/microservices/dispatcher/unittest/auth_token_test.py
|
AnneEjsing/Traffic-Data-Anonymisation-Web
|
6ee5b4a46d53a656299d6a53896175b78008228a
|
[
"MIT"
] | null | null | null |
import unittest2
import sys
import os

# Make the module under test (auth_token.py, one directory up) importable
# when this file is run from the unittest directory.
sys.path.append(os.getcwd() + '/..')

import auth_token
import datetime
import json
class AuthTokenTests(unittest2.TestCase):
    """Tests for auth_token: expiry checks, authentication, claim extraction.

    The JWT literals below are pre-signed fixtures; "pass" tokens carry an
    exp far in the future (2120), "expired" ones an exp in the past (2019),
    and "invalid" ones have the dot between payload and signature removed.
    """

    @classmethod
    def setUpClass(cls):
        # Pin the signing secret the fixture tokens were created with.
        auth_token.secretKey = "test"

    # -- is_not_expired ------------------------------------------------------

    def test_is_not_expired_pass(self):
        """A token whose exp claim is in the future is not expired."""
        token = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiamlkIjoiMTIzIiwicmlnaHRzIjoiYWRtaW4iLCJleHAiOiIyMTIwLTA1LTA0VDIzOjU0OjIzLjIzMjMifQ.RmEnR7ygkmXGiT6k532Zj3kEHdYfiqPzd7zlRVc3XVqM6XpdT44QwOXqvmoGYmSQ6J81VzpR4mzPBqhGud6bZg"
        res = auth_token.is_not_expired(token)
        self.assertTrue(res)

    def test_is_not_expired_fail_expired_token(self):
        """A token whose exp claim is in the past counts as expired."""
        token = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiamlkIjoiMTIzIiwicmlnaHRzIjoiYWRtaW4iLCJleHAiOiIyMDE5LTA1LTA0VDIzOjU0OjIzLjIzMjMifQ.ki7a9Fg3e6IcfOFFYqDOEj-tTdqhNmmzX769dqpwaXbcJEmgnEKPbLqR80_aEO_FNMINWZLV7vtPn94HByAdKw"
        res = auth_token.is_not_expired(token)
        self.assertFalse(res)

    def test_is_not_expired_fail_invalid_token(self):
        """A malformed token (missing payload/signature dot) is rejected."""
        token = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiamlkIjoiMTIzIiwicmlnaHRzIjoiYWRtaW4iLCJleHAiOiIyMTIwLTA1LTA0VDIzOjU0OjIzLjIzMjMifQRmEnR7ygkmXGiT6k532Zj3kEHdYfiqPzd7zlRVc3XVqM6XpdT44QwOXqvmoGYmSQ6J81VzpR4mzPBqhGud6bZg"
        res = auth_token.is_not_expired(token)
        self.assertFalse(res)

    # -- authenticate --------------------------------------------------------

    def test_authenticate_pass(self):
        token = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiamlkIjoiMTIzIiwicmlnaHRzIjoiYWRtaW4iLCJleHAiOiIyMTIwLTA1LTA0VDIzOjU0OjIzLjIzMjMifQ.deQB3qsSJYzYAeyWlfoX9MIG1sMx1vEo9SHVQuj7_P7Sn655I-93Ng4A0WsdfGrMYY0LV3dQaJjxrXnaojVMPA"
        res = auth_token.authenticate(token)
        self.assertTrue(res)

    def test_authenticate_invalid_token(self):
        token = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiamlkIjoiMTIzIiwicmlnaHRzIjoiYWRtaW4iLCJleHAiOiIyMTIwLTA1LTA0VDIzOjU0OjIzLjIzMjMifQRmEnR7ygkmXGiT6k532Zj3kEHdYfiqPzd7zlRVc3XVqM6XpdT44QwOXqvmoGYmSQ6J81VzpR4mzPBqhGud6bZg"
        res = auth_token.authenticate(token)
        self.assertFalse(res)

    def test_authenticate_wrong_secret(self):
        # Signed with a different secret than the one set in setUpClass.
        token = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiamlkIjoiMTIzIiwicmlnaHRzIjoiYWRtaW4iLCJleHAiOiIyMTIwLTA1LTA0VDIzOjU0OjIzLjIzMjMifQ.RmEnR7ygkmXGiT6k532Zj3kEHdYfiqPzd7zlRVc3XVqM6XpdT44QwOXqvmoGYmSQ6J81VzpR4mzPBqhGud6bZg"
        res = auth_token.authenticate(token)
        self.assertFalse(res)

    # -- verify_token --------------------------------------------------------

    def test_verify_token_admin_pass(self):
        token = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiamlkIjoiMTIzIiwicmlnaHRzIjoiYWRtaW4iLCJleHAiOiIyMTIwLTA1LTA0VDIzOjU0OjIzLjIzMjMifQ.deQB3qsSJYzYAeyWlfoX9MIG1sMx1vEo9SHVQuj7_P7Sn655I-93Ng4A0WsdfGrMYY0LV3dQaJjxrXnaojVMPA"
        rights = 'admin'
        expected = (True,200)
        res = auth_token.verify_token(token,rights)
        self.assertEqual(expected, res)

    def test_verify_token_user_pass(self):
        token = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiamlkIjoiMTIzIiwicmlnaHRzIjoidXNlciIsImV4cCI6IjIxMjAtMDUtMDRUMjM6NTQ6MjMuMjMyMyJ9.zayLZxR_D199MU8VpvhHiLO85fKm6td3ugdbi5Y7lGTLU9KJHIthSOpo-ydaZinwbGLKznCi-BDzYIESdr-aoA"
        rights = 'user'
        expected = (True,200)
        res = auth_token.verify_token(token,rights)
        self.assertEqual(expected, res)

    def test_verify_token_fail_wrong_token(self):
        """An unverifiable token yields 401 regardless of rights asked for."""
        token = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiamlkIjoiMTIzIiwicmlnaHRzIjoiY.WRtaW4iLCJleHAiOiIyMTIwLTA1LTA0VDIzOjU0OjIzLjIzMjMifQRmEnR7ygkmXGiT6k532Zj3kEHdYfiqPzd7zlRVc3XVqM6XpdT44QwOXqvmoGYmSQ6J81VzpR4mzPBqhGud6bZg"
        rights = ""
        expected = (False, 401)
        res = auth_token.verify_token(token, rights)
        self.assertEqual(expected, res)

    def test_verify_token_fail_user_is_not_admin(self):
        """A valid token with insufficient rights yields 403."""
        token = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiamlkIjoiMTIzIiwicmlnaHRzIjoidXNlciIsImV4cCI6IjIxMjAtMDUtMDRUMjM6NTQ6MjMuMjMyMyJ9.zayLZxR_D199MU8VpvhHiLO85fKm6td3ugdbi5Y7lGTLU9KJHIthSOpo-ydaZinwbGLKznCi-BDzYIESdr-aoA"
        rights = "admin"
        expected = (False, 403)
        res = auth_token.verify_token(token, rights)
        self.assertEqual(expected, res)

    def test_verify_token_fail_admin_is_not_user(self):
        # NOTE(review): rights matching appears to be exact, not hierarchical
        # (admin does not imply user) — confirm against verify_token.
        token = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiamlkIjoiMTIzIiwicmlnaHRzIjoiYWRtaW4iLCJleHAiOiIyMTIwLTA1LTA0VDIzOjU0OjIzLjIzMjMifQ.deQB3qsSJYzYAeyWlfoX9MIG1sMx1vEo9SHVQuj7_P7Sn655I-93Ng4A0WsdfGrMYY0LV3dQaJjxrXnaojVMPA"
        rights = "user"
        expected = (False, 403)
        res = auth_token.verify_token(token, rights)
        self.assertEqual(expected, res)

    # -- claim extraction ----------------------------------------------------

    def test_get_user_id_pass(self):
        token = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiamlkIjoiMTIzIiwicmlnaHRzIjoidXNlciIsImV4cCI6IjIxMjAtMDUtMDRUMjM6NTQ6MjMuMjMyMyJ9.6VTtr_0f4LAwmiGoHLl43PiXmky82GWT3KSEO3EuQ5jI3Lo1z5GmcgJW2wCiSuFhwz_R8bAGzwXmQl_reNRHNg"
        expected = "1234567890"
        res = auth_token.get_user_id(token)
        self.assertEqual(res,expected)

    def test_get_user_id_wrong_token(self):
        token = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiamlkIjoiMTIzIiwicmlnaHRzIjoidXNlciIsImV4cCI6IjIxMjAtMDUtMDRUMjM6NTQ6MjMuMjMyMyJ96VTtr_0f4LAwmiGoHLl43PiXmky82GWT3KSEO3EuQ5jI3Lo1z5GmcgJW2wCiSuFhwz_R8bAGzwXmQl_reNRHNg"
        expected = None
        res = auth_token.get_user_id(token)
        self.assertEqual(res,expected)

    def test_get_rights_pass(self):
        token = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiamlkIjoiMTIzIiwicmlnaHRzIjoidXNlciIsImV4cCI6IjIxMjAtMDUtMDRUMjM6NTQ6MjMuMjMyMyJ9.6VTtr_0f4LAwmiGoHLl43PiXmky82GWT3KSEO3EuQ5jI3Lo1z5GmcgJW2wCiSuFhwz_R8bAGzwXmQl_reNRHNg"
        expected = "user"
        res = auth_token.get_rights(token)
        self.assertEqual(res,expected)

    def test_get_rights_wrong_token(self):
        token = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiamlkIjoiMTIzIiwicmlnaHRzIjoidXNlciIsImV4cCI6IjIxMjAtMDUtMDRUMjM6NTQ6MjMuMjMyMyJ96VTtr_0f4LAwmiGoHLl43PiXmky82GWT3KSEO3EuQ5jI3Lo1z5GmcgJW2wCiSuFhwz_R8bAGzwXmQl_reNRHNg"
        expected = None
        res = auth_token.get_rights(token)
        self.assertEqual(res,expected)

    def test_is_authorized_wrong_token(self):
        token = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiamlkIjoiMTIzIiwicmlnaHRzIjoidXNlciIsImV4cCI6IjIxMjAtMDUtMDRUMjM6NTQ6MjMuMjMyMyJ96VTtr_0f4LAwmiGoHLl43PiXmky82GWT3KSEO3EuQ5jI3Lo1z5GmcgJW2wCiSuFhwz_R8bAGzwXmQl_reNRHNg"
        expected = False
        res = auth_token.is_authorized(token,'admin')
        self.assertEqual(res,expected)

    def test_create_payload(self):
        """The generated payload carries an expiry later than creation time."""
        now = datetime.datetime.utcnow()
        res = auth_token.create_payload(1,'admin')
        res = json.loads(res)
        dt = datetime.datetime.strptime(res['exp'], '%Y-%m-%dT%H:%M:%S.%f')
        self.assertTrue(dt > now)

    def test_encode_not_bytes(self):
        # 'aGVq' is the base64 encoding of "hej"; encode must accept str input.
        string = "hej"
        expected = 'aGVq'
        res = auth_token.encode(string)
        self.assertEqual(res,expected)
# Allow running this module directly, outside a test runner.
if __name__ == "__main__":
    unittest2.main()
| 57.850394
| 252
| 0.80999
| 505
| 7,347
| 11.49703
| 0.192079
| 0.031002
| 0.037203
| 0.060282
| 0.798829
| 0.782122
| 0.770754
| 0.770754
| 0.540992
| 0.436962
| 0
| 0.065723
| 0.134341
| 7,347
| 127
| 253
| 57.850394
| 0.84717
| 0.007486
| 0
| 0.504854
| 0
| 0
| 0.523052
| 0.511114
| 0
| 0
| 0
| 0
| 0.174757
| 1
| 0.184466
| false
| 0.058252
| 0.058252
| 0
| 0.252427
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
d9c4f054bae09ac38f7758c4a499d8e3b64bc7c4
| 47
|
py
|
Python
|
markdown_it/extensions/anchors/__init__.py
|
wna-se/markdown-it-py
|
a7e3d3b436a00f3303aab03f81ba32de53a3ba71
|
[
"MIT"
] | 32
|
2021-05-20T04:11:11.000Z
|
2022-03-15T09:33:42.000Z
|
markdown_it/extensions/anchors/__init__.py
|
wna-se/markdown-it-py
|
a7e3d3b436a00f3303aab03f81ba32de53a3ba71
|
[
"MIT"
] | 41
|
2020-12-14T18:58:51.000Z
|
2022-03-02T14:19:43.000Z
|
markdown_it/extensions/anchors/__init__.py
|
wna-se/markdown-it-py
|
a7e3d3b436a00f3303aab03f81ba32de53a3ba71
|
[
"MIT"
] | 12
|
2020-12-14T21:49:37.000Z
|
2022-02-08T13:21:29.000Z
|
from .index import anchors_plugin # noqa F401
| 23.5
| 46
| 0.787234
| 7
| 47
| 5.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0.170213
| 47
| 1
| 47
| 47
| 0.846154
| 0.191489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d9ca51718d1073e71a2dbca32d7c5af867e1ca88
| 526
|
py
|
Python
|
cbvblog/cbvblog/settings/partials/auth.py
|
LeoHeo/django-rmrf-init
|
cfef624e33a856c1d68f250750521298ea6e5175
|
[
"MIT"
] | null | null | null |
cbvblog/cbvblog/settings/partials/auth.py
|
LeoHeo/django-rmrf-init
|
cfef624e33a856c1d68f250750521298ea6e5175
|
[
"MIT"
] | null | null | null |
cbvblog/cbvblog/settings/partials/auth.py
|
LeoHeo/django-rmrf-init
|
cfef624e33a856c1d68f250750521298ea6e5175
|
[
"MIT"
] | null | null | null |
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators

# Enable Django's four built-in password validators; each entry is a dict
# whose 'NAME' is the validator's dotted path.
AUTH_PASSWORD_VALIDATORS = [
    {'NAME': 'django.contrib.auth.password_validation.%s' % validator}
    for validator in (
        'UserAttributeSimilarityValidator',
        'MinimumLengthValidator',
        'CommonPasswordValidator',
        'NumericPasswordValidator',
    )
]
| 29.222222
| 91
| 0.693916
| 45
| 526
| 7.977778
| 0.444444
| 0.200557
| 0.189415
| 0.233983
| 0.43454
| 0.43454
| 0
| 0
| 0
| 0
| 0
| 0.004587
| 0.171103
| 526
| 17
| 92
| 30.941176
| 0.818807
| 0.180608
| 0
| 0
| 0
| 0
| 0.648712
| 0.611241
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
d9e80f3e87d620312ba1bdf1abecf779ad9c086f
| 23,943
|
py
|
Python
|
tests/third_party/cupy/sorting_tests/test_search.py
|
Rubtsowa/dpnp
|
ef404c0f284b0c508ed1e556e140f02f76ae5551
|
[
"BSD-2-Clause"
] | 37
|
2020-09-08T00:38:52.000Z
|
2022-03-18T01:44:10.000Z
|
tests/third_party/cupy/sorting_tests/test_search.py
|
Rubtsowa/dpnp
|
ef404c0f284b0c508ed1e556e140f02f76ae5551
|
[
"BSD-2-Clause"
] | 432
|
2020-09-07T09:48:41.000Z
|
2022-03-25T17:50:55.000Z
|
tests/third_party/cupy/sorting_tests/test_search.py
|
Rubtsowa/dpnp
|
ef404c0f284b0c508ed1e556e140f02f76ae5551
|
[
"BSD-2-Clause"
] | 17
|
2020-09-07T10:00:34.000Z
|
2022-03-25T13:53:43.000Z
|
import unittest
import numpy
import pytest
import dpnp as cupy
from tests.third_party.cupy import testing
# from cupy.core import _accelerator
@testing.gpu
class TestSearch(unittest.TestCase):
    """argmax/argmin tests comparing dpnp results against NumPy.

    Fix: ``test_argmin_zero_size`` had a stray ``return`` inside the
    ``pytest.raises`` block, inconsistent with its ``argmax`` sibling;
    the call is now a plain statement.
    """

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmax_all(self, xp, dtype):
        a = testing.shaped_random((2, 3), xp, dtype)
        return a.argmax()

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_external_argmax_all(self, xp, dtype):
        a = testing.shaped_random((2, 3), xp, dtype)
        return xp.argmax(a)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_argmax_nan(self, xp, dtype):
        a = xp.array([float('nan'), -1, 1], dtype)
        return a.argmax()

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmax_axis_large(self, xp, dtype):
        a = testing.shaped_random((3, 1000), xp, dtype)
        return a.argmax(axis=0)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_external_argmax_axis_large(self, xp, dtype):
        a = testing.shaped_random((3, 1000), xp, dtype)
        return xp.argmax(a, axis=0)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmax_axis0(self, xp, dtype):
        a = testing.shaped_random((2, 3, 4), xp, dtype)
        return a.argmax(axis=0)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmax_axis1(self, xp, dtype):
        a = testing.shaped_random((2, 3, 4), xp, dtype)
        return a.argmax(axis=1)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmax_axis2(self, xp, dtype):
        a = testing.shaped_random((2, 3, 4), xp, dtype)
        return a.argmax(axis=2)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmax_tie(self, xp, dtype):
        # Ties resolve to the first maximal index in both backends.
        a = xp.array([0, 5, 2, 3, 4, 5], dtype)
        return a.argmax()

    @testing.for_all_dtypes(no_complex=True)
    def test_argmax_zero_size(self, dtype):
        for xp in (numpy, cupy):
            a = testing.shaped_random((0, 1), xp, dtype)
            with pytest.raises(ValueError):
                a.argmax()

    @testing.for_all_dtypes(no_complex=True)
    def test_argmax_zero_size_axis0(self, dtype):
        for xp in (numpy, cupy):
            a = testing.shaped_random((0, 1), xp, dtype)
            with pytest.raises(ValueError):
                a.argmax(axis=0)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmax_zero_size_axis1(self, xp, dtype):
        a = testing.shaped_random((0, 1), xp, dtype)
        return a.argmax(axis=1)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmin_all(self, xp, dtype):
        a = testing.shaped_random((2, 3), xp, dtype)
        return a.argmin()

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_argmin_nan(self, xp, dtype):
        a = xp.array([float('nan'), -1, 1], dtype)
        return a.argmin()

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_external_argmin_all(self, xp, dtype):
        a = testing.shaped_random((2, 3), xp, dtype)
        return xp.argmin(a)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmin_axis_large(self, xp, dtype):
        a = testing.shaped_random((3, 1000), xp, dtype)
        return a.argmin(axis=0)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_external_argmin_axis_large(self, xp, dtype):
        a = testing.shaped_random((3, 1000), xp, dtype)
        return xp.argmin(a, axis=0)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmin_axis0(self, xp, dtype):
        a = testing.shaped_random((2, 3, 4), xp, dtype)
        return a.argmin(axis=0)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmin_axis1(self, xp, dtype):
        a = testing.shaped_random((2, 3, 4), xp, dtype)
        return a.argmin(axis=1)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmin_axis2(self, xp, dtype):
        a = testing.shaped_random((2, 3, 4), xp, dtype)
        return a.argmin(axis=2)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmin_tie(self, xp, dtype):
        # Ties resolve to the first minimal index in both backends.
        a = xp.array([0, 1, 2, 3, 0, 5], dtype)
        return a.argmin()

    @testing.for_all_dtypes(no_complex=True)
    def test_argmin_zero_size(self, dtype):
        for xp in (numpy, cupy):
            a = testing.shaped_random((0, 1), xp, dtype)
            with pytest.raises(ValueError):
                a.argmin()

    @testing.for_all_dtypes(no_complex=True)
    def test_argmin_zero_size_axis0(self, dtype):
        for xp in (numpy, cupy):
            a = testing.shaped_random((0, 1), xp, dtype)
            with pytest.raises(ValueError):
                a.argmin(axis=0)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_argmin_zero_size_axis1(self, xp, dtype):
        a = testing.shaped_random((0, 1), xp, dtype)
        return a.argmin(axis=1)
# This class compares CUB results against NumPy's
# TODO(leofang): test axis after support is added
# @testing.parameterize(*testing.product({
# 'shape': [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)],
# 'order': ('C', 'F'),
# }))
# @testing.gpu
# @unittest.skipUnless(cupy.cuda.cub.available, 'The CUB routine is not enabled')
# class TestCubReduction(unittest.TestCase):
# def setUp(self):
# self.old_accelerators = _accelerator.get_routine_accelerators()
# _accelerator.set_routine_accelerators(['cub'])
# def tearDown(self):
# _accelerator.set_routine_accelerators(self.old_accelerators)
# @testing.for_dtypes('bhilBHILefdFD')
# @testing.numpy_cupy_allclose(rtol=1E-5)
# def test_cub_argmin(self, xp, dtype):
# a = testing.shaped_random(self.shape, xp, dtype)
# if self.order == 'C':
# a = xp.ascontiguousarray(a)
# else:
# a = xp.asfortranarray(a)
# if xp is numpy:
# return a.argmin()
# # xp is cupy, first ensure we really use CUB
# ret = cupy.empty(()) # Cython checks return type, need to fool it
# func = 'cupy.core._routines_statistics.cub.device_reduce'
# with testing.AssertFunctionIsCalled(func, return_value=ret):
# a.argmin()
# # ...then perform the actual computation
# return a.argmin()
# @testing.for_dtypes('bhilBHILefdFD')
# @testing.numpy_cupy_allclose(rtol=1E-5)
# def test_cub_argmax(self, xp, dtype):
# a = testing.shaped_random(self.shape, xp, dtype)
# if self.order == 'C':
# a = xp.ascontiguousarray(a)
# else:
# a = xp.asfortranarray(a)
# if xp is numpy:
# return a.argmax()
# # xp is cupy, first ensure we really use CUB
# ret = cupy.empty(()) # Cython checks return type, need to fool it
# func = 'cupy.core._routines_statistics.cub.device_reduce'
# with testing.AssertFunctionIsCalled(func, return_value=ret):
# a.argmax()
# # ...then perform the actual computation
# return a.argmax()
@testing.gpu
@testing.parameterize(*testing.product({
    'func': ['argmin', 'argmax'],
    'is_module': [True, False],
    'shape': [(3, 4), ()],
}))
class TestArgMinMaxDtype(unittest.TestCase):
    """Check that argmin/argmax honor an explicit integer result ``dtype``."""

    @testing.for_dtypes(
        dtypes=[numpy.int8, numpy.int16, numpy.int32, numpy.int64],
        name='result_dtype')
    @testing.for_all_dtypes(name='in_dtype')
    def test_argminmax_dtype(self, in_dtype, result_dtype):
        arr = testing.shaped_random(self.shape, cupy, in_dtype)
        # Exercise both the module-level function and the ndarray method.
        if self.is_module:
            out = getattr(cupy, self.func)(arr, dtype=result_dtype)
        else:
            out = getattr(arr, self.func)(dtype=result_dtype)
        assert out.shape == ()
        assert out.dtype == result_dtype
@testing.parameterize(
    {'cond_shape': (2, 3, 4), 'x_shape': (2, 3, 4), 'y_shape': (2, 3, 4)},
    {'cond_shape': (4,), 'x_shape': (2, 3, 4), 'y_shape': (2, 3, 4)},
    {'cond_shape': (2, 3, 4), 'x_shape': (2, 3, 4), 'y_shape': (3, 4)},
    {'cond_shape': (3, 4), 'x_shape': (2, 3, 4), 'y_shape': (4,)},
)
@testing.gpu
class TestWhereTwoArrays(unittest.TestCase):
    """where(cond, x, y) with broadcasting across mismatched operand shapes."""

    @testing.for_all_dtypes_combination(
        names=['cond_type', 'x_type', 'y_type'])
    @testing.numpy_cupy_allclose()
    def test_where_two_arrays(self, xp, cond_type, x_type, y_type):
        mask = testing.shaped_random(self.cond_shape, xp, xp.bool_)
        # shaped_random rarely yields zeros, so thin the condition out with a
        # random boolean mask to actually exercise both selection branches.
        cond = testing.shaped_random(self.cond_shape, xp, cond_type) * mask
        lhs = testing.shaped_random(self.x_shape, xp, x_type, seed=0)
        rhs = testing.shaped_random(self.y_shape, xp, y_type, seed=1)
        return xp.where(cond, lhs, rhs)
@testing.parameterize(
    {'cond_shape': (2, 3, 4)},
    {'cond_shape': (4,)},
    {'cond_shape': (2, 3, 4)},
    {'cond_shape': (3, 4)},
)
@testing.gpu
class TestWhereCond(unittest.TestCase):
    """Single-argument where(cond): returns indices of nonzero entries."""

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_where_cond(self, xp, dtype):
        mask = testing.shaped_random(self.cond_shape, xp, xp.bool_)
        # Thin the condition out so that zero and nonzero entries both occur.
        cond = testing.shaped_random(self.cond_shape, xp, dtype) * mask
        return xp.where(cond)
@testing.gpu
class TestWhereError(unittest.TestCase):
    """where with a condition plus only one choice array must raise."""

    def test_one_argument(self):
        for xp in (numpy, cupy):
            cond = testing.shaped_random((3, 4), xp, dtype=xp.bool_)
            choice = testing.shaped_random((2, 3, 4), xp, xp.int32)
            with pytest.raises(ValueError):
                xp.where(cond, choice)
@testing.parameterize(
    {'array': numpy.empty((0,))},
    {'array': numpy.empty((0, 2))},
    {'array': numpy.empty((0, 2, 0))},
)
@testing.gpu
class TestNonzero(unittest.TestCase):
    """nonzero on empty arrays of various ranks."""

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_nonzero(self, xp, dtype):
        arr = xp.array(self.array, dtype=dtype)
        return xp.nonzero(arr)
@testing.parameterize(
    {'array': numpy.array(0)},
    {'array': numpy.array(1)},
)
@testing.gpu
@testing.with_requires('numpy>=1.17.0')
class TestNonzeroZeroDimension(unittest.TestCase):
    """nonzero on 0-d input is deprecated and must warn on both backends."""

    @testing.for_all_dtypes()
    def test_nonzero(self, dtype):
        for xp in (numpy, cupy):
            arr = xp.array(self.array, dtype=dtype)
            with pytest.raises(DeprecationWarning):
                xp.nonzero(arr)
@testing.parameterize(
    {'array': numpy.array(0)},
    {'array': numpy.array(1)},
    {'array': numpy.empty((0,))},
    {'array': numpy.empty((0, 2))},
    {'array': numpy.empty((0, 2, 0))},
)
@testing.gpu
class TestFlatNonzero(unittest.TestCase):
    """flatnonzero over 0-d and empty inputs."""

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_flatnonzero(self, xp, dtype):
        arr = xp.array(self.array, dtype=dtype)
        return xp.flatnonzero(arr)
@testing.parameterize(
    {'array': numpy.empty((0,))},
    {'array': numpy.empty((0, 2))},
    {'array': numpy.empty((0, 2, 0))},
)
@testing.gpu
class TestArgwhere(unittest.TestCase):
    """argwhere over empty inputs of various ranks."""

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_argwhere(self, xp, dtype):
        arr = xp.array(self.array, dtype=dtype)
        return xp.argwhere(arr)
# DPNP_BUG
# dpnp/backend.pyx:86: in dpnp.backend.dpnp_array
# raise TypeError(f"Intel NumPy array(): Unsupported non-sequence obj={type(obj)}")
# E TypeError: Intel NumPy array(): Unsupported non-sequence obj=<class 'int'>
# @testing.parameterize(
# {'array': cupy.array(1)},
# )
# @testing.gpu
# class TestArgwhereZeroDimension(unittest.TestCase):
# def test_argwhere(self):
# with testing.assert_warns(DeprecationWarning):
# return cupy.nonzero(self.array)
@testing.gpu
class TestNanArgMin(unittest.TestCase):
    """nanargmin: NaNs are skipped; an all-NaN input raises ValueError."""

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmin_all(self, xp, dtype):
        arr = testing.shaped_random((2, 3), xp, dtype)
        return xp.nanargmin(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_nanargmin_nan(self, xp, dtype):
        arr = xp.array([float('nan'), -1, 1], dtype)
        return xp.nanargmin(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_nanargmin_nan2(self, xp, dtype):
        arr = xp.array([float('nan'), float('nan'), -1, 1], dtype)
        return xp.nanargmin(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_nanargmin_nan3(self, xp, dtype):
        arr = xp.array([float('nan'), float('nan'), -1, 1, 1.0, -2.0], dtype)
        return xp.nanargmin(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_nanargmin_nan4(self, xp, dtype):
        arr = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan')], dtype)
        return xp.nanargmin(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_nanargmin_nan5(self, xp, dtype):
        arr = xp.array(
            [-1, 1, 1.0, -2.0, float('nan'), float('nan'), -1, 1], dtype)
        return xp.nanargmin(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmin_axis_large(self, xp, dtype):
        arr = testing.shaped_random((3, 1000), xp, dtype)
        return xp.nanargmin(arr, axis=0)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmin_axis0(self, xp, dtype):
        arr = testing.shaped_random((2, 3, 4), xp, dtype)
        return xp.nanargmin(arr, axis=0)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmin_axis1(self, xp, dtype):
        arr = testing.shaped_random((2, 3, 4), xp, dtype)
        return xp.nanargmin(arr, axis=1)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmin_axis2(self, xp, dtype):
        arr = testing.shaped_random((2, 3, 4), xp, dtype)
        return xp.nanargmin(arr, axis=2)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmin_tie(self, xp, dtype):
        # Ties must resolve to the first occurrence on both backends.
        arr = xp.array([0, 5, 2, 3, 4, 5], dtype)
        return xp.nanargmin(arr)

    @testing.for_all_dtypes(no_complex=True)
    def test_nanargmin_zero_size(self, dtype):
        for xp in (numpy, cupy):
            arr = testing.shaped_random((0, 1), xp, dtype)
            with pytest.raises(ValueError):
                xp.nanargmin(arr)

    @testing.for_all_dtypes(no_complex=True)
    def test_nanargmin_zero_size_axis0(self, dtype):
        for xp in (numpy, cupy):
            arr = testing.shaped_random((0, 1), xp, dtype)
            with pytest.raises(ValueError):
                xp.nanargmin(arr, axis=0)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmin_zero_size_axis1(self, xp, dtype):
        # Reducing over the non-empty axis of a (0, 1) array is valid.
        arr = testing.shaped_random((0, 1), xp, dtype)
        return xp.nanargmin(arr, axis=1)
@testing.gpu
class TestNanArgMax(unittest.TestCase):
    """nanargmax: NaNs are skipped; an all-NaN input raises ValueError."""

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmax_all(self, xp, dtype):
        arr = testing.shaped_random((2, 3), xp, dtype)
        return xp.nanargmax(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_nanargmax_nan(self, xp, dtype):
        arr = xp.array([float('nan'), -1, 1], dtype)
        return xp.nanargmax(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_nanargmax_nan2(self, xp, dtype):
        arr = xp.array([float('nan'), float('nan'), -1, 1], dtype)
        return xp.nanargmax(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_nanargmax_nan3(self, xp, dtype):
        arr = xp.array([float('nan'), float('nan'), -1, 1, 1.0, -2.0], dtype)
        return xp.nanargmax(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_nanargmax_nan4(self, xp, dtype):
        arr = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan')], dtype)
        return xp.nanargmax(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose(accept_error=ValueError)
    def test_nanargmax_nan5(self, xp, dtype):
        arr = xp.array(
            [-1, 1, 1.0, -2.0, float('nan'), float('nan'), -1, 1], dtype)
        return xp.nanargmax(arr)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmax_axis_large(self, xp, dtype):
        arr = testing.shaped_random((3, 1000), xp, dtype)
        return xp.nanargmax(arr, axis=0)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmax_axis0(self, xp, dtype):
        arr = testing.shaped_random((2, 3, 4), xp, dtype)
        return xp.nanargmax(arr, axis=0)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmax_axis1(self, xp, dtype):
        arr = testing.shaped_random((2, 3, 4), xp, dtype)
        return xp.nanargmax(arr, axis=1)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmax_axis2(self, xp, dtype):
        arr = testing.shaped_random((2, 3, 4), xp, dtype)
        return xp.nanargmax(arr, axis=2)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmax_tie(self, xp, dtype):
        # Ties must resolve to the first occurrence on both backends.
        arr = xp.array([0, 5, 2, 3, 4, 5], dtype)
        return xp.nanargmax(arr)

    @testing.for_all_dtypes(no_complex=True)
    def test_nanargmax_zero_size(self, dtype):
        for xp in (numpy, cupy):
            arr = testing.shaped_random((0, 1), xp, dtype)
            with pytest.raises(ValueError):
                xp.nanargmax(arr)

    @testing.for_all_dtypes(no_complex=True)
    def test_nanargmax_zero_size_axis0(self, dtype):
        for xp in (numpy, cupy):
            arr = testing.shaped_random((0, 1), xp, dtype)
            with pytest.raises(ValueError):
                xp.nanargmax(arr, axis=0)

    @testing.for_all_dtypes(no_complex=True)
    @testing.numpy_cupy_allclose()
    def test_nanargmax_zero_size_axis1(self, xp, dtype):
        # Reducing over the non-empty axis of a (0, 1) array is valid.
        arr = testing.shaped_random((0, 1), xp, dtype)
        return xp.nanargmax(arr, axis=1)
@testing.gpu
@testing.parameterize(*testing.product(
    {'bins': [
        [],
        [0, 1, 2, 4, 10],
        [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        [0.0, 1.0, 2.5, 4.0, 10.0],
        [-1.0, 1.0, 2.5, 4.0, 20.0],
        [1.5, 2.5, 4.0, 6.0],
        [float('-inf'), 1.5, 2.5, 4.0, 6.0],
        [1.5, 2.5, 4.0, 6.0, float('inf')],
        [float('-inf'), 1.5, 2.5, 4.0, 6.0, float('inf')],
        [0.0, 1.0, 1.0, 4.0, 4.0, 10.0],
        [0.0, 1.0, 1.0, 4.0, 4.0, 4.0, 4.0, 10.0],
    ],
     'side': ['left', 'right'],
     'shape': [(), (10,), (6, 3, 3)]})
)
class TestSearchSorted(unittest.TestCase):
    """searchsorted over assorted bin edges, sides, and input shapes."""

    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_array_equal()
    def test_searchsorted(self, xp, dtype):
        values = testing.shaped_arange(self.shape, xp, dtype)
        edges = xp.array(self.bins)
        return (xp.searchsorted(edges, values, side=self.side),)
@testing.gpu
@testing.parameterize(
    {'side': 'left'},
    {'side': 'right'})
class TestSearchSortedNanInf(unittest.TestCase):
    """searchsorted with NaN/inf in either the bins or the searched values."""

    @testing.numpy_cupy_array_equal()
    def test_searchsorted_nanbins(self, xp):
        values = testing.shaped_arange((10,), xp, xp.float64)
        edges = xp.array([0, 1, 2, 4, 10, float('nan')])
        return (xp.searchsorted(edges, values, side=self.side),)

    @testing.numpy_cupy_array_equal()
    def test_searchsorted_nan(self, xp):
        values = testing.shaped_arange((10,), xp, xp.float64)
        values[5] = float('nan')
        edges = xp.array([0, 1, 2, 4, 10])
        return (xp.searchsorted(edges, values, side=self.side),)

    # DPNP_BUG
    # Segmentation fault on access to negative index # x[-1] = float('nan') #######
    # @testing.numpy_cupy_array_equal()
    # def test_searchsorted_nan_last(self, xp):
    #     x = testing.shaped_arange((10,), xp, xp.float64)
    #     x[-1] = float('nan')
    #     bins = xp.array([0, 1, 2, 4, float('nan')])
    #     y = xp.searchsorted(bins, x, side=self.side)
    #     return y,
    # @testing.numpy_cupy_array_equal()
    # def test_searchsorted_nan_last_repeat(self, xp):
    #     x = testing.shaped_arange((10,), xp, xp.float64)
    #     x[-1] = float('nan')
    #     bins = xp.array([0, 1, 2, float('nan'), float('nan')])
    #     y = xp.searchsorted(bins, x, side=self.side)
    #     return y,
    # @testing.numpy_cupy_array_equal()
    # def test_searchsorted_all_nans(self, xp):
    #     x = testing.shaped_arange((10,), xp, xp.float64)
    #     x[-1] = float('nan')
    #     bins = xp.array([float('nan'), float('nan'), float('nan'),
    #                      float('nan'), float('nan')])
    #     y = xp.searchsorted(bins, x, side=self.side)
    #     return y,
    ###########################################################################

    @testing.numpy_cupy_array_equal()
    def test_searchsorted_inf(self, xp):
        values = testing.shaped_arange((10,), xp, xp.float64)
        values[5] = float('inf')
        edges = xp.array([0, 1, 2, 4, 10])
        return (xp.searchsorted(edges, values, side=self.side),)

    @testing.numpy_cupy_array_equal()
    def test_searchsorted_minf(self, xp):
        values = testing.shaped_arange((10,), xp, xp.float64)
        values[5] = float('-inf')
        edges = xp.array([0, 1, 2, 4, 10])
        return (xp.searchsorted(edges, values, side=self.side),)
@testing.gpu
class TestSearchSortedInvalid(unittest.TestCase):
    # Can't test unordered 1-d bins: NumPy's behavior for unsorted bins is
    # undefined, so only the multi-dimensional bins error path is checked.

    def test_searchsorted_ndbins(self):
        for xp in (numpy, cupy):
            values = testing.shaped_arange((10,), xp, xp.float64)
            edges = xp.array([[10, 4], [2, 1], [7, 8]])
            with pytest.raises(ValueError):
                xp.searchsorted(edges, values)
@testing.gpu
class TestSearchSortedWithSorter(unittest.TestCase):
    """searchsorted with an explicit ``sorter`` permutation argument."""

    @testing.numpy_cupy_array_equal()
    def test_sorter(self, xp):
        values = testing.shaped_arange((12,), xp, xp.float64)
        edges = xp.array([10, 4, 2, 1, 8])
        order = xp.array([3, 2, 1, 4, 0])
        return (xp.searchsorted(edges, values, sorter=order),)

    def test_invalid_sorter(self):
        # A sorter whose length differs from the bins must be rejected.
        for xp in (numpy, cupy):
            values = testing.shaped_arange((12,), xp, xp.float64)
            edges = xp.array([10, 4, 2, 1, 8])
            order = xp.array([0])
            with pytest.raises(ValueError):
                xp.searchsorted(edges, values, sorter=order)

    def test_nonint_sorter(self):
        # The sorter must be an integer array.
        for xp in (numpy, cupy):
            values = testing.shaped_arange((12,), xp, xp.float64)
            edges = xp.array([10, 4, 2, 1, 8])
            order = xp.array([], dtype=xp.float64)
            with pytest.raises(TypeError):
                xp.searchsorted(edges, values, sorter=order)
| 34.649783
| 87
| 0.624191
| 3,348
| 23,943
| 4.275687
| 0.070789
| 0.044988
| 0.054488
| 0.079637
| 0.825079
| 0.821376
| 0.796717
| 0.767167
| 0.747887
| 0.7365
| 0
| 0.030388
| 0.226204
| 23,943
| 690
| 88
| 34.7
| 0.742268
| 0.13599
| 0
| 0.621118
| 0
| 0
| 0.020045
| 0
| 0
| 0
| 0
| 0.001449
| 0.004141
| 1
| 0.142857
| false
| 0
| 0.010352
| 0
| 0.304348
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d9fe4a65295ec799d12fe0854e265d530dd1ac0c
| 440
|
py
|
Python
|
python/anyascii/_data/_008.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
python/anyascii/_data/_008.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
python/anyascii/_data/_008.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
b="' b g d h w z h. t. y k l m n s ` f s. q r sh t ` ` y y e e a a a a a a a a u u i i i o ; : .. <. <: ? . -< -. -: =: |: / . ... a b g d h u z h. t. i k l m n s. ` p s q r sh t d. kd. ` ' , n j n t n n bh r l l s b b c t vb gb zl mv y ny r w y dz ts k u z n k mb mp t nr ny f q n # e o ou e on oun en e a i o u e e un on o o"
| 440
| 440
| 0.320455
| 119
| 440
| 1.184874
| 0.352941
| 0.099291
| 0.12766
| 0.141844
| 0.212766
| 0.056738
| 0.056738
| 0
| 0
| 0
| 0
| 0
| 0.577273
| 440
| 1
| 440
| 440
| 0.758065
| 0
| 0
| 0
| 0
| 1
| 0.988662
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8a274a3039fe41cd44288ed537663e2932960c89
| 44
|
py
|
Python
|
models/__init__.py
|
ModelZoo/BostonHousing
|
97e96a90cb07dcf9c1f8a2bc5985bd5b32bab473
|
[
"MIT"
] | 191
|
2018-10-05T14:58:00.000Z
|
2022-03-09T19:34:12.000Z
|
models/__init__.py
|
ModelZoo/BostonHousing
|
97e96a90cb07dcf9c1f8a2bc5985bd5b32bab473
|
[
"MIT"
] | 2
|
2019-06-29T08:44:48.000Z
|
2019-11-16T20:05:26.000Z
|
models/__init__.py
|
ModelZoo/BostonHousing
|
97e96a90cb07dcf9c1f8a2bc5985bd5b32bab473
|
[
"MIT"
] | 20
|
2018-10-06T12:54:50.000Z
|
2021-09-16T00:32:19.000Z
|
from .house import HousePricePredictionModel
| 44
| 44
| 0.909091
| 4
| 44
| 10
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068182
| 44
| 1
| 44
| 44
| 0.97561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8a53bf1aa6547dfbdb50fd54dc7dca7f5228d312
| 27,508
|
py
|
Python
|
tests/test_act_quantized_ops.py
|
yachuan/actnn
|
e01575263c61723651e998a3b27918e0e1b687b7
|
[
"MIT"
] | 162
|
2021-04-29T04:11:55.000Z
|
2022-03-29T10:31:24.000Z
|
tests/test_act_quantized_ops.py
|
yachuan/actnn
|
e01575263c61723651e998a3b27918e0e1b687b7
|
[
"MIT"
] | 27
|
2021-07-13T11:12:16.000Z
|
2022-03-30T07:51:32.000Z
|
tests/test_act_quantized_ops.py
|
yachuan/actnn
|
e01575263c61723651e998a3b27918e0e1b687b7
|
[
"MIT"
] | 18
|
2021-07-09T10:39:00.000Z
|
2022-02-27T13:13:40.000Z
|
"""Test the activation quantized ops"""
import math
import numpy as np
import torch
from torch.nn import functional as F
from timeit_v2 import py_benchmark
from actnn import QScheme, QBNScheme, config, get_memory_usage, compute_tensor_bytes
from actnn.ops import ext_backward_func, ext_quantization
from actnn.ops import conv2d as quantized_conv2d, batch_norm as quantized_batch_norm, \
adaptive_avg_pool2d as quantized_adaptive_avg_pool2d
def test_relu_correctness():
    """Quantized ReLU must match F.relu exactly in both output and gradient."""
    print("========== ReLU Correctness Test ==========")
    for dtype in ['float32', 'float16']:
        print(f"test {dtype}...")
        data_np = np.random.randn(128, 56, 56, 31).astype(dtype)

        def run(op):
            # Forward + backward on a fresh CUDA tensor; return host arrays.
            data = torch.tensor(data_np).to("cuda").requires_grad_()
            out = op(data)
            out.backward(torch.ones_like(out))
            return [t.detach().cpu().numpy() for t in [out, data.grad]]

        output_ref, grad_data_ref = run(F.relu)
        output_us, grad_data_us = run(ext_quantization.act_quantized_relu)
        np.testing.assert_allclose(output_ref, output_us)
        np.testing.assert_allclose(grad_data_ref, grad_data_us)
def test_relu_memory():
    """Compare activation-memory growth of quantized ReLU against F.relu."""
    print("========== ReLU Memory Test ==========")
    for dtype in ['float32', 'float16']:
        print(f"test {dtype}...")
        data_np = np.random.randn(128, 56, 56, 32).astype(dtype)

        def measure(op):
            # Chain ten applications and report the memory delta they retain.
            buf = torch.tensor(data_np).to("cuda").requires_grad_()
            before = get_memory_usage()
            for _ in range(10):
                buf = op(buf)
            after = get_memory_usage()
            return after - before

        usage_ref = measure(F.relu)
        usage_us = measure(ext_quantization.act_quantized_relu)
        print("Exact. Usage: %.2f MB" % (usage_ref / 2 ** 20))
        print("Quantized. Usage: %.2f MB" % (usage_us / 2 ** 20))
def test_relu_speed():
    """Benchmark forward/backward latency of quantized ReLU vs. F.relu."""
    print("========== ReLU Speed Test ==========")
    for dtype in ['float32', 'float16']:
        print(f"test {dtype}...")
        data_np = np.random.randn(256, 56, 56, 32).astype(dtype)

        def test_implementation(func):
            # NOTE: py_benchmark eval()s `stmt` against {**globals(), **locals()},
            # so the local names `func`, `data`, `output`, `head` must not change.
            data = torch.tensor(data_np).to("cuda").requires_grad_()
            stmt = "func(data)"
            t_forward = py_benchmark(stmt, {**globals(), **locals()},
                                     setup="torch.cuda.synchronize()", finish="torch.cuda.synchronize()")
            output = func(data)
            head = torch.ones_like(output)
            stmt = "output.backward(head, retain_graph=True)"
            t_backward = py_benchmark(stmt, {**globals(), **locals()},
                                      setup="torch.cuda.synchronize()", finish="torch.cuda.synchronize()")
            return t_forward, t_backward

        forward_ref, backward_ref = test_implementation(F.relu)
        forward_us, backward_us = test_implementation(ext_quantization.act_quantized_relu)
        # Report times in milliseconds.
        print("Exact. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms" %
              (forward_ref * 1e3, backward_ref * 1e3, (forward_ref + backward_ref) * 1e3))
        print("Quantized. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms" %
              (forward_us * 1e3, backward_us * 1e3, (forward_us + backward_us) * 1e3))
def test_dropout_memory():
    """Compare activation-memory growth of quantized dropout vs. F.dropout."""
    print("========== Dropout Memory Test ==========")
    for dtype in ['float32', 'float16']:
        print(f"test {dtype}...")
        data_np = np.random.randn(128, 56, 56, 32).astype(dtype)

        def measure(op):
            # Chain ten dropout (p=0.2) applications; report the memory delta.
            buf = torch.tensor(data_np).to("cuda").requires_grad_()
            before = get_memory_usage()
            for _ in range(10):
                buf = op(buf, 0.2)
            after = get_memory_usage()
            return after - before

        usage_ref = measure(F.dropout)
        usage_us = measure(ext_quantization.act_quantized_dropout)
        print("Exact. Usage: %.2f MB" % (usage_ref / 2 ** 20))
        print("Quantized. Usage: %.2f MB" % (usage_us / 2 ** 20))
def test_dropout_speed():
    """Benchmark forward/backward latency of quantized dropout vs. F.dropout."""
    print("========== Dropout Speed Test ==========")
    for dtype in ['float32', 'float16']:
        print(f"test {dtype}...")
        data_np = np.random.randn(256, 56, 56, 32).astype(dtype)

        def test_implementation(func):
            # NOTE: py_benchmark eval()s `stmt` against {**globals(), **locals()},
            # so the local names `func`, `data`, `output`, `head` must not change.
            data = torch.tensor(data_np).to("cuda").requires_grad_()
            stmt = "func(data, 0.2)"
            t_forward = py_benchmark(stmt, {**globals(), **locals()},
                                     setup="torch.cuda.synchronize()", finish="torch.cuda.synchronize()")
            output = func(data, 0.2)
            head = torch.ones_like(output)
            stmt = "output.backward(head, retain_graph=True)"
            t_backward = py_benchmark(stmt, {**globals(), **locals()},
                                      setup="torch.cuda.synchronize()", finish="torch.cuda.synchronize()")
            return t_forward, t_backward

        forward_ref, backward_ref = test_implementation(F.dropout)
        forward_us, backward_us = test_implementation(ext_quantization.act_quantized_dropout)
        # Report times in milliseconds.
        print("Exact. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms" %
              (forward_ref * 1e3, backward_ref * 1e3, (forward_ref + backward_ref) * 1e3))
        print("Quantized. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms" %
              (forward_us * 1e3, backward_us * 1e3, (forward_us + backward_us) * 1e3))
def test_adaptive_avg_pool2d_correctness():
    """Quantized adaptive_avg_pool2d must agree with F.adaptive_avg_pool2d."""
    # arguments and test data
    N, H, W, CI, CO, kernel_size, stride, padding, dilation, groups = 4, 28, 28, 256, 256, 3, 1, 1, 1, 1
    data_np = np.random.randn(N, CI, H, W).astype('float32')
    head_np = np.random.randn(N, CI, 1, 1).astype('float32')
    output_size = 1, 1

    def run(op):
        # Forward + backward with a fixed seed; return host arrays.
        torch.manual_seed(0)
        data = torch.tensor(data_np).to("cuda").requires_grad_()
        head = torch.tensor(head_np).to("cuda")
        out = op(data, output_size)
        out.backward(head)
        return [t.detach().cpu().numpy() for t in [out, data.grad]]

    output_ref, grad_data_ref = run(F.adaptive_avg_pool2d)
    output_us, grad_data_us = run(quantized_adaptive_avg_pool2d.apply)
    atol = 1e-4
    rtol = 1e-4
    print("========== AdaptiveAvgPool2d Correctness Test ==========")
    np.testing.assert_allclose(output_ref, output_us, atol=atol, rtol=rtol)
    np.testing.assert_allclose(grad_data_ref, grad_data_us, atol=atol, rtol=rtol)
def test_adaptive_avg_pool2d_memory():
    """Measure activation memory retained by chained adaptive average pools."""
    # arguments and test data
    N, H, W, CI = 1024, 4, 4, 1024
    data_np = np.random.randn(N, CI, H, W).astype('float32')
    output_size = (1, 1)

    def measure(op):
        data = torch.tensor(data_np).to("cuda").requires_grad_()
        out = op(data, output_size)
        # Ten more chained applications; subtract the live tensors' own bytes.
        for _ in range(10):
            out = op(out, output_size)
        return get_memory_usage() - compute_tensor_bytes([data, out])

    usage_ref = measure(F.adaptive_avg_pool2d)
    usage_us = measure(quantized_adaptive_avg_pool2d.apply)
    print("========== AdaptiveAvgPool2d Memory Test ==========")
    print("Exact. Usage: %.3f MB" % (usage_ref / 2 ** 20))
    print("Quantized. Usage: %.2f MB" % (usage_us / 2 ** 20))
def test_max_pool2d_correctness():
    """Quantized max_pool2d must agree with F.max_pool2d in output and grad."""
    # arguments and test data
    N, H, W, CI, kernel_size, stride, padding, dilation = 4, 28, 28, 8, 3, 2, 1, 1
    ceil_mode, return_indices = False, False
    print("========== MaxPool2d Correctness Test ==========")
    for dtype in ['float32', 'float16']:
        print(f"test {dtype}...")
        data_np = np.random.randn(N, CI, H, W).astype(dtype)

        def run(op):
            data = torch.tensor(data_np).to("cuda").requires_grad_()
            out = op(data, (kernel_size, kernel_size), (stride, stride),
                     (padding, padding), (dilation, dilation),
                     ceil_mode, return_indices)
            out.backward(torch.ones_like(out))
            return [t.detach().cpu().numpy() for t in [out, data.grad]]

        output_ref, grad_data_ref = run(F.max_pool2d)
        output_us, grad_data_us = run(ext_quantization.act_quantized_max_pool2d)
        atol = 1e-4
        rtol = 1e-4
        np.testing.assert_allclose(output_ref, output_us, atol=atol, rtol=rtol)
        np.testing.assert_allclose(grad_data_ref, grad_data_us, atol=atol, rtol=rtol)
def test_max_pool2d_memory():
    """Compare activation memory retained by quantized vs. exact max_pool2d."""
    # arguments and test data
    N, H, W, CI, kernel_size, stride, padding, dilation = 128, 28, 28, 8, 3, 2, 1, 1
    ceil_mode, return_indices = False, False
    print("========== MaxPool2d Memory Test ==========")
    for dtype in ['float32', 'float16']:
        print(f"test {dtype}...")
        data_np = np.random.randn(N, CI, H, W).astype(dtype)

        def measure(op):
            data = torch.tensor(data_np).to("cuda").requires_grad_()
            out = op(data, (kernel_size, kernel_size), (stride, stride),
                     (padding, padding), (dilation, dilation),
                     ceil_mode, return_indices)
            # Subtract the live tensors' own bytes from the measured usage.
            return get_memory_usage() - compute_tensor_bytes([out, data])

        usage_ref = measure(F.max_pool2d)
        usage_us = measure(ext_quantization.act_quantized_max_pool2d)
        print("Exact. Usage: %.3f MB" % (usage_ref / 2 ** 20))
        print("Quantized. Usage: %.3f MB" % (usage_us / 2 ** 20))
def test_max_pool2d_speed():
    """Benchmark forward/backward latency of quantized vs. exact max_pool2d."""
    # arguments and test data
    N, H, W, CI, kernel_size, stride, padding, dilation = 128, 28, 28, 128, 3, 2, 1, 1
    ceil_mode, return_indices = False, False
    print("========== MaxPool2d Speed Test ==========")
    for dtype in ['float32', 'float16']:
        print(f"test {dtype}...")
        data_np = np.random.randn(N, CI, H, W).astype(dtype)

        def test_implementation(func):
            # NOTE: py_benchmark eval()s `stmt` against {**globals(), **locals()},
            # so all local names referenced inside the stmt strings must not change.
            data = torch.tensor(data_np).to("cuda").requires_grad_()
            stmt = "func(data, (kernel_size, kernel_size), (stride, stride), (padding, padding),"\
                   "(dilation, dilation), ceil_mode, return_indices)"
            t_forward = py_benchmark(stmt, {**globals(), **locals()},
                                     setup="torch.cuda.synchronize()", finish="torch.cuda.synchronize()")
            output = func(data, (kernel_size, kernel_size), (stride, stride), (padding, padding),
                          (dilation, dilation), ceil_mode, return_indices)
            head = torch.ones_like(output)
            stmt = "output.backward(head, retain_graph=True)"
            t_backward = py_benchmark(stmt, {**globals(), **locals()},
                                      setup="torch.cuda.synchronize()", finish="torch.cuda.synchronize()")
            return t_forward, t_backward

        forward_ref, backward_ref = test_implementation(F.max_pool2d)
        forward_us, backward_us = test_implementation(ext_quantization.act_quantized_max_pool2d)
        # Report times in milliseconds.
        print("Exact. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms" %
              (forward_ref * 1e3, backward_ref * 1e3, (forward_ref + backward_ref) * 1e3))
        print("Quantized. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms" %
              (forward_us * 1e3, backward_us * 1e3, (forward_us + backward_us) * 1e3))
def test_upsample_memory():
    """Measure activation memory retained by chained bilinear upsampling."""
    # arguments and test data
    N, H, W, CI = 128, 28, 28, 8
    size, scale_factor, mode, align_corners = None, 2, 'bilinear', False
    data_np = np.random.randn(N, CI, H, W).astype('float32')

    def measure(op):
        data = torch.tensor(data_np).to("cuda").requires_grad_()
        # Apply the upsample three times in a chain.
        out = op(data, size, scale_factor, mode, align_corners)
        for _ in range(2):
            out = op(out, size, scale_factor, mode, align_corners)
        return get_memory_usage() - compute_tensor_bytes([out, data])

    usage_ref = measure(F.interpolate)
    print("========== Upsample Memory Test ==========")
    print("Exact. Usage: %.3f MB" % (usage_ref / 2 ** 20))
def test_bn_correctness():
    """Quantized batch_norm must match F.batch_norm (inference mode)."""
    # arguments and test data
    N, H, W, CI = 16, 28, 28, 256
    data_np = np.random.randn(N, CI, H, W).astype('float32') * 0.01
    running_mean_np = np.random.randn(CI).astype('float32')
    running_var_np = np.random.randn(CI).astype('float32')
    bn_weight_np = np.random.randn(CI).astype('float32')
    bn_bias_np = np.random.randn(CI).astype('float32')
    training = False
    bn_scheme = QBNScheme()
    config.compress_activation = False

    def run(op):
        torch.manual_seed(0)
        data = torch.tensor(data_np).to("cuda").requires_grad_()
        running_mean = torch.tensor(running_mean_np).to("cuda")
        running_var = torch.tensor(running_var_np).to("cuda")
        bn_weight = torch.tensor(bn_weight_np).to("cuda").requires_grad_()
        bn_bias = torch.tensor(bn_bias_np).to("cuda").requires_grad_()
        if op == F.batch_norm:
            out = op(data, running_mean, running_var, bn_weight, bn_bias,
                     training, 0.1, 1e-5)
        else:
            # The quantized op takes one extra trailing argument: the scheme.
            out = op(data, running_mean, running_var, bn_weight, bn_bias,
                     training, 0.1, 1e-5, bn_scheme)
        out.backward(torch.ones_like(out))
        return [t.detach().cpu().numpy()
                for t in [out, data.grad, bn_weight.grad, bn_bias.grad]]

    output_ref, grad_data_ref, grad_weight_ref, grad_bias_ref = run(F.batch_norm)
    output_us, grad_data_us, grad_weight_us, grad_bias_us = run(quantized_batch_norm.apply)
    atol = 1e-3
    rtol = 1e-3
    print("========== BN Correctness Test ==========")
    np.testing.assert_allclose(output_ref, output_us, atol=atol, rtol=rtol)
    np.testing.assert_allclose(grad_data_ref, grad_data_us, atol=atol, rtol=rtol)
    np.testing.assert_allclose(grad_weight_ref, grad_weight_us, atol=atol, rtol=rtol)
    np.testing.assert_allclose(grad_bias_ref, grad_bias_us, atol=atol, rtol=rtol)
def test_conv2d_correctness():
    """Simulated vs. kernel-backed quantized conv2d must agree at 16 bits."""
    # arguments and test data
    N, H, W, CI, CO, kernel_size, stride, padding, dilation, groups = 4, 28, 28, 256, 256, 3, 1, 1, 1, 1
    print("========== Conv2d Correctness Test ==========")
    for dtype in ['float32', 'float16']:
        print(f"test {dtype}...")
        data_np = np.random.randn(N, CI, H, W).astype(dtype)
        weight_np = np.random.randn(CO, CI // groups, kernel_size, kernel_size).astype(dtype)
        bias_np = np.random.randn(CO).astype(dtype)

        def run(op, scheme):
            torch.manual_seed(0)
            data = torch.tensor(data_np).to("cuda").requires_grad_()
            weight = torch.tensor(weight_np).to("cuda").requires_grad_()
            bias = torch.tensor(bias_np).to("cuda").requires_grad_()
            out = op(data, weight, bias, stride, padding, dilation, groups, scheme)
            out.backward(torch.ones_like(out))
            return [t.detach().cpu().numpy()
                    for t in [out, data.grad, weight.grad, bias.grad]]

        # 16-bit quantization so both code paths stay numerically close.
        config.activation_compression_bits = [16]
        config.initial_bits = 16
        config.perlayer = False
        config.use_gradient = False
        scheme = QScheme(None)

        config.simulate = True
        output_ref, grad_data_ref, grad_weight_ref, grad_bias_ref = run(quantized_conv2d.apply, scheme)
        config.simulate = False
        output_us, grad_data_us, grad_weight_us, grad_bias_us = run(quantized_conv2d.apply, scheme)

        atol = 1e-2
        rtol = 1e-2
        assert output_ref.dtype == output_us.dtype
        np.testing.assert_allclose(output_ref, output_us, atol=atol, rtol=rtol)
        np.testing.assert_allclose(grad_data_ref, grad_data_us, atol=atol, rtol=rtol)
        np.testing.assert_allclose(grad_weight_ref, grad_weight_us, atol=atol, rtol=rtol)
        np.testing.assert_allclose(grad_bias_ref, grad_bias_us, atol=atol, rtol=rtol)
def test_conv2d_correctness_per_group_only():
    """Simulated vs. kernel-backed quantized conv2d with per-group scaling.

    NOTE: this test will fail on large shapes or low bit-widths; to make it
    pass, stochastic noise must be disabled.
    """
    # arguments and test data (small shapes, see note above)
    N, H, W, CI, CO, kernel_size, stride, padding, dilation, groups = 2, 16, 16, 4, 4, 1, 1, 1, 1, 1
    print("========== Conv2d Correctness Test (per group only) ==========")
    for dtype in ['float32', 'float16']:
        print(f"test {dtype}...")
        data_np = np.random.randn(N, CI, H, W).astype(dtype)
        weight_np = np.random.randn(CO, CI // groups, kernel_size, kernel_size).astype(dtype)
        bias_np = np.random.randn(CO).astype(dtype)

        def run(op, scheme):
            torch.manual_seed(0)
            data = torch.tensor(data_np).to("cuda").requires_grad_()
            weight = torch.tensor(weight_np).to("cuda").requires_grad_()
            bias = torch.tensor(bias_np).to("cuda").requires_grad_()
            out = op(data, weight, bias, stride, padding, dilation, groups, scheme)
            out.backward(torch.ones_like(out))
            return [t.detach().cpu().numpy()
                    for t in [out, data.grad, weight.grad, bias.grad]]

        # Per-group quantization at 8 bits; no per-layer scheme object.
        config.activation_compression_bits = [8]
        config.perlayer = False
        config.use_gradient = False

        config.simulate = True
        output_ref, grad_data_ref, grad_weight_ref, grad_bias_ref = run(quantized_conv2d.apply, None)
        config.simulate = False
        output_us, grad_data_us, grad_weight_us, grad_bias_us = run(quantized_conv2d.apply, None)

        atol = 1e-1
        rtol = 1e-1
        assert output_ref.dtype == output_us.dtype
        np.testing.assert_allclose(output_ref, output_us, atol=atol, rtol=rtol)
        np.testing.assert_allclose(grad_data_ref, grad_data_us, atol=atol, rtol=rtol)
        np.testing.assert_allclose(grad_weight_ref, grad_weight_us, atol=atol, rtol=rtol)
        np.testing.assert_allclose(grad_bias_ref, grad_bias_us, atol=atol, rtol=rtol)
def test_conv2d_speed():
    """Benchmark exact vs. quantized conv2d forward/backward time."""
    # Problem size roughly matching a ResNet intermediate block.
    N, H, W, CI, CO, kernel_size, stride, padding, dilation, groups = \
        128, 28, 28, 256, 256, 3, 1, 1, 1, 1

    print("========== Conv2d Speed Test ==========")

    for dtype in ['float32', 'float16']:
        print(f"test {dtype}...")
        data_np = np.random.randn(N, CI, H, W).astype(dtype)
        weight_np = np.random.randn(CO, CI // groups, kernel_size,
                                    kernel_size).astype(dtype)
        bias_np = np.random.randn(CO).astype(dtype)
        scheme = QScheme(None)

        # NOTE: the local names below (func, data, weight, bias, stride,
        # padding, dilation, groups, scheme, output, head) are referenced
        # by name inside the benchmark statement strings — do not rename.
        def run_once(func, scheme):
            data = torch.tensor(data_np).to("cuda").requires_grad_()
            weight = torch.tensor(weight_np).to("cuda").requires_grad_()
            bias = torch.tensor(bias_np).to("cuda").requires_grad_()
            # The quantized op takes an extra trailing `scheme` argument.
            if func == quantized_conv2d.apply:
                output = func(data, weight, bias, stride, padding, dilation,
                              groups, scheme)
                stmt = "func(data, weight, bias, stride, padding, dilation, groups, scheme)"
            else:
                output = func(data, weight, bias, stride, padding, dilation,
                              groups)
                stmt = "func(data, weight, bias, stride, padding, dilation, groups)"
            t_forward = py_benchmark(stmt, {**globals(), **locals()},
                                     setup="torch.cuda.synchronize()",
                                     finish="torch.cuda.synchronize()")
            head = torch.ones_like(output)
            stmt = "output.backward(head, retain_graph=True)"
            t_backward = py_benchmark(stmt, {**globals(), **locals()},
                                      setup="torch.cuda.synchronize()",
                                      finish="torch.cuda.synchronize()")
            return t_forward, t_backward

        config.activation_compression_bits = [16]
        config.initial_bits = 16
        config.perlayer = False
        config.use_gradient = False
        config.simulate = False
        scheme = QScheme(None)

        forward_ref, backward_ref = run_once(F.conv2d, None)
        forward_us, backward_us = run_once(quantized_conv2d.apply, scheme)

        print("Exact. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms" %
              (forward_ref * 1e3, backward_ref * 1e3,
               (forward_ref + backward_ref) * 1e3))
        print("Quantized. forward: %.2f ms\tbackward: %.2f ms\tsum: %.2f ms" %
              (forward_us * 1e3, backward_us * 1e3,
               (forward_us + backward_us) * 1e3))
def test_conv2d_memory_analytical():
    """Compute the memory of activation analytically.

    Runs a stack of conv2d + batch_norm + relu layers under three modes
    (exact, simulated quantization, real quantization) and reports the
    total and activation-only memory of each.
    """
    # Arguments and test data shared by every layer.
    N, H, W, CI, CO, kernel_size, stride, padding, dilation, groups = \
        256, 28, 28, 256, 256, 3, 1, 1, 1, 1
    data_np = np.random.randn(N, CI, H, W).astype('float32')
    weight_np = np.random.randn(CO, CI // groups, kernel_size,
                                kernel_size).astype('float32')
    bias_np = np.random.randn(CO).astype('float32')
    running_mean = np.zeros((CO,), dtype='float32')
    running_var = np.ones((CO,), dtype='float32')
    bn_weight = np.random.randn(CO).astype('float32')
    bn_bias = np.random.randn(CO).astype('float32')
    scheme = QScheme(num_locations=kernel_size ** 2)
    bn_scheme = QBNScheme()

    def test_implementation(conv_func, relu_func, bn_func, n_layers=10):
        # Input activation; no grad because only the layers' activation
        # storage is being measured.
        # BUG FIX: the input tensor was allocated twice in the original
        # (`data = torch.tensor(data_np).to("cuda")` appeared immediately
        # before this line), inflating GPU usage before measurement.
        data = torch.tensor(data_np).to("cuda").requires_grad_(False)

        # Per-layer weights and batch-norm parameters.
        weights = []
        running_means = []
        running_vars = []
        bn_weights = []
        bn_biases = []
        for _ in range(n_layers):
            weights.append(torch.tensor(weight_np).to("cuda").requires_grad_())
            running_means.append(torch.tensor(running_mean).to("cuda"))
            running_vars.append(torch.tensor(running_var).to("cuda"))
            bn_weights.append(torch.tensor(bn_weight).to("cuda").requires_grad_())
            bn_biases.append(torch.tensor(bn_bias).to("cuda").requires_grad_())

        before_size = get_memory_usage(False)

        # Forward n_layers of conv + bn + relu.
        output = data
        for i in range(n_layers):
            if conv_func == quantized_conv2d.apply:
                # Quantized ops take an extra trailing scheme argument.
                output = conv_func(output, weights[i], None, stride, padding,
                                   dilation, groups, scheme)
                output = bn_func(output, running_means[i], running_vars[i],
                                 bn_weights[i], bn_biases[i], True, 0.1, 1e-5,
                                 bn_scheme)
            else:
                output = conv_func(output, weights[i], None, stride, padding,
                                   dilation, groups)
                output = bn_func(output, running_means[i], running_vars[i],
                                 bn_weights[i], bn_biases[i], True, 0.1, 1e-5)
            output = relu_func(output)
        output = output.sum()

        after_size = get_memory_usage(False)
        output_size = compute_tensor_bytes(output)
        # Return (total MB, activation-only MB).
        return after_size / 1024 ** 2, \
            (after_size - before_size - output_size) / 1024 ** 2

    total_size_ref, act_size_ref = test_implementation(
        F.conv2d, lambda x: F.relu(x, inplace=True), F.batch_norm)
    config.simulate = True
    total_size_sim, act_size_sim = test_implementation(
        quantized_conv2d.apply, ext_quantization.act_quantized_relu,
        quantized_batch_norm.apply)
    config.simulate = False
    total_size_us, act_size_us = test_implementation(
        quantized_conv2d.apply, ext_quantization.act_quantized_relu,
        quantized_batch_norm.apply)

    # ROBUSTNESS FIX: use %s instead of %d — elsewhere in this file
    # config.activation_compression_bits is set to a list (e.g. [16]),
    # which would make %d raise TypeError; %s prints an int identically.
    print("========== Conv2d Activation Memory Test (bits = %s) ==========" %
          (config.activation_compression_bits,))
    print("Exact. Total: %7.2f MB\tAct: %7.2f MB" % (total_size_ref, act_size_ref))
    print("Simulation. Total: %7.2f MB\tAct: %7.2f MB" % (total_size_sim, act_size_sim))
    print("Quantized. Total: %7.2f MB\tAct: %7.2f MB" % (total_size_us, act_size_us))
def test_conv2d_memory_max_batch_size():
    """Find the maximum batch size by gradually increasing the batch size
    until hitting an out-of-memory error."""
    for device in ["cuda"]:
        def test_implementation(func, n_layers, batch_sizes):
            def run_batch_size(batch_size):
                N, H, W, CI, CO, kernel_size, stride, padding, dilation, groups = \
                    batch_size, 28, 28, 256, 256, 3, 1, 1, 1, 1
                data_np = np.random.uniform(size=(N, CI, H, W)).astype('float32')
                weight_np = np.random.uniform(
                    size=(CO, CI // groups, kernel_size, kernel_size)).astype('float32')
                bias_np = np.random.uniform(size=(CO,)).astype('float32')

                # Allocate input (no grad) and per-layer weights.
                data = torch.tensor(data_np).to("cuda").requires_grad_(False)
                weights = []
                for _ in range(n_layers):
                    weights.append(
                        torch.tensor(weight_np).to("cuda").requires_grad_())

                before_size = get_memory_usage(False)

                # Forward n convolution layers.
                output = data
                for i in range(n_layers):
                    output = func(output, weights[i], None, stride, padding,
                                  dilation, groups)
                output = output.sum()

                after_size = get_memory_usage(False)
                output_size = compute_tensor_bytes(output)
                # Return (total MB, activation-only MB).
                return after_size / 1024 ** 2, \
                    (after_size - before_size - output_size) / 1024 ** 2

            # Try gradually increased batch sizes until one raises
            # RuntimeError (CUDA OOM).
            # BUG FIX: the original printed batch_sizes[i - 1] in `finally`,
            # which under-reports by one when every size fits and wraps to
            # batch_sizes[-1] when the very first size already fails.
            max_ok_batch_size = None
            try:
                for batch_size in batch_sizes:
                    total_size_ref, act_size_ref = run_batch_size(batch_size)
                    max_ok_batch_size = batch_size
                    print("batch_size: %4d\t" % batch_size, end="")
                    print("total_memory: %7.2f MB\tact_memory: %7.2f MB" %
                          (total_size_ref, act_size_ref))
            except RuntimeError:
                pass
            finally:
                if max_ok_batch_size is None:
                    print("Maximum batch size: none (smallest size already failed)")
                else:
                    print("Maximum batch size: %d" % max_ok_batch_size)

        print("========== Conv2d Batch Size Test ==========")
        print("---> Exact")
        test_implementation(F.conv2d, n_layers=50,
                            batch_sizes=[100, 200, 250, 300, 350, 400, 450, 500, 1000])
        print("---> Quantized")
        test_implementation(act_quantized_conv2d.apply, n_layers=50,
                            batch_sizes=[100, 200, 250, 500, 1000, 2200, 2300, 2400, 3000, 4000])
if __name__ == "__main__":
    # Fast correctness tests run by default; the slower memory/speed tests
    # below stay commented out so they can be enabled one at a time.
    test_relu_correctness()
    test_relu_memory()
    test_relu_speed()

    # test_dropout_memory()
    # test_dropout_speed()
    # test_adaptive_avg_pool2d_correctness()
    # test_adaptive_avg_pool2d_memory()
    # test_max_pool2d_correctness()
    # test_max_pool2d_memory()
    # test_max_pool2d_speed()
    # test_upsample_memory()
    # test_bn_correctness()

    test_conv2d_correctness()

    # test_conv2d_correctness_per_group_only()
    # test_conv2d_speed()
    # config.activation_compression_bits = 2
    # test_conv2d_memory_analytical()
    # config.activation_compression_bits = 2
    # test_conv2d_memory_max_batch_size()
| 42.32
| 138
| 0.625491
| 3,566
| 27,508
| 4.588054
| 0.070387
| 0.056109
| 0.019559
| 0.026588
| 0.843958
| 0.821894
| 0.784426
| 0.764073
| 0.731496
| 0.711142
| 0
| 0.030196
| 0.237931
| 27,508
| 649
| 139
| 42.385208
| 0.750274
| 0.053185
| 0
| 0.571096
| 0
| 0
| 0.121275
| 0.018041
| 0
| 0
| 0
| 0
| 0.04662
| 1
| 0.081585
| false
| 0.002331
| 0.018648
| 0
| 0.13986
| 0.123543
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8a7ba4c19b8a0d426d6eb86c310e855394a70dba
| 625
|
py
|
Python
|
delta_array/scripts/example_movements.py
|
keenechin/robot-nonstationarity
|
67f66c5ca3114c458be60066be0e98bec12887c6
|
[
"Apache-2.0"
] | null | null | null |
delta_array/scripts/example_movements.py
|
keenechin/robot-nonstationarity
|
67f66c5ca3114c458be60066be0e98bec12887c6
|
[
"Apache-2.0"
] | null | null | null |
delta_array/scripts/example_movements.py
|
keenechin/robot-nonstationarity
|
67f66c5ca3114c458be60066be0e98bec12887c6
|
[
"Apache-2.0"
] | null | null | null |
from DeltaArray import DeltaArray
import numpy as np
import time

# Example movement script for a 12-joint delta array connected over serial.
da = DeltaArray('/dev/ttyACM0')
try:
    print(da.get_joint_positions())
    da.reset()
    da.wait_until_done_moving()
    print(da.get_joint_positions())

    # Ramp all 12 joints together from 0.01 up to 0.09 in 0.01 steps,
    # printing the measured joint positions after each move.
    for i in range(1, 10):
        p = np.ones((1, 12)) * 0.01 * i
        duration = [1.0]
        da.move_joint_position(p, duration)
        da.wait_until_done_moving()
        print(da.get_joint_positions())

    # One final move to 0.1, then reset back to the home position.
    p = np.ones((1, 12)) * 0.1
    duration = [1.0]
    da.move_joint_position(p, duration)
    da.wait_until_done_moving()
    print(da.get_joint_positions())

    da.reset()
    da.wait_until_done_moving()
    print(da.get_joint_positions())
finally:
    # BUG FIX: close the serial connection even if a movement raises,
    # so the device is not left locked for the next user.
    da.close()
| 20.833333
| 36
| 0.7104
| 104
| 625
| 4.019231
| 0.326923
| 0.083732
| 0.119617
| 0.179426
| 0.760766
| 0.760766
| 0.708134
| 0.708134
| 0.708134
| 0.708134
| 0
| 0.035448
| 0.1424
| 625
| 30
| 37
| 20.833333
| 0.744403
| 0
| 0
| 0.652174
| 0
| 0
| 0.020101
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.130435
| 0
| 0.130435
| 0.217391
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8a92ca40a1f611828ba2d523be22511f72b04a54
| 44
|
py
|
Python
|
tests/conftest.py
|
luszak/pytest_pyramid
|
ede2a490eed4afbe43719e661e79ae98c496d5b6
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
luszak/pytest_pyramid
|
ede2a490eed4afbe43719e661e79ae98c496d5b6
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
luszak/pytest_pyramid
|
ede2a490eed4afbe43719e661e79ae98c496d5b6
|
[
"MIT"
] | null | null | null |
from pytest_pyramid.plugin import * # noqa
| 22
| 43
| 0.772727
| 6
| 44
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159091
| 44
| 1
| 44
| 44
| 0.891892
| 0.090909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8aac0309898e077b3970f7c1d47f14869e263ee7
| 122
|
py
|
Python
|
IST652_lab1_SUNKARA.py
|
Pammu6/IST652-Scripting-for-Data-Analysis
|
78cf8851501466f0a74f926bae9734a725aeba1c
|
[
"CC0-1.0"
] | null | null | null |
IST652_lab1_SUNKARA.py
|
Pammu6/IST652-Scripting-for-Data-Analysis
|
78cf8851501466f0a74f926bae9734a725aeba1c
|
[
"CC0-1.0"
] | null | null | null |
IST652_lab1_SUNKARA.py
|
Pammu6/IST652-Scripting-for-Data-Analysis
|
78cf8851501466f0a74f926bae9734a725aeba1c
|
[
"CC0-1.0"
] | 1
|
2020-11-14T01:30:18.000Z
|
2020-11-14T01:30:18.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 4 17:37:16 2019
@author: KARTHEEK
"""
from pprint import pprint
| 13.555556
| 36
| 0.590164
| 18
| 122
| 4
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131868
| 0.254098
| 122
| 8
| 37
| 15.25
| 0.659341
| 0.631148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
8ab47caad2489296c113b52ff16422aa6c0393ee
| 32,821
|
py
|
Python
|
spyder_okvim/executor/tests/test_vline.py
|
ok97465/spyder_okvim
|
6ba22c0013a2419a14f7950bd8931d6ee7e107e4
|
[
"MIT"
] | 3
|
2021-03-13T13:01:03.000Z
|
2021-12-05T05:19:55.000Z
|
spyder_okvim/executor/tests/test_vline.py
|
ok97465/spyder_okvim
|
6ba22c0013a2419a14f7950bd8931d6ee7e107e4
|
[
"MIT"
] | 18
|
2020-11-02T22:08:01.000Z
|
2021-09-20T05:53:12.000Z
|
spyder_okvim/executor/tests/test_vline.py
|
ok97465/spyder_okvim
|
6ba22c0013a2419a14f7950bd8931d6ee7e107e4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Tests for the executor_vline."""
# Third party imports
import pytest
from qtpy.QtCore import Qt
from spyder.config.manager import CONF
# Local imports
from spyder_okvim.spyder.config import CONF_SECTION
from spyder_okvim.utils.vim_status import VimState
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        ("import numpy as np", ['V'], 0, [0, 18]),
        ("import numpy as np", ['V', 'l', 'h'], 0, [0, 18]),
        ("import numpy as np", ['V', '5l'], 5, [0, 18]),
        ("import numpy as np", ['5l', 'V'], 5, [0, 18]),
        ("""import numpy as np
import matplotlib.pyplot as plt
import scipy.scipy as sc""", ['V', 'j'], 19, [0, 50]),
        ("""import numpy as np
import matplotlib.pyplot as plt
import scipy.scipy as sc""", ['2l', 'V', '2j', '5l'], 58, [0, 75]),
        ("""import numpy as np
import matplotlib.pyplot as plt
import scipy.scipy as sc
""", ['2l', 'V', '3j', 'k', 'j'], 76, [0, 76]),
        ("""
import matplotlib.pyplot as plt
import scipy.scipy as sc
""", ['5j', 'V', '5k'], 0, [0, 58]),
    ]
)
def test_V_cmd(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Test the V command (enter v-line mode and move)."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystroke in cmd_list:
        qtbot.keyClicks(command_line, keystroke)
    # The v-line selection is exposed as an extra selection on the editor.
    selection = editor.get_extra_selections("vim_selection")[0]
    observed = [selection.cursor.selectionStart(),
                selection.cursor.selectionEnd()]
    assert command_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert observed == sel_pos
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        ("import numpy as np", ['V', '0'], 0, [0, 18]),
        ("import numpy as np", ['5l', 'V', '0'], 0, [0, 18]),
        ("""import numpy as np
import matplotlib.pyplot as plt
import scipy.scipy as sc""", ['V', 'j', '5l', '0'], 19, [0, 50])
    ]
)
def test_zero_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Test the 0 command in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystroke in cmd_list:
        qtbot.keyClicks(command_line, keystroke)
    selection = editor.get_extra_selections("vim_selection")[0]
    observed = [selection.cursor.selectionStart(),
                selection.cursor.selectionEnd()]
    assert command_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert observed == sel_pos
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        # BUG FIX: the text literals had their leading whitespace collapsed;
        # cursor_pos 3 and sel_pos [0, 21] require exactly 3 leading spaces
        # before the 18-character statement.
        ("   import numpy as np", ['V', '^'], 3, [0, 21]),
        ("   import numpy as np", ['10l', 'V', '^'], 3, [0, 21]),
    ]
)
def test_caret_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Test the ^ command (first non-blank) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    cmd_line = vim.get_focus_widget()
    for cmd in cmd_list:
        qtbot.keyClicks(cmd_line, cmd)
    sel = editor.get_extra_selections("vim_selection")[0]
    sel_pos_ = [sel.cursor.selectionStart(), sel.cursor.selectionEnd()]
    assert cmd_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert sel_pos_ == sel_pos
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        ("import numpy as np", ['V', '$'], 18, [0, 18])
    ]
)
def test_dollar_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Test the $ command (end of line) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystroke in cmd_list:
        qtbot.keyClicks(command_line, keystroke)
    selection = editor.get_extra_selections("vim_selection")[0]
    observed = [selection.cursor.selectionStart(),
                selection.cursor.selectionEnd()]
    assert command_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert observed == sel_pos
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        ("01 34\n", ['V', 'w'], 3, [0, 5]),
        ("01 34\n", ['V', 'w', 'o'], 0, [0, 5]),
        ("01 34\n", ['V', 'w', 'o', 'o'], 4, [0, 5]),
        ("\n", ['j', 'V'], 1, [1, 1]),
        ("\n", ['j', 'V', 'o'], 1, [1, 1]),
        ("01 34\n6\n", ['V', 'j'], 6, [0, 7]),
        ("01 34\n6\n", ['V', 'j', 'o'], 0, [0, 7]),
        ("01 34\n6\n", ['j', 'V', 'k', 'o'], 6, [0, 7]),
        ("01 34\n6\n8\n", ['j', 'V', 'j', '2k', 'o'], 6, [0, 7]),
    ]
)
def test_o_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Test the o command (swap selection ends) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystroke in cmd_list:
        qtbot.keyClicks(command_line, keystroke)
    selection = editor.get_extra_selections("vim_selection")[0]
    observed = [selection.cursor.selectionStart(),
                selection.cursor.selectionEnd()]
    assert command_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert observed == sel_pos
@pytest.mark.parametrize(
    "text, cmd_list, text_expected, cursor_pos",
    [
        ("", ['V', 'J'], "", 0),
        ("\n\n", ['3j', 'V', 'J'], "\n\n", 2),
        ("0\n23", ['j', 'l', 'V', 'k', 'J'], "0 23", 1),
        ("0\n2\n4\n6\n8\n", ['V', '2j', 'J'], "0 2 4\n6\n8\n", 3),
        ("0\n2\n4\n6\n8\n", ['V', '2j', 'J', '.'], "0 2 4 6 8\n", 7)
    ]
)
def test_J_cmd_in_vline(vim_bot, text, cmd_list, text_expected, cursor_pos):
    """Test the J command (join lines) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystroke in cmd_list:
        qtbot.keyClicks(command_line, keystroke)
    assert command_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert editor.toPlainText() == text_expected
    # Leaving v-line mode must clear the recorded selection start.
    assert vim.vim_cmd.vim_status.get_pos_start_in_selection() is None
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        ("01 34", ['V', 'w'], 3, [0, 5]),
    ]
)
def test_w_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Test the w command (next word) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystroke in cmd_list:
        qtbot.keyClicks(command_line, keystroke)
    selection = editor.get_extra_selections("vim_selection")[0]
    observed = [selection.cursor.selectionStart(),
                selection.cursor.selectionEnd()]
    assert command_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert observed == sel_pos
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        ('', ['V', 'W'], 0, [0, 0]),
        ('029.d98@jl 34', ['V', 'W'], 11, [0, 13]),
        ('029.d98@jl 34', ['V', '2W'], 13, [0, 13]),
        ('029.d98@jl 34\na', ['V', '2W'], 14, [0, 15]),
        # BUG FIX: whitespace was collapsed in this literal; cursor_pos 16
        # and sel_pos [0, 17] require two spaces before the final 'a'.
        ('029.d98@jl 34\n  a', ['V', '2W'], 16, [0, 17]),
    ]
)
def test_W_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Test the W command (next WORD) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    cmd_line = vim.get_focus_widget()
    for cmd in cmd_list:
        qtbot.keyClicks(cmd_line, cmd)
    sel = editor.get_extra_selections("vim_selection")[0]
    sel_pos_ = [sel.cursor.selectionStart(), sel.cursor.selectionEnd()]
    assert cmd_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert sel_pos_ == sel_pos
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        ("01 34", ['$', 'V', 'b'], 3, [0, 5]),
    ]
)
def test_b_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Test the b command (previous word) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystroke in cmd_list:
        qtbot.keyClicks(command_line, keystroke)
    selection = editor.get_extra_selections("vim_selection")[0]
    observed = [selection.cursor.selectionStart(),
                selection.cursor.selectionEnd()]
    assert command_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert observed == sel_pos


@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        ("01.34", ['$', 'V', 'B'], 0, [0, 5]),
    ]
)
def test_B_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Test the B command (previous WORD) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystroke in cmd_list:
        qtbot.keyClicks(command_line, keystroke)
    selection = editor.get_extra_selections("vim_selection")[0]
    observed = [selection.cursor.selectionStart(),
                selection.cursor.selectionEnd()]
    assert command_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert observed == sel_pos
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        ("01 34\n67 90", ['V', 'e'], 1, [0, 5]),
        ("01 34\n67 90", ['V', '3e'], 7, [0, 11]),
    ]
)
def test_e_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Test the e command (end of word) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystroke in cmd_list:
        qtbot.keyClicks(command_line, keystroke)
    selection = editor.get_extra_selections("vim_selection")[0]
    observed = [selection.cursor.selectionStart(),
                selection.cursor.selectionEnd()]
    assert command_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert observed == sel_pos
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        ('0\n2\n4\n', ['V', '2G'], 2, [0, 3]),
        # BUG FIX: whitespace-only line was collapsed by extraction;
        # cursor_pos 6 and sel_pos [0, 7] imply line 2 is five spaces.
        ('0\n     \n8\n', ['V', '2G'], 6, [0, 7]),
        ('0\n2\n4\n', ['V', 'G'], 6, [0, 6]),
        # BUG FIX: cursor_pos 11 / sel_pos [0, 12] imply five leading
        # spaces before the final 'a'.
        ('0\n2\n4\n     a', ['V', 'G'], 11, [0, 12])
    ]
)
def test_G_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Test the G command (go to line) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    cmd_line = vim.get_focus_widget()
    for cmd in cmd_list:
        qtbot.keyClicks(cmd_line, cmd)
    sel = editor.get_extra_selections("vim_selection")[0]
    sel_pos_ = [sel.cursor.selectionStart(), sel.cursor.selectionEnd()]
    assert cmd_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert sel_pos_ == sel_pos
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        ('0\n2\n4\n', ['V', '2gg'], 2, [0, 3]),
        # BUG FIX: whitespace-only line was collapsed by extraction;
        # cursor_pos 6 and sel_pos [0, 7] imply line 2 is five spaces.
        ('0\n     \n8\n', ['V', '2gg'], 6, [0, 7]),
        # BUG FIX: cursor_pos 4 / sel_pos [0, 10] imply four leading
        # spaces before the '0' on the first line.
        ('    0\n2\n4\n', ['4j', 'V', 'gg'], 4, [0, 10])
    ]
)
def test_gg_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Test the gg command (go to first line) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    cmd_line = vim.get_focus_widget()
    for cmd in cmd_list:
        qtbot.keyClicks(cmd_line, cmd)
    sel = editor.get_extra_selections("vim_selection")[0]
    sel_pos_ = [sel.cursor.selectionStart(), sel.cursor.selectionEnd()]
    assert cmd_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert sel_pos_ == sel_pos
@pytest.mark.parametrize(
    "text, cmd_list, text_expected, cursor_pos",
    [
        ('abcde', ['V', '~'], 'ABCDE', 0),
        ('abcde\na', ['l', 'V', '$', '~'], 'ABCDE\na', 0),
        ('abcde\na', ['l', 'V', '$', '~', 'j', '.'], 'ABCDE\nA', 6),
        ('', ['V', '~'], '', 0)
    ]
)
def test_tilde_cmd_in_vline(vim_bot, text, cmd_list, text_expected,
                            cursor_pos):
    """Test the ~ command (toggle case) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystroke in cmd_list:
        qtbot.keyClicks(command_line, keystroke)
    assert command_line.text() == ""
    assert editor.toPlainText() == text_expected
    assert editor.textCursor().position() == cursor_pos
    # Leaving v-line mode must clear the recorded selection start.
    assert vim.vim_cmd.vim_status.get_pos_start_in_selection() is None
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        ('', ['V', '%'], 0, [0, 0]),
        ('\n', ['j', 'V', '%'], 1, [1, 1]),
        (' ()', ['V', '%'], 2, [0, 3]),
        (' ()', ['V', '%', '%'], 1, [0, 3])
    ]
)
def test_percent_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Test the % command (matching bracket) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystroke in cmd_list:
        qtbot.keyClicks(command_line, keystroke)
    selection = editor.get_extra_selections("vim_selection")[0]
    observed = [selection.cursor.selectionStart(),
                selection.cursor.selectionEnd()]
    assert command_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert observed == sel_pos
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        ('', ['V', 'f', 'r'], 0, [0, 0]),
        ('\n', ['j', 'V', 'f', 'r'], 1, [1, 1]),
        (' rr', ['V', 'f', 'r'], 1, [0, 3]),
        (' rr', ['V', 'f', 'r', ';'], 2, [0, 3]),
        (' rr', ['V', 'f', 'r', ';', ','], 1, [0, 3]),
    ]
)
def test_f_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Test the f command (find char forward) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystroke in cmd_list:
        qtbot.keyClicks(command_line, keystroke)
    selection = editor.get_extra_selections("vim_selection")[0]
    observed = [selection.cursor.selectionStart(),
                selection.cursor.selectionEnd()]
    assert command_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert observed == sel_pos
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        ('', ['V', 'F', 'r'], 0, [0, 0]),
        ('\n', ['j', 'V', 'F', 'r'], 1, [1, 1]),
        (' rr ', ['V', '$', 'F', 'r'], 2, [0, 4]),
        (' rr ', ['V', '$', 'F', 'r', ';'], 1, [0, 4]),
        (' rr ', ['V', '$', 'F', 'r', ';', ','], 2, [0, 4]),
    ]
)
def test_F_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Test the F command (find char backward) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystroke in cmd_list:
        qtbot.keyClicks(command_line, keystroke)
    selection = editor.get_extra_selections("vim_selection")[0]
    observed = [selection.cursor.selectionStart(),
                selection.cursor.selectionEnd()]
    assert command_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert observed == sel_pos
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        ('', ['V', 't', 'r'], 0, [0, 0]),
        ('\n', ['j', 'V', 't', 'r'], 1, [1, 1]),
        # BUG FIX: whitespace was collapsed in these literals; the
        # sel_pos line lengths ([0, 4] and [0, 6]) require two leading
        # spaces, consistent with the asserted cursor positions.
        ('  rr', ['V', 't', 'r'], 1, [0, 4]),
        ('  rr', ['V', 't', 'r', ';'], 2, [0, 4]),
        ('  rrrr', ['V', 't', 'r', '4;'], 4, [0, 6]),
        ('  rrrr', ['V', 't', 'r', '4;', ','], 3, [0, 6]),
    ]
)
def test_t_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Test the t command (till char forward) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    cmd_line = vim.get_focus_widget()
    for cmd in cmd_list:
        qtbot.keyClicks(cmd_line, cmd)
    sel = editor.get_extra_selections("vim_selection")[0]
    sel_pos_ = [sel.cursor.selectionStart(), sel.cursor.selectionEnd()]
    assert cmd_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert sel_pos_ == sel_pos
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        ('', ['V', 'T', 'r'], 0, [0, 0]),
        ('r\n', ['j', 'V', 'T', 'r'], 2, [2, 2]),
        # BUG FIX: whitespace was collapsed in these literals; the
        # sel_pos line lengths ([0, 6]) require two spaces of padding,
        # consistent with the asserted cursor positions.
        ('  rr  ', ['V', '$', 'T', 'r'], 4, [0, 6]),
        ('  rr  ', ['V', '$', 'T', 'r', ';'], 3, [0, 6]),
        ('  rrrr', ['V', '$', 'T', 'r', '4;'], 3, [0, 6]),
        ('  rrrr', ['V', '$', 'T', 'r', '4;', ','], 4, [0, 6]),
    ]
)
def test_T_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Test the T command (till char backward) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    cmd_line = vim.get_focus_widget()
    for cmd in cmd_list:
        qtbot.keyClicks(cmd_line, cmd)
    sel = editor.get_extra_selections("vim_selection")[0]
    sel_pos_ = [sel.cursor.selectionStart(), sel.cursor.selectionEnd()]
    assert cmd_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert sel_pos_ == sel_pos
@pytest.mark.parametrize(
    "text, cmd_list, text_expected, cursor_pos",
    [
        ("", ['V', 'r', 'r'], "", 0),
        ("1\n", ['j', 'V', 'r', 'r'], "1\n", 2),
        ("\n\na", ['j', 'V', 'r', 'r'], "\n\na", 1),
        ("a", ['V', 'r', 'r'], "r", 0),
        (" a\nbc\n", ['l', 'V', 'j', 'r', 'r'], "rr\nrr\n", 0),
        (" a\nbc\nkk", ['l', 'V', 'j', 'r', 'r'], "rr\nrr\nkk", 0),
        (" a\nbc\nkk", ['l', 'V', 'j', 'r', 'r', 'j', '.'], "rr\nrr\nrr", 3),
    ]
)
def test_r_cmd_in_vline(vim_bot, text, cmd_list, text_expected, cursor_pos):
    """Test the r command (replace chars) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystroke in cmd_list:
        qtbot.keyClicks(command_line, keystroke)
    # After replacement the selection is gone and normal mode is restored.
    remaining = editor.get_extra_selections("vim_selection")
    assert remaining == []
    assert command_line.text() == ""
    assert vim.vim_cmd.vim_status.vim_state == VimState.NORMAL
    assert editor.textCursor().position() == cursor_pos
    assert editor.toPlainText() == text_expected
@pytest.mark.parametrize(
    "text, cmd_list, text_expected, cursor_pos",
    [
        ('ABCDE', ['V', 'u'], 'abcde', 0),
        ('ABCDE\nA', ['l', 'V', '$', 'u'], 'abcde\nA', 0),
        ('ABCDE\nA', ['l', 'V', '$', 'u', 'j', '.'], 'abcde\na', 6),
        ('', ['V', 'u'], '', 0)
    ]
)
def test_u_cmd_in_vline(vim_bot, text, cmd_list, text_expected, cursor_pos):
    """Test the u command (lowercase) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystroke in cmd_list:
        qtbot.keyClicks(command_line, keystroke)
    assert command_line.text() == ""
    assert editor.toPlainText() == text_expected
    assert editor.textCursor().position() == cursor_pos
    # Leaving v-line mode must clear the recorded selection start.
    assert vim.vim_cmd.vim_status.get_pos_start_in_selection() is None
@pytest.mark.parametrize(
    "text, cmd_list, text_expected, cursor_pos",
    [
        ('ABCDE', ['V', 'g', 'u'], 'abcde', 0),
        ('ABCDE\nA', ['l', 'V', '$', 'g', 'u'], 'abcde\nA', 0),
        ('ABCDE\nA', ['l', 'V', '$', 'g', 'u', 'j', '.'], 'abcde\na', 6),
        ('', ['V', 'g', 'u'], '', 0)
    ]
)
def test_gu_cmd_in_vline(vim_bot, text, cmd_list, text_expected, cursor_pos):
    """Test the gu command (lowercase) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystroke in cmd_list:
        qtbot.keyClicks(command_line, keystroke)
    assert command_line.text() == ""
    assert editor.toPlainText() == text_expected
    assert editor.textCursor().position() == cursor_pos
    # Leaving v-line mode must clear the recorded selection start.
    assert vim.vim_cmd.vim_status.get_pos_start_in_selection() is None
@pytest.mark.parametrize(
    "text, cmd_list, text_expected, cursor_pos",
    [
        ('abcde', ['V', 'U'], 'ABCDE', 0),
        ('abcde\na', ['l', 'V', '$', 'U'], 'ABCDE\na', 0),
        ('abcde\na', ['l', 'V', '$', 'U', 'j', '.'], 'ABCDE\nA', 6),
        ('', ['V', 'U'], '', 0)
    ]
)
def test_U_cmd_in_vline(vim_bot, text, cmd_list, text_expected, cursor_pos):
    """Test the U command (uppercase) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystroke in cmd_list:
        qtbot.keyClicks(command_line, keystroke)
    assert command_line.text() == ""
    assert editor.toPlainText() == text_expected
    assert editor.textCursor().position() == cursor_pos
    # Leaving v-line mode must clear the recorded selection start.
    assert vim.vim_cmd.vim_status.get_pos_start_in_selection() is None
@pytest.mark.parametrize(
    "text, cmd_list, text_expected, cursor_pos",
    [
        ('abcde', ['V', 'g', 'U'], 'ABCDE', 0),
        ('abcde\na', ['l', 'V', '$', 'g', 'U'], 'ABCDE\na', 0),
        ('abcde\na', ['l', 'V', '$', 'g', 'U', 'j', '.'], 'ABCDE\nA', 6),
        ('', ['V', 'g', 'U'], '', 0)
    ]
)
def test_gU_cmd_in_vline(vim_bot, text, cmd_list, text_expected, cursor_pos):
    """Test the gU command (uppercase) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystroke in cmd_list:
        qtbot.keyClicks(command_line, keystroke)
    assert command_line.text() == ""
    assert editor.toPlainText() == text_expected
    assert editor.textCursor().position() == cursor_pos
    # Leaving v-line mode must clear the recorded selection start.
    assert vim.vim_cmd.vim_status.get_pos_start_in_selection() is None
@pytest.mark.parametrize(
    "text, cmd_list, text_expected, cursor_pos",
    [
        ('abCde', ['V', 'g', '~'], 'ABcDE', 0),
        ('abCde\na', ['l', 'V', '$', 'g', '~'], 'ABcDE\na', 0),
        ('abCde\na', ['l', 'V', '$', 'g', '~', 'j', '.'], 'ABcDE\nA', 6),
        ('', ['V', 'g', '~'], '', 0)
    ]
)
def test_gtilde_cmd_in_vline(vim_bot, text, cmd_list, text_expected, cursor_pos):
    """Test the g~ command (toggle case) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystroke in cmd_list:
        qtbot.keyClicks(command_line, keystroke)
    assert command_line.text() == ""
    assert editor.toPlainText() == text_expected
    assert editor.textCursor().position() == cursor_pos
    # Leaving v-line mode must clear the recorded selection start.
    assert vim.vim_cmd.vim_status.get_pos_start_in_selection() is None
@pytest.mark.parametrize(
    "text, cmd_list, text_expected, cursor_pos",
    [
        ('', ['V', '>'], '', 0),
        # BUG FIX: the expected texts had their leading whitespace
        # collapsed by extraction; the asserted cursor positions (first
        # non-blank after a shift) require 4-space indents (shiftwidth 4).
        ('abcde', ['2l', 'V', '>'], '    abcde', 4),
        (' abcde\na', ['V', '>'], '     abcde\na', 5),
        ('a\n\na', ['V', '2j', '>'], '    a\n\n    a', 4),
    ]
)
def test_greater_cmd_in_vline(vim_bot, text, cmd_list, text_expected, cursor_pos):
    """Test the > command (indent) in v-line mode."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    cmd_line = vim.get_focus_widget()
    for cmd in cmd_list:
        qtbot.keyClicks(cmd_line, cmd)
    assert cmd_line.text() == ""
    assert editor.toPlainText() == text_expected
    assert editor.textCursor().position() == cursor_pos
    assert vim.vim_cmd.vim_status.get_pos_start_in_selection() is None
@pytest.mark.parametrize(
    "text, cmd_list, text_expected, cursor_pos",
    [
        ('', ['V', '<'], '', 0),
        ('    abcde', ['2l', 'V', '<'], 'abcde', 0),
        ('        abcde\na', ['V', '<'], '    abcde\na', 1),
        ('    a\n\n    a', ['V', '2j', '<'], 'a\n\na', 0),
    ]
)
def test_less_cmd_in_vline(vim_bot, text, cmd_list, text_expected, cursor_pos):
    """Unindent the V-line selection with < and verify text and cursor."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystrokes in cmd_list:
        qtbot.keyClicks(command_line, keystrokes)
    vim_status = vim.vim_cmd.vim_status
    assert command_line.text() == ""
    assert editor.toPlainText() == text_expected
    assert editor.textCursor().position() == cursor_pos
    assert vim_status.get_pos_start_in_selection() is None
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, register_name, text_yanked",
    [
        ('a', ['V', 'y'], 0, '"', 'a\n'),
        ('abcd', ['V', '"', '0', 'y'], 0, '0', 'abcd\n'),
        ('abcd\ne', ['V', 'j', '"', 'a', 'y'], 0, 'a', 'abcd\ne\n'),
    ]
)
def test_y_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, register_name,
                        text_yanked):
    """Yank whole lines with y in V-line mode into the selected register."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystrokes in cmd_list:
        qtbot.keyClicks(command_line, keystrokes)
    vim_status = vim.vim_cmd.vim_status
    register = vim_status.register_dict[register_name]
    assert command_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert register.content == text_yanked
    assert register.type == VimState.VLINE
    assert vim_status.get_pos_start_in_selection() is None
    # An unnamed-register yank is mirrored into register 0 as well.
    if register_name == '"':
        assert vim_status.register_dict['0'].content == text_yanked
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, text_expected",
    [
        ('ak', ['V', 'p'], 0, ''),
        ('ak', ['v', 'l', 'y', 'V', 'p'], 0, 'ak'),
        ('ak', ['v', 'l', 'y', 'V', 'P'], 0, 'ak'),
        ('ak', ['v', 'l', 'y', 'V', '2p'], 0, 'ak\nak'),
        ('ab\n\ncd\n', ['v', 'j', 'y', '2j', 'V', '2p'], 4, 'ab\n\nab\n\n\nab\n\n\n'),
        ('ab\ncd\nef\ngh\n', ['V', 'j', 'y', '2j', 'V', 'p'], 6, 'ab\ncd\nab\ncd\ngh\n'),
        ('ab\ncd\nef\ngh\n', ['V', 'j', 'y', '2j', 'V', '2p'], 6, 'ab\ncd\nab\ncd\nab\ncd\ngh\n'),
    ]
)
def test_p_P_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, text_expected):
    """Paste over the V-line selection with p/P and verify the result."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystrokes in cmd_list:
        qtbot.keyClicks(command_line, keystrokes)
    vim_status = vim.vim_cmd.vim_status
    assert command_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert editor.toPlainText() == text_expected
    assert vim_status.get_pos_start_in_selection() is None
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, text_expected, reg_name, text_yanked",
    [
        ('ab', ['V', 'd'], 0, '', '"', 'ab\n'),
        (' ab\n cd\n ef', ['j', 'V', 'd'], 5, ' ab\n ef', '"', ' cd\n'),
        (' ab\n cd\n', ['V', 'G', 'd'], 0, '', '"', ' ab\n cd\n\n'),
        (' ab\n cd\n ef', ['2j', 'V', 'd'], 5, ' ab\n cd', '"', ' ef\n'),
        (' ab\n cd\n ef', ['2j', 'V', 'k', 'd'], 1, ' ab', '"', ' cd\n ef\n'),
        (' ab\n cd', ['$', 'V', 'd'], 1, ' cd', '"', ' ab\n'),
    ]
)
def test_d_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, text_expected, reg_name, text_yanked):
    """Delete whole lines with d in V-line mode and check text and register."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystrokes in cmd_list:
        qtbot.keyClicks(command_line, keystrokes)
    vim_status = vim.vim_cmd.vim_status
    register = vim_status.register_dict[reg_name]
    assert command_line.text() == ""
    assert editor.toPlainText() == text_expected
    assert register.content == text_yanked
    assert editor.textCursor().position() == cursor_pos
    assert vim_status.get_pos_start_in_selection() is None
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, text_expected, reg_name, text_yanked",
    [
        ('ab', ['V', 'x'], 0, '', '"', 'ab\n'),
        (' ab\n cd\n ef', ['j', 'V', 'x'], 5, ' ab\n ef', '"', ' cd\n'),
        (' ab\n cd\n', ['V', 'G', 'x'], 0, '', '"', ' ab\n cd\n\n'),
        (' ab\n cd\n ef', ['2j', 'V', 'x'], 5, ' ab\n cd', '"', ' ef\n'),
        (' ab\n cd\n ef', ['2j', 'V', 'k', 'x'], 1, ' ab', '"', ' cd\n ef\n'),
        (' ab\n cd', ['$', 'V', 'x'], 1, ' cd', '"', ' ab\n'),
    ]
)
def test_x_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, text_expected, reg_name, text_yanked):
    """Delete whole lines with x in V-line mode and check text and register."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystrokes in cmd_list:
        qtbot.keyClicks(command_line, keystrokes)
    vim_status = vim.vim_cmd.vim_status
    register = vim_status.register_dict[reg_name]
    assert command_line.text() == ""
    assert editor.toPlainText() == text_expected
    assert register.content == text_yanked
    assert editor.textCursor().position() == cursor_pos
    assert vim_status.get_pos_start_in_selection() is None
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, text_expected, reg_name, text_yanked",
    [
        ('ab', ['V', 'c'], 0, '', '"', 'ab\n'),
        (' ab\n cd\n ef', ['j', 'V', 'c'], 4, ' ab\n\n ef', '"', ' cd\n'),
        (' ab\n cd\n', ['V', 'G', 'c'], 0, '', '"', ' ab\n cd\n\n'),
        (' ab\n cd\n ef', ['2j', 'V', 'c'], 8, ' ab\n cd\n', '"', ' ef\n'),
        (' ab\n cd\n ef', ['2j', 'V', 'k', 'c'], 4, ' ab\n', '"', ' cd\n ef\n'),
        (' ab\n cd', ['$', 'V', 'c'], 0, '\n cd', '"', ' ab\n'),
    ]
)
def test_c_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, text_expected, reg_name, text_yanked):
    """Change whole lines with c in V-line mode and check text and register."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystrokes in cmd_list:
        qtbot.keyClicks(command_line, keystrokes)
    vim_status = vim.vim_cmd.vim_status
    register = vim_status.register_dict[reg_name]
    assert command_line.text() == ""
    assert editor.toPlainText() == text_expected
    assert register.content == text_yanked
    assert editor.textCursor().position() == cursor_pos
    assert vim_status.get_pos_start_in_selection() is None
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, text_expected, reg_name, text_yanked",
    [
        ('ab', ['V', 's'], 0, '', '"', 'ab\n'),
        (' ab\n cd\n ef', ['j', 'V', 's'], 4, ' ab\n\n ef', '"', ' cd\n'),
        (' ab\n cd\n', ['V', 'G', 's'], 0, '', '"', ' ab\n cd\n\n'),
        (' ab\n cd\n ef', ['2j', 'V', 's'], 8, ' ab\n cd\n', '"', ' ef\n'),
        (' ab\n cd\n ef', ['2j', 'V', 'k', 's'], 4, ' ab\n', '"', ' cd\n ef\n'),
        (' ab\n cd', ['$', 'V', 's'], 0, '\n cd', '"', ' ab\n'),
    ]
)
def test_s_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, text_expected, reg_name, text_yanked):
    """Substitute whole lines with s in V-line mode (same effect as c)."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystrokes in cmd_list:
        qtbot.keyClicks(command_line, keystrokes)
    vim_status = vim.vim_cmd.vim_status
    register = vim_status.register_dict[reg_name]
    assert command_line.text() == ""
    assert editor.toPlainText() == text_expected
    assert register.content == text_yanked
    assert editor.textCursor().position() == cursor_pos
    assert vim_status.get_pos_start_in_selection() is None
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        ('a', ['V', '/', 'b', '\r'], 0, [0, 1]),
        ('a', ['V', '/', 'b', '\r', 'n'], 0, [0, 1]),
        (' dhr\n dhrwodn\n\ndhrwodn\n dhrwodn', ['V', '/', 'd', 'h', 'r', Qt.Key_Enter], 1, [0, 4]),
        (' dhr\n dhrwodn\n\ndhrwodn\n dhrwodn', ['V', '/', 'd', 'h', 'r', Qt.Key_Enter, 'n'], 7, [0, 14]),
        (' dhr\n dhrwodn\n\ndhrwodn\n dhrwodn', ['V', '/', 'd', 'h', 'r', Qt.Key_Return, 'n', 'N'], 1, [0, 4]),
    ]
)
def test_search_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Search with / in V-line mode and check the extended line selection."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystrokes in cmd_list:
        # Plain strings are typed; Qt.Key_* values need a raw key press.
        send = qtbot.keyClicks if isinstance(keystrokes, str) else qtbot.keyPress
        send(command_line, keystrokes)
    selection = editor.get_extra_selections("vim_selection")[0]
    actual_sel = [selection.cursor.selectionStart(), selection.cursor.selectionEnd()]
    assert command_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert actual_sel == sel_pos
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        ("01 34", ['V', Qt.Key_Space], 1, [0, 5]),
    ]
)
def test_space_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Move right with the space key in V-line mode and check the selection."""
    _, _, editor, vim, qtbot = vim_bot
    # Rebind the leader key away from space so the space key acts as motion.
    CONF.set(CONF_SECTION, 'leader_key', 'F1')
    vim.apply_plugin_settings("")
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystrokes in cmd_list:
        # Plain strings are typed; Qt.Key_* values need a raw key press.
        send = qtbot.keyClicks if isinstance(keystrokes, str) else qtbot.keyPress
        send(command_line, keystrokes)
    selection = editor.get_extra_selections("vim_selection")[0]
    actual_sel = [selection.cursor.selectionStart(), selection.cursor.selectionEnd()]
    assert command_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert actual_sel == sel_pos
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        ("01 34", ['2l', 'V', Qt.Key_Backspace], 1, [0, 5]),
    ]
)
def test_backspace_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Move left with backspace in V-line mode and check the selection."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystrokes in cmd_list:
        # Plain strings are typed; Qt.Key_* values need a raw key press.
        send = qtbot.keyClicks if isinstance(keystrokes, str) else qtbot.keyPress
        send(command_line, keystrokes)
    selection = editor.get_extra_selections("vim_selection")[0]
    actual_sel = [selection.cursor.selectionStart(), selection.cursor.selectionEnd()]
    assert command_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert actual_sel == sel_pos
@pytest.mark.parametrize(
    "text, cmd_list, cursor_pos, sel_pos",
    [
        ("01 34\n  kj", ['V', Qt.Key_Enter], 9, [0, 11]),
    ]
)
def test_enter_cmd_in_vline(vim_bot, text, cmd_list, cursor_pos, sel_pos):
    """Move down with enter in V-line mode and check the selection."""
    _, _, editor, vim, qtbot = vim_bot
    editor.set_text(text)
    command_line = vim.get_focus_widget()
    for keystrokes in cmd_list:
        # Plain strings are typed; Qt.Key_* values need a raw key press.
        send = qtbot.keyClicks if isinstance(keystrokes, str) else qtbot.keyPress
        send(command_line, keystrokes)
    selection = editor.get_extra_selections("vim_selection")[0]
    actual_sel = [selection.cursor.selectionStart(), selection.cursor.selectionEnd()]
    assert command_line.text() == ""
    assert editor.textCursor().position() == cursor_pos
    assert actual_sel == sel_pos
| 33.085685
| 114
| 0.567716
| 4,762
| 32,821
| 3.669467
| 0.042209
| 0.046068
| 0.046584
| 0.052535
| 0.934817
| 0.924974
| 0.912842
| 0.903342
| 0.893613
| 0.888406
| 0
| 0.022068
| 0.224064
| 32,821
| 991
| 115
| 33.119072
| 0.664075
| 0.03123
| 0
| 0.58634
| 0
| 0
| 0.144358
| 0.001582
| 0
| 0
| 0
| 0
| 0.17268
| 1
| 0.04768
| false
| 0
| 0.036082
| 0
| 0.083763
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8ab9f14b63c1512623d4174984830a2a3e8c0592
| 6,203
|
py
|
Python
|
tests/test_allow_none_vs_default.py
|
berland/configsuite
|
9c1eaeed3610ffaa9e549a35dc2709da44633c75
|
[
"MIT"
] | null | null | null |
tests/test_allow_none_vs_default.py
|
berland/configsuite
|
9c1eaeed3610ffaa9e549a35dc2709da44633c75
|
[
"MIT"
] | null | null | null |
tests/test_allow_none_vs_default.py
|
berland/configsuite
|
9c1eaeed3610ffaa9e549a35dc2709da44633c75
|
[
"MIT"
] | null | null | null |
"""Copyright 2019 Equinor ASA and The Netherlands Organisation for
Applied Scientific Research TNO.
Licensed under the MIT license.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the conditions stated in the LICENSE file in the project root for
details.
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
"""
import unittest
import configsuite
from configsuite import MetaKeys as MK
from configsuite import types
class TestNotAllowNoneVsDefault(unittest.TestCase):
    """Exercise every combination of AllowNone, Default and Required for a
    single integer entry of a NamedDict schema."""

    @staticmethod
    def _schema(value_options):
        """Build a NamedDict schema whose single key ``my_value`` is an
        Integer extended with the given MK options."""
        value_schema = {MK.Type: types.Integer}
        value_schema.update(value_options)
        return {
            MK.Type: types.NamedDict,
            MK.Content: {"my_value": value_schema},
        }

    def test_allow_none_default_not_required(self):
        schema = self._schema({
            MK.AllowNone: True,
            MK.Required: False,
            MK.Default: 0,
        })
        # Explicit values (including None) are accepted verbatim.
        for value in (-1, 4, 1000, None):
            suite = configsuite.ConfigSuite({"my_value": value}, schema)
            self.assertTrue(suite.valid)
            self.assertEqual(value, suite.snapshot.my_value)
        # A missing value falls back to the default.
        suite = configsuite.ConfigSuite({}, schema)
        self.assertTrue(suite.valid)
        self.assertEqual(0, suite.snapshot.my_value)

    def test_allow_none_no_default_not_required(self):
        schema = self._schema({
            MK.AllowNone: True,
            MK.Required: False,
        })
        # Explicit values (including None) are accepted verbatim.
        for value in (-1, 4, 1000, None):
            suite = configsuite.ConfigSuite({"my_value": value}, schema)
            self.assertTrue(suite.valid)
            self.assertEqual(value, suite.snapshot.my_value)
        # Without a default, a missing value snapshots as None.
        suite = configsuite.ConfigSuite({}, schema)
        self.assertTrue(suite.valid)
        self.assertEqual(None, suite.snapshot.my_value)

    def test_disallow_none_default_not_required(self):
        schema = self._schema({
            MK.AllowNone: False,
            MK.Default: 0,
            MK.Required: False,
        })
        for value in (-1, 4, 1000):
            suite = configsuite.ConfigSuite({"my_value": value}, schema)
            self.assertTrue(suite.valid, suite.errors)
            self.assertEqual(value, suite.snapshot.my_value)
        # A missing value still falls back to the default.
        suite = configsuite.ConfigSuite({}, schema)
        self.assertTrue(suite.valid)
        self.assertEqual(0, suite.snapshot.my_value)
        # An explicit None is rejected when AllowNone is False.
        suite = configsuite.ConfigSuite({"my_value": None}, schema)
        self.assertFalse(suite.valid)
        self.assertEqual(None, suite.snapshot.my_value)

    def test_disallow_none_no_default_not_required(self):
        schema = self._schema({
            MK.AllowNone: False,
            MK.Required: False,
        })
        # Not-required without AllowNone or a Default is an invalid schema.
        with self.assertRaises(ValueError) as error_context:
            configsuite.ConfigSuite({}, schema)
        self.assertIn("A type is not required only if", str(error_context.exception))

    def test_allow_none_default_required(self):
        schema = self._schema({
            MK.AllowNone: True,
            MK.Default: 0,
            MK.Required: True,
        })
        # A required entry must not carry a Default.
        with self.assertRaises(ValueError) as error_context:
            configsuite.ConfigSuite({}, schema)
        self.assertIn("Required can not have Default", str(error_context.exception))

    def test_allow_none_no_default_required(self):
        schema = self._schema({
            MK.AllowNone: True,
            MK.Required: True,
        })
        # Required and AllowNone together make an invalid schema.
        with self.assertRaises(ValueError) as error_context:
            configsuite.ConfigSuite({}, schema)
        self.assertIn("A type is not required only if", str(error_context.exception))

    def test_disallow_none_default_required(self):
        schema = self._schema({
            MK.AllowNone: False,
            MK.Default: 0,
            MK.Required: True,
        })
        # A required entry must not carry a Default.
        with self.assertRaises(ValueError) as error_context:
            configsuite.ConfigSuite({}, schema)
        self.assertIn("Required can not have Default", str(error_context.exception))

    def test_disallow_none_no_default_required(self):
        schema = self._schema({
            MK.AllowNone: False,
            MK.Required: True,
        })
        for value in (-1, 4, 1000):
            suite = configsuite.ConfigSuite({"my_value": value}, schema)
            self.assertTrue(suite.valid)
            self.assertEqual(value, suite.snapshot.my_value)
        # Both a missing value and an explicit None are invalid.
        for config in ({}, {"my_value": None}):
            suite = configsuite.ConfigSuite(config, schema)
            self.assertFalse(suite.valid)
            self.assertEqual(None, suite.snapshot.my_value)
| 33.52973
| 85
| 0.555376
| 634
| 6,203
| 5.321767
| 0.200315
| 0.047718
| 0.052164
| 0.053349
| 0.75489
| 0.749259
| 0.745702
| 0.7377
| 0.729994
| 0.728512
| 0
| 0.008462
| 0.352249
| 6,203
| 184
| 86
| 33.711957
| 0.831259
| 0.119297
| 0
| 0.70922
| 0
| 0
| 0.042155
| 0
| 0
| 0
| 0
| 0
| 0.184397
| 1
| 0.056738
| false
| 0
| 0.028369
| 0
| 0.092199
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0a4b790740635a745b557ad1246e09a332f4bca0
| 42
|
py
|
Python
|
authserver/__init__.py
|
brighthive/authserver
|
848201324761269bc96b75ad9cb5242e2a6ee5a5
|
[
"MIT"
] | 3
|
2019-07-31T16:10:26.000Z
|
2021-05-14T20:06:07.000Z
|
authserver/__init__.py
|
brighthive/authserver
|
848201324761269bc96b75ad9cb5242e2a6ee5a5
|
[
"MIT"
] | 25
|
2019-08-20T20:19:59.000Z
|
2021-05-14T19:06:41.000Z
|
authserver/__init__.py
|
brighthive/authserver
|
848201324761269bc96b75ad9cb5242e2a6ee5a5
|
[
"MIT"
] | 1
|
2020-04-29T18:18:21.000Z
|
2020-04-29T18:18:21.000Z
|
from authserver.app.app import create_app
| 21
| 41
| 0.857143
| 7
| 42
| 5
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.921053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0a66a7b680d1ab194e196f03b529f197aaf33683
| 127
|
py
|
Python
|
backend/tasks/admin.py
|
mnieber/taskboard
|
7925342751e2782bd0a0258eb2d43d9ec90ce9d8
|
[
"MIT"
] | null | null | null |
backend/tasks/admin.py
|
mnieber/taskboard
|
7925342751e2782bd0a0258eb2d43d9ec90ce9d8
|
[
"MIT"
] | null | null | null |
backend/tasks/admin.py
|
mnieber/taskboard
|
7925342751e2782bd0a0258eb2d43d9ec90ce9d8
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Task
@admin.register(Task)
class TaskAdmin(admin.ModelAdmin):
    """Expose the Task model in the Django admin with stock ModelAdmin behaviour."""
    pass
| 14.111111
| 34
| 0.771654
| 17
| 127
| 5.764706
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149606
| 127
| 8
| 35
| 15.875
| 0.907407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
6a5f23092e7bc29cfdaebc3d22e6089cf4da86de
| 14,887
|
py
|
Python
|
03/batch_myotis_melt_runs.py
|
npaulat/teava
|
ba0a02b2ce85a7d082e5c8a6bf7b90e98ef3418d
|
[
"MIT"
] | 1
|
2021-11-14T15:26:32.000Z
|
2021-11-14T15:26:32.000Z
|
03/batch_myotis_melt_runs.py
|
npaulat/teava
|
ba0a02b2ce85a7d082e5c8a6bf7b90e98ef3418d
|
[
"MIT"
] | null | null | null |
03/batch_myotis_melt_runs.py
|
npaulat/teava
|
ba0a02b2ce85a7d082e5c8a6bf7b90e98ef3418d
|
[
"MIT"
] | 1
|
2021-03-04T19:22:35.000Z
|
2021-03-04T19:22:35.000Z
|
import sys
import os
import argparse
import itertools
import subprocess
import fnmatch
from Bio import SeqIO
def get_args():
    """Parse the command line for a MELT batch run.

    Returns the 8-tuple
    (TE_LIST, FASTA_LIST, ZIP_LIST, REF, RATE, PROC, OUTDIR, QUEUE)
    where RATE defaults to 5 and PROC to 12.
    """
    parser = argparse.ArgumentParser(
        description="Batch script generator for MELT runs given a specific max MEI mutation rate",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-tl', '--telist', type=str, help='Path to list of TEs to analyze with MELT', required=True)
    parser.add_argument('-fl', '--falist', type=str, help='Path to list of TE FASTA file names (file basenames) to analyze with MELT, must be in same order as TE list', required=True)
    parser.add_argument('-zl', '--ziplist', type=str, help='Path to list of TE ZIP file names (basenames) to analyze with MELT, must be in same order as TE list', required=True)
    parser.add_argument('-r', '--referencegenome', type=str, help='Path to the reference genome', required=True)
    parser.add_argument('-m', '--rate', type=int, help="Maximum mutation rate for each TE MEI ZIP file to be made", default=5)
    parser.add_argument('-np', '--proc', type=int, help='Number of cores to use for multithreaded applications', default=12)
    parser.add_argument('-od', '--outdir', type=str, help='Location of directory for output files', required=True)
    parser.add_argument('-q', '--queue', type=str, help='quanah or hrothgar', required=True)
    args = parser.parse_args()
    return (args.telist, args.falist, args.ziplist, args.referencegenome,
            args.rate, args.proc, args.outdir, args.queue)
# Parse the CLI once at import time; these module-level names are read by all
# of the script-generation code below.
TE_LIST, FASTA_LIST, ZIP_LIST, REF, RATE, PROC, OUTDIR, QUEUE = get_args()
#argument sanity checks
#if not args.telist:
#	sys.exit('You must provide a TE list for the genome you are analyzing')
# Echo the run configuration for the log.
print('The TE list is ' + TE_LIST +'.')
print('The TE FASTA file list is ' + FASTA_LIST +'.')
print('The TE ZIP file list is ' + ZIP_LIST +'.')
print('The reference genome is ' + REF + '.')
print('The max mutation rate for MEI ZIP file is ' + str(RATE) + ' mutations in 100 bp.')
print('Use ' + str(PROC) + ' processors.')
print('The output directory is ' + OUTDIR + '.')
print('The queue is ' + QUEUE + '.')
# Output root for this mutation rate, e.g. <outdir>/mut5, created on demand.
MUT_RATE = 'mut' + str(RATE)
MUT_RATE_DIR = os.path.join(OUTDIR, MUT_RATE)
if not os.path.exists(MUT_RATE_DIR):
    print(MUT_RATE_DIR + ' does not exist. Make it.')
    os.mkdir(MUT_RATE_DIR)
else:
    print(MUT_RATE_DIR + ' exists.')
# Batch submission script collecting every qsub line, and the name of the
# MELT-DELETION qsub script written below.
JOBSSUBMISSON = MUT_RATE + '_jobs_submission_all_' + QUEUE + '.sh'
QSUB1FILENAME = MUT_RATE + '_DEL_qsub_' + QUEUE + '.sh'
# Subdirectory for the MELT Deletion workflow, created on demand.
DEL_DIR = 'del'
MUT_RATE_DEL_DIR = os.path.join(MUT_RATE_DIR, DEL_DIR)
if not os.path.exists(MUT_RATE_DEL_DIR):
    print(MUT_RATE_DEL_DIR + ' does not exist. Make it.')
    os.mkdir(MUT_RATE_DEL_DIR)
else:
    print(MUT_RATE_DEL_DIR + ' exists.')
# The hrothgar and quanah branches previously duplicated ~45 f.write lines
# verbatim, differing only in the SGE queue (-q) and project (-P) values;
# that duplication already caused drift elsewhere in this script, so the
# writer is factored into a single function parameterized on those two values.
def _write_deletion_qsub(sge_queue, sge_project):
    """Write the MELT-DELETION qsub script (QSUB1FILENAME) for the given SGE
    queue/project pair and append its qsub line to JOBSSUBMISSON.

    Reads the module-level settings REF, RATE, PROC, QUEUE, MUT_RATE and
    MUT_RATE_DIR; the generated shell script runs Deletion-Genotype over each
    sample BAM and then Deletion-Merge into the final VCF.
    """
    with open(QSUB1FILENAME, 'w') as f:
        # SGE job header.
        f.write('#!/bin/sh' + '\n')
        f.write('#$ -V' + '\n')
        f.write('#$ -cwd' + '\n')
        f.write('#$ -S /bin/bash' + '\n')
        f.write('#$ -N ' + MUT_RATE + '_DEL' + '\n')
        f.write('#$ -o $JOB_NAME.o$JOB_ID' + '\n')
        f.write('#$ -e $JOB_NAME.e$JOB_ID' + '\n')
        f.write('#$ -q ' + sge_queue + '\n')
        f.write('#$ -pe sm ' + str(PROC) + '\n')
        f.write('#$ -P ' + sge_project + '\n')
        f.write('\n')
        # Run configuration recorded as comments in the generated script.
        f.write('#The reference genome is ' + REF + '.\n')
        f.write('#The max mutation rate for MEI ZIP file is ' + str(RATE) + ' mutations in 100 bp.\n')
        f.write('#Use ' + str(PROC) + ' processors.\n')
        f.write('#The output directory is ' + MUT_RATE_DIR + '/del.\n')
        f.write('#The queue is ' + QUEUE + '.\n')
        f.write('\n')
        f.write('module load intel/18.0.3.222 impi/2018.3.222 java/1.8.0 bowtie2/2.3.4 samtools/1.9\n')
        f.write('\n')
        f.write('##perl is built-in; perl5v16.3\n')
        f.write('\n')
        f.write('MELT_HOME=/lustre/work/npaulat/MELTv2.1.5\n')
        f.write('WORKDIR=' + MUT_RATE_DIR + '/del\n')
        f.write('FILEDIR=/lustre/scratch/npaulat/MELT/combined_references\n')
        f.write('\n')
        f.write('echo "Run MELT-DELETION."\n')
        f.write('\n')
        f.write('cd $WORKDIR\n')
        f.write('\n')
        # Genotype each sample BAM against the shared TE BED.
        f.write('echo "Begin Deletion-Genotype."\n')
        f.write('for i in mAustroriparius mBrandtii mCiliolabrum mDavidii mOccultus mSeptentrionalis_TTU mSeptentrionalis_USDA mThysanodes mVelifer mVivesi mYumanensis; do java -Xmx2G -jar $MELT_HOME/MELT.jar Deletion-Genotype -w $WORKDIR -bamfile $FILEDIR/bams/${i}_paired.sorted.bam -bed $FILEDIR/beds/all_TEs_filtered.bed -h ' + REF + '; done\n')
        f.write('\n')
        f.write('readlink -f $WORKDIR/*.tsv > $WORKDIR/del_list.txt\n')
        f.write('\n')
        f.write('echo "Made list of deletion.tsv (full path) files to merge into final Deletion VCF."\n')
        f.write('\n')
        # Merge the per-sample genotype TSVs into one VCF.
        f.write('echo "Begin Deletion-Merge."\n')
        f.write('java -Xmx2G -jar $MELT_HOME/MELT.jar Deletion-Merge -bed $FILEDIR/beds/all_TEs_filtered.bed -mergelist $WORKDIR/del_list.txt -h ' + REF + ' -o $WORKDIR\n')
        f.write('\n')
        f.write('echo "' + MUT_RATE + ' MELT-DELETION run completed."\n')
    # Register the qsub command in the batch submission script.
    with open(JOBSSUBMISSON, 'a+') as g:
        g.write('qsub ' + QSUB1FILENAME + '\n')


if QUEUE == 'hrothgar':
    _write_deletion_qsub('Chewie', 'communitycluster')
elif QUEUE == 'quanah':
    _write_deletion_qsub('omni', 'quanah')
else:
    print('Bad queue choice. Your only choices are hrothgar and quanah.')
# Load the space-separated TE names, FASTA basenames, and ZIP basenames.
# The three files must list entries in the same order so the zip() below
# pairs them correctly.
#with open("/lustre/scratch/npaulat/MELTv2.1.5/references/te_list_may.txt", "r") as d:
with open(TE_LIST, "r") as d:
    TES = d.read().split(" ")
#with open("/lustre/scratch/npaulat/MELTv2.1.5/references/te_fasta_names_may.txt", "r") as d:
with open(FASTA_LIST, "r") as d:
    FASTAS = d.read().split(" ")
#with open("/lustre/scratch/npaulat/MELTv2.1.5/references/zip_te_names_may.txt", "r") as d:
with open(ZIP_LIST, "r") as d:
    ZIPS = d.read().split(" ")
#for TE in TES:
for TE, FASTA, ZIP in zip(TES, FASTAS, ZIPS):
MUT_RATE_TE_DIR = os.path.join(MUT_RATE_DIR, ZIP)
if not os.path.exists(MUT_RATE_TE_DIR):
print(MUT_RATE_TE_DIR + ' does not exist. Make it.')
os.mkdir(MUT_RATE_TE_DIR)
else:
print(MUT_RATE_TE_DIR + ' exists.')
#Create individual TE MEI ZIPs and MELT-SPLIT run qsubs, and the batch submission script
QSUB2FILENAME = MUT_RATE + '_' + ZIP + '_SPLIT_qsub_' + QUEUE + '.sh'
#For Hrothgar queue.
if QUEUE == 'hrothgar':
with open(QSUB2FILENAME, 'w') as h:
h.write('#!/bin/sh' + '\n')
h.write('#$ -V' + '\n')
h.write('#$ -cwd' + '\n')
h.write('#$ -S /bin/bash' + '\n')
h.write('#$ -N ' + ZIP + '_' + str(RATE) + '_SPLIT' + '\n')
h.write('#$ -o $JOB_NAME.o$JOB_ID' + '\n')
h.write('#$ -e $JOB_NAME.e$JOB_ID' + '\n')
h.write('#$ -q Chewie' + '\n')
h.write('#$ -pe sm ' + str(PROC) + '\n')
h.write('#$ -P communitycluster' + '\n')
h.write('\n')
h.write('module load intel/18.0.3.222 impi/2018.3.222 java/1.8.0 bowtie2/2.3.4 samtools/1.9\n')
h.write('\n')
h.write('##perl is built-in; perl5v16.3\n')
h.write('\n')
h.write('MELT_HOME=/lustre/work/npaulat/MELTv2.1.5\n')
h.write('WORKDIR=' + MUT_RATE_DIR + '\n')
#h.write('FILEDIR=/lustre/scratch/npaulat/MELTv2.1.5/combined_references\n')
h.write('FILEDIR=/lustre/scratch/npaulat/MELT/combined_references\n')
# h.write('\n')
# h.write('cd $FILEDIR/zips\n')
h.write('\n')
h.write('ZIP_NAME=' + ZIP + 'm' + str(RATE) + '\n')
h.write('ZIP_FILE=$ZIP_NAME"_MELT.zip"\n')
h.write('\n')
# h.write('echo "Create ' + ZIP + ' MEI ZIP with mutation rate max of ' + str(RATE) + ' reference file."\n')
# h.write('java -Xmx1G -jar $MELT_HOME/MELT.jar BuildTransposonZIP $FILEDIR/fastas/' + FASTA + '.fa' + ' $FILEDIR/beds/' + TE + '.bed $ZIP_NAME ' + str(RATE) + '\n')
# h.write('\n')
h.write('cd $WORKDIR\n')
h.write('\n')
h.write('#=== ' + TE + ' discovery\n')
h.write('echo "Begin IndivAnalysis."\n')
h.write('for i in mAustroriparius mBrandtii mCiliolabrum mDavidii mOccultus mSeptentrionalis_TTU mSeptentrionalis_USDA mThysanodes mVelifer mVivesi mYumanensis; do java -Xmx6G -jar $MELT_HOME/MELT.jar IndivAnalysis -w $WORKDIR/' + ZIP + ' -bamfile $FILEDIR/bams/${i}_paired.sorted.bam -c 14 -h ' + REF + ' -t $FILEDIR/zips/$ZIP_FILE -r 150; done\n')
h.write('\n')
h.write('echo "Begin GroupAnalysis."\n')
h.write('java -Xmx6G -jar $MELT_HOME/MELT.jar GroupAnalysis -discoverydir $WORKDIR/' + ZIP + ' -h ' + REF + ' -n $FILEDIR/mMyo_empty_annot.bed -t $FILEDIR/zips/$ZIP_FILE -w $WORKDIR/' + ZIP + ' -r 150\n')
h.write('\n')
h.write('echo "Begin Genotyping."\n')
h.write('for i in mAustroriparius mBrandtii mCiliolabrum mDavidii mOccultus mSeptentrionalis_TTU mSeptentrionalis_USDA mThysanodes mVelifer mVivesi mYumanensis; do java -Xmx6G -jar $MELT_HOME/MELT.jar Genotype -w $WORKDIR/' + ZIP + ' -bamfile $FILEDIR/bams/${i}_paired.sorted.bam -h ' + REF + ' -t $FILEDIR/zips/$ZIP_FILE -p $WORKDIR/' + ZIP + '; done\n')
h.write('\n')
h.write('echo "Generate mei list from .tsv files."\n')
h.write('ls $WORKDIR/' + ZIP + '/*.tsv > $WORKDIR/' + ZIP + '/mei_list.txt\n')
h.write('\n')
h.write('echo "Begin MakeVCF."\n')
h.write('java -Xmx6G -jar $MELT_HOME/MELT.jar MakeVCF -genotypingdir $WORKDIR/' + ZIP + ' -h ' + REF + ' -t $FILEDIR/zips/$ZIP_FILE -w $WORKDIR/' + ZIP + ' -p $WORKDIR/' + ZIP + '\n')
h.write('\n')
h.write('echo "' + TE + ' MELT-SPLIT run completed."\n')
with open(JOBSSUBMISSON, 'a+') as g:
g.write('qsub ' + QSUB2FILENAME + '\n')
#For Quanah queue.
elif QUEUE == 'quanah':
with open(QSUB2FILENAME, 'w') as h:
h.write('#!/bin/sh' + '\n')
h.write('#$ -V' + '\n')
h.write('#$ -cwd' + '\n')
h.write('#$ -S /bin/bash' + '\n')
h.write('#$ -N ' + TE + '_' + str(RATE) + '_SPLIT' + '\n')
h.write('#$ -o $JOB_NAME.o$JOB_ID' + '\n')
h.write('#$ -e $JOB_NAME.e$JOB_ID' + '\n')
h.write('#$ -q omni' + '\n')
h.write('#$ -pe sm ' + str(PROC) + '\n')
h.write('#$ -P quanah' + '\n')
h.write('\n')
h.write('module load intel/18.0.3.222 impi/2018.3.222 java/1.8.0 bowtie2/2.3.4 samtools/1.9\n')
h.write('\n')
h.write('##perl is built-in; perl5v16.3\n')
h.write('\n')
h.write('MELT_HOME=/lustre/work/npaulat/MELTv2.1.5\n')
h.write('WORKDIR=' + MUT_RATE_DIR + '\n')
#h.write('FILEDIR=/lustre/scratch/npaulat/MELTv2.1.5/combined_references\n')
h.write('FILEDIR=/lustre/scratch/npaulat/MELT/combined_references\n')
# h.write('\n')
# h.write('cd $FILEDIR/zips\n')
h.write('\n')
h.write('ZIP_NAME=' + ZIP + 'm' + str(RATE) + '\n')
h.write('ZIP_FILE=$ZIP_NAME"_MELT.zip"\n')
h.write('\n')
# h.write('echo "Create ' + ZIP + ' MEI ZIP with mutation rate max of ' + str(RATE) + ' reference file."\n')
# h.write('java -Xmx1G -jar $MELT_HOME/MELT.jar BuildTransposonZIP $FILEDIR/fastas/' + FASTA + '.fa' + ' $FILEDIR/beds/' + TE + '.bed $ZIP_NAME ' + str(RATE) + '\n')
# h.write('\n')
h.write('cd $WORKDIR\n')
h.write('\n')
h.write('#=== ' + TE + ' discovery\n')
h.write('echo "Begin IndivAnalysis."\n')
h.write('for i in mAustroriparius mBrandtii mCiliolabrum mDavidii mOccultus mSeptentrionalis_TTU mSeptentrionalis_USDA mThysanodes mVelifer mVivesi mYumanensis; do java -Xmx6G -jar $MELT_HOME/MELT.jar IndivAnalysis -w $WORKDIR/' + ZIP + ' -bamfile $FILEDIR/bams/${i}_paired.sorted.bam -c 14 -h ' + REF + ' -t $FILEDIR/zips/$ZIP_FILE -r 150; done\n')
h.write('\n')
h.write('echo "Begin GroupAnalysis."\n')
h.write('java -Xmx6G -jar $MELT_HOME/MELT.jar GroupAnalysis -discoverydir $WORKDIR/' + ZIP + ' -h $FILEDIR/myoLuc2.fa -n $FILEDIR/mMyo_empty_annot.bed -t $FILEDIR/zips/$ZIP_FILE -w $WORKDIR/' + ZIP + ' -r 150\n')
h.write('\n')
h.write('echo "Begin Genotyping."\n')
h.write('for i in mAustroriparius mBrandtii mCiliolabrum mDavidii mOccultus mSeptentrionalis_TTU mSeptentrionalis_USDA mThysanodes mVelifer mVivesi mYumanensis; do java -Xmx6G -jar $MELT_HOME/MELT.jar Genotype -w $WORKDIR/' + ZIP + ' -bamfile $FILEDIR/bams/${i}_paired.sorted.bam -h ' + REF + ' -t $FILEDIR/zips/$ZIP_FILE -p $WORKDIR/' + ZIP + '; done\n')
h.write('\n')
h.write('echo "Generate mei list from .tsv files."\n')
h.write('ls $WORKDIR/' + ZIP + '/*.tsv > $WORKDIR/' + ZIP + '/mei_list.txt\n')
h.write('\n')
h.write('echo "Begin MakeVCF."\n')
h.write('java -Xmx6G -jar $MELT_HOME/MELT.jar MakeVCF -genotypingdir $WORKDIR/' + ZIP + ' -h ' + REF + ' -t $FILEDIR/zips/$ZIP_FILE -w $WORKDIR/' + ZIP + ' -p $WORKDIR/' + ZIP + '\n')
h.write('\n')
h.write('echo "' + TE + ' MELT-SPLIT run completed."\n')
with open(JOBSSUBMISSON, 'a+') as g:
g.write('qsub ' + QSUB2FILENAME + '\n')
else:
print('Bad queue choice. Your only choices are hrothgar and quanah.')
| 50.636054
| 358
| 0.647142
| 2,383
| 14,887
| 3.951742
| 0.111204
| 0.059892
| 0.068387
| 0.023787
| 0.821387
| 0.80068
| 0.80068
| 0.783795
| 0.767017
| 0.760858
| 0
| 0.015671
| 0.151273
| 14,887
| 293
| 359
| 50.808874
| 0.72964
| 0.106872
| 0
| 0.697581
| 0
| 0.072581
| 0.531012
| 0.100761
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004032
| false
| 0
| 0.028226
| 0
| 0.03629
| 0.064516
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6ab976a3fc3df0bdc5f6d3b543af5c949ab3e3d4
| 46
|
py
|
Python
|
statsmodels/tools/sm_exceptions.py
|
escheffel/statsmodels
|
bc70147c4c7ea00b6ac7256bbaf107902983c189
|
[
"BSD-3-Clause"
] | 2
|
2017-01-05T22:44:37.000Z
|
2018-04-26T08:34:00.000Z
|
statsmodels/tools/sm_exceptions.py
|
langmore/statsmodels
|
a29d0418436a9b38b11101f7741ce6cb35b9e2cd
|
[
"BSD-3-Clause"
] | null | null | null |
statsmodels/tools/sm_exceptions.py
|
langmore/statsmodels
|
a29d0418436a9b38b11101f7741ce6cb35b9e2cd
|
[
"BSD-3-Clause"
] | null | null | null |
class PerfectSeparationError(Exception): pass
| 23
| 45
| 0.869565
| 4
| 46
| 10
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065217
| 46
| 1
| 46
| 46
| 0.930233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
6ac4a18fd1870b5f6f285df0d7338d2776ae72e9
| 6,307
|
py
|
Python
|
tests/unit/opera/parser/tosca/v_1_3/test_condition_clause_definition.py
|
Legion2/xopera-opera
|
808f23cbac326b6d067e6ec531a0109ae02d0f5e
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/opera/parser/tosca/v_1_3/test_condition_clause_definition.py
|
Legion2/xopera-opera
|
808f23cbac326b6d067e6ec531a0109ae02d0f5e
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/opera/parser/tosca/v_1_3/test_condition_clause_definition.py
|
Legion2/xopera-opera
|
808f23cbac326b6d067e6ec531a0109ae02d0f5e
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from opera.error import ParseError
from opera.parser.tosca.v_1_3.condition_clause_definition import ConditionClauseDefinition
class TestParseValidate:
def test_valid_clause_direct_assertion(self, yaml_ast):
test_yaml = yaml_ast(
"""
my_attribute: [ { equal: 42 } ]
"""
)
ConditionClauseDefinition.parse(test_yaml)
ConditionClauseDefinition.validate(test_yaml)
def test_valid_clause_direct_assertion_list(self, yaml_ast):
test_yaml = yaml_ast(
"""
my_attribute: [ { min_length: 1 }, { min_length: 11 } ]
"""
)
ConditionClauseDefinition.parse(test_yaml)
ConditionClauseDefinition.validate(test_yaml)
def test_valid_clause_not(self, yaml_ast):
test_yaml = yaml_ast(
"""
not:
- my_attribute: [{equal: my_value}]
- my_other_attribute: [{equal: my_other_value}]
"""
)
ConditionClauseDefinition.parse(test_yaml)
ConditionClauseDefinition.validate(test_yaml)
def test_valid_clause_and(self, yaml_ast):
test_yaml = yaml_ast(
"""
and:
- my_attribute: [{equal: my_value}]
- my_other_attribute: [{equal: my_other_value}]
"""
)
ConditionClauseDefinition.parse(test_yaml)
ConditionClauseDefinition.validate(test_yaml)
def test_valid_clause_not_and(self, yaml_ast):
test_yaml = yaml_ast(
"""
not:
- and:
- my_attribute: [ { greater_than: 42 } ]
- my_other_attribute: [ { less_than: 1000 } ]
"""
)
ConditionClauseDefinition.parse(test_yaml)
ConditionClauseDefinition.validate(test_yaml)
def test_valid_clause_or(self, yaml_ast):
test_yaml = yaml_ast(
"""
or:
- my_attribute: [{equal: my_value}]
- my_other_attribute: [{equal: my_other_value}]
"""
)
ConditionClauseDefinition.parse(test_yaml)
ConditionClauseDefinition.validate(test_yaml)
def test_valid_clause_or_not(self, yaml_ast):
test_yaml = yaml_ast(
"""
or:
- not:
- my_attribute1: [{equal: value1}]
- not:
- my_attribute2: [{equal: value1}]
"""
)
ConditionClauseDefinition.parse(test_yaml)
ConditionClauseDefinition.validate(test_yaml)
def test_valid_clause_or_and_not(self, yaml_ast):
test_yaml = yaml_ast(
"""
or:
- and:
- protocol: { equal: http }
- port: { equal: 80 }
- and:
- protocol: { equal: https }
- port: { equal: 431 }
"""
)
ConditionClauseDefinition.parse(test_yaml)
ConditionClauseDefinition.validate(test_yaml)
def test_valid_clause_nested(self, yaml_ast):
test_yaml = yaml_ast(
"""
or:
- not:
- my_attribute1: [{equal: value1}]
- and:
- my_attribute2: { equal: value2 }
- and:
- my_attribute3: { equal: value3 }
- and:
- my_attribute4: { equal: value4 }
- my_attribute5: { equal: value5 }
- or:
- my_attribute6: { equal: value6 }
- my_attribute7: { equal: value7 }
- or:
- not:
- my_attribute8: { equal: value8 }
- my_attribute9: { equal: value9 }
- and:
- not:
- or:
- my_attribute10: { equal: value10 }
- my_attribute11: { equal: value11 }
- and:
- my_attribute12: { equal: value12 }
- my_attribute13: { equal: value13 }
"""
)
ConditionClauseDefinition.parse(test_yaml)
ConditionClauseDefinition.validate(test_yaml)
def test_invalid_clause_not(self, yaml_ast):
test_yaml = yaml_ast(
"""
nott:
- my_attribute: [{equal: my_value}]
- my_other_attribute: [{equal: my_other_value}]
"""
)
with pytest.raises(ParseError):
ConditionClauseDefinition.parse(test_yaml)
ConditionClauseDefinition.validate(test_yaml)
def test_invalid_clause_nested(self, yaml_ast):
test_yaml = yaml_ast(
"""
or:
- not:
- and:
- my_attribute2: { equals: value2 }
- and:
- my_attribute3: { equal: value3 }
- and:
- my_attribute4: { equal: value4 }
- my_attribute5: { equal: value5 }
- or:
- my_attribute6: { equal: value6 }
- my_attribute7: { equal: value7 }
- or:
- not:
- my_attribute8: { equal: value8 }
- my_attribute9: { equal: value9 }
- and:
- not:
- or:
- my_attribute10: { equal: value10 }
- my_attribute11: { equal: value11 }
- and:
- my_attribute12: { equal: value12 }
- my_attribute13: { equal: value13 }
"""
)
with pytest.raises(ParseError):
ConditionClauseDefinition.parse(test_yaml)
ConditionClauseDefinition.validate(test_yaml)
def test_invalid_clause_assert(self, yaml_ast):
test_yaml = yaml_ast(
"""
assert:
- my_attribute: [{equal: my_value}]
- my_other_attribute: [{in_range: [1, 10]}]
"""
)
with pytest.raises(ParseError):
ConditionClauseDefinition.parse(test_yaml)
ConditionClauseDefinition.validate(test_yaml)
| 33.547872
| 90
| 0.496591
| 520
| 6,307
| 5.692308
| 0.165385
| 0.097297
| 0.044595
| 0.060811
| 0.86723
| 0.86723
| 0.851014
| 0.84223
| 0.797973
| 0.742905
| 0
| 0.024555
| 0.412399
| 6,307
| 187
| 91
| 33.727273
| 0.77415
| 0
| 0
| 0.58209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044776
| 1
| 0.179104
| false
| 0
| 0.044776
| 0
| 0.238806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6aea2600898e5e8bea2f01a5238341cb403eb863
| 151
|
py
|
Python
|
src/models/target_channel.py
|
ighanim/aws-cost-anomaly-alerts
|
ad6d601c7dbdfbdf22f174ea16e76c7ef268edda
|
[
"MIT"
] | null | null | null |
src/models/target_channel.py
|
ighanim/aws-cost-anomaly-alerts
|
ad6d601c7dbdfbdf22f174ea16e76c7ef268edda
|
[
"MIT"
] | null | null | null |
src/models/target_channel.py
|
ighanim/aws-cost-anomaly-alerts
|
ad6d601c7dbdfbdf22f174ea16e76c7ef268edda
|
[
"MIT"
] | null | null | null |
class TargetChannel:
def __init__(self, channel_type, channel_url):
self.channel_type = channel_type
self.channel_url = channel_url
| 37.75
| 50
| 0.728477
| 19
| 151
| 5.263158
| 0.421053
| 0.33
| 0.3
| 0.44
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205298
| 151
| 4
| 51
| 37.75
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0a80b76067fdc83ba784e96469c96805116b571d
| 24
|
py
|
Python
|
test/smoke_test.py
|
1adrianb/binary-networks-pytorch
|
51bdeee64d3da6306aebe4f2464eebd778bf7a38
|
[
"BSD-3-Clause"
] | 63
|
2021-04-26T20:58:47.000Z
|
2022-03-31T09:42:53.000Z
|
test/smoke_test.py
|
1adrianb/binary-networks-pytorch
|
51bdeee64d3da6306aebe4f2464eebd778bf7a38
|
[
"BSD-3-Clause"
] | 4
|
2021-04-27T15:48:33.000Z
|
2021-07-23T07:41:28.000Z
|
test/smoke_test.py
|
1adrianb/binary-networks-pytorch
|
51bdeee64d3da6306aebe4f2464eebd778bf7a38
|
[
"BSD-3-Clause"
] | 6
|
2021-08-03T06:22:43.000Z
|
2022-03-16T03:21:43.000Z
|
import torch
import bnn
| 8
| 12
| 0.833333
| 4
| 24
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 2
| 13
| 12
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0aae0ff8c69c1c0bd1f2dce22fbde882f56ba1a9
| 39,921
|
py
|
Python
|
tests/api/v3_0_0/test_certificates.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 36
|
2021-05-18T16:24:19.000Z
|
2022-03-05T13:44:41.000Z
|
tests/api/v3_0_0/test_certificates.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 15
|
2021-06-08T19:03:37.000Z
|
2022-02-25T14:47:33.000Z
|
tests/api/v3_0_0/test_certificates.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 6
|
2021-06-10T09:32:01.000Z
|
2022-01-12T08:34:39.000Z
|
# -*- coding: utf-8 -*-
"""IdentityServicesEngineAPI certificates API fixtures and tests.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fastjsonschema.exceptions import JsonSchemaException
from ciscoisesdk.exceptions import MalformedRequest
from ciscoisesdk.exceptions import ciscoisesdkException
from tests.environment import IDENTITY_SERVICES_ENGINE_VERSION
pytestmark = pytest.mark.skipif(IDENTITY_SERVICES_ENGINE_VERSION != '3.0.0', reason='version does not match')
def is_valid_get_csrs(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_2eeef18d70b159f788b717e301dd3643_v3_0_0').validate(obj.response)
return True
def get_csrs(api):
endpoint_result = api.certificates.get_csrs(
filter='value1,value2',
filter_type='string',
page=0,
size=0,
sort='string',
sort_by='string'
)
return endpoint_result
@pytest.mark.certificates
def test_get_csrs(api, validator):
try:
assert is_valid_get_csrs(
validator,
get_csrs(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_csrs_default(api):
endpoint_result = api.certificates.get_csrs(
filter=None,
filter_type=None,
page=None,
size=None,
sort=None,
sort_by=None
)
return endpoint_result
@pytest.mark.certificates
def test_get_csrs_default(api, validator):
try:
assert is_valid_get_csrs(
validator,
get_csrs_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_generate_csr(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_e39868ea7aec5efcaaf55009699eda5d_v3_0_0').validate(obj.response)
return True
def generate_csr(api):
endpoint_result = api.certificates.generate_csr(
active_validation=False,
allow_wild_card_cert=True,
certificate_policies='string',
digest_type='string',
hostnames=['string'],
key_length='string',
key_type='string',
payload=None,
portal_group_tag='string',
san_dir=['string'],
san_dns=['string'],
san_ip=['string'],
san_uri=['string'],
subject_city='string',
subject_common_name='string',
subject_country='string',
subject_org='string',
subject_org_unit='string',
subject_state='string',
used_for='string'
)
return endpoint_result
@pytest.mark.certificates
def test_generate_csr(api, validator):
try:
assert is_valid_generate_csr(
validator,
generate_csr(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def generate_csr_default(api):
endpoint_result = api.certificates.generate_csr(
active_validation=False,
allow_wild_card_cert=None,
certificate_policies=None,
digest_type=None,
hostnames=None,
key_length=None,
key_type=None,
payload=None,
portal_group_tag=None,
san_dir=None,
san_dns=None,
san_ip=None,
san_uri=None,
subject_city=None,
subject_common_name=None,
subject_country=None,
subject_org=None,
subject_org_unit=None,
subject_state=None,
used_for=None
)
return endpoint_result
@pytest.mark.certificates
def test_generate_csr_default(api, validator):
try:
assert is_valid_generate_csr(
validator,
generate_csr_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_export_csr(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'data')
return True
def export_csr(api):
endpoint_result = api.certificates.export_csr(
dirpath=None,
save_file=None,
hostname='string',
id='string'
)
return endpoint_result
@pytest.mark.certificates
def test_export_csr(api, validator):
try:
assert is_valid_export_csr(
validator,
export_csr(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def export_csr_default(api):
endpoint_result = api.certificates.export_csr(
dirpath=None,
save_file=None,
hostname='string',
id='string'
)
return endpoint_result
@pytest.mark.certificates
def test_export_csr_default(api, validator):
try:
assert is_valid_export_csr(
validator,
export_csr_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_generate_intermediate_ca_csr(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_bf95f099207a5b6599e04c47c22789c0_v3_0_0').validate(obj.response)
return True
def generate_intermediate_ca_csr(api):
endpoint_result = api.certificates.generate_intermediate_ca_csr(
active_validation=False,
payload=None
)
return endpoint_result
@pytest.mark.certificates
def test_generate_intermediate_ca_csr(api, validator):
try:
assert is_valid_generate_intermediate_ca_csr(
validator,
generate_intermediate_ca_csr(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def generate_intermediate_ca_csr_default(api):
endpoint_result = api.certificates.generate_intermediate_ca_csr(
active_validation=False,
payload=None
)
return endpoint_result
@pytest.mark.certificates
def test_generate_intermediate_ca_csr_default(api, validator):
try:
assert is_valid_generate_intermediate_ca_csr(
validator,
generate_intermediate_ca_csr_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_csr_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_b8104a50fc565ae9a756d6d0152e0e5b_v3_0_0').validate(obj.response)
return True
def get_csr_by_id(api):
endpoint_result = api.certificates.get_csr_by_id(
host_name='string',
id='string'
)
return endpoint_result
@pytest.mark.certificates
def test_get_csr_by_id(api, validator):
try:
assert is_valid_get_csr_by_id(
validator,
get_csr_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_csr_by_id_default(api):
endpoint_result = api.certificates.get_csr_by_id(
host_name='string',
id='string'
)
return endpoint_result
@pytest.mark.certificates
def test_get_csr_by_id_default(api, validator):
try:
assert is_valid_get_csr_by_id(
validator,
get_csr_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_delete_csr_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_bf792ec664fa5202beb776556908b0c1_v3_0_0').validate(obj.response)
return True
def delete_csr_by_id(api):
endpoint_result = api.certificates.delete_csr_by_id(
host_name='string',
id='string'
)
return endpoint_result
@pytest.mark.certificates
def test_delete_csr_by_id(api, validator):
try:
assert is_valid_delete_csr_by_id(
validator,
delete_csr_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def delete_csr_by_id_default(api):
endpoint_result = api.certificates.delete_csr_by_id(
host_name='string',
id='string'
)
return endpoint_result
@pytest.mark.certificates
def test_delete_csr_by_id_default(api, validator):
try:
assert is_valid_delete_csr_by_id(
validator,
delete_csr_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_regenerate_ise_root_ca(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_18e6d1b224e058288a8c4d70be72c9a6_v3_0_0').validate(obj.response)
return True
def regenerate_ise_root_ca(api):
endpoint_result = api.certificates.regenerate_ise_root_ca(
active_validation=False,
payload=None,
remove_existing_ise_intermediate_csr=True
)
return endpoint_result
@pytest.mark.certificates
def test_regenerate_ise_root_ca(api, validator):
try:
assert is_valid_regenerate_ise_root_ca(
validator,
regenerate_ise_root_ca(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def regenerate_ise_root_ca_default(api):
endpoint_result = api.certificates.regenerate_ise_root_ca(
active_validation=False,
payload=None,
remove_existing_ise_intermediate_csr=None
)
return endpoint_result
@pytest.mark.certificates
def test_regenerate_ise_root_ca_default(api, validator):
try:
assert is_valid_regenerate_ise_root_ca(
validator,
regenerate_ise_root_ca_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_renew_certificates(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_254c288192f954309b4b35aa612ff226_v3_0_0').validate(obj.response)
return True
def renew_certificates(api):
endpoint_result = api.certificates.renew_certificates(
active_validation=False,
cert_type='string',
payload=None
)
return endpoint_result
@pytest.mark.certificates
def test_renew_certificates(api, validator):
try:
assert is_valid_renew_certificates(
validator,
renew_certificates(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def renew_certificates_default(api):
endpoint_result = api.certificates.renew_certificates(
active_validation=False,
cert_type=None,
payload=None
)
return endpoint_result
@pytest.mark.certificates
def test_renew_certificates_default(api, validator):
try:
assert is_valid_renew_certificates(
validator,
renew_certificates_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_bind_csr(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_2b94d7d3f0ed5d0b938151ae2cae9fa4_v3_0_0').validate(obj.response)
return True
def bind_csr(api):
endpoint_result = api.certificates.bind_csr(
active_validation=False,
admin=True,
allow_extended_validity=True,
allow_out_of_date_cert=True,
allow_replacement_of_certificates=True,
allow_replacement_of_portal_group_tag=True,
data='string',
eap=True,
host_name='string',
id='string',
ims=True,
name='string',
payload=None,
portal=True,
portal_group_tag='string',
pxgrid=True,
radius=True,
saml=True,
validate_certificate_extensions=True
)
return endpoint_result
@pytest.mark.certificates
def test_bind_csr(api, validator):
try:
assert is_valid_bind_csr(
validator,
bind_csr(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def bind_csr_default(api):
endpoint_result = api.certificates.bind_csr(
active_validation=False,
admin=None,
allow_extended_validity=None,
allow_out_of_date_cert=None,
allow_replacement_of_certificates=None,
allow_replacement_of_portal_group_tag=None,
data=None,
eap=None,
host_name=None,
id=None,
ims=None,
name=None,
payload=None,
portal=None,
portal_group_tag=None,
pxgrid=None,
radius=None,
saml=None,
validate_certificate_extensions=None
)
return endpoint_result
@pytest.mark.certificates
def test_bind_csr_default(api, validator):
try:
assert is_valid_bind_csr(
validator,
bind_csr_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_export_system_certificate(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'data')
return True
def export_system_certificate(api):
endpoint_result = api.certificates.export_system_certificate(
dirpath=None,
save_file=None,
active_validation=False,
export='string',
id='string',
password='string',
payload=None
)
return endpoint_result
@pytest.mark.certificates
def test_export_system_certificate(api, validator):
try:
assert is_valid_export_system_certificate(
validator,
export_system_certificate(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def export_system_certificate_default(api):
endpoint_result = api.certificates.export_system_certificate(
dirpath=None,
save_file=None,
active_validation=False,
export=None,
id=None,
password=None,
payload=None
)
return endpoint_result
@pytest.mark.certificates
def test_export_system_certificate_default(api, validator):
try:
assert is_valid_export_system_certificate(
validator,
export_system_certificate_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_import_system_certificate(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_517e6c7251a8508597f1b7ae61cbf953_v3_0_0').validate(obj.response)
return True
def import_system_certificate(api):
endpoint_result = api.certificates.import_system_certificate(
active_validation=False,
admin=True,
allow_extended_validity=True,
allow_out_of_date_cert=True,
allow_replacement_of_certificates=True,
allow_replacement_of_portal_group_tag=True,
allow_sha1_certificates=True,
allow_wild_card_certificates=True,
data='string',
eap=True,
ims=True,
name='string',
password='string',
payload=None,
portal=True,
portal_group_tag='string',
private_key_data='string',
pxgrid=True,
radius=True,
saml=True,
validate_certificate_extensions=True
)
return endpoint_result
@pytest.mark.certificates
def test_import_system_certificate(api, validator):
try:
assert is_valid_import_system_certificate(
validator,
import_system_certificate(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def import_system_certificate_default(api):
endpoint_result = api.certificates.import_system_certificate(
active_validation=False,
admin=None,
allow_extended_validity=None,
allow_out_of_date_cert=None,
allow_replacement_of_certificates=None,
allow_replacement_of_portal_group_tag=None,
allow_sha1_certificates=None,
allow_wild_card_certificates=None,
data=None,
eap=None,
ims=None,
name=None,
password=None,
payload=None,
portal=None,
portal_group_tag=None,
private_key_data=None,
pxgrid=None,
radius=None,
saml=None,
validate_certificate_extensions=None
)
return endpoint_result
@pytest.mark.certificates
def test_import_system_certificate_default(api, validator):
try:
assert is_valid_import_system_certificate(
validator,
import_system_certificate_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_system_certificates(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_662594a56f5c5f739a83e8806da16be5_v3_0_0').validate(obj.response)
return True
def get_system_certificates(api):
endpoint_result = api.certificates.get_system_certificates(
filter='value1,value2',
filter_type='string',
host_name='string',
page=0,
size=0,
sort='string',
sort_by='string'
)
return endpoint_result
@pytest.mark.certificates
def test_get_system_certificates(api, validator):
try:
assert is_valid_get_system_certificates(
validator,
get_system_certificates(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_system_certificates_default(api):
endpoint_result = api.certificates.get_system_certificates(
host_name='string',
filter=None,
filter_type=None,
page=None,
size=None,
sort=None,
sort_by=None
)
return endpoint_result
@pytest.mark.certificates
def test_get_system_certificates_default(api, validator):
try:
assert is_valid_get_system_certificates(
validator,
get_system_certificates_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_system_certificate_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_3f36e90115b05416a71506061fed7e5c_v3_0_0').validate(obj.response)
return True
def get_system_certificate_by_id(api):
endpoint_result = api.certificates.get_system_certificate_by_id(
host_name='string',
id='string'
)
return endpoint_result
@pytest.mark.certificates
def test_get_system_certificate_by_id(api, validator):
try:
assert is_valid_get_system_certificate_by_id(
validator,
get_system_certificate_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_system_certificate_by_id_default(api):
endpoint_result = api.certificates.get_system_certificate_by_id(
host_name='string',
id='string'
)
return endpoint_result
@pytest.mark.certificates
def test_get_system_certificate_by_id_default(api, validator):
try:
assert is_valid_get_system_certificate_by_id(
validator,
get_system_certificate_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_update_system_certificate(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_48fb9c22ad9a5eddb590c85abdab460b_v3_0_0').validate(obj.response)
return True
def update_system_certificate(api):
    """Update a system certificate, exercising every writable field with sample values."""
    sample = dict(
        active_validation=False,
        admin=True,
        allow_replacement_of_portal_group_tag=True,
        description='string',
        eap=True,
        expiration_ttl_period=0,
        expiration_ttl_units='string',
        host_name='string',
        id='string',
        ims=True,
        name='string',
        payload=None,
        portal=True,
        portal_group_tag='string',
        pxgrid=True,
        radius=True,
        renew_self_signed_certificate=True,
        saml=True,
    )
    return api.certificates.update_system_certificate(**sample)
@pytest.mark.certificates
def test_update_system_certificate(api, validator):
    """Call the update endpoint and validate the response against its schema."""
    try:
        response = update_system_certificate(api)
        assert is_valid_update_system_certificate(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=original_e))
            raise original_e
def update_system_certificate_default(api):
    """Update a system certificate passing only required arguments; optionals are None."""
    optional = dict.fromkeys((
        'admin',
        'allow_replacement_of_portal_group_tag',
        'description',
        'eap',
        'expiration_ttl_period',
        'expiration_ttl_units',
        'ims',
        'name',
        'payload',
        'portal',
        'portal_group_tag',
        'pxgrid',
        'radius',
        'renew_self_signed_certificate',
        'saml',
    ))
    return api.certificates.update_system_certificate(
        active_validation=False,
        host_name='string',
        id='string',
        **optional
    )
@pytest.mark.certificates
def test_update_system_certificate_default(api, validator):
    """Validate the update endpoint when optional arguments default to None."""
    try:
        response = update_system_certificate_default(api)
        assert is_valid_update_system_certificate(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_delete_system_certificate_by_id(json_schema_validate, obj):
    """Return True when *obj* is a REST response whose body passes the endpoint's JSON schema."""
    if not obj:
        return False
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_35241dc2eec65ad680a3c5de47cd87c8_v3_0_0').validate(obj.response)
    return True
def delete_system_certificate_by_id(api):
    """Delete a system certificate identified by sample host name and ID."""
    return api.certificates.delete_system_certificate_by_id(
        host_name='string',
        id='string',
    )
@pytest.mark.certificates
def test_delete_system_certificate_by_id(api, validator):
    """Call the delete endpoint and validate the response against its schema."""
    try:
        response = delete_system_certificate_by_id(api)
        assert is_valid_delete_system_certificate_by_id(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=original_e))
            raise original_e
def delete_system_certificate_by_id_default(api):
    """Delete a system certificate supplying only the required path parameters."""
    return api.certificates.delete_system_certificate_by_id(
        host_name='string',
        id='string',
    )
@pytest.mark.certificates
def test_delete_system_certificate_by_id_default(api, validator):
    """Validate the delete endpoint when optional arguments are defaulted."""
    try:
        response = delete_system_certificate_by_id_default(api)
        assert is_valid_delete_system_certificate_by_id(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_get_trusted_certificates(json_schema_validate, obj):
    """Return True when *obj* is a REST response whose body passes the endpoint's JSON schema."""
    if not obj:
        return False
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_c654a18faf1b5571ac5ba61145d298c4_v3_0_0').validate(obj.response)
    return True
def get_trusted_certificates(api):
    """List trusted certificates, exercising all filtering/paging parameters."""
    return api.certificates.get_trusted_certificates(
        filter='value1,value2',
        filter_type='string',
        page=0,
        size=0,
        sort='string',
        sort_by='string',
    )
@pytest.mark.certificates
def test_get_trusted_certificates(api, validator):
    """Call the list endpoint and validate the response against its schema."""
    try:
        response = get_trusted_certificates(api)
        assert is_valid_get_trusted_certificates(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=original_e))
            raise original_e
def get_trusted_certificates_default(api):
    """List trusted certificates with every optional parameter left as None."""
    defaults = dict.fromkeys(
        ('filter', 'filter_type', 'page', 'size', 'sort', 'sort_by')
    )
    return api.certificates.get_trusted_certificates(**defaults)
@pytest.mark.certificates
def test_get_trusted_certificates_default(api, validator):
    """Validate the list endpoint when all optional arguments default to None."""
    try:
        response = get_trusted_certificates_default(api)
        assert is_valid_get_trusted_certificates(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_export_trusted_certificate(json_schema_validate, obj):
    """Return True for a download-style response; exports have no JSON schema to validate."""
    if not obj:
        return False
    # Download responses carry headers plus the raw file payload.
    for attribute in ('headers', 'data'):
        assert hasattr(obj, attribute)
    return True
def export_trusted_certificate(api):
    """Download a trusted certificate by ID without persisting it to disk."""
    return api.certificates.export_trusted_certificate(
        dirpath=None,
        save_file=None,
        id='string',
    )
@pytest.mark.certificates
def test_export_trusted_certificate(api, validator):
    """Call the export endpoint and check the download envelope."""
    try:
        response = export_trusted_certificate(api)
        assert is_valid_export_trusted_certificate(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=original_e))
            raise original_e
def export_trusted_certificate_default(api):
    """Download a trusted certificate using only the required ID parameter."""
    return api.certificates.export_trusted_certificate(
        dirpath=None,
        save_file=None,
        id='string',
    )
@pytest.mark.certificates
def test_export_trusted_certificate_default(api, validator):
    """Validate the export endpoint when optional arguments are defaulted."""
    try:
        response = export_trusted_certificate_default(api)
        assert is_valid_export_trusted_certificate(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_import_trust_certificate(json_schema_validate, obj):
    """Return True when *obj* is a REST response whose body passes the endpoint's JSON schema."""
    if not obj:
        return False
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_c8cd2f618b655d988ce626e579486596_v3_0_0').validate(obj.response)
    return True
def import_trust_certificate(api):
    """Import a trust certificate, exercising every writable field with sample values."""
    sample = dict(
        active_validation=False,
        allow_basic_constraint_cafalse=True,
        allow_out_of_date_cert=True,
        allow_sha1_certificates=True,
        data='string',
        description='string',
        name='string',
        payload=None,
        trust_for_certificate_based_admin_auth=True,
        trust_for_cisco_services_auth=True,
        trust_for_client_auth=True,
        trust_for_ise_auth=True,
        validate_certificate_extensions=True,
    )
    return api.certificates.import_trust_certificate(**sample)
@pytest.mark.certificates
def test_import_trust_certificate(api, validator):
    """Call the import endpoint and validate the response against its schema."""
    try:
        response = import_trust_certificate(api)
        assert is_valid_import_trust_certificate(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=original_e))
            raise original_e
def import_trust_certificate_default(api):
    """Import a trust certificate with every optional field left as None."""
    optional = dict.fromkeys((
        'allow_basic_constraint_cafalse',
        'allow_out_of_date_cert',
        'allow_sha1_certificates',
        'data',
        'description',
        'name',
        'payload',
        'trust_for_certificate_based_admin_auth',
        'trust_for_cisco_services_auth',
        'trust_for_client_auth',
        'trust_for_ise_auth',
        'validate_certificate_extensions',
    ))
    return api.certificates.import_trust_certificate(
        active_validation=False,
        **optional
    )
@pytest.mark.certificates
def test_import_trust_certificate_default(api, validator):
    """Validate the import endpoint when optional arguments default to None."""
    try:
        response = import_trust_certificate_default(api)
        assert is_valid_import_trust_certificate(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_get_trusted_certificate_by_id(json_schema_validate, obj):
    """Return True when *obj* is a REST response whose body passes the endpoint's JSON schema."""
    if not obj:
        return False
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_1091757f8f4956d29b821fa9bbf23266_v3_0_0').validate(obj.response)
    return True
def get_trusted_certificate_by_id(api):
    """Fetch a single trusted certificate by its sample ID."""
    return api.certificates.get_trusted_certificate_by_id(id='string')
@pytest.mark.certificates
def test_get_trusted_certificate_by_id(api, validator):
    """Call the endpoint and validate the response against its schema."""
    try:
        response = get_trusted_certificate_by_id(api)
        assert is_valid_get_trusted_certificate_by_id(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=original_e))
            raise original_e
def get_trusted_certificate_by_id_default(api):
    """Fetch a trusted certificate supplying only the required ID parameter."""
    return api.certificates.get_trusted_certificate_by_id(id='string')
@pytest.mark.certificates
def test_get_trusted_certificate_by_id_default(api, validator):
    """Validate the endpoint when optional arguments are defaulted."""
    try:
        response = get_trusted_certificate_by_id_default(api)
        assert is_valid_get_trusted_certificate_by_id(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_update_trusted_certificate(json_schema_validate, obj):
    """Return True when *obj* is a REST response whose body passes the endpoint's JSON schema."""
    if not obj:
        return False
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_239661cb625d5ad0ad76b93282f5818a_v3_0_0').validate(obj.response)
    return True
def update_trusted_certificate(api):
    """Update a trusted certificate, exercising every writable field with sample values."""
    sample = dict(
        active_validation=False,
        authenticate_before_crl_received=True,
        automatic_crl_update=True,
        automatic_crl_update_period=0,
        automatic_crl_update_units='string',
        crl_distribution_url='string',
        crl_download_failure_retries=0,
        crl_download_failure_retries_units='string',
        description='string',
        download_crl=True,
        enable_ocsp_validation=True,
        enable_server_identity_check=True,
        id='string',
        ignore_crl_expiration=True,
        name='string',
        non_automatic_crl_update_period=0,
        non_automatic_crl_update_units='string',
        payload=None,
        reject_if_no_status_from_ocs_p=True,
        reject_if_unreachable_from_ocs_p=True,
        selected_ocsp_service='string',
        status='string',
        trust_for_certificate_based_admin_auth=True,
        trust_for_cisco_services_auth=True,
        trust_for_client_auth=True,
        trust_for_ise_auth=True,
    )
    return api.certificates.update_trusted_certificate(**sample)
@pytest.mark.certificates
def test_update_trusted_certificate(api, validator):
    """Call the update endpoint and validate the response against its schema."""
    try:
        response = update_trusted_certificate(api)
        assert is_valid_update_trusted_certificate(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=original_e))
            raise original_e
def update_trusted_certificate_default(api):
    """Update a trusted certificate passing only required arguments; optionals are None."""
    optional = dict.fromkeys((
        'authenticate_before_crl_received',
        'automatic_crl_update',
        'automatic_crl_update_period',
        'automatic_crl_update_units',
        'crl_distribution_url',
        'crl_download_failure_retries',
        'crl_download_failure_retries_units',
        'description',
        'download_crl',
        'enable_ocsp_validation',
        'enable_server_identity_check',
        'ignore_crl_expiration',
        'name',
        'non_automatic_crl_update_period',
        'non_automatic_crl_update_units',
        'payload',
        'reject_if_no_status_from_ocs_p',
        'reject_if_unreachable_from_ocs_p',
        'selected_ocsp_service',
        'status',
        'trust_for_certificate_based_admin_auth',
        'trust_for_cisco_services_auth',
        'trust_for_client_auth',
        'trust_for_ise_auth',
    ))
    return api.certificates.update_trusted_certificate(
        active_validation=False,
        id='string',
        **optional
    )
@pytest.mark.certificates
def test_update_trusted_certificate_default(api, validator):
    """Validate the update endpoint when optional arguments default to None."""
    try:
        response = update_trusted_certificate_default(api)
        assert is_valid_update_trusted_certificate(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_delete_trusted_certificate_by_id(json_schema_validate, obj):
    """Return True when *obj* is a REST response whose body passes the endpoint's JSON schema."""
    if not obj:
        return False
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_c578ef80918b5d038024d126cd6e3b8d_v3_0_0').validate(obj.response)
    return True
def delete_trusted_certificate_by_id(api):
    """Delete a trusted certificate identified by its sample ID."""
    return api.certificates.delete_trusted_certificate_by_id(id='string')
@pytest.mark.certificates
def test_delete_trusted_certificate_by_id(api, validator):
    """Call the delete endpoint and validate the response against its schema."""
    try:
        response = delete_trusted_certificate_by_id(api)
        assert is_valid_delete_trusted_certificate_by_id(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=original_e))
            raise original_e
def delete_trusted_certificate_by_id_default(api):
    """Delete a trusted certificate supplying only the required ID parameter."""
    return api.certificates.delete_trusted_certificate_by_id(id='string')
@pytest.mark.certificates
def test_delete_trusted_certificate_by_id_default(api, validator):
    """Validate the delete endpoint when optional arguments are defaulted."""
    try:
        response = delete_trusted_certificate_by_id_default(api)
        assert is_valid_delete_trusted_certificate_by_id(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
| 29.224744
| 109
| 0.688234
| 4,571
| 39,921
| 5.679282
| 0.063881
| 0.036402
| 0.048074
| 0.032357
| 0.874268
| 0.847958
| 0.842373
| 0.834091
| 0.811402
| 0.789792
| 0
| 0.014894
| 0.236417
| 39,921
| 1,365
| 110
| 29.246154
| 0.836729
| 0.029007
| 0
| 0.692308
| 0
| 0
| 0.055966
| 0.019971
| 0
| 0
| 0
| 0
| 0.106101
| 1
| 0.092838
| false
| 0.003537
| 0.023873
| 0
| 0.190981
| 0.018568
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0ae7fedf7a02f29abb1ce8bbb6b4cb7e596893fc
| 73,210
|
py
|
Python
|
txdav/caldav/datastore/test/test_attachments.py
|
eventable/CalendarServer
|
384444edb1966b530bc391789afbe3fb9cd6fd3e
|
[
"Apache-2.0"
] | 1
|
2017-02-18T19:22:19.000Z
|
2017-02-18T19:22:19.000Z
|
txdav/caldav/datastore/test/test_attachments.py
|
eventable/CalendarServer
|
384444edb1966b530bc391789afbe3fb9cd6fd3e
|
[
"Apache-2.0"
] | null | null | null |
txdav/caldav/datastore/test/test_attachments.py
|
eventable/CalendarServer
|
384444edb1966b530bc391789afbe3fb9cd6fd3e
|
[
"Apache-2.0"
] | null | null | null |
##
# Copyright (c) 2013-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from pycalendar.datetime import DateTime
from pycalendar.value import Value
from twext.enterprise.dal.syntax import Delete
from twext.python.clsprop import classproperty
from txweb2.http_headers import MimeType
from txweb2.stream import MemoryStream
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.python.filepath import FilePath
from twisted.trial import unittest
from twistedcaldav.config import config
from twistedcaldav.ical import Property, Component
from txdav.caldav.datastore.sql import CalendarStoreFeatures
from txdav.caldav.datastore.sql_attachment import DropBoxAttachment, \
ManagedAttachment
from txdav.caldav.datastore.test.common import CaptureProtocol
from txdav.caldav.icalendarstore import IAttachmentStorageTransport, IAttachment, \
QuotaExceeded, AttachmentSizeTooLarge
from txdav.common.datastore.sql_tables import schema
from txdav.common.datastore.test.util import CommonCommonTests, \
populateCalendarsFrom, deriveQuota, withSpecialQuota
import hashlib
import os
"""
Tests for txdav.caldav.datastore.sql attachment handling.
"""
# On-disk fixture data: the sample calendar store shipped alongside these tests.
storePath = FilePath(__file__).parent().child("calendar_store")
homeRoot = storePath.child("ho").child("me").child(u"home1")
cal1Root = homeRoot.child("calendar_1")

# Object resources expected inside "calendar_1" of the fixture home.
calendar1_objectNames = [
    "1.ics",
    "2.ics",
    "3.ics",
    "4.ics",
]

# Calendar collections expected inside the fixture home.
home1_calendarNames = [
    "calendar_1",
]
class AttachmentTests(CommonCommonTests, unittest.TestCase):
    """
    Shared fixture base for attachment tests: builds a SQL calendar store
    and populates home "home1" with four calendar objects carrying varied
    per-object metadata.
    """

    # Metadata variants applied to the four fixture calendar objects.
    metadata1 = {
        "accessMode": "PUBLIC",
        "isScheduleObject": True,
        "scheduleTag": "abc",
        "scheduleEtags": (),
        "hasPrivateComment": False,
    }
    metadata2 = {
        "accessMode": "PRIVATE",
        "isScheduleObject": False,
        "scheduleTag": "",
        "scheduleEtags": (),
        "hasPrivateComment": False,
    }
    metadata3 = {
        "accessMode": "PUBLIC",
        "isScheduleObject": None,
        "scheduleTag": "abc",
        "scheduleEtags": (),
        "hasPrivateComment": True,
    }
    metadata4 = {
        "accessMode": "PUBLIC",
        "isScheduleObject": True,
        "scheduleTag": "abc4",
        "scheduleEtags": (),
        "hasPrivateComment": False,
    }

    @inlineCallbacks
    def setUp(self):
        """Build the store/directory and load the fixture data."""
        yield super(AttachmentTests, self).setUp()
        yield self.buildStoreAndDirectory()
        yield self.populate()

    @inlineCallbacks
    def populate(self):
        """Load the fixture calendars, then clear any notifications raised during population."""
        yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
        self.notifierFactory.reset()

    @classproperty(cache=False)
    def requirements(cls): #@NoSelf
        # Copy the class-level metadata dicts so individual tests cannot
        # leak mutations into each other via shared state.
        metadata1 = cls.metadata1.copy()
        metadata2 = cls.metadata2.copy()
        metadata3 = cls.metadata3.copy()
        metadata4 = cls.metadata4.copy()
        return {
            "home1": {
                "calendar_1": {
                    "1.ics": (cal1Root.child("1.ics").getContent(), metadata1),
                    "2.ics": (cal1Root.child("2.ics").getContent(), metadata2),
                    "3.ics": (cal1Root.child("3.ics").getContent(), metadata3),
                    "4.ics": (cal1Root.child("4.ics").getContent(), metadata4),
                },
            },
        }

    def storeUnderTest(self):
        """
        Create and return a L{CalendarStore} for testing.
        """
        return self._sqlCalendarStore
class DropBoxAttachmentTests(AttachmentTests):
    """
    Tests for classic drop-box style attachments (as opposed to managed
    attachments).
    """

    # Sample event carrying both an ATTACH URI and an X-APPLE-DROPBOX
    # property whose final path segment ("some-dropbox-id") identifies the
    # drop box.  Lines are re-joined with CRLF as iCalendar requires.
    eventWithDropbox = "\r\n".join("""
BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VTIMEZONE
LAST-MODIFIED:20040110T032845Z
TZID:US/Eastern
BEGIN:DAYLIGHT
DTSTART:20000404T020000
RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
TZNAME:EDT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
END:DAYLIGHT
BEGIN:STANDARD
DTSTART:20001026T020000
RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
TZNAME:EST
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART;TZID=US/Eastern:20060101T100000
DURATION:PT1H
SUMMARY:event 1
UID:event1@ninevah.local
ORGANIZER:user01
ATTENDEE;PARTSTAT=ACCEPTED:user01
ATTACH;VALUE=URI:/calendars/users/home1/some-dropbox-id/some-dropbox-id/caldavd.plist
X-APPLE-DROPBOX:/calendars/users/home1/dropbox/some-dropbox-id
END:VEVENT
END:VCALENDAR
""".strip().split("\n"))
    @inlineCallbacks
    def setUp(self):
        """
        Configure the store for classic drop-box attachments: enable the
        DropBox feature flag, disable managed attachments, and delete the
        MANAGED-ATTACHMENTS marker row from the CALENDARSERVER table.
        """
        yield super(DropBoxAttachmentTests, self).setUp()

        # Need to tweak config and settings to setup dropbox to work
        self.patch(config, "EnableDropBox", True)
        self.patch(config, "EnableManagedAttachments", False)
        self._sqlCalendarStore.enableManagedAttachments = False

        # Remove the marker row so the store does not believe managed
        # attachments have already been enabled.
        txn = self._sqlCalendarStore.newTransaction()
        cs = schema.CALENDARSERVER
        yield Delete(
            From=cs,
            Where=cs.NAME == "MANAGED-ATTACHMENTS"
        ).on(txn)
        yield txn.commit()
    @inlineCallbacks
    def createAttachmentTest(self, refresh):
        """
        Common logic for attachment-creation tests: store "new attachment
        text" under the name "new.attachment", re-fetch the object via the
        caller-supplied C{refresh} callable (identity, or commit-and-reload),
        and verify content, content type, md5 and the attachment listing.
        """
        obj = yield self.calendarObjectUnderTest()
        attachment = yield obj.createAttachmentWithName(
            "new.attachment",
        )
        t = attachment.store(MimeType("text", "x-fixture"), "")
        self.assertProvides(IAttachmentStorageTransport, t)
        # Content is written in two pieces to exercise multiple write()s.
        t.write("new attachment")
        t.write(" text")
        yield t.loseConnection()
        obj = yield refresh(obj)
        attachment = yield obj.attachmentWithName("new.attachment")
        self.assertProvides(IAttachment, attachment)
        data = yield self.attachmentToString(attachment)
        self.assertEquals(data, "new attachment text")
        contentType = attachment.contentType()
        self.assertIsInstance(contentType, MimeType)
        self.assertEquals(contentType, MimeType("text", "x-fixture"))
        # md5 hex digest of "new attachment text"
        self.assertEquals(attachment.md5(), '50a9f27aeed9247a0833f30a631f1858')
        self.assertEquals(
            [_attachment.name() for _attachment in (yield obj.attachments())],
            ['new.attachment']
        )
    @inlineCallbacks
    def stringToAttachment(self, obj, name, contents,
                           mimeType=MimeType("text", "x-fixture")):
        """
        Convenience for producing an attachment from a calendar object.

        @param obj: the calendar object which owns the dropbox associated with
            the to-be-created attachment.
        @param name: the (utf-8 encoded) name to create the attachment with.
        @type name: C{bytes}
        @param contents: the desired contents of the new attachment.
        @type contents: C{bytes}
        @param mimeType: the mime type of the incoming bytes.
        @return: a L{Deferred} that fires with the L{IAttachment} that is
            created, once all the bytes have been stored.
        """
        # NOTE(review): the default mimeType is evaluated once at class
        # definition time and shared across calls -- presumably MimeType is
        # treated as immutable here; confirm before ever mutating it.
        att = yield obj.createAttachmentWithName(name)
        t = att.store(mimeType, "")
        t.write(contents)
        yield t.loseConnection()
        returnValue(att)
def attachmentToString(self, attachment):
"""
Convenience to convert an L{IAttachment} to a string.
@param attachment: an L{IAttachment} provider to convert into a string.
@return: a L{Deferred} that fires with the contents of the attachment.
@rtype: L{Deferred} firing C{bytes}
"""
capture = CaptureProtocol()
attachment.retrieve(capture)
return capture.deferred
@inlineCallbacks
def test_attachmentPath(self):
"""
L{ICalendarObject.createAttachmentWithName} will store an
L{IAttachment} object that can be retrieved by
L{ICalendarObject.attachmentWithName}.
"""
yield self.createAttachmentTest(lambda x: x)
attachmentRoot = (
yield self.calendarObjectUnderTest()
)._txn._store.attachmentsPath
obj = yield self.calendarObjectUnderTest()
hasheduid = hashlib.md5(obj._dropboxID).hexdigest()
attachmentPath = attachmentRoot.child(
hasheduid[0:2]).child(hasheduid[2:4]).child(hasheduid).child(
"new.attachment")
self.assertTrue(attachmentPath.isfile())
@inlineCallbacks
def test_dropboxID(self):
"""
L{ICalendarObject.dropboxID} should synthesize its dropbox from the X
-APPLE-DROPBOX property, if available.
"""
cal = yield self.calendarUnderTest()
yield cal.createCalendarObjectWithName("drop.ics", Component.fromString(
self.eventWithDropbox
))
obj = yield cal.calendarObjectWithName("drop.ics")
self.assertEquals((yield obj.dropboxID()), "some-dropbox-id")
@inlineCallbacks
def test_dropboxIDs(self):
"""
L{ICalendarObject.getAllDropboxIDs} returns a L{Deferred} that fires
with a C{list} of all Dropbox IDs.
"""
home = yield self.homeUnderTest()
# The only item in the home which has an ATTACH or X-APPLE-DROPBOX
# property.
allDropboxIDs = set([
u'FE5CDC6F-7776-4607-83A9-B90FF7ACC8D0.dropbox',
])
self.assertEquals(set((yield home.getAllDropboxIDs())),
allDropboxIDs)
@inlineCallbacks
def test_indexByDropboxProperty(self):
"""
L{ICalendarHome.calendarObjectWithDropboxID} will return a calendar
object in the calendar home with the given final segment in its C{X
-APPLE-DROPBOX} property URI.
"""
objName = "with-dropbox.ics"
cal = yield self.calendarUnderTest()
yield cal.createCalendarObjectWithName(
objName, Component.fromString(
self.eventWithDropbox
)
)
yield self.commit()
home = yield self.homeUnderTest()
cal = yield self.calendarUnderTest()
fromName = yield cal.calendarObjectWithName(objName)
fromDropbox = yield home.calendarObjectWithDropboxID("some-dropbox-id")
self.assertEquals(fromName, fromDropbox)
@inlineCallbacks
def test_twoAttachmentsWithTheSameName(self):
"""
Attachments are uniquely identified by their associated object and path;
two attachments with the same name won't overwrite each other.
"""
obj = yield self.calendarObjectUnderTest()
obj2 = yield self.calendarObjectUnderTest(name="2.ics")
att1 = yield self.stringToAttachment(obj, "sample.attachment",
"test data 1")
att2 = yield self.stringToAttachment(obj2, "sample.attachment",
"test data 2")
data1 = yield self.attachmentToString(att1)
data2 = yield self.attachmentToString(att2)
self.assertEquals(data1, "test data 1")
self.assertEquals(data2, "test data 2")
def test_createAttachment(self):
"""
L{ICalendarObject.createAttachmentWithName} will store an
L{IAttachment} object that can be retrieved by
L{ICalendarObject.attachmentWithName}.
"""
return self.createAttachmentTest(lambda x: x)
def test_createAttachmentCommit(self):
"""
L{ICalendarObject.createAttachmentWithName} will store an
L{IAttachment} object that can be retrieved by
L{ICalendarObject.attachmentWithName} in subsequent transactions.
"""
@inlineCallbacks
def refresh(obj):
yield self.commit()
result = yield self.calendarObjectUnderTest()
returnValue(result)
return self.createAttachmentTest(refresh)
    @inlineCallbacks
    def test_attachmentTemporaryFileCleanup(self):
        """
        L{IAttachmentStream} object cleans-up its temporary file on txn abort.
        """
        # Case 1: abort with the temporary file still present -- the abort
        # must remove it.
        obj = yield self.calendarObjectUnderTest()
        attachment = yield obj.createAttachmentWithName(
            "new.attachment",
        )
        t = attachment.store(MimeType("text", "x-fixture"))
        temp = t._path.path

        yield self.abort()

        self.assertFalse(os.path.exists(temp))

        # Case 2: the temporary file was already deleted out from under the
        # store -- abort must not fail, and the file stays gone.
        obj = yield self.calendarObjectUnderTest()
        attachment = yield obj.createAttachmentWithName(
            "new.attachment",
        )
        t = attachment.store(MimeType("text", "x-fixture"))
        temp = t._path.path
        os.remove(temp)

        yield self.abort()

        self.assertFalse(os.path.exists(temp))
@inlineCallbacks
def test_quotaAllowedBytes(self):
"""
L{ICalendarHome.quotaAllowedBytes} should return the configuration value
passed to the calendar store's constructor.
"""
expected = deriveQuota(self)
home = yield self.homeUnderTest()
actual = home.quotaAllowedBytes()
self.assertEquals(expected, actual)
@withSpecialQuota(None)
@inlineCallbacks
def test_quotaUnlimited(self):
"""
When L{ICalendarHome.quotaAllowedBytes} returns C{None}, quota is
unlimited; any sized attachment can be stored.
"""
home = yield self.homeUnderTest()
allowed = home.quotaAllowedBytes()
self.assertIdentical(allowed, None)
yield self.test_createAttachment()
@inlineCallbacks
def test_quotaTransportAddress(self):
"""
Since L{IAttachmentStorageTransport} is a subinterface of L{ITransport},
it must provide peer and host addresses.
"""
obj = yield self.calendarObjectUnderTest()
name = 'a-fun-attachment'
attachment = yield obj.createAttachmentWithName(name)
transport = attachment.store(MimeType("test", "x-something"), "")
peer = transport.getPeer()
host = transport.getHost()
self.assertIdentical(peer.attachment, attachment)
self.assertIdentical(host.attachment, attachment)
self.assertIn(name, repr(peer))
self.assertIn(name, repr(host))
    @inlineCallbacks
    def exceedQuotaTest(self, getit):
        """
        If too many bytes are passed to the transport returned by
        L{ICalendarObject.createAttachmentWithName},
        L{IAttachmentStorageTransport.loseConnection} will return a L{Deferred}
        that fails with L{QuotaExceeded}.
        """
        home = yield self.homeUnderTest()
        attachment = yield getit()
        t = attachment.store(MimeType("text", "x-fixture"), "")
        sample = "all work and no play makes jack a dull boy"
        # Fill up to (roughly) the quota in one chunk -- NOTE(review): the
        # '/' here relies on Python 2 integer-division semantics; confirm
        # before any Python 3 port.
        chunk = (sample * (home.quotaAllowedBytes() / len(sample)))

        t.write(chunk)
        # Push two more chunks to go past the limit.
        t.writeSequence([chunk, chunk])

        d = t.loseConnection()
        yield self.failUnlessFailure(d, QuotaExceeded)
    @inlineCallbacks
    def test_exceedQuotaNew(self):
        """
        When quota is exceeded on a new attachment, that attachment will no
        longer exist.
        """
        obj = yield self.calendarObjectUnderTest()
        yield self.exceedQuotaTest(
            lambda: obj.createAttachmentWithName("too-big.attachment")
        )
        # The failed attachment is gone both before and after committing
        # the transaction in which the failure occurred.
        self.assertEquals((yield obj.attachments()), [])
        yield self.commit()
        obj = yield self.calendarObjectUnderTest()
        self.assertEquals((yield obj.attachments()), [])
    @inlineCallbacks
    def test_exceedQuotaReplace(self):
        """
        When quota is exceeded while replacing an attachment, that attachment's
        contents will not be replaced.
        """
        obj = yield self.calendarObjectUnderTest()
        create = lambda: obj.createAttachmentWithName("exists.attachment")
        get = lambda: obj.attachmentWithName("exists.attachment")
        attachment = yield create()
        t = attachment.store(MimeType("text", "x-fixture"), "")
        sampleData = "a reasonably sized attachment"
        t.write(sampleData)
        yield t.loseConnection()
        # Attempt an over-quota replacement of the existing attachment.
        yield self.exceedQuotaTest(get)
        @inlineCallbacks
        def checkOriginal():
            actual = yield self.attachmentToString(attachment)
            expected = sampleData
            # note: trimming to 60 chars (more than len(expected)) just keeps
            # the error message sane when the test fails.
            actual = actual[:60]
            self.assertEquals(actual, expected)
        yield checkOriginal()
        yield self.commit()
        # Make sure that things go back to normal after a commit of that
        # transaction.
        obj = yield self.calendarObjectUnderTest()
        attachment = yield get()
        yield checkOriginal()
    @inlineCallbacks
    def exceedSizeTest(self, getit):
        """
        If too many bytes are passed to the transport returned by
        L{ICalendarObject.createAttachmentWithName},
        L{IAttachmentStorageTransport.loseConnection} will return a L{Deferred}
        that fails with L{AttachmentSizeTooLarge}.
        """
        attachment = yield getit()
        t = attachment.store(MimeType("text", "x-fixture"), "")
        sample = "all work and no play makes jack a dull boy"
        # Fill up to (roughly) the size limit -- NOTE(review): '/' relies on
        # Python 2 integer-division semantics; confirm before a Python 3 port.
        chunk = (sample * (config.MaximumAttachmentSize / len(sample)))

        t.write(chunk)
        # Push two more chunks to exceed the limit.
        t.writeSequence([chunk, chunk])

        d = t.loseConnection()
        yield self.failUnlessFailure(d, AttachmentSizeTooLarge)
    @inlineCallbacks
    def test_exceedSizeNew(self):
        """
        When size is exceeded on a new attachment, that attachment will no
        longer exist.
        """
        # Shrink the limit so the test doesn't need a huge payload.
        self.patch(config, "MaximumAttachmentSize", 100)
        obj = yield self.calendarObjectUnderTest()
        yield self.exceedSizeTest(
            lambda: obj.createAttachmentWithName("too-big.attachment")
        )
        # The failed attachment is gone both before and after committing
        # the transaction in which the failure occurred.
        self.assertEquals((yield obj.attachments()), [])
        yield self.commit()
        obj = yield self.calendarObjectUnderTest()
        self.assertEquals((yield obj.attachments()), [])
    @inlineCallbacks
    def test_exceedSizeReplace(self):
        """
        When size is exceeded while replacing an attachment, that attachment's
        contents will not be replaced.
        """
        # Shrink the limit so the test doesn't need a huge payload.
        self.patch(config, "MaximumAttachmentSize", 100)
        obj = yield self.calendarObjectUnderTest()
        create = lambda: obj.createAttachmentWithName("exists.attachment")
        get = lambda: obj.attachmentWithName("exists.attachment")
        attachment = yield create()
        t = attachment.store(MimeType("text", "x-fixture"), "")
        sampleData = "a reasonably sized attachment"
        t.write(sampleData)
        yield t.loseConnection()
        # Attempt an over-size replacement of the existing attachment.
        yield self.exceedSizeTest(get)
        @inlineCallbacks
        def checkOriginal():
            actual = yield self.attachmentToString(attachment)
            expected = sampleData
            # note: trimming to 60 chars (more than len(expected)) just keeps
            # the error message sane when the test fails.
            actual = actual[:60]
            self.assertEquals(actual, expected)
        yield checkOriginal()
        yield self.commit()
        # Make sure that things go back to normal after a commit of that
        # transaction.
        obj = yield self.calendarObjectUnderTest()
        attachment = yield get()
        yield checkOriginal()
    def test_removeAttachmentWithName(self, refresh=lambda x: x):
        """
        L{ICalendarObject.removeAttachmentWithName} will remove the calendar
        object with the given name.
        """
        @inlineCallbacks
        def deleteIt(ignored):
            # Runs after test_createAttachmentCommit has stored and committed
            # the attachment; removes it, re-fetches via the caller-supplied
            # refresh (identity, or commit-and-reload), and verifies it's gone.
            obj = yield self.calendarObjectUnderTest()
            yield obj.removeAttachmentWithName("new.attachment")
            obj = yield refresh(obj)
            self.assertIdentical(
                None, (yield obj.attachmentWithName("new.attachment"))
            )
            self.assertEquals(list((yield obj.attachments())), [])
        return self.test_createAttachmentCommit().addCallback(deleteIt)
def test_removeAttachmentWithNameCommit(self):
"""
L{ICalendarObject.removeAttachmentWithName} will remove the calendar
object with the given name. (After commit, it will still be gone.)
"""
@inlineCallbacks
def refresh(obj):
yield self.commit()
result = yield self.calendarObjectUnderTest()
returnValue(result)
return self.test_removeAttachmentWithName(refresh)
@inlineCallbacks
def test_noDropboxCalendar(self):
"""
L{ICalendarObject.createAttachmentWithName} may create a directory
named 'dropbox', but this should not be seen as a calendar by
L{ICalendarHome.calendarWithName} or L{ICalendarHome.calendars}.
"""
obj = yield self.calendarObjectUnderTest()
attachment = yield obj.createAttachmentWithName(
"new.attachment",
)
t = attachment.store(MimeType("text", "plain"), "")
t.write("new attachment text")
yield t.loseConnection()
yield self.commit()
home = (yield self.homeUnderTest())
calendars = (yield home.calendars())
self.assertEquals((yield home.calendarWithName("dropbox")), None)
self.assertEquals(
set([n.name() for n in calendars]),
set(home1_calendarNames))
    @inlineCallbacks
    def test_cleanupAttachments(self):
        """
        L{ICalendarObject.remove} will remove an associated calendar
        attachment.
        """
        self.patch(config, "EnableTrashCollection", True)

        # Create attachment
        obj = yield self.calendarObjectUnderTest()
        attachment = yield obj.createAttachmentWithName(
            "new.attachment",
        )
        t = attachment.store(MimeType("text", "x-fixture"))
        t.write("new attachment")
        t.write(" text")
        yield t.loseConnection()
        apath = attachment._path.path
        yield self.commit()

        # File exists on disk and consumes quota.
        self.assertTrue(os.path.exists(apath))
        home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
        quota = (yield home.quotaUsedBytes())
        yield self.commit()
        self.assertNotEqual(quota, 0)

        # Remove resource (to trash)
        obj = yield self.calendarObjectUnderTest()
        yield obj.remove()
        yield self.commit()

        # Attachments still exist and count towards quota
        self.assertTrue(os.path.exists(apath))
        home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
        quota = (yield home.quotaUsedBytes())
        yield self.commit()
        self.assertNotEqual(quota, 0)

        # Fully remove resource
        objects = yield self.trashObjectsUnderTest()
        yield objects[0].purge()
        yield self.commit()

        # Attachments don't exist and will not count towards quota
        self.assertFalse(os.path.exists(apath))
        home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
        quota = (yield home.quotaUsedBytes())
        yield self.commit()
        self.assertEqual(quota, 0)
    @inlineCallbacks
    def test_cleanupMultipleAttachments(self):
        """
        L{ICalendarObject.remove} will remove all associated calendar
        attachments.

        Two attachments are stored on the same resource; a purge() of the
        resource must delete both files and release all quota.
        """
        self.patch(config, "EnableTrashCollection", True)

        # Create attachment
        obj = yield self.calendarObjectUnderTest()
        attachment = yield obj.createAttachmentWithName(
            "new.attachment",
        )
        t = attachment.store(MimeType("text", "x-fixture"))
        t.write("new attachment")
        t.write(" text")
        yield t.loseConnection()
        apath1 = attachment._path.path

        # Second attachment on the same resource.
        attachment = yield obj.createAttachmentWithName(
            "new.attachment2",
        )
        t = attachment.store(MimeType("text", "x-fixture"))
        t.write("new attachment 2")
        t.write(" text")
        yield t.loseConnection()
        apath2 = attachment._path.path
        yield self.commit()

        self.assertTrue(os.path.exists(apath1))
        self.assertTrue(os.path.exists(apath2))
        home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
        quota = (yield home.quotaUsedBytes())
        yield self.commit()
        self.assertNotEqual(quota, 0)

        # Remove resource
        # NOTE(review): purge() is used here (not remove()), so the resource
        # bypasses the trash and both attachments go away immediately.
        obj = yield self.calendarObjectUnderTest()
        yield obj.purge()
        yield self.commit()

        self.assertFalse(os.path.exists(apath1))
        self.assertFalse(os.path.exists(apath2))
        home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
        quota = (yield home.quotaUsedBytes())
        yield self.commit()
        self.assertEqual(quota, 0)
    @inlineCallbacks
    def test_cleanupAttachmentsOnMultipleResources(self):
        """
        L{ICalendarObject.remove} will remove all associated calendar
        attachments unless used in another resource.

        A second event referencing the same dropbox (via X-APPLE-DROPBOX) is
        created; the attachment file must survive until the last referencing
        resource is purged.
        """
        # Create attachment
        obj = yield self.calendarObjectUnderTest()
        attachment = yield obj.createAttachmentWithName(
            "new.attachment",
        )
        t = attachment.store(MimeType("text", "x-fixture"))
        t.write("new attachment")
        t.write(" text")
        yield t.loseConnection()
        apath = attachment._path.path

        # A second event whose X-APPLE-DROPBOX points at an existing dropbox
        # (presumably the one owning the attachment above — the shared id is
        # what keeps the file alive after the first purge; confirm against
        # the fixture data).  Note the template year is pinned to 2012.
        new_component = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Apple Inc.//iCal 4.0.1//EN
CALSCALE:GREGORIAN
BEGIN:VTIMEZONE
TZID:US/Pacific
BEGIN:DAYLIGHT
TZOFFSETFROM:-0800
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU
DTSTART:20070311T020000
TZNAME:PDT
TZOFFSETTO:-0700
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:-0700
RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU
DTSTART:20071104T020000
TZNAME:PST
TZOFFSETTO:-0800
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
ATTENDEE;CN="Wilfredo Sanchez";CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:mailt
 o:wsanchez@example.com
ATTENDEE;CN="Cyrus Daboo";CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:mailto:cda
 boo@example.com
DTEND;TZID=US/Pacific:%(now)s0324T124500
TRANSP:OPAQUE
ORGANIZER;CN="Wilfredo Sanchez":mailto:wsanchez@example.com
UID:uid1-attachmenttest
DTSTAMP:20090326T145447Z
LOCATION:Wilfredo's Office
SEQUENCE:2
X-APPLE-EWS-BUSYSTATUS:BUSY
X-APPLE-DROPBOX:/calendars/__uids__/user01/dropbox/FE5CDC6F-7776-4607-83
 A9-B90FF7ACC8D0.dropbox
SUMMARY:CalDAV protocol updates
DTSTART;TZID=US/Pacific:%(now)s0324T121500
CREATED:20090326T145440Z
BEGIN:VALARM
X-WR-ALARMUID:DB39AB67-449C-441C-89D2-D740B5F41A73
TRIGGER;VALUE=DATE-TIME:%(now)s0324T180009Z
ACTION:AUDIO
END:VALARM
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n") % {"now": 2012}

        calendar = yield self.calendarUnderTest()
        yield calendar.createCalendarObjectWithName(
            "test.ics", Component.fromString(new_component)
        )
        yield self.commit()

        self.assertTrue(os.path.exists(apath))
        home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
        quota = (yield home.quotaUsedBytes())
        yield self.commit()
        self.assertNotEqual(quota, 0)

        # Remove resource — the other referencing event keeps the file alive.
        obj = yield self.calendarObjectUnderTest()
        yield obj.purge()
        yield self.commit()

        self.assertTrue(os.path.exists(apath))
        home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
        quota = (yield home.quotaUsedBytes())
        yield self.commit()
        self.assertNotEqual(quota, 0)

        # Remove resource — last reference gone, so the file and quota go too.
        obj = yield self.calendarObjectUnderTest(name="test.ics")
        yield obj.purge()
        yield self.commit()

        self.assertFalse(os.path.exists(apath))
        home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
        quota = (yield home.quotaUsedBytes())
        yield self.commit()
        self.assertEqual(quota, 0)
class ManagedAttachmentTests(AttachmentTests):
    """
    Variant of L{AttachmentTests} exercising the "managed attachment" code
    paths (attachments identified by a managed-id and created via
    C{createManagedAttachment}) rather than the legacy dropbox mechanism.
    """

    @inlineCallbacks
    def setUp(self):
        """
        Disable dropbox, enable managed attachments, and mark the store as
        already migrated so managed-attachment APIs are active.
        """
        yield super(ManagedAttachmentTests, self).setUp()

        # Need to tweak config and settings to setup dropbox to work
        self.patch(config, "EnableDropBox", False)
        self.patch(config, "EnableManagedAttachments", True)
        self._sqlCalendarStore.enableManagedAttachments = True

        # Make it look like we have migrated
        if (yield self.transactionUnderTest().calendarserverValue("MANAGED-ATTACHMENTS", raiseIfMissing=False)) is None:
            yield self.transactionUnderTest().setCalendarserverValue("MANAGED-ATTACHMENTS", "1")
        yield self.commit()

    @inlineCallbacks
    def createAttachmentTest(self, refresh):
        """
        Common logic for attachment-creation tests.

        @param refresh: 1-argument callable to re-fetch the calendar object
            (identity function, or a commit-then-reload).
        @return: the managed-id of the created attachment.
        """
        obj = yield self.calendarObjectUnderTest()
        attachment = yield obj.createManagedAttachment()
        mid = attachment.managedID()
        t = attachment.store(MimeType("text", "x-fixture"), "new.attachment")
        self.assertProvides(IAttachmentStorageTransport, t)
        t.write("new attachment")
        t.write(" text")
        yield t.loseConnection()
        obj = yield refresh(obj)
        attachment = yield obj.attachmentWithManagedID(mid)
        self.assertProvides(IAttachment, attachment)
        data = yield self.attachmentToString(attachment)
        self.assertEquals(data, "new attachment text")
        contentType = attachment.contentType()
        self.assertIsInstance(contentType, MimeType)
        self.assertEquals(contentType, MimeType("text", "x-fixture"))
        # md5 of "new attachment text"
        self.assertEquals(attachment.md5(), '50a9f27aeed9247a0833f30a631f1858')
        # Listed name embeds the first 8 characters of the managed-id.
        self.assertEquals(
            (yield obj.managedAttachmentList()),
            ['new-%s.attachment' % (mid[:8],)]
        )
        returnValue(mid)

    @inlineCallbacks
    def stringToAttachment(self, obj, name, contents,
                           mimeType=MimeType("text", "x-fixture")):
        """
        Convenience for producing an attachment from a calendar object.

        @param obj: the calendar object which owns the dropbox associated with
            the to-be-created attachment.
        @param name: the (utf-8 encoded) name to create the attachment with.
        @type name: C{bytes}
        @param contents: the desired contents of the new attachment.
        @type contents: C{bytes}
        @param mimeType: the mime type of the incoming bytes.
        @return: a L{Deferred} that fires with the L{IAttachment} that is
            created, once all the bytes have been stored.
        """
        att = yield obj.createManagedAttachment()
        t = att.store(mimeType, name)
        t.write(contents)
        yield t.loseConnection()
        returnValue(att)

    def attachmentToString(self, attachment):
        """
        Convenience to convert an L{IAttachment} to a string.

        @param attachment: an L{IAttachment} provider to convert into a string.
        @return: a L{Deferred} that fires with the contents of the attachment.
        @rtype: L{Deferred} firing C{bytes}
        """
        capture = CaptureProtocol()
        attachment.retrieve(capture)
        return capture.deferred

    @inlineCallbacks
    def test_attachmentPath(self):
        """
        L{ICalendarObject.createManagedAttachment} will store an
        L{IAttachment} object that can be retrieved by
        L{ICalendarObject.attachmentWithManagedID}.
        """
        mid = yield self.createAttachmentTest(lambda x: x)
        obj = yield self.calendarObjectUnderTest()
        attachment = yield obj.attachmentWithManagedID(mid)
        # On-disk layout shards by the md5 of the attachment id:
        # <root>/<h[0:2]>/<h[2:4]>/<h>
        hasheduid = hashlib.md5(str(attachment._attachmentID)).hexdigest()
        attachmentRoot = (
            yield self.calendarObjectUnderTest()
        )._txn._store.attachmentsPath
        attachmentPath = attachmentRoot.child(
            hasheduid[0:2]).child(hasheduid[2:4]).child(hasheduid)
        self.assertTrue(attachmentPath.isfile())

    @inlineCallbacks
    def test_twoAttachmentsWithTheSameName(self):
        """
        Attachments are uniquely identified by their associated object and path;
        two attachments with the same name won't overwrite each other.
        """
        obj = yield self.calendarObjectUnderTest()
        obj2 = yield self.calendarObjectUnderTest(name="2.ics")
        att1 = yield self.stringToAttachment(obj, "sample.attachment",
                                             "test data 1")
        att2 = yield self.stringToAttachment(obj2, "sample.attachment",
                                             "test data 2")
        data1 = yield self.attachmentToString(att1)
        data2 = yield self.attachmentToString(att2)
        self.assertEquals(data1, "test data 1")
        self.assertEquals(data2, "test data 2")

    def test_createAttachment(self):
        """
        L{ICalendarObject.createManagedAttachment} will store an
        L{IAttachment} object that can be retrieved by
        L{ICalendarObject.attachmentWithManagedID}.
        """
        return self.createAttachmentTest(lambda x: x)

    def test_createAttachmentCommit(self):
        """
        L{ICalendarObject.createManagedAttachment} will store an
        L{IAttachment} object that can be retrieved by
        L{ICalendarObject.attachmentWithManagedID} in subsequent transactions.
        """
        @inlineCallbacks
        def refresh(obj):
            yield self.commit()
            result = yield self.calendarObjectUnderTest()
            returnValue(result)
        return self.createAttachmentTest(refresh)

    @inlineCallbacks
    def test_attachmentTemporaryFileCleanup(self):
        """
        L{IAttachmentStream} object cleans-up its temporary file on txn abort.
        """
        obj = yield self.calendarObjectUnderTest()
        attachment = yield obj.createManagedAttachment()
        t = attachment.store(MimeType("text", "x-fixture"), "new.attachment")
        temp = t._path.path
        yield self.abort()
        self.assertFalse(os.path.exists(temp))

        # Second pass: the temp file is already gone before the abort; the
        # cleanup must tolerate that (no exception, still absent afterwards).
        obj = yield self.calendarObjectUnderTest()
        attachment = yield obj.createManagedAttachment()
        t = attachment.store(MimeType("text", "x-fixture"), "new.attachment")
        temp = t._path.path
        os.remove(temp)
        yield self.abort()
        self.assertFalse(os.path.exists(temp))

    @inlineCallbacks
    def test_quotaAllowedBytes(self):
        """
        L{ICalendarHome.quotaAllowedBytes} should return the configuration value
        passed to the calendar store's constructor.
        """
        expected = deriveQuota(self)
        home = yield self.homeUnderTest()
        actual = home.quotaAllowedBytes()
        self.assertEquals(expected, actual)

    @withSpecialQuota(None)
    @inlineCallbacks
    def test_quotaUnlimited(self):
        """
        When L{ICalendarHome.quotaAllowedBytes} returns C{None}, quota is
        unlimited; any sized attachment can be stored.
        """
        home = yield self.homeUnderTest()
        allowed = home.quotaAllowedBytes()
        self.assertIdentical(allowed, None)
        yield self.test_createAttachment()

    @inlineCallbacks
    def test_quotaTransportAddress(self):
        """
        Since L{IAttachmentStorageTransport} is a subinterface of L{ITransport},
        it must provide peer and host addresses.
        """
        obj = yield self.calendarObjectUnderTest()
        name = 'a-fun-attachment'
        attachment = yield obj.createManagedAttachment()
        transport = attachment.store(MimeType("test", "x-something"), name)
        peer = transport.getPeer()
        host = transport.getHost()
        self.assertIdentical(peer.attachment, attachment)
        self.assertIdentical(host.attachment, attachment)
        self.assertIn(name, repr(peer))
        self.assertIn(name, repr(host))

    @inlineCallbacks
    def exceedQuotaTest(self, getit, name):
        """
        If too many bytes are passed to the transport returned by
        L{ICalendarObject.createManagedAttachment},
        L{IAttachmentStorageTransport.loseConnection} will return a L{Deferred}
        that fails with L{QuotaExceeded}.
        """
        home = yield self.homeUnderTest()
        attachment = yield getit()
        t = attachment.store(MimeType("text", "x-fixture"), name)
        sample = "all work and no play makes jack a dull boy"
        # Integer division (Python 2 '/'): one chunk is roughly the full
        # quota, and three chunks are written, guaranteeing overflow.
        chunk = (sample * (home.quotaAllowedBytes() / len(sample)))

        t.write(chunk)
        t.writeSequence([chunk, chunk])

        d = t.loseConnection()
        yield self.failUnlessFailure(d, QuotaExceeded)

    @inlineCallbacks
    def test_exceedQuotaNew(self):
        """
        When quota is exceeded on a new attachment, that attachment will no
        longer exist.
        """
        obj = yield self.calendarObjectUnderTest()
        yield self.exceedQuotaTest(
            lambda: obj.createManagedAttachment(), "too-big.attachment"
        )
        self.assertEquals((yield obj.managedAttachmentList()), [])
        yield self.commit()
        obj = yield self.calendarObjectUnderTest()
        self.assertEquals((yield obj.managedAttachmentList()), [])

    @inlineCallbacks
    def test_exceedQuotaReplace(self):
        """
        When quota is exceeded while replacing an attachment, that attachment's
        contents will not be replaced.
        """
        obj = yield self.calendarObjectUnderTest()
        create = lambda: obj.createManagedAttachment()
        attachment = yield create()
        get = lambda: obj.attachmentWithManagedID(attachment.managedID())

        t = attachment.store(MimeType("text", "x-fixture"), "new.attachment")
        sampleData = "a reasonably sized attachment"
        t.write(sampleData)
        yield t.loseConnection()
        yield self.exceedQuotaTest(get, "exists.attachment")

        @inlineCallbacks
        def checkOriginal():
            actual = yield self.attachmentToString(attachment)
            expected = sampleData
            # note: 60 is less than len(expected); trimming is just to make
            # the error message look sane when the test fails.
            actual = actual[:60]
            self.assertEquals(actual, expected)
        yield checkOriginal()
        yield self.commit()

        # Make sure that things go back to normal after a commit of that
        # transaction.
        obj = yield self.calendarObjectUnderTest()
        attachment = yield get()
        yield checkOriginal()

    @inlineCallbacks
    def exceedSizeTest(self, getit, name):
        """
        If too many bytes are passed to the transport returned by
        L{ICalendarObject.createAttachmentWithName},
        L{IAttachmentStorageTransport.loseConnection} will return a L{Deferred}
        that fails with L{AttachmentSizeTooLarge}.
        """
        attachment = yield getit()
        t = attachment.store(MimeType("text", "x-fixture"), name)
        sample = "all work and no play makes jack a dull boy"
        # One chunk approximates the configured limit; three are written.
        chunk = (sample * (config.MaximumAttachmentSize / len(sample)))

        t.write(chunk)
        t.writeSequence([chunk, chunk])

        d = t.loseConnection()
        yield self.failUnlessFailure(d, AttachmentSizeTooLarge)

    @inlineCallbacks
    def test_exceedSizeNew(self):
        """
        When size is exceeded on a new attachment, that attachment will no
        longer exist.
        """
        self.patch(config, "MaximumAttachmentSize", 100)
        obj = yield self.calendarObjectUnderTest()
        yield self.exceedSizeTest(
            lambda: obj.createManagedAttachment(), "too-big.attachment"
        )
        self.assertEquals((yield obj.managedAttachmentList()), [])
        yield self.commit()
        obj = yield self.calendarObjectUnderTest()
        self.assertEquals((yield obj.managedAttachmentList()), [])

    @inlineCallbacks
    def test_exceedSizeReplace(self):
        """
        When size is exceeded while replacing an attachment, that attachment's
        contents will not be replaced.
        """
        self.patch(config, "MaximumAttachmentSize", 100)
        obj = yield self.calendarObjectUnderTest()
        create = lambda: obj.createManagedAttachment()
        attachment = yield create()
        get = lambda: obj.attachmentWithManagedID(attachment.managedID())

        t = attachment.store(MimeType("text", "x-fixture"), "new.attachment")
        sampleData = "a reasonably sized attachment"
        t.write(sampleData)
        yield t.loseConnection()
        yield self.exceedSizeTest(get, "exists.attachment")

        @inlineCallbacks
        def checkOriginal():
            actual = yield self.attachmentToString(attachment)
            expected = sampleData
            # note: 60 is less than len(expected); trimming is just to make
            # the error message look sane when the test fails.
            actual = actual[:60]
            self.assertEquals(actual, expected)
        yield checkOriginal()
        yield self.commit()

        # Make sure that things go back to normal after a commit of that
        # transaction.
        obj = yield self.calendarObjectUnderTest()
        attachment = yield get()
        yield checkOriginal()

    def test_removeManagedAttachmentWithID(self, refresh=lambda x: x):
        """
        L{ICalendarObject.removeManagedAttachmentWithID} will remove the calendar
        object with the given managed-id.
        """
        @inlineCallbacks
        def deleteIt(mid):
            obj = yield self.calendarObjectUnderTest()
            yield obj.removeManagedAttachmentWithID(mid)
            obj = yield refresh(obj)
            self.assertIdentical(
                None, (yield obj.attachmentWithManagedID(mid))
            )
            self.assertEquals(list((yield obj.managedAttachmentList())), [])
        return self.test_createAttachmentCommit().addCallback(deleteIt)

    def test_removeManagedAttachmentWithIDCommit(self):
        """
        L{ICalendarObject.removeManagedAttachmentWithID} will remove the calendar
        object with the given managed-id. (After commit, it will still be gone.)
        """
        @inlineCallbacks
        def refresh(obj):
            yield self.commit()
            result = yield self.calendarObjectUnderTest()
            returnValue(result)
        return self.test_removeManagedAttachmentWithID(refresh)

    @inlineCallbacks
    def test_noDropboxCalendar(self):
        """
        L{ICalendarObject.createManagedAttachment} may create a directory
        named 'dropbox', but this should not be seen as a calendar by
        L{ICalendarHome.calendarWithName} or L{ICalendarHome.calendars}.
        """
        obj = yield self.calendarObjectUnderTest()
        attachment = yield obj.createManagedAttachment()
        t = attachment.store(MimeType("text", "plain"), "new.attachment")
        t.write("new attachment text")
        yield t.loseConnection()
        yield self.commit()
        home = (yield self.homeUnderTest())
        calendars = (yield home.calendars())
        self.assertEquals((yield home.calendarWithName("dropbox")), None)
        self.assertEquals(
            set([n.name() for n in calendars]),
            set(home1_calendarNames))

    @inlineCallbacks
    def test_cleanupAttachments(self):
        """
        L{ICalendarObject.remove} will remove an associated calendar
        attachment.

        Trash-enabled variant: remove() keeps the attachment alive until the
        trashed resource is purged.
        """
        self.patch(config, "EnableTrashCollection", True)

        # Create attachment
        obj = yield self.calendarObjectUnderTest()
        attachment = yield obj.createManagedAttachment()
        t = attachment.store(MimeType("text", "x-fixture"), "new.attachment")
        t.write("new attachment")
        t.write(" text")
        yield t.loseConnection()
        apath = attachment._path.path
        yield self.commit()

        self.assertTrue(os.path.exists(apath))
        home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
        quota = (yield home.quotaUsedBytes())
        yield self.commit()
        self.assertNotEqual(quota, 0)

        # Remove resource (to trash)
        obj = yield self.calendarObjectUnderTest()
        yield obj.remove()
        yield self.commit()

        # Attachments still exist and count towards quota
        self.assertTrue(os.path.exists(apath))
        home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
        quota = (yield home.quotaUsedBytes())
        yield self.commit()
        self.assertNotEqual(quota, 0)

        # Fully remove resource
        objects = yield self.trashObjectsUnderTest()
        yield objects[0].purge()
        yield self.commit()

        # Attachments don't exist and will not count towards quota
        self.assertFalse(os.path.exists(apath))
        home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
        quota = (yield home.quotaUsedBytes())
        yield self.commit()
        self.assertEqual(quota, 0)

    @inlineCallbacks
    def test_cleanupMultipleAttachments(self):
        """
        L{ICalendarObject.remove} will remove all associated calendar
        attachments.
        """
        # Create attachment
        obj = yield self.calendarObjectUnderTest()
        attachment = yield obj.createManagedAttachment()
        t = attachment.store(MimeType("text", "x-fixture"), "new.attachment")
        t.write("new attachment")
        t.write(" text")
        yield t.loseConnection()
        apath1 = attachment._path.path

        attachment = yield obj.createManagedAttachment()
        t = attachment.store(MimeType("text", "x-fixture"), "new.attachment2")
        t.write("new attachment 2")
        t.write(" text")
        yield t.loseConnection()
        apath2 = attachment._path.path
        yield self.commit()

        self.assertTrue(os.path.exists(apath1))
        self.assertTrue(os.path.exists(apath2))
        home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
        quota = (yield home.quotaUsedBytes())
        yield self.commit()
        self.assertNotEqual(quota, 0)

        # Remove resource
        obj = yield self.calendarObjectUnderTest()
        yield obj.purge()
        yield self.commit()

        self.assertFalse(os.path.exists(apath1))
        self.assertFalse(os.path.exists(apath2))
        home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
        quota = (yield home.quotaUsedBytes())
        yield self.commit()
        self.assertEqual(quota, 0)

    @inlineCallbacks
    def test_cleanupAttachmentsOnMultipleResources(self):
        """
        L{ICalendarObject.remove} will remove all associated calendar
        attachments unless used in another resource.
        """
        # Create attachment
        obj = yield self.calendarObjectUnderTest()
        attachment, _ignore_location = yield obj.addAttachment(None, MimeType("text", "x-fixture"), "new.attachment", MemoryStream("new attachment text"))
        apath = attachment._path.path

        # Clone the event under a new UID so both resources reference the
        # same attachment.
        cdata = yield obj.componentForUser()
        newcdata = Component.fromString(str(cdata).replace("uid1", "uid1-attached"))
        calendar = yield self.calendarUnderTest()
        yield calendar.createCalendarObjectWithName("test.ics", newcdata)
        yield self.commit()

        self.assertTrue(os.path.exists(apath))
        home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
        quota = (yield home.quotaUsedBytes())
        yield self.commit()
        self.assertNotEqual(quota, 0)

        # Remove resource
        obj = yield self.calendarObjectUnderTest()
        yield obj.purge()
        yield self.commit()

        self.assertTrue(os.path.exists(apath))
        home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
        quota = (yield home.quotaUsedBytes())
        yield self.commit()
        self.assertNotEqual(quota, 0)

        # Remove resource
        obj = yield self.calendarObjectUnderTest(name="test.ics")
        yield obj.purge()
        yield self.commit()

        self.assertFalse(os.path.exists(apath))
        home = (yield self.transactionUnderTest().calendarHomeWithUID(u"home1"))
        quota = (yield home.quotaUsedBytes())
        yield self.commit()
        self.assertEqual(quota, 0)
# Current year, used to keep the fixture events' dates in the present.
now = DateTime.getToday().getYear()

# Fixture event with no attachment; %(year)s and %(uid)s are filled in when
# the requirements dict below is built.
PLAIN_ICS = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Apple Inc.//iCal 4.0.1//EN
CALSCALE:GREGORIAN
BEGIN:VTIMEZONE
TZID:US/Pacific
BEGIN:STANDARD
TZOFFSETFROM:-0700
RRULE:FREQ=YEARLY;UNTIL=20061029T090000Z;BYMONTH=10;BYDAY=-1SU
DTSTART:19621028T020000
TZNAME:PST
TZOFFSETTO:-0800
END:STANDARD
BEGIN:DAYLIGHT
TZOFFSETFROM:-0800
RRULE:FREQ=YEARLY;UNTIL=20060402T100000Z;BYMONTH=4;BYDAY=1SU
DTSTART:19870405T020000
TZNAME:PDT
TZOFFSETTO:-0700
END:DAYLIGHT
BEGIN:DAYLIGHT
TZOFFSETFROM:-0800
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU
DTSTART:20070311T020000
TZNAME:PDT
TZOFFSETTO:-0700
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:-0700
RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU
DTSTART:20071104T020000
TZNAME:PST
TZOFFSETTO:-0800
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
CREATED:20100303T181216Z
UID:685BC3A1-195A-49B3-926D-388DDACA78A6-%(uid)s
DTEND;TZID=US/Pacific:%(year)s0307T151500
TRANSP:OPAQUE
SUMMARY:Event without attachment
DTSTART;TZID=US/Pacific:%(year)s0307T111500
DTSTAMP:20100303T181220Z
SEQUENCE:2
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n")

# Fixture event carrying an X-APPLE-DROPBOX property; %(userid)s and
# %(dropboxid)s select whose dropbox the event references.
ATTACHMENT_ICS = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Apple Inc.//iCal 4.0.1//EN
CALSCALE:GREGORIAN
BEGIN:VTIMEZONE
TZID:US/Pacific
BEGIN:STANDARD
TZOFFSETFROM:-0700
RRULE:FREQ=YEARLY;UNTIL=20061029T090000Z;BYMONTH=10;BYDAY=-1SU
DTSTART:19621028T020000
TZNAME:PST
TZOFFSETTO:-0800
END:STANDARD
BEGIN:DAYLIGHT
TZOFFSETFROM:-0800
RRULE:FREQ=YEARLY;UNTIL=20060402T100000Z;BYMONTH=4;BYDAY=1SU
DTSTART:19870405T020000
TZNAME:PDT
TZOFFSETTO:-0700
END:DAYLIGHT
BEGIN:DAYLIGHT
TZOFFSETFROM:-0800
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU
DTSTART:20070311T020000
TZNAME:PDT
TZOFFSETTO:-0700
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:-0700
RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU
DTSTART:20071104T020000
TZNAME:PST
TZOFFSETTO:-0800
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
CREATED:20100303T181216Z
UID:57A5D1F6-9A57-4F74-9520-25C617F54B88-%(uid)s
TRANSP:OPAQUE
SUMMARY:Event with attachment
DTSTART;TZID=US/Pacific:%(year)s0308T111500
DTEND;TZID=US/Pacific:%(year)s0308T151500
DTSTAMP:20100303T181220Z
X-APPLE-DROPBOX:/calendars/__uids__/%(userid)s/dropbox/%(dropboxid)s.dropbox
SEQUENCE:2
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n")
class AttachmentMigrationTests(CommonCommonTests, unittest.TestCase):
    """
    Test migrating dropbox to managed attachments.
    """

    # Resource metadata applied to every seeded calendar object below.
    metadata = {
        "accessMode": "PUBLIC",
        "isScheduleObject": True,
        "scheduleTag": "abc",
        "scheduleEtags": (),
        "hasPrivateComment": False,
    }

    # Initial store contents: home1 owns the dropboxes; several home2 events
    # reference home1's dropboxes (shared dropbox ids 1.3 and 1.4), which is
    # what the cross-home migration tests exercise.
    requirements = {
        u"home1" : {
            "calendar1" : {
                "1.1.ics" : (PLAIN_ICS % {"year": now, "uid": "1.1", }, metadata,),
                "1.2.ics" : (ATTACHMENT_ICS % {"year": now, "uid": "1.2", "userid": "user01", "dropboxid": "1.2"}, metadata,),
                "1.3.ics" : (ATTACHMENT_ICS % {"year": now, "uid": "1.3", "userid": "user01", "dropboxid": "1.3"}, metadata,),
                "1.4.ics" : (ATTACHMENT_ICS % {"year": now, "uid": "1.4", "userid": "user01", "dropboxid": "1.4"}, metadata,),
                "1.5.ics" : (ATTACHMENT_ICS % {"year": now, "uid": "1.5", "userid": "user01", "dropboxid": "1.4"}, metadata,),
            }
        },
        u"home2" : {
            "calendar2" : {
                "2-2.1.ics" : (PLAIN_ICS % {"year": now, "uid": "2-2.1", }, metadata,),
                "2-2.2.ics" : (ATTACHMENT_ICS % {"year": now, "uid": "2-2.2", "userid": "user02", "dropboxid": "2.2"}, metadata,),
                "2-2.3.ics" : (ATTACHMENT_ICS % {"year": now, "uid": "1.3", "userid": "user01", "dropboxid": "1.3"}, metadata,),
            },
            "calendar3" : {
                "2-3.1.ics" : (PLAIN_ICS % {"year": now, "uid": "2-3.1", }, metadata,),
                "2-3.2.ics" : (ATTACHMENT_ICS % {"year": now, "uid": "1.4", "userid": "user01", "dropboxid": "1.4"}, metadata,),
                "2-3.3.ics" : (ATTACHMENT_ICS % {"year": now, "uid": "1.5", "userid": "user01", "dropboxid": "1.4"}, metadata,),
            }
        }
    }
    @inlineCallbacks
    def setUp(self):
        """
        Build a store/directory from the "attachments" test account data and
        seed the initial calendar fixtures.
        """
        yield super(AttachmentMigrationTests, self).setUp()
        attachmentsFilePath = FilePath(
            os.path.join(os.path.dirname(__file__), "attachments")
        )
        yield self.buildStoreAndDirectory(
            accounts=attachmentsFilePath.child("accounts.xml"),
            resources=attachmentsFilePath.child("resources.xml"),
        )
        yield self.populate()
        # Maps attachment file name -> FilePath; filled by _addAttachment().
        self.paths = {}
    @inlineCallbacks
    def populate(self):
        """
        Seed the store from C{requirements}, then delete all attachment rows
        and the MANAGED-ATTACHMENTS marker so every test starts from an
        un-migrated state.
        """
        yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
        self.notifierFactory.reset()

        txn = self._sqlCalendarStore.newTransaction()

        # Wipe any attachment data created during population.
        yield Delete(
            From=schema.ATTACHMENT,
            Where=None
        ).on(txn)
        yield Delete(
            From=schema.ATTACHMENT_CALENDAR_OBJECT,
            Where=None
        ).on(txn)

        # Remove the migration marker so the store looks un-migrated.
        cs = schema.CALENDARSERVER
        yield Delete(
            From=cs,
            Where=cs.NAME == "MANAGED-ATTACHMENTS"
        ).on(txn)
        yield txn.commit()
    @inlineCallbacks
    def _addAttachment(self, home, calendar, event, dropboxid, name):
        """
        Store a real dropbox attachment on the given event and add a matching
        ATTACH URL property to its component.

        @return: the created attachment object.
        """
        # Temporarily permit dropbox-style attachment creation on this store
        # (toggled back off at the end).
        self._sqlCalendarStore._dropbox_ok = True
        txn = self._sqlCalendarStore.newTransaction()

        # Create an event with an attachment
        home = (yield txn.calendarHomeWithUID(home))
        calendar = (yield home.calendarWithName(calendar))
        event = (yield calendar.calendarObjectWithName(event))
        attachment = (yield event.createAttachmentWithName(name))
        t = attachment.store(MimeType("text", "x-fixture"))
        t.write("%s/%s/%s/%s" % (home, calendar, event, name,))
        t.write(" attachment")
        yield t.loseConnection()
        # Record the path so tests can check file existence later.
        self.paths[name] = attachment._path

        cal = (yield event.componentForUser())
        cal.mainComponent().addProperty(Property(
            "ATTACH",
            "http://localhost/calendars/users/%s/dropbox/%s.dropbox/%s" % (home.name(), dropboxid, name,),
            valuetype=Value.VALUETYPE_URI
        ))
        yield event.setComponent(cal)
        yield txn.commit()

        self._sqlCalendarStore._dropbox_ok = False
        returnValue(attachment)
@inlineCallbacks
def _addAttachmentProperty(self, home, calendar, event, dropboxid, owner_home, name):
txn = self._sqlCalendarStore.newTransaction()
# Create an event with an attachment
home = (yield txn.calendarHomeWithUID(home))
calendar = (yield home.calendarWithName(calendar))
event = (yield calendar.calendarObjectWithName(event))
cal = (yield event.componentForUser())
cal.mainComponent().addProperty(Property(
"ATTACH",
"http://localhost/calendars/users/%s/dropbox/%s.dropbox/%s" % (owner_home, dropboxid, name,),
valuetype=Value.VALUETYPE_URI
))
yield event.setComponent(cal)
yield txn.commit()
@inlineCallbacks
def _addAllAttachments(self):
"""
Add the full set of attachments to be used for testing.
"""
yield self._addAttachment(u"home1", "calendar1", "1.2.ics", "1.2", "attach_1_2_1.txt")
yield self._addAttachment(u"home1", "calendar1", "1.2.ics", "1.2", "attach_1_2_2.txt")
yield self._addAttachment(u"home1", "calendar1", "1.3.ics", "1.3", "attach_1_3.txt")
yield self._addAttachment(u"home1", "calendar1", "1.4.ics", "1.4", "attach_1_4.txt")
yield self._addAttachmentProperty(u"home1", "calendar1", "1.5.ics", "1.4", "home1", "attach_1_4.txt")
yield self._addAttachment(u"home2", "calendar2", "2-2.2.ics", "2.2", "attach_2_2.txt")
yield self._addAttachmentProperty(u"home2", "calendar2", "2-2.3.ics", "1.3", "home1", "attach_1_3.txt")
yield self._addAttachmentProperty(u"home2", "calendar3", "2-3.2.ics", "1.4", "home1", "attach_1_4.txt")
yield self._addAttachmentProperty(u"home2", "calendar3", "2-3.3.ics", "1.4", "home1", "attach_1_4.txt")
    @inlineCallbacks
    def _verifyConversion(self, home, calendar, event, filenames):
        """
        Verify that the specified event contains managed attachments only.

        @param home: uid of the calendar home to check
        @param calendar: name of the calendar containing the event
        @param event: resource name of the calendar object
        @param filenames: expected attachment file names on the event
        """
        txn = self._sqlCalendarStore.newTransaction()
        home = (yield txn.calendarHomeWithUID(home))
        calendar = (yield home.calendarWithName(calendar))
        event = (yield calendar.calendarObjectWithName(event))
        component = (yield event.componentForUser()).mainComponent()

        # No more X-APPLE-DROPBOX
        self.assertFalse(component.hasProperty("X-APPLE-DROPBOX"))

        # Check only managed attachments exist
        attachments = (yield event.componentForUser()).mainComponent().properties("ATTACH")
        dropbox_count = 0
        managed_count = 0
        for attach in attachments:
            if attach.hasParameter("MANAGED-ID"):
                managed_count += 1
                # NOTE(review): migrated managed attachments apparently keep
                # a dropbox-compatible "/dropbox/" URL — confirm this is the
                # intended post-migration location format.
                self.assertTrue(attach.value().find("/dropbox/") != -1)
                self.assertTrue(attach.parameterValue("FILENAME") in filenames)
            else:
                dropbox_count += 1
        self.assertEqual(managed_count, len(filenames))
        self.assertEqual(dropbox_count, 0)
        yield txn.commit()
    @inlineCallbacks
    def _verifyNoConversion(self, home, calendar, event, filenames):
        """
        Verify that the specified event does not contain managed attachments.

        @param home: uid of the calendar home to check
        @param calendar: name of the calendar containing the event
        @param event: resource name of the calendar object
        @param filenames: expected dropbox attachment file names on the event
        """
        txn = self._sqlCalendarStore.newTransaction()
        home = (yield txn.calendarHomeWithUID(home))
        calendar = (yield home.calendarWithName(calendar))
        event = (yield calendar.calendarObjectWithName(event))
        component = (yield event.componentForUser()).mainComponent()

        # X-APPLE-DROPBOX present
        self.assertTrue(component.hasProperty("X-APPLE-DROPBOX"))

        # Check only managed attachments exist
        attachments = (yield event.componentForUser()).mainComponent().properties("ATTACH")
        dropbox_count = 0
        managed_count = 0
        for attach in attachments:
            if attach.hasParameter("MANAGED-ID"):
                managed_count += 1
            else:
                dropbox_count += 1
                # Dropbox attachments are identified by a /dropbox/ URL
                # ending in one of the expected file names.
                self.assertTrue(attach.value().find("/dropbox/") != -1)
                self.assertTrue(any([attach.value().endswith(filename) for filename in filenames]))
        self.assertEqual(managed_count, 0)
        self.assertEqual(dropbox_count, len(filenames))
        yield txn.commit()
    @inlineCallbacks
    def test_loadCalendarObjectsForDropboxID(self):
        """
        Test L{txdav.caldav.datastore.sql.CalendarStore._loadCalendarObjectsForDropboxID} returns the right set of
        calendar objects.
        """
        txn = self._sqlCalendarStore.newTransaction()
        calstore = CalendarStoreFeatures(self._sqlCalendarStore)
        # Expected results follow the requirements fixture: each dropbox id
        # maps to every resource (across homes) that references it.
        for dropbox_id, result_count, result_names in (
            ("1.2", 1, ("1.2.ics",)),
            ("1.3", 2, ("1.3.ics", "2-2.3.ics",)),
            ("1.4", 4, ("1.4.ics", "1.5.ics", "2-3.2.ics", "2-3.3.ics",)),
            ("2.2", 1, ("2-2.2.ics",)),
        ):
            cobjs = (yield calstore._loadCalendarObjectsForDropboxID(txn, "%s.dropbox" % (dropbox_id,)))
            self.assertEqual(len(cobjs), result_count, "Failed count with dropbox id: %s" % (dropbox_id,))
            names = set([cobj.name() for cobj in cobjs])
            self.assertEqual(names, set(result_names), "Failed names with dropbox id: %s" % (dropbox_id,))
    @inlineCallbacks
    def test_convertToManaged(self):
        """
        Test L{txdav.caldav.datastore.sql.DropboxAttachment.convertToManaged} converts properly to a ManagedAttachment.
        """
        yield self._addAttachment(u"home1", "calendar1", "1.2.ics", "1.2", "attach_1_2.txt")

        # Convert the dropbox attachment in its own transaction.
        txn = self._sqlCalendarStore.newTransaction()
        dattachment = (yield DropBoxAttachment.load(txn, "1.2.dropbox", "attach_1_2.txt"))
        self.assertNotEqual(dattachment, None)
        self.assertTrue(dattachment._path.exists())
        mattachment = (yield dattachment.convertToManaged())
        self.assertNotEqual(mattachment, None)
        yield txn.commit()

        # The on-disk file moves from the dropbox path to the managed path.
        self.assertFalse(dattachment._path.exists())
        self.assertTrue(mattachment._path.exists())

        # Dropbox attachment gone
        txn = self._sqlCalendarStore.newTransaction()
        dattachment2 = (yield DropBoxAttachment.load(txn, "1.2", "attach_1_2.txt"))
        self.assertEqual(dattachment2, None)

        # Managed attachment present
        txn = self._sqlCalendarStore.newTransaction()
        mattachment2 = (yield ManagedAttachment.load(txn, None, None, attachmentID=dattachment._attachmentID))
        self.assertNotEqual(mattachment2, None)
        self.assertTrue(mattachment2.isManaged())
@inlineCallbacks
def test_newReference(self):
    """
    Test L{txdav.caldav.datastore.sql.ManagedAttachment.newReference} creates a new managed attachment reference.
    """
    yield self._addAttachment(u"home1", "calendar1", "1.4.ics", "1.4", "attach_1_4.txt")

    # Convert the shared dropbox attachment, then reference it from both
    # events that used it.
    txn = self._sqlCalendarStore.newTransaction()
    home = (yield txn.calendarHomeWithUID(u"home1"))
    calendar = (yield home.calendarWithName("calendar1"))
    event4 = (yield calendar.calendarObjectWithName("1.4.ics"))
    event5 = (yield calendar.calendarObjectWithName("1.5.ics"))

    dattachment = (yield DropBoxAttachment.load(txn, "1.4.dropbox", "attach_1_4.txt"))
    self.assertNotEqual(dattachment, None)
    self.assertTrue(dattachment._path.exists())
    mattachment = (yield dattachment.convertToManaged())
    self.assertNotEqual(mattachment, None)
    self.assertNotEqual(mattachment.managedID(), None)

    # Both new references share the original managed-id
    mnew4 = (yield mattachment.newReference(event4._resourceID))
    self.assertNotEqual(mnew4, None)
    self.assertEqual(mnew4.managedID(), mattachment.managedID())
    mnew5 = (yield mattachment.newReference(event5._resourceID))
    self.assertNotEqual(mnew5, None)
    self.assertEqual(mnew5.managedID(), mattachment.managedID())
    yield txn.commit()

    # Each reference loads back as a managed attachment bound to its event
    for event, mnew in ((event4, mnew4), (event5, mnew5)):
        txn = self._sqlCalendarStore.newTransaction()
        mtest = (yield ManagedAttachment.load(txn, event._resourceID, mnew.managedID()))
        self.assertNotEqual(mtest, None)
        self.assertTrue(mtest.isManaged())
        self.assertEqual(mtest._objectResourceID, event._resourceID)
        yield txn.commit()
@inlineCallbacks
def test_convertAttachments(self):
    """
    Test L{txdav.caldav.datastore.sql.CalendarObject.convertAttachments} re-writes calendar data.

    Converts the event's two dropbox attachments to managed attachments one
    at a time and verifies the ATTACH properties after each step.
    """
    yield self._addAttachment(u"home1", "calendar1", "1.2.ics", "1.2", "attach_1_2_1.txt")
    yield self._addAttachment(u"home1", "calendar1", "1.2.ics", "1.2", "attach_1_2_2.txt")

    txn = self._sqlCalendarStore.newTransaction()
    home = (yield txn.calendarHomeWithUID(u"home1"))
    calendar = (yield home.calendarWithName("calendar1"))
    event = (yield calendar.calendarObjectWithName("1.2.ics"))

    # Both ATTACHs start out as plain dropbox URLs
    attachments = (yield event.componentForUser()).mainComponent().properties("ATTACH")
    for prop in attachments:
        self.assertTrue("1.2.dropbox" in prop.value())
        self.assertTrue(prop.value().endswith("attach_1_2_1.txt") or prop.value().endswith("attach_1_2_2.txt"))
        self.assertFalse("MANAGED-ID" in prop.value())

    # Convert the first dropbox attachment
    dattachment = (yield DropBoxAttachment.load(txn, "1.2.dropbox", "attach_1_2_1.txt"))
    mattachment = (yield dattachment.convertToManaged())
    mnew = (yield mattachment.newReference(event._resourceID))
    yield event.convertAttachments(dattachment, mnew)
    yield txn.commit()

    txn = self._sqlCalendarStore.newTransaction()
    home = (yield txn.calendarHomeWithUID(u"home1"))
    calendar = (yield home.calendarWithName("calendar1"))
    event = (yield calendar.calendarObjectWithName("1.2.ics"))

    # Expect one managed-id ATTACH and one remaining dropbox ATTACH
    attachments = (yield event.componentForUser()).mainComponent().properties("ATTACH")
    n_dropbox = 0
    n_managed = 0
    for prop in attachments:
        if prop.hasParameter("MANAGED-ID"):
            n_managed += 1
            self.assertTrue("1.2.dropbox" in prop.value())
            self.assertEqual(prop.parameterValue("MANAGED-ID"), mnew.managedID())
            self.assertEqual(prop.parameterValue("FILENAME"), mnew.name())
        else:
            n_dropbox += 1
            self.assertTrue("1.2.dropbox" in prop.value())
            self.assertTrue(prop.value().endswith("attach_1_2_2.txt"))
    self.assertEqual(n_managed, 1)
    self.assertEqual(n_dropbox, 1)
    yield txn.commit()

    # Convert the second dropbox attachment
    txn = self._sqlCalendarStore.newTransaction()
    home = (yield txn.calendarHomeWithUID(u"home1"))
    calendar = (yield home.calendarWithName("calendar1"))
    event = (yield calendar.calendarObjectWithName("1.2.ics"))
    dattachment = (yield DropBoxAttachment.load(txn, "1.2.dropbox", "attach_1_2_2.txt"))
    mattachment = (yield dattachment.convertToManaged())
    mnew = (yield mattachment.newReference(event._resourceID))
    yield event.convertAttachments(dattachment, mnew)
    yield txn.commit()

    txn = self._sqlCalendarStore.newTransaction()
    home = (yield txn.calendarHomeWithUID(u"home1"))
    calendar = (yield home.calendarWithName("calendar1"))
    event = (yield calendar.calendarObjectWithName("1.2.ics"))
    component = (yield event.componentForUser()).mainComponent()

    # The dropbox marker property is gone once no dropbox ATTACHs remain
    self.assertFalse(component.hasProperty("X-APPLE-DROPBOX"))

    # Both ATTACHs are now managed
    attachments = (yield event.componentForUser()).mainComponent().properties("ATTACH")
    n_dropbox = 0
    n_managed = 0
    for prop in attachments:
        if prop.hasParameter("MANAGED-ID"):
            n_managed += 1
            self.assertTrue("1.2.dropbox" in prop.value())
            self.assertTrue(prop.parameterValue("FILENAME") in ("attach_1_2_1.txt", "attach_1_2_2.txt"))
        else:
            n_dropbox += 1
    self.assertEqual(n_managed, 2)
    self.assertEqual(n_dropbox, 0)
    yield txn.commit()
@inlineCallbacks
def test_upgradeDropbox_oneEvent(self):
    """
    Test L{txdav.caldav.datastore.sql.CalendarStoreFeatures._upgradeDropbox} re-writes calendar data
    for one event with an attachment.
    """
    yield self._addAllAttachments()

    txn = self._sqlCalendarStore.newTransaction()
    calstore = CalendarStoreFeatures(self._sqlCalendarStore)
    yield calstore._upgradeDropbox(txn, "1.2.dropbox")
    yield txn.commit()

    # Only the 1.2 event is converted; all other events keep dropbox ATTACHs
    yield self._verifyConversion(u"home1", "calendar1", "1.2.ics", ("attach_1_2_1.txt", "attach_1_2_2.txt",))
    for home, calendar, event, files in (
        (u"home1", "calendar1", "1.3.ics", ("attach_1_3.txt",)),
        (u"home1", "calendar1", "1.4.ics", ("attach_1_4.txt",)),
        (u"home1", "calendar1", "1.5.ics", ("attach_1_4.txt",)),
        (u"home2", "calendar2", "2-2.2.ics", ("attach_2_2.txt",)),
        (u"home2", "calendar2", "2-2.3.ics", ("attach_1_3.txt",)),
        (u"home2", "calendar3", "2-3.2.ics", ("attach_1_4.txt",)),
        (u"home2", "calendar3", "2-3.3.ics", ("attach_1_4.txt",)),
    ):
        yield self._verifyNoConversion(home, calendar, event, files)
@inlineCallbacks
def test_upgradeDropbox_oneEventTwoHomes(self):
    """
    Test L{txdav.caldav.datastore.sql.CalendarStoreFeatures._upgradeDropbox} re-writes calendar data
    for multiple events across different homes with the same attachment.
    """
    yield self._addAllAttachments()

    txn = self._sqlCalendarStore.newTransaction()
    calstore = CalendarStoreFeatures(self._sqlCalendarStore)
    yield calstore._upgradeDropbox(txn, "1.3.dropbox")
    yield txn.commit()

    # Only the two events (in different homes) sharing the 1.3 dropbox are
    # converted; everything else remains untouched.
    for converted, home, calendar, event, files in (
        (False, u"home1", "calendar1", "1.2.ics", ("attach_1_2_1.txt", "attach_1_2_2.txt",)),
        (True, u"home1", "calendar1", "1.3.ics", ("attach_1_3.txt",)),
        (False, u"home1", "calendar1", "1.4.ics", ("attach_1_4.txt",)),
        (False, u"home1", "calendar1", "1.5.ics", ("attach_1_4.txt",)),
        (False, u"home2", "calendar2", "2-2.2.ics", ("attach_2_2.txt",)),
        (True, u"home2", "calendar2", "2-2.3.ics", ("attach_1_3.txt",)),
        (False, u"home2", "calendar3", "2-3.2.ics", ("attach_1_4.txt",)),
        (False, u"home2", "calendar3", "2-3.3.ics", ("attach_1_4.txt",)),
    ):
        verify = self._verifyConversion if converted else self._verifyNoConversion
        yield verify(home, calendar, event, files)
@inlineCallbacks
def test_upgradeDropbox_twoEventsTwoHomes(self):
    """
    Test L{txdav.caldav.datastore.sql.CalendarStoreFeatures._upgradeDropbox} re-writes calendar data
    for multiple events across different calendars and homes that all share
    the same attachment.
    """
    yield self._addAllAttachments()

    txn = self._sqlCalendarStore.newTransaction()
    calstore = CalendarStoreFeatures(self._sqlCalendarStore)
    yield calstore._upgradeDropbox(txn, "1.4.dropbox")
    yield txn.commit()

    # All four events sharing the 1.4 dropbox are converted; the rest keep
    # their dropbox ATTACHs.
    for converted, home, calendar, event, files in (
        (False, u"home1", "calendar1", "1.2.ics", ("attach_1_2_1.txt", "attach_1_2_2.txt",)),
        (False, u"home1", "calendar1", "1.3.ics", ("attach_1_3.txt",)),
        (True, u"home1", "calendar1", "1.4.ics", ("attach_1_4.txt",)),
        (True, u"home1", "calendar1", "1.5.ics", ("attach_1_4.txt",)),
        (False, u"home2", "calendar2", "2-2.2.ics", ("attach_2_2.txt",)),
        (False, u"home2", "calendar2", "2-2.3.ics", ("attach_1_3.txt",)),
        (True, u"home2", "calendar3", "2-3.2.ics", ("attach_1_4.txt",)),
        (True, u"home2", "calendar3", "2-3.3.ics", ("attach_1_4.txt",)),
    ):
        verify = self._verifyConversion if converted else self._verifyNoConversion
        yield verify(home, calendar, event, files)
@inlineCallbacks
def test_upgradeToManagedAttachments(self):
    """
    Test L{txdav.caldav.datastore.sql.CalendarStoreFeatures.upgradeToManagedAttachments} re-writes calendar data
    for all events with an attachment.
    """
    yield self._addAllAttachments()

    calstore = CalendarStoreFeatures(self._sqlCalendarStore)
    yield calstore.upgradeToManagedAttachments(2)

    # Every event in every home/calendar must now use managed attachments
    for home, calendar, event, files in (
        (u"home1", "calendar1", "1.2.ics", ("attach_1_2_1.txt", "attach_1_2_2.txt",)),
        (u"home1", "calendar1", "1.3.ics", ("attach_1_3.txt",)),
        (u"home1", "calendar1", "1.4.ics", ("attach_1_4.txt",)),
        (u"home1", "calendar1", "1.5.ics", ("attach_1_4.txt",)),
        (u"home2", "calendar2", "2-2.2.ics", ("attach_2_2.txt",)),
        (u"home2", "calendar2", "2-2.3.ics", ("attach_1_3.txt",)),
        (u"home2", "calendar3", "2-3.2.ics", ("attach_1_4.txt",)),
        (u"home2", "calendar3", "2-3.3.ics", ("attach_1_4.txt",)),
    ):
        yield self._verifyConversion(home, calendar, event, files)

    # The old dropbox files must be gone, and so must their two levels of
    # parent (hash-prefix) directories.
    for path in self.paths.values():
        for _ignore in range(2):
            self.assertFalse(path.exists(), msg="Still exists: %s" % (path,))
            path = path.parent()
| 36.314484
| 154
| 0.646373
| 7,522
| 73,210
| 6.236772
| 0.098245
| 0.042973
| 0.036834
| 0.034319
| 0.788609
| 0.773879
| 0.75154
| 0.730778
| 0.714578
| 0.701213
| 0
| 0.027319
| 0.2405
| 73,210
| 2,015
| 155
| 36.332506
| 0.816395
| 0.150622
| 0
| 0.731634
| 0
| 0.002249
| 0.167184
| 0.040932
| 0
| 0
| 0
| 0
| 0.108696
| 1
| 0.05997
| false
| 0
| 0.014243
| 0
| 0.091454
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7c26fa2e1fa0e1fbd8a55f37a170be00ed19f014
| 26
|
py
|
Python
|
ar/webcam/__init__.py
|
ceroytres/feeder
|
5fd13320e16962a9ac58f7126a5ddc6635c8b4f0
|
[
"MIT"
] | null | null | null |
ar/webcam/__init__.py
|
ceroytres/feeder
|
5fd13320e16962a9ac58f7126a5ddc6635c8b4f0
|
[
"MIT"
] | null | null | null |
ar/webcam/__init__.py
|
ceroytres/feeder
|
5fd13320e16962a9ac58f7126a5ddc6635c8b4f0
|
[
"MIT"
] | null | null | null |
from .webcam import Webcam
| 26
| 26
| 0.846154
| 4
| 26
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7c4a268519c947a0ddcd6a0e6d277ce7b0df1326
| 41
|
py
|
Python
|
rdfpandas/__init__.py
|
cadmiumkitty/rdfpandas
|
614457f20d60ec5f8046f4f3963f6406e05c5a37
|
[
"MIT"
] | 21
|
2018-06-20T21:54:03.000Z
|
2022-03-04T09:19:55.000Z
|
rdfpandas/__init__.py
|
cadmiumkitty/rdfpandas
|
614457f20d60ec5f8046f4f3963f6406e05c5a37
|
[
"MIT"
] | 8
|
2018-11-05T10:01:17.000Z
|
2021-12-17T09:59:25.000Z
|
rdfpandas/__init__.py
|
cadmiumkitty/rdfpandas
|
614457f20d60ec5f8046f4f3963f6406e05c5a37
|
[
"MIT"
] | 4
|
2021-05-25T05:31:11.000Z
|
2021-12-14T11:08:25.000Z
|
from .graph import to_graph, to_dataframe
| 41
| 41
| 0.853659
| 7
| 41
| 4.714286
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 1
| 41
| 41
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7c5356201511207f649b717a9745622e36d5b54f
| 112
|
py
|
Python
|
tinacg/tinacg/views.py
|
reddress/vertfolia
|
57b93086b410ea5a8d5cbfe5231e4d2213171b61
|
[
"MIT"
] | null | null | null |
tinacg/tinacg/views.py
|
reddress/vertfolia
|
57b93086b410ea5a8d5cbfe5231e4d2213171b61
|
[
"MIT"
] | null | null | null |
tinacg/tinacg/views.py
|
reddress/vertfolia
|
57b93086b410ea5a8d5cbfe5231e4d2213171b61
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponse
def index(request):
    """Serve the landing page as a plain welcome message.

    ``request`` is the incoming Django HTTP request; it is not inspected.
    """
    # The exact body text is preserved byte-for-byte.
    message = "Welcome to tinacg.com"
    return HttpResponse(message)
| 18.666667
| 48
| 0.741071
| 14
| 112
| 5.928571
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 112
| 5
| 49
| 22.4
| 0.902174
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
7cb2c945a02379c93caf2c1657ae1a001dd7f037
| 45
|
py
|
Python
|
src/models/__init__.py
|
ethan-ou/speech-edit
|
d35b58f36c2f24423cf62013d54149da93deb245
|
[
"MIT"
] | 2
|
2021-04-15T15:47:33.000Z
|
2021-09-07T23:15:34.000Z
|
src/models/__init__.py
|
ethan-ou/speech-edit
|
d35b58f36c2f24423cf62013d54149da93deb245
|
[
"MIT"
] | null | null | null |
src/models/__init__.py
|
ethan-ou/speech-edit
|
d35b58f36c2f24423cf62013d54149da93deb245
|
[
"MIT"
] | 1
|
2020-09-28T01:48:09.000Z
|
2020-09-28T01:48:09.000Z
|
from .speech_detection import SpeechDetection
| 45
| 45
| 0.911111
| 5
| 45
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 45
| 1
| 45
| 45
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7cf1ccb55c7bfea206ccb8d483d2795341634f1f
| 92
|
py
|
Python
|
criticalityMaps/criticality/__init__.py
|
terrahaxton/criticalityMaps
|
533490e1bc0f178cbce94814602caa545e438dcf
|
[
"MIT"
] | null | null | null |
criticalityMaps/criticality/__init__.py
|
terrahaxton/criticalityMaps
|
533490e1bc0f178cbce94814602caa545e438dcf
|
[
"MIT"
] | null | null | null |
criticalityMaps/criticality/__init__.py
|
terrahaxton/criticalityMaps
|
533490e1bc0f178cbce94814602caa545e438dcf
|
[
"MIT"
] | 1
|
2020-03-12T12:36:06.000Z
|
2020-03-12T12:36:06.000Z
|
from .core import fire_criticality_analysis, pipe_criticality_analysis, process_criticality
| 46
| 91
| 0.902174
| 11
| 92
| 7.090909
| 0.727273
| 0.487179
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065217
| 92
| 1
| 92
| 92
| 0.906977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7cf8647f8250aa11c3a4d8e690d8f89e7a419f52
| 61
|
py
|
Python
|
pyaiutils/__init__.py
|
GuilhermeCunha/pyaiutils
|
0d465946cef7f748ccf35fb3a0b255dbab8d2bf7
|
[
"Apache-2.0"
] | 1
|
2021-01-11T18:44:02.000Z
|
2021-01-11T18:44:02.000Z
|
pyaiutils/__init__.py
|
GuilhermeCunha/pyaiutils
|
0d465946cef7f748ccf35fb3a0b255dbab8d2bf7
|
[
"Apache-2.0"
] | null | null | null |
pyaiutils/__init__.py
|
GuilhermeCunha/pyaiutils
|
0d465946cef7f748ccf35fb3a0b255dbab8d2bf7
|
[
"Apache-2.0"
] | null | null | null |
from . import metrics
from . import plots
from . import utils
| 20.333333
| 21
| 0.770492
| 9
| 61
| 5.222222
| 0.555556
| 0.638298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180328
| 61
| 3
| 22
| 20.333333
| 0.94
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7cfa3328d212b6424a2984e53870e04d5aba07d6
| 389,560
|
py
|
Python
|
test/augmenters/test_geometric.py
|
fchouteau/imgaug
|
b282b97c13a27a32f91c2e2666db1e128e00cfde
|
[
"MIT"
] | 1
|
2020-02-26T01:05:12.000Z
|
2020-02-26T01:05:12.000Z
|
test/augmenters/test_geometric.py
|
youbin2014/imgaug
|
b282b97c13a27a32f91c2e2666db1e128e00cfde
|
[
"MIT"
] | null | null | null |
test/augmenters/test_geometric.py
|
youbin2014/imgaug
|
b282b97c13a27a32f91c2e2666db1e128e00cfde
|
[
"MIT"
] | null | null | null |
from __future__ import print_function, division, absolute_import
import itertools
import warnings
import sys
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import skimage.morphology
import cv2
import imgaug as ia
from imgaug import random as iarandom
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.testutils import (
array_equal_lists, keypoints_equal, reseed, assert_cbaois_equal,
runtest_pickleable_uint8_img)
from imgaug.augmentables.heatmaps import HeatmapsOnImage
from imgaug.augmentables.segmaps import SegmentationMapsOnImage
import imgaug.augmenters.geometric as geometriclib
def _assert_same_min_max(observed, actual):
    """Assert that ``observed`` and ``actual`` report matching value ranges.

    Compares the ``min_value`` and ``max_value`` attributes with an absolute
    tolerance of 1e-6 (no relative tolerance), as elsewhere in this module.
    """
    for attr in ("min_value", "max_value"):
        assert np.isclose(getattr(observed, attr), getattr(actual, attr),
                          rtol=0, atol=1e-6)
def _assert_same_shape(observed, actual):
    """Assert that ``observed`` and ``actual`` expose identical ``shape``."""
    observed_shape = observed.shape
    actual_shape = actual.shape
    assert observed_shape == actual_shape
# TODO add more tests for Affine .mode
# TODO add more tests for Affine shear
class TestAffine(unittest.TestCase):
    """Tests for ``iaa.Affine.get_parameters()``."""

    def test_get_parameters(self):
        augmenter = iaa.Affine(scale=1, translate_px=2, rotate=3, shear=4,
                               order=1, cval=0, mode="constant", backend="cv2",
                               fit_output=True)
        parameters = augmenter.get_parameters()

        # scale/rotate/shear come back as deterministic stochastic parameters;
        # translate is a tuple (param, second-axis param, unit string).
        assert isinstance(parameters[0], iap.Deterministic)
        assert isinstance(parameters[1], tuple)
        assert isinstance(parameters[2], iap.Deterministic)
        assert isinstance(parameters[3], iap.Deterministic)

        # Every constructor argument round-trips unchanged.
        assert parameters[0].value == 1           # scale
        assert parameters[1][0].value == 2        # translate
        assert parameters[2].value == 3           # rotate
        assert parameters[3].value == 4           # shear
        assert parameters[4].value == 1           # order
        assert parameters[5].value == 0           # cval
        assert parameters[6].value == "constant"  # mode
        assert parameters[7] == "cv2"             # backend
        assert parameters[8] is True              # fit_output
class TestAffine___init__(unittest.TestCase):
    """
    Tests for how ``iaa.Affine.__init__`` normalizes its arguments into
    stochastic parameters, and which bad inputs it rejects.
    """

    def test___init___scale_is_stochastic_parameter(self):
        # A StochasticParameter passed for scale is kept as-is; iap.Uniform
        # itself wraps its bounds as Deterministic values.
        aug = iaa.Affine(scale=iap.Uniform(0.7, 0.9))
        assert isinstance(aug.scale, iap.Uniform)
        assert isinstance(aug.scale.a, iap.Deterministic)
        assert isinstance(aug.scale.b, iap.Deterministic)
        # Float bounds compared with an epsilon window rather than ==
        assert 0.7 - 1e-8 < aug.scale.a.value < 0.7 + 1e-8
        assert 0.9 - 1e-8 < aug.scale.b.value < 0.9 + 1e-8

    def test___init___translate_percent_is_stochastic_parameter(self):
        # translate is normalized to (x-param, y-param-or-None, unit string)
        aug = iaa.Affine(translate_percent=iap.Uniform(0.7, 0.9))
        assert isinstance(aug.translate, tuple)
        assert isinstance(aug.translate[0], iap.Uniform)
        assert isinstance(aug.translate[0].a, iap.Deterministic)
        assert isinstance(aug.translate[0].b, iap.Deterministic)
        assert 0.7 - 1e-8 < aug.translate[0].a.value < 0.7 + 1e-8
        assert 0.9 - 1e-8 < aug.translate[0].b.value < 0.9 + 1e-8
        assert aug.translate[1] is None
        assert aug.translate[2] == "percent"

    def test___init___translate_px_is_stochastic_parameter(self):
        # Same tuple normalization, but pixel units
        aug = iaa.Affine(translate_px=iap.DiscreteUniform(1, 10))
        assert isinstance(aug.translate, tuple)
        assert isinstance(aug.translate[0], iap.DiscreteUniform)
        assert isinstance(aug.translate[0].a, iap.Deterministic)
        assert isinstance(aug.translate[0].b, iap.Deterministic)
        assert aug.translate[0].a.value == 1
        assert aug.translate[0].b.value == 10
        assert aug.translate[1] is None
        assert aug.translate[2] == "px"

    def test___init___rotate_is_stochastic_parameter(self):
        aug = iaa.Affine(scale=1.0, translate_px=0, rotate=iap.Uniform(10, 20),
                         shear=0)
        assert isinstance(aug.rotate, iap.Uniform)
        assert isinstance(aug.rotate.a, iap.Deterministic)
        assert aug.rotate.a.value == 10
        assert isinstance(aug.rotate.b, iap.Deterministic)
        assert aug.rotate.b.value == 20

    def test___init___shear_is_stochastic_parameter(self):
        aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0,
                         shear=iap.Uniform(10, 20))
        assert isinstance(aug.shear, iap.Uniform)
        assert isinstance(aug.shear.a, iap.Deterministic)
        assert aug.shear.a.value == 10
        assert isinstance(aug.shear.b, iap.Deterministic)
        assert aug.shear.b.value == 20

    def test___init___cval_is_all(self):
        # cval=ia.ALL becomes a Uniform over the full uint8 range [0, 255]
        aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
                         cval=ia.ALL)
        assert isinstance(aug.cval, iap.Uniform)
        assert isinstance(aug.cval.a, iap.Deterministic)
        assert isinstance(aug.cval.b, iap.Deterministic)
        assert aug.cval.a.value == 0
        assert aug.cval.b.value == 255

    def test___init___cval_is_stochastic_parameter(self):
        aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
                         cval=iap.DiscreteUniform(1, 5))
        assert isinstance(aug.cval, iap.DiscreteUniform)
        assert isinstance(aug.cval.a, iap.Deterministic)
        assert isinstance(aug.cval.b, iap.Deterministic)
        assert aug.cval.a.value == 1
        assert aug.cval.b.value == 5

    def test___init___mode_is_all(self):
        # mode=ia.ALL becomes a Choice over the available fill modes
        aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
                         cval=0, mode=ia.ALL)
        assert isinstance(aug.mode, iap.Choice)

    def test___init___mode_is_string(self):
        aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
                         cval=0, mode="edge")
        assert isinstance(aug.mode, iap.Deterministic)
        assert aug.mode.value == "edge"

    def test___init___mode_is_list(self):
        # A list of modes becomes a Choice over exactly those modes
        aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
                         cval=0, mode=["constant", "edge"])
        assert isinstance(aug.mode, iap.Choice)
        assert (
            len(aug.mode.a) == 2
            and "constant" in aug.mode.a
            and "edge" in aug.mode.a)

    def test___init___mode_is_stochastic_parameter(self):
        aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
                         cval=0, mode=iap.Choice(["constant", "edge"]))
        assert isinstance(aug.mode, iap.Choice)
        assert (
            len(aug.mode.a) == 2
            and "constant" in aug.mode.a
            and "edge" in aug.mode.a)

    def test___init___fit_output_is_true(self):
        aug = iaa.Affine(fit_output=True)
        assert aug.fit_output is True

    # ------------
    # exceptions for bad inputs
    # ------------

    def test___init___bad_datatype_for_scale_fails(self):
        with self.assertRaises(Exception):
            _ = iaa.Affine(scale=False)

    def test___init___bad_datatype_for_translate_px_fails(self):
        with self.assertRaises(Exception):
            _ = iaa.Affine(translate_px=False)

    def test___init___bad_datatype_for_translate_percent_fails(self):
        with self.assertRaises(Exception):
            _ = iaa.Affine(translate_percent=False)

    def test___init___bad_datatype_for_rotate_fails(self):
        with self.assertRaises(Exception):
            _ = iaa.Affine(scale=1.0, translate_px=0, rotate=False, shear=0,
                           cval=0)

    def test___init___bad_datatype_for_shear_fails(self):
        with self.assertRaises(Exception):
            _ = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=False,
                           cval=0)

    def test___init___bad_datatype_for_cval_fails(self):
        with self.assertRaises(Exception):
            _ = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
                           cval=None)

    def test___init___bad_datatype_for_mode_fails(self):
        with self.assertRaises(Exception):
            _ = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
                           cval=0, mode=False)

    def test___init___bad_datatype_for_order_fails(self):
        # bad order datatype in case of backend=cv2
        with self.assertRaises(Exception):
            _ = iaa.Affine(backend="cv2", order="test")

    def test___init___nonexistent_order_for_cv2_fails(self):
        # non-existent order in case of backend=cv2
        with self.assertRaises(AssertionError):
            _ = iaa.Affine(backend="cv2", order=-1)
# TODO add test with multiple images
class TestAffine_noop(unittest.TestCase):
    """
    Tests that an identity Affine (scale=1, translate=0, rotate=0, shear=0)
    leaves images and all coordinate-based augmentables unchanged.
    """

    def setUp(self):
        reseed()

    @property
    def base_img(self):
        # 3x3 single-channel image with one bright center pixel
        base_img = np.array([[0, 0, 0],
                             [0, 255, 0],
                             [0, 0, 0]], dtype=np.uint8)
        return base_img[:, :, np.newaxis]

    @property
    def images(self):
        # Batch of one image, shape (1, H, W, C)
        return np.array([self.base_img])

    @property
    def kpsoi(self):
        kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
               ia.Keypoint(x=2, y=2)]
        return [ia.KeypointsOnImage(kps, shape=self.base_img.shape)]

    @property
    def psoi(self):
        polygons = [ia.Polygon([(0, 0), (2, 0), (2, 2)])]
        return [ia.PolygonsOnImage(polygons, shape=self.base_img.shape)]

    @property
    def lsoi(self):
        ls = [ia.LineString([(0, 0), (2, 0), (2, 2)])]
        return [ia.LineStringsOnImage(ls, shape=self.base_img.shape)]

    @property
    def bbsoi(self):
        bbs = [ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)]
        return [ia.BoundingBoxesOnImage(bbs, shape=self.base_img.shape)]

    def test_image_noop(self):
        # no translation/scale/rotate/shear: output must equal the input
        aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)
        observed = aug.augment_images(self.images)
        expected = self.images
        assert np.array_equal(observed, expected)

    def test_image_noop__deterministic(self):
        aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images(self.images)
        expected = self.images
        assert np.array_equal(observed, expected)

    def test_image_noop__list(self):
        # Same check with a python list of images instead of an ndarray batch
        aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)
        observed = aug.augment_images([self.base_img])
        expected = [self.base_img]
        assert array_equal_lists(observed, expected)

    def test_image_noop__list_and_deterministic(self):
        aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images([self.base_img])
        expected = [self.base_img]
        assert array_equal_lists(observed, expected)

    def test_keypoints_noop(self):
        self._test_cba_noop("augment_keypoints", self.kpsoi, False)

    def test_keypoints_noop__deterministic(self):
        self._test_cba_noop("augment_keypoints", self.kpsoi, True)

    def test_polygons_noop(self):
        self._test_cba_noop("augment_polygons", self.psoi, False)

    def test_polygons_noop__deterministic(self):
        self._test_cba_noop("augment_polygons", self.psoi, True)

    def test_line_strings_noop(self):
        self._test_cba_noop("augment_line_strings", self.lsoi, False)

    def test_line_strings_noop__deterministic(self):
        self._test_cba_noop("augment_line_strings", self.lsoi, True)

    def test_bounding_boxes_noop(self):
        self._test_cba_noop("augment_bounding_boxes", self.bbsoi, False)

    def test_bounding_boxes_noop__deterministic(self):
        self._test_cba_noop("augment_bounding_boxes", self.bbsoi, True)

    @classmethod
    def _test_cba_noop(cls, augf_name, cbaoi, deterministic):
        # Shared driver: augment the coordinate-based augmentable with an
        # identity Affine (optionally made deterministic) and expect equality.
        aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)
        if deterministic:
            aug = aug.to_deterministic()
        observed = getattr(aug, augf_name)(cbaoi)
        expected = cbaoi
        assert_cbaois_equal(observed, expected)
# TODO add test with multiple images
class TestAffine_scale(unittest.TestCase):
def setUp(self):
reseed()
# ---------------------
# scale: zoom in
# ---------------------
@property
def base_img(self):
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
return base_img[:, :, np.newaxis]
@property
def images(self):
return np.array([self.base_img])
@property
def kpsoi(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)]
return [ia.KeypointsOnImage(kps, shape=self.base_img.shape)]
def kpsoi_scaled(self, scale_y, scale_x):
coords = np.array([
[0, 0],
[1, 1],
[2, 2]
], dtype=np.float32)
coords_scaled = self._scale_coordinates(coords, scale_y, scale_x)
return [ia.KeypointsOnImage.from_xy_array(
coords_scaled,
shape=self.base_img.shape)]
@property
def psoi(self):
polys = [ia.Polygon([(0, 0), (0, 2), (2, 2)])]
return [ia.PolygonsOnImage(polys, shape=self.base_img.shape)]
def psoi_scaled(self, scale_y, scale_x):
coords = np.array([
[0, 0],
[0, 2],
[2, 2]
], dtype=np.float32)
coords_scaled = self._scale_coordinates(coords, scale_y, scale_x)
return [ia.PolygonsOnImage(
[ia.Polygon(coords_scaled)],
shape=self.base_img.shape)]
@property
def lsoi(self):
ls = [ia.LineString([(0, 0), (0, 2), (2, 2)])]
return [ia.LineStringsOnImage(ls, shape=self.base_img.shape)]
def lsoi_scaled(self, scale_y, scale_x):
coords = np.array([
[0, 0],
[0, 2],
[2, 2]
], dtype=np.float32)
coords_scaled = self._scale_coordinates(coords, scale_y, scale_x)
return [ia.LineStringsOnImage(
[ia.LineString(coords_scaled)],
shape=self.base_img.shape)]
@property
def bbsoi(self):
bbs = [ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)]
return [ia.BoundingBoxesOnImage(bbs, shape=self.base_img.shape)]
def bbsoi_scaled(self, scale_y, scale_x):
coords = np.array([
[0, 1],
[2, 3]
], dtype=np.float32)
coords_scaled = self._scale_coordinates(coords, scale_y, scale_x)
return [ia.BoundingBoxesOnImage.from_xyxy_array(
coords_scaled.reshape((1, 4)),
shape=self.base_img.shape)]
def _scale_coordinates(self, coords, scale_y, scale_x):
height, width = self.base_img.shape[0:2]
coords_scaled = []
for x, y in coords:
# the additional +0.5 and -0.5 here makes up for the shift factor
# used in the affine matrix generation
offset = 0.0
x_centered = x - width/2 + offset
y_centered = y - height/2 + offset
x_new = x_centered * scale_x + width/2 - offset
y_new = y_centered * scale_y + height/2 - offset
coords_scaled.append((x_new, y_new))
return np.float32(coords_scaled)
@property
def scale_zoom_in_outer_pixels(self):
base_img = self.base_img
outer_pixels = ([], [])
for i in sm.xrange(base_img.shape[0]):
for j in sm.xrange(base_img.shape[1]):
if i != j:
outer_pixels[0].append(i)
outer_pixels[1].append(j)
return outer_pixels
def test_image_scale_zoom_in(self):
aug = iaa.Affine(scale=1.75, translate_px=0, rotate=0, shear=0)
observed = aug.augment_images(self.images)
outer_pixels = self.scale_zoom_in_outer_pixels
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
def test_image_scale_zoom_in__deterministic(self):
aug = iaa.Affine(scale=1.75, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
outer_pixels = self.scale_zoom_in_outer_pixels
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
def test_image_scale_zoom_in__list(self):
aug = iaa.Affine(scale=1.75, translate_px=0, rotate=0, shear=0)
observed = aug.augment_images([self.base_img])
outer_pixels = self.scale_zoom_in_outer_pixels
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
    def test_image_scale_zoom_in__list_and_deterministic(self):
        """Same as test_image_scale_zoom_in, list input + to_deterministic()."""
        aug = iaa.Affine(scale=1.75, translate_px=0, rotate=0, shear=0)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images([self.base_img])
        outer_pixels = self.scale_zoom_in_outer_pixels
        assert observed[0][1, 1] > 250
        assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
        assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
    def test_keypoints_scale_zoom_in(self):
        """Keypoints scaled 1.75x on both axes."""
        self._test_cba_scale(
            "augment_keypoints", 1.75,
            self.kpsoi, self.kpsoi_scaled(1.75, 1.75), False)
    def test_keypoints_scale_zoom_in__deterministic(self):
        """Keypoints scaled 1.75x on both axes, deterministic mode."""
        self._test_cba_scale(
            "augment_keypoints", 1.75,
            self.kpsoi, self.kpsoi_scaled(1.75, 1.75), True)
    def test_polygons_scale_zoom_in(self):
        """Polygons scaled 1.75x on both axes."""
        self._test_cba_scale(
            "augment_polygons", 1.75,
            self.psoi, self.psoi_scaled(1.75, 1.75), False)
    def test_polygons_scale_zoom_in__deterministic(self):
        """Polygons scaled 1.75x on both axes, deterministic mode."""
        self._test_cba_scale(
            "augment_polygons", 1.75,
            self.psoi, self.psoi_scaled(1.75, 1.75), True)
    def test_line_strings_scale_zoom_in(self):
        """Line strings scaled 1.75x on both axes."""
        self._test_cba_scale(
            "augment_line_strings", 1.75,
            self.lsoi, self.lsoi_scaled(1.75, 1.75), False)
    def test_line_strings_scale_zoom_in__deterministic(self):
        """Line strings scaled 1.75x on both axes, deterministic mode."""
        self._test_cba_scale(
            "augment_line_strings", 1.75,
            self.lsoi, self.lsoi_scaled(1.75, 1.75), True)
    def test_bounding_boxes_scale_zoom_in(self):
        """Bounding boxes scaled 1.75x on both axes."""
        self._test_cba_scale(
            "augment_bounding_boxes", 1.75,
            self.bbsoi, self.bbsoi_scaled(1.75, 1.75), False)
    def test_bounding_boxes_scale_zoom_in__deterministic(self):
        """Bounding boxes scaled 1.75x on both axes, deterministic mode."""
        self._test_cba_scale(
            "augment_bounding_boxes", 1.75,
            self.bbsoi, self.bbsoi_scaled(1.75, 1.75), True)
@classmethod
def _test_cba_scale(cls, augf_name, scale, cbaoi, cbaoi_scaled,
deterministic):
aug = iaa.Affine(scale=scale, translate_px=0, rotate=0, shear=0)
if deterministic:
aug = aug.to_deterministic()
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi_scaled)
# ---------------------
# scale: zoom in only on x axis
# ---------------------
    def test_image_scale_zoom_in_only_x_axis(self):
        """Zoom in 1.75x on x only: middle row spreads, top/bottom rows stay dark."""
        aug = iaa.Affine(scale={"x": 1.75, "y": 1.0},
                         translate_px=0, rotate=0, shear=0)
        observed = aug.augment_images(self.images)
        assert observed[0][1, 1] > 250
        assert (observed[0][[1, 1], [0, 2]] > 20).all()
        assert (observed[0][[1, 1], [0, 2]] < 150).all()
        assert (observed[0][0, :] < 5).all()
        assert (observed[0][2, :] < 5).all()
    def test_image_scale_zoom_in_only_x_axis__deterministic(self):
        """Same as the x-axis zoom test, via to_deterministic()."""
        aug = iaa.Affine(scale={"x": 1.75, "y": 1.0},
                         translate_px=0, rotate=0, shear=0)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images(self.images)
        assert observed[0][1, 1] > 250
        assert (observed[0][[1, 1], [0, 2]] > 20).all()
        assert (observed[0][[1, 1], [0, 2]] < 150).all()
        assert (observed[0][0, :] < 5).all()
        assert (observed[0][2, :] < 5).all()
    def test_image_scale_zoom_in_only_x_axis__list(self):
        """Same as the x-axis zoom test, with a list of images as input."""
        aug = iaa.Affine(scale={"x": 1.75, "y": 1.0},
                         translate_px=0, rotate=0, shear=0)
        observed = aug.augment_images([self.base_img])
        assert observed[0][1, 1] > 250
        assert (observed[0][[1, 1], [0, 2]] > 20).all()
        assert (observed[0][[1, 1], [0, 2]] < 150).all()
        assert (observed[0][0, :] < 5).all()
        assert (observed[0][2, :] < 5).all()
    def test_image_scale_zoom_in_only_x_axis__deterministic_and_list(self):
        """Same as the x-axis zoom test, list input + to_deterministic()."""
        aug = iaa.Affine(scale={"x": 1.75, "y": 1.0},
                         translate_px=0, rotate=0, shear=0)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images([self.base_img])
        assert observed[0][1, 1] > 250
        assert (observed[0][[1, 1], [0, 2]] > 20).all()
        assert (observed[0][[1, 1], [0, 2]] < 150).all()
        assert (observed[0][0, :] < 5).all()
        assert (observed[0][2, :] < 5).all()
    def test_keypoints_scale_zoom_in_only_x_axis(self):
        """Keypoints scaled 1.75x on x only."""
        self._test_cba_scale(
            "augment_keypoints", {"y": 1.0, "x": 1.75}, self.kpsoi,
            self.kpsoi_scaled(1.0, 1.75), False)
    def test_keypoints_scale_zoom_in_only_x_axis__deterministic(self):
        """Keypoints scaled 1.75x on x only, deterministic mode."""
        self._test_cba_scale(
            "augment_keypoints", {"y": 1.0, "x": 1.75}, self.kpsoi,
            self.kpsoi_scaled(1.0, 1.75), True)
    def test_polygons_scale_zoom_in_only_x_axis(self):
        """Polygons scaled 1.75x on x only."""
        self._test_cba_scale(
            "augment_polygons", {"y": 1.0, "x": 1.75}, self.psoi,
            self.psoi_scaled(1.0, 1.75), False)
    def test_polygons_scale_zoom_in_only_x_axis__deterministic(self):
        """Polygons scaled 1.75x on x only, deterministic mode."""
        self._test_cba_scale(
            "augment_polygons", {"y": 1.0, "x": 1.75}, self.psoi,
            self.psoi_scaled(1.0, 1.75), True)
    def test_line_strings_scale_zoom_in_only_x_axis(self):
        """Line strings scaled 1.75x on x only."""
        self._test_cba_scale(
            "augment_line_strings", {"y": 1.0, "x": 1.75}, self.lsoi,
            self.lsoi_scaled(1.0, 1.75), False)
    def test_line_strings_scale_zoom_in_only_x_axis__deterministic(self):
        """Line strings scaled 1.75x on x only, deterministic mode."""
        self._test_cba_scale(
            "augment_line_strings", {"y": 1.0, "x": 1.75}, self.lsoi,
            self.lsoi_scaled(1.0, 1.75), True)
    def test_bounding_boxes_scale_zoom_in_only_x_axis(self):
        """Bounding boxes scaled 1.75x on x only."""
        self._test_cba_scale(
            "augment_bounding_boxes", {"y": 1.0, "x": 1.75}, self.bbsoi,
            self.bbsoi_scaled(1.0, 1.75), False)
    def test_bounding_boxes_scale_zoom_in_only_x_axis__deterministic(self):
        """Bounding boxes scaled 1.75x on x only, deterministic mode."""
        self._test_cba_scale(
            "augment_bounding_boxes", {"y": 1.0, "x": 1.75}, self.bbsoi,
            self.bbsoi_scaled(1.0, 1.75), True)
# ---------------------
# scale: zoom in only on y axis
# ---------------------
    def test_image_scale_zoom_in_only_y_axis(self):
        """Zoom in 1.75x on y only: middle column spreads, side columns stay dark."""
        aug = iaa.Affine(scale={"x": 1.0, "y": 1.75},
                         translate_px=0, rotate=0, shear=0)
        observed = aug.augment_images(self.images)
        assert observed[0][1, 1] > 250
        assert (observed[0][[0, 2], [1, 1]] > 20).all()
        assert (observed[0][[0, 2], [1, 1]] < 150).all()
        assert (observed[0][:, 0] < 5).all()
        assert (observed[0][:, 2] < 5).all()
    def test_image_scale_zoom_in_only_y_axis__deterministic(self):
        """Same as the y-axis zoom test, via to_deterministic()."""
        aug = iaa.Affine(scale={"x": 1.0, "y": 1.75},
                         translate_px=0, rotate=0, shear=0)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images(self.images)
        assert observed[0][1, 1] > 250
        assert (observed[0][[0, 2], [1, 1]] > 20).all()
        assert (observed[0][[0, 2], [1, 1]] < 150).all()
        assert (observed[0][:, 0] < 5).all()
        assert (observed[0][:, 2] < 5).all()
    def test_image_scale_zoom_in_only_y_axis__list(self):
        """Same as the y-axis zoom test, with a list of images as input."""
        aug = iaa.Affine(scale={"x": 1.0, "y": 1.75},
                         translate_px=0, rotate=0, shear=0)
        observed = aug.augment_images([self.base_img])
        assert observed[0][1, 1] > 250
        assert (observed[0][[0, 2], [1, 1]] > 20).all()
        assert (observed[0][[0, 2], [1, 1]] < 150).all()
        assert (observed[0][:, 0] < 5).all()
        assert (observed[0][:, 2] < 5).all()
    def test_image_scale_zoom_in_only_y_axis__deterministic_and_list(self):
        """Same as the y-axis zoom test, list input + to_deterministic()."""
        aug = iaa.Affine(scale={"x": 1.0, "y": 1.75},
                         translate_px=0, rotate=0, shear=0)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images([self.base_img])
        assert observed[0][1, 1] > 250
        assert (observed[0][[0, 2], [1, 1]] > 20).all()
        assert (observed[0][[0, 2], [1, 1]] < 150).all()
        assert (observed[0][:, 0] < 5).all()
        assert (observed[0][:, 2] < 5).all()
    def test_keypoints_scale_zoom_in_only_y_axis(self):
        """Keypoints scaled 1.75x on y only."""
        self._test_cba_scale(
            "augment_keypoints", {"y": 1.75, "x": 1.0}, self.kpsoi,
            self.kpsoi_scaled(1.75, 1.0), False)
    def test_keypoints_scale_zoom_in_only_y_axis__deterministic(self):
        """Keypoints scaled 1.75x on y only, deterministic mode."""
        self._test_cba_scale(
            "augment_keypoints", {"y": 1.75, "x": 1.0}, self.kpsoi,
            self.kpsoi_scaled(1.75, 1.0), True)
    def test_polygons_scale_zoom_in_only_y_axis(self):
        """Polygons scaled 1.75x on y only."""
        self._test_cba_scale(
            "augment_polygons", {"y": 1.75, "x": 1.0}, self.psoi,
            self.psoi_scaled(1.75, 1.0), False)
    def test_polygons_scale_zoom_in_only_y_axis__deterministic(self):
        """Polygons scaled 1.75x on y only, deterministic mode."""
        self._test_cba_scale(
            "augment_polygons", {"y": 1.75, "x": 1.0}, self.psoi,
            self.psoi_scaled(1.75, 1.0), True)
def test_line_strings_scale_zoom_in_only_y_axis(self):
self._test_cba_scale(
"augment_polygons", {"y": 1.75, "x": 1.0}, self.psoi,
self.psoi_scaled(1.75, 1.0), False)
    def test_line_strings_scale_zoom_in_only_y_axis__deterministic(self):
        """Line strings scaled 1.75x on y only, deterministic mode."""
        self._test_cba_scale(
            "augment_line_strings", {"y": 1.75, "x": 1.0}, self.lsoi,
            self.lsoi_scaled(1.75, 1.0), True)
    def test_bounding_boxes_scale_zoom_in_only_y_axis(self):
        """Bounding boxes scaled 1.75x on y only."""
        self._test_cba_scale(
            "augment_bounding_boxes", {"y": 1.75, "x": 1.0}, self.bbsoi,
            self.bbsoi_scaled(1.75, 1.0), False)
    def test_bounding_boxes_scale_zoom_in_only_y_axis__deterministic(self):
        """Bounding boxes scaled 1.75x on y only, deterministic mode."""
        self._test_cba_scale(
            "augment_bounding_boxes", {"y": 1.75, "x": 1.0}, self.bbsoi,
            self.bbsoi_scaled(1.75, 1.0), True)
# ---------------------
# scale: zoom out
# ---------------------
# these tests use a 4x4 area of all 255, which is zoomed out to a 4x4 area
# in which the center 2x2 area is 255
# zoom in should probably be adapted to this style
# no separate tests here for x/y axis, should work fine if zoom in works
# with that
    @property
    def scale_zoom_out_base_img(self):
        """4x4 single-channel image, all pixels at 255."""
        return np.ones((4, 4, 1), dtype=np.uint8) * 255
    @property
    def scale_zoom_out_images(self):
        """Batch of one zoom-out base image, as a (1, 4, 4, 1) array."""
        return np.array([self.scale_zoom_out_base_img])
@property
def scale_zoom_out_outer_pixels(self):
outer_pixels = ([], [])
for y in sm.xrange(4):
xs = sm.xrange(4) if y in [0, 3] else [0, 3]
for x in xs:
outer_pixels[0].append(y)
outer_pixels[1].append(x)
return outer_pixels
    @property
    def scale_zoom_out_inner_pixels(self):
        """(rows, cols) index lists of the center 2x2 area of a 4x4 image."""
        return [1, 1, 2, 2], [1, 2, 1, 2]
    @property
    def scale_zoom_out_kpsoi(self):
        """Keypoints on the four corners of the 4x4 zoom-out image."""
        kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=3, y=0),
               ia.Keypoint(x=0, y=3), ia.Keypoint(x=3, y=3)]
        return [ia.KeypointsOnImage(kps,
                                    shape=self.scale_zoom_out_base_img.shape)]
    @property
    def scale_zoom_out_kpsoi_aug(self):
        """Expected corner keypoints after zooming out by 0.49."""
        kps_aug = [ia.Keypoint(x=0.765, y=0.765),
                   ia.Keypoint(x=2.235, y=0.765),
                   ia.Keypoint(x=0.765, y=2.235),
                   ia.Keypoint(x=2.235, y=2.235)]
        return [ia.KeypointsOnImage(kps_aug,
                                    shape=self.scale_zoom_out_base_img.shape)]
    def test_image_scale_zoom_out(self):
        """Zoom out to 0.49: border darkens, center 2x2 stays bright."""
        aug = iaa.Affine(scale=0.49, translate_px=0, rotate=0, shear=0)
        observed = aug.augment_images(self.scale_zoom_out_images)
        outer_pixels = self.scale_zoom_out_outer_pixels
        inner_pixels = self.scale_zoom_out_inner_pixels
        assert (observed[0][outer_pixels] < 25).all()
        assert (observed[0][inner_pixels] > 200).all()
    def test_image_scale_zoom_out__deterministic(self):
        """Same as test_image_scale_zoom_out, via to_deterministic()."""
        aug = iaa.Affine(scale=0.49, translate_px=0, rotate=0, shear=0)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images(self.scale_zoom_out_images)
        outer_pixels = self.scale_zoom_out_outer_pixels
        inner_pixels = self.scale_zoom_out_inner_pixels
        assert (observed[0][outer_pixels] < 25).all()
        assert (observed[0][inner_pixels] > 200).all()
    def test_image_scale_zoom_out__list(self):
        """Same as test_image_scale_zoom_out, with a list of images as input."""
        aug = iaa.Affine(scale=0.49, translate_px=0, rotate=0, shear=0)
        observed = aug.augment_images([self.scale_zoom_out_base_img])
        outer_pixels = self.scale_zoom_out_outer_pixels
        inner_pixels = self.scale_zoom_out_inner_pixels
        assert (observed[0][outer_pixels] < 25).all()
        assert (observed[0][inner_pixels] > 200).all()
    def test_image_scale_zoom_out__list_and_deterministic(self):
        """Same as test_image_scale_zoom_out, list input + to_deterministic()."""
        aug = iaa.Affine(scale=0.49, translate_px=0, rotate=0, shear=0)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images([self.scale_zoom_out_base_img])
        outer_pixels = self.scale_zoom_out_outer_pixels
        inner_pixels = self.scale_zoom_out_inner_pixels
        assert (observed[0][outer_pixels] < 25).all()
        assert (observed[0][inner_pixels] > 200).all()
    def test_keypoints_scale_zoom_out(self):
        """Keypoints scaled by 0.49 on both axes."""
        self._test_cba_scale(
            "augment_keypoints", 0.49, self.kpsoi,
            self.kpsoi_scaled(0.49, 0.49), False)
    def test_keypoints_scale_zoom_out__deterministic(self):
        """Keypoints scaled by 0.49 on both axes, deterministic mode."""
        self._test_cba_scale(
            "augment_keypoints", 0.49, self.kpsoi,
            self.kpsoi_scaled(0.49, 0.49), True)
    def test_polygons_scale_zoom_out(self):
        """Polygons scaled by 0.49 on both axes."""
        self._test_cba_scale(
            "augment_polygons", 0.49, self.psoi,
            self.psoi_scaled(0.49, 0.49), False)
    def test_polygons_scale_zoom_out__deterministic(self):
        """Polygons scaled by 0.49 on both axes, deterministic mode."""
        self._test_cba_scale(
            "augment_polygons", 0.49, self.psoi,
            self.psoi_scaled(0.49, 0.49), True)
    def test_line_strings_scale_zoom_out(self):
        """Line strings scaled by 0.49 on both axes."""
        self._test_cba_scale(
            "augment_line_strings", 0.49, self.lsoi,
            self.lsoi_scaled(0.49, 0.49), False)
    def test_line_strings_scale_zoom_out__deterministic(self):
        """Line strings scaled by 0.49 on both axes, deterministic mode."""
        self._test_cba_scale(
            "augment_line_strings", 0.49, self.lsoi,
            self.lsoi_scaled(0.49, 0.49), True)
    def test_bounding_boxes_scale_zoom_out(self):
        """Bounding boxes scaled by 0.49 on both axes."""
        self._test_cba_scale(
            "augment_bounding_boxes", 0.49, self.bbsoi,
            self.bbsoi_scaled(0.49, 0.49), False)
    def test_bounding_boxes_scale_zoom_out__deterministic(self):
        """Bounding boxes scaled by 0.49 on both axes, deterministic mode."""
        self._test_cba_scale(
            "augment_bounding_boxes", 0.49, self.bbsoi,
            self.bbsoi_scaled(0.49, 0.49), True)
# ---------------------
# scale: x and y axis are both tuples
# ---------------------
    def test_image_x_and_y_axis_are_tuples(self):
        """Tuple-valued scale ranges must produce varying outputs.

        Runs 1000 augmentations of the same image and requires at least 80%
        of consecutive results to differ, i.e. a fresh scale is sampled per
        call.
        """
        aug = iaa.Affine(scale={"x": (0.5, 1.5), "y": (0.5, 1.5)},
                         translate_px=0, rotate=0, shear=0)
        image = np.array([[0, 0, 0, 0, 0],
                          [0, 1, 1, 1, 0],
                          [0, 1, 2, 1, 0],
                          [0, 1, 1, 1, 0],
                          [0, 0, 0, 0, 0]], dtype=np.uint8) * 100
        image = image[:, :, np.newaxis]
        images = np.array([image])
        last_aug = None
        nb_changed_aug = 0
        nb_iterations = 1000
        for i in sm.xrange(nb_iterations):
            observed_aug = aug.augment_images(images)
            if i == 0:
                last_aug = observed_aug
            else:
                if not np.array_equal(observed_aug, last_aug):
                    nb_changed_aug += 1
                last_aug = observed_aug
        assert nb_changed_aug >= int(nb_iterations * 0.8)
    def test_image_x_and_y_axis_are_tuples__deterministic(self):
        """Tuple-valued scale ranges + to_deterministic(): output never varies."""
        aug = iaa.Affine(scale={"x": (0.5, 1.5), "y": (0.5, 1.5)},
                         translate_px=0, rotate=0, shear=0)
        aug_det = aug.to_deterministic()
        image = np.array([[0, 0, 0, 0, 0],
                          [0, 1, 1, 1, 0],
                          [0, 1, 2, 1, 0],
                          [0, 1, 1, 1, 0],
                          [0, 0, 0, 0, 0]], dtype=np.uint8) * 100
        image = image[:, :, np.newaxis]
        images = np.array([image])
        last_aug_det = None
        nb_changed_aug_det = 0
        nb_iterations = 10
        for i in sm.xrange(nb_iterations):
            observed_aug_det = aug_det.augment_images(images)
            if i == 0:
                last_aug_det = observed_aug_det
            else:
                if not np.array_equal(observed_aug_det, last_aug_det):
                    nb_changed_aug_det += 1
                last_aug_det = observed_aug_det
        assert nb_changed_aug_det == 0
# ------------
# alignment
# TODO add alignment tests for: BBs, Polys, LS
# ------------
def test_keypoint_alignment(self):
image = np.zeros((100, 100), dtype=np.uint8)
image[40-1:40+2, 40-1:40+2] = 255
image[40-1:40+2, 60-1:60+2] = 255
kps = [ia.Keypoint(x=40, y=40), ia.Keypoint(x=60, y=40)]
kpsoi = ia.KeypointsOnImage(kps, shape=image.shape)
images = [image, image, image]
kpsois = [kpsoi.deepcopy(),
ia.KeypointsOnImage([], shape=image.shape),
kpsoi.deepcopy()]
aug = iaa.Affine(scale=[0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5,
1.6, 1.7],
order=0)
for iter in sm.xrange(40):
images_aug, kpsois_aug = aug(images=images, keypoints=kpsois)
assert kpsois_aug[1].empty
for i in [0, 2]:
image_aug = images_aug[i]
kpsoi_aug = kpsois_aug[i]
for kp in kpsoi_aug.keypoints:
value = image_aug[int(kp.y), int(kp.x)]
assert value > 200
# ------------
# make sure that polygons stay valid upon extreme scaling
# ------------
    def test_polygons_stay_valid_when_using_extreme_scalings(self):
        """Extreme scale factors must not corrupt polygon geometry.

        Sweeps scales from 1e-4 to 1e4 across all backends and several
        interpolation orders; the augmented triangle must remain valid and
        keep the relative ordering of its vertices.
        """
        scales = [1e-4, 1e-2, 1e2, 1e4]
        backends = ["auto", "cv2", "skimage"]
        orders = [0, 1, 3]
        gen = itertools.product(scales, backends, orders)
        for scale, backend, order in gen:
            with self.subTest(scale=scale, backend=backend, order=order):
                aug = iaa.Affine(scale=scale, order=order)
                psoi = ia.PolygonsOnImage([
                    ia.Polygon([(0, 0), (10, 0), (5, 5)])],
                    shape=(10, 10))
                psoi_aug = aug.augment_polygons(psoi)
                poly = psoi_aug.polygons[0]
                ext = poly.exterior
                assert poly.is_valid
                # vertex order: left corner < apex < right corner on x;
                # corners share a y below the apex
                assert ext[0][0] < ext[2][0] < ext[1][0]
                assert ext[0][1] < ext[2][1]
                assert np.allclose(ext[0][1], ext[1][1])
class TestAffine_translate(unittest.TestCase):
    def setUp(self):
        # reset the global RNG so each test sees reproducible samples
        reseed()
    @property
    def image(self):
        """3x3 single-channel image with one bright pixel at the center."""
        return np.uint8([
            [0, 0, 0],
            [0, 1, 0],
            [0, 0, 0]
        ])[:, :, np.newaxis]
    @property
    def image_1px_right(self):
        """Expected image after translating the center pixel 1px right."""
        return np.uint8([
            [0, 0, 0],
            [0, 0, 1],
            [0, 0, 0]
        ])[:, :, np.newaxis]
    @property
    def image_1px_bottom(self):
        """Expected image after translating the center pixel 1px down."""
        return np.uint8([
            [0, 0, 0],
            [0, 0, 0],
            [0, 1, 0]
        ])[:, :, np.newaxis]
    @property
    def images(self):
        """Batch of one base image, as a (1, 3, 3, 1) array."""
        return np.array([self.image])
    @property
    def images_1px_right(self):
        """Batch of one expected right-shifted image."""
        return np.array([self.image_1px_right])
    @property
    def images_1px_bottom(self):
        """Batch of one expected down-shifted image."""
        return np.array([self.image_1px_bottom])
    @property
    def kpsoi(self):
        """One keypoint at the image center (1, 1)."""
        kps = [ia.Keypoint(x=1, y=1)]
        return [ia.KeypointsOnImage(kps, shape=self.image.shape)]
    @property
    def kpsoi_1px_right(self):
        """Expected keypoint after a 1px shift to the right."""
        kps = [ia.Keypoint(x=2, y=1)]
        return [ia.KeypointsOnImage(kps, shape=self.image.shape)]
    @property
    def kpsoi_1px_bottom(self):
        """Expected keypoint after a 1px shift to the bottom."""
        kps = [ia.Keypoint(x=1, y=2)]
        return [ia.KeypointsOnImage(kps, shape=self.image.shape)]
    @property
    def psoi(self):
        """One triangle polygon spanning the image."""
        polys = [ia.Polygon([(0, 0), (2, 0), (2, 2)])]
        return [ia.PolygonsOnImage(polys, shape=self.image.shape)]
    @property
    def psoi_1px_right(self):
        """Expected polygon after a 1px shift to the right."""
        polys = [ia.Polygon([(0+1, 0), (2+1, 0), (2+1, 2)])]
        return [ia.PolygonsOnImage(polys, shape=self.image.shape)]
    @property
    def psoi_1px_bottom(self):
        """Expected polygon after a 1px shift to the bottom."""
        polys = [ia.Polygon([(0, 0+1), (2, 0+1), (2, 2+1)])]
        return [ia.PolygonsOnImage(polys, shape=self.image.shape)]
    @property
    def lsoi(self):
        """One line string matching the polygon's vertices."""
        ls = [ia.LineString([(0, 0), (2, 0), (2, 2)])]
        return [ia.LineStringsOnImage(ls, shape=self.image.shape)]
    @property
    def lsoi_1px_right(self):
        """Expected line string after a 1px shift to the right."""
        ls = [ia.LineString([(0+1, 0), (2+1, 0), (2+1, 2)])]
        return [ia.LineStringsOnImage(ls, shape=self.image.shape)]
    @property
    def lsoi_1px_bottom(self):
        """Expected line string after a 1px shift to the bottom."""
        ls = [ia.LineString([(0, 0+1), (2, 0+1), (2, 2+1)])]
        return [ia.LineStringsOnImage(ls, shape=self.image.shape)]
    @property
    def bbsoi(self):
        """One bounding box; y2=3 deliberately extends past the 3px image."""
        bbs = [ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)]
        return [ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)]
    @property
    def bbsoi_1px_right(self):
        """Expected bounding box after a 1px shift to the right."""
        bbs = [ia.BoundingBox(x1=0+1, y1=1, x2=2+1, y2=3)]
        return [ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)]
    @property
    def bbsoi_1px_bottom(self):
        """Expected bounding box after a 1px shift to the bottom."""
        bbs = [ia.BoundingBox(x1=0, y1=1+1, x2=2, y2=3+1)]
        return [ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)]
# ---------------------
# translate: move one pixel to the right
# ---------------------
    def test_image_translate_1px_right(self):
        """Translate by exactly one pixel to the right."""
        # move one pixel to the right
        aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
                         shear=0)
        observed = aug.augment_images(self.images)
        assert np.array_equal(observed, self.images_1px_right)
    def test_image_translate_1px_right__deterministic(self):
        """Translate 1px right, via to_deterministic()."""
        aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
                         shear=0)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images(self.images)
        assert np.array_equal(observed, self.images_1px_right)
    def test_image_translate_1px_right__list(self):
        """Translate 1px right, with a list of images as input."""
        aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
                         shear=0)
        observed = aug.augment_images([self.image])
        assert array_equal_lists(observed, [self.image_1px_right])
    def test_image_translate_1px_right__list_and_deterministic(self):
        """Translate 1px right, list input + to_deterministic()."""
        aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
                         shear=0)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images([self.image])
        assert array_equal_lists(observed, [self.image_1px_right])
    def test_keypoints_translate_1px_right(self):
        """Keypoints translated 1px right."""
        self._test_cba_translate_px(
            "augment_keypoints", {"x": 1, "y": 0},
            self.kpsoi, self.kpsoi_1px_right, False)
    def test_keypoints_translate_1px_right__deterministic(self):
        """Keypoints translated 1px right, deterministic mode."""
        self._test_cba_translate_px(
            "augment_keypoints", {"x": 1, "y": 0},
            self.kpsoi, self.kpsoi_1px_right, True)
    def test_polygons_translate_1px_right(self):
        """Polygons translated 1px right."""
        self._test_cba_translate_px(
            "augment_polygons", {"x": 1, "y": 0},
            self.psoi, self.psoi_1px_right, False)
    def test_polygons_translate_1px_right__deterministic(self):
        """Polygons translated 1px right, deterministic mode."""
        self._test_cba_translate_px(
            "augment_polygons", {"x": 1, "y": 0},
            self.psoi, self.psoi_1px_right, True)
    def test_line_strings_translate_1px_right(self):
        """Line strings translated 1px right."""
        self._test_cba_translate_px(
            "augment_line_strings", {"x": 1, "y": 0},
            self.lsoi, self.lsoi_1px_right, False)
    def test_line_strings_translate_1px_right__deterministic(self):
        """Line strings translated 1px right, deterministic mode."""
        self._test_cba_translate_px(
            "augment_line_strings", {"x": 1, "y": 0},
            self.lsoi, self.lsoi_1px_right, True)
    def test_bounding_boxes_translate_1px_right(self):
        """Bounding boxes translated 1px right."""
        self._test_cba_translate_px(
            "augment_bounding_boxes", {"x": 1, "y": 0},
            self.bbsoi, self.bbsoi_1px_right, False)
    def test_bounding_boxes_translate_1px_right__deterministic(self):
        """Bounding boxes translated 1px right, deterministic mode."""
        self._test_cba_translate_px(
            "augment_bounding_boxes", {"x": 1, "y": 0},
            self.bbsoi, self.bbsoi_1px_right, True)
@classmethod
def _test_cba_translate_px(cls, augf_name, px, cbaoi, cbaoi_translated,
deterministic):
aug = iaa.Affine(scale=1.0, translate_px=px, rotate=0, shear=0)
if deterministic:
aug = aug.to_deterministic()
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi_translated)
    def test_image_translate_1px_right_skimage(self):
        """Translate 1px right using the skimage backend."""
        # move one pixel to the right
        # with backend = skimage
        aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
                         shear=0, backend="skimage")
        observed = aug.augment_images(self.images)
        assert np.array_equal(observed, self.images_1px_right)
    def test_image_translate_1px_right_skimage_order_all(self):
        """Translate 1px right, skimage backend with order=ia.ALL."""
        # move one pixel to the right
        # with backend = skimage, order=ALL
        aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
                         shear=0, backend="skimage", order=ia.ALL)
        observed = aug.augment_images(self.images)
        assert np.array_equal(observed, self.images_1px_right)
    def test_image_translate_1px_right_skimage_order_is_list(self):
        """Translate 1px right, skimage backend with a list of orders."""
        # move one pixel to the right
        # with backend = skimage, order=list
        aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
                         shear=0, backend="skimage", order=[0, 1, 3])
        observed = aug.augment_images(self.images)
        assert np.array_equal(observed, self.images_1px_right)
    def test_image_translate_1px_right_cv2_order_is_list(self):
        """Translate 1px right, cv2 backend with a list of orders."""
        # move one pixel to the right
        # with backend = cv2, order=list
        aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
                         shear=0, backend="cv2", order=[0, 1, 3])
        observed = aug.augment_images(self.images)
        assert np.array_equal(observed, self.images_1px_right)
    def test_image_translate_1px_right_cv2_order_is_stoch_param(self):
        """Translate 1px right, cv2 backend with a stochastic order parameter."""
        # move one pixel to the right
        # with backend = cv2, order=StochasticParameter
        aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0,
                         shear=0, backend="cv2", order=iap.Choice([0, 1, 3]))
        observed = aug.augment_images(self.images)
        assert np.array_equal(observed, self.images_1px_right)
# ---------------------
# translate: move one pixel to the bottom
# ---------------------
    def test_image_translate_1px_bottom(self):
        """Translate by exactly one pixel to the bottom."""
        aug = iaa.Affine(scale=1.0, translate_px={"x": 0, "y": 1}, rotate=0,
                         shear=0)
        observed = aug.augment_images(self.images)
        assert np.array_equal(observed, self.images_1px_bottom)
    def test_image_translate_1px_bottom__deterministic(self):
        """Translate 1px bottom, via to_deterministic()."""
        aug = iaa.Affine(scale=1.0, translate_px={"x": 0, "y": 1}, rotate=0,
                         shear=0)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images(self.images)
        assert np.array_equal(observed, self.images_1px_bottom)
    def test_image_translate_1px_bottom__list(self):
        """Translate 1px bottom, with a list of images as input."""
        aug = iaa.Affine(scale=1.0, translate_px={"x": 0, "y": 1}, rotate=0,
                         shear=0)
        observed = aug.augment_images([self.image])
        assert array_equal_lists(observed, [self.image_1px_bottom])
    def test_image_translate_1px_bottom__list_and_deterministic(self):
        """Translate 1px bottom, list input + to_deterministic()."""
        aug = iaa.Affine(scale=1.0, translate_px={"x": 0, "y": 1}, rotate=0,
                         shear=0)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images([self.image])
        assert array_equal_lists(observed, [self.image_1px_bottom])
    def test_keypoints_translate_1px_bottom(self):
        """Keypoints translated 1px bottom."""
        self._test_cba_translate_px(
            "augment_keypoints", {"x": 0, "y": 1},
            self.kpsoi, self.kpsoi_1px_bottom, False)
    def test_keypoints_translate_1px_bottom__deterministic(self):
        """Keypoints translated 1px bottom, deterministic mode."""
        self._test_cba_translate_px(
            "augment_keypoints", {"x": 0, "y": 1},
            self.kpsoi, self.kpsoi_1px_bottom, True)
    def test_polygons_translate_1px_bottom(self):
        """Polygons translated 1px bottom."""
        self._test_cba_translate_px(
            "augment_polygons", {"x": 0, "y": 1},
            self.psoi, self.psoi_1px_bottom, False)
    def test_polygons_translate_1px_bottom__deterministic(self):
        """Polygons translated 1px bottom, deterministic mode."""
        self._test_cba_translate_px(
            "augment_polygons", {"x": 0, "y": 1},
            self.psoi, self.psoi_1px_bottom, True)
    def test_line_strings_translate_1px_bottom(self):
        """Line strings translated 1px bottom."""
        self._test_cba_translate_px(
            "augment_line_strings", {"x": 0, "y": 1},
            self.lsoi, self.lsoi_1px_bottom, False)
    def test_line_strings_translate_1px_bottom__deterministic(self):
        """Line strings translated 1px bottom, deterministic mode."""
        self._test_cba_translate_px(
            "augment_line_strings", {"x": 0, "y": 1},
            self.lsoi, self.lsoi_1px_bottom, True)
    def test_bounding_boxes_translate_1px_bottom(self):
        """Bounding boxes translated 1px bottom."""
        self._test_cba_translate_px(
            "augment_bounding_boxes", {"x": 0, "y": 1},
            self.bbsoi, self.bbsoi_1px_bottom, False)
    def test_bounding_boxes_translate_1px_bottom__deterministic(self):
        """Bounding boxes translated 1px bottom, deterministic mode."""
        self._test_cba_translate_px(
            "augment_bounding_boxes", {"x": 0, "y": 1},
            self.bbsoi, self.bbsoi_1px_bottom, True)
# ---------------------
# translate: fraction of the image size (towards the right)
# ---------------------
    def test_image_translate_33percent_right(self):
        """Translate by 33% of the width (one pixel) to the right."""
        aug = iaa.Affine(scale=1.0, translate_percent={"x": 0.3333, "y": 0},
                         rotate=0, shear=0)
        observed = aug.augment_images(self.images)
        assert np.array_equal(observed, self.images_1px_right)
    def test_image_translate_33percent_right__deterministic(self):
        """Translate 33% right, via to_deterministic()."""
        aug = iaa.Affine(scale=1.0, translate_percent={"x": 0.3333, "y": 0},
                         rotate=0, shear=0)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images(self.images)
        assert np.array_equal(observed, self.images_1px_right)
    def test_image_translate_33percent_right__list(self):
        """Translate 33% right, with a list of images as input."""
        aug = iaa.Affine(scale=1.0, translate_percent={"x": 0.3333, "y": 0},
                         rotate=0, shear=0)
        observed = aug.augment_images([self.image])
        assert array_equal_lists(observed, [self.image_1px_right])
    def test_image_translate_33percent_right__list_and_deterministic(self):
        """Translate 33% right, list input + to_deterministic()."""
        aug = iaa.Affine(scale=1.0, translate_percent={"x": 0.3333, "y": 0},
                         rotate=0, shear=0)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images([self.image])
        assert array_equal_lists(observed, [self.image_1px_right])
    def test_keypoints_translate_33percent_right(self):
        """Keypoints translated 33% right."""
        self._test_cba_translate_percent(
            "augment_keypoints", {"x": 0.3333, "y": 0},
            self.kpsoi, self.kpsoi_1px_right, False)
    def test_keypoints_translate_33percent_right__deterministic(self):
        """Keypoints translated 33% right, deterministic mode."""
        self._test_cba_translate_percent(
            "augment_keypoints", {"x": 0.3333, "y": 0},
            self.kpsoi, self.kpsoi_1px_right, True)
    def test_polygons_translate_33percent_right(self):
        """Polygons translated 33% right."""
        self._test_cba_translate_percent(
            "augment_polygons", {"x": 0.3333, "y": 0},
            self.psoi, self.psoi_1px_right, False)
    def test_polygons_translate_33percent_right__deterministic(self):
        """Polygons translated 33% right, deterministic mode."""
        self._test_cba_translate_percent(
            "augment_polygons", {"x": 0.3333, "y": 0},
            self.psoi, self.psoi_1px_right, True)
    def test_line_strings_translate_33percent_right(self):
        """Line strings translated 33% right."""
        self._test_cba_translate_percent(
            "augment_line_strings", {"x": 0.3333, "y": 0},
            self.lsoi, self.lsoi_1px_right, False)
    def test_line_strings_translate_33percent_right__deterministic(self):
        """Line strings translated 33% right, deterministic mode."""
        self._test_cba_translate_percent(
            "augment_line_strings", {"x": 0.3333, "y": 0},
            self.lsoi, self.lsoi_1px_right, True)
    def test_bounding_boxes_translate_33percent_right(self):
        """Bounding boxes translated 33% right."""
        self._test_cba_translate_percent(
            "augment_bounding_boxes", {"x": 0.3333, "y": 0},
            self.bbsoi, self.bbsoi_1px_right, False)
    def test_bounding_boxes_translate_33percent_right__deterministic(self):
        """Bounding boxes translated 33% right, deterministic mode."""
        self._test_cba_translate_percent(
            "augment_bounding_boxes", {"x": 0.3333, "y": 0},
            self.bbsoi, self.bbsoi_1px_right, True)
def test_keypoints_with_continuous_param_results_in_absolute_shift(self):
# This test ensures that t ~ uniform(a, b) results in a translation
# by t pixels and not t%
# see issue #505
# use iap.Uniform() here to ensure that is really a float value that
# is sampled and not accidentally DisceteUniform
aug = iaa.Affine(translate_px=iap.Uniform(10, 20))
kps = [ia.Keypoint(x=10, y=10)]
kpsoi = ia.KeypointsOnImage(kps, shape=(1000, 1000))
for _ in np.arange(5):
kpsoi_aug = aug.augment_keypoints(kpsoi)
kp_aug = kpsoi_aug.keypoints[0]
assert 10+10 <= kp_aug.x <= 10+20
assert 10+10 <= kp_aug.y <= 10+20
@classmethod
def _test_cba_translate_percent(cls, augf_name, percent, cbaoi,
cbaoi_translated, deterministic):
aug = iaa.Affine(scale=1.0, translate_percent=percent, rotate=0,
shear=0)
if deterministic:
aug = aug.to_deterministic()
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi_translated, max_distance=1e-3)
# ---------------------
# translate: fraction of the image size (towards the bottom)
# ---------------------
    def test_image_translate_33percent_bottom(self):
        """Translate by 33% of the height (one pixel) to the bottom."""
        # move 33% (one pixel) to the bottom
        aug = iaa.Affine(scale=1.0, translate_percent={"x": 0, "y": 0.3333},
                         rotate=0, shear=0)
        observed = aug.augment_images(self.images)
        assert np.array_equal(observed, self.images_1px_bottom)
    def test_image_translate_33percent_bottom__deterministic(self):
        """Translate 33% bottom, via to_deterministic()."""
        aug = iaa.Affine(scale=1.0, translate_percent={"x": 0, "y": 0.3333},
                         rotate=0, shear=0)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images(self.images)
        assert np.array_equal(observed, self.images_1px_bottom)
    def test_image_translate_33percent_bottom__list(self):
        """Translate 33% bottom, with a list of images as input."""
        aug = iaa.Affine(scale=1.0, translate_percent={"x": 0, "y": 0.3333},
                         rotate=0, shear=0)
        observed = aug.augment_images([self.image])
        assert array_equal_lists(observed, [self.image_1px_bottom])
    def test_image_translate_33percent_bottom__list_and_deterministic(self):
        """Translate 33% bottom, list input + to_deterministic()."""
        aug = iaa.Affine(scale=1.0, translate_percent={"x": 0, "y": 0.3333},
                         rotate=0, shear=0)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images([self.image])
        assert array_equal_lists(observed, [self.image_1px_bottom])
    def test_keypoints_translate_33percent_bottom(self):
        """Keypoints translated 33% bottom."""
        self._test_cba_translate_percent(
            "augment_keypoints", {"x": 0, "y": 0.3333},
            self.kpsoi, self.kpsoi_1px_bottom, False)
    def test_keypoints_translate_33percent_bottom__deterministic(self):
        """Keypoints translated 33% bottom, deterministic mode."""
        self._test_cba_translate_percent(
            "augment_keypoints", {"x": 0, "y": 0.3333},
            self.kpsoi, self.kpsoi_1px_bottom, True)
    def test_polygons_translate_33percent_bottom(self):
        """Polygons translated 33% bottom."""
        self._test_cba_translate_percent(
            "augment_polygons", {"x": 0, "y": 0.3333},
            self.psoi, self.psoi_1px_bottom, False)
    def test_polygons_translate_33percent_bottom__deterministic(self):
        """Polygons translated 33% bottom, deterministic mode."""
        self._test_cba_translate_percent(
            "augment_polygons", {"x": 0, "y": 0.3333},
            self.psoi, self.psoi_1px_bottom, True)
    def test_line_strings_translate_33percent_bottom(self):
        """Line strings translated 33% bottom."""
        self._test_cba_translate_percent(
            "augment_line_strings", {"x": 0, "y": 0.3333},
            self.lsoi, self.lsoi_1px_bottom, False)
    def test_line_strings_translate_33percent_bottom__deterministic(self):
        """Line strings translated 33% bottom, deterministic mode."""
        self._test_cba_translate_percent(
            "augment_line_strings", {"x": 0, "y": 0.3333},
            self.lsoi, self.lsoi_1px_bottom, True)
    def test_bounding_boxes_translate_33percent_bottom(self):
        """Bounding boxes translated 33% bottom."""
        self._test_cba_translate_percent(
            "augment_bounding_boxes", {"x": 0, "y": 0.3333},
            self.bbsoi, self.bbsoi_1px_bottom, False)
    def test_bounding_boxes_translate_33percent_bottom__deterministic(self):
        """Bounding boxes translated 33% bottom, deterministic mode."""
        self._test_cba_translate_percent(
            "augment_bounding_boxes", {"x": 0, "y": 0.3333},
            self.bbsoi, self.bbsoi_1px_bottom, True)
# ---------------------
# translate: axiswise uniform distributions
# ---------------------
    def test_image_translate_by_axiswise_uniform_distributions(self):
        """Per-axis (-1, 1) translation: outputs vary and cover all 9 cells.

        Over 1000 runs the single bright pixel must land in each of the
        nine possible positions roughly uniformly (within 60%-140% of the
        expected 1/9 frequency) and at least 70% of consecutive outputs
        must differ.
        """
        # 0-1px to left/right and 0-1px to top/bottom
        aug = iaa.Affine(scale=1.0, translate_px={"x": (-1, 1), "y": (-1, 1)},
                         rotate=0, shear=0)
        last_aug = None
        nb_changed_aug = 0
        nb_iterations = 1000
        centers_aug = self.image.astype(np.int32) * 0
        for i in sm.xrange(nb_iterations):
            observed_aug = aug.augment_images(self.images)
            if i == 0:
                last_aug = observed_aug
            else:
                if not np.array_equal(observed_aug, last_aug):
                    nb_changed_aug += 1
                last_aug = observed_aug
            # exactly one nonzero pixel must survive the translation
            assert len(observed_aug[0].nonzero()[0]) == 1
            centers_aug += (observed_aug[0] > 0)
        assert nb_changed_aug >= int(nb_iterations * 0.7)
        assert (centers_aug > int(nb_iterations * (1/9 * 0.6))).all()
        assert (centers_aug < int(nb_iterations * (1/9 * 1.4))).all()
    def test_image_translate_by_axiswise_uniform_distributions__det(self):
        """Per-axis (-1, 1) translation + to_deterministic(): output is fixed."""
        # 0-1px to left/right and 0-1px to top/bottom
        aug = iaa.Affine(scale=1.0, translate_px={"x": (-1, 1), "y": (-1, 1)},
                         rotate=0, shear=0)
        aug_det = aug.to_deterministic()
        last_aug_det = None
        nb_changed_aug_det = 0
        nb_iterations = 10
        centers_aug_det = self.image.astype(np.int32) * 0
        for i in sm.xrange(nb_iterations):
            observed_aug_det = aug_det.augment_images(self.images)
            if i == 0:
                last_aug_det = observed_aug_det
            else:
                if not np.array_equal(observed_aug_det, last_aug_det):
                    nb_changed_aug_det += 1
                last_aug_det = observed_aug_det
            # exactly one nonzero pixel must survive the translation
            assert len(observed_aug_det[0].nonzero()[0]) == 1
            centers_aug_det += (observed_aug_det[0] > 0)
        assert nb_changed_aug_det == 0
# ---------------------
# translate heatmaps
# ---------------------
@property
def heatmaps(self):
return ia.HeatmapsOnImage(
np.float32([
[0.0, 0.5, 0.75],
[0.0, 0.5, 0.75],
[0.75, 0.75, 0.75],
]),
shape=(3, 3, 3)
)
@property
def heatmaps_1px_right(self):
return ia.HeatmapsOnImage(
np.float32([
[0.0, 0.0, 0.5],
[0.0, 0.0, 0.5],
[0.0, 0.75, 0.75],
]),
shape=(3, 3, 3)
)
def test_heatmaps_translate_1px_right(self):
aug = iaa.Affine(translate_px={"x": 1})
observed = aug.augment_heatmaps([self.heatmaps])[0]
_assert_same_shape(observed, self.heatmaps)
_assert_same_min_max(observed, self.heatmaps)
assert np.array_equal(observed.get_arr(),
self.heatmaps_1px_right.get_arr())
def test_heatmaps_translate_1px_right_should_ignore_cval(self):
# should still use mode=constant cval=0 even when other settings chosen
aug = iaa.Affine(translate_px={"x": 1}, cval=255)
observed = aug.augment_heatmaps([self.heatmaps])[0]
_assert_same_shape(observed, self.heatmaps)
_assert_same_min_max(observed, self.heatmaps)
assert np.array_equal(observed.get_arr(),
self.heatmaps_1px_right.get_arr())
def test_heatmaps_translate_1px_right_should_ignore_mode(self):
aug = iaa.Affine(translate_px={"x": 1}, mode="edge", cval=255)
observed = aug.augment_heatmaps([self.heatmaps])[0]
_assert_same_shape(observed, self.heatmaps)
_assert_same_min_max(observed, self.heatmaps)
assert np.array_equal(observed.get_arr(),
self.heatmaps_1px_right.get_arr())
# ---------------------
# translate segmaps
# ---------------------
@property
def segmaps(self):
return SegmentationMapsOnImage(
np.int32([
[0, 1, 2],
[0, 1, 2],
[2, 2, 2],
]),
shape=(3, 3, 3)
)
@property
def segmaps_1px_right(self):
return SegmentationMapsOnImage(
np.int32([
[0, 0, 1],
[0, 0, 1],
[0, 2, 2],
]),
shape=(3, 3, 3)
)
def test_segmaps_translate_1px_right(self):
aug = iaa.Affine(translate_px={"x": 1})
observed = aug.augment_segmentation_maps([self.segmaps])[0]
_assert_same_shape(observed, self.segmaps)
assert np.array_equal(observed.get_arr(),
self.segmaps_1px_right.get_arr())
def test_segmaps_translate_1px_right_should_ignore_cval(self):
# should still use mode=constant cval=0 even when other settings chosen
aug = iaa.Affine(translate_px={"x": 1}, cval=255)
observed = aug.augment_segmentation_maps([self.segmaps])[0]
_assert_same_shape(observed, self.segmaps)
assert np.array_equal(observed.get_arr(),
self.segmaps_1px_right.get_arr())
def test_segmaps_translate_1px_right_should_ignore_mode(self):
aug = iaa.Affine(translate_px={"x": 1}, mode="edge", cval=255)
observed = aug.augment_segmentation_maps([self.segmaps])[0]
_assert_same_shape(observed, self.segmaps)
assert np.array_equal(observed.get_arr(),
self.segmaps_1px_right.get_arr())
class TestAffine_rotate(unittest.TestCase):
    """Tests for the ``rotate`` parameter of ``Affine``.

    The base image is 3x3 with a white horizontal center line; a 90deg
    rotation must turn it into a white vertical center line. Coordinate
    based augmentables (keypoints, polygons, line strings, bounding
    boxes) are compared against precomputed rotated variants.
    """
    def setUp(self):
        reseed()
    @property
    def image(self):
        # white horizontal center line
        return np.uint8([
            [0, 0, 0],
            [255, 255, 255],
            [0, 0, 0]
        ])[:, :, np.newaxis]
    @property
    def image_rot90(self):
        # expected result: white vertical center line
        return np.uint8([
            [0, 255, 0],
            [0, 255, 0],
            [0, 255, 0]
        ])[:, :, np.newaxis]
    @property
    def images(self):
        return np.array([self.image])
    @property
    def images_rot90(self):
        return np.array([self.image_rot90])
    @property
    def kpsoi(self):
        kps = [ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=1),
               ia.Keypoint(x=2, y=1)]
        return [ia.KeypointsOnImage(kps, shape=self.image.shape)]
    @property
    def kpsoi_rot90(self):
        kps = [ia.Keypoint(x=3-1, y=0), ia.Keypoint(x=3-1, y=1),
               ia.Keypoint(x=3-1, y=2)]
        return [ia.KeypointsOnImage(kps, shape=self.image_rot90.shape)]
    @property
    def psoi(self):
        polys = [ia.Polygon([(0, 0), (3, 0), (3, 3)])]
        return [ia.PolygonsOnImage(polys, shape=self.image.shape)]
    @property
    def psoi_rot90(self):
        polys = [ia.Polygon([(3-0, 0), (3-0, 3), (3-3, 3)])]
        return [ia.PolygonsOnImage(polys, shape=self.image_rot90.shape)]
    @property
    def lsoi(self):
        ls = [ia.LineString([(0, 0), (3, 0), (3, 3)])]
        return [ia.LineStringsOnImage(ls, shape=self.image.shape)]
    @property
    def lsoi_rot90(self):
        ls = [ia.LineString([(3-0, 0), (3-0, 3), (3-3, 3)])]
        return [ia.LineStringsOnImage(ls, shape=self.image_rot90.shape)]
    @property
    def bbsoi(self):
        bbs = [ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)]
        return [ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)]
    @property
    def bbsoi_rot90(self):
        bbs = [ia.BoundingBox(x1=0, y1=0, x2=2, y2=2)]
        return [ia.BoundingBoxesOnImage(bbs, shape=self.image_rot90.shape)]
    def test_image_rot90(self):
        # rotate by 90 degrees
        aug = iaa.Affine(scale=1.0, translate_px=0, rotate=90, shear=0)
        observed = aug.augment_images(self.images)
        # binarize to be robust against interpolation artifacts
        observed[observed >= 100] = 255
        observed[observed < 100] = 0
        assert np.array_equal(observed, self.images_rot90)
    def test_image_rot90__deterministic(self):
        aug = iaa.Affine(scale=1.0, translate_px=0, rotate=90, shear=0)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images(self.images)
        observed[observed >= 100] = 255
        observed[observed < 100] = 0
        assert np.array_equal(observed, self.images_rot90)
    def test_image_rot90__list(self):
        aug = iaa.Affine(scale=1.0, translate_px=0, rotate=90, shear=0)
        observed = aug.augment_images([self.image])
        observed[0][observed[0] >= 100] = 255
        observed[0][observed[0] < 100] = 0
        assert array_equal_lists(observed, [self.image_rot90])
    def test_image_rot90__list_and_deterministic(self):
        aug = iaa.Affine(scale=1.0, translate_px=0, rotate=90, shear=0)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images([self.image])
        observed[0][observed[0] >= 100] = 255
        observed[0][observed[0] < 100] = 0
        assert array_equal_lists(observed, [self.image_rot90])
    def test_keypoints_rot90(self):
        self._test_cba_rotate(
            "augment_keypoints", 90, self.kpsoi, self.kpsoi_rot90, False)
    def test_keypoints_rot90__deterministic(self):
        self._test_cba_rotate(
            "augment_keypoints", 90, self.kpsoi, self.kpsoi_rot90, True)
    def test_polygons_rot90(self):
        self._test_cba_rotate(
            "augment_polygons", 90, self.psoi, self.psoi_rot90, False)
    def test_polygons_rot90__deterministic(self):
        self._test_cba_rotate(
            "augment_polygons", 90, self.psoi, self.psoi_rot90, True)
    def test_line_strings_rot90(self):
        self._test_cba_rotate(
            "augment_line_strings", 90, self.lsoi, self.lsoi_rot90, False)
    def test_line_strings_rot90__deterministic(self):
        self._test_cba_rotate(
            "augment_line_strings", 90, self.lsoi, self.lsoi_rot90, True)
    def test_bounding_boxes_rot90(self):
        self._test_cba_rotate(
            "augment_bounding_boxes", 90, self.bbsoi, self.bbsoi_rot90, False)
    def test_bounding_boxes_rot90__deterministic(self):
        self._test_cba_rotate(
            "augment_bounding_boxes", 90, self.bbsoi, self.bbsoi_rot90, True)
    @classmethod
    def _test_cba_rotate(cls, augf_name, rotate, cbaoi,
                         cbaoi_rotated, deterministic):
        """Rotate coordinate-based augmentables and compare with expected."""
        aug = iaa.Affine(scale=1.0, translate_px=0, rotate=rotate,
                         shear=0)
        if deterministic:
            aug = aug.to_deterministic()
        observed = getattr(aug, augf_name)(cbaoi)
        assert_cbaois_equal(observed, cbaoi_rotated)
    def test_image_rotate_is_tuple_0_to_364_deg(self):
        # random rotation 0-364 degrees
        aug = iaa.Affine(scale=1.0, translate_px=0, rotate=(0, 364), shear=0)
        last_aug = None
        nb_changed_aug = 0
        nb_iterations = 1000
        pixels_sums_aug = self.image.astype(np.int32) * 0
        for i in sm.xrange(nb_iterations):
            observed_aug = aug.augment_images(self.images)
            if i == 0:
                last_aug = observed_aug
            else:
                if not np.array_equal(observed_aug, last_aug):
                    nb_changed_aug += 1
                last_aug = observed_aug
            pixels_sums_aug += (observed_aug[0] > 100)
        assert nb_changed_aug >= int(nb_iterations * 0.9)
        # center pixel, should always be white when rotating line around center
        assert pixels_sums_aug[1, 1] > (nb_iterations * 0.98)
        assert pixels_sums_aug[1, 1] < (nb_iterations * 1.02)
        # outer pixels, should sometimes be white
        # the values here had to be set quite tolerant, the middle pixels at
        # top/left/bottom/right get more activation than expected
        outer_pixels = ([0, 0, 0, 1, 1, 2, 2, 2],
                        [0, 1, 2, 0, 2, 0, 1, 2])
        assert (
            pixels_sums_aug[outer_pixels] > int(nb_iterations * (2/8 * 0.4))
        ).all()
        assert (
            pixels_sums_aug[outer_pixels] < int(nb_iterations * (2/8 * 2.0))
        ).all()
    def test_image_rotate_is_tuple_0_to_364_deg__deterministic(self):
        aug = iaa.Affine(scale=1.0, translate_px=0, rotate=(0, 364), shear=0)
        aug_det = aug.to_deterministic()
        last_aug_det = None
        nb_changed_aug_det = 0
        nb_iterations = 10
        pixels_sums_aug_det = self.image.astype(np.int32) * 0
        for i in sm.xrange(nb_iterations):
            observed_aug_det = aug_det.augment_images(self.images)
            if i == 0:
                last_aug_det = observed_aug_det
            else:
                if not np.array_equal(observed_aug_det, last_aug_det):
                    nb_changed_aug_det += 1
                last_aug_det = observed_aug_det
            pixels_sums_aug_det += (observed_aug_det[0] > 100)
        assert nb_changed_aug_det == 0
        # center pixel, should always be white when rotating line around center
        assert pixels_sums_aug_det[1, 1] > (nb_iterations * 0.98)
        assert pixels_sums_aug_det[1, 1] < (nb_iterations * 1.02)
    def test_alignment_between_images_and_heatmaps_for_fixed_rot(self):
        # measure alignment between images and heatmaps when rotating
        for backend in ["auto", "cv2", "skimage"]:
            aug = iaa.Affine(rotate=45, backend=backend)
            image = np.zeros((7, 6), dtype=np.uint8)
            image[:, 2:3+1] = 255
            hm = ia.HeatmapsOnImage(image.astype(np.float32)/255, shape=(7, 6))
            img_aug = aug.augment_image(image)
            hm_aug = aug.augment_heatmaps([hm])[0]
            img_aug_mask = img_aug > 255*0.1
            hm_aug_mask = hm_aug.arr_0to1 > 0.1
            same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
            assert hm_aug.shape == (7, 6)
            assert hm_aug.arr_0to1.shape == (7, 6, 1)
            # at least 95% of pixels must agree between image and heatmap
            assert (same / img_aug_mask.size) >= 0.95
    def test_alignment_between_images_and_smaller_heatmaps_for_fixed_rot(self):
        # measure alignment between images and heatmaps when rotating
        # here with smaller heatmaps
        for backend in ["auto", "cv2", "skimage"]:
            aug = iaa.Affine(rotate=45, backend=backend)
            image = np.zeros((56, 48), dtype=np.uint8)
            image[:, 16:24+1] = 255
            hm = ia.HeatmapsOnImage(
                ia.imresize_single_image(
                    image, (28, 24), interpolation="cubic"
                ).astype(np.float32)/255,
                shape=(56, 48)
            )
            img_aug = aug.augment_image(image)
            hm_aug = aug.augment_heatmaps([hm])[0]
            img_aug_mask = img_aug > 255*0.1
            hm_aug_mask = ia.imresize_single_image(
                hm_aug.arr_0to1, img_aug.shape[0:2], interpolation="cubic"
            ) > 0.1
            same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
            assert hm_aug.shape == (56, 48)
            assert hm_aug.arr_0to1.shape == (28, 24, 1)
            # slightly lower threshold due to resize interpolation effects
            assert (same / img_aug_mask.size) >= 0.9
    def test_bounding_boxes_have_expected_shape_after_augmentation(self):
        """Rotated BBs should tightly follow the rotated white rectangle."""
        image = np.zeros((100, 100), dtype=np.uint8)
        image[20:80, 20:80] = 255
        bb = ia.BoundingBox(x1=20, y1=20, x2=80, y2=80)
        bbsoi = ia.BoundingBoxesOnImage([bb], shape=image.shape)
        for rotate in [10, 20, 40, 80, 120]:
            with self.subTest(rotate=rotate):
                aug = iaa.Affine(rotate=rotate, order=0)
                image_aug, bbsoi_aug = aug(image=image, bounding_boxes=bbsoi)
                # derive the expected BB from the nonzero image area
                xx = np.nonzero(np.max(image_aug > 100, axis=0))[0]
                yy = np.nonzero(np.max(image_aug > 100, axis=1))[0]
                bb_exp_x1 = xx[0]
                bb_exp_x2 = xx[-1]
                bb_exp_y1 = yy[0]
                bb_exp_y2 = yy[-1]
                bb_expected = ia.BoundingBox(x1=bb_exp_x1, y1=bb_exp_y1,
                                             x2=bb_exp_x2, y2=bb_exp_y2)
                assert bbsoi_aug.bounding_boxes[0].iou(bb_expected) > 0.95
class TestAffine_cval(unittest.TestCase):
    """Tests for the ``cval`` (fill value) parameter of ``Affine``.

    A fully white image is translated by 100px -- far beyond its 3px
    size -- so every output pixel must show (approximately) the fill
    value.
    NOTE(review): unlike the sibling test classes there is no
    setUp()/reseed() here -- presumably tolerances make it unnecessary;
    confirm.
    """
    @property
    def image(self):
        return np.ones((3, 3, 1), dtype=np.uint8) * 255
    @property
    def images(self):
        return np.array([self.image])
    def test_image_fixed_cval(self):
        aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
                         cval=128)
        observed = aug.augment_images(self.images)
        # tolerance of +/-30 around the requested fill value
        assert (observed[0] > 128 - 30).all()
        assert (observed[0] < 128 + 30).all()
    def test_image_fixed_cval__deterministic(self):
        aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
                         cval=128)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images(self.images)
        assert (observed[0] > 128 - 30).all()
        assert (observed[0] < 128 + 30).all()
    def test_image_fixed_cval__list(self):
        aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
                         cval=128)
        observed = aug.augment_images([self.image])
        assert (observed[0] > 128 - 30).all()
        assert (observed[0] < 128 + 30).all()
    def test_image_fixed_cval__list_and_deterministic(self):
        aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
                         cval=128)
        aug_det = aug.to_deterministic()
        observed = aug_det.augment_images([self.image])
        assert (observed[0] > 128 - 30).all()
        assert (observed[0] < 128 + 30).all()
    def test_image_cval_is_tuple(self):
        # random cvals
        aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
                         cval=(0, 255))
        last_aug = None
        nb_changed_aug = 0
        nb_iterations = 1000
        for i in sm.xrange(nb_iterations):
            observed_aug = aug.augment_images(self.images)
            if i == 0:
                last_aug = observed_aug
            else:
                if not np.array_equal(observed_aug, last_aug):
                    nb_changed_aug += 1
                last_aug = observed_aug
        # sampled cvals should make most consecutive outputs differ
        assert nb_changed_aug >= int(nb_iterations * 0.9)
    def test_image_cval_is_tuple__deterministic(self):
        # random cvals
        aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,
                         cval=(0, 255))
        aug_det = aug.to_deterministic()
        last_aug_det = None
        nb_changed_aug_det = 0
        nb_iterations = 10
        for i in sm.xrange(nb_iterations):
            observed_aug_det = aug_det.augment_images(self.images)
            if i == 0:
                last_aug_det = observed_aug_det
            else:
                if not np.array_equal(observed_aug_det, last_aug_det):
                    nb_changed_aug_det += 1
                last_aug_det = observed_aug_det
        # deterministic mode: output must never change between calls
        assert nb_changed_aug_det == 0
class TestAffine_fit_output(unittest.TestCase):
    """Tests for ``Affine(..., fit_output=True)``.

    With ``fit_output=True`` the output canvas is resized/shifted so
    that the whole transformed image stays visible. Pure translations
    are hence expected to be no-ops, and all four corner markers of a
    rotated image must remain inside the output (checked via connected
    component counting).

    Fixes over previous revision: the bounding-box rot90 test used a
    variable named ``lsoi`` (copy-paste from the line-string test); the
    deactivated segmap asserts referenced ``arr_0to1`` which only exists
    on heatmaps (segmaps expose ``.arr``).
    """
    @property
    def image(self):
        return np.ones((3, 3, 1), dtype=np.uint8) * 255
    @property
    def images(self):
        return np.array([self.image])
    @property
    def heatmaps(self):
        return ia.HeatmapsOnImage(
            np.float32([
                [0.0, 0.5, 0.75],
                [0.0, 0.5, 0.75],
                [0.75, 0.75, 0.75],
            ]),
            shape=(3, 3, 3)
        )
    @property
    def kpsoi(self):
        kps = [ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=1),
               ia.Keypoint(x=2, y=1)]
        return [ia.KeypointsOnImage(kps, shape=self.image.shape)]
    def test_image_translate(self):
        # a pure translation is a no-op under fit_output
        for backend in ["auto", "cv2", "skimage"]:
            with self.subTest(backend=backend):
                aug = iaa.Affine(translate_px=100, fit_output=True,
                                 backend=backend)
                observed = aug.augment_images(self.images)
                expected = self.images
                assert np.array_equal(observed, expected)
    def test_keypoints_translate(self):
        for backend in ["auto", "cv2", "skimage"]:
            with self.subTest(backend=backend):
                aug = iaa.Affine(translate_px=100, fit_output=True,
                                 backend=backend)
                observed = aug.augment_keypoints(self.kpsoi)
                expected = self.kpsoi
                assert keypoints_equal(observed, expected)
    def test_heatmaps_translate(self):
        for backend in ["auto", "cv2", "skimage"]:
            with self.subTest(backend=backend):
                aug = iaa.Affine(translate_px=100, fit_output=True,
                                 backend=backend)
                observed = aug.augment_heatmaps([self.heatmaps])[0]
                expected = self.heatmaps
                assert np.allclose(observed.arr_0to1, expected.arr_0to1)
    def test_image_rot45(self):
        for backend in ["auto", "cv2", "skimage"]:
            with self.subTest(backend=backend):
                aug = iaa.Affine(rotate=45, fit_output=True,
                                 backend=backend)
                img = np.zeros((10, 10), dtype=np.uint8)
                img[0:2, 0:2] = 255
                img[-2:, 0:2] = 255
                img[0:2, -2:] = 255
                img[-2:, -2:] = 255
                img_aug = aug.augment_image(img)
                # all four corner markers must survive as separate blobs
                _labels, nb_labels = skimage.morphology.label(
                    img_aug > 240, return_num=True, connectivity=2)
                assert nb_labels == 4
    def test_heatmaps_rot45(self):
        for backend in ["auto", "cv2", "skimage"]:
            with self.subTest(backend=backend):
                aug = iaa.Affine(rotate=45, fit_output=True,
                                 backend=backend)
                img = np.zeros((10, 10), dtype=np.uint8)
                img[0:2, 0:2] = 255
                img[-2:, 0:2] = 255
                img[0:2, -2:] = 255
                img[-2:, -2:] = 255
                hm = ia.HeatmapsOnImage(img.astype(np.float32)/255,
                                        shape=(10, 10))
                hm_aug = aug.augment_heatmaps([hm])[0]
                _labels, nb_labels = skimage.morphology.label(
                    hm_aug.arr_0to1 > 240/255, return_num=True, connectivity=2)
                assert nb_labels == 4
    def test_heatmaps_rot45__heatmaps_smaller_than_image(self):
        for backend in ["auto", "cv2", "skimage"]:
            with self.subTest(backend=backend):
                aug = iaa.Affine(rotate=45, fit_output=True,
                                 backend=backend)
                img = np.zeros((80, 80), dtype=np.uint8)
                img[0:5, 0:5] = 255
                img[-5:, 0:5] = 255
                img[0:5, -5:] = 255
                img[-5:, -5:] = 255
                hm = HeatmapsOnImage(
                    ia.imresize_single_image(
                        img, (40, 40), interpolation="cubic"
                    ).astype(np.float32)/255,
                    shape=(80, 80)
                )
                hm_aug = aug.augment_heatmaps([hm])[0]
                # these asserts are deactivated because the image size can
                # change under fit_output=True
                # assert hm_aug.shape == (80, 80)
                # assert hm_aug.arr_0to1.shape == (40, 40, 1)
                _labels, nb_labels = skimage.morphology.label(
                    hm_aug.arr_0to1 > 200/255, return_num=True, connectivity=2)
                assert nb_labels == 4
    def test_image_heatmap_alignment_random_rots(self):
        nb_iterations = 50
        for backend in ["auto", "cv2", "skimage"]:
            with self.subTest(backend=backend):
                for _ in sm.xrange(nb_iterations):
                    aug = iaa.Affine(rotate=(0, 364), fit_output=True,
                                     backend=backend)
                    img = np.zeros((80, 80), dtype=np.uint8)
                    img[0:5, 0:5] = 255
                    img[-5:, 0:5] = 255
                    img[0:5, -5:] = 255
                    img[-5:, -5:] = 255
                    hm = HeatmapsOnImage(
                        img.astype(np.float32)/255,
                        shape=(80, 80)
                    )
                    img_aug = aug.augment_image(img)
                    hm_aug = aug.augment_heatmaps([hm])[0]
                    img_aug_mask = img_aug > 255*0.1
                    hm_aug_mask = ia.imresize_single_image(
                        hm_aug.arr_0to1, img_aug.shape[0:2],
                        interpolation="cubic"
                    ) > 0.1
                    same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
                    assert (same / img_aug_mask.size) >= 0.95
    def test_image_heatmap_alignment_random_rots__hms_smaller_than_img(self):
        nb_iterations = 50
        for backend in ["auto", "cv2", "skimage"]:
            with self.subTest(backend=backend):
                for _ in sm.xrange(nb_iterations):
                    aug = iaa.Affine(rotate=(0, 364), fit_output=True,
                                     backend=backend)
                    img = np.zeros((80, 80), dtype=np.uint8)
                    img[0:5, 0:5] = 255
                    img[-5:, 0:5] = 255
                    img[0:5, -5:] = 255
                    img[-5:, -5:] = 255
                    hm = HeatmapsOnImage(
                        ia.imresize_single_image(
                            img, (40, 40), interpolation="cubic"
                        ).astype(np.float32)/255,
                        shape=(80, 80)
                    )
                    img_aug = aug.augment_image(img)
                    hm_aug = aug.augment_heatmaps([hm])[0]
                    img_aug_mask = img_aug > 255*0.1
                    hm_aug_mask = ia.imresize_single_image(
                        hm_aug.arr_0to1, img_aug.shape[0:2],
                        interpolation="cubic"
                    ) > 0.1
                    same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
                    assert (same / img_aug_mask.size) >= 0.95
    def test_segmaps_rot45(self):
        for backend in ["auto", "cv2", "skimage"]:
            with self.subTest(backend=backend):
                aug = iaa.Affine(rotate=45, fit_output=True,
                                 backend=backend)
                img = np.zeros((80, 80), dtype=np.uint8)
                img[0:5, 0:5] = 255
                img[-5:, 0:5] = 255
                img[0:5, -5:] = 255
                img[-5:, -5:] = 255
                segmap = SegmentationMapsOnImage(
                    (img > 100).astype(np.int32),
                    shape=(80, 80)
                )
                segmap_aug = aug.augment_segmentation_maps([segmap])[0]
                # these asserts are deactivated because the image size can
                # change under fit_output=True
                # assert segmap_aug.shape == (80, 80)
                # assert segmap_aug.arr.shape == (80, 80, 1)
                _labels, nb_labels = skimage.morphology.label(
                    segmap_aug.arr > 0, return_num=True, connectivity=2)
                assert nb_labels == 4
    def test_segmaps_rot45__segmaps_smaller_than_img(self):
        for backend in ["auto", "cv2", "skimage"]:
            with self.subTest(backend=backend):
                aug = iaa.Affine(rotate=45, fit_output=True,
                                 backend=backend)
                img = np.zeros((80, 80), dtype=np.uint8)
                img[0:5, 0:5] = 255
                img[-5:, 0:5] = 255
                img[0:5, -5:] = 255
                img[-5:, -5:] = 255
                segmap = SegmentationMapsOnImage(
                    (
                        ia.imresize_single_image(
                            img, (40, 40), interpolation="cubic"
                        ) > 100
                    ).astype(np.int32),
                    shape=(80, 80)
                )
                segmap_aug = aug.augment_segmentation_maps([segmap])[0]
                # these asserts are deactivated because the image size can
                # change under fit_output=True
                # assert segmap_aug.shape == (80, 80)
                # assert segmap_aug.arr.shape == (40, 40, 1)
                _labels, nb_labels = skimage.morphology.label(
                    segmap_aug.arr > 0, return_num=True, connectivity=2)
                assert nb_labels == 4
    def test_image_segmap_alignment_random_rots(self):
        nb_iterations = 50
        for backend in ["auto", "cv2", "skimage"]:
            with self.subTest(backend=backend):
                for _ in sm.xrange(nb_iterations):
                    aug = iaa.Affine(rotate=(0, 364), fit_output=True,
                                     backend=backend)
                    img = np.zeros((80, 80), dtype=np.uint8)
                    img[0:5, 0:5] = 255
                    img[-5:, 0:5] = 255
                    img[0:5, -5:] = 255
                    img[-5:, -5:] = 255
                    segmap = SegmentationMapsOnImage(
                        (img > 100).astype(np.int32),
                        shape=(80, 80)
                    )
                    img_aug = aug.augment_image(img)
                    segmap_aug = aug.augment_segmentation_maps([segmap])[0]
                    img_aug_mask = img_aug > 100
                    segmap_aug_mask = ia.imresize_single_image(
                        segmap_aug.arr,
                        img_aug.shape[0:2],
                        interpolation="nearest"
                    ) > 0
                    same = np.sum(img_aug_mask == segmap_aug_mask[:, :, 0])
                    assert (same / img_aug_mask.size) >= 0.95
    def test_image_segmap_alignment_random_rots__sms_smaller_than_img(self):
        nb_iterations = 50
        for backend in ["auto", "cv2", "skimage"]:
            with self.subTest(backend=backend):
                for _ in sm.xrange(nb_iterations):
                    aug = iaa.Affine(rotate=(0, 364), fit_output=True,
                                     backend=backend)
                    img = np.zeros((80, 80), dtype=np.uint8)
                    img[0:5, 0:5] = 255
                    img[-5:, 0:5] = 255
                    img[0:5, -5:] = 255
                    img[-5:, -5:] = 255
                    segmap = SegmentationMapsOnImage(
                        (
                            ia.imresize_single_image(
                                img, (40, 40), interpolation="cubic"
                            ) > 100
                        ).astype(np.int32),
                        shape=(80, 80)
                    )
                    img_aug = aug.augment_image(img)
                    segmap_aug = aug.augment_segmentation_maps([segmap])[0]
                    img_aug_mask = img_aug > 100
                    segmap_aug_mask = ia.imresize_single_image(
                        segmap_aug.arr,
                        img_aug.shape[0:2],
                        interpolation="nearest"
                    ) > 0
                    same = np.sum(img_aug_mask == segmap_aug_mask[:, :, 0])
                    assert (same / img_aug_mask.size) >= 0.95
    def test_keypoints_rot90_without_fit_output(self):
        for backend in ["auto", "cv2", "skimage"]:
            with self.subTest(backend=backend):
                aug = iaa.Affine(rotate=90, backend=backend)
                kps = ia.KeypointsOnImage([ia.Keypoint(10, 10)],
                                          shape=(100, 200, 3))
                kps_aug = aug.augment_keypoints(kps)
                # without fit_output the canvas shape stays unchanged
                assert kps_aug.shape == (100, 200, 3)
                assert not np.allclose(
                    [kps_aug.keypoints[0].x, kps_aug.keypoints[0].y],
                    [kps.keypoints[0].x, kps.keypoints[0].y],
                    atol=1e-2, rtol=0)
    def test_keypoints_rot90(self):
        for backend in ["auto", "cv2", "skimage"]:
            with self.subTest(backend=backend):
                aug = iaa.Affine(rotate=90, fit_output=True, backend=backend)
                kps = ia.KeypointsOnImage([ia.Keypoint(10, 10)],
                                          shape=(100, 200, 3))
                kps_aug = aug.augment_keypoints(kps)
                # fit_output swaps height/width for a 90deg rotation
                assert kps_aug.shape == (200, 100, 3)
                assert not np.allclose(
                    [kps_aug.keypoints[0].x, kps_aug.keypoints[0].y],
                    [kps.keypoints[0].x, kps.keypoints[0].y],
                    atol=1e-2, rtol=0)
    def test_empty_keypoints_rot90(self):
        for backend in ["auto", "cv2", "skimage"]:
            with self.subTest(backend=backend):
                aug = iaa.Affine(rotate=90, fit_output=True, backend=backend)
                kps = ia.KeypointsOnImage([], shape=(100, 200, 3))
                kps_aug = aug.augment_keypoints(kps)
                assert kps_aug.shape == (200, 100, 3)
                assert len(kps_aug.keypoints) == 0
    def _test_cbaoi_rot90_without_fit_output(self, cbaoi, augf_name):
        for backend in ["auto", "cv2", "skimage"]:
            with self.subTest(backend=backend):
                # verify that shape in PolygonsOnImages changes
                aug = iaa.Affine(rotate=90, backend=backend)
                cbaoi_aug = getattr(aug, augf_name)([cbaoi, cbaoi])
                assert len(cbaoi_aug) == 2
                for cbaoi_aug_i in cbaoi_aug:
                    if isinstance(cbaoi, (ia.PolygonsOnImage,
                                          ia.LineStringsOnImage)):
                        assert cbaoi_aug_i.shape == cbaoi.shape
                        assert not cbaoi_aug_i.items[0].coords_almost_equals(
                            cbaoi.items[0].coords, max_distance=1e-2)
                    else:
                        assert_cbaois_equal(cbaoi_aug_i, cbaoi)
    def test_polygons_rot90_without_fit_output(self):
        psoi = ia.PolygonsOnImage([
            ia.Polygon([(10, 10), (20, 10), (20, 20)])
        ], shape=(100, 200, 3))
        self._test_cbaoi_rot90_without_fit_output(psoi, "augment_polygons")
    def test_line_strings_rot90_without_fit_output(self):
        lsoi = ia.LineStringsOnImage([
            ia.LineString([(10, 10), (20, 10), (20, 20), (10, 10)])
        ], shape=(100, 200, 3))
        self._test_cbaoi_rot90_without_fit_output(lsoi, "augment_line_strings")
    def _test_cbaoi_rot90(self, cbaoi, expected, augf_name):
        for backend in ["auto", "cv2", "skimage"]:
            with self.subTest(backend=backend):
                aug = iaa.Affine(rotate=90, fit_output=True, backend=backend)
                cbaoi_aug = getattr(aug, augf_name)([cbaoi, cbaoi])
                assert len(cbaoi_aug) == 2
                for cbaoi_aug_i in cbaoi_aug:
                    assert_cbaois_equal(cbaoi_aug_i, expected)
    def test_polygons_rot90(self):
        psoi = ia.PolygonsOnImage([
            ia.Polygon([(10, 10), (20, 10), (20, 20)])
        ], shape=(100, 200, 3))
        expected = ia.PolygonsOnImage([
            ia.Polygon([(100-10-1, 10), (100-10-1, 20), (100-20-1, 20)])
        ], shape=(200, 100, 3))
        self._test_cbaoi_rot90(psoi, expected, "augment_polygons")
    def test_line_strings_rot90(self):
        lsoi = ia.LineStringsOnImage([
            ia.LineString([(10, 10), (20, 10), (20, 20), (10, 10)])
        ], shape=(100, 200, 3))
        expected = ia.LineStringsOnImage([
            ia.LineString([(100-10-1, 10), (100-10-1, 20), (100-20-1, 20),
                           (100-10-1, 10)])
        ], shape=(200, 100, 3))
        self._test_cbaoi_rot90(lsoi, expected, "augment_line_strings")
    def test_bounding_boxes_rot90(self):
        # variable renamed from lsoi (copy-paste from line-string test)
        bbsoi = ia.BoundingBoxesOnImage([
            ia.BoundingBox(x1=10, y1=10, x2=20, y2=20)
        ], shape=(100, 200, 3))
        expected = ia.BoundingBoxesOnImage([
            ia.BoundingBox(x1=100-20-1, y1=10, x2=100-10-1, y2=20)
        ], shape=(200, 100, 3))
        self._test_cbaoi_rot90(bbsoi, expected, "augment_bounding_boxes")
    def _test_empty_cbaoi_rot90(self, cbaoi, expected, augf_name):
        for backend in ["auto", "cv2", "skimage"]:
            with self.subTest(backend=backend):
                aug = iaa.Affine(rotate=90, fit_output=True, backend=backend)
                cbaoi_aug = getattr(aug, augf_name)(cbaoi)
                assert_cbaois_equal(cbaoi_aug, expected)
    def test_empty_polygons_rot90(self):
        psoi = ia.PolygonsOnImage([], shape=(100, 200, 3))
        expected = ia.PolygonsOnImage([], shape=(200, 100, 3))
        self._test_empty_cbaoi_rot90(psoi, expected, "augment_polygons")
    def test_empty_line_strings_rot90(self):
        lsoi = ia.LineStringsOnImage([], shape=(100, 200, 3))
        expected = ia.LineStringsOnImage([], shape=(200, 100, 3))
        self._test_empty_cbaoi_rot90(lsoi, expected, "augment_line_strings")
    def test_empty_bounding_boxes_rot90(self):
        bbsoi = ia.BoundingBoxesOnImage([], shape=(100, 200, 3))
        expected = ia.BoundingBoxesOnImage([], shape=(200, 100, 3))
        self._test_empty_cbaoi_rot90(bbsoi, expected, "augment_bounding_boxes")
# TODO merge these into TestAffine_rotate since they are rotations?
# or extend to contain other affine params too?
class TestAffine_alignment(unittest.TestCase):
    """Tests that images and non-image data stay aligned under Affine."""
    def setUp(self):
        reseed()
    def test_image_segmap_alignment_with_translate_px(self):
        # NOTE(review): despite the method name, this test measures
        # alignment between images and *heatmaps* -- confirm intended.
        image = np.zeros((80, 100, 3), dtype=np.uint8)
        image[40-10:40+10, 50-10:50+10, :] = 255
        hm = np.zeros((40, 50, 1), dtype=np.float32)
        hm[20-5:20+5, 25-5:25+5, 0] = 1.0
        hm = ia.HeatmapsOnImage(hm, shape=image.shape)
        # note that if x is an odd value (e.g. 1), the projection is a bit
        # less accurate as x=1 projected to a half-sized segmap is x=0.5,
        # leading to interpolation effects
        xvals = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, [0, 10, 20]]
        for xvals_i in xvals:
            with self.subTest(x=xvals_i):
                aug = iaa.Affine(translate_px={"x": xvals_i})
                iterations = 2 if ia.is_single_number(xvals_i) else 20
                for _ in np.arange(iterations):
                    image_aug, hm_aug = aug(image=image, heatmaps=hm)
                    hm_aug_arr_rs = ia.imresize_single_image(
                        hm_aug.get_arr(), (80, 100), interpolation="nearest")
                    overlap_true = np.sum(
                        np.logical_and(
                            (image_aug[..., 0] > 220),
                            (hm_aug_arr_rs[..., 0] > 0.9)
                        )
                    )
                    p_same_on_zero_cells = np.average(
                        (image_aug[..., 0] > 220)
                        == (hm_aug_arr_rs[..., 0] > 0.9))
                    # the 20x20 white square must overlap almost fully
                    assert overlap_true > 19*19
                    assert p_same_on_zero_cells > 0.98
    def test_image_segmap_alignment_with_translate_percent(self):
        # NOTE(review): despite the method name, this test measures
        # alignment between images and *heatmaps* -- confirm intended.
        image = np.zeros((80, 100, 3), dtype=np.uint8)
        image[40-10:40+10, 50-10:50+10, :] = 255
        hm = np.zeros((40, 50, 1), dtype=np.float32)
        hm[20-5:20+5, 25-5:25+5, 0] = 1.0
        hm = ia.HeatmapsOnImage(hm, shape=image.shape)
        # note that if x is an odd value (e.g. 1), the projection is a bit
        # less accurate as x=1 projected to a half-sized segmap is x=0.5,
        # leading to interpolation effects
        width = image.shape[1]
        xvals = [0/width, 2/width, 4/width, 6/width, 8/width, 10/width,
                 12/width, 14/width, 16/width, 18/width, 20/width,
                 [0/width, 10/width, 20/width]]
        for xvals_i in xvals:
            with self.subTest(x=xvals_i):
                aug = iaa.Affine(translate_percent={"x": xvals_i})
                iterations = 2 if ia.is_single_number(xvals_i) else 20
                for _ in np.arange(iterations):
                    image_aug, hm_aug = aug(image=image, heatmaps=hm)
                    hm_aug_arr_rs = ia.imresize_single_image(
                        hm_aug.get_arr(), (80, 100), interpolation="nearest")
                    overlap_true = np.sum(
                        np.logical_and(
                            (image_aug[..., 0] > 220),
                            (hm_aug_arr_rs[..., 0] > 0.9)
                        )
                    )
                    p_same_on_zero_cells = np.average(
                        (image_aug[..., 0] > 220)
                        == (hm_aug_arr_rs[..., 0] > 0.9))
                    assert overlap_true > 19*19
                    assert p_same_on_zero_cells > 0.98
    def test_image_keypoint_alignment(self):
        """Images and keypoints must sample the same rotation (0 or 180)."""
        aug = iaa.Affine(rotate=[0, 180], order=0)
        img = np.zeros((10, 10), dtype=np.uint8)
        img[0:5, 5] = 255
        img[2, 4:6] = 255
        # expected images for rot=0 and rot=180
        img_rot = [np.copy(img), np.copy(np.flipud(np.fliplr(img)))]
        kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=5, y=2)], shape=img.shape)
        kpsoi_rot = [(5, 2), (5, 10-2)]
        img_aug_indices = []
        kpsois_aug_indices = []
        for _ in sm.xrange(40):
            aug_det = aug.to_deterministic()
            imgs_aug = aug_det.augment_images([img, img])
            kpsois_aug = aug_det.augment_keypoints([kpsoi, kpsoi])
            assert kpsois_aug[0].shape == img.shape
            assert kpsois_aug[1].shape == img.shape
            for img_aug in imgs_aug:
                if np.array_equal(img_aug, img_rot[0]):
                    img_aug_indices.append(0)
                elif np.array_equal(img_aug, img_rot[1]):
                    img_aug_indices.append(1)
                else:
                    assert False
            for kpsoi_aug in kpsois_aug:
                similar_to_rot_0 = np.allclose(
                    [kpsoi_aug.keypoints[0].x, kpsoi_aug.keypoints[0].y],
                    kpsoi_rot[0])
                similar_to_rot_180 = np.allclose(
                    [kpsoi_aug.keypoints[0].x, kpsoi_aug.keypoints[0].y],
                    kpsoi_rot[1])
                if similar_to_rot_0:
                    kpsois_aug_indices.append(0)
                elif similar_to_rot_180:
                    kpsois_aug_indices.append(1)
                else:
                    assert False
        # per-sample rotation choice must match between images and keypoints
        assert np.array_equal(img_aug_indices, kpsois_aug_indices)
        assert len(set(img_aug_indices)) == 2
        assert len(set(kpsois_aug_indices)) == 2
    @classmethod
    def _test_image_cbaoi_alignment(cls, cbaoi, cbaoi_rot, augf_name):
        """Shared alignment check for coordinate-based augmentables."""
        aug = iaa.Affine(rotate=[0, 180], order=0)
        img = np.zeros((10, 10), dtype=np.uint8)
        img[0:5, 5] = 255
        img[2, 4:6] = 255
        img_rot = [np.copy(img), np.copy(np.flipud(np.fliplr(img)))]
        img_aug_indices = []
        cbaois_aug_indices = []
        for _ in sm.xrange(40):
            aug_det = aug.to_deterministic()
            imgs_aug = aug_det.augment_images([img, img])
            cbaois_aug = getattr(aug_det, augf_name)([cbaoi, cbaoi])
            assert cbaois_aug[0].shape == img.shape
            assert cbaois_aug[1].shape == img.shape
            if hasattr(cbaois_aug[0].items[0], "is_valid"):
                assert cbaois_aug[0].items[0].is_valid
                assert cbaois_aug[1].items[0].is_valid
            for img_aug in imgs_aug:
                if np.array_equal(img_aug, img_rot[0]):
                    img_aug_indices.append(0)
                elif np.array_equal(img_aug, img_rot[1]):
                    img_aug_indices.append(1)
                else:
                    assert False
            for cbaoi_aug in cbaois_aug:
                if cbaoi_aug.items[0].coords_almost_equals(cbaoi_rot[0]):
                    cbaois_aug_indices.append(0)
                elif cbaoi_aug.items[0].coords_almost_equals(cbaoi_rot[1]):
                    cbaois_aug_indices.append(1)
                else:
                    assert False
        assert np.array_equal(img_aug_indices, cbaois_aug_indices)
        assert len(set(img_aug_indices)) == 2
        assert len(set(cbaois_aug_indices)) == 2
    def test_image_polygon_alignment(self):
        psoi = ia.PolygonsOnImage([ia.Polygon([(1, 1), (9, 1), (5, 5)])],
                                  shape=(10, 10))
        psoi_rot = [
            psoi.polygons[0].deepcopy(),
            ia.Polygon([(10-1, 10-1), (10-9, 10-1), (10-5, 10-5)])
        ]
        self._test_image_cbaoi_alignment(psoi, psoi_rot,
                                         "augment_polygons")
    def test_image_line_string_alignment(self):
        lsoi = ia.LineStringsOnImage([ia.LineString([(1, 1), (9, 1), (5, 5)])],
                                     shape=(10, 10))
        lsoi_rot = [
            lsoi.items[0].deepcopy(),
            ia.LineString([(10-1, 10-1), (10-9, 10-1), (10-5, 10-5)])
        ]
        self._test_image_cbaoi_alignment(lsoi, lsoi_rot,
                                         "augment_line_strings")
    def test_image_bounding_box_alignment(self):
        bbsoi = ia.BoundingBoxesOnImage([
            ia.BoundingBox(x1=1, y1=1, x2=9, y2=5)], shape=(10, 10))
        bbsoi_rot = [
            bbsoi.items[0].deepcopy(),
            ia.BoundingBox(x1=10-9, y1=10-5, x2=10-1, y2=10-1)]
        self._test_image_cbaoi_alignment(bbsoi, bbsoi_rot,
                                         "augment_bounding_boxes")
class TestAffine_other_dtypes(unittest.TestCase):
@property
def translate_mask(self):
mask = np.zeros((3, 3), dtype=bool)
mask[1, 2] = True
return mask
@property
def image(self):
image = np.zeros((17, 17), dtype=bool)
image[2:15, 5:13] = True
return image
@property
def rot_mask_inner(self):
img_flipped = iaa.Fliplr(1.0)(image=self.image)
return img_flipped == 1
@property
def rot_mask_outer(self):
img_flipped = iaa.Fliplr(1.0)(image=self.image)
return img_flipped == 0
    @property
    def rot_thresh_inner(self):
        # minimum fraction of matching pixels inside the expected region
        return 0.9
    @property
    def rot_thresh_outer(self):
        # minimum fraction of matching pixels outside the expected region
        return 0.9
def rot_thresh_inner_float(self, order):
return 0.85 if order == 1 else 0.7
def rot_thresh_outer_float(self, order):
return 0.85 if order == 1 else 0.4
def test_translate_skimage_order_0_bool(self):
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant",
backend="skimage")
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert np.all(image_aug[~self.translate_mask] == 0)
assert np.all(image_aug[self.translate_mask] == 1)
def test_translate_skimage_order_0_uint_int(self):
dtypes = ["uint8", "uint16", "uint32", "int8", "int16", "int32"]
for dtype in dtypes:
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant",
backend="skimage")
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [1, 5, 10, 100, int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [1, 5, 10, 100, int(center_value),
int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value - 100, max_value]
for value in values:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(image_aug[~self.translate_mask] == 0)
assert np.all(image_aug[self.translate_mask] == value)
def test_translate_skimage_order_0_float(self):
# float
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant",
backend="skimage")
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(_isclose(image_aug[~self.translate_mask], 0))
assert np.all(_isclose(image_aug[self.translate_mask],
np.float128(value)))
def test_rotate_skimage_order_not_0_bool(self):
# skimage, order!=0 and rotate=180
for order in [1, 3, 4, 5]:
aug = iaa.Affine(rotate=180, order=order, mode="constant",
backend="skimage")
aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])
image = np.zeros((17, 17), dtype=bool)
image[2:15, 5:13] = True
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert (
np.sum(image_aug == image_exp)/image.size
) > self.rot_thresh_inner
def test_rotate_skimage_order_not_0_uint_int(self):
def _compute_matching(image_aug, image_exp, mask):
return np.sum(
np.isclose(image_aug[mask], image_exp[mask], rtol=0,
atol=1.001)
) / np.sum(mask)
dtypes = ["uint8", "uint16", "uint32", "int8", "int16", "int32"]
for dtype in dtypes:
for order in [1, 3, 4, 5]:
aug = iaa.Affine(rotate=180, order=order, mode="constant",
backend="skimage")
aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [1, 5, 10, 100, int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [1, 5, 10, 100, int(center_value),
int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value - 100, max_value]
for value in values:
with self.subTest(dtype=dtype, order=order, value=value):
image = np.zeros((17, 17), dtype=dtype)
image[2:15, 5:13] = value
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype.name == dtype
assert _compute_matching(
image_aug, image_exp, self.rot_mask_inner
) > self.rot_thresh_inner
assert _compute_matching(
image_aug, image_exp, self.rot_mask_outer
) > self.rot_thresh_outer
def test_rotate_skimage_order_not_0_float(self):
def _compute_matching(image_aug, image_exp, mask):
return np.sum(
_isclose(image_aug[mask], image_exp[mask])
) / np.sum(mask)
for order in [1, 3, 4, 5]:
dtypes = ["float16", "float32", "float64"]
if order == 5:
# float64 caused too many interpolation inaccuracies for
# order=5, not wrong but harder to test
dtypes = ["float16", "float32"]
for dtype in dtypes:
aug = iaa.Affine(rotate=180, order=order, mode="constant",
backend="skimage")
aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
if order not in [0, 1]:
atol = 1e-2
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
if order not in [3, 4]: # results in NaNs otherwise
values = values + [min_value, max_value]
for value in values:
with self.subTest(order=order, dtype=dtype, value=value):
image = np.zeros((17, 17), dtype=dtype)
image[2:15, 5:13] = value
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype.name == dtype
assert _compute_matching(
image_aug, image_exp, self.rot_mask_inner
) > self.rot_thresh_inner_float(order)
assert _compute_matching(
image_aug, image_exp, self.rot_mask_outer
) > self.rot_thresh_outer_float(order)
def test_translate_cv2_order_0_bool(self):
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant",
backend="cv2")
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert np.all(image_aug[~self.translate_mask] == 0)
assert np.all(image_aug[self.translate_mask] == 1)
def test_translate_cv2_order_0_uint_int(self):
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant",
backend="cv2")
dtypes = ["uint8", "uint16", "int8", "int16", "int32"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [1, 5, 10, 100, int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [1, 5, 10, 100, int(center_value),
int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value - 100, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(image_aug[~self.translate_mask] == 0)
assert np.all(image_aug[self.translate_mask] == value)
def test_translate_cv2_order_0_float(self):
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant",
backend="cv2")
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(_isclose(image_aug[~self.translate_mask], 0))
assert np.all(_isclose(image_aug[self.translate_mask],
np.float128(value)))
def test_rotate_cv2_order_1_and_3_bool(self):
# cv2, order=1 and rotate=180
for order in [1, 3]:
aug = iaa.Affine(rotate=180, order=order, mode="constant",
backend="cv2")
aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])
image = np.zeros((17, 17), dtype=bool)
image[2:15, 5:13] = True
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert (np.sum(image_aug == image_exp) / image.size) > 0.9
def test_rotate_cv2_order_1_and_3_uint_int(self):
# cv2, order=1 and rotate=180
for order in [1, 3]:
aug = iaa.Affine(rotate=180, order=order, mode="constant",
backend="cv2")
aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])
dtypes = ["uint8", "uint16", "int8", "int16"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [1, 5, 10, 100, int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value - 100, max_value]
values = values + [(-1) * value for value in values]
else:
values = [1, 5, 10, 100, int(center_value),
int(0.1 * max_value), int(0.2 * max_value),
int(0.5 * max_value), max_value - 100, max_value]
for value in values:
with self.subTest(order=order, dtype=dtype, value=value):
image = np.zeros((17, 17), dtype=dtype)
image[2:15, 5:13] = value
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype.name == dtype
assert (
np.sum(image_aug == image_exp) / image.size
) > 0.9
def test_rotate_cv2_order_1_and_3_float(self):
# cv2, order=1 and rotate=180
for order in [1, 3]:
aug = iaa.Affine(rotate=180, order=order, mode="constant",
backend="cv2")
aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
with self.subTest(order=order, dtype=dtype, value=value):
image = np.zeros((17, 17), dtype=dtype)
image[2:15, 5:13] = value
image_aug = aug.augment_image(image)
image_exp = aug_flip.augment_image(image)
assert image_aug.dtype.name == dtype
assert (
np.sum(_isclose(image_aug, image_exp)) / image.size
) > 0.9
class TestAffine_other(unittest.TestCase):
    """Miscellaneous Affine tests: odd channel counts, empty axes, pickling."""

    def test_unusual_channel_numbers(self):
        """Affine must handle images with atypical channel counts."""
        channel_counts = [4, 5, 512, 513]
        interp_orders = [0, 1, 3]
        backend_names = ["auto", "skimage", "cv2"]
        combos = itertools.product(channel_counts, interp_orders,
                                   backend_names)
        for nb_channels, order, backend in combos:
            with self.subTest(nb_channels=nb_channels, order=order,
                              backend=backend):
                aug = iaa.Affine(translate_px={"x": -1}, mode="constant",
                                 cval=255, order=order, backend=backend)
                img = np.full((3, 3, nb_channels), 128, dtype=np.uint8)
                hm_arr = np.full((3, 3, nb_channels), 0.5,
                                 dtype=np.float32)
                hm = ia.HeatmapsOnImage(hm_arr, shape=img.shape)
                img_aug, hm_aug = aug(image=img, heatmaps=hm)
                arr_aug = hm_aug.arr_0to1
                assert img_aug.shape == (3, 3, nb_channels)
                assert hm_aug.arr_0to1.shape == (3, 3, nb_channels)
                assert hm_aug.shape == img.shape
                # left columns keep the original value, the rightmost
                # column is filled with cval
                assert np.allclose(img_aug[:, 0:2, :], 128, rtol=0, atol=2)
                assert np.allclose(img_aug[:, 2:3, 0:3], 255, rtol=0, atol=2)
                assert np.allclose(img_aug[:, 2:3, 3:], 255, rtol=0, atol=2)
                assert np.allclose(arr_aug[:, 0:2, :], 0.5, rtol=0,
                                   atol=0.025)
                assert np.allclose(arr_aug[:, 2:3, :], 0.0, rtol=0,
                                   atol=0.025)

    def test_zero_sized_axes(self):
        """Images with a zero-sized axis must pass through with shape kept."""
        shapes = [(0, 0), (0, 1), (1, 0), (0, 1, 1), (1, 0, 1)]
        for fit_output, shape in itertools.product([False, True], shapes):
            with self.subTest(shape=shape, fit_output=fit_output):
                arr = np.zeros(shape, dtype=np.uint8)
                aug = iaa.Affine(rotate=45, fit_output=fit_output)
                arr_aug = aug(image=arr)
                assert arr_aug.dtype.name == "uint8"
                assert arr_aug.shape == shape

    def test_pickleable(self):
        """Augmenter must survive pickling with identical random behavior."""
        aug = iaa.Affine(scale=(0.9, 1.1), translate_px=(-4, 4),
                         rotate=(-10, 10), shear=(-10, 10), order=[0, 1])
        runtest_pickleable_uint8_img(aug, iterations=20)
class TestScaleX(unittest.TestCase):
    """Tests for the ScaleX shortcut augmenter."""

    def setUp(self):
        reseed()

    def test___init__(self):
        """Constructor must produce an Affine with only the x-scale set."""
        aug = iaa.ScaleX(1.5)
        assert isinstance(aug, iaa.Affine)
        assert np.isclose(aug.scale[0].value, 1.5)
        assert aug.order.value == 1
        assert aug.cval.value == 0
        assert aug.mode.value == "constant"
        assert aug.fit_output is False

    def test_integrationtest(self):
        """A single bright pixel must be stretched along x but not y."""
        img = np.zeros((10, 10), dtype=np.uint8)
        img[5, 5] = 255
        img_aug = iaa.ScaleX(4.0, order=0)(image=img)
        bright_cols = np.nonzero(np.max(img_aug, axis=0) > 200)[0]
        bright_rows = np.nonzero(np.max(img_aug, axis=1) > 200)[0]
        # Not >=3+1: a pixel spread over indices 0..3 covers four cells,
        # but the index difference 3-0 is only 3.
        assert bright_cols[-1] - bright_cols[0] >= 3
        assert bright_rows[-1] - bright_rows[0] < 1
class TestScaleY(unittest.TestCase):
    """Tests for the ScaleY shortcut augmenter."""

    def setUp(self):
        reseed()

    def test___init__(self):
        """Constructor must produce an Affine with only the y-scale set."""
        aug = iaa.ScaleY(1.5)
        assert isinstance(aug, iaa.Affine)
        assert np.isclose(aug.scale[1].value, 1.5)
        assert aug.order.value == 1
        assert aug.cval.value == 0
        assert aug.mode.value == "constant"
        assert aug.fit_output is False

    def test_integrationtest(self):
        """A single bright pixel must be stretched along y but not x."""
        img = np.zeros((10, 10), dtype=np.uint8)
        img[5, 5] = 255
        img_aug = iaa.ScaleY(4.0, order=0)(image=img)
        bright_cols = np.nonzero(np.max(img_aug, axis=0) > 200)[0]
        bright_rows = np.nonzero(np.max(img_aug, axis=1) > 200)[0]
        # Not >=3+1: a pixel spread over indices 0..3 covers four cells,
        # but the index difference 3-0 is only 3.
        assert bright_rows[-1] - bright_rows[0] >= 3
        assert bright_cols[-1] - bright_cols[0] < 1
class TestTranslateX(unittest.TestCase):
    """Tests for the TranslateX shortcut augmenter."""

    def setUp(self):
        reseed()

    def _assert_is_default_affine(self, aug, translate_value):
        # shared checks: TranslateX is an Affine with default settings
        # apart from the x-translation
        assert isinstance(aug, iaa.Affine)
        assert np.isclose(aug.translate[0].value, translate_value)
        assert aug.order.value == 1
        assert aug.cval.value == 0
        assert aug.mode.value == "constant"
        assert aug.fit_output is False

    def test___init___translate_percent(self):
        self._assert_is_default_affine(iaa.TranslateX(percent=0.5), 0.5)

    def test___init___translate_px(self):
        self._assert_is_default_affine(iaa.TranslateX(px=2), 2)

    def test___init___both_none(self):
        """Omitting both `percent` and `px` must raise an assertion."""
        with self.assertRaises(AssertionError) as caught:
            _ = iaa.TranslateX()
        assert "but both were None" in str(caught.exception)

    def _assert_shifted_right_by_half(self, aug):
        # a white image translated right by half its width should have a
        # black left half and a white right half
        img = np.full((50, 50), 255, dtype=np.uint8)
        img_aug = aug(image=img)
        expected = np.copy(img)
        expected[:, 0:25] = 0
        frac_match = np.average(np.isclose(img_aug, expected, atol=1.01))
        # tolerate up to one column of interpolation artifacts
        assert frac_match > (1.0 - (1/50) - 1e-4)

    def test_integrationtest_translate_percent(self):
        self._assert_shifted_right_by_half(
            iaa.TranslateX(percent=0.5, order=1, cval=0))

    def test_integrationtest_translate_px(self):
        self._assert_shifted_right_by_half(
            iaa.TranslateX(px=25, order=1, cval=0))
class TestTranslateY(unittest.TestCase):
    """Tests for the TranslateY shortcut augmenter."""

    def setUp(self):
        reseed()

    def _assert_is_default_affine(self, aug, translate_value):
        # shared checks: TranslateY is an Affine with default settings
        # apart from the y-translation
        assert isinstance(aug, iaa.Affine)
        assert np.isclose(aug.translate[1].value, translate_value)
        assert aug.order.value == 1
        assert aug.cval.value == 0
        assert aug.mode.value == "constant"
        assert aug.fit_output is False

    def test___init___translate_percent(self):
        self._assert_is_default_affine(iaa.TranslateY(percent=0.5), 0.5)

    def test___init___translate_px(self):
        self._assert_is_default_affine(iaa.TranslateY(px=2), 2)

    def test___init___both_none(self):
        """Omitting both `percent` and `px` must raise an assertion."""
        with self.assertRaises(AssertionError) as caught:
            _ = iaa.TranslateY()
        assert "but both were None" in str(caught.exception)

    def _assert_shifted_down_by_half(self, aug):
        # a white image translated down by half its height should have a
        # black top half and a white bottom half
        img = np.full((50, 50), 255, dtype=np.uint8)
        img_aug = aug(image=img)
        expected = np.copy(img)
        expected[0:25, :] = 0
        frac_match = np.average(np.isclose(img_aug, expected, atol=1.01))
        # tolerate up to one row of interpolation artifacts
        assert frac_match > (1.0 - (1/50) - 1e-4)

    def test_integrationtest_translate_percent(self):
        self._assert_shifted_down_by_half(
            iaa.TranslateY(percent=0.5, order=1, cval=0))

    def test_integrationtest_translate_px(self):
        self._assert_shifted_down_by_half(
            iaa.TranslateY(px=25, order=1, cval=0))
class TestRotate(unittest.TestCase):
    """Tests for the Rotate shortcut augmenter."""

    def setUp(self):
        reseed()

    def test___init___(self):
        """Constructor must produce an Affine with only rotation set."""
        aug = iaa.Rotate(rotate=45)
        assert isinstance(aug, iaa.Affine)
        assert np.isclose(aug.rotate.value, 45)
        assert aug.order.value == 1
        assert aug.cval.value == 0
        assert aug.mode.value == "constant"
        assert aug.fit_output is False

    def test_integrationtest(self):
        """A vertical line rotated by 90deg must end up horizontal."""
        img = np.zeros((40, 20), dtype=np.uint8)
        img[:, 10:11] = 255
        img_aug = iaa.Rotate(90, order=0)(image=img)
        # output shape is kept (fit_output defaults to False)
        assert img_aug.shape == (40, 20)
        # all 20 line pixels should land within rows 19..21
        assert np.isclose(np.sum(img_aug[19:22, :]), 255*20, atol=1)
class TestShearX(unittest.TestCase):
    """Tests for the ShearX shortcut augmenter."""

    def setUp(self):
        reseed()

    def test___init__(self):
        """Constructor must produce an Affine with only the x-shear set."""
        aug = iaa.ShearX(40)
        assert isinstance(aug, iaa.Affine)
        assert aug.shear[0].value == 40
        assert aug.order.value == 1
        assert aug.cval.value == 0
        assert aug.mode.value == "constant"
        assert aug.fit_output is False

    def test_integrationtest(self):
        """Marker pixels must move as expected under an x-shear."""
        def _marker_center(channel):
            # center of the bright region within one channel
            cols = np.nonzero(np.max(channel, axis=0) > 200)[0]
            rows = np.nonzero(np.max(channel, axis=1) > 200)[0]
            cx = cols[0] + (cols[-1] - cols[0]) / 2
            cy = rows[0] + (rows[-1] - rows[0]) / 2
            return cx, cy

        img = np.zeros((50, 50, 4), dtype=np.uint8)
        # one marker pixel per channel, at the corners of a rectangle
        img[10, 20, 0] = 255
        img[10, 30, 1] = 255
        img[40, 30, 2] = 255
        img[40, 20, 3] = 255
        img_aug = iaa.ShearX(30, order=0)(image=img)
        (x1, y1), (x2, y2), (x3, y3), (x4, y4) = [
            _marker_center(img_aug[..., c]) for c in range(4)
        ]
        # markers in the top half shift one way, bottom half the other;
        # markers on the same row stay on the same row
        assert x1 > 20
        assert y1 > 10
        assert y2 > 10
        assert np.isclose(y1, y2)
        assert x3 < 30
        assert y3 < 40
        assert y4 < 40
        assert np.isclose(y3, y4)
        assert not np.isclose(x1, x4)
        assert not np.isclose(x2, x3)
class TestShearY(unittest.TestCase):
    """Tests for the ShearY shortcut augmenter."""

    def setUp(self):
        reseed()

    def test___init__(self):
        """Constructor must produce an Affine with only the y-shear set."""
        aug = iaa.ShearY(40)
        assert isinstance(aug, iaa.Affine)
        assert aug.shear[1].value == 40
        assert aug.order.value == 1
        assert aug.cval.value == 0
        assert aug.mode.value == "constant"
        assert aug.fit_output is False

    def test_integrationtest(self):
        """Marker pixels must move as expected under a y-shear."""
        def _marker_center(channel):
            # center of the bright region within one channel
            cols = np.nonzero(np.max(channel, axis=0) > 200)[0]
            rows = np.nonzero(np.max(channel, axis=1) > 200)[0]
            cx = cols[0] + (cols[-1] - cols[0]) / 2
            cy = rows[0] + (rows[-1] - rows[0]) / 2
            return cx, cy

        img = np.zeros((50, 50, 4), dtype=np.uint8)
        # one marker pixel per channel, at the corners of a rectangle
        img[20, 10, 0] = 255
        img[20, 40, 1] = 255
        img[30, 40, 2] = 255
        img[30, 10, 3] = 255
        img_aug = iaa.ShearY(30, order=0)(image=img)
        (x1, y1), (x2, y2), (x3, y3), (x4, y4) = [
            _marker_center(img_aug[..., c]) for c in range(4)
        ]
        # markers in the left half shift one way, right half the other;
        # markers in the same column stay in the same column
        assert y1 < 20
        assert x1 > 10
        assert x4 > 10
        assert np.isclose(x1, x4)
        assert y2 > 20
        assert x2 < 40
        assert x3 < 40
        assert np.isclose(x2, x3)
        assert not np.isclose(y1, y2)
        assert not np.isclose(y3, y4)
# TODO migrate to unittest and split up tests or remove AffineCv2
def test_AffineCv2():
reseed()
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
_ = iaa.AffineCv2()
assert "is deprecated" in str(caught_warnings[0].message)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ia.DeprecationWarning)
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
outer_pixels = ([], [])
for i in sm.xrange(base_img.shape[0]):
for j in sm.xrange(base_img.shape[1]):
if i != j:
outer_pixels[0].append(i)
outer_pixels[1].append(j)
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)]
keypoints = [ia.KeypointsOnImage(kps, shape=base_img.shape)]
# no translation/scale/rotate/shear, shouldnt change nothing
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# ---------------------
# scale
# ---------------------
# zoom in
aug = iaa.AffineCv2(scale=1.75, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y > 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y > 2
# zoom in only on x axis
aug = iaa.AffineCv2(scale={"x": 1.75, "y": 1.0}, translate_px=0,
rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y == 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y == 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y == 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y == 2
# zoom in only on y axis
aug = iaa.AffineCv2(scale={"x": 1.0, "y": 1.75}, translate_px=0,
rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x == 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x == 2
assert observed[0].keypoints[2].y > 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x == 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x == 2
assert observed[0].keypoints[2].y > 2
# zoom out
# this one uses a 4x4 area of all 255, which is zoomed out to a 4x4
# area in which the center 2x2 area is 255
# zoom in should probably be adapted to this style
# no separate tests here for x/y axis, should work fine if zoom in
# works with that
aug = iaa.AffineCv2(scale=0.49, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.ones((4, 4, 1), dtype=np.uint8) * 255
images = np.array([image])
images_list = [image]
outer_pixels = ([], [])
for y in sm.xrange(4):
xs = sm.xrange(4) if y in [0, 3] else [0, 3]
for x in xs:
outer_pixels[0].append(y)
outer_pixels[1].append(x)
inner_pixels = ([1, 1, 2, 2], [1, 2, 1, 2])
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=3, y=0),
ia.Keypoint(x=0, y=3), ia.Keypoint(x=3, y=3)]
keypoints = [ia.KeypointsOnImage(kps, shape=image.shape)]
kps_aug = [ia.Keypoint(x=0.765, y=0.765),
ia.Keypoint(x=2.235, y=0.765),
ia.Keypoint(x=0.765, y=2.235),
ia.Keypoint(x=2.235, y=2.235)]
keypoints_aug = [ia.KeypointsOnImage(kps_aug, shape=image.shape)]
observed = aug.augment_images(images)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug_det.augment_images(images)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug.augment_images(images_list)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug_det.augment_images(images_list)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# varying scales
aug = iaa.AffineCv2(scale={"x": (0.5, 1.5), "y": (0.5, 1.5)},
translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 2, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=np.uint8) * 100
image = image[:, :, np.newaxis]
images = np.array([image])
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.8)
assert nb_changed_aug_det == 0
aug = iaa.AffineCv2(scale=iap.Uniform(0.7, 0.9))
assert isinstance(aug.scale, iap.Uniform)
assert isinstance(aug.scale.a, iap.Deterministic)
assert isinstance(aug.scale.b, iap.Deterministic)
assert 0.7 - 1e-8 < aug.scale.a.value < 0.7 + 1e-8
assert 0.9 - 1e-8 < aug.scale.b.value < 0.9 + 1e-8
# ---------------------
# translate
# ---------------------
# move one pixel to the right
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[1, 2] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)],
shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=1)],
shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move one pixel to the right
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0},
rotate=0, shear=0)
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0},
rotate=0, shear=0)
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with order=ALL
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0},
rotate=0, shear=0, order=ia.ALL)
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with order=list
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0},
rotate=0, shear=0, order=[0, 1, 2])
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with order=StochasticParameter
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 1, "y": 0},
rotate=0, shear=0, order=iap.Choice([0, 1, 2]))
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the bottom
aug = iaa.AffineCv2(scale=1.0, translate_px={"x": 0, "y": 1},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)],
shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)],
shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move 33% (one pixel) to the right
aug = iaa.AffineCv2(scale=1.0, translate_percent={"x": 0.3333, "y": 0},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[1, 2] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)],
shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=1)],
shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move 33% (one pixel) to the bottom
aug = iaa.AffineCv2(scale=1.0, translate_percent={"x": 0, "y": 0.3333},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)],
shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)],
shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# 0-1px to left/right and 0-1px to top/bottom
aug = iaa.AffineCv2(scale=1.0,
translate_px={"x": (-1, 1), "y": (-1, 1)},
rotate=0, shear=0)
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
centers_aug = np.copy(image).astype(np.int32) * 0
centers_aug_det = np.copy(image).astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert len(observed_aug[0].nonzero()[0]) == 1
assert len(observed_aug_det[0].nonzero()[0]) == 1
centers_aug += (observed_aug[0] > 0)
centers_aug_det += (observed_aug_det[0] > 0)
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
assert (centers_aug > int(nb_iterations * (1/9 * 0.6))).all()
assert (centers_aug < int(nb_iterations * (1/9 * 1.4))).all()
aug = iaa.AffineCv2(translate_percent=iap.Uniform(0.7, 0.9))
assert isinstance(aug.translate, iap.Uniform)
assert isinstance(aug.translate.a, iap.Deterministic)
assert isinstance(aug.translate.b, iap.Deterministic)
assert 0.7 - 1e-8 < aug.translate.a.value < 0.7 + 1e-8
assert 0.9 - 1e-8 < aug.translate.b.value < 0.9 + 1e-8
aug = iaa.AffineCv2(translate_px=iap.DiscreteUniform(1, 10))
assert isinstance(aug.translate, iap.DiscreteUniform)
assert isinstance(aug.translate.a, iap.Deterministic)
assert isinstance(aug.translate.b, iap.Deterministic)
assert aug.translate.a.value == 1
assert aug.translate.b.value == 10
# ---------------------
# translate heatmaps
# ---------------------
heatmaps = HeatmapsOnImage(
np.float32([
[0.0, 0.5, 0.75],
[0.0, 0.5, 0.75],
[0.75, 0.75, 0.75],
]),
shape=(3, 3, 3)
)
arr_expected_1px_right = np.float32([
[0.0, 0.0, 0.5],
[0.0, 0.0, 0.5],
[0.0, 0.75, 0.75],
])
aug = iaa.AffineCv2(translate_px={"x": 1})
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert np.isclose(observed.min_value, heatmaps.min_value,
rtol=0, atol=1e-6)
assert np.isclose(observed.max_value, heatmaps.max_value,
rtol=0, atol=1e-6)
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# should still use mode=constant cval=0 even when other settings chosen
aug = iaa.AffineCv2(translate_px={"x": 1}, cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert np.isclose(observed.min_value, heatmaps.min_value,
rtol=0, atol=1e-6)
assert np.isclose(observed.max_value, heatmaps.max_value,
rtol=0, atol=1e-6)
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
aug = iaa.AffineCv2(translate_px={"x": 1}, mode="replicate", cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert np.isclose(observed.min_value, heatmaps.min_value,
rtol=0, atol=1e-6)
assert np.isclose(observed.max_value, heatmaps.max_value,
rtol=0, atol=1e-6)
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# ---------------------
# translate segmaps
# ---------------------
segmaps = SegmentationMapsOnImage(
np.int32([
[0, 1, 2],
[0, 1, 2],
[2, 2, 2],
]),
shape=(3, 3, 3)
)
arr_expected_1px_right = np.int32([
[0, 0, 1],
[0, 0, 1],
[0, 2, 2],
])
aug = iaa.AffineCv2(translate_px={"x": 1})
observed = aug.augment_segmentation_maps([segmaps])[0]
assert observed.shape == segmaps.shape
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# should still use mode=constant cval=0 even when other settings chosen
aug = iaa.AffineCv2(translate_px={"x": 1}, cval=255)
observed = aug.augment_segmentation_maps([segmaps])[0]
assert observed.shape == segmaps.shape
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
aug = iaa.AffineCv2(translate_px={"x": 1}, mode="replicate", cval=255)
observed = aug.augment_segmentation_maps([segmaps])[0]
assert observed.shape == segmaps.shape
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# ---------------------
# rotate
# ---------------------
# rotate by 45 degrees
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=90, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, :] = 255
image_aug[0, 1] = 255
image_aug[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
kps = [ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=1)]
keypoints = [ia.KeypointsOnImage(kps, shape=base_img.shape)]
kps_aug = [ia.Keypoint(x=1, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=1, y=2)]
keypoints_aug = [ia.KeypointsOnImage(kps_aug, shape=base_img.shape)]
observed = aug.augment_images(images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# rotate by StochasticParameter
aug = iaa.AffineCv2(scale=1.0, translate_px=0,
rotate=iap.Uniform(10, 20), shear=0)
assert isinstance(aug.rotate, iap.Uniform)
assert isinstance(aug.rotate.a, iap.Deterministic)
assert aug.rotate.a.value == 10
assert isinstance(aug.rotate.b, iap.Deterministic)
assert aug.rotate.b.value == 20
# random rotation 0-364 degrees
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=(0, 364),
shear=0)
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
pixels_sums_aug = np.copy(image).astype(np.int32) * 0
pixels_sums_aug_det = np.copy(image).astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
pixels_sums_aug += (observed_aug[0] > 100)
pixels_sums_aug_det += (observed_aug_det[0] > 100)
assert nb_changed_aug >= int(nb_iterations * 0.9)
assert nb_changed_aug_det == 0
# center pixel, should always be white when rotating line around center
assert pixels_sums_aug[1, 1] > (nb_iterations * 0.98)
assert pixels_sums_aug[1, 1] < (nb_iterations * 1.02)
# outer pixels, should sometimes be white
# the values here had to be set quite tolerant, the middle pixels at
# top/left/bottom/right get more activation than expected
outer_pixels = ([0, 0, 0, 1, 1, 2, 2, 2], [0, 1, 2, 0, 2, 0, 1, 2])
assert (
pixels_sums_aug[outer_pixels] > int(nb_iterations * (2/8 * 0.4))
).all()
assert (
pixels_sums_aug[outer_pixels] < int(nb_iterations * (2/8 * 2.0))
).all()
# ---------------------
# shear
# ---------------------
# TODO
# shear by StochasticParameter
aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=0,
shear=iap.Uniform(10, 20))
assert isinstance(aug.shear, iap.Uniform)
assert isinstance(aug.shear.a, iap.Deterministic)
assert aug.shear.a.value == 10
assert isinstance(aug.shear.b, iap.Deterministic)
assert aug.shear.b.value == 20
# ---------------------
# cval
# ---------------------
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=128)
aug_det = aug.to_deterministic()
image = np.ones((3, 3, 1), dtype=np.uint8) * 255
image_aug = np.copy(image)
images = np.array([image])
images_list = [image]
observed = aug.augment_images(images)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug_det.augment_images(images)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug.augment_images(images_list)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
observed = aug_det.augment_images(images_list)
assert (observed[0] > 128 - 30).all()
assert (observed[0] < 128 + 30).all()
# random cvals
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=(0, 255))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
averages = []
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
averages.append(int(np.average(observed_aug)))
assert nb_changed_aug >= int(nb_iterations * 0.9)
assert nb_changed_aug_det == 0
# center pixel, should always be white when rotating line around center
assert pixels_sums_aug[1, 1] > (nb_iterations * 0.98)
assert pixels_sums_aug[1, 1] < (nb_iterations * 1.02)
assert len(set(averages)) > 200
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=ia.ALL)
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 255
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=iap.DiscreteUniform(1, 5))
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 1
assert aug.cval.b.value == 5
# ------------
# mode
# ------------
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=0, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=0, mode="replicate")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "replicate"
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=0, mode=["replicate", "reflect"])
assert isinstance(aug.mode, iap.Choice)
assert (
len(aug.mode.a) == 2
and "replicate" in aug.mode.a
and "reflect" in aug.mode.a)
aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,
cval=0,
mode=iap.Choice(["replicate", "reflect"]))
assert isinstance(aug.mode, iap.Choice)
assert (
len(aug.mode.a) == 2
and "replicate" in aug.mode.a
and "reflect" in aug.mode.a)
# ------------
# exceptions for bad inputs
# ------------
# scale
got_exception = False
try:
_ = iaa.AffineCv2(scale=False)
except Exception:
got_exception = True
assert got_exception
# translate_px
got_exception = False
try:
_ = iaa.AffineCv2(translate_px=False)
except Exception:
got_exception = True
assert got_exception
# translate_percent
got_exception = False
try:
_ = iaa.AffineCv2(translate_percent=False)
except Exception:
got_exception = True
assert got_exception
# rotate
got_exception = False
try:
_ = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=False,
shear=0, cval=0)
except Exception:
got_exception = True
assert got_exception
# shear
got_exception = False
try:
_ = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=0,
shear=False, cval=0)
except Exception:
got_exception = True
assert got_exception
# cval
got_exception = False
try:
_ = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0,
shear=0, cval=None)
except Exception:
got_exception = True
assert got_exception
# mode
got_exception = False
try:
_ = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0,
shear=0, cval=0, mode=False)
except Exception:
got_exception = True
assert got_exception
# non-existent order
got_exception = False
try:
_ = iaa.AffineCv2(order=-1)
except Exception:
got_exception = True
assert got_exception
# bad order datatype
got_exception = False
try:
_ = iaa.AffineCv2(order="test")
except Exception:
got_exception = True
assert got_exception
# ----------
# get_parameters
# ----------
aug = iaa.AffineCv2(scale=1, translate_px=2, rotate=3, shear=4,
order=1, cval=0, mode="constant")
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic) # scale
assert isinstance(params[1], iap.Deterministic) # translate
assert isinstance(params[2], iap.Deterministic) # rotate
assert isinstance(params[3], iap.Deterministic) # shear
assert params[0].value == 1 # scale
assert params[1].value == 2 # translate
assert params[2].value == 3 # rotate
assert params[3].value == 4 # shear
assert params[4].value == 1 # order
assert params[5].value == 0 # cval
assert params[6].value == "constant" # mode
class TestPiecewiseAffine(unittest.TestCase):
    def setUp(self):
        # Reset the global random number generators so that every test
        # starts from the same deterministic state.
        reseed()
@property
def image(self):
img = np.zeros((60, 80), dtype=np.uint8)
img[:, 9:11+1] = 255
img[:, 69:71+1] = 255
return img
@property
def mask(self):
return self.image > 0
@property
def heatmaps(self):
return HeatmapsOnImage((self.image / 255.0).astype(np.float32),
shape=(60, 80, 3))
@property
def segmaps(self):
return SegmentationMapsOnImage(self.mask.astype(np.int32),
shape=(60, 80, 3))
# -----
# __init__
# -----
def test___init___scale_is_list(self):
# scale as list
aug = iaa.PiecewiseAffine(scale=[0.01, 0.10], nb_rows=12, nb_cols=4)
assert isinstance(aug.scale, iap.Choice)
assert 0.01 - 1e-8 < aug.scale.a[0] < 0.01 + 1e-8
assert 0.10 - 1e-8 < aug.scale.a[1] < 0.10 + 1e-8
def test___init___scale_is_tuple(self):
# scale as tuple
aug = iaa.PiecewiseAffine(scale=(0.01, 0.10), nb_rows=12, nb_cols=4)
assert isinstance(aug.jitter.scale, iap.Uniform)
assert isinstance(aug.jitter.scale.a, iap.Deterministic)
assert isinstance(aug.jitter.scale.b, iap.Deterministic)
assert 0.01 - 1e-8 < aug.jitter.scale.a.value < 0.01 + 1e-8
assert 0.10 - 1e-8 < aug.jitter.scale.b.value < 0.10 + 1e-8
def test___init___scale_is_stochastic_parameter(self):
# scale as StochasticParameter
aug = iaa.PiecewiseAffine(scale=iap.Uniform(0.01, 0.10), nb_rows=12,
nb_cols=4)
assert isinstance(aug.jitter.scale, iap.Uniform)
assert isinstance(aug.jitter.scale.a, iap.Deterministic)
assert isinstance(aug.jitter.scale.b, iap.Deterministic)
assert 0.01 - 1e-8 < aug.jitter.scale.a.value < 0.01 + 1e-8
assert 0.10 - 1e-8 < aug.jitter.scale.b.value < 0.10 + 1e-8
def test___init___bad_datatype_for_scale_leads_to_failure(self):
# bad datatype for scale
got_exception = False
try:
_ = iaa.PiecewiseAffine(scale=False, nb_rows=12, nb_cols=4)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___nb_rows_is_list(self):
# rows as list
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=[4, 20], nb_cols=4)
assert isinstance(aug.nb_rows, iap.Choice)
assert aug.nb_rows.a[0] == 4
assert aug.nb_rows.a[1] == 20
def test___init___nb_rows_is_tuple(self):
# rows as tuple
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=(4, 20), nb_cols=4)
assert isinstance(aug.nb_rows, iap.DiscreteUniform)
assert isinstance(aug.nb_rows.a, iap.Deterministic)
assert isinstance(aug.nb_rows.b, iap.Deterministic)
assert aug.nb_rows.a.value == 4
assert aug.nb_rows.b.value == 20
def test___init___nb_rows_is_stochastic_parameter(self):
# rows as StochasticParameter
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=iap.DiscreteUniform(4, 20),
nb_cols=4)
assert isinstance(aug.nb_rows, iap.DiscreteUniform)
assert isinstance(aug.nb_rows.a, iap.Deterministic)
assert isinstance(aug.nb_rows.b, iap.Deterministic)
assert aug.nb_rows.a.value == 4
assert aug.nb_rows.b.value == 20
def test___init___bad_datatype_for_nb_rows_leads_to_failure(self):
# bad datatype for rows
got_exception = False
try:
_ = iaa.PiecewiseAffine(scale=0.05, nb_rows=False, nb_cols=4)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___nb_cols_is_list(self):
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=[4, 20])
assert isinstance(aug.nb_cols, iap.Choice)
assert aug.nb_cols.a[0] == 4
assert aug.nb_cols.a[1] == 20
def test___init___nb_cols_is_tuple(self):
# cols as tuple
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=(4, 20))
assert isinstance(aug.nb_cols, iap.DiscreteUniform)
assert isinstance(aug.nb_cols.a, iap.Deterministic)
assert isinstance(aug.nb_cols.b, iap.Deterministic)
assert aug.nb_cols.a.value == 4
assert aug.nb_cols.b.value == 20
def test___init___nb_cols_is_stochastic_parameter(self):
# cols as StochasticParameter
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4,
nb_cols=iap.DiscreteUniform(4, 20))
assert isinstance(aug.nb_cols, iap.DiscreteUniform)
assert isinstance(aug.nb_cols.a, iap.Deterministic)
assert isinstance(aug.nb_cols.b, iap.Deterministic)
assert aug.nb_cols.a.value == 4
assert aug.nb_cols.b.value == 20
def test___init___bad_datatype_for_nb_cols_leads_to_failure(self):
# bad datatype for cols
got_exception = False
try:
_aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___order_is_int(self):
# single int for order
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, order=0)
assert isinstance(aug.order, iap.Deterministic)
assert aug.order.value == 0
def test___init___order_is_list(self):
# list for order
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
order=[0, 1, 3])
assert isinstance(aug.order, iap.Choice)
assert all([v in aug.order.a for v in [0, 1, 3]])
def test___init___order_is_stochastic_parameter(self):
# StochasticParameter for order
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
order=iap.Choice([0, 1, 3]))
assert isinstance(aug.order, iap.Choice)
assert all([v in aug.order.a for v in [0, 1, 3]])
def test___init___order_is_all(self):
# ALL for order
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
order=ia.ALL)
assert isinstance(aug.order, iap.Choice)
assert all([v in aug.order.a for v in [0, 1, 3, 4, 5]])
def test___init___bad_datatype_for_order_leads_to_failure(self):
# bad datatype for order
got_exception = False
try:
_ = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
order=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___cval_is_list(self):
# cval as list
aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=5, nb_cols=5,
mode="constant", cval=[0, 10])
assert isinstance(aug.cval, iap.Choice)
assert aug.cval.a[0] == 0
assert aug.cval.a[1] == 10
def test___init___cval_is_tuple(self):
# cval as tuple
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
mode="constant", cval=(0, 10))
assert isinstance(aug.cval, iap.Uniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 10
def test___init___cval_is_stochastic_parameter(self):
# cval as StochasticParameter
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
mode="constant",
cval=iap.DiscreteUniform(0, 10))
assert isinstance(aug.cval, iap.DiscreteUniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 10
def test___init___cval_is_all(self):
# ALL as cval
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
mode="constant", cval=ia.ALL)
assert isinstance(aug.cval, iap.Uniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 255
def test___init___bad_datatype_for_cval_leads_to_failure(self):
# bas datatype for cval
got_exception = False
try:
_ = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, cval=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___mode_is_string(self):
# single string for mode
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
mode="nearest")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "nearest"
def test___init___mode_is_list(self):
# list for mode
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
mode=["nearest", "edge", "symmetric"])
assert isinstance(aug.mode, iap.Choice)
assert all([
v in aug.mode.a for v in ["nearest", "edge", "symmetric"]
])
def test___init___mode_is_stochastic_parameter(self):
# StochasticParameter for mode
aug = iaa.PiecewiseAffine(
scale=0.1, nb_rows=8, nb_cols=8,
mode=iap.Choice(["nearest", "edge", "symmetric"]))
assert isinstance(aug.mode, iap.Choice)
assert all([
v in aug.mode.a for v in ["nearest", "edge", "symmetric"]
])
def test___init___mode_is_all(self):
# ALL for mode
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
assert all([
v in aug.mode.a
for v
in ["constant", "edge", "symmetric", "reflect", "wrap"]
])
def test___init___bad_datatype_for_mode_leads_to_failure(self):
# bad datatype for mode
got_exception = False
try:
_ = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,
mode=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# -----
# scale
# -----
def test_scale_is_small_image(self):
# basic test
aug = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
observed = aug.augment_image(self.image)
assert (
100.0
< np.average(observed[self.mask])
< np.average(self.image[self.mask])
)
assert (
100.0-75.0
> np.average(observed[~self.mask])
> np.average(self.image[~self.mask])
)
def test_scale_is_small_image_absolute_scale(self):
aug = iaa.PiecewiseAffine(scale=1, nb_rows=12, nb_cols=4,
absolute_scale=True)
observed = aug.augment_image(self.image)
assert (
100.0
< np.average(observed[self.mask])
< np.average(self.image[self.mask])
)
assert (
100.0-75.0
> np.average(observed[~self.mask])
> np.average(self.image[~self.mask])
)
def test_scale_is_small_heatmaps(self):
# basic test, heatmaps
aug = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
observed = aug.augment_heatmaps([self.heatmaps])[0]
observed_arr = observed.get_arr()
assert observed.shape == self.heatmaps.shape
_assert_same_min_max(observed, self.heatmaps)
assert (
100.0/255.0
< np.average(observed_arr[self.mask])
< np.average(self.heatmaps.get_arr()[self.mask]))
assert (
(100.0-75.0)/255.0
> np.average(observed_arr[~self.mask])
> np.average(self.heatmaps.get_arr()[~self.mask]))
def test_scale_is_small_segmaps(self):
# basic test, segmaps
aug = iaa.PiecewiseAffine(scale=0.001, nb_rows=12, nb_cols=4)
observed = aug.augment_segmentation_maps([self.segmaps])[0]
observed_arr = observed.get_arr()
# left column starts at 9-11 and right one at 69-71
# result is 9-11 (curvy, i.e. like 50% filled) and 70-71 (straight,
# i.e. 100% filled). Reason for that is unclear, maybe a scikit-image
# problem.
observed_arr_left_col = observed_arr[:, 9:11+1]
observed_arr_right_col = observed_arr[:, 69:71+1]
assert observed.shape == self.segmaps.shape
assert np.average(observed_arr_left_col == 1) > 0.5
assert np.average(observed_arr_right_col == 1) > 0.5
assert np.average(observed_arr[~self.mask] == 0) > 0.9
def test_scale_is_zero_image(self):
# scale 0
aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)
observed = aug.augment_image(self.image)
assert np.array_equal(observed, self.image)
def test_scale_is_zero_image_absolute_scale(self):
aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4,
absolute_scale=True)
observed = aug.augment_image(self.image)
assert np.array_equal(observed, self.image)
def test_scale_is_zero_heatmaps(self):
# scale 0, heatmaps
aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)
observed = aug.augment_heatmaps([self.heatmaps])[0]
observed_arr = observed.get_arr()
assert observed.shape == self.heatmaps.shape
_assert_same_min_max(observed, self.heatmaps)
assert np.array_equal(observed_arr, self.heatmaps.get_arr())
def test_scale_is_zero_segmaps(self):
# scale 0, segmaps
aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)
observed = aug.augment_segmentation_maps([self.segmaps])[0]
observed_arr = observed.get_arr()
assert observed.shape == self.segmaps.shape
assert np.array_equal(observed_arr, self.segmaps.get_arr())
def test_scale_is_zero_keypoints(self):
# scale 0, keypoints
aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)
kps = [ia.Keypoint(x=5, y=3), ia.Keypoint(x=3, y=8)]
kpsoi = ia.KeypointsOnImage(kps, shape=(14, 14, 3))
kpsoi_aug = aug.augment_keypoints([kpsoi])[0]
assert_cbaois_equal(kpsoi_aug, kpsoi)
@classmethod
def _test_scale_is_zero_cbaoi(cls, cbaoi, augf_name):
aug = iaa.PiecewiseAffine(scale=0, nb_rows=10, nb_cols=10)
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi)
def test_scale_is_zero_polygons(self):
exterior = [(10, 10),
(70, 10), (70, 20), (70, 30), (70, 40),
(70, 50), (70, 60), (70, 70), (70, 80),
(70, 90),
(10, 90),
(10, 80), (10, 70), (10, 60), (10, 50),
(10, 40), (10, 30), (10, 20), (10, 10)]
poly = ia.Polygon(exterior)
psoi = ia.PolygonsOnImage([poly, poly.shift(x=1, y=1)],
shape=(100, 80))
self._test_scale_is_zero_cbaoi(psoi, "augment_polygons")
def test_scale_is_zero_line_strings(self):
coords = [(10, 10),
(70, 10), (70, 20), (70, 30), (70, 40),
(70, 50), (70, 60), (70, 70), (70, 80),
(70, 90),
(10, 90),
(10, 80), (10, 70), (10, 60), (10, 50),
(10, 40), (10, 30), (10, 20), (10, 10)]
ls = ia.LineString(coords)
lsoi = ia.LineStringsOnImage([ls, ls.shift(x=1, y=1)],
shape=(100, 80))
self._test_scale_is_zero_cbaoi(lsoi, "augment_line_strings")
def test_scale_is_zero_bounding_boxes(self):
bb = ia.BoundingBox(x1=10, y1=10, x2=70, y2=20)
bbsoi = ia.BoundingBoxesOnImage([bb, bb.shift(x=1, y=1)],
shape=(100, 80))
self._test_scale_is_zero_cbaoi(bbsoi, "augment_bounding_boxes")
def test_scale_stronger_values_should_increase_changes_images(self):
# stronger scale should lead to stronger changes
aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
observed1 = aug1.augment_image(self.image)
observed2 = aug2.augment_image(self.image)
assert (
np.average(observed1[~self.mask])
< np.average(observed2[~self.mask])
)
def test_scale_stronger_values_should_increase_changes_images_abs(self):
aug1 = iaa.PiecewiseAffine(scale=1, nb_rows=12, nb_cols=4,
absolute_scale=True)
aug2 = iaa.PiecewiseAffine(scale=10, nb_rows=12, nb_cols=4,
absolute_scale=True)
observed1 = aug1.augment_image(self.image)
observed2 = aug2.augment_image(self.image)
assert (
np.average(observed1[~self.mask])
< np.average(observed2[~self.mask])
)
def test_scale_stronger_values_should_increase_changes_heatmaps(self):
# stronger scale should lead to stronger changes, heatmaps
aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
observed1 = aug1.augment_heatmaps([self.heatmaps])[0]
observed2 = aug2.augment_heatmaps([self.heatmaps])[0]
observed1_arr = observed1.get_arr()
observed2_arr = observed2.get_arr()
assert observed1.shape == self.heatmaps.shape
assert observed2.shape == self.heatmaps.shape
_assert_same_min_max(observed1, self.heatmaps)
_assert_same_min_max(observed2, self.heatmaps)
assert (
np.average(observed1_arr[~self.mask])
< np.average(observed2_arr[~self.mask])
)
def test_scale_stronger_values_should_increase_changes_heatmaps_abs(self):
aug1 = iaa.PiecewiseAffine(scale=1, nb_rows=12, nb_cols=4,
absolute_scale=True)
aug2 = iaa.PiecewiseAffine(scale=10, nb_rows=12, nb_cols=4,
absolute_scale=True)
observed1 = aug1.augment_heatmaps([self.heatmaps])[0]
observed2 = aug2.augment_heatmaps([self.heatmaps])[0]
observed1_arr = observed1.get_arr()
observed2_arr = observed2.get_arr()
assert observed1.shape == self.heatmaps.shape
assert observed2.shape == self.heatmaps.shape
_assert_same_min_max(observed1, self.heatmaps)
_assert_same_min_max(observed2, self.heatmaps)
assert (
np.average(observed1_arr[~self.mask])
< np.average(observed2_arr[~self.mask])
)
def test_scale_stronger_values_should_increase_changes_segmaps(self):
# stronger scale should lead to stronger changes, segmaps
aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
observed1 = aug1.augment_segmentation_maps([self.segmaps])[0]
observed2 = aug2.augment_segmentation_maps([self.segmaps])[0]
observed1_arr = observed1.get_arr()
observed2_arr = observed2.get_arr()
assert observed1.shape == self.segmaps.shape
assert observed2.shape == self.segmaps.shape
assert (
np.average(observed1_arr[~self.mask] == 0)
> np.average(observed2_arr[~self.mask] == 0)
)
def test_scale_alignment_between_images_and_heatmaps(self):
# strong scale, measure alignment between images and heatmaps
aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
aug_det = aug.to_deterministic()
img_aug = aug_det.augment_image(self.image)
hm_aug = aug_det.augment_heatmaps([self.heatmaps])[0]
img_aug_mask = img_aug > 255*0.1
hm_aug_mask = hm_aug.arr_0to1 > 0.1
same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
assert hm_aug.shape == (60, 80, 3)
_assert_same_min_max(hm_aug, self.heatmaps)
assert (same / img_aug_mask.size) >= 0.98
    def test_scale_alignment_between_images_and_segmaps(self):
        """Images and segmaps must be warped by identical transformations."""
        # strong scale, measure alignment between images and segmaps
        aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
        aug_det = aug.to_deterministic()
        img_aug = aug_det.augment_image(self.image)
        segmap_aug = aug_det.augment_segmentation_maps([self.segmaps])[0]
        # binarize both outputs and compare them pixel-wise
        img_aug_mask = (img_aug > 255*0.1)
        segmap_aug_mask = (segmap_aug.arr == 1)
        same = np.sum(img_aug_mask == segmap_aug_mask[:, :, 0])
        assert segmap_aug.shape == (60, 80, 3)
        # slightly lower overlap threshold than the heatmap variant (0.98)
        assert (same / img_aug_mask.size) >= 0.9
    def test_scale_alignment_between_images_and_smaller_heatmaps(self):
        """Smaller-than-image heatmaps must still be warped in alignment."""
        # strong scale, measure alignment between images and heatmaps
        # heatmaps here smaller than image
        aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
        aug_det = aug.to_deterministic()
        # heatmap array is 30x50, declared on a 60x80x3 image
        heatmaps_small = ia.HeatmapsOnImage(
            (
                ia.imresize_single_image(
                    self.image, (30, 40+10), interpolation="cubic"
                ) / 255.0
            ).astype(np.float32),
            shape=(60, 80, 3)
        )
        img_aug = aug_det.augment_image(self.image)
        hm_aug = aug_det.augment_heatmaps([heatmaps_small])[0]
        # resize the heatmap back to image size before comparing masks
        img_aug_mask = img_aug > 255*0.1
        hm_aug_mask = ia.imresize_single_image(
            hm_aug.arr_0to1, (60, 80), interpolation="cubic"
        ) > 0.1
        same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
        assert hm_aug.shape == (60, 80, 3)
        assert hm_aug.arr_0to1.shape == (30, 40+10, 1)
        assert (same / img_aug_mask.size) >= 0.9  # seems to be 0.948 actually
    def test_scale_alignment_between_images_and_smaller_heatmaps_abs(self):
        """Smaller heatmaps must stay aligned when using absolute_scale."""
        # image is 60x80, so a scale of 8 is about 0.1*max(60,80)
        aug = iaa.PiecewiseAffine(scale=8, nb_rows=12, nb_cols=4,
                                  absolute_scale=True)
        aug_det = aug.to_deterministic()
        # heatmap array is 30x50, declared on a 60x80x3 image
        heatmaps_small = ia.HeatmapsOnImage(
            (
                ia.imresize_single_image(
                    self.image, (30, 40+10), interpolation="cubic"
                ) / 255.0
            ).astype(np.float32),
            shape=(60, 80, 3)
        )
        img_aug = aug_det.augment_image(self.image)
        hm_aug = aug_det.augment_heatmaps([heatmaps_small])[0]
        # resize the heatmap back to image size before comparing masks
        img_aug_mask = img_aug > 255*0.1
        hm_aug_mask = ia.imresize_single_image(
            hm_aug.arr_0to1, (60, 80), interpolation="cubic"
        ) > 0.1
        same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
        assert hm_aug.shape == (60, 80, 3)
        assert hm_aug.arr_0to1.shape == (30, 40+10, 1)
        assert (same / img_aug_mask.size) >= 0.9  # seems to be 0.930 actually
    def test_scale_alignment_between_images_and_smaller_segmaps(self):
        """Smaller-than-image segmaps must still be warped in alignment."""
        # strong scale, measure alignment between images and segmaps
        # segmaps here smaller than image
        aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
        aug_det = aug.to_deterministic()
        # segmap array is 30x50, declared on a 60x80x3 image
        segmaps_small = SegmentationMapsOnImage(
            (
                ia.imresize_single_image(
                    self.image, (30, 40+10), interpolation="cubic"
                ) > 100
            ).astype(np.int32),
            shape=(60, 80, 3)
        )
        img_aug = aug_det.augment_image(self.image)
        segmaps_aug = aug_det.augment_segmentation_maps([segmaps_small])[0]
        # resize the segmap back to image size before comparing masks
        img_aug_mask = img_aug > 255*0.1
        segmaps_aug_mask = (
            ia.imresize_single_image(
                segmaps_aug.arr, (60, 80),
                interpolation="nearest"
            ) == 1
        )
        same = np.sum(img_aug_mask == segmaps_aug_mask[:, :, 0])
        assert segmaps_aug.shape == (60, 80, 3)
        assert segmaps_aug.arr.shape == (30, 40+10, 1)
        assert (same / img_aug_mask.size) >= 0.9
    def test_scale_alignment_between_images_and_keypoints(self):
        """Keypoints must land on the same spots as their drawn image dots."""
        # strong scale, measure alignment between images and keypoints
        aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
        aug_det = aug.to_deterministic()
        kps = [ia.Keypoint(x=5, y=15), ia.Keypoint(x=17, y=12)]
        kpsoi = ia.KeypointsOnImage(kps, shape=(24, 30, 3))
        # render the keypoints into an image, then augment both identically
        img_kps = np.zeros((24, 30, 3), dtype=np.uint8)
        img_kps = kpsoi.draw_on_image(img_kps, color=[255, 255, 255])
        img_kps_aug = aug_det.augment_image(img_kps)
        kpsoi_aug = aug_det.augment_keypoints([kpsoi])[0]
        assert kpsoi_aug.shape == (24, 30, 3)
        # a 3x3 patch around each augmented keypoint should contain a
        # bright pixel from the identically augmented keypoint drawing
        bb1 = ia.BoundingBox(
            x1=kpsoi_aug.keypoints[0].x-1, y1=kpsoi_aug.keypoints[0].y-1,
            x2=kpsoi_aug.keypoints[0].x+1, y2=kpsoi_aug.keypoints[0].y+1)
        bb2 = ia.BoundingBox(
            x1=kpsoi_aug.keypoints[1].x-1, y1=kpsoi_aug.keypoints[1].y-1,
            x2=kpsoi_aug.keypoints[1].x+1, y2=kpsoi_aug.keypoints[1].y+1)
        patch1 = bb1.extract_from_image(img_kps_aug)
        patch2 = bb2.extract_from_image(img_kps_aug)
        assert np.max(patch1) > 150
        assert np.max(patch2) > 150
        # ... while most of the image stays dark
        assert np.average(img_kps_aug) < 40
# this test was apparently added later on (?) without noticing that
# a similar test already existed
    def test_scale_alignment_between_images_and_keypoints2(self):
        """Augmented keypoints must still lie on the bright image columns."""
        img = np.zeros((100, 80), dtype=np.uint8)
        # two bright vertical bars; all keypoints are placed on them
        img[:, 9:11+1] = 255
        img[:, 69:71+1] = 255
        kps = [ia.Keypoint(x=10, y=20), ia.Keypoint(x=10, y=40),
               ia.Keypoint(x=70, y=20), ia.Keypoint(x=70, y=40)]
        kpsoi = ia.KeypointsOnImage(kps, shape=img.shape)
        aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
        aug_det = aug.to_deterministic()
        observed_img = aug_det.augment_image(img)
        observed_kpsoi = aug_det.augment_keypoints([kpsoi])
        # the keypoints must actually have moved ...
        assert not keypoints_equal([kpsoi], observed_kpsoi)
        # ... but should still lie on (warped) bright pixels
        for kp in observed_kpsoi[0].keypoints:
            assert observed_img[int(kp.y), int(kp.x)] > 0
    @classmethod
    def _test_scale_alignment_between_images_and_poly_or_line_strings(
            cls, cba_class, cbaoi_class, augf_name):
        """Shared image/CBA alignment check for polygons and line strings."""
        img = np.zeros((100, 80), dtype=np.uint8)
        # two bright vertical bars; all coordinates are placed on them
        img[:, 10-5:10+5] = 255
        img[:, 70-5:70+5] = 255
        coords = [(10, 10),
                  (70, 10), (70, 20), (70, 30), (70, 40),
                  (70, 50), (70, 60), (70, 70), (70, 80),
                  (70, 90),
                  (10, 90),
                  (10, 80), (10, 70), (10, 60), (10, 50),
                  (10, 40), (10, 30), (10, 20), (10, 10)]
        cba = cba_class(coords)
        cbaoi = cbaoi_class([cba, cba.shift(x=1, y=1)],
                            shape=img.shape)
        aug = iaa.PiecewiseAffine(scale=0.03, nb_rows=10, nb_cols=10)
        aug_det = aug.to_deterministic()
        observed_imgs = aug_det.augment_images([img, img])
        observed_cbaois = getattr(aug_det, augf_name)([cbaoi, cbaoi])
        for observed_img, observed_cbaoi in zip(observed_imgs, observed_cbaois):
            assert observed_cbaoi.shape == img.shape
            for cba_aug in observed_cbaoi.items:
                if hasattr(cba_aug, "is_valid"):
                    # polygons must stay valid (e.g. not self-intersecting)
                    assert cba_aug.is_valid
                # every augmented point must still lie on a bright pixel
                for point_aug in cba_aug.coords:
                    x = int(np.round(point_aug[0]))
                    y = int(np.round(point_aug[1]))
                    assert observed_img[y, x] > 0
def test_scale_alignment_between_images_and_polygons(self):
self._test_scale_alignment_between_images_and_poly_or_line_strings(
ia.Polygon, ia.PolygonsOnImage, "augment_polygons")
def test_scale_alignment_between_images_and_line_strings(self):
self._test_scale_alignment_between_images_and_poly_or_line_strings(
ia.LineString, ia.LineStringsOnImage, "augment_line_strings")
    def test_scale_alignment_between_images_and_bounding_boxes(self):
        """Augmented BBs must roughly enclose the augmented image content."""
        img = np.zeros((100, 80), dtype=np.uint8)
        # draw single bright pixels at the BB's corner positions
        s = 0
        img[10-s:10+s+1, 20-s:20+s+1] = 255
        img[60-s:60+s+1, 70-s:70+s+1] = 255
        bb = ia.BoundingBox(y1=10, x1=20, y2=60, x2=70)
        bbsoi = ia.BoundingBoxesOnImage([bb], shape=img.shape)
        aug = iaa.PiecewiseAffine(scale=0.03, nb_rows=10, nb_cols=10)
        observed_imgs, observed_bbsois = aug(
            images=[img], bounding_boxes=[bbsoi])
        for observed_img, observed_bbsoi in zip(observed_imgs, observed_bbsois):
            assert observed_bbsoi.shape == img.shape
            # derive the expected BB from the warped bright pixels
            observed_img_x = np.max(observed_img, axis=0)
            observed_img_y = np.max(observed_img, axis=1)
            nonz_x = np.nonzero(observed_img_x)[0]
            nonz_y = np.nonzero(observed_img_y)[0]
            img_x1 = min(nonz_x)
            img_x2 = max(nonz_x)
            img_y1 = min(nonz_y)
            img_y2 = max(nonz_y)
            expected = ia.BoundingBox(x1=img_x1, y1=img_y1,
                                      x2=img_x2, y2=img_y2)
            for bb_aug in observed_bbsoi.bounding_boxes:
                # we don't expect perfect IoU here, because the actual
                # underlying KP aug used distance maps
                # most IoUs seem to end up in the range 0.9-0.95
                assert bb_aug.iou(expected) > 0.8
    def test_scale_is_list(self):
        """A list for scale must sample from exactly the listed values."""
        aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)
        aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)
        aug = iaa.PiecewiseAffine(scale=[0.01, 0.10], nb_rows=12, nb_cols=4)
        # reference statistics (average brightness outside of the mask)
        # for each of the two fixed scales
        avg1 = np.average([
            np.average(
                aug1.augment_image(self.image)
                * (~self.mask).astype(np.float32)
            )
            for _ in sm.xrange(3)
        ])
        avg2 = np.average([
            np.average(
                aug2.augment_image(self.image)
                * (~self.mask).astype(np.float32)
            )
            for _ in sm.xrange(3)
        ])
        seen = [0, 0]
        for _ in sm.xrange(15):
            observed = aug.augment_image(self.image)
            avg = np.average(observed * (~self.mask).astype(np.float32))
            # assign each sample to the closer reference statistic
            diff1 = abs(avg - avg1)
            diff2 = abs(avg - avg2)
            if diff1 < diff2:
                seen[0] += 1
            else:
                seen[1] += 1
        # both listed scales must have been sampled at least once
        assert seen[0] > 0
        assert seen[1] > 0
# -----
# rows and cols
# -----
@classmethod
def _compute_observed_std_ygrad_in_mask(cls, observed, mask):
grad_vert = (
observed[1:, :].astype(np.float32)
- observed[:-1, :].astype(np.float32)
)
grad_vert = grad_vert * (~mask[1:, :]).astype(np.float32)
return np.std(grad_vert)
def _compute_std_ygrad_in_mask(self, aug, image, mask, nb_iterations):
stds = []
for _ in sm.xrange(nb_iterations):
observed = aug.augment_image(image)
stds.append(
self._compute_observed_std_ygrad_in_mask(observed, mask)
)
return np.average(stds)
def test_nb_rows_affects_images(self):
# verify effects of rows
aug1 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.05, nb_rows=30, nb_cols=4)
std1 = self._compute_std_ygrad_in_mask(aug1, self.image, self.mask, 3)
std2 = self._compute_std_ygrad_in_mask(aug2, self.image, self.mask, 3)
assert std1 < std2
    def test_nb_rows_is_list_affects_images(self):
        """A list for nb_rows must sample from the listed row counts."""
        # rows as list
        aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=[4, 20], nb_cols=4)
        aug1 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=4)
        aug2 = iaa.PiecewiseAffine(scale=0.05, nb_rows=30, nb_cols=4)
        # reference gradient-stds for few and many rows
        std1 = self._compute_std_ygrad_in_mask(aug1, self.image, self.mask, 3)
        std2 = self._compute_std_ygrad_in_mask(aug2, self.image, self.mask, 3)
        seen = [0, 0]
        for _ in sm.xrange(20):
            observed = aug.augment_image(self.image)
            std = self._compute_observed_std_ygrad_in_mask(observed, self.mask)
            # assign each sample to the closer reference statistic
            diff1 = abs(std - std1)
            diff2 = abs(std - std2)
            if diff1 < diff2:
                seen[0] += 1
            else:
                seen[1] += 1
        # both listed values must have been sampled at least once
        assert seen[0] > 0
        assert seen[1] > 0
def test_nb_cols_affects_images(self):
# verify effects of cols
image = self.image.T
mask = self.mask.T
aug1 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=4)
aug2 = iaa.PiecewiseAffine(scale=0.05, nb_rows=20, nb_cols=4)
std1 = self._compute_std_ygrad_in_mask(aug1, image, mask, 3)
std2 = self._compute_std_ygrad_in_mask(aug2, image, mask, 3)
assert std1 < std2
    def test_nb_cols_is_list_affects_images(self):
        """A list for nb_cols must sample from the listed column counts."""
        # cols as list; transposed so the y-gradient helper measures the
        # axis affected by nb_cols
        image = self.image.T
        mask = self.mask.T
        aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=[4, 20])
        aug1 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=4)
        aug2 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=30)
        # reference gradient-stds for few and many columns
        std1 = self._compute_std_ygrad_in_mask(aug1, image, mask, 3)
        std2 = self._compute_std_ygrad_in_mask(aug2, image, mask, 3)
        seen = [0, 0]
        for _ in sm.xrange(20):
            observed = aug.augment_image(image)
            std = self._compute_observed_std_ygrad_in_mask(observed, mask)
            # assign each sample to the closer reference statistic
            diff1 = abs(std - std1)
            diff2 = abs(std - std2)
            if diff1 < diff2:
                seen[0] += 1
            else:
                seen[1] += 1
        # both listed values must have been sampled at least once
        assert seen[0] > 0
        assert seen[1] > 0
# -----
# order
# -----
# TODO
# -----
# cval
# -----
    def test_cval_is_zero(self):
        """mode="constant" with cval=0 must fill new areas with black."""
        # since scikit-image 0.16.2 and scipy 1.4.0(!), this test requires
        # several iterations to find one image that required filling with cval
        found = False
        for _ in np.arange(50):
            img = np.zeros((16, 16, 3), dtype=np.uint8) + 255
            aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=10, nb_cols=10,
                                      mode="constant", cval=0)
            observed = aug.augment_image(img)
            # any all-black pixel in the all-white input must come from cval
            if np.sum([observed[:, :] == [0, 0, 0]]) > 0:
                found = True
                break
        assert found
def test_cval_should_be_ignored_by_heatmaps(self):
# cval as deterministic, heatmaps should always use cval=0
heatmaps = HeatmapsOnImage(
np.zeros((50, 50, 1), dtype=np.float32), shape=(50, 50, 3))
aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=10, nb_cols=10,
mode="constant", cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert np.sum([observed.get_arr()[:, :] >= 0.01]) == 0
def test_cval_should_be_ignored_by_segmaps(self):
# cval as deterministic, segmaps should always use cval=0
segmaps = SegmentationMapsOnImage(
np.zeros((50, 50, 1), dtype=np.int32), shape=(50, 50, 3))
aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=10, nb_cols=10,
mode="constant", cval=255)
observed = aug.augment_segmentation_maps([segmaps])[0]
assert np.sum([observed.get_arr()[:, :] > 0]) == 0
    def test_cval_is_list(self):
        """A list for cval must sample fill values from exactly that list."""
        # cval as list
        img = np.zeros((20, 20), dtype=np.uint8) + 255
        aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=5, nb_cols=5,
                                  mode="constant", cval=[0, 10])
        seen = [0, 0, 0]
        for _ in sm.xrange(30):
            observed = aug.augment_image(img)
            nb_0 = np.sum([observed[:, :] == 0])
            nb_10 = np.sum([observed[:, :] == 10])
            if nb_0 > 0:
                seen[0] += 1
            elif nb_10 > 0:
                seen[1] += 1
            else:
                # run produced no filled pixels at all
                seen[2] += 1
        # both cvals should appear often; runs without any filling stay rare
        assert seen[0] > 5
        assert seen[1] > 5
        assert seen[2] <= 4
# -----
# mode
# -----
# TODO
# ---------
# remaining keypoints tests
# ---------
def test_keypoints_outside_of_image(self):
# keypoints outside of image
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
kps = [ia.Keypoint(x=-10, y=-20)]
kpsoi = ia.KeypointsOnImage(kps, shape=(10, 10, 3))
observed = aug.augment_keypoints(kpsoi)
assert_cbaois_equal(observed, kpsoi)
def test_keypoints_empty(self):
# empty keypoints
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
kpsoi = ia.KeypointsOnImage([], shape=(10, 10, 3))
observed = aug.augment_keypoints(kpsoi)
assert_cbaois_equal(observed, kpsoi)
# ---------
# remaining polygons tests
# ---------
def test_polygons_outside_of_image(self):
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=10, nb_cols=10)
exterior = [(-10, -10), (110, -10), (110, 90), (-10, 90)]
poly = ia.Polygon(exterior)
psoi = ia.PolygonsOnImage([poly], shape=(10, 10, 3))
observed = aug.augment_polygons(psoi)
assert_cbaois_equal(observed, psoi)
def test_empty_polygons(self):
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
psoi = ia.PolygonsOnImage([], shape=(10, 10, 3))
observed = aug.augment_polygons(psoi)
assert_cbaois_equal(observed, psoi)
# ---------
# remaining line string tests
# ---------
def test_line_strings_outside_of_image(self):
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=10, nb_cols=10)
coords = [(-10, -10), (110, -10), (110, 90), (-10, 90)]
ls = ia.LineString(coords)
lsoi = ia.LineStringsOnImage([ls], shape=(10, 10, 3))
observed = aug.augment_line_strings(lsoi)
assert_cbaois_equal(observed, lsoi)
def test_empty_line_strings(self):
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
lsoi = ia.LineStringsOnImage([], shape=(10, 10, 3))
observed = aug.augment_line_strings(lsoi)
assert_cbaois_equal(observed, lsoi)
# ---------
# remaining bounding box tests
# ---------
def test_bounding_boxes_outside_of_image(self):
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=10, nb_cols=10)
bbs = ia.BoundingBox(x1=-10, y1=-10, x2=15, y2=15)
bbsoi = ia.BoundingBoxesOnImage([bbs], shape=(10, 10, 3))
observed = aug.augment_bounding_boxes(bbsoi)
assert_cbaois_equal(observed, bbsoi)
def test_empty_bounding_boxes(self):
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)
bbsoi = ia.BoundingBoxesOnImage([], shape=(10, 10, 3))
observed = aug.augment_bounding_boxes(bbsoi)
assert_cbaois_equal(observed, bbsoi)
# ---------
# zero-sized axes
# ---------
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=2, nb_cols=2)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_zero_sized_axes_absolute_scale(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.PiecewiseAffine(scale=5, nb_rows=2, nb_cols=2,
absolute_scale=True)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
# ---------
# other methods
# ---------
def test_get_parameters(self):
aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=10, order=1,
cval=2, mode="constant",
absolute_scale=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert isinstance(params[3], iap.Deterministic)
assert isinstance(params[4], iap.Deterministic)
assert isinstance(params[5], iap.Deterministic)
assert params[6] is False
assert 0.1 - 1e-8 < params[0].value < 0.1 + 1e-8
assert params[1].value == 8
assert params[2].value == 10
assert params[3].value == 1
assert params[4].value == 2
assert params[5].value == "constant"
# ---------
# other dtypes
# ---------
@property
def other_dtypes_mask(self):
mask = np.zeros((21, 21), dtype=bool)
mask[:, 7:13] = True
return mask
def test_other_dtypes_bool(self):
aug = iaa.PiecewiseAffine(scale=0.2, nb_rows=8, nb_cols=4, order=0,
mode="constant")
image = np.zeros((21, 21), dtype=bool)
image[self.other_dtypes_mask] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert not np.all(image_aug == 1)
assert np.any(image_aug[~self.other_dtypes_mask] == 1)
    def test_other_dtypes_uint_int(self):
        """Integer images must keep their dtype and be visibly distorted."""
        aug = iaa.PiecewiseAffine(scale=0.2, nb_rows=8, nb_cols=4, order=0,
                                  mode="constant")
        dtypes = ["uint8", "uint16", "uint32", "int8", "int16", "int32"]
        for dtype in dtypes:
            min_value, center_value, max_value = \
                iadt.get_value_range_of_dtype(dtype)
            # cover small, medium and near-maximal magnitudes
            if np.dtype(dtype).kind == "i":
                values = [1, 5, 10, 100, int(0.1 * max_value),
                          int(0.2 * max_value), int(0.5 * max_value),
                          max_value-100, max_value]
                # signed types: also check the negative counterparts
                values = values + [(-1)*value for value in values]
            else:
                values = [1, 5, 10, 100, int(center_value),
                          int(0.1 * max_value), int(0.2 * max_value),
                          int(0.5 * max_value), max_value-100, max_value]
            for value in values:
                with self.subTest(dtype=dtype, value=value):
                    image = np.zeros((21, 21), dtype=dtype)
                    image[:, 7:13] = value
                    image_aug = aug.augment_image(image)
                    assert image_aug.dtype.name == dtype
                    # some pixels must have changed and some must have
                    # moved outside of the original column band
                    assert not np.all(image_aug == value)
                    assert np.any(image_aug[~self.other_dtypes_mask] == value)
def test_other_dtypes_float(self):
aug = iaa.PiecewiseAffine(scale=0.2, nb_rows=8, nb_cols=4, order=0,
mode="constant")
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
def _isclose(a, b):
atol = 1e-4 if dtype == "float16" else 1e-8
return np.isclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
1000 ** (isize - 1)]
values = values + [(-1) * value for value in values]
values = values + [min_value, max_value]
for value in values:
with self.subTest(dtype=dtype, value=value):
image = np.zeros((21, 21), dtype=dtype)
image[:, 7:13] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
# TODO switch all other tests from float(...) to
# np.float128(...) pattern, seems to be more accurate
# for 128bit floats
assert not np.all(_isclose(image_aug, np.float128(value)))
assert np.any(_isclose(image_aug[~self.other_dtypes_mask],
np.float128(value)))
def test_pickleable(self):
aug = iaa.PiecewiseAffine(scale=0.2, nb_rows=4, nb_cols=4, seed=1)
runtest_pickleable_uint8_img(aug, iterations=3, shape=(25, 25, 1))
class TestPerspectiveTransform(unittest.TestCase):
    def setUp(self):
        # reseed the global RNG so every test sees the same random stream
        reseed()
@property
def image(self):
img = np.zeros((30, 30), dtype=np.uint8)
img[10:20, 10:20] = 255
return img
@property
def heatmaps(self):
return HeatmapsOnImage((self.image / 255.0).astype(np.float32),
shape=self.image.shape)
@property
def segmaps(self):
return SegmentationMapsOnImage((self.image > 0).astype(np.int32),
shape=self.image.shape)
# --------
# __init__
# --------
def test___init___scale_is_tuple(self):
# tuple for scale
aug = iaa.PerspectiveTransform(scale=(0.1, 0.2))
assert isinstance(aug.jitter.scale, iap.Uniform)
assert isinstance(aug.jitter.scale.a, iap.Deterministic)
assert isinstance(aug.jitter.scale.b, iap.Deterministic)
assert 0.1 - 1e-8 < aug.jitter.scale.a.value < 0.1 + 1e-8
assert 0.2 - 1e-8 < aug.jitter.scale.b.value < 0.2 + 1e-8
def test___init___scale_is_list(self):
# list for scale
aug = iaa.PerspectiveTransform(scale=[0.1, 0.2, 0.3])
assert isinstance(aug.jitter.scale, iap.Choice)
assert len(aug.jitter.scale.a) == 3
assert 0.1 - 1e-8 < aug.jitter.scale.a[0] < 0.1 + 1e-8
assert 0.2 - 1e-8 < aug.jitter.scale.a[1] < 0.2 + 1e-8
assert 0.3 - 1e-8 < aug.jitter.scale.a[2] < 0.3 + 1e-8
def test___init___scale_is_stochastic_parameter(self):
# StochasticParameter for scale
aug = iaa.PerspectiveTransform(scale=iap.Choice([0.1, 0.2, 0.3]))
assert isinstance(aug.jitter.scale, iap.Choice)
assert len(aug.jitter.scale.a) == 3
assert 0.1 - 1e-8 < aug.jitter.scale.a[0] < 0.1 + 1e-8
assert 0.2 - 1e-8 < aug.jitter.scale.a[1] < 0.2 + 1e-8
assert 0.3 - 1e-8 < aug.jitter.scale.a[2] < 0.3 + 1e-8
def test___init___bad_datatype_for_scale_leads_to_failure(self):
# bad datatype for scale
got_exception = False
try:
_ = iaa.PerspectiveTransform(scale=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___mode_is_all(self):
aug = iaa.PerspectiveTransform(cval=0, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
def test___init___mode_is_string(self):
aug = iaa.PerspectiveTransform(cval=0, mode="replicate")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "replicate"
def test___init___mode_is_list(self):
aug = iaa.PerspectiveTransform(cval=0, mode=["replicate", "constant"])
assert isinstance(aug.mode, iap.Choice)
assert (
len(aug.mode.a) == 2
and "replicate" in aug.mode.a
and "constant" in aug.mode.a)
def test___init___mode_is_stochastic_parameter(self):
aug = iaa.PerspectiveTransform(
cval=0, mode=iap.Choice(["replicate", "constant"]))
assert isinstance(aug.mode, iap.Choice)
assert (
len(aug.mode.a) == 2
and "replicate" in aug.mode.a
and "constant" in aug.mode.a)
# --------
# image, heatmaps, segmaps
# --------
    def test_image_without_keep_size(self):
        """With keep_size=False the output shrinks to the inner rectangle."""
        # without keep_size
        aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
        # fix the jitter so that exactly 20% is cut away on every side
        aug.jitter = iap.Deterministic(0.2)
        observed = aug.augment_image(self.image)
        y1 = int(30*0.2)
        y2 = int(30*0.8)
        x1 = int(30*0.2)
        x2 = int(30*0.8)
        expected = self.image[y1:y2, x1:x2]
        # shapes may deviate by 1px due to rounding
        assert all([
            abs(s1-s2) <= 1 for s1, s2 in zip(observed.shape, expected.shape)
        ])
        if observed.shape != expected.shape:
            observed = ia.imresize_single_image(
                observed, expected.shape[0:2], interpolation="cubic")
        # differences seem to mainly appear around the border of the inner
        # rectangle, possibly due to interpolation
        assert np.average(
            np.abs(observed.astype(np.int32) - expected.astype(np.int32))
        ) < 30.0
    def test_image_heatmaps_alignment_without_keep_size(self):
        """Heatmaps must be cropped exactly like the image itself."""
        aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
        # fix the jitter so that exactly 20% is cut away on every side
        aug.jitter = iap.Deterministic(0.2)
        hm = HeatmapsOnImage(
            self.image.astype(np.float32)/255.0,
            shape=(30, 30)
        )
        observed = aug.augment_image(self.image)
        hm_aug = aug.augment_heatmaps([hm])[0]
        y1 = int(30*0.2)
        y2 = int(30*0.8)
        x1 = int(30*0.2)
        x2 = int(30*0.8)
        expected = (y2 - y1, x2 - x1)
        # shapes may deviate by 1px due to rounding
        assert all([
            abs(s1-s2) <= 1
            for s1, s2
            in zip(hm_aug.shape, expected)
        ])
        assert all([
            abs(s1-s2) <= 1
            for s1, s2
            in zip(hm_aug.arr_0to1.shape, expected + (1,))
        ])
        # binarize both outputs and compare them pixel-wise
        img_aug_mask = observed > 255*0.1
        hm_aug_mask = hm_aug.arr_0to1 > 0.1
        same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
        assert (same / img_aug_mask.size) >= 0.99
    def test_image_segmaps_alignment_without_keep_size(self):
        """Segmaps must be cropped exactly like the image itself."""
        aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
        # fix the jitter so that exactly 20% is cut away on every side
        aug.jitter = iap.Deterministic(0.2)
        segmaps = SegmentationMapsOnImage(
            (self.image > 100).astype(np.int32),
            shape=(30, 30)
        )
        observed = aug.augment_image(self.image)
        segmaps_aug = aug.augment_segmentation_maps([segmaps])[0]
        y1 = int(30*0.2)
        y2 = int(30*0.8)
        x1 = int(30*0.2)
        x2 = int(30*0.8)
        expected = (y2 - y1, x2 - x1)
        # shapes may deviate by 1px due to rounding
        assert all([
            abs(s1-s2) <= 1
            for s1, s2
            in zip(segmaps_aug.shape, expected)
        ])
        assert all([
            abs(s1-s2) <= 1
            for s1, s2
            in zip(segmaps_aug.arr.shape, expected + (1,))
        ])
        # binarize both outputs and compare them pixel-wise
        img_aug_mask = observed > 255*0.5
        segmaps_aug_mask = segmaps_aug.arr > 0
        same = np.sum(img_aug_mask == segmaps_aug_mask[:, :, 0])
        assert (same / img_aug_mask.size) >= 0.99
def test_consecutive_calls_produce_different_results(self):
# PerspectiveTransform works with random_state.copy(), so we
# test explicitly that it doesn't always use the same samples
aug = iaa.PerspectiveTransform((0.0, 0.2))
image = np.mod(np.arange(16*16), 255).astype(np.uint8).reshape((16, 16))
nb_same = 0
last_image = aug(image=image)
for _ in np.arange(100):
image_aug = aug(image=image)
nb_same += int(np.array_equal(image_aug, last_image))
assert nb_same <= 1
    def test_heatmaps_smaller_than_image_without_keep_size(self):
        """Smaller heatmaps must be cropped proportionally (keep_size=False)."""
        # without keep_size, different heatmap size
        aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
        # fix the jitter so that exactly 20% is cut away on every side
        aug.jitter = iap.Deterministic(0.2)
        height, width = 300, 200
        height_small, width_small = 150, 100
        # expected crop boundaries at both the large and the small scale
        y1 = int(height*0.2)
        y2 = int(height*0.8)
        x1 = int(width*0.2)
        x2 = int(width*0.8)
        y1_small = int(height_small*0.2)
        y2_small = int(height_small*0.8)
        x1_small = int(width_small*0.2)
        x2_small = int(width_small*0.8)
        img_small = ia.imresize_single_image(
            self.image,
            (height_small, width_small),
            interpolation="cubic")
        hm = ia.HeatmapsOnImage(
            img_small.astype(np.float32)/255.0,
            shape=(height, width))
        img_aug = aug.augment_image(self.image)
        hm_aug = aug.augment_heatmaps([hm])[0]
        expected = (y2 - y1, x2 - x1)
        expected_small = (y2_small - y1_small, x2_small - x1_small, 1)
        # shapes may deviate by 1px due to rounding
        assert all([
            abs(s1-s2) <= 1
            for s1, s2
            in zip(hm_aug.shape, expected)
        ])
        assert all([
            abs(s1-s2) <= 1
            for s1, s2
            in zip(hm_aug.arr_0to1.shape, expected_small)
        ])
        # resize the heatmap up to the image size, binarize and compare
        img_aug_mask = img_aug > 255*0.1
        hm_aug_mask = ia.imresize_single_image(
            hm_aug.arr_0to1, img_aug.shape[0:2], interpolation="linear"
        ) > 0.1
        same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
        assert (same / img_aug_mask.size) >= 0.96
    def test_segmaps_smaller_than_image_without_keep_size(self):
        """Smaller segmaps must be cropped proportionally (keep_size=False)."""
        # without keep_size, different segmap size
        aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
        # fix the jitter so that exactly 20% is cut away on every side
        aug.jitter = iap.Deterministic(0.2)
        # expected crop boundaries at the image and the segmap scale
        y1 = int(30*0.2)
        y2 = int(30*0.8)
        x1 = int(30*0.2)
        x2 = int(30*0.8)
        x1_small = int(25*0.2)
        x2_small = int(25*0.8)
        y1_small = int(20*0.2)
        y2_small = int(20*0.8)
        img_small = ia.imresize_single_image(
            self.image,
            (20, 25),
            interpolation="cubic")
        seg = SegmentationMapsOnImage(
            (img_small > 100).astype(np.int32),
            shape=(30, 30))
        img_aug = aug.augment_image(self.image)
        seg_aug = aug.augment_segmentation_maps([seg])[0]
        expected = (y2 - y1, x2 - x1)
        expected_small = (y2_small - y1_small, x2_small - x1_small, 1)
        # shapes may deviate by 1px due to rounding
        assert all([
            abs(s1-s2) <= 1
            for s1, s2
            in zip(seg_aug.shape, expected)
        ])
        assert all([
            abs(s1-s2) <= 1
            for s1, s2
            in zip(seg_aug.arr.shape, expected_small)
        ])
        # resize the segmap up to the image size, binarize and compare
        img_aug_mask = img_aug > 255*0.5
        seg_aug_mask = ia.imresize_single_image(
            seg_aug.arr, img_aug.shape[0:2], interpolation="nearest") > 0
        same = np.sum(img_aug_mask == seg_aug_mask[:, :, 0])
        assert (same / img_aug_mask.size) >= 0.92
    def test_image_with_keep_size(self):
        """With keep_size=True the crop is resized back to the input size."""
        # with keep_size
        aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
        aug.jitter = iap.Deterministic(0.2)
        observed = aug.augment_image(self.image)
        # expected result: inner 60% of the image, resized back to 30x30
        expected = self.image[int(30*0.2):int(30*0.8),
                              int(30*0.2):int(30*0.8)]
        expected = ia.imresize_single_image(
            expected,
            self.image.shape[0:2],
            interpolation="cubic")
        assert observed.shape == self.image.shape
        # differences seem to mainly appear around the border of the inner
        # rectangle, possibly due to interpolation
        assert np.average(
            np.abs(observed.astype(np.int32) - expected.astype(np.int32))
        ) < 30.0
def test_heatmaps_with_keep_size(self):
# with keep_size, heatmaps
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
aug.jitter = iap.Deterministic(0.2)
observed = aug.augment_heatmaps([self.heatmaps])[0]
heatmaps_arr = self.heatmaps.get_arr()
expected = heatmaps_arr[int(30*0.2):int(30*0.8),
int(30*0.2):int(30*0.8)]
expected = ia.imresize_single_image(
(expected*255).astype(np.uint8),
self.image.shape[0:2],
interpolation="cubic")
expected = (expected / 255.0).astype(np.float32)
assert observed.shape == self.heatmaps.shape
_assert_same_min_max(observed, self.heatmaps)
# differences seem to mainly appear around the border of the inner
# rectangle, possibly due to interpolation
assert np.average(np.abs(observed.get_arr() - expected)) < 30.0
    def test_segmaps_with_keep_size(self):
        """With keep_size=True segmaps are cropped and resized back."""
        # with keep_size, segmaps
        aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
        aug.jitter = iap.Deterministic(0.2)
        observed = aug.augment_segmentation_maps([self.segmaps])[0]
        segmaps_arr = self.segmaps.get_arr()
        # expected result: inner 60%, resized back to 30x30, re-binarized
        expected = segmaps_arr[int(30*0.2):int(30*0.8),
                               int(30*0.2):int(30*0.8)]
        expected = ia.imresize_single_image(
            (expected*255).astype(np.uint8),
            self.image.shape[0:2],
            interpolation="cubic")
        expected = (expected > 255*0.5).astype(np.int32)
        assert observed.shape == self.segmaps.shape
        # at most 5% of the pixels may differ (border interpolation effects)
        assert np.average(observed.get_arr() != expected) < 0.05
    def test_image_rgb_with_keep_size(self):
        """keep_size must behave identically for every image and channel."""
        # with keep_size, RGB images
        aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
        aug.jitter = iap.Deterministic(0.2)
        # two identical 3-channel versions of the base image
        imgs = np.tile(self.image[np.newaxis, :, :, np.newaxis], (2, 1, 1, 3))
        observed = aug.augment_images(imgs)
        for img_idx in sm.xrange(2):
            for c in sm.xrange(3):
                observed_i = observed[img_idx, :, :, c]
                # expected per channel: inner 60%, resized back to 30x30
                expected = imgs[img_idx,
                                int(30*0.2):int(30*0.8),
                                int(30*0.2):int(30*0.8),
                                c]
                expected = ia.imresize_single_image(
                    expected, imgs.shape[1:3], interpolation="cubic")
                assert observed_i.shape == imgs.shape[1:3]
                # differences seem to mainly appear around the border of the
                # inner rectangle, possibly due to interpolation
                assert np.average(
                    np.abs(
                        observed_i.astype(np.int32) - expected.astype(np.int32)
                    )
                ) < 30.0
# --------
# keypoints
# --------
    def test_keypoints_without_keep_size(self):
        """Keypoints must shift by the cropped-away offset (keep_size=False)."""
        # keypoint augmentation without keep_size
        # TODO deviations of around 0.4-0.7 in this and the next test (between
        # expected and observed coordinates) -- why?
        kps = [ia.Keypoint(x=10, y=10), ia.Keypoint(x=14, y=11)]
        kpsoi = ia.KeypointsOnImage(kps, shape=self.image.shape)
        aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
        aug.jitter = iap.Deterministic(0.2)
        observed = aug.augment_keypoints([kpsoi])
        # 20% of the 30px image is cropped away on each side
        kps_expected = [
            ia.Keypoint(x=10-0.2*30, y=10-0.2*30),
            ia.Keypoint(x=14-0.2*30, y=11-0.2*30)
        ]
        gen = zip(observed[0].keypoints, kps_expected)
        # TODO deviations of around 0.5 here from expected values, why?
        for kp_observed, kp_expected in gen:
            assert kp_observed.coords_almost_equals(
                kp_expected, max_distance=1.5)
    def test_keypoints_with_keep_size(self):
        """Keypoints must additionally be rescaled back (keep_size=True)."""
        # keypoint augmentation with keep_size
        kps = [ia.Keypoint(x=10, y=10), ia.Keypoint(x=14, y=11)]
        kpsoi = ia.KeypointsOnImage(kps, shape=self.image.shape)
        aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
        aug.jitter = iap.Deterministic(0.2)
        observed = aug.augment_keypoints([kpsoi])
        # shift by the crop offset, then rescale from the 60% crop back to
        # the full 30px image size
        kps_expected = [
            ia.Keypoint(x=((10-0.2*30)/(30*0.6))*30,
                        y=((10-0.2*30)/(30*0.6))*30),
            ia.Keypoint(x=((14-0.2*30)/(30*0.6))*30,
                        y=((11-0.2*30)/(30*0.6))*30)
        ]
        gen = zip(observed[0].keypoints, kps_expected)
        # TODO deviations of around 0.5 here from expected values, why?
        for kp_observed, kp_expected in gen:
            assert kp_observed.coords_almost_equals(
                kp_expected, max_distance=1.5)
    def test_image_keypoint_alignment(self):
        """Augmented keypoints must stay on their (warped) bright markers."""
        img = np.zeros((100, 100), dtype=np.uint8)
        # bright markers at each keypoint position
        img[25-3:25+3, 25-3:25+3] = 255
        img[50-3:50+3, 25-3:25+3] = 255
        img[75-3:75+3, 25-3:25+3] = 255
        img[25-3:25+3, 75-3:75+3] = 255
        img[50-3:50+3, 75-3:75+3] = 255
        img[75-3:75+3, 75-3:75+3] = 255
        img[50-3:75+3, 50-3:75+3] = 255
        kps = [
            ia.Keypoint(y=25, x=25), ia.Keypoint(y=50, x=25),
            ia.Keypoint(y=75, x=25), ia.Keypoint(y=25, x=75),
            ia.Keypoint(y=50, x=75), ia.Keypoint(y=75, x=75),
            ia.Keypoint(y=50, x=50)
        ]
        kpsoi = ia.KeypointsOnImage(kps, shape=img.shape)
        aug = iaa.PerspectiveTransform(scale=(0.05, 0.15), keep_size=True)
        for _ in sm.xrange(10):
            aug_det = aug.to_deterministic()
            imgs_aug = aug_det.augment_images([img, img])
            kpsois_aug = aug_det.augment_keypoints([kpsoi, kpsoi])
            for img_aug, kpsoi_aug in zip(imgs_aug, kpsois_aug):
                assert kpsoi_aug.shape == img.shape
                for kp_aug in kpsoi_aug.keypoints:
                    x, y = int(np.round(kp_aug.x)), int(np.round(kp_aug.y))
                    # only check keypoints that remained within the image
                    if 0 <= x < img.shape[1] and 0 <= y < img.shape[0]:
                        assert img_aug[y, x] > 10
def test_empty_keypoints(self):
# test empty keypoints
kpsoi = ia.KeypointsOnImage([], shape=(20, 10, 3))
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
observed = aug.augment_keypoints(kpsoi)
assert_cbaois_equal(observed, kpsoi)
# --------
# abstract test methods for polygons and line strings
# --------
    @classmethod
    def _test_cbaois_without_keep_size(cls, cba_class, cbaoi_class, augf_name):
        # Shared test for coordinate-based augmentables (polygons, line
        # strings): with keep_size=False and jitter fixed at 0.2, the output
        # canvas shrinks by 2*0.2*30=12px per axis and all coordinates
        # shift by -0.2*30=-6 on both axes.
        points = np.float32([
            [10, 10],
            [25, 10],
            [25, 25],
            [10, 25]
        ])
        cbaoi = cbaoi_class([cba_class(points)], shape=(30, 30, 3))
        aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
        aug.jitter = iap.Deterministic(0.2)
        observed = getattr(aug, augf_name)(cbaoi)
        assert observed.shape == (30 - 12, 30 - 12, 3)
        assert len(observed.items) == 1
        # is_valid only exists on some CBA types (e.g. polygons)
        if hasattr(observed.items[0], "is_valid"):
            assert observed.items[0].is_valid
        points_expected = np.copy(points)
        points_expected[:, 0] -= 0.2 * 30
        points_expected[:, 1] -= 0.2 * 30
        # TODO deviations of around 0.5 here from expected values, why?
        assert observed.items[0].coords_almost_equals(
            points_expected, max_distance=1.5)
    @classmethod
    def _test_cbaois_with_keep_size(cls, cba_class, cbaoi_class, augf_name):
        # polygon augmentation with keep_size
        #
        # Shared test for coordinate-based augmentables: with jitter fixed
        # at 0.2 the crop (6px per side) is resized back to 30px, so
        # coordinates map via ((p - 6) / 18) * 30.
        points = np.float32([
            [10, 10],
            [25, 10],
            [25, 25],
            [10, 25]
        ])
        cbaoi = cbaoi_class([cba_class(points)], shape=(30, 30, 3))
        aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
        aug.jitter = iap.Deterministic(0.2)
        observed = getattr(aug, augf_name)(cbaoi)
        assert observed.shape == (30, 30, 3)
        assert len(observed.items) == 1
        # is_valid only exists on some CBA types (e.g. polygons)
        if hasattr(observed.items[0], "is_valid"):
            assert observed.items[0].is_valid
        points_expected = np.copy(points)
        points_expected[:, 0] = (
            (points_expected[:, 0] - 0.2 * 30) / (30 * 0.6)
        ) * 30
        points_expected[:, 1] = (
            (points_expected[:, 1] - 0.2 * 30) / (30 * 0.6)
        ) * 30
        # TODO deviations of around 0.5 here from expected values, why?
        assert observed.items[0].coords_almost_equals(
            points_expected, max_distance=2.5)
    @classmethod
    def _test_image_cba_alignment(cls, cba_class, cbaoi_class, augf_name):
        # Alignment test: white 6x6 blobs are drawn at the CBA coordinates;
        # after augmenting images and CBAs with the same deterministic
        # augmenter, every augmented coordinate that is still inside the
        # image must lie near a bright pixel.
        img = np.zeros((100, 100), dtype=np.uint8)
        img[25-3:25+3, 25-3:25+3] = 255
        img[50-3:50+3, 25-3:25+3] = 255
        img[75-3:75+3, 25-3:25+3] = 255
        img[25-3:25+3, 75-3:75+3] = 255
        img[50-3:50+3, 75-3:75+3] = 255
        img[75-3:75+3, 75-3:75+3] = 255
        points = [
            [25, 25],
            [75, 25],
            [75, 50],
            [75, 75],
            [25, 75],
            [25, 50]
        ]
        cbaoi = cbaoi_class([cba_class(points)], shape=img.shape)
        aug = iaa.PerspectiveTransform(scale=0.1, keep_size=True)
        for _ in sm.xrange(10):
            aug_det = aug.to_deterministic()
            imgs_aug = aug_det.augment_images([img] * 4)
            cbaois_aug = getattr(aug_det, augf_name)([cbaoi] * 4)
            for img_aug, cbaoi_aug in zip(imgs_aug, cbaois_aug):
                assert cbaoi_aug.shape == img.shape
                for cba_aug in cbaoi_aug.items:
                    if hasattr(cba_aug, "is_valid"):
                        assert cba_aug.is_valid
                    for x, y in cba_aug.coords:
                        if 0 <= x < img.shape[1] and 0 <= y < img.shape[0]:
                            # inspect a small neighborhood around the coord
                            bb = ia.BoundingBox(x1=x-2, x2=x+2, y1=y-2, y2=y+2)
                            img_ex = bb.extract_from_image(img_aug)
                            assert np.any(img_ex > 10)
@classmethod
def _test_empty_cba(cls, cbaoi, augf_name):
# test empty polygons
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi)
# --------
# polygons
# --------
def test_polygons_without_keep_size(self):
self._test_cbaois_without_keep_size(ia.Polygon, ia.PolygonsOnImage,
"augment_polygons")
def test_polygons_with_keep_size(self):
self._test_cbaois_with_keep_size(ia.Polygon, ia.PolygonsOnImage,
"augment_polygons")
def test_image_polygon_alignment(self):
self._test_image_cba_alignment(ia.Polygon, ia.PolygonsOnImage,
"augment_polygons")
def test_empty_polygons(self):
psoi = ia.PolygonsOnImage([], shape=(20, 10, 3))
self._test_empty_cba(psoi, "augment_polygons")
    def test_polygons_under_extreme_scale_values(self):
        # test extreme scales
        # For each scale, the jitter is fixed to that same value and the
        # polygon must stay valid (4 points) after augmentation.
        # TODO when setting .min_height and .min_width in PerspectiveTransform
        # to 1x1, at least one of the output polygons was invalid and had
        # only 3 instead of the expected 4 points - why?
        for scale in [0.1, 0.2, 0.3, 0.4]:
            with self.subTest(scale=scale):
                exterior = np.float32([
                    [10, 10],
                    [25, 10],
                    [25, 25],
                    [10, 25]
                ])
                psoi = ia.PolygonsOnImage([ia.Polygon(exterior)],
                                          shape=(30, 30, 3))
                aug = iaa.PerspectiveTransform(scale=scale, keep_size=True)
                aug.jitter = iap.Deterministic(scale)
                observed = aug.augment_polygons(psoi)
                assert observed.shape == (30, 30, 3)
                assert len(observed.polygons) == 1
                assert observed.polygons[0].is_valid
                # FIXME this part is currently deactivated due to too large
                # deviations from expectations. As the alignment check
                # works, this is probably some error on the test side
                """
                exterior_expected = np.copy(exterior)
                exterior_expected[:, 0] = (
                    (exterior_expected[:, 0] - scale * 30) / (30*(1-2*scale))
                ) * 30
                exterior_expected[:, 1] = (
                    (exterior_expected[:, 1] - scale * 30) / (30*(1-2*scale))
                ) * 30
                poly0 = observed.polygons[0]
                # TODO deviations of around 0.5 here from expected values, why?
                assert poly0.exterior_almost_equals(
                    exterior_expected, max_distance=2.0)
                """
# --------
# line strings
# --------
def test_line_strings_without_keep_size(self):
self._test_cbaois_without_keep_size(ia.LineString, ia.LineStringsOnImage,
"augment_line_strings")
def test_line_strings_with_keep_size(self):
self._test_cbaois_with_keep_size(ia.LineString, ia.LineStringsOnImage,
"augment_line_strings")
def test_image_line_string_alignment(self):
self._test_image_cba_alignment(ia.LineString, ia.LineStringsOnImage,
"augment_line_strings")
def test_empty_line_strings(self):
lsoi = ia.LineStringsOnImage([], shape=(20, 10, 3))
self._test_empty_cba(lsoi, "augment_line_strings")
# --------
# bounding boxes
# --------
    def test_bounding_boxes_without_keep_size(self):
        # BB augmentation without keep_size
        # With jitter fixed at 0.2 the transform removes 0.2*30=6px per
        # side, so all BB coordinates simply shift by -6.
        # TODO deviations of around 0.4-0.7 in this and the next test (between
        # expected and observed coordinates) -- why?
        bbs = [ia.BoundingBox(x1=0, y1=10, x2=20, y2=20)]
        bbsoi = ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)
        aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
        aug.jitter = iap.Deterministic(0.2)
        observed = aug.augment_bounding_boxes([bbsoi])
        bbs_expected = [
            ia.BoundingBox(x1=0-0.2*30, y1=10-0.2*30,
                           x2=20-0.2*30, y2=20-0.2*30)
        ]
        gen = zip(observed[0].bounding_boxes, bbs_expected)
        # TODO deviations of around 0.5 here from expected values, why?
        for bb_observed, bb_expected in gen:
            assert bb_observed.coords_almost_equals(
                bb_expected, max_distance=1.5)
    def test_bounding_boxes_with_keep_size(self):
        # BB augmentation with keep_size
        # Same crop as the without-keep_size test, but resized back to
        # 30px, so coordinates map via ((p - 6) / 18) * 30.
        bbs = [ia.BoundingBox(x1=0, y1=10, x2=20, y2=20)]
        bbsoi = ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)
        aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
        aug.jitter = iap.Deterministic(0.2)
        observed = aug.augment_bounding_boxes([bbsoi])
        bbs_expected = [
            ia.BoundingBox(
                x1=((0-0.2*30)/(30*0.6))*30,
                y1=((10-0.2*30)/(30*0.6))*30,
                x2=((20-0.2*30)/(30*0.6))*30,
                y2=((20-0.2*30)/(30*0.6))*30
            )
        ]
        gen = zip(observed[0].bounding_boxes, bbs_expected)
        # TODO deviations of around 0.5 here from expected values, why?
        for bb_observed, bb_expected in gen:
            assert bb_observed.coords_almost_equals(
                bb_expected, max_distance=1.5)
    def test_image_bounding_box_alignment(self):
        # Draw a 1px-wide white square outline at (35..65, 35..65) and a
        # BB around it; after joint augmentation each edge of every BB that
        # is still fully inside the image must lie on bright pixels.
        img = np.zeros((100, 100), dtype=np.uint8)
        img[35:35+1, 35:65+1] = 255
        img[65:65+1, 35:65+1] = 255
        img[35:65+1, 35:35+1] = 255
        img[35:65+1, 65:65+1] = 255
        bbs = [
            ia.BoundingBox(y1=35.5, x1=35.5, y2=65.5, x2=65.5),
        ]
        bbsoi = ia.BoundingBoxesOnImage(bbs, shape=img.shape)
        aug = iaa.PerspectiveTransform(scale=(0.05, 0.2), keep_size=True)
        for _ in sm.xrange(10):
            imgs_aug, bbsois_aug = aug(
                images=[img, img, img, img],
                bounding_boxes=[bbsoi, bbsoi, bbsoi, bbsoi])
            nb_skipped = 0
            for img_aug, bbsoi_aug in zip(imgs_aug, bbsois_aug):
                assert bbsoi_aug.shape == img_aug.shape
                for bb_aug in bbsoi_aug.bounding_boxes:
                    if bb_aug.is_fully_within_image(img_aug):
                        # top, bottom, left, right
                        x1 = bb_aug.x1_int
                        x2 = bb_aug.x2_int
                        y1 = bb_aug.y1_int
                        y2 = bb_aug.y2_int
                        # NOTE: lft_row/rgt_row are actually columns
                        top_row = img_aug[y1-1:y1+1, x1-1:x2+1]
                        btm_row = img_aug[y2-1:y2+1, x1-1:x2+1]
                        lft_row = img_aug[y1-1:y2+1, x1-1:x1+1]
                        rgt_row = img_aug[y1-1:y2+1, x2-1:x2+1]
                        assert np.max(top_row) > 10
                        assert np.max(btm_row) > 10
                        assert np.max(lft_row) > 10
                        assert np.max(rgt_row) > 10
                    else:
                        nb_skipped += 1
            # at most 2 of the 4 BBs may have left the image per batch
            assert nb_skipped <= 2
    def test_bounding_boxes_cover_extreme_points(self):
        # Test that for BBs, the augmented BB x coord is really the minimum
        # of the BB corner x-coords after augmentation and e.g. not just always
        # the augmented top-left corner's coordinate.
        h = w = 200 # height, width
        s = 5 # block size
        j_r = 0.1 # relative amount of jitter
        j = int(h * j_r) # absolute amount of jitter
        # Note that PerspectiveTransform currently places four points on the
        # image and back-projects to the image size (roughly).
        # That's why e.g. TopWiderThanBottom has coordinates that seem like
        # the top is thinner than the bottom (after projecting back to the
        # image rectangle, the top becomes wider).
        #
        # Each fake jitter parameter below returns a fixed (1, 4, 2) array
        # of per-corner (y, x) jitter fractions instead of random samples.
        class _JitterTopWiderThanBottom(object):
            def draw_samples(self, size, random_state):
                return np.float32([
                    [
                        [j_r, 0.0], # top-left
                        [j_r, 0.0], # top-right
                        [0.0, 0.0], # bottom-right
                        [0.0, 0.0], # bottom-left
                    ]
                ])
        class _JitterTopThinnerThanBottom(object):
            def draw_samples(self, size, random_state):
                return np.float32([
                    [
                        [0.0, 0.0], # top-left
                        [0.0, 0.0], # top-right
                        [j_r, 0.0], # bottom-right
                        [j_r, 0.0], # bottom-left
                    ]
                ])
        class _JitterLeftWiderThanRight(object):
            def draw_samples(self, size, random_state):
                return np.float32([
                    [
                        [0.0, j_r], # top-left
                        [0.0, 0.0], # top-right
                        [0.0, 0.0], # bottom-right
                        [0.0, j_r], # bottom-left
                    ]
                ])
        class _JitterLeftThinnerThanRight(object):
            def draw_samples(self, size, random_state):
                return np.float32([
                    [
                        [0.0, 0.0], # top-left
                        [0.0, j_r], # top-right
                        [0.0, j_r], # bottom-right
                        [0.0, 0.0], # bottom-left
                    ]
                ])
        jitters = [
            _JitterTopWiderThanBottom(),
            _JitterTopThinnerThanBottom(),
            _JitterLeftWiderThanRight(),
            _JitterLeftThinnerThanRight(),
        ]
        # expected coordinates after applying the above jitter
        # coordinates here are given as
        # (ystart, yend), (xstart, xend)
        coords = [
            # top wider than bottom
            [
                [(0+j, s+j+1), (0, s+1)], # top left
                [(0+j, s+j+1), (w-s, w+1)], # top right
                [(h-s-j, h-j+1), (w-s-j, w-j+1)], # bottom right
                [(h-s-j, h-j+1), (0+j, s+j+1)] # bottom left
            ],
            # top thinner than bottom
            [
                [(0+j, s+j+1), (0+j, s+j+1)],
                [(0+j, s+j+1), (w-s-j, w-j+1)],
                [(h-s-j, h-j+1), (w-s, w+1)],
                [(h-s-j, h-j+1), (0, s+1)]
            ],
            # left wider than right
            [
                [(0, s+1), (0+j, s+j+1)],
                [(0+j, s+j+1), (w-s-j, w-j+1)],
                [(h-s-j, h-j+1), (w-s-j, w-j+1)],
                [(h-s, h+1), (0+j, s+j+1)]
            ],
            # left thinner than right
            [
                [(0+j, s+j+1), (0+j, s+j+1)],
                [(0, s+1), (w-s-j, w-j+1)],
                [(h-s, h+1), (w-s-j, w-j+1)],
                [(h-s-j, h-j+1), (0+j, s+j+1)]
            ],
        ]
        # one colored corner blob per channel (ch0=TL, ch1=TR, ch2=BR,
        # ch3=BL), embedded in a 1px gray (50) border
        image = np.zeros((h-1, w-1, 4), dtype=np.uint8)
        image = iaa.pad(image, top=1, right=1, bottom=1, left=1, cval=50)
        image[0+j:s+j+1, 0+j:s+j+1, 0] = 255
        image[0+j:s+j+1, w-s-j:w-j+1, 1] = 255
        image[h-s-j:h-j+1, w-s-j:w-j+1, 2] = 255
        image[h-s-j:h-j+1, 0+j:s+j+1, 3] = 255
        bb = ia.BoundingBox(x1=0.0+j,
                            y1=0.0+j,
                            x2=w-j,
                            y2=h-j)
        bbsoi = ia.BoundingBoxesOnImage([bb], shape=image.shape)
        # NOTE(review): `i` appears to be unused below -- leftover?
        i = 0
        for jitter, coords_i in zip(jitters, coords):
            with self.subTest(jitter=jitter.__class__.__name__):
                aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
                aug.jitter = jitter
                image_aug, bbsoi_aug = aug(image=image, bounding_boxes=bbsoi)
                assert image_aug.shape == image.shape
                (tl_y1, tl_y2), (tl_x1, tl_x2) = coords_i[0]
                (tr_y1, tr_y2), (tr_x1, tr_x2) = coords_i[1]
                (br_y1, br_y2), (br_x1, br_x2) = coords_i[2]
                (bl_y1, bl_y2), (bl_x1, bl_x2) = coords_i[3]
                # We have to be rather tolerant here (>100 instead of e.g.
                # >200), because the transformation seems to be not that
                # accurate and the blobs may be a few pixels off the expected
                # coordinates.
                assert np.max(image_aug[tl_y1:tl_y2, tl_x1:tl_x2, 0]) > 100
                assert np.max(image_aug[tr_y1:tr_y2, tr_x1:tr_x2, 1]) > 100
                assert np.max(image_aug[br_y1:br_y2, br_x1:br_x2, 2]) > 100
                assert np.max(image_aug[bl_y1:bl_y2, bl_x1:bl_x2, 3]) > 100
                # We have rather strong tolerances of 7.5 here, partially
                # because the blobs are wide and the true coordinates are in
                # the center of the blobs; partially, because of above
                # mentioned inaccuracy of PerspectiveTransform.
                bb_aug = bbsoi_aug.bounding_boxes[0]
                exp_x1 = min([tl_x1, tr_x1, br_x1, bl_x1])
                exp_x2 = max([tl_x2, tr_x2, br_x2, bl_x2])
                exp_y1 = min([tl_y1, tr_y1, br_y1, bl_y1])
                exp_y2 = max([tl_y2, tr_y2, br_y2, bl_y2])
                assert np.isclose(bb_aug.x1, exp_x1, atol=7.5)
                assert np.isclose(bb_aug.y1, exp_y1, atol=7.5)
                assert np.isclose(bb_aug.x2, exp_x2, atol=7.5)
                assert np.isclose(bb_aug.y2, exp_y2, atol=7.5)
def test_empty_bounding_boxes(self):
# test empty bounding boxes
bbsoi = ia.BoundingBoxesOnImage([], shape=(20, 10, 3))
aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)
observed = aug.augment_bounding_boxes(bbsoi)
assert_cbaois_equal(observed, bbsoi)
# ------------
# mode
# ------------
def test_draw_samples_with_mode_being_int(self):
aug = iaa.PerspectiveTransform(scale=0.001, mode=cv2.BORDER_REPLICATE)
samples = aug._draw_samples([(10, 10, 3)], iarandom.RNG(0))
assert samples.modes.shape == (1,)
assert samples.modes[0] == cv2.BORDER_REPLICATE
def test_draw_samples_with_mode_being_string(self):
aug = iaa.PerspectiveTransform(scale=0.001, mode="replicate")
samples = aug._draw_samples([(10, 10, 3)], iarandom.RNG(0))
assert samples.modes.shape == (1,)
assert samples.modes[0] == cv2.BORDER_REPLICATE
def test_mode_replicate_copies_values(self):
aug = iaa.PerspectiveTransform(
scale=0.001, mode="replicate", cval=0, seed=31)
img = np.ones((256, 256, 3), dtype=np.uint8) * 255
img_aug = aug.augment_image(img)
assert (img_aug == 255).all()
    def test_mode_constant_uses_cval(self):
        # With mode="constant" any newly created pixels are filled with
        # cval; with the current implementation no such pixels appear, so
        # both augmenters must leave the all-255 image unchanged.
        aug255 = iaa.PerspectiveTransform(
            scale=0.001, mode="constant", cval=255, seed=31)
        aug0 = iaa.PerspectiveTransform(
            scale=0.001, mode="constant", cval=0, seed=31)
        img = np.ones((256, 256, 3), dtype=np.uint8) * 255
        img_aug255 = aug255.augment_image(img)
        img_aug0 = aug0.augment_image(img)
        assert (img_aug255 == 255).all()
        # TODO This was originally "assert not (...)", but since
        # PerspectiveTransform has become more precise, there are no
        # filled pixels anymore at the edges. That is because PerspT
        # currently only zooms in and not out. Filled pixels at the sides
        # were previously due to a bug.
        assert (img_aug0 == 255).all()
# ---------
# fit_output
# ---------
    def test_fit_output_with_fixed_jitter(self):
        # With fit_output=True the whole transformed image must remain
        # visible; corner blobs (one per channel) are tracked to verify
        # that the corners stay roughly in place.
        aug = iaa.PerspectiveTransform(scale=0.2, fit_output=True,
                                       keep_size=False)
        aug.jitter = iap.Deterministic(0.2)
        image = np.zeros((40, 40, 3), dtype=np.uint8)
        image[0:3, 0:3, 0] = 255
        image[0:3, 40-3:, 1] = 255
        image[40-3:, 40-3:, 2] = 255
        image_aug = aug(image=image)
        h, w = image_aug.shape[0:2]
        y0 = np.argmax(image_aug[:, 0, 0])
        x0 = np.argmax(image_aug[0, :, 0])
        y1 = np.argmax(image_aug[:, w-1, 1])
        x1 = np.argmax(image_aug[0, :, 1])
        y2 = np.argmax(image_aug[:, w-1, 2])
        x2 = np.argmax(image_aug[h-1, :, 2])
        # shape is unchanged for this fixed jitter (output fits the input
        # rectangle exactly)
        assert image_aug.shape == image.shape
        # corners roughly still at top-left, top-right, bottom-right
        # NOTE(review): shape[1] is used for the y-bound of y2 below; this
        # only works because the image is square -- confirm
        assert 0 <= y0 <= 3
        assert 0 <= x0 <= 3
        assert 0 <= y1 <= 3
        assert image_aug.shape[1]-3 <= x1 <= image_aug.shape[1]
        assert image_aug.shape[1]-3 <= y2 <= image_aug.shape[1]
        assert image_aug.shape[1]-3 <= x2 <= image_aug.shape[1]
        # no corner pixels now in the center
        assert np.max(image_aug[8:h-8, 8:w-8, :]) == 0
    def test_fit_output_with_random_jitter(self):
        # With fit_output=True and random jitter, the bright corner blobs
        # must always end up close to the borders of the (possibly resized)
        # output -- nothing may be cut off.
        aug = iaa.PerspectiveTransform(scale=0.1, fit_output=True,
                                       keep_size=False)
        image = np.zeros((50, 50, 4), dtype=np.uint8)
        image[0:5, 0:5, 0] = 255
        image[0:5, 50-5:, 1] = 255
        image[50-5:, 50-5:, 2] = 255
        image[50-5:, 0:5, 3] = 255
        for _ in sm.xrange(10):
            image_aug = aug(image=image)
            h, w = image_aug.shape[0:2]
            # bounding extent of all non-zero pixels over all channels
            arr_nochan = np.max(image_aug, axis=2)
            y_idx = np.where(np.max(arr_nochan, axis=1))[0]
            x_idx = np.where(np.max(arr_nochan, axis=0))[0]
            y_min = np.min(y_idx)
            y_max = np.max(y_idx)
            x_min = np.min(x_idx)
            x_max = np.max(x_idx)
            tol = 0
            assert 0 <= y_min <= 5+tol
            assert 0 <= x_min <= 5+tol
            assert h-5-tol <= y_max <= h-1
            assert w-5-tol <= x_max <= w-1
    def test_fit_output_with_random_jitter__segmentation_maps(self):
        # Same fit_output extent check as for images, but applied to
        # segmentation maps augmented jointly with an image.
        aug = iaa.PerspectiveTransform(scale=0.1, fit_output=True,
                                       keep_size=False)
        arr = np.zeros((50, 50, 4), dtype=np.uint8)
        arr[0:5, 0:5, 0] = 1
        arr[0:5, 50-5:, 1] = 1
        arr[50-5:, 50-5:, 2] = 1
        arr[50-5:, 0:5, 3] = 1
        segmap = ia.SegmentationMapsOnImage(arr, shape=(50, 50, 3))
        image = np.zeros((49, 49, 3), dtype=np.uint8)
        image = iaa.pad(image, top=1, right=1, bottom=1, left=1, cval=128)
        for _ in sm.xrange(10):
            image_aug, segmap_aug = aug(image=image, segmentation_maps=segmap)
            h, w = segmap_aug.arr.shape[0:2]
            # bounding extent of all non-background classes
            arr_nochan = np.max(segmap_aug.arr, axis=2)
            y_idx = np.where(np.max(arr_nochan, axis=1))[0]
            x_idx = np.where(np.max(arr_nochan, axis=0))[0]
            y_min = np.min(y_idx)
            y_max = np.max(y_idx)
            x_min = np.min(x_idx)
            x_max = np.max(x_idx)
            tol = 0
            assert 0 <= y_min <= 5+tol
            assert 0 <= x_min <= 5+tol
            assert h-5-tol <= y_max <= h-1
            assert w-5-tol <= x_max <= w-1
    def test_fit_output_with_fixed_jitter__keypoints(self):
        # With fit_output=True, the four image-corner keypoints must map
        # to the corners of the output canvas (within 0.5px).
        aug = iaa.PerspectiveTransform(scale=0.1, fit_output=True,
                                       keep_size=False)
        kpsoi = ia.KeypointsOnImage.from_xy_array([
            (0, 0),
            (50, 0),
            (50, 50),
            (0, 50)
        ], shape=(50, 50, 3))
        for i in sm.xrange(10):
            kpsoi_aug = aug(keypoints=kpsoi)
            h, w = kpsoi_aug.shape[0:2]
            y0, x0 = kpsoi_aug.keypoints[0].y, kpsoi_aug.keypoints[0].x
            y1, x1 = kpsoi_aug.keypoints[1].y, kpsoi_aug.keypoints[1].x
            y2, x2 = kpsoi_aug.keypoints[2].y, kpsoi_aug.keypoints[2].x
            y3, x3 = kpsoi_aug.keypoints[3].y, kpsoi_aug.keypoints[3].x
            y_min = min([y0, y1, y2, y3])
            y_max = max([y0, y1, y2, y3])
            x_min = min([x0, x1, x2, x3])
            x_max = max([x0, x1, x2, x3])
            tol = 0.5
            assert 0-tol <= y_min <= tol, "Got y_min=%.4f at %d" % (y_min, i)
            assert 0-tol <= x_min <= tol, "Got x_min=%.4f at %d" % (x_min, i)
            assert h-tol <= y_max <= h+tol, (
                "Got y_max=%.4f for h=%.2f at %d" % (y_max, h, i))
            assert w-tol <= x_max <= w+tol, (
                "Got x_max=%.4f for w=%.2f at %d" % (x_max, w, i))
# ---------
# unusual channel numbers
# ---------
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.PerspectiveTransform(scale=0.01)
image_aug = aug(image=image)
assert np.all(image_aug == 0)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
# ---------
# zero-sized axes
# ---------
    def test_zero_sized_axes(self):
        # Images with zero-sized axes must survive augmentation with dtype
        # and shape intact.
        shapes = [
            (0, 0),
            (0, 1),
            (1, 0),
            (0, 1, 0),
            (1, 0, 0),
            (0, 1, 1),
            (1, 0, 1)
        ]
        for shape in shapes:
            for keep_size in [False, True]:
                with self.subTest(shape=shape, keep_size=keep_size):
                    for _ in sm.xrange(3):
                        image = np.zeros(shape, dtype=np.uint8)
                        # NOTE(review): `keep_size` is iterated but never
                        # passed to the augmenter; this probably should be
                        # PerspectiveTransform(scale=0.01,
                        # keep_size=keep_size) -- TODO confirm
                        aug = iaa.PerspectiveTransform(scale=0.01)
                        image_aug = aug(image=image)
                        assert image_aug.dtype.name == "uint8"
                        assert image_aug.shape == shape
# --------
# get_parameters
# --------
    def test_get_parameters(self):
        # get_parameters() presumably returns, in order:
        # [jitter, keep_size, cval, mode, fit_output] -- matches the
        # constructor arguments checked below.
        aug = iaa.PerspectiveTransform(scale=0.1, keep_size=False)
        params = aug.get_parameters()
        assert isinstance(params[0], iap.Normal)
        assert isinstance(params[0].scale, iap.Deterministic)
        assert 0.1 - 1e-8 < params[0].scale.value < 0.1 + 1e-8
        assert params[1] is False
        assert params[2].value == 0
        assert params[3].value == "constant"
        assert params[4] is False
# --------
# other dtypes
# --------
    def test_other_dtypes_bool(self):
        # With jitter fixed at 0.2 and keep_size=False, the output should
        # equal the central 60% crop of the input; compare with tolerance
        # for interpolation artifacts at the cross edges.
        aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
        aug.jitter = iap.Deterministic(0.2)
        y1 = int(30 * 0.2)
        y2 = int(30 * 0.8)
        x1 = int(30 * 0.2)
        x2 = int(30 * 0.8)
        image = np.zeros((30, 30), dtype=bool)
        image[12:18, :] = True
        image[:, 12:18] = True
        expected = image[y1:y2, x1:x2]
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.name == image.dtype.name
        assert image_aug.shape == expected.shape
        assert (np.sum(image_aug == expected) / expected.size) > 0.9
    def test_other_dtypes_uint_int(self):
        # Same central-crop comparison as the bool test, repeated for
        # uint/int dtypes over a range of representative values (including
        # extremes of each dtype's value range).
        aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
        aug.jitter = iap.Deterministic(0.2)
        y1 = int(30 * 0.2)
        y2 = int(30 * 0.8)
        x1 = int(30 * 0.2)
        x2 = int(30 * 0.8)
        dtypes = ["uint8", "uint16", "int8", "int16"]
        for dtype in dtypes:
            min_value, center_value, max_value = \
                iadt.get_value_range_of_dtype(dtype)
            if np.dtype(dtype).kind == "i":
                values = [0, 1, 5, 10, 100, int(0.1 * max_value),
                          int(0.2 * max_value), int(0.5 * max_value),
                          max_value-100, max_value]
                # signed dtypes: also test the mirrored negative values
                values = values + [(-1)*value for value in values]
            else:
                values = [0, 1, 5, 10, 100, int(center_value),
                          int(0.1 * max_value), int(0.2 * max_value),
                          int(0.5 * max_value), max_value-100, max_value]
            for value in values:
                with self.subTest(dtype=dtype, value=value):
                    image = np.zeros((30, 30), dtype=dtype)
                    image[12:18, :] = value
                    image[:, 12:18] = value
                    expected = image[y1:y2, x1:x2]
                    image_aug = aug.augment_image(image)
                    assert image_aug.dtype.name == dtype
                    assert image_aug.shape == expected.shape
                    # rather high tolerance of 0.7 here because of
                    # interpolation
                    assert (
                        np.sum(image_aug == expected) / expected.size
                    ) > 0.7
    def test_other_dtypes_float(self):
        # Same central-crop comparison as the int test, for float dtypes;
        # equality is replaced by isclose with a dtype-dependent atol.
        aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)
        aug.jitter = iap.Deterministic(0.2)
        y1 = int(30 * 0.2)
        y2 = int(30 * 0.8)
        x1 = int(30 * 0.2)
        x2 = int(30 * 0.8)
        dtypes = ["float16", "float32", "float64"]
        for dtype in dtypes:
            def _isclose(a, b):
                # float16 needs a much coarser tolerance
                atol = 1e-4 if dtype == "float16" else 1e-8
                return np.isclose(a, b, atol=atol, rtol=0)
            isize = np.dtype(dtype).itemsize
            # value magnitudes scaled with the dtype's itemsize
            values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
                      1000 ** (isize - 1)]
            values = values + [(-1) * value for value in values]
            for value in values:
                with self.subTest(dtype=dtype, value=value):
                    image = np.zeros((30, 30), dtype=dtype)
                    image[12:18, :] = value
                    image[:, 12:18] = value
                    expected = image[y1:y2, x1:x2]
                    image_aug = aug.augment_image(image)
                    assert image_aug.dtype.name == dtype
                    assert image_aug.shape == expected.shape
                    # rather high tolerance of 0.7 here because of
                    # interpolation
                    assert (
                        np.sum(_isclose(image_aug, expected)) / expected.size
                    ) > 0.7
def test_pickleable(self):
aug = iaa.PerspectiveTransform(0.2, seed=1)
runtest_pickleable_uint8_img(aug, iterations=4, shape=(25, 25, 1))
class _elastic_trans_temp_thresholds(object):
    """Context manager that temporarily overrides ElasticTransformation's
    keypoint-augmentation alpha/sigma thresholds and restores the previous
    class-level values on exit."""

    def __init__(self, alpha, sigma):
        self.alpha = alpha
        self.sigma = sigma
        self.old_alpha = None
        self.old_sigma = None

    def __enter__(self):
        klass = iaa.ElasticTransformation
        self.old_alpha = klass.KEYPOINT_AUG_ALPHA_THRESH
        self.old_sigma = klass.KEYPOINT_AUG_SIGMA_THRESH
        klass.KEYPOINT_AUG_ALPHA_THRESH = self.alpha
        klass.KEYPOINT_AUG_SIGMA_THRESH = self.sigma

    def __exit__(self, exc_type, exc_val, exc_tb):
        klass = iaa.ElasticTransformation
        klass.KEYPOINT_AUG_ALPHA_THRESH = self.old_alpha
        klass.KEYPOINT_AUG_SIGMA_THRESH = self.old_sigma
# TODO add tests for order
# TODO improve tests for cval
# TODO add tests for mode
class TestElasticTransformation(unittest.TestCase):
    def setUp(self):
        # reset the global random state so every test is deterministic
        reseed()
@property
def image(self):
img = np.zeros((50, 50), dtype=np.uint8) + 255
img = np.pad(img, ((100, 100), (100, 100)), mode="constant",
constant_values=0)
return img
@property
def mask(self):
img = self.image
mask = img > 0
return mask
@property
def heatmaps(self):
img = self.image
return HeatmapsOnImage(img.astype(np.float32) / 255.0,
shape=img.shape)
@property
def segmaps(self):
img = self.image
return SegmentationMapsOnImage((img > 0).astype(np.int32),
shape=img.shape)
# -----------
# __init__
# -----------
def test___init___bad_datatype_for_alpha_leads_to_failure(self):
# test alpha having bad datatype
got_exception = False
try:
_ = iaa.ElasticTransformation(alpha=False, sigma=0.25)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___alpha_is_tuple(self):
# test alpha being tuple
aug = iaa.ElasticTransformation(alpha=(1.0, 2.0), sigma=0.25)
assert isinstance(aug.alpha, iap.Uniform)
assert isinstance(aug.alpha.a, iap.Deterministic)
assert isinstance(aug.alpha.b, iap.Deterministic)
assert 1.0 - 1e-8 < aug.alpha.a.value < 1.0 + 1e-8
assert 2.0 - 1e-8 < aug.alpha.b.value < 2.0 + 1e-8
def test___init___sigma_is_tuple(self):
# test sigma being tuple
aug = iaa.ElasticTransformation(alpha=0.25, sigma=(1.0, 2.0))
assert isinstance(aug.sigma, iap.Uniform)
assert isinstance(aug.sigma.a, iap.Deterministic)
assert isinstance(aug.sigma.b, iap.Deterministic)
assert 1.0 - 1e-8 < aug.sigma.a.value < 1.0 + 1e-8
assert 2.0 - 1e-8 < aug.sigma.b.value < 2.0 + 1e-8
def test___init___bad_datatype_for_sigma_leads_to_failure(self):
# test sigma having bad datatype
got_exception = False
try:
_ = iaa.ElasticTransformation(alpha=0.25, sigma=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___order_is_all(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=ia.ALL)
assert isinstance(aug.order, iap.Choice)
assert all([order in aug.order.a for order in [0, 1, 2, 3, 4, 5]])
def test___init___order_is_int(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=1)
assert isinstance(aug.order, iap.Deterministic)
assert aug.order.value == 1
def test___init___order_is_list(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=[0, 1, 2])
assert isinstance(aug.order, iap.Choice)
assert all([order in aug.order.a for order in [0, 1, 2]])
def test___init___order_is_stochastic_parameter(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0,
order=iap.Choice([0, 1, 2, 3]))
assert isinstance(aug.order, iap.Choice)
assert all([order in aug.order.a for order in [0, 1, 2, 3]])
def test___init___bad_datatype_for_order_leads_to_failure(self):
got_exception = False
try:
_ = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___cval_is_all(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=ia.ALL)
assert isinstance(aug.cval, iap.Uniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 0
assert aug.cval.b.value == 255
def test___init___cval_is_int(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=128)
assert isinstance(aug.cval, iap.Deterministic)
assert aug.cval.value == 128
def test___init___cval_is_list(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0,
cval=[16, 32, 64])
assert isinstance(aug.cval, iap.Choice)
assert all([cval in aug.cval.a for cval in [16, 32, 64]])
def test___init___cval_is_stochastic_parameter(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0,
cval=iap.Choice([16, 32, 64]))
assert isinstance(aug.cval, iap.Choice)
assert all([cval in aug.cval.a for cval in [16, 32, 64]])
def test___init___cval_is_tuple(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=(128, 255))
assert isinstance(aug.cval, iap.Uniform)
assert isinstance(aug.cval.a, iap.Deterministic)
assert isinstance(aug.cval.b, iap.Deterministic)
assert aug.cval.a.value == 128
assert aug.cval.b.value == 255
def test___init___bad_datatype_for_cval_leads_to_failure(self):
got_exception = False
try:
_ = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test___init___mode_is_all(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode=ia.ALL)
assert isinstance(aug.mode, iap.Choice)
assert all([
mode in aug.mode.a
for mode
in ["constant", "nearest", "reflect", "wrap"]])
def test___init___mode_is_string(self):
aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode="nearest")
assert isinstance(aug.mode, iap.Deterministic)
assert aug.mode.value == "nearest"
def test___init___mode_is_list(self):
aug = iaa.ElasticTransformation(
alpha=0.25, sigma=1.0, mode=["constant", "nearest"])
assert isinstance(aug.mode, iap.Choice)
assert all([mode in aug.mode.a for mode in ["constant", "nearest"]])
def test___init___mode_is_stochastic_parameter(self):
aug = iaa.ElasticTransformation(
alpha=0.25, sigma=1.0, mode=iap.Choice(["constant", "nearest"]))
assert isinstance(aug.mode, iap.Choice)
assert all([mode in aug.mode.a for mode in ["constant", "nearest"]])
def test___init___bad_datatype_for_mode_leads_to_failure(self):
got_exception = False
try:
_ = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
# -----------
# alpha, sigma
# -----------
def test_images(self):
# test basic funtionality
aug = iaa.ElasticTransformation(alpha=0.5, sigma=0.25)
observed = aug.augment_image(self.image)
mask = self.mask
# assume that some white/255 pixels have been moved away from the
# center and replaced by black/0 pixels
assert np.sum(observed[mask]) < np.sum(self.image[mask])
# assume that some black/0 pixels have been moved away from the outer
# area and replaced by white/255 pixels
assert np.sum(observed[~mask]) > np.sum(self.image[~mask])
def test_images_nonsquare(self):
# test basic funtionality with non-square images
aug = iaa.ElasticTransformation(alpha=0.5, sigma=0.25)
img_nonsquare = np.zeros((50, 100), dtype=np.uint8) + 255
img_nonsquare = np.pad(img_nonsquare, ((100, 100), (100, 100)),
mode="constant", constant_values=0)
mask_nonsquare = (img_nonsquare > 0)
observed = aug.augment_image(img_nonsquare)
assert (
np.sum(observed[mask_nonsquare])
< np.sum(img_nonsquare[mask_nonsquare]))
assert (
np.sum(observed[~mask_nonsquare])
> np.sum(img_nonsquare[~mask_nonsquare]))
def test_images_unusual_channel_numbers(self):
# test unusual channels numbers
aug = iaa.ElasticTransformation(alpha=5, sigma=0.5)
for nb_channels in [1, 2, 4, 5, 7, 10, 11]:
img_c = np.tile(self.image[..., np.newaxis], (1, 1, nb_channels))
assert img_c.shape == (250, 250, nb_channels)
observed = aug.augment_image(img_c)
assert observed.shape == (250, 250, nb_channels)
for c in sm.xrange(1, nb_channels):
assert np.array_equal(observed[..., c], observed[..., 0])
def test_heatmaps(self):
# test basic funtionality, heatmaps
aug = iaa.ElasticTransformation(alpha=0.5, sigma=0.25)
observed = aug.augment_heatmaps([self.heatmaps])[0]
mask = self.mask
assert observed.shape == self.heatmaps.shape
_assert_same_min_max(observed, self.heatmaps)
assert (
np.sum(observed.get_arr()[mask])
< np.sum(self.heatmaps.get_arr()[mask]))
assert (
np.sum(observed.get_arr()[~mask])
> np.sum(self.heatmaps.get_arr()[~mask]))
def test_segmaps(self):
# test basic funtionality, segmaps
# alpha=1.5 instead of 0.5 as above here, because otherwise nothing
# is moved
aug = iaa.ElasticTransformation(alpha=1.5, sigma=0.25)
observed = aug.augment_segmentation_maps([self.segmaps])[0]
mask = self.mask
assert observed.shape == self.segmaps.shape
assert (
np.sum(observed.get_arr()[mask])
< np.sum(self.segmaps.get_arr()[mask]))
assert (
np.sum(observed.get_arr()[~mask])
> np.sum(self.segmaps.get_arr()[~mask]))
def test_images_weak_vs_strong_alpha(self):
# test effects of increased alpha strength
aug1 = iaa.ElasticTransformation(alpha=0.1, sigma=0.25)
aug2 = iaa.ElasticTransformation(alpha=5.0, sigma=0.25)
observed1 = aug1.augment_image(self.image)
observed2 = aug2.augment_image(self.image)
mask = self.mask
# assume that the inner area has become more black-ish when using high
# alphas (more white pixels were moved out of the inner area)
assert np.sum(observed1[mask]) > np.sum(observed2[mask])
# assume that the outer area has become more white-ish when using high
# alphas (more black pixels were moved into the inner area)
assert np.sum(observed1[~mask]) < np.sum(observed2[~mask])
def test_heatmaps_weak_vs_strong_alpha(self):
# test effects of increased alpha strength, heatmaps
aug1 = iaa.ElasticTransformation(alpha=0.1, sigma=0.25)
aug2 = iaa.ElasticTransformation(alpha=5.0, sigma=0.25)
observed1 = aug1.augment_heatmaps([self.heatmaps])[0]
observed2 = aug2.augment_heatmaps([self.heatmaps])[0]
mask = self.mask
assert observed1.shape == self.heatmaps.shape
assert observed2.shape == self.heatmaps.shape
_assert_same_min_max(observed1, self.heatmaps)
_assert_same_min_max(observed2, self.heatmaps)
assert (
np.sum(observed1.get_arr()[mask])
> np.sum(observed2.get_arr()[mask]))
assert (
np.sum(observed1.get_arr()[~mask])
< np.sum(observed2.get_arr()[~mask]))
    def test_segmaps_weak_vs_strong_alpha(self):
        """Higher alpha should displace more segmap content than lower alpha."""
        # test effects of increased alpha strength, segmaps
        aug1 = iaa.ElasticTransformation(alpha=0.1, sigma=0.25)
        aug2 = iaa.ElasticTransformation(alpha=5.0, sigma=0.25)
        observed1 = aug1.augment_segmentation_maps([self.segmaps])[0]
        observed2 = aug2.augment_segmentation_maps([self.segmaps])[0]
        mask = self.mask
        assert observed1.shape == self.segmaps.shape
        assert observed2.shape == self.segmaps.shape
        # stronger alpha moves more mass out of the inner (masked) area
        assert (
            np.sum(observed1.get_arr()[mask])
            > np.sum(observed2.get_arr()[mask]))
        assert (
            np.sum(observed1.get_arr()[~mask])
            < np.sum(observed2.get_arr()[~mask]))
    def test_images_low_vs_high_sigma(self):
        """Lower sigma should yield noisier (higher-frequency) distortions.

        Noisiness is measured as the std-dev of differences between
        neighbouring pixels, averaged over horizontal and vertical
        directions.
        """
        # test effects of increased sigmas
        aug1 = iaa.ElasticTransformation(alpha=3.0, sigma=0.1)
        aug2 = iaa.ElasticTransformation(alpha=3.0, sigma=3.0)
        observed1 = aug1.augment_image(self.image)
        observed2 = aug2.augment_image(self.image)
        # std of horizontal neighbour differences
        observed1_std_hori = np.std(
            observed1.astype(np.float32)[:, 1:]
            - observed1.astype(np.float32)[:, :-1])
        observed2_std_hori = np.std(
            observed2.astype(np.float32)[:, 1:]
            - observed2.astype(np.float32)[:, :-1])
        # std of vertical neighbour differences
        observed1_std_vert = np.std(
            observed1.astype(np.float32)[1:, :]
            - observed1.astype(np.float32)[:-1, :])
        observed2_std_vert = np.std(
            observed2.astype(np.float32)[1:, :]
            - observed2.astype(np.float32)[:-1, :])
        observed1_std = (observed1_std_hori + observed1_std_vert) / 2
        observed2_std = (observed2_std_hori + observed2_std_vert) / 2
        assert observed1_std > observed2_std
    def test_images_alpha_is_stochastic_parameter(self):
        """alpha given as iap.Choice should sample both choice values."""
        # test alpha being iap.Choice
        aug = iaa.ElasticTransformation(alpha=iap.Choice([0.001, 5.0]),
                                        sigma=0.25)
        seen = [0, 0]
        for _ in sm.xrange(100):
            observed = aug.augment_image(self.image)
            # average absolute pixel change between input and output
            diff = np.average(
                np.abs(
                    self.image.astype(np.float32)
                    - observed.astype(np.float32)
                )
            )
            # small diff => the weak alpha (0.001) was sampled,
            # large diff => the strong alpha (5.0) was sampled
            if diff < 1.0:
                seen[0] += 1
            else:
                seen[1] += 1
        # both alphas should have been sampled a non-trivial number of times
        assert seen[0] > 10
        assert seen[1] > 10
    def test_sigma_is_stochastic_parameter(self):
        """sigma given as iap.Choice should sample both choice values."""
        # test sigma being iap.Choice
        aug = iaa.ElasticTransformation(alpha=3.0,
                                        sigma=iap.Choice([0.01, 5.0]))
        seen = [0, 0]
        for _ in sm.xrange(100):
            observed = aug.augment_image(self.image)
            # noisiness measured via std of neighbour differences,
            # as in test_images_low_vs_high_sigma
            observed_std_hori = np.std(
                observed.astype(np.float32)[:, 1:]
                - observed.astype(np.float32)[:, :-1])
            observed_std_vert = np.std(
                observed.astype(np.float32)[1:, :]
                - observed.astype(np.float32)[:-1, :])
            observed_std = (observed_std_hori + observed_std_vert) / 2
            # high std => the low sigma (0.01, noisy) was sampled
            if observed_std > 10.0:
                seen[0] += 1
            else:
                seen[1] += 1
        # both sigmas should have been sampled a non-trivial number of times
        assert seen[0] > 10
        assert seen[1] > 10
# -----------
# cval
# -----------
def test_images_cval_is_int_and_order_is_0(self):
aug = iaa.ElasticTransformation(alpha=30.0, sigma=3.0, mode="constant",
cval=255, order=0)
img = np.zeros((100, 100), dtype=np.uint8)
observed = aug.augment_image(img)
assert np.sum(observed == 255) > 0
assert np.sum(np.logical_and(0 < observed, observed < 255)) == 0
def test_images_cval_is_int_and_order_is_0_weak_alpha(self):
aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0, mode="constant",
cval=0, order=0)
img = np.zeros((100, 100), dtype=np.uint8)
observed = aug.augment_image(img)
assert np.sum(observed == 255) == 0
def test_images_cval_is_int_and_order_is_2(self):
aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0, mode="constant",
cval=255, order=2)
img = np.zeros((100, 100), dtype=np.uint8)
observed = aug.augment_image(img)
assert np.sum(np.logical_and(0 < observed, observed < 255)) > 0
    def test_images_cval_is_int_image_hw3(self):
        """cval must be applied identically to all channels of an HxWx3 image."""
        aug = iaa.ElasticTransformation(alpha=5.0, sigma=3.0, mode="constant",
                                        cval=255, order=0)
        img = np.zeros((100, 100, 3), dtype=np.uint8)
        observed = aug.augment_image(img)
        # per-pixel count of channels that received the cval
        count_255 = np.sum(observed == 255, axis=2)
        # pixels where only some (but not all) channels are 255 would indicate
        # channel-wise inconsistent application of cval
        mask_not_all_channels_same_intensity = np.logical_and(
            count_255 > 0, count_255 < 3)
        mask_all_channels_same_intensity = (count_255 == 3)
        assert not np.any(mask_not_all_channels_same_intensity)
        assert np.any(mask_all_channels_same_intensity)
    def test_heatmaps_ignore_cval(self):
        """cval must not leak into heatmaps (all-zero map stays ~zero)."""
        # cval with heatmaps
        heatmaps = HeatmapsOnImage(
            np.zeros((32, 32, 1), dtype=np.float32), shape=(32, 32, 3))
        aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0,
                                        mode="constant", cval=255)
        observed = aug.augment_heatmaps([heatmaps])[0]
        assert observed.shape == heatmaps.shape
        _assert_same_min_max(observed, heatmaps)
        # if cval=255 had been applied to the heatmap array, values > 0.01
        # would appear
        assert np.sum(observed.get_arr() > 0.01) == 0
    def test_segmaps_ignore_cval(self):
        """cval must not leak into segmaps (all-zero map stays zero)."""
        # cval with segmaps
        segmaps = SegmentationMapsOnImage(
            np.zeros((32, 32, 1), dtype=np.int32), shape=(32, 32, 3))
        aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0, mode="constant",
                                        cval=255)
        observed = aug.augment_segmentation_maps([segmaps])[0]
        assert observed.shape == segmaps.shape
        # if cval=255 had been applied to the segmap array, values > 0 would
        # appear
        assert np.sum(observed.get_arr() > 0) == 0
# -----------
# keypoints
# -----------
    def test_keypoints_no_movement_if_alpha_below_threshold(self):
        """Keypoints must not move when alpha is below the KP threshold."""
        # for small alpha, should not move if below threshold
        with _elastic_trans_temp_thresholds(alpha=1.0, sigma=0.0):
            kps = [
                ia.Keypoint(x=1, y=1), ia.Keypoint(x=15, y=25),
                ia.Keypoint(x=5, y=5), ia.Keypoint(x=7, y=4),
                ia.Keypoint(x=48, y=5), ia.Keypoint(x=21, y=37),
                ia.Keypoint(x=32, y=39), ia.Keypoint(x=6, y=8),
                ia.Keypoint(x=12, y=21), ia.Keypoint(x=3, y=45),
                ia.Keypoint(x=45, y=3), ia.Keypoint(x=7, y=48)]
            kpsoi = ia.KeypointsOnImage(kps, shape=(50, 50))
            # alpha=0.001 is below the temporary threshold of 1.0
            aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)
            observed = aug.augment_keypoints([kpsoi])[0]
            # mean squared distance between original and augmented keypoints
            d = kpsoi.to_xy_array() - observed.to_xy_array()
            d[:, 0] = d[:, 0] ** 2
            d[:, 1] = d[:, 1] ** 2
            d = np.sum(d, axis=1)
            d = np.average(d, axis=0)
            assert d < 1e-8
    def test_keypoints_no_movement_if_sigma_below_threshold(self):
        """Keypoints must not move when sigma is below the KP threshold."""
        # for small sigma, should not move if below threshold
        with _elastic_trans_temp_thresholds(alpha=0.0, sigma=1.0):
            kps = [
                ia.Keypoint(x=1, y=1), ia.Keypoint(x=15, y=25),
                ia.Keypoint(x=5, y=5), ia.Keypoint(x=7, y=4),
                ia.Keypoint(x=48, y=5), ia.Keypoint(x=21, y=37),
                ia.Keypoint(x=32, y=39), ia.Keypoint(x=6, y=8),
                ia.Keypoint(x=12, y=21), ia.Keypoint(x=3, y=45),
                ia.Keypoint(x=45, y=3), ia.Keypoint(x=7, y=48)]
            kpsoi = ia.KeypointsOnImage(kps, shape=(50, 50))
            # sigma=0.001 is below the temporary threshold of 1.0
            aug = iaa.ElasticTransformation(alpha=1.0, sigma=0.001)
            observed = aug.augment_keypoints([kpsoi])[0]
            # mean squared distance between original and augmented keypoints
            d = kpsoi.to_xy_array() - observed.to_xy_array()
            d[:, 0] = d[:, 0] ** 2
            d[:, 1] = d[:, 1] ** 2
            d = np.sum(d, axis=1)
            d = np.average(d, axis=0)
            assert d < 1e-8
    def test_keypoints_small_movement_for_weak_alpha_if_threshold_zero(self):
        """With zeroed thresholds, a tiny alpha moves keypoints only slightly."""
        # for small alpha (at sigma 1.0), should barely move
        # if thresholds set to zero
        with _elastic_trans_temp_thresholds(alpha=0.0, sigma=0.0):
            kps = [
                ia.Keypoint(x=1, y=1), ia.Keypoint(x=15, y=25),
                ia.Keypoint(x=5, y=5), ia.Keypoint(x=7, y=4),
                ia.Keypoint(x=48, y=5), ia.Keypoint(x=21, y=37),
                ia.Keypoint(x=32, y=39), ia.Keypoint(x=6, y=8),
                ia.Keypoint(x=12, y=21), ia.Keypoint(x=3, y=45),
                ia.Keypoint(x=45, y=3), ia.Keypoint(x=7, y=48)]
            kpsoi = ia.KeypointsOnImage(kps, shape=(50, 50))
            aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)
            observed = aug.augment_keypoints([kpsoi])[0]
            # mean squared distance between original and augmented keypoints;
            # some movement is allowed, but it must be small
            d = kpsoi.to_xy_array() - observed.to_xy_array()
            d[:, 0] = d[:, 0] ** 2
            d[:, 1] = d[:, 1] ** 2
            d = np.sum(d, axis=1)
            d = np.average(d, axis=0)
            assert d < 0.5
    def test_image_keypoint_alignment(self):
        """Augmented keypoints must land on the augmented white line."""
        # test alignment between images and keypoints
        image = np.zeros((120, 70), dtype=np.uint8)
        s = 3
        # vertical white bar at x=35, keypoints placed along it
        image[:, 35-s:35+s+1] = 255
        kps = [ia.Keypoint(x=35, y=20),
               ia.Keypoint(x=35, y=40),
               ia.Keypoint(x=35, y=60),
               ia.Keypoint(x=35, y=80),
               ia.Keypoint(x=35, y=100)]
        kpsoi = ia.KeypointsOnImage(kps, shape=image.shape)
        aug = iaa.ElasticTransformation(alpha=70, sigma=5)
        # deterministic mode ensures images and keypoints get the same
        # displacement field
        aug_det = aug.to_deterministic()
        images_aug = aug_det.augment_images([image, image])
        kpsois_aug = aug_det.augment_keypoints([kpsoi, kpsoi])
        count_bad = 0
        for image_aug, kpsoi_aug in zip(images_aug, kpsois_aug):
            assert kpsoi_aug.shape == (120, 70)
            assert len(kpsoi_aug.keypoints) == 5
            for kp_aug in kpsoi_aug.keypoints:
                x, y = int(np.round(kp_aug.x)), int(np.round(kp_aug.y))
                # check a 5x5 neighbourhood around the augmented keypoint
                # for bright pixels from the augmented bar
                bb = ia.BoundingBox(x1=x-2, x2=x+2+1, y1=y-2, y2=y+2+1)
                img_ex = bb.extract_from_image(image_aug)
                if np.any(img_ex > 10):
                    pass  # close to expected location
                else:
                    count_bad += 1
        assert count_bad <= 1
def test_empty_keypoints(self):
aug = iaa.ElasticTransformation(alpha=10, sigma=10)
kpsoi = ia.KeypointsOnImage([], shape=(10, 10, 3))
kpsoi_aug = aug.augment_keypoints(kpsoi)
assert len(kpsoi_aug.keypoints) == 0
assert kpsoi_aug.shape == (10, 10, 3)
# -----------
# abstract methods for polygons and line strings
# -----------
    @classmethod
    def _test_cbaois_no_movement_if_alpha_below_threshold(
            cls, cba_class, cbaoi_class, augf_name):
        """Shared check: coord-based augmentables don't move for tiny alpha.

        ``cba_class``/``cbaoi_class`` are e.g. ``ia.Polygon`` and
        ``ia.PolygonsOnImage``; ``augf_name`` is the augmenter method name
        to call (e.g. ``"augment_polygons"``).
        """
        # for small alpha, should not move if below threshold
        with _elastic_trans_temp_thresholds(alpha=1.0, sigma=0.0):
            cba = cba_class([(10, 15), (40, 15), (40, 35), (10, 35)])
            cbaoi = cbaoi_class([cba], shape=(50, 50))
            # alpha=0.001 is below the temporary threshold of 1.0
            aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)
            observed = getattr(aug, augf_name)(cbaoi)
            assert observed.shape == (50, 50)
            assert len(observed.items) == 1
            assert observed.items[0].coords_almost_equals(cba)
            # polygons additionally expose validity; line strings do not
            if hasattr(observed.items[0], "is_valid"):
                assert observed.items[0].is_valid
    @classmethod
    def _test_cbaois_no_movement_if_sigma_below_threshold(
            cls, cba_class, cbaoi_class, augf_name):
        """Shared check: coord-based augmentables don't move for tiny sigma.

        See ``_test_cbaois_no_movement_if_alpha_below_threshold`` for the
        meaning of the parameters.
        """
        # for small sigma, should not move if below threshold
        with _elastic_trans_temp_thresholds(alpha=0.0, sigma=1.0):
            cba = cba_class([(10, 15), (40, 15), (40, 35), (10, 35)])
            cbaoi = cbaoi_class([cba], shape=(50, 50))
            # sigma=0.001 is below the temporary threshold of 1.0
            aug = iaa.ElasticTransformation(alpha=1.0, sigma=0.001)
            observed = getattr(aug, augf_name)(cbaoi)
            assert observed.shape == (50, 50)
            assert len(observed.items) == 1
            assert observed.items[0].coords_almost_equals(cba)
            # polygons additionally expose validity; line strings do not
            if hasattr(observed.items[0], "is_valid"):
                assert observed.items[0].is_valid
    @classmethod
    def _test_cbaois_small_movement_for_weak_alpha_if_threshold_zero(
            cls, cba_class, cbaoi_class, augf_name):
        """Shared check: with zeroed thresholds, tiny alpha moves coords
        only slightly (max distance 0.5)."""
        # for small alpha (at sigma 1.0), should barely move
        # if thresholds set to zero
        with _elastic_trans_temp_thresholds(alpha=0.0, sigma=0.0):
            cba = cba_class([(10, 15), (40, 15), (40, 35), (10, 35)])
            cbaoi = cbaoi_class([cba], shape=(50, 50))
            aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)
            observed = getattr(aug, augf_name)(cbaoi)
            assert observed.shape == (50, 50)
            assert len(observed.items) == 1
            assert observed.items[0].coords_almost_equals(
                cba, max_distance=0.5)
            # polygons additionally expose validity; line strings do not
            if hasattr(observed.items[0], "is_valid"):
                assert observed.items[0].is_valid
    @classmethod
    def _test_image_cbaoi_alignment(cls, cba_class, cbaoi_class, augf_name):
        """Shared check: augmented coords stay aligned with augmented image.

        Draws white markers at the coordinates of a zig-zag shaped CBA,
        augments image and CBA with the same deterministic augmenter and
        verifies that each augmented coordinate still lies on a bright spot.
        """
        # test alignment between images and polygons
        height_step_size = 50
        width_step_size = 30
        height_steps = 2  # don't set >2, otherwise polygon will be broken
        width_steps = 10
        height = (2+height_steps) * height_step_size
        width = (2+width_steps) * width_step_size
        s = 3
        image = np.zeros((height, width), dtype=np.uint8)
        points = []
        # top row of points, left to right (skipping the outermost columns)
        for w in sm.xrange(0, 2+width_steps):
            if w not in [0, width_steps+2-1]:
                x = width_step_size * w
                y = height_step_size
                points.append((x, y))
                # white marker square around each point
                image[y-s:y+s+1, x-s:x+s+1] = 255
        # bottom row of points, right to left (closes the shape)
        for w in sm.xrange(2+width_steps-1, 0, -1):
            if w not in [0, width_steps+2-1]:
                x = width_step_size * w
                y = height_step_size*2
                points.append((x, y))
                image[y-s:y+s+1, x-s:x+s+1] = 255
        cba = cba_class(points)
        cbaoi = cbaoi_class([cba], shape=image.shape)
        aug = iaa.ElasticTransformation(alpha=100, sigma=7)
        # deterministic mode ensures images and CBAs get the same
        # displacement field
        aug_det = aug.to_deterministic()
        images_aug = aug_det.augment_images([image, image])
        cbaois_aug = getattr(aug_det, augf_name)([cbaoi, cbaoi])
        count_bad = 0
        for image_aug, cbaoi_aug in zip(images_aug, cbaois_aug):
            assert cbaoi_aug.shape == image.shape
            assert len(cbaoi_aug.items) == 1
            for cba_aug in cbaoi_aug.items:
                if hasattr(cba_aug, "is_valid"):
                    assert cba_aug.is_valid
                for point_aug in cba_aug.coords:
                    x, y = point_aug[0], point_aug[1]
                    # check a small neighbourhood around the augmented
                    # coordinate for bright marker pixels
                    bb = ia.BoundingBox(x1=x-2, x2=x+2, y1=y-2, y2=y+2)
                    img_ex = bb.extract_from_image(image_aug)
                    if np.any(img_ex > 10):
                        pass  # close to expected location
                    else:
                        count_bad += 1
        assert count_bad <= 3
@classmethod
def _test_empty_cbaois(cls, cbaoi, augf_name):
aug = iaa.ElasticTransformation(alpha=10, sigma=10)
cbaoi_aug = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(cbaoi_aug, cbaoi)
# -----------
# polygons
# -----------
def test_polygons_no_movement_if_alpha_below_threshold(self):
self._test_cbaois_no_movement_if_alpha_below_threshold(
ia.Polygon, ia.PolygonsOnImage, "augment_polygons")
def test_polygons_no_movement_if_sigma_below_threshold(self):
self._test_cbaois_no_movement_if_sigma_below_threshold(
ia.Polygon, ia.PolygonsOnImage, "augment_polygons")
def test_polygons_small_movement_for_weak_alpha_if_threshold_zero(self):
self._test_cbaois_small_movement_for_weak_alpha_if_threshold_zero(
ia.Polygon, ia.PolygonsOnImage, "augment_polygons")
def test_image_polygon_alignment(self):
self._test_image_cbaoi_alignment(
ia.Polygon, ia.PolygonsOnImage, "augment_polygons")
def test_empty_polygons(self):
cbaoi = ia.PolygonsOnImage([], shape=(10, 10, 3))
self._test_empty_cbaois(cbaoi, "augment_polygons")
# -----------
# line strings
# -----------
def test_line_strings_no_movement_if_alpha_below_threshold(self):
self._test_cbaois_no_movement_if_alpha_below_threshold(
ia.LineString, ia.LineStringsOnImage, "augment_line_strings")
def test_line_strings_no_movement_if_sigma_below_threshold(self):
self._test_cbaois_no_movement_if_sigma_below_threshold(
ia.LineString, ia.LineStringsOnImage, "augment_line_strings")
def test_line_strings_small_movement_for_weak_alpha_if_threshold_zero(self):
self._test_cbaois_small_movement_for_weak_alpha_if_threshold_zero(
ia.LineString, ia.LineStringsOnImage, "augment_line_strings")
def test_image_line_string_alignment(self):
self._test_image_cbaoi_alignment(
ia.LineString, ia.LineStringsOnImage, "augment_line_strings")
def test_empty_line_strings(self):
cbaoi = ia.LineStringsOnImage([], shape=(10, 10, 3))
self._test_empty_cbaois(cbaoi, "augment_line_strings")
# -----------
# bounding boxes
# -----------
    def test_bounding_boxes_no_movement_if_alpha_below_threshold(self):
        """Bounding boxes must not move when alpha is below the threshold."""
        # for small alpha, should not move if below threshold
        with _elastic_trans_temp_thresholds(alpha=1.0, sigma=0.0):
            bbs = [
                ia.BoundingBox(x1=10, y1=12, x2=20, y2=22),
                ia.BoundingBox(x1=20, y1=32, x2=40, y2=42)
            ]
            bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(50, 50))
            # alpha=0.001 is below the temporary threshold of 1.0
            aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)
            observed = aug.augment_bounding_boxes([bbsoi])[0]
            # mean squared corner distance; (2 boxes x 2 corners, 2 coords)
            d = bbsoi.to_xyxy_array() - observed.to_xyxy_array()
            d = d.reshape((2*2, 2))
            d[:, 0] = d[:, 0] ** 2
            d[:, 1] = d[:, 1] ** 2
            d = np.sum(d, axis=1)
            d = np.average(d, axis=0)
            assert d < 1e-8
    def test_bounding_boxes_no_movement_if_sigma_below_threshold(self):
        """Bounding boxes must not move when sigma is below the threshold."""
        # for small sigma, should not move if below threshold
        with _elastic_trans_temp_thresholds(alpha=0.0, sigma=1.0):
            bbs = [
                ia.BoundingBox(x1=10, y1=12, x2=20, y2=22),
                ia.BoundingBox(x1=20, y1=32, x2=40, y2=42)
            ]
            bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(50, 50))
            # sigma=0.001 is below the temporary threshold of 1.0
            aug = iaa.ElasticTransformation(alpha=1.0, sigma=0.001)
            observed = aug.augment_bounding_boxes([bbsoi])[0]
            # mean squared corner distance; (2 boxes x 2 corners, 2 coords)
            d = bbsoi.to_xyxy_array() - observed.to_xyxy_array()
            d = d.reshape((2*2, 2))
            d[:, 0] = d[:, 0] ** 2
            d[:, 1] = d[:, 1] ** 2
            d = np.sum(d, axis=1)
            d = np.average(d, axis=0)
            assert d < 1e-8
    def test_bounding_boxes_small_movement_for_weak_alpha_if_threshold_zero(
            self):
        """With zeroed thresholds, a tiny alpha moves boxes only slightly."""
        # for small alpha (at sigma 1.0), should barely move
        # if thresholds set to zero
        with _elastic_trans_temp_thresholds(alpha=0.0, sigma=0.0):
            bbs = [
                ia.BoundingBox(x1=10, y1=12, x2=20, y2=22),
                ia.BoundingBox(x1=20, y1=32, x2=40, y2=42)
            ]
            bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(50, 50))
            aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)
            observed = aug.augment_bounding_boxes([bbsoi])[0]
            # mean squared corner distance; some movement is allowed,
            # but it must be small
            d = bbsoi.to_xyxy_array() - observed.to_xyxy_array()
            d = d.reshape((2*2, 2))
            d[:, 0] = d[:, 0] ** 2
            d[:, 1] = d[:, 1] ** 2
            d = np.sum(d, axis=1)
            d = np.average(d, axis=0)
            assert d < 0.5
    def test_image_bounding_box_alignment(self):
        """Augmented box edges must stay near the augmented white frame."""
        # test alignment between images and bounding boxes
        image = np.zeros((100, 100), dtype=np.uint8)
        # draw a white rectangle outline matching the bounding box
        image[35:35+1, 35:65+1] = 255
        image[65:65+1, 35:65+1] = 255
        image[35:65+1, 35:35+1] = 255
        image[35:65+1, 65:65+1] = 255
        bbs = [
            ia.BoundingBox(x1=35.5, y1=35.5, x2=65.5, y2=65.5)
        ]
        bbsoi = ia.BoundingBoxesOnImage(bbs, shape=image.shape)
        aug = iaa.ElasticTransformation(alpha=70, sigma=5)
        # single call => same sampled displacement for images and boxes
        images_aug, bbsois_aug = aug(images=[image, image],
                                     bounding_boxes=[bbsoi, bbsoi])
        count_bad = 0
        for image_aug, bbsoi_aug in zip(images_aug, bbsois_aug):
            assert bbsoi_aug.shape == (100, 100)
            assert len(bbsoi_aug.bounding_boxes) == 1
            for bb_aug in bbsoi_aug.bounding_boxes:
                if bb_aug.is_fully_within_image(image_aug):
                    # top, bottom, left, right
                    x1 = bb_aug.x1_int
                    x2 = bb_aug.x2_int
                    y1 = bb_aug.y1_int
                    y2 = bb_aug.y2_int
                    # narrow strips along each augmented box edge; each must
                    # contain bright pixels of the augmented rectangle outline
                    top_row = image_aug[y1-2:y1+2, x1-2:x2+2]
                    btm_row = image_aug[y2-2:y2+2, x1-2:x2+2]
                    lft_row = image_aug[y1-2:y2+2, x1-2:x1+2]
                    rgt_row = image_aug[y1-2:y2+2, x2-2:x2+2]
                    assert np.max(top_row) > 10
                    assert np.max(btm_row) > 10
                    assert np.max(lft_row) > 10
                    assert np.max(rgt_row) > 10
                else:
                    count_bad += 1
        assert count_bad <= 1
def test_empty_bounding_boxes(self):
aug = iaa.ElasticTransformation(alpha=10, sigma=10)
bbsoi = ia.BoundingBoxesOnImage([], shape=(10, 10, 3))
bbsoi_aug = aug.augment_bounding_boxes(bbsoi)
assert len(bbsoi_aug.bounding_boxes) == 0
assert bbsoi_aug.shape == (10, 10, 3)
# -----------
# heatmaps alignment
# -----------
    def test_image_heatmaps_alignment(self):
        """Image and same-size heatmap must be warped near-identically."""
        # test alignment between images and heatmaps
        img = np.zeros((80, 80), dtype=np.uint8)
        # white cross pattern
        img[:, 30:50] = 255
        img[30:50, :] = 255
        hm = HeatmapsOnImage(img.astype(np.float32)/255.0, shape=(80, 80))
        aug = iaa.ElasticTransformation(alpha=60.0, sigma=4.0, mode="constant",
                                        cval=0)
        # deterministic mode => same displacement field for image and heatmap
        aug_det = aug.to_deterministic()
        img_aug = aug_det.augment_image(img)
        hm_aug = aug_det.augment_heatmaps([hm])[0]
        # binarize both outputs and compare overlap
        img_aug_mask = img_aug > 255*0.1
        hm_aug_mask = hm_aug.arr_0to1 > 0.1
        same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
        assert hm_aug.shape == (80, 80)
        assert hm_aug.arr_0to1.shape == (80, 80, 1)
        # at least 99% of the pixels must agree
        assert (same / img_aug_mask.size) >= 0.99
    def test_image_heatmaps_alignment_if_heatmaps_smaller_than_image(self):
        """Alignment must also hold when the heatmap is smaller than image."""
        # test alignment between images and heatmaps
        # here with heatmaps that are smaller than the image
        img = np.zeros((80, 80), dtype=np.uint8)
        # white cross pattern
        img[:, 30:50] = 255
        img[30:50, :] = 255
        img_small = ia.imresize_single_image(
            img, (40, 40), interpolation="nearest")
        hm = HeatmapsOnImage(
            img_small.astype(np.float32)/255.0,
            shape=(80, 80))
        aug = iaa.ElasticTransformation(
            alpha=60.0, sigma=4.0, mode="constant", cval=0)
        # deterministic mode => same displacement field for image and heatmap
        aug_det = aug.to_deterministic()
        img_aug = aug_det.augment_image(img)
        hm_aug = aug_det.augment_heatmaps([hm])[0]
        # binarize both, upscaling the heatmap back to image size first
        img_aug_mask = img_aug > 255*0.1
        hm_aug_mask = ia.imresize_single_image(
            hm_aug.arr_0to1, (80, 80), interpolation="nearest"
        ) > 0.1
        same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])
        assert hm_aug.shape == (80, 80)
        assert hm_aug.arr_0to1.shape == (40, 40, 1)
        # looser bound than the same-size case due to resize artifacts
        assert (same / img_aug_mask.size) >= 0.94
# -----------
# segmaps alignment
# -----------
    def test_image_segmaps_alignment(self):
        """Image and same-size segmap must be warped near-identically."""
        # test alignment between images and segmaps
        img = np.zeros((80, 80), dtype=np.uint8)
        # white cross pattern
        img[:, 30:50] = 255
        img[30:50, :] = 255
        segmaps = SegmentationMapsOnImage(
            (img > 0).astype(np.int32),
            shape=(80, 80))
        # order=0 keeps segmap class ids intact (no interpolation)
        aug = iaa.ElasticTransformation(
            alpha=60.0, sigma=4.0, mode="constant", cval=0, order=0)
        # deterministic mode => same displacement field for image and segmap
        aug_det = aug.to_deterministic()
        img_aug = aug_det.augment_image(img)
        segmaps_aug = aug_det.augment_segmentation_maps([segmaps])[0]
        # binarize both outputs and compare overlap
        img_aug_mask = img_aug > 255*0.1
        segmaps_aug_mask = segmaps_aug.arr > 0
        same = np.sum(img_aug_mask == segmaps_aug_mask[:, :, 0])
        assert segmaps_aug.shape == (80, 80)
        assert segmaps_aug.arr.shape == (80, 80, 1)
        # at least 99% of the pixels must agree
        assert (same / img_aug_mask.size) >= 0.99
    def test_image_segmaps_alignment_if_heatmaps_smaller_than_image(self):
        """Alignment must also hold when the segmap is smaller than image."""
        # test alignment between images and segmaps
        # here with segmaps that are smaller than the image
        img = np.zeros((80, 80), dtype=np.uint8)
        # white cross pattern
        img[:, 30:50] = 255
        img[30:50, :] = 255
        img_small = ia.imresize_single_image(
            img, (40, 40), interpolation="nearest")
        segmaps = SegmentationMapsOnImage(
            (img_small > 0).astype(np.int32), shape=(80, 80))
        # order=0 keeps segmap class ids intact (no interpolation)
        aug = iaa.ElasticTransformation(
            alpha=60.0, sigma=4.0, mode="constant", cval=0, order=0)
        aug_det = aug.to_deterministic()
        img_aug = aug_det.augment_image(img)
        segmaps_aug = aug_det.augment_segmentation_maps([segmaps])[0]
        # binarize both, upscaling the segmap back to image size first
        img_aug_mask = img_aug > 255*0.1
        segmaps_aug_mask = ia.imresize_single_image(
            segmaps_aug.arr, (80, 80), interpolation="nearest") > 0
        same = np.sum(img_aug_mask == segmaps_aug_mask[:, :, 0])
        assert segmaps_aug.shape == (80, 80)
        assert segmaps_aug.arr.shape == (40, 40, 1)
        # looser bound than the same-size case due to resize artifacts
        assert (same / img_aug_mask.size) >= 0.94
# ---------
# unusual channel numbers
# ---------
    def test_unusual_channel_numbers(self):
        """Augmentation must work for 4, 5, 512 and 513 channels."""
        shapes = [
            (1, 1, 4),
            (1, 1, 5),
            (1, 1, 512),
            (1, 1, 513)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                aug = iaa.ElasticTransformation(alpha=2.0, sigma=2.0)
                image_aug = aug(image=image)
                # all-zero input must stay all-zero; dtype and shape preserved
                assert np.all(image_aug == 0)
                assert image_aug.dtype.name == "uint8"
                assert image_aug.shape == shape
# ---------
# zero-sized axes
# ---------
    def test_zero_sized_axes(self):
        """Images with zero-sized axes must pass through without errors."""
        shapes = [
            (0, 0),
            (0, 1),
            (1, 0),
            (0, 1, 0),
            (1, 0, 0),
            (0, 1, 1),
            (1, 0, 1)
        ]
        for shape in shapes:
            for keep_size in [False, True]:
                with self.subTest(shape=shape, keep_size=keep_size):
                    # repeat a few times as the augmenter is stochastic
                    for _ in sm.xrange(3):
                        image = np.zeros(shape, dtype=np.uint8)
                        aug = iaa.ElasticTransformation(alpha=2.0, sigma=2.0)
                        image_aug = aug(image=image)
                        assert image_aug.dtype.name == "uint8"
                        assert image_aug.shape == shape
# -----------
# get_parameters
# -----------
def test_get_parameters(self):
aug = iaa.ElasticTransformation(
alpha=0.25, sigma=1.0, order=2, cval=10, mode="constant")
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert isinstance(params[3], iap.Deterministic)
assert isinstance(params[4], iap.Deterministic)
assert 0.25 - 1e-8 < params[0].value < 0.25 + 1e-8
assert 1.0 - 1e-8 < params[1].value < 1.0 + 1e-8
assert params[2].value == 2
assert params[3].value == 10
assert params[4].value == "constant"
# -----------
# other dtypes
# -----------
    def test_other_dtypes_bool(self):
        """Bool images must keep dtype and be visibly displaced."""
        aug = iaa.ElasticTransformation(sigma=0.5, alpha=5, order=0)
        mask = np.zeros((21, 21), dtype=bool)
        # True square in the center
        mask[7:13, 7:13] = True
        image = np.zeros((21, 21), dtype=bool)
        image[mask] = True
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.name == image.dtype.name
        # not all pixels may be True, but some True pixels must have moved
        # outside the original square
        assert not np.all(image_aug == 1)
        assert np.any(image_aug[~mask] == 1)
    def test_other_dtypes_uint_int(self):
        """(u)int images up to 32 bit must keep dtype and be displaced."""
        aug = iaa.ElasticTransformation(sigma=0.5, alpha=5, order=0)
        mask = np.zeros((21, 21), dtype=bool)
        # True square in the center
        mask[7:13, 7:13] = True
        dtypes = ["uint8", "uint16", "uint32", "int8", "int16", "int32"]
        for dtype in dtypes:
            min_value, center_value, max_value = \
                iadt.get_value_range_of_dtype(dtype)
            image = np.zeros((21, 21), dtype=dtype)
            # max-value square in the center
            image[7:13, 7:13] = max_value
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.name == dtype
            # some max-value pixels must have moved outside the square
            assert not np.all(image_aug == max_value)
            assert np.any(image_aug[~mask] == max_value)
    def test_other_dtypes_float(self):
        """float16/32/64 images must keep dtype and be displaced."""
        aug = iaa.ElasticTransformation(sigma=0.5, alpha=5, order=0)
        mask = np.zeros((21, 21), dtype=bool)
        # True square in the center
        mask[7:13, 7:13] = True
        for dtype in ["float16", "float32", "float64"]:
            def _isclose(a, b):
                # float16 has much lower precision, use a looser tolerance
                atol = 1e-4 if dtype == "float16" else 1e-8
                return np.isclose(a, b, atol=atol, rtol=0)

            isize = np.dtype(dtype).itemsize
            # test values spanning several orders of magnitude, scaled with
            # the dtype's size, plus their negatives
            values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),
                      1000 ** (isize - 1)]
            values = values + [(-1) * value for value in values]
            for value in values:
                with self.subTest(dtype=dtype, value=value):
                    image = np.zeros((21, 21), dtype=dtype)
                    image[7:13, 7:13] = value
                    image_aug = aug.augment_image(image)
                    assert image_aug.dtype.name == dtype
                    # some value-pixels must have moved outside the square
                    assert not np.all(_isclose(image_aug, np.float128(value)))
                    assert np.any(_isclose(image_aug[~mask],
                                           np.float128(value)))
    def test_other_dtypes_bool_all_orders(self):
        """Bool images must work for all interpolation orders 0..5."""
        mask = np.zeros((50, 50), dtype=bool)
        # True plus/cross shape in the center
        mask[10:40, 20:30] = True
        mask[20:30, 10:40] = True
        for order in [0, 1, 2, 3, 4, 5]:
            aug = iaa.ElasticTransformation(sigma=1.0, alpha=50, order=order)
            image = np.zeros((50, 50), dtype=bool)
            image[mask] = True
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.name == image.dtype.name
            # some True pixels must have moved outside the original shape
            assert not np.all(image_aug == 1)
            assert np.any(image_aug[~mask] == 1)
    def test_other_dtypes_uint_int_all_orders(self):
        """(u)int images must work for all interpolation orders 0..5."""
        mask = np.zeros((50, 50), dtype=bool)
        # True plus/cross shape in the center
        mask[10:40, 20:30] = True
        mask[20:30, 10:40] = True
        for order in [0, 1, 2, 3, 4, 5]:
            aug = iaa.ElasticTransformation(sigma=1.0, alpha=50, order=order)
            dtypes = ["uint8", "uint16", "uint32", "uint64",
                      "int8", "int16", "int32", "int64"]
            # 64-bit integer types are only supported for order=0
            if order == 0:
                dtypes = ["uint8", "uint16", "uint32",
                          "int8", "int16", "int32"]
            for dtype in dtypes:
                with self.subTest(dtype=dtype):
                    min_value, center_value, max_value = \
                        iadt.get_value_range_of_dtype(dtype)
                    dynamic_range = max_value - min_value
                    image = np.zeros((50, 50), dtype=dtype)
                    image[mask] = max_value
                    image_aug = aug.augment_image(image)
                    assert image_aug.dtype.name == dtype
                    if order == 0:
                        # nearest neighbour: values stay exact
                        assert not np.all(image_aug == max_value)
                        assert np.any(image_aug[~mask] == max_value)
                    else:
                        # interpolating orders may blend values, so compare
                        # with a tolerance of 10% of the dynamic range
                        atol = 0.1 * dynamic_range
                        assert not np.all(
                            np.isclose(image_aug,
                                       max_value,
                                       rtol=0, atol=atol)
                        )
                        assert np.any(
                            np.isclose(image_aug[~mask],
                                       max_value,
                                       rtol=0, atol=atol))
    def test_other_dtypes_float_all_orders(self):
        """float images must work for all interpolation orders 0..5."""
        mask = np.zeros((50, 50), dtype=bool)
        # True plus/cross shape in the center
        mask[10:40, 20:30] = True
        mask[20:30, 10:40] = True
        for order in [0, 1, 2, 3, 4, 5]:
            aug = iaa.ElasticTransformation(sigma=1.0, alpha=50, order=order)
            dtypes = ["float16", "float32", "float64"]
            for dtype in dtypes:
                with self.subTest(dtype=dtype):
                    min_value, center_value, max_value = \
                        iadt.get_value_range_of_dtype(dtype)

                    def _isclose(a, b):
                        # float16 has much lower precision
                        atol = 1e-4 if dtype == "float16" else 1e-8
                        return np.isclose(a, b, atol=atol, rtol=0)

                    # test value well inside the dtype's range
                    value = (
                        0.1 * max_value
                        if dtype != "float64"
                        else 0.0001 * max_value)
                    image = np.zeros((50, 50), dtype=dtype)
                    image[mask] = value
                    image_aug = aug.augment_image(image)
                    if order == 0:
                        # nearest neighbour: values stay exact
                        assert image_aug.dtype.name == dtype
                        assert not np.all(
                            _isclose(image_aug, np.float128(value))
                        )
                        assert np.any(
                            _isclose(image_aug[~mask], np.float128(value))
                        )
                    else:
                        # interpolating orders may blend values; use a
                        # dtype-dependent tolerance
                        atol = (
                            10
                            if dtype == "float16"
                            else 0.00001 * max_value)
                        assert not np.all(
                            np.isclose(
                                image_aug,
                                np.float128(value),
                                rtol=0, atol=atol
                            ))
                        assert np.any(
                            np.isclose(
                                image_aug[~mask],
                                np.float128(value),
                                rtol=0, atol=atol
                            ))
    def test_pickleable(self):
        """Pickled+unpickled augmenter must produce identical outputs."""
        aug = iaa.ElasticTransformation(alpha=(0.2, 1.5), sigma=(1.0, 10.0),
                                        seed=1)
        runtest_pickleable_uint8_img(aug, iterations=4, shape=(25, 25, 1))
class _TwoValueParam(iap.StochasticParameter):
    """Stochastic parameter that alternates between two fixed values.

    Even sample indices (0, 2, 4, ...) receive ``v1``, odd indices
    receive ``v2``. Samples are drawn as int32.
    """
    def __init__(self, v1, v2):
        super(_TwoValueParam, self).__init__()
        self.v1 = v1  # value at even sample indices
        self.v2 = v2  # value at odd sample indices
    def _draw_samples(self, size, random_state):
        # fill with v1, then overwrite every second entry with v2
        arr = np.full(size, self.v1, dtype=np.int32)
        arr[1::2] = self.v2
        return arr
class TestRot90(unittest.TestCase):
    @property
    def kp_offset(self):
        """Coordinate offset used in expected keypoint rotation results."""
        # set this to -1 when using integer-based KP rotation instead of
        # subpixel/float-based rotation
        return 0
    @property
    def image(self):
        """4x4x3 uint8 test image with unique ascending values 0..47."""
        return np.arange(4*4*3).reshape((4, 4, 3)).astype(np.uint8)
    @property
    def heatmaps(self):
        """Heatmap derived from channel 0 of ``image``, divided by 255."""
        return HeatmapsOnImage(self.image[..., 0:1].astype(np.float32) / 255,
                               shape=(4, 4, 3))
    @property
    def heatmaps_smaller(self):
        """2x3 heatmap placed on a larger (4, 8, 3) image."""
        return HeatmapsOnImage(
            np.float32([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]), shape=(4, 8, 3))
    @property
    def segmaps(self):
        """Segmentation map derived from channel 0 of ``image``."""
        return SegmentationMapsOnImage(
            self.image[..., 0:1].astype(np.int32), shape=(4, 4, 3))
    @property
    def segmaps_smaller(self):
        """2x3 segmentation map placed on a larger (4, 8, 3) image."""
        return SegmentationMapsOnImage(
            np.int32([[0, 1, 2], [3, 4, 5]]), shape=(4, 8, 3))
    @property
    def kpsoi(self):
        """Two keypoints, (1, 2) and (2, 3), on a (4, 8, 3) image."""
        kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=2, y=3)]
        return ia.KeypointsOnImage(kps, shape=(4, 8, 3))
    @property
    def psoi(self):
        """One square polygon (1,1)-(3,3) on a (4, 8, 3) image."""
        return ia.PolygonsOnImage(
            [ia.Polygon([(1, 1), (3, 1), (3, 3), (1, 3)])],
            shape=(4, 8, 3)
        )
    @property
    def lsoi(self):
        """One square line string (1,1)-(3,3) on a (4, 8, 3) image."""
        return ia.LineStringsOnImage(
            [ia.LineString([(1, 1), (3, 1), (3, 3), (1, 3)])],
            shape=(4, 8, 3)
        )
    @property
    def bbsoi(self):
        """One bounding box (1,1)-(3,3) on a (4, 8, 3) image."""
        return ia.BoundingBoxesOnImage(
            [ia.BoundingBox(x1=1, y1=1, x2=3, y2=3)],
            shape=(4, 8, 3)
        )
    @property
    def kpsoi_k1(self):
        """Expected keypoints after one 90deg rotation (no keep_size).

        A rotation maps ``(x, y) -> (height - y + kp_offset, x)`` with the
        pre-rotation height 4; the image shape flips from (4, 8) to (8, 4).
        """
        # without keep size
        kp_offset = self.kp_offset
        expected_k1_kps = [(4-2+kp_offset, 1),
                           (4-3+kp_offset, 2)]
        kps = [ia.Keypoint(x, y) for x, y in expected_k1_kps]
        return ia.KeypointsOnImage(kps, shape=(8, 4, 3))
    @property
    def kpsoi_k2(self):
        """Expected keypoints after two 90deg rotations (no keep_size).

        Derived by rotating the ``kpsoi_k1`` expectation once more:
        ``(x, y) -> (height - y + kp_offset, x)`` with height 8.
        """
        # without keep size
        kp_offset = self.kp_offset
        expected_k1_kps = self.kpsoi_k1.to_xy_array()
        expected_k2_kps = [
            (8-expected_k1_kps[0][1]+kp_offset, expected_k1_kps[0][0]),
            (8-expected_k1_kps[1][1]+kp_offset, expected_k1_kps[1][0])]
        kps = [ia.Keypoint(x, y) for x, y in expected_k2_kps]
        return ia.KeypointsOnImage(kps, shape=(4, 8, 3))
    @property
    def kpsoi_k3(self):
        """Expected keypoints after three 90deg rotations (no keep_size).

        Derived by rotating the ``kpsoi_k2`` expectation once more:
        ``(x, y) -> (height - y + kp_offset, x)`` with height 4.
        """
        # without keep size
        kp_offset = self.kp_offset
        expected_k2_kps = self.kpsoi_k2.to_xy_array()
        expected_k3_kps = [
            (4-expected_k2_kps[0][1]+kp_offset, expected_k2_kps[0][0]),
            (4-expected_k2_kps[1][1]+kp_offset, expected_k2_kps[1][0])]
        kps = [ia.Keypoint(x, y) for x, y in expected_k3_kps]
        return ia.KeypointsOnImage(kps, shape=(8, 4, 3))
    @property
    def psoi_k1(self):
        """Expected polygon after one 90deg rotation (no keep_size).

        Each corner of ``psoi`` is mapped via
        ``(x, y) -> (height - y + kp_offset, x)`` with height 4.
        """
        # without keep size
        kp_offset = self.kp_offset
        expected_k1_polys = [(4-1+kp_offset, 1),
                             (4-1+kp_offset, 3),
                             (4-3+kp_offset, 3),
                             (4-3+kp_offset, 1)]
        return ia.PolygonsOnImage([ia.Polygon(expected_k1_polys)],
                                  shape=(8, 4, 3))
    @property
    def psoi_k2(self):
        """Expected polygon after two 90deg rotations (no keep_size).

        Derived by rotating the ``psoi_k1`` expectation once more:
        ``(x, y) -> (height - y + kp_offset, x)`` with height 8.
        """
        # without keep size
        kp_offset = self.kp_offset
        expected_k1_polys = self.psoi_k1.polygons[0].exterior
        expected_k2_polys = [
            (8-expected_k1_polys[0][1]+kp_offset, expected_k1_polys[0][0]),
            (8-expected_k1_polys[1][1]+kp_offset, expected_k1_polys[1][0]),
            (8-expected_k1_polys[2][1]+kp_offset, expected_k1_polys[2][0]),
            (8-expected_k1_polys[3][1]+kp_offset, expected_k1_polys[3][0])]
        return ia.PolygonsOnImage([ia.Polygon(expected_k2_polys)],
                                  shape=(4, 8, 3))
    @property
    def psoi_k3(self):
        """Expected polygon after three 90deg rotations (no keep_size).

        Derived by rotating the ``psoi_k2`` expectation once more:
        ``(x, y) -> (height - y + kp_offset, x)`` with height 4.
        """
        # without keep size
        kp_offset = self.kp_offset
        expected_k2_polys = self.psoi_k2.polygons[0].exterior
        expected_k3_polys = [
            (4-expected_k2_polys[0][1]+kp_offset, expected_k2_polys[0][0]),
            (4-expected_k2_polys[1][1]+kp_offset, expected_k2_polys[1][0]),
            (4-expected_k2_polys[2][1]+kp_offset, expected_k2_polys[2][0]),
            (4-expected_k2_polys[3][1]+kp_offset, expected_k2_polys[3][0])]
        return ia.PolygonsOnImage([ia.Polygon(expected_k3_polys)],
                                  shape=(8, 4, 3))
    @property
    def lsoi_k1(self):
        """Expected line string after one 90deg rotation (no keep_size).

        Each point of ``lsoi`` is mapped via
        ``(x, y) -> (height - y + kp_offset, x)`` with height 4.
        """
        # without keep size
        kp_offset = self.kp_offset
        expected_k1_ls = [(4-1+kp_offset, 1),
                          (4-1+kp_offset, 3),
                          (4-3+kp_offset, 3),
                          (4-3+kp_offset, 1)]
        return ia.LineStringsOnImage([ia.LineString(expected_k1_ls)],
                                     shape=(8, 4, 3))
@property
def lsoi_k2(self):
# without keep size
kp_offset = self.kp_offset
expected_k1_ls = self.psoi_k1.items[0].coords
expected_k2_ls = [
(8-expected_k1_ls[0][1]+kp_offset, expected_k1_ls[0][0]),
(8-expected_k1_ls[1][1]+kp_offset, expected_k1_ls[1][0]),
(8-expected_k1_ls[2][1]+kp_offset, expected_k1_ls[2][0]),
(8-expected_k1_ls[3][1]+kp_offset, expected_k1_ls[3][0])]
return ia.LineStringsOnImage([ia.LineString(expected_k2_ls)],
shape=(4, 8, 3))
    @property
    def lsoi_k3(self):
        """Expected line string after three 90deg rotations (no keep_size).

        Derived by rotating the ``lsoi_k2`` expectation once more:
        ``(x, y) -> (height - y + kp_offset, x)`` with height 4.
        """
        # without keep size
        kp_offset = self.kp_offset
        expected_k2_ls = self.lsoi_k2.items[0].coords
        expected_k3_ls = [
            (4-expected_k2_ls[0][1]+kp_offset, expected_k2_ls[0][0]),
            (4-expected_k2_ls[1][1]+kp_offset, expected_k2_ls[1][0]),
            (4-expected_k2_ls[2][1]+kp_offset, expected_k2_ls[2][0]),
            (4-expected_k2_ls[3][1]+kp_offset, expected_k2_ls[3][0])]
        return ia.LineStringsOnImage([ia.LineString(expected_k3_ls)],
                                     shape=(8, 4, 3))
    @property
    def bbsoi_k1(self):
        """Expected bounding box after one 90deg rotation (no keep_size).

        The two corners of ``bbsoi`` are mapped via
        ``(x, y) -> (height - y + kp_offset, x)`` with height 4, then
        re-normalized via min/max so x1<=x2 and y1<=y2.
        """
        # without keep size
        kp_offset = self.kp_offset
        expected_k1_coords = [
            (4-1+kp_offset, 1),
            (4-3+kp_offset, 3)]
        return ia.BoundingBoxesOnImage([
            ia.BoundingBox(
                x1=min(expected_k1_coords[0][0], expected_k1_coords[1][0]),
                y1=min(expected_k1_coords[0][1], expected_k1_coords[1][1]),
                x2=max(expected_k1_coords[1][0], expected_k1_coords[0][0]),
                y2=max(expected_k1_coords[1][1], expected_k1_coords[0][1])
            )], shape=(8, 4, 3))
    @property
    def bbsoi_k2(self):
        """Expected bounding box after two 90deg rotations (no keep_size).

        Derived by rotating the ``bbsoi_k1`` expectation once more
        (height 8), then re-normalizing corners via min/max.
        """
        # without keep size
        kp_offset = self.kp_offset
        coords = self.bbsoi_k1.bounding_boxes[0].coords
        expected_k2_coords = [
            (8-coords[0][1]+kp_offset, coords[0][0]),
            (8-coords[1][1]+kp_offset, coords[1][0])]
        return ia.BoundingBoxesOnImage([
            ia.BoundingBox(
                x1=min(expected_k2_coords[0][0], expected_k2_coords[1][0]),
                y1=min(expected_k2_coords[0][1], expected_k2_coords[1][1]),
                x2=max(expected_k2_coords[1][0], expected_k2_coords[0][0]),
                y2=max(expected_k2_coords[1][1], expected_k2_coords[0][1])
            )],
            shape=(4, 8, 3))
@property
def bbsoi_k3(self):
# without keep size
kp_offset = self.kp_offset
coords = self.bbsoi_k2.bounding_boxes[0].coords
expected_k3_coords = [
(4-coords[0][1]+kp_offset, coords[0][0]),
(4-coords[1][1]+kp_offset, coords[1][0])]
return ia.BoundingBoxesOnImage([
ia.BoundingBox(
x1=min(expected_k3_coords[0][0], expected_k3_coords[1][0]),
y1=min(expected_k3_coords[0][1], expected_k3_coords[1][1]),
x2=max(expected_k3_coords[1][0], expected_k3_coords[0][0]),
y2=max(expected_k3_coords[1][1], expected_k3_coords[0][1])
)],
shape=(8, 4, 3))
def test___init___k_is_list(self):
aug = iaa.Rot90([1, 3])
assert isinstance(aug.k, iap.Choice)
assert len(aug.k.a) == 2
assert aug.k.a[0] == 1
assert aug.k.a[1] == 3
def test___init___k_is_all(self):
aug = iaa.Rot90(ia.ALL)
assert isinstance(aug.k, iap.Choice)
assert len(aug.k.a) == 4
assert aug.k.a == [0, 1, 2, 3]
def test_images_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
img_aug = aug.augment_image(self.image)
assert img_aug.dtype.name == "uint8"
assert np.array_equal(img_aug, self.image)
def test_heatmaps_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
hms_aug = aug.augment_heatmaps([self.heatmaps])[0]
assert (hms_aug.arr_0to1.dtype.name
== self.heatmaps.arr_0to1.dtype.name)
assert np.allclose(hms_aug.arr_0to1, self.heatmaps.arr_0to1)
assert hms_aug.shape == self.heatmaps.shape
def test_segmaps_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
segmaps_aug = aug.augment_segmentation_maps(
[self.segmaps]
)[0]
assert (
segmaps_aug.arr.dtype.name
== self.segmaps.arr.dtype.name)
assert np.allclose(segmaps_aug.arr, self.segmaps.arr)
assert segmaps_aug.shape == self.segmaps.shape
def test_keypoints_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
kpsoi_aug = aug.augment_keypoints([self.kpsoi])[0]
assert_cbaois_equal(kpsoi_aug, self.kpsoi)
def test_polygons_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
psoi_aug = aug.augment_polygons(self.psoi)
assert_cbaois_equal(psoi_aug, self.psoi)
def test_line_strings_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
lsoi_aug = aug.augment_line_strings(self.lsoi)
assert_cbaois_equal(lsoi_aug, self.lsoi)
def test_bounding_boxes_k_is_0_and_4(self):
for k in [0, 4]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
bbsoi_aug = aug.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(bbsoi_aug, self.bbsoi)
def test_images_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
img_aug = aug.augment_image(self.image)
assert img_aug.dtype.name == "uint8"
assert np.array_equal(img_aug,
np.rot90(self.image, 1, axes=(1, 0)))
def test_heatmaps_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
hms_aug = aug.augment_heatmaps([self.heatmaps])[0]
assert (hms_aug.arr_0to1.dtype.name
== self.heatmaps.arr_0to1.dtype.name)
assert np.allclose(
hms_aug.arr_0to1,
np.rot90(self.heatmaps.arr_0to1, 1, axes=(1, 0)))
assert hms_aug.shape == (4, 4, 3)
def test_heatmaps_smaller_than_image_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
hms_smaller_aug = aug.augment_heatmaps(
[self.heatmaps_smaller]
)[0]
assert (
hms_smaller_aug.arr_0to1.dtype.name
== self.heatmaps_smaller.arr_0to1.dtype.name)
assert np.allclose(
hms_smaller_aug.arr_0to1,
np.rot90(self.heatmaps_smaller.arr_0to1, 1, axes=(1, 0)))
assert hms_smaller_aug.shape == (8, 4, 3)
def test_segmaps_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
segmaps_aug = aug.augment_segmentation_maps(
[self.segmaps]
)[0]
assert (
segmaps_aug.arr.dtype.name
== self.segmaps.arr.dtype.name)
assert np.allclose(
segmaps_aug.arr,
np.rot90(self.segmaps.arr, 1, axes=(1, 0)))
assert segmaps_aug.shape == (4, 4, 3)
def test_segmaps_smaller_than_image_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
segmaps_smaller_aug = aug.augment_segmentation_maps(
self.segmaps_smaller)
assert (
segmaps_smaller_aug.arr.dtype.name
== self.segmaps_smaller.arr.dtype.name)
assert np.allclose(
segmaps_smaller_aug.arr,
np.rot90(self.segmaps_smaller.arr, 1, axes=(1, 0)))
assert segmaps_smaller_aug.shape == (8, 4, 3)
def test_keypoints_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
kpsoi_aug = aug.augment_keypoints([self.kpsoi])[0]
assert_cbaois_equal(kpsoi_aug, self.kpsoi_k1)
def test_polygons_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
psoi_aug = aug.augment_polygons(self.psoi)
assert_cbaois_equal(psoi_aug, self.psoi_k1)
def test_line_strings_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
lsoi_aug = aug.augment_line_strings(self.lsoi)
assert_cbaois_equal(lsoi_aug, self.lsoi_k1)
def test_bounding_boxes_k_is_1_and_5(self):
for k in [1, 5]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
bbsoi_aug = aug.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(bbsoi_aug, self.bbsoi_k1)
def test_images_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
img = self.image
img_aug = aug.augment_image(img)
assert img_aug.dtype.name == "uint8"
assert np.array_equal(img_aug, np.rot90(img, 2, axes=(1, 0)))
def test_heatmaps_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
hms = self.heatmaps
hms_aug = aug.augment_heatmaps([hms])[0]
assert hms_aug.arr_0to1.dtype.name == hms.arr_0to1.dtype.name
assert np.allclose(
hms_aug.arr_0to1,
np.rot90(hms.arr_0to1, 2, axes=(1, 0)))
assert hms_aug.shape == (4, 4, 3)
def test_heatmaps_smaller_than_image_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
hms_smaller = self.heatmaps_smaller
hms_smaller_aug = aug.augment_heatmaps([hms_smaller])[0]
assert (hms_smaller_aug.arr_0to1.dtype.name
== hms_smaller.arr_0to1.dtype.name)
assert np.allclose(
hms_smaller_aug.arr_0to1,
np.rot90(hms_smaller.arr_0to1, 2, axes=(1, 0)))
assert hms_smaller_aug.shape == (4, 8, 3)
def test_segmaps_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
segmaps = self.segmaps
segmaps_aug = aug.augment_segmentation_maps([segmaps])[0]
assert segmaps_aug.arr.dtype.name == segmaps.arr.dtype.name
assert np.allclose(
segmaps_aug.arr,
np.rot90(segmaps.arr, 2, axes=(1, 0)))
assert segmaps_aug.shape == (4, 4, 3)
def test_segmaps_smaller_than_image_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
segmaps_smaller = self.segmaps_smaller
segmaps_smaller_aug = aug.augment_segmentation_maps(segmaps_smaller)
assert (segmaps_smaller_aug.arr.dtype.name
== segmaps_smaller.arr.dtype.name)
assert np.allclose(
segmaps_smaller_aug.arr,
np.rot90(segmaps_smaller.arr, 2, axes=(1, 0)))
assert segmaps_smaller_aug.shape == (4, 8, 3)
def test_keypoints_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
kpsoi_aug = aug.augment_keypoints([self.kpsoi])[0]
assert_cbaois_equal(kpsoi_aug, self.kpsoi_k2)
def test_polygons_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
psoi_aug = aug.augment_polygons(self.psoi)
assert_cbaois_equal(psoi_aug, self.psoi_k2)
def test_line_strings_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
lsoi_aug = aug.augment_line_strings(self.lsoi)
assert_cbaois_equal(lsoi_aug, self.lsoi_k2)
def test_bounding_boxes_k_is_2(self):
aug = iaa.Rot90(2, keep_size=False)
bbsoi_aug = aug.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(bbsoi_aug, self.bbsoi_k2)
def test_images_k_is_3_and_minus1(self):
img = self.image
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
img_aug = aug.augment_image(img)
assert img_aug.dtype.name == "uint8"
assert np.array_equal(img_aug, np.rot90(img, 3, axes=(1, 0)))
def test_heatmaps_k_is_3_and_minus1(self):
hms = self.heatmaps
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
hms_aug = aug.augment_heatmaps([hms])[0]
assert (hms_aug.arr_0to1.dtype.name
== hms.arr_0to1.dtype.name)
assert np.allclose(
hms_aug.arr_0to1,
np.rot90(hms.arr_0to1, 3, axes=(1, 0)))
assert hms_aug.shape == (4, 4, 3)
def test_heatmaps_smaller_than_image_k_is_3_and_minus1(self):
hms_smaller = self.heatmaps_smaller
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
hms_smaller_aug = aug.augment_heatmaps([hms_smaller])[0]
assert (hms_smaller_aug.arr_0to1.dtype.name
== hms_smaller.arr_0to1.dtype.name)
assert np.allclose(
hms_smaller_aug.arr_0to1,
np.rot90(hms_smaller.arr_0to1, 3, axes=(1, 0)))
assert hms_smaller_aug.shape == (8, 4, 3)
def test_segmaps_k_is_3_and_minus1(self):
segmaps = self.segmaps
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
segmaps_aug = aug.augment_segmentation_maps([segmaps])[0]
assert (segmaps_aug.arr.dtype.name
== segmaps.arr.dtype.name)
assert np.allclose(
segmaps_aug.arr,
np.rot90(segmaps.arr, 3, axes=(1, 0)))
assert segmaps_aug.shape == (4, 4, 3)
def test_segmaps_smaller_than_image_k_is_3_and_minus1(self):
segmaps_smaller = self.segmaps_smaller
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
segmaps_smaller_aug = aug.augment_segmentation_maps(
segmaps_smaller)
assert (segmaps_smaller_aug.arr.dtype.name
== segmaps_smaller.arr.dtype.name)
assert np.allclose(
segmaps_smaller_aug.arr,
np.rot90(segmaps_smaller.arr, 3, axes=(1, 0)))
assert segmaps_smaller_aug.shape == (8, 4, 3)
def test_keypoints_k_is_3_and_minus1(self):
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
kpsoi_aug = aug.augment_keypoints([self.kpsoi])[0]
assert_cbaois_equal(kpsoi_aug, self.kpsoi_k3)
def test_polygons_k_is_3_and_minus1(self):
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
psoi_aug = aug.augment_polygons(self.psoi)
assert_cbaois_equal(psoi_aug, self.psoi_k3)
def test_line_strings_k_is_3_and_minus1(self):
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
lsoi_aug = aug.augment_line_strings(self.lsoi)
assert_cbaois_equal(lsoi_aug, self.lsoi_k3)
def test_bounding_boxes_k_is_3_and_minus1(self):
for k in [3, -1]:
with self.subTest(k=k):
aug = iaa.Rot90(k, keep_size=False)
bbsoi_aug = aug.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(bbsoi_aug, self.bbsoi_k3)
def test_images_k_is_1_verify_without_using_numpy_rot90(self):
# verify once without np.rot90
aug = iaa.Rot90(k=1, keep_size=False)
image = np.uint8([[1, 0, 0],
[0, 2, 0]])
img_aug = aug.augment_image(image)
expected = np.uint8([[0, 1], [2, 0], [0, 0]])
assert np.array_equal(img_aug, expected)
def test_images_k_is_1_keep_size_is_true(self):
# keep_size=True, k=1
aug = iaa.Rot90(1, keep_size=True)
img_nonsquare = np.arange(5*4*3).reshape((5, 4, 3)).astype(np.uint8)
img_aug = aug.augment_image(img_nonsquare)
assert img_aug.dtype.name == "uint8"
assert np.array_equal(
img_aug,
ia.imresize_single_image(
np.rot90(img_nonsquare, 1, axes=(1, 0)),
(5, 4)
)
)
def test_heatmaps_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
hms = self.heatmaps
hms_aug = aug.augment_heatmaps([hms])[0]
assert hms_aug.arr_0to1.dtype.name == hms.arr_0to1.dtype.name
assert np.allclose(
hms_aug.arr_0to1,
np.rot90(hms.arr_0to1, 1, axes=(1, 0)))
assert hms_aug.shape == (4, 4, 3)
def test_heatmaps_smaller_than_image_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
hms_smaller = self.heatmaps_smaller
hms_smaller_aug = aug.augment_heatmaps([hms_smaller])[0]
hms_smaller_rot = np.rot90(hms_smaller.arr_0to1, 1, axes=(1, 0))
hms_smaller_rot = np.clip(
ia.imresize_single_image(
hms_smaller_rot, (2, 3), interpolation="cubic"
),
0.0, 1.0)
assert (hms_smaller_aug.arr_0to1.dtype.name
== hms_smaller.arr_0to1.dtype.name)
assert np.allclose(hms_smaller_aug.arr_0to1, hms_smaller_rot)
assert hms_smaller_aug.shape == (4, 8, 3)
def test_segmaps_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
segmaps = self.segmaps
segmaps_aug = aug.augment_segmentation_maps([segmaps])[0]
assert (segmaps_aug.arr.dtype.name
== segmaps.arr.dtype.name)
assert np.allclose(segmaps_aug.arr,
np.rot90(segmaps.arr, 1, axes=(1, 0)))
assert segmaps_aug.shape == (4, 4, 3)
def test_segmaps_smaller_than_image_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
segmaps_smaller = self.segmaps_smaller
segmaps_smaller_aug = aug.augment_segmentation_maps(segmaps_smaller)
segmaps_smaller_rot = np.rot90(segmaps_smaller.arr, 1, axes=(1, 0))
segmaps_smaller_rot = ia.imresize_single_image(
segmaps_smaller_rot, (2, 3), interpolation="nearest")
assert (segmaps_smaller_aug.arr.dtype.name
== segmaps_smaller.arr.dtype.name)
assert np.allclose(segmaps_smaller_aug.arr, segmaps_smaller_rot)
assert segmaps_smaller_aug.shape == (4, 8, 3)
def test_keypoints_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
kp_offset = self.kp_offset
kpsoi = self.kpsoi
kpsoi_aug = aug.augment_keypoints([kpsoi])[0]
expected = [(4-2+kp_offset, 1), (4-3+kp_offset, 2)]
expected = [(8*x/4, 4*y/8) for x, y in expected]
assert kpsoi_aug.shape == (4, 8, 3)
for kp_aug, kp in zip(kpsoi_aug.keypoints, expected):
assert np.allclose([kp_aug.x, kp_aug.y], [kp[0], kp[1]])
def test_polygons_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
psoi = self.psoi
kp_offset = self.kp_offset
psoi_aug = aug.augment_polygons(psoi)
expected = [(4-1+kp_offset, 1), (4-1+kp_offset, 3),
(4-3+kp_offset, 3), (4-3+kp_offset, 1)]
expected = [(8*x/4, 4*y/8) for x, y in expected]
assert psoi_aug.shape == (4, 8, 3)
assert len(psoi_aug.polygons) == 1
assert psoi_aug.polygons[0].is_valid
assert psoi_aug.polygons[0].exterior_almost_equals(expected)
def test_line_strings_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
lsoi = self.lsoi
kp_offset = self.kp_offset
lsoi_aug = aug.augment_line_strings(lsoi)
expected = [(4-1+kp_offset, 1), (4-1+kp_offset, 3),
(4-3+kp_offset, 3), (4-3+kp_offset, 1)]
expected = [(8*x/4, 4*y/8) for x, y in expected]
assert lsoi_aug.shape == (4, 8, 3)
assert len(lsoi_aug.items) == 1
assert lsoi_aug.items[0].coords_almost_equals(expected)
def test_bounding_boxes_k_is_1_keep_size_is_true(self):
aug = iaa.Rot90(1, keep_size=True)
bbsoi = self.bbsoi
kp_offset = self.kp_offset
bbsoi_aug = aug.augment_bounding_boxes(bbsoi)
expected = [(4-1+kp_offset, 1),
(4-3+kp_offset, 3)]
expected = [(8*x/4, 4*y/8) for x, y in expected]
expected = np.float32([
[min(expected[0][0], expected[1][0]),
min(expected[0][1], expected[1][1])],
[max(expected[0][0], expected[1][0]),
max(expected[0][1], expected[1][1])]
])
assert bbsoi_aug.shape == (4, 8, 3)
assert len(bbsoi_aug.bounding_boxes) == 1
assert bbsoi_aug.bounding_boxes[0].coords_almost_equals(expected)
def test_images_k_is_list(self):
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
img = self.image
imgs_aug = aug.augment_images([img] * 4)
assert np.array_equal(imgs_aug[0], np.rot90(img, 1, axes=(1, 0)))
assert np.array_equal(imgs_aug[1], np.rot90(img, 2, axes=(1, 0)))
assert np.array_equal(imgs_aug[2], np.rot90(img, 1, axes=(1, 0)))
assert np.array_equal(imgs_aug[3], np.rot90(img, 2, axes=(1, 0)))
def test_heatmaps_smaller_than_image_k_is_list(self):
def _rot_hm(hm, k):
return np.rot90(hm.arr_0to1, k, axes=(1, 0))
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
hms_smaller = self.heatmaps_smaller
hms_aug = aug.augment_heatmaps([hms_smaller] * 4)
assert hms_aug[0].shape == (8, 4, 3)
assert hms_aug[1].shape == (4, 8, 3)
assert hms_aug[2].shape == (8, 4, 3)
assert hms_aug[3].shape == (4, 8, 3)
assert np.allclose(hms_aug[0].arr_0to1, _rot_hm(hms_smaller, 1))
assert np.allclose(hms_aug[1].arr_0to1, _rot_hm(hms_smaller, 2))
assert np.allclose(hms_aug[2].arr_0to1, _rot_hm(hms_smaller, 1))
assert np.allclose(hms_aug[3].arr_0to1, _rot_hm(hms_smaller, 2))
def test_segmaps_smaller_than_image_k_is_list(self):
def _rot_sm(segmap, k):
return np.rot90(segmap.arr, k, axes=(1, 0))
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
segmaps_smaller = self.segmaps_smaller
segmaps_aug = aug.augment_segmentation_maps([segmaps_smaller] * 4)
assert segmaps_aug[0].shape == (8, 4, 3)
assert segmaps_aug[1].shape == (4, 8, 3)
assert segmaps_aug[2].shape == (8, 4, 3)
assert segmaps_aug[3].shape == (4, 8, 3)
assert np.allclose(segmaps_aug[0].arr, _rot_sm(segmaps_smaller, 1))
assert np.allclose(segmaps_aug[1].arr, _rot_sm(segmaps_smaller, 2))
assert np.allclose(segmaps_aug[2].arr, _rot_sm(segmaps_smaller, 1))
assert np.allclose(segmaps_aug[3].arr, _rot_sm(segmaps_smaller, 2))
def test_keypoints_k_is_list(self):
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
kpsoi = self.kpsoi
kpsoi_aug = aug.augment_keypoints([kpsoi] * 4)
assert_cbaois_equal(kpsoi_aug[0], self.kpsoi_k1)
assert_cbaois_equal(kpsoi_aug[1], self.kpsoi_k2)
assert_cbaois_equal(kpsoi_aug[2], self.kpsoi_k1)
assert_cbaois_equal(kpsoi_aug[3], self.kpsoi_k2)
def test_polygons_k_is_list(self):
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
psoi = self.psoi
psoi_aug = aug.augment_polygons([psoi] * 4)
assert_cbaois_equal(psoi_aug[0], self.psoi_k1)
assert_cbaois_equal(psoi_aug[1], self.psoi_k2)
assert_cbaois_equal(psoi_aug[2], self.psoi_k1)
assert_cbaois_equal(psoi_aug[3], self.psoi_k2)
def test_line_strings_k_is_list(self):
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
lsoi = self.lsoi
lsoi_aug = aug.augment_line_strings([lsoi] * 4)
assert_cbaois_equal(lsoi_aug[0], self.lsoi_k1)
assert_cbaois_equal(lsoi_aug[1], self.lsoi_k2)
assert_cbaois_equal(lsoi_aug[2], self.lsoi_k1)
assert_cbaois_equal(lsoi_aug[3], self.lsoi_k2)
def test_bounding_boxes_k_is_list(self):
aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)
bbsoi = self.bbsoi
bbsoi_aug = aug.augment_bounding_boxes([bbsoi] * 4)
assert_cbaois_equal(bbsoi_aug[0], self.bbsoi_k1)
assert_cbaois_equal(bbsoi_aug[1], self.bbsoi_k2)
assert_cbaois_equal(bbsoi_aug[2], self.bbsoi_k1)
assert_cbaois_equal(bbsoi_aug[3], self.bbsoi_k2)
def test_empty_keypoints(self):
aug = iaa.Rot90(k=1, keep_size=False)
kpsoi = ia.KeypointsOnImage([], shape=(4, 8, 3))
kpsoi_aug = aug.augment_keypoints(kpsoi)
expected = self.kpsoi_k1
expected.keypoints = []
assert_cbaois_equal(kpsoi_aug, expected)
def test_empty_polygons(self):
aug = iaa.Rot90(k=1, keep_size=False)
psoi = ia.PolygonsOnImage([], shape=(4, 8, 3))
psoi_aug = aug.augment_polygons(psoi)
expected = self.psoi_k1
expected.polygons = []
assert_cbaois_equal(psoi_aug, expected)
def test_empty_line_strings(self):
aug = iaa.Rot90(k=1, keep_size=False)
lsoi = ia.LineStringsOnImage([], shape=(4, 8, 3))
lsoi_aug = aug.augment_line_strings(lsoi)
expected = self.lsoi_k1
expected.line_strings = []
assert_cbaois_equal(lsoi_aug, expected)
def test_empty_bounding_boxes(self):
aug = iaa.Rot90(k=1, keep_size=False)
bbsoi = ia.BoundingBoxesOnImage([], shape=(4, 8, 3))
bbsoi_aug = aug.augment_bounding_boxes(bbsoi)
expected = self.bbsoi_k1
expected.bounding_boxes = []
assert_cbaois_equal(bbsoi_aug, expected)
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Rot90(k=1)
image_aug = aug(image=image)
shape_expected = tuple([shape[1], shape[0]] + list(shape[2:]))
assert np.all(image_aug == 0)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape_expected
def test_zero_sized_axes_k_0_or_2(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
for keep_size in [False, True]:
with self.subTest(shape=shape, keep_size=keep_size):
for _ in sm.xrange(10):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Rot90([0, 2], keep_size=keep_size)
image_aug = aug(image=image)
assert image_aug.shape == shape
def test_zero_sized_axes_k_1_or_3_no_keep_size(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
for _ in sm.xrange(10):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Rot90([1, 3], keep_size=False)
image_aug = aug(image=image)
shape_expected = tuple([shape[1], shape[0]]
+ list(shape[2:]))
assert image_aug.shape == shape_expected
def test_zero_sized_axes_k_1_or_3_keep_size(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
for _ in sm.xrange(10):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Rot90([1, 3], keep_size=True)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
def test_get_parameters(self):
aug = iaa.Rot90([1, 3], keep_size=False)
assert aug.get_parameters()[0] == aug.k
assert aug.get_parameters()[1] is False
def test_other_dtypes_bool(self):
aug = iaa.Rot90(2)
image = np.zeros((3, 3), dtype=bool)
image[0, 0] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert np.all(image_aug[0, 0] == 0)
assert np.all(image_aug[2, 2] == 1)
def test_other_dtypes_uint_int(self):
aug = iaa.Rot90(2)
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = max_value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(image_aug[0, 0] == 0)
assert np.all(image_aug[2, 2] == max_value)
    def test_other_dtypes_float(self):
        """A 180 degree rotation preserves values for all float dtypes."""
        aug = iaa.Rot90(2)
        # NOTE(review): "float128"/np.float128 only exists on platforms
        # where numpy exposes an extended-precision type (not e.g. on
        # Windows) -- confirm this test is never collected there.
        dtypes = ["float16", "float32", "float64", "float128"]
        for dtype in dtypes:
            def _allclose(a, b):
                # float16 offers far less precision -> looser tolerance
                atol = 1e-4 if dtype == "float16" else 1e-8
                return np.allclose(a, b, atol=atol, rtol=0)
            isize = np.dtype(dtype).itemsize
            # magnitudes scaled with the dtype's byte size so larger
            # types are also tested with large values
            values = [0, 1.0, 10.0, 100.0, 500 ** (isize-1), 1000 ** (isize-1)]
            values = values + [(-1) * value for value in values]
            for value in values:
                with self.subTest(dtype=dtype, value=value):
                    image = np.zeros((3, 3), dtype=dtype)
                    image[0, 0] = value
                    image_aug = aug.augment_image(image)
                    assert image_aug.dtype.name == dtype
                    assert _allclose(image_aug[0, 0], 0)
                    assert _allclose(image_aug[2, 2], np.float128(value))
    def test_pickleable(self):
        """The augmenter must survive pickling with identical outputs."""
        aug = iaa.Rot90([0, 1, 2, 3], seed=1)
        runtest_pickleable_uint8_img(aug, iterations=5)
class TestWithPolarWarping(unittest.TestCase):
    def setUp(self):
        # reseed the global RNG so every test runs deterministically
        reseed()
def test___init___single_augmenter_as_child(self):
aug = iaa.WithPolarWarping(iaa.Noop())
assert isinstance(aug.children, iaa.Sequential)
assert isinstance(aug.children[0], iaa.Noop)
def test___init___list_of_augmenters_as_child(self):
aug = iaa.WithPolarWarping([iaa.Noop(), iaa.Noop()])
assert isinstance(aug.children, iaa.Sequential)
assert isinstance(aug.children[0], iaa.Noop)
assert isinstance(aug.children[1], iaa.Noop)
def test_images_no_change(self):
image = np.mod(np.arange(10*20*3), 255).astype(np.uint8)
image = image.reshape((10, 20, 3))
aug = iaa.WithPolarWarping(iaa.Noop())
image_aug = aug(image=image)
avg_dist = np.average(
np.abs(
image_aug.astype(np.int32)[2:-2, 2:-2]
- image.astype(np.int32)[2:-2, 2:-2]
)
)
assert image_aug.shape == (10, 20, 3)
assert avg_dist < 7.0
def test_heatmaps_no_change(self):
hm = np.linspace(0, 1.0, 10*20, dtype=np.float32).reshape((10, 20, 1))
hm = ia.HeatmapsOnImage(hm, shape=(10, 20, 3))
aug = iaa.WithPolarWarping(iaa.Noop())
hm_aug = aug(heatmaps=hm)
avg_dist = np.average(
np.abs(
hm_aug.get_arr()[2:-2, 2:-2]
- hm.get_arr()[2:-2, 2:-2]
)
)
assert hm_aug.shape == (10, 20, 3)
assert avg_dist < 0.0125
def test_segmentation_maps_no_change(self):
sm = np.zeros((10, 20, 1), dtype=np.int32)
sm[1, 0:5] = 1
sm[3:3, 3:3] = 2
sm[7:9, :] = 3
sm = ia.SegmentationMapsOnImage(sm, shape=(10, 20, 3))
aug = iaa.WithPolarWarping(iaa.Noop())
sm_aug = aug(segmentation_maps=sm)
p_same = np.average(
sm_aug.get_arr()[2:-2, 2:-2]
== sm.get_arr()[2:-2, 2:-2]
)
assert sm_aug.shape == (10, 20, 3)
assert p_same > 0.95
def test_keypoints_no_change(self):
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=5, y=5),
ia.Keypoint(x=5, y=9)]
kpsoi = ia.KeypointsOnImage(kps, shape=(10, 20, 3))
aug = iaa.WithPolarWarping(iaa.Noop())
kpsoi_aug = aug(keypoints=kpsoi)
assert kpsoi_aug.shape == (10, 20, 3)
assert np.allclose(kpsoi_aug.to_xy_array(), kpsoi.to_xy_array(),
atol=0.01)
def test_bounding_boxes_no_change(self):
bbs = [
ia.BoundingBox(x1=1, y1=2, x2=3, y2=4, label="foo"),
ia.BoundingBox(x1=3, y1=5, x2=7, y2=10),
]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(10, 20, 3))
aug = iaa.WithPolarWarping(iaa.Noop())
bbsoi_aug = aug(bounding_boxes=bbsoi)
assert bbsoi_aug.items[0].label == "foo"
assert bbsoi_aug.items[1].label is None
assert bbsoi_aug.shape == (10, 20, 3)
assert np.allclose(bbsoi_aug.to_xy_array(), bbsoi.to_xy_array(),
atol=0.01)
def test_polygons_no_change(self):
ps = [
ia.Polygon([(0, 2), (4, 2), (4, 4)], label="foo"),
ia.Polygon([(0, 0), (5, 0), (5, 5), (0, 5)])
]
psoi = ia.PolygonsOnImage(ps, shape=(10, 20, 3))
aug = iaa.WithPolarWarping(iaa.Noop())
psoi_aug = aug(polygons=psoi)
assert psoi_aug.items[0].label == "foo"
assert psoi_aug.items[1].label is None
assert psoi_aug.shape == (10, 20, 3)
assert np.allclose(psoi_aug.to_xy_array(), psoi.to_xy_array(),
atol=0.01)
def test_line_strings_no_change(self):
ls = [
ia.LineString([(0, 2), (4, 2), (4, 4)]),
ia.LineString([(0, 0), (5, 0), (5, 5), (0, 5)])
]
lsoi = ia.LineStringsOnImage(ls, shape=(10, 20, 3))
aug = iaa.WithPolarWarping(iaa.Noop())
lsoi_aug = aug(line_strings=lsoi)
assert lsoi_aug.shape == (10, 20, 3)
assert np.allclose(lsoi_aug.to_xy_array(), lsoi.to_xy_array(),
atol=0.01)
def test_bounding_boxes_and_polygons_provided_no_change(self):
bbs = [
ia.BoundingBox(x1=1, y1=2, x2=3, y2=4, label="foo"),
ia.BoundingBox(x1=3, y1=5, x2=7, y2=10),
]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(10, 20, 3))
ps = [
ia.Polygon([(0, 2), (4, 2), (4, 4)], label="foo"),
ia.Polygon([(0, 0), (5, 0), (5, 5), (0, 5)])
]
psoi = ia.PolygonsOnImage(ps, shape=(10, 20, 3))
aug = iaa.WithPolarWarping(iaa.Noop())
aug = aug.to_deterministic()
bbsoi_aug = aug.augment_bounding_boxes(bbsoi)
psoi_aug = aug.augment_polygons(psoi)
assert bbsoi_aug.items[0].label == "foo"
assert bbsoi_aug.items[1].label is None
assert bbsoi_aug.shape == (10, 20, 3)
assert np.allclose(bbsoi_aug.to_xy_array(), bbsoi.to_xy_array(),
atol=0.01)
assert psoi_aug.items[0].label == "foo"
assert psoi_aug.items[1].label is None
assert psoi_aug.shape == (10, 20, 3)
assert np.allclose(psoi_aug.to_xy_array(), psoi.to_xy_array(),
atol=0.01)
def test_images_translation_x(self):
image = np.zeros((50, 70, 3), dtype=np.uint8)
image[20-1:20+1, 30-1:30+1, 0] = 255
image[30-1:30+1, 40-1:40+1, 1] = 255
aug = iaa.WithPolarWarping(iaa.Affine(translate_px={"x": 15}))
image_aug = aug(image=image)
x1 = np.argmax(np.max(image_aug[..., 0], axis=0))
y1 = np.argmax(np.max(image_aug[..., 0], axis=1))
x2 = np.argmax(np.max(image_aug[..., 1], axis=0))
y2 = np.argmax(np.max(image_aug[..., 1], axis=1))
# translation on x axis in polar representation should move all points
# a bit away from the center
min_diff = 4
assert image_aug.shape == (50, 70, 3)
assert x1 < 30 - min_diff
assert y1 < 20 - min_diff
assert x2 > 40 + min_diff
assert y2 > 30 + min_diff
def test_heatmaps_translation_x(self):
hm = np.zeros((50, 70, 2), dtype=np.float32)
hm[20-1:20+1, 30-1:30+1, 0] = 1.0
hm[30-1:30+1, 40-1:40+1, 1] = 1.0
hm = ia.HeatmapsOnImage(hm, shape=(50, 70, 3))
aug = iaa.WithPolarWarping(iaa.Affine(translate_px={"x": 15}))
hm_aug = aug(heatmaps=hm)
hm_aug_arr = hm_aug.get_arr()
x1 = np.argmax(np.max(hm_aug_arr[..., 0], axis=0))
y1 = np.argmax(np.max(hm_aug_arr[..., 0], axis=1))
x2 = np.argmax(np.max(hm_aug_arr[..., 1], axis=0))
y2 = np.argmax(np.max(hm_aug_arr[..., 1], axis=1))
# translation on x axis in polar representation should move all points
# a bit away from the center
min_diff = 4
assert hm_aug_arr.shape == (50, 70, 2)
assert hm_aug.shape == (50, 70, 3)
assert x1 < 30 - min_diff
assert y1 < 20 - min_diff
assert x2 > 40 + min_diff
assert y2 > 30 + min_diff
def test_segmentation_maps_translation_x(self):
sm = np.zeros((50, 70, 2), dtype=np.int32)
sm[20-1:20+1, 30-1:30+1, 0] = 1
sm[30-1:30+1, 40-1:40+1, 1] = 2
sm = ia.SegmentationMapsOnImage(sm, shape=(50, 70, 3))
aug = iaa.WithPolarWarping(iaa.Affine(translate_px={"x": 15}))
sm_aug = aug(segmentation_maps=sm)
sm_aug_arr = sm_aug.get_arr()
x1 = np.argmax(np.max(sm_aug_arr[..., 0], axis=0))
y1 = np.argmax(np.max(sm_aug_arr[..., 0], axis=1))
x2 = np.argmax(np.max(sm_aug_arr[..., 1], axis=0))
y2 = np.argmax(np.max(sm_aug_arr[..., 1], axis=1))
# translation on x axis in polar representation should move all points
# a bit away from the center
min_diff = 4
assert sm_aug_arr.shape == (50, 70, 2)
assert sm_aug.shape == (50, 70, 3)
assert x1 < 30 - min_diff
assert y1 < 20 - min_diff
assert x2 > 40 + min_diff
assert y2 > 30 + min_diff
def test_keypoints_translation_x(self):
cbas = [ia.Keypoint(y=20, x=30), ia.Keypoint(y=30, x=40)]
cbaoi = ia.KeypointsOnImage(cbas, shape=(50, 70, 3))
aug = iaa.WithPolarWarping(iaa.Affine(translate_px={"x": 15}))
cbaoi_aug = aug(keypoints=cbaoi)
x1 = cbaoi_aug.items[0].x
y1 = cbaoi_aug.items[0].y
x2 = cbaoi_aug.items[1].x
y2 = cbaoi_aug.items[1].y
# translation on x axis in polar representation should move all points
# a bit away from the center
min_diff = 4
assert cbaoi_aug.shape == (50, 70, 3)
assert x1 < 30 - min_diff
assert y1 < 20 - min_diff
assert x2 > 40 + min_diff
assert y2 > 30 + min_diff
def test_bounding_boxes_translation_x(self):
cbas = [ia.BoundingBox(y1=20, x1=30, y2=20+2, x2=30+2),
ia.BoundingBox(y1=30, x1=40, y2=30+2, x2=40+2)]
cbaoi = ia.BoundingBoxesOnImage(cbas, shape=(50, 70, 3))
aug = iaa.WithPolarWarping(iaa.Affine(translate_px={"x": 15}))
cbaoi_aug = aug(bounding_boxes=cbaoi)
x1 = cbaoi_aug.items[0].x1
y1 = cbaoi_aug.items[0].y1
x2 = cbaoi_aug.items[1].x2
y2 = cbaoi_aug.items[1].y2
# translation on x axis in polar representation should move all points
# a bit away from the center
min_diff = 4
assert cbaoi_aug.shape == (50, 70, 3)
assert x1 < 30 - min_diff
assert y1 < 20 - min_diff
assert x2 > 40 + min_diff
assert y2 > 30 + min_diff
def test_polygons_translation_x(self):
cbas = [ia.Polygon([(30, 20), (30+2, 20), (30+2, 20+2)]),
ia.Polygon([(40, 30), (40+2, 30), (40+2, 30+2)])]
cbaoi = ia.PolygonsOnImage(cbas, shape=(50, 70, 3))
aug = iaa.WithPolarWarping(iaa.Affine(translate_px={"x": 15}))
cbaoi_aug = aug(polygons=cbaoi)
x1 = cbaoi_aug.items[0].coords[0][0]
y1 = cbaoi_aug.items[0].coords[0][1]
x2 = cbaoi_aug.items[1].coords[2][0]
y2 = cbaoi_aug.items[1].coords[2][1]
# translation on x axis in polar representation should move all points
# a bit away from the center
min_diff = 4
assert cbaoi_aug.shape == (50, 70, 3)
assert x1 < 30 - min_diff
assert y1 < 20 - min_diff
assert x2 > 40 + min_diff
assert y2 > 30 + min_diff
def test_line_strings_translation_x(self):
cbas = [ia.LineString([(30, 20), (30+2, 20), (30+2, 20+2)]),
ia.LineString([(40, 30), (40+2, 30), (40+2, 30+2)])]
cbaoi = ia.LineStringsOnImage(cbas, shape=(50, 70, 3))
aug = iaa.WithPolarWarping(iaa.Affine(translate_px={"x": 15}))
cbaoi_aug = aug(line_strings=cbaoi)
x1 = cbaoi_aug.items[0].coords[0][0]
y1 = cbaoi_aug.items[0].coords[0][1]
x2 = cbaoi_aug.items[1].coords[2][0]
y2 = cbaoi_aug.items[1].coords[2][1]
# translation on x axis in polar representation should move all points
# a bit away from the center
min_diff = 4
assert cbaoi_aug.shape == (50, 70, 3)
assert x1 < 30 - min_diff
assert y1 < 20 - min_diff
assert x2 > 40 + min_diff
assert y2 > 30 + min_diff
    def test_image_heatmap_alignment(self):
        """Image and its half-resolution heatmap must be warped in sync."""
        image = np.zeros((80, 100, 3), dtype=np.uint8)
        image[40-10:40+10, 50-10:50+10, :] = 255
        hm = np.zeros((40, 50, 1), dtype=np.float32)
        hm[20-5:20+5, 25-5:25+5, :] = 1.0
        hm = ia.HeatmapsOnImage(hm, shape=image.shape)
        aug = iaa.WithPolarWarping(iaa.Affine(translate_px={"x": 10}))
        image_aug, hm_aug = aug(image=image, heatmaps=hm)
        hm_aug_arr = hm_aug.get_arr()
        # upscale the heatmap to image size so masks compare per-pixel
        hm_aug_arr_rs = ia.imresize_single_image(hm_aug_arr, (80, 100),
                                                 interpolation="nearest")
        # fraction of pixels where bright image area and hot heatmap
        # area agree
        overlap = np.average(
            (image_aug[..., 0] > 200)
            == (hm_aug_arr_rs[..., 0] > 0.9)
        )
        assert image_aug.shape == (80, 100, 3)
        assert hm_aug.shape == (80, 100, 3)
        assert hm_aug_arr.shape == (40, 50, 1)
        assert overlap > 0.96
def test_image_segmentation_map_alignment(self):
image = np.zeros((80, 100, 3), dtype=np.uint8)
image[40-10:40+10, 50-10:50+10, :] = 255
sm = np.zeros((40, 50, 1), dtype=np.int32)
sm[20-5:20+5, 25-5:25+5, :] = 1
sm = ia.SegmentationMapsOnImage(sm, shape=image.shape)
aug = iaa.WithPolarWarping(iaa.Affine(translate_px={"x": 10}))
image_aug, sm_aug = aug(image=image, segmentation_maps=sm)
sm_aug_arr = sm_aug.get_arr()
sm_aug_arr_rs = ia.imresize_single_image(sm_aug_arr, (80, 100),
interpolation="nearest")
overlap = np.average(
(image_aug[..., 0] > 200)
== (sm_aug_arr_rs[..., 0] == 1)
)
assert image_aug.shape == (80, 100, 3)
assert sm_aug.shape == (80, 100, 3)
assert sm_aug_arr.shape == (40, 50, 1)
assert overlap > 0.96
def test_image_keypoint_alignment(self):
image = np.zeros((80, 100, 3), dtype=np.uint8)
image[40-10:40-10+3, 50-10:50-10+3, :] = 255
image[40+10:40+10+3, 50+10:50+10+3, :] = 255
kps = [ia.Keypoint(y=40-10+1.5, x=50-10+1.5),
ia.Keypoint(y=40+10+1.5, x=50+10+1.5)]
kpsoi = ia.KeypointsOnImage(kps, shape=image.shape)
aug = iaa.WithPolarWarping(iaa.Affine(translate_px={"x": 10}))
image_aug, kpsoi_aug = aug(image=image, keypoints=kpsoi)
kp1 = kpsoi_aug.items[0]
kp2 = kpsoi_aug.items[1]
kp1_intensity = image_aug[int(kp1.y), int(kp1.x), 0]
kp2_intensity = image_aug[int(kp2.y), int(kp2.x), 0]
assert image_aug.shape == (80, 100, 3)
assert kpsoi_aug.shape == (80, 100, 3)
assert kp1_intensity > 200
assert kp2_intensity > 200
def test_image_is_noncontiguous(self):
image = np.mod(np.arange(10*20*3), 255).astype(np.uint8)
image = image.reshape((10, 20, 3))
image_cp = np.fliplr(np.copy(image))
image = np.fliplr(image)
assert image.flags["C_CONTIGUOUS"] is False
aug = iaa.WithPolarWarping(iaa.Noop())
image_aug = aug(image=image)
avg_dist = np.average(
np.abs(
image_aug.astype(np.int32)[2:-2, 2:-2]
- image_cp.astype(np.int32)[2:-2, 2:-2]
)
)
assert image_aug.shape == (10, 20, 3)
assert avg_dist < 7.0
def test_image_is_view(self):
image = np.mod(np.arange(10*20*3), 255).astype(np.uint8)
image = image.reshape((10, 20, 3))
image_cp = np.copy(image)[2:, 2:, :]
image = image[2:, 2:, :]
assert image.flags["OWNDATA"] is False
aug = iaa.WithPolarWarping(iaa.Noop())
image_aug = aug(image=image)
avg_dist = np.average(
np.abs(
image_aug.astype(np.int32)[2:-2, 2:-2]
- image_cp.astype(np.int32)[2:-2, 2:-2]
)
)
assert image_aug.shape == (8, 18, 3)
assert avg_dist < 7.0
def test_propagation_hooks(self):
image = np.mod(np.arange(30*30), 255).astype(np.uint8)
image = image.reshape((30, 30))
aug = iaa.WithPolarWarping(iaa.Add(50))
def _propagator(images, augmenter, parents, default):
return False if augmenter is aug else default
hooks = ia.HooksImages(propagator=_propagator)
observed1 = aug.augment_image(image)
observed2 = aug.augment_image(image, hooks=hooks)
image_plus50 = np.clip(image.astype(np.int32)+50, 0, 255)
diff1 = np.abs(observed1[2:-2].astype(np.int32)
- image_plus50[2:-2].astype(np.int32))
diff2 = np.abs(observed2[2:-2].astype(np.int32)
- image_plus50[2:-2].astype(np.int32))
overlap_1_add = np.average(diff1 <= 1)
overlap_2_add = np.average(diff2 <= 2)
assert overlap_1_add >= 0.9
assert overlap_2_add < 0.01
def test_unusual_channel_numbers(self):
shapes = [
(5, 5, 4),
(5, 5, 5),
(5, 5, 512),
(5, 5, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.WithPolarWarping(iaa.Noop())
image_aug = aug(image=image)
shape_expected = tuple([shape[1], shape[0]] + list(shape[2:]))
assert np.all(image_aug == 0)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape_expected
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)],
shape=image.shape)
sm_arr = np.zeros((3, 3), dtype=np.int32)
sm_arr[1, 1] = 1
sm = ia.SegmentationMapsOnImage(sm_arr, shape=image.shape)
aug = iaa.WithPolarWarping(iaa.Noop())
aug_det = aug.to_deterministic()
image_aug = aug(image=image)
kpsoi_aug = aug(keypoints=kpsoi)
sm_aug = aug(segmentation_maps=sm)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
assert np.allclose(kpsoi_aug.to_xy_array(),
kpsoi.to_xy_array())
assert kpsoi_aug.shape == shape
assert np.array_equal(sm_aug.get_arr(), sm_arr)
assert sm_aug.shape == shape
def test_other_dtypes_bool(self):
aug = iaa.WithPolarWarping(iaa.Noop())
arr = np.zeros((20, 20), dtype=bool)
arr[10-3:10+3, 10-3:10+3] = True
arr_aug = aug(image=arr)
overlap = np.average(arr_aug == arr)
assert arr_aug.shape == (20, 20)
assert arr_aug.dtype.name == "bool"
assert overlap > 0.95
def test_other_dtypes_uint_int(self):
aug = iaa.WithPolarWarping(iaa.Noop())
dtypes = ["uint8", "uint16",
"int8", "int16", "int32",]
for dtype in dtypes:
with self.subTest(dtype=dtype):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
center_value = int(center_value)
image = np.zeros((30, 10), dtype=dtype)
image[0:10, :] = min_value
image[10:20, :] = center_value
image[20:30, :] = max_value
image = iaa.pad(image, top=2, right=2, bottom=2, left=2,
cval=0)
image_aug = aug.augment_image(image)
image_aug = image_aug[2:-2, 2:-2]
overlap_min = np.average(image_aug[0:10] == min_value)
overlap_cv = np.average(image_aug[10:20] == center_value)
overlap_max = np.average(image_aug[20:30] == max_value)
assert image_aug.dtype.name == dtype
assert overlap_min > 0.9
assert overlap_cv > 0.9
assert overlap_max > 0.9
def test_other_dtypes_float(self):
def _avg_close(arr_aug, expected_val):
atol = 1e-8
return np.average(np.isclose(arr_aug, expected_val,
rtol=0, atol=atol))
aug = iaa.WithPolarWarping(iaa.Noop())
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
center_value = center_value
image = np.zeros((70, 10), dtype=dtype)
image[0:10, :] = min_value
image[10:20, :] = center_value
image[20:30, :] = max_value
image[30:40, :] = -1.0
image[40:50, :] = 1.0
image[50:60, :] = -100.0
image[60:70, :] = 100.0
image = iaa.pad(image, top=2, right=2, bottom=2, left=2,
cval=0)
image_aug = aug.augment_image(image)
image_aug = image_aug[2:-2, 2:-2]
overlap1 = _avg_close(image_aug[0:10], min_value)
overlap2 = _avg_close(image_aug[10:20], center_value)
overlap3 = _avg_close(image_aug[20:30], max_value)
overlap4 = _avg_close(image_aug[30:40], -1.0)
overlap5 = _avg_close(image_aug[40:50], 1.0)
overlap6 = _avg_close(image_aug[50:60], -100.0)
overlap7 = _avg_close(image_aug[60:70], 100.0)
assert image_aug.dtype.name == dtype
assert overlap1 > 0.9
assert overlap2 > 0.9
assert overlap3 > 0.9
assert overlap4 > 0.9
assert overlap5 > 0.9
assert overlap6 > 0.9
assert overlap7 > 0.9
def test_get_parameters(self):
aug = iaa.WithPolarWarping(iaa.Noop())
params = aug.get_parameters()
assert len(params) == 0
def test_get_children_lists(self):
children = iaa.Sequential([iaa.Noop()])
aug = iaa.WithPolarWarping(children)
assert aug.get_children_lists() == [children]
def test_to_deterministic(self):
child = iaa.Identity()
aug = iaa.WithPolarWarping([child])
aug_det = aug.to_deterministic()
assert aug_det.deterministic
assert aug_det.random_state is not aug.random_state
assert aug_det.children.deterministic
assert aug_det.children[0].deterministic
def test___repr___and___str__(self):
children = iaa.Sequential([iaa.Noop()])
aug = iaa.WithPolarWarping(children, name="WithPolarWarpingTest")
expected = (
"WithPolarWarping("
"name=WithPolarWarpingTest, "
"children=%s, "
"deterministic=False"
")" % (str(children),))
assert aug.__repr__() == expected
assert aug.__str__() == expected
def test_pickleable(self):
aug = iaa.WithPolarWarping(
iaa.Affine(translate_px=(0, 10), seed=1),
seed=2)
runtest_pickleable_uint8_img(aug, iterations=5, shape=(25, 25, 1))
class Test_apply_jigsaw(unittest.TestCase):
def test_no_movement(self):
dtypes = ["bool",
"uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64",
"float16", "float32", "float64", "float128"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
arr = np.arange(20*20*1).reshape((20, 20, 1))
if dtype == "bool":
mask = np.logical_or(
arr % 4 == 0,
arr % 7 == 0)
arr[mask] = 1
arr[~mask] = 0
arr = arr.astype(dtype)
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
arr[0, 0] = min_value
arr[0, 1] = max_value
destinations = np.arange(5*5).reshape((5, 5))
observed = iaa.apply_jigsaw(arr, destinations)
if arr.dtype.kind != "f":
assert np.array_equal(observed, arr)
else:
atol = 1e-4 if dtype == "float16" else 1e-8
assert np.allclose(observed, arr, rtol=0, atol=atol)
def test_no_movement_zero_sized_axes(self):
sizes = [
(0, 1),
(1, 0),
(0, 0)
]
dtype = "uint8"
for size in sizes:
with self.subTest(size=size):
arr = np.zeros(size, dtype=dtype)
destinations = np.arange(1*1).reshape((1, 1))
observed = iaa.apply_jigsaw(arr, destinations)
assert np.array_equal(observed, arr)
def _test_two_cells_moved__n_channels(self, nb_channels):
dtypes = ["bool",
"uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64",
"float16", "float32", "float64", "float128"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
c = 1 if nb_channels is None else nb_channels
arr = np.arange(20*20*c)
if dtype == "bool":
mask = np.logical_or(
arr % 4 == 0,
arr % 7 == 0)
arr[mask] = 1
arr[~mask] = 0
if nb_channels is not None:
arr = arr.reshape((20, 20, c))
else:
arr = arr.reshape((20, 20))
arr = arr.astype(dtype)
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
arr[0, 0] = min_value
arr[0, 1] = max_value
destinations = np.arange(5*5).reshape((5, 5))
destinations[0, 0] = 4 # cell 0 will be filled with 4
destinations[0, 4] = 0 # cell 4 will be filled with 0
destinations[0, 1] = 6 # cell 1 will be filled with 6
destinations[1, 1] = 1 # cell 6 will be filled with 1
observed = iaa.apply_jigsaw(arr, destinations)
cell_0_obs = observed[0:4, 0:4]
cell_0_exp = arr[0:4, 16:20]
cell_4_obs = observed[0:4, 16:20]
cell_4_exp = arr[0:4, 0:4]
cell_1_obs = observed[0:4, 4:8]
cell_1_exp = arr[4:8, 4:8]
cell_6_obs = observed[4:8, 4:8]
cell_6_exp = arr[0:4, 4:8]
cell_2_obs = observed[0:4, 8:12]
cell_2_exp = arr[0:4, 8:12]
if arr.dtype.kind != "f":
assert np.array_equal(cell_0_obs, cell_0_exp)
assert np.array_equal(cell_4_obs, cell_4_exp)
assert np.array_equal(cell_1_obs, cell_1_exp)
assert np.array_equal(cell_6_obs, cell_6_exp)
assert np.array_equal(cell_2_obs, cell_2_exp)
else:
atol = 1e-4 if dtype == "float16" else 1e-8
kwargs = {"rtol": 0, "atol": atol}
assert np.allclose(cell_0_obs, cell_0_exp, **kwargs)
assert np.allclose(cell_4_obs, cell_4_exp, **kwargs)
assert np.allclose(cell_1_obs, cell_1_exp, **kwargs)
assert np.allclose(cell_6_obs, cell_6_exp, **kwargs)
assert np.allclose(cell_2_obs, cell_2_exp, **kwargs)
assert observed.shape == arr.shape
assert observed.dtype.name == dtype
def test_two_cells_moved__no_channels(self):
self._test_two_cells_moved__n_channels(None)
def test_two_cells_moved__1_channel(self):
self._test_two_cells_moved__n_channels(1)
def test_two_cells_moved__3_channels(self):
self._test_two_cells_moved__n_channels(3)
class Test_apply_jigsaw_to_coords(unittest.TestCase):
def test_no_movement(self):
arr = np.float32([
(0.0, 0.0),
(5.0, 5.0),
(25.0, 50.5),
(10.01, 21.0)
])
destinations = np.arange(10*10).reshape((10, 10))
observed = iaa.apply_jigsaw_to_coords(arr, destinations, (50, 100))
assert np.allclose(observed, arr)
def test_with_movement(self):
arr = np.float32([
(0.0, 0.0), # in cell (0,0) = idx 0
(5.0, 5.0), # in cell (0,0) = idx 0
(25.0, 50.5), # in cell (5,2) = idx 52
(10.01, 21.0) # in cell (2,1) = idx 12
])
destinations = np.arange(10*10).reshape((10, 10))
destinations[0, 0] = 1
destinations[0, 1] = 0
destinations[5, 2] = 7
destinations[0, 7] = 52
observed = iaa.apply_jigsaw_to_coords(arr, destinations, (100, 100))
expected = np.float32([
(10.0, 0.0),
(15.0, 5.0),
(75.0, 0.5),
(10.01, 21.0)
])
assert np.allclose(observed, expected)
def test_with_movement_non_square_image(self):
arr = np.float32([
(0.5, 0.6), # in cell (0,0) = idx 0
(180.7, 90.8), # in cell (9,9) = idx 99
])
destinations = np.arange(10*10).reshape((10, 10))
destinations[0, 0] = 99
destinations[9, 9] = 0
observed = iaa.apply_jigsaw_to_coords(arr, destinations, (100, 200))
expected = np.float32([
(180+0.5, 90+0.6),
(0+0.7, 0+0.8)
])
assert np.allclose(observed, expected)
def test_empty_coords(self):
arr = np.zeros((0, 2), dtype=np.float32)
destinations = np.arange(10*10).reshape((10, 10))
observed = iaa.apply_jigsaw_to_coords(arr, destinations, (100, 100))
assert np.allclose(observed, arr)
class Test_generate_jigsaw_destinations(unittest.TestCase):
def test_max_steps_0(self):
rng = iarandom.RNG(0)
max_steps = 0
rows = 10
cols = 20
observed = iaa.generate_jigsaw_destinations(rows, cols, max_steps, rng,
connectivity=8)
assert np.array_equal(
observed,
np.arange(rows*cols).reshape((rows, cols)))
def test_max_steps_1(self):
rng = iarandom.RNG(0)
max_steps = 1
rows = 10
cols = 20
observed = iaa.generate_jigsaw_destinations(rows, cols, max_steps, rng,
connectivity=8)
yy = (observed // cols).reshape((rows, cols))
xx = np.mod(observed, cols).reshape((rows, cols))
yy_expected = np.tile(np.arange(rows).reshape((rows, 1)), (1, cols))
xx_expected = np.tile(np.arange(cols).reshape((1, cols)), (rows, 1))
yy_diff = yy_expected - yy
xx_diff = xx_expected - xx
dist = np.sqrt(yy_diff ** 2 + xx_diff ** 2)
assert np.min(dist) <= 0.01
assert np.any(dist >= np.sqrt(2) - 1e-4)
assert np.max(dist) <= np.sqrt(2) + 1e-4
def test_max_steps_1_connectivity_4(self):
rng = iarandom.RNG(0)
max_steps = 1
rows = 10
cols = 20
observed = iaa.generate_jigsaw_destinations(rows, cols, max_steps, rng,
connectivity=4)
yy = (observed // cols).reshape((rows, cols))
xx = np.mod(observed, cols).reshape((rows, cols))
yy_expected = np.tile(np.arange(rows).reshape((rows, 1)), (1, cols))
xx_expected = np.tile(np.arange(cols).reshape((1, cols)), (rows, 1))
yy_diff = yy_expected - yy
xx_diff = xx_expected - xx
dist = np.sqrt(yy_diff ** 2 + xx_diff ** 2)
assert np.min(dist) <= 0.01
assert np.any(dist >= 0.99)
assert np.max(dist) <= 1.01
class TestJigsaw(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
aug = iaa.Jigsaw(nb_rows=1, nb_cols=2)
assert aug.nb_rows.value == 1
assert aug.nb_cols.value == 2
assert aug.max_steps.value == 2
assert aug.allow_pad is True
def test___init___custom(self):
aug = iaa.Jigsaw(nb_rows=1, nb_cols=2, max_steps=3, allow_pad=False)
assert aug.nb_rows.value == 1
assert aug.nb_cols.value == 2
assert aug.max_steps.value == 3
assert aug.allow_pad is False
def test__draw_samples(self):
aug = iaa.Jigsaw(nb_rows=(1, 5), nb_cols=(1, 6), max_steps=(1, 3))
batch = mock.Mock()
batch.nb_rows = 100
samples = aug._draw_samples(batch, iarandom.RNG(0))
assert len(np.unique(samples.nb_rows)) > 1
assert len(np.unique(samples.nb_cols)) > 1
assert len(np.unique(samples.max_steps)) > 1
assert np.all(samples.nb_rows >= 1)
assert np.all(samples.nb_rows <= 5)
assert np.all(samples.nb_cols >= 1)
assert np.all(samples.nb_cols <= 6)
assert np.all(samples.max_steps >= 1)
assert np.all(samples.max_steps <= 3)
all_same = True
first = samples.destinations[0]
for dest in samples.destinations:
this_same = (dest.shape == first.shape
and np.array_equal(dest, first))
all_same = all_same and this_same
assert not all_same
def test_images_without_shifts(self):
aug = iaa.Jigsaw(nb_rows=2, nb_cols=2, max_steps=0)
image = np.mod(np.arange(20*20*3), 255).astype(np.uint8)
image = image.reshape((20, 20, 3))
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == (20, 20, 3)
assert np.array_equal(image_aug, image)
def test_heatmaps_without_shifts(self):
aug = iaa.Jigsaw(nb_rows=2, nb_cols=2, max_steps=0)
arr = np.linspace(0, 1.0, 20*20*1).astype(np.float32)
arr = arr.reshape((20, 20, 1))
heatmap = ia.HeatmapsOnImage(arr, shape=(20, 20, 3))
heatmap_aug = aug(heatmaps=heatmap)
assert heatmap_aug.shape == (20, 20, 3)
assert np.allclose(heatmap_aug.arr_0to1, heatmap.arr_0to1)
def test_segmaps_without_shifts(self):
aug = iaa.Jigsaw(nb_rows=2, nb_cols=2, max_steps=0)
arr = np.zeros((20, 20, 1), dtype=np.int32)
arr[0:10, :] = 1
arr[10:20, 10:20] = 2
arr = arr.reshape((20, 20, 1))
segmap = ia.SegmentationMapsOnImage(arr, shape=(20, 20, 3))
segmap_aug = aug(segmentation_maps=segmap)
assert segmap_aug.shape == (20, 20, 3)
assert np.array_equal(segmap_aug.arr, segmap.arr)
def test_keypoints_without_shifts(self):
aug = iaa.Jigsaw(nb_rows=2, nb_cols=2, max_steps=0)
kpsoi = ia.KeypointsOnImage.from_xy_array([
(0, 0),
(5.5, 3.5),
(12.1, 23.5)
], shape=(20, 20, 3))
kpsoi_aug = aug(keypoints=kpsoi)
assert kpsoi_aug.shape == (20, 20, 3)
assert np.allclose(kpsoi_aug.to_xy_array(), kpsoi.to_xy_array())
def test_images_with_shifts(self):
# these rows/cols/max_steps parameters are mostly ignored due to the
# mocked _draw_samples method below
aug = iaa.Jigsaw(nb_rows=2, nb_cols=2, max_steps=1)
image = np.mod(np.arange(19*19*3), 255).astype(np.uint8)
image = image.reshape((19, 19, 3))
destinations = np.array([
[3, 1],
[2, 0]
], dtype=np.int32)
old_func = aug._draw_samples
def _mocked_draw_samples(batch, random_state):
samples = old_func(batch, random_state)
return geometriclib._JigsawSamples(
nb_rows=samples.nb_rows,
nb_cols=samples.nb_cols,
max_steps=samples.max_steps,
destinations=[destinations])
aug._draw_samples = _mocked_draw_samples
image_aug = aug(image=image)
expected = iaa.pad(image, bottom=1, right=1, cval=0)
expected = iaa.apply_jigsaw(expected, destinations)
assert np.array_equal(image_aug, expected)
def test_heatmaps_with_shifts(self):
# these rows/cols/max_steps parameters are mostly ignored due to the
# mocked _draw_samples method below
aug = iaa.Jigsaw(nb_rows=2, nb_cols=2, max_steps=1)
arr = np.linspace(0, 1.0, 18*18*1).astype(np.float32)
arr = arr.reshape((18, 18, 1))
heatmap = ia.HeatmapsOnImage(arr, shape=(19, 19, 3))
destinations = np.array([
[3, 1],
[2, 0]
], dtype=np.int32)
old_func = aug._draw_samples
def _mocked_draw_samples(batch, random_state):
samples = old_func(batch, random_state)
return geometriclib._JigsawSamples(
nb_rows=samples.nb_rows,
nb_cols=samples.nb_cols,
max_steps=samples.max_steps,
destinations=[destinations])
aug._draw_samples = _mocked_draw_samples
heatmap_aug = aug(heatmaps=heatmap)
expected = ia.imresize_single_image(arr, (19, 19),
interpolation="cubic")
expected = np.clip(expected, 0, 1.0)
expected = iaa.pad(expected, bottom=1, right=1, cval=0.0)
expected = iaa.apply_jigsaw(expected, destinations)
expected = ia.imresize_single_image(expected, (18, 18),
interpolation="cubic")
expected = np.clip(expected, 0, 1.0)
assert np.allclose(heatmap_aug.arr_0to1, expected)
def test_segmaps_with_shifts(self):
# these rows/cols/max_steps parameters are mostly ignored due to the
# mocked _draw_samples method below
aug = iaa.Jigsaw(nb_rows=2, nb_cols=2, max_steps=1)
arr = np.zeros((18, 18, 1), dtype=np.int32)
arr[0:10, :] = 1
arr[10:18, 10:18] = 2
arr = arr.reshape((18, 18, 1))
segmap = ia.SegmentationMapsOnImage(arr, shape=(19, 19, 3))
destinations = np.array([
[3, 1],
[2, 0]
], dtype=np.int32)
old_func = aug._draw_samples
def _mocked_draw_samples(batch, random_state):
samples = old_func(batch, random_state)
return geometriclib._JigsawSamples(
nb_rows=samples.nb_rows,
nb_cols=samples.nb_cols,
max_steps=samples.max_steps,
destinations=[destinations])
aug._draw_samples = _mocked_draw_samples
segmap_aug = aug(segmentation_maps=segmap)
expected = ia.imresize_single_image(arr, (19, 19),
interpolation="nearest")
expected = iaa.pad(expected, bottom=1, right=1, cval=0)
expected = iaa.apply_jigsaw(expected, destinations)
expected = ia.imresize_single_image(expected, (18, 18),
interpolation="nearest")
assert np.array_equal(segmap_aug.arr, expected)
def test_keypoints_with_shifts(self):
# these rows/cols/max_steps parameters are mostly ignored due to the
# mocked _draw_samples method below
aug = iaa.Jigsaw(nb_rows=5, nb_cols=5, max_steps=1)
kpsoi = ia.KeypointsOnImage.from_xy_array([
(0, 0),
(5.5, 3.5),
(4.0, 12.5),
(11.1, 11.2),
(12.1, 23.5)
], shape=(18, 18, 3))
destinations = np.array([
[3, 1],
[2, 0]
], dtype=np.int32)
old_func = aug._draw_samples
def _mocked_draw_samples(batch, random_state):
samples = old_func(batch, random_state)
return geometriclib._JigsawSamples(
nb_rows=samples.nb_rows,
nb_cols=samples.nb_cols,
max_steps=samples.max_steps,
destinations=[destinations])
aug._draw_samples = _mocked_draw_samples
kpsoi_aug = aug(keypoints=kpsoi)
expected = kpsoi.deepcopy()
expected.shape = (20, 20, 3)
# (0.0, 0.0) to cell at bottom-right, 1px pad at top and left
expected.keypoints[0].x = 10.0 + (0.0 - 0.0) + 1.0
expected.keypoints[0].y = 10.0 + (0.0 - 0.0) + 1.0
# (5.5, 3.5) to cell at bottom-right, 1px pad at top and left
expected.keypoints[1].x = 10.0 + (5.5 - 0.0) + 1.0
expected.keypoints[1].y = 10.0 + (3.5 - 0.0) + 1.0
# (4.0, 12.5) not moved to other cell, but 1px pad at top and left
expected.keypoints[2].x = 4.0 + 1.0
expected.keypoints[2].y = 12.5 + 1.0
# (11.0, 11.0) to cell at top-left, 1px pad at top and left
expected.keypoints[3].x = 0.0 + (11.1 - 10.0) + 1.0
expected.keypoints[3].y = 0.0 + (11.2 - 10.0) + 1.0
# (12.1, 23.5) not moved to other cell, but 1px pad at top and left
expected.keypoints[4].x = 12.1 + 1.0
expected.keypoints[4].y = 23.5 + 1.0
expected.shape = (20, 20, 3)
assert kpsoi_aug.shape == (20, 20, 3)
assert np.allclose(kpsoi_aug.to_xy_array(), expected.to_xy_array())
def test_images_and_heatmaps_aligned(self):
nb_changed = 0
rs = iarandom.RNG(0)
for _ in np.arange(10):
aug = iaa.Jigsaw(nb_rows=(2, 5), nb_cols=(2, 5), max_steps=(0, 3))
image_small = rs.integers(0, 10, size=(10, 15)).astype(np.float32)
image_small = image_small / 10.0
image = ia.imresize_single_image(image_small, (20, 30),
interpolation="cubic")
image = np.clip(image, 0, 1.0)
hm = ia.HeatmapsOnImage(image_small, shape=(20, 30))
images_aug, hms_aug = aug(images=[image, image, image],
heatmaps=[hm, hm, hm])
for image_aug, hm_aug in zip(images_aug, hms_aug):
# TODO added squeeze here because get_arr() falsely returns
# (H,W,1) for 2D inputs
arr = np.squeeze(hm_aug.get_arr())
image_aug_rs = ia.imresize_single_image(
image_aug.astype(np.float32),
arr.shape[0:2],
interpolation="cubic")
image_aug_rs = np.clip(image_aug_rs, 0, 1.0)
overlap = np.average(np.isclose(image_aug_rs, arr))
assert overlap > 0.99
if not np.array_equal(arr, hm.get_arr()):
nb_changed += 1
assert nb_changed > 5
def test_images_and_segmaps_aligned(self):
nb_changed = 0
rs = iarandom.RNG(0)
for _ in np.arange(10):
aug = iaa.Jigsaw(nb_rows=(2, 5), nb_cols=(2, 5), max_steps=(0, 3))
image_small = rs.integers(0, 10, size=(10, 15))
image = ia.imresize_single_image(image_small, (20, 30),
interpolation="nearest")
image = image.astype(np.uint8)
segm = ia.SegmentationMapsOnImage(image_small, shape=(20, 30))
images_aug, sms_aug = aug(images=[image, image, image],
segmentation_maps=[segm, segm, segm])
for image_aug, sm_aug in zip(images_aug, sms_aug):
arr = sm_aug.get_arr()
image_aug_rs = ia.imresize_single_image(
image_aug, arr.shape[0:2], interpolation="nearest")
overlap = np.average(image_aug_rs == arr)
assert overlap > 0.99
if not np.array_equal(arr, segm.arr):
nb_changed += 1
assert nb_changed > 5
def test_images_and_keypoints_aligned(self):
for i in np.arange(20):
aug = iaa.Jigsaw(nb_rows=(1, 3), nb_cols=(1, 3), max_steps=(2, 5),
seed=i)
# make sure that these coords are not exactly at a grid cell
# border with any possibly sampled height/width in grid cells
y = 17.5
x = 25.5
kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=x, y=y)],
shape=(20, 30))
image = np.zeros((20, 30), dtype=np.uint8)
image[int(y), int(x)] = 255
images_aug, kpsois_aug = aug(images=[image, image, image],
keypoints=[kpsoi, kpsoi, kpsoi])
for image_aug, kpsoi_aug in zip(images_aug, kpsois_aug):
x_aug = kpsoi_aug.keypoints[0].x
y_aug = kpsoi_aug.keypoints[0].y
idx = np.argmax(image_aug)
y_aug_img, x_aug_img = np.unravel_index(idx,
image_aug.shape)
dist = np.sqrt((x_aug - x_aug_img)**2 + (y_aug - y_aug_img)**2)
# best possible distance is about 0.7 as KP coords are in cell
# center and sampled coords are at cell top left
assert dist < 0.8
def test_no_error_for_1x1_grids(self):
aug = iaa.Jigsaw(nb_rows=1, nb_cols=1, max_steps=2)
image = np.mod(np.arange(19*19*3), 255).astype(np.uint8)
image = image.reshape((19, 19, 3))
kpsoi = ia.KeypointsOnImage.from_xy_array([
(0, 0),
(5.5, 3.5),
(4.0, 12.5),
(11.1, 11.2),
(12.1, 23.5)
], shape=(19, 19, 3))
image_aug, kpsoi_aug = aug(image=image, keypoints=kpsoi)
assert np.array_equal(image_aug, image)
assert np.allclose(kpsoi_aug.to_xy_array(), kpsoi.to_xy_array())
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
for _ in sm.xrange(3):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Jigsaw(nb_rows=2, nb_cols=2, max_steps=2)
image_aug = aug(image=image)
# (2, 2, [C]) here, because rows/cols are padded to be
# multiple of nb_rows and nb_cols
shape_exp = tuple([2, 2] + list(shape[2:]))
assert image_aug.dtype.name == "uint8"
assert np.array_equal(image_aug,
np.zeros(shape_exp, dtype=np.uint8))
def test_get_parameters(self):
aug = iaa.Jigsaw(nb_rows=1, nb_cols=2)
params = aug.get_parameters()
assert params[0] is aug.nb_rows
assert params[1] is aug.nb_cols
assert params[2] is aug.max_steps
assert params[3] is True
def test_pickleable(self):
aug = iaa.Jigsaw(nb_rows=(1, 4), nb_cols=(1, 4), max_steps=(1, 3))
runtest_pickleable_uint8_img(aug, iterations=20, shape=(32, 32, 3))
| 38.528335
| 81
| 0.558987
| 51,756
| 389,560
| 4.000927
| 0.019321
| 0.01981
| 0.008693
| 0.005587
| 0.872967
| 0.831358
| 0.788571
| 0.749478
| 0.715703
| 0.684177
| 0
| 0.065048
| 0.316223
| 389,560
| 10,110
| 82
| 38.532146
| 0.712331
| 0.049145
| 0
| 0.66037
| 0
| 0
| 0.014533
| 0.001501
| 0
| 0
| 0
| 0.000297
| 0.194721
| 1
| 0.096131
| false
| 0.000259
| 0.002976
| 0.006599
| 0.120714
| 0.000129
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6b1a025f4739f3c9092028dcd801bf40f016fe26
| 146
|
py
|
Python
|
auladjango/vdjango/meusite/views.py
|
lel352/Curso-Python
|
d65484c807db52d57042eee20ccbd3131825fa98
|
[
"MIT"
] | 1
|
2021-09-04T14:34:34.000Z
|
2021-09-04T14:34:34.000Z
|
auladjango/vdjango/meusite/views.py
|
lel352/Curso-Python
|
d65484c807db52d57042eee20ccbd3131825fa98
|
[
"MIT"
] | null | null | null |
auladjango/vdjango/meusite/views.py
|
lel352/Curso-Python
|
d65484c807db52d57042eee20ccbd3131825fa98
|
[
"MIT"
] | null | null | null |
# uma forma de criar uma home em usar app
from django.shortcuts import render
def index(request):
return render(request, 'home/index.html')
| 20.857143
| 45
| 0.746575
| 23
| 146
| 4.73913
| 0.782609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171233
| 146
| 6
| 46
| 24.333333
| 0.900826
| 0.267123
| 0
| 0
| 0
| 0
| 0.144231
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
6b20ab3d5c88981c654482cf3bd5c6b6d04ba358
| 7,850
|
py
|
Python
|
test5_10.py
|
MASQA/seleniumtrainingPython
|
8ee0f168ce8be3820e87b43b5e87666bde8d74c4
|
[
"Apache-2.0"
] | null | null | null |
test5_10.py
|
MASQA/seleniumtrainingPython
|
8ee0f168ce8be3820e87b43b5e87666bde8d74c4
|
[
"Apache-2.0"
] | null | null | null |
test5_10.py
|
MASQA/seleniumtrainingPython
|
8ee0f168ce8be3820e87b43b5e87666bde8d74c4
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import re
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
@pytest.fixture
def driver():
wd = webdriver.Chrome()
wd.quit
return wd
test_data_for_checking_names_and_costs = [
('.name', '#box-product .title'),
('.regular-price', '#box-product .regular-price'),
('.campaign-price', '#box-product .campaign-price')
]
@pytest.mark.parametrize("target_on_main_page,target_on_product_page",
test_data_for_checking_names_and_costs)
def test_names_and_costs_products(driver, target_on_main_page, target_on_product_page):
"""
Тест проверяет совпадение названия и значения цен на главной и странице продукта
:param driver:
:param target_on_main_page: локатор для нахождения названия/цены(обычной / акционной)на главной странице
:param target_on_product_page: локатор для нахождения названия/цены(обычной / акционной)на странице продукта
:return:
"""
driver.get("http://www.litecart.com/")
main_page_element = driver.find_element(By.CSS_SELECTOR, '#box-campaigns ul > li')
value_on_main_page = ''
value_on_product_page = ''
# print('Name of ', i, '-th prod', elements[i].find_element(By.CSS_SELECTOR, '.name').text)
value_on_main_page += main_page_element.find_element(By.CSS_SELECTOR, target_on_main_page).text
main_page_element.click()
value_on_product_page += driver.find_element(By.CSS_SELECTOR, target_on_product_page).text
# print('Name in prod page of ', i, '-th prod', driver.find_element(By.CSS_SELECTOR, '#box-product .title').text)
print('List of product titles on main pages', value_on_main_page)
print('List of product titles on product pages', value_on_product_page)
assert value_on_main_page == value_on_product_page
def test_color_regular_cost_products_on_main_page(driver):
"""
Тест проверяет цвет обычной цены на главной странице
:param driver:
:return:
"""
driver.get("http://www.litecart.com/")
element = driver.find_element(By.CSS_SELECTOR, '#box-campaigns ul > li .regular-price')
color_string = element.value_of_css_property('color')
nums = re.findall(r'\d+', color_string)
colors = [int(i) for i in nums]
print('color', colors)
assert colors[0] == colors[1] == colors[2]
def test_color_campaign_cost_products_on_main_page(driver):
"""
Тест проверяет цвет акционной цены на главной странице
:param driver:
:return:
"""
driver.get("http://www.litecart.com/")
element = driver.find_element(By.CSS_SELECTOR, '#box-campaigns ul > li .campaign-price')
color_string = element.value_of_css_property('color')
nums = re.findall(r'\d+', color_string)
colors = [int(i) for i in nums]
print('color', colors)
assert colors[1] == colors[2] == 0
def test_color_regular_cost_products_on_product_page(driver):
"""
Тест проверяет цвет обычной цены на странице продукта
:param driver:
:return:
"""
driver.get("http://www.litecart.com/")
driver.find_element(By.CSS_SELECTOR, '#box-campaigns ul > li').click()
element = driver.find_element(By.CSS_SELECTOR, '#box-product .regular-price')
color_string = element.value_of_css_property('color')
nums = re.findall(r'\d+', color_string)
colors = [int(i) for i in nums]
print('color', colors)
assert colors[0] == colors[1] == colors[2]
def test_color_campaign_cost_products_on_product_page(driver):
"""
Тест проверяет цвет акционной цены на странице продукта
"""
driver.get("http://www.litecart.com/")
driver.find_element(By.CSS_SELECTOR, '#box-campaigns ul > li').click()
element = driver.find_element(By.CSS_SELECTOR, '#box-product .campaign-price')
color_string = element.value_of_css_property('color')
nums = re.findall(r'\d+', color_string)
colors = [int(i) for i in nums]
print('color', colors)
assert colors[1] == colors[2] == 0
def test_text_style_regular_cost_products_on_main_page(driver):
"""
Тест проверяет что для обычной цены на главной странице используется зачеркнтый шрифт
:param driver:
:return:
"""
driver.get("http://www.litecart.com/")
element = driver.find_element(By.CSS_SELECTOR, '#box-campaigns ul > li .regular-price')
text_decoration = element.value_of_css_property('text-decoration-line')
print('text_decoration ', text_decoration)
assert text_decoration == 'line-through'
def test_text_style_campaign_cost_products_on_main_page(driver):
    """Verify the campaign price on the main page uses bold text
    (CSS font-weight 700).

    :param driver: selenium WebDriver fixture
    """
    driver.get("http://www.litecart.com/")
    price = driver.find_element(By.CSS_SELECTOR, '#box-campaigns ul > li .campaign-price')
    font_weight = price.value_of_css_property('font-weight')
    print('font_weight', font_weight)
    assert font_weight == '700'
def test_text_style_regular_cost_products_on_product_page(driver):
    """Verify the regular price on a product page uses strike-through text.

    :param driver: selenium WebDriver fixture
    """
    driver.get("http://www.litecart.com/")
    # Open the first campaign product from the main page.
    driver.find_element(By.CSS_SELECTOR, '#box-campaigns ul > li').click()
    price = driver.find_element(By.CSS_SELECTOR, '#box-product .regular-price')
    text_decoration = price.value_of_css_property('text-decoration-line')
    print('text_decoration ', text_decoration)
    assert text_decoration == 'line-through'
def test_text_style_campaign_cost_products_on_product_page(driver):
    """Verify the campaign price on a product page uses bold text
    (CSS font-weight 700).

    :param driver: selenium WebDriver fixture
    """
    driver.get("http://www.litecart.com/")
    # Open the first campaign product from the main page.
    driver.find_element(By.CSS_SELECTOR, '#box-campaigns ul > li').click()
    price = driver.find_element(By.CSS_SELECTOR, '#box-product .campaign-price')
    font_weight = price.value_of_css_property('font-weight')
    print('font_weight', font_weight)
    assert font_weight == '700'
def test_comparing_cost_text_size_on_main_page(driver):
    """Verify that on the main page the regular-price font is smaller than
    the campaign-price font.

    :param driver: selenium WebDriver fixture
    """
    driver.get("http://www.litecart.com/")
    regular_cost = driver.find_element(By.CSS_SELECTOR, '#box-campaigns ul > li .regular-price')
    text_size_regular_cost = regular_cost.value_of_css_property('font-size')
    campaign_cost = driver.find_element(By.CSS_SELECTOR, '#box-campaigns ul > li .campaign-price')
    text_size_campaign_cost = campaign_cost.value_of_css_property('font-size')
    print('text_size_regular_cost:', text_size_regular_cost, 'text_size_campaign_cost ', text_size_campaign_cost)
    # BUG FIX: the original compared the raw CSS strings lexicographically
    # (e.g. '9px' < '10px' is False as strings); compare the numeric pixel
    # values instead.
    regular_px = float(re.findall(r'[\d.]+', text_size_regular_cost)[0])
    campaign_px = float(re.findall(r'[\d.]+', text_size_campaign_cost)[0])
    assert regular_px < campaign_px
def test_comparing_cost_text_size_on_product_page(driver):
    """Verify that on a product page the regular-price font is smaller than
    the campaign-price font.

    :param driver: selenium WebDriver fixture
    """
    driver.get("http://www.litecart.com/")
    driver.find_element(By.CSS_SELECTOR, '#box-campaigns ul > li').click()
    regular_cost = driver.find_element(By.CSS_SELECTOR, '#box-product .regular-price')
    text_size_regular_cost = regular_cost.value_of_css_property('font-size')
    campaign_cost = driver.find_element(By.CSS_SELECTOR, '#box-product .campaign-price')
    text_size_campaign_cost = campaign_cost.value_of_css_property('font-size')
    print('text_size_regular_cost:', text_size_regular_cost, 'text_size_campaign_cost ', text_size_campaign_cost)
    # BUG FIX: the original compared the raw CSS strings lexicographically
    # (e.g. '9px' < '10px' is False as strings); compare the numeric pixel
    # values instead.
    regular_px = float(re.findall(r'[\d.]+', text_size_regular_cost)[0])
    campaign_px = float(re.findall(r'[\d.]+', text_size_campaign_cost)[0])
    assert regular_px < campaign_px
| 35.844749
| 117
| 0.723185
| 1,081
| 7,850
| 4.969473
| 0.111008
| 0.045048
| 0.053239
| 0.065525
| 0.851266
| 0.827811
| 0.817014
| 0.774758
| 0.728779
| 0.660276
| 0
| 0.002735
| 0.161656
| 7,850
| 218
| 118
| 36.009174
| 0.813554
| 0.192866
| 0
| 0.603774
| 0
| 0
| 0.221249
| 0.022141
| 0
| 0
| 0
| 0
| 0.103774
| 1
| 0.113208
| false
| 0
| 0.056604
| 0
| 0.179245
| 0.113208
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6b44d4d760bca1d5eed67c20a98a4ae53f9aa7a4
| 61,209
|
py
|
Python
|
autotest/pst_from_tests.py
|
scalet98/pyemu
|
c0314c8a705d5523ba7cd66dbf452ab2990c0e4d
|
[
"BSD-3-Clause"
] | 1
|
2020-09-18T12:09:55.000Z
|
2020-09-18T12:09:55.000Z
|
autotest/pst_from_tests.py
|
scalet98/pyemu
|
c0314c8a705d5523ba7cd66dbf452ab2990c0e4d
|
[
"BSD-3-Clause"
] | null | null | null |
autotest/pst_from_tests.py
|
scalet98/pyemu
|
c0314c8a705d5523ba7cd66dbf452ab2990c0e4d
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sys
import platform
# sys.path.append(os.path.join("..","pyemu"))
import pyemu
from pyemu import os_utils
from pyemu.utils import PstFrom
import shutil
# Resolve the platform-specific directory holding the model/PEST++ binaries
# and the executable suffix (only the Windows binaries carry one).
ext = ''
bin_path = os.path.join("..", "..", "bin")
_plat = platform.platform().lower()
if "linux" in _plat:
    bin_path = os.path.join(bin_path, "linux")
elif "darwin" in _plat:
    bin_path = os.path.join(bin_path, "mac")
else:
    bin_path = os.path.join(bin_path, "win")
    ext = '.exe'

# Full paths to (and bare names of) the executables used by the tests below.
mf_exe_path = os.path.join(bin_path, "mfnwt")
mt_exe_path = os.path.join(bin_path, "mt3dusgs")
mf6_exe_path = os.path.join(bin_path, "mf6")
pp_exe_path = os.path.join(bin_path, "pestpp")
ies_exe_path = os.path.join(bin_path, "pestpp-ies")
swp_exe_path = os.path.join(bin_path, "pestpp-swp")
mf_exe_name = os.path.basename(mf_exe_path)
mf6_exe_name = os.path.basename(mf6_exe_path)
def freyberg_test():
    """End-to-end test of PstFrom against the MODFLOW-NWT freyberg model.

    Loads the example model, adds a RIV package, reruns the model, sets up
    head and SFR observations (tabular and ins-file styles), adds list- and
    array-style parameters (grid/constant/zone/pilot-point), builds a PEST
    control file, verifies the multiplier-parameter apply machinery, and runs
    pestpp-ies at noptmax=0 expecting near-zero phi.

    Requires flopy, pyemu, the mfnwt/pestpp-ies binaries and the
    ../examples/freyberg_sfr_update model; returns early if flopy cannot be
    imported.
    """
    import numpy as np
    import pandas as pd
    pd.set_option('display.max_rows', 500)
    pd.set_option('display.max_columns', 500)
    pd.set_option('display.width', 1000)
    try:
        import flopy
    except:
        return
    org_model_ws = os.path.join("..", "examples", "freyberg_sfr_update")
    nam_file = "freyberg.nam"
    m = flopy.modflow.Modflow.load(nam_file, model_ws=org_model_ws,
                                   check=False, forgive=False,
                                   exe_name=mf_exe_path)
    # add a small RIV package (3 reaches, incl. a duplicated entry) so the
    # test exercises list-style input files
    flopy.modflow.ModflowRiv(m, stress_period_data={
        0: [[0, 0, 0, m.dis.top.array[0, 0], 1.0, m.dis.botm.array[0, 0, 0]],
            [0, 0, 1, m.dis.top.array[0, 1], 1.0, m.dis.botm.array[0, 0, 1]],
            [0, 0, 1, m.dis.top.array[0, 1], 1.0, m.dis.botm.array[0, 0, 1]]]})
    # re-write the model into a scratch workspace and run it
    org_model_ws = "temp_pst_from"
    if os.path.exists(org_model_ws):
        shutil.rmtree(org_model_ws)
    m.external_path = "."
    m.change_model_ws(org_model_ws)
    m.write_input()
    print("{0} {1}".format(mf_exe_path, m.name + ".nam"), org_model_ws)
    os_utils.run("{0} {1}".format(mf_exe_path, m.name + ".nam"),
                 cwd=org_model_ws)
    # NOTE(review): hds_kperk is built but never used below
    hds_kperk = []
    for k in range(m.nlay):
        for kper in range(m.nper):
            hds_kperk.append([kper, k])
    hds_runline, df = pyemu.gw_utils.setup_hds_obs(
        os.path.join(m.model_ws, f"{m.name}.hds"), kperk_pairs=None, skip=None,
        prefix="hds", include_path=False)
    # post-process the SFR binary output into a tabular file
    sfo = flopy.utils.SfrFile(os.path.join(m.model_ws, 'freyberg.sfr.out'))
    sfodf = sfo.get_dataframe()
    sfodf[['kstp', 'kper']] = pd.DataFrame(sfodf.kstpkper.to_list(),
                                           index=sfodf.index)
    sfodf = sfodf.drop('kstpkper', axis=1)
    # just adding a bit of header in for test purposes
    sfo_pp_file = os.path.join(m.model_ws, 'freyberg.sfo.dat')
    with open(sfo_pp_file, 'w') as fp:
        fp.writelines(["This is a post processed sfr output file\n",
                       "Processed into tabular form using the lines:\n",
                       "sfo = flopy.utils.SfrFile('freyberg.sfr.out')\n",
                       "sfo.get_dataframe().to_csv('freyberg.sfo.dat')\n"])
        sfodf.sort_index(1).to_csv(fp, sep=' ', index_label='idx')
    sfodf.sort_index(1).to_csv(os.path.join(m.model_ws, 'freyberg.sfo.csv'),
                               index_label='idx')
    template_ws = "new_temp"
    # sr0 = m.sr
    sr = pyemu.helpers.SpatialReference.from_namfile(
        os.path.join(m.model_ws, m.namefile),
        delr=m.dis.delr, delc=m.dis.delc)
    # set up PstFrom object
    pf = PstFrom(original_d=org_model_ws, new_d=template_ws,
                 remove_existing=True,
                 longnames=True, spatial_reference=sr,
                 zero_based=False)
    # obs
    # using tabular style model output
    # (generated by pyemu.gw_utils.setup_hds_obs())
    pf.add_observations('freyberg.hds.dat', insfile='freyberg.hds.dat.ins2',
                        index_cols='obsnme', use_cols='obsval', prefix='hds')
    # using the ins file generated by pyemu.gw_utils.setup_hds_obs()
    pf.add_observations_from_ins(ins_file='freyberg.hds.dat.ins')
    pf.post_py_cmds.append(hds_runline)
    pf.tmp_files.append(f"{m.name}.hds")
    # sfr outputs to obs
    sfr_idx = ['segment', 'reach', 'kstp', 'kper']
    sfr_use = ["Qaquifer", "Qout", 'width']
    pf.add_observations('freyberg.sfo.dat', insfile=None,
                        index_cols=sfr_idx,
                        use_cols=sfr_use, prefix='sfr',
                        ofile_skip=4, ofile_sep=' ', use_rows=np.arange(0, 50))
    # check obs set up: rebuild index/usecol columns from the long obs names
    # and compare the processed values back against the source dataframe
    sfrobs = pf.obs_dfs[-1].copy()
    sfrobs[['usecol'] + sfr_idx] = sfrobs.obsnme.apply(
        lambda x: pd.Series(
            dict([s.split(':') for s in x.split('_') if ':' in s])))
    sfrobs.loc[:, sfr_idx] = sfrobs.loc[:, sfr_idx].astype(int)
    sfrobs_p = sfrobs.pivot_table(index=sfr_idx,
                                  columns=['usecol'], values='obsval')
    sfodf_c = sfodf.set_index(sfr_idx).sort_index()
    sfodf_c.columns = sfodf_c.columns.str.lower()
    assert (sfrobs_p == sfodf_c.loc[sfrobs_p.index,
                                    sfrobs_p.columns]).all().all(), (
        "Mis-match between expected and processed obs values")
    pf.tmp_files.append(f"{m.name}.sfr.out")
    pf.extra_py_imports.append('flopy')
    # these commands are written into the generated forward-run script to
    # recreate freyberg.sfo.dat at run time
    pf.post_py_cmds.extend(
        ["sfo_pp_file = 'freyberg.sfo.dat'",
         "sfo = flopy.utils.SfrFile('freyberg.sfr.out')",
         "sfodf = sfo.get_dataframe()",
         "sfodf[['kstp', 'kper']] = pd.DataFrame(sfodf.kstpkper.to_list(), index=sfodf.index)",
         "sfodf = sfodf.drop('kstpkper', axis=1)",
         "with open(sfo_pp_file, 'w') as fp:",
         " fp.writelines(['This is a post processed sfr output file\\n', "
         "'Processed into tabular form using the lines:\\n', "
         "'sfo = flopy.utils.SfrFile(`freyberg.sfr.out`)\\n', "
         "'sfo.get_dataframe().to_csv(`freyberg.sfo.dat`)\\n'])",
         " sfodf.sort_index(1).to_csv(fp, sep=' ', index_label='idx')"])
    # csv version of sfr obs
    # sfr outputs to obs
    pf.add_observations('freyberg.sfo.csv', insfile=None,
                        index_cols=['segment', 'reach', 'kstp', 'kper'],
                        use_cols=["Qaquifer", "Qout", "width"], prefix='sfr2',
                        ofile_sep=',', obsgp=['qaquifer', 'qout', "width"],
                        use_rows=np.arange(50, 101))
    # check obs set up (same round-trip comparison as above)
    sfrobs = pf.obs_dfs[-1].copy()
    sfrobs[['usecol'] + sfr_idx] = sfrobs.obsnme.apply(
        lambda x: pd.Series(
            dict([s.split(':') for s in x.split('_') if ':' in s])))
    sfrobs.loc[:, sfr_idx] = sfrobs.loc[:, sfr_idx].astype(int)
    sfrobs_p = sfrobs.pivot_table(index=sfr_idx,
                                  columns=['usecol'], values='obsval')
    sfodf_c = sfodf.set_index(sfr_idx).sort_index()
    sfodf_c.columns = sfodf_c.columns.str.lower()
    assert (sfrobs_p == sfodf_c.loc[sfrobs_p.index,
                                    sfrobs_p.columns]).all().all(), (
        "Mis-match between expected and processed obs values")
    obsnmes = pd.concat([df.obgnme for df in pf.obs_dfs]).unique()
    assert all([gp in obsnmes for gp in ['qaquifer', 'qout']])
    pf.post_py_cmds.append(
        "sfodf.sort_index(1).to_csv('freyberg.sfo.csv', sep=',', index_label='idx')")
    # pars
    pf.add_parameters(filenames="RIV_0000.dat", par_type="grid",
                      index_cols=[0, 1, 2], use_cols=[3, 5],
                      par_name_base=["rivstage_grid", "rivbot_grid"],
                      mfile_fmt='%10d%10d%10d %15.8F %15.8F %15.8F',
                      pargp='rivbot')
    pf.add_parameters(filenames="RIV_0000.dat", par_type="grid",
                      index_cols=[0, 1, 2], use_cols=4)
    pf.add_parameters(filenames=["WEL_0000.dat", "WEL_0001.dat"],
                      par_type="grid", index_cols=[0, 1, 2], use_cols=3,
                      par_name_base="welflux_grid",
                      zone_array=m.bas6.ibound.array)
    pf.add_parameters(filenames=["WEL_0000.dat"], par_type="constant",
                      index_cols=[0, 1, 2], use_cols=3,
                      par_name_base=["flux_const"])
    pf.add_parameters(filenames="rech_1.ref", par_type="grid",
                      zone_array=m.bas6.ibound[0].array,
                      par_name_base="rch_datetime:1-1-1970")
    pf.add_parameters(filenames=["rech_1.ref", "rech_2.ref"],
                      par_type="zone", zone_array=m.bas6.ibound[0].array)
    pf.add_parameters(filenames="rech_1.ref", par_type="pilot_point",
                      zone_array=m.bas6.ibound[0].array,
                      par_name_base="rch_datetime:1-1-1970", pp_space=4)
    pf.add_parameters(filenames="rech_1.ref", par_type="pilot_point",
                      zone_array=m.bas6.ibound[0].array,
                      par_name_base="rch_datetime:1-1-1970", pp_space=1,
                      ult_ubound=100, ult_lbound=0.0)
    # add model run command
    pf.mod_sys_cmds.append("{0} {1}".format(mf_exe_name, m.name + ".nam"))
    print(pf.mult_files)
    print(pf.org_files)
    # build pest
    pst = pf.build_pst('freyberg.pst')
    # check mult files are in pst input files (pilot-point mult files are
    # handled separately, hence the pp_file exclusion)
    csv = os.path.join(template_ws, "mult2model_info.csv")
    df = pd.read_csv(csv, index_col=0)
    mults_not_linked_to_pst = ((set(df.mlt_file.unique()) -
                                set(pst.input_files)) -
                               set(df.loc[df.pp_file.notna()].mlt_file))
    assert len(mults_not_linked_to_pst) == 0, print(mults_not_linked_to_pst)
    pst.write_input_files(pst_path=pf.new_d)
    # test par mults are working
    b_d = os.getcwd()
    os.chdir(pf.new_d)
    try:
        pyemu.helpers.apply_list_and_array_pars(
            arr_par_file="mult2model_info.csv")
    except Exception as e:
        os.chdir(b_d)
        raise Exception(str(e))
    os.chdir(b_d)
    # noptmax=0: a single forward run; phi should be ~0 since nothing changed
    pst.control_data.noptmax = 0
    pst.write(os.path.join(pf.new_d, "freyberg.pst"))
    pyemu.os_utils.run("{0} freyberg.pst".format(ies_exe_path), cwd=pf.new_d)
    res_file = os.path.join(pf.new_d, "freyberg.base.rei")
    assert os.path.exists(res_file), res_file
    pst.set_res(res_file)
    print(pst.phi)
    assert pst.phi < 1.0e-5, pst.phi
def freyberg_prior_build_test():
    """Test PstFrom prior covariance construction and ensemble drawing.

    Like freyberg_test() but focused on geostatistically-correlated
    parameters: spatial and temporal GeoStructs are attached to list- and
    array-style parameters, a prior covariance matrix is built, an ensemble
    is drawn (spectral simulation) and a short pestpp-ies sweep is run.

    Requires flopy, pyemu, the mfnwt/pestpp-ies binaries and the
    ../examples/freyberg_sfr_update model; returns early if flopy cannot be
    imported.
    """
    import numpy as np
    import pandas as pd
    pd.set_option('display.max_rows', 500)
    pd.set_option('display.max_columns', 500)
    pd.set_option('display.width', 1000)
    try:
        import flopy
    except:
        return
    org_model_ws = os.path.join("..", "examples", "freyberg_sfr_update")
    nam_file = "freyberg.nam"
    m = flopy.modflow.Modflow.load(nam_file, model_ws=org_model_ws,
                                   check=False, forgive=False,
                                   exe_name=mf_exe_path)
    flopy.modflow.ModflowRiv(m, stress_period_data={
        0: [[0, 0, 0, m.dis.top.array[0, 0], 1.0, m.dis.botm.array[0, 0, 0]],
            [0, 0, 1, m.dis.top.array[0, 1], 1.0, m.dis.botm.array[0, 0, 1]],
            [0, 0, 1, m.dis.top.array[0, 1], 1.0, m.dis.botm.array[0, 0, 1]]]})
    # duplicate some wells (layer-shifted in SP0, same cells with scaled flux
    # in SP1) to exercise duplicate-entry handling in list par setup
    welsp = m.wel.stress_period_data.data.copy()
    addwell = welsp[0].copy()
    addwell['k'] = 1
    welsp[0] = np.rec.array(np.concatenate([welsp[0], addwell]))
    samewell = welsp[1].copy()
    samewell['flux'] *= 10
    welsp[1] = np.rec.array(np.concatenate([welsp[1], samewell]))
    m.wel.stress_period_data = welsp
    org_model_ws = "temp_pst_from"
    if os.path.exists(org_model_ws):
        shutil.rmtree(org_model_ws)
    m.external_path = "."
    m.change_model_ws(org_model_ws)
    m.write_input()
    # for exe in [mf_exe_path, mt_exe_path, ies_exe_path]:
    #     shutil.copy(os.path.relpath(exe, '..'), org_model_ws)
    print("{0} {1}".format(mf_exe_path, m.name + ".nam"), org_model_ws)
    os_utils.run("{0} {1}".format(mf_exe_path, m.name + ".nam"),
                 cwd=org_model_ws)
    # NOTE(review): hds_kperk is built but never used below
    hds_kperk = []
    for k in range(m.nlay):
        for kper in range(m.nper):
            hds_kperk.append([kper, k])
    hds_runline, df = pyemu.gw_utils.setup_hds_obs(
        os.path.join(m.model_ws, f"{m.name}.hds"), kperk_pairs=None, skip=None,
        prefix="hds", include_path=False)
    template_ws = "new_temp"
    # sr0 = m.sr
    sr = pyemu.helpers.SpatialReference.from_namfile(
        os.path.join(m.model_ws, m.namefile),
        delr=m.dis.delr, delc=m.dis.delc)
    # set up PstFrom object
    pf = PstFrom(original_d=org_model_ws, new_d=template_ws,
                 remove_existing=True,
                 longnames=True, spatial_reference=sr,
                 zero_based=False)
    pf.extra_py_imports.append('flopy')
    pf.mod_sys_cmds.append("which python")
    # obs
    # using tabular style model output
    # (generated by pyemu.gw_utils.setup_hds_obs())
    pf.add_observations('freyberg.hds.dat', insfile='freyberg.hds.dat.ins2',
                        index_cols='obsnme', use_cols='obsval', prefix='hds')
    pf.post_py_cmds.append(hds_runline)
    pf.tmp_files.append(f"{m.name}.hds")
    # pars: spatial variogram/geostruct for spatially correlated pars
    v = pyemu.geostats.ExpVario(contribution=1.0, a=2500)
    geostruct = pyemu.geostats.GeoStruct(variograms=v, transform='log')
    # Pars for river list style model file, every entry in columns 3 and 4
    # specifying formatted model file and passing a geostruct # TODO method for appending specific ult bounds
    # pf.add_parameters(filenames="RIV_0000.dat", par_type="grid",
    #                   index_cols=[0, 1, 2], use_cols=[3, 4],
    #                   par_name_base=["rivstage_grid", "rivcond_grid"],
    #                   mfile_fmt='%10d%10d%10d %15.8F %15.8F %15.8F',
    #                   geostruct=geostruct, lower_bound=[0.9, 0.01],
    #                   upper_bound=[1.1, 100.], ult_lbound=[0.3, None])
    # # 2 constant pars applied to columns 3 and 4
    # # this time specifying free formatted model file
    # pf.add_parameters(filenames="RIV_0000.dat", par_type="constant",
    #                   index_cols=[0, 1, 2], use_cols=[3, 4],
    #                   par_name_base=["rivstage", "rivcond"],
    #                   mfile_fmt='free', lower_bound=[0.9, 0.01],
    #                   upper_bound=[1.1, 100.], ult_lbound=[None, 0.01])
    # Pars for river list style model file, every entry in column 4
    pf.add_parameters(filenames="RIV_0000.dat", par_type="grid",
                      index_cols=[0, 1, 2], use_cols=[4],
                      par_name_base=["rivcond_grid"],
                      mfile_fmt='%10d%10d%10d %15.8F %15.8F %15.8F',
                      geostruct=geostruct, lower_bound=[0.01],
                      upper_bound=[100.], ult_lbound=[None])
    # constant par applied to column 4
    # this time specifying free formatted model file
    pf.add_parameters(filenames="RIV_0000.dat", par_type="constant",
                      index_cols=[0, 1, 2], use_cols=[4],
                      par_name_base=["rivcond"],
                      mfile_fmt='free', lower_bound=[0.01],
                      upper_bound=[100.], ult_lbound=[0.01])
    # pf.add_parameters(filenames="RIV_0000.dat", par_type="constant",
    #                   index_cols=[0, 1, 2], use_cols=5,
    #                   par_name_base="rivbot",
    #                   mfile_fmt='free', lower_bound=0.9,
    #                   upper_bound=1.1, ult_ubound=100.,
    #                   ult_lbound=0.001)
    # setting up temporal variogram for correlating temporal pars
    date = m.dis.start_datetime
    v = pyemu.geostats.ExpVario(contribution=1.0, a=180.0)  # 180 correlation length
    t_geostruct = pyemu.geostats.GeoStruct(variograms=v, transform='log')
    # looping over temporal list style input files
    # setting up constant parameters for col 3 for each temporal file
    # making sure all are set up with same pargp and geostruct (to ensure correlation)
    # Parameters for wel list style
    well_mfiles = ["WEL_0000.dat", "WEL_0001.dat", "WEL_0002.dat"]
    for t, well_file in enumerate(well_mfiles):
        # passing same temporal geostruct and pargp,
        # date is incremented and will be used for correlation with
        pf.add_parameters(filenames=well_file, par_type="constant",
                          index_cols=[0, 1, 2], use_cols=3,
                          par_name_base="flux", alt_inst_str='kper',
                          datetime=date, geostruct=t_geostruct,
                          pargp='wellflux_t', lower_bound=0.25,
                          upper_bound=1.75)
        date = (pd.to_datetime(date) +
                pd.DateOffset(m.dis.perlen.array[t], 'day'))
    # par for each well (same par through time)
    pf.add_parameters(filenames=well_mfiles,
                      par_type="grid", index_cols=[0, 1, 2], use_cols=3,
                      par_name_base="welflux_grid",
                      zone_array=m.bas6.ibound.array,
                      geostruct=geostruct, lower_bound=0.25, upper_bound=1.75)
    # global constant across all files
    pf.add_parameters(filenames=well_mfiles,
                      par_type="constant",
                      index_cols=[0, 1, 2], use_cols=3,
                      par_name_base=["flux_global"],
                      lower_bound=0.25, upper_bound=1.75)
    # Spatial array style pars - cell-by-cell
    hk_files = ["hk_Layer_{0:d}.ref".format(i) for i in range(1, 4)]
    for hk in hk_files:
        pf.add_parameters(filenames=hk, par_type="grid",
                          zone_array=m.bas6.ibound[0].array,
                          par_name_base="hk", alt_inst_str='lay',
                          geostruct=geostruct,
                          lower_bound=0.01, upper_bound=100.)
    # Pars for temporal array style model files
    date = m.dis.start_datetime  # reset date
    rch_mfiles = ["rech_0.ref", "rech_1.ref", "rech_2.ref"]
    for t, rch_file in enumerate(rch_mfiles):
        # constant par for each file but linked by geostruct and pargp
        pf.add_parameters(filenames=rch_file, par_type="constant",
                          zone_array=m.bas6.ibound[0].array,
                          par_name_base="rch", alt_inst_str='kper',
                          datetime=date, geostruct=t_geostruct,
                          pargp='rch_t', lower_bound=0.9, upper_bound=1.1)
        date = (pd.to_datetime(date) +
                pd.DateOffset(m.dis.perlen.array[t], 'day'))
    # spatially distributed array style pars - cell-by-cell
    # pf.add_parameters(filenames=rch_mfiles, par_type="grid",
    #                   zone_array=m.bas6.ibound[0].array,
    #                   par_name_base="rch",
    #                   geostruct=geostruct)
    pf.add_parameters(filenames=rch_mfiles, par_type="pilot_point",
                      zone_array=m.bas6.ibound[0].array,
                      par_name_base="rch", pp_space=1,
                      ult_ubound=None, ult_lbound=None,
                      geostruct=geostruct, lower_bound=0.9, upper_bound=1.1)
    # global constant recharge par
    pf.add_parameters(filenames=rch_mfiles, par_type="constant",
                      zone_array=m.bas6.ibound[0].array,
                      par_name_base="rch_global", lower_bound=0.9,
                      upper_bound=1.1)
    # zonal recharge pars
    pf.add_parameters(filenames=rch_mfiles,
                      par_type="zone", par_name_base='rch_zone',
                      lower_bound=0.9, upper_bound=1.1, ult_lbound=1.e-6,
                      ult_ubound=100.)
    # add model run command
    pf.mod_sys_cmds.append("{0} {1}".format(mf_exe_name, m.name + ".nam"))
    print(pf.mult_files)
    print(pf.org_files)
    # build pest, then build the prior cov and draw an ensemble
    pst = pf.build_pst('freyberg.pst')
    cov = pf.build_prior(fmt="ascii")
    pe = pf.draw(100, use_specsim=True)
    # check mult files are in pst input files
    csv = os.path.join(template_ws, "mult2model_info.csv")
    df = pd.read_csv(csv, index_col=0)
    mults_not_linked_to_pst = ((set(df.mlt_file.unique()) -
                                set(pst.input_files)) -
                               set(df.loc[df.pp_file.notna()].mlt_file))
    assert len(mults_not_linked_to_pst) == 0, print(mults_not_linked_to_pst)
    pst.write_input_files(pst_path=pf.new_d)
    # test par mults are working
    b_d = os.getcwd()
    os.chdir(pf.new_d)
    try:
        pyemu.helpers.apply_list_and_array_pars(
            arr_par_file="mult2model_info.csv")
    except Exception as e:
        os.chdir(b_d)
        raise Exception(str(e))
    os.chdir(b_d)
    pst.control_data.noptmax = 0
    pst.write(os.path.join(pf.new_d, "freyberg.pst"))
    pyemu.os_utils.run("{0} freyberg.pst".format(ies_exe_path), cwd=pf.new_d)
    res_file = os.path.join(pf.new_d, "freyberg.base.rei")
    assert os.path.exists(res_file), res_file
    pst.set_res(res_file)
    print(pst.phi)
    assert pst.phi < 1.0e-5, pst.phi
    pe.to_binary(os.path.join(pf.new_d, 'par.jcb'))
    # quick sweep test?
    pst.pestpp_options["ies_par_en"] = 'par.jcb'
    pst.pestpp_options["ies_num_reals"] = 10
    pst.control_data.noptmax = -1
    # par = pst.parameter_data
    # par.loc[:, 'parval1'] = pe.iloc[0].T
    pst.write(os.path.join(pf.new_d, "freyberg.pst"))
    pyemu.os_utils.run("{0} freyberg.pst".format(ies_exe_path), cwd=pf.new_d)
    # pyemu.os_utils.start_workers(pf.new_d,
    #                              exe_rel_path="pestpp-ies",
    #                              pst_rel_path="freyberg.pst",
    #                              num_workers=20, master_dir="master",
    #                              cleanup=False, port=4005)
def generic_function():
    """Write a small dummy observation file ("generic.csv") and return it.

    The frame carries a daily datetime index plus an integer counter column
    and two constant "simulated value" columns; it stands in for generic
    model output in the PstFrom tests.
    """
    import pandas as pd
    import numpy as np
    #onames = ["generic_obs_{0}".format(i) for i in range(100)]
    dates = pd.date_range("1-1-2020", periods=100, freq='d')
    frame = pd.DataFrame({"index_2": np.arange(100),
                          "simval1": 1,
                          "simval2": 2,
                          "datetime": dates})
    frame.index = frame.pop("datetime")
    frame.to_csv("generic.csv", date_format="%d-%m-%Y %H:%M:%S")
    return frame
def another_generic_function(some_arg):
    """Print *some_arg* and return None.

    A stand-in helper used to test PstFrom.add_py_function() with a function
    that is copied into the forward-run script but not called directly.

    :param some_arg: any printable object
    """
    # removed unused `pandas`/`numpy` imports from the original
    print(some_arg)
def mf6_freyberg_test():
    """End-to-end PstFrom test against the MODFLOW-6 freyberg model.

    Loads the mf6 simulation and externalizes its data files, doctors several
    external well list files with comment lines/headers (to exercise
    PstFrom's comment and header handling), sets up observations (including
    functions injected via add_py_function), adds grid/constant/pilot-point
    parameters with spatial and temporal geostructs, builds the control
    file, verifies the multiplier-parameter apply machinery, builds a prior
    covariance and ensemble, runs pestpp-ies at noptmax=0 and checks the
    ultimate bounds recorded in mult2model_info.csv.

    Requires flopy, pyemu, the mf6/pestpp-ies binaries and the
    ../examples/freyberg_mf6 model; returns early if flopy cannot be
    imported.
    """
    import numpy as np
    import pandas as pd
    pd.set_option('display.max_rows', 500)
    pd.set_option('display.max_columns', 500)
    pd.set_option('display.width', 1000)
    try:
        import flopy
    except:
        return
    org_model_ws = os.path.join('..', 'examples', 'freyberg_mf6')
    tmp_model_ws = "temp_pst_from"
    if os.path.exists(tmp_model_ws):
        shutil.rmtree(tmp_model_ws)
    os.mkdir(tmp_model_ws)
    sim = flopy.mf6.MFSimulation.load(sim_ws=org_model_ws)
    # sim.set_all_data_external()
    sim.simulation_data.mfpath.set_sim_path(tmp_model_ws)
    # sim.set_all_data_external()
    m = sim.get_model("freyberg6")
    sim.set_all_data_external(check_data=False)
    sim.write_simulation()
    # to by pass the issues with flopy
    # shutil.copytree(org_model_ws,tmp_model_ws)
    # sim = flopy.mf6.MFSimulation.load(sim_ws=org_model_ws)
    # m = sim.get_model("freyberg6")
    # SETUP pest stuff...
    os_utils.run("{0} ".format(mf6_exe_path), cwd=tmp_model_ws)
    # doctor some of the list par files to add a comment string
    with open(
            os.path.join('temp_pst_from',
                         "freyberg6.wel_stress_period_data_1.txt"), 'r') as fr:
        lines = [line for line in fr]
    with open(os.path.join('temp_pst_from',
                           "freyberg6.wel_stress_period_data_1.txt"), 'w') as fw:
        # SP1 file: leading comment line
        fw.write("# comment line explaining this external file\n")
        for line in lines:
            fw.write(line)
    with open(
            os.path.join('temp_pst_from',
                         "freyberg6.wel_stress_period_data_2.txt"), 'r') as fr:
        lines = [line for line in fr]
    with open(os.path.join('temp_pst_from',
                           "freyberg6.wel_stress_period_data_2.txt"), 'w') as fw:
        # SP2 file: leading comment plus a comment in the middle of the table
        fw.write("# comment line explaining this external file\n")
        for line in lines[0:3] + ["# comment mid table \n"] + lines[3:]:
            fw.write(line)
    with open(
            os.path.join('temp_pst_from',
                         "freyberg6.wel_stress_period_data_3.txt"), 'r') as fr:
        lines = [line for line in fr]
    with open(os.path.join('temp_pst_from',
                           "freyberg6.wel_stress_period_data_3.txt"), 'w') as fw:
        # SP3 file: commented header row with column names
        fw.write("#k i j flux \n")
        for line in lines:
            fw.write(line)
    with open(
            os.path.join('temp_pst_from',
                         "freyberg6.wel_stress_period_data_4.txt"), 'r') as fr:
        lines = [line for line in fr]
    with open(os.path.join('temp_pst_from',
                           "freyberg6.wel_stress_period_data_4.txt"), 'w') as fw:
        # SP4 file: leading comment AND commented header row
        fw.write("# comment line explaining this external file\n"
                 "#k i j flux\n")
        for line in lines:
            fw.write(line)
    # generate a test with headers and non spatial idex
    sfr_pkgdf = pd.DataFrame.from_records(m.sfr.packagedata.array)
    l = sfr_pkgdf.columns.to_list()
    l = ['#rno', 'k', 'i', 'j'] + l[2:]
    with open(
            os.path.join('temp_pst_from',
                         "freyberg6.sfr_packagedata.txt"), 'r') as fr:
        lines = [line for line in fr]
    with open(os.path.join('temp_pst_from',
                           "freyberg6.sfr_packagedata_test.txt"), 'w') as fw:
        fw.write(' '.join(l))
        fw.write('\n')
        for line in lines:
            fw.write(line)
    template_ws = "new_temp"
    # sr0 = m.sr
    # sr = pyemu.helpers.SpatialReference.from_namfile(
    #     os.path.join(tmp_model_ws, "freyberg6.nam"),
    #     delr=m.dis.delr.array, delc=m.dis.delc.array)
    sr = m.modelgrid
    # set up PstFrom object
    pf = PstFrom(original_d=tmp_model_ws, new_d=template_ws,
                 remove_existing=True,
                 longnames=True, spatial_reference=sr,
                 zero_based=False, start_datetime="1-1-2018")
    # obs
    # using tabular style model output
    # (generated by pyemu.gw_utils.setup_hds_obs())
    # pf.add_observations('freyberg.hds.dat', insfile='freyberg.hds.dat.ins2',
    #                     index_cols='obsnme', use_cols='obsval', prefix='hds')
    # call generic once so that the output file exists
    os.chdir(template_ws)
    df = generic_function()
    os.chdir("..")
    # add the values in generic to the ctl file
    pf.add_observations("generic.csv", insfile="generic.csv.ins", index_cols=["datetime", "index_2"], use_cols=["simval1", "simval2"])
    # add the function call to make generic to the forward run script
    pf.add_py_function("pst_from_tests.py", "generic_function()", is_pre_cmd=False)
    # add a function that isnt going to be called directly
    pf.add_py_function("pst_from_tests.py", "another_generic_function(some_arg)", is_pre_cmd=None)
    #pf.post_py_cmds.append("generic_function()")
    df = pd.read_csv(os.path.join(tmp_model_ws, "sfr.csv"), index_col=0)
    pf.add_observations("sfr.csv", insfile="sfr.csv.ins", index_cols="time", use_cols=list(df.columns.values))
    # spatial and temporal geostructs for correlated parameters
    v = pyemu.geostats.ExpVario(contribution=1.0, a=1000)
    gr_gs = pyemu.geostats.GeoStruct(variograms=v)
    rch_temporal_gs = pyemu.geostats.GeoStruct(variograms=pyemu.geostats.ExpVario(contribution=1.0, a=60))
    pf.extra_py_imports.append('flopy')
    ib = m.dis.idomain[0].array
    # tag -> [lower_bound, upper_bound] for each array-file family
    tags = {"npf_k_": [0.1, 10.], "npf_k33_": [.1, 10], "sto_ss": [.1, 10], "sto_sy": [.9, 1.1], "rch_recharge": [.5, 1.5]}
    # end-of-stress-period datetimes used to correlate temporal pars
    dts = pd.to_datetime("1-1-2018") + pd.to_timedelta(np.cumsum(sim.tdis.perioddata.array["perlen"]), unit="d")
    print(dts)
    for tag, bnd in tags.items():
        lb, ub = bnd[0], bnd[1]
        arr_files = [f for f in os.listdir(tmp_model_ws) if tag in f and f.endswith(".txt")]
        if "rch" in tag:
            pf.add_parameters(filenames=arr_files, par_type="grid", par_name_base="rch_gr",
                              pargp="rch_gr", zone_array=ib, upper_bound=ub, lower_bound=lb,
                              geostruct=gr_gs)
            for arr_file in arr_files:
                kper = int(arr_file.split('.')[1].split('_')[-1]) - 1
                pf.add_parameters(filenames=arr_file, par_type="constant", par_name_base=arr_file.split('.')[1]+"_cn",
                                  pargp="rch_const", zone_array=ib, upper_bound=ub, lower_bound=lb, geostruct=rch_temporal_gs,
                                  datetime=dts[kper])
        else:
            for arr_file in arr_files:
                # these ult bounds are used later in an assert
                ult_lb = None
                ult_ub = None
                if "npf_k_" in arr_file:
                    ult_ub = 20.0
                    ult_lb = 2.0
                pf.add_parameters(filenames=arr_file, par_type="grid", par_name_base=arr_file.split('.')[1]+"_gr",
                                  pargp=arr_file.split('.')[1]+"_gr", zone_array=ib, upper_bound=ub, lower_bound=lb,
                                  geostruct=gr_gs, ult_ubound=None if ult_ub is None else ult_ub + 1,
                                  ult_lbound=None if ult_lb is None else ult_lb + 1)
                # use a slightly lower ult bound here
                pf.add_parameters(filenames=arr_file, par_type="pilotpoints", par_name_base=arr_file.split('.')[1]+"_pp",
                                  pargp=arr_file.split('.')[1]+"_pp", zone_array=ib, upper_bound=ub, lower_bound=lb,
                                  ult_ubound=None if ult_ub is None else ult_ub - 1,
                                  ult_lbound=None if ult_lb is None else ult_lb - 1)
    # add SP1 spatially constant, but temporally correlated wel flux pars
    kper = 0
    list_file = "freyberg6.wel_stress_period_data_{0}.txt".format(kper+1)
    pf.add_parameters(filenames=list_file, par_type="constant",
                      par_name_base="twel_mlt_{0}".format(kper),
                      pargp="twel_mlt".format(kper), index_cols=[0, 1, 2],
                      use_cols=[3], upper_bound=1.5, lower_bound=0.5,
                      datetime=dts[kper], geostruct=rch_temporal_gs,
                      mfile_skip=1)
    # add temporally indep, but spatially correlated wel flux pars
    pf.add_parameters(filenames=list_file, par_type="grid",
                      par_name_base="wel_grid_{0}".format(kper),
                      pargp="wel_{0}".format(kper), index_cols=[0, 1, 2],
                      use_cols=[3], upper_bound=1.5, lower_bound=0.5,
                      geostruct=gr_gs, mfile_skip=1)
    kper = 1
    list_file = "freyberg6.wel_stress_period_data_{0}.txt".format(kper+1)
    pf.add_parameters(filenames=list_file, par_type="constant",
                      par_name_base="twel_mlt_{0}".format(kper),
                      pargp="twel_mlt".format(kper), index_cols=[0, 1, 2],
                      use_cols=[3], upper_bound=1.5, lower_bound=0.5,
                      datetime=dts[kper], geostruct=rch_temporal_gs,
                      mfile_skip='#')
    # add temporally indep, but spatially correlated wel flux pars
    pf.add_parameters(filenames=list_file, par_type="grid",
                      par_name_base="wel_grid_{0}".format(kper),
                      pargp="wel_{0}".format(kper), index_cols=[0, 1, 2],
                      use_cols=[3], upper_bound=1.5, lower_bound=0.5,
                      geostruct=gr_gs, mfile_skip='#')
    kper = 2
    list_file = "freyberg6.wel_stress_period_data_{0}.txt".format(kper+1)
    pf.add_parameters(filenames=list_file, par_type="constant",
                      par_name_base="twel_mlt_{0}".format(kper),
                      pargp="twel_mlt".format(kper), index_cols=['#k', 'i', 'j'],
                      use_cols=['flux'], upper_bound=1.5, lower_bound=0.5,
                      datetime=dts[kper], geostruct=rch_temporal_gs)
    # add temporally indep, but spatially correlated wel flux pars
    pf.add_parameters(filenames=list_file, par_type="grid",
                      par_name_base="wel_grid_{0}".format(kper),
                      pargp="wel_{0}".format(kper), index_cols=['#k', 'i', 'j'],
                      use_cols=['flux'], upper_bound=1.5, lower_bound=0.5,
                      geostruct=gr_gs)
    kper = 3
    list_file = "freyberg6.wel_stress_period_data_{0}.txt".format(kper+1)
    pf.add_parameters(filenames=list_file, par_type="constant",
                      par_name_base="twel_mlt_{0}".format(kper),
                      pargp="twel_mlt".format(kper), index_cols=['#k', 'i', 'j'],
                      use_cols=['flux'], upper_bound=1.5, lower_bound=0.5,
                      datetime=dts[kper], geostruct=rch_temporal_gs,
                      mfile_skip=1)
    # add temporally indep, but spatially correlated wel flux pars
    pf.add_parameters(filenames=list_file, par_type="grid",
                      par_name_base="wel_grid_{0}".format(kper),
                      pargp="wel_{0}".format(kper), index_cols=['#k', 'i', 'j'],
                      use_cols=['flux'], upper_bound=1.5, lower_bound=0.5,
                      geostruct=gr_gs, mfile_skip=1)
    # remaining (undoctored) stress-period files
    list_files = ["freyberg6.wel_stress_period_data_{0}.txt".format(t)
                  for t in range(5, m.nper+1)]
    for list_file in list_files:
        kper = int(list_file.split(".")[1].split('_')[-1]) - 1
        # add spatially constant, but temporally correlated wel flux pars
        pf.add_parameters(filenames=list_file, par_type="constant", par_name_base="twel_mlt_{0}".format(kper),
                          pargp="twel_mlt".format(kper), index_cols=[0, 1, 2], use_cols=[3],
                          upper_bound=1.5, lower_bound=0.5, datetime=dts[kper], geostruct=rch_temporal_gs)
        # add temporally indep, but spatially correlated wel flux pars
        pf.add_parameters(filenames=list_file, par_type="grid", par_name_base="wel_grid_{0}".format(kper),
                          pargp="wel_{0}".format(kper), index_cols=[0, 1, 2], use_cols=[3],
                          upper_bound=1.5, lower_bound=0.5, geostruct=gr_gs)
    # test non spatial idx in list like
    pf.add_parameters(filenames="freyberg6.sfr_packagedata_test.txt", par_name_base="sfr_rhk",
                      pargp="sfr_rhk", index_cols=['#rno'], use_cols=['rhk'], upper_bound=10.,
                      lower_bound=0.1,
                      par_type="grid")
    # add model run command
    pf.mod_sys_cmds.append("mf6")
    print(pf.mult_files)
    print(pf.org_files)
    # build pest
    pst = pf.build_pst('freyberg.pst')
    # quick check of write and apply method
    pars = pst.parameter_data
    # set reach 1 hk to 100
    sfr_pars = pars.loc[pars.parnme.str.startswith('sfr')].index
    pars.loc[sfr_pars, 'parval1'] = np.random.random(len(sfr_pars)) * 10
    sfr_pars = pars.loc[sfr_pars].copy()
    # split "key:value" tokens out of the long par names
    sfr_pars[['inst', 'usecol', '#rno']] = sfr_pars.parnme.apply(
        lambda x: pd.DataFrame([s.split(':') for s in x.split('_')
                                if ':' in s]).set_index(0)[1])
    sfr_pars['#rno'] = sfr_pars['#rno'] .astype(int)
    os.chdir(pf.new_d)
    pst.write_input_files()
    try:
        pyemu.helpers.apply_list_and_array_pars()
    except Exception as e:
        os.chdir('..')
        raise e
    os.chdir('..')
    # verify apply: model file values == original values * multiplier pars
    df = pd.read_csv(os.path.join(
        pf.new_d, "freyberg6.sfr_packagedata_test.txt"),
        delim_whitespace=True, index_col=0)
    df.index = df.index - 1
    print(df.rhk)
    print((sfr_pkgdf.set_index('rno').loc[df.index, 'rhk'] *
           sfr_pars.set_index('#rno').loc[df.index, 'parval1']))
    assert np.isclose(
        df.rhk, (sfr_pkgdf.set_index('rno').loc[df.index, 'rhk'] *
                 sfr_pars.set_index('#rno').loc[df.index, 'parval1'])).all()
    pars.loc[sfr_pars.index, 'parval1'] = 1.0
    # add more:
    pf.add_parameters(filenames="freyberg6.sfr_packagedata.txt", par_name_base="sfr_rhk",
                      pargp="sfr_rhk", index_cols={'k': 1, 'i': 2, 'j': 3}, use_cols=[9], upper_bound=10.,
                      lower_bound=0.1,
                      par_type="grid", rebuild_pst=True)
    df = pd.read_csv(os.path.join(tmp_model_ws, "heads.csv"), index_col=0)
    pf.add_observations("heads.csv", insfile="heads.csv.ins", index_cols="time", use_cols=list(df.columns.values),
                        prefix="hds", rebuild_pst=True)
    # test par mults are working
    b_d = os.getcwd()
    os.chdir(pf.new_d)
    try:
        pyemu.helpers.apply_list_and_array_pars(
            arr_par_file="mult2model_info.csv", chunk_len=1)
    except Exception as e:
        os.chdir(b_d)
        raise Exception(str(e))
    os.chdir(b_d)
    # off-diagonal covariance terms must exist for the correlated par groups
    cov = pf.build_prior(fmt="none").to_dataframe()
    twel_pars = [p for p in pst.par_names if "twel_mlt" in p]
    twcov = cov.loc[twel_pars, twel_pars]
    dsum = np.diag(twcov.values).sum()
    assert twcov.sum().sum() > dsum
    rch_cn = [p for p in pst.par_names if "_cn" in p]
    print(rch_cn)
    rcov = cov.loc[rch_cn, rch_cn]
    dsum = np.diag(rcov.values).sum()
    assert rcov.sum().sum() > dsum
    num_reals = 100
    pe = pf.draw(num_reals, use_specsim=True)
    pe.to_binary(os.path.join(template_ws, "prior.jcb"))
    # NOTE(review): the assert message uses pe.shape[0] but the condition
    # checks pe.shape[1] - message would report the wrong value on failure
    assert pe.shape[1] == pst.npar_adj, "{0} vs {1}".format(pe.shape[0], pst.npar_adj)
    assert pe.shape[0] == num_reals
    pst.control_data.noptmax = 0
    pst.pestpp_options["additional_ins_delimiters"] = ","
    pst.write(os.path.join(pf.new_d, "freyberg.pst"))
    pyemu.os_utils.run("{0} freyberg.pst".format(ies_exe_path), cwd=pf.new_d)
    res_file = os.path.join(pf.new_d, "freyberg.base.rei")
    assert os.path.exists(res_file), res_file
    pst.set_res(res_file)
    print(pst.phi)
    #assert pst.phi < 1.0e-5, pst.phi
    # check mult files are in pst input files
    csv = os.path.join(template_ws, "mult2model_info.csv")
    df = pd.read_csv(csv, index_col=0)
    mults_not_linked_to_pst = ((set(df.mlt_file.unique()) -
                                set(pst.input_files)) -
                               set(df.loc[df.pp_file.notna()].mlt_file))
    assert len(mults_not_linked_to_pst) == 0, print(mults_not_linked_to_pst)
    # make sure the appropriate ult bounds have made it thru
    df = pd.read_csv(os.path.join(template_ws, "mult2model_info.csv"))
    print(df.columns)
    df = df.loc[df.model_file.apply(lambda x: "npf_k_" in x), :]
    print(df)
    print(df.upper_bound)
    print(df.lower_bound)
    assert np.abs(float(df.upper_bound.min()) - 19.) < 1.0e-6, df.upper_bound.min()
    assert np.abs(float(df.lower_bound.max()) - 3.) < 1.0e-6, df.lower_bound.max()
def mf6_freyberg_shortnames_test():
    """End-to-end PstFrom test using short (``longnames=False``) par names.

    Builds a PEST(++) interface for the MF6 Freyberg model, draws a prior
    ensemble, runs pestpp-ies once (noptmax=0) and checks that every
    multiplier file is linked into the control file.  Requires the ``mf6``
    and pestpp-ies executables on the path; silently skips without flopy.
    """
    import numpy as np
    import pandas as pd
    pd.set_option('display.max_rows', 500)
    pd.set_option('display.max_columns', 500)
    pd.set_option('display.width', 1000)
    try:
        import flopy
    except ImportError:
        # flopy is an optional dependency - skip the test when absent
        # (bug fix: was a bare except that swallowed every error)
        return
    org_model_ws = os.path.join('..', 'examples', 'freyberg_mf6')
    tmp_model_ws = "temp_pst_from"
    if os.path.exists(tmp_model_ws):
        shutil.rmtree(tmp_model_ws)
    # os.mkdir(tmp_model_ws)
    # sim = flopy.mf6.MFSimulation.load(sim_ws=org_model_ws)
    # # sim.set_all_data_external()
    # sim.simulation_data.mfpath.set_sim_path(tmp_model_ws)
    # # sim.set_all_data_external()
    # m = sim.get_model("freyberg6")
    # sim.set_all_data_external()
    # sim.write_simulation()

    # to by pass the issues with flopy
    shutil.copytree(org_model_ws, tmp_model_ws)
    sim = flopy.mf6.MFSimulation.load(sim_ws=org_model_ws)
    m = sim.get_model("freyberg6")

    # SETUP pest stuff...
    os_utils.run("{0} ".format("mf6"), cwd=tmp_model_ws)

    template_ws = "new_temp"
    # sr0 = m.sr
    sr = pyemu.helpers.SpatialReference.from_namfile(
        os.path.join(tmp_model_ws, "freyberg6.nam"),
        delr=m.dis.delr.array, delc=m.dis.delc.array)
    # set up PstFrom object - note longnames=False is what this test exercises
    pf = PstFrom(original_d=tmp_model_ws, new_d=template_ws,
                 remove_existing=True,
                 longnames=False, spatial_reference=sr,
                 zero_based=False, start_datetime="1-1-2018")
    # obs: use tabular model outputs (heads and sfr) as observations
    df = pd.read_csv(os.path.join(tmp_model_ws, "heads.csv"), index_col=0)
    pf.add_observations("heads.csv", insfile="heads.csv.ins", index_cols="time",
                        use_cols=list(df.columns.values), prefix="hds")
    df = pd.read_csv(os.path.join(tmp_model_ws, "sfr.csv"), index_col=0)
    pf.add_observations("sfr.csv", insfile="sfr.csv.ins", index_cols="time",
                        use_cols=list(df.columns.values))
    v = pyemu.geostats.ExpVario(contribution=1.0, a=1000)
    gr_gs = pyemu.geostats.GeoStruct(variograms=v)
    rch_temporal_gs = pyemu.geostats.GeoStruct(
        variograms=pyemu.geostats.ExpVario(contribution=1.0, a=60))
    pf.extra_py_imports.append('flopy')
    ib = m.dis.idomain[0].array
    # multiplier bounds for each array-parameter family
    tags = {"npf_k_": [0.1, 10.], "npf_k33_": [.1, 10], "sto_ss": [.1, 10],
            "sto_sy": [.9, 1.1], "rch_recharge": [.5, 1.5]}
    dts = pd.to_datetime("1-1-2018") + pd.to_timedelta(
        np.cumsum(sim.tdis.perioddata.array["perlen"]), unit="d")
    print(dts)
    for tag, bnd in tags.items():
        lb, ub = bnd[0], bnd[1]
        arr_files = [f for f in os.listdir(tmp_model_ws) if tag in f and f.endswith(".txt")]
        if "rch" in tag:
            # one grid-scale multiplier set across all recharge arrays...
            pf.add_parameters(filenames=arr_files, par_type="grid", par_name_base="rg",
                              pargp="rg", zone_array=ib, upper_bound=ub, lower_bound=lb,
                              geostruct=gr_gs)
            # ...plus a temporally correlated constant per stress period
            for arr_file in arr_files:
                kper = int(arr_file.split('.')[1].split('_')[-1]) - 1
                pf.add_parameters(filenames=arr_file, par_type="constant",
                                  par_name_base="rc{0}_".format(kper),
                                  pargp="rc", zone_array=ib, upper_bound=ub,
                                  lower_bound=lb, geostruct=rch_temporal_gs,
                                  datetime=dts[kper])
        else:
            for arr_file in arr_files:
                # short base name, e.g. "k1" from tag "npf_k_" + layer digit
                pb = tag.split('_')[1] + arr_file.split('.')[1][-1]
                pf.add_parameters(filenames=arr_file, par_type="grid",
                                  par_name_base=pb + "g",
                                  pargp=pb + "g", zone_array=ib, upper_bound=ub,
                                  lower_bound=lb, geostruct=gr_gs)
                pf.add_parameters(filenames=arr_file, par_type="pilotpoints",
                                  par_name_base=pb + "p",
                                  pargp=pb + "p", zone_array=ib, upper_bound=ub,
                                  lower_bound=lb)
    list_files = [f for f in os.listdir(tmp_model_ws) if "wel_stress_period_data" in f]
    for list_file in list_files:
        kper = list_file.split(".")[1].split('_')[-1]
        pf.add_parameters(filenames=list_file, par_type="constant",
                          par_name_base="w{0}".format(kper),
                          pargp="wel_{0}".format(kper), index_cols=[0, 1, 2],
                          use_cols=[3], upper_bound=1.5, lower_bound=0.5)
    pf.add_parameters(filenames="freyberg6.sfr_packagedata.txt", par_name_base="rhk",
                      pargp="sfr_rhk", index_cols=[0, 1, 2, 3], use_cols=[9],
                      upper_bound=10., lower_bound=0.1, par_type="grid")

    # add model run command
    pf.mod_sys_cmds.append("mf6")
    print(pf.mult_files)
    print(pf.org_files)

    # build pest control file and draw a prior ensemble
    pst = pf.build_pst('freyberg.pst')
    num_reals = 100
    pe = pf.draw(num_reals, use_specsim=True)
    pe.to_binary(os.path.join(template_ws, "prior.jcb"))
    # bug fix: message now reports shape[1], the quantity actually tested
    assert pe.shape[1] == pst.npar_adj, "{0} vs {1}".format(pe.shape[1], pst.npar_adj)
    assert pe.shape[0] == num_reals

    # test par mults are working
    b_d = os.getcwd()
    os.chdir(pf.new_d)
    try:
        pyemu.helpers.apply_list_and_array_pars(
            arr_par_file="mult2model_info.csv")
    except Exception as e:
        os.chdir(b_d)
        raise Exception(str(e))
    os.chdir(b_d)

    pst.control_data.noptmax = 0
    pst.pestpp_options["additional_ins_delimiters"] = ","
    pst.write(os.path.join(pf.new_d, "freyberg.pst"))
    pyemu.os_utils.run("{0} freyberg.pst".format(ies_exe_path), cwd=pf.new_d)
    res_file = os.path.join(pf.new_d, "freyberg.base.rei")
    assert os.path.exists(res_file), res_file
    pst.set_res(res_file)
    print(pst.phi)
    #assert pst.phi < 1.0e-5, pst.phi

    # check mult files are in pst input files
    csv = os.path.join(template_ws, "mult2model_info.csv")
    df = pd.read_csv(csv, index_col=0)
    mults_not_linked_to_pst = ((set(df.mlt_file.unique()) -
                                set(pst.input_files)) -
                               set(df.loc[df.pp_file.notna()].mlt_file))
    # bug fix: use the set itself as the message (print() returned None)
    assert len(mults_not_linked_to_pst) == 0, mults_not_linked_to_pst
def mf6_freyberg_da_test():
    """End-to-end PstFrom test for a data-assimilation style setup.

    Builds the PEST(++) interface for the MF6 Freyberg DA model, then adds
    direct (non-multiplier) initial-condition parameters whose names match
    the head observation names, draws a prior ensemble and runs pestpp-ies
    once (noptmax=0).  Requires the ``mf6`` and pestpp-ies executables.
    """
    import numpy as np
    import pandas as pd
    pd.set_option('display.max_rows', 500)
    pd.set_option('display.max_columns', 500)
    pd.set_option('display.width', 1000)
    try:
        import flopy
    except ImportError:
        # flopy is an optional dependency - skip the test when absent
        # (bug fix: was a bare except that swallowed every error)
        return
    org_model_ws = os.path.join('..', 'examples', 'freyberg_mf6_da')
    tmp_model_ws = "temp_pst_from"
    if os.path.exists(tmp_model_ws):
        shutil.rmtree(tmp_model_ws)
    # to by pass the issues with flopy
    shutil.copytree(org_model_ws, tmp_model_ws)
    sim = flopy.mf6.MFSimulation.load(sim_ws=org_model_ws)
    m = sim.get_model("freyberg6")

    # SETUP pest stuff...
    os_utils.run("{0} ".format("mf6"), cwd=tmp_model_ws)

    template_ws = "new_temp_da"
    sr = pyemu.helpers.SpatialReference.from_namfile(
        os.path.join(tmp_model_ws, "freyberg6.nam"),
        delr=m.dis.delr.array, delc=m.dis.delc.array)
    # set up PstFrom object
    pf = PstFrom(original_d=tmp_model_ws, new_d=template_ws,
                 remove_existing=True,
                 longnames=True, spatial_reference=sr,
                 zero_based=False, start_datetime="1-1-2018")
    # obs: tabular model outputs (heads and sfr)
    df = pd.read_csv(os.path.join(tmp_model_ws, "heads.csv"), index_col=0)
    pf.add_observations("heads.csv", insfile="heads.csv.ins", index_cols="time",
                        use_cols=list(df.columns.values), prefix="hds")
    df = pd.read_csv(os.path.join(tmp_model_ws, "sfr.csv"), index_col=0)
    pf.add_observations("sfr.csv", insfile="sfr.csv.ins", index_cols="time",
                        use_cols=list(df.columns.values))
    v = pyemu.geostats.ExpVario(contribution=1.0, a=1000)
    gr_gs = pyemu.geostats.GeoStruct(variograms=v)
    rch_temporal_gs = pyemu.geostats.GeoStruct(
        variograms=pyemu.geostats.ExpVario(contribution=1.0, a=60))
    pf.extra_py_imports.append('flopy')
    ib = m.dis.idomain[0].array
    # multiplier bounds for each array-parameter family
    tags = {"npf_k_": [0.1, 10.], "npf_k33_": [.1, 10], "sto_ss": [.1, 10],
            "sto_sy": [.9, 1.1], "rch_recharge": [.5, 1.5]}
    dts = pd.to_datetime("1-1-2018") + pd.to_timedelta(
        np.cumsum(sim.tdis.perioddata.array["perlen"]), unit="d")
    print(dts)
    for tag, bnd in tags.items():
        lb, ub = bnd[0], bnd[1]
        arr_files = [f for f in os.listdir(tmp_model_ws) if tag in f and f.endswith(".txt")]
        if "rch" in tag:
            # one grid-scale multiplier set across all recharge arrays...
            pf.add_parameters(filenames=arr_files, par_type="grid", par_name_base="rch_gr",
                              pargp="rch_gr", zone_array=ib, upper_bound=ub, lower_bound=lb,
                              geostruct=gr_gs)
            # ...plus a temporally correlated constant per stress period
            for arr_file in arr_files:
                kper = int(arr_file.split('.')[1].split('_')[-1]) - 1
                pf.add_parameters(filenames=arr_file, par_type="constant",
                                  par_name_base=arr_file.split('.')[1] + "_cn",
                                  pargp="rch_const", zone_array=ib, upper_bound=ub,
                                  lower_bound=lb, geostruct=rch_temporal_gs,
                                  datetime=dts[kper])
        else:
            for arr_file in arr_files:
                pf.add_parameters(filenames=arr_file, par_type="grid",
                                  par_name_base=arr_file.split('.')[1] + "_gr",
                                  pargp=arr_file.split('.')[1] + "_gr", zone_array=ib,
                                  upper_bound=ub, lower_bound=lb, geostruct=gr_gs)
                pf.add_parameters(filenames=arr_file, par_type="pilotpoints",
                                  par_name_base=arr_file.split('.')[1] + "_pp",
                                  pargp=arr_file.split('.')[1] + "_pp", zone_array=ib,
                                  upper_bound=ub, lower_bound=lb)
    list_files = [f for f in os.listdir(tmp_model_ws) if "wel_stress_period_data" in f]
    for list_file in list_files:
        kper = int(list_file.split(".")[1].split('_')[-1]) - 1
        # add spatially constant, but temporally correlated wel flux pars
        # (fix: dropped the no-op str.format() call on the pargp literal)
        pf.add_parameters(filenames=list_file, par_type="constant",
                          par_name_base="twel_mlt_{0}".format(kper),
                          pargp="twel_mlt", index_cols=[0, 1, 2], use_cols=[3],
                          upper_bound=1.5, lower_bound=0.5, datetime=dts[kper],
                          geostruct=rch_temporal_gs)
        # add temporally indep, but spatially correlated wel flux pars
        pf.add_parameters(filenames=list_file, par_type="grid",
                          par_name_base="wel_grid_{0}".format(kper),
                          pargp="wel_{0}".format(kper), index_cols=[0, 1, 2],
                          use_cols=[3], upper_bound=1.5, lower_bound=0.5,
                          geostruct=gr_gs)
    pf.add_parameters(filenames="freyberg6.sfr_packagedata.txt", par_name_base="sfr_rhk",
                      pargp="sfr_rhk", index_cols={'k': 1, 'i': 2, 'j': 3}, use_cols=[9],
                      upper_bound=10., lower_bound=0.1, par_type="grid")

    # add model run command
    pf.mod_sys_cmds.append("mf6")
    print(pf.mult_files)
    print(pf.org_files)

    # build pest
    pst = pf.build_pst('freyberg.pst')
    pst.write(os.path.join(template_ws, "freyberg6_da.pst"), version=2)

    # setup direct (non mult) pars on the IC files with par names that match the obs names
    obs = pst.observation_data
    hobs = obs.loc[obs.obsnme.str.startswith("hds"), :].copy()
    hobs.loc[:, "k"] = hobs.obsnme.apply(lambda x: int(x.split(':')[1].split("_")[1]))
    hobs.loc[:, "i"] = hobs.obsnme.apply(lambda x: int(x.split(':')[1].split("_")[2]))
    hobs.loc[:, "j"] = hobs.obsnme.apply(lambda x: int(x.split(':')[1].split("_")[3]))
    hobs_set = set(hobs.obsnme.to_list())
    ic_files = [f for f in os.listdir(template_ws) if "ic_strt" in f and f.endswith(".txt")]
    print(ic_files)
    ib = m.dis.idomain[0].array
    tpl_files = []
    for ic_file in ic_files:
        tpl_file = os.path.join(template_ws, ic_file + ".tpl")
        vals, names = [], []
        with open(tpl_file, 'w') as f:
            f.write("ptf ~\n")
            k = int(ic_file.split('.')[1][-1]) - 1
            org_arr = np.loadtxt(os.path.join(template_ws, ic_file))
            for i in range(org_arr.shape[0]):
                for j in range(org_arr.shape[1]):
                    if ib[i, j] < 1:
                        # inactive cell: write a fixed no-data value
                        f.write(" -1.0e+30 ")
                    else:
                        # par name mirrors the corresponding head obs name
                        pname = "hds_usecol:trgw_{0}_{1}_{2}_time:31.0".format(k, i, j)
                        if pname not in hobs_set and ib[i, j] > 0:
                            print(k, i, j, pname, ib[i, j])
                        f.write(" ~ {0} ~".format(pname))
                        vals.append(org_arr[i, j])
                        names.append(pname)
                f.write("\n")
        df = pf.pst.add_parameters(tpl_file, pst_path=".")
        pf.pst.parameter_data.loc[df.parnme, "partrans"] = "fixed"
        pf.pst.parameter_data.loc[names, "parval1"] = vals
    pf.pst.write(os.path.join(template_ws, "freyberg6_da.pst"), version=2)

    num_reals = 100
    pe = pf.draw(num_reals, use_specsim=True)
    pe.to_binary(os.path.join(template_ws, "prior.jcb"))
    # bug fix: message now reports shape[1], the quantity actually tested
    assert pe.shape[1] == pst.npar_adj, "{0} vs {1}".format(pe.shape[1], pst.npar_adj)
    assert pe.shape[0] == num_reals

    # test par mults are working
    b_d = os.getcwd()
    os.chdir(pf.new_d)
    try:
        pyemu.helpers.apply_list_and_array_pars(
            arr_par_file="mult2model_info.csv")
    except Exception as e:
        os.chdir(b_d)
        raise Exception(str(e))
    os.chdir(b_d)

    pst.control_data.noptmax = 0
    pst.pestpp_options["additional_ins_delimiters"] = ","
    pst.write(os.path.join(pf.new_d, "freyberg.pst"))
    pyemu.os_utils.run("{0} freyberg.pst".format(ies_exe_path), cwd=pf.new_d)
    res_file = os.path.join(pf.new_d, "freyberg.base.rei")
    assert os.path.exists(res_file), res_file
    pst.set_res(res_file)
    print(pst.phi)
    #assert pst.phi < 1.0e-5, pst.phi

    # check mult files are in pst input files
    csv = os.path.join(template_ws, "mult2model_info.csv")
    df = pd.read_csv(csv, index_col=0)
    mults_not_linked_to_pst = ((set(df.mlt_file.unique()) -
                                set(pst.input_files)) -
                               set(df.loc[df.pp_file.notna()].mlt_file))
    # bug fix: use the set itself as the message (print() returned None)
    assert len(mults_not_linked_to_pst) == 0, mults_not_linked_to_pst
def mf6_freyberg_direct_test():
    """End-to-end PstFrom test for direct (non-multiplier) parameters.

    Sets up direct grid-scale recharge, wel-flux and ghb parameters (plus
    some multiplier parameters), runs pestpp-ies once, then pushes the
    direct pars to their bounds and verifies the written model inputs and
    the budget respond as expected.  Requires ``mf6`` and pestpp-ies.
    """
    import numpy as np
    import pandas as pd
    pd.set_option('display.max_rows', 500)
    pd.set_option('display.max_columns', 500)
    pd.set_option('display.width', 1000)
    try:
        import flopy
    except ImportError:
        # flopy is an optional dependency - skip the test when absent
        # (bug fix: was a bare except that swallowed every error)
        return
    org_model_ws = os.path.join('..', 'examples', 'freyberg_mf6')
    tmp_model_ws = "temp_pst_from_direct"
    if os.path.exists(tmp_model_ws):
        shutil.rmtree(tmp_model_ws)
    os.mkdir(tmp_model_ws)
    sim = flopy.mf6.MFSimulation.load(sim_ws=org_model_ws)
    sim.simulation_data.mfpath.set_sim_path(tmp_model_ws)
    m = sim.get_model("freyberg6")
    # externalize all arrays/lists so PstFrom can parameterize the files
    sim.set_all_data_external()
    sim.write_simulation()

    # SETUP pest stuff...
    os_utils.run("{0} ".format("mf6"), cwd=tmp_model_ws)

    template_ws = "new_temp_direct"
    sr = m.modelgrid
    # set up PstFrom object
    pf = PstFrom(original_d=tmp_model_ws, new_d=template_ws,
                 remove_existing=True,
                 longnames=True, spatial_reference=sr,
                 zero_based=False, start_datetime="1-1-2018")
    # sfr observations now; heads are added after build_pst to exercise rebuild
    df = pd.read_csv(os.path.join(tmp_model_ws, "sfr.csv"), index_col=0)
    pf.add_observations("sfr.csv", insfile="sfr.csv.ins", index_cols="time",
                        use_cols=list(df.columns.values))
    v = pyemu.geostats.ExpVario(contribution=1.0, a=1000)
    gr_gs = pyemu.geostats.GeoStruct(variograms=v, transform="log")
    rch_temporal_gs = pyemu.geostats.GeoStruct(
        variograms=pyemu.geostats.ExpVario(contribution=1.0, a=60))
    pf.extra_py_imports.append('flopy')
    ib = m.dis.idomain[0].array
    tags = {"npf_k_": [0.1, 10.], "npf_k33_": [.1, 10], "sto_ss": [.1, 10],
            "sto_sy": [.9, 1.1], "rch_recharge": [.5, 1.5]}
    dts = pd.to_datetime("1-1-2018") + pd.to_timedelta(
        np.cumsum(sim.tdis.perioddata.array["perlen"]), unit="d")
    print(dts)
    for tag, bnd in tags.items():
        lb, ub = bnd[0], bnd[1]
        arr_files = [f for f in os.listdir(tmp_model_ws) if tag in f and f.endswith(".txt")]
        if "rch" in tag:
            # recharge arrays get *direct* grid pars (bounds are rates, not mults)
            for arr_file in arr_files:
                pf.add_parameters(filenames=arr_file, par_type="grid",
                                  par_name_base="rch_gr",
                                  pargp="rch_gr", zone_array=ib, upper_bound=1.0e-3,
                                  lower_bound=1.0e-7, geostruct=gr_gs,
                                  par_style="direct")
            # plus a temporally correlated constant multiplier per stress period
            for arr_file in arr_files:
                kper = int(arr_file.split('.')[1].split('_')[-1]) - 1
                pf.add_parameters(filenames=arr_file, par_type="constant",
                                  par_name_base=arr_file.split('.')[1] + "_cn",
                                  pargp="rch_const", zone_array=ib, upper_bound=ub,
                                  lower_bound=lb, geostruct=rch_temporal_gs,
                                  datetime=dts[kper])
        else:
            for arr_file in arr_files:
                pf.add_parameters(filenames=arr_file, par_type="grid",
                                  par_name_base=arr_file.split('.')[1] + "_gr",
                                  pargp=arr_file.split('.')[1] + "_gr", zone_array=ib,
                                  upper_bound=ub, lower_bound=lb, geostruct=gr_gs)

    list_files = ["freyberg6.wel_stress_period_data_{0}.txt".format(t)
                  for t in range(1, m.nper + 1)]
    list_files.sort()
    for list_file in list_files:
        kper = int(list_file.split(".")[1].split('_')[-1]) - 1
        # add spatially constant, but temporally correlated wel flux pars
        # (fix: dropped the no-op str.format() call on the pargp literal)
        pf.add_parameters(filenames=list_file, par_type="constant",
                          par_name_base="twel_mlt_{0}".format(kper),
                          pargp="twel_mlt", index_cols=[0, 1, 2], use_cols=[3],
                          upper_bound=1.5, lower_bound=0.5, datetime=dts[kper],
                          geostruct=rch_temporal_gs)
        # add temporally indep, but spatially correlated *direct* wel flux pars
        pf.add_parameters(filenames=list_file, par_type="grid",
                          par_name_base="wel_grid_{0}".format(kper),
                          pargp="wel_{0}".format(kper), index_cols=[0, 1, 2],
                          use_cols=[3], upper_bound=0.0, lower_bound=-1000,
                          geostruct=gr_gs, par_style="direct", transform="none")
    # direct ghb stage and conductance pars (two use_cols at once)
    list_file = "freyberg6.ghb_stress_period_data_1.txt"
    pf.add_parameters(filenames=list_file, par_type="grid",
                      par_name_base=["ghb_stage", "ghb_cond"],
                      pargp=["ghb_stage", "ghb_cond"], index_cols=[0, 1, 2],
                      use_cols=[3, 4], upper_bound=[35, 150], lower_bound=[32, 50],
                      geostruct=gr_gs, par_style="direct", transform="none")

    # add model run command
    pf.mod_sys_cmds.append("mf6")
    print(pf.mult_files)
    print(pf.org_files)

    # build pest
    pst = pf.build_pst('freyberg.pst')
    pst.try_parse_name_metadata()
    # add the head observations after the fact to exercise rebuild_pst
    df = pd.read_csv(os.path.join(tmp_model_ws, "heads.csv"), index_col=0)
    pf.add_observations("heads.csv", insfile="heads.csv.ins", index_cols="time",
                        use_cols=list(df.columns.values), prefix="hds",
                        rebuild_pst=True)

    # test par mults are working
    b_d = os.getcwd()
    os.chdir(pf.new_d)
    try:
        pyemu.helpers.apply_list_and_array_pars(
            arr_par_file="mult2model_info.csv", chunk_len=1)
    except Exception as e:
        os.chdir(b_d)
        raise Exception(str(e))
    os.chdir(b_d)

    num_reals = 100
    pe = pf.draw(num_reals, use_specsim=True)
    pe.to_binary(os.path.join(template_ws, "prior.jcb"))
    # bug fix: message now reports shape[1], the quantity actually tested
    assert pe.shape[1] == pst.npar_adj, "{0} vs {1}".format(pe.shape[1], pst.npar_adj)
    assert pe.shape[0] == num_reals

    pst.control_data.noptmax = 0
    pst.pestpp_options["additional_ins_delimiters"] = ","
    pst.write(os.path.join(pf.new_d, "freyberg.pst"))
    pyemu.os_utils.run("{0} freyberg.pst".format(ies_exe_path), cwd=pf.new_d)
    res_file = os.path.join(pf.new_d, "freyberg.base.rei")
    assert os.path.exists(res_file), res_file
    pst.set_res(res_file)
    print(pst.phi)
    assert pst.phi < 0.1, pst.phi

    # turn direct recharge to min and direct wel to min and
    # check that the model results are consistent
    par = pst.parameter_data
    rch_par = par.loc[par.parnme.apply(
        lambda x: "rch_gr" in x and "direct" in x), "parnme"]
    wel_par = par.loc[par.parnme.apply(
        lambda x: "wel_grid" in x and "direct" in x), "parnme"]
    par.loc[rch_par, "parval1"] = par.loc[rch_par, "parlbnd"]
    # this should set wells to zero since they are negative values in the control file
    par.loc[wel_par, "parval1"] = par.loc[wel_par, "parubnd"]
    pst.write(os.path.join(pf.new_d, "freyberg.pst"))
    pyemu.os_utils.run("{0} freyberg.pst".format(ies_exe_path), cwd=pf.new_d)

    lst = flopy.utils.Mf6ListBudget(os.path.join(pf.new_d, "freyberg6.lst"))
    flx, cum = lst.get_dataframes(diff=True)
    wel_tot = flx.wel.apply(np.abs).sum()
    print(flx.wel)
    assert wel_tot < 1.0e-6, wel_tot

    # every parameterized recharge cell should now carry the lower-bound rate
    rch_files = [f for f in os.listdir(pf.new_d)
                 if ".rch_recharge" in f and f.endswith(".txt")]
    rch_val = par.loc[rch_par, "parval1"][0]
    i, j = par.loc[rch_par, ["i", 'j']].astype(int).values.T
    for rch_file in rch_files:
        arr = np.loadtxt(os.path.join(pf.new_d, rch_file))[i, j]
        print(rch_file, rch_val, arr.mean(), arr.max(), arr.min())
        if np.abs(arr.max() - rch_val) > 1.0e-6 or np.abs(arr.min() - rch_val) > 1.0e-6:
            raise Exception("recharge too diff")
if __name__ == "__main__":
    # manual test driver: uncomment the test(s) to run directly
    # freyberg_test()
    # freyberg_prior_build_test()
    mf6_freyberg_test()
    # mf6_freyberg_shortnames_test()
    # mf6_freyberg_da_test()
    # mf6_freyberg_direct_test()
| 45.678358
| 129
| 0.60362
| 9,012
| 61,209
| 3.868953
| 0.067466
| 0.015832
| 0.022084
| 0.039235
| 0.835661
| 0.814811
| 0.800155
| 0.780595
| 0.750767
| 0.741704
| 0
| 0.026311
| 0.253639
| 61,209
| 1,339
| 130
| 45.712472
| 0.73691
| 0.126599
| 0
| 0.658659
| 0
| 0.001001
| 0.128668
| 0.027806
| 0
| 0
| 0
| 0.000747
| 0.031031
| 1
| 0.008008
| false
| 0
| 0.035035
| 0
| 0.05005
| 0.041041
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6b4bffe29a8cf45c7795e6468753c5ef35349b28
| 16,486
|
py
|
Python
|
xnas/search_space/DrNAS/DARTSspace/cnn.py
|
MAC-AutoML/XNAS
|
2c54ceb09b255cbcabd67f3c39fc777c4b2403f4
|
[
"MIT"
] | 9
|
2021-04-21T08:14:03.000Z
|
2021-11-26T11:52:40.000Z
|
xnas/search_space/DrNAS/DARTSspace/cnn.py
|
MAC-AutoML/XNAS
|
2c54ceb09b255cbcabd67f3c39fc777c4b2403f4
|
[
"MIT"
] | null | null | null |
xnas/search_space/DrNAS/DARTSspace/cnn.py
|
MAC-AutoML/XNAS
|
2c54ceb09b255cbcabd67f3c39fc777c4b2403f4
|
[
"MIT"
] | 6
|
2021-05-19T02:36:43.000Z
|
2021-12-03T07:21:37.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.distributions.dirichlet import Dirichlet
from torch.distributions.kl import kl_divergence
from xnas.search_space.DARTS.genos import PRIMITIVES, Genotype
from xnas.search_space.DrNAS.DARTSspace.ops import *
from xnas.search_space.DrNAS.utils import process_step_matrix, prune
class Cell(nn.Module):
    """One DARTS search cell: a small DAG whose edges are MixedOps.

    Each of the `steps` intermediate nodes receives one edge from every
    earlier state (the two cell inputs plus previously computed nodes);
    the cell output concatenates the last `multiplier` node outputs.
    """

    def __init__(
        self, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev, k
    ):
        super(Cell, self).__init__()
        self.reduction = reduction
        self.k = k

        # Align both cell inputs to C channels; if the previous cell reduced
        # resolution, input 0 must be spatially reduced as well.
        if reduction_prev:
            self.preprocess0 = FactorizedReduce(C_prev_prev, C, affine=False)
        else:
            self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0, affine=False)
        self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0, affine=False)

        self._steps = steps
        self._multiplier = multiplier

        self._ops = nn.ModuleList()
        self._bns = nn.ModuleList()
        for node in range(self._steps):
            for edge in range(2 + node):
                # in a reduction cell, only the two cell-input edges stride
                stride = 2 if reduction and edge < 2 else 1
                self._ops.append(MixedOp(C, stride, self.k))

    def forward(self, s0, s1, weights):
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)

        states = [s0, s1]
        offset = 0
        for _ in range(self._steps):
            new_state = sum(
                self._ops[offset + j](h, weights[offset + j])
                for j, h in enumerate(states)
            )
            offset += len(states)
            states.append(new_state)

        # channel-wise concat of the last `multiplier` intermediate states
        return torch.cat(states[-self._multiplier:], dim=1)

    def wider(self, k):
        """Propagate a new partial-channel factor k to every mixed op."""
        self.k = k
        for op in self._ops:
            op.wider(k)
class NetworkCIFAR(nn.Module):
    """DrNAS search network for CIFAR.

    Stacks `layers` DARTS cells (reduction cells at 1/3 and 2/3 depth) and
    learns architecture weights ("alphas") for normal and reduction cells.
    Op mixing weights are drawn from a Dirichlet induced by the alphas;
    ops can be progressively pruned via masks, and the alphas can be
    regularized with a Dirichlet-KL term (``reg_type="kl"``).
    """

    def __init__(
        self,
        C,
        num_classes,
        layers,
        criterion,
        steps=4,
        multiplier=4,
        stem_multiplier=3,
        k=4,
        reg_type="l2",
        reg_scale=1e-3,
    ):
        super(NetworkCIFAR, self).__init__()
        self._C = C
        self._num_classes = num_classes
        self._layers = layers
        self._criterion = criterion
        self._steps = steps
        self._multiplier = multiplier
        self.k = k  # partial-channel factor handed down to every cell

        C_curr = stem_multiplier * C
        self.stem = nn.Sequential(
            nn.Conv2d(3, C_curr, 3, padding=1, bias=False), nn.BatchNorm2d(C_curr)
        )

        C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(layers):
            # channel-doubling reduction cells at 1/3 and 2/3 of the depth
            if i in [layers // 3, 2 * layers // 3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(
                steps,
                multiplier,
                C_prev_prev,
                C_prev,
                C_curr,
                reduction,
                reduction_prev,
                k,
            )
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, multiplier * C_curr

        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)

        self._initialize_alphas()

        #### reg
        self.reg_type = reg_type
        self.reg_scale = reg_scale
        # flat Dirichlet(1,...,1) anchors used by _get_kl_reg()
        self.anchor_normal = Dirichlet(torch.ones_like(self.alphas_normal).cuda())
        self.anchor_reduce = Dirichlet(torch.ones_like(self.alphas_reduce).cuda())

    def new(self):
        """Clone the network and copy over the architecture parameters.

        NOTE(review): k, reg_type and reg_scale are not forwarded, so the
        clone falls back to their defaults -- confirm this is intentional.
        """
        model_new = NetworkCIFAR(
            self._C, self._num_classes, self._layers, self._criterion
        ).cuda()
        for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
            x.data.copy_(y.data)
        return model_new

    def show_arch_parameters(self, logger):
        """Log softmaxed alphas and Dirichlet concentrations for both cell types."""
        with torch.no_grad():
            logger.info(
                "alphas normal :\n{:}".format(
                    process_step_matrix(
                        self.alphas_normal, "softmax", self.mask_normal
                    ).cpu()
                )
            )
            logger.info(
                "alphas reduce :\n{:}".format(
                    process_step_matrix(
                        self.alphas_reduce, "softmax", self.mask_reduce
                    ).cpu()
                )
            )
            logger.info(
                "concentration normal:\n{:}".format(
                    (F.elu(self.alphas_normal) + 1).cpu()
                )
            )
            logger.info(
                "concentration reduce:\n{:}".format(
                    (F.elu(self.alphas_reduce) + 1).cpu()
                )
            )

    def pruning(self, num_keep):
        """Shrink the op masks so only `num_keep` candidates per edge survive."""
        with torch.no_grad():
            self.mask_normal = prune(self.alphas_normal, num_keep, self.mask_normal)
            self.mask_reduce = prune(self.alphas_reduce, num_keep, self.mask_reduce)

    def wider(self, k):
        """Update the partial-channel factor k in every cell."""
        self.k = k
        for cell in self.cells:
            cell.wider(k)

    def forward(self, input):
        s0 = s1 = self.stem(input)
        # sample per-edge op weights from the Dirichlet induced by the alphas
        weights_normal = process_step_matrix(
            self.alphas_normal, "dirichlet", self.mask_normal
        )
        weights_reduce = process_step_matrix(
            self.alphas_reduce, "dirichlet", self.mask_reduce
        )
        # pruned ops must receive exactly zero weight
        # (fix: `x is not None` instead of `not x is None`, PEP 8 E714)
        if self.mask_normal is not None:
            assert (weights_normal[~self.mask_normal] == 0.0).all()
        if self.mask_reduce is not None:
            assert (weights_reduce[~self.mask_reduce] == 0.0).all()
        for i, cell in enumerate(self.cells):
            if cell.reduction:
                weights = weights_reduce
            else:
                weights = weights_normal
            s0, s1 = s1, cell(s0, s1, weights)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), -1))
        return logits

    def _loss(self, input, target):
        """Criterion loss, plus the KL regularizer when reg_type == "kl"."""
        logits = self(input)
        loss = self._criterion(logits, target)
        if self.reg_type == "kl":
            loss += self._get_kl_reg()
        return loss

    def _get_kl_reg(self):
        """Scaled KL(q || anchor) over both alpha sets; elu(alpha)+1 keeps q's
        concentrations positive."""
        cons_normal = F.elu(self.alphas_normal) + 1
        cons_reduce = F.elu(self.alphas_reduce) + 1
        q_normal = Dirichlet(cons_normal)
        q_reduce = Dirichlet(cons_reduce)
        p_normal = self.anchor_normal
        p_reduce = self.anchor_reduce
        kl_reg = self.reg_scale * (
            torch.sum(kl_divergence(q_reduce, p_reduce))
            + torch.sum(kl_divergence(q_normal, p_normal))
        )
        return kl_reg

    def _initialize_alphas(self):
        # one alpha row per edge: sum over nodes of (2 + node_index) edges
        k = sum(1 for i in range(self._steps) for n in range(2 + i))
        num_ops = len(PRIMITIVES)
        self.alphas_normal = Variable(
            1e-3 * torch.randn(k, num_ops).cuda(), requires_grad=True
        )
        self.alphas_reduce = Variable(
            1e-3 * torch.randn(k, num_ops).cuda(), requires_grad=True
        )
        self._arch_parameters = [
            self.alphas_normal,
            self.alphas_reduce,
        ]
        self.mask_normal = None
        self.mask_reduce = None

    def arch_parameters(self):
        return self._arch_parameters

    def genotype(self):
        """Derive the discrete genotype: keep the top-2 input edges per node
        and the strongest op on each kept edge."""

        def _parse(weights):
            gene = []
            n = 2
            start = 0
            for i in range(self._steps):
                end = start + n
                W = weights[start:end].copy()
                # edges = sorted(range(i + 2), key=lambda x: -max(W[x][k] for k in range(len(W[x])) if k != PRIMITIVES.index('none')))[:2]
                edges = sorted(
                    range(i + 2), key=lambda x: -max(W[x][k] for k in range(len(W[x])))
                )[:2]
                for j in edges:
                    k_best = None
                    for k in range(len(W[j])):
                        # if k != PRIMITIVES.index('none'):
                        if k_best is None or W[j][k] > W[j][k_best]:
                            k_best = k
                    gene.append((PRIMITIVES[k_best], j))
                start = end
                n += 1
            return gene

        gene_normal = _parse(
            process_step_matrix(self.alphas_normal, "softmax", self.mask_normal)
            .data.cpu()
            .numpy()
        )
        gene_reduce = _parse(
            process_step_matrix(self.alphas_reduce, "softmax", self.mask_reduce)
            .data.cpu()
            .numpy()
        )

        concat = range(2 + self._steps - self._multiplier, self._steps + 2)
        genotype = Genotype(
            normal=gene_normal,
            normal_concat=concat,
            reduce=gene_reduce,
            reduce_concat=concat,
        )
        return genotype
class NetworkImageNet(nn.Module):
    """DrNAS search network for ImageNet.

    Same cell stack as NetworkCIFAR but with a two-stage strided stem for
    large inputs and no alpha regularizer.
    """

    def __init__(
        self,
        C,
        num_classes,
        layers,
        criterion,
        steps=4,
        multiplier=4,
        stem_multiplier=3,
        k=4,
    ):
        super(NetworkImageNet, self).__init__()
        self._C = C
        self._num_classes = num_classes
        self._layers = layers
        self._criterion = criterion
        self._steps = steps
        self._multiplier = multiplier
        self.k = k  # partial-channel factor handed down to every cell

        C_curr = stem_multiplier * C
        # each stem stage uses stride-2 convs to downsample the input
        self.stem0 = nn.Sequential(
            nn.Conv2d(3, C_curr // 2, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C_curr // 2),
            nn.ReLU(inplace=True),
            nn.Conv2d(C_curr // 2, C_curr, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C_curr),
        )
        self.stem1 = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.Conv2d(C_curr, C_curr, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C_curr),
        )

        C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
        self.cells = nn.ModuleList()
        reduction_prev = True  # stem1 applies a stride-2 conv
        for i in range(layers):
            # channel-doubling reduction cells at 1/3 and 2/3 of the depth
            if i in [layers // 3, 2 * layers // 3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(
                steps,
                multiplier,
                C_prev_prev,
                C_prev,
                C_curr,
                reduction,
                reduction_prev,
                k,
            )
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, multiplier * C_curr

        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)

        self._initialize_alphas()

    def new(self):
        """Clone the network and copy over the architecture parameters.

        NOTE(review): k is not forwarded, so the clone falls back to the
        default -- confirm this is intentional.
        """
        model_new = NetworkImageNet(
            self._C, self._num_classes, self._layers, self._criterion
        ).cuda()
        for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
            x.data.copy_(y.data)
        return model_new

    def show_arch_parameters(self, logger):
        """Log softmaxed alphas and Dirichlet concentrations for both cell types."""
        with torch.no_grad():
            logger.info(
                "alphas normal :\n{:}".format(
                    process_step_matrix(
                        self.alphas_normal, "softmax", self.mask_normal
                    ).cpu()
                )
            )
            logger.info(
                "alphas reduce :\n{:}".format(
                    process_step_matrix(
                        self.alphas_reduce, "softmax", self.mask_reduce
                    ).cpu()
                )
            )
            logger.info(
                "concentration normal:\n{:}".format(
                    (F.elu(self.alphas_normal) + 1).cpu()
                )
            )
            logger.info(
                "concentration reduce:\n{:}".format(
                    (F.elu(self.alphas_reduce) + 1).cpu()
                )
            )

    def pruning(self, num_keep):
        """Shrink the op masks so only `num_keep` candidates per edge survive."""
        with torch.no_grad():
            self.mask_normal = prune(self.alphas_normal, num_keep, self.mask_normal)
            self.mask_reduce = prune(self.alphas_reduce, num_keep, self.mask_reduce)

    def wider(self, k):
        """Update the partial-channel factor k in every cell."""
        self.k = k
        for cell in self.cells:
            cell.wider(k)

    def forward(self, input):
        s0 = self.stem0(input)
        s1 = self.stem1(s0)
        # sample per-edge op weights from the Dirichlet induced by the alphas
        weights_normal = process_step_matrix(
            self.alphas_normal, "dirichlet", self.mask_normal
        )
        weights_reduce = process_step_matrix(
            self.alphas_reduce, "dirichlet", self.mask_reduce
        )
        # pruned ops must receive exactly zero weight
        # (fix: `x is not None` instead of `not x is None`, PEP 8 E714)
        if self.mask_normal is not None:
            assert (weights_normal[~self.mask_normal] == 0.0).all()
        if self.mask_reduce is not None:
            assert (weights_reduce[~self.mask_reduce] == 0.0).all()
        for i, cell in enumerate(self.cells):
            if cell.reduction:
                weights = weights_reduce
            else:
                weights = weights_normal
            s0, s1 = s1, cell(s0, s1, weights)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), -1))
        return logits

    def _loss(self, input, target):
        """Plain criterion loss (no alpha regularizer for ImageNet)."""
        logits = self(input)
        return self._criterion(logits, target)

    def _initialize_alphas(self):
        # one alpha row per edge: sum over nodes of (2 + node_index) edges
        k = sum(1 for i in range(self._steps) for n in range(2 + i))
        num_ops = len(PRIMITIVES)
        self.alphas_normal = Variable(
            1e-3 * torch.randn(k, num_ops).cuda(), requires_grad=True
        )
        self.alphas_reduce = Variable(
            1e-3 * torch.randn(k, num_ops).cuda(), requires_grad=True
        )
        self._arch_parameters = [
            self.alphas_normal,
            self.alphas_reduce,
        ]
        self.mask_normal = None
        self.mask_reduce = None

    def arch_parameters(self):
        return self._arch_parameters

    def genotype(self):
        """Derive the discrete genotype: keep the top-2 input edges per node
        and the strongest op on each kept edge."""

        def _parse(weights):
            gene = []
            n = 2
            start = 0
            for i in range(self._steps):
                end = start + n
                W = weights[start:end].copy()
                # edges = sorted(range(i + 2), key=lambda x: -max(W[x][k] for k in range(len(W[x])) if k != PRIMITIVES.index('none')))[:2]
                edges = sorted(
                    range(i + 2), key=lambda x: -max(W[x][k] for k in range(len(W[x])))
                )[:2]
                for j in edges:
                    k_best = None
                    for k in range(len(W[j])):
                        # if k != PRIMITIVES.index('none'):
                        if k_best is None or W[j][k] > W[j][k_best]:
                            k_best = k
                    gene.append((PRIMITIVES[k_best], j))
                start = end
                n += 1
            return gene

        gene_normal = _parse(
            process_step_matrix(self.alphas_normal, "softmax", self.mask_normal)
            .data.cpu()
            .numpy()
        )
        gene_reduce = _parse(
            process_step_matrix(self.alphas_reduce, "softmax", self.mask_reduce)
            .data.cpu()
            .numpy()
        )

        concat = range(2 + self._steps - self._multiplier, self._steps + 2)
        genotype = Genotype(
            normal=gene_normal,
            normal_concat=concat,
            reduce=gene_reduce,
            reduce_concat=concat,
        )
        return genotype
# build API
def _DrNASCNN_DARTSspace(criterion):
    """Build the DrNAS DARTS-space network for the configured dataset.

    Returns a NetworkCIFAR for 'cifar10' or a NetworkImageNet for
    'imagenet'; any other dataset name prints an error and exits.
    """
    from xnas.core.config import cfg

    dataset = cfg.SEARCH.DATASET
    if dataset == 'cifar10':
        return NetworkCIFAR(
            C=cfg.SPACE.CHANNEL,
            num_classes=cfg.SEARCH.NUM_CLASSES,
            layers=cfg.SPACE.LAYERS,
            criterion=criterion,
            k=cfg.DRNAS.K,
            reg_type=cfg.DRNAS.REG_TYPE,
            reg_scale=cfg.DRNAS.REG_SCALE
        )
    if dataset == 'imagenet':
        return NetworkImageNet(
            C=cfg.SPACE.CHANNEL,
            num_classes=cfg.SEARCH.NUM_CLASSES,
            layers=cfg.SPACE.LAYERS,
            criterion=criterion,
            k=cfg.DRNAS.K
        )
    # unsupported dataset: report and abort (same behavior as before)
    print("dataset not support (cifar10 / imagenet)")
    exit(1)
| 32.710317
| 138
| 0.524627
| 1,919
| 16,486
| 4.301199
| 0.09901
| 0.043615
| 0.034892
| 0.030531
| 0.799976
| 0.782409
| 0.769566
| 0.753332
| 0.734189
| 0.734189
| 0
| 0.014438
| 0.369829
| 16,486
| 503
| 139
| 32.775348
| 0.780056
| 0.03876
| 0
| 0.703872
| 0
| 0
| 0.021158
| 0
| 0
| 0
| 0
| 0
| 0.009112
| 1
| 0.061503
| false
| 0
| 0.022779
| 0.004556
| 0.127563
| 0.002278
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
863b896f49fe82c4076fc5275ae3e7433a8e8d29
| 88
|
py
|
Python
|
Pattern/3_pattern.py
|
manish1822510059/Python-1000-program
|
d03c1920fe63a7e32ac5bd9a13e2766d7a25756c
|
[
"Apache-2.0"
] | 1
|
2021-03-06T03:33:42.000Z
|
2021-03-06T03:33:42.000Z
|
Pattern/3_pattern.py
|
manish1822510059/Python-1000-programs
|
d03c1920fe63a7e32ac5bd9a13e2766d7a25756c
|
[
"Apache-2.0"
] | null | null | null |
Pattern/3_pattern.py
|
manish1822510059/Python-1000-programs
|
d03c1920fe63a7e32ac5bd9a13e2766d7a25756c
|
[
"Apache-2.0"
] | null | null | null |
# Print a 5-row block where every row is the digit sequence 1..5.
for _row in range(1, 6):
    print("".join(str(col) for col in range(1, 6)))
| 22
| 25
| 0.454545
| 16
| 88
| 2.5
| 0.5625
| 0.35
| 0.4
| 0.45
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 0.363636
| 88
| 4
| 26
| 22
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
868be53896eb429a3f08e06629aa1b9117b71fa1
| 36
|
py
|
Python
|
src/data/__init__.py
|
gokhankesler/python-ds-unit-testing
|
9443a90361adde41d0f5703aacdce670a36fec16
|
[
"MIT"
] | null | null | null |
src/data/__init__.py
|
gokhankesler/python-ds-unit-testing
|
9443a90361adde41d0f5703aacdce670a36fec16
|
[
"MIT"
] | 5
|
2021-07-12T16:34:28.000Z
|
2022-03-12T00:59:38.000Z
|
packages/impyrial/src/data/__init__.py
|
kwhjvdkamp/PythonTutotial
|
cbe52c83b0ff2b30f746977f698186dad055b1f4
|
[
"MIT"
] | null | null | null |
from .preprocessing_helpers import *
| 36
| 36
| 0.861111
| 4
| 36
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 36
| 1
| 36
| 36
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
869c0a95a29c40971051013dc75e4a01192b4db8
| 104,709
|
py
|
Python
|
lib/python27/Lib/site-packages/wx-2.8-msw-ansi/wx/tools/Editra/src/extern/aui/tabart.py
|
bo3b/iZ3D
|
ced8b3a4b0a152d0177f2e94008918efc76935d5
|
[
"MIT"
] | 27
|
2020-11-12T19:24:54.000Z
|
2022-03-27T23:10:45.000Z
|
lib/python27/Lib/site-packages/wx-2.8-msw-ansi/wx/tools/Editra/src/extern/aui/tabart.py
|
bo3b/iZ3D
|
ced8b3a4b0a152d0177f2e94008918efc76935d5
|
[
"MIT"
] | 2
|
2020-11-02T06:30:39.000Z
|
2022-02-23T18:39:55.000Z
|
lib/python27/Lib/site-packages/wx-2.8-msw-ansi/wx/tools/Editra/src/extern/aui/tabart.py
|
bo3b/iZ3D
|
ced8b3a4b0a152d0177f2e94008918efc76935d5
|
[
"MIT"
] | 3
|
2021-08-16T00:21:08.000Z
|
2022-02-23T19:19:36.000Z
|
"""
Tab art provider code - a tab provider provides all drawing functionality to
the L{AuiNotebook}. This allows the L{AuiNotebook} to have a plugable look-and-feel.
By default, a L{AuiNotebook} uses an instance of this class called L{AuiDefaultTabArt}
which provides bitmap art and a colour scheme that is adapted to the major platforms'
look. You can either derive from that class to alter its behaviour or write a
completely new tab art class. Call L{AuiNotebook.SetArtProvider} to make use this
new tab art.
"""
__author__ = "Andrea Gavana <andrea.gavana@gmail.com>"
__date__ = "31 March 2009"
import wx
if wx.Platform == '__WXMAC__':
import Carbon.Appearance
from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText
from aui_utilities import GetBaseColour, DrawMACCloseButton, LightColour, TakeScreenShot
from aui_utilities import CopyAttributes
from aui_constants import *
# -- GUI helper classes and functions --
class AuiCommandCapture(wx.PyEvtHandler):
    """Event handler that captures the id of a menu command selection.

    Pushed onto a window's event-handler stack while a dropdown window menu
    is shown, so the chosen item's command id can be read back afterwards
    via L{GetCommandId}.
    """

    def __init__(self):
        """Default class constructor."""
        wx.PyEvtHandler.__init__(self)
        self._last_id = 0

    def GetCommandId(self):
        """Return the command id of the last captured menu selection."""
        return self._last_id

    def ProcessEvent(self, event):
        """Intercept menu-selection events; delegate everything else.

        :param `event`: the event to process.

        Returns ``True`` when the event is a menu-command selection (its id
        is stored for later retrieval). Any other event is handed to the
        next handler in the chain, if one exists; otherwise ``False`` is
        returned so normal processing continues.
        """
        if event.GetEventType() == wx.wxEVT_COMMAND_MENU_SELECTED:
            self._last_id = event.GetId()
            return True

        next_handler = self.GetNextHandler()
        if next_handler:
            return next_handler.ProcessEvent(event)

        return False
class AuiDefaultTabArt(object):
    """
    Tab art provider code - a tab provider provides all drawing functionality to
    the L{AuiNotebook}. This allows the L{AuiNotebook} to have a plugable look-and-feel.

    By default, a L{AuiNotebook} uses an instance of this class called L{AuiDefaultTabArt}
    which provides bitmap art and a colour scheme that is adapted to the major platforms'
    look. You can either derive from that class to alter its behaviour or write a
    completely new tab art class. Call L{AuiNotebook.SetArtProvider} to make use this
    new tab art.
    """

    def __init__(self):
        """ Default class constructor. """
        # Normal/selected/measuring fonts: the selected font is the system
        # GUI font in bold; measuring reuses the selected (widest) font so
        # tab sizing never underestimates.
        self._normal_font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
        self._selected_font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
        self._selected_font.SetWeight(wx.BOLD)
        self._measuring_font = self._selected_font

        self._fixed_tab_width = 100
        self._tab_ctrl_height = 0
        self._buttonRect = wx.Rect()

        # Base colour drives the whole scheme; the border pen is a darker
        # step of it.
        base_colour = GetBaseColour()

        self._base_colour = base_colour
        border_colour = StepColour(base_colour, 75)

        self._border_pen = wx.Pen(border_colour)
        self._base_colour_pen = wx.Pen(self._base_colour)
        self._base_colour_brush = wx.Brush(self._base_colour)

        # Close-button bitmaps: native-looking on Mac, 16x16 bit patterns
        # elsewhere (grey versions for the disabled state).
        if wx.Platform == "__WXMAC__":
            bmp_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DDKSHADOW)
            self._active_close_bmp = DrawMACCloseButton(bmp_colour)
            self._disabled_close_bmp = DrawMACCloseButton(wx.Colour(128, 128, 128))
        else:
            self._active_close_bmp = BitmapFromBits(nb_close_bits, 16, 16, wx.BLACK)
            self._disabled_close_bmp = BitmapFromBits(nb_close_bits, 16, 16, wx.Colour(128, 128, 128))

        self._hover_close_bmp = self._active_close_bmp
        self._pressed_close_bmp = self._active_close_bmp

        self._active_left_bmp = BitmapFromBits(nb_left_bits, 16, 16, wx.BLACK)
        self._disabled_left_bmp = BitmapFromBits(nb_left_bits, 16, 16, wx.Colour(128, 128, 128))

        self._active_right_bmp = BitmapFromBits(nb_right_bits, 16, 16, wx.BLACK)
        self._disabled_right_bmp = BitmapFromBits(nb_right_bits, 16, 16, wx.Colour(128, 128, 128))

        self._active_windowlist_bmp = BitmapFromBits(nb_list_bits, 16, 16, wx.BLACK)
        self._disabled_windowlist_bmp = BitmapFromBits(nb_list_bits, 16, 16, wx.Colour(128, 128, 128))

        if wx.Platform == "__WXMAC__":
            # Get proper highlight colour for focus rectangle from the
            # current Mac theme. kThemeBrushFocusHighlight is
            # available on Mac OS 8.5 and higher
            if hasattr(wx, 'MacThemeColour'):
                c = wx.MacThemeColour(Carbon.Appearance.kThemeBrushFocusHighlight)
            else:
                brush = wx.Brush(wx.BLACK)
                brush.MacSetTheme(Carbon.Appearance.kThemeBrushFocusHighlight)
                c = brush.GetColour()

            self._focusPen = wx.Pen(c, 2, wx.SOLID)
        else:
            # Elsewhere the focus rectangle is a 1px dotted black outline.
            self._focusPen = wx.Pen(wx.BLACK, 1, wx.USER_DASH)
            self._focusPen.SetDashes([1, 1])
            self._focusPen.SetCap(wx.CAP_BUTT)

    def Clone(self):
        """ Clones the art object. """
        # Fonts are copied explicitly; everything else via CopyAttributes.
        art = AuiDefaultTabArt()
        art.SetNormalFont(self.GetNormalFont())
        art.SetSelectedFont(self.GetSelectedFont())
        art.SetMeasuringFont(self.GetMeasuringFont())

        art = CopyAttributes(art, self)
        return art

    def SetAGWFlags(self, agwFlags):
        """
        Sets the tab art flags.

        :param `agwFlags`: a combination of the following values:

        ==================================== ==================================
        Flag name                            Description
        ==================================== ==================================
        ``AUI_NB_TOP`` With this style, tabs are drawn along the top of the notebook
        ``AUI_NB_LEFT`` With this style, tabs are drawn along the left of the notebook. Not implemented yet.
        ``AUI_NB_RIGHT`` With this style, tabs are drawn along the right of the notebook. Not implemented yet.
        ``AUI_NB_BOTTOM`` With this style, tabs are drawn along the bottom of the notebook.
        ``AUI_NB_TAB_SPLIT`` Allows the tab control to be split by dragging a tab
        ``AUI_NB_TAB_MOVE`` Allows a tab to be moved horizontally by dragging
        ``AUI_NB_TAB_EXTERNAL_MOVE`` Allows a tab to be moved to another tab control
        ``AUI_NB_TAB_FIXED_WIDTH`` With this style, all tabs have the same width
        ``AUI_NB_SCROLL_BUTTONS`` With this style, left and right scroll buttons are displayed
        ``AUI_NB_WINDOWLIST_BUTTON`` With this style, a drop-down list of windows is available
        ``AUI_NB_CLOSE_BUTTON`` With this style, a close button is available on the tab bar
        ``AUI_NB_CLOSE_ON_ACTIVE_TAB`` With this style, a close button is available on the active tab
        ``AUI_NB_CLOSE_ON_ALL_TABS`` With this style, a close button is available on all tabs
        ``AUI_NB_MIDDLE_CLICK_CLOSE`` Allows to close AuiNotebook tabs by mouse middle button click
        ``AUI_NB_SUB_NOTEBOOK`` This style is used by AuiManager to create automatic AuiNotebooks
        ``AUI_NB_HIDE_ON_SINGLE_TAB`` Hides the tab window if only one tab is present
        ``AUI_NB_SMART_TABS`` Use Smart Tabbing, like ``Alt``+``Tab`` on Windows
        ``AUI_NB_USE_IMAGES_DROPDOWN`` Uses images on dropdown window list menu instead of check items
        ``AUI_NB_CLOSE_ON_TAB_LEFT`` Draws the tab close button on the left instead of on the right (a la Camino browser)
        ``AUI_NB_TAB_FLOAT`` Allows the floating of single tabs. Known limitation: when the notebook is more or less full screen, tabs cannot be dragged far enough outside of the notebook to become floating pages
        ``AUI_NB_DRAW_DND_TAB`` Draws an image representation of a tab while dragging (on by default)
        ==================================== ==================================
        """
        self._agwFlags = agwFlags

    def GetAGWFlags(self):
        """
        Returns the tab art flags.

        :see: L{SetAGWFlags} for a list of possible return values.
        """
        return self._agwFlags

    def SetSizingInfo(self, tab_ctrl_size, tab_count, minMaxTabWidth):
        """
        Sets the tab sizing information.

        :param `tab_ctrl_size`: the size of the tab control area;
        :param `tab_count`: the number of tabs;
        :param `minMaxTabWidth`: the minimum and maximum tab widths to be used
         when the ``AUI_NB_TAB_FIXED_WIDTH`` style is active.
        """
        self._fixed_tab_width = 100
        minTabWidth, maxTabWidth = minMaxTabWidth

        # Width available for tabs: control width minus the indent and the
        # close / window-list buttons, when shown.
        tot_width = tab_ctrl_size.x - self.GetIndentSize() - 4
        agwFlags = self.GetAGWFlags()

        if agwFlags & AUI_NB_CLOSE_BUTTON:
            tot_width -= self._active_close_bmp.GetWidth()
        if agwFlags & AUI_NB_WINDOWLIST_BUTTON:
            tot_width -= self._active_windowlist_bmp.GetWidth()

        if tab_count > 0:
            # NOTE: Python 2 integer division here (tot_width/tab_count).
            self._fixed_tab_width = tot_width/tab_count

        # Clamp the computed width into [100, min(tot_width/2, 220)].
        if self._fixed_tab_width < 100:
            self._fixed_tab_width = 100

        if self._fixed_tab_width > tot_width/2:
            self._fixed_tab_width = tot_width/2

        if self._fixed_tab_width > 220:
            self._fixed_tab_width = 220

        # Caller-supplied min/max (-1 means "no constraint") win over the
        # defaults above.
        if minTabWidth > -1:
            self._fixed_tab_width = max(self._fixed_tab_width, minTabWidth)
        if maxTabWidth > -1:
            self._fixed_tab_width = min(self._fixed_tab_width, maxTabWidth)

        self._tab_ctrl_height = tab_ctrl_size.y

    def DrawBackground(self, dc, wnd, rect):
        """
        Draws the tab area background.

        :param `dc`: a `wx.DC` device context;
        :param `wnd`: a `wx.Window` instance object;
        :param `rect`: the tab control rectangle.
        """
        self._buttonRect = wx.Rect()

        # draw background
        agwFlags = self.GetAGWFlags()
        if agwFlags & AUI_NB_BOTTOM:
            r = wx.Rect(rect.x, rect.y, rect.width+2, rect.height)
        # TODO: else if (agwFlags & AUI_NB_LEFT)
        # TODO: else if (agwFlags & AUI_NB_RIGHT)
        else: #for AUI_NB_TOP
            r = wx.Rect(rect.x, rect.y, rect.width+2, rect.height-3)

        # Vertical gradient from a lighter to a darker step of the base colour.
        top_colour = StepColour(self._base_colour, 90)
        bottom_colour = StepColour(self._base_colour, 170)
        dc.GradientFillLinear(r, top_colour, bottom_colour, wx.SOUTH)

        # draw base lines
        dc.SetPen(self._border_pen)
        y = rect.GetHeight()
        w = rect.GetWidth()

        if agwFlags & AUI_NB_BOTTOM:
            dc.SetBrush(wx.Brush(bottom_colour))
            dc.DrawRectangle(-1, 0, w+2, 4)
        # TODO: else if (agwFlags & AUI_NB_LEFT)
        # TODO: else if (agwFlags & AUI_NB_RIGHT)
        else: # for AUI_NB_TOP
            dc.SetBrush(self._base_colour_brush)
            dc.DrawRectangle(-1, y-4, w+2, 4)

    def DrawTab(self, dc, wnd, page, in_rect, close_button_state, paint_control=False):
        """
        Draws a single tab.

        :param `dc`: a `wx.DC` device context;
        :param `wnd`: a `wx.Window` instance object;
        :param `page`: the tab control page associated with the tab;
        :param `in_rect`: rectangle the tab should be confined to;
        :param `close_button_state`: the state of the close button on the tab;
        :param `paint_control`: whether to draw the control inside a tab (if any) on a `wx.MemoryDC`.
        """
        # if the caption is empty, measure some temporary text
        caption = page.caption
        if not caption:
            caption = "Xj"

        dc.SetFont(self._selected_font)
        selected_textx, selected_texty, dummy = dc.GetMultiLineTextExtent(caption)

        dc.SetFont(self._normal_font)
        normal_textx, normal_texty, dummy = dc.GetMultiLineTextExtent(caption)

        control = page.control

        # figure out the size of the tab
        tab_size, x_extent = self.GetTabSize(dc, wnd, page.caption, page.bitmap,
                                             page.active, close_button_state, control)

        tab_height = self._tab_ctrl_height - 3
        tab_width = tab_size[0]
        tab_x = in_rect.x
        tab_y = in_rect.y + in_rect.height - tab_height

        caption = page.caption

        # select pen, brush and font for the tab to be drawn
        if page.active:
            dc.SetFont(self._selected_font)
            textx, texty = selected_textx, selected_texty
        else:
            dc.SetFont(self._normal_font)
            textx, texty = normal_textx, normal_texty

        if not page.enabled:
            dc.SetTextForeground(wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT))
            pagebitmap = page.dis_bitmap
        else:
            dc.SetTextForeground(page.text_colour)
            pagebitmap = page.bitmap

        # create points that will make the tab outline
        clip_width = tab_width
        if tab_x + clip_width > in_rect.x + in_rect.width:
            clip_width = in_rect.x + in_rect.width - tab_x

        # since the above code above doesn't play well with WXDFB or WXCOCOA,
        # we'll just use a rectangle for the clipping region for now --
        dc.SetClippingRegion(tab_x, tab_y, clip_width+1, tab_height-3)

        # Six-point outline polygon (Python 2: xrange).
        border_points = [wx.Point() for i in xrange(6)]
        agwFlags = self.GetAGWFlags()

        if agwFlags & AUI_NB_BOTTOM:
            border_points[0] = wx.Point(tab_x,             tab_y)
            border_points[1] = wx.Point(tab_x,             tab_y+tab_height-6)
            border_points[2] = wx.Point(tab_x+2,           tab_y+tab_height-4)
            border_points[3] = wx.Point(tab_x+tab_width-2, tab_y+tab_height-4)
            border_points[4] = wx.Point(tab_x+tab_width,   tab_y+tab_height-6)
            border_points[5] = wx.Point(tab_x+tab_width,   tab_y)
        else: #if (agwFlags & AUI_NB_TOP)
            border_points[0] = wx.Point(tab_x,             tab_y+tab_height-4)
            border_points[1] = wx.Point(tab_x,             tab_y+2)
            border_points[2] = wx.Point(tab_x+2,           tab_y)
            border_points[3] = wx.Point(tab_x+tab_width-2, tab_y)
            border_points[4] = wx.Point(tab_x+tab_width,   tab_y+2)
            border_points[5] = wx.Point(tab_x+tab_width,   tab_y+tab_height-4)
        # TODO: else if (agwFlags & AUI_NB_LEFT)
        # TODO: else if (agwFlags & AUI_NB_RIGHT)

        drawn_tab_yoff = border_points[1].y
        drawn_tab_height = border_points[0].y - border_points[1].y

        if page.active:
            # draw active tab

            # draw base background colour
            r = wx.Rect(tab_x, tab_y, tab_width, tab_height)
            dc.SetPen(self._base_colour_pen)
            dc.SetBrush(self._base_colour_brush)
            dc.DrawRectangle(r.x+1, r.y+1, r.width-1, r.height-4)

            # this white helps fill out the gradient at the top of the tab
            dc.SetPen(wx.WHITE_PEN)
            dc.SetBrush(wx.WHITE_BRUSH)
            dc.DrawRectangle(r.x+2, r.y+1, r.width-3, r.height-4)

            # these two points help the rounded corners appear more antialiased
            dc.SetPen(self._base_colour_pen)
            dc.DrawPoint(r.x+2, r.y+1)
            dc.DrawPoint(r.x+r.width-2, r.y+1)

            # set rectangle down a bit for gradient drawing
            r.SetHeight(r.GetHeight()/2)
            r.x += 2
            r.width -= 2
            r.y += r.height
            r.y -= 2

            # draw gradient background
            top_colour = wx.WHITE
            bottom_colour = self._base_colour
            dc.GradientFillLinear(r, bottom_colour, top_colour, wx.NORTH)

        else:
            # draw inactive tab

            r = wx.Rect(tab_x, tab_y+1, tab_width, tab_height-3)

            # start the gradent up a bit and leave the inside border inset
            # by a pixel for a 3D look.  Only the top half of the inactive
            # tab will have a slight gradient
            r.x += 3
            r.y += 1
            r.width -= 4
            r.height /= 2
            r.height -= 1

            # -- draw top gradient fill for glossy look
            top_colour = self._base_colour
            bottom_colour = StepColour(top_colour, 160)
            dc.GradientFillLinear(r, bottom_colour, top_colour, wx.NORTH)

            r.y += r.height
            r.y -= 1

            # -- draw bottom fill for glossy look
            top_colour = self._base_colour
            bottom_colour = self._base_colour
            dc.GradientFillLinear(r, top_colour, bottom_colour, wx.SOUTH)

        # draw tab outline
        dc.SetPen(self._border_pen)
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        dc.DrawPolygon(border_points)

        # there are two horizontal grey lines at the bottom of the tab control,
        # this gets rid of the top one of those lines in the tab control
        if page.active:
            if agwFlags & AUI_NB_BOTTOM:
                dc.SetPen(wx.Pen(StepColour(self._base_colour, 170)))
            # TODO: else if (agwFlags & AUI_NB_LEFT)
            # TODO: else if (agwFlags & AUI_NB_RIGHT)
            else: # for AUI_NB_TOP
                dc.SetPen(self._base_colour_pen)

            dc.DrawLine(border_points[0].x+1,
                        border_points[0].y,
                        border_points[5].x,
                        border_points[5].y)

        text_offset = tab_x + 8
        close_button_width = 0

        if close_button_state != AUI_BUTTON_STATE_HIDDEN:
            close_button_width = self._active_close_bmp.GetWidth()

            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
                text_offset += close_button_width - 5

        bitmap_offset = 0

        if pagebitmap.IsOk():
            bitmap_offset = tab_x + 8
            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT and close_button_width:
                bitmap_offset += close_button_width - 5

            # draw bitmap
            dc.DrawBitmap(pagebitmap,
                          bitmap_offset,
                          drawn_tab_yoff + (drawn_tab_height/2) - (pagebitmap.GetHeight()/2),
                          True)

            text_offset = bitmap_offset + pagebitmap.GetWidth()
            text_offset += 3 # bitmap padding

        else:

            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT == 0 or not close_button_width:
                text_offset = tab_x + 8

        # Truncate the caption (with an ellipsis) to the space left of the
        # close button.
        draw_text = ChopText(dc, caption, tab_width - (text_offset-tab_x) - close_button_width)

        ypos = drawn_tab_yoff + (drawn_tab_height)/2 - (texty/2) - 1

        offset_focus = text_offset

        if control is not None:
            # A live control is hosted on the tab: position/show it, and
            # optionally paint a screenshot of it instead (paint_control).
            if control.GetPosition() != wx.Point(text_offset+1, ypos):
                control.SetPosition(wx.Point(text_offset+1, ypos))

            if not control.IsShown():
                control.Show()

            if paint_control:
                bmp = TakeScreenShot(control.GetScreenRect())
                dc.DrawBitmap(bmp, text_offset+1, ypos, True)

            controlW, controlH = control.GetSize()
            text_offset += controlW + 4
            textx += controlW + 4

        # draw tab text
        rectx, recty, dummy = dc.GetMultiLineTextExtent(draw_text)
        dc.DrawLabel(draw_text, wx.Rect(text_offset, ypos, rectx, recty))

        # draw focus rectangle
        self.DrawFocusRectangle(dc, page, wnd, draw_text, offset_focus, bitmap_offset, drawn_tab_yoff, drawn_tab_height, textx, texty)

        out_button_rect = wx.Rect()

        # draw close button if necessary
        if close_button_state != AUI_BUTTON_STATE_HIDDEN:

            bmp = self._disabled_close_bmp

            if close_button_state == AUI_BUTTON_STATE_HOVER:
                bmp = self._hover_close_bmp
            elif close_button_state == AUI_BUTTON_STATE_PRESSED:
                bmp = self._pressed_close_bmp

            # Python 2 and/or idiom for a conditional expression: 1 when the
            # tabs sit at the bottom, else 0.
            shift = (agwFlags & AUI_NB_BOTTOM and [1] or [0])[0]

            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
                rect = wx.Rect(tab_x + 4, tab_y + (tab_height - bmp.GetHeight())/2 - shift,
                               close_button_width, tab_height)
            else:
                rect = wx.Rect(tab_x + tab_width - close_button_width - 1,
                               tab_y + (tab_height - bmp.GetHeight())/2 - shift,
                               close_button_width, tab_height)

            rect = IndentPressedBitmap(rect, close_button_state)
            dc.DrawBitmap(bmp, rect.x, rect.y, True)

            out_button_rect = rect

        out_tab_rect = wx.Rect(tab_x, tab_y, tab_width, tab_height)

        dc.DestroyClippingRegion()

        return out_tab_rect, out_button_rect, x_extent

    def SetCustomButton(self, bitmap_id, button_state, bmp):
        """
        Sets a custom bitmap for the close, left, right and window list
        buttons.

        :param `bitmap_id`: the button identifier;
        :param `button_state`: the button state;
        :param `bmp`: the custom bitmap to use for the button.
        """
        if bitmap_id == AUI_BUTTON_CLOSE:
            if button_state == AUI_BUTTON_STATE_NORMAL:
                # Setting the normal close bitmap resets all derived states
                # to it as well.
                self._active_close_bmp = bmp
                self._hover_close_bmp = self._active_close_bmp
                self._pressed_close_bmp = self._active_close_bmp
                self._disabled_close_bmp = self._active_close_bmp

            elif button_state == AUI_BUTTON_STATE_HOVER:
                self._hover_close_bmp = bmp
            elif button_state == AUI_BUTTON_STATE_PRESSED:
                self._pressed_close_bmp = bmp
            else:
                self._disabled_close_bmp = bmp

        elif bitmap_id == AUI_BUTTON_LEFT:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                self._disabled_left_bmp = bmp
            else:
                self._active_left_bmp = bmp

        elif bitmap_id == AUI_BUTTON_RIGHT:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                self._disabled_right_bmp = bmp
            else:
                self._active_right_bmp = bmp

        elif bitmap_id == AUI_BUTTON_WINDOWLIST:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                self._disabled_windowlist_bmp = bmp
            else:
                self._active_windowlist_bmp = bmp

    def GetIndentSize(self):
        """ Returns the tabs indent size. """
        return 5

    def GetTabSize(self, dc, wnd, caption, bitmap, active, close_button_state, control=None):
        """
        Returns the tab size for the given caption, bitmap and button state.

        :param `dc`: a `wx.DC` device context;
        :param `wnd`: a `wx.Window` instance object;
        :param `caption`: the tab text caption;
        :param `bitmap`: the bitmap displayed on the tab;
        :param `active`: whether the tab is selected or not;
        :param `close_button_state`: the state of the close button on the tab;
        :param `control`: a `wx.Window` instance inside a tab (or ``None``).
        """
        dc.SetFont(self._measuring_font)
        measured_textx, measured_texty, dummy = dc.GetMultiLineTextExtent(caption)

        # add padding around the text
        tab_width = measured_textx
        tab_height = measured_texty

        # if the close button is showing, add space for it
        if close_button_state != AUI_BUTTON_STATE_HIDDEN:
            tab_width += self._active_close_bmp.GetWidth() + 3

        # if there's a bitmap, add space for it
        if bitmap.IsOk():
            tab_width += bitmap.GetWidth()
            tab_width += 3 # right side bitmap padding
            tab_height = max(tab_height, bitmap.GetHeight())

        # add padding
        tab_width += 16
        tab_height += 10

        agwFlags = self.GetAGWFlags()
        if agwFlags & AUI_NB_TAB_FIXED_WIDTH:
            # Fixed-width mode overrides the measured width entirely.
            tab_width = self._fixed_tab_width

        if control is not None:
            tab_width += control.GetSize().GetWidth() + 4

        x_extent = tab_width

        return (tab_width, tab_height), x_extent

    def DrawButton(self, dc, wnd, in_rect, button, orientation):
        """
        Draws a button on the tab or on the tab area, depending on the button identifier.

        :param `dc`: a `wx.DC` device context;
        :param `wnd`: a `wx.Window` instance object;
        :param `in_rect`: rectangle the tab should be confined to;
        :param `button`: an instance of the button class;
        :param `orientation`: the tab orientation.
        """
        bitmap_id, button_state = button.id, button.cur_state

        # Pick the bitmap matching the button id and its current state;
        # unknown ids fall back to the button's own bitmaps.
        if bitmap_id == AUI_BUTTON_CLOSE:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                bmp = self._disabled_close_bmp
            elif button_state & AUI_BUTTON_STATE_HOVER:
                bmp = self._hover_close_bmp
            elif button_state & AUI_BUTTON_STATE_PRESSED:
                bmp = self._pressed_close_bmp
            else:
                bmp = self._active_close_bmp

        elif bitmap_id == AUI_BUTTON_LEFT:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                bmp = self._disabled_left_bmp
            else:
                bmp = self._active_left_bmp

        elif bitmap_id == AUI_BUTTON_RIGHT:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                bmp = self._disabled_right_bmp
            else:
                bmp = self._active_right_bmp

        elif bitmap_id == AUI_BUTTON_WINDOWLIST:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                bmp = self._disabled_windowlist_bmp
            else:
                bmp = self._active_windowlist_bmp

        else:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                bmp = button.dis_bitmap
            else:
                bmp = button.bitmap

        if not bmp.IsOk():
            return

        rect = wx.Rect(*in_rect)

        # Vertically centre the bitmap; anchor left or right per orientation.
        if orientation == wx.LEFT:

            rect.SetX(in_rect.x)
            rect.SetY(((in_rect.y + in_rect.height)/2) - (bmp.GetHeight()/2))
            rect.SetWidth(bmp.GetWidth())
            rect.SetHeight(bmp.GetHeight())

        else:

            rect = wx.Rect(in_rect.x + in_rect.width - bmp.GetWidth(),
                           ((in_rect.y + in_rect.height)/2) - (bmp.GetHeight()/2),
                           bmp.GetWidth(), bmp.GetHeight())

        rect = IndentPressedBitmap(rect, button_state)
        dc.DrawBitmap(bmp, rect.x, rect.y, True)

        out_rect = rect

        if bitmap_id == AUI_BUTTON_RIGHT:
            self._buttonRect = wx.Rect(rect.x, rect.y, 30, rect.height)

        return out_rect

    def DrawFocusRectangle(self, dc, page, wnd, draw_text, text_offset, bitmap_offset, drawn_tab_yoff, drawn_tab_height, textx, texty):
        """
        Draws the focus rectangle on a tab.

        :param `dc`: a `wx.DC` device context;
        :param `page`: the page associated with the tab;
        :param `wnd`: a `wx.Window` instance object;
        :param `draw_text`: the text that has been drawn on the tab;
        :param `text_offset`: the text offset on the tab;
        :param `bitmap_offset`: the bitmap offset on the tab;
        :param `drawn_tab_yoff`: the y offset of the tab text;
        :param `drawn_tab_height`: the height of the tab;
        :param `textx`: the x text extent;
        :param `texty`: the y text extent.
        """
        # Only draw when this tab is active and its window holds the focus.
        if page.active and wx.Window.FindFocus() == wnd:

            focusRectText = wx.Rect(text_offset, (drawn_tab_yoff + (drawn_tab_height)/2 - (texty/2)),
                                    textx, texty)

            if page.bitmap.IsOk():
                focusRectBitmap = wx.Rect(bitmap_offset, drawn_tab_yoff + (drawn_tab_height/2) - (page.bitmap.GetHeight()/2),
                                          page.bitmap.GetWidth(), page.bitmap.GetHeight())

            # NOTE(review): if the bitmap is invalid AND draw_text is empty,
            # focusRect is never assigned and the Inflate call below would
            # raise — callers appear to always provide one or the other.
            if page.bitmap.IsOk() and draw_text == "":
                focusRect = wx.Rect(*focusRectBitmap)
            elif not page.bitmap.IsOk() and draw_text != "":
                focusRect = wx.Rect(*focusRectText)
            elif page.bitmap.IsOk() and draw_text != "":
                focusRect = focusRectText.Union(focusRectBitmap)

            focusRect.Inflate(2, 2)

            dc.SetBrush(wx.TRANSPARENT_BRUSH)
            dc.SetPen(self._focusPen)
            dc.DrawRoundedRectangleRect(focusRect, 2)

    def GetBestTabCtrlSize(self, wnd, pages, required_bmp_size):
        """
        Returns the best tab control size.

        :param `wnd`: a `wx.Window` instance object;
        :param `pages`: the pages associated with the tabs;
        :param `required_bmp_size`: the size of the bitmap on the tabs.
        """
        dc = wx.ClientDC(wnd)
        dc.SetFont(self._measuring_font)

        # sometimes a standard bitmap size needs to be enforced, especially
        # if some tabs have bitmaps and others don't. This is important because
        # it prevents the tab control from resizing when tabs are added.
        measure_bmp = wx.NullBitmap
        if required_bmp_size.IsFullySpecified():
            measure_bmp = wx.EmptyBitmap(required_bmp_size.x,
                                         required_bmp_size.y)

        max_y = 0

        for page in pages:

            if measure_bmp.IsOk():
                bmp = measure_bmp
            else:
                bmp = page.bitmap

            # we don't use the caption text because we don't
            # want tab heights to be different in the case
            # of a very short piece of text on one tab and a very
            # tall piece of text on another tab
            s, x_ext = self.GetTabSize(dc, wnd, page.caption, bmp, True, AUI_BUTTON_STATE_HIDDEN, None)
            max_y = max(max_y, s[1])

            if page.control:
                controlW, controlH = page.control.GetSize()
                max_y = max(max_y, controlH+4)

        return max_y + 2

    def SetNormalFont(self, font):
        """
        Sets the normal font for drawing tab labels.

        :param `font`: a `wx.Font` object.
        """
        self._normal_font = font

    def SetSelectedFont(self, font):
        """
        Sets the selected tab font for drawing tab labels.

        :param `font`: a `wx.Font` object.
        """
        self._selected_font = font

    def SetMeasuringFont(self, font):
        """
        Sets the font for calculating text measurements.

        :param `font`: a `wx.Font` object.
        """
        self._measuring_font = font

    def GetNormalFont(self):
        """ Returns the normal font for drawing tab labels. """
        return self._normal_font

    def GetSelectedFont(self):
        """ Returns the selected tab font for drawing tab labels. """
        return self._selected_font

    def GetMeasuringFont(self):
        """ Returns the font for calculating text measurements. """
        return self._measuring_font

    def ShowDropDown(self, wnd, pages, active_idx):
        """
        Shows the drop-down window menu on the tab area.

        :param `wnd`: a `wx.Window` derived window instance;
        :param `pages`: the pages associated with the tabs;
        :param `active_idx`: the active tab index.
        """
        useImages = self.GetAGWFlags() & AUI_NB_USE_IMAGES_DROPDOWN
        menuPopup = wx.Menu()

        longest = 0
        for i, page in enumerate(pages):

            caption = page.caption

            # if there is no caption, make it a space.  This will prevent
            # an assert in the menu code.
            if caption == "":
                caption = " "

            # Save longest caption width for calculating menu width with
            width = wnd.GetTextExtent(caption)[0]
            if width > longest:
                longest = width

            # Menu item ids start at 1000 so the selected tab index can be
            # recovered as (command - 1000).
            if useImages:
                menuItem = wx.MenuItem(menuPopup, 1000+i, caption)
                if page.bitmap:
                    menuItem.SetBitmap(page.bitmap)

                menuPopup.AppendItem(menuItem)

            else:

                menuPopup.AppendCheckItem(1000+i, caption)

            menuPopup.Enable(1000+i, page.enabled)

        if active_idx != -1 and not useImages:

            menuPopup.Check(1000+active_idx, True)

        # find out the screen coordinate at the bottom of the tab ctrl
        cli_rect = wnd.GetClientRect()

        # Calculate the approximate size of the popupmenu for setting the
        # position of the menu when its shown.
        # Account for extra padding on left/right of text on mac menus
        if wx.Platform in ['__WXMAC__', '__WXMSW__']:
            longest += 32

        # Bitmap/Checkmark width + padding
        longest += 20

        if self.GetAGWFlags() & AUI_NB_CLOSE_BUTTON:
            longest += 16

        pt = wx.Point(cli_rect.x + cli_rect.GetWidth() - longest,
                      cli_rect.y + cli_rect.height)

        cc = AuiCommandCapture()
        wnd.PushEventHandler(cc)
        wnd.PopupMenu(menuPopup, pt)
        command = cc.GetCommandId()
        wnd.PopEventHandler(True)

        if command >= 1000:
            return command - 1000

        return -1
class AuiSimpleTabArt(object):
    """ A simple-looking implementation of a tab art. """

    def __init__(self):
        """ Default class constructor. """

        self._normal_font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
        self._selected_font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
        self._selected_font.SetWeight(wx.BOLD)
        # The measuring font aliases the (bold) selected font, so tab widths
        # are computed against the widest rendering of the label.
        self._measuring_font = self._selected_font

        self._agwFlags = 0
        self._fixed_tab_width = 100

        base_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DFACE)

        background_colour = base_colour
        normaltab_colour = base_colour
        selectedtab_colour = wx.WHITE

        self._bkbrush = wx.Brush(background_colour)
        self._normal_bkbrush = wx.Brush(normaltab_colour)
        self._normal_bkpen = wx.Pen(normaltab_colour)
        self._selected_bkbrush = wx.Brush(selectedtab_colour)
        self._selected_bkpen = wx.Pen(selectedtab_colour)

        # 16x16 monochrome button bitmaps; the grey variants are drawn when
        # the corresponding button is disabled.
        self._active_close_bmp = BitmapFromBits(nb_close_bits, 16, 16, wx.BLACK)
        self._disabled_close_bmp = BitmapFromBits(nb_close_bits, 16, 16, wx.Colour(128, 128, 128))

        self._active_left_bmp = BitmapFromBits(nb_left_bits, 16, 16, wx.BLACK)
        self._disabled_left_bmp = BitmapFromBits(nb_left_bits, 16, 16, wx.Colour(128, 128, 128))

        self._active_right_bmp = BitmapFromBits(nb_right_bits, 16, 16, wx.BLACK)
        self._disabled_right_bmp = BitmapFromBits(nb_right_bits, 16, 16, wx.Colour(128, 128, 128))

        self._active_windowlist_bmp = BitmapFromBits(nb_list_bits, 16, 16, wx.BLACK)
        self._disabled_windowlist_bmp = BitmapFromBits(nb_list_bits, 16, 16, wx.Colour(128, 128, 128))

    def Clone(self):
        """ Clones the art object. """

        art = AuiSimpleTabArt()
        art.SetNormalFont(self.GetNormalFont())
        art.SetSelectedFont(self.GetSelectedFont())
        art.SetMeasuringFont(self.GetMeasuringFont())

        # Copy every remaining attribute from this instance onto the clone.
        art = CopyAttributes(art, self)
        return art

    def SetAGWFlags(self, agwFlags):
        """
        Sets the tab art flags.

        :param `agwFlags`: a combination of the following values:

         ==================================== ==================================
         Flag name                            Description
         ==================================== ==================================
         ``AUI_NB_TOP``                       With this style, tabs are drawn along the top of the notebook
         ``AUI_NB_LEFT``                      With this style, tabs are drawn along the left of the notebook. Not implemented yet.
         ``AUI_NB_RIGHT``                     With this style, tabs are drawn along the right of the notebook. Not implemented yet.
         ``AUI_NB_BOTTOM``                    With this style, tabs are drawn along the bottom of the notebook.
         ``AUI_NB_TAB_SPLIT``                 Allows the tab control to be split by dragging a tab
         ``AUI_NB_TAB_MOVE``                  Allows a tab to be moved horizontally by dragging
         ``AUI_NB_TAB_EXTERNAL_MOVE``         Allows a tab to be moved to another tab control
         ``AUI_NB_TAB_FIXED_WIDTH``           With this style, all tabs have the same width
         ``AUI_NB_SCROLL_BUTTONS``            With this style, left and right scroll buttons are displayed
         ``AUI_NB_WINDOWLIST_BUTTON``         With this style, a drop-down list of windows is available
         ``AUI_NB_CLOSE_BUTTON``              With this style, a close button is available on the tab bar
         ``AUI_NB_CLOSE_ON_ACTIVE_TAB``       With this style, a close button is available on the active tab
         ``AUI_NB_CLOSE_ON_ALL_TABS``         With this style, a close button is available on all tabs
         ``AUI_NB_MIDDLE_CLICK_CLOSE``        Allows to close AuiNotebook tabs by mouse middle button click
         ``AUI_NB_SUB_NOTEBOOK``              This style is used by AuiManager to create automatic AuiNotebooks
         ``AUI_NB_HIDE_ON_SINGLE_TAB``        Hides the tab window if only one tab is present
         ``AUI_NB_SMART_TABS``                Use Smart Tabbing, like ``Alt``+``Tab`` on Windows
         ``AUI_NB_USE_IMAGES_DROPDOWN``       Uses images on dropdown window list menu instead of check items
         ``AUI_NB_CLOSE_ON_TAB_LEFT``         Draws the tab close button on the left instead of on the right (a la Camino browser)
         ``AUI_NB_TAB_FLOAT``                 Allows the floating of single tabs. Known limitation: when the notebook is more or less full screen, tabs cannot be dragged far enough outside of the notebook to become floating pages
         ``AUI_NB_DRAW_DND_TAB``              Draws an image representation of a tab while dragging (on by default)
         ==================================== ==================================
        """

        self._agwFlags = agwFlags

    def GetAGWFlags(self):
        """
        Returns the tab art flags.

        :see: L{SetAGWFlags} for a list of possible return values.
        """

        return self._agwFlags

    def SetSizingInfo(self, tab_ctrl_size, tab_count, minMaxTabWidth):
        """
        Sets the tab sizing information.

        :param `tab_ctrl_size`: the size of the tab control area;
        :param `tab_count`: the number of tabs;
        :param `minMaxTabWidth`: the minimum and maximum tab widths to be used
         when the ``AUI_NB_TAB_FIXED_WIDTH`` style is active.
        """

        self._fixed_tab_width = 100
        minTabWidth, maxTabWidth = minMaxTabWidth

        # Space available for tabs once the indent and (if shown) the
        # close/window-list buttons are subtracted.
        tot_width = tab_ctrl_size.x - self.GetIndentSize() - 4

        if self._agwFlags & AUI_NB_CLOSE_BUTTON:
            tot_width -= self._active_close_bmp.GetWidth()
        if self._agwFlags & AUI_NB_WINDOWLIST_BUTTON:
            tot_width -= self._active_windowlist_bmp.GetWidth()

        if tab_count > 0:
            self._fixed_tab_width = tot_width/tab_count

        # Clamp the computed width: never below 100, never more than half the
        # control, never above 220 pixels.
        if self._fixed_tab_width < 100:
            self._fixed_tab_width = 100

        if self._fixed_tab_width > tot_width/2:
            self._fixed_tab_width = tot_width/2

        if self._fixed_tab_width > 220:
            self._fixed_tab_width = 220

        # Caller-supplied bounds (when > -1) take precedence over the clamps.
        if minTabWidth > -1:
            self._fixed_tab_width = max(self._fixed_tab_width, minTabWidth)
        if maxTabWidth > -1:
            self._fixed_tab_width = min(self._fixed_tab_width, maxTabWidth)

        self._tab_ctrl_height = tab_ctrl_size.y

    def DrawBackground(self, dc, wnd, rect):
        """
        Draws the tab area background.

        :param `dc`: a `wx.DC` device context;
        :param `wnd`: a `wx.Window` instance object;
        :param `rect`: the tab control rectangle.
        """

        # draw background, overshooting by one pixel on every side so no
        # border line shows
        dc.SetBrush(self._bkbrush)
        dc.SetPen(wx.TRANSPARENT_PEN)
        dc.DrawRectangle(-1, -1, rect.GetWidth()+2, rect.GetHeight()+2)

        # draw base line along the bottom of the tab area
        dc.SetPen(wx.GREY_PEN)
        dc.DrawLine(0, rect.GetHeight()-1, rect.GetWidth(), rect.GetHeight()-1)

    def DrawTab(self, dc, wnd, page, in_rect, close_button_state, paint_control=False):
        """
        Draws a single tab.

        :param `dc`: a `wx.DC` device context;
        :param `wnd`: a `wx.Window` instance object;
        :param `page`: the tab control page associated with the tab;
        :param `in_rect`: rectangle the tab should be confined to;
        :param `close_button_state`: the state of the close button on the tab;
        :param `paint_control`: whether to draw the control inside a tab (if any) on a `wx.MemoryDC`.

        :returns: the tab rectangle, the close-button rectangle and the
         horizontal extent to advance by for the next tab.
        """

        # if the caption is empty, measure some temporary text
        caption = page.caption
        if caption == "":
            caption = "Xj"

        agwFlags = self.GetAGWFlags()

        # Measure the caption with both fonts up front; the active tab uses
        # the bold selected font and so may need more room.
        dc.SetFont(self._selected_font)
        selected_textx, selected_texty, dummy = dc.GetMultiLineTextExtent(caption)

        dc.SetFont(self._normal_font)
        normal_textx, normal_texty, dummy = dc.GetMultiLineTextExtent(caption)

        control = page.control

        # figure out the size of the tab
        tab_size, x_extent = self.GetTabSize(dc, wnd, page.caption, page.bitmap,
                                             page.active, close_button_state, control)

        tab_height = tab_size[1]
        tab_width = tab_size[0]
        tab_x = in_rect.x
        tab_y = in_rect.y + in_rect.height - tab_height

        caption = page.caption

        # select pen, brush and font for the tab to be drawn
        if page.active:
            dc.SetPen(self._selected_bkpen)
            dc.SetBrush(self._selected_bkbrush)
            dc.SetFont(self._selected_font)
            textx = selected_textx
            texty = selected_texty
        else:
            dc.SetPen(self._normal_bkpen)
            dc.SetBrush(self._normal_bkbrush)
            dc.SetFont(self._normal_font)
            textx = normal_textx
            texty = normal_texty

        if not page.enabled:
            dc.SetTextForeground(wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT))
        else:
            dc.SetTextForeground(page.text_colour)

        # -- draw line --
        # Trapezoid outline of the tab: bottom-left corner, slanted left edge,
        # flat top, near-vertical right edge, bottom-right corner, and back to
        # the start (points[6] aliases points[0] to close the polygon).
        points = [wx.Point() for i in xrange(7)]
        points[0].x = tab_x
        points[0].y = tab_y + tab_height - 1
        points[1].x = tab_x + tab_height - 3
        points[1].y = tab_y + 2
        points[2].x = tab_x + tab_height + 3
        points[2].y = tab_y
        points[3].x = tab_x + tab_width - 2
        points[3].y = tab_y
        points[4].x = tab_x + tab_width
        points[4].y = tab_y + 2
        points[5].x = tab_x + tab_width
        points[5].y = tab_y + tab_height - 1
        points[6] = points[0]

        dc.SetClippingRect(in_rect)
        dc.DrawPolygon(points)

        dc.SetPen(wx.GREY_PEN)
        dc.DrawLines(points)

        close_button_width = 0

        # Centre the caption horizontally, shifting it to make room for the
        # close button (left or right) and/or an embedded control.
        if close_button_state != AUI_BUTTON_STATE_HIDDEN:
            close_button_width = self._active_close_bmp.GetWidth()
            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
                if control:
                    text_offset = tab_x + (tab_height/2) + close_button_width - (textx/2) - 2
                else:
                    text_offset = tab_x + (tab_height/2) + ((tab_width+close_button_width)/2) - (textx/2) - 2
            else:
                if control:
                    text_offset = tab_x + (tab_height/2) + close_button_width - (textx/2)
                else:
                    text_offset = tab_x + (tab_height/2) + ((tab_width-close_button_width)/2) - (textx/2)
        else:
            text_offset = tab_x + (tab_height/3) + (tab_width/2) - (textx/2)
            if control:
                if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
                    text_offset = tab_x + (tab_height/3) - (textx/2) + close_button_width + 2
                else:
                    text_offset = tab_x + (tab_height/3) - (textx/2)

        # set minimum text offset
        if text_offset < tab_x + tab_height:
            text_offset = tab_x + tab_height

        # chop text if necessary
        if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
            draw_text = ChopText(dc, caption, tab_width - (text_offset-tab_x))
        else:
            draw_text = ChopText(dc, caption,
                                 tab_width - (text_offset-tab_x) - close_button_width)

        ypos = (tab_y + tab_height)/2 - (texty/2) + 1

        if control is not None:
            # Position (and, if needed, show) the control embedded in the tab;
            # when painting to a MemoryDC, blit a screenshot of it instead.
            if control.GetPosition() != wx.Point(text_offset+1, ypos):
                control.SetPosition(wx.Point(text_offset+1, ypos))

            if not control.IsShown():
                control.Show()

            if paint_control:
                bmp = TakeScreenShot(control.GetScreenRect())
                dc.DrawBitmap(bmp, text_offset+1, ypos, True)

            controlW, controlH = control.GetSize()
            text_offset += controlW + 4

        # draw tab text
        rectx, recty, dummy = dc.GetMultiLineTextExtent(draw_text)
        dc.DrawLabel(draw_text, wx.Rect(text_offset, ypos, rectx, recty))

        # draw focus rectangle
        if page.active and wx.Window.FindFocus() == wnd:

            focusRect = wx.Rect(text_offset, ((tab_y + tab_height)/2 - (texty/2) + 1),
                                selected_textx, selected_texty)

            focusRect.Inflate(2, 2)
            # TODO:
            # This should be uncommented when DrawFocusRect will become
            # available in wxPython
            # wx.RendererNative.Get().DrawFocusRect(wnd, dc, focusRect, 0)

        out_button_rect = wx.Rect()
        # draw close button if necessary
        if close_button_state != AUI_BUTTON_STATE_HIDDEN:

            if page.active:
                bmp = self._active_close_bmp
            else:
                bmp = self._disabled_close_bmp

            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
                rect = wx.Rect(tab_x + tab_height - 2,
                               tab_y + (tab_height/2) - (bmp.GetHeight()/2) + 1,
                               close_button_width, tab_height - 1)
            else:
                rect = wx.Rect(tab_x + tab_width - close_button_width - 1,
                               tab_y + (tab_height/2) - (bmp.GetHeight()/2) + 1,
                               close_button_width, tab_height - 1)

            self.DrawButtons(dc, rect, bmp, wx.WHITE, close_button_state)
            out_button_rect = wx.Rect(*rect)

        out_tab_rect = wx.Rect(tab_x, tab_y, tab_width, tab_height)
        dc.DestroyClippingRegion()

        return out_tab_rect, out_button_rect, x_extent

    def DrawButtons(self, dc, _rect, bmp, bkcolour, button_state):
        """
        Convenience method to draw tab buttons.

        :param `dc`: a `wx.DC` device context;
        :param `_rect`: the tab rectangle;
        :param `bmp`: the tab bitmap;
        :param `bkcolour`: the tab background colour;
        :param `button_state`: the state of the tab button.
        """

        rect = wx.Rect(*_rect)

        # Nudge the button down-right by one pixel to suggest it is pressed.
        if button_state == AUI_BUTTON_STATE_PRESSED:
            rect.x += 1
            rect.y += 1

        if button_state in [AUI_BUTTON_STATE_HOVER, AUI_BUTTON_STATE_PRESSED]:
            dc.SetBrush(wx.Brush(StepColour(bkcolour, 120)))
            dc.SetPen(wx.Pen(StepColour(bkcolour, 75)))

            # draw the background behind the button (15x15 highlight square)
            dc.DrawRectangle(rect.x, rect.y, 15, 15)

        # draw the button itself
        dc.DrawBitmap(bmp, rect.x, rect.y, True)

    def GetIndentSize(self):
        """ Returns the tabs indent size. """

        return 0

    def GetTabSize(self, dc, wnd, caption, bitmap, active, close_button_state, control=None):
        """
        Returns the tab size for the given caption, bitmap and button state.

        :param `dc`: a `wx.DC` device context;
        :param `wnd`: a `wx.Window` instance object;
        :param `caption`: the tab text caption;
        :param `bitmap`: the bitmap displayed on the tab;
        :param `active`: whether the tab is selected or not;
        :param `close_button_state`: the state of the close button on the tab;
        :param `control`: a `wx.Window` instance inside a tab (or ``None``).
        """

        dc.SetFont(self._measuring_font)
        measured_textx, measured_texty, dummy = dc.GetMultiLineTextExtent(caption)

        tab_height = measured_texty + 4
        # The slanted left edge consumes roughly one tab_height of width.
        tab_width = measured_textx + tab_height + 5

        if close_button_state != AUI_BUTTON_STATE_HIDDEN:
            tab_width += self._active_close_bmp.GetWidth()

        if self._agwFlags & AUI_NB_TAB_FIXED_WIDTH:
            tab_width = self._fixed_tab_width

        if control is not None:
            controlW, controlH = control.GetSize()
            tab_width += controlW + 4

        # Tabs overlap by half the slanted edge, hence the reduced x extent.
        x_extent = tab_width - (tab_height/2) - 1

        return (tab_width, tab_height), x_extent

    def DrawButton(self, dc, wnd, in_rect, button, orientation):
        """
        Draws a button on the tab or on the tab area, depending on the button identifier.

        :param `dc`: a `wx.DC` device context;
        :param `wnd`: a `wx.Window` instance object;
        :param `in_rect`: rectangle the tab should be confined to;
        :param `button`: an instance of the button class;
        :param `orientation`: the tab orientation.
        """

        bitmap_id, button_state = button.id, button.cur_state

        # Pick the bitmap matching the button identifier and enabled state;
        # unknown identifiers fall back to the button's own bitmaps.
        if bitmap_id == AUI_BUTTON_CLOSE:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                bmp = self._disabled_close_bmp
            else:
                bmp = self._active_close_bmp

        elif bitmap_id == AUI_BUTTON_LEFT:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                bmp = self._disabled_left_bmp
            else:
                bmp = self._active_left_bmp

        elif bitmap_id == AUI_BUTTON_RIGHT:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                bmp = self._disabled_right_bmp
            else:
                bmp = self._active_right_bmp

        elif bitmap_id == AUI_BUTTON_WINDOWLIST:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                bmp = self._disabled_windowlist_bmp
            else:
                bmp = self._active_windowlist_bmp

        else:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                bmp = button.dis_bitmap
            else:
                bmp = button.bitmap

        if not bmp.IsOk():
            return

        rect = wx.Rect(*in_rect)

        # Anchor the button to the requested side, vertically centred.
        if orientation == wx.LEFT:

            rect.SetX(in_rect.x)
            rect.SetY(((in_rect.y + in_rect.height)/2) - (bmp.GetHeight()/2))
            rect.SetWidth(bmp.GetWidth())
            rect.SetHeight(bmp.GetHeight())

        else:

            rect = wx.Rect(in_rect.x + in_rect.width - bmp.GetWidth(),
                           ((in_rect.y + in_rect.height)/2) - (bmp.GetHeight()/2),
                           bmp.GetWidth(), bmp.GetHeight())

        self.DrawButtons(dc, rect, bmp, wx.WHITE, button_state)

        out_rect = wx.Rect(*rect)
        return out_rect

    def ShowDropDown(self, wnd, pages, active_idx):
        """
        Shows the drop-down window menu on the tab area.

        :param `wnd`: a `wx.Window` derived window instance;
        :param `pages`: the pages associated with the tabs;
        :param `active_idx`: the active tab index.

        :returns: the selected page index, or -1 if the menu was dismissed.
        """

        menuPopup = wx.Menu()
        useImages = self.GetAGWFlags() & AUI_NB_USE_IMAGES_DROPDOWN

        for i, page in enumerate(pages):

            if useImages:
                menuItem = wx.MenuItem(menuPopup, 1000+i, page.caption)
                if page.bitmap:
                    menuItem.SetBitmap(page.bitmap)

                menuPopup.AppendItem(menuItem)
            else:
                menuPopup.AppendCheckItem(1000+i, page.caption)

            menuPopup.Enable(1000+i, page.enabled)

        if active_idx != -1 and not useImages:
            menuPopup.Check(1000+active_idx, True)

        # find out where to put the popup menu of window
        # items. Subtract 100 for now to center the menu
        # a bit, until a better mechanism can be implemented
        pt = wx.GetMousePosition()
        pt = wnd.ScreenToClient(pt)

        if pt.x < 100:
            pt.x = 0
        else:
            pt.x -= 100

        # find out the screen coordinate at the bottom of the tab ctrl
        cli_rect = wnd.GetClientRect()
        pt.y = cli_rect.y + cli_rect.height

        cc = AuiCommandCapture()
        wnd.PushEventHandler(cc)
        wnd.PopupMenu(menuPopup, pt)
        command = cc.GetCommandId()
        wnd.PopEventHandler(True)

        # Menu item ids start at 1000, so anything lower means "cancelled".
        if command >= 1000:
            return command-1000

        return -1

    def GetBestTabCtrlSize(self, wnd, pages, required_bmp_size):
        """
        Returns the best tab control size.

        :param `wnd`: a `wx.Window` instance object;
        :param `pages`: the pages associated with the tabs;
        :param `required_bmp_size`: the size of the bitmap on the tabs.
        """

        dc = wx.ClientDC(wnd)
        dc.SetFont(self._measuring_font)

        # Use a representative string with ascenders and a descender so the
        # measured height covers typical captions.
        s, x_extent = self.GetTabSize(dc, wnd, "ABCDEFGHIj", wx.NullBitmap, True,
                                      AUI_BUTTON_STATE_HIDDEN, None)

        max_y = s[1]

        # Grow the height to fit any embedded controls and the tallest caption.
        for page in pages:
            if page.control:
                controlW, controlH = page.control.GetSize()
                max_y = max(max_y, controlH+4)

            textx, texty, dummy = dc.GetMultiLineTextExtent(page.caption)
            max_y = max(max_y, texty)

        return max_y + 3

    def SetNormalFont(self, font):
        """
        Sets the normal font for drawing tab labels.

        :param `font`: a `wx.Font` object.
        """

        self._normal_font = font

    def SetSelectedFont(self, font):
        """
        Sets the selected tab font for drawing tab labels.

        :param `font`: a `wx.Font` object.
        """

        self._selected_font = font

    def SetMeasuringFont(self, font):
        """
        Sets the font for calculating text measurements.

        :param `font`: a `wx.Font` object.
        """

        self._measuring_font = font

    def GetNormalFont(self):
        """ Returns the normal font for drawing tab labels. """

        return self._normal_font

    def GetSelectedFont(self):
        """ Returns the selected tab font for drawing tab labels. """

        return self._selected_font

    def GetMeasuringFont(self):
        """ Returns the font for calculating text measurements. """

        return self._measuring_font

    def SetCustomButton(self, bitmap_id, button_state, bmp):
        """
        Sets a custom bitmap for the close, left, right and window list
        buttons.

        :param `bitmap_id`: the button identifier;
        :param `button_state`: the button state;
        :param `bmp`: the custom bitmap to use for the button.
        """

        if bitmap_id == AUI_BUTTON_CLOSE:
            if button_state == AUI_BUTTON_STATE_NORMAL:
                # Setting the normal bitmap resets every other close-button
                # state to the same bitmap until overridden individually.
                self._active_close_bmp = bmp
                self._hover_close_bmp = self._active_close_bmp
                self._pressed_close_bmp = self._active_close_bmp
                self._disabled_close_bmp = self._active_close_bmp

            elif button_state == AUI_BUTTON_STATE_HOVER:
                self._hover_close_bmp = bmp
            elif button_state == AUI_BUTTON_STATE_PRESSED:
                self._pressed_close_bmp = bmp
            else:
                self._disabled_close_bmp = bmp

        elif bitmap_id == AUI_BUTTON_LEFT:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                self._disabled_left_bmp = bmp
            else:
                self._active_left_bmp = bmp

        elif bitmap_id == AUI_BUTTON_RIGHT:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                self._disabled_right_bmp = bmp
            else:
                self._active_right_bmp = bmp

        elif bitmap_id == AUI_BUTTON_WINDOWLIST:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                self._disabled_windowlist_bmp = bmp
            else:
                self._active_windowlist_bmp = bmp
class VC71TabArt(AuiDefaultTabArt):
    """ A class to draw tabs using the Visual Studio 2003 (VC71) style. """

    def __init__(self):
        """ Default class constructor. """

        AuiDefaultTabArt.__init__(self)

    def Clone(self):
        """ Clones the art object. """

        art = VC71TabArt()
        art.SetNormalFont(self.GetNormalFont())
        art.SetSelectedFont(self.GetSelectedFont())
        art.SetMeasuringFont(self.GetMeasuringFont())

        # Copy every remaining attribute from this instance onto the clone.
        art = CopyAttributes(art, self)
        return art

    def DrawTab(self, dc, wnd, page, in_rect, close_button_state, paint_control=False):
        """
        Draws a single tab.

        :param `dc`: a `wx.DC` device context;
        :param `wnd`: a `wx.Window` instance object;
        :param `page`: the tab control page associated with the tab;
        :param `in_rect`: rectangle the tab should be confined to;
        :param `close_button_state`: the state of the close button on the tab;
        :param `paint_control`: whether to draw the control inside a tab (if any) on a `wx.MemoryDC`.

        :returns: the tab rectangle, the close-button rectangle and the
         horizontal extent to advance by for the next tab.
        """

        # Visual studio 7.1 style
        # This code is based on the renderer included in FlatNotebook

        # figure out the size of the tab
        control = page.control
        tab_size, x_extent = self.GetTabSize(dc, wnd, page.caption, page.bitmap, page.active,
                                             close_button_state, control)

        tab_height = self._tab_ctrl_height - 3
        tab_width = tab_size[0]
        tab_x = in_rect.x
        tab_y = in_rect.y + in_rect.height - tab_height
        clip_width = tab_width

        # Make sure the tab never draws past the right edge of the tab area.
        if tab_x + clip_width > in_rect.x + in_rect.width - 4:
            clip_width = (in_rect.x + in_rect.width) - tab_x - 4

        dc.SetClippingRegion(tab_x, tab_y, clip_width + 1, tab_height - 3)
        agwFlags = self.GetAGWFlags()

        if agwFlags & AUI_NB_BOTTOM:
            tab_y -= 1

        # (cond and [a] or [b])[0] is the pre-2.5 conditional-expression idiom.
        dc.SetPen((page.active and [wx.Pen(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DHIGHLIGHT))] or \
                   [wx.Pen(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DSHADOW))])[0])
        dc.SetBrush((page.active and [wx.Brush(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DFACE))] or \
                     [wx.TRANSPARENT_BRUSH])[0])

        if page.active:

            # Active tab: filled rectangle with shadow lines on the right
            # (and, for bottom tabs, along the bottom edge).
            tabH = tab_height - 2
            dc.DrawRectangle(tab_x, tab_y, tab_width, tabH)

            rightLineY1 = (agwFlags & AUI_NB_BOTTOM and [vertical_border_padding - 2] or \
                           [vertical_border_padding - 1])[0]
            rightLineY2 = tabH + 3
            dc.SetPen(wx.Pen(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DSHADOW)))
            dc.DrawLine(tab_x + tab_width - 1, rightLineY1 + 1, tab_x + tab_width - 1, rightLineY2)

            if agwFlags & AUI_NB_BOTTOM:
                dc.DrawLine(tab_x + 1, rightLineY2 - 3 , tab_x + tab_width - 1, rightLineY2 - 3)

            dc.SetPen(wx.Pen(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DDKSHADOW)))
            dc.DrawLine(tab_x + tab_width, rightLineY1, tab_x + tab_width, rightLineY2)

            if agwFlags & AUI_NB_BOTTOM:
                dc.DrawLine(tab_x, rightLineY2 - 2, tab_x + tab_width, rightLineY2 - 2)

        else:

            # We dont draw a rectangle for non selected tabs, but only
            # vertical line on the right
            blackLineY1 = (agwFlags & AUI_NB_BOTTOM and [vertical_border_padding + 2] or \
                           [vertical_border_padding + 1])[0]
            blackLineY2 = tab_height - 5
            dc.DrawLine(tab_x + tab_width, blackLineY1, tab_x + tab_width, blackLineY2)

        border_points = [0, 0]

        # border_points[1] is the top and border_points[0] the bottom of the
        # drawable interior (reversed for bottom tabs).
        if agwFlags & AUI_NB_BOTTOM:

            border_points[0] = wx.Point(tab_x, tab_y)
            border_points[1] = wx.Point(tab_x, tab_y + tab_height - 6)

        else: # if (agwFlags & AUI_NB_TOP)

            border_points[0] = wx.Point(tab_x, tab_y + tab_height - 4)
            border_points[1] = wx.Point(tab_x, tab_y + 2)

        drawn_tab_yoff = border_points[1].y
        drawn_tab_height = border_points[0].y - border_points[1].y

        text_offset = tab_x + 8
        close_button_width = 0

        if close_button_state != AUI_BUTTON_STATE_HIDDEN:
            close_button_width = self._active_close_bmp.GetWidth()
            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
                text_offset += close_button_width - 5

        if not page.enabled:
            dc.SetTextForeground(wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT))
            pagebitmap = page.dis_bitmap
        else:
            dc.SetTextForeground(page.text_colour)
            pagebitmap = page.bitmap

        # Bottom tabs shift the contents down by one/two pixels.
        shift = 0
        if agwFlags & AUI_NB_BOTTOM:
            shift = (page.active and [1] or [2])[0]

        bitmap_offset = 0

        if pagebitmap.IsOk():

            bitmap_offset = tab_x + 8
            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT and close_button_width:
                bitmap_offset += close_button_width - 5

            # draw bitmap, vertically centred in the drawn tab interior
            dc.DrawBitmap(pagebitmap, bitmap_offset,
                          drawn_tab_yoff + (drawn_tab_height/2) - (pagebitmap.GetHeight()/2) + shift,
                          True)

            text_offset = bitmap_offset + pagebitmap.GetWidth()
            text_offset += 3 # bitmap padding

        else:

            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT == 0 or not close_button_width:
                text_offset = tab_x + 8

        # if the caption is empty, measure some temporary text
        caption = page.caption
        if caption == "":
            caption = "Xj"

        if page.active:
            dc.SetFont(self._selected_font)
            textx, texty, dummy = dc.GetMultiLineTextExtent(caption)
        else:
            dc.SetFont(self._normal_font)
            textx, texty, dummy = dc.GetMultiLineTextExtent(caption)

        draw_text = ChopText(dc, caption, tab_width - (text_offset-tab_x) - close_button_width)

        ypos = drawn_tab_yoff + (drawn_tab_height)/2 - (texty/2) - 1 + shift

        offset_focus = text_offset

        if control is not None:
            # Position (and, if needed, show) the control embedded in the tab;
            # when painting to a MemoryDC, blit a screenshot of it instead.
            if control.GetPosition() != wx.Point(text_offset+1, ypos):
                control.SetPosition(wx.Point(text_offset+1, ypos))

            if not control.IsShown():
                control.Show()

            if paint_control:
                bmp = TakeScreenShot(control.GetScreenRect())
                dc.DrawBitmap(bmp, text_offset+1, ypos, True)

            controlW, controlH = control.GetSize()
            text_offset += controlW + 4
            textx += controlW + 4

        # draw tab text
        rectx, recty, dummy = dc.GetMultiLineTextExtent(draw_text)
        dc.DrawLabel(draw_text, wx.Rect(text_offset, ypos, rectx, recty))

        out_button_rect = wx.Rect()

        # draw focus rectangle
        self.DrawFocusRectangle(dc, page, wnd, draw_text, offset_focus, bitmap_offset, drawn_tab_yoff+shift,
                                drawn_tab_height+shift, textx, texty)

        # draw 'x' on tab (if enabled)
        if close_button_state != AUI_BUTTON_STATE_HIDDEN:

            close_button_width = self._active_close_bmp.GetWidth()
            bmp = self._disabled_close_bmp

            if close_button_state == AUI_BUTTON_STATE_HOVER:
                bmp = self._hover_close_bmp
            elif close_button_state == AUI_BUTTON_STATE_PRESSED:
                bmp = self._pressed_close_bmp

            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
                rect = wx.Rect(tab_x + 4,
                               drawn_tab_yoff + (drawn_tab_height / 2) - (bmp.GetHeight() / 2) + shift,
                               close_button_width, tab_height)
            else:
                rect = wx.Rect(tab_x + tab_width - close_button_width - 3,
                               drawn_tab_yoff + (drawn_tab_height / 2) - (bmp.GetHeight() / 2) + shift,
                               close_button_width, tab_height)

            # Indent the button if it is pressed down:
            rect = IndentPressedBitmap(rect, close_button_state)
            dc.DrawBitmap(bmp, rect.x, rect.y, True)
            out_button_rect = rect

        out_tab_rect = wx.Rect(tab_x, tab_y, tab_width, tab_height)
        dc.DestroyClippingRegion()

        return out_tab_rect, out_button_rect, x_extent
class FF2TabArt(AuiDefaultTabArt):
""" A class to draw tabs using the Firefox 2 (FF2) style. """
    def __init__(self):
        """ Default class constructor. """

        # All state (fonts, colours, bitmaps) comes from the default art.
        AuiDefaultTabArt.__init__(self)
def Clone(self):
""" Clones the art object. """
art = FF2TabArt()
art.SetNormalFont(self.GetNormalFont())
art.SetSelectedFont(self.GetSelectedFont())
art.SetMeasuringFont(self.GetMeasuringFont())
art = CopyAttributes(art, self)
return art
def GetTabSize(self, dc, wnd, caption, bitmap, active, close_button_state, control):
"""
Returns the tab size for the given caption, bitmap and button state.
:param `dc`: a `wx.DC` device context;
:param `wnd`: a `wx.Window` instance object;
:param `caption`: the tab text caption;
:param `bitmap`: the bitmap displayed on the tab;
:param `active`: whether the tab is selected or not;
:param `close_button_state`: the state of the close button on the tab;
:param `control`: a `wx.Window` instance inside a tab (or ``None``).
"""
tab_size, x_extent = AuiDefaultTabArt.GetTabSize(self, dc, wnd, caption, bitmap,
active, close_button_state, control)
tab_width, tab_height = tab_size
# add some vertical padding
tab_height += 2
return (tab_width, tab_height), x_extent
    def DrawTab(self, dc, wnd, page, in_rect, close_button_state, paint_control=False):
        """
        Draws a single tab.

        :param `dc`: a `wx.DC` device context;
        :param `wnd`: a `wx.Window` instance object;
        :param `page`: the tab control page associated with the tab;
        :param `in_rect`: rectangle the tab should be confined to;
        :param `close_button_state`: the state of the close button on the tab;
        :param `paint_control`: whether to draw the control inside a tab (if any) on a `wx.MemoryDC`.

        :returns: the tab rectangle, the close-button rectangle and the
         horizontal extent to advance by for the next tab.
        """

        # Firefox 2 style

        control = page.control

        # figure out the size of the tab
        tab_size, x_extent = self.GetTabSize(dc, wnd, page.caption, page.bitmap,
                                             page.active, close_button_state, control)

        tab_height = self._tab_ctrl_height - 2
        tab_width = tab_size[0]
        tab_x = in_rect.x
        tab_y = in_rect.y + in_rect.height - tab_height
        clip_width = tab_width

        # Make sure the tab never draws past the right edge of the tab area.
        if tab_x + clip_width > in_rect.x + in_rect.width - 4:
            clip_width = (in_rect.x + in_rect.width) - tab_x - 4

        dc.SetClippingRegion(tab_x, tab_y, clip_width + 1, tab_height - 3)

        # Rounded-rectangle outline of the tab; inactive tabs are drawn one
        # pixel shorter (adjust) so the active tab stands out.
        tabPoints = [wx.Point() for i in xrange(7)]

        adjust = 0
        if not page.active:
            adjust = 1

        agwFlags = self.GetAGWFlags()

        tabPoints[0].x = tab_x + 3
        tabPoints[0].y = (agwFlags & AUI_NB_BOTTOM and [3] or [tab_height - 2])[0]

        tabPoints[1].x = tabPoints[0].x
        tabPoints[1].y = (agwFlags & AUI_NB_BOTTOM and [tab_height - (vertical_border_padding + 2) - adjust] or \
                          [(vertical_border_padding + 2) + adjust])[0]

        tabPoints[2].x = tabPoints[1].x+2
        tabPoints[2].y = (agwFlags & AUI_NB_BOTTOM and [tab_height - vertical_border_padding - adjust] or \
                          [vertical_border_padding + adjust])[0]

        tabPoints[3].x = tab_x + tab_width - 2
        tabPoints[3].y = tabPoints[2].y

        tabPoints[4].x = tabPoints[3].x + 2
        tabPoints[4].y = tabPoints[1].y

        tabPoints[5].x = tabPoints[4].x
        tabPoints[5].y = tabPoints[0].y

        tabPoints[6].x = tabPoints[0].x
        tabPoints[6].y = tabPoints[0].y

        # Gradient-fill the tab interior before outlining it.
        rr = wx.RectPP(tabPoints[2], tabPoints[5])
        self.DrawTabBackground(dc, rr, page.active, (agwFlags & AUI_NB_BOTTOM) == 0)

        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        dc.SetPen(wx.Pen(wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNSHADOW)))

        # Draw the tab as rounded rectangle
        dc.DrawPolygon(tabPoints)

        if page.active:
            # Erase the baseline under the active tab so it merges with the page.
            dc.DrawLine(tabPoints[0].x + 1, tabPoints[0].y, tabPoints[5].x , tabPoints[0].y)

        drawn_tab_yoff = tabPoints[1].y
        drawn_tab_height = tabPoints[0].y - tabPoints[2].y

        text_offset = tab_x + 8
        close_button_width = 0

        if close_button_state != AUI_BUTTON_STATE_HIDDEN:
            close_button_width = self._active_close_bmp.GetWidth()
            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
                text_offset += close_button_width - 4

        if not page.enabled:
            dc.SetTextForeground(wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT))
            pagebitmap = page.dis_bitmap
        else:
            dc.SetTextForeground(page.text_colour)
            pagebitmap = page.bitmap

        # Vertical shift of the tab contents: up one pixel for top tabs,
        # down two for bottom tabs.
        shift = -1
        if agwFlags & AUI_NB_BOTTOM:
            shift = 2

        bitmap_offset = 0

        if pagebitmap.IsOk():

            bitmap_offset = tab_x + 8
            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT and close_button_width:
                bitmap_offset += close_button_width - 4

            # draw bitmap, vertically centred in the drawn tab interior
            dc.DrawBitmap(pagebitmap, bitmap_offset,
                          drawn_tab_yoff + (drawn_tab_height/2) - (pagebitmap.GetHeight()/2) + shift,
                          True)

            text_offset = bitmap_offset + pagebitmap.GetWidth()
            text_offset += 3 # bitmap padding

        else:

            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT == 0 or not close_button_width:
                text_offset = tab_x + 8

        # if the caption is empty, measure some temporary text
        caption = page.caption
        if caption == "":
            caption = "Xj"

        if page.active:
            dc.SetFont(self._selected_font)
            textx, texty, dummy = dc.GetMultiLineTextExtent(caption)
        else:
            dc.SetFont(self._normal_font)
            textx, texty, dummy = dc.GetMultiLineTextExtent(caption)

        # chop text if necessary
        if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
            draw_text = ChopText(dc, caption, tab_width - (text_offset-tab_x) - close_button_width + 1)
        else:
            draw_text = ChopText(dc, caption, tab_width - (text_offset-tab_x) - close_button_width)

        ypos = drawn_tab_yoff + drawn_tab_height/2 - texty/2 - 1 + shift

        offset_focus = text_offset

        if control is not None:
            # Position (and, if needed, show) the control embedded in the tab;
            # when painting to a MemoryDC, blit a screenshot of it instead.
            if control.GetPosition() != wx.Point(text_offset+1, ypos):
                control.SetPosition(wx.Point(text_offset+1, ypos))

            if not control.IsShown():
                control.Show()

            if paint_control:
                bmp = TakeScreenShot(control.GetScreenRect())
                dc.DrawBitmap(bmp, text_offset+1, ypos, True)

            controlW, controlH = control.GetSize()
            text_offset += controlW + 4
            textx += controlW + 4

        # draw tab text
        rectx, recty, dummy = dc.GetMultiLineTextExtent(draw_text)
        dc.DrawLabel(draw_text, wx.Rect(text_offset, ypos, rectx, recty))

        # draw focus rectangle
        self.DrawFocusRectangle(dc, page, wnd, draw_text, offset_focus, bitmap_offset, drawn_tab_yoff+shift,
                                drawn_tab_height, textx, texty)

        out_button_rect = wx.Rect()
        # draw 'x' on tab (if enabled)
        if close_button_state != AUI_BUTTON_STATE_HIDDEN:

            close_button_width = self._active_close_bmp.GetWidth()
            bmp = self._disabled_close_bmp

            if close_button_state == AUI_BUTTON_STATE_HOVER:
                bmp = self._hover_close_bmp
            elif close_button_state == AUI_BUTTON_STATE_PRESSED:
                bmp = self._pressed_close_bmp

            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
                rect = wx.Rect(tab_x + 5,
                               drawn_tab_yoff + (drawn_tab_height / 2) - (bmp.GetHeight() / 2) + shift,
                               close_button_width, tab_height)
            else:
                rect = wx.Rect(tab_x + tab_width - close_button_width - 3,
                               drawn_tab_yoff + (drawn_tab_height / 2) - (bmp.GetHeight() / 2) + shift,
                               close_button_width, tab_height)

            # Indent the button if it is pressed down:
            rect = IndentPressedBitmap(rect, close_button_state)
            dc.DrawBitmap(bmp, rect.x, rect.y, True)
            out_button_rect = rect

        out_tab_rect = wx.Rect(tab_x, tab_y, tab_width, tab_height)
        dc.DestroyClippingRegion()

        return out_tab_rect, out_button_rect, x_extent
def DrawTabBackground(self, dc, rect, focus, upperTabs):
    """
    Draws the tab background for the Firefox 2 style.
    This is more consistent with L{FlatNotebook} than before.

    :param `dc`: a `wx.DC` device context;
    :param `rect`: rectangle the tab should be confined to;
    :param `focus`: whether the tab has focus or not;
    :param `upperTabs`: whether the style is ``AUI_NB_TOP`` or ``AUI_NB_BOTTOM``.
    """

    # NOTE: the original allocated a 9-element list of wx.Point objects
    # ("regPts") here but never used it; that dead allocation is removed.

    # Split the tab rectangle along a horizontal line whose height depends
    # on focus state and tab orientation; the two halves are then filled
    # with separate linear gradients.
    if focus:
        if upperTabs:
            leftPt = wx.Point(rect.x, rect.y + (rect.height / 10)*8)
            rightPt = wx.Point(rect.x + rect.width - 2, rect.y + (rect.height / 10)*8)
        else:
            leftPt = wx.Point(rect.x, rect.y + (rect.height / 10)*5)
            rightPt = wx.Point(rect.x + rect.width - 2, rect.y + (rect.height / 10)*5)
    else:
        leftPt = wx.Point(rect.x, rect.y + (rect.height / 2))
        rightPt = wx.Point(rect.x + rect.width - 2, rect.y + (rect.height / 2))

    # Define the top and bottom regions
    top = wx.RectPP(rect.GetTopLeft(), rightPt)
    bottom = wx.RectPP(leftPt, rect.GetBottomRight())

    topStartColour = wx.WHITE
    if not focus:
        topStartColour = LightColour(wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DFACE), 50)

    topEndColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DFACE)
    bottomStartColour = topEndColour
    bottomEndColour = topEndColour

    # In case we use bottom tabs, switch the gradient direction
    if upperTabs:
        if focus:
            dc.GradientFillLinear(top, topStartColour, topEndColour, wx.SOUTH)
            dc.GradientFillLinear(bottom, bottomStartColour, bottomEndColour, wx.SOUTH)
        else:
            dc.GradientFillLinear(top, topEndColour, topStartColour, wx.SOUTH)
            dc.GradientFillLinear(bottom, bottomStartColour, bottomEndColour, wx.SOUTH)
    else:
        if focus:
            dc.GradientFillLinear(bottom, topEndColour, bottomEndColour, wx.SOUTH)
            dc.GradientFillLinear(top, topStartColour, topStartColour, wx.SOUTH)
        else:
            dc.GradientFillLinear(bottom, bottomStartColour, bottomEndColour, wx.SOUTH)
            dc.GradientFillLinear(top, topEndColour, topStartColour, wx.SOUTH)

    dc.SetBrush(wx.TRANSPARENT_BRUSH)
class VC8TabArt(AuiDefaultTabArt):
    """ A class to draw tabs using the Visual Studio 2005 (VC8) style. """

    def __init__(self):
        """ Default class constructor. """

        AuiDefaultTabArt.__init__(self)


    def Clone(self):
        """ Clones the art object. """

        art = VC8TabArt()
        art.SetNormalFont(self.GetNormalFont())
        art.SetSelectedFont(self.GetSelectedFont())
        art.SetMeasuringFont(self.GetMeasuringFont())

        art = CopyAttributes(art, self)
        return art


    def SetSizingInfo(self, tab_ctrl_size, tab_count, minMaxTabWidth):
        """
        Sets the tab sizing information.

        :param `tab_ctrl_size`: the size of the tab control area;
        :param `tab_count`: the number of tabs;
        :param `minMaxTabWidth`: the minimum and maximum tab widths to be used
         when the ``AUI_NB_TAB_FIXED_WIDTH`` style is active.
        """

        AuiDefaultTabArt.SetSizingInfo(self, tab_ctrl_size, tab_count, minMaxTabWidth)

        # Clamp the computed fixed width to the caller-supplied bounds
        minTabWidth, maxTabWidth = minMaxTabWidth
        if minTabWidth > -1:
            self._fixed_tab_width = max(self._fixed_tab_width, minTabWidth)
        if maxTabWidth > -1:
            self._fixed_tab_width = min(self._fixed_tab_width, maxTabWidth)

        # Shrink slightly to account for the slanted VC8 tab edges
        self._fixed_tab_width -= 5


    def GetTabSize(self, dc, wnd, caption, bitmap, active, close_button_state, control=None):
        """
        Returns the tab size for the given caption, bitmap and button state.

        :param `dc`: a `wx.DC` device context;
        :param `wnd`: a `wx.Window` instance object;
        :param `caption`: the tab text caption;
        :param `bitmap`: the bitmap displayed on the tab;
        :param `active`: whether the tab is selected or not;
        :param `close_button_state`: the state of the close button on the tab;
        :param `control`: a `wx.Window` instance inside a tab (or ``None``).
        """

        tab_size, x_extent = AuiDefaultTabArt.GetTabSize(self, dc, wnd, caption, bitmap,
                                                         active, close_button_state, control)

        tab_width, tab_height = tab_size

        # add some padding
        tab_width += 10
        tab_height += 2

        return (tab_width, tab_height), x_extent


    def DrawTab(self, dc, wnd, page, in_rect, close_button_state, paint_control=False):
        """
        Draws a single tab.

        :param `dc`: a `wx.DC` device context;
        :param `wnd`: a `wx.Window` instance object;
        :param `page`: the tab control page associated with the tab;
        :param `in_rect`: rectangle the tab should be confined to;
        :param `close_button_state`: the state of the close button on the tab;
        :param `paint_control`: whether to draw the control inside a tab (if any) on a `wx.MemoryDC`.
        """

        # Visual Studio 8 style
        control = page.control

        # figure out the size of the tab
        tab_size, x_extent = self.GetTabSize(dc, wnd, page.caption, page.bitmap,
                                             page.active, close_button_state, control)

        tab_height = self._tab_ctrl_height - 1
        tab_width = tab_size[0]
        tab_x = in_rect.x
        tab_y = in_rect.y + in_rect.height - tab_height

        # keep the drawing clipped inside the tab control area
        clip_width = tab_width + 3
        if tab_x + clip_width > in_rect.x + in_rect.width - 4:
            clip_width = (in_rect.x + in_rect.width) - tab_x - 4

        tabPoints = [wx.Point() for i in xrange(8)]

        # If we draw the first tab or the active tab,
        # we draw a full tab, else we draw a truncated tab
        #
        #             X(2)                X(3)
        #        X(1)                        X(4)
        #
        #                                           X(5)
        #
        # X(0),(7)                                  X(6)
        #
        #

        # inactive tabs are nudged by one pixel so the active tab stands out
        adjust = 0
        if not page.active:
            adjust = 1

        # NOTE: the `cond and [a] or [b]` idiom below is a Python-2-era
        # conditional expression; [0] extracts the chosen value.
        agwFlags = self.GetAGWFlags()
        tabPoints[0].x = (agwFlags & AUI_NB_BOTTOM and [tab_x] or [tab_x + adjust])[0]
        tabPoints[0].y = (agwFlags & AUI_NB_BOTTOM and [2] or [tab_height - 3])[0]

        tabPoints[1].x = tabPoints[0].x + tab_height - vertical_border_padding - 3 - adjust
        tabPoints[1].y = (agwFlags & AUI_NB_BOTTOM and [tab_height - (vertical_border_padding+2)] or \
                          [(vertical_border_padding+2)])[0]

        tabPoints[2].x = tabPoints[1].x + 4
        tabPoints[2].y = (agwFlags & AUI_NB_BOTTOM and [tab_height - vertical_border_padding] or \
                          [vertical_border_padding])[0]

        tabPoints[3].x = tabPoints[2].x + tab_width - tab_height + vertical_border_padding
        tabPoints[3].y = (agwFlags & AUI_NB_BOTTOM and [tab_height - vertical_border_padding] or \
                          [vertical_border_padding])[0]

        tabPoints[4].x = tabPoints[3].x + 1
        tabPoints[4].y = (agwFlags & AUI_NB_BOTTOM and [tabPoints[3].y - 1] or [tabPoints[3].y + 1])[0]

        tabPoints[5].x = tabPoints[4].x + 1
        tabPoints[5].y = (agwFlags & AUI_NB_BOTTOM and [(tabPoints[4].y - 1)] or [tabPoints[4].y + 1])[0]

        tabPoints[6].x = tabPoints[2].x + tab_width - tab_height + 2 + vertical_border_padding
        tabPoints[6].y = tabPoints[0].y

        tabPoints[7].x = tabPoints[0].x
        tabPoints[7].y = tabPoints[0].y

        self.FillVC8GradientColour(dc, tabPoints, page.active)

        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        dc.SetPen(wx.Pen(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNSHADOW)))

        dc.DrawPolygon(tabPoints)

        if page.active:
            # Delete the bottom line (or the upper one, incase we use wxBOTTOM)
            dc.SetPen(wx.WHITE_PEN)
            dc.DrawLine(tabPoints[0].x, tabPoints[0].y, tabPoints[6].x, tabPoints[6].y)

        dc.SetClippingRegion(tab_x, tab_y, clip_width + 2, tab_height - 3)

        drawn_tab_yoff = tabPoints[1].y
        drawn_tab_height = tabPoints[0].y - tabPoints[2].y

        text_offset = tab_x + 20
        close_button_width = 0

        if close_button_state != AUI_BUTTON_STATE_HIDDEN:
            close_button_width = self._active_close_bmp.GetWidth()
            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
                text_offset += close_button_width

        if not page.enabled:
            dc.SetTextForeground(wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT))
            pagebitmap = page.dis_bitmap
        else:
            dc.SetTextForeground(page.text_colour)
            pagebitmap = page.bitmap

        # bottom tabs are shifted down by one or two pixels
        shift = 0
        if agwFlags & AUI_NB_BOTTOM:
            shift = (page.active and [1] or [2])[0]

        bitmap_offset = 0
        if pagebitmap.IsOk():
            bitmap_offset = tab_x + 20
            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT and close_button_width:
                bitmap_offset += close_button_width

            # draw bitmap
            dc.DrawBitmap(pagebitmap, bitmap_offset,
                          drawn_tab_yoff + (drawn_tab_height/2) - (pagebitmap.GetHeight()/2) + shift,
                          True)

            text_offset = bitmap_offset + pagebitmap.GetWidth()
            text_offset += 3  # bitmap padding

        else:
            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT == 0 or not close_button_width:
                text_offset = tab_x + tab_height

        # if the caption is empty, measure some temporary text
        caption = page.caption
        if caption == "":
            caption = "Xj"

        if page.active:
            dc.SetFont(self._selected_font)
            textx, texty, dummy = dc.GetMultiLineTextExtent(caption)
        else:
            dc.SetFont(self._normal_font)
            textx, texty, dummy = dc.GetMultiLineTextExtent(caption)

        # truncate the caption so it fits in the remaining tab width
        if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
            draw_text = ChopText(dc, caption, tab_width - (text_offset-tab_x))
        else:
            draw_text = ChopText(dc, caption, tab_width - (text_offset-tab_x) - close_button_width)

        ypos = drawn_tab_yoff + drawn_tab_height/2 - texty/2 - 1 + shift

        offset_focus = text_offset

        if control is not None:
            # position/show the in-tab control and account for its width
            if control.GetPosition() != wx.Point(text_offset+1, ypos):
                control.SetPosition(wx.Point(text_offset+1, ypos))

            if not control.IsShown():
                control.Show()

            if paint_control:
                bmp = TakeScreenShot(control.GetScreenRect())
                dc.DrawBitmap(bmp, text_offset+1, ypos, True)

            controlW, controlH = control.GetSize()
            text_offset += controlW + 4
            textx += controlW + 4

        # draw tab text
        rectx, recty, dummy = dc.GetMultiLineTextExtent(draw_text)
        dc.DrawLabel(draw_text, wx.Rect(text_offset, ypos, rectx, recty))

        # draw focus rectangle
        self.DrawFocusRectangle(dc, page, wnd, draw_text, offset_focus, bitmap_offset, drawn_tab_yoff+shift,
                                drawn_tab_height+shift, textx, texty)

        out_button_rect = wx.Rect()

        # draw 'x' on tab (if enabled)
        if close_button_state != AUI_BUTTON_STATE_HIDDEN:

            close_button_width = self._active_close_bmp.GetWidth()
            bmp = self._disabled_close_bmp

            if close_button_state == AUI_BUTTON_STATE_HOVER:
                bmp = self._hover_close_bmp
            elif close_button_state == AUI_BUTTON_STATE_PRESSED:
                bmp = self._pressed_close_bmp

            if page.active:
                xpos = tab_x + tab_width - close_button_width + 3
            else:
                xpos = tab_x + tab_width - close_button_width - 5

            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
                rect = wx.Rect(tab_x + 20,
                               drawn_tab_yoff + (drawn_tab_height / 2) - (bmp.GetHeight() / 2) + shift,
                               close_button_width, tab_height)
            else:
                rect = wx.Rect(xpos,
                               drawn_tab_yoff + (drawn_tab_height / 2) - (bmp.GetHeight() / 2) + shift,
                               close_button_width, tab_height)

            # Indent the button if it is pressed down:
            rect = IndentPressedBitmap(rect, close_button_state)
            dc.DrawBitmap(bmp, rect.x, rect.y, True)
            out_button_rect = rect

        # NOTE(review): the hit rectangle uses x_extent (not tab_width),
        # presumably because VC8 tabs visually overlap — confirm against
        # the tab container's hit-testing logic.
        out_tab_rect = wx.Rect(tab_x, tab_y, x_extent, tab_height)

        dc.DestroyClippingRegion()

        return out_tab_rect, out_button_rect, x_extent


    def FillVC8GradientColour(self, dc, tabPoints, active):
        """
        Fills the tab with the Visual Studio 2005 gradient background.

        :param `dc`: a `wx.DC` device context;
        :param `tabPoints`: a list of `wx.Point` objects describing the tab shape;
        :param `active`: whether the tab is selected or not.
        """

        # bounding box of the tab polygon
        xList = [pt.x for pt in tabPoints]
        yList = [pt.y for pt in tabPoints]

        minx, maxx = min(xList), max(xList)
        miny, maxy = min(yList), max(yList)

        rect = wx.Rect(minx, maxy, maxx-minx, miny-maxy+1)
        region = wx.RegionFromPoints(tabPoints)

        # exclude the close-button area from the gradient fill
        if self._buttonRect.width > 0:
            buttonRegion = wx.Region(*self._buttonRect)
            region.XorRegion(buttonRegion)

        dc.SetClippingRegionAsRegion(region)

        if active:
            bottom_colour = top_colour = wx.WHITE
        else:
            bottom_colour = StepColour(self._base_colour, 90)
            top_colour = StepColour(self._base_colour, 170)

        dc.GradientFillLinear(rect, top_colour, bottom_colour, wx.SOUTH)
        dc.DestroyClippingRegion()
class ChromeTabArt(AuiDefaultTabArt):
    """
    A class to draw tabs using the Google Chrome browser style.
    It uses custom bitmap to render the tabs, so that the look and feel is as close
    as possible to the Chrome style.
    """

    def __init__(self):
        """ Default class constructor. """

        AuiDefaultTabArt.__init__(self)

        self.SetBitmaps(mirror=False)

        # replace the default close-button bitmaps with the Chrome-style ones
        closeBmp = tab_close.GetBitmap()
        closeHBmp = tab_close_h.GetBitmap()
        closePBmp = tab_close_p.GetBitmap()

        self.SetCustomButton(AUI_BUTTON_CLOSE, AUI_BUTTON_STATE_NORMAL, closeBmp)
        self.SetCustomButton(AUI_BUTTON_CLOSE, AUI_BUTTON_STATE_HOVER, closeHBmp)
        self.SetCustomButton(AUI_BUTTON_CLOSE, AUI_BUTTON_STATE_PRESSED, closePBmp)


    def SetAGWFlags(self, agwFlags):
        """
        Sets the tab art flags.

        :param `agwFlags`: a combination of the following values:

         ==================================== ==================================
         Flag name                            Description
         ==================================== ==================================
         ``AUI_NB_TOP``                       With this style, tabs are drawn along the top of the notebook
         ``AUI_NB_LEFT``                      With this style, tabs are drawn along the left of the notebook. Not implemented yet.
         ``AUI_NB_RIGHT``                     With this style, tabs are drawn along the right of the notebook. Not implemented yet.
         ``AUI_NB_BOTTOM``                    With this style, tabs are drawn along the bottom of the notebook.
         ``AUI_NB_TAB_SPLIT``                 Allows the tab control to be split by dragging a tab
         ``AUI_NB_TAB_MOVE``                  Allows a tab to be moved horizontally by dragging
         ``AUI_NB_TAB_EXTERNAL_MOVE``         Allows a tab to be moved to another tab control
         ``AUI_NB_TAB_FIXED_WIDTH``           With this style, all tabs have the same width
         ``AUI_NB_SCROLL_BUTTONS``            With this style, left and right scroll buttons are displayed
         ``AUI_NB_WINDOWLIST_BUTTON``         With this style, a drop-down list of windows is available
         ``AUI_NB_CLOSE_BUTTON``              With this style, a close button is available on the tab bar
         ``AUI_NB_CLOSE_ON_ACTIVE_TAB``       With this style, a close button is available on the active tab
         ``AUI_NB_CLOSE_ON_ALL_TABS``         With this style, a close button is available on all tabs
         ``AUI_NB_MIDDLE_CLICK_CLOSE``        Allows to close AuiNotebook tabs by mouse middle button click
         ``AUI_NB_SUB_NOTEBOOK``              This style is used by AuiManager to create automatic AuiNotebooks
         ``AUI_NB_HIDE_ON_SINGLE_TAB``        Hides the tab window if only one tab is present
         ``AUI_NB_SMART_TABS``                Use Smart Tabbing, like ``Alt``+``Tab`` on Windows
         ``AUI_NB_USE_IMAGES_DROPDOWN``       Uses images on dropdown window list menu instead of check items
         ``AUI_NB_CLOSE_ON_TAB_LEFT``         Draws the tab close button on the left instead of on the right (a la Camino browser)
         ``AUI_NB_TAB_FLOAT``                 Allows the floating of single tabs. Known limitation: when the notebook is more or less full screen, tabs cannot be dragged far enough outside of the notebook to become floating pages
         ``AUI_NB_DRAW_DND_TAB``              Draws an image representation of a tab while dragging (on by default)
         ==================================== ==================================

        :note: Overridden from L{AuiDefaultTabArt}.
        """

        # re-mirror the tab bitmaps if the orientation changes
        if agwFlags & AUI_NB_TOP:
            self.SetBitmaps(mirror=False)
        elif agwFlags & AUI_NB_BOTTOM:
            self.SetBitmaps(mirror=True)

        AuiDefaultTabArt.SetAGWFlags(self, agwFlags)


    def SetBitmaps(self, mirror):
        """
        Assigns the tab custom bitmaps

        :param `mirror`: whether to vertically mirror the bitmap or not.
        """

        bmps = [tab_active_left.GetBitmap(), tab_active_center.GetBitmap(),
                tab_active_right.GetBitmap(), tab_inactive_left.GetBitmap(),
                tab_inactive_center.GetBitmap(), tab_inactive_right.GetBitmap()]

        if mirror:
            # flip each bitmap for bottom-oriented tabs
            for indx, bmp in enumerate(bmps):
                img = bmp.ConvertToImage()
                img = img.Mirror(horizontally=False)
                bmps[indx] = img.ConvertToBitmap()

        self._leftActiveBmp = bmps[0]
        self._centerActiveBmp = bmps[1]
        self._rightActiveBmp = bmps[2]
        self._leftInactiveBmp = bmps[3]
        self._centerInactiveBmp = bmps[4]
        self._rightInactiveBmp = bmps[5]


    def Clone(self):
        """ Clones the art object. """

        art = ChromeTabArt()
        art.SetNormalFont(self.GetNormalFont())
        art.SetSelectedFont(self.GetSelectedFont())
        art.SetMeasuringFont(self.GetMeasuringFont())

        art = CopyAttributes(art, self)
        return art


    def SetSizingInfo(self, tab_ctrl_size, tab_count, minMaxTabWidth):
        """
        Sets the tab sizing information.

        :param `tab_ctrl_size`: the size of the tab control area;
        :param `tab_count`: the number of tabs;
        :param `minMaxTabWidth`: the minimum and maximum tab widths to be used
         when the ``AUI_NB_TAB_FIXED_WIDTH`` style is active.
        """

        AuiDefaultTabArt.SetSizingInfo(self, tab_ctrl_size, tab_count, minMaxTabWidth)

        # Clamp the computed fixed width to the caller-supplied bounds
        minTabWidth, maxTabWidth = minMaxTabWidth
        if minTabWidth > -1:
            self._fixed_tab_width = max(self._fixed_tab_width, minTabWidth)
        if maxTabWidth > -1:
            self._fixed_tab_width = min(self._fixed_tab_width, maxTabWidth)

        self._fixed_tab_width -= 5


    def GetTabSize(self, dc, wnd, caption, bitmap, active, close_button_state, control=None):
        """
        Returns the tab size for the given caption, bitmap and button state.

        :param `dc`: a `wx.DC` device context;
        :param `wnd`: a `wx.Window` instance object;
        :param `caption`: the tab text caption;
        :param `bitmap`: the bitmap displayed on the tab;
        :param `active`: whether the tab is selected or not;
        :param `close_button_state`: the state of the close button on the tab;
        :param `control`: a `wx.Window` instance inside a tab (or ``None``).
        """

        tab_size, x_extent = AuiDefaultTabArt.GetTabSize(self, dc, wnd, caption, bitmap,
                                                         active, close_button_state, control)

        tab_width, tab_height = tab_size

        # add some padding; the tab must be at least as tall as the
        # center bitmap used to render it
        tab_width += self._leftActiveBmp.GetWidth()
        tab_height += 2

        tab_height = max(tab_height, self._centerActiveBmp.GetHeight())

        return (tab_width, tab_height), x_extent


    def DrawTab(self, dc, wnd, page, in_rect, close_button_state, paint_control=False):
        """
        Draws a single tab.

        :param `dc`: a `wx.DC` device context;
        :param `wnd`: a `wx.Window` instance object;
        :param `page`: the tab control page associated with the tab;
        :param `in_rect`: rectangle the tab should be confined to;
        :param `close_button_state`: the state of the close button on the tab;
        :param `paint_control`: whether to draw the control inside a tab (if any) on a `wx.MemoryDC`.
        """

        # Chrome tab style
        control = page.control

        # figure out the size of the tab
        tab_size, x_extent = self.GetTabSize(dc, wnd, page.caption, page.bitmap, page.active,
                                             close_button_state, control)

        agwFlags = self.GetAGWFlags()

        tab_height = self._tab_ctrl_height - 1
        tab_width = tab_size[0]
        tab_x = in_rect.x
        tab_y = in_rect.y + in_rect.height - tab_height

        # keep the drawing clipped inside the tab control area
        clip_width = tab_width
        if tab_x + clip_width > in_rect.x + in_rect.width - 4:
            clip_width = (in_rect.x + in_rect.width) - tab_x - 4

        dc.SetClippingRegion(tab_x, tab_y, clip_width + 1, tab_height - 3)
        drawn_tab_yoff = 1

        if page.active:
            left = self._leftActiveBmp
            center = self._centerActiveBmp
            right = self._rightActiveBmp
        else:
            left = self._leftInactiveBmp
            center = self._centerInactiveBmp
            right = self._rightInactiveBmp

        dc.DrawBitmap(left, tab_x, tab_y)
        leftw = left.GetWidth()
        centerw = center.GetWidth()
        rightw = right.GetWidth()

        # tile the center bitmap across the tab, then cap with the right edge
        available = tab_x + tab_width - rightw
        posx = tab_x + leftw

        while 1:
            if posx >= available:
                break
            dc.DrawBitmap(center, posx, tab_y)
            posx += centerw

        dc.DrawBitmap(right, posx, tab_y)

        drawn_tab_height = center.GetHeight()
        text_offset = tab_x + leftw

        close_button_width = 0
        if close_button_state != AUI_BUTTON_STATE_HIDDEN:
            close_button_width = self._active_close_bmp.GetWidth()
            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
                text_offset += close_button_width

        if not page.enabled:
            dc.SetTextForeground(wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT))
            pagebitmap = page.dis_bitmap
        else:
            dc.SetTextForeground(page.text_colour)
            pagebitmap = page.bitmap

        bitmap_offset = 0
        if pagebitmap.IsOk():
            bitmap_offset = tab_x + leftw
            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT and close_button_width:
                bitmap_offset += close_button_width

            # draw bitmap
            dc.DrawBitmap(pagebitmap, bitmap_offset,
                          drawn_tab_yoff + (drawn_tab_height/2) - (pagebitmap.GetHeight()/2),
                          True)

            text_offset = bitmap_offset + pagebitmap.GetWidth()
            text_offset += 3  # bitmap padding

        else:
            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT == 0 or not close_button_width:
                text_offset = tab_x + leftw

        # if the caption is empty, measure some temporary text
        caption = page.caption
        if caption == "":
            caption = "Xj"

        if page.active:
            dc.SetFont(self._selected_font)
            textx, texty, dummy = dc.GetMultiLineTextExtent(caption)
        else:
            dc.SetFont(self._normal_font)
            textx, texty, dummy = dc.GetMultiLineTextExtent(caption)

        # truncate the caption so it fits in the remaining tab width
        if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
            draw_text = ChopText(dc, caption, tab_width - (text_offset-tab_x) - leftw)
        else:
            draw_text = ChopText(dc, caption, tab_width - (text_offset-tab_x) - close_button_width - leftw)

        ypos = drawn_tab_yoff + drawn_tab_height/2 - texty/2 - 1

        if control is not None:
            # position/show the in-tab control and account for its width
            if control.GetPosition() != wx.Point(text_offset+1, ypos):
                control.SetPosition(wx.Point(text_offset+1, ypos))

            if not control.IsShown():
                control.Show()

            if paint_control:
                bmp = TakeScreenShot(control.GetScreenRect())
                dc.DrawBitmap(bmp, text_offset+1, ypos, True)

            controlW, controlH = control.GetSize()
            text_offset += controlW + 4

        # draw tab text
        rectx, recty, dummy = dc.GetMultiLineTextExtent(draw_text)
        dc.DrawLabel(draw_text, wx.Rect(text_offset, ypos, rectx, recty))

        out_button_rect = wx.Rect()

        # draw 'x' on tab (if enabled)
        if close_button_state != AUI_BUTTON_STATE_HIDDEN:

            close_button_width = self._active_close_bmp.GetWidth()
            bmp = self._disabled_close_bmp

            if close_button_state == AUI_BUTTON_STATE_HOVER:
                bmp = self._hover_close_bmp
            elif close_button_state == AUI_BUTTON_STATE_PRESSED:
                bmp = self._pressed_close_bmp

            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
                rect = wx.Rect(tab_x + leftw - 2,
                               drawn_tab_yoff + (drawn_tab_height / 2) - (bmp.GetHeight() / 2) + 1,
                               close_button_width, tab_height)
            else:
                rect = wx.Rect(tab_x + tab_width - close_button_width - rightw + 2,
                               drawn_tab_yoff + (drawn_tab_height / 2) - (bmp.GetHeight() / 2) + 1,
                               close_button_width, tab_height)

            if agwFlags & AUI_NB_BOTTOM:
                rect.y -= 1

            # Indent the button if it is pressed down:
            rect = IndentPressedBitmap(rect, close_button_state)
            dc.DrawBitmap(bmp, rect.x, rect.y, True)
            out_button_rect = rect

        out_tab_rect = wx.Rect(tab_x, tab_y, tab_width, tab_height)
        dc.DestroyClippingRegion()

        return out_tab_rect, out_button_rect, x_extent
| 38.284826
| 229
| 0.586855
| 12,829
| 104,709
| 4.560683
| 0.059241
| 0.030081
| 0.015775
| 0.013588
| 0.806902
| 0.790169
| 0.767523
| 0.747885
| 0.722179
| 0.71001
| 0
| 0.012789
| 0.324203
| 104,709
| 2,734
| 230
| 38.29883
| 0.814053
| 0.256052
| 0
| 0.696842
| 0
| 0
| 0.001801
| 0.000336
| 0
| 0
| 0
| 0.001097
| 0
| 1
| 0.044912
| false
| 0
| 0.004211
| 0
| 0.082807
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
86be56f897f1e7c214e501c14635a5d21cba6f61
| 38
|
py
|
Python
|
hubic/__init__.py
|
lduchesne/python-openstacksdk-hubic
|
25e752f847613bb7e068c05e094a8abadaa7925a
|
[
"Apache-2.0"
] | 1
|
2016-01-02T00:39:45.000Z
|
2016-01-02T00:39:45.000Z
|
hubic/__init__.py
|
lduchesne/python-openstacksdk-hubic
|
25e752f847613bb7e068c05e094a8abadaa7925a
|
[
"Apache-2.0"
] | null | null | null |
hubic/__init__.py
|
lduchesne/python-openstacksdk-hubic
|
25e752f847613bb7e068c05e094a8abadaa7925a
|
[
"Apache-2.0"
] | null | null | null |
from .hubic import HubiCAuthenticator
| 19
| 37
| 0.868421
| 4
| 38
| 8.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.970588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
812bcd7f11391cee4cc58a41251065439a3a050c
| 2,359
|
py
|
Python
|
rgd/geodata/tests/test_filters.py
|
venkatabhishek/ResonantGeoData
|
4e946e25c194874c22f4ba2ab49d6f0cf803e673
|
[
"Apache-2.0"
] | null | null | null |
rgd/geodata/tests/test_filters.py
|
venkatabhishek/ResonantGeoData
|
4e946e25c194874c22f4ba2ab49d6f0cf803e673
|
[
"Apache-2.0"
] | null | null | null |
rgd/geodata/tests/test_filters.py
|
venkatabhishek/ResonantGeoData
|
4e946e25c194874c22f4ba2ab49d6f0cf803e673
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from rgd.geodata import models
from rgd.geodata.filters import RasterMetaEntryFilter, SpatialEntryFilter
@pytest.mark.django_db(transaction=True)
def test_q_distance(sample_raster_a, sample_raster_b):
    """Filtering by each raster's own outline returns both entries, nearest first."""
    assert models.SpatialEntry.objects.count() == 2
    # Make sure all are returned sorted by distance: querying by a raster's
    # own outline should place that raster first.
    for raster in (sample_raster_a, sample_raster_b):
        fs = SpatialEntryFilter(
            data={
                'q': f'SRID=4326;{raster.outline.wkt}',
            }
        )
        assert fs.is_valid()
        queryset = fs.filter_queryset(models.SpatialEntry.objects.all())
        assert queryset.count() == 2
        assert queryset.first().spatial_id == raster.spatial_id
@pytest.mark.django_db(transaction=True)
def test_raster_intersects(sample_raster_a, sample_raster_b):
    """The ``intersects`` predicate restricts results to overlapping rasters."""
    assert models.SpatialEntry.objects.count() == 2
    query = {
        'q': f'SRID=4326;{sample_raster_a.outline.wkt}',
        'predicate': 'intersects',
    }
    fs = SpatialEntryFilter(data=query)
    assert fs.is_valid()
    matched = fs.filter_queryset(models.RasterMetaEntry.objects.all())
    assert matched.count() == 1
@pytest.mark.django_db(transaction=True)
def test_raster_num_bands(sample_raster_b, sample_raster_c):
    """``num_bands_max`` keeps only rasters with few enough bands."""
    # b has many bands and c has 1 band
    assert models.SpatialEntry.objects.count() == 2
    query = {
        'num_bands_max': 2,
    }
    fs = RasterMetaEntryFilter(data=query)
    assert fs.is_valid()
    matched = fs.filter_queryset(models.RasterMetaEntry.objects.all())
    assert matched.count() == 1
@pytest.mark.django_db(transaction=True)
def test_geojson_intersects(sample_raster_a, sample_raster_b):
    """A GeoJSON geometry (no SRID prefix) also works with ``intersects``."""
    assert models.SpatialEntry.objects.count() == 2
    query = {
        'q': f'{sample_raster_a.outline.geojson}',
        'predicate': 'intersects',
    }
    fs = SpatialEntryFilter(data=query)
    assert fs.is_valid()
    matched = fs.filter_queryset(models.SpatialEntry.objects.all())
    assert matched.count() == 1
    assert matched.first().spatial_id == sample_raster_a.spatial_id
| 32.763889
| 73
| 0.684612
| 287
| 2,359
| 5.414634
| 0.216028
| 0.11583
| 0.066924
| 0.070785
| 0.812098
| 0.812098
| 0.811454
| 0.781853
| 0.756113
| 0.688546
| 0
| 0.012189
| 0.200085
| 2,359
| 71
| 74
| 33.225352
| 0.811341
| 0.033489
| 0
| 0.633333
| 0
| 0
| 0.090031
| 0.065876
| 0
| 0
| 0
| 0
| 0.283333
| 1
| 0.066667
| false
| 0
| 0.05
| 0
| 0.116667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
812d90739045fef450ebc3f7364c47bcb7fb3100
| 190
|
py
|
Python
|
blog/app/admin.py
|
shazlycode/testsite-blog
|
ba1373aecb9a9e8e1d14520663c848e6e9c85f31
|
[
"bzip2-1.0.6"
] | 1
|
2019-09-24T14:05:13.000Z
|
2019-09-24T14:05:13.000Z
|
blog/app/admin.py
|
shazlycode/testsite-blog
|
ba1373aecb9a9e8e1d14520663c848e6e9c85f31
|
[
"bzip2-1.0.6"
] | null | null | null |
blog/app/admin.py
|
shazlycode/testsite-blog
|
ba1373aecb9a9e8e1d14520663c848e6e9c85f31
|
[
"bzip2-1.0.6"
] | null | null | null |
from django.contrib import admin
from app import models
# Register your models here.
# Register each blog model with the default admin site, in the same order
# as the original explicit calls.
for model in (models.Post, models.Comment, models.Profile):
    admin.site.register(model)
| 27.142857
| 35
| 0.821053
| 28
| 190
| 5.571429
| 0.5
| 0.173077
| 0.326923
| 0.442308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084211
| 190
| 6
| 36
| 31.666667
| 0.896552
| 0.136842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
81382a72d938c3bc6d980116cded7afe4844c34a
| 18,453
|
py
|
Python
|
tests/test_datetime.py
|
ActivisionGameScience/assertpy
|
c0989de171bcf3e21dbad9415ff9d3b8f5fe78fc
|
[
"BSD-3-Clause"
] | 246
|
2015-01-14T01:40:03.000Z
|
2021-08-03T02:50:50.000Z
|
tests/test_datetime.py
|
ActivisionGameScience/assertpy
|
c0989de171bcf3e21dbad9415ff9d3b8f5fe78fc
|
[
"BSD-3-Clause"
] | 98
|
2015-01-01T14:28:55.000Z
|
2019-11-14T21:36:18.000Z
|
tests/test_datetime.py
|
ActivisionGameScience/assertpy
|
c0989de171bcf3e21dbad9415ff9d3b8f5fe78fc
|
[
"BSD-3-Clause"
] | 54
|
2015-01-14T01:42:10.000Z
|
2019-11-18T10:04:42.000Z
|
# Copyright (c) 2015-2019, Activision Publishing, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
from assertpy import assert_that,fail
d1 = datetime.datetime.today()
def test_is_before():
    """is_before() passes when val is earlier than the given datetime."""
    d2 = datetime.datetime.today()
    assert_that(d1).is_before(d2)


def test_is_before_failure():
    """is_before() raises AssertionError with a formatted message on failure."""
    try:
        d2 = datetime.datetime.today()
        assert_that(d2).is_before(d1)
        fail('should have raised error')
    except AssertionError as ex:
        # check the error message, not just that an error occurred
        assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be before <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')


def test_is_before_bad_val_type_failure():
    """is_before() raises TypeError when val is not a datetime."""
    try:
        assert_that(123).is_before(123)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('val must be datetime, but was type <int>')


def test_is_before_bad_arg_type_failure():
    """is_before() raises TypeError when its argument is not a datetime."""
    try:
        assert_that(d1).is_before(123)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('given arg must be datetime, but was type <int>')
def test_is_after():
    """is_after() passes when val is later than the given datetime."""
    d2 = datetime.datetime.today()
    assert_that(d2).is_after(d1)


def test_is_after_failure():
    """is_after() raises AssertionError with a formatted message on failure."""
    try:
        d2 = datetime.datetime.today()
        assert_that(d1).is_after(d2)
        fail('should have raised error')
    except AssertionError as ex:
        # check the error message, not just that an error occurred
        assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be after <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')


def test_is_after_bad_val_type_failure():
    """is_after() raises TypeError when val is not a datetime."""
    try:
        assert_that(123).is_after(123)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('val must be datetime, but was type <int>')


def test_is_after_bad_arg_type_failure():
    """is_after() raises TypeError when its argument is not a datetime."""
    try:
        assert_that(d1).is_after(123)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('given arg must be datetime, but was type <int>')
def test_is_equal_to_ignoring_milliseconds():
    """Equality up to second precision passes for the same datetime."""
    assert_that(d1).is_equal_to_ignoring_milliseconds(d1)


def test_is_equal_to_ignoring_milliseconds_failure():
    """Different days fail with a second-precision message."""
    try:
        d2 = datetime.datetime.today() + datetime.timedelta(days=1)
        assert_that(d1).is_equal_to_ignoring_milliseconds(d2)
        fail('should have raised error')
    except AssertionError as ex:
        assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be equal to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')


def test_is_equal_to_ignoring_milliseconds_bad_val_type_failure():
    """TypeError when val is not a datetime."""
    try:
        assert_that(123).is_equal_to_ignoring_milliseconds(123)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('val must be datetime, but was type <int>')


def test_is_equal_to_ignoring_milliseconds_bad_arg_type_failure():
    """TypeError when the argument is not a datetime."""
    try:
        assert_that(d1).is_equal_to_ignoring_milliseconds(123)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('given arg must be datetime, but was type <int>')
def test_is_equal_to_ignoring_seconds():
    """Equality up to minute precision passes for the same datetime."""
    assert_that(d1).is_equal_to_ignoring_seconds(d1)


def test_is_equal_to_ignoring_seconds_failure():
    """Different days fail with a minute-precision message."""
    try:
        d2 = datetime.datetime.today() + datetime.timedelta(days=1)
        assert_that(d1).is_equal_to_ignoring_seconds(d2)
        fail('should have raised error')
    except AssertionError as ex:
        assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}> to be equal to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}>, but was not.')


def test_is_equal_to_ignoring_seconds_bad_val_type_failure():
    """TypeError when val is not a datetime."""
    try:
        assert_that(123).is_equal_to_ignoring_seconds(123)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('val must be datetime, but was type <int>')


def test_is_equal_to_ignoring_seconds_bad_arg_type_failure():
    """TypeError when the argument is not a datetime."""
    try:
        assert_that(d1).is_equal_to_ignoring_seconds(123)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('given arg must be datetime, but was type <int>')
def test_is_equal_to_ignoring_time():
assert_that(d1).is_equal_to_ignoring_time(d1)
def test_is_equal_to_ignoring_time_failure():
try:
d2 = datetime.datetime.today() + datetime.timedelta(days=1)
assert_that(d1).is_equal_to_ignoring_time(d2)
fail('should have raised error')
except AssertionError as ex:
assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2}> to be equal to <\d{4}-\d{2}-\d{2}>, but was not.')
def test_is_equal_to_ignoring_time_bad_val_type_failure():
try:
assert_that(123).is_equal_to_ignoring_time(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('val must be datetime, but was type <int>')
def test_is_equal_to_ignoring_time_bad_arg_type_failure():
try:
assert_that(d1).is_equal_to_ignoring_time(123)
fail('should have raised error')
except TypeError as ex:
assert_that(str(ex)).is_equal_to('given arg must be datetime, but was type <int>')
def test_is_greater_than():
    """is_greater_than() passes for a later datetime."""
    later = datetime.datetime.today()
    assert_that(later).is_greater_than(d1)

def test_is_greater_than_failure():
    """is_greater_than() raises AssertionError for an earlier datetime."""
    try:
        later = datetime.datetime.today()
        assert_that(d1).is_greater_than(later)
        fail('should have raised error')
    except AssertionError as ex:
        assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be greater than <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')

def test_is_greater_than_bad_arg_type_failure():
    """Non-datetime arg raises TypeError."""
    try:
        assert_that(d1).is_greater_than(123)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('given arg must be <datetime>, but was <int>')

def test_is_greater_than_or_equal_to():
    """is_greater_than_or_equal_to() passes for an equal datetime."""
    assert_that(d1).is_greater_than_or_equal_to(d1)

def test_is_greater_than_or_equal_to_failure():
    """is_greater_than_or_equal_to() raises for an earlier datetime."""
    try:
        later = datetime.datetime.today()
        assert_that(d1).is_greater_than_or_equal_to(later)
        fail('should have raised error')
    except AssertionError as ex:
        assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be greater than or equal to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')

def test_is_greater_than_or_equal_to_bad_arg_type_failure():
    """Non-datetime arg raises TypeError."""
    try:
        assert_that(d1).is_greater_than_or_equal_to(123)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('given arg must be <datetime>, but was <int>')

def test_is_less_than():
    """is_less_than() passes for an earlier datetime."""
    later = datetime.datetime.today()
    assert_that(d1).is_less_than(later)

def test_is_less_than_failure():
    """is_less_than() raises AssertionError for a later datetime."""
    try:
        later = datetime.datetime.today()
        assert_that(later).is_less_than(d1)
        fail('should have raised error')
    except AssertionError as ex:
        assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be less than <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')

def test_is_less_than_bad_arg_type_failure():
    """Non-datetime arg raises TypeError."""
    try:
        assert_that(d1).is_less_than(123)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('given arg must be <datetime>, but was <int>')

def test_is_less_than_or_equal_to():
    """is_less_than_or_equal_to() passes for an equal datetime."""
    assert_that(d1).is_less_than_or_equal_to(d1)

def test_is_less_than_or_equal_to_failure():
    """is_less_than_or_equal_to() raises for a later datetime."""
    try:
        later = datetime.datetime.today()
        assert_that(later).is_less_than_or_equal_to(d1)
        fail('should have raised error')
    except AssertionError as ex:
        assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be less than or equal to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')

def test_is_less_than_or_equal_to_bad_arg_type_failure():
    """Non-datetime arg raises TypeError."""
    try:
        assert_that(d1).is_less_than_or_equal_to(123)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('given arg must be <datetime>, but was <int>')
def test_is_between():
    """is_between() passes when val lies between the low and high datetimes."""
    middle = datetime.datetime.today()
    upper = datetime.datetime.today()
    assert_that(middle).is_between(d1, upper)

def test_is_between_failure():
    """is_between() raises AssertionError when val precedes the low bound."""
    try:
        low = datetime.datetime.today()
        high = datetime.datetime.today()
        assert_that(d1).is_between(low, high)
        fail('should have raised error')
    except AssertionError as ex:
        assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be between <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> and <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was not.')

def test_is_between_bad_arg1_type_failure():
    """Non-datetime low bound raises TypeError."""
    try:
        assert_that(d1).is_between(123, 456)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('given low arg must be <datetime>, but was <int>')

def test_is_between_bad_arg2_type_failure():
    """Non-datetime high bound raises TypeError."""
    try:
        low = datetime.datetime.today()
        assert_that(d1).is_between(low, 123)
        fail('should have raised error')
    except TypeError as ex:
        # NOTE(review): the high arg here is an int, yet the pinned message says
        # "was <datetime>". This looks like it encodes a copy/paste defect in the
        # library's high-arg error (reporting the low arg's type) -- confirm
        # against the library before "fixing" either side.
        assert_that(str(ex)).is_equal_to('given high arg must be <datetime>, but was <datetime>')

def test_is_not_between():
    """is_not_between() passes when val lies outside the bounds."""
    lower = d1 + datetime.timedelta(minutes=5)
    upper = d1 + datetime.timedelta(minutes=10)
    assert_that(d1).is_not_between(lower, upper)

def test_is_not_between_failure():
    """is_not_between() raises AssertionError when val lies inside the bounds."""
    try:
        middle = d1 + datetime.timedelta(minutes=5)
        upper = d1 + datetime.timedelta(minutes=10)
        assert_that(middle).is_not_between(d1, upper)
        fail('should have raised error')
    except AssertionError as ex:
        assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to not be between <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> and <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}>, but was.')

def test_is_not_between_bad_arg1_type_failure():
    """Non-datetime low bound raises TypeError."""
    try:
        assert_that(d1).is_not_between(123, 456)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('given low arg must be <datetime>, but was <int>')

def test_is_not_between_bad_arg2_type_failure():
    """Non-datetime high bound raises TypeError."""
    try:
        low = datetime.datetime.today()
        assert_that(d1).is_not_between(low, 123)
        fail('should have raised error')
    except TypeError as ex:
        # NOTE(review): same suspicious "<datetime>" type report as in
        # test_is_between_bad_arg2_type_failure -- verify against the library.
        assert_that(str(ex)).is_equal_to('given high arg must be <datetime>, but was <datetime>')
def test_is_close_to():
    """is_close_to() passes when val is within the given tolerance."""
    near = datetime.datetime.today()
    assert_that(d1).is_close_to(near, datetime.timedelta(minutes=5))

def test_is_close_to_failure():
    """is_close_to() raises AssertionError when val is outside the tolerance."""
    try:
        far = d1 + datetime.timedelta(minutes=5)
        assert_that(d1).is_close_to(far, datetime.timedelta(minutes=1))
        fail('should have raised error')
    except AssertionError as ex:
        assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to be close to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> within tolerance <\d+:\d{2}:\d{2}>, but was not.')

def test_is_close_to_bad_arg_type_failure():
    """Non-datetime arg raises TypeError."""
    try:
        assert_that(d1).is_close_to(123, 456)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('given arg must be datetime, but was <int>')

def test_is_close_to_bad_tolerance_arg_type_failure():
    """Non-timedelta tolerance raises TypeError."""
    try:
        near = datetime.datetime.today()
        assert_that(d1).is_close_to(near, 123)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('given tolerance arg must be timedelta, but was <int>')

def test_is_not_close_to():
    """is_not_close_to() passes when val is outside the tolerance."""
    far = d1 + datetime.timedelta(minutes=5)
    assert_that(d1).is_not_close_to(far, datetime.timedelta(minutes=4))

def test_is_not_close_to_failure():
    """is_not_close_to() raises AssertionError when val is within tolerance."""
    try:
        near = datetime.datetime.today()
        assert_that(d1).is_not_close_to(near, datetime.timedelta(minutes=5))
        fail('should have raised error')
    except AssertionError as ex:
        assert_that(str(ex)).matches(r'Expected <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> to not be close to <\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}> within tolerance <\d+:\d{2}:\d{2}>, but was.')

def test_is_not_close_to_bad_arg_type_failure():
    """Non-datetime arg raises TypeError."""
    try:
        assert_that(d1).is_not_close_to(123, 456)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('given arg must be datetime, but was <int>')

def test_is_not_close_to_bad_tolerance_arg_type_failure():
    """Non-timedelta tolerance raises TypeError."""
    try:
        near = datetime.datetime.today()
        assert_that(d1).is_not_close_to(near, 123)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('given tolerance arg must be timedelta, but was <int>')
# Fixed 60-second timedelta shared by the timedelta-comparison tests below.
t1 = datetime.timedelta(seconds=60)

def test_is_greater_than_timedelta():
    """is_greater_than() passes for a larger timedelta."""
    bigger = datetime.timedelta(seconds=120)
    assert_that(bigger).is_greater_than(t1)

def test_is_greater_than_timedelta_failure():
    """is_greater_than() raises AssertionError for a smaller timedelta."""
    try:
        bigger = datetime.timedelta(seconds=90)
        assert_that(t1).is_greater_than(bigger)
        fail('should have raised error')
    except AssertionError as ex:
        assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to be greater than <\d{1,2}:\d{2}:\d{2}>, but was not.')

def test_is_greater_than_timedelta_bad_arg_type_failure():
    """Non-timedelta arg raises TypeError."""
    try:
        assert_that(t1).is_greater_than(123)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('given arg must be <timedelta>, but was <int>')

def test_is_greater_than_or_equal_to_timedelta():
    """is_greater_than_or_equal_to() passes for an equal timedelta."""
    assert_that(t1).is_greater_than_or_equal_to(t1)

def test_is_greater_than_or_equal_to_timedelta_failure():
    """is_greater_than_or_equal_to() raises for a smaller timedelta."""
    try:
        bigger = datetime.timedelta(seconds=90)
        assert_that(t1).is_greater_than_or_equal_to(bigger)
        fail('should have raised error')
    except AssertionError as ex:
        assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to be greater than or equal to <\d{1,2}:\d{2}:\d{2}>, but was not.')

def test_is_greater_than_or_equal_to_timedelta_bad_arg_type_failure():
    """Non-timedelta arg raises TypeError."""
    try:
        assert_that(t1).is_greater_than_or_equal_to(123)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('given arg must be <timedelta>, but was <int>')

def test_is_less_than_timedelta():
    """is_less_than() passes for a smaller timedelta."""
    bigger = datetime.timedelta(seconds=90)
    assert_that(t1).is_less_than(bigger)

def test_is_less_than_timedelta_failure():
    """is_less_than() raises AssertionError for a larger timedelta."""
    try:
        bigger = datetime.timedelta(seconds=90)
        assert_that(bigger).is_less_than(t1)
        fail('should have raised error')
    except AssertionError as ex:
        assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to be less than <\d{1,2}:\d{2}:\d{2}>, but was not.')

def test_is_less_than_timedelta_bad_arg_type_failure():
    """Non-timedelta arg raises TypeError."""
    try:
        assert_that(t1).is_less_than(123)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('given arg must be <timedelta>, but was <int>')

def test_is_less_than_or_equal_to_timedelta():
    """is_less_than_or_equal_to() passes for an equal timedelta."""
    assert_that(t1).is_less_than_or_equal_to(t1)

def test_is_less_than_or_equal_to_timedelta_failure():
    """is_less_than_or_equal_to() raises for a larger timedelta."""
    try:
        bigger = datetime.timedelta(seconds=90)
        assert_that(bigger).is_less_than_or_equal_to(t1)
        fail('should have raised error')
    except AssertionError as ex:
        assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to be less than or equal to <\d{1,2}:\d{2}:\d{2}>, but was not.')

def test_is_less_than_or_equal_to_timedelta_bad_arg_type_failure():
    """Non-timedelta arg raises TypeError."""
    try:
        assert_that(t1).is_less_than_or_equal_to(123)
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('given arg must be <timedelta>, but was <int>')
def test_is_between_timedelta():
    """is_between() passes when a timedelta lies within the given bounds."""
    # renamed from d2/d3: these locals hold timedeltas, not datetimes
    middle = datetime.timedelta(seconds=90)
    upper = datetime.timedelta(seconds=120)
    assert_that(middle).is_between(t1, upper)

def test_is_between_timedelta_failure():
    """is_between() raises AssertionError when val exceeds the bounds."""
    try:
        low = datetime.timedelta(seconds=30)
        high = datetime.timedelta(seconds=40)
        assert_that(t1).is_between(low, high)
        fail('should have raised error')
    except AssertionError as ex:
        assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to be between <\d{1,2}:\d{2}:\d{2}> and <\d{1,2}:\d{2}:\d{2}>, but was not.')

def test_is_not_between_timedelta():
    """is_not_between() passes when val is outside the bounds."""
    low = datetime.timedelta(seconds=90)
    high = datetime.timedelta(seconds=120)
    assert_that(t1).is_not_between(low, high)

def test_is_not_between_timedelta_failure():
    """is_not_between() raises AssertionError when val is inside the bounds."""
    try:
        middle = datetime.timedelta(seconds=90)
        upper = datetime.timedelta(seconds=120)
        assert_that(middle).is_not_between(t1, upper)
        fail('should have raised error')
    except AssertionError as ex:
        assert_that(str(ex)).matches(r'Expected <\d{1,2}:\d{2}:\d{2}> to not be between <\d{1,2}:\d{2}:\d{2}> and <\d{1,2}:\d{2}:\d{2}>, but was.')
| 40.915743
| 195
| 0.680757
| 3,054
| 18,453
| 3.876555
| 0.063523
| 0.027705
| 0.033956
| 0.040544
| 0.892812
| 0.878706
| 0.862995
| 0.85193
| 0.811471
| 0.778782
| 0
| 0.036343
| 0.18138
| 18,453
| 450
| 196
| 41.006667
| 0.747385
| 0.081125
| 0
| 0.566197
| 0
| 0.053521
| 0.26087
| 0.017722
| 0
| 0
| 0
| 0
| 0.36338
| 1
| 0.180282
| false
| 0
| 0.005634
| 0
| 0.185915
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d4aa2b80a01c0ca116c021b6f4969479123fafe2
| 47,729
|
py
|
Python
|
openprocurement/auctions/tessel/tests/blanks/tender_blanks.py
|
bdmbdsm/openprocurement.auctions.tessel
|
840990e01c6ad3e4b49c80d5d3031575cef318e3
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/auctions/tessel/tests/blanks/tender_blanks.py
|
bdmbdsm/openprocurement.auctions.tessel
|
840990e01c6ad3e4b49c80d5d3031575cef318e3
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/auctions/tessel/tests/blanks/tender_blanks.py
|
bdmbdsm/openprocurement.auctions.tessel
|
840990e01c6ad3e4b49c80d5d3031575cef318e3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from copy import deepcopy
from hashlib import sha512
from uuid import uuid4
from datetime import timedelta
from iso8601 import parse_date
import pytz
from openprocurement.auctions.core.tests.base import JSON_RENDERER_ERROR
from openprocurement.auctions.core.utils import (
SANDBOX_MODE, TZ, get_now
)
from openprocurement.auctions.tessel.constants import (
CONTRACT_TYPES
)
# InsiderAuctionTest
def create_role(self):
    """Verify exactly which auction model fields are excluded from the
    'create' role (i.e. not settable by the creating client)."""
    excluded = {
        'awardCriteriaDetails', 'awardCriteriaDetails_en', 'awardCriteriaDetails_ru',
        'description', 'description_en', 'description_ru', 'tenderAttempts',
        'features', 'guarantee', 'hasEnquiries', 'items', 'lots', 'minimalStep', 'mode',
        'procurementMethodRationale', 'procurementMethodRationale_en', 'procurementMethodRationale_ru',
        'procurementMethodType', 'procuringEntity', 'status', 'contractTerms',
        'submissionMethodDetails', 'submissionMethodDetails_en', 'submissionMethodDetails_ru',
        'title', 'title_en', 'title_ru', 'value', 'auctionPeriod',
        'auctionParameters', 'merchandisingObject', 'bankAccount', 'registrationFee', 'documents',
    }
    if SANDBOX_MODE:
        # sandbox builds expose one extra tweakable field
        excluded.add('procurementMethodDetails')
    model_fields = set(self.auction._fields)
    role_fields = self.auction._options.roles['create'].fields
    self.assertEqual(model_fields - role_fields, excluded)
def edit_role(self):
    """The 'edit_active.tendering' role must expose no editable fields,
    whether it is expressed as a blacklist or a whitelist."""
    expected = set()
    role = self.auction._options.roles['edit_active.tendering']
    model_fields = set(self.auction._fields)
    if role.function.__name__ == 'blacklist':
        # blacklist role: everything not listed is editable
        self.assertEqual(model_fields - role.fields, expected)
    else:
        # whitelist role: only listed fields are editable
        self.assertEqual(model_fields.intersection(role.fields), expected)
# InsiderAuctionResourceTest
def create_auction_invalid(self):
    """POST a series of malformed payloads to /auctions and assert the exact
    validation error envelope (status code, content type, errors list) for
    each.

    Covers: bad Content-Type, unparseable JSON, missing/invalid 'data',
    unknown procurementMethodType, rogue fields, malformed value and
    procurementMethod, unparseable and out-of-range dates, period ordering
    rules, required procuringEntity contact details, and invalid
    contractTerms. The shared ``self.initial_data`` dict is mutated before
    some requests and restored afterwards, so statement order matters.
    """
    request_path = '/auctions'
    # Non-JSON body: rejected before any validation runs.
    response = self.app.post(request_path, 'data', status=415)
    self.assertEqual(response.status, '415 Unsupported Media Type')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description':
            u"Content-Type header should be one of ['application/json']", u'location': u'header', u'name': u'Content-Type'}
    ])
    # JSON content type but body is not valid JSON.
    response = self.app.post(
        request_path, 'data', content_type='application/json', status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        JSON_RENDERER_ERROR
    ])
    # Valid JSON, but a bare string instead of an object with 'data'.
    response = self.app.post_json(request_path, 'data', status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': u'Data not available',
         u'location': u'body', u'name': u'data'}
    ])
    # Object without the 'data' key.
    response = self.app.post_json(request_path, {'not_data': {}}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': u'Data not available',
         u'location': u'body', u'name': u'data'}
    ])
    # 'data' present but of the wrong type (list instead of object).
    response = self.app.post_json(request_path, {'data': []}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': u'Data not available',
         u'location': u'body', u'name': u'data'}
    ])
    # Unknown procurementMethodType is a 415, not a validation 422.
    response = self.app.post_json(request_path, {'data': {'procurementMethodType': 'invalid_value'}}, status=415)
    self.assertEqual(response.status, '415 Unsupported Media Type')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': u'procurementMethodType is not implemented', u'location': u'body', u'name': u'data'}
    ])
    # Unknown ("rogue") field on an otherwise valid type.
    response = self.app.post_json(request_path, {'data': {'invalid_field': 'invalid_value', 'procurementMethodType': self.initial_data['procurementMethodType']}}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': u'Rogue field', u'location':
            u'body', u'name': u'invalid_field'}
    ])
    # 'value' must be a mapping/Value, not a plain string.
    response = self.app.post_json(request_path, {'data': {'value': 'invalid_value', 'procurementMethodType': self.initial_data['procurementMethodType']}}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': [
            u'Please use a mapping for this field or Value instance instead of unicode.'], u'location': u'body', u'name': u'value'}
    ])
    # Invalid procurementMethod enum value; also asserts required fields.
    response = self.app.post_json(request_path, {'data': {'procurementMethod': 'invalid_value', 'procurementMethodType': self.initial_data['procurementMethodType']}}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertIn({u'description': [u"Value must be one of ['open', 'selective', 'limited']."], u'location': u'body', u'name': u'procurementMethod'}, response.json['errors'])
    #self.assertIn({u'description': [u'This field is required.'], u'location': u'body', u'name': u'tenderPeriod'}, response.json['errors'])
    # self.assertIn({u'description': [u'This field is required.'], u'location': u'body', u'name': u'minimalStep'}, response.json['errors'])
    #self.assertIn({u'description': [u'This field is required.'], u'location': u'body', u'name': u'enquiryPeriod'}, response.json['errors'])
    self.assertIn({u'description': [u'This field is required.'], u'location': u'body', u'name': u'value'}, response.json['errors'])
    # Unparseable date string in a period field.
    response = self.app.post_json(request_path, {'data': {'enquiryPeriod': {'endDate': 'invalid_value'}, 'procurementMethodType': self.initial_data['procurementMethodType']}}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': {u'endDate': [u"Could not parse invalid_value. Should be ISO8601."]}, u'location': u'body', u'name': u'enquiryPeriod'}
    ])
    # Parseable but out-of-range date.
    response = self.app.post_json(request_path, {'data': {'enquiryPeriod': {'endDate': '9999-12-31T23:59:59.999999'}, 'procurementMethodType': self.initial_data['procurementMethodType']}}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': {u'endDate': [u'date value out of range']}, u'location': u'body', u'name': u'enquiryPeriod'}
    ])
    # auctionPeriod.startDate is required: swap it out for tenderPeriod and
    # restore afterwards so later requests see the original initial_data.
    self.initial_data['tenderPeriod'] = self.initial_data.pop('auctionPeriod')
    response = self.app.post_json(request_path, {'data': self.initial_data}, status=422)
    self.initial_data['auctionPeriod'] = self.initial_data.pop('tenderPeriod')
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': {u'startDate': [u'This field is required.']}, u'location': u'body', u'name': u'auctionPeriod'}
    ])
    # tenderPeriod must start before it ends.
    self.initial_data['tenderPeriod'] = {'startDate': '2014-10-31T00:00:00', 'endDate': '2014-10-01T00:00:00'}
    response = self.app.post_json(request_path, {'data': self.initial_data}, status=422)
    self.initial_data.pop('tenderPeriod')
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': {u'startDate': [u'period should begin before its end']}, u'location': u'body', u'name': u'tenderPeriod'}
    ])
    #data = self.initial_data['tenderPeriod']
    #self.initial_data['tenderPeriod'] = {'startDate': '2014-10-31T00:00:00', 'endDate': '2015-10-01T00:00:00'}
    #response = self.app.post_json(request_path, {'data': self.initial_data}, status=422)
    #self.initial_data['tenderPeriod'] = data
    #self.assertEqual(response.status, '422 Unprocessable Entity')
    #self.assertEqual(response.content_type, 'application/json')
    #self.assertEqual(response.json['status'], 'error')
    #self.assertEqual(response.json['errors'], [
    #{u'description': [u'period should begin after enquiryPeriod'], u'location': u'body', u'name': u'tenderPeriod'}
    #])
    now = get_now()
    #self.initial_data['awardPeriod'] = {'startDate': now.isoformat(), 'endDate': now.isoformat()}
    #response = self.app.post_json(request_path, {'data': self.initial_data}, status=422)
    #del self.initial_data['awardPeriod']
    #self.assertEqual(response.status, '422 Unprocessable Entity')
    #self.assertEqual(response.content_type, 'application/json')
    #self.assertEqual(response.json['status'], 'error')
    #self.assertEqual(response.json['errors'], [
    #{u'description': [u'period should begin after tenderPeriod'], u'location': u'body', u'name': u'awardPeriod'}
    #])
    # awardPeriod must begin after auctionPeriod.
    data = self.initial_data['auctionPeriod']
    self.initial_data['auctionPeriod'] = {'startDate': (now + timedelta(days=15)).isoformat(), 'endDate': (now + timedelta(days=15)).isoformat()}
    self.initial_data['awardPeriod'] = {'startDate': (now + timedelta(days=14)).isoformat(), 'endDate': (now + timedelta(days=14)).isoformat()}
    response = self.app.post_json(request_path, {'data': self.initial_data}, status=422)
    self.initial_data['auctionPeriod'] = data
    del self.initial_data['awardPeriod']
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': [u'period should begin after auctionPeriod'], u'location': u'body', u'name': u'awardPeriod'}
    ])
    #
    # data = self.initial_data['minimalStep']
    # self.initial_data['minimalStep'] = {'amount': '1000.0'}
    # response = self.app.post_json(request_path, {'data': self.initial_data}, status=422)
    # self.initial_data['minimalStep'] = data
    # self.assertEqual(response.status, '422 Unprocessable Entity')
    # self.assertEqual(response.content_type, 'application/json')
    # self.assertEqual(response.json['status'], 'error')
    # self.assertEqual(response.json['errors'], [
    #     {u'description': [u'value should be less than value of auction'], u'location': u'body', u'name': u'minimalStep'}
    # ])
    #
    # data = self.initial_data['minimalStep']
    # self.initial_data['minimalStep'] = {'amount': '100.0', 'valueAddedTaxIncluded': False}
    # response = self.app.post_json(request_path, {'data': self.initial_data}, status=422)
    # self.initial_data['minimalStep'] = data
    # self.assertEqual(response.status, '422 Unprocessable Entity')
    # self.assertEqual(response.content_type, 'application/json')
    # self.assertEqual(response.json['status'], 'error')
    # self.assertEqual(response.json['errors'], [
    #     {u'description': [u'valueAddedTaxIncluded should be identical to valueAddedTaxIncluded of value of auction'], u'location': u'body', u'name': u'minimalStep'}
    # ])
    #
    # data = self.initial_data['minimalStep']
    # self.initial_data['minimalStep'] = {'amount': '100.0', 'currency': "USD"}
    # response = self.app.post_json(request_path, {'data': self.initial_data}, status=422)
    # self.initial_data['minimalStep'] = data
    # self.assertEqual(response.status, '422 Unprocessable Entity')
    # self.assertEqual(response.content_type, 'application/json')
    # self.assertEqual(response.json['status'], 'error')
    # self.assertEqual(response.json['errors'], [
    #     {u'description': [u'currency should be identical to currency of value of auction'], u'location': u'body', u'name': u'minimalStep'}
    # ])
    #
    # auction_data = deepcopy(self.initial_data)
    # auction_data['value'] = {'amount': '100.0', 'currency': "USD"}
    # auction_data['minimalStep'] = {'amount': '5.0', 'currency': "USD"}
    # response = self.app.post_json(request_path, {'data': auction_data}, status=422)
    # self.assertEqual(response.status, '422 Unprocessable Entity')
    # self.assertEqual(response.content_type, 'application/json')
    # self.assertEqual(response.json['status'], 'error')
    # self.assertEqual(response.json['errors'], [
    #     {u'description': [u'currency should be only UAH'], u'location': u'body', u'name': u'value'}
    # ])
    # procuringEntity must have a telephone or an email.
    data = self.initial_data["procuringEntity"]["contactPoint"]["telephone"]
    del self.initial_data["procuringEntity"]["contactPoint"]["telephone"]
    response = self.app.post_json(request_path, {'data': self.initial_data}, status=422)
    self.initial_data["procuringEntity"]["contactPoint"]["telephone"] = data
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': {u'contactPoint': {u'email': [u'telephone or email should be present']}}, u'location': u'body', u'name': u'procuringEntity'}
    ])
    # contractTerms.type must be a known contract type.
    self.initial_data['contractTerms'] = {'type': 'wrong_type'}
    response = self.app.post_json(request_path, {'data': self.initial_data}, status=422)
    del self.initial_data["contractTerms"]
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['status'], 'error')
    self.assertEqual(response.json['errors'], [
        {u'description': [u'type must be one of {}'.format(CONTRACT_TYPES)], u'location': u'body', u'name': u'contractTerms'}
    ])
def create_auction_auctionPeriod(self):
    """The requested auctionPeriod.startDate is not stored verbatim: the API
    strips it and instead exposes auctionPeriod.shouldStartAfter derived from
    tenderPeriod. Verifies the derived date, with a relaxed check in sandbox
    mode."""
    data = self.initial_data.copy()
    #tenderPeriod = data.pop('tenderPeriod')
    #data['auctionPeriod'] = {'startDate': tenderPeriod['endDate']}
    response = self.app.post_json('/auctions', {'data': data})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    auction = response.json['data']
    self.assertIn('tenderPeriod', auction)
    self.assertIn('auctionPeriod', auction)
    # the submitted startDate must have been dropped
    self.assertNotIn('startDate', auction['auctionPeriod'])
    self.assertEqual(parse_date(data['auctionPeriod']['startDate']).date(), parse_date(auction['auctionPeriod']['shouldStartAfter'], TZ).date())
    if SANDBOX_MODE:
        # Sandbox: auction may start almost immediately after tendering ends.
        # Normalize both timestamps to TZ before subtracting (parse_date with
        # default_timezone=None leaves naive strings naive).
        auction_startDate = parse_date(data['auctionPeriod']['startDate'], None)
        if not auction_startDate.tzinfo:
            auction_startDate = TZ.localize(auction_startDate)
        tender_endDate = parse_date(auction['tenderPeriod']['endDate'], None)
        if not tender_endDate.tzinfo:
            tender_endDate = TZ.localize(tender_endDate)
        # NOTE(review): the 70s slack presumably absorbs request latency -- confirm.
        self.assertLessEqual((auction_startDate - tender_endDate).total_seconds(), 70)
    else:
        # Production: shouldStartAfter shares tenderPeriod.endDate's date but
        # has an earlier time-of-day component.
        self.assertEqual(parse_date(auction['tenderPeriod']['endDate']).date(), parse_date(auction['auctionPeriod']['shouldStartAfter'], TZ).date())
        self.assertGreater(parse_date(auction['tenderPeriod']['endDate']).time(), parse_date(auction['auctionPeriod']['shouldStartAfter'], TZ).time())
def create_auction_in_pending_activation(self):
    """The concierge user can create an auction directly in the
    'pending.activation' status, and no transfer token is echoed back."""
    transfer = self.app.post_json('/transfers', {"data": {}}).json
    self.app.authorization = ('Basic', ('concierge', ''))
    hashed_token = sha512(transfer['access']['transfer']).hexdigest()
    payload = deepcopy(self.initial_data)
    payload['transfer_token'] = hashed_token
    payload['status'] = 'pending.activation'
    payload['merchandisingObject'] = uuid4().hex
    response = self.app.post_json('/auctions', {'data': payload})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    created = response.json
    self.assertEqual(created['data']['status'], payload['status'])
    self.assertEqual(created['data']['merchandisingObject'], payload['merchandisingObject'])
    # the transfer credential must not leak back to the caller
    self.assertNotIn('transfer', created['access'])
def create_auction_generated(self):
    """Server-generated identifiers must override any client-supplied
    id/doc_id/auctionID values, and the created auction must expose exactly
    the expected field set."""
    payload = self.initial_data.copy()
    #del data['awardPeriod']
    payload.update({'id': 'hash', 'doc_id': 'hash2', 'auctionID': 'hash3'})
    response = self.app.post_json('/auctions', {'data': payload})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    auction = response.json['data']
    # these two are environment-dependent, so drop them before comparing
    for optional in ('procurementMethodDetails', 'submissionMethodDetails'):
        auction.pop(optional, None)
    self.assertEqual(set(auction), set([
        u'procurementMethodType', u'id', u'date', u'dateModified', u'auctionID', u'status', u'enquiryPeriod',
        u'tenderPeriod', u'minimalStep', u'items', u'value', u'procuringEntity', u'next_check',
        u'procurementMethod', u'awardCriteria', u'submissionMethod', u'title', u'owner', u'auctionPeriod',
        u'documents', u'tenderAttempts', u'auctionParameters', u'bankAccount', u'registrationFee'
    ]))
    self.assertNotEqual(payload['id'], auction['id'])
    self.assertNotEqual(payload['doc_id'], auction['id'])
    self.assertNotEqual(payload['auctionID'], auction['auctionID'])
def create_auction(self):
    """Happy-path auction creation: checks the server-added field set, the
    Location header, GET round-trip, JSONP/pretty output options, and that a
    client-supplied guarantee is preserved."""
    response = self.app.get('/auctions')
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(len(response.json['data']), 0)
    response = self.app.post_json('/auctions', {"data": self.initial_data})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    auction = response.json['data']
    # NOTE(review): both branches assert the identical field set -- the split
    # on financial organization looks like it was meant to diverge (or be
    # collapsed); confirm intent before changing.
    if self.initial_organization == self.test_financial_organization:
        self.assertEqual(set(auction) - set(self.initial_data), set([
            u'id', u'dateModified', u'auctionID', u'date', u'status', u'procurementMethod',
            u'awardCriteria', u'submissionMethod', u'next_check', u'owner', u'enquiryPeriod', u'tenderPeriod',
            u'minimalStep'
        ]))
    else:
        self.assertEqual(set(auction) - set(self.initial_data), set([
            u'id', u'dateModified', u'auctionID', u'date', u'status', u'procurementMethod',
            u'awardCriteria', u'submissionMethod', u'next_check', u'owner', u'enquiryPeriod', u'tenderPeriod',
            u'minimalStep'
        ]))
    self.assertIn(auction['id'], response.headers['Location'])
    # GET must round-trip exactly what POST returned.
    response = self.app.get('/auctions/{}'.format(auction['id']))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(set(response.json['data']), set(auction))
    self.assertEqual(response.json['data'], auction)
    # opt_jsonp wraps the payload in a JS callback.
    response = self.app.post_json('/auctions?opt_jsonp=callback', {"data": self.initial_data})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/javascript')
    self.assertIn('callback({"', response.body)
    # opt_pretty and options.pretty both produce indented JSON.
    response = self.app.post_json('/auctions?opt_pretty=1', {"data": self.initial_data})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    self.assertIn('{\n    "', response.body)
    response = self.app.post_json('/auctions', {"data": self.initial_data, "options": {"pretty": True}})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    self.assertIn('{\n    "', response.body)
    # A client-supplied guarantee must be stored verbatim.
    auction_data = deepcopy(self.initial_data)
    auction_data['guarantee'] = {"amount": 100500, "currency": "USD"}
    response = self.app.post_json('/auctions', {'data': auction_data})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    data = response.json['data']
    self.assertIn('guarantee', data)
    self.assertEqual(data['guarantee']['amount'], 100500)
    self.assertEqual(data['guarantee']['currency'], "USD")
def check_daylight_savings_timezone(self):
    """Verify that tenderPeriod.endDate tracks daylight-saving transitions.

    Creates a baseline auction, then auctions starting 10..240 days in the
    future; at least one of them must land on the other side of a DST
    switch, i.e. report a different timezone abbreviation (EET vs EEST).
    """
    payload = deepcopy(self.initial_data)
    kiev_tz = pytz.timezone('Europe/Kiev')

    def tz_abbrev(resp):
        # Timezone abbreviation of the tender period end, in Kiev time.
        end_date = parse_date(resp.json['data']['tenderPeriod']['endDate'])
        return end_date.astimezone(tz=kiev_tz).strftime('%Z')

    baseline = tz_abbrev(self.app.post_json('/auctions', {'data': payload}))
    now = get_now()
    # check if DST working with different time periods
    changed = []
    for offset_days in (10, 90, 180, 210, 240):
        payload.update({
            "auctionPeriod": {
                "startDate": (now + timedelta(days=offset_days)).isoformat(),
            }})
        resp = self.app.post_json('/auctions', {'data': payload})
        changed.append(tz_abbrev(resp) != baseline)
    self.assertTrue(any(changed))
# InsiderAuctionProcessTest
def first_bid_auction(self):
    """End-to-end insider-auction flow.

    Scenario: create an auction, submit two bids, run the auction, reject
    the first (pending) award with a rejection protocol, confirm the next
    award with an auction protocol, sign the contract, and check the
    auction reaches the ``complete`` status where contract documents are
    frozen.
    """
    self.app.authorization = ('Basic', ('broker', ''))
    # empty auctions listing
    response = self.app.get('/auctions')
    self.assertEqual(response.json['data'], [])
    # create auction
    response = self.app.post_json('/auctions',
                                  {"data": self.initial_data})
    auction_id = self.auction_id = response.json['data']['id']
    owner_token = response.json['access']['token']
    # switch to active.tendering
    self.set_status('active.tendering')
    # create bid; financial auctions additionally require the 'eligible' flag
    self.app.authorization = ('Basic', ('broker', ''))
    if self.initial_organization == self.test_financial_organization:
        response = self.app.post_json('/auctions/{}/bids'.format(auction_id),
                                      {'data': {'tenderers': [self.initial_organization], "value": {"amount": 450}, 'qualified': True, 'eligible': True}})
    else:
        response = self.app.post_json('/auctions/{}/bids'.format(auction_id),
                                      {'data': {'tenderers': [self.initial_organization], "value": {"amount": 450}, 'qualified': True}})
    bid_id = response.json['data']['id']
    bid_token = response.json['access']['token']
    bids_tokens = {bid_id: bid_token}
    # create second bid
    self.app.authorization = ('Basic', ('broker', ''))
    if self.initial_organization == self.test_financial_organization:
        response = self.app.post_json('/auctions/{}/bids'.format(auction_id),
                                      {'data': {'tenderers': [self.initial_organization], "value": {"amount": 450}, 'qualified': True, 'eligible': True}})
    else:
        response = self.app.post_json('/auctions/{}/bids'.format(auction_id),
                                      {'data': {'tenderers': [self.initial_organization], "value": {"amount": 450}, 'qualified': True}})
    bids_tokens[response.json['data']['id']] = response.json['access']['token']
    # switch to active.auction
    self.set_status('active.auction')
    # get auction info (as the auction service user)
    self.app.authorization = ('Basic', ('auction', ''))
    response = self.app.get('/auctions/{}/auction'.format(auction_id))
    auction_bids_data = response.json['data']['bids']
    # check bid participationUrl
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/auctions/{}/bids/{}?acc_token={}'.format(auction_id, bid_id, bid_token))
    self.assertIn('participationUrl', response.json['data'])
    # posting auction results
    self.app.authorization = ('Basic', ('auction', ''))
    response = self.app.get('/auctions/{}'.format(self.auction_id))
    self.assertEqual(response.status, '200 OK')
    auction = response.json['data']
    value_threshold = auction['value']['amount'] + auction['minimalStep']['amount']
    now = get_now()
    # All bids beat the threshold; each later bid gets an earlier date so
    # the winner ordering is deterministic.
    auction_result = {
        'bids': [
            {
                "id": b['id'],
                "date": (now - timedelta(seconds=i)).isoformat(),
                "value": {"amount": value_threshold * 2},
            }
            for i, b in enumerate(auction_bids_data)
        ]
    }
    response = self.app.post_json('/auctions/{}/auction'.format(self.auction_id), {'data': auction_result})
    # get awards
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/auctions/{}/awards?acc_token={}'.format(auction_id, owner_token))
    # get pending award
    award = [i for i in response.json['data'] if i['status'] == 'pending'][0]
    award_id = award['id']
    # Upload rejectProtocol (prerequisite for marking the award unsuccessful)
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.post('/auctions/{}/awards/{}/documents?acc_token={}'.format(
        self.auction_id, award_id, owner_token), upload_files=[('file', 'auction_protocol.pdf', 'content')])
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    doc_id = response.json["data"]['id']
    response = self.app.patch_json('/auctions/{}/awards/{}/documents/{}?acc_token={}'.format(self.auction_id, award_id, doc_id, owner_token), {"data": {
        "description": "rejection protocol",
        "documentType": 'rejectionProtocol'
    }})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json["data"]["documentType"], 'rejectionProtocol')
    self.assertEqual(response.json["data"]["author"], 'auction_owner')
    # set award as unsuccessful
    response = self.app.patch_json('/auctions/{}/awards/{}?acc_token={}'.format(auction_id, award_id, owner_token),
                                   {"data": {"status": "unsuccessful"}})
    # get awards
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/auctions/{}/awards?acc_token={}'.format(auction_id, owner_token))
    # get pending award: rejecting the first award must spawn a fresh one
    award2 = [i for i in response.json['data'] if i['status'] == 'pending'][0]
    award2_id = award2['id']
    self.assertNotEqual(award_id, award2_id)
    # create first award complaint
    # self.app.authorization = ('Basic', ('broker', ''))
    # response = self.app.post_json('/auctions/{}/awards/{}/complaints?acc_token={}'.format(auction_id, award_id, bid_token),
    #                               {'data': {'title': 'complaint title', 'description': 'complaint description', 'author': self.initial_organization, 'status': 'claim'}})
    # complaint_id = response.json['data']['id']
    # complaint_owner_token = response.json['access']['token']
    # # create first award complaint #2
    # response = self.app.post_json('/auctions/{}/awards/{}/complaints?acc_token={}'.format(auction_id, award_id, bid_token),
    #                               {'data': {'title': 'complaint title', 'description': 'complaint description', 'author': self.initial_organization}})
    # # answering claim
    # self.app.patch_json('/auctions/{}/awards/{}/complaints/{}?acc_token={}'.format(auction_id, award_id, complaint_id, owner_token), {"data": {
    #     "status": "answered",
    #     "resolutionType": "resolved",
    #     "resolution": "resolution text " * 2
    # }})
    # # satisfying resolution
    # self.app.patch_json('/auctions/{}/awards/{}/complaints/{}?acc_token={}'.format(auction_id, award_id, complaint_id, complaint_owner_token), {"data": {
    #     "satisfied": True,
    #     "status": "resolved"
    # }})
    # get awards
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/auctions/{}/awards?acc_token={}'.format(auction_id, owner_token))
    # get pending award
    award = [i for i in response.json['data'] if i['status'] == 'pending'][0]
    award_id = award['id']
    # Upload auction protocol (prerequisite for activating the award)
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.post('/auctions/{}/awards/{}/documents?acc_token={}'.format(
        self.auction_id, award_id, owner_token), upload_files=[('file', 'auction_protocol.pdf', 'content')])
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    doc_id = response.json["data"]['id']
    response = self.app.patch_json('/auctions/{}/awards/{}/documents/{}?acc_token={}'.format(self.auction_id, award_id, doc_id, owner_token), {"data": {
        "description": "auction protocol",
        "documentType": 'auctionProtocol'
    }})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json["data"]["documentType"], 'auctionProtocol')
    self.assertEqual(response.json["data"]["author"], 'auction_owner')
    # set award as active
    self.app.patch_json('/auctions/{}/awards/{}?acc_token={}'.format(auction_id, award_id, owner_token), {"data": {"status": "active"}})
    # get contract id
    response = self.app.get('/auctions/{}'.format(auction_id))
    contract_id = response.json['data']['contracts'][-1]['id']
    # create auction contract document for test
    response = self.app.post('/auctions/{}/contracts/{}/documents?acc_token={}'.format(auction_id, contract_id, owner_token), upload_files=[('file', 'name.doc', 'content')], status=201)
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    doc_id = response.json["data"]['id']
    self.assertIn(doc_id, response.headers['Location'])
    # after stand-still period
    self.app.authorization = ('Basic', ('chronograph', ''))
    self.set_status('complete', {'status': 'active.awarded'})
    # time travel: collapse each award's complaintPeriod directly in the DB
    # so contract actions are not blocked by the stand-still window
    auction = self.db.get(auction_id)
    for i in auction.get('awards', []):
        i['complaintPeriod']['endDate'] = i['complaintPeriod']['startDate']
    self.db.save(auction)
    # sign contract
    # Upload document
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.post_json(
        '/auctions/{}/contracts/{}/documents?acc_token={}'.format(self.auction_id, contract_id, owner_token),
        params={
            'data': {
                'documentType': 'contractSigned',
                'title': 'Signed contract',
                'format': 'application/msword',
                'url': self.generate_docservice_url(),
                'hash': 'md5:' + '0' * 32
            }
        })
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['title'], 'Signed contract')
    self.assertEqual(response.json['data']['documentType'], 'contractSigned')
    # Patch dateSigned field
    signature_date = get_now().isoformat()
    response = self.app.patch_json('/auctions/{}/contracts/{}?acc_token={}'.format(
        self.auction_id, contract_id, owner_token
    ), {"data": {"dateSigned": signature_date}})
    self.assertEqual(response.status, '200 OK')
    self.app.authorization = ('Basic', ('broker', ''))
    self.app.patch_json('/auctions/{}/contracts/{}?acc_token={}'.format(auction_id, contract_id, owner_token), {"data": {"status": "active"}})
    # check status: activating the contract completes the auction
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/auctions/{}'.format(auction_id))
    self.assertEqual(response.json['data']['status'], 'complete')
    # contract-document operations are forbidden once the auction is complete
    response = self.app.post('/auctions/{}/contracts/{}/documents?acc_token={}'.format(auction_id, contract_id, owner_token), upload_files=[('file', 'name.doc', 'content')], status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], "Can't add document in current (complete) auction status")
    response = self.app.patch_json('/auctions/{}/contracts/{}/documents/{}?acc_token={}'.format(auction_id, contract_id, doc_id, owner_token), {"data": {"description": "document description"}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], "Can't update document in current (complete) auction status")
    response = self.app.put('/auctions/{}/contracts/{}/documents/{}?acc_token={}'.format(auction_id, contract_id, doc_id, owner_token), upload_files=[('file', 'name.doc', 'content3')], status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], "Can't update document in current (complete) auction status")
def auctionUrl_in_active_auction(self):
    """auctionUrl must appear on the auction once chronograph switches it
    into the active.auction state."""
    self.app.authorization = ('Basic', ('broker', ''))
    # listing starts out empty
    response = self.app.get('/auctions')
    self.assertEqual(response.json['data'], [])
    # create auction
    response = self.app.post_json('/auctions', {"data": self.initial_data})
    auction_id = self.auction_id = response.json['data']['id']
    owner_token = response.json['access']['token']
    # switch to active.tendering with a future auctionPeriod
    response = self.set_status(
        'active.tendering',
        {"auctionPeriod": {"startDate": (get_now() + timedelta(days=10)).isoformat()}})
    self.assertIn("auctionPeriod", response.json['data'])
    # create a bid; financial auctions also require the 'eligible' flag
    self.app.authorization = ('Basic', ('broker', ''))
    bid_payload = {'tenderers': [self.initial_organization], 'qualified': True}
    if self.initial_organization == self.test_financial_organization:
        bid_payload['eligible'] = True
    response = self.app.post_json('/auctions/{}/bids'.format(auction_id),
                                  {'data': bid_payload})
    # switch to active.qualification
    self.set_status('active.auction', {'status': 'active.tendering'})
    # a chronograph no-op patch triggers the state switch and URL generation
    self.app.authorization = ('Basic', ('chronograph', ''))
    response = self.app.patch_json('/auctions/{}'.format(auction_id),
                                   {"data": {"id": auction_id}})
    self.assertIn('auctionUrl', response.json['data'])
    self.assertIn(auction_id, response.json['data']['auctionUrl'])
def suspended_auction(self):
    """Exercise the administrator-only ``suspended`` flag at every stage.

    At each stage of the auction lifecycle (created, tendering, auction
    results, awarding, contracting) the test checks that:
    * brokers cannot set or clear ``suspended`` (403),
    * administrators can toggle it,
    * while suspended, ``next_check`` disappears (chronograph skips the
      auction) and reappears when suspension is lifted.
    The full flow still ends with a signed contract and ``complete`` status.
    """
    self.app.authorization = ('Basic', ('broker', ''))
    # empty auctions listing
    response = self.app.get('/auctions')
    self.assertEqual(response.json['data'], [])
    # create auction
    auction_data = deepcopy(self.initial_data)
    auction_data['suspended'] = True
    response = self.app.post_json('/auctions',
                                  {"data": auction_data})
    auction_id = self.auction_id = response.json['data']['id']
    owner_token = response.json['access']['token']
    # 'suspended' supplied on creation is silently ignored
    self.assertNotIn('suspended', response.json['data'])
    # brokers may not toggle suspension
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": True}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    authorization = self.app.authorization
    self.app.authorization = ('Basic', ('administrator', ''))
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": True}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['suspended'], True)
    # suspension removes next_check, so the chronograph leaves it alone
    self.assertNotIn('next_check', response.json['data'])
    self.app.authorization = authorization
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": False}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.app.authorization = ('Basic', ('administrator', ''))
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": False}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['suspended'], False)
    self.assertIn('next_check', response.json['data'])
    self.app.authorization = authorization
    # switch to active.tendering
    self.set_status('active.tendering')
    # create bid; financial auctions additionally require the 'eligible' flag
    self.app.authorization = ('Basic', ('broker', ''))
    if self.initial_organization == self.test_financial_organization:
        response = self.app.post_json('/auctions/{}/bids'.format(auction_id),
                                      {'data': {'tenderers': [self.initial_organization], "value": {"amount": 450}, 'qualified': True, 'eligible': True}})
    else:
        response = self.app.post_json('/auctions/{}/bids'.format(auction_id),
                                      {'data': {'tenderers': [self.initial_organization], "value": {"amount": 450}, 'qualified': True}})
    bid_id = response.json['data']['id']
    bid_token = response.json['access']['token']
    # create second bid
    self.app.authorization = ('Basic', ('broker', ''))
    if self.initial_organization == self.test_financial_organization:
        response = self.app.post_json('/auctions/{}/bids'.format(auction_id),
                                      {'data': {'tenderers': [self.initial_organization], "value": {"amount": 450}, 'qualified': True, 'eligible': True}})
    else:
        response = self.app.post_json('/auctions/{}/bids'.format(auction_id),
                                      {'data': {'tenderers': [self.initial_organization], "value": {"amount": 450}, 'qualified': True}})
    # suspend/resume during tendering
    authorization = self.app.authorization
    self.app.authorization = ('Basic', ('administrator', ''))
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": True}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['suspended'], True)
    self.assertNotIn('next_check', response.json['data'])
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": False}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['suspended'], False)
    self.assertIn('next_check', response.json['data'])
    self.app.authorization = authorization
    # switch to active.auction
    self.set_status('active.auction')
    # get auction info
    self.app.authorization = ('Basic', ('auction', ''))
    response = self.app.get('/auctions/{}/auction'.format(auction_id))
    auction_bids_data = response.json['data']['bids']
    # check bid participationUrl
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/auctions/{}/bids/{}?acc_token={}'.format(auction_id, bid_id, bid_token))
    self.assertIn('participationUrl', response.json['data'])
    # posting auction results
    self.app.authorization = ('Basic', ('auction', ''))
    response = self.app.get('/auctions/{}'.format(self.auction_id))
    self.assertEqual(response.status, '200 OK')
    auction = response.json['data']
    value_threshold = auction['value']['amount'] + auction['minimalStep']['amount']
    now = get_now()
    # all bids above threshold; later bids get earlier dates for a
    # deterministic winner ordering
    auction_result = {
        'bids': [
            {
                "id": b['id'],
                "date": (now - timedelta(seconds=i)).isoformat(),
                "value": {"amount": value_threshold * 2},
            }
            for i, b in enumerate(auction_bids_data)
        ]
    }
    response = self.app.post_json('/auctions/{}/auction'.format(self.auction_id), {'data': auction_result})
    # get awards
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/auctions/{}/awards?acc_token={}'.format(auction_id, owner_token))
    # get pending award
    award_id = [i['id'] for i in response.json['data'] if i['status'] == 'pending'][0]
    # suspend/resume during awarding
    authorization = self.app.authorization
    self.app.authorization = ('Basic', ('administrator', ''))
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": True}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['suspended'], True)
    self.assertNotIn('next_check', response.json['data'])
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": False}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['suspended'], False)
    self.app.authorization = authorization
    # set award as unsuccessful (requires an uploaded rejection protocol)
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.post_json(
        '/auctions/{}/awards/{}/documents?acc_token={}'.format(self.auction_id, award_id, owner_token),
        params={
            'data': {
                'documentType': 'rejectionProtocol',
                'title': 'rejection protocol',
                'format': 'application/msword',
                'url': self.generate_docservice_url(),
                'hash': 'md5:' + '0' * 32
            }
        })
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['title'], 'rejection protocol')
    self.assertEqual(response.json['data']['documentType'], 'rejectionProtocol')
    response = self.app.patch_json('/auctions/{}/awards/{}?acc_token={}'.format(auction_id, award_id, owner_token),
                                   {"data": {"status": "unsuccessful"}})
    self.assertEqual(response.json['data']['status'], 'unsuccessful')
    # get awards: rejecting the first award spawns a fresh pending one
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/auctions/{}/awards?acc_token={}'.format(auction_id, owner_token))
    self.assertEqual(len(response.json['data']), 2)
    self.assertEqual(response.json['data'][0]['status'], 'unsuccessful')
    # get pending award
    award2_id = [i['id'] for i in response.json['data'] if i['status'] == 'pending'][0]
    self.assertNotEqual(award_id, award2_id)
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/auctions/{}/awards?acc_token={}'.format(auction_id, owner_token))
    # get pending award
    award_id = [i['id'] for i in response.json['data'] if i['status'] == 'pending'][0]
    # upload auction protocol (prerequisite for activating the award)
    response = self.app.post('/auctions/{}/awards/{}/documents?acc_token={}'.format(
        self.auction_id, award_id, owner_token), upload_files=[('file', 'auction_protocol.pdf', 'content')])
    doc_id = response.json["data"]['id']
    response = self.app.patch_json('/auctions/{}/awards/{}/documents/{}?acc_token={}'.format(auction_id, award_id, doc_id, owner_token), {"data": {"documentType": 'auctionProtocol'}})
    # set award as active
    self.app.patch_json('/auctions/{}/awards/{}?acc_token={}'.format(auction_id, award_id, owner_token), {"data": {"status": "active"}})
    # suspend/resume after award activation
    authorization = self.app.authorization
    self.app.authorization = ('Basic', ('administrator', ''))
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": True}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['suspended'], True)
    self.assertNotIn('next_check', response.json['data'])
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": False}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['suspended'], False)
    self.app.authorization = authorization
    # re-patching an already active award is rejected
    response = self.app.patch_json(
        '/auctions/{}/awards/{}?acc_token={}'.format(auction_id, award_id, owner_token),
        {"data": {"status": "active"}},
        status=403
    )
    self.assertEqual(response.json['errors'][0]['description'], "Can\'t update award in current (active) status")
    # get contract id
    response = self.app.get('/auctions/{}'.format(auction_id))
    contract_id = response.json['data']['contracts'][-1]['id']
    # suspend/resume during contracting
    authorization = self.app.authorization
    self.app.authorization = ('Basic', ('administrator', ''))
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": True}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['suspended'], True)
    self.assertNotIn('next_check', response.json['data'])
    response = self.app.patch_json('/auctions/{}'.format(auction_id), {"data": {"suspended": False}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['suspended'], False)
    self.app.authorization = authorization
    # create auction contract document for test
    response = self.app.post('/auctions/{}/contracts/{}/documents?acc_token={}'.format(auction_id, contract_id, owner_token), upload_files=[('file', 'name.doc', 'content')], status=201)
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    doc_id = response.json["data"]['id']
    self.assertIn(doc_id, response.headers['Location'])
    # after stand-still period
    self.app.authorization = ('Basic', ('chronograph', ''))
    self.set_status('complete', {'status': 'active.awarded'})
    # time travel: collapse each award's complaintPeriod directly in the DB
    auction = self.db.get(auction_id)
    for i in auction.get('awards', []):
        i['complaintPeriod']['endDate'] = i['complaintPeriod']['startDate']
    self.db.save(auction)
    # sign contract
    self.app.authorization = ('Basic', ('broker', ''))
    # Upload document
    response = self.app.post_json(
        '/auctions/{}/contracts/{}/documents?acc_token={}'.format(self.auction_id, contract_id, owner_token),
        params={
            'data': {
                'documentType': 'contractSigned',
                'title': 'Signed contract',
                'format': 'application/msword',
                'url': self.generate_docservice_url(),
                'hash': 'md5:' + '0' * 32
            }
        })
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['title'], 'Signed contract')
    self.assertEqual(response.json['data']['documentType'], 'contractSigned')
    # Patch dateSigned field
    signature_date = get_now().isoformat()
    response = self.app.patch_json('/auctions/{}/contracts/{}?acc_token={}'.format(
        self.auction_id, contract_id, owner_token
    ), {"data": {"dateSigned": signature_date}})
    self.assertEqual(response.status, '200 OK')
    response = self.app.patch_json(
        '/auctions/{}/contracts/{}?acc_token={}'.format(auction_id, contract_id, owner_token),
        {"data": {"status": "active"}}
    )
    self.assertEqual(response.json['data']['status'], 'active')
    # check status: activating the contract completes the auction
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.get('/auctions/{}'.format(auction_id))
    self.assertEqual(response.json['data']['status'], 'complete')
| 52.564978
| 205
| 0.66314
| 5,379
| 47,729
| 5.764826
| 0.065626
| 0.093844
| 0.134251
| 0.067916
| 0.846625
| 0.813474
| 0.788449
| 0.773034
| 0.750395
| 0.736754
| 0
| 0.012072
| 0.159987
| 47,729
| 907
| 206
| 52.622933
| 0.761355
| 0.136395
| 0
| 0.684615
| 0
| 0
| 0.265951
| 0.047243
| 0
| 0
| 0
| 0
| 0.313846
| 1
| 0.016923
| false
| 0
| 0.013846
| 0
| 0.030769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d4c24c3f3a907177d3c449a69aebdecb18608d89
| 38
|
py
|
Python
|
tests/magicbot_test.py
|
frc1418/2019-robot
|
2eaaeaa7570d8cf77eb656aee88d093345bc4bba
|
[
"MIT"
] | 1
|
2018-12-16T12:50:20.000Z
|
2018-12-16T12:50:20.000Z
|
tests/magicbot_test.py
|
frc1418/2018-robot
|
7415c14c4e4a64432a07b77292fd6e332606103e
|
[
"MIT"
] | 4
|
2019-01-06T22:16:05.000Z
|
2019-01-20T03:11:16.000Z
|
tests/magicbot_test.py
|
frc1418/2019-robot
|
2eaaeaa7570d8cf77eb656aee88d093345bc4bba
|
[
"MIT"
] | 2
|
2018-12-04T20:34:40.000Z
|
2020-01-21T20:27:38.000Z
|
from magicbot.magicbot_tests import *
| 19
| 37
| 0.842105
| 5
| 38
| 6.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d4ce03236373c82d77689a2a5cb5896adc6c9a93
| 21,930
|
py
|
Python
|
src/sage/modular/modform_hecketriangle/graded_ring.py
|
defeo/sage
|
d8822036a9843bd4d75845024072515ede56bcb9
|
[
"BSL-1.0"
] | 2
|
2018-06-30T01:37:35.000Z
|
2018-06-30T01:37:39.000Z
|
src/sage/modular/modform_hecketriangle/graded_ring.py
|
boothby/sage
|
1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f
|
[
"BSL-1.0"
] | null | null | null |
src/sage/modular/modform_hecketriangle/graded_ring.py
|
boothby/sage
|
1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f
|
[
"BSL-1.0"
] | null | null | null |
r"""
Graded rings of modular forms for Hecke triangle groups
AUTHORS:
- Jonas Jermann (2013): initial version
"""
from __future__ import absolute_import
#*****************************************************************************
# Copyright (C) 2013-2014 Jonas Jermann <jjermann2@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.rings.all import ZZ, QQ, infinity
from sage.rings.ring import CommutativeAlgebra
from sage.categories.all import CommutativeAlgebras
from sage.structure.unique_representation import UniqueRepresentation
from sage.misc.cachefunc import cached_method
from .hecke_triangle_groups import HeckeTriangleGroup
from .abstract_ring import FormsRing_abstract
def canonical_parameters(group, base_ring, red_hom, n=None):
    r"""
    Return a canonical version of the parameters.

    EXAMPLES::

        sage: from sage.modular.modform_hecketriangle.graded_ring import canonical_parameters
        sage: canonical_parameters(4, ZZ, 1)
        (Hecke triangle group for n = 4, Integer Ring, True, 4)

        sage: canonical_parameters(infinity, RR, 0)
        (Hecke triangle group for n = +Infinity, Real Field with 53 bits of precision, False, +Infinity)
    """
    # An explicit ``n`` takes precedence over ``group``.
    if n is not None:
        group = n

    if group == infinity:
        group = HeckeTriangleGroup(infinity)
    else:
        try:
            group = HeckeTriangleGroup(ZZ(group))
        except TypeError:
            # ``group`` could not be coerced to an integer, so it is taken
            # to already be a Hecke triangle group: rebuild from its ``n``
            # to get the canonical (cached) instance.
            group = HeckeTriangleGroup(group.n())

    # Normalize to plain types so that equivalent inputs compare equal
    # (important for UniqueRepresentation caching in the ring classes).
    red_hom = bool(red_hom)
    n = group.n()

    return (group, base_ring, red_hom, n)
class QuasiMeromorphicModularFormsRing(FormsRing_abstract, CommutativeAlgebra, UniqueRepresentation):
    r"""
    Graded ring of (Hecke) quasi meromorphic modular forms
    for the given group and base ring.
    """

    @staticmethod
    def __classcall__(cls, group = HeckeTriangleGroup(3), base_ring = ZZ, red_hom = False, n=None):
        r"""
        Return a (cached) instance with canonical parameters.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.graded_ring import (canonical_parameters, QuasiMeromorphicModularFormsRing)
            sage: (group, base_ring, red_hom, n) = canonical_parameters(4, ZZ, 1)
            sage: QuasiMeromorphicModularFormsRing(4, ZZ, 1) == QuasiMeromorphicModularFormsRing(group, base_ring, red_hom, n)
            True
        """
        # Canonicalize first so that equivalent argument spellings resolve
        # to the same cached UniqueRepresentation instance.
        (group, base_ring, red_hom, n) = canonical_parameters(group, base_ring, red_hom, n)
        # NOTE(review): super() is anchored at FormsRing_abstract, so the
        # __classcall__ lookup starts after it in the MRO — presumably to
        # reach UniqueRepresentation's caching __classcall__; confirm.
        return super(FormsRing_abstract,cls).__classcall__(cls, group=group, base_ring=base_ring, red_hom=red_hom, n=n)

    def __init__(self, group, base_ring, red_hom, n):
        r"""
        Return the graded ring of (Hecke) quasi meromorphic modular forms
        for the given ``group`` and ``base_ring``.

        INPUT:

        - ``group`` -- The Hecke triangle group (default: ``HeckeTriangleGroup(3)``)

        - ``base_ring`` -- The base_ring (default: ``ZZ``).

        - ``red_hom`` -- If True then results of binary operations are considered
          homogeneous whenever it makes sense (default: False).
          This is mainly used by the spaces of homogeneous elements.

        OUTPUT:

        The corresponding graded ring of (Hecke) quasi meromorphic modular forms
        for the given ``group`` and ``base_ring``.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.graded_ring import QuasiMeromorphicModularFormsRing
            sage: MR = QuasiMeromorphicModularFormsRing(4, ZZ, 1)
            sage: MR
            QuasiMeromorphicModularFormsRing(n=4) over Integer Ring
            sage: MR.analytic_type()
            quasi meromorphic modular
            sage: MR.category()
            Category of commutative algebras over Fraction Field of Univariate Polynomial Ring in d over Integer Ring

            sage: QuasiMeromorphicModularFormsRing(n=infinity)
            QuasiMeromorphicModularFormsRing(n=+Infinity) over Integer Ring
        """
        FormsRing_abstract.__init__(self, group=group, base_ring=base_ring, red_hom=red_hom, n=n)
        # The algebra lives over the coefficient ring, not base_ring itself.
        CommutativeAlgebra.__init__(self, base_ring=self.coeff_ring(), category=CommutativeAlgebras(self.coeff_ring()))
        # Analytic type tags: "quasi" + "mero"(morphic).
        self._analytic_type = self.AT(["quasi", "mero"])
class QuasiWeakModularFormsRing(FormsRing_abstract, CommutativeAlgebra, UniqueRepresentation):
    r"""
    Graded ring of (Hecke) quasi weakly holomorphic modular forms
    for the given group and base ring.
    """

    @staticmethod
    def __classcall__(cls, group = HeckeTriangleGroup(3), base_ring = ZZ, red_hom = False, n=None):
        r"""
        Return a (cached) instance with canonical parameters.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.graded_ring import (canonical_parameters, QuasiWeakModularFormsRing)
            sage: (group, base_ring, red_hom, n) = canonical_parameters(5, CC, 0)
            sage: QuasiWeakModularFormsRing(5, CC, 0) == QuasiWeakModularFormsRing(group, base_ring, red_hom, n)
            True
        """
        # Canonicalize so equivalent argument spellings share one cached
        # UniqueRepresentation instance.
        (group, base_ring, red_hom, n) = canonical_parameters(group, base_ring, red_hom, n)
        # NOTE(review): super() anchored at FormsRing_abstract skips it in
        # the MRO — presumably to reach UniqueRepresentation's caching
        # __classcall__; confirm.
        return super(FormsRing_abstract,cls).__classcall__(cls, group=group, base_ring=base_ring, red_hom=red_hom, n=n)

    def __init__(self, group, base_ring, red_hom, n):
        r"""
        Return the graded ring of (Hecke) quasi weakly holomorphic modular forms
        for the given ``group`` and ``base_ring``.

        INPUT:

        - ``group`` -- The Hecke triangle group (default: ``HeckeTriangleGroup(3)``)

        - ``base_ring`` -- The base_ring (default: ``ZZ``).

        - ``red_hom`` -- If True then results of binary operations are considered
          homogeneous whenever it makes sense (default: False).
          This is mainly used by the spaces of homogeneous elements.

        OUTPUT:

        The corresponding graded ring of (Hecke) quasi weakly holomorphic modular forms
        for the given ``group`` and ``base_ring``.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.graded_ring import QuasiWeakModularFormsRing
            sage: MR = QuasiWeakModularFormsRing(5, CC, 0)
            sage: MR
            QuasiWeakModularFormsRing(n=5) over Complex Field with 53 bits of precision
            sage: MR.analytic_type()
            quasi weakly holomorphic modular
            sage: MR.category()
            Category of commutative algebras over Fraction Field of Univariate Polynomial Ring in d over Complex Field with 53 bits of precision
        """
        FormsRing_abstract.__init__(self, group=group, base_ring=base_ring, red_hom=red_hom, n=n)
        # The algebra lives over the coefficient ring, not base_ring itself.
        CommutativeAlgebra.__init__(self, base_ring=self.coeff_ring(), category=CommutativeAlgebras(self.coeff_ring()))
        # Analytic type tags: "quasi" + "weak"(ly holomorphic).
        self._analytic_type = self.AT(["quasi", "weak"])
class QuasiModularFormsRing(FormsRing_abstract, CommutativeAlgebra, UniqueRepresentation):
    r"""
    Graded ring of (Hecke) quasi modular forms
    for the given group and base ring.
    """

    @staticmethod
    def __classcall__(cls, group = HeckeTriangleGroup(3), base_ring = ZZ, red_hom = False, n=None):
        r"""
        Return a (cached) instance with canonical parameters.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.graded_ring import (canonical_parameters, QuasiModularFormsRing)
            sage: (group, base_ring, red_hom, n) = canonical_parameters(6, ZZ, True)
            sage: QuasiModularFormsRing(6, ZZ, True) == QuasiModularFormsRing(group, base_ring, red_hom, n)
            True
        """
        # Canonicalize so equivalent argument spellings share one cached
        # UniqueRepresentation instance.
        (group, base_ring, red_hom, n) = canonical_parameters(group, base_ring, red_hom, n)
        # NOTE(review): super() anchored at FormsRing_abstract skips it in
        # the MRO — presumably to reach UniqueRepresentation's caching
        # __classcall__; confirm.
        return super(FormsRing_abstract,cls).__classcall__(cls, group=group, base_ring=base_ring, red_hom=red_hom, n=n)

    def __init__(self, group, base_ring, red_hom, n):
        r"""
        Return the graded ring of (Hecke) quasi modular forms
        for the given ``group`` and ``base_ring``.

        INPUT:

        - ``group`` -- The Hecke triangle group (default: ``HeckeTriangleGroup(3)``)

        - ``base_ring`` -- The base_ring (default: ``ZZ``).

        - ``red_hom`` -- If True then results of binary operations are considered
          homogeneous whenever it makes sense (default: False).
          This is mainly used by the spaces of homogeneous elements.

        OUTPUT:

        The corresponding graded ring of (Hecke) quasi modular forms
        for the given ``group`` and ``base_ring``.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.graded_ring import QuasiModularFormsRing
            sage: MR = QuasiModularFormsRing(6, ZZ, True)
            sage: MR
            QuasiModularFormsRing(n=6) over Integer Ring
            sage: MR.analytic_type()
            quasi modular
            sage: MR.category()
            Category of commutative algebras over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
        """
        FormsRing_abstract.__init__(self, group=group, base_ring=base_ring, red_hom=red_hom, n=n)
        # The algebra lives over the coefficient ring, not base_ring itself.
        CommutativeAlgebra.__init__(self, base_ring=self.coeff_ring(), category=CommutativeAlgebras(self.coeff_ring()))
        # Analytic type tags: "quasi" + "holo"(morphic).
        self._analytic_type = self.AT(["quasi", "holo"])
class QuasiCuspFormsRing(FormsRing_abstract, CommutativeAlgebra, UniqueRepresentation):
    r"""
    Graded ring of (Hecke) quasi cusp forms
    for the given group and base ring.
    """

    @staticmethod
    def __classcall__(cls, group=HeckeTriangleGroup(3), base_ring=ZZ, red_hom=False, n=None):
        r"""
        Return a (cached) instance with canonical parameters.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.graded_ring import (canonical_parameters, QuasiCuspFormsRing)
            sage: (group, base_ring, red_hom, n) = canonical_parameters(7, ZZ, 1)
            sage: QuasiCuspFormsRing(7, ZZ, 1) == QuasiCuspFormsRing(group, base_ring, red_hom, n)
            True
        """
        # Normalize the arguments first so equivalent inputs share one
        # cached (unique) parent.
        group, base_ring, red_hom, n = canonical_parameters(group, base_ring, red_hom, n)
        return super(FormsRing_abstract, cls).__classcall__(cls, group=group, base_ring=base_ring, red_hom=red_hom, n=n)

    def __init__(self, group, base_ring, red_hom, n):
        r"""
        Initialize the graded ring of (Hecke) quasi cusp forms
        for the given ``group`` and ``base_ring``.

        INPUT:

        - ``group`` -- The Hecke triangle group (default: ``HeckeTriangleGroup(3)``)

        - ``base_ring`` -- The base_ring (default: ``ZZ``).

        - ``red_hom`` -- If True then results of binary operations are considered
          homogeneous whenever it makes sense (default: False).
          This is mainly used by the spaces of homogeneous elements.

        OUTPUT:

        The corresponding graded ring of (Hecke) quasi cusp forms
        for the given ``group`` and ``base_ring``.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.graded_ring import QuasiCuspFormsRing
            sage: MR = QuasiCuspFormsRing(7, ZZ, 1)
            sage: MR
            QuasiCuspFormsRing(n=7) over Integer Ring
            sage: MR.analytic_type()
            quasi cuspidal
            sage: MR.category()
            Category of commutative algebras over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
        """
        FormsRing_abstract.__init__(self, group=group, base_ring=base_ring, red_hom=red_hom, n=n)
        # The algebra lives over the coefficient ring, not over ``base_ring``.
        coeff_ring = self.coeff_ring()
        CommutativeAlgebra.__init__(self, base_ring=coeff_ring, category=CommutativeAlgebras(coeff_ring))
        self._analytic_type = self.AT(["quasi", "cusp"])
class MeromorphicModularFormsRing(FormsRing_abstract, CommutativeAlgebra, UniqueRepresentation):
    r"""
    Graded ring of (Hecke) meromorphic modular forms
    for the given group and base ring.
    """

    @staticmethod
    def __classcall__(cls, group=HeckeTriangleGroup(3), base_ring=ZZ, red_hom=False, n=None):
        r"""
        Return a (cached) instance with canonical parameters.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.graded_ring import (canonical_parameters, MeromorphicModularFormsRing)
            sage: (group, base_ring, red_hom, n) = canonical_parameters(4, ZZ, 1)
            sage: MeromorphicModularFormsRing(4, ZZ, 1) == MeromorphicModularFormsRing(group, base_ring, red_hom, n)
            True
        """
        # Normalize the arguments first so equivalent inputs share one
        # cached (unique) parent.
        group, base_ring, red_hom, n = canonical_parameters(group, base_ring, red_hom, n)
        return super(FormsRing_abstract, cls).__classcall__(cls, group=group, base_ring=base_ring, red_hom=red_hom, n=n)

    def __init__(self, group, base_ring, red_hom, n):
        r"""
        Initialize the graded ring of (Hecke) meromorphic modular forms
        for the given ``group`` and ``base_ring``.

        INPUT:

        - ``group`` -- The Hecke triangle group (default: ``HeckeTriangleGroup(3)``)

        - ``base_ring`` -- The base_ring (default: ``ZZ``).

        - ``red_hom`` -- If True then results of binary operations are considered
          homogeneous whenever it makes sense (default: False).
          This is mainly used by the spaces of homogeneous elements.

        OUTPUT:

        The corresponding graded ring of (Hecke) meromorphic modular forms
        for the given ``group`` and ``base_ring``.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.graded_ring import MeromorphicModularFormsRing
            sage: MR = MeromorphicModularFormsRing(4, ZZ, 1)
            sage: MR
            MeromorphicModularFormsRing(n=4) over Integer Ring
            sage: MR.analytic_type()
            meromorphic modular
            sage: MR.category()
            Category of commutative algebras over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
        """
        FormsRing_abstract.__init__(self, group=group, base_ring=base_ring, red_hom=red_hom, n=n)
        # The algebra lives over the coefficient ring, not over ``base_ring``.
        coeff_ring = self.coeff_ring()
        CommutativeAlgebra.__init__(self, base_ring=coeff_ring, category=CommutativeAlgebras(coeff_ring))
        self._analytic_type = self.AT(["mero"])
class WeakModularFormsRing(FormsRing_abstract, CommutativeAlgebra, UniqueRepresentation):
    r"""
    Graded ring of (Hecke) weakly holomorphic modular forms
    for the given group and base ring.
    """

    @staticmethod
    def __classcall__(cls, group=HeckeTriangleGroup(3), base_ring=ZZ, red_hom=False, n=None):
        r"""
        Return a (cached) instance with canonical parameters.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.graded_ring import (canonical_parameters, WeakModularFormsRing)
            sage: (group, base_ring, red_hom, n) = canonical_parameters(5, ZZ, 0)
            sage: WeakModularFormsRing(5, ZZ, 0) == WeakModularFormsRing(group, base_ring, red_hom, n)
            True
        """
        # Normalize the arguments first so equivalent inputs share one
        # cached (unique) parent.
        group, base_ring, red_hom, n = canonical_parameters(group, base_ring, red_hom, n)
        return super(FormsRing_abstract, cls).__classcall__(cls, group=group, base_ring=base_ring, red_hom=red_hom, n=n)

    def __init__(self, group, base_ring, red_hom, n):
        r"""
        Initialize the graded ring of (Hecke) weakly holomorphic modular forms
        for the given ``group`` and ``base_ring``.

        INPUT:

        - ``group`` -- The Hecke triangle group (default: ``HeckeTriangleGroup(3)``)

        - ``base_ring`` -- The base_ring (default: ``ZZ``).

        - ``red_hom`` -- If True then results of binary operations are considered
          homogeneous whenever it makes sense (default: False).
          This is mainly used by the spaces of homogeneous elements.

        OUTPUT:

        The corresponding graded ring of (Hecke) weakly holomorphic modular forms
        for the given ``group`` and ``base_ring``.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.graded_ring import WeakModularFormsRing
            sage: MR = WeakModularFormsRing(5, ZZ, 0)
            sage: MR
            WeakModularFormsRing(n=5) over Integer Ring
            sage: MR.analytic_type()
            weakly holomorphic modular
            sage: MR.category()
            Category of commutative algebras over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
        """
        FormsRing_abstract.__init__(self, group=group, base_ring=base_ring, red_hom=red_hom, n=n)
        # The algebra lives over the coefficient ring, not over ``base_ring``.
        coeff_ring = self.coeff_ring()
        CommutativeAlgebra.__init__(self, base_ring=coeff_ring, category=CommutativeAlgebras(coeff_ring))
        self._analytic_type = self.AT(["weak"])
class ModularFormsRing(FormsRing_abstract, CommutativeAlgebra, UniqueRepresentation):
    r"""
    Graded ring of (Hecke) modular forms
    for the given group and base ring.
    """

    @staticmethod
    def __classcall__(cls, group=HeckeTriangleGroup(3), base_ring=ZZ, red_hom=False, n=None):
        r"""
        Return a (cached) instance with canonical parameters.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.graded_ring import ModularFormsRing
            sage: ModularFormsRing(3, ZZ, 0) == ModularFormsRing()
            True
        """
        # Normalize the arguments first so equivalent inputs share one
        # cached (unique) parent.
        group, base_ring, red_hom, n = canonical_parameters(group, base_ring, red_hom, n)
        return super(FormsRing_abstract, cls).__classcall__(cls, group=group, base_ring=base_ring, red_hom=red_hom, n=n)

    def __init__(self, group, base_ring, red_hom, n):
        r"""
        Initialize the graded ring of (Hecke) modular forms
        for the given ``group`` and ``base_ring``.

        INPUT:

        - ``group`` -- The Hecke triangle group (default: ``HeckeTriangleGroup(3)``)

        - ``base_ring`` -- The base_ring (default: ``ZZ``).

        - ``red_hom`` -- If True then results of binary operations are considered
          homogeneous whenever it makes sense (default: False).
          This is mainly used by the spaces of homogeneous elements.

        OUTPUT:

        The corresponding graded ring of (Hecke) modular forms
        for the given ``group`` and ``base_ring``.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.graded_ring import ModularFormsRing
            sage: MR = ModularFormsRing()
            sage: MR
            ModularFormsRing(n=3) over Integer Ring
            sage: MR.analytic_type()
            modular
            sage: MR.category()
            Category of commutative algebras over Fraction Field of Univariate Polynomial Ring in d over Integer Ring
        """
        FormsRing_abstract.__init__(self, group=group, base_ring=base_ring, red_hom=red_hom, n=n)
        # The algebra lives over the coefficient ring, not over ``base_ring``.
        coeff_ring = self.coeff_ring()
        CommutativeAlgebra.__init__(self, base_ring=coeff_ring, category=CommutativeAlgebras(coeff_ring))
        self._analytic_type = self.AT(["holo"])
class CuspFormsRing(FormsRing_abstract, CommutativeAlgebra, UniqueRepresentation):
    r"""
    Graded ring of (Hecke) cusp forms
    for the given group and base ring.
    """

    @staticmethod
    def __classcall__(cls, group=HeckeTriangleGroup(3), base_ring=ZZ, red_hom=False, n=None):
        r"""
        Return a (cached) instance with canonical parameters.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.graded_ring import (canonical_parameters, CuspFormsRing)
            sage: (group, base_ring, red_hom, n) = canonical_parameters(5, CC, True)
            sage: CuspFormsRing(5, CC, True) == CuspFormsRing(group, base_ring, red_hom, n)
            True
        """
        # Normalize the arguments first so equivalent inputs share one
        # cached (unique) parent.
        group, base_ring, red_hom, n = canonical_parameters(group, base_ring, red_hom, n)
        return super(FormsRing_abstract, cls).__classcall__(cls, group=group, base_ring=base_ring, red_hom=red_hom, n=n)

    def __init__(self, group, base_ring, red_hom, n):
        r"""
        Initialize the graded ring of (Hecke) cusp forms
        for the given ``group`` and ``base_ring``.

        INPUT:

        - ``group`` -- The Hecke triangle group (default: ``HeckeTriangleGroup(3)``)

        - ``base_ring`` -- The base_ring (default: ``ZZ``).

        - ``red_hom`` -- If True then results of binary operations are considered
          homogeneous whenever it makes sense (default: False).
          This is mainly used by the spaces of homogeneous elements.

        OUTPUT:

        The corresponding graded ring of (Hecke) cusp forms
        for the given ``group`` and ``base_ring``.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.graded_ring import CuspFormsRing
            sage: MR = CuspFormsRing(5, CC, True)
            sage: MR
            CuspFormsRing(n=5) over Complex Field with 53 bits of precision
            sage: MR.analytic_type()
            cuspidal
            sage: MR.category()
            Category of commutative algebras over Fraction Field of Univariate Polynomial Ring in d over Complex Field with 53 bits of precision
            sage: CuspFormsRing(n=infinity, base_ring=CC, red_hom=True)
            CuspFormsRing(n=+Infinity) over Complex Field with 53 bits of precision
        """
        FormsRing_abstract.__init__(self, group=group, base_ring=base_ring, red_hom=red_hom, n=n)
        # The algebra lives over the coefficient ring, not over ``base_ring``.
        coeff_ring = self.coeff_ring()
        CommutativeAlgebra.__init__(self, base_ring=coeff_ring, category=CommutativeAlgebras(coeff_ring))
        self._analytic_type = self.AT(["cusp"])
| 41.455577
| 144
| 0.648199
| 2,555
| 21,930
| 5.365166
| 0.068102
| 0.075285
| 0.029107
| 0.057193
| 0.817406
| 0.809819
| 0.806463
| 0.801649
| 0.792895
| 0.764882
| 0
| 0.005679
| 0.253215
| 21,930
| 528
| 145
| 41.534091
| 0.831349
| 0.580985
| 0
| 0.470588
| 0
| 0
| 0.007372
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.067227
| 0
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d4dbdd60a54934d592e65c4c76467cd41dcd6b5c
| 8,327
|
py
|
Python
|
backpack/extensions/secondorder/hbp/conv2d.py
|
rioyokotalab/backpack
|
000a1dbe7b2d6e5b309151df800edf866b9b514c
|
[
"MIT"
] | null | null | null |
backpack/extensions/secondorder/hbp/conv2d.py
|
rioyokotalab/backpack
|
000a1dbe7b2d6e5b309151df800edf866b9b514c
|
[
"MIT"
] | null | null | null |
backpack/extensions/secondorder/hbp/conv2d.py
|
rioyokotalab/backpack
|
000a1dbe7b2d6e5b309151df800edf866b9b514c
|
[
"MIT"
] | null | null | null |
from backpack.core.derivatives.conv2d import Conv2DDerivatives
from backpack.extensions.secondorder.hbp.hbp_options import (
BackpropStrategy,
ExpectationApproximation,
)
from backpack.extensions.secondorder.hbp.hbpbase import HBPBaseModule
from backpack.utils import conv as convUtils
from backpack.utils.ein import einsum
class HBPConv2d(HBPBaseModule):
    """HBP (Kronecker-factored curvature) extension for Conv2d layers.

    For each parameter ("weight", "bias") it returns a list of Kronecker
    factors, combining a factor derived from the backpropagated quantity
    with (for the weight) a factor built from the unfolded layer input.
    """

    def __init__(self):
        super().__init__(derivatives=Conv2DDerivatives(), params=["weight", "bias"])

    def weight(self, ext, module, g_inp, g_out, backproped):
        """Return Kronecker factors of the weight block for the active strategy."""
        strategy = ext.get_backprop_strategy()
        if BackpropStrategy.is_batch_average(strategy):
            return self._weight_for_batch_average(ext, module, backproped)
        if BackpropStrategy.is_sqrt(strategy):
            return self._weight_for_sqrt(ext, module, backproped)

    # TODO: Require tests
    def _weight_for_batch_average(self, ext, module, backproped):
        """Backpropagated factor (batch-average form) plus the input factor."""
        factors = [self._factor_from_batch_average(module, backproped)]
        factors += self._factors_from_input(ext, module)
        return factors

    def _weight_for_sqrt(self, ext, module, backproped):
        """Backpropagated factor (square-root form) plus the input factor."""
        factors = [self._factor_from_sqrt(module, backproped)]
        factors += self._factors_from_input(ext, module)
        return factors

    def _factors_from_input(self, ext, module):
        """Yield the Kronecker factor built from the unfolded layer input."""
        patches = convUtils.unfold_func(module)(module.input0)
        batch_size = patches.size(0)
        if ExpectationApproximation.should_average_param_jac(ext.get_ea_strategy()):
            raise NotImplementedError("Undefined")
        yield einsum("bik,bjk->ij", (patches, patches)) / batch_size

    def _factor_from_sqrt(self, module, backproped):
        """Backpropagated Kronecker factor from the square-root quantity."""
        separated = convUtils.separate_channels_and_pixels(module, backproped)
        reduced = einsum("cbij->cbi", (separated,))
        return einsum("cbi,cbl->il", (reduced, reduced))

    def bias(self, ext, module, g_inp, g_out, backproped):
        """Return Kronecker factors of the bias block for the active strategy."""
        strategy = ext.get_backprop_strategy()
        if BackpropStrategy.is_batch_average(strategy):
            return self._bias_for_batch_average(module, backproped)
        if BackpropStrategy.is_sqrt(strategy):
            return self._bias_for_sqrt(module, backproped)

    def _bias_for_sqrt(self, module, backproped):
        return [self._factor_from_sqrt(module, backproped)]

    # TODO: Require tests
    def _bias_for_batch_average(self, module, backproped):
        return [self._factor_from_batch_average(module, backproped)]

    def _factor_from_batch_average(self, module, backproped):
        """Collapse the spatial axes of the backpropagated quantity by summation."""
        _, out_channels, out_x, out_y = module.output.size()
        num_pixels = out_x * out_y
        joint = backproped.view(out_channels, num_pixels, out_channels, num_pixels)
        return joint.sum([1, 3]).contiguous()
class HBPConv2dEfficient(HBPBaseModule):
    """HBP extension for Conv2d that shares work between weight and bias.

    The backpropagated Kronecker factor is the same for the weight and
    bias blocks, so whichever of ``weight``/``bias`` runs first caches it
    as a module attribute (name in ``self._attr``); the other call then
    pops the cache instead of recomputing.
    """

    def __init__(self):
        super().__init__(derivatives=Conv2DDerivatives(),
                         params=["weight", "bias"])
        # Module attribute name used to cache factors between weight()
        # and bias().
        self._attr = 'kron_factors_from_sqrt'

    def _set_bias_flag(self, module, value):
        # Flag meaning: bias() ran first and cached factors for weight().
        attr = '_bias_is_called_before_weight'
        setattr(module, attr, value)

    def _get_bias_flag(self, module):
        attr = '_bias_is_called_before_weight'
        return getattr(module, attr, False)

    def _set_weight_flag(self, module, value):
        # Flag meaning: weight() ran first and cached factors for bias().
        # NOTE(review): the attribute name ends in "_before_weight" but it
        # is consumed by bias(); "_weight_is_called_before_bias" was likely
        # intended. Harmless since setter and getter agree on the string.
        attr = '_weight_is_called_before_weight'
        setattr(module, attr, value)

    def _get_weight_flag(self, module):
        attr = '_weight_is_called_before_weight'
        return getattr(module, attr, False)

    def weight(self, ext, module, g_inp, g_out, backproped):
        """Return Kronecker factors for the weight block (cached variant)."""
        bp_strategy = ext.get_backprop_strategy()
        attr = self._attr
        kron_factors = None
        if not self._get_bias_flag(module):
            # First of the pair to run: compute and cache for bias().
            self._set_weight_flag(module, True)
            if BackpropStrategy.is_batch_average(bp_strategy):
                kron_factors = self._weight_for_batch_average(ext, module, backproped)
            elif BackpropStrategy.is_sqrt(bp_strategy):
                kron_factors = self._weight_for_sqrt(ext, module, backproped)
            setattr(module, attr, kron_factors)
        else:
            # bias() ran first: reuse its cached factors and clean up.
            kron_factors = getattr(module, attr)
            self._set_bias_flag(module, False)
            delattr(module, attr)
        # NOTE(review): ``+=`` extends the list in place; when weight()
        # runs first, the list cached on the module above is the same
        # object, so a following bias() call will also receive the input
        # factor (unlike HBPConv2d.bias) -- confirm this is intended.
        kron_factors += self._factors_from_input(ext, module)
        return kron_factors

    # TODO: Require tests
    def _weight_for_batch_average(self, ext, module, backproped):
        # Backpropagated factor only; the input factor is added by weight().
        kron_factors = [self._factor_from_batch_average(module, backproped)]
        return kron_factors

    def _weight_for_sqrt(self, ext, module, backproped):
        # Backpropagated factor only; the input factor is added by weight().
        kron_factors = [self._factor_from_sqrt(module, backproped)]
        return kron_factors

    def _factors_from_input(self, ext, module):
        """Yield the Kronecker factor built from the unfolded layer input."""
        X = convUtils.unfold_func(module)(module.input0)
        batch = X.size(0)
        ea_strategy = ext.get_ea_strategy()
        if ExpectationApproximation.should_average_param_jac(ea_strategy):
            raise NotImplementedError("Undefined")
        else:
            yield einsum('bik,bjk->ij', (X, X)) / batch

    def _factor_from_sqrt(self, module, backproped):
        """Backpropagated Kronecker factor from the square-root quantity."""
        sqrt_ggn = backproped
        sqrt_ggn = convUtils.separate_channels_and_pixels(module, sqrt_ggn)
        # NOTE(review): subscripts here are 'bijc...' while HBPConv2d uses
        # 'cbij...' -- confirm which layout separate_channels_and_pixels
        # returns in this code base.
        sqrt_ggn = einsum('bijc->bic', (sqrt_ggn, ))
        return einsum('bic,blc->il', (sqrt_ggn, sqrt_ggn))

    def bias(self, ext, module, g_inp, g_out, backproped):
        """Return Kronecker factors for the bias block (cached variant)."""
        bp_strategy = ext.get_backprop_strategy()
        attr = self._attr
        kron_factors = None
        if not self._get_weight_flag(module):
            # First of the pair to run: compute and cache for weight().
            self._set_bias_flag(module, True)
            if BackpropStrategy.is_batch_average(bp_strategy):
                kron_factors = self._bias_for_batch_average(module, backproped)
            elif BackpropStrategy.is_sqrt(bp_strategy):
                kron_factors = self._bias_for_sqrt(module, backproped)
            setattr(module, attr, kron_factors)
        else:
            # weight() ran first: reuse its cached factors and clean up.
            kron_factors = getattr(module, attr)
            self._set_weight_flag(module, False)
            delattr(module, attr)
        return kron_factors

    def _bias_for_sqrt(self, module, backproped):
        return [self._factor_from_sqrt(module, backproped)]

    # TODO: Require tests
    def _bias_for_batch_average(self, module, backproped):
        return [self._factor_from_batch_average(module, backproped)]

    def _factor_from_batch_average(self, module, backproped):
        """Collapse the spatial axes of the backpropagated quantity by summation."""
        _, out_c, out_x, out_y = module.output.size()
        out_pixels = out_x * out_y
        # sum over spatial coordinates
        result = backproped.view(out_c, out_pixels, out_c,
                                 out_pixels).sum([1, 3])
        return result.contiguous()
class HBPFRConv2d(HBPConv2dEfficient):
    """Variant of HBPConv2dEfficient that pairs consecutive calls.

    ``_factors_from_input`` and ``_factor_from_sqrt`` cache their first
    input / backpropagated quantity on the module; on the next call they
    contract the new quantity against the cached one and drop the cache.
    The batch-average backprop strategy is unsupported here.
    NOTE(review): presumably this builds cross terms between two backward
    passes ("FR") -- confirm the intended semantics with the authors.
    """

    def _weight_for_batch_average(self, ext, module, backproped):
        # Batch-average strategy is not defined for this variant.
        raise NotImplementedError("Undefined")

    def _bias_for_batch_average(self, module, backproped):
        # Batch-average strategy is not defined for this variant.
        raise NotImplementedError("Undefined")

    def _factors_from_input(self, ext, module):
        """Yield an input factor, pairing the current input with the cached one."""
        ea_strategy = ext.get_ea_strategy()
        if ExpectationApproximation.should_average_param_jac(ea_strategy):
            raise NotImplementedError("Undefined")
        else:
            attr = 'last_X'
            last_X = getattr(module, attr, None)
            X = convUtils.unfold_func(module)(module.input0)
            batch = X.size(0)
            if last_X is None:
                # First call of the pair: remember X, contract with itself.
                setattr(module, attr, X)
                yield einsum('bik,bjk->ij', (X, X)) / batch
            else:
                # Second call: contract against the cached X, then reset.
                delattr(module, attr)
                yield einsum('bik,bjk->ij', (X, last_X)) / batch

    def _factor_from_sqrt(self, module, backproped):
        """Backprop factor, pairing the current sqrt quantity with the cached one."""
        attr = 'last_sqrt_ggn'
        last_sqrt_ggn = getattr(module, attr, None)
        sqrt_ggn = backproped
        sqrt_ggn = convUtils.separate_channels_and_pixels(module, sqrt_ggn)
        sqrt_ggn = einsum('bijc->bic', (sqrt_ggn, ))
        if last_sqrt_ggn is None:
            # First call of the pair: remember the factor, contract with itself.
            setattr(module, attr, sqrt_ggn)
            return einsum('bic,blc->il', (sqrt_ggn, sqrt_ggn))
        else:
            # Second call: contract against the cached factor, then reset.
            delattr(module, attr)
            return einsum('bic,blc->il', (sqrt_ggn, last_sqrt_ggn))
EXTENSIONS = [HBPConv2d()]
| 36.682819
| 86
| 0.663985
| 1,005
| 8,327
| 5.150249
| 0.112438
| 0.095827
| 0.030139
| 0.022025
| 0.887558
| 0.845247
| 0.803323
| 0.781298
| 0.768161
| 0.750773
| 0
| 0.003012
| 0.242344
| 8,327
| 226
| 87
| 36.845133
| 0.817404
| 0.016453
| 0
| 0.69697
| 0
| 0
| 0.041672
| 0.017353
| 0
| 0
| 0
| 0.004425
| 0
| 1
| 0.169697
| false
| 0
| 0.030303
| 0.024242
| 0.351515
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
be02130f18d29216cf4b1a931264005f4a51370c
| 16,038
|
py
|
Python
|
grs/smac.py
|
Tristanovsk/grs
|
ba5da28f6df0438e15404324c3488c799fb81212
|
[
"MIT"
] | 4
|
2021-06-14T20:43:22.000Z
|
2021-07-05T09:32:41.000Z
|
grs/smac.py
|
Tristanovsk/grs
|
ba5da28f6df0438e15404324c3488c799fb81212
|
[
"MIT"
] | null | null | null |
grs/smac.py
|
Tristanovsk/grs
|
ba5da28f6df0438e15404324c3488c799fb81212
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
'''Correction for gaseous absorption based on SMAC method (Rahman and Dedieu, 1994)
'''
from math import *
import numpy as np
# =============================================================================================
def PdeZ(Z):
    """
    PdeZ : Atmospheric pressure (in hpa) as a function of altitude (in meters)
    """
    # Barometric formula: 1013.25 hPa at sea level, lapse rate 0.0065 K/m,
    # reference temperature 288.15 K, exponent 5.31.
    return 1013.25 * pow(1.0 - 0.0065 * Z / 288.15, 5.31)
# =============================================================================================
class coeff:
    '''
    Coefficient container for atmospheric correction with the SMAC method
    (Rahman and Dedieu, 1994).

    Reads one SMAC coefficient file (one spectral band) and exposes the
    gaseous-absorption, scattering and residual-term coefficients as
    attributes, in the order expected by ``smac_inv`` / ``smac_dir``.

    Written by O.Hagolle CNES, from the original SMAC C routine.
    '''

    def __init__(self, smac_filename):
        # Bug fix: the original used the Python-2-only builtin ``file``,
        # which raises NameError on Python 3; ``open`` is the equivalent.
        with open(smac_filename) as f:
            lines = f.readlines()

        # H2O
        temp = lines[0].strip().split()
        self.ah2o = float(temp[0])
        self.nh2o = float(temp[1])

        # O3
        temp = lines[1].strip().split()
        self.ao3 = float(temp[0])
        self.no3 = float(temp[1])

        # O2
        temp = lines[2].strip().split()
        self.ao2 = float(temp[0])
        self.no2 = float(temp[1])
        self.po2 = float(temp[2])

        # CO2
        temp = lines[3].strip().split()
        self.aco2 = float(temp[0])
        self.nco2 = float(temp[1])
        self.pco2 = float(temp[2])

        # NH4 (methane coefficients)
        temp = lines[4].strip().split()
        self.ach4 = float(temp[0])
        self.nch4 = float(temp[1])
        self.pch4 = float(temp[2])

        # NO2
        temp = lines[5].strip().split()
        self.ano2 = float(temp[0])
        self.nno2 = float(temp[1])
        self.pno2 = float(temp[2])

        # CO
        temp = lines[6].strip().split()
        self.aco = float(temp[0])
        self.nco = float(temp[1])
        self.pco = float(temp[2])

        # rayleigh and aerosol scattering
        temp = lines[7].strip().split()
        self.a0s = float(temp[0])
        self.a1s = float(temp[1])
        self.a2s = float(temp[2])
        self.a3s = float(temp[3])

        temp = lines[8].strip().split()
        self.a0T = float(temp[0])
        self.a1T = float(temp[1])
        self.a2T = float(temp[2])
        self.a3T = float(temp[3])

        temp = lines[9].strip().split()
        self.taur = float(temp[0])
        # NOTE(review): ``sr`` is read from the same column as ``taur``
        # (temp[0]); check the SMAC coefficient-file layout to confirm
        # whether temp[1] was intended. Behavior kept unchanged.
        self.sr = float(temp[0])

        temp = lines[10].strip().split()
        self.a0taup = float(temp[0])
        self.a1taup = float(temp[1])

        temp = lines[11].strip().split()
        self.wo = float(temp[0])
        self.gc = float(temp[1])

        temp = lines[12].strip().split()
        self.a0P = float(temp[0])
        self.a1P = float(temp[1])
        self.a2P = float(temp[2])

        temp = lines[13].strip().split()
        self.a3P = float(temp[0])
        self.a4P = float(temp[1])

        temp = lines[14].strip().split()
        self.Rest1 = float(temp[0])
        self.Rest2 = float(temp[1])

        temp = lines[15].strip().split()
        self.Rest3 = float(temp[0])
        self.Rest4 = float(temp[1])

        temp = lines[16].strip().split()
        self.Resr1 = float(temp[0])
        self.Resr2 = float(temp[1])
        self.Resr3 = float(temp[2])

        temp = lines[17].strip().split()
        self.Resa1 = float(temp[0])
        self.Resa2 = float(temp[1])

        temp = lines[18].strip().split()
        self.Resa3 = float(temp[0])
        self.Resa4 = float(temp[1])
# ======================================================================
def smac_inv(r_toa, tetas, phis, tetav, phiv, pressure, taup550, uo3, uh2o, coef):
    """
    r_surf = smac_inv(r_toa, tetas, phis, tetav, phiv, pressure, taup550, uo3, uh2o, coef)

    Inverse SMAC model: atmospheric correction of a top-of-atmosphere
    reflectance ``r_toa`` into a surface reflectance (TOA ==> surface).

    Arguments:
        r_toa        -- top-of-atmosphere reflectance
        tetas, phis  -- sun zenith / azimuth angles (degrees)
        tetav, phiv  -- view zenith / azimuth angles (degrees)
        pressure     -- surface pressure (hPa)
        taup550      -- aerosol optical depth at 550 nm
        uo3, uh2o    -- ozone and water-vapour contents (units as assumed
                        by the SMAC coefficient files -- TODO confirm)
        coef         -- a ``coeff`` instance with the band coefficients

    Returns the surface reflectance ``r_surf``.
    """
    # Unpack the SMAC band coefficients into locals.
    ah2o = coef.ah2o
    nh2o = coef.nh2o
    ao3 = coef.ao3
    no3 = coef.no3
    ao2 = coef.ao2
    no2 = coef.no2
    po2 = coef.po2
    aco2 = coef.aco2
    nco2 = coef.nco2
    pco2 = coef.pco2
    ach4 = coef.ach4
    nch4 = coef.nch4
    pch4 = coef.pch4
    ano2 = coef.ano2
    nno2 = coef.nno2
    pno2 = coef.pno2
    aco = coef.aco
    nco = coef.nco
    pco = coef.pco
    a0s = coef.a0s
    a1s = coef.a1s
    a2s = coef.a2s
    a3s = coef.a3s
    a0T = coef.a0T
    a1T = coef.a1T
    a2T = coef.a2T
    a3T = coef.a3T
    taur = coef.taur
    sr = coef.sr
    a0taup = coef.a0taup
    a1taup = coef.a1taup
    wo = coef.wo
    gc = coef.gc
    a0P = coef.a0P
    a1P = coef.a1P
    a2P = coef.a2P
    a3P = coef.a3P
    a4P = coef.a4P
    Rest1 = coef.Rest1
    Rest2 = coef.Rest2
    Rest3 = coef.Rest3
    Rest4 = coef.Rest4
    Resr1 = coef.Resr1
    Resr2 = coef.Resr2
    Resr3 = coef.Resr3
    Resa1 = coef.Resa1
    Resa2 = coef.Resa2
    Resa3 = coef.Resa3
    Resa4 = coef.Resa4

    # Degree <-> radian conversion factors.
    cdr = pi / 180
    crd = 180 / pi

    # /*------: computation of the SMAC surface reflectance :--------*/
    us = cos(tetas * cdr)
    uv = cos(tetav * cdr)
    Peq = pressure / 1013.25

    # /*------: 1) air mass */
    m = 1 / us + 1 / uv

    # /*------: 2) aerosol optical depth in the spectral band, taup :--------*/
    taup = (a0taup) + (a1taup) * taup550

    # /*------: 3) gaseous transmissions (downward and upward paths) :--------*/
    to3 = 1.
    th2o = 1.
    to2 = 1.
    tco2 = 1.
    tch4 = 1.
    # Pressure-scaled absorber amounts.
    uo2 = (Peq ** (po2))
    uco2 = (Peq ** (pco2))
    uch4 = (Peq ** (pch4))
    uno2 = (Peq ** (pno2))
    uco = (Peq ** (pco))

    # /*------: 4) if uh2o <= 0 and uo3 <=0 no gaseous absorption is computed :--------*/
    to3 = exp((ao3) * ((uo3 * m) ** (no3)))
    th2o = exp((ah2o) * ((uh2o * m) ** (nh2o)))
    to2 = exp((ao2) * ((uo2 * m) ** (no2)))
    tco2 = exp((aco2) * ((uco2 * m) ** (nco2)))
    tch4 = exp((ach4) * ((uch4 * m) ** (nch4)))
    tno2 = exp((ano2) * ((uno2 * m) ** (nno2)))
    tco = exp((aco) * ((uco * m) ** (nco)))
    # Total gaseous transmission (product over all absorbers).
    tg = th2o * to3 * to2 * tco2 * tch4 * tco * tno2

    # /*------: 5) Total scattering transmission :--------*/
    ttetas = (a0T) + (a1T) * taup550 / us + ((a2T) * Peq + (a3T)) / (1. + us)  # /* downward */
    ttetav = (a0T) + (a1T) * taup550 / uv + ((a2T) * Peq + (a3T)) / (1. + uv)  # /* upward */

    # /*------: 6) spherical albedo of the atmosphere :--------*/
    s = (a0s) * Peq + (a3s) + (a1s) * taup550 + (a2s) * (taup550 ** 2)

    # /*------: 7) scattering angle cosine :--------*/
    cksi = - ((us * uv) + (sqrt(1. - us * us) * sqrt(1. - uv * uv) * cos((phis - phiv) * cdr)))
    if (cksi < -1):
        # Clamp against rounding errors before acos().
        cksi = -1.0

    # /*------: 8) scattering angle in degree :--------*/
    ksiD = crd * acos(cksi)

    # /*------: 9) rayleigh atmospheric reflectance :--------*/
    ray_phase = 0.7190443 * (1. + (cksi * cksi)) + 0.0412742
    ray_ref = (taur * ray_phase) / (4 * us * uv)
    ray_ref = ray_ref * pressure / 1013.25
    taurz = (taur) * Peq

    # /*------: 10) Residu Rayleigh :--------*/
    Res_ray = Resr1 + Resr2 * taur * ray_phase / (us * uv) + Resr3 * ((taur * ray_phase / (us * uv)) ** 2)

    # /*------: 11) aerosol atmospheric reflectance :--------*/
    aer_phase = a0P + a1P * ksiD + a2P * ksiD * ksiD + a3P * (ksiD ** 3) + a4P * (ksiD ** 4)

    ak2 = (1. - wo) * (3. - wo * 3 * gc)
    ak = sqrt(ak2)
    e = -3 * us * us * wo / (4 * (1. - ak2 * us * us))
    f = -(1. - wo) * 3 * gc * us * us * wo / (4 * (1. - ak2 * us * us))
    dp = e / (3 * us) + us * f
    d = e + f
    b = 2 * ak / (3. - wo * 3 * gc)
    delta = np.exp(ak * taup) * (1. + b) * (1. + b) - np.exp(-ak * taup) * (1. - b) * (1. - b)
    ww = wo / 4.
    ss = us / (1. - ak2 * us * us)
    q1 = 2. + 3 * us + (1. - wo) * 3 * gc * us * (1. + 2 * us)
    q2 = 2. - 3 * us - (1. - wo) * 3 * gc * us * (1. - 2 * us)
    q3 = q2 * np.exp(-taup / us)
    c1 = ((ww * ss) / delta) * (q1 * np.exp(ak * taup) * (1. + b) + q3 * (1. - b))
    c2 = -((ww * ss) / delta) * (q1 * np.exp(-ak * taup) * (1. - b) + q3 * (1. + b))
    cp1 = c1 * ak / (3. - wo * 3 * gc)
    cp2 = -c2 * ak / (3. - wo * 3 * gc)
    z = d - wo * 3 * gc * uv * dp + wo * aer_phase / 4.
    x = c1 - wo * 3 * gc * uv * cp1
    y = c2 - wo * 3 * gc * uv * cp2
    aa1 = uv / (1. + ak * uv)
    aa2 = uv / (1. - ak * uv)
    aa3 = us * uv / (us + uv)

    aer_ref = x * aa1 * (1. - np.exp(-taup / aa1))
    aer_ref = aer_ref + y * aa2 * (1. - np.exp(-taup / aa2))
    aer_ref = aer_ref + z * aa3 * (1. - np.exp(-taup / aa3))
    aer_ref = aer_ref / (us * uv)

    # /*------: 12) Residu Aerosol :--------*/
    Res_aer = (Resa1 + Resa2 * (taup * m * cksi) + Resa3 * ((taup * m * cksi) ** 2)) + Resa4 * ((taup * m * cksi) ** 3)

    # /*------: 13) molecule / aerosol coupling term :--------*/
    tautot = taup + taurz
    Res_6s = (Rest1 + Rest2 * (tautot * m * cksi) + Rest3 * ((tautot * m * cksi) ** 2)) + Rest4 * (
        (tautot * m * cksi) ** 3)

    # /*------: 14) total atmospheric reflectance :--------*/
    atm_ref = ray_ref - Res_ray + aer_ref - Res_aer + Res_6s

    # /*------: 15) Surface reflectance :--------*/
    # Invert the standard TOA reflectance equation for r_surf.
    r_surf = r_toa - (atm_ref * tg)
    r_surf = r_surf / ((tg * ttetas * ttetav) + (r_surf * s))

    return r_surf
# =======================================================================================================
def smac_dir(r_surf, tetas, phis, tetav, phiv, pressure, taup550, uo3, uh2o, coef):
    """Apply atmospheric effects: propagate a surface reflectance to top of atmosphere.

    Direct SMAC model (inverse operation of ``smac_inv``):
    r_toa = smac_dir(r_surf, tetas, phis, tetav, phiv, pressure, taup550, uo3, uh2o, coef)

    Parameters
    ----------
    r_surf : float
        Surface reflectance.
    tetas, phis : float
        Sun zenith and azimuth angles, in degrees.
    tetav, phiv : float
        View zenith and azimuth angles, in degrees.
    pressure : float
        Surface pressure, in hPa (1013.25 = standard atmosphere).
    taup550 : float
        Aerosol optical depth at 550 nm.
    uo3 : float
        Ozone content (cm.atm) — assumed unit, TODO confirm against coefficient files.
    uh2o : float
        Water vapour content (g/cm^2) — assumed unit, TODO confirm.
    coef : object
        SMAC coefficient set for the spectral band (attributes ah2o, nh2o, ao3, ...).

    Returns
    -------
    float
        Top-of-atmosphere reflectance.
    """
    # Unpack the band-specific SMAC coefficients into locals.
    ah2o = coef.ah2o
    nh2o = coef.nh2o
    ao3 = coef.ao3
    no3 = coef.no3
    ao2 = coef.ao2
    no2 = coef.no2
    po2 = coef.po2
    aco2 = coef.aco2
    nco2 = coef.nco2
    pco2 = coef.pco2
    ach4 = coef.ach4
    nch4 = coef.nch4
    pch4 = coef.pch4
    ano2 = coef.ano2
    nno2 = coef.nno2
    pno2 = coef.pno2
    aco = coef.aco
    nco = coef.nco
    pco = coef.pco
    a0s = coef.a0s
    a1s = coef.a1s
    a2s = coef.a2s
    a3s = coef.a3s
    a0T = coef.a0T
    a1T = coef.a1T
    a2T = coef.a2T
    a3T = coef.a3T
    taur = coef.taur
    sr = coef.sr
    a0taup = coef.a0taup
    a1taup = coef.a1taup
    wo = coef.wo
    gc = coef.gc
    a0P = coef.a0P
    a1P = coef.a1P
    a2P = coef.a2P
    a3P = coef.a3P
    a4P = coef.a4P
    Rest1 = coef.Rest1
    Rest2 = coef.Rest2
    Rest3 = coef.Rest3
    Rest4 = coef.Rest4
    Resr1 = coef.Resr1
    Resr2 = coef.Resr2
    Resr3 = coef.Resr3
    Resa1 = coef.Resa1
    Resa2 = coef.Resa2
    Resa3 = coef.Resa3
    Resa4 = coef.Resa4
    cdr = pi / 180  # degrees -> radians
    crd = 180 / pi  # radians -> degrees
    # /*------: computation of the TOA reflectance (direct SMAC model) :--------*/
    us = cos(tetas * cdr)  # cosine of sun zenith angle
    uv = cos(tetav * cdr)  # cosine of view zenith angle
    Peq = pressure / 1013.25  # pressure relative to standard atmosphere
    # /*------: 1) air mass :--------*/
    m = 1 / us + 1 / uv
    # /*------: 2) aerosol optical depth in the spectral band, taup :--------*/
    taup = (a0taup) + (a1taup) * taup550
    # /*------: 3) gaseous transmissions (downward and upward paths) :--------*/
    # Pressure-scaled absorber amounts for the well-mixed gases.
    uo2 = (Peq ** (po2))
    uco2 = (Peq ** (pco2))
    uch4 = (Peq ** (pch4))
    uno2 = (Peq ** (pno2))
    uco = (Peq ** (pco))
    # NOTE(review): the original comment mentioned skipping gaseous absorption
    # when uh2o <= 0 and uo3 <= 0, but no such guard exists here; absorption is
    # always computed. The former "t = 1." pre-initialisations were dead code
    # (unconditionally overwritten below) and have been removed.
    to3 = exp((ao3) * ((uo3 * m) ** (no3)))
    th2o = exp((ah2o) * ((uh2o * m) ** (nh2o)))
    to2 = exp((ao2) * ((uo2 * m) ** (no2)))
    tco2 = exp((aco2) * ((uco2 * m) ** (nco2)))
    tch4 = exp((ach4) * ((uch4 * m) ** (nch4)))
    tno2 = exp((ano2) * ((uno2 * m) ** (nno2)))
    tco = exp((aco) * ((uco * m) ** (nco)))
    tg = th2o * to3 * to2 * tco2 * tch4 * tco * tno2  # total gaseous transmission
    # /*------: 4) total scattering transmission :--------*/
    ttetas = (a0T) + (a1T) * taup550 / us + ((a2T) * Peq + (a3T)) / (1. + us)  # downward
    ttetav = (a0T) + (a1T) * taup550 / uv + ((a2T) * Peq + (a3T)) / (1. + uv)  # upward
    # /*------: 5) spherical albedo of the atmosphere :--------*/
    s = (a0s) * Peq + (a3s) + (a1s) * taup550 + (a2s) * (taup550 ** 2)
    # /*------: 6) scattering angle cosine :--------*/
    cksi = - ((us * uv) + (sqrt(1. - us * us) * sqrt(1. - uv * uv) * cos((phis - phiv - 360) * cdr)))
    if (cksi < -1):
        # Clamp rounding error below -1 so acos stays defined.
        cksi = -1.0
    # /*------: 7) scattering angle in degrees :--------*/
    ksiD = crd * acos(cksi)
    # /*------: 8) Rayleigh atmospheric reflectance :--------*/
    ray_phase = 0.7190443 * (1. + (cksi * cksi)) + 0.0412742
    ray_ref = (taur * ray_phase) / (4 * us * uv)
    # Scale to actual surface pressure (kept as pressure/1013.25 rather than Peq
    # to preserve the original floating-point evaluation order exactly).
    ray_ref = ray_ref * pressure / 1013.25
    taurz = (taur) * Peq  # pressure-scaled Rayleigh optical depth
    # /*------: 9) Rayleigh residual :--------*/
    Res_ray = Resr1 + Resr2 * taur * ray_phase / (us * uv) + Resr3 * ((taur * ray_phase / (us * uv)) ** 2)
    # /*------: 10) aerosol atmospheric reflectance :--------*/
    aer_phase = a0P + a1P * ksiD + a2P * ksiD * ksiD + a3P * (ksiD ** 3) + a4P * (ksiD ** 4)
    ak2 = (1. - wo) * (3. - wo * 3 * gc)
    ak = sqrt(ak2)
    e = -3 * us * us * wo / (4 * (1. - ak2 * us * us))
    f = -(1. - wo) * 3 * gc * us * us * wo / (4 * (1. - ak2 * us * us))
    dp = e / (3 * us) + us * f
    d = e + f
    b = 2 * ak / (3. - wo * 3 * gc)
    delta = np.exp(ak * taup) * (1. + b) * (1. + b) - np.exp(-ak * taup) * (1. - b) * (1. - b)
    ww = wo / 4.
    ss = us / (1. - ak2 * us * us)
    q1 = 2. + 3 * us + (1. - wo) * 3 * gc * us * (1. + 2 * us)
    q2 = 2. - 3 * us - (1. - wo) * 3 * gc * us * (1. - 2 * us)
    q3 = q2 * np.exp(-taup / us)
    c1 = ((ww * ss) / delta) * (q1 * np.exp(ak * taup) * (1. + b) + q3 * (1. - b))
    c2 = -((ww * ss) / delta) * (q1 * np.exp(-ak * taup) * (1. - b) + q3 * (1. + b))
    cp1 = c1 * ak / (3. - wo * 3 * gc)
    cp2 = -c2 * ak / (3. - wo * 3 * gc)
    z = d - wo * 3 * gc * uv * dp + wo * aer_phase / 4.
    x = c1 - wo * 3 * gc * uv * cp1
    y = c2 - wo * 3 * gc * uv * cp2
    aa1 = uv / (1. + ak * uv)
    aa2 = uv / (1. - ak * uv)
    aa3 = us * uv / (us + uv)
    aer_ref = x * aa1 * (1. - np.exp(-taup / aa1))
    aer_ref = aer_ref + y * aa2 * (1. - np.exp(-taup / aa2))
    aer_ref = aer_ref + z * aa3 * (1. - np.exp(-taup / aa3))
    aer_ref = aer_ref / (us * uv)
    # /*------: 11) aerosol residual :--------*/
    Res_aer = (Resa1 + Resa2 * (taup * m * cksi) + Resa3 * ((taup * m * cksi) ** 2)) + Resa4 * ((taup * m * cksi) ** 3)
    # /*------: 12) molecule / aerosol coupling term :--------*/
    tautot = taup + taurz
    Res_6s = (Rest1 + Rest2 * (tautot * m * cksi) + Rest3 * ((tautot * m * cksi) ** 2)) + Rest4 * (
            (tautot * m * cksi) ** 3)
    # /*------: 13) total atmospheric reflectance :--------*/
    atm_ref = ray_ref - Res_ray + aer_ref - Res_aer + Res_6s
    # /*------: 14) TOA reflectance :--------*/
    r_toa = r_surf * tg * ttetas * ttetav / (1 - r_surf * s) + (atm_ref * tg)
    return r_toa
# =============================================================================
if __name__ == "__main__":
    # Round-trip example: invert a TOA reflectance to the surface, then
    # re-apply the atmosphere; the final value should match the input.
    sun_zenith = 45
    view_zenith = 5
    sun_azimuth = 200
    view_azimuth = -160
    r_toa = 0.2
    # Load the SMAC coefficients for the chosen sensor band.
    coef_path = 'COEFS/coef_FORMOSAT2_B1_CONT.dat'
    coefs = coeff(coef_path)
    bd = 1
    r_surf = smac_inv(r_toa, sun_zenith, sun_azimuth, view_zenith, view_azimuth,
                      1013, 0.1, 0.3, 0.3, coefs)
    r_toa2 = smac_dir(r_surf, sun_zenith, sun_azimuth, view_zenith, view_azimuth,
                      1013, 0.1, 0.3, 0.3, coefs)
    print(r_toa, r_surf, r_toa2)
| 33.764211
| 119
| 0.454358
| 2,109
| 16,038
| 3.400664
| 0.135135
| 0.061489
| 0.027886
| 0.037089
| 0.744004
| 0.712214
| 0.701339
| 0.701339
| 0.701339
| 0.701339
| 0
| 0.081735
| 0.317247
| 16,038
| 474
| 120
| 33.835443
| 0.573242
| 0.232074
| 0
| 0.706587
| 0
| 0
| 0.003302
| 0.002642
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011976
| false
| 0
| 0.005988
| 0
| 0.02994
| 0.002994
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
be03180ecc7afb7cb1c981fad68e1a717d440c30
| 535
|
py
|
Python
|
opera/core/keypoint/__init__.py
|
hikvisionresearch/opera
|
0fb345a7ad0046c6fd674959c0ae19a65adeeacf
|
[
"Apache-2.0"
] | 5
|
2022-03-24T03:08:49.000Z
|
2022-03-30T02:29:38.000Z
|
opera/core/keypoint/__init__.py
|
hikvisionresearch/opera
|
0fb345a7ad0046c6fd674959c0ae19a65adeeacf
|
[
"Apache-2.0"
] | null | null | null |
opera/core/keypoint/__init__.py
|
hikvisionresearch/opera
|
0fb345a7ad0046c6fd674959c0ae19a65adeeacf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Hikvision Research Institute. All rights reserved.
from .transforms import (distance2keypoint, transpose_and_gather_feat,
gaussian_radius, draw_umich_gaussian,
draw_short_range_offset, weighted_neg_loss,
bbox_kpt2result, kpt_mapping_back)
__all__ = [
'distance2keypoint', 'transpose_and_gather_feat', 'gaussian_radius',
'draw_umich_gaussian', 'draw_short_range_offset', 'weighted_neg_loss',
'bbox_kpt2result', 'kpt_mapping_back'
]
| 44.583333
| 74
| 0.699065
| 56
| 535
| 6.107143
| 0.553571
| 0.152047
| 0.169591
| 0.204678
| 0.777778
| 0.777778
| 0.777778
| 0.777778
| 0.777778
| 0.777778
| 0
| 0.009639
| 0.224299
| 535
| 11
| 75
| 48.636364
| 0.814458
| 0.119626
| 0
| 0
| 0
| 0
| 0.313433
| 0.102345
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.